author gracehoney <31743510+aaroey@users.noreply.github.com> 2018-07-17 07:54:31 -0700
committer gracehoney <31743510+aaroey@users.noreply.github.com> 2018-07-17 07:54:31 -0700
commit f340242952de5c4ef2ae78c891490248e5948a1f (patch)
tree d4092132e4b30b205a5906dc87c6b59a0bd489bb
parent 86f632e29810fa93db559f882567b9569dabfad5 (diff)
parent e1fb7a248bb2d932a0bed6fc1d2d9e3d91b50e89 (diff)
Fix conflicts with upstream
-rw-r--r-- CONTRIBUTING.md | 2
-rw-r--r-- README.md | 2
-rw-r--r-- RELEASE.md | 38
-rw-r--r-- configure.py | 57
-rw-r--r-- tensorflow/BUILD | 25
-rw-r--r-- tensorflow/c/c_api.cc | 19
-rw-r--r-- tensorflow/c/c_api_experimental.cc | 27
-rw-r--r-- tensorflow/c/c_api_experimental.h | 14
-rw-r--r-- tensorflow/c/c_api_test.cc | 65
-rw-r--r-- tensorflow/c/c_test_util.cc | 7
-rw-r--r-- tensorflow/c/c_test_util.h | 3
-rw-r--r-- tensorflow/c/eager/c_api.cc | 38
-rw-r--r-- tensorflow/c/eager/tape.h | 7
-rw-r--r-- tensorflow/c/python_api.cc | 2
-rw-r--r-- tensorflow/cc/framework/scope.cc | 30
-rw-r--r-- tensorflow/cc/framework/scope_internal.h | 3
-rw-r--r-- tensorflow/cc/framework/scope_test.cc | 10
-rw-r--r-- tensorflow/cc/gradients/array_grad.cc | 52
-rw-r--r-- tensorflow/cc/gradients/array_grad_test.cc | 7
-rw-r--r-- tensorflow/cc/saved_model/BUILD | 30
-rw-r--r-- tensorflow/cc/saved_model/loader.cc | 70
-rw-r--r-- tensorflow/cc/saved_model/reader.cc | 88
-rw-r--r-- tensorflow/cc/saved_model/reader.h | 39
-rw-r--r-- tensorflow/cc/saved_model/reader_test.cc | 108
-rw-r--r-- tensorflow/compiler/jit/BUILD | 2
-rw-r--r-- tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc | 14
-rw-r--r-- tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc | 7
-rw-r--r-- tensorflow/compiler/jit/kernels/xla_launch_op.cc | 13
-rw-r--r-- tensorflow/compiler/jit/xla_compile_on_demand_op.cc | 11
-rw-r--r-- tensorflow/compiler/jit/xla_cpu_device.cc | 1
-rw-r--r-- tensorflow/compiler/jit/xla_device.cc | 70
-rw-r--r-- tensorflow/compiler/jit/xla_device.h | 22
-rw-r--r-- tensorflow/compiler/jit/xla_device_context.cc | 242
-rw-r--r-- tensorflow/compiler/jit/xla_device_context.h | 19
-rw-r--r-- tensorflow/compiler/jit/xla_device_ops.h | 32
-rw-r--r-- tensorflow/compiler/jit/xla_gpu_device.cc | 1
-rw-r--r-- tensorflow/compiler/jit/xla_interpreter_device.cc | 1
-rw-r--r-- tensorflow/compiler/jit/xla_launch_util.cc | 60
-rw-r--r-- tensorflow/compiler/jit/xla_launch_util.h | 9
-rw-r--r-- tensorflow/compiler/jit/xla_tensor.cc | 30
-rw-r--r-- tensorflow/compiler/jit/xla_tensor.h | 26
-rw-r--r-- tensorflow/compiler/tests/BUILD | 150
-rw-r--r-- tensorflow/compiler/tests/adadelta_test.py | 134
-rw-r--r-- tensorflow/compiler/tests/adagrad_da_test.py | 165
-rw-r--r-- tensorflow/compiler/tests/adagrad_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/adam_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/adamax_test.py | 139
-rw-r--r-- tensorflow/compiler/tests/addsign_test.py | 142
-rw-r--r-- tensorflow/compiler/tests/binary_ops_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/bucketize_op_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/categorical_op_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/cholesky_op_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/clustering_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/concat_ops_test.py | 8
-rw-r--r-- tensorflow/compiler/tests/conv2d_test.py | 11
-rw-r--r-- tensorflow/compiler/tests/conv3d_test.py | 6
-rw-r--r-- tensorflow/compiler/tests/depthwise_conv_op_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/dynamic_slice_ops_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/dynamic_stitch_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/eager_test.py | 56
-rw-r--r-- tensorflow/compiler/tests/extract_image_patches_op_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/fake_quant_ops_test.py | 10
-rw-r--r-- tensorflow/compiler/tests/fft_test.py | 10
-rw-r--r-- tensorflow/compiler/tests/fifo_queue_test.py | 201
-rw-r--r-- tensorflow/compiler/tests/ftrl_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/function_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/fused_batchnorm_test.py | 16
-rw-r--r-- tensorflow/compiler/tests/gather_nd_op_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/gather_test.py | 14
-rw-r--r-- tensorflow/compiler/tests/image_ops_test.py | 12
-rw-r--r-- tensorflow/compiler/tests/lrn_ops_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/matrix_band_part_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/matrix_triangular_solve_op_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/momentum_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/nary_ops_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/nullary_ops_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/placeholder_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/pooling_ops_3d_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/pooling_ops_test.py | 6
-rw-r--r-- tensorflow/compiler/tests/powersign_test.py | 142
-rw-r--r-- tensorflow/compiler/tests/proximal_adagrad_test.py | 172
-rw-r--r-- tensorflow/compiler/tests/proximal_gradient_descent_test.py | 156
-rw-r--r-- tensorflow/compiler/tests/qr_op_test.py | 112
-rw-r--r-- tensorflow/compiler/tests/random_ops_test.py | 8
-rw-r--r-- tensorflow/compiler/tests/reduce_ops_test.py | 6
-rw-r--r-- tensorflow/compiler/tests/reduce_window_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/reverse_ops_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/reverse_sequence_op_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/rmsprop_test.py | 121
-rw-r--r-- tensorflow/compiler/tests/scan_ops_test.py | 6
-rw-r--r-- tensorflow/compiler/tests/scatter_nd_op_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/segment_reduction_ops_test.py | 98
-rw-r--r-- tensorflow/compiler/tests/slice_ops_test.py | 6
-rw-r--r-- tensorflow/compiler/tests/sort_ops_test.py | 25
-rw-r--r-- tensorflow/compiler/tests/spacetobatch_op_test.py | 6
-rw-r--r-- tensorflow/compiler/tests/sparse_to_dense_op_test.py | 118
-rw-r--r-- tensorflow/compiler/tests/stack_ops_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/stateless_random_ops_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/ternary_ops_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/unary_ops_test.py | 316
-rw-r--r-- tensorflow/compiler/tests/variable_ops_test.py | 6
-rw-r--r-- tensorflow/compiler/tests/while_test.py | 4
-rw-r--r-- tensorflow/compiler/tests/xla_device_test.py | 4
-rw-r--r-- tensorflow/compiler/tf2xla/BUILD | 16
-rw-r--r-- tensorflow/compiler/tf2xla/functionalize_control_flow.cc | 35
-rw-r--r-- tensorflow/compiler/tf2xla/functionalize_control_flow_test.cc | 56
-rw-r--r-- tensorflow/compiler/tf2xla/graph_compiler.cc | 11
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/BUILD | 13
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/aggregate_ops.cc | 3
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/batch_matmul_op.cc | 5
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/batch_norm_op.cc | 55
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc | 11
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/bcast_ops.cc | 2
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/bias_ops.cc | 8
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/binary_ops.cc | 120
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/bucketize_op.cc | 21
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/cast_op.cc | 10
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/categorical_op.cc | 25
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/cholesky_op.cc | 7
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/clip_by_value_op.cc | 8
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/concat_op.cc | 5
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/const_op.cc | 31
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/conv_ops.cc | 61
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/cross_op.cc | 21
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/cwise_ops.cc | 18
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/depthtospace_op.cc | 8
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/diag_op.cc | 76
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/dynamic_slice_ops.cc | 4
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/dynamic_stitch_op.cc | 8
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/elu_op.cc | 30
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/extract_image_patches_op.cc | 16
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/fake_quantize_ops.cc | 59
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/fft_ops.cc | 4
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/fill_op.cc | 5
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/gather_op.cc | 7
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/if_op.cc | 11
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/image_ops.cc | 159
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/image_resize_ops.cc | 95
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/index_ops.cc | 12
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/index_ops_cpu.cc | 18
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/l2loss_op.cc | 8
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/listdiff_op.cc | 7
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/lrn_ops.cc | 39
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/matmul_op.cc | 11
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/matrix_band_part_op.cc | 37
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/matrix_set_diag_op.cc | 21
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/matrix_triangular_solve_op.cc | 8
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/mirror_pad_op.cc | 11
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/pack_op.cc | 6
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/pad_op.cc | 6
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/pooling_ops.cc | 101
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/qr_op.cc | 47
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/quantize_and_dequantize_op.cc | 138
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/random_ops.cc | 178
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/reduce_window_op.cc | 11
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/reduction_ops.cc | 34
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/reduction_ops.h | 1
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc | 17
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/relu_op.cc | 22
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/reshape_op.cc | 6
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/retval_op.cc | 26
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/reverse_op.cc | 7
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/reverse_sequence_op.cc | 105
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/scan_ops.cc | 11
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/scatter_nd_op.cc | 5
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/segment_reduction_ops.cc | 108
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/select_op.cc | 9
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/sendrecv_ops.cc | 4
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/sequence_ops.cc | 2
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/shape_op.cc | 9
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/slice_op.cc | 7
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/softmax_op.cc | 82
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/sort_ops.cc | 3
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc | 9
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/spacetodepth_op.cc | 8
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/sparse_to_dense_op.cc | 88
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/split_op.cc | 7
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/stack_ops.cc | 35
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc | 185
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc | 21
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc | 44
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/tile_ops.cc | 10
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/topk_op.cc | 112
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/training_ops.cc | 579
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/transpose_op.cc | 7
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/unary_ops.cc | 248
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/unpack_op.cc | 8
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/variable_ops.cc | 24
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/while_op.cc | 17
-rw-r--r-- tensorflow/compiler/tf2xla/legacy_flags/backend_registration_flags.cc | 63
-rw-r--r-- tensorflow/compiler/tf2xla/legacy_flags/backend_registration_flags.h | 49
-rw-r--r-- tensorflow/compiler/tf2xla/lib/BUILD | 39
-rw-r--r-- tensorflow/compiler/tf2xla/lib/batch_dot.cc | 166
-rw-r--r-- tensorflow/compiler/tf2xla/lib/batch_dot.h | 7
-rw-r--r-- tensorflow/compiler/tf2xla/lib/cholesky.cc | 317
-rw-r--r-- tensorflow/compiler/tf2xla/lib/cholesky.h | 3
-rw-r--r-- tensorflow/compiler/tf2xla/lib/qr.cc | 387
-rw-r--r-- tensorflow/compiler/tf2xla/lib/qr.h | 40
-rw-r--r-- tensorflow/compiler/tf2xla/lib/random.cc | 28
-rw-r--r-- tensorflow/compiler/tf2xla/lib/random.h | 6
-rw-r--r-- tensorflow/compiler/tf2xla/lib/scatter.cc | 52
-rw-r--r-- tensorflow/compiler/tf2xla/lib/triangular_solve.cc | 1104
-rw-r--r-- tensorflow/compiler/tf2xla/lib/triangular_solve.h | 22
-rw-r--r-- tensorflow/compiler/tf2xla/lib/triangular_solve_test.cc | 106
-rw-r--r-- tensorflow/compiler/tf2xla/lib/util.cc | 265
-rw-r--r-- tensorflow/compiler/tf2xla/lib/util.h | 54
-rw-r--r-- tensorflow/compiler/tf2xla/lib/util_test.cc | 24
-rw-r--r-- tensorflow/compiler/tf2xla/lib/while_loop.cc | 27
-rw-r--r-- tensorflow/compiler/tf2xla/literal_util.cc | 2
-rw-r--r-- tensorflow/compiler/tf2xla/literal_util.h | 2
-rw-r--r-- tensorflow/compiler/tf2xla/literal_util_test.cc | 5
-rw-r--r-- tensorflow/compiler/tf2xla/tf2xla_test.cc | 5
-rw-r--r-- tensorflow/compiler/tf2xla/xla_compiler.cc | 102
-rw-r--r-- tensorflow/compiler/tf2xla/xla_compiler.h | 9
-rw-r--r-- tensorflow/compiler/tf2xla/xla_compiler_test.cc | 77
-rw-r--r-- tensorflow/compiler/tf2xla/xla_context.cc | 42
-rw-r--r-- tensorflow/compiler/tf2xla/xla_context.h | 11
-rw-r--r-- tensorflow/compiler/tf2xla/xla_cpu_backend.cc | 4
-rw-r--r-- tensorflow/compiler/tf2xla/xla_gpu_backend.cc | 13
-rw-r--r-- tensorflow/compiler/tf2xla/xla_helpers.cc | 198
-rw-r--r-- tensorflow/compiler/tf2xla/xla_helpers.h | 40
-rw-r--r-- tensorflow/compiler/tf2xla/xla_op_kernel.cc | 106
-rw-r--r-- tensorflow/compiler/tf2xla/xla_op_kernel.h | 48
-rw-r--r-- tensorflow/compiler/tf2xla/xla_op_registry.cc | 8
-rw-r--r-- tensorflow/compiler/tf2xla/xla_op_registry.h | 2
-rw-r--r-- tensorflow/compiler/tf2xla/xla_op_registry_test.cc | 33
-rw-r--r-- tensorflow/compiler/tf2xla/xla_resource.cc | 25
-rw-r--r-- tensorflow/compiler/xla/BUILD | 65
-rw-r--r-- tensorflow/compiler/xla/client/BUILD | 2
-rw-r--r-- tensorflow/compiler/xla/client/client.cc | 2
-rw-r--r-- tensorflow/compiler/xla/client/client.h | 2
-rw-r--r-- tensorflow/compiler/xla/client/lib/BUILD | 110
-rw-r--r-- tensorflow/compiler/xla/client/lib/arithmetic.cc | 180
-rw-r--r-- tensorflow/compiler/xla/client/lib/arithmetic.h | 17
-rw-r--r-- tensorflow/compiler/xla/client/lib/constants.cc | 103
-rw-r--r-- tensorflow/compiler/xla/client/lib/constants.h | 124
-rw-r--r-- tensorflow/compiler/xla/client/lib/constants_test.cc | 159
-rw-r--r-- tensorflow/compiler/xla/client/lib/math.cc | 272
-rw-r--r-- tensorflow/compiler/xla/client/lib/math.h | 57
-rw-r--r-- tensorflow/compiler/xla/client/lib/math_test.cc | 140
-rw-r--r-- tensorflow/compiler/xla/client/lib/numeric.cc | 104
-rw-r--r-- tensorflow/compiler/xla/client/lib/numeric.h | 37
-rw-r--r-- tensorflow/compiler/xla/client/lib/numeric_test.cc | 37
-rw-r--r-- tensorflow/compiler/xla/client/lib/prng.cc | 150
-rw-r--r-- tensorflow/compiler/xla/client/lib/prng.h | 34
-rw-r--r-- tensorflow/compiler/xla/client/lib/testing.cc | 8
-rw-r--r-- tensorflow/compiler/xla/client/xla_client/BUILD | 7
-rw-r--r-- tensorflow/compiler/xla/client/xla_client/xla_builder.cc | 1021
-rw-r--r-- tensorflow/compiler/xla/client/xla_client/xla_builder.h | 1432
-rw-r--r-- tensorflow/compiler/xla/client/xla_client/xla_builder_test.cc | 172
-rw-r--r-- tensorflow/compiler/xla/layout_util.cc | 6
-rw-r--r-- tensorflow/compiler/xla/literal.cc | 1967
-rw-r--r-- tensorflow/compiler/xla/literal.h | 1152
-rw-r--r-- tensorflow/compiler/xla/literal_comparison.cc | 5
-rw-r--r-- tensorflow/compiler/xla/literal_comparison.h | 2
-rw-r--r-- tensorflow/compiler/xla/literal_test.cc (renamed from tensorflow/compiler/xla/literal_util_test.cc) | 540
-rw-r--r-- tensorflow/compiler/xla/literal_util.cc | 2111
-rw-r--r-- tensorflow/compiler/xla/literal_util.h | 1171
-rw-r--r-- tensorflow/compiler/xla/overflow_util.h | 50
-rw-r--r-- tensorflow/compiler/xla/packed_literal_reader.cc | 2
-rw-r--r-- tensorflow/compiler/xla/packed_literal_reader.h | 2
-rw-r--r-- tensorflow/compiler/xla/python/BUILD | 5
-rw-r--r-- tensorflow/compiler/xla/python/local_computation_builder.cc | 229
-rw-r--r-- tensorflow/compiler/xla/python/local_computation_builder.h | 10
-rw-r--r-- tensorflow/compiler/xla/python/local_computation_builder.i | 9
-rw-r--r-- tensorflow/compiler/xla/python/numpy_bridge.cc | 5
-rw-r--r-- tensorflow/compiler/xla/python/numpy_bridge.h | 2
-rw-r--r-- tensorflow/compiler/xla/python/xla_client.py | 20
-rw-r--r-- tensorflow/compiler/xla/python/xla_client_test.py | 15
-rw-r--r-- tensorflow/compiler/xla/reference_util.cc | 5
-rw-r--r-- tensorflow/compiler/xla/reference_util_test.cc | 46
-rw-r--r-- tensorflow/compiler/xla/rpc/grpc_client_test.cc | 16
-rw-r--r-- tensorflow/compiler/xla/service/BUILD | 119
-rw-r--r-- tensorflow/compiler/xla/service/algebraic_simplifier.cc | 180
-rw-r--r-- tensorflow/compiler/xla/service/algebraic_simplifier_test.cc | 351
-rw-r--r-- tensorflow/compiler/xla/service/batchnorm_expander.cc | 56
-rw-r--r-- tensorflow/compiler/xla/service/batchnorm_expander_test.cc | 31
-rw-r--r-- tensorflow/compiler/xla/service/bfloat16_propagation.cc | 156
-rw-r--r-- tensorflow/compiler/xla/service/bfloat16_propagation.h | 14
-rw-r--r-- tensorflow/compiler/xla/service/bfloat16_propagation_test.cc | 70
-rw-r--r-- tensorflow/compiler/xla/service/bfloat16_support.cc | 1
-rw-r--r-- tensorflow/compiler/xla/service/buffer_assignment.cc | 19
-rw-r--r-- tensorflow/compiler/xla/service/buffer_assignment_test.cc | 152
-rw-r--r-- tensorflow/compiler/xla/service/buffer_liveness_test.cc | 29
-rw-r--r-- tensorflow/compiler/xla/service/call_graph_test.cc | 10
-rw-r--r-- tensorflow/compiler/xla/service/call_inliner.cc | 9
-rw-r--r-- tensorflow/compiler/xla/service/call_inliner_test.cc | 22
-rw-r--r-- tensorflow/compiler/xla/service/computation_placer.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/conditional_simplifier.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/conditional_simplifier_test.cc | 23
-rw-r--r-- tensorflow/compiler/xla/service/copy_insertion.cc | 26
-rw-r--r-- tensorflow/compiler/xla/service/copy_insertion.h | 8
-rw-r--r-- tensorflow/compiler/xla/service/copy_insertion_test.cc | 146
-rw-r--r-- tensorflow/compiler/xla/service/cpu/BUILD | 40
-rw-r--r-- tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc | 8
-rw-r--r-- tensorflow/compiler/xla/service/cpu/cpu_compiler.cc | 35
-rw-r--r-- tensorflow/compiler/xla/service/cpu/cpu_copy_insertion_test.cc | 8
-rw-r--r-- tensorflow/compiler/xla/service/cpu/cpu_instruction_fusion_test.cc | 12
-rw-r--r-- tensorflow/compiler/xla/service/cpu/cpu_layout_assignment_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc | 7
-rw-r--r-- tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h | 5
-rw-r--r-- tensorflow/compiler/xla/service/cpu/external_constant_pool.cc | 50
-rw-r--r-- tensorflow/compiler/xla/service/cpu/external_constant_pool.h | 65
-rw-r--r-- tensorflow/compiler/xla/service/cpu/external_constant_pool_test.cc | 82
-rw-r--r-- tensorflow/compiler/xla/service/cpu/ir_emitter.cc | 724
-rw-r--r-- tensorflow/compiler/xla/service/cpu/ir_emitter.h | 25
-rw-r--r-- tensorflow/compiler/xla/service/cpu/parallel_task_assignment_test.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/cpu/sample_harness.cc | 15
-rw-r--r-- tensorflow/compiler/xla/service/cpu/simple_orc_jit.cc | 7
-rw-r--r-- tensorflow/compiler/xla/service/cpu/simple_orc_jit.h | 6
-rw-r--r-- tensorflow/compiler/xla/service/cpu/tests/BUILD | 6
-rw-r--r-- tensorflow/compiler/xla/service/cpu/tests/cpu_codegen_test.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/cpu/tests/cpu_external_constants_test.cc | 7
-rw-r--r-- tensorflow/compiler/xla/service/cpu/tests/cpu_fusion_test.cc | 42
-rw-r--r-- tensorflow/compiler/xla/service/cpu/tests/cpu_infeed_test.cc | 130
-rw-r--r-- tensorflow/compiler/xla/service/cpu/tests/cpu_literal_caching_test.cc | 22
-rw-r--r-- tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/cpu/tests/cpu_outfeed_test.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/defuser_test.cc | 6
-rw-r--r-- tensorflow/compiler/xla/service/dfs_hlo_visitor.h | 5
-rw-r--r-- tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h | 7
-rw-r--r-- tensorflow/compiler/xla/service/elemental_ir_emitter.cc | 116
-rw-r--r-- tensorflow/compiler/xla/service/elemental_ir_emitter_test.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/flatten_call_graph_test.cc | 14
-rw-r--r-- tensorflow/compiler/xla/service/gather_expander.cc | 3
-rw-r--r-- tensorflow/compiler/xla/service/generic_transfer_manager.cc | 10
-rw-r--r-- tensorflow/compiler/xla/service/generic_transfer_manager.h | 3
-rw-r--r-- tensorflow/compiler/xla/service/gpu/BUILD | 46
-rw-r--r-- tensorflow/compiler/xla/service/gpu/conditional_thunk.cc | 21
-rw-r--r-- tensorflow/compiler/xla/service/gpu/conditional_thunk.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/convolution_thunk.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/gpu/convolution_thunk.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/copy_thunk.cc | 9
-rw-r--r-- tensorflow/compiler/xla/service/gpu/copy_thunk.h | 7
-rw-r--r-- tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_rewriter.cc | 44
-rw-r--r-- tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc | 14
-rw-r--r-- tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.h | 10
-rw-r--r-- tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc | 8
-rw-r--r-- tensorflow/compiler/xla/service/gpu/cudnn_convolution_rewriter.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/gpu/fft_thunk.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/gpu/fft_thunk.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/for_thunk.cc | 18
-rw-r--r-- tensorflow/compiler/xla/service/gpu/for_thunk.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/gemm_thunk.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/gemm_thunk.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/gpu_compiler.cc | 35
-rw-r--r-- tensorflow/compiler/xla/service/gpu/gpu_executable.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/gpu/gpu_layout_assignment.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/gpu/gpu_layout_assignment_test.cc | 12
-rw-r--r-- tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc | 148
-rw-r--r-- tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h | 14
-rw-r--r-- tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.cc | 73
-rw-r--r-- tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h | 52
-rw-r--r-- tensorflow/compiler/xla/service/gpu/hlo_schedule.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/gpu/infeed_manager.cc | 69
-rw-r--r-- tensorflow/compiler/xla/service/gpu/infeed_manager.h | 82
-rw-r--r-- tensorflow/compiler/xla/service/gpu/infeed_thunk.cc | 102
-rw-r--r-- tensorflow/compiler/xla/service/gpu/infeed_thunk.h | 15
-rw-r--r-- tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc | 8
-rw-r--r-- tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc | 19
-rw-r--r-- tensorflow/compiler/xla/service/gpu/ir_emission_utils.h | 16
-rw-r--r-- tensorflow/compiler/xla/service/gpu/ir_emitter.cc | 42
-rw-r--r-- tensorflow/compiler/xla/service/gpu/ir_emitter.h | 1
-rw-r--r-- tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc | 1152
-rw-r--r-- tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h | 65
-rw-r--r-- tensorflow/compiler/xla/service/gpu/kernel_thunk.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/gpu/kernel_thunk.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD | 1
-rw-r--r-- tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/gpu/memset_thunk.cc | 10
-rw-r--r-- tensorflow/compiler/xla/service/gpu/memset_thunk.h | 7
-rw-r--r-- tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc | 128
-rw-r--r-- tensorflow/compiler/xla/service/gpu/multi_output_fusion.h | 3
-rw-r--r-- tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc | 94
-rw-r--r-- tensorflow/compiler/xla/service/gpu/outfeed_manager.cc | 32
-rw-r--r-- tensorflow/compiler/xla/service/gpu/outfeed_manager.h | 69
-rw-r--r-- tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc | 111
-rw-r--r-- tensorflow/compiler/xla/service/gpu/outfeed_thunk.h | 52
-rw-r--r-- tensorflow/compiler/xla/service/gpu/pad_insertion.cc | 11
-rw-r--r-- tensorflow/compiler/xla/service/gpu/sequential_thunk.cc | 8
-rw-r--r-- tensorflow/compiler/xla/service/gpu/sequential_thunk.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/stream_executor_util.h | 1
-rw-r--r-- tensorflow/compiler/xla/service/gpu/thunk.h | 7
-rw-r--r-- tensorflow/compiler/xla/service/gpu/tuple_thunk.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/gpu/tuple_thunk.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/while_thunk.cc | 32
-rw-r--r-- tensorflow/compiler/xla/service/gpu/while_thunk.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/while_transformer.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/gpu/while_transformer_test.cc | 16
-rw-r--r-- tensorflow/compiler/xla/service/gpu/xfeed_queue.h | 89
-rw-r--r-- tensorflow/compiler/xla/service/graphviz_example.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/heap_simulator_test.cc | 6
-rw-r--r-- tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc | 81
-rw-r--r-- tensorflow/compiler/xla/service/hlo_computation.cc | 141
-rw-r--r-- tensorflow/compiler/xla/service/hlo_computation.h | 24
-rw-r--r-- tensorflow/compiler/xla/service/hlo_computation_test.cc | 86
-rw-r--r-- tensorflow/compiler/xla/service/hlo_constant_folding.cc | 10
-rw-r--r-- tensorflow/compiler/xla/service/hlo_constant_folding_test.cc | 16
-rw-r--r-- tensorflow/compiler/xla/service/hlo_cost_analysis.cc | 8
-rw-r--r-- tensorflow/compiler/xla/service/hlo_cost_analysis.h | 5
-rw-r--r-- tensorflow/compiler/xla/service/hlo_cost_analysis_test.cc | 143
-rw-r--r-- tensorflow/compiler/xla/service/hlo_creation_utils.cc | 9
-rw-r--r-- tensorflow/compiler/xla/service/hlo_creation_utils_test.cc | 46
-rw-r--r-- tensorflow/compiler/xla/service/hlo_cse.cc | 8
-rw-r--r-- tensorflow/compiler/xla/service/hlo_cse_test.cc | 97
-rw-r--r-- tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc | 62
-rw-r--r-- tensorflow/compiler/xla/service/hlo_dataflow_analysis.h | 3
-rw-r--r-- tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc | 193
-rw-r--r-- tensorflow/compiler/xla/service/hlo_dce_test.cc | 37
-rw-r--r-- tensorflow/compiler/xla/service/hlo_domain_map.cc | 13
-rw-r--r-- tensorflow/compiler/xla/service/hlo_domain_map.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/hlo_domain_metadata.h | 6
-rw-r--r-- tensorflow/compiler/xla/service/hlo_domain_remover.cc | 48
-rw-r--r-- tensorflow/compiler/xla/service/hlo_domain_remover.h | 11
-rw-r--r-- tensorflow/compiler/xla/service/hlo_domain_test.cc | 164
-rw-r--r-- tensorflow/compiler/xla/service/hlo_domain_verifier.cc | 124
-rw-r--r-- tensorflow/compiler/xla/service/hlo_domain_verifier.h | 65
-rw-r--r-- tensorflow/compiler/xla/service/hlo_element_type_converter.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_element_type_converter_test.cc | 6
-rw-r--r-- tensorflow/compiler/xla/service/hlo_evaluator.cc | 180
-rw-r--r-- tensorflow/compiler/xla/service/hlo_evaluator.h | 11
-rw-r--r-- tensorflow/compiler/xla/service/hlo_evaluator_test.cc | 395
-rw-r--r-- tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h | 213
-rw-r--r-- tensorflow/compiler/xla/service/hlo_graph_dumper.cc | 7
-rw-r--r-- tensorflow/compiler/xla/service/hlo_graph_dumper_test.cc | 3
-rw-r--r-- tensorflow/compiler/xla/service/hlo_instruction.cc | 307
-rw-r--r-- tensorflow/compiler/xla/service/hlo_instruction.h | 95
-rw-r--r-- tensorflow/compiler/xla/service/hlo_instruction_test.cc | 127
-rw-r--r-- tensorflow/compiler/xla/service/hlo_instructions.cc | 276
-rw-r--r-- tensorflow/compiler/xla/service/hlo_instructions.h | 111
-rw-r--r-- tensorflow/compiler/xla/service/hlo_liveness_analysis_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_matchers.h | 3
-rw-r--r-- tensorflow/compiler/xla/service/hlo_matchers_test.cc | 7
-rw-r--r-- tensorflow/compiler/xla/service/hlo_module.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/hlo_module_group_metadata.cc | 50
-rw-r--r-- tensorflow/compiler/xla/service/hlo_module_group_metadata.h | 16
-rw-r--r-- tensorflow/compiler/xla/service/hlo_module_group_util.cc | 26
-rw-r--r-- tensorflow/compiler/xla/service/hlo_module_test.cc | 8
-rw-r--r-- tensorflow/compiler/xla/service/hlo_opcode.h | 3
-rw-r--r-- tensorflow/compiler/xla/service/hlo_opcode_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_ordering_test.cc | 6
-rw-r--r-- tensorflow/compiler/xla/service/hlo_parser.cc | 119
-rw-r--r-- tensorflow/compiler/xla/service/hlo_parser_test.cc | 110
-rw-r--r-- tensorflow/compiler/xla/service/hlo_query.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_reachability_test.cc | 10
-rw-r--r-- tensorflow/compiler/xla/service/hlo_rematerialization.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_rematerialization_test.cc | 8
-rw-r--r-- tensorflow/compiler/xla/service/hlo_scheduling_test.cc | 40
-rw-r--r-- tensorflow/compiler/xla/service/hlo_sharding.cc | 21
-rw-r--r-- tensorflow/compiler/xla/service/hlo_sharding.h | 11
-rw-r--r-- tensorflow/compiler/xla/service/hlo_sharding_metadata.cc | 72
-rw-r--r-- tensorflow/compiler/xla/service/hlo_sharding_metadata.h | 23
-rw-r--r-- tensorflow/compiler/xla/service/hlo_sharding_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_subcomputation_unification_test.cc | 6
-rw-r--r-- tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_tfgraph_builder_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_value.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_verifier.cc | 216
-rw-r--r-- tensorflow/compiler/xla/service/hlo_verifier.h | 5
-rw-r--r-- tensorflow/compiler/xla/service/hlo_verifier_test.cc | 51
-rw-r--r-- tensorflow/compiler/xla/service/implicit_broadcast_remover_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/indexed_array_analysis.cc | 180
-rw-r--r-- tensorflow/compiler/xla/service/indexed_array_analysis.h | 12
-rw-r--r-- tensorflow/compiler/xla/service/indexed_array_analysis_test.cc | 165
-rw-r--r-- tensorflow/compiler/xla/service/inliner_test.cc | 28
-rw-r--r-- tensorflow/compiler/xla/service/instruction_fusion.cc | 3
-rw-r--r-- tensorflow/compiler/xla/service/instruction_fusion_test.cc | 18
-rw-r--r-- tensorflow/compiler/xla/service/interpreter/BUILD | 2
-rw-r--r-- tensorflow/compiler/xla/service/interpreter/executable.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/layout_assignment.cc | 26
-rw-r--r-- tensorflow/compiler/xla/service/layout_assignment_test.cc | 43
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/BUILD | 39
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc | 7
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/alias_analysis_test.cc | 83
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc | 19
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h | 7
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/ir_array.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/ir_array.h | 12
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h | 18
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc | 118
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h | 80
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc | 15
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/llvm_util.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/logical_buffer_analysis.cc | 21
-rw-r--r-- tensorflow/compiler/xla/service/logical_buffer_analysis.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/multi_output_fusion.cc | 25
-rw-r--r-- tensorflow/compiler/xla/service/multi_output_fusion.h | 8
-rw-r--r-- tensorflow/compiler/xla/service/name_uniquer.cc | 17
-rw-r--r-- tensorflow/compiler/xla/service/name_uniquer.h | 36
-rw-r--r-- tensorflow/compiler/xla/service/name_uniquer_test.cc | 29
-rw-r--r-- tensorflow/compiler/xla/service/pattern_matcher.h | 78
-rw-r--r-- tensorflow/compiler/xla/service/pattern_matcher_test.cc | 18
-rw-r--r-- tensorflow/compiler/xla/service/platform_util.cc | 13
-rw-r--r-- tensorflow/compiler/xla/service/reshape_mover.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/reshape_mover_test.cc | 15
-rw-r--r-- tensorflow/compiler/xla/service/shape_inference.cc | 68
-rw-r--r-- tensorflow/compiler/xla/service/shape_inference.h | 14
-rw-r--r-- tensorflow/compiler/xla/service/shape_inference_test.cc | 122
-rw-r--r-- tensorflow/compiler/xla/service/transfer_manager.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/transfer_manager.h | 12
-rw-r--r-- tensorflow/compiler/xla/service/transpose_folding_test.cc | 8
-rw-r--r-- tensorflow/compiler/xla/service/tuple_points_to_analysis.cc | 39
-rw-r--r-- tensorflow/compiler/xla/service/tuple_points_to_analysis.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc | 97
-rw-r--r-- tensorflow/compiler/xla/service/tuple_simplifier_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/while_loop_invariant_code_motion_test.cc | 43
-rw-r--r-- tensorflow/compiler/xla/service/while_loop_simplifier_test.cc | 14
-rw-r--r-- tensorflow/compiler/xla/service/while_util.cc | 9
-rw-r--r-- tensorflow/compiler/xla/service/while_util_test.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/zero_sized_hlo_elimination.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/zero_sized_hlo_elimination_test.cc | 13
-rw-r--r-- tensorflow/compiler/xla/shape_layout.cc | 8
-rw-r--r-- tensorflow/compiler/xla/shape_layout.h | 4
-rw-r--r-- tensorflow/compiler/xla/shape_util.cc | 150
-rw-r--r-- tensorflow/compiler/xla/shape_util.h | 50
-rw-r--r-- tensorflow/compiler/xla/shape_util_test.cc | 52
-rw-r--r-- tensorflow/compiler/xla/statusor.h | 286
-rw-r--r-- tensorflow/compiler/xla/tests/BUILD | 76
-rw-r--r-- tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc | 1445
-rw-r--r-- tensorflow/compiler/xla/tests/axpy_simple_test.cc | 32
-rw-r--r-- tensorflow/compiler/xla/tests/bad_rng_shape_validation_test.cc | 12
-rw-r--r-- tensorflow/compiler/xla/tests/batch_normalization_test.cc | 279
-rw-r--r-- tensorflow/compiler/xla/tests/bfloat16_test.cc | 67
-rw-r--r-- tensorflow/compiler/xla/tests/binop_scaling_test.cc | 41
-rw-r--r-- tensorflow/compiler/xla/tests/bitcast_convert_test.cc | 52
-rw-r--r-- tensorflow/compiler/xla/tests/broadcast_simple_test.cc | 320
-rw-r--r-- tensorflow/compiler/xla/tests/broadcast_test.cc | 79
-rw-r--r-- tensorflow/compiler/xla/tests/call_test.cc | 64
-rw-r--r-- tensorflow/compiler/xla/tests/check_execution_arity_test.cc | 22
-rw-r--r-- tensorflow/compiler/xla/tests/client_library_test_base.cc | 44
-rw-r--r-- tensorflow/compiler/xla/tests/client_library_test_base.h | 66
-rw-r--r-- tensorflow/compiler/xla/tests/client_test.cc | 22
-rw-r--r-- tensorflow/compiler/xla/tests/compilation_cache_test.cc | 30
-rw-r--r-- tensorflow/compiler/xla/tests/compute_constant_test.cc | 48
-rw-r--r-- tensorflow/compiler/xla/tests/concat_test.cc | 246
-rw-r--r-- tensorflow/compiler/xla/tests/conditional_test.cc | 480
-rw-r--r-- tensorflow/compiler/xla/tests/constants_test.cc | 53
-rw-r--r-- tensorflow/compiler/xla/tests/convert_test.cc | 149
-rw-r--r-- tensorflow/compiler/xla/tests/convolution_dimension_numbers_test.cc | 12
-rw-r--r-- tensorflow/compiler/xla/tests/convolution_test.cc | 167
-rw-r--r-- tensorflow/compiler/xla/tests/convolution_variants_test.cc | 494
-rw-r--r-- tensorflow/compiler/xla/tests/copy_test.cc | 29
-rw-r--r-- tensorflow/compiler/xla/tests/cross_replica_sum_test.cc | 16
-rw-r--r-- tensorflow/compiler/xla/tests/custom_call_test.cc | 11
-rw-r--r-- tensorflow/compiler/xla/tests/deallocation_test.cc | 23
-rw-r--r-- tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc | 40
-rw-r--r-- tensorflow/compiler/xla/tests/deep_graph_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/tests/dot_operation_test.cc | 424
-rw-r--r-- tensorflow/compiler/xla/tests/dynamic_ops_test.cc | 111
-rw-r--r-- tensorflow/compiler/xla/tests/execution_profile_test.cc | 4
-rw-r--r-- tensorflow/compiler/xla/tests/exhaustive_f32_elementwise_op_test.cc | 16
-rw-r--r-- tensorflow/compiler/xla/tests/filecheck.cc | 5
-rw-r--r-- tensorflow/compiler/xla/tests/floor_ceil_test.cc | 12
-rw-r--r-- tensorflow/compiler/xla/tests/fmax_test.cc | 10
-rw-r--r-- tensorflow/compiler/xla/tests/fusion_test.cc | 198
-rw-r--r-- tensorflow/compiler/xla/tests/gather_operation_test.cc | 205
-rw-r--r-- tensorflow/compiler/xla/tests/half_test.cc | 91
-rw-r--r-- tensorflow/compiler/xla/tests/hlo_metadata_test.cc | 6
-rw-r--r-- tensorflow/compiler/xla/tests/hlo_test_base.cc | 10
-rw-r--r-- tensorflow/compiler/xla/tests/hlo_test_base.h | 7
-rw-r--r-- tensorflow/compiler/xla/tests/literal_test_util.h | 31
-rw-r--r-- tensorflow/compiler/xla/tests/literal_test_util_test.cc | 46
-rw-r--r-- tensorflow/compiler/xla/tests/llvm_compiler_test.cc | 5
-rw-r--r-- tensorflow/compiler/xla/tests/llvm_irgen_test_base.cc | 27
-rw-r--r-- tensorflow/compiler/xla/tests/llvm_irgen_test_base.h | 8
-rw-r--r-- tensorflow/compiler/xla/tests/local_client_allocation_test.cc | 16
-rw-r--r-- tensorflow/compiler/xla/tests/local_client_aot_test_helper.cc | 10
-rw-r--r-- tensorflow/compiler/xla/tests/local_client_execute_test.cc | 337
-rw-r--r-- tensorflow/compiler/xla/tests/log_test.cc | 8
-rw-r--r-- tensorflow/compiler/xla/tests/map_test.cc | 250
-rw-r--r-- tensorflow/compiler/xla/tests/matrix_ops_simple_test.cc | 86
-rw-r--r-- tensorflow/compiler/xla/tests/multidimensional_slice_test.cc | 9
-rw-r--r-- tensorflow/compiler/xla/tests/multioutput_fusion_test.cc | 123
-rw-r--r-- tensorflow/compiler/xla/tests/pad_test.cc | 67
-rw-r--r-- tensorflow/compiler/xla/tests/params_test.cc | 168
-rw-r--r-- tensorflow/compiler/xla/tests/pred_test.cc | 71
-rw-r--r-- tensorflow/compiler/xla/tests/prng_test.cc | 40
-rw-r--r-- tensorflow/compiler/xla/tests/query_inferred_shape_test.cc | 4
-rw-r--r-- tensorflow/compiler/xla/tests/reduce_hlo_test.cc | 30
-rw-r--r-- tensorflow/compiler/xla/tests/reduce_precision_test.cc | 49
-rw-r--r-- tensorflow/compiler/xla/tests/reduce_test.cc | 224
-rw-r--r-- tensorflow/compiler/xla/tests/reduce_window_test.cc | 195
-rw-r--r-- tensorflow/compiler/xla/tests/replay_test.cc | 24
-rw-r--r-- tensorflow/compiler/xla/tests/reshape_motion_test.cc | 12
-rw-r--r-- tensorflow/compiler/xla/tests/reshape_test.cc | 404
-rw-r--r-- tensorflow/compiler/xla/tests/reverse_test.cc | 8
-rw-r--r-- tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/tests/round_trip_transfer_test.cc | 50
-rw-r--r-- tensorflow/compiler/xla/tests/scalar_computations_test.cc | 363
-rw-r--r-- tensorflow/compiler/xla/tests/select_and_scatter_test.cc | 193
-rw-r--r-- tensorflow/compiler/xla/tests/select_test.cc | 138
-rw-r--r-- tensorflow/compiler/xla/tests/slice_test.cc | 67
-rw-r--r-- tensorflow/compiler/xla/tests/test_utils.cc | 12
-rw-r--r-- tensorflow/compiler/xla/tests/test_utils.h | 2
-rw-r--r-- tensorflow/compiler/xla/tests/test_utils_test.cc | 29
-rw-r--r-- tensorflow/compiler/xla/tests/token_hlo_test.cc | 58
-rw-r--r-- tensorflow/compiler/xla/tests/transfer_manager_test.cc | 90
-rw-r--r-- tensorflow/compiler/xla/tests/transpose_test.cc | 57
-rw-r--r-- tensorflow/compiler/xla/tests/tuple_test.cc | 331
-rw-r--r-- tensorflow/compiler/xla/tests/unary_op_test.cc | 130
-rw-r--r-- tensorflow/compiler/xla/tests/vector_ops_reduce_test.cc | 85
-rw-r--r-- tensorflow/compiler/xla/tests/vector_ops_simple_test.cc | 214
-rw-r--r-- tensorflow/compiler/xla/tests/while_test.cc | 714
-rw-r--r-- tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc | 95
-rw-r--r-- tensorflow/compiler/xla/text_literal_reader.cc | 2
-rw-r--r-- tensorflow/compiler/xla/text_literal_reader.h | 2
-rw-r--r-- tensorflow/compiler/xla/text_literal_reader_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/text_literal_writer.cc | 2
-rw-r--r-- tensorflow/compiler/xla/text_literal_writer.h | 2
-rw-r--r-- tensorflow/compiler/xla/text_literal_writer_test.cc | 6
-rw-r--r-- tensorflow/compiler/xla/tools/BUILD | 6
-rw-r--r-- tensorflow/compiler/xla/tools/replay_computation.cc | 10
-rw-r--r-- tensorflow/compiler/xla/tools/show_literal.cc | 2
-rw-r--r-- tensorflow/compiler/xla/tools/show_text_literal.cc | 2
-rw-r--r-- tensorflow/compiler/xla/util.h | 18
-rw-r--r-- tensorflow/contrib/BUILD | 34
-rw-r--r-- tensorflow/contrib/__init__.py | 3
-rw-r--r-- tensorflow/contrib/autograph/README.md | 2
-rw-r--r-- tensorflow/contrib/autograph/__init__.py | 7
-rw-r--r-- tensorflow/contrib/autograph/converters/BUILD | 48
-rw-r--r-- tensorflow/contrib/autograph/converters/__init__.py | 14
-rw-r--r-- tensorflow/contrib/autograph/converters/asserts.py | 8
-rw-r--r-- tensorflow/contrib/autograph/converters/asserts_test.py | 4
-rw-r--r-- tensorflow/contrib/autograph/converters/break_statements.py | 35
-rw-r--r-- tensorflow/contrib/autograph/converters/break_statements_test.py | 62
-rw-r--r-- tensorflow/contrib/autograph/converters/builtin_functions_test.py | 60
-rw-r--r-- tensorflow/contrib/autograph/converters/call_trees_test.py | 76
-rw-r--r-- tensorflow/contrib/autograph/converters/conditional_expressions.py | 129
-rw-r--r-- tensorflow/contrib/autograph/converters/conditional_expressions_test.py | 53
-rw-r--r-- tensorflow/contrib/autograph/converters/continue_statements_test.py | 48
-rw-r--r-- tensorflow/contrib/autograph/converters/control_flow.py | 165
-rw-r--r-- tensorflow/contrib/autograph/converters/control_flow_test.py | 211
-rw-r--r-- tensorflow/contrib/autograph/converters/decorators_test.py | 15
-rw-r--r-- tensorflow/contrib/autograph/converters/directives.py | 108
-rw-r--r-- tensorflow/contrib/autograph/converters/directives_test.py | 78
-rw-r--r-- tensorflow/contrib/autograph/converters/error_handlers.py | 52
-rw-r--r-- tensorflow/contrib/autograph/converters/error_handlers_test.py | 55
-rw-r--r-- tensorflow/contrib/autograph/converters/ifexp.py | 49
-rw-r--r-- tensorflow/contrib/autograph/converters/ifexp_test.py | 106
-rw-r--r-- tensorflow/contrib/autograph/converters/list_comprehension.py | 77
-rw-r--r-- tensorflow/contrib/autograph/converters/list_comprehensions.py | 82
-rw-r--r-- tensorflow/contrib/autograph/converters/list_comprehensions_test.py (renamed from tensorflow/contrib/autograph/converters/list_comprehension_test.py) | 40
-rw-r--r-- tensorflow/contrib/autograph/converters/lists.py | 30
-rw-r--r-- tensorflow/contrib/autograph/converters/lists_test.py | 78
-rw-r--r-- tensorflow/contrib/autograph/converters/logical_expressions_test.py | 13
-rw-r--r-- tensorflow/contrib/autograph/converters/name_scopes_test.py | 90
-rw-r--r-- tensorflow/contrib/autograph/converters/return_statements.py (renamed from tensorflow/contrib/autograph/converters/single_return.py) | 0
-rw-r--r-- tensorflow/contrib/autograph/converters/return_statements_test.py | 167
-rw-r--r-- tensorflow/contrib/autograph/converters/side_effect_guards_test.py | 132
-rw-r--r-- tensorflow/contrib/autograph/converters/single_return_test.py | 189
-rw-r--r-- tensorflow/contrib/autograph/converters/slices.py | 10
-rw-r--r-- tensorflow/contrib/autograph/converters/slices_test.py | 47
-rw-r--r-- tensorflow/contrib/autograph/core/BUILD | 36
-rw-r--r-- tensorflow/contrib/autograph/core/converter.py | 120
-rw-r--r-- tensorflow/contrib/autograph/core/converter_testing.py | 60
-rw-r--r-- tensorflow/contrib/autograph/core/errors.py | 272
-rw-r--r-- tensorflow/contrib/autograph/core/errors_test.py | 116
-rw-r--r-- tensorflow/contrib/autograph/examples/integration_tests/BUILD | 29
-rw-r--r-- tensorflow/contrib/autograph/examples/integration_tests/keras_test.py (renamed from tensorflow/contrib/autograph/utils/type_hints.py) | 34
-rw-r--r-- tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_collatz_speed_test.ipynb | 299
-rw-r--r-- tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_mnist_speed_test.ipynb | 652
-rw-r--r-- tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb | 1493
-rw-r--r-- tensorflow/contrib/autograph/examples/notebooks/rnn_keras_estimator.ipynb | 311
-rw-r--r-- tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb | 1129
-rw-r--r-- tensorflow/contrib/autograph/impl/api.py | 24
-rw-r--r-- tensorflow/contrib/autograph/impl/api_test.py | 15
-rw-r--r-- tensorflow/contrib/autograph/impl/conversion.py | 76
-rw-r--r-- tensorflow/contrib/autograph/impl/conversion_test.py | 8
-rw-r--r-- tensorflow/contrib/autograph/operators/__init__.py | 2
-rw-r--r-- tensorflow/contrib/autograph/pyct/BUILD | 13
-rw-r--r-- tensorflow/contrib/autograph/pyct/anno.py | 91
-rw-r--r-- tensorflow/contrib/autograph/pyct/anno_test.py | 23
-rw-r--r-- tensorflow/contrib/autograph/pyct/ast_util.py | 175
-rw-r--r-- tensorflow/contrib/autograph/pyct/ast_util_test.py | 142
-rw-r--r-- tensorflow/contrib/autograph/pyct/cfg.py | 817
-rw-r--r-- tensorflow/contrib/autograph/pyct/cfg_test.py | 969
-rw-r--r-- tensorflow/contrib/autograph/pyct/compiler.py | 97
-rw-r--r-- tensorflow/contrib/autograph/pyct/compiler_test.py | 4
-rw-r--r-- tensorflow/contrib/autograph/pyct/origin_info.py | 100
-rw-r--r-- tensorflow/contrib/autograph/pyct/qual_names.py | 28
-rw-r--r-- tensorflow/contrib/autograph/pyct/qual_names_test.py | 9
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/BUILD | 25
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/__init__.py | 12
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/activity.py | 226
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py | 76
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/annos.py | 10
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/cfg.py | 446
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/cfg_test.py | 303
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/live_values.py | 28
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py | 5
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/liveness.py | 200
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/liveness_test.py | 149
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py | 301
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions_test.py | 263
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/type_info.py | 48
-rw-r--r-- tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py | 5
-rw-r--r-- tensorflow/contrib/autograph/pyct/templates.py | 88
-rw-r--r-- tensorflow/contrib/autograph/pyct/templates_test.py | 6
-rw-r--r-- tensorflow/contrib/autograph/pyct/transformer.py | 139
-rw-r--r-- tensorflow/contrib/autograph/pyct/transformer_test.py | 77
-rw-r--r-- tensorflow/contrib/autograph/utils/BUILD | 1
-rw-r--r-- tensorflow/contrib/autograph/utils/__init__.py | 1
-rw-r--r-- tensorflow/contrib/batching/python/ops/batch_ops.py | 6
-rw-r--r-- tensorflow/contrib/bayesflow/python/ops/monte_carlo_impl.py | 2
-rw-r--r-- tensorflow/contrib/bigtable/BUILD | 213
-rw-r--r-- tensorflow/contrib/bigtable/README.md | 10
-rw-r--r-- tensorflow/contrib/bigtable/__init__.py (renamed from tensorflow/contrib/proto/python/kernel_tests/test_case.py) | 32
-rw-r--r-- tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc | 355
-rw-r--r-- tensorflow/contrib/bigtable/kernels/bigtable_lib.cc | 45
-rw-r--r-- tensorflow/contrib/bigtable/kernels/bigtable_lib.h | 143
-rw-r--r-- tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc | 221
-rw-r--r-- tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc | 104
-rw-r--r-- tensorflow/contrib/bigtable/kernels/bigtable_range_helpers.cc | 68
-rw-r--r-- tensorflow/contrib/bigtable/kernels/bigtable_range_helpers.h | 67
-rw-r--r-- tensorflow/contrib/bigtable/kernels/bigtable_range_helpers_test.cc | 107
-rw-r--r-- tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc | 112
-rw-r--r-- tensorflow/contrib/bigtable/kernels/bigtable_sample_key_pairs_dataset_op.cc | 200
-rw-r--r-- tensorflow/contrib/bigtable/kernels/bigtable_sample_keys_dataset_op.cc | 113
-rw-r--r-- tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc | 219
-rw-r--r-- tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.cc | 374
-rw-r--r-- tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.h | 87
-rw-r--r-- tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_op.cc | 78
-rw-r--r-- tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_test.cc | 345
-rw-r--r-- tensorflow/contrib/bigtable/ops/bigtable_ops.cc | 107
-rw-r--r-- tensorflow/contrib/bigtable/ops/bigtable_test_ops.cc | 27
-rw-r--r-- tensorflow/contrib/bigtable/python/kernel_tests/__init__.py | 20
-rw-r--r-- tensorflow/contrib/bigtable/python/kernel_tests/bigtable_ops_test.py | 272
-rw-r--r-- tensorflow/contrib/bigtable/python/ops/__init__.py | 20
-rw-r--r-- tensorflow/contrib/bigtable/python/ops/bigtable_api.py | 741
-rw-r--r-- tensorflow/contrib/boosted_trees/estimator_batch/BUILD | 15
-rw-r--r-- tensorflow/contrib/boosted_trees/estimator_batch/distillation_loss.py | 75
-rw-r--r-- tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator.py | 66
-rw-r--r-- tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator_test.py | 24
-rw-r--r-- tensorflow/contrib/boosted_trees/estimator_batch/estimator.py | 85
-rw-r--r-- tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py | 47
-rw-r--r-- tensorflow/contrib/boosted_trees/estimator_batch/model.py | 183
-rw-r--r-- tensorflow/contrib/boosted_trees/lib/learner/batch/base_split_handler.py | 4
-rw-r--r-- tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler.py | 12
-rw-r--r-- tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler_test.py | 135
-rw-r--r-- tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler.py | 62
-rw-r--r-- tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler_test.py | 271
-rw-r--r-- tensorflow/contrib/boosted_trees/lib/quantiles/weighted_quantiles_summary.h | 2
-rw-r--r-- tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc | 17
-rw-r--r-- tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc | 24
-rw-r--r-- tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py | 165
-rw-r--r-- tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch_test.py | 296
-rw-r--r-- tensorflow/contrib/boosted_trees/python/utils/losses.py | 67
-rw-r--r-- tensorflow/contrib/checkpoint/__init__.py | 2
-rw-r--r-- tensorflow/contrib/checkpoint/python/containers_test.py | 3
-rw-r--r-- tensorflow/contrib/cloud/BUILD | 1
-rw-r--r-- tensorflow/contrib/cloud/README.md | 18
-rw-r--r-- tensorflow/contrib/cloud/__init__.py | 13
-rw-r--r-- tensorflow/contrib/cluster_resolver/BUILD | 19
-rw-r--r-- tensorflow/contrib/cmake/CMakeLists.txt | 31
-rw-r--r-- tensorflow/contrib/cmake/external/nsync.cmake | 2
-rw-r--r-- tensorflow/contrib/cmake/python_modules.txt | 8
-rw-r--r-- tensorflow/contrib/cmake/tf_c.cmake | 13
-rw-r--r-- tensorflow/contrib/cmake/tf_core_framework.cmake | 1
-rwxr-xr-x tensorflow/contrib/cmake/tf_python.cmake | 27
-rw-r--r-- tensorflow/contrib/cmake/tf_stream_executor.cmake | 12
-rw-r--r-- tensorflow/contrib/cmake/tf_tests.cmake | 4
-rw-r--r-- tensorflow/contrib/copy_graph/python/util/copy_elements.py | 6
-rw-r--r-- tensorflow/contrib/data/__init__.py | 2
-rw-r--r-- tensorflow/contrib/data/kernels/prefetching_kernels.cc | 26
-rw-r--r-- tensorflow/contrib/data/ops/dataset_ops.cc | 2
-rw-r--r-- tensorflow/contrib/data/python/kernel_tests/BUILD | 26
-rw-r--r-- tensorflow/contrib/data/python/kernel_tests/batch_dataset_op_test.py | 4
-rw-r--r-- tensorflow/contrib/data/python/kernel_tests/bucketing_test.py | 39
-rw-r--r-- tensorflow/contrib/data/python/kernel_tests/map_dataset_op_test.py | 129
-rw-r--r-- tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py | 17
-rw-r--r-- tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py | 558
-rw-r--r-- tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py | 255
-rw-r--r-- tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py | 523
-rw-r--r-- tensorflow/contrib/data/python/ops/BUILD | 2
-rw-r--r-- tensorflow/contrib/data/python/ops/batching.py | 270
-rw-r--r-- tensorflow/contrib/data/python/ops/grouping.py | 136
-rw-r--r-- tensorflow/contrib/data/python/ops/prefetching_ops.py | 203
-rw-r--r-- tensorflow/contrib/data/python/ops/readers.py | 4
-rw-r--r-- tensorflow/contrib/data/python/ops/sliding.py | 69
-rw-r--r-- tensorflow/contrib/distribute/BUILD | 1
-rw-r--r-- tensorflow/contrib/distribute/__init__.py | 2
-rw-r--r-- tensorflow/contrib/distribute/python/BUILD | 1
-rw-r--r-- tensorflow/contrib/distribute/python/cross_tower_ops.py | 89
-rw-r--r-- tensorflow/contrib/distribute/python/cross_tower_ops_test.py | 88
-rw-r--r-- tensorflow/contrib/distribute/python/mirrored_strategy.py | 73
-rw-r--r-- tensorflow/contrib/distribute/python/mirrored_strategy_multigpu_test.py | 424
-rw-r--r-- tensorflow/contrib/distribute/python/multi_worker_strategy.py | 2
-rw-r--r-- tensorflow/contrib/distribute/python/one_device_strategy.py | 12
-rw-r--r-- tensorflow/contrib/distribute/python/prefetching_ops_v2.py | 3
-rw-r--r-- tensorflow/contrib/distribute/python/strategy_test_lib.py | 9
-rw-r--r-- tensorflow/contrib/distribute/python/tpu_strategy.py | 42
-rw-r--r-- tensorflow/contrib/distribute/python/values.py | 229
-rw-r--r-- tensorflow/contrib/distribute/python/values_test.py | 39
-rw-r--r-- tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py | 8
-rw-r--r-- tensorflow/contrib/eager/python/datasets.py | 1
-rw-r--r-- tensorflow/contrib/eager/python/examples/densenet/BUILD | 29
-rw-r--r-- tensorflow/contrib/eager/python/examples/densenet/densenet.py | 274
-rw-r--r-- tensorflow/contrib/eager/python/examples/densenet/densenet_test.py | 83
-rw-r--r-- tensorflow/contrib/eager/python/examples/gan/mnist.py | 14
-rw-r--r-- tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb | 1184
-rw-r--r-- tensorflow/contrib/eager/python/examples/generative_examples/text_generation.ipynb | 689
-rw-r--r-- tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb | 6
-rw-r--r-- tensorflow/contrib/eager/python/examples/notebooks/2_gradients.ipynb | 323
-rw-r--r-- tensorflow/contrib/eager/python/examples/notebooks/3_datasets.ipynb | 209
-rw-r--r-- tensorflow/contrib/eager/python/examples/notebooks/3_training_models.ipynb | 485
-rw-r--r-- tensorflow/contrib/eager/python/examples/notebooks/4_high_level.ipynb | 551
-rw-r--r-- tensorflow/contrib/eager/python/examples/notebooks/README.md | 11
-rw-r--r-- tensorflow/contrib/eager/python/examples/notebooks/automatic_differentiation.ipynb | 364
-rw-r--r-- tensorflow/contrib/eager/python/examples/notebooks/custom_layers.ipynb | 399
-rw-r--r-- tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb | 478
-rw-r--r-- tensorflow/contrib/eager/python/examples/notebooks/eager_basics.ipynb (renamed from tensorflow/contrib/eager/python/examples/notebooks/1_basics.ipynb) | 502
-rw-r--r-- tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py | 24
-rw-r--r-- tensorflow/contrib/eager/python/examples/revnet/BUILD | 3
-rw-r--r-- tensorflow/contrib/eager/python/examples/revnet/README.md | 45
-rw-r--r-- tensorflow/contrib/eager/python/examples/revnet/blocks.py | 134
-rw-r--r-- tensorflow/contrib/eager/python/examples/revnet/blocks_test.py | 254
-rw-r--r-- tensorflow/contrib/eager/python/examples/revnet/cifar_input.py | 35
-rw-r--r-- tensorflow/contrib/eager/python/examples/revnet/cifar_tfrecords.py | 89
-rw-r--r-- tensorflow/contrib/eager/python/examples/revnet/config.py | 39
-rw-r--r-- tensorflow/contrib/eager/python/examples/revnet/main.py | 241
-rw-r--r-- tensorflow/contrib/eager/python/examples/revnet/revnet.py | 98
-rw-r--r-- tensorflow/contrib/eager/python/examples/revnet/revnet_test.py | 75
-rw-r--r-- tensorflow/contrib/eager/python/examples/workshop/1_basic.ipynb | 282
-rw-r--r-- tensorflow/contrib/eager/python/examples/workshop/2_models.ipynb | 1018
-rw-r--r-- tensorflow/contrib/eager/python/examples/workshop/3_inspecting.ipynb | 443
-rw-r--r-- tensorflow/contrib/estimator/BUILD | 29
-rw-r--r-- tensorflow/contrib/estimator/__init__.py | 7
-rw-r--r-- tensorflow/contrib/estimator/python/estimator/baseline_test.py | 8
-rw-r--r-- tensorflow/contrib/estimator/python/estimator/boosted_trees.py | 46
-rw-r--r-- tensorflow/contrib/estimator/python/estimator/boosted_trees_test.py | 78
-rw-r--r-- tensorflow/contrib/estimator/python/estimator/dnn.py | 24
-rw-r--r-- tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py | 30
-rw-r--r-- tensorflow/contrib/estimator/python/estimator/dnn_linear_combined_test.py | 6
-rw-r--r-- tensorflow/contrib/estimator/python/estimator/early_stopping.py | 468
-rw-r--r-- tensorflow/contrib/estimator/python/estimator/early_stopping_test.py | 233
-rw-r--r-- tensorflow/contrib/estimator/python/estimator/head.py | 3
-rw-r--r-- tensorflow/contrib/estimator/python/estimator/head_test.py | 27
-rw-r--r-- tensorflow/contrib/estimator/python/estimator/linear.py | 28
-rw-r--r-- tensorflow/contrib/factorization/kernels/wals_solver_ops.cc | 44
-rw-r--r-- tensorflow/contrib/factorization/ops/factorization_ops.cc | 19
-rw-r--r-- tensorflow/contrib/factorization/python/kernel_tests/wals_solver_ops_test.py | 36
-rw-r--r-- tensorflow/contrib/factorization/python/ops/factorization_ops.py | 1
-rw-r--r-- tensorflow/contrib/framework/python/ops/variables_test.py | 10
-rw-r--r-- tensorflow/contrib/gan/BUILD | 15
-rw-r--r-- tensorflow/contrib/gan/python/estimator/python/gan_estimator_impl.py | 200
-rw-r--r-- tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py | 227
-rw-r--r-- tensorflow/contrib/gan/python/estimator/python/head_impl.py | 21
-rw-r--r-- tensorflow/contrib/gan/python/estimator/python/head_test.py | 2
-rw-r--r-- tensorflow/contrib/gdr/gdr_memory_manager.cc | 12
-rw-r--r-- tensorflow/contrib/image/kernels/image_ops.cc | 2
-rw-r--r-- tensorflow/contrib/image/kernels/image_ops.h | 30
-rw-r--r-- tensorflow/contrib/image/ops/image_ops.cc | 2
-rw-r--r-- tensorflow/contrib/image/python/kernel_tests/image_ops_test.py | 20
-rw-r--r-- tensorflow/contrib/image/python/ops/image_ops.py | 3
-rw-r--r-- tensorflow/contrib/kafka/ops/kafka_ops.cc | 44
-rw-r--r-- tensorflow/contrib/kinesis/BUILD | 113
-rw-r--r-- tensorflow/contrib/kinesis/__init__.py | 32
-rw-r--r-- tensorflow/contrib/kinesis/kernels/kinesis_dataset_ops.cc | 359
-rw-r--r-- tensorflow/contrib/kinesis/ops/dataset_ops.cc | 42
-rw-r--r-- tensorflow/contrib/kinesis/python/kernel_tests/kinesis_test.py | 139
-rw-r--r-- tensorflow/contrib/kinesis/python/ops/kinesis_dataset_ops.py | 96
-rw-r--r-- tensorflow/contrib/kinesis/python/ops/kinesis_op_loader.py | 24
-rw-r--r-- tensorflow/contrib/layers/python/layers/embedding_ops_test.py | 1
-rw-r--r-- tensorflow/contrib/layers/python/layers/rev_block_lib.py | 26
-rw-r--r-- tensorflow/contrib/layers/python/layers/rev_block_lib_test.py | 20
-rw-r--r-- tensorflow/contrib/learn/python/learn/estimators/run_config.py | 4
-rw-r--r-- tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py | 14
-rw-r--r-- tensorflow/contrib/linear_optimizer/BUILD | 1
-rw-r--r-- tensorflow/contrib/lite/BUILD | 2
-rw-r--r-- tensorflow/contrib/lite/Makefile | 83
-rw-r--r-- tensorflow/contrib/lite/allocation.cc | 6
-rw-r--r-- tensorflow/contrib/lite/allocation.h | 1
-rw-r--r-- tensorflow/contrib/lite/arena_planner.cc | 30
-rw-r--r-- tensorflow/contrib/lite/arena_planner.h | 16
-rw-r--r-- tensorflow/contrib/lite/arena_planner_test.cc | 29
-rw-r--r-- tensorflow/contrib/lite/build_def.bzl | 9
-rw-r--r-- tensorflow/contrib/lite/builtin_op_data.h | 23
-rw-r--r-- tensorflow/contrib/lite/builtin_ops.h | 5
-rw-r--r-- tensorflow/contrib/lite/context.h | 38
-rw-r--r-- tensorflow/contrib/lite/delegates/eager/BUILD | 35
-rw-r--r-- tensorflow/contrib/lite/delegates/eager/util.cc | 47
-rw-r--r-- tensorflow/contrib/lite/delegates/eager/util.h | 35
-rw-r--r-- tensorflow/contrib/lite/delegates/eager/util_test.cc | 100
-rw-r--r-- tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate.cc | 63
-rw-r--r-- tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate_test.cc | 145
-rw-r--r-- tensorflow/contrib/lite/examples/android/BUILD | 1
-rw-r--r-- tensorflow/contrib/lite/examples/android/app/README.md | 19
-rw-r--r-- tensorflow/contrib/lite/examples/android/app/build.gradle | 4
-rw-r--r-- tensorflow/contrib/lite/examples/android/app/download-models.gradle | 5
-rw-r--r-- tensorflow/contrib/lite/examples/android/app/src/main/assets/pets_labels_list.txt | 38
-rw-r--r-- tensorflow/contrib/lite/examples/android/app/src/main/java/org/tensorflow/demo/DetectorActivity.java | 13
-rw-r--r-- tensorflow/contrib/lite/examples/android/app/src/main/java/org/tensorflow/demo/TFLiteObjectDetectionAPIModel.java | 220
-rw-r--r-- tensorflow/contrib/lite/g3doc/benchmarks.md | 178
-rw-r--r-- tensorflow/contrib/lite/g3doc/models.md | 32
-rw-r--r-- tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md | 37
-rw-r--r-- tensorflow/contrib/lite/interpreter.cc | 144
-rw-r--r-- tensorflow/contrib/lite/interpreter.h | 26
-rw-r--r-- tensorflow/contrib/lite/interpreter_test.cc | 178
-rw-r--r-- tensorflow/contrib/lite/java/demo/app/build.gradle | 9
-rw-r--r-- tensorflow/contrib/lite/java/ovic/demo/app/build.gradle | 4
-rw-r--r-- tensorflow/contrib/lite/java/ovic/src/test/java/org/tensorflow/ovic/OvicClassifierTest.java | 8
-rw-r--r-- tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/DataType.java | 9
-rw-r--r-- tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Interpreter.java | 29
-rw-r--r-- tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java | 239
-rw-r--r-- tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Tensor.java | 198
-rw-r--r-- tensorflow/contrib/lite/java/src/main/native/BUILD | 1
-rw-r--r-- tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.cc | 307
-rw-r--r-- tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.h | 79
-rw-r--r-- tensorflow/contrib/lite/java/src/main/native/tensor_jni.cc | 131
-rw-r--r-- tensorflow/contrib/lite/java/src/main/native/tensor_jni.h | 61
-rw-r--r-- tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/InterpreterTest.java | 33
-rw-r--r-- tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/NativeInterpreterWrapperTest.java | 251
-rw-r--r-- tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/TensorTest.java | 152
-rw-r--r-- tensorflow/contrib/lite/java/src/testhelper/java/org/tensorflow/lite/TestHelper.java | 4
-rw-r--r-- tensorflow/contrib/lite/kernels/BUILD | 45
-rw-r--r-- tensorflow/contrib/lite/kernels/add.cc | 60
-rw-r--r-- tensorflow/contrib/lite/kernels/add_test.cc | 58
-rw-r--r-- tensorflow/contrib/lite/kernels/arg_min_max.cc (renamed from tensorflow/contrib/lite/kernels/arg_max.cc) | 69
-rw-r--r--tensorflow/contrib/lite/kernels/arg_min_max_test.cc (renamed from tensorflow/contrib/lite/kernels/arg_max_test.cc)89
-rw-r--r--tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc56
-rw-r--r--tensorflow/contrib/lite/kernels/cast.cc23
-rw-r--r--tensorflow/contrib/lite/kernels/cast_test.cc67
-rw-r--r--tensorflow/contrib/lite/kernels/conv.cc17
-rw-r--r--tensorflow/contrib/lite/kernels/depthwise_conv.cc4
-rw-r--r--tensorflow/contrib/lite/kernels/div.cc4
-rw-r--r--tensorflow/contrib/lite/kernels/eigen_support.cc81
-rw-r--r--tensorflow/contrib/lite/kernels/eigen_support.h8
-rw-r--r--tensorflow/contrib/lite/kernels/embedding_lookup.cc5
-rw-r--r--tensorflow/contrib/lite/kernels/embedding_lookup_test.cc36
-rw-r--r--tensorflow/contrib/lite/kernels/fake_quant.cc92
-rw-r--r--tensorflow/contrib/lite/kernels/fake_quant_test.cc112
-rw-r--r--tensorflow/contrib/lite/kernels/fully_connected.cc122
-rw-r--r--tensorflow/contrib/lite/kernels/fully_connected_test.cc242
-rw-r--r--tensorflow/contrib/lite/kernels/gather.cc7
-rw-r--r--tensorflow/contrib/lite/kernels/gather_test.cc9
-rw-r--r--tensorflow/contrib/lite/kernels/gemm_support.cc55
-rw-r--r--tensorflow/contrib/lite/kernels/gemm_support.h3
-rw-r--r--tensorflow/contrib/lite/kernels/internal/kernel_utils.cc23
-rw-r--r--tensorflow/contrib/lite/kernels/internal/optimized/depthwiseconv_uint8_3x3_filter.h1
-rw-r--r--tensorflow/contrib/lite/kernels/internal/optimized/legacy_optimized_ops.h73
-rw-r--r--tensorflow/contrib/lite/kernels/internal/optimized/multithreaded_conv.h63
-rw-r--r--tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc174
-rw-r--r--tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h4
-rw-r--r--tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h267
-rw-r--r--tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h6
-rw-r--r--tensorflow/contrib/lite/kernels/internal/quantization_util.h10
-rw-r--r--tensorflow/contrib/lite/kernels/internal/reference/legacy_reference_ops.h73
-rw-r--r--tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc16
-rw-r--r--tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h10
-rw-r--r--tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h355
-rw-r--r--tensorflow/contrib/lite/kernels/internal/strided_slice_logic.h16
-rw-r--r--tensorflow/contrib/lite/kernels/internal/tensor.h15
-rw-r--r--tensorflow/contrib/lite/kernels/internal/tensor_utils.h4
-rw-r--r--tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc21
-rw-r--r--tensorflow/contrib/lite/kernels/internal/types.h23
-rw-r--r--tensorflow/contrib/lite/kernels/kernel_util.cc18
-rw-r--r--tensorflow/contrib/lite/kernels/kernel_util.h28
-rw-r--r--tensorflow/contrib/lite/kernels/lstm.cc3
-rw-r--r--tensorflow/contrib/lite/kernels/lstm_test.cc11
-rw-r--r--tensorflow/contrib/lite/kernels/mul.cc67
-rw-r--r--tensorflow/contrib/lite/kernels/mul_test.cc58
-rw-r--r--tensorflow/contrib/lite/kernels/pooling.cc110
-rw-r--r--tensorflow/contrib/lite/kernels/pow.cc143
-rw-r--r--tensorflow/contrib/lite/kernels/pow_test.cc117
-rw-r--r--tensorflow/contrib/lite/kernels/reduce.cc111
-rw-r--r--tensorflow/contrib/lite/kernels/reduce_test.cc353
-rw-r--r--tensorflow/contrib/lite/kernels/register.cc14
-rw-r--r--tensorflow/contrib/lite/kernels/select.cc3
-rw-r--r--tensorflow/contrib/lite/kernels/select_test.cc13
-rw-r--r--tensorflow/contrib/lite/kernels/strided_slice.cc27
-rw-r--r--tensorflow/contrib/lite/kernels/strided_slice_test.cc50
-rw-r--r--tensorflow/contrib/lite/kernels/sub.cc62
-rw-r--r--tensorflow/contrib/lite/kernels/sub_test.cc58
-rw-r--r--tensorflow/contrib/lite/kernels/svdf.cc320
-rw-r--r--tensorflow/contrib/lite/kernels/svdf_test.cc186
-rw-r--r--tensorflow/contrib/lite/kernels/test_util.h26
-rw-r--r--tensorflow/contrib/lite/kernels/topk_v2.cc4
-rw-r--r--tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc471
-rw-r--r--tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm_test.cc1767
-rw-r--r--tensorflow/contrib/lite/model.cc49
-rw-r--r--tensorflow/contrib/lite/model.h1
-rw-r--r--tensorflow/contrib/lite/nnapi_delegate.cc291
-rw-r--r--tensorflow/contrib/lite/nnapi_delegate.h6
-rw-r--r--tensorflow/contrib/lite/optional_debug_tools.cc2
-rw-r--r--tensorflow/contrib/lite/profiling/profile_summarizer.cc2
-rw-r--r--tensorflow/contrib/lite/python/BUILD6
-rw-r--r--tensorflow/contrib/lite/python/interpreter.py22
-rw-r--r--tensorflow/contrib/lite/python/interpreter_test.py23
-rw-r--r--tensorflow/contrib/lite/python/interpreter_wrapper/BUILD1
-rw-r--r--tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.cc269
-rw-r--r--tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h26
-rw-r--r--tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.i43
-rw-r--r--tensorflow/contrib/lite/python/lite.py49
-rw-r--r--tensorflow/contrib/lite/python/lite_test.py276
-rw-r--r--tensorflow/contrib/lite/python/tflite_convert.py11
-rw-r--r--tensorflow/contrib/lite/schema/BUILD1
-rw-r--r--tensorflow/contrib/lite/schema/schema.fbs39
-rwxr-xr-xtensorflow/contrib/lite/schema/schema_generated.h494
-rw-r--r--tensorflow/contrib/lite/testing/BUILD1
-rw-r--r--tensorflow/contrib/lite/testing/generate_examples.py85
-rw-r--r--tensorflow/contrib/lite/testing/generate_testspec.cc85
-rw-r--r--tensorflow/contrib/lite/testing/generate_testspec.h4
-rw-r--r--tensorflow/contrib/lite/testing/generated_examples_zip_test.cc31
-rw-r--r--tensorflow/contrib/lite/testing/tflite_diff_example_test.cc23
-rw-r--r--tensorflow/contrib/lite/testing/tflite_diff_flags.h6
-rw-r--r--tensorflow/contrib/lite/testing/tflite_diff_util.cc7
-rw-r--r--tensorflow/contrib/lite/testing/tflite_diff_util.h6
-rw-r--r--tensorflow/contrib/lite/toco/BUILD43
-rw-r--r--tensorflow/contrib/lite/toco/README.md13
-rw-r--r--tensorflow/contrib/lite/toco/args.h12
-rw-r--r--tensorflow/contrib/lite/toco/export_tensorflow.cc327
-rw-r--r--tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md26
-rw-r--r--tensorflow/contrib/lite/toco/g3doc/cmdline_reference.md4
-rw-r--r--tensorflow/contrib/lite/toco/g3doc/python_api.md54
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc1
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/ensure_bias_vectors.cc23
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h6
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/hardcode_min_max.cc28
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/identify_lstm.cc124
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/identify_prelu.cc25
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc11
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/move_binary_operator_before_reshape.cc178
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc15
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/propagate_default_min_max.cc14
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc31
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc75
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc69
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h20
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/quantize.cc97
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc6
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc78
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/read_fake_quant_min_max.cc112
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_batch_to_space_nd_attributes.cc4
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc46
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_strided_slice.cc14
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc4
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_fake_quant_args_from_vars.cc80
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_reduce_attributes.cc (renamed from tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc)25
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_reorder_axes.cc9
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_space_to_batch_nd_attributes.cc4
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_tensorflow_matmul.cc38
-rw-r--r--tensorflow/contrib/lite/toco/import_tensorflow.cc80
-rw-r--r--tensorflow/contrib/lite/toco/model.h111
-rw-r--r--tensorflow/contrib/lite/toco/model_cmdline_flags.cc8
-rw-r--r--tensorflow/contrib/lite/toco/tflite/export.cc18
-rw-r--r--tensorflow/contrib/lite/toco/tflite/import.cc2
-rw-r--r--tensorflow/contrib/lite/toco/tflite/operator.cc134
-rw-r--r--tensorflow/contrib/lite/toco/tflite/operator_test.cc8
-rw-r--r--tensorflow/contrib/lite/toco/tflite/types.cc8
-rw-r--r--tensorflow/contrib/lite/toco/tflite/types_test.cc13
-rw-r--r--tensorflow/contrib/lite/toco/toco.cc36
-rw-r--r--tensorflow/contrib/lite/toco/toco_cmdline_flags.cc8
-rw-r--r--tensorflow/contrib/lite/toco/toco_flags.proto2
-rw-r--r--tensorflow/contrib/lite/toco/toco_saved_model.cc189
-rw-r--r--tensorflow/contrib/lite/toco/toco_saved_model.h53
-rw-r--r--tensorflow/contrib/lite/toco/toco_saved_model_test.cc274
-rw-r--r--tensorflow/contrib/lite/toco/toco_tooling.cc21
-rw-r--r--tensorflow/contrib/lite/toco/tooling_util.cc65
-rw-r--r--tensorflow/contrib/lite/toco/tooling_util.h5
-rw-r--r--tensorflow/contrib/lite/tools/BUILD2
-rw-r--r--tensorflow/contrib/lite/tools/benchmark/README.md59
-rw-r--r--tensorflow/contrib/lite/tools/benchmark/benchmark_model.cc3
-rw-r--r--tensorflow/contrib/lite/tools/benchmark/benchmark_model.h1
-rw-r--r--tensorflow/contrib/lite/tools/benchmark/benchmark_params.h4
-rw-r--r--tensorflow/contrib/lite/tools/visualize.py17
-rw-r--r--tensorflow/contrib/makefile/tf_op_files.txt1
-rw-r--r--tensorflow/contrib/metrics/BUILD1
-rw-r--r--tensorflow/contrib/metrics/__init__.py1
-rw-r--r--tensorflow/contrib/metrics/python/metrics/classification.py121
-rw-r--r--tensorflow/contrib/metrics/python/metrics/classification_test.py202
-rw-r--r--tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py2
-rw-r--r--tensorflow/contrib/mpi_collectives/BUILD1
-rw-r--r--tensorflow/contrib/mpi_collectives/kernels/mpi_ops.cc2
-rw-r--r--tensorflow/contrib/mpi_collectives/mpi_ops.py163
-rw-r--r--tensorflow/contrib/mpi_collectives/ring.cc80
-rw-r--r--tensorflow/contrib/mpi_collectives/ring.cu.cc117
-rw-r--r--tensorflow/contrib/mpi_collectives/ring.h327
-rw-r--r--tensorflow/contrib/nccl/BUILD5
-rw-r--r--tensorflow/contrib/nccl/python/ops/nccl_ops.py6
-rw-r--r--tensorflow/contrib/opt/__init__.py7
-rw-r--r--tensorflow/contrib/opt/python/training/addsign_test.py6
-rw-r--r--tensorflow/contrib/opt/python/training/ggt.py2
-rw-r--r--tensorflow/contrib/opt/python/training/powersign_test.py2
-rw-r--r--tensorflow/contrib/opt/python/training/weight_decay_optimizers.py44
-rw-r--r--tensorflow/contrib/opt/python/training/weight_decay_optimizers_test.py8
-rw-r--r--tensorflow/contrib/optimizer_v2/optimizer_v2.py9
-rw-r--r--tensorflow/contrib/proto/BUILD4
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/BUILD86
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/build_defs.bzl89
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/decode_proto_fail_test.py68
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py275
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test_base.py310
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/defaut_values.TestCase.pbtxt94
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py155
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test_base.py177
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/minmax.TestCase.pbtxt161
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/nested.TestCase.pbtxt16
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/optional.TestCase.pbtxt20
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/promote_unsigned.TestCase.pbtxt29
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/proto_op_test_base.py407
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/ragged.TestCase.pbtxt32
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/shaped_batch.TestCase.pbtxt62
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/simple.TestCase.pbtxt21
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/test_example.proto159
-rw-r--r--tensorflow/contrib/quantize/python/fold_batch_norms.py72
-rw-r--r--tensorflow/contrib/quantize/python/fold_batch_norms_test.py84
-rw-r--r--tensorflow/contrib/quantize/python/quantize.py39
-rw-r--r--tensorflow/contrib/quantize/python/quantize_graph.py4
-rw-r--r--tensorflow/contrib/quantize/python/quantize_parameterized_test.py177
-rw-r--r--tensorflow/contrib/rnn/BUILD1
-rw-r--r--tensorflow/contrib/rnn/__init__.py4
-rw-r--r--tensorflow/contrib/rnn/python/kernel_tests/core_rnn_cell_test.py161
-rw-r--r--tensorflow/contrib/rnn/python/ops/rnn_cell.py342
-rw-r--r--tensorflow/contrib/rpc/python/kernel_tests/BUILD3
-rw-r--r--tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py52
-rw-r--r--tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py8
-rw-r--r--tensorflow/contrib/rpc/python/kernel_tests/test_example.proto147
-rw-r--r--tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py42
-rw-r--r--tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py16
-rw-r--r--tensorflow/contrib/seq2seq/python/ops/decoder.py29
-rw-r--r--tensorflow/contrib/slim/python/slim/evaluation_test.py3
-rw-r--r--tensorflow/contrib/summary/summary_ops_test.py13
-rw-r--r--tensorflow/contrib/tensorboard/db/BUILD1
-rw-r--r--tensorflow/contrib/tensorrt/BUILD32
-rw-r--r--tensorflow/contrib/tensorrt/convert/convert_graph.cc95
-rw-r--r--tensorflow/contrib/tensorrt/convert/convert_nodes.cc1351
-rw-r--r--tensorflow/contrib/tensorrt/convert/convert_nodes.h4
-rw-r--r--tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc20
-rw-r--r--tensorflow/contrib/tensorrt/convert/utils.cc35
-rw-r--r--tensorflow/contrib/tensorrt/convert/utils.h2
-rw-r--r--tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc31
-rw-r--r--tensorflow/contrib/tensorrt/ops/trt_engine_op.cc10
-rw-r--r--tensorflow/contrib/tensorrt/python/__init__.py1
-rw-r--r--tensorflow/contrib/tensorrt/python/trt_convert.py1
-rw-r--r--tensorflow/contrib/tensorrt/resources/trt_allocator.h3
-rw-r--r--tensorflow/contrib/tensorrt/resources/trt_int8_calibrator.cc37
-rw-r--r--tensorflow/contrib/tensorrt/resources/trt_int8_calibrator.h8
-rw-r--r--tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc62
-rw-r--r--tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py407
-rw-r--r--tensorflow/contrib/tensorrt/trt_conversion.i22
-rw-r--r--tensorflow/contrib/timeseries/python/timeseries/BUILD2
-rw-r--r--tensorflow/contrib/timeseries/python/timeseries/estimators.py13
-rw-r--r--tensorflow/contrib/timeseries/python/timeseries/head.py14
-rw-r--r--tensorflow/contrib/timeseries/python/timeseries/head_test.py56
-rw-r--r--tensorflow/contrib/tpu/BUILD45
-rw-r--r--tensorflow/contrib/tpu/__init__.py2
-rw-r--r--tensorflow/contrib/tpu/profiler/pip_package/cloud_tpu_profiler/main.py51
-rw-r--r--tensorflow/contrib/tpu/profiler/pip_package/setup.py2
-rw-r--r--tensorflow/contrib/tpu/profiler/version.h2
-rw-r--r--tensorflow/contrib/tpu/proto/BUILD15
-rw-r--r--tensorflow/contrib/tpu/proto/compilation_result.proto4
-rw-r--r--tensorflow/contrib/tpu/python/tpu/keras_support.py1053
-rw-r--r--tensorflow/contrib/tpu/python/tpu/topology.py5
-rw-r--r--tensorflow/contrib/tpu/python/tpu/topology_test.py46
-rw-r--r--tensorflow/contrib/tpu/python/tpu/tpu.py46
-rw-r--r--tensorflow/contrib/tpu/python/tpu/tpu_config.py58
-rw-r--r--tensorflow/contrib/tpu/python/tpu/tpu_config_test.py55
-rw-r--r--tensorflow/contrib/tpu/python/tpu/tpu_context.py110
-rw-r--r--tensorflow/contrib/tpu/python/tpu/tpu_estimator.py184
-rw-r--r--tensorflow/contrib/tpu/python/tpu/tpu_feed.py16
-rw-r--r--tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py6
-rw-r--r--tensorflow/contrib/training/python/training/sgdr_learning_rate_decay.py187
-rw-r--r--tensorflow/contrib/training/python/training/sgdr_learning_rate_decay_test.py145
-rw-r--r--tensorflow/contrib/verbs/rdma.cc6
-rw-r--r--tensorflow/contrib/verbs/rdma_mgr.cc8
-rw-r--r--tensorflow/core/BUILD101
-rw-r--r--tensorflow/core/api_def/api_test.cc39
-rw-r--r--tensorflow/core/api_def/base_api/api_def_BoostedTreesCenterBias.pbtxt41
-rw-r--r--tensorflow/core/api_def/base_api/api_def_BoostedTreesExampleDebugOutputs.pbtxt36
-rw-r--r--tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt2
-rw-r--r--tensorflow/core/api_def/base_api/api_def_IteratorFromStringHandleV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/base_api/api_def_IteratorV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/base_api/api_def_MatrixExponential.pbtxt2
-rw-r--r--tensorflow/core/api_def/base_api/api_def_MatrixLogarithm.pbtxt2
-rw-r--r--tensorflow/core/api_def/base_api/api_def_NonMaxSuppressionWithOverlaps.pbtxt62
-rw-r--r--tensorflow/core/api_def/base_api/api_def_ReduceJoin.pbtxt2
-rw-r--r--tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt6
-rw-r--r--tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt6
-rw-r--r--tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt6
-rw-r--r--tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt6
-rw-r--r--tensorflow/core/api_def/base_api/api_def_SinkDataset.pbtxt (renamed from tensorflow/core/api_def/base_api/api_def_IdentityDataset.pbtxt)2
-rw-r--r--tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt9
-rw-r--r--tensorflow/core/api_def/base_api/api_def_Softmax.pbtxt2
-rw-r--r--tensorflow/core/api_def/base_api/api_def_SparseApplyAdagrad.pbtxt4
-rw-r--r--tensorflow/core/api_def/base_api/api_def_SparseApplyCenteredRMSProp.pbtxt6
-rw-r--r--tensorflow/core/api_def/base_api/api_def_SparseApplyFtrl.pbtxt10
-rw-r--r--tensorflow/core/api_def/base_api/api_def_SparseApplyMomentum.pbtxt4
-rw-r--r--tensorflow/core/api_def/base_api/api_def_SparseApplyProximalAdagrad.pbtxt8
-rw-r--r--tensorflow/core/api_def/base_api/api_def_SparseApplyProximalGradientDescent.pbtxt4
-rw-r--r--tensorflow/core/api_def/base_api/api_def_SparseApplyRMSProp.pbtxt6
-rw-r--r--tensorflow/core/api_def/base_api/api_def_StatefulPartitionedCall.pbtxt25
-rw-r--r--tensorflow/core/api_def/base_api/api_def_UnsortedSegmentSum.pbtxt2
-rw-r--r--tensorflow/core/api_def/base_api/api_def_WindowDataset.pbtxt11
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Acos.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Acosh.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Add.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_AsString.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Asin.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Asinh.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Atan.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Atan2.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Atanh.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchToSpaceND.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Betainc.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Ceil.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_CheckNumerics.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Cholesky.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Cos.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Cosh.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Cross.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DecodeBase64.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DecodeCompressed.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DecodeJSONExample.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DecodeRaw.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Dequantize.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Diag.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DiagPart.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Digamma.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_EncodeBase64.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Equal.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Erfc.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Exp.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Expm1.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ExtractImagePatches.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FFT.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxArgs.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxArgsGradient.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVars.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsGradient.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsPerChannel.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Floor.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_GatherNd.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Greater.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_GreaterEqual.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_IFFT.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Igamma.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Igammac.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_InvertPermutation.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_IsFinite.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_IsInf.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_IsNan.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Less.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LessEqual.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Lgamma.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Log.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Log1p.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LogicalAnd.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LogicalNot.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LogicalOr.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatchingFiles.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixBandPart.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixDeterminant.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixDiag.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixDiagPart.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixInverse.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixSetDiag.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixSolve.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixTriangularSolve.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Maximum.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Minimum.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_NonMaxSuppressionWithOverlaps.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_NotEqual.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ParseTensor.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Polygamma.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Qr.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QuantizedConcat.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReadFile.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Reciprocal.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RegexReplace.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Reshape.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReverseV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Rint.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Rsqrt.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ScatterNd.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SegmentMax.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SegmentMean.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SegmentMin.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SegmentProd.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SegmentSum.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Sin.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Sinh.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SpaceToBatchND.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SquaredDifference.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StatefulPartitionedCall.pbtxt1
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StringJoin.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StringStrip.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StringToHashBucket.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StringToHashBucketFast.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StringToHashBucketStrong.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StringToNumber.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Substr.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Tan.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Tile.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_UnsortedSegmentMax.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_UnsortedSegmentMin.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_UnsortedSegmentProd.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_UnsortedSegmentSum.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_WriteFile.pbtxt2
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Zeta.pbtxt2
-rw-r--r--tensorflow/core/common_runtime/base_collective_executor.h8
-rw-r--r--tensorflow/core/common_runtime/broadcaster.cc4
-rw-r--r--tensorflow/core/common_runtime/broadcaster_test.cc4
-rw-r--r--tensorflow/core/common_runtime/collective_param_resolver_local.cc68
-rw-r--r--tensorflow/core/common_runtime/collective_param_resolver_local.h24
-rw-r--r--tensorflow/core/common_runtime/collective_param_resolver_local_test.cc72
-rw-r--r--tensorflow/core/common_runtime/collective_rma_local.cc14
-rw-r--r--tensorflow/core/common_runtime/collective_rma_local.h2
-rw-r--r--tensorflow/core/common_runtime/collective_rma_local_test.cc2
-rw-r--r--tensorflow/core/common_runtime/copy_tensor.cc19
-rw-r--r--tensorflow/core/common_runtime/copy_tensor.h15
-rw-r--r--tensorflow/core/common_runtime/direct_session.cc12
-rw-r--r--tensorflow/core/common_runtime/direct_session_test.cc339
-rw-r--r--tensorflow/core/common_runtime/direct_session_with_tracking_alloc_test.cc14
-rw-r--r--tensorflow/core/common_runtime/eager/context.cc53
-rw-r--r--tensorflow/core/common_runtime/eager/context.h12
-rw-r--r--tensorflow/core/common_runtime/eager/execute.cc88
-rw-r--r--tensorflow/core/common_runtime/eager/kernel_and_device.cc14
-rw-r--r--tensorflow/core/common_runtime/eager/kernel_and_device.h3
-rw-r--r--tensorflow/core/common_runtime/eager/kernel_and_device_test.cc8
-rw-r--r--tensorflow/core/common_runtime/eager/tensor_handle.cc57
-rw-r--r--tensorflow/core/common_runtime/eager/tensor_handle.h39
-rw-r--r--tensorflow/core/common_runtime/executor.cc23
-rw-r--r--tensorflow/core/common_runtime/executor.h3
-rw-r--r--tensorflow/core/common_runtime/function.cc39
-rw-r--r--tensorflow/core/common_runtime/gpu/cuda_host_allocator.h60
-rw-r--r--tensorflow/core/common_runtime/gpu/gpu_device.cc34
-rw-r--r--tensorflow/core/common_runtime/gpu/gpu_device.h3
-rw-r--r--tensorflow/core/common_runtime/gpu/gpu_device_factory.cc7
-rw-r--r--tensorflow/core/common_runtime/gpu/gpu_device_test.cc4
-rw-r--r--tensorflow/core/common_runtime/gpu/gpu_process_state.cc (renamed from tensorflow/core/common_runtime/gpu/process_state.cc)170
-rw-r--r--tensorflow/core/common_runtime/gpu/gpu_process_state.h (renamed from tensorflow/core/common_runtime/gpu/process_state.h)90
-rw-r--r--tensorflow/core/common_runtime/gpu/gpu_util.cc18
-rw-r--r--tensorflow/core/common_runtime/gpu/gpu_util.h12
-rw-r--r--tensorflow/core/common_runtime/gpu/pool_allocator_test.cc6
-rw-r--r--tensorflow/core/common_runtime/gpu_device_context.h15
-rw-r--r--tensorflow/core/common_runtime/graph_execution_state.cc176
-rw-r--r--tensorflow/core/common_runtime/placer.cc90
-rw-r--r--tensorflow/core/common_runtime/placer.h2
-rw-r--r--tensorflow/core/common_runtime/placer_test.cc84
-rw-r--r--tensorflow/core/common_runtime/pool_allocator.cc (renamed from tensorflow/core/common_runtime/gpu/pool_allocator.cc)10
-rw-r--r--tensorflow/core/common_runtime/pool_allocator.h (renamed from tensorflow/core/common_runtime/gpu/pool_allocator.h)53
-rw-r--r--tensorflow/core/common_runtime/process_state.cc129
-rw-r--r--tensorflow/core/common_runtime/process_state.h132
-rw-r--r--tensorflow/core/common_runtime/rendezvous_mgr.cc2
-rw-r--r--tensorflow/core/common_runtime/ring_reducer.cc19
-rw-r--r--tensorflow/core/common_runtime/ring_reducer_test.cc4
-rw-r--r--tensorflow/core/common_runtime/test_collective_executor_mgr.h3
-rw-r--r--tensorflow/core/debug/BUILD55
-rw-r--r--tensorflow/core/debug/debug_gateway.cc122
-rw-r--r--tensorflow/core/debug/debug_gateway.h83
-rw-r--r--tensorflow/core/debug/debug_gateway_test.cc1011
-rw-r--r--tensorflow/core/distributed_runtime/BUILD2
-rw-r--r--tensorflow/core/distributed_runtime/base_rendezvous_mgr.cc2
-rw-r--r--tensorflow/core/distributed_runtime/collective_param_resolver_distributed.cc27
-rw-r--r--tensorflow/core/distributed_runtime/collective_rma_distributed.cc11
-rw-r--r--tensorflow/core/distributed_runtime/collective_rma_distributed.h1
-rw-r--r--tensorflow/core/distributed_runtime/collective_rma_distributed_test.cc6
-rw-r--r--tensorflow/core/distributed_runtime/eager/BUILD1
-rw-r--r--tensorflow/core/distributed_runtime/eager/eager_service_impl.cc31
-rw-r--r--tensorflow/core/distributed_runtime/eager/eager_service_impl.h3
-rw-r--r--tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc5
-rw-r--r--tensorflow/core/distributed_runtime/eager/remote_execute_node.h34
-rw-r--r--tensorflow/core/distributed_runtime/graph_mgr.cc2
-rw-r--r--tensorflow/core/distributed_runtime/rpc/grpc_channel.cc8
-rw-r--r--tensorflow/core/distributed_runtime/rpc/grpc_channel_test.cc5
-rw-r--r--tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc22
-rw-r--r--tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h2
-rw-r--r--tensorflow/core/distributed_runtime/rpc/grpc_session_test.cc33
-rw-r--r--tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.cc26
-rw-r--r--tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h6
-rw-r--r--tensorflow/core/distributed_runtime/rpc_collective_executor_mgr_test.cc47
-rw-r--r--tensorflow/core/framework/api_def.proto12
-rw-r--r--tensorflow/core/framework/collective.h1
-rw-r--r--tensorflow/core/framework/common_shape_fns.cc12
-rw-r--r--tensorflow/core/framework/common_shape_fns.h17
-rw-r--r--tensorflow/core/framework/graph_to_functiondef.cc2
-rw-r--r--tensorflow/core/framework/kernel_def_util.cc83
-rw-r--r--tensorflow/core/framework/kernel_def_util.h31
-rw-r--r--tensorflow/core/framework/kernel_def_util_test.cc133
-rw-r--r--tensorflow/core/framework/memory_types.cc11
-rw-r--r--tensorflow/core/framework/op_kernel.cc69
-rw-r--r--tensorflow/core/framework/op_kernel.h3
-rw-r--r--tensorflow/core/framework/resource_op_kernel.h25
-rw-r--r--tensorflow/core/framework/stats_aggregator.h4
-rw-r--r--tensorflow/core/framework/types.h4
-rw-r--r--tensorflow/core/graph/tensor_id.h6
-rw-r--r--tensorflow/core/grappler/costs/BUILD1
-rw-r--r--tensorflow/core/grappler/costs/graph_properties.cc173
-rw-r--r--tensorflow/core/grappler/costs/graph_properties_test.cc248
-rw-r--r--tensorflow/core/grappler/costs/graph_properties_testdata/function_error.pbtxt117
-rw-r--r--tensorflow/core/grappler/costs/graph_properties_testdata/function_switch.pbtxt251
-rw-r--r--tensorflow/core/grappler/costs/graph_properties_testdata/function_switch_2.pbtxt251
-rw-r--r--tensorflow/core/grappler/costs/graph_properties_testdata/function_switch_shapes.pbtxt317
-rw-r--r--tensorflow/core/grappler/costs/graph_properties_testdata/large_function_graph.pbtxt597
-rw-r--r--tensorflow/core/grappler/optimizers/BUILD3
-rw-r--r--tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc240
-rw-r--r--tensorflow/core/grappler/optimizers/arithmetic_optimizer.h1
-rw-r--r--tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc66
-rw-r--r--tensorflow/core/grappler/optimizers/data/BUILD69
-rw-r--r--tensorflow/core/grappler/optimizers/data/function_rename.cc51
-rw-r--r--tensorflow/core/grappler/optimizers/data/function_rename.h46
-rw-r--r--tensorflow/core/grappler/optimizers/data/function_rename_test.cc42
-rw-r--r--tensorflow/core/grappler/optimizers/data/graph_utils.cc8
-rw-r--r--tensorflow/core/grappler/optimizers/data/graph_utils.h5
-rw-r--r--tensorflow/core/grappler/optimizers/data/graph_utils_test.cc46
-rw-r--r--tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.cc10
-rw-r--r--tensorflow/core/grappler/optimizers/data/noop_elimination.cc90
-rw-r--r--tensorflow/core/grappler/optimizers/data/noop_elimination.h48
-rw-r--r--tensorflow/core/grappler/optimizers/data/noop_elimination_test.cc217
-rw-r--r--tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc10
-rw-r--r--tensorflow/core/grappler/optimizers/meta_optimizer.cc7
-rw-r--r--tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.cc3
-rw-r--r--tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.h3
-rw-r--r--tensorflow/core/grappler/optimizers/scoped_allocator_optimizer_test.cc4
-rw-r--r--tensorflow/core/grappler/utils/scc.cc12
-rw-r--r--tensorflow/core/kernels/BUILD82
-rw-r--r--tensorflow/core/kernels/boosted_trees/BUILD8
-rw-r--r--tensorflow/core/kernels/boosted_trees/boosted_trees.proto17
-rw-r--r--tensorflow/core/kernels/boosted_trees/prediction_ops.cc153
-rw-r--r--tensorflow/core/kernels/boosted_trees/resources.cc23
-rw-r--r--tensorflow/core/kernels/boosted_trees/resources.h6
-rw-r--r--tensorflow/core/kernels/boosted_trees/stats_ops.cc41
-rw-r--r--tensorflow/core/kernels/boosted_trees/training_ops.cc85
-rw-r--r--tensorflow/core/kernels/boosted_trees/tree_helper.h69
-rw-r--r--tensorflow/core/kernels/concat_op.cc2
-rw-r--r--tensorflow/core/kernels/constant_op.cc3
-rw-r--r--tensorflow/core/kernels/conv_ops_fused.cc12
-rw-r--r--tensorflow/core/kernels/conv_ops_test.cc180
-rw-r--r--tensorflow/core/kernels/ctc_loss_op.cc6
-rw-r--r--tensorflow/core/kernels/data/BUILD24
-rw-r--r--tensorflow/core/kernels/data/captured_function.cc12
-rw-r--r--tensorflow/core/kernels/data/dense_to_sparse_batch_dataset_op.cc4
-rw-r--r--tensorflow/core/kernels/data/generator_dataset_op.cc3
-rw-r--r--tensorflow/core/kernels/data/group_by_reducer_dataset_op.cc1
-rw-r--r--tensorflow/core/kernels/data/identity_dataset_op.cc102
-rw-r--r--tensorflow/core/kernels/data/iterator_ops.cc68
-rw-r--r--tensorflow/core/kernels/data/optimize_dataset_op.cc92
-rw-r--r--tensorflow/core/kernels/data/prefetch_dataset_op.cc7
-rw-r--r--tensorflow/core/kernels/data/slide_dataset_op.cc157
-rw-r--r--tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc10
-rw-r--r--tensorflow/core/kernels/data/stats_aggregator_ops.cc29
-rw-r--r--tensorflow/core/kernels/data/stats_dataset_ops.cc13
-rw-r--r--tensorflow/core/kernels/data/window_dataset.cc1
-rw-r--r--tensorflow/core/kernels/data/window_dataset.h2
-rw-r--r--tensorflow/core/kernels/data/window_dataset_op.cc196
-rw-r--r--tensorflow/core/kernels/dense_update_ops.cc2
-rw-r--r--tensorflow/core/kernels/deserialize_sparse_string_op.cc296
-rw-r--r--tensorflow/core/kernels/deserialize_sparse_variant_op.cc372
-rw-r--r--tensorflow/core/kernels/edit_distance_op.cc13
-rw-r--r--tensorflow/core/kernels/fifo_queue.cc15
-rw-r--r--tensorflow/core/kernels/fifo_queue.h23
-rw-r--r--tensorflow/core/kernels/fifo_queue_op.cc39
-rw-r--r--tensorflow/core/kernels/function_ops.cc48
-rw-r--r--tensorflow/core/kernels/initializable_lookup_table.h2
-rw-r--r--tensorflow/core/kernels/mkl_aggregate_ops.cc6
-rw-r--r--tensorflow/core/kernels/mkl_concat_op.cc10
-rw-r--r--tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc659
-rw-r--r--tensorflow/core/kernels/mkl_conv_grad_input_ops.cc476
-rw-r--r--tensorflow/core/kernels/mkl_conv_ops.cc193
-rw-r--r--tensorflow/core/kernels/mkl_conv_ops.h222
-rw-r--r--tensorflow/core/kernels/mkl_lrn_op.cc10
-rw-r--r--tensorflow/core/kernels/mkl_reshape_op.cc5
-rw-r--r--tensorflow/core/kernels/mkl_tfconv_op.h4
-rw-r--r--tensorflow/core/kernels/non_max_suppression_op.cc182
-rw-r--r--tensorflow/core/kernels/non_max_suppression_op_test.cc237
-rw-r--r--tensorflow/core/kernels/pad_op.cc4
-rw-r--r--tensorflow/core/kernels/pad_op_gpu.cu.cc2
-rw-r--r--tensorflow/core/kernels/partitioned_function_ops.cc295
-rw-r--r--tensorflow/core/kernels/quantize_and_dequantize_op.h8
-rw-r--r--tensorflow/core/kernels/queue_op.cc367
-rw-r--r--tensorflow/core/kernels/queue_op.h233
-rw-r--r--tensorflow/core/kernels/queue_ops.cc395
-rw-r--r--tensorflow/core/kernels/reshape_util.cc17
-rw-r--r--tensorflow/core/kernels/resource_variable_ops.cc29
-rw-r--r--tensorflow/core/kernels/resource_variable_ops.h9
-rw-r--r--tensorflow/core/kernels/roll_op.cc3
-rw-r--r--tensorflow/core/kernels/sdca_internal.cc1
-rw-r--r--tensorflow/core/kernels/sdca_internal.h2
-rw-r--r--tensorflow/core/kernels/segment_reduction_ops.h6
-rw-r--r--tensorflow/core/kernels/sendrecv_ops.cc1
-rw-r--r--tensorflow/core/kernels/serialize_sparse_op.cc271
-rw-r--r--tensorflow/core/kernels/set_kernels.cc44
-rw-r--r--tensorflow/core/kernels/sparse_concat_op.cc9
-rw-r--r--tensorflow/core/kernels/sparse_reduce_op.cc12
-rw-r--r--tensorflow/core/kernels/sparse_reorder_op.cc13
-rw-r--r--tensorflow/core/kernels/sparse_slice_grad_op.cc1
-rw-r--r--tensorflow/core/kernels/sparse_slice_op.cc7
-rw-r--r--tensorflow/core/kernels/sparse_softmax_op.cc7
-rw-r--r--tensorflow/core/kernels/sparse_split_op.cc14
-rw-r--r--tensorflow/core/kernels/sparse_tensors_map_ops.cc36
-rw-r--r--tensorflow/core/kernels/sparse_to_dense_op.cc6
-rw-r--r--tensorflow/core/kernels/tensor_array_ops.cc1
-rw-r--r--tensorflow/core/kernels/unary_ops_composition.cc432
-rw-r--r--tensorflow/core/kernels/unary_ops_composition_test.cc179
-rw-r--r--tensorflow/core/kernels/variable_ops.cc3
-rw-r--r--tensorflow/core/lib/bfloat16/bfloat16.h12
-rw-r--r--tensorflow/core/lib/db/sqlite_test.cc2
-rw-r--r--tensorflow/core/lib/gtl/manual_constructor_test.cc3
-rw-r--r--tensorflow/core/ops/boosted_trees_ops.cc47
-rw-r--r--tensorflow/core/ops/compat/ops_history.v1.pbtxt1414
-rw-r--r--tensorflow/core/ops/dataset_ops.cc39
-rw-r--r--tensorflow/core/ops/debug_ops.cc2
-rw-r--r--tensorflow/core/ops/functional_ops.cc26
-rw-r--r--tensorflow/core/ops/image_ops.cc32
-rw-r--r--tensorflow/core/ops/math_ops.cc11
-rw-r--r--tensorflow/core/ops/nn_ops.cc4
-rw-r--r--tensorflow/core/ops/ops.pbtxt314
-rw-r--r--tensorflow/core/platform/cloud/gcs_file_system.cc8
-rw-r--r--tensorflow/core/platform/default/build_config.bzl8
-rw-r--r--tensorflow/core/platform/default/build_config/BUILD12
-rw-r--r--tensorflow/core/platform/env.h2
-rw-r--r--tensorflow/core/platform/numa.h62
-rw-r--r--tensorflow/core/platform/numa_test.cc61
-rw-r--r--tensorflow/core/platform/posix/port.cc24
-rw-r--r--tensorflow/core/platform/profile_utils/cpu_utils.cc37
-rw-r--r--tensorflow/core/platform/s3/BUILD14
-rw-r--r--tensorflow/core/platform/s3/aws_crypto.cc113
-rw-r--r--tensorflow/core/platform/s3/aws_crypto.h35
-rw-r--r--tensorflow/core/platform/s3/s3_file_system.cc6
-rw-r--r--tensorflow/core/platform/vmodule_benchmark_test.cc (renamed from tensorflow/contrib/lite/java/src/main/native/duration_utils_jni.cc)28
-rw-r--r--tensorflow/core/platform/vmodule_test.cc117
-rw-r--r--tensorflow/core/protobuf/config.proto71
-rw-r--r--tensorflow/core/protobuf/debug.proto4
-rw-r--r--tensorflow/core/protobuf/eager_service.proto7
-rw-r--r--tensorflow/core/protobuf/tensorflow_server.proto2
-rw-r--r--tensorflow/core/util/device_name_utils.cc57
-rw-r--r--tensorflow/core/util/device_name_utils.h12
-rw-r--r--tensorflow/core/util/device_name_utils_test.cc47
-rw-r--r--tensorflow/core/util/mkl_util.h168
-rw-r--r--tensorflow/core/util/saved_tensor_slice_util.h1
-rw-r--r--tensorflow/core/util/sparse/dim_comparator.h16
-rw-r--r--tensorflow/core/util/sparse/group_iterator.h6
-rw-r--r--tensorflow/core/util/sparse/sparse_tensor.h196
-rw-r--r--tensorflow/core/util/sparse/sparse_tensor_test.cc91
-rw-r--r--tensorflow/core/util/status_util.h36
-rw-r--r--tensorflow/core/util/status_util_test.cc36
-rw-r--r--tensorflow/core/util/tensor_format.cc2
-rw-r--r--tensorflow/docs_src/api_guides/python/spectral_ops.md1
-rw-r--r--tensorflow/docs_src/deploy/s3.md2
-rw-r--r--tensorflow/docs_src/extend/index.md3
-rw-r--r--tensorflow/docs_src/extend/new_data_formats.md60
-rw-r--r--tensorflow/docs_src/get_started/eager.md3
-rw-r--r--tensorflow/docs_src/get_started/index.md29
-rw-r--r--tensorflow/docs_src/get_started/leftnav_files10
-rw-r--r--tensorflow/docs_src/guide/autograph.md3
-rw-r--r--tensorflow/docs_src/guide/datasets_for_estimators.md6
-rw-r--r--tensorflow/docs_src/guide/debugger.md1
-rw-r--r--tensorflow/docs_src/guide/eager.md9
-rw-r--r--tensorflow/docs_src/guide/feature_columns.md6
-rw-r--r--tensorflow/docs_src/guide/graph_viz.md3
-rw-r--r--tensorflow/docs_src/guide/graphs.md2
-rw-r--r--tensorflow/docs_src/guide/index.md15
-rw-r--r--tensorflow/docs_src/guide/keras.md24
-rw-r--r--tensorflow/docs_src/guide/leftnav_files5
-rw-r--r--tensorflow/docs_src/guide/saved_model.md9
-rw-r--r--tensorflow/docs_src/guide/tensorboard_histograms.md4
-rw-r--r--tensorflow/docs_src/guide/version_compat.md6
-rw-r--r--tensorflow/docs_src/install/index.md31
-rw-r--r--tensorflow/docs_src/install/install_c.md2
-rw-r--r--tensorflow/docs_src/install/install_go.md2
-rw-r--r--tensorflow/docs_src/install/install_java.md2
-rw-r--r--tensorflow/docs_src/install/install_linux.md404
-rw-r--r--tensorflow/docs_src/install/install_mac.md5
-rw-r--r--tensorflow/docs_src/install/install_raspbian.md4
-rw-r--r--tensorflow/docs_src/install/install_sources.md345
-rw-r--r--tensorflow/docs_src/install/install_windows.md4
-rw-r--r--tensorflow/docs_src/install/migration.md3
-rw-r--r--tensorflow/docs_src/javascript/index.md5
-rw-r--r--tensorflow/docs_src/javascript/leftnav_files1
-rw-r--r--tensorflow/docs_src/mobile/leftnav_files1
-rw-r--r--tensorflow/docs_src/mobile/mobile_intro.md3
-rw-r--r--tensorflow/docs_src/mobile/tflite/demo_android.md2
-rw-r--r--tensorflow/docs_src/mobile/tflite/devguide.md9
-rw-r--r--tensorflow/docs_src/mobile/tflite/performance.md174
-rw-r--r--tensorflow/docs_src/performance/xla/operation_semantics.md41
-rw-r--r--tensorflow/docs_src/tutorials/_index.yaml (renamed from tensorflow/docs_src/get_started/_index.yaml)57
-rw-r--r--tensorflow/docs_src/tutorials/_toc.yaml103
-rw-r--r--tensorflow/docs_src/tutorials/eager/custom_training_walkthrough.md3
-rw-r--r--tensorflow/docs_src/tutorials/eager/index.md13
-rw-r--r--tensorflow/docs_src/tutorials/estimators/cnn.md (renamed from tensorflow/docs_src/tutorials/layers.md)47
-rw-r--r--tensorflow/docs_src/tutorials/estimators/linear.md3
-rw-r--r--tensorflow/docs_src/tutorials/image_retraining.md4
-rw-r--r--tensorflow/docs_src/tutorials/images/deep_cnn.md (renamed from tensorflow/docs_src/tutorials/deep_cnn.md)22
-rw-r--r--tensorflow/docs_src/tutorials/images/image_recognition.md (renamed from tensorflow/docs_src/tutorials/image_recognition.md)3
-rw-r--r--tensorflow/docs_src/tutorials/index.md59
-rw-r--r--tensorflow/docs_src/tutorials/keras/basic_classification.md (renamed from tensorflow/docs_src/get_started/basic_classification.md)2
-rw-r--r--tensorflow/docs_src/tutorials/keras/basic_regression.md (renamed from tensorflow/docs_src/get_started/basic_regression.md)2
-rw-r--r--tensorflow/docs_src/tutorials/keras/basic_text_classification.md (renamed from tensorflow/docs_src/get_started/basic_text_classification.md)2
-rw-r--r--tensorflow/docs_src/tutorials/keras/index.md22
-rw-r--r--tensorflow/docs_src/tutorials/keras/overfit_and_underfit.md (renamed from tensorflow/docs_src/get_started/overfit_and_underfit.md)2
-rw-r--r--tensorflow/docs_src/tutorials/keras/save_and_restore_models.md (renamed from tensorflow/docs_src/get_started/save_and_restore_models.md)2
-rw-r--r--tensorflow/docs_src/tutorials/leftnav_files23
-rw-r--r--tensorflow/docs_src/tutorials/next_steps.md (renamed from tensorflow/docs_src/get_started/next_steps.md)2
-rw-r--r--[-rwxr-xr-x]tensorflow/docs_src/tutorials/non-ml/mandelbrot.md (renamed from tensorflow/docs_src/tutorials/mandelbrot.md)0
-rw-r--r--[-rwxr-xr-x]tensorflow/docs_src/tutorials/non-ml/pdes.md (renamed from tensorflow/docs_src/tutorials/pdes.md)3
-rw-r--r--tensorflow/docs_src/tutorials/representation/kernel_methods.md (renamed from tensorflow/docs_src/tutorials/kernel_methods.md)2
-rw-r--r--tensorflow/docs_src/tutorials/representation/linear.md (renamed from tensorflow/docs_src/tutorials/linear.md)10
-rw-r--r--tensorflow/docs_src/tutorials/representation/word2vec.md (renamed from tensorflow/docs_src/tutorials/word2vec.md)10
-rw-r--r--tensorflow/docs_src/tutorials/seq2seq.md5
-rw-r--r--tensorflow/docs_src/tutorials/sequences/audio_recognition.md (renamed from tensorflow/docs_src/tutorials/audio_recognition.md)0
-rw-r--r--tensorflow/docs_src/tutorials/sequences/recurrent.md (renamed from tensorflow/docs_src/tutorials/recurrent.md)4
-rw-r--r--tensorflow/docs_src/tutorials/sequences/recurrent_quickdraw.md (renamed from tensorflow/docs_src/tutorials/recurrent_quickdraw.md)4
-rw-r--r--tensorflow/docs_src/tutorials/wide.md461
-rw-r--r--tensorflow/docs_src/tutorials/wide_and_deep.md243
-rw-r--r--tensorflow/examples/speech_commands/BUILD1
-rw-r--r--tensorflow/examples/speech_commands/freeze.py64
-rw-r--r--tensorflow/examples/speech_commands/freeze_test.py54
-rw-r--r--tensorflow/examples/speech_commands/generate_streaming_test_wav.py10
-rw-r--r--tensorflow/examples/speech_commands/input_data.py135
-rw-r--r--tensorflow/examples/speech_commands/input_data_test.py87
-rw-r--r--tensorflow/examples/speech_commands/models.py302
-rw-r--r--tensorflow/examples/speech_commands/models_test.py40
-rw-r--r--tensorflow/examples/speech_commands/train.py58
-rw-r--r--tensorflow/go/attrs_test.go4
-rw-r--r--tensorflow/go/graph.go14
-rw-r--r--tensorflow/go/op/scope.go31
-rw-r--r--tensorflow/go/op/scope_test.go15
-rw-r--r--tensorflow/go/op/wrappers.go1292
-rw-r--r--tensorflow/go/operation.go6
-rw-r--r--tensorflow/go/operation_test.go23
-rw-r--r--tensorflow/java/maven/hadoop/pom.xml196
-rw-r--r--tensorflow/java/maven/libtensorflow/pom.xml2
-rw-r--r--tensorflow/java/maven/libtensorflow_jni/pom.xml2
-rw-r--r--tensorflow/java/maven/libtensorflow_jni_gpu/pom.xml2
-rw-r--r--tensorflow/java/maven/pom.xml2
-rw-r--r--tensorflow/java/maven/proto/pom.xml2
-rw-r--r--tensorflow/java/maven/run_inside_container.sh5
-rw-r--r--tensorflow/java/maven/spark-connector/pom.xml355
-rw-r--r--tensorflow/java/maven/tensorflow/pom.xml2
-rw-r--r--tensorflow/java/src/gen/cc/java_defs.h2
-rw-r--r--tensorflow/java/src/gen/cc/op_generator.cc29
-rw-r--r--tensorflow/java/src/gen/cc/op_generator.h2
-rw-r--r--tensorflow/java/src/gen/cc/op_specs.cc148
-rw-r--r--tensorflow/java/src/gen/cc/op_specs.h40
-rw-r--r--tensorflow/java/src/gen/java/org/tensorflow/processor/OperatorProcessor.java296
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/Graph.java79
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/Input.java48
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java73
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/op/core/Gradients.java153
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFBool.java30
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFDouble.java30
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFFloat.java30
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFInt32.java30
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFInt64.java30
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFString.java27
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFType.java20
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFUInt8.java30
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/Types.java52
-rw-r--r--tensorflow/java/src/main/native/graph_jni.cc54
-rw-r--r--tensorflow/java/src/main/native/graph_jni.h9
-rw-r--r--tensorflow/java/src/main/native/saved_model_bundle_jni.cc15
-rw-r--r--tensorflow/java/src/main/native/saved_model_bundle_jni.h4
-rw-r--r--tensorflow/java/src/main/native/session_jni.cc42
-rw-r--r--tensorflow/java/src/main/native/utils_jni.cc53
-rw-r--r--tensorflow/java/src/main/native/utils_jni.h33
-rw-r--r--tensorflow/java/src/test/java/org/tensorflow/GraphTest.java103
-rw-r--r--tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java56
-rw-r--r--tensorflow/java/src/test/java/org/tensorflow/SessionTest.java38
-rw-r--r--tensorflow/java/src/test/java/org/tensorflow/TestUtil.java34
-rw-r--r--tensorflow/python/BUILD77
-rw-r--r--tensorflow/python/client/session.py4
-rw-r--r--tensorflow/python/client/session_test.py69
-rw-r--r--tensorflow/python/compat/BUILD22
-rw-r--r--tensorflow/python/compat/compat.py125
-rw-r--r--tensorflow/python/compat/compat_test.py70
-rw-r--r--tensorflow/python/data/kernel_tests/BUILD1
-rw-r--r--tensorflow/python/data/kernel_tests/batch_dataset_op_test.py57
-rw-r--r--tensorflow/python/data/kernel_tests/iterator_ops_test.py64
-rw-r--r--tensorflow/python/data/kernel_tests/map_dataset_op_test.py7
-rw-r--r--tensorflow/python/data/ops/BUILD2
-rw-r--r--tensorflow/python/data/ops/dataset_ops.py147
-rw-r--r--tensorflow/python/data/ops/iterator_ops.py63
-rw-r--r--tensorflow/python/debug/BUILD15
-rw-r--r--tensorflow/python/debug/examples/debug_keras.py89
-rwxr-xr-xtensorflow/python/debug/examples/examples_test.sh7
-rw-r--r--tensorflow/python/debug/wrappers/framework.py87
-rw-r--r--tensorflow/python/debug/wrappers/grpc_wrapper.py6
-rw-r--r--tensorflow/python/debug/wrappers/local_cli_wrapper.py2
-rw-r--r--tensorflow/python/debug/wrappers/local_cli_wrapper_test.py118
-rw-r--r--tensorflow/python/eager/backprop.py15
-rw-r--r--tensorflow/python/eager/backprop_test.py41
-rw-r--r--tensorflow/python/eager/function.py332
-rw-r--r--tensorflow/python/eager/function_test.py222
-rw-r--r--tensorflow/python/eager/graph_callable.py47
-rw-r--r--tensorflow/python/eager/pywrap_tensor.cc32
-rw-r--r--tensorflow/python/eager/pywrap_tfe_src.cc91
-rw-r--r--tensorflow/python/eager/pywrap_tfe_test.py43
-rw-r--r--tensorflow/python/eager/tensor_test.py2
-rw-r--r--tensorflow/python/eager/test.py1
-rw-r--r--tensorflow/python/estimator/BUILD8
-rw-r--r--tensorflow/python/estimator/api/BUILD5
-rw-r--r--tensorflow/python/estimator/canned/baseline_test.py10
-rw-r--r--tensorflow/python/estimator/canned/boosted_trees.py420
-rw-r--r--tensorflow/python/estimator/canned/boosted_trees_test.py675
-rw-r--r--tensorflow/python/estimator/canned/dnn.py68
-rw-r--r--tensorflow/python/estimator/canned/dnn_linear_combined.py81
-rw-r--r--tensorflow/python/estimator/canned/dnn_linear_combined_test.py12
-rw-r--r--tensorflow/python/estimator/canned/dnn_testing_utils.py100
-rw-r--r--tensorflow/python/estimator/canned/head.py22
-rw-r--r--tensorflow/python/estimator/canned/head_test.py31
-rw-r--r--tensorflow/python/estimator/canned/linear.py76
-rw-r--r--tensorflow/python/estimator/canned/linear_testing_utils.py141
-rw-r--r--tensorflow/python/estimator/canned/optimizers.py2
-rw-r--r--tensorflow/python/estimator/canned/optimizers_test.py30
-rw-r--r--tensorflow/python/estimator/estimator.py139
-rw-r--r--tensorflow/python/estimator/estimator_test.py63
-rw-r--r--tensorflow/python/estimator/export/export_output.py11
-rw-r--r--tensorflow/python/estimator/export/export_output_test.py15
-rw-r--r--tensorflow/python/estimator/inputs/pandas_io.py41
-rw-r--r--tensorflow/python/estimator/inputs/pandas_io_test.py70
-rw-r--r--tensorflow/python/estimator/keras.py40
-rw-r--r--tensorflow/python/estimator/keras_test.py23
-rw-r--r--tensorflow/python/estimator/run_config.py36
-rw-r--r--tensorflow/python/estimator/training.py12
-rw-r--r--tensorflow/python/estimator/training_test.py4
-rw-r--r--tensorflow/python/estimator/util.py22
-rw-r--r--tensorflow/python/feature_column/BUILD68
-rw-r--r--tensorflow/python/feature_column/feature_column.py169
-rw-r--r--tensorflow/python/feature_column/feature_column_test.py6
-rw-r--r--tensorflow/python/feature_column/feature_column_v2.py3600
-rw-r--r--tensorflow/python/feature_column/feature_column_v2_test.py6583
-rw-r--r--tensorflow/python/framework/common_shapes.py12
-rw-r--r--tensorflow/python/framework/error_interpolation.py170
-rw-r--r--tensorflow/python/framework/error_interpolation_test.py138
-rw-r--r--tensorflow/python/framework/function_test.py16
-rw-r--r--tensorflow/python/framework/importer.py6
-rw-r--r--tensorflow/python/framework/ops.py173
-rw-r--r--tensorflow/python/framework/python_op_gen.cc1
-rw-r--r--tensorflow/python/framework/python_op_gen_internal.cc25
-rw-r--r--tensorflow/python/framework/subscribe.py2
-rw-r--r--tensorflow/python/framework/tensor_util_test.py72
-rw-r--r--tensorflow/python/framework/test_util.py44
-rw-r--r--tensorflow/python/framework/test_util_test.py43
-rw-r--r--tensorflow/python/framework/traceable_stack.py135
-rw-r--r--tensorflow/python/framework/traceable_stack_test.py133
-rwxr-xr-xtensorflow/python/keras/BUILD15
-rw-r--r--tensorflow/python/keras/applications/mobilenet.py22
-rw-r--r--tensorflow/python/keras/backend.py64
-rw-r--r--tensorflow/python/keras/backend_test.py141
-rw-r--r--tensorflow/python/keras/callbacks.py238
-rw-r--r--tensorflow/python/keras/callbacks_test.py171
-rw-r--r--tensorflow/python/keras/datasets/mnist.py2
-rw-r--r--tensorflow/python/keras/engine/base_layer.py101
-rw-r--r--tensorflow/python/keras/engine/network.py61
-rw-r--r--tensorflow/python/keras/engine/saving.py125
-rw-r--r--tensorflow/python/keras/engine/saving_test.py78
-rw-r--r--tensorflow/python/keras/engine/sequential.py6
-rw-r--r--tensorflow/python/keras/engine/training.py12
-rw-r--r--tensorflow/python/keras/engine/training_arrays.py18
-rw-r--r--tensorflow/python/keras/engine/training_eager.py487
-rw-r--r--tensorflow/python/keras/engine/training_generator.py20
-rw-r--r--tensorflow/python/keras/engine/training_utils.py138
-rw-r--r--tensorflow/python/keras/engine/training_utils_test.py150
-rw-r--r--tensorflow/python/keras/estimator/__init__.py2
-rw-r--r--tensorflow/python/keras/initializers.py51
-rw-r--r--tensorflow/python/keras/layers/convolutional_recurrent.py2
-rw-r--r--tensorflow/python/keras/layers/core.py9
-rw-r--r--tensorflow/python/keras/layers/cudnn_recurrent_test.py125
-rw-r--r--tensorflow/python/keras/layers/embeddings.py5
-rw-r--r--tensorflow/python/keras/layers/normalization.py30
-rw-r--r--tensorflow/python/keras/layers/normalization_test.py18
-rw-r--r--tensorflow/python/keras/layers/recurrent.py12
-rw-r--r--tensorflow/python/keras/layers/recurrent_test.py18
-rw-r--r--tensorflow/python/keras/layers/wrappers.py128
-rw-r--r--tensorflow/python/keras/layers/wrappers_test.py68
-rw-r--r--tensorflow/python/keras/model_subclassing_test.py6
-rw-r--r--tensorflow/python/keras/models_test.py9
-rw-r--r--tensorflow/python/keras/optimizers.py58
-rw-r--r--tensorflow/python/keras/optimizers_test.py6
-rw-r--r--tensorflow/python/keras/testing_utils.py74
-rw-r--r--tensorflow/python/kernel_tests/BUILD4
-rw-r--r--tensorflow/python/kernel_tests/boosted_trees/prediction_ops_test.py144
-rw-r--r--tensorflow/python/kernel_tests/boosted_trees/training_ops_test.py44
-rw-r--r--tensorflow/python/kernel_tests/constant_op_eager_test.py31
-rw-r--r--tensorflow/python/kernel_tests/dct_ops_test.py96
-rw-r--r--tensorflow/python/kernel_tests/embedding_ops_test.py218
-rw-r--r--tensorflow/python/kernel_tests/functional_ops_test.py88
-rw-r--r--tensorflow/python/kernel_tests/init_ops_test.py46
-rw-r--r--tensorflow/python/kernel_tests/linalg/BUILD23
-rw-r--r--tensorflow/python/kernel_tests/linalg/linear_operator_block_diag_test.py35
-rw-r--r--tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py110
-rw-r--r--tensorflow/python/kernel_tests/linalg/linear_operator_composition_test.py68
-rw-r--r--tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py22
-rw-r--r--tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py73
-rw-r--r--tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py31
-rw-r--r--tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py31
-rw-r--r--tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py96
-rw-r--r--tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py28
-rw-r--r--tensorflow/python/kernel_tests/resource_variable_ops_test.py57
-rw-r--r--tensorflow/python/kernel_tests/rnn_test.py6
-rw-r--r--tensorflow/python/kernel_tests/sparse_serialization_ops_test.py45
-rw-r--r--tensorflow/python/kernel_tests/variable_scope_test.py110
-rw-r--r--tensorflow/python/kernel_tests/variables_test.py2
-rw-r--r--tensorflow/python/layers/base.py41
-rw-r--r--tensorflow/python/layers/base_test.py26
-rw-r--r--tensorflow/python/layers/normalization.py2
-rw-r--r--tensorflow/python/lib/core/numpy.h1
-rw-r--r--tensorflow/python/lib/core/py_seq_tensor.cc39
-rw-r--r--tensorflow/python/lib/core/py_util.cc1
-rw-r--r--tensorflow/python/ops/boosted_trees_ops.py2
-rw-r--r--tensorflow/python/ops/collective_ops.py2
-rw-r--r--tensorflow/python/ops/collective_ops_test.py4
-rw-r--r--tensorflow/python/ops/control_flow_ops.py34
-rw-r--r--tensorflow/python/ops/control_flow_ops_test.py22
-rw-r--r--tensorflow/python/ops/distributions/distribution.py2
-rw-r--r--tensorflow/python/ops/distributions/exponential.py3
-rw-r--r--tensorflow/python/ops/embedding_ops.py157
-rw-r--r--tensorflow/python/ops/functional_ops.py63
-rw-r--r--tensorflow/python/ops/gradients_impl.py183
-rw-r--r--tensorflow/python/ops/gradients_test.py172
-rw-r--r--tensorflow/python/ops/image_ops_impl.py89
-rw-r--r--tensorflow/python/ops/image_ops_test.py2
-rw-r--r--tensorflow/python/ops/init_ops.py28
-rw-r--r--tensorflow/python/ops/linalg/linear_operator.py8
-rw-r--r--tensorflow/python/ops/linalg/linear_operator_diag.py5
-rw-r--r--tensorflow/python/ops/linalg/linear_operator_low_rank_update.py31
-rw-r--r--tensorflow/python/ops/linalg/linear_operator_lower_triangular.py8
-rw-r--r--tensorflow/python/ops/linalg/linear_operator_test_util.py41
-rw-r--r--tensorflow/python/ops/linalg_ops.py2
-rw-r--r--tensorflow/python/ops/logging_ops.py9
-rw-r--r--tensorflow/python/ops/losses/losses_impl.py3
-rw-r--r--tensorflow/python/ops/math_ops.py14
-rw-r--r--tensorflow/python/ops/math_ops_test.py4
-rw-r--r--tensorflow/python/ops/metrics_impl.py19
-rw-r--r--tensorflow/python/ops/nn_ops.py9
-rw-r--r--tensorflow/python/ops/parallel_for/BUILD128
-rw-r--r--tensorflow/python/ops/parallel_for/__init__.py35
-rw-r--r--tensorflow/python/ops/parallel_for/control_flow_ops.py123
-rw-r--r--tensorflow/python/ops/parallel_for/control_flow_ops_test.py1404
-rw-r--r--tensorflow/python/ops/parallel_for/gradients.py126
-rw-r--r--tensorflow/python/ops/parallel_for/gradients_test.py579
-rw-r--r--tensorflow/python/ops/parallel_for/pfor.py2552
-rw-r--r--tensorflow/python/ops/resource_variable_ops.py147
-rw-r--r--tensorflow/python/ops/rnn.py19
-rw-r--r--tensorflow/python/ops/rnn_cell_impl.py56
-rw-r--r--tensorflow/python/ops/script_ops.py2
-rw-r--r--tensorflow/python/ops/special_math_ops.py16
-rw-r--r--tensorflow/python/ops/special_math_ops_test.py64
-rw-r--r--tensorflow/python/ops/spectral_ops.py125
-rw-r--r--tensorflow/python/ops/state_ops.py5
-rw-r--r--tensorflow/python/ops/summary_ops_v2.py12
-rw-r--r--tensorflow/python/ops/variable_scope.py352
-rw-r--r--tensorflow/python/ops/variables.py695
-rw-r--r--tensorflow/python/platform/benchmark.py10
-rw-r--r--tensorflow/python/platform/self_check.py2
-rw-r--r--tensorflow/python/profiler/model_analyzer_test.py2
-rw-r--r--tensorflow/python/tools/api/generator/BUILD (renamed from tensorflow/tools/api/generator/BUILD)17
-rw-r--r--tensorflow/python/tools/api/generator/api_gen.bzl (renamed from tensorflow/tools/api/generator/api_gen.bzl)62
-rw-r--r--tensorflow/python/tools/api/generator/create_python_api.py (renamed from tensorflow/tools/api/generator/create_python_api.py)49
-rw-r--r--tensorflow/python/tools/api/generator/create_python_api_test.py (renamed from tensorflow/tools/api/generator/create_python_api_test.py)11
-rw-r--r--tensorflow/python/tools/api/generator/doc_srcs.py (renamed from tensorflow/tools/api/generator/doc_srcs.py)0
-rw-r--r--tensorflow/python/tools/api/generator/doc_srcs_test.py (renamed from tensorflow/tools/api/generator/doc_srcs_test.py)28
-rw-r--r--tensorflow/python/training/checkpointable/BUILD8
-rw-r--r--tensorflow/python/training/checkpointable/base.py67
-rw-r--r--tensorflow/python/training/checkpointable/data_structures.py293
-rw-r--r--tensorflow/python/training/checkpointable/data_structures_test.py65
-rw-r--r--tensorflow/python/training/checkpointable/layer_utils.py93
-rw-r--r--tensorflow/python/training/checkpointable/tracking.py47
-rw-r--r--tensorflow/python/training/checkpointable/tracking_test.py126
-rw-r--r--tensorflow/python/training/checkpointable/util.py151
-rw-r--r--tensorflow/python/training/checkpointable/util_test.py2
-rw-r--r--tensorflow/python/training/distribute.py104
-rw-r--r--tensorflow/python/training/distribute_test.py39
-rw-r--r--tensorflow/python/training/optimizer.py16
-rw-r--r--tensorflow/python/training/quantize_training.i2
-rw-r--r--tensorflow/python/training/saver.py50
-rw-r--r--tensorflow/python/training/saver_test.py54
-rw-r--r--tensorflow/python/training/server_lib.py9
-rw-r--r--tensorflow/python/util/deprecation.py72
-rw-r--r--tensorflow/python/util/deprecation_test.py28
-rw-r--r--tensorflow/python/util/lock_util_test.py3
-rw-r--r--tensorflow/python/util/nest.py11
-rw-r--r--tensorflow/python/util/py_checkpoint_reader.i1
-rw-r--r--tensorflow/python/util/stat_summarizer.i25
-rw-r--r--tensorflow/python/util/tf_export.py87
-rw-r--r--tensorflow/python/util/tf_export_test.py2
-rw-r--r--tensorflow/python/util/tf_inspect.py10
-rw-r--r--tensorflow/python/util/tf_inspect_test.py12
-rw-r--r--tensorflow/python/util/tf_stack.py103
-rw-r--r--tensorflow/python/util/util.cc6
-rw-r--r--tensorflow/security/advisory/tfsa-2018-001.md2
-rw-r--r--tensorflow/security/index.md2
-rw-r--r--tensorflow/stream_executor/BUILD11
-rw-r--r--tensorflow/stream_executor/cuda/cuda_dnn.cc35
-rw-r--r--tensorflow/stream_executor/cuda/cuda_dnn.h21
-rw-r--r--tensorflow/stream_executor/dnn.h21
-rw-r--r--tensorflow/stream_executor/event.cc11
-rw-r--r--tensorflow/stream_executor/event.h3
-rw-r--r--tensorflow/stream_executor/host/host_gpu_executor.cc9
-rw-r--r--tensorflow/stream_executor/lib/statusor.cc (renamed from tensorflow/compiler/xla/statusor.cc)8
-rw-r--r--tensorflow/stream_executor/lib/statusor.h290
-rw-r--r--tensorflow/stream_executor/lib/statusor_internals.h (renamed from tensorflow/compiler/xla/statusor_internals.h)15
-rw-r--r--tensorflow/stream_executor/lib/statusor_test.cc (renamed from tensorflow/compiler/xla/statusor_test.cc)11
-rw-r--r--tensorflow/stream_executor/stream.cc82
-rw-r--r--tensorflow/stream_executor/stream.h82
-rw-r--r--tensorflow/tensorflow.bzl64
-rw-r--r--tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt6
-rw-r--r--tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt6
-rw-r--r--tensorflow/tools/api/golden/tensorflow.-g-p-u-options.pbtxt6
-rw-r--r--tensorflow/tools/api/golden/tensorflow.-variable-aggregation.pbtxt16
-rw-r--r--tensorflow/tools/api/golden/tensorflow.-variable-scope.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.-variable-synchronization.pbtxt20
-rw-r--r--tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-classifier.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-regressor.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-classifier.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-regressor.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.estimator.-linear-classifier.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.estimator.-linear-regressor.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt6
-rw-r--r--tensorflow/tools/api/golden/tensorflow.image.pbtxt4
-rw-r--r--tensorflow/tools/api/golden/tensorflow.initializers.variance_scaling.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.callbacks.-early-stopping.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.initializers.-variance-scaling.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.initializers.pbtxt4
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-activation.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-activity-regularization.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-add.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-alpha-dropout.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-average.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-batch-normalization.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-bidirectional.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-concatenate.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-conv1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d-transpose.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d-transpose.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-dense.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-dot.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-dropout.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-e-l-u.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-embedding.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-flatten.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u-cell.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-dropout.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-noise.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-input-layer.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-lambda.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-layer.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-leaky-re-l-u.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-masking.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-maximum.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-minimum.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-multiply.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-p-re-l-u.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-permute.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-r-n-n.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-re-l-u.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-repeat-vector.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-reshape.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-softmax.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-subtract.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-time-distributed.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-wrapper.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-average-pooling1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-average-pooling2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-average-pooling3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-batch-normalization.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-conv1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-conv2-d-transpose.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-conv2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-conv3-d-transpose.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-conv3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-dense.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-dropout.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-flatten.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-layer.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-max-pooling1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-max-pooling2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-max-pooling3-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-separable-conv1-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.layers.-separable-conv2-d.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.math.pbtxt4
-rw-r--r--tensorflow/tools/api/golden/tensorflow.nn.pbtxt4
-rw-r--r--tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/tensorflow.spectral.pbtxt4
-rw-r--r--tensorflow/tools/api/golden/tensorflow.variance_scaling_initializer.pbtxt2
-rw-r--r--tensorflow/tools/api/lib/python_object_to_proto_visitor.py3
-rw-r--r--tensorflow/tools/api/tests/api_compatibility_test.py42
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le2
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le2
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.rbe.cpu4
-rwxr-xr-xtensorflow/tools/ci_build/ci_parameterized_build.sh10
-rwxr-xr-xtensorflow/tools/ci_build/ci_sanity.sh6
-rwxr-xr-xtensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh28
-rwxr-xr-xtensorflow/tools/ci_build/install/install_bazel.sh2
-rwxr-xr-xtensorflow/tools/ci_build/install/install_bazel_from_source.sh2
-rwxr-xr-xtensorflow/tools/ci_build/install/install_openblas_ppc64le.sh29
-rwxr-xr-xtensorflow/tools/ci_build/linux/mkl/build-dev-container.sh11
-rwxr-xr-xtensorflow/tools/ci_build/pi/build_raspberry_pi.sh4
-rwxr-xr-xtensorflow/tools/ci_build/update_version.py10
-rw-r--r--tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh17
-rw-r--r--tensorflow/tools/ci_build/windows/bazel/common_env.sh12
-rw-r--r--tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh4
-rw-r--r--tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh66
-rw-r--r--tensorflow/tools/compatibility/ast_edits.py502
-rw-r--r--tensorflow/tools/docker/Dockerfile.devel2
-rw-r--r--tensorflow/tools/docker/Dockerfile.devel-cpu-mkl83
-rw-r--r--tensorflow/tools/docker/Dockerfile.devel-gpu18
-rw-r--r--tensorflow/tools/docker/Dockerfile.devel-gpu-cuda9-cudnn7115
-rwxr-xr-xtensorflow/tools/docker/Dockerfile.devel-mkl17
-rw-r--r--tensorflow/tools/docker/Dockerfile.gpu1
-rw-r--r--tensorflow/tools/docs/BUILD2
-rw-r--r--tensorflow/tools/docs/doc_generator_visitor.py15
-rw-r--r--tensorflow/tools/docs/generate_lib.py80
-rw-r--r--tensorflow/tools/docs/generate_lib_test.py110
-rw-r--r--tensorflow/tools/lib_package/BUILD4
-rw-r--r--tensorflow/tools/pip_package/BUILD22
-rwxr-xr-xtensorflow/tools/pip_package/build_pip_package.sh2
-rw-r--r--tensorflow/tools/pip_package/setup.py12
-rw-r--r--tensorflow/workspace.bzl126
-rw-r--r--third_party/aws.BUILD3
-rw-r--r--third_party/clang_toolchain/download_clang.bzl8
-rw-r--r--third_party/codegen.BUILD16
-rw-r--r--third_party/eigen.BUILD6
-rw-r--r--third_party/eigen3/BUILD60
-rw-r--r--third_party/googleapis.BUILD45
-rw-r--r--third_party/gpus/crosstool/BUILD.tpl20
-rw-r--r--third_party/gpus/crosstool/CROSSTOOL.tpl869
-rwxr-xr-xthird_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc.tpl6
-rw-r--r--third_party/gpus/crosstool/windows/msvc_wrapper_for_nvcc.bat.tpl20
-rw-r--r--third_party/gpus/crosstool/windows/msvc_wrapper_for_nvcc.py.tpl192
-rw-r--r--third_party/gpus/cuda/BUILD.windows.tpl163
-rw-r--r--third_party/gpus/cuda_configure.bzl2163
-rw-r--r--third_party/kafka/BUILD5
-rw-r--r--third_party/llvm/llvm.autogenerated.BUILD349
-rw-r--r--third_party/llvm/llvm.bzl140
-rw-r--r--third_party/mkl/LICENSE201
-rw-r--r--third_party/mkl_dnn/BUILD10
-rw-r--r--third_party/mkl_dnn/LICENSE201
-rw-r--r--third_party/mkl_dnn/build_defs.bzl13
-rw-r--r--third_party/mkl_dnn/mkldnn.BUILD29
-rw-r--r--third_party/nanopb.BUILD23
-rw-r--r--third_party/nasm.BUILD180
-rw-r--r--third_party/nccl/nccl_configure.bzl2
-rw-r--r--third_party/repo.bzl49
-rw-r--r--third_party/systemlibs/BUILD0
-rw-r--r--third_party/systemlibs/BUILD.tpl0
-rw-r--r--third_party/systemlibs/astor.BUILD12
-rw-r--r--third_party/systemlibs/build_defs.bzl.tpl32
-rw-r--r--third_party/systemlibs/curl.BUILD12
-rw-r--r--third_party/systemlibs/cython.BUILD13
-rw-r--r--third_party/systemlibs/flatbuffers.BUILD38
-rw-r--r--third_party/systemlibs/gif.BUILD12
-rw-r--r--third_party/systemlibs/grpc.BUILD54
-rw-r--r--third_party/systemlibs/jemalloc.BUILD30
-rw-r--r--third_party/systemlibs/jpeg.BUILD12
-rw-r--r--third_party/systemlibs/jsoncpp.BUILD37
-rw-r--r--third_party/systemlibs/lmdb.BUILD12
-rw-r--r--third_party/systemlibs/nasm.BUILD12
-rw-r--r--third_party/systemlibs/pcre.BUILD12
-rw-r--r--third_party/systemlibs/png.BUILD12
-rw-r--r--third_party/systemlibs/re2.BUILD12
-rw-r--r--third_party/systemlibs/six.BUILD11
-rw-r--r--third_party/systemlibs/snappy.BUILD12
-rw-r--r--third_party/systemlibs/sqlite.BUILD15
-rw-r--r--third_party/systemlibs/swig.BUILD23
-rw-r--r--third_party/systemlibs/syslibs_configure.bzl160
-rw-r--r--third_party/systemlibs/termcolor.BUILD12
-rw-r--r--third_party/systemlibs/zlib.BUILD12
-rw-r--r--third_party/toolchains/BUILD22
-rw-r--r--third_party/toolchains/clang6/CROSSTOOL.tpl3
-rw-r--r--tools/bazel.rc6
2141 files changed, 117237 insertions, 42562 deletions
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index db4b1581ae..f598999f35 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -107,7 +107,7 @@ diff <my_cc_file> /tmp/my_cc_file.cc
#### Python coding style
Changes to TensorFlow Python code should conform to
-[Google Python Style Guide](https://google.github.io/styleguide/pyguide.html)
+[Google Python Style Guide](https://github.com/google/styleguide/blob/gh-pages/pyguide.md)
Use `pylint` to check your Python changes. To install `pylint` and
retrieve TensorFlow's custom style definition:
diff --git a/README.md b/README.md
index 42d7bbc104..05fcb23f7e 100644
--- a/README.md
+++ b/README.md
@@ -96,6 +96,8 @@ The TensorFlow project strives to abide by generally accepted best practices in
| --- | --- | --- |
| **IBM s390x** | [![Build Status](http://ibmz-ci.osuosl.org/job/TensorFlow_IBMZ_CI/badge/icon)](http://ibmz-ci.osuosl.org/job/TensorFlow_IBMZ_CI/) | TBA |
| **IBM ppc64le CPU** | [![Build Status](http://powerci.osuosl.org/job/TensorFlow_Ubuntu_16.04_CPU/badge/icon)](http://powerci.osuosl.org/job/TensorFlow_Ubuntu_16.04_CPU/) | TBA |
+| **IBM ppc64le GPU** | [![Build Status](http://powerci.osuosl.org/job/TensorFlow_Ubuntu_16.04_PPC64LE_GPU/badge/icon)](http://powerci.osuosl.org/job/TensorFlow_Ubuntu_16.04_PPC64LE_GPU/) | TBA |
+| **Linux CPU with Intel® MKL-DNN®** | [![Build Status](https://tensorflow-ci.intel.com/job/tensorflow-mkl-linux-cpu/badge/icon)](https://tensorflow-ci.intel.com/job/tensorflow-mkl-linux-cpu/) | TBA |
## For more information
diff --git a/RELEASE.md b/RELEASE.md
index 76c1401a01..7bb1e3e1c8 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,18 +1,38 @@
# Release 1.9.0
## Major Features And Improvements
-* Update tf.keras to the Keras 2.1.6 API.
+* Updated docs for `tf.keras`: New Keras-based [get started](http://tensorflow.org/versions/r1.9/get_started),
+ and [programmers guide page](http://tensorflow.org/versions/r1.9/programmers_guide/keras).
+* Update `tf.keras` to the Keras 2.1.6 API.
+* Added [`tf.keras.layers.CuDNNGRU`](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/keras/layers/CuDNNGRU) and [`tf.keras.layers.CuDNNLSTM`](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/keras/layers/CuDNNLSTM) layers. [Try it](https://colab.sandbox.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb?linkId=53292082).
+* Added support for core [feature columns](https://www.tensorflow.org/get_started/feature_columns) and [losses](https://www.tensorflow.org/api_docs/python/tf/losses) to [gradient boosted trees estimators](https://github.com/tensorflow/models/tree/master/official/boosted_trees).
+* The [Python interface](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/contrib/lite)
+ for the [TFLite Optimizing Converter](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/lite/toco/README.md)
+ has been expanded, and the command-line interface (a.k.a. `toco`, `tflite_convert`) is once again
+ included in the standard `pip` installation.
+* Improved data-loading and text processing with:
+ * [`tf.decode_compressed`](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/decode_compressed)
+ * [`tf.string_strip`](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/string_strip)
+ * [`tf.strings.regex_full_match`](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/strings/regex_full_match)
+* Added experimental support for new pre-made Estimators:
+ * [`tf.contrib.estimator.BaselineEstimator`](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/contrib/estimator/BaselineEstimator)
+ * [`tf.contrib.estimator.RNNClassifier`](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/contrib/estimator/RNNClassifier)
+ * [`tf.contrib.estimator.RNNEstimator`](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/contrib/estimator/RNNEstimator)
+* The [distributions.Bijector](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/contrib/distributions/bijectors/Bijector)
+ API supports broadcasting for Bijectors with new API changes.
+
+## Breaking Changes
+ * If you're opening empty variable scopes, replace `variable_scope('', ...)` with
+ `variable_scope(tf.get_variable_scope(), ...)`.
+ * Headers used for building custom ops have been moved from `site-packages/external` into `site-packages/tensorflow/include/external`.
+
+## Bug Fixes and Other Changes
+
* `tfe.Network` is deprecated. Please inherit from `tf.keras.Model`.
-* Adding support of core feature columns and losses to gradient boosted trees estimators.
-* The distributions.Bijector API supports broadcasting for Bijectors with new API changes. See [here](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/distributions/bijectors/Bijector) for more details.
* Layered variable names have changed in the following conditions:
* Using `tf.keras.layers` with custom variable scopes.
- * Using `tf.layers` in a subclassed `tf.keras.Model` class. See [here](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/layers) for more details
-
-## Breaking Chances
- * If you're opening empty variable scopes; replace `variable_scope`('', ...) by `variable_scope`(`tf.get_variable_scope()`, ...).
-
-## Bug Fixes and Other Changes
+ * Using `tf.layers` in a subclassed `tf.keras.Model` class. See
+ [here](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/layers) for more details.
* `tf.data`:
* The `DatasetBase::DebugString()` method is now `const`.
* Added the `tf.contrib.data.sample_from_datasets()` API for randomly sampling from multiple datasets.
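
[Editor's note] To make the variable-scope breaking change above concrete, here is a minimal Python sketch of the migration described in the release notes; the variable name `v` is illustrative, and the snippet assumes TensorFlow 1.x graph mode:

    import tensorflow as tf

    # Before (no longer supported): reopening the current scope via an
    # empty scope name.
    #   with tf.variable_scope('', reuse=tf.AUTO_REUSE):
    #       v = tf.get_variable('v', shape=[])

    # After: pass the current VariableScope object explicitly.
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        v = tf.get_variable('v', shape=[])
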
diff --git a/configure.py b/configure.py
index ad585fa52e..eaff83d2cc 100644
--- a/configure.py
+++ b/configure.py
@@ -35,8 +35,8 @@ except ImportError:
_DEFAULT_CUDA_VERSION = '9.0'
_DEFAULT_CUDNN_VERSION = '7'
-_DEFAULT_NCCL_VERSION = '1.3'
-_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,5.2'
+_DEFAULT_NCCL_VERSION = '2.2'
+_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_DEFAULT_CUDA_PATH = '/usr/local/cuda'
_DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
_DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
@@ -835,6 +835,8 @@ def set_tf_cuda_version(environ_cp):
'[Default is %s]: ') % (tf_cuda_version, default_cuda_path)
cuda_toolkit_path = get_from_env_or_user_or_default(
environ_cp, 'CUDA_TOOLKIT_PATH', ask_cuda_path, default_cuda_path)
+ if is_windows() or is_cygwin():
+ cuda_toolkit_path = cygpath(cuda_toolkit_path)
if is_windows():
cuda_rt_lib_path = 'lib/x64/cudart.lib'
@@ -1095,8 +1097,10 @@ def set_tf_nccl_install_path(environ_cp):
raise ValueError('Currently NCCL is only supported on Linux platforms.')
ask_nccl_version = (
- 'Please specify the NCCL version you want to use. '
- '[Leave empty to default to NCCL %s]: ') % _DEFAULT_NCCL_VERSION
+ 'Please specify the NCCL version you want to use. If NCCL %s is not '
+ 'installed, you can use version 1.3, which can be fetched '
+ 'automatically, but it may have worse performance with multiple GPUs. '
+ '[Default is %s]: ') % (_DEFAULT_NCCL_VERSION, _DEFAULT_NCCL_VERSION)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
tf_nccl_version = get_from_env_or_user_or_default(
@@ -1232,28 +1236,13 @@ def set_tf_cuda_compute_capabilities(environ_cp):
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
- if is_windows():
- # The following three variables are needed for MSVC toolchain configuration
- # in Bazel
- environ_cp['CUDA_PATH'] = environ_cp.get('CUDA_TOOLKIT_PATH')
- environ_cp['CUDA_COMPUTE_CAPABILITIES'] = environ_cp.get(
- 'TF_CUDA_COMPUTE_CAPABILITIES')
- environ_cp['NO_WHOLE_ARCHIVE_OPTION'] = 1
- write_action_env_to_bazelrc('CUDA_PATH', environ_cp.get('CUDA_PATH'))
- write_action_env_to_bazelrc('CUDA_COMPUTE_CAPABILITIE',
- environ_cp.get('CUDA_COMPUTE_CAPABILITIE'))
- write_action_env_to_bazelrc('NO_WHOLE_ARCHIVE_OPTION',
- environ_cp.get('NO_WHOLE_ARCHIVE_OPTION'))
- write_to_bazelrc('build --config=win-cuda')
- write_to_bazelrc('test --config=win-cuda')
+ # If CUDA is enabled, always use GPU during build and test.
+ if environ_cp.get('TF_CUDA_CLANG') == '1':
+ write_to_bazelrc('build --config=cuda_clang')
+ write_to_bazelrc('test --config=cuda_clang')
else:
- # If CUDA is enabled, always use GPU during build and test.
- if environ_cp.get('TF_CUDA_CLANG') == '1':
- write_to_bazelrc('build --config=cuda_clang')
- write_to_bazelrc('test --config=cuda_clang')
- else:
- write_to_bazelrc('build --config=cuda')
- write_to_bazelrc('test --config=cuda')
+ write_to_bazelrc('build --config=cuda')
+ write_to_bazelrc('test --config=cuda')
def set_host_cxx_compiler(environ_cp):
@@ -1447,7 +1436,7 @@ def main():
setup_python(environ_cp)
if is_windows():
- environ_cp['TF_NEED_S3'] = '0'
+ environ_cp['TF_NEED_AWS'] = '0'
environ_cp['TF_NEED_GCP'] = '0'
environ_cp['TF_NEED_HDFS'] = '0'
environ_cp['TF_NEED_JEMALLOC'] = '0'
@@ -1460,19 +1449,31 @@ def main():
# TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
# Windows.
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
+ environ_cp['TF_ENABLE_XLA'] = '0'
+ environ_cp['TF_NEED_GDR'] = '0'
+ environ_cp['TF_NEED_VERBS'] = '0'
+ environ_cp['TF_NEED_MPI'] = '0'
+ environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'
if is_macos():
environ_cp['TF_NEED_JEMALLOC'] = '0'
environ_cp['TF_NEED_TENSORRT'] = '0'
+ # The numpy package on ppc64le uses OpenBLAS, which has multi-threading
+ # issues that lead to incorrect answers. Set OMP_NUM_THREADS=1 at
+ # runtime so that the TensorFlow test cases that compare numpy
+ # results against TensorFlow results succeed.
+ if is_ppc64le():
+ write_action_env_to_bazelrc("OMP_NUM_THREADS", 1)
+
set_build_var(environ_cp, 'TF_NEED_JEMALLOC', 'jemalloc as malloc',
'with_jemalloc', True)
set_build_var(environ_cp, 'TF_NEED_GCP', 'Google Cloud Platform',
'with_gcp_support', True, 'gcp')
set_build_var(environ_cp, 'TF_NEED_HDFS', 'Hadoop File System',
'with_hdfs_support', True, 'hdfs')
- set_build_var(environ_cp, 'TF_NEED_S3', 'Amazon S3 File System',
- 'with_s3_support', True, 's3')
+ set_build_var(environ_cp, 'TF_NEED_AWS', 'Amazon AWS Platform',
+ 'with_aws_support', True, 'aws')
set_build_var(environ_cp, 'TF_NEED_KAFKA', 'Apache Kafka Platform',
'with_kafka_support', True, 'kafka')
set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
diff --git a/tensorflow/BUILD b/tensorflow/BUILD
index a15d033013..518c2b0489 100644
--- a/tensorflow/BUILD
+++ b/tensorflow/BUILD
@@ -20,7 +20,7 @@ load(
"tf_additional_binary_deps",
)
load(
- "//tensorflow/tools/api/generator:api_gen.bzl",
+ "//tensorflow/python/tools/api/generator:api_gen.bzl",
"gen_api_init_files", # @unused
)
@@ -216,8 +216,8 @@ config_setting(
)
config_setting(
- name = "with_s3_support",
- define_values = {"with_s3_support": "true"},
+ name = "with_aws_support",
+ define_values = {"with_aws_support": "true"},
visibility = ["//visibility:public"],
)
@@ -244,8 +244,8 @@ config_setting(
)
config_setting(
- name = "with_s3_support_windows_override",
- define_values = {"with_s3_support": "true"},
+ name = "with_aws_support_windows_override",
+ define_values = {"with_aws_support": "true"},
values = {"cpu": "x64_windows"},
visibility = ["//visibility:public"],
)
@@ -258,6 +258,13 @@ config_setting(
)
config_setting(
+ name = "with_cuda_support_windows_override",
+ define_values = {"using_cuda_nvcc": "true"},
+ values = {"cpu": "x64_windows"},
+ visibility = ["//visibility:public"],
+)
+
+config_setting(
name = "with_gcp_support_android_override",
define_values = {"with_gcp_support": "true"},
values = {"crosstool_top": "//external:android/crosstool"},
@@ -272,8 +279,8 @@ config_setting(
)
config_setting(
- name = "with_s3_support_android_override",
- define_values = {"with_s3_support": "true"},
+ name = "with_aws_support_android_override",
+ define_values = {"with_aws_support": "true"},
values = {"crosstool_top": "//external:android/crosstool"},
visibility = ["//visibility:public"],
)
@@ -293,8 +300,8 @@ config_setting(
)
config_setting(
- name = "with_s3_support_ios_override",
- define_values = {"with_s3_support": "true"},
+ name = "with_aws_support_ios_override",
+ define_values = {"with_aws_support": "true"},
values = {"crosstool_top": "//tools/osx/crosstool:crosstool"},
visibility = ["//visibility:public"],
)
diff --git a/tensorflow/c/c_api.cc b/tensorflow/c/c_api.cc
index 9d5f98d4d6..5c218d3f25 100644
--- a/tensorflow/c/c_api.cc
+++ b/tensorflow/c/c_api.cc
@@ -2068,7 +2068,8 @@ TF_ImportGraphDefResults* TF_GraphImportGraphDefWithResults(
TF_Graph* graph, const TF_Buffer* graph_def,
const TF_ImportGraphDefOptions* options, TF_Status* status) {
GraphDef def;
- if (!tensorflow::ParseProtoUnlimited(&def, graph_def->data, graph_def->length)) {
+ if (!tensorflow::ParseProtoUnlimited(&def, graph_def->data,
+ graph_def->length)) {
status->status = InvalidArgument("Invalid GraphDef");
return nullptr;
}
@@ -2098,7 +2099,8 @@ void TF_GraphImportGraphDefWithReturnOutputs(
return;
}
GraphDef def;
- if (!tensorflow::ParseProtoUnlimited(&def, graph_def->data, graph_def->length)) {
+ if (!tensorflow::ParseProtoUnlimited(&def, graph_def->data,
+ graph_def->length)) {
status->status = InvalidArgument("Invalid GraphDef");
return;
}
@@ -2414,7 +2416,18 @@ void TF_AddGradients(TF_Graph* g, TF_Output* y, int ny, TF_Output* x, int nx,
for (int i = first_new_node_id; i < g->graph.num_node_ids(); ++i) {
Node* n = g->graph.FindNodeId(i);
if (n == nullptr) continue;
- g->name_map[n->name()] = n;
+ // We have a convoluted scheme here: we use the C++ graph construction API
+ // to add potentially many nodes to the graph without running the checks
+ // (such as uniqueness of node names) that we run with other functions
+ // that add a node to the graph (like TF_FinishOperation).
+ if (!g->name_map.insert(std::make_pair(n->name(), n)).second) {
+ status->status = tensorflow::errors::Internal(
+ "BUG: The API allowed construction of a graph with duplicate node "
+ "names (",
+ n->name(),
+ "). This is a bug. Please file an issue at "
+ "https://github.com/tensorflow/tensorflow/issues.");
+ }
}
}
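
[Editor's note] The duplicate-name check added to TF_AddGradients above is an insert-or-detect pattern over the graph's name map. A rough Python sketch of the same idea (illustrative only, not TensorFlow code):

    def register_nodes(name_map, new_nodes):
        """Insert newly created nodes, flagging duplicate names as a bug."""
        for node in new_nodes:
            if node.name in name_map:
                raise RuntimeError(
                    'BUG: graph construction produced duplicate node '
                    'name %r' % node.name)
            name_map[node.name] = node
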
diff --git a/tensorflow/c/c_api_experimental.cc b/tensorflow/c/c_api_experimental.cc
index 95b04f9058..170046c802 100644
--- a/tensorflow/c/c_api_experimental.cc
+++ b/tensorflow/c/c_api_experimental.cc
@@ -57,6 +57,33 @@ void TF_EnableXLACompilation(TF_SessionOptions* options, unsigned char enable) {
}
}
+TF_Buffer* TF_CreateConfig(unsigned char enable_xla_compilation,
+ unsigned char gpu_memory_allow_growth) {
+ tensorflow::ConfigProto config;
+ auto* optimizer_options =
+ config.mutable_graph_options()->mutable_optimizer_options();
+ if (enable_xla_compilation) {
+ optimizer_options->set_global_jit_level(tensorflow::OptimizerOptions::ON_1);
+
+ // These XLA flags are needed to trigger XLA properly from C (more generally
+ // non-Python) clients. If this API is called again with `enable` set to
+ // false, it is safe to keep these flag values as is.
+ tensorflow::legacy_flags::MarkForCompilationPassFlags* flags =
+ tensorflow::legacy_flags::GetMarkForCompilationPassFlags();
+ flags->tf_xla_cpu_global_jit = true;
+ flags->tf_xla_min_cluster_size = 1;
+ } else {
+ optimizer_options->set_global_jit_level(tensorflow::OptimizerOptions::OFF);
+ }
+
+ auto* gpu_options = config.mutable_gpu_options();
+ gpu_options->set_allow_growth(gpu_memory_allow_growth);
+
+ TF_Buffer* ret = TF_NewBuffer();
+ TF_CHECK_OK(MessageToBuffer(config, ret));
+ return ret;
+}
+
const char* TF_GraphDebugString(TF_Graph* graph, size_t* len) {
tensorflow::mutex_lock c(graph->mu);
const auto& debug_str = graph->graph.ToGraphDefDebug().DebugString();
diff --git a/tensorflow/c/c_api_experimental.h b/tensorflow/c/c_api_experimental.h
index 20bdace40f..2d81c01e0d 100644
--- a/tensorflow/c/c_api_experimental.h
+++ b/tensorflow/c/c_api_experimental.h
@@ -55,11 +55,21 @@ extern "C" {
// set XLA flag values to prepare for XLA compilation. Otherwise set
// global_jit_level to OFF.
//
-// This API is syntax sugar over TF_SetConfig(), and is used by clients that
-// cannot read/write the tensorflow.ConfigProto proto.
+// This and the next API are syntax sugar over TF_SetConfig(), and are used by
+// clients that cannot read/write the tensorflow.ConfigProto proto.
+// TODO: Migrate to TF_CreateConfig() below.
TF_CAPI_EXPORT extern void TF_EnableXLACompilation(TF_SessionOptions* options,
unsigned char enable);
+// Create a serialized tensorflow.ConfigProto proto, where:
+//
+// a) ConfigProto.optimizer_options.global_jit_level is set to ON_1 if
+// `enable_xla_compilation` is non-zero, and OFF otherwise.
+// b) ConfigProto.gpu_options.allow_growth is set to `gpu_memory_allow_growth`.
+TF_CAPI_EXPORT extern TF_Buffer* TF_CreateConfig(
+ unsigned char enable_xla_compilation,
+ unsigned char gpu_memory_allow_growth);
+
// Returns the graph content in a human-readable format, with length set in
// `len`. The format is subject to change in the future.
// The returned string is heap-allocated, and caller should call free() on it.
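
[Editor's note] For readers who know the Python API, the ConfigProto serialized by TF_CreateConfig corresponds roughly to the sketch below; the field names follow the header comment above, and the helper name is hypothetical. The XLA legacy flags that the C implementation also sets have no ConfigProto field and are omitted here:

    import tensorflow as tf

    def create_config(enable_xla_compilation, gpu_memory_allow_growth):
        """Build the serialized ConfigProto described by TF_CreateConfig."""
        config = tf.ConfigProto()
        jit_level = (tf.OptimizerOptions.ON_1 if enable_xla_compilation
                     else tf.OptimizerOptions.OFF)
        config.graph_options.optimizer_options.global_jit_level = jit_level
        config.gpu_options.allow_growth = bool(gpu_memory_allow_growth)
        return config.SerializeToString()
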
diff --git a/tensorflow/c/c_api_test.cc b/tensorflow/c/c_api_test.cc
index 577f10c5e6..bc04b53fbb 100644
--- a/tensorflow/c/c_api_test.cc
+++ b/tensorflow/c/c_api_test.cc
@@ -1160,7 +1160,7 @@ TEST(CAPI, GetOpDef) {
}
void StringVectorToArrays(const std::vector<string>& v,
- std::unique_ptr<const void* []>* ptrs,
+ std::unique_ptr<const void*[]>* ptrs,
std::unique_ptr<size_t[]>* lens) {
ptrs->reset(new const void*[v.size()]);
lens->reset(new size_t[v.size()]);
@@ -1196,7 +1196,7 @@ class CApiColocationTest : public ::testing::Test {
void SetViaStringList(TF_OperationDescription* desc,
const std::vector<string>& list) {
- std::unique_ptr<const void* []> list_ptrs;
+ std::unique_ptr<const void*[]> list_ptrs;
std::unique_ptr<size_t[]> list_lens;
StringVectorToArrays(list, &list_ptrs, &list_lens);
TF_SetAttrStringList(desc, tensorflow::kColocationAttrName, list_ptrs.get(),
@@ -1700,6 +1700,61 @@ TEST_F(CApiGradientsTest, OpWithNoGradientRegistered_NoGradInputs) {
TestGradientsError(false);
}
+void ScalarFloatFromTensor(const TF_Tensor* t, float* f) {
+ ASSERT_TRUE(t != nullptr);
+ ASSERT_EQ(TF_FLOAT, TF_TensorType(t));
+ ASSERT_EQ(0, TF_NumDims(t));
+ ASSERT_EQ(4, TF_TensorByteSize(t));
+ float* p = static_cast<float*>(TF_TensorData(t));
+ *f = *p;
+}
+
+TEST_F(CApiGradientsTest, MultipleCallsToAddGradients) {
+ const float X = 3.0f, Y = 7.0f;
+ TF_Operation* x = Placeholder(graph_, s_, "x", TF_FLOAT);
+ TF_Operation* y = Placeholder(graph_, s_, "y", TF_FLOAT);
+ TF_Operation* xy = Mul(x, y, graph_, s_, "xy");
+ TF_Output dxy_dx, dxy_dy;
+
+ TF_Output outputs[1] = {{xy, 0}};
+ TF_Output inputs[1] = {{x, 0}};
+ TF_AddGradients(graph_, outputs, 1, inputs, 1, nullptr, s_, &dxy_dx);
+ ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
+
+ inputs[0] = {y, 0};
+ TF_AddGradients(graph_, outputs, 1, inputs, 1, nullptr, s_, &dxy_dy);
+ ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
+
+ TF_SessionOptions* opts = TF_NewSessionOptions();
+ TF_Session* sess = TF_NewSession(graph_, opts, s_);
+ TF_DeleteSessionOptions(opts);
+ ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
+
+ TF_Output feeds[] = {{x, 0}, {y, 0}};
+ TF_Tensor* feedValues[] = {FloatTensor(X), FloatTensor(Y)};
+ TF_Output fetches[] = {dxy_dx, dxy_dy};
+ TF_Tensor* fetchValues[] = {nullptr, nullptr};
+
+ TF_SessionRun(sess, nullptr /* run_options */, feeds, feedValues, 2, fetches,
+ fetchValues, 2, nullptr /* target_opers */, 0,
+ nullptr /* run_metadata */, s_);
+ TF_DeleteTensor(feedValues[0]);
+ TF_DeleteTensor(feedValues[1]);
+ ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
+ TF_DeleteSession(sess, s_);
+ ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
+
+ float dxy_dxValue = 0.0f, dxy_dyValue = 0.0f;
+ ScalarFloatFromTensor(fetchValues[0], &dxy_dxValue);
+ EXPECT_EQ(Y, dxy_dxValue);
+
+ ScalarFloatFromTensor(fetchValues[1], &dxy_dyValue);
+ EXPECT_EQ(X, dxy_dyValue);
+
+ TF_DeleteTensor(fetchValues[0]);
+ TF_DeleteTensor(fetchValues[1]);
+}
+
// REGISTER_OP for CApiAttributesTest test cases.
// Registers two ops, each with a single attribute called 'v'.
// The attribute in one op will have a type 'type', the other
@@ -1784,7 +1839,7 @@ TEST_F(CApiAttributesTest, String) {
TEST_F(CApiAttributesTest, StringList) {
std::vector<string> list = {"bugs", "bunny", "duck"};
- std::unique_ptr<const void* []> list_ptrs;
+ std::unique_ptr<const void*[]> list_ptrs;
std::unique_ptr<size_t[]> list_lens;
StringVectorToArrays(list, &list_ptrs, &list_lens);
int list_total_size = 0;
@@ -1800,7 +1855,7 @@ TEST_F(CApiAttributesTest, StringList) {
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
EXPECT_TF_META("v", list.size(), TF_ATTR_STRING, list_total_size);
- std::unique_ptr<void* []> values(new void*[list.size()]);
+ std::unique_ptr<void*[]> values(new void*[list.size()]);
std::unique_ptr<size_t[]> lens(new size_t[list.size()]);
std::unique_ptr<char[]> storage(new char[list_total_size]);
TF_OperationGetAttrStringList(oper, "v", values.get(), lens.get(),
@@ -2025,7 +2080,7 @@ TEST_F(CApiAttributesTest, TensorShapeProtoList) {
tensorflow::PartialTensorShape(pts2).AsProto(&proto);
proto.SerializeToString(&bytes2);
- std::unique_ptr<const void* []> list_ptrs;
+ std::unique_ptr<const void*[]> list_ptrs;
std::unique_ptr<size_t[]> list_lens;
const std::vector<string> list = {bytes1, bytes2};
StringVectorToArrays(list, &list_ptrs, &list_lens);
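
[Editor's note] The new MultipleCallsToAddGradients test exercises the same behavior that tf.gradients provides in Python; a minimal graph-mode equivalent for comparison:

    import tensorflow as tf

    x = tf.placeholder(tf.float32, shape=[], name='x')
    y = tf.placeholder(tf.float32, shape=[], name='y')
    xy = tf.multiply(x, y, name='xy')

    # Two separate gradient requests, mirroring the two TF_AddGradients calls.
    dxy_dx = tf.gradients(xy, [x])[0]  # evaluates to y
    dxy_dy = tf.gradients(xy, [y])[0]  # evaluates to x

    with tf.Session() as sess:
        gx, gy = sess.run([dxy_dx, dxy_dy], feed_dict={x: 3.0, y: 7.0})
        assert (gx, gy) == (7.0, 3.0)
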
diff --git a/tensorflow/c/c_test_util.cc b/tensorflow/c/c_test_util.cc
index f3b28c1708..24eb6c069b 100644
--- a/tensorflow/c/c_test_util.cc
+++ b/tensorflow/c/c_test_util.cc
@@ -216,6 +216,13 @@ TF_Operation* Min(TF_Operation* l, TF_Operation* r, TF_Graph* graph,
return MinWithDevice(l, r, graph, /*op_device=*/"", s, name);
}
+TF_Operation* Mul(TF_Operation* l, TF_Operation* r, TF_Graph* graph,
+ TF_Status* s, const char* name) {
+ TF_Operation* op;
+ BinaryOpHelper("Mul", l, r, graph, s, name, &op, "", true);
+ return op;
+}
+
TF_Operation* Add(TF_Output l, TF_Output r, TF_Graph* graph, TF_Status* s,
const char* name) {
TF_OperationDescription* desc = TF_NewOperation(graph, "AddN", name);
diff --git a/tensorflow/c/c_test_util.h b/tensorflow/c/c_test_util.h
index c16aba666e..38313d647c 100644
--- a/tensorflow/c/c_test_util.h
+++ b/tensorflow/c/c_test_util.h
@@ -80,6 +80,9 @@ TF_Operation* Add(TF_Output l, TF_Output r, TF_Graph* graph, TF_Status* s,
TF_Operation* Min(TF_Operation* l, TF_Operation* r, TF_Graph* graph,
TF_Status* s, const char* name = "min");
+TF_Operation* Mul(TF_Operation* l, TF_Operation* r, TF_Graph* graph,
+ TF_Status* s, const char* name = "mul");
+
// If `op_device` is non-empty, set the created op on that device.
TF_Operation* MinWithDevice(TF_Operation* l, TF_Operation* r, TF_Graph* graph,
const string& op_device, TF_Status* s,
diff --git a/tensorflow/c/eager/c_api.cc b/tensorflow/c/eager/c_api.cc
index 00b474fe86..6c510536d6 100644
--- a/tensorflow/c/eager/c_api.cc
+++ b/tensorflow/c/eager/c_api.cc
@@ -156,12 +156,14 @@ tensorflow::Status NewRemoteAwareTFE_Context(const TFE_ContextOptions* opts,
// server object (which currently CHECK-fails) and we miss the error, instead,
// we log the error, and then return to allow the user to see the error
// message.
-#define LOG_AND_RETURN_IF_ERROR(...) \
- do { \
- const ::tensorflow::Status _status = (__VA_ARGS__); \
- LOG(ERROR) << _status.error_message(); \
- if (TF_PREDICT_FALSE(!_status.ok())) return _status; \
- } while (0)
+#define LOG_AND_RETURN_IF_ERROR(...) \
+ do { \
+ const ::tensorflow::Status _status = (__VA_ARGS__); \
+ if (TF_PREDICT_FALSE(!_status.ok())) { \
+ LOG(ERROR) << _status.error_message(); \
+ return _status; \
+ } \
+ } while (0);
string worker_name = tensorflow::strings::StrCat(
"/job:", opts->server_def.job_name(),
@@ -346,16 +348,16 @@ TF_DataType TFE_TensorHandleDataType(TFE_TensorHandle* h) {
}
int TFE_TensorHandleNumDims(TFE_TensorHandle* h, TF_Status* status) {
- const tensorflow::Tensor* t = nullptr;
- status->status = h->handle->Tensor(&t);
- return t == nullptr ? 0 : t->dims();
+ int result;
+ status->status = h->handle->NumDims(&result);
+ return result;
}
int64_t TFE_TensorHandleDim(TFE_TensorHandle* h, int dim_index,
TF_Status* status) {
- const tensorflow::Tensor* t = nullptr;
- status->status = h->handle->Tensor(&t);
- return t == nullptr ? 0 : t->dim_size(dim_index);
+ tensorflow::int64 result;
+ status->status = h->handle->Dim(dim_index, &result);
+ return result;
}
const char* TFE_TensorHandleDeviceName(TFE_TensorHandle* h, TF_Status* status) {
@@ -662,17 +664,17 @@ TFE_TensorHandle* TFE_NewTensorHandle(const tensorflow::Tensor& t) {
const tensorflow::Tensor* TFE_TensorHandleUnderlyingTensorInHostMemory(
TFE_TensorHandle* h, TF_Status* status) {
- tensorflow::Device* d = nullptr;
- tensorflow::Device* op_device = nullptr;
- const tensorflow::Tensor* t = nullptr;
- status->status = h->handle->TensorAndDevice(&t, &d, &op_device);
- if (!status->status.ok()) return nullptr;
- if (d != nullptr) {
+ if (!h->handle->OnHostCPU()) {
status->status = tensorflow::errors::FailedPrecondition(
"TFE_TensorHandle is placed in device (not host) memory. Cannot return "
"a tensorflow::Tensor");
return nullptr;
}
+ tensorflow::Device* d = nullptr;
+ tensorflow::Device* op_device = nullptr;
+ const tensorflow::Tensor* t = nullptr;
+ status->status = h->handle->TensorAndDevice(&t, &d, &op_device);
+ if (!status->status.ok()) return nullptr;
return t;
}
diff --git a/tensorflow/c/eager/tape.h b/tensorflow/c/eager/tape.h
index 734e712daa..1adb0458c3 100644
--- a/tensorflow/c/eager/tape.h
+++ b/tensorflow/c/eager/tape.h
@@ -520,7 +520,12 @@ Status GradientTape<Gradient, BackwardFunction>::ComputeGradient(
}
} else {
any_gradient_nonzero = true;
- auto new_gradients = vspace.AggregateGradients(grad_it->second);
+ Gradient* new_gradients = nullptr;
+ if (grad_it->second.size() == 1) {
+ new_gradients = grad_it->second.at(0);
+ } else {
+ new_gradients = vspace.AggregateGradients(grad_it->second);
+ }
if (sources_set.find(grad_it->first) == sources_set.end()) {
gradients.erase(grad_it);
} else {
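
[Editor's note] The tape.h change above adds a fast path: when only one gradient is pending for a tensor, aggregation is skipped entirely. In sketch form (illustrative Python, not the actual tape code):

    def combine_gradients(grads, aggregate_fn):
        """Avoid an aggregation op when a single gradient is pending."""
        if len(grads) == 1:
            return grads[0]
        return aggregate_fn(grads)
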
diff --git a/tensorflow/c/python_api.cc b/tensorflow/c/python_api.cc
index e18fdf6c57..8486b585c8 100644
--- a/tensorflow/c/python_api.cc
+++ b/tensorflow/c/python_api.cc
@@ -155,7 +155,7 @@ void SetResourceHandleShapeAndType(TF_Graph* graph, TF_Output output,
tensorflow::shape_inference::ShapeHandle shape;
status->status =
ic->MakeShapeFromShapeProto(shape_and_type_proto.shape(), &shape);
- if (status->status.ok()) return;
+ if (!status->status.ok()) return;
shapes_and_types.emplace_back(shape, shape_and_type_proto.dtype());
}
ic->set_output_handle_shapes_and_types(output.index, shapes_and_types);
diff --git a/tensorflow/cc/framework/scope.cc b/tensorflow/cc/framework/scope.cc
index 62a889181e..8c886f3171 100644
--- a/tensorflow/cc/framework/scope.cc
+++ b/tensorflow/cc/framework/scope.cc
@@ -37,6 +37,11 @@ Scope& Scope::operator=(const Scope& other) {
return *this;
}
+namespace {
+const char kScopeSeparator[] = "/";
+const char kSuffixSeparator[] = "_";
+} // namespace
+
Scope::Impl::Impl(Graph* graph, Status* status, NameMap* name_map,
ShapeRefiner* refiner, bool disable_shape_inference)
: graph_(graph),
@@ -308,19 +313,23 @@ string Scope::Impl::GetUniqueName(const string& prefix,
return prefix;
}
auto entry = name_map_->find(prefix);
- string unique_name = prefix;
if (entry == name_map_->end()) {
name_map_->insert({prefix, 0});
- } else {
- unique_name = strings::StrCat(unique_name, "_", ++entry->second);
+ return prefix;
}
+ string unique_name;
+ do {
+ unique_name = strings::StrCat(prefix, kSuffixSeparator, ++entry->second);
+ } while (name_map_->find(unique_name) != name_map_->end());
+ name_map_->insert({unique_name, 0});
return unique_name;
}
string Scope::Impl::GetNameForOp(const string& default_name) const {
const string unique_name =
GetUniqueName(default_name, true /* check_single_use */);
- const string sep = name_.empty() || unique_name.empty() ? "" : "/";
+ const string sep =
+ name_.empty() || unique_name.empty() ? "" : kScopeSeparator;
return strings::StrCat(name_, sep, unique_name);
}
@@ -345,7 +354,8 @@ Scope Scope::NewSubScope(const string& child_scope_name) const {
}
const string unique_name =
impl()->GetUniqueName(child_scope_name, false /* check_single_use */);
- const string sep = impl()->name_.empty() || unique_name.empty() ? "" : "/";
+ const string sep =
+ impl()->name_.empty() || unique_name.empty() ? "" : kScopeSeparator;
return Scope(new Impl(*this, Impl::Tags::ScopeName(),
strings::StrCat(impl()->name_, sep, unique_name),
false /* copy_names */));
@@ -412,7 +422,7 @@ CompositeOpScopes Scope::GetCompositeOpScopes(
if (!impl()->single_use_scope()) {
Scope child = NewSubScope(impl()->op_name_.empty() ? composite_op_name
: impl()->op_name_);
- const string child_op_sep = impl()->name_.empty() ? "" : "_";
+ const string child_op_sep = impl()->name_.empty() ? "" : kSuffixSeparator;
const string child_name =
strings::StrCat(impl()->name_, child_op_sep, child.impl()->name_);
return {child,
@@ -435,7 +445,13 @@ class InternalScope {
static Scope NewScope(Graph* graph, Status* status, ShapeRefiner* refiner) {
Scope::Impl::NameMap* name_map = new Scope::Impl::NameMap;
for (const Node* node : graph->nodes()) {
- (*name_map)[node->name()] = 0;
+ const string& name = node->name();
+ (*name_map)[name] = 0;
+ // Add all name prefixes ('/' separated).
+ size_t idx = -1;
+ while ((idx = name.find(kScopeSeparator, idx + 1)) != string::npos) {
+ (*name_map)[name.substr(0, idx)] = 0;
+ }
}
// We provide null destructors for these shared ptrs (except for name_map)
// since the caller owns them and doesn't want the scope to destroy them.
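Note: the new GetUniqueName keeps probing "prefix_N" candidates until one is free, then registers the winner in the NameMap so later requests (including explicit uses of a suffixed name, as the OpAndScopeNameCollision test below exercises) cannot collide. A condensed standalone sketch of the algorithm, without the check_single_use handling:

    #include <string>
    #include <unordered_map>

    std::string GetUniqueName(std::unordered_map<std::string, int>& name_map,
                              const std::string& prefix) {
      auto entry = name_map.find(prefix);
      if (entry == name_map.end()) {
        name_map.insert({prefix, 0});  // first use: no suffix needed
        return prefix;
      }
      std::string unique_name;
      do {  // bump the counter until the candidate is unused
        unique_name = prefix + "_" + std::to_string(++entry->second);
      } while (name_map.find(unique_name) != name_map.end());
      name_map.insert({unique_name, 0});  // reserve the suffixed name too
      return unique_name;
    }
    // "foo" -> "foo", "foo" -> "foo_1", "foo_1" -> "foo_1_1", "foo" -> "foo_2"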
diff --git a/tensorflow/cc/framework/scope_internal.h b/tensorflow/cc/framework/scope_internal.h
index 8efcfed20d..58adaef2e9 100644
--- a/tensorflow/cc/framework/scope_internal.h
+++ b/tensorflow/cc/framework/scope_internal.h
@@ -34,8 +34,7 @@ class Scope::Impl {
// name that has not been used so far in a scope will get no suffix. Later
// uses of the same name will get suffixes _1, _2, _3, etc. Multiple scopes
// can share the same NameMap. For instance, a new scope created using
- // WithControlDependencies() should would share the same NameMap with the
- // parent.
+ // WithControlDependencies() would share the same NameMap with the parent.
typedef std::unordered_map<string, int> NameMap;
Impl(const std::shared_ptr<Graph>& graph,
diff --git a/tensorflow/cc/framework/scope_test.cc b/tensorflow/cc/framework/scope_test.cc
index 9eca9d3fac..b40b345eb8 100644
--- a/tensorflow/cc/framework/scope_test.cc
+++ b/tensorflow/cc/framework/scope_test.cc
@@ -26,6 +26,16 @@ TEST(ScopeTest, BasicNames) {
EXPECT_EQ(root.GetUniqueNameForOp("mul"), "mul");
}
+TEST(ScopeTest, OpAndScopeNameCollision) {
+ Scope root = Scope::NewRootScope();
+ EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo");
+ EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo_1");
+ EXPECT_EQ(root.GetUniqueNameForOp("foo_1"), "foo_1_1");
+ EXPECT_EQ(root.GetUniqueNameForOp("foo_2"), "foo_2");
+ EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo_3");
+ EXPECT_EQ(root.GetUniqueNameForOp("foo_2"), "foo_2_1");
+}
+
TEST(ScopeTest, HierarchicalNames) {
Scope root = Scope::NewRootScope();
Scope child = root.NewSubScope("child");
diff --git a/tensorflow/cc/gradients/array_grad.cc b/tensorflow/cc/gradients/array_grad.cc
index ff348fadb2..b353accddc 100644
--- a/tensorflow/cc/gradients/array_grad.cc
+++ b/tensorflow/cc/gradients/array_grad.cc
@@ -421,6 +421,58 @@ Status StridedSliceGradHelper(const Scope& scope, const Operation& op,
}
REGISTER_GRADIENT_OP("StridedSlice", StridedSliceGradHelper);
+Status SliceGrad(const Scope& scope, const Operation& op,
+ const std::vector<Output>& grad_inputs,
+ std::vector<Output>* grad_outputs) {
+ // Propagate the incoming gradient along all the selected values,
+ // and zero everywhere else. Use the Pad operator for this.
+ //
+ // First create an Nx2 padding where N is the number of input
+ // dimensions. The first column is the number of prepended zeros
+ // for each dimension, and the second column is the number of
+ // appended zeros.
+ //
+ // The first column is just the begin vector.
+  // The second column is the input shape minus (begin + size),
+  // computed element-wise.
+
+ // Running example:
+ // input.shape = [3, 5, 3]
+ // begin = [1, 2, 1], size = [1, 3, 2]
+ Input input = op.input(0);
+ Input begin = op.input(1);
+ // input_rank = 3
+ auto input_rank = Rank(scope, input);
+ // slice_size = [1, 3, 2]
+ auto slice_size = Shape(scope, op.output(0));
+ // padding_shape = [3, 1]
+ auto padding_shape = Stack(scope, {input_rank, 1});
+ // before_padding = [[1]
+ // [2]
+ // [1]]
+ Input before_padding = Reshape(scope, begin, padding_shape);
+ // after_padding_sizes = shape(input) - slice_size - begin
+ // = [3, 5, 3] - [1, 3, 2] - [1, 2, 1]
+ // = [1, 0, 0]
+ auto after_padding_sizes =
+ Sub(scope, Sub(scope, Shape(scope, input), slice_size), begin);
+ // after_padding = [[1]
+ // [0]
+ // [0]]
+ Input after_padding = Reshape(scope, after_padding_sizes, padding_shape);
+ // paddings = [[1 1]
+ // [2 0]
+ // [1 0]]
+ auto paddings =
+ Concat(scope, {before_padding, after_padding}, Const(scope, 1));
+ grad_outputs->push_back(Pad(scope, grad_inputs[0], paddings));
+ // Nothing propagated for "begin" and "size" inputs
+ grad_outputs->push_back(NoGradient());
+ grad_outputs->push_back(NoGradient());
+ return scope.status();
+}
+REGISTER_GRADIENT_OP("Slice", SliceGrad);
+
} // anonymous namespace
} // namespace ops
} // namespace tensorflow
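Note: per the running example in the comments above, row i of the paddings matrix is [begin[i], shape[i] - begin[i] - size[i]]. A tiny standalone check of that arithmetic (plain C++, outside TensorFlow):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> shape = {3, 5, 3}, begin = {1, 2, 1}, size = {1, 3, 2};
      for (size_t i = 0; i < shape.size(); ++i) {
        int before = begin[i];                      // zeros prepended
        int after = shape[i] - begin[i] - size[i];  // zeros appended
        std::printf("dim %zu: [%d, %d]\n", i, before, after);
      }
      // Prints [1, 1], [2, 0], [1, 0] -- the paddings from the comment above.
    }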
diff --git a/tensorflow/cc/gradients/array_grad_test.cc b/tensorflow/cc/gradients/array_grad_test.cc
index de3bd0fc9e..d09275b648 100644
--- a/tensorflow/cc/gradients/array_grad_test.cc
+++ b/tensorflow/cc/gradients/array_grad_test.cc
@@ -378,5 +378,12 @@ TEST_F(ArrayGradTest, StridedSliceGrad) {
RunTest(x, x_shape, y, {1, 2, 2, 2});
}
+TEST_F(ArrayGradTest, SliceGrad) {
+ TensorShape x_shape({3, 5, 3});
+ auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
+ auto y = Slice(scope_, x, {1, 2, 1}, {1, 3, 2});
+ RunTest(x, x_shape, y, {1, 3, 2});
+}
+
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/cc/saved_model/BUILD b/tensorflow/cc/saved_model/BUILD
index 06a3be18e0..730b1b669b 100644
--- a/tensorflow/cc/saved_model/BUILD
+++ b/tensorflow/cc/saved_model/BUILD
@@ -34,6 +34,35 @@ cc_library(
)
cc_library(
+ name = "reader",
+ srcs = ["reader.cc"],
+ hdrs = ["reader.h"],
+ deps = [
+ ":constants",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:protos_all_cc",
+ ],
+)
+
+tf_cc_test(
+ name = "reader_test",
+ srcs = ["reader_test.cc"],
+ data = [
+ ":saved_model_half_plus_two",
+ ],
+ linkstatic = 1,
+ deps = [
+ ":constants",
+ ":reader",
+ ":tag_constants",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ "//tensorflow/core:testlib",
+ ],
+)
+
+cc_library(
name = "loader",
hdrs = ["loader.h"],
deps = [
@@ -54,6 +83,7 @@ cc_library(
hdrs = ["loader.h"],
deps = [
":constants",
+ ":reader",
] + if_not_mobile([
"//tensorflow/core:core_cpu",
"//tensorflow/core:framework",
diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc
index faa1e378d0..07807ed2f3 100644
--- a/tensorflow/cc/saved_model/loader.cc
+++ b/tensorflow/cc/saved_model/loader.cc
@@ -18,8 +18,10 @@ limitations under the License.
#include <unordered_set>
#include "tensorflow/cc/saved_model/constants.h"
+#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/monitoring/counter.h"
+#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf_internal.h"
@@ -43,56 +45,6 @@ auto* load_latency = monitoring::Counter<1>::New(
constexpr char kLoadAttemptFail[] = "fail";
constexpr char kLoadAttemptSuccess[] = "success";
-Status ReadSavedModel(const string& export_dir, SavedModel* saved_model_proto) {
- const string saved_model_pb_path =
- io::JoinPath(export_dir, kSavedModelFilenamePb);
- if (Env::Default()->FileExists(saved_model_pb_path).ok()) {
- return ReadBinaryProto(Env::Default(), saved_model_pb_path,
- saved_model_proto);
- }
- const string saved_model_pbtxt_path =
- io::JoinPath(export_dir, kSavedModelFilenamePbTxt);
- if (Env::Default()->FileExists(saved_model_pbtxt_path).ok()) {
- return ReadTextProto(Env::Default(), saved_model_pbtxt_path,
- saved_model_proto);
- }
- return Status(error::Code::NOT_FOUND,
- "Could not find SavedModel .pb or .pbtxt at supplied export "
- "directory path: " +
- export_dir);
-}
-
-string GetTagsAsString(const std::unordered_set<string>& tags) {
- string tags_as_string = "{ ";
- for (const string& tag : tags) {
- tags_as_string = strings::StrCat(tags_as_string, tag, " ");
- }
- tags_as_string = strings::StrCat(tags_as_string, "}");
- return tags_as_string;
-}
-
-Status FindMetaGraphDefToLoad(const SavedModel& saved_model_proto,
- const std::unordered_set<string>& tags,
- MetaGraphDef* meta_graph_def_to_load) {
- for (const MetaGraphDef& meta_graph_def : saved_model_proto.meta_graphs()) {
- // Get tags from the meta_graph_def.
- std::unordered_set<string> graph_tags;
- for (const string& tag : meta_graph_def.meta_info_def().tags()) {
- graph_tags.insert(tag);
- }
- // Match with the set of tags provided.
- if (graph_tags == tags) {
- *meta_graph_def_to_load = meta_graph_def;
- return Status::OK();
- }
- }
- return Status(error::Code::NOT_FOUND,
- "Could not find meta graph def matching supplied tags: " +
- GetTagsAsString(tags) +
- ". To inspect available tag-sets in the SavedModel, please "
- "use the SavedModel CLI: `saved_model_cli`");
-}
-
Status LoadMetaGraphIntoSession(const MetaGraphDef& meta_graph_def,
const SessionOptions& session_options,
std::unique_ptr<Session>* session) {
@@ -235,18 +187,8 @@ Status LoadSavedModelInternal(const SessionOptions& session_options,
const string& export_dir,
const std::unordered_set<string>& tags,
SavedModelBundle* const bundle) {
- if (!MaybeSavedModelDirectory(export_dir)) {
- return Status(error::Code::NOT_FOUND,
- "SavedModel not found in export directory: " + export_dir);
- }
- LOG(INFO) << "Loading SavedModel with tags: " << GetTagsAsString(tags)
- << "; from: " << export_dir;
-
- SavedModel saved_model_proto;
- TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto));
-
- TF_RETURN_IF_ERROR(
- FindMetaGraphDefToLoad(saved_model_proto, tags, &bundle->meta_graph_def));
+ TF_RETURN_IF_ERROR(ReadMetaGraphDefFromSavedModel(export_dir, tags,
+ &bundle->meta_graph_def));
TF_RETURN_IF_ERROR(LoadMetaGraphIntoSession(
bundle->meta_graph_def, session_options, &bundle->session));
@@ -288,8 +230,8 @@ Status LoadSavedModel(const SessionOptions& session_options,
return end_microseconds - start_microseconds;
}();
auto log_and_count = [&](const string& status_str) {
- LOG(INFO) << "SavedModel load for tags " << GetTagsAsString(tags)
- << "; Status: " << status_str << ". Took "
+ LOG(INFO) << "SavedModel load for tags { " << str_util::Join(tags, " ")
+ << " }; Status: " << status_str << ". Took "
<< load_latency_microsecs << " microseconds.";
load_attempt_count->GetCell(export_dir, status_str)->IncrementBy(1);
};
diff --git a/tensorflow/cc/saved_model/reader.cc b/tensorflow/cc/saved_model/reader.cc
new file mode 100644
index 0000000000..2146c8a197
--- /dev/null
+++ b/tensorflow/cc/saved_model/reader.cc
@@ -0,0 +1,88 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/cc/saved_model/reader.h"
+
+#include <unordered_set>
+
+#include "tensorflow/cc/saved_model/constants.h"
+#include "tensorflow/core/lib/io/path.h"
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/lib/strings/strcat.h"
+#include "tensorflow/core/platform/env.h"
+#include "tensorflow/core/protobuf/saved_model.pb.h"
+
+namespace tensorflow {
+namespace {
+
+Status ReadSavedModel(const string& export_dir, SavedModel* saved_model_proto) {
+ LOG(INFO) << "Reading SavedModel from: " << export_dir;
+
+ const string saved_model_pb_path =
+ io::JoinPath(export_dir, kSavedModelFilenamePb);
+ if (Env::Default()->FileExists(saved_model_pb_path).ok()) {
+ return ReadBinaryProto(Env::Default(), saved_model_pb_path,
+ saved_model_proto);
+ }
+ const string saved_model_pbtxt_path =
+ io::JoinPath(export_dir, kSavedModelFilenamePbTxt);
+ if (Env::Default()->FileExists(saved_model_pbtxt_path).ok()) {
+ return ReadTextProto(Env::Default(), saved_model_pbtxt_path,
+ saved_model_proto);
+ }
+ return Status(error::Code::NOT_FOUND,
+ "Could not find SavedModel .pb or .pbtxt at supplied export "
+ "directory path: " +
+ export_dir);
+}
+
+Status FindMetaGraphDef(const SavedModel& saved_model_proto,
+ const std::unordered_set<string>& tags,
+ MetaGraphDef* meta_graph_def) {
+ LOG(INFO) << "Reading meta graph with tags { " << str_util::Join(tags, " ")
+ << " }";
+ for (const MetaGraphDef& graph_def : saved_model_proto.meta_graphs()) {
+ // Get tags from the graph_def.
+ std::unordered_set<string> graph_tags;
+ for (const string& tag : graph_def.meta_info_def().tags()) {
+ graph_tags.insert(tag);
+ }
+ // Match with the set of tags provided.
+ if (graph_tags == tags) {
+ *meta_graph_def = graph_def;
+ return Status::OK();
+ }
+ }
+ return Status(
+ error::Code::NOT_FOUND,
+ strings::StrCat(
+ "Could not find meta graph def matching supplied tags: { ",
+ str_util::Join(tags, " "),
+ " }. To inspect available tag-sets in the SavedModel, please "
+ "use the SavedModel CLI: `saved_model_cli`"));
+}
+
+} // namespace
+
+Status ReadMetaGraphDefFromSavedModel(const string& export_dir,
+ const std::unordered_set<string>& tags,
+ MetaGraphDef* const meta_graph_def) {
+ SavedModel saved_model_proto;
+ TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto));
+ TF_RETURN_IF_ERROR(FindMetaGraphDef(saved_model_proto, tags, meta_graph_def));
+ return Status::OK();
+}
+
+} // namespace tensorflow
diff --git a/tensorflow/cc/saved_model/reader.h b/tensorflow/cc/saved_model/reader.h
new file mode 100644
index 0000000000..5815108df2
--- /dev/null
+++ b/tensorflow/cc/saved_model/reader.h
@@ -0,0 +1,39 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+/// Functions to read the SavedModel proto, or parts of it.
+
+#ifndef TENSORFLOW_CC_SAVED_MODEL_READER_H_
+#define TENSORFLOW_CC_SAVED_MODEL_READER_H_
+
+#include <string>
+#include <unordered_set>
+
+#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/protobuf/meta_graph.pb.h"
+
+namespace tensorflow {
+
+// Reads the SavedModel proto from saved_model.pb(txt) in the given directory,
+// finds the MetaGraphDef that matches the given set of tags and writes it to
+// the `meta_graph_def` parameter. Returns a failure status when the SavedModel
+// file does not exist or no MetaGraphDef matches the tags.
+Status ReadMetaGraphDefFromSavedModel(const string& export_dir,
+ const std::unordered_set<string>& tags,
+ MetaGraphDef* const meta_graph_def);
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_CC_SAVED_MODEL_READER_H_
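Note: a minimal caller sketch for the new API, using the signature and tag constant shown in this diff; the export directory here is a placeholder, not a path from the source:

    #include "tensorflow/cc/saved_model/reader.h"
    #include "tensorflow/cc/saved_model/tag_constants.h"

    tensorflow::Status LoadServeMetaGraph(tensorflow::MetaGraphDef* mgd) {
      // "/tmp/my_model/1" is a hypothetical export directory.
      return tensorflow::ReadMetaGraphDefFromSavedModel(
          "/tmp/my_model/1", {tensorflow::kSavedModelTagServe}, mgd);
    }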
diff --git a/tensorflow/cc/saved_model/reader_test.cc b/tensorflow/cc/saved_model/reader_test.cc
new file mode 100644
index 0000000000..620e9c2eec
--- /dev/null
+++ b/tensorflow/cc/saved_model/reader_test.cc
@@ -0,0 +1,108 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/cc/saved_model/reader.h"
+
+#include "tensorflow/cc/saved_model/constants.h"
+#include "tensorflow/cc/saved_model/tag_constants.h"
+#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/lib/io/path.h"
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+namespace {
+
+constexpr char kTestDataPbTxt[] =
+ "cc/saved_model/testdata/half_plus_two_pbtxt/00000123";
+constexpr char kTestDataSharded[] =
+ "cc/saved_model/testdata/half_plus_two/00000123";
+
+class ReaderTest : public ::testing::Test {
+ protected:
+ ReaderTest() {}
+
+ void CheckMetaGraphDef(const MetaGraphDef& meta_graph_def) {
+ const auto& tags = meta_graph_def.meta_info_def().tags();
+ EXPECT_TRUE(std::find(tags.begin(), tags.end(), kSavedModelTagServe) !=
+ tags.end());
+ EXPECT_NE(meta_graph_def.meta_info_def().tensorflow_version(), "");
+ EXPECT_EQ(
+ meta_graph_def.signature_def().at("serving_default").method_name(),
+ "tensorflow/serving/predict");
+ }
+};
+
+TEST_F(ReaderTest, TagMatch) {
+ MetaGraphDef meta_graph_def;
+
+ const string export_dir =
+ io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataSharded);
+ TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
+ &meta_graph_def));
+ CheckMetaGraphDef(meta_graph_def);
+}
+
+TEST_F(ReaderTest, NoTagMatch) {
+ MetaGraphDef meta_graph_def;
+
+ const string export_dir =
+ io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataSharded);
+ Status st = ReadMetaGraphDefFromSavedModel(export_dir, {"missing-tag"},
+ &meta_graph_def);
+ EXPECT_FALSE(st.ok());
+ EXPECT_TRUE(str_util::StrContains(
+ st.error_message(),
+ "Could not find meta graph def matching supplied tags: { missing-tag }"))
+ << st.error_message();
+}
+
+TEST_F(ReaderTest, NoTagMatchMultiple) {
+ MetaGraphDef meta_graph_def;
+
+ const string export_dir =
+ io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataSharded);
+ Status st = ReadMetaGraphDefFromSavedModel(
+ export_dir, {kSavedModelTagServe, "missing-tag"}, &meta_graph_def);
+ EXPECT_FALSE(st.ok());
+ EXPECT_TRUE(str_util::StrContains(
+ st.error_message(),
+ "Could not find meta graph def matching supplied tags: "))
+ << st.error_message();
+}
+
+TEST_F(ReaderTest, PbtxtFormat) {
+ MetaGraphDef meta_graph_def;
+
+ const string export_dir =
+ io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPbTxt);
+ TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
+ &meta_graph_def));
+ CheckMetaGraphDef(meta_graph_def);
+}
+
+TEST_F(ReaderTest, InvalidExportPath) {
+ MetaGraphDef meta_graph_def;
+
+ const string export_dir =
+ io::JoinPath(testing::TensorFlowSrcRoot(), "missing-path");
+ Status st = ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
+ &meta_graph_def);
+ EXPECT_FALSE(st.ok());
+}
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/compiler/jit/BUILD b/tensorflow/compiler/jit/BUILD
index d976f8296c..c2245b8eae 100644
--- a/tensorflow/compiler/jit/BUILD
+++ b/tensorflow/compiler/jit/BUILD
@@ -176,9 +176,11 @@ cc_library(
"//tensorflow/core/kernels:cast_op",
"//tensorflow/core/kernels:constant_op",
"//tensorflow/core/kernels:control_flow_ops",
+ "//tensorflow/core/kernels:fifo_queue",
"//tensorflow/core/kernels:identity_n_op",
"//tensorflow/core/kernels:identity_op",
"//tensorflow/core/kernels:no_op",
+ "//tensorflow/core/kernels:queue_op",
"//tensorflow/core/kernels:resource_variable_ops",
"//tensorflow/core/kernels:sendrecv_ops",
"//tensorflow/core/kernels:shape_ops",
diff --git a/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc b/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
index e786d41887..9c424b201e 100644
--- a/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
+++ b/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
@@ -60,9 +60,9 @@ const char* const kXlaHostTransferSequencerAttr =
namespace {
-bool AreAllParentsConst(const Node& n,
- const gtl::FlatSet<const Node*>& runtime_const_nodes) {
- if (n.type_string() == "GuaranteeConst" || n.type_string() == "Const") {
+bool AreAllParentsGuaranteedConst(
+ const Node& n, const gtl::FlatSet<const Node*>& runtime_const_nodes) {
+ if (n.type_string() == "GuaranteeConst") {
// If the current node is itself a cast-to-const, no need
// to look at the incoming edges.
return true;
@@ -93,7 +93,8 @@ void MarkGuaranteedConstants(
ReverseDFSFrom(graph, srcs, /*enter=*/nullptr,
/*leave=*/[&guaranteed_const_nodes](const Node* n) {
// TODO(vinuraja): Doesn't work in the presence of loops.
- if (AreAllParentsConst(*n, guaranteed_const_nodes)) {
+ if (AreAllParentsGuaranteedConst(*n,
+ guaranteed_const_nodes)) {
guaranteed_const_nodes.insert(n);
}
});
@@ -1136,7 +1137,10 @@ Status Encapsulator::Subgraph::AddShapeInferenceInfo(
GraphToFunctionDef(*inference_graph, inference_graph_name, &fdef));
host_compute->AddAttr("shape_inference_graph", inference_graph_name);
host_compute->AddAttr("shapes", std::vector<TensorShapeProto>());
- TF_RETURN_IF_ERROR(library->AddFunctionDef(fdef));
+ // TODO(sibyl-Aix6ihai): Understand why there are multiple calls to Encapsulator.
+ if (library->Find(inference_graph_name) == nullptr) {
+ TF_RETURN_IF_ERROR(library->AddFunctionDef(fdef));
+ }
}
return Status::OK();
}
diff --git a/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc b/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc
index 4eb389e0c6..c0543a0079 100644
--- a/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc
+++ b/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc
@@ -742,10 +742,13 @@ TEST(EncapsulateSubgraphsWithGuaranteeConstOpTest, Simple) {
Scope root = Scope::NewRootScope().ExitOnError().WithDevice(
"/job:localhost/replica:0/task:0/cpu:0");
auto x1 = ops::Placeholder(root.WithOpName("x1"), DT_FLOAT);
- auto const_x2 = ops::Const(root.WithOpName("const_x2"), 10.0f);
+ auto x2 = ops::Placeholder(root.WithOpName("x2"), DT_FLOAT);
+ auto const_guarantee_x2 =
+ ops::GuaranteeConst(root.WithOpName("const_guarantee_x2"), x2);
auto const_guarantee_x1 =
ops::GuaranteeConst(root.WithOpName("const_guarantee_x1"), x1);
- auto add1 = ops::Add(root.WithOpName("add1"), const_guarantee_x1, const_x2);
+ auto add1 =
+ ops::Add(root.WithOpName("add1"), const_guarantee_x1, const_guarantee_x2);
add1.node()->AddAttr("_encapsulate", "encapsulate1");
Graph graph_before(OpRegistry::Global());
diff --git a/tensorflow/compiler/jit/kernels/xla_launch_op.cc b/tensorflow/compiler/jit/kernels/xla_launch_op.cc
index 902fe27acd..338fb5a6f0 100644
--- a/tensorflow/compiler/jit/kernels/xla_launch_op.cc
+++ b/tensorflow/compiler/jit/kernels/xla_launch_op.cc
@@ -115,6 +115,7 @@ void XlaLocalLaunchBase::Compute(OpKernelContext* ctx) {
const XlaDevice::Metadata* metadata = nullptr;
Status s = XlaDevice::GetMetadata(ctx, &metadata);
bool allocate_xla_tensors = s.ok();
+ bool use_multiple_streams = s.ok() && metadata->UseMultipleStreams();
// Get the platform_id_ for XLA_* devices.
if (platform_id_ == nullptr) {
@@ -166,14 +167,22 @@ void XlaLocalLaunchBase::Compute(OpKernelContext* ctx) {
}
XlaCompiler::CompileOptions compile_options;
compile_options.is_entry_computation = true;
+ // Optimization: don't resolve constants. If we resolve constants we never
+ // emit them on the device, meaning that if they are needed by a following
+ // computation the host has to transfer them.
+ compile_options.resolve_compile_time_constants = false;
+ // Optimization: where possible, have the computation return a naked array
+ // rather than a one-element tuple.
+ compile_options.always_return_tuple = false;
+
OP_REQUIRES_OK(
ctx, cache->Compile(options, function_, constant_args, variables, ctx,
&kernel, &executable, &compile_options));
VLOG(1) << "Executing XLA Computation...";
- XlaComputationLaunchContext launch_context(client, xla_allocator,
- allocate_xla_tensors);
+ XlaComputationLaunchContext launch_context(
+ client, xla_allocator, allocate_xla_tensors, use_multiple_streams);
launch_context.PopulateInputs(ctx, kernel, variables);
// Execute the computation.
diff --git a/tensorflow/compiler/jit/xla_compile_on_demand_op.cc b/tensorflow/compiler/jit/xla_compile_on_demand_op.cc
index 26f350855d..d288d37bc7 100644
--- a/tensorflow/compiler/jit/xla_compile_on_demand_op.cc
+++ b/tensorflow/compiler/jit/xla_compile_on_demand_op.cc
@@ -53,7 +53,9 @@ Status XlaCompileOnDemandOp::Run(OpKernelContext* ctx,
// Builds an XLA allocator for the device.
XlaComputationLaunchContext launch_context(
- client, client->backend().memory_allocator(), true);
+ client, client->backend().memory_allocator(),
+ /*allocate_xla_tensors=*/true,
+ /*use_multiple_streams=*/metadata.UseMultipleStreams());
launch_context.PopulateInputs(ctx, result, variables);
@@ -163,6 +165,13 @@ Status XlaCompileOnDemandOp::Compile(
XlaCompiler::CompileOptions compile_options;
compile_options.is_entry_computation = true;
+ // Optimization: don't resolve constants. If we resolve constants we never
+ // emit them on the device, meaning that if they are needed by a following
+ // computation the host has to transfer them.
+ compile_options.resolve_compile_time_constants = false;
+ // Optimization: where possible, have the computation return a naked array
+ // rather than a one-element tuple.
+ compile_options.always_return_tuple = false;
std::map<int, OptionalTensor> variable_args = GetVariables(ctx);
return cache->CompileSingleOp(options, constant_arguments, variable_args, ctx,
diff --git a/tensorflow/compiler/jit/xla_cpu_device.cc b/tensorflow/compiler/jit/xla_cpu_device.cc
index 43648402f6..7e159e3171 100644
--- a/tensorflow/compiler/jit/xla_cpu_device.cc
+++ b/tensorflow/compiler/jit/xla_cpu_device.cc
@@ -54,6 +54,7 @@ Status XlaCpuDeviceFactory::CreateDevices(const SessionOptions& options,
DEVICE_CPU_XLA_JIT, options, name_prefix,
registration,
/*transfer_as_literal=*/false,
+ /*use_multiple_streams=*/false,
/*shape_representation_fn=*/{},
/*padded_shape_fn=*/{}, &device));
devices->push_back(device.release());
diff --git a/tensorflow/compiler/jit/xla_device.cc b/tensorflow/compiler/jit/xla_device.cc
index ed007d603e..c55eba2f79 100644
--- a/tensorflow/compiler/jit/xla_device.cc
+++ b/tensorflow/compiler/jit/xla_device.cc
@@ -130,7 +130,7 @@ Status DefaultPaddedShapeFn(const Tensor& tensor, xla::Shape* shape) {
const string& jit_device_name, const SessionOptions& options,
const string& name_prefix,
const XlaOpRegistry::DeviceRegistration& registration,
- bool transfer_as_literal,
+ bool transfer_as_literal, bool use_multiple_streams,
const XlaCompiler::ShapeRepresentationFn& shape_representation_fn,
const PaddedShapeFn& padded_shape_fn, std::unique_ptr<XlaDevice>* device) {
VLOG(1) << "XlaDevice::Create " << platform_name << " " << device_name << ":"
@@ -151,22 +151,24 @@ Status DefaultPaddedShapeFn(const Tensor& tensor, xla::Shape* shape) {
DeviceType(device_name), Bytes(16ULL << 30), DeviceLocality(),
strings::StrCat("device: ", device_name, " device"));
- device->reset(new XlaDevice(
- options, attrs, device_ordinal, DeviceType(jit_device_name),
- platform.ValueOrDie(), transfer_as_literal, shape_representation_fn,
- padded_shape_fn ? padded_shape_fn : DefaultPaddedShapeFn));
+ device->reset(
+ new XlaDevice(options, attrs, device_ordinal, DeviceType(jit_device_name),
+ platform.ValueOrDie(), transfer_as_literal,
+ use_multiple_streams, shape_representation_fn,
+ padded_shape_fn ? padded_shape_fn : DefaultPaddedShapeFn));
return Status::OK();
}
XlaDevice::Metadata::Metadata(
int device_ordinal, se::Platform* platform, const DeviceType& device_type,
XlaCompiler::ShapeRepresentationFn shape_representation_fn,
- PaddedShapeFn padded_shape_fn)
+ PaddedShapeFn padded_shape_fn, bool use_multiple_streams)
: device_ordinal_(device_ordinal),
device_type_(device_type),
platform_(platform),
shape_representation_fn_(std::move(shape_representation_fn)),
- padded_shape_fn_(std::move(padded_shape_fn)) {}
+ padded_shape_fn_(std::move(padded_shape_fn)),
+ use_multiple_streams_(use_multiple_streams) {}
int XlaDevice::Metadata::device_ordinal() const { return device_ordinal_; }
@@ -200,16 +202,18 @@ const DeviceType& XlaDevice::Metadata::jit_device_type() const {
XlaDevice::XlaDevice(
const SessionOptions& options, const DeviceAttributes& attrs,
int device_ordinal, const DeviceType& jit_device_name,
- se::Platform* platform, bool transfer_as_literal,
+ se::Platform* platform, bool transfer_as_literal, bool use_multiple_streams,
const XlaCompiler::ShapeRepresentationFn& shape_representation_fn,
const PaddedShapeFn& padded_shape_fn)
: LocalDevice(options, attrs),
xla_metadata_(device_ordinal, platform, jit_device_name,
- shape_representation_fn, padded_shape_fn),
+ shape_representation_fn, padded_shape_fn,
+ use_multiple_streams),
device_ordinal_(device_ordinal),
jit_device_name_(jit_device_name),
xla_allocator_(nullptr),
platform_(platform),
+ use_multiple_streams_(use_multiple_streams),
transfer_as_literal_(transfer_as_literal),
shape_representation_fn_(shape_representation_fn) {
VLOG(1) << "Created XLA device " << jit_device_name;
@@ -253,6 +257,30 @@ xla::StatusOr<se::Stream*> XlaDevice::GetStream() {
return stream_.get();
}
+xla::StatusOr<se::Stream*> XlaDevice::GetDeviceToHostStream() {
+ if (!use_multiple_streams_) {
+ return GetStream();
+ }
+ if (!device_to_host_stream_) {
+ xla::Backend* backend = client()->mutable_backend();
+ TF_ASSIGN_OR_RETURN(device_to_host_stream_,
+ backend->BorrowStream(device_ordinal_));
+ }
+ return device_to_host_stream_.get();
+}
+
+xla::StatusOr<se::Stream*> XlaDevice::GetHostToDeviceStream() {
+ if (!use_multiple_streams_) {
+ return GetStream();
+ }
+ if (!host_to_device_stream_) {
+ xla::Backend* backend = client()->mutable_backend();
+ TF_ASSIGN_OR_RETURN(host_to_device_stream_,
+ backend->BorrowStream(device_ordinal_));
+ }
+ return host_to_device_stream_.get();
+}
+
Status XlaDevice::CreateAndSetGpuDeviceInfo() {
if (gpu_device_info_ == nullptr) {
TF_ASSIGN_OR_RETURN(se::Stream * stream, GetStream());
@@ -263,8 +291,9 @@ Status XlaDevice::CreateAndSetGpuDeviceInfo() {
// gpu_device_info_->default_context.
gpu_device_info_ = MakeUnique<GpuDeviceInfo>();
gpu_device_info_->stream = stream;
- gpu_device_info_->default_context = new XlaDeviceContext(
- stream, client(), transfer_as_literal_, shape_representation_fn_);
+ gpu_device_info_->default_context =
+ new XlaDeviceContext(stream, stream, stream, client(),
+ transfer_as_literal_, shape_representation_fn_);
set_tensorflow_gpu_device_info(gpu_device_info_.get());
}
@@ -276,10 +305,16 @@ Status XlaDevice::FillContextMap(const Graph* graph,
VLOG(1) << "XlaDevice::FillContextMap";
device_context_map->resize(graph->num_node_ids());
TF_ASSIGN_OR_RETURN(se::Stream * stream, GetStream());
+ TF_ASSIGN_OR_RETURN(se::Stream * device_to_host_stream,
+ GetDeviceToHostStream());
+ TF_ASSIGN_OR_RETURN(se::Stream * host_to_device_stream,
+ GetHostToDeviceStream());
+
// Call GetAllocator for the side-effect of ensuring the allocator is created.
GetAllocator({});
- auto ctx = new XlaDeviceContext(stream, client(), transfer_as_literal_,
- shape_representation_fn_);
+ auto ctx = new XlaDeviceContext(
+ stream, host_to_device_stream, device_to_host_stream, client(),
+ transfer_as_literal_, shape_representation_fn_);
for (Node* n : graph->nodes()) {
VLOG(2) << n->id() << " : " << n->type_string() << " : " << n->name();
ctx->Ref();
@@ -326,8 +361,13 @@ Status XlaDevice::MakeTensorFromProto(const TensorProto& tensor_proto,
Tensor copy(GetAllocator(alloc_attrs), parsed.dtype(), parsed.shape());
Notification n;
TF_ASSIGN_OR_RETURN(se::Stream * stream, GetStream());
- XlaTransferManager manager(stream, client(), transfer_as_literal_,
- shape_representation_fn_);
+ TF_ASSIGN_OR_RETURN(se::Stream * device_to_host_stream,
+ GetDeviceToHostStream());
+ TF_ASSIGN_OR_RETURN(se::Stream * host_to_device_stream,
+ GetHostToDeviceStream());
+ XlaTransferManager manager(stream, host_to_device_stream,
+ device_to_host_stream, client(),
+ transfer_as_literal_, shape_representation_fn_);
manager.CopyCPUTensorToDevice(&parsed, this, &copy,
[&n, &status](const Status& s) {
status = s;
diff --git a/tensorflow/compiler/jit/xla_device.h b/tensorflow/compiler/jit/xla_device.h
index 02e88ee679..fccdb14368 100644
--- a/tensorflow/compiler/jit/xla_device.h
+++ b/tensorflow/compiler/jit/xla_device.h
@@ -57,7 +57,7 @@ class XlaDevice : public LocalDevice {
Metadata(int device_ordinal, se::Platform* platform,
const DeviceType& device_type,
XlaCompiler::ShapeRepresentationFn shape_representation_fn,
- PaddedShapeFn padded_shape_fn);
+ PaddedShapeFn padded_shape_fn, bool use_multiple_streams);
// The index of the device on this host.
int device_ordinal() const;
@@ -70,12 +70,15 @@ class XlaDevice : public LocalDevice {
}
const PaddedShapeFn& padded_shape_fn() const { return padded_shape_fn_; }
+ bool UseMultipleStreams() const { return use_multiple_streams_; }
+
private:
const int device_ordinal_;
const DeviceType device_type_;
se::Platform* platform_; // Not owned.
XlaCompiler::ShapeRepresentationFn shape_representation_fn_;
PaddedShapeFn padded_shape_fn_;
+ const bool use_multiple_streams_;
TF_DISALLOW_COPY_AND_ASSIGN(Metadata);
};
@@ -89,6 +92,8 @@ class XlaDevice : public LocalDevice {
// 'transfer_as_literal' is true if device<->host transfers must be done using
// XLA's TransferLiteral{To,From}Device interface. If false, we can use
// ThenMemcpy instead.
+ // If 'use_multiple_streams' is true, we create separate streams for
+ // host-to-device and device-to-host communication.
// If padded_shape_fn is empty, a default implementation that returns
// the on-host shape is used.
static Status Create(
@@ -96,7 +101,7 @@ class XlaDevice : public LocalDevice {
int device_ordinal, const string& jit_device_name,
const SessionOptions& options, const string& name_prefix,
const XlaOpRegistry::DeviceRegistration& registration,
- bool transfer_as_literal,
+ bool transfer_as_literal, bool use_multiple_streams,
const XlaCompiler::ShapeRepresentationFn& shape_representation_fn,
const PaddedShapeFn& padded_shape_fn, std::unique_ptr<XlaDevice>* device);
@@ -106,6 +111,7 @@ class XlaDevice : public LocalDevice {
XlaDevice(const SessionOptions& options, const DeviceAttributes& attrs,
int device_ordinal, const DeviceType& jit_device_name,
se::Platform* platform, bool transfer_as_literal,
+ bool use_multiple_streams,
const XlaCompiler::ShapeRepresentationFn& shape_representation_fn,
const PaddedShapeFn& padded_shape_fn);
~XlaDevice() override;
@@ -126,6 +132,8 @@ class XlaDevice : public LocalDevice {
xla::LocalClient* client() const;
const Metadata& metadata() { return xla_metadata_; }
xla::StatusOr<se::Stream*> GetStream();
+ xla::StatusOr<se::Stream*> GetHostToDeviceStream();
+ xla::StatusOr<se::Stream*> GetDeviceToHostStream();
// If not already set, create and set GpuDeviceInfo.
// Not thread-safe
@@ -146,6 +154,16 @@ class XlaDevice : public LocalDevice {
// copying back and forth between CPU and the device, and
// computations enqueued by XLA.
xla::Backend::StreamPtr stream_;
+  // If false, only stream_ is valid and all computation and transfers use
+  // stream_. If true, computation is performed by stream_ and transfers are
+  // performed by host_to_device/device_to_host_stream.
+ bool use_multiple_streams_;
+ // If use_multiple_streams_, host to device transfers are performed using this
+ // stream.
+ xla::Backend::StreamPtr host_to_device_stream_;
+ // If use_multiple_streams_, device to host transfers are performed using this
+ // stream.
+ xla::Backend::StreamPtr device_to_host_stream_;
// Must we use XLA's transfer manager for correct host<->device transfers? if
// false, we can use ThenMemcpy() instead.
bool transfer_as_literal_;
diff --git a/tensorflow/compiler/jit/xla_device_context.cc b/tensorflow/compiler/jit/xla_device_context.cc
index 37005479dc..04778c0090 100644
--- a/tensorflow/compiler/jit/xla_device_context.cc
+++ b/tensorflow/compiler/jit/xla_device_context.cc
@@ -48,17 +48,24 @@ void XlaDeviceAllocator::DeallocateRaw(void* ptr) {
void XlaDeviceAllocator::GetStats(AllocatorStats* stats) { stats->Clear(); }
XlaTransferManager::XlaTransferManager(
- se::Stream* stream, xla::LocalClient* client, bool transfer_as_literal,
+ se::Stream* compute_stream, se::Stream* host_to_device_stream,
+ se::Stream* device_to_host_stream, xla::LocalClient* client,
+ bool transfer_as_literal,
XlaCompiler::ShapeRepresentationFn shape_representation_fn)
- : stream_(stream),
+ : stream_(compute_stream),
+ host_to_device_stream_(host_to_device_stream),
+ device_to_host_stream_(device_to_host_stream),
client_(client),
transfer_manager_(client->backend().transfer_manager()),
transfer_as_literal_(transfer_as_literal),
shape_representation_fn_(std::move(shape_representation_fn)) {
+ CHECK(host_to_device_stream_ != nullptr);
+ CHECK(device_to_host_stream_ != nullptr);
+ CHECK(stream_ != nullptr);
if (!shape_representation_fn_) {
- shape_representation_fn_ = [](const TensorShape& shape, DataType dtype) {
- return shape;
- };
+ shape_representation_fn_ =
+ [](const TensorShape& shape,
+ DataType dtype) -> xla::StatusOr<TensorShape> { return shape; };
}
}
@@ -70,12 +77,19 @@ Status XlaTransferManager::TransferLiteralToDevice(
xla::BorrowingLiteral literal(
static_cast<const char*>(DMAHelper::base(&host_tensor)), xla_shape);
- const xla::ShapedBuffer& shaped_buffer =
- XlaTensor::FromTensor(device_tensor)->shaped_buffer();
+ XlaTensor* xla_tensor = XlaTensor::FromTensor(device_tensor);
+ const xla::ShapedBuffer& shaped_buffer = xla_tensor->shaped_buffer();
VLOG(1) << "Transfer to device as literal: " << literal.ToString() << " "
<< shaped_buffer.ToString();
- return transfer_manager_->TransferLiteralToDevice(stream_, literal,
- shaped_buffer);
+ TF_RETURN_IF_ERROR(transfer_manager_->TransferLiteralToDevice(
+ host_to_device_stream_, literal, shaped_buffer));
+ if (UseMultipleStreams()) {
+ se::Event event(stream_->parent());
+ TF_RET_CHECK(event.Init()) << "Event failed to initialize!";
+ host_to_device_stream_->ThenRecordEvent(&event);
+ xla_tensor->SetDefinedOn(host_to_device_stream_, std::move(event));
+ }
+ return Status::OK();
}
Status XlaTransferManager::TransferLiteralFromDevice(
@@ -83,9 +97,9 @@ Status XlaTransferManager::TransferLiteralFromDevice(
const xla::ShapedBuffer& shaped_buffer =
XlaTensor::FromTensor(&device_tensor)->shaped_buffer();
- TF_ASSIGN_OR_RETURN(
- std::unique_ptr<xla::Literal> literal,
- transfer_manager_->TransferLiteralFromDevice(stream_, shaped_buffer));
+ TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::Literal> literal,
+ transfer_manager_->TransferLiteralFromDevice(
+ device_to_host_stream_, shaped_buffer));
VLOG(1) << "Transfer from device as literal: " << literal->ToString() << " "
<< shaped_buffer.ToString();
Tensor tensor;
@@ -103,63 +117,67 @@ void XlaTransferManager::CopyCPUTensorToDevice(const Tensor* cpu_tensor,
Device* device,
Tensor* device_tensor,
StatusCallback done) const {
- if (cpu_tensor->NumElements() > 0) {
- VLOG(2) << "CopyCPUTensorToDevice "
- << reinterpret_cast<const void*>(cpu_tensor->tensor_data().data())
- << " "
- << reinterpret_cast<const void*>(
- device_tensor->tensor_data().data())
- << " " << cpu_tensor->NumElements() << " "
- << cpu_tensor->shape().DebugString() << " "
- << device_tensor->shape().DebugString();
-
- void* src_ptr = const_cast<void*>(DMAHelper::base(cpu_tensor));
- const int64 total_bytes = cpu_tensor->TotalBytes();
-
- XlaTensor* xla_tensor = XlaTensor::FromTensor(device_tensor);
- CHECK(xla_tensor);
-
- TensorShape shape = shape_representation_fn_(device_tensor->shape(),
- device_tensor->dtype());
- if (!xla_tensor->has_shaped_buffer()) {
- Status s = xla_tensor->AllocateShapedBuffer(
- device_tensor->dtype(), shape, client_,
- stream_->parent()->device_ordinal());
- if (!s.ok()) {
- done(s);
- return;
- }
- }
+ if (cpu_tensor->NumElements() == 0) {
+ VLOG(2) << "CopyCPUTensorToDevice empty tensor";
+ done(Status::OK());
+ return;
+ }
- Status status;
- if (transfer_as_literal_) {
- Tensor reshaped_cpu_tensor;
- if (!reshaped_cpu_tensor.CopyFrom(*cpu_tensor, shape)) {
- done(errors::Internal(
- "Tensor::CopyFrom failed when copying from CPU to XLA device"));
- return;
- }
- status = TransferLiteralToDevice(reshaped_cpu_tensor, device_tensor);
- } else {
- se::DeviceMemoryBase dev_dst_ptr =
- XlaTensor::DeviceMemoryFromTensor(*device_tensor);
- stream_->ThenMemcpy(&dev_dst_ptr, src_ptr, total_bytes);
- // TODO(hpucha): Make this asynchronous.
- Status block_status = stream_->BlockHostUntilDone();
- if (!block_status.ok()) {
- status = xla::InternalError(
- "Failed to complete data transfer on stream %p: %s", stream_,
- block_status.error_message().c_str());
- }
- }
- xla_tensor->set_host_tensor(*cpu_tensor);
+ VLOG(2) << "CopyCPUTensorToDevice "
+ << reinterpret_cast<const void*>(cpu_tensor->tensor_data().data())
+ << " "
+ << reinterpret_cast<const void*>(device_tensor->tensor_data().data())
+ << " " << cpu_tensor->NumElements() << " "
+ << cpu_tensor->shape().DebugString() << " "
+ << device_tensor->shape().DebugString();
- done(status);
+ void* src_ptr = const_cast<void*>(DMAHelper::base(cpu_tensor));
+ const int64 total_bytes = cpu_tensor->TotalBytes();
+
+ XlaTensor* xla_tensor = XlaTensor::FromTensor(device_tensor);
+ CHECK(xla_tensor);
+
+ xla::StatusOr<TensorShape> shape_or_status =
+ shape_representation_fn_(device_tensor->shape(), device_tensor->dtype());
+ if (!shape_or_status.ok()) {
+ done(shape_or_status.status());
return;
}
+ TensorShape shape = shape_or_status.ValueOrDie();
+ if (!xla_tensor->has_shaped_buffer()) {
+ Status s =
+ xla_tensor->AllocateShapedBuffer(device_tensor->dtype(), shape, client_,
+ stream_->parent()->device_ordinal());
+ if (!s.ok()) {
+ done(s);
+ return;
+ }
+ }
- VLOG(2) << "CopyCPUTensorToDevice empty tensor";
- done(Status::OK());
+ Status status;
+ if (transfer_as_literal_) {
+ Tensor reshaped_cpu_tensor;
+ if (!reshaped_cpu_tensor.CopyFrom(*cpu_tensor, shape)) {
+ done(errors::Internal(
+ "Tensor::CopyFrom failed when copying from CPU to XLA device"));
+ return;
+ }
+ status = TransferLiteralToDevice(reshaped_cpu_tensor, device_tensor);
+ } else {
+ se::DeviceMemoryBase dev_dst_ptr =
+ XlaTensor::DeviceMemoryFromTensor(*device_tensor);
+ host_to_device_stream_->ThenMemcpy(&dev_dst_ptr, src_ptr, total_bytes);
+ // TODO(hpucha): Make this asynchronous.
+ Status block_status = host_to_device_stream_->BlockHostUntilDone();
+ if (!block_status.ok()) {
+ status = xla::InternalError(
+ "Failed to complete data transfer on stream %p: %s",
+ host_to_device_stream_, block_status.error_message().c_str());
+ }
+ }
+ xla_tensor->set_host_tensor(*cpu_tensor);
+
+ done(status);
}
void XlaTransferManager::CopyDeviceTensorToCPU(const Tensor* device_tensor,
@@ -167,62 +185,83 @@ void XlaTransferManager::CopyDeviceTensorToCPU(const Tensor* device_tensor,
Device* device,
Tensor* cpu_tensor,
StatusCallback done) {
- if (device_tensor->NumElements() > 0) {
- VLOG(2) << "CopyDeviceTensorToCPU "
- << reinterpret_cast<const void*>(
- device_tensor->tensor_data().data())
- << " "
- << reinterpret_cast<const void*>(cpu_tensor->tensor_data().data())
- << " " << device_tensor->NumElements() << " "
- << cpu_tensor->shape().DebugString() << " "
- << device_tensor->shape().DebugString();
-
- const int64 total_bytes = cpu_tensor->TotalBytes();
- se::DeviceMemoryBase dev_src_ptr =
- XlaTensor::DeviceMemoryFromTensor(*device_tensor);
- void* dst_ptr = DMAHelper::base(cpu_tensor);
-
- Status status;
- if (transfer_as_literal_) {
- status = TransferLiteralFromDevice(cpu_tensor, *device_tensor);
- } else {
- stream_->ThenMemcpy(dst_ptr, dev_src_ptr, total_bytes);
- // TODO(hpucha): Make this asynchronous.
- Status block_status = stream_->BlockHostUntilDone();
- if (!block_status.ok()) {
- status = xla::InternalError(
- "Failed to complete data transfer on stream %p: %s", stream_,
- block_status.error_message().c_str());
- }
- }
-
- done(status);
+ if (device_tensor->NumElements() == 0) {
+ VLOG(2) << "CopyDeviceTensorToCPU empty tensor";
+ done(Status::OK());
return;
}
+ VLOG(2) << "CopyDeviceTensorToCPU "
+ << reinterpret_cast<const void*>(device_tensor->tensor_data().data())
+ << " "
+ << reinterpret_cast<const void*>(cpu_tensor->tensor_data().data())
+ << " " << device_tensor->NumElements() << " "
+ << cpu_tensor->shape().DebugString() << " "
+ << device_tensor->shape().DebugString();
+
+ const int64 total_bytes = cpu_tensor->TotalBytes();
+ se::DeviceMemoryBase dev_src_ptr =
+ XlaTensor::DeviceMemoryFromTensor(*device_tensor);
+ void* dst_ptr = DMAHelper::base(cpu_tensor);
+ XlaTensor* xla_tensor = XlaTensor::FromTensor(device_tensor);
+
+ if (se::Event* event =
+ xla_tensor->GetDefinitionEvent(device_to_host_stream_)) {
+ device_to_host_stream_->ThenWaitFor(event);
+ xla_tensor->SetDefinedOn(device_to_host_stream_);
+ }
+
+ Status status;
+ if (transfer_as_literal_) {
+ status = TransferLiteralFromDevice(cpu_tensor, *device_tensor);
+ } else {
+ device_to_host_stream_->ThenMemcpy(dst_ptr, dev_src_ptr, total_bytes);
+ // TODO(hpucha): Make this asynchronous.
+ Status block_status = device_to_host_stream_->BlockHostUntilDone();
+ if (!block_status.ok()) {
+ status = xla::InternalError(
+ "Failed to complete data transfer on stream %p: %s", stream_,
+ block_status.error_message().c_str());
+ }
+ }
- VLOG(2) << "CopyDeviceTensorToCPU empty tensor";
- done(Status::OK());
+ done(status);
}
void XlaTransferManager::CopyDeviceTensorToDevice(const Tensor& src_tensor,
Tensor* dst_tensor,
const StatusCallback& done) {
+ VLOG(2) << "CopyDeviceTensorToDevice "
+ << reinterpret_cast<const void*>(src_tensor.tensor_data().data())
+ << " "
+ << reinterpret_cast<const void*>(dst_tensor->tensor_data().data());
// TODO(phawkins): replace this code with an asynchronous implementation.
auto body = [&]() {
if (src_tensor.NumElements() == 0) {
return Status::OK();
}
+ // TODO(jmolloy): We co-opt the device_to_host stream for device to device
+    // transfers; perhaps we should have a dedicated device-to-device stream,
+    // or one per device?
+ auto device_to_device_stream = device_to_host_stream_;
XlaTensor* xla_src = XlaTensor::FromTensor(&src_tensor);
XlaTensor* xla_dst = XlaTensor::FromTensor(dst_tensor);
CHECK(xla_src && xla_dst)
<< "Missing destination tensor for device-to-device copy";
if (!xla_dst->has_shaped_buffer()) {
- TensorShape shape =
- shape_representation_fn_(src_tensor.shape(), src_tensor.dtype());
+ TF_ASSIGN_OR_RETURN(
+ TensorShape shape,
+ shape_representation_fn_(src_tensor.shape(), src_tensor.dtype()));
TF_RETURN_IF_ERROR(
xla_dst->AllocateShapedBuffer(src_tensor.dtype(), shape, client_,
stream_->parent()->device_ordinal()));
}
+
+ if (se::Event* event =
+ xla_src->GetDefinitionEvent(device_to_device_stream)) {
+ device_to_device_stream->ThenWaitFor(event);
+ xla_src->SetDefinedOn(device_to_device_stream);
+ TF_RETURN_IF_ERROR(device_to_device_stream->BlockHostUntilDone());
+ }
TF_RETURN_IF_ERROR(
xla_dst->shaped_buffer().buffers().ForEachMutableElementWithStatus(
[&](const xla::ShapeIndex& index, se::DeviceMemoryBase* buffer) {
@@ -241,9 +280,12 @@ void XlaTransferManager::CopyDeviceTensorToDevice(const Tensor& src_tensor,
}
XlaDeviceContext::XlaDeviceContext(
- se::Stream* stream, xla::LocalClient* client, bool transfer_as_literal,
+ se::Stream* compute_stream, se::Stream* host_to_device_stream,
+ se::Stream* device_to_host_stream, xla::LocalClient* client,
+ bool transfer_as_literal,
XlaCompiler::ShapeRepresentationFn shape_representation_fn)
- : manager_(stream, client, transfer_as_literal,
+ : manager_(compute_stream, host_to_device_stream, device_to_host_stream,
+ client, transfer_as_literal,
std::move(shape_representation_fn)) {}
void XlaDeviceContext::CopyCPUTensorToDevice(const Tensor* cpu_tensor,
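Note: the producer/consumer handshake added above follows one StreamExecutor pattern: record an se::Event on the stream that writes a buffer, and have any stream that later reads the buffer wait on that event. A condensed sketch of the pattern in isolation (the includes and ownership handling here are assumptions; in the real change the event is kept alive by storing it in the XlaTensor via SetDefinedOn()):

    #include <memory>

    #include "tensorflow/core/platform/logging.h"
    #include "tensorflow/core/platform/stream_executor.h"

    namespace se = ::stream_executor;

    // Record completion on the producing stream and gate the consuming stream
    // on it; the returned event must outlive the enqueued wait.
    std::shared_ptr<se::Event> SignalThenWait(se::Stream* producer,
                                              se::Stream* consumer) {
      auto event = std::make_shared<se::Event>(producer->parent());
      CHECK(event->Init()) << "Event failed to initialize!";
      producer->ThenRecordEvent(event.get());
      consumer->ThenWaitFor(event.get());
      return event;
    }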
diff --git a/tensorflow/compiler/jit/xla_device_context.h b/tensorflow/compiler/jit/xla_device_context.h
index ee346e5653..c726495f96 100644
--- a/tensorflow/compiler/jit/xla_device_context.h
+++ b/tensorflow/compiler/jit/xla_device_context.h
@@ -47,7 +47,9 @@ class XlaDeviceAllocator : public Allocator {
class XlaTransferManager {
public:
explicit XlaTransferManager(
- se::Stream* stream, xla::LocalClient* client, bool transfer_as_literal,
+ se::Stream* compute_stream, se::Stream* host_to_device_stream,
+ se::Stream* device_to_host_stream, xla::LocalClient* client,
+ bool transfer_as_literal,
XlaCompiler::ShapeRepresentationFn shape_representation_fn);
void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
@@ -66,10 +68,17 @@ class XlaTransferManager {
Tensor* device_tensor) const;
Status TransferLiteralFromDevice(Tensor* host_tensor,
const Tensor& device_tensor) const;
+ bool UseMultipleStreams() const { return stream_ != host_to_device_stream_; }
- // Stream obtained from a Device, used to transfer tensors between
- // CPU and device.
+ // The main compute stream of the device, used to synchronize the transfer
+ // streams if they are set.
se::Stream* stream_;
+ // The stream to use for transferring data from host to device. Can be
+  // identical to stream_, but must not be nullptr.
+ se::Stream* host_to_device_stream_;
+ // The stream to use for transferring data from device to host. Can be
+  // identical to stream_, but must not be nullptr.
+ se::Stream* device_to_host_stream_;
// For the underlying memory allocator and XLA's TransferManager.
xla::LocalClient* client_;
// Transfer manager, for marshalling data to and from the device.
@@ -85,7 +94,9 @@ class XlaTransferManager {
class XlaDeviceContext : public DeviceContext {
public:
explicit XlaDeviceContext(
- se::Stream* stream, xla::LocalClient* client, bool transfer_as_literal,
+ se::Stream* compute_stream, se::Stream* host_to_device_stream,
+ se::Stream* device_to_host_stream, xla::LocalClient* client,
+ bool transfer_as_literal,
XlaCompiler::ShapeRepresentationFn shape_representation_fn);
void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
diff --git a/tensorflow/compiler/jit/xla_device_ops.h b/tensorflow/compiler/jit/xla_device_ops.h
index 11e45d2823..134dcc1bb5 100644
--- a/tensorflow/compiler/jit/xla_device_ops.h
+++ b/tensorflow/compiler/jit/xla_device_ops.h
@@ -23,9 +23,11 @@ limitations under the License.
#include "tensorflow/core/kernels/cast_op.h"
#include "tensorflow/core/kernels/constant_op.h"
#include "tensorflow/core/kernels/control_flow_ops.h"
+#include "tensorflow/core/kernels/fifo_queue.h"
#include "tensorflow/core/kernels/identity_n_op.h"
#include "tensorflow/core/kernels/identity_op.h"
#include "tensorflow/core/kernels/no_op.h"
+#include "tensorflow/core/kernels/queue_op.h"
#include "tensorflow/core/kernels/resource_variable_ops.h"
#include "tensorflow/core/kernels/sendrecv_ops.h"
#include "tensorflow/core/kernels/shape_ops.h"
@@ -88,6 +90,9 @@ class XlaAssignVariableOp : public AsyncOpKernel {
REGISTER_KERNEL_BUILDER( \
Name("ReadVariableOp").Device(DEVICE).HostMemory("resource"), \
ReadVariableOp); \
+ REGISTER_KERNEL_BUILDER( \
+ Name("DestroyResourceOp").Device(DEVICE).HostMemory("resource"), \
+ DestroyResourceOp); \
REGISTER_KERNEL_BUILDER(Name("Shape") \
.Device(DEVICE) \
.HostMemory("output") \
@@ -145,7 +150,32 @@ class XlaAssignVariableOp : public AsyncOpKernel {
.Device(DEVICE) \
.HostMemory("input") \
.HostMemory("output"), \
- LoopCondOp);
+ LoopCondOp); \
+ \
+ REGISTER_KERNEL_BUILDER( \
+ Name("QueueEnqueueV2").Device(DEVICE).HostMemory("handle"), EnqueueOp); \
+ REGISTER_KERNEL_BUILDER( \
+ Name("QueueDequeueV2").Device(DEVICE).HostMemory("handle"), DequeueOp); \
+ REGISTER_KERNEL_BUILDER( \
+ Name("QueueCloseV2").Device(DEVICE).HostMemory("handle"), QueueCloseOp); \
+ REGISTER_KERNEL_BUILDER(Name("QueueSizeV2") \
+ .Device(DEVICE) \
+ .HostMemory("size") \
+ .HostMemory("handle"), \
+ QueueSizeOp); \
+ REGISTER_KERNEL_BUILDER( \
+ Name("QueueIsClosedV2").Device(DEVICE).HostMemory("handle"), \
+ QueueIsClosedOp); \
+ \
+ REGISTER_KERNEL_BUILDER( \
+ Name("FIFOQueueV2").Device(DEVICE).HostMemory("handle"), FIFOQueueOp);
+
+// TODO(phawkins): currently we do not register the QueueEnqueueMany,
+// QueueDequeueMany, or QueueDequeueUpTo kernels because they attempt to read
+// and write the tensors they access in order to concatenate them into a batch.
+// We would need either to call out to an XLA computation to perform the
+// concatenation, or we would need to refactor those kernels so the splitting
+// or merging is done in a separate operator that can be compiled.
} // namespace tensorflow
diff --git a/tensorflow/compiler/jit/xla_gpu_device.cc b/tensorflow/compiler/jit/xla_gpu_device.cc
index c0d86a28c7..851b118b0c 100644
--- a/tensorflow/compiler/jit/xla_gpu_device.cc
+++ b/tensorflow/compiler/jit/xla_gpu_device.cc
@@ -49,6 +49,7 @@ Status XlaGpuDeviceFactory::CreateDevices(const SessionOptions& options,
XlaDevice::Create("CUDA", DEVICE_XLA_GPU, 0, DEVICE_GPU_XLA_JIT, options,
name_prefix, registration,
/*transfer_as_literal=*/false,
+ /*use_multiple_streams=*/false,
/*shape_representation_fn=*/{},
/*padded_shape_fn=*/{}, &device);
if (!status.ok()) {
diff --git a/tensorflow/compiler/jit/xla_interpreter_device.cc b/tensorflow/compiler/jit/xla_interpreter_device.cc
index 661187f4a8..4574559674 100644
--- a/tensorflow/compiler/jit/xla_interpreter_device.cc
+++ b/tensorflow/compiler/jit/xla_interpreter_device.cc
@@ -52,6 +52,7 @@ Status XlaInterpreterDeviceFactory::CreateDevices(
DEVICE_INTERPRETER_XLA_JIT, options,
name_prefix, registration,
/*transfer_as_literal=*/false,
+ /*use_multiple_streams=*/false,
/*shape_representation_fn=*/{},
/*padded_shape_fn=*/{}, &device));
devices->push_back(device.release());
diff --git a/tensorflow/compiler/jit/xla_launch_util.cc b/tensorflow/compiler/jit/xla_launch_util.cc
index d0c7a93651..616c3ed2a2 100644
--- a/tensorflow/compiler/jit/xla_launch_util.cc
+++ b/tensorflow/compiler/jit/xla_launch_util.cc
@@ -115,14 +115,22 @@ using internal::ExtractSubShapedBuffer;
XlaComputationLaunchContext::XlaComputationLaunchContext(
xla::LocalClient* client, xla::DeviceMemoryAllocator* xla_allocator,
- bool allocate_xla_tensors)
+ bool allocate_xla_tensors, bool use_multiple_streams)
: client_(client),
xla_allocator_(xla_allocator),
- allocate_xla_tensors_(allocate_xla_tensors) {}
+ allocate_xla_tensors_(allocate_xla_tensors),
+ use_multiple_streams_(use_multiple_streams) {
+ if (use_multiple_streams_) {
+ CHECK(allocate_xla_tensors_) << "To use multiple streams correctly we must "
+ "be allocating XLA tensors!";
+ }
+}
void XlaComputationLaunchContext::PopulateInputs(
OpKernelContext* ctx, const XlaCompiler::CompilationResult* kernel,
const std::map<int, OptionalTensor>& variables) {
+ se::Stream* stream =
+ ctx->op_device_context() ? ctx->op_device_context()->stream() : nullptr;
// Build ShapedBuffers that point directly to the Tensor buffers.
arg_buffers_.reserve(kernel->xla_input_shapes.size() + 1);
arg_buffers_.resize(kernel->xla_input_shapes.size());
@@ -140,6 +148,16 @@ void XlaComputationLaunchContext::PopulateInputs(
t = &(ctx->input(arg_num));
}
+ if (use_multiple_streams_) {
+ CHECK(stream) << "Must have a stream available when using XLA tensors!";
+ XlaTensor* xla_tensor = XlaTensor::FromTensor(t);
+ CHECK(xla_tensor);
+ if (se::Event* event = xla_tensor->GetDefinitionEvent(stream)) {
+ stream->ThenWaitFor(event);
+ xla_tensor->SetDefinedOn(stream);
+ }
+ }
+
const xla::Shape on_device_shape =
client_->backend().transfer_manager()->HostShapeToDeviceShape(shape);
if (xla::ShapeUtil::IsTuple(on_device_shape)) {
@@ -176,6 +194,21 @@ void XlaComputationLaunchContext::PopulateOutputs(
}
CHECK_EQ(ctx->num_outputs(), kernel->outputs.size());
+ // If the on-host-shape isn't a tuple, create a new single-element tuple
+ // buffer with a nullptr root index table. This allows the code below to treat
+ // output as a tuple unconditionally.
+ if (!xla::ShapeUtil::IsTuple(output.on_host_shape())) {
+ ShapedBuffer nontuple_buffer = output.release();
+ ShapedBuffer buffer(
+ xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_host_shape()}),
+ xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_device_shape()}),
+ output.platform(), output.device_ordinal());
+ buffer.buffers().CopySubtreeFrom(nontuple_buffer.buffers(),
+ /*source_base_index=*/{},
+ /*target_base_index=*/{0});
+ output = ScopedShapedBuffer(std::move(buffer), output.memory_allocator());
+ }
+
// Copy XLA results to the OpOutputList.
int output_num = 0;
for (int i = 0; i < ctx->num_outputs(); ++i) {
@@ -230,9 +263,20 @@ void XlaComputationLaunchContext::PopulateOutputs(
Tensor* output_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_output(i, shape, &output_tensor));
XlaTensor* xla_tensor = XlaTensor::FromTensor(output_tensor);
- CHECK(xla_tensor);
- xla_tensor->set_shaped_buffer(ScopedShapedBuffer(
- ExtractSubShapedBuffer(&output, output_num, xla_allocator_)));
+ if (xla_tensor) {
+ xla_tensor->set_shaped_buffer(ScopedShapedBuffer(
+ ExtractSubShapedBuffer(&output, output_num, xla_allocator_)));
+ if (use_multiple_streams_) {
+ se::Event event(stream->parent());
+ CHECK(event.Init());
+ stream->ThenRecordEvent(&event);
+ xla_tensor->SetDefinedOn(stream, std::move(event));
+ }
+ } else {
+ // xla_tensor wasn't valid, which must mean this is a zero-element
+ // tensor.
+ CHECK_EQ(output_tensor->TotalBytes(), 0);
+ }
} else {
Tensor output_tensor = XlaTensorBuffer::MakeTensor(
ctx->expected_output_dtype(i), shape, buffer, allocator);
@@ -282,6 +326,12 @@ void XlaComputationLaunchContext::PopulateOutputs(
CHECK(xla_tensor);
xla_tensor->set_shaped_buffer(
ExtractSubShapedBuffer(&output, output_num, xla_allocator_));
+ if (use_multiple_streams_) {
+ se::Event event(stream->parent());
+ CHECK(event.Init());
+ stream->ThenRecordEvent(&event);
+ xla_tensor->SetDefinedOn(stream, std::move(event));
+ }
*variable->tensor() = output_tensor;
} else {
Tensor output_tensor = XlaTensorBuffer::MakeTensor(
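
Note on the hunk above: the single-element-tuple wrapping exists so that the output-handling loop can index sub-buffers uniformly, whether or not the computation returned a tuple. A minimal Python sketch of the same normalization idea (toy values standing in for ShapedBuffers; illustrative only, not the XLA C++ API):

# Toy sketch: wrap a lone result so downstream code can always index it.
def normalize_to_tuple(output):
    # If the result is already a tuple of buffers, leave it alone;
    # otherwise wrap it as a one-element tuple.
    if isinstance(output, tuple):
        return output
    return (output,)

bufs = normalize_to_tuple("buffer0")
assert bufs[0] == "buffer0"  # every caller can now use bufs[i]
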
diff --git a/tensorflow/compiler/jit/xla_launch_util.h b/tensorflow/compiler/jit/xla_launch_util.h
index 4390701ccb..90531174ff 100644
--- a/tensorflow/compiler/jit/xla_launch_util.h
+++ b/tensorflow/compiler/jit/xla_launch_util.h
@@ -76,9 +76,15 @@ class XlaComputationLaunchContext {
// Create a new launch context. 'allocate_xla_tensors' is true if allocated
// output tensors and variables are always XlaTensors. If false they are
// assumed to be "normal" device pointers.
+ // If 'use_multiple_streams' is true, tensors may be defined and used on
+ // multiple streams and so se::Events must be defined and waited for. If
+ // 'use_multiple_streams' is true, 'allocate_xla_tensors' must also be true
+ // because we track inter-stream dependencies through events inside XlaTensor
+ // objects.
XlaComputationLaunchContext(xla::LocalClient* client,
xla::DeviceMemoryAllocator* xla_allocator,
- bool allocate_xla_tensors);
+ bool allocate_xla_tensors,
+ bool use_multiple_streams);
// Add all inputs within `ctx` as XLA arguments (returned by arguments()).
// `variables` is a map from TensorFlow argument number to resource variable.
@@ -99,6 +105,7 @@ class XlaComputationLaunchContext {
xla::LocalClient* client_;
xla::DeviceMemoryAllocator* xla_allocator_;
bool allocate_xla_tensors_;
+ bool use_multiple_streams_;
std::vector<std::unique_ptr<xla::ShapedBuffer>> arg_buffers_;
std::vector<xla::ShapedBuffer*> arg_ptrs_;
};
diff --git a/tensorflow/compiler/jit/xla_tensor.cc b/tensorflow/compiler/jit/xla_tensor.cc
index 3c44c4ae6d..5dff187fff 100644
--- a/tensorflow/compiler/jit/xla_tensor.cc
+++ b/tensorflow/compiler/jit/xla_tensor.cc
@@ -73,6 +73,36 @@ Status XlaTensor::AllocateShapedBuffer(DataType dtype, const TensorShape& shape,
return Status::OK();
}
+se::Event* XlaTensor::GetDefinitionEvent(se::Stream* stream) {
+ mutex_lock lock(mu_);
+ if (!definition_event_.has_value()) {
+ return nullptr;
+ }
+
+ // The set of defined streams is expected to be very small indeed (usually
+ // 1-2), so a simple linear scan should be fast enough.
+ if (std::find(streams_defined_on_.begin(), streams_defined_on_.end(),
+ stream) != streams_defined_on_.end()) {
+ // stream is in streams_defined_on_; it doesn't need to be waited on.
+ return nullptr;
+ }
+
+ return &*definition_event_;
+}
+
+void XlaTensor::SetDefinedOn(se::Stream* stream, se::Event event) {
+ mutex_lock lock(mu_);
+ CHECK(!definition_event_.has_value())
+ << "SetDefinedOn must only be called once!";
+ definition_event_ = std::move(event);
+ streams_defined_on_.push_back(stream);
+}
+
+void XlaTensor::SetDefinedOn(se::Stream* stream) {
+ mutex_lock lock(mu_);
+ streams_defined_on_.push_back(stream);
+}
+
// The pointer tag, OR-ed into the XlaTensor's address to distinguish it from
// device-side tensors, which are either CPU or GPU memory pointers. This works
// because we're guaranteed that CPU and GPU pointers are aligned to > 1 bits.
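
The tagging scheme this comment describes relies on pointer alignment: real device pointers have their low bit clear, so OR-ing in a tag bit yields a value that can never collide with a device pointer and can be stripped to recover the original address. A small Python sketch of the idea, with plain integers standing in for addresses (illustrative only):

TAG = 0x1  # the low bit is free on aligned pointers

def to_opaque(addr):
    # Tag an aligned address so it is distinguishable from raw pointers.
    assert addr & TAG == 0, "address must be aligned"
    return addr | TAG

def from_opaque(opaque):
    # Strip the tag to recover the original address.
    return opaque & ~TAG

def is_tagged(value):
    return bool(value & TAG)

p = 0x7f00_0000_0040  # a hypothetical aligned address
assert is_tagged(to_opaque(p)) and not is_tagged(p)
assert from_opaque(to_opaque(p)) == p
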
diff --git a/tensorflow/compiler/jit/xla_tensor.h b/tensorflow/compiler/jit/xla_tensor.h
index c54001a999..f7e401c731 100644
--- a/tensorflow/compiler/jit/xla_tensor.h
+++ b/tensorflow/compiler/jit/xla_tensor.h
@@ -85,6 +85,24 @@ class XlaTensor {
host_tensor_.reset(new Tensor(tensor));
}
+ // If the tensor's content is not yet defined on 'stream', and there exists an
+ // se::Event declaring when the tensor's content is defined, return it.
+ // Otherwise, return nullptr. If this function returns nullptr then the
+ // tensor's content can be read on 'stream' without additional
+ // synchronization.
+ se::Event* GetDefinitionEvent(se::Stream* stream);
+
+ // Assert that the tensor's content is defined on 'stream' by the time 'event'
+ // triggers.
+ void SetDefinedOn(se::Stream* stream, se::Event event);
+
+ // Assert that the tensor's content is defined on 'stream'. This version does
+ // not provide an event, and must be called *after* SetDefinedOn(Stream,
+ // Event). This call can be read as an assertion that the definition event has
+ // been waited on by 'stream', so further calls to GetDefinitionEvent(stream)
+ // do not need to also wait on the event.
+ void SetDefinedOn(se::Stream* stream);
+
// Convert from a raw pointer to an XlaTensor, removing the pointer tag.
static XlaTensor* FromOpaquePointer(void* ptr);
// Convert to a raw pointer from an XlaTensor, adding the pointer tag.
@@ -95,6 +113,14 @@ class XlaTensor {
std::unique_ptr<xla::ScopedShapedBuffer> shaped_buffer_;
// An optional host tensor value.
std::unique_ptr<Tensor> host_tensor_;
+ // An optional event that is triggered when the tensor's content has been
+ // defined. If this event is nullptr, it is assumed that the tensor's content
+ // is always defined.
+ gtl::optional<se::Event> definition_event_;
+ // A list of all streams for which the tensor's content is defined for any
+ // newly enqueued command.
+ gtl::InlinedVector<se::Stream*, 2> streams_defined_on_ GUARDED_BY(mu_);
+ mutex mu_;
};
} // namespace tensorflow
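
Taken together, GetDefinitionEvent/SetDefinedOn implement a simple protocol: the producing stream records one definition event; any other stream waits on that event once and is then remembered in streams_defined_on_, so later reads on that stream skip the wait. A toy Python model of the bookkeeping (hypothetical Stream/Event stand-ins, not the StreamExecutor API):

class ToyXlaTensor(object):
    """Toy model of the definition-event bookkeeping (not the real class)."""

    def __init__(self):
        self.definition_event = None  # set once by the producer
        self.streams_defined_on = []  # streams that no longer need to wait

    def set_defined_on(self, stream, event=None):
        if event is not None:
            assert self.definition_event is None, "must only be set once"
            self.definition_event = event
        self.streams_defined_on.append(stream)

    def get_definition_event(self, stream):
        # Return an event to wait on, or None if 'stream' is already safe.
        if self.definition_event is None or stream in self.streams_defined_on:
            return None
        return self.definition_event

t = ToyXlaTensor()
t.set_defined_on("stream0", event="ev")           # producer records the event
assert t.get_definition_event("stream1") == "ev"  # consumer must wait once
t.set_defined_on("stream1")                       # after waiting, mark defined
assert t.get_definition_event("stream1") is None  # no second wait needed
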
diff --git a/tensorflow/compiler/tests/BUILD b/tensorflow/compiler/tests/BUILD
index e72c409d65..080bed50e6 100644
--- a/tensorflow/compiler/tests/BUILD
+++ b/tensorflow/compiler/tests/BUILD
@@ -71,6 +71,19 @@ py_test(
)
tf_xla_py_test(
+ name = "adadelta_test",
+ size = "medium",
+ srcs = ["adadelta_test.py"],
+ deps = [
+ ":xla_test",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:platform_test",
+ "//tensorflow/python:training",
+ ],
+)
+
+tf_xla_py_test(
name = "adagrad_test",
size = "small",
srcs = ["adagrad_test.py"],
@@ -85,6 +98,19 @@ tf_xla_py_test(
)
tf_xla_py_test(
+ name = "adagrad_da_test",
+ size = "small",
+ srcs = ["adagrad_da_test.py"],
+ deps = [
+ ":xla_test",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:platform_test",
+ "//tensorflow/python:training",
+ ],
+)
+
+tf_xla_py_test(
name = "adam_test",
size = "small",
srcs = ["adam_test.py"],
@@ -99,6 +125,48 @@ tf_xla_py_test(
)
tf_xla_py_test(
+ name = "adamax_test",
+ size = "small",
+ srcs = ["adamax_test.py"],
+ deps = [
+ ":xla_test",
+ "//tensorflow/contrib/opt:opt_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:training",
+ ],
+)
+
+tf_xla_py_test(
+ name = "addsign_test",
+ size = "small",
+ srcs = ["addsign_test.py"],
+ deps = [
+ ":xla_test",
+ "//tensorflow/contrib/opt:opt_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:training",
+ ],
+)
+
+tf_xla_py_test(
+ name = "powersign_test",
+ size = "small",
+ srcs = ["powersign_test.py"],
+ deps = [
+ ":xla_test",
+ "//tensorflow/contrib/opt:opt_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:training",
+ ],
+)
+
+tf_xla_py_test(
name = "argminmax_test",
size = "small",
srcs = ["argminmax_test.py"],
@@ -167,7 +235,7 @@ tf_xla_py_test(
tf_xla_py_test(
name = "cholesky_op_test",
- size = "small",
+ size = "medium",
srcs = ["cholesky_op_test.py"],
tags = ["optonly"],
deps = [
@@ -350,7 +418,7 @@ tf_xla_py_test(
tf_xla_py_test(
name = "eager_test",
- size = "small",
+ size = "large",
srcs = ["eager_test.py"],
disabled_backends = [
# TODO(b/78199195) Support XLA CPU devices in eager runtime
@@ -372,6 +440,20 @@ tf_xla_py_test(
)
tf_xla_py_test(
+ name = "fifo_queue_test",
+ size = "medium",
+ srcs = ["fifo_queue_test.py"],
+ deps = [
+ ":xla_test",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:extra_py_tests_deps",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:platform_test",
+ ],
+)
+
+tf_xla_py_test(
name = "fft_test",
size = "medium",
srcs = ["fft_test.py"],
@@ -557,6 +639,53 @@ tf_xla_py_test(
)
tf_xla_py_test(
+ name = "proximal_adagrad_test",
+ size = "medium",
+ srcs = ["proximal_adagrad_test.py"],
+ deps = [
+ ":xla_test",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:training",
+ ],
+)
+
+tf_xla_py_test(
+ name = "proximal_gradient_descent_test",
+ size = "medium",
+ srcs = ["proximal_gradient_descent_test.py"],
+ deps = [
+ ":xla_test",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:training",
+ ],
+)
+
+tf_xla_py_test(
+ name = "qr_op_test",
+ size = "medium",
+ srcs = ["qr_op_test.py"],
+ disabled_backends = [
+ # Test is very slow on CPU.
+ "cpu",
+ "cpu_ondemand",
+ ],
+ tags = ["optonly"],
+ deps = [
+ ":xla_test",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform_test",
+ "//tensorflow/python:training",
+ "@absl_py//absl/testing:parameterized",
+ ],
+)
+
+tf_xla_py_test(
name = "random_ops_test",
size = "small",
srcs = ["random_ops_test.py"],
@@ -689,6 +818,19 @@ tf_xla_py_test(
)
tf_xla_py_test(
+ name = "sparse_to_dense_op_test",
+ size = "small",
+ srcs = ["sparse_to_dense_op_test.py"],
+ deps = [
+ ":xla_test",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:platform_test",
+ "//tensorflow/python:sparse_ops",
+ ],
+)
+
+tf_xla_py_test(
name = "stack_ops_test",
size = "small",
srcs = ["stack_ops_test.py"],
@@ -858,8 +1000,10 @@ tf_xla_py_test(
tf_xla_py_test(
name = "sort_ops_test",
- size = "small",
+ size = "medium",
srcs = ["sort_ops_test.py"],
+ # Times out in fastbuild mode.
+ tags = ["optonly"],
deps = [
"//tensorflow/compiler/tests:xla_test",
"//tensorflow/compiler/tf2xla/python:xla",
diff --git a/tensorflow/compiler/tests/adadelta_test.py b/tensorflow/compiler/tests/adadelta_test.py
new file mode 100644
index 0000000000..3e3c09c66e
--- /dev/null
+++ b/tensorflow/compiler/tests/adadelta_test.py
@@ -0,0 +1,134 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for Adadelta Optimizer."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.compiler.tests import xla_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import resource_variable_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+from tensorflow.python.training import adadelta
+
+
+class AdadeltaOptimizerTest(xla_test.XLATestCase):
+
+ def testBasic(self):
+ num_updates = 4 # number of ADADELTA steps to perform
+ for dtype in self.float_types:
+ with self.test_session(), self.test_scope():
+ for grad in [0.2, 0.1, 0.01]:
+ for lr in [1.0, 0.5, 0.1]:
+ var0_init = [1.0, 2.0]
+ var1_init = [3.0, 4.0]
+ var0 = resource_variable_ops.ResourceVariable(
+ var0_init, dtype=dtype)
+ var1 = resource_variable_ops.ResourceVariable(
+ var1_init, dtype=dtype)
+
+ grads = constant_op.constant([grad, grad], dtype=dtype)
+
+ accum = 0.0
+ accum_update = 0.0
+
+ # ADADELTA gradient optimizer
+ rho = 0.95
+ epsilon = 1e-8
+ adadelta_opt = adadelta.AdadeltaOptimizer(
+ learning_rate=lr, rho=rho, epsilon=epsilon)
+ adadelta_update = adadelta_opt.apply_gradients(
+ zip([grads, grads], [var0, var1]))
+ self.evaluate(variables.global_variables_initializer())
+ opt_vars = adadelta_opt.variables()
+ self.assertStartsWith(opt_vars[0].name, var0._shared_name)
+ self.assertStartsWith(opt_vars[1].name, var0._shared_name)
+ self.assertStartsWith(opt_vars[2].name, var1._shared_name)
+ self.assertStartsWith(opt_vars[3].name, var1._shared_name)
+ self.assertEqual(4, len(opt_vars))
+ # Assign slots
+ slot = [None] * 2
+ slot_update = [None] * 2
+ self.assertEqual(["accum", "accum_update"],
+ adadelta_opt.get_slot_names())
+ slot[0] = adadelta_opt.get_slot(var0, "accum")
+ self.assertEqual(slot[0].get_shape(), var0.get_shape())
+ self.assertFalse(slot[0] in variables.trainable_variables())
+
+ slot_update[0] = adadelta_opt.get_slot(var0, "accum_update")
+ self.assertEqual(slot_update[0].get_shape(), var0.get_shape())
+ self.assertFalse(slot_update[0] in variables.trainable_variables())
+
+ slot[1] = adadelta_opt.get_slot(var1, "accum")
+ self.assertEqual(slot[1].get_shape(), var1.get_shape())
+ self.assertFalse(slot[1] in variables.trainable_variables())
+
+ slot_update[1] = adadelta_opt.get_slot(var1, "accum_update")
+ self.assertEqual(slot_update[1].get_shape(), var1.get_shape())
+ self.assertFalse(slot_update[1] in variables.trainable_variables())
+
+ # Fetch params to validate initial values
+ self.assertAllClose(var0_init, self.evaluate(var0))
+ self.assertAllClose(var1_init, self.evaluate(var1))
+
+ update = [None] * num_updates
+ tot_update = 0
+ for step in range(num_updates):
+ # Run adadelta update for comparison
+ self.evaluate(adadelta_update)
+
+ # Perform initial update without previous accum values
+ accum = accum * rho + (grad**2) * (1 - rho)
+ update[step] = (
+ np.sqrt(accum_update + epsilon) *
+ (1. / np.sqrt(accum + epsilon)) * grad)
+ accum_update = (
+ accum_update * rho + (update[step]**2) * (1.0 - rho))
+ tot_update += update[step] * lr
+
+ # Check that the accumulators have been updated
+ for slot_idx in range(2):
+ self.assertAllCloseAccordingToType(
+ np.array([accum, accum], dtype=dtype),
+ self.evaluate(slot[slot_idx]),
+ rtol=1e-5)
+
+ self.assertAllCloseAccordingToType(
+ np.array([accum_update, accum_update], dtype=dtype),
+ self.evaluate(slot_update[slot_idx]),
+ rtol=1e-5)
+
+ # Check that the parameters have been updated
+ self.assertAllCloseAccordingToType(
+ np.array(
+ [var0_init[0] - tot_update, var0_init[1] - tot_update],
+ dtype=dtype),
+ self.evaluate(var0),
+ rtol=1e-5)
+
+ self.assertAllCloseAccordingToType(
+ np.array(
+ [var1_init[0] - tot_update, var1_init[1] - tot_update],
+ dtype=dtype),
+ self.evaluate(var1),
+ rtol=1e-5)
+
+
+if __name__ == "__main__":
+ test.main()
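
The reference math interleaved with the assertions above is the standard Adadelta recurrence. Extracted into a standalone helper, it reads as follows (an illustrative restatement of the same formulas, not part of the patch):

import numpy as np

def adadelta_update_numpy(accum, accum_update, grad, lr, rho=0.95,
                          epsilon=1e-8):
    # E[g^2] <- rho * E[g^2] + (1 - rho) * g^2
    accum = rho * accum + (1.0 - rho) * grad**2
    # update = sqrt(E[dx^2] + eps) / sqrt(E[g^2] + eps) * g
    update = np.sqrt(accum_update + epsilon) / np.sqrt(accum + epsilon) * grad
    # E[dx^2] <- rho * E[dx^2] + (1 - rho) * update^2
    accum_update = rho * accum_update + (1.0 - rho) * update**2
    return accum, accum_update, update * lr

# One step with the test's first configuration (grad=0.2, lr=1.0):
accum, accum_update, delta = adadelta_update_numpy(0.0, 0.0, 0.2, 1.0)
print(accum, accum_update, delta)  # the variable moves by -delta
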
diff --git a/tensorflow/compiler/tests/adagrad_da_test.py b/tensorflow/compiler/tests/adagrad_da_test.py
new file mode 100644
index 0000000000..dc1625793a
--- /dev/null
+++ b/tensorflow/compiler/tests/adagrad_da_test.py
@@ -0,0 +1,165 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for AdagradDA optimizer."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.compiler.tests import xla_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import resource_variable_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+from tensorflow.python.training import adagrad_da
+
+
+class AdagradDAOptimizerTest(xla_test.XLATestCase):
+
+ def testAdagradDAWithoutRegularizationBasic1(self):
+ for dtype in self.float_types:
+ with self.test_session(), self.test_scope():
+ global_step = resource_variable_ops.ResourceVariable(
+ 0, dtype=dtypes.int64)
+ var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
+ var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
+ grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
+ grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
+ opt = adagrad_da.AdagradDAOptimizer(
+ 3.0,
+ global_step,
+ initial_gradient_squared_accumulator_value=0.1,
+ l1_regularization_strength=0.0,
+ l2_regularization_strength=0.0)
+ update = opt.apply_gradients(
+ zip([grads0, grads1], [var0, var1]), global_step=global_step)
+ variables.global_variables_initializer().run()
+
+ self.assertAllClose([0.0, 0.0], var0.eval())
+ self.assertAllClose([0.0, 0.0], var1.eval())
+
+ # Run a step of AdagradDA
+ update.run()
+
+ # Let g be the gradient accumulator, gg the gradient squared
+ # accumulator, T the global step, lr the learning rate, and k the
+ # initial gradient squared accumulator value.
+ # w = \dfrac{sign(-g)*lr*|g - l1*T|_{+}}{l2*T*lr + \sqrt{k+gg}}
+ # For var0[0]: sign(-0.1)*3.0*(0.1 - 0)/(0 + sqrt(0.1 + 0.1*0.1)) = -0.904534;
+ # the other entries follow similarly.
+ self.assertAllCloseAccordingToType(
+ np.array([-0.904534, -1.603567]), var0.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([-0.094821, -0.189358]), var1.eval())
+
+ def testAdagradDAWithoutRegularizationBasic2(self):
+ for dtype in self.float_types:
+ with self.test_session(), self.test_scope():
+ global_step = resource_variable_ops.ResourceVariable(
+ 0, dtype=dtypes.int64)
+ var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
+ var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
+ grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
+ grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
+
+ opt = adagrad_da.AdagradDAOptimizer(
+ 3.0,
+ global_step,
+ initial_gradient_squared_accumulator_value=0.1,
+ l1_regularization_strength=0.0,
+ l2_regularization_strength=0.0)
+ update = opt.apply_gradients(
+ zip([grads0, grads1], [var0, var1]), global_step=global_step)
+ variables.global_variables_initializer().run()
+
+ self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
+ self.assertAllCloseAccordingToType([4.0, 3.0], var1.eval())
+
+ # Run a step of AdagradDA
+ update.run()
+
+ self.assertAllCloseAccordingToType(
+ np.array([-0.904534, -1.603567]), var0.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([-0.094821, -0.189358]), var1.eval())
+
+ def testAdagradDAWithL1(self):
+ for dtype in self.float_types:
+ with self.test_session(), self.test_scope():
+ global_step = resource_variable_ops.ResourceVariable(
+ 0, dtype=dtypes.int64)
+ var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
+ var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
+ grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
+ grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
+
+ opt = adagrad_da.AdagradDAOptimizer(
+ 3.0,
+ global_step,
+ initial_gradient_squared_accumulator_value=0.1,
+ l1_regularization_strength=0.001,
+ l2_regularization_strength=0.0)
+ update = opt.apply_gradients(
+ zip([grads0, grads1], [var0, var1]), global_step=global_step)
+ variables.global_variables_initializer().run()
+
+ self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
+ self.assertAllCloseAccordingToType([4.0, 3.0], var1.eval())
+
+ # Run a step of AdagradDA
+ update.run()
+
+ self.assertAllCloseAccordingToType(
+ np.array([-0.895489, -1.59555]), var0.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([-0.085339, -0.17989]), var1.eval())
+
+ def testAdagradDAWithL1_L2(self):
+ for dtype in self.float_types:
+ with self.test_session(), self.test_scope():
+ global_step = resource_variable_ops.ResourceVariable(
+ 0, dtype=dtypes.int64)
+ var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
+ var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
+ grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
+ grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
+
+ opt = adagrad_da.AdagradDAOptimizer(
+ 3.0,
+ global_step,
+ initial_gradient_squared_accumulator_value=0.1,
+ l1_regularization_strength=0.001,
+ l2_regularization_strength=2.0)
+ update = opt.apply_gradients(
+ zip([grads0, grads1], [var0, var1]), global_step=global_step)
+ variables.global_variables_initializer().run()
+
+ self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
+ self.assertAllCloseAccordingToType([4.0, 3.0], var1.eval())
+
+ # Run a step of AdagradDA
+ update.run()
+
+ self.assertAllCloseAccordingToType(
+ np.array([-0.046907, -0.093659]), var0.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([-0.004275, -0.009023]), var1.eval())
+
+
+if __name__ == "__main__":
+ test.main()
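
The -0.904534 expected in the first test follows directly from the closed-form weight quoted in the comment above. A quick numpy check (illustrative only):

import numpy as np

g, lr, k, T = 0.1, 3.0, 0.1, 1  # accumulated grad, rate, initial accum, step
l1, l2 = 0.0, 0.0
w = np.sign(-g) * lr * max(abs(g) - l1 * T, 0.0) / (
    l2 * T * lr + np.sqrt(k + g * g))
print(w)  # -0.904534..., matching the expected var0[0]
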
diff --git a/tensorflow/compiler/tests/adagrad_test.py b/tensorflow/compiler/tests/adagrad_test.py
index 9a93b32164..d775850a80 100644
--- a/tensorflow/compiler/tests/adagrad_test.py
+++ b/tensorflow/compiler/tests/adagrad_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
@@ -28,7 +28,7 @@ from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
-class AdagradOptimizerTest(XLATestCase):
+class AdagradOptimizerTest(xla_test.XLATestCase):
def testBasic(self):
for dtype in self.float_types:
diff --git a/tensorflow/compiler/tests/adam_test.py b/tensorflow/compiler/tests/adam_test.py
index 3215dc36e5..03554d6933 100644
--- a/tensorflow/compiler/tests/adam_test.py
+++ b/tensorflow/compiler/tests/adam_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
@@ -48,7 +48,7 @@ def adam_update_numpy(param,
return param_t, m_t, v_t
-class AdamOptimizerTest(XLATestCase):
+class AdamOptimizerTest(xla_test.XLATestCase):
def testBasic(self):
for dtype in self.float_types:
diff --git a/tensorflow/compiler/tests/adamax_test.py b/tensorflow/compiler/tests/adamax_test.py
new file mode 100644
index 0000000000..c4fdbc5974
--- /dev/null
+++ b/tensorflow/compiler/tests/adamax_test.py
@@ -0,0 +1,139 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for AdaMax optimizer."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.compiler.tests import xla_test
+from tensorflow.contrib.opt.python.training import adamax
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import resource_variable_ops
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+
+
+def adamax_update_numpy(param,
+ g_t,
+ t,
+ m,
+ v,
+ alpha=0.001,
+ beta1=0.9,
+ beta2=0.999,
+ epsilon=1e-8):
+ m_t = beta1 * m + (1 - beta1) * g_t
+ v_t = np.maximum(beta2 * v, np.abs(g_t))
+ param_t = param - (alpha / (1 - beta1**t)) * (m_t / (v_t + epsilon))
+ return param_t, m_t, v_t
+
+
+class AdaMaxOptimizerTest(xla_test.XLATestCase):
+
+ def testBasic(self):
+ for i, dtype in enumerate(self.float_types):
+ with self.test_session(), self.test_scope():
+ variable_scope.get_variable_scope().set_use_resource(True)
+ # Initialize variables for numpy implementation.
+ m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
+ var0_np = np.array([1.0, 2.0], dtype=dtype)
+ grads0_np = np.array([0.1, 0.1], dtype=dtype)
+ var1_np = np.array([3.0, 4.0], dtype=dtype)
+ grads1_np = np.array([0.01, 0.01], dtype=dtype)
+
+ var0 = resource_variable_ops.ResourceVariable(
+ var0_np, name="var0_%d" % i)
+ var1 = resource_variable_ops.ResourceVariable(
+ var1_np, name="var1_%d" % i)
+ grads0 = constant_op.constant(grads0_np)
+ grads1 = constant_op.constant(grads1_np)
+
+ opt = adamax.AdaMaxOptimizer()
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ opt_variables = opt.variables()
+ beta1_power = opt._get_beta_accumulators()
+ self.assertTrue(beta1_power is not None)
+ self.assertIn(beta1_power, opt_variables)
+
+ with ops.Graph().as_default():
+ # Shouldn't return non-slot variables from other graphs.
+ self.assertEqual(0, len(opt.variables()))
+
+ variables.global_variables_initializer().run()
+ # Fetch params to validate initial values
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([3.0, 4.0], var1.eval())
+
+ beta1_power = opt._get_beta_accumulators()
+
+ # Run 3 steps of AdaMax
+ for t in range(1, 4):
+ update.run()
+
+ self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
+
+ var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
+ var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
+
+ # Validate updated params
+ self.assertAllCloseAccordingToType(var0_np, var0.eval(), rtol=1e-2)
+ self.assertAllCloseAccordingToType(var1_np, var1.eval(), rtol=1e-2)
+ self.assertEqual("var0_%d/AdaMax:0" % (i,),
+ opt.get_slot(var=var0, name="m").name)
+
+ def testTensorLearningRate(self):
+ for dtype in self.float_types:
+ with self.test_session(), self.test_scope():
+ variable_scope.get_variable_scope().set_use_resource(True)
+ # Initialize variables for numpy implementation.
+ m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
+ var0_np = np.array([1.0, 2.0], dtype=dtype)
+ grads0_np = np.array([0.1, 0.1], dtype=dtype)
+ var1_np = np.array([3.0, 4.0], dtype=dtype)
+ grads1_np = np.array([0.01, 0.01], dtype=dtype)
+
+ var0 = resource_variable_ops.ResourceVariable(var0_np)
+ var1 = resource_variable_ops.ResourceVariable(var1_np)
+ grads0 = constant_op.constant(grads0_np)
+ grads1 = constant_op.constant(grads1_np)
+ opt = adamax.AdaMaxOptimizer(constant_op.constant(0.001))
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ # Fetch params to validate initial values
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([3.0, 4.0], var1.eval())
+
+ beta1_power = opt._get_beta_accumulators()
+
+ # Run 3 steps of AdaMax
+ for t in range(1, 4):
+ self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
+ update.run()
+
+ var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
+ var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
+
+ # Validate updated params
+ self.assertAllCloseAccordingToType(var0_np, var0.eval())
+ self.assertAllCloseAccordingToType(var1_np, var1.eval())
+
+if __name__ == "__main__":
+ test.main()
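
For reference, a single step of the pure-numpy model defined near the top of this file, run with the test's initial values (a hypothetical standalone driver, not part of the patch):

import numpy as np

# Assumes adamax_update_numpy as defined above in this file.
var0 = np.array([1.0, 2.0], dtype=np.float32)
g0 = np.array([0.1, 0.1], dtype=np.float32)
var0, m0, v0 = adamax_update_numpy(var0, g0, t=1, m=0.0, v=0.0)
print(var0)  # ~[0.999, 1.999]: each entry moves by alpha = 0.001 on step 1
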
diff --git a/tensorflow/compiler/tests/addsign_test.py b/tensorflow/compiler/tests/addsign_test.py
new file mode 100644
index 0000000000..9ec5a964cb
--- /dev/null
+++ b/tensorflow/compiler/tests/addsign_test.py
@@ -0,0 +1,142 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for AddSign."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.compiler.tests import xla_test
+from tensorflow.contrib.opt.python.training import addsign
+from tensorflow.contrib.opt.python.training import sign_decay
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import resource_variable_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+
+
+def py_linear_decay_fn(decay_steps):
+ def linear_decay(step):
+ step = min(step, decay_steps)
+ return float(decay_steps - step) / decay_steps
+ return linear_decay
+
+
+def addsign_update_numpy(params,
+ g_t,
+ m,
+ lr,
+ alpha=1.0,
+ beta=0.9,
+ py_sign_decay_fn=None,
+ t=None):
+ m_t = beta * m + (1 - beta) * g_t
+ if py_sign_decay_fn is None:
+ sign_decayed = 1.0
+ else:
+ sign_decayed = py_sign_decay_fn(t-1)
+ multiplier = alpha + sign_decayed * np.sign(g_t) * np.sign(m_t)
+ params_t = params - lr * multiplier * g_t
+ return params_t, m_t
+
+
+class AddSignTest(xla_test.XLATestCase):
+
+ def _testDense(self,
+ learning_rate=0.1,
+ sign_decay_fn=None,
+ py_sign_decay_fn=None,
+ alpha=1.0,
+ beta=0.9):
+ for dtype in self.float_types:
+ with self.test_session(), self.test_scope():
+ # Initialize variables for numpy implementation.
+ m0, m1 = 0.0, 0.0
+ var0_np = np.array([1.0, 2.0], dtype=dtype)
+ grads0_np = np.array([0.1, 0.1], dtype=dtype)
+ var1_np = np.array([3.0, 4.0], dtype=dtype)
+ grads1_np = np.array([0.01, 0.01], dtype=dtype)
+
+ var0 = resource_variable_ops.ResourceVariable(var0_np)
+ var1 = resource_variable_ops.ResourceVariable(var1_np)
+ global_step = resource_variable_ops.ResourceVariable(0, trainable=False)
+ grads0 = constant_op.constant(grads0_np)
+ grads1 = constant_op.constant(grads1_np)
+
+ opt = addsign.AddSignOptimizer(
+ learning_rate=learning_rate,
+ alpha=alpha,
+ beta=beta,
+ sign_decay_fn=sign_decay_fn,
+ )
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
+ global_step=global_step)
+ neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
+ global_step=global_step)
+ variables.global_variables_initializer().run()
+
+ # Fetch params to validate initial values
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([3.0, 4.0], var1.eval())
+
+ # Run 7 steps of AddSign
+ # first 4 steps with positive gradient
+ # last 3 steps with negative gradient (sign(gm) should be -1)
+ for t in range(1, 8):
+ if t < 5:
+ update.run()
+ else:
+ neg_update.run()
+
+ var0_np, m0 = addsign_update_numpy(
+ var0_np,
+ grads0_np if t < 5 else -grads0_np,
+ m0,
+ learning_rate,
+ alpha=alpha,
+ beta=beta,
+ py_sign_decay_fn=py_sign_decay_fn,
+ t=t,
+ )
+ var1_np, m1 = addsign_update_numpy(
+ var1_np,
+ grads1_np if t < 5 else -grads1_np,
+ m1,
+ learning_rate,
+ alpha=alpha,
+ beta=beta,
+ py_sign_decay_fn=py_sign_decay_fn,
+ t=t,
+ )
+
+ # Validate updated params
+ self.assertAllCloseAccordingToType(
+ var0_np, var0.eval(), half_rtol=1e-2)
+ self.assertAllCloseAccordingToType(var1_np, var1.eval())
+
+ def testDense(self):
+ decay_steps = 10
+ sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
+ py_sign_decay_fn = py_linear_decay_fn(decay_steps)
+ self._testDense()
+ self._testDense(learning_rate=0.01, alpha=0.1, beta=0.8)
+ self._testDense(
+ sign_decay_fn=sign_decay_fn, py_sign_decay_fn=py_sign_decay_fn)
+
+
+if __name__ == '__main__':
+ test.main()
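
The AddSign multiplier alpha + decay * sign(g) * sign(m), which the numpy model above implements, scales the step up when the gradient agrees in sign with its moving average and down when they disagree. A tiny illustration with hypothetical values:

import numpy as np

alpha, decay = 1.0, 1.0
for g, m in [(0.1, 0.05), (0.1, -0.05)]:
    multiplier = alpha + decay * np.sign(g) * np.sign(m)
    print(g, m, multiplier)  # 2.0 when signs agree, 0.0 when they disagree
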
diff --git a/tensorflow/compiler/tests/binary_ops_test.py b/tensorflow/compiler/tests/binary_ops_test.py
index afef36d9d2..9cb3d04546 100644
--- a/tensorflow/compiler/tests/binary_ops_test.py
+++ b/tensorflow/compiler/tests/binary_ops_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
@@ -32,7 +32,7 @@ from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
-class BinaryOpsTest(XLATestCase):
+class BinaryOpsTest(xla_test.XLATestCase):
"""Test cases for binary operators."""
def _testBinary(self, op, a, b, expected, equality_test=None):
diff --git a/tensorflow/compiler/tests/bucketize_op_test.py b/tensorflow/compiler/tests/bucketize_op_test.py
index fde9759a1c..ef4d5f6322 100644
--- a/tensorflow/compiler/tests/bucketize_op_test.py
+++ b/tensorflow/compiler/tests/bucketize_op_test.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
@@ -26,7 +26,7 @@ from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
-class BucketizationOpTest(XLATestCase):
+class BucketizationOpTest(xla_test.XLATestCase):
def testInt(self):
with self.test_session() as sess:
diff --git a/tensorflow/compiler/tests/categorical_op_test.py b/tensorflow/compiler/tests/categorical_op_test.py
index 035cdea178..a4e7f75081 100644
--- a/tensorflow/compiler/tests/categorical_op_test.py
+++ b/tensorflow/compiler/tests/categorical_op_test.py
@@ -22,7 +22,7 @@ import collections
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
@@ -32,7 +32,7 @@ from tensorflow.python.platform import googletest
# TODO(srvasude): Merge this with
# third_party/tensorflow/python/kernel_tests/random/multinomial_op_test.py.
-class CategoricalTest(XLATestCase):
+class CategoricalTest(xla_test.XLATestCase):
"""Test cases for random-number generating operators."""
def output_dtypes(self):
diff --git a/tensorflow/compiler/tests/cholesky_op_test.py b/tensorflow/compiler/tests/cholesky_op_test.py
index 1a8989d7c2..d2867278af 100644
--- a/tensorflow/compiler/tests/cholesky_op_test.py
+++ b/tensorflow/compiler/tests/cholesky_op_test.py
@@ -23,7 +23,7 @@ import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
@@ -32,7 +32,7 @@ from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
-class CholeskyOpTest(XLATestCase):
+class CholeskyOpTest(xla_test.XLATestCase):
# Cholesky defined for float64, float32, complex64, complex128
# (https://www.tensorflow.org/api_docs/python/tf/cholesky)
diff --git a/tensorflow/compiler/tests/clustering_test.py b/tensorflow/compiler/tests/clustering_test.py
index 574f82fc71..e42ebf8f9e 100644
--- a/tensorflow/compiler/tests/clustering_test.py
+++ b/tensorflow/compiler/tests/clustering_test.py
@@ -21,7 +21,7 @@ from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
@@ -32,7 +32,7 @@ from tensorflow.python.platform import googletest
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
-class ClusteringTest(XLATestCase):
+class ClusteringTest(xla_test.XLATestCase):
def testAdd(self):
val1 = np.array([4, 3, 2, 1], dtype=np.float32)
diff --git a/tensorflow/compiler/tests/concat_ops_test.py b/tensorflow/compiler/tests/concat_ops_test.py
index f10973e19f..d9ad428147 100644
--- a/tensorflow/compiler/tests/concat_ops_test.py
+++ b/tensorflow/compiler/tests/concat_ops_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
@@ -30,7 +30,7 @@ from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
-class ConcatTest(XLATestCase):
+class ConcatTest(xla_test.XLATestCase):
def testHStack(self):
with self.test_session():
@@ -292,7 +292,7 @@ class ConcatTest(XLATestCase):
array_ops.concat([scalar, scalar, scalar], dim)
-class ConcatOffsetTest(XLATestCase):
+class ConcatOffsetTest(xla_test.XLATestCase):
def testBasic(self):
with self.test_session() as sess:
@@ -306,7 +306,7 @@ class ConcatOffsetTest(XLATestCase):
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
-class PackTest(XLATestCase):
+class PackTest(xla_test.XLATestCase):
def testBasic(self):
with self.test_session() as sess:
diff --git a/tensorflow/compiler/tests/conv2d_test.py b/tensorflow/compiler/tests/conv2d_test.py
index d12e1ff1e8..f9db103f6d 100644
--- a/tensorflow/compiler/tests/conv2d_test.py
+++ b/tensorflow/compiler/tests/conv2d_test.py
@@ -26,23 +26,20 @@ from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import test_utils
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
-
DATA_FORMATS = (
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
- ("_data_format_HWNC", "HWNC"),
- ("_data_format_HWCN", "HWCN"),
)
-class Conv2DTest(XLATestCase, parameterized.TestCase):
+class Conv2DTest(xla_test.XLATestCase, parameterized.TestCase):
def _VerifyValues(self,
input_sizes=None,
@@ -236,7 +233,7 @@ class Conv2DTest(XLATestCase, parameterized.TestCase):
expected=np.reshape([108, 128], [1, 1, 1, 2]))
-class Conv2DBackpropInputTest(XLATestCase, parameterized.TestCase):
+class Conv2DBackpropInputTest(xla_test.XLATestCase, parameterized.TestCase):
def _VerifyValues(self,
input_sizes=None,
@@ -534,7 +531,7 @@ class Conv2DBackpropInputTest(XLATestCase, parameterized.TestCase):
expected=[5, 0, 11, 0, 0, 0, 17, 0, 23])
-class Conv2DBackpropFilterTest(XLATestCase, parameterized.TestCase):
+class Conv2DBackpropFilterTest(xla_test.XLATestCase, parameterized.TestCase):
def _VerifyValues(self,
input_sizes=None,
diff --git a/tensorflow/compiler/tests/conv3d_test.py b/tensorflow/compiler/tests/conv3d_test.py
index 3bebf46511..31ee41f04f 100644
--- a/tensorflow/compiler/tests/conv3d_test.py
+++ b/tensorflow/compiler/tests/conv3d_test.py
@@ -21,7 +21,7 @@ from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
@@ -33,7 +33,7 @@ from tensorflow.python.platform import googletest
# Test cloned from
# tensorflow/python/kernel_tests/conv3d_backprop_filter_v2_grad_test.py
-class Conv3DBackpropFilterV2GradTest(XLATestCase):
+class Conv3DBackpropFilterV2GradTest(xla_test.XLATestCase):
def testGradient(self):
with self.test_session(), self.test_scope():
@@ -66,7 +66,7 @@ class Conv3DBackpropFilterV2GradTest(XLATestCase):
# Test cloned from tensorflow/python/kernel_tests/conv3d_transpose_test.py
-class Conv3DTransposeTest(XLATestCase):
+class Conv3DTransposeTest(xla_test.XLATestCase):
def testConv3DTransposeSingleStride(self):
with self.test_session(), self.test_scope():
diff --git a/tensorflow/compiler/tests/depthwise_conv_op_test.py b/tensorflow/compiler/tests/depthwise_conv_op_test.py
index 03d96a2cd8..98dc73e189 100644
--- a/tensorflow/compiler/tests/depthwise_conv_op_test.py
+++ b/tensorflow/compiler/tests/depthwise_conv_op_test.py
@@ -21,7 +21,7 @@ from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
@@ -114,7 +114,7 @@ def CheckGradConfigsToTest():
yield i, f, o, s, p
-class DepthwiseConv2DTest(XLATestCase):
+class DepthwiseConv2DTest(xla_test.XLATestCase):
# This is testing that depthwise_conv2d and depthwise_conv2d_native
# produce the same results. It also tests that NCHW and NWHC
diff --git a/tensorflow/compiler/tests/dynamic_slice_ops_test.py b/tensorflow/compiler/tests/dynamic_slice_ops_test.py
index 6a46d2ec3e..154e36b10e 100644
--- a/tensorflow/compiler/tests/dynamic_slice_ops_test.py
+++ b/tensorflow/compiler/tests/dynamic_slice_ops_test.py
@@ -20,14 +20,14 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
-class DynamicUpdateSliceOpsTest(XLATestCase):
+class DynamicUpdateSliceOpsTest(xla_test.XLATestCase):
def _assertOpOutputMatchesExpected(self, op, args, expected):
with self.test_session() as session:
diff --git a/tensorflow/compiler/tests/dynamic_stitch_test.py b/tensorflow/compiler/tests/dynamic_stitch_test.py
index c109c27abe..edd78153b5 100644
--- a/tensorflow/compiler/tests/dynamic_stitch_test.py
+++ b/tensorflow/compiler/tests/dynamic_stitch_test.py
@@ -20,14 +20,14 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import googletest
-class DynamicStitchTest(XLATestCase):
+class DynamicStitchTest(xla_test.XLATestCase):
def _AssertDynamicStitchResultIs(self, indices, data, expected):
with self.test_session() as session:
diff --git a/tensorflow/compiler/tests/eager_test.py b/tensorflow/compiler/tests/eager_test.py
index e438832a23..6ead15da13 100644
--- a/tensorflow/compiler/tests/eager_test.py
+++ b/tensorflow/compiler/tests/eager_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
@@ -40,7 +40,7 @@ from tensorflow.python.platform import googletest
from tensorflow.python.training import adam
-class EagerTest(XLATestCase):
+class EagerTest(xla_test.XLATestCase):
def testBasic(self):
with self.test_scope():
@@ -286,7 +286,7 @@ class EagerTest(XLATestCase):
[2.0, 2.0]], embedding_matrix.numpy())
-class EagerFunctionTest(XLATestCase):
+class EagerFunctionTest(xla_test.XLATestCase):
def testBasic(self):
with self.test_scope():
@@ -403,7 +403,7 @@ class EagerFunctionTest(XLATestCase):
def testSliceInDefun(self):
with self.test_scope():
- @function.defun(compiled=True)
+ @function.defun
def f(x, y):
return x[0::2, y:, ...]
@@ -418,8 +418,24 @@ class EagerFunctionTest(XLATestCase):
self.assertAllEqual(np.ones([1, 2, 4]), z.numpy())
self.assertAllEqual((2, 3, 4), dz.shape.as_list())
+ def testNestedDefun(self):
+ self.skipTest('Nested defuns do not work on TPU at the moment')
+ with self.test_scope():
+
+ @function.defun
+ def times_two(x):
+ return 2 * x
+
+ @function.defun
+ def two_x_plus_1(x):
+ return times_two(x) + 1
+
+ x = constant_op.constant([2, 3, 4])
+ y = two_x_plus_1(x)
+ self.assertAllEqual([5, 7, 9], y.numpy())
+
-class ExcessivePaddingTest(XLATestCase):
+class ExcessivePaddingTest(xla_test.XLATestCase):
"""Test that eager execution works with TPU flattened tensors.
Tensors that would normally be excessively padded when written
@@ -470,6 +486,36 @@ class ExcessivePaddingTest(XLATestCase):
self.assertAllEqual(100 * [[36.0]], reduced)
+def multiple_tpus():
+ devices = context.context().devices()
+ return len([d for d in devices if 'device:TPU:' in d]) > 1
+
+
+class MultiDeviceTest(xla_test.XLATestCase):
+ """Test running TPU computation on more than one core."""
+
+ def testBasic(self):
+ if not multiple_tpus():
+ self.skipTest('MultiDeviceTest requires multiple TPU devices.')
+
+ # Compute 10 on TPU core 0
+ with ops.device('device:TPU:0'):
+ two = constant_op.constant(2)
+ five = constant_op.constant(5)
+ ten = two * five
+ self.assertAllEqual(10, ten)
+
+ # Compute 6 on TPU core 1
+ with ops.device('device:TPU:1'):
+ two = constant_op.constant(2)
+ three = constant_op.constant(3)
+ six = two * three
+ self.assertAllEqual(6, six)
+
+ # Copy 10 and 6 to CPU and sum them
+ self.assertAllEqual(16, ten + six)
+
+
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(log_device_placement=True))
diff --git a/tensorflow/compiler/tests/extract_image_patches_op_test.py b/tensorflow/compiler/tests/extract_image_patches_op_test.py
index 0361702e7a..5529fdbb09 100644
--- a/tensorflow/compiler/tests/extract_image_patches_op_test.py
+++ b/tensorflow/compiler/tests/extract_image_patches_op_test.py
@@ -20,13 +20,13 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
-class ExtractImagePatches(XLATestCase):
+class ExtractImagePatches(xla_test.XLATestCase):
"""Functional tests for ExtractImagePatches op."""
def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):
diff --git a/tensorflow/compiler/tests/fake_quant_ops_test.py b/tensorflow/compiler/tests/fake_quant_ops_test.py
index dfe9400ef0..c48ab178bf 100644
--- a/tensorflow/compiler/tests/fake_quant_ops_test.py
+++ b/tensorflow/compiler/tests/fake_quant_ops_test.py
@@ -17,14 +17,14 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.platform import googletest
-class FakeQuantWithMinMaxArgsTest(XLATestCase):
+class FakeQuantWithMinMaxArgsTest(xla_test.XLATestCase):
"""Test cases for FakeQuantWithMinMaxArgs operation."""
# 8 bits, wide range.
@@ -122,7 +122,7 @@ class FakeQuantWithMinMaxArgsTest(XLATestCase):
result, expected, rtol=1e-3, atol=1e-5, bfloat16_rtol=0.03)
-class FakeQuantWithMinMaxArgsGradientTest(XLATestCase):
+class FakeQuantWithMinMaxArgsGradientTest(xla_test.XLATestCase):
"""Test cases for FakeQuantWithMinMaxArgsGradient operation."""
# 8 bits, wide range.
@@ -223,7 +223,7 @@ class FakeQuantWithMinMaxArgsGradientTest(XLATestCase):
bfloat16_rtol=0.03)
-class FakeQuantWithMinMaxVarsTest(XLATestCase):
+class FakeQuantWithMinMaxVarsTest(xla_test.XLATestCase):
"""Test cases for FakeQuantWithMinMaxVars operation."""
# 8 bits, wide range.
@@ -328,7 +328,7 @@ class FakeQuantWithMinMaxVarsTest(XLATestCase):
result, expected, rtol=1e-3, atol=1e-5, bfloat16_rtol=0.03)
-class FakeQuantWithMinMaxVarsGradientTest(XLATestCase):
+class FakeQuantWithMinMaxVarsGradientTest(xla_test.XLATestCase):
"""Test cases for FakeQuantWithMinMaxVarsGradient operation."""
# 8 bits, wide range.
diff --git a/tensorflow/compiler/tests/fft_test.py b/tensorflow/compiler/tests/fft_test.py
index afb5fa4bb4..c64ea249ec 100644
--- a/tensorflow/compiler/tests/fft_test.py
+++ b/tensorflow/compiler/tests/fft_test.py
@@ -23,10 +23,11 @@ import itertools
import numpy as np
import scipy.signal as sps
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.contrib.signal.python.ops import spectral_ops as signal
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import spectral_ops
from tensorflow.python.platform import googletest
@@ -57,7 +58,7 @@ INNER_DIMS_2D = pick_10(itertools.product(POWS_OF_2, POWS_OF_2))
INNER_DIMS_3D = pick_10(itertools.product(POWS_OF_2, POWS_OF_2, POWS_OF_2))
-class FFTTest(XLATestCase):
+class FFTTest(xla_test.XLATestCase):
def _VerifyFftMethod(self, inner_dims, complex_to_input, input_to_expected,
tf_method):
@@ -97,8 +98,11 @@ class FFTTest(XLATestCase):
ph = array_ops.placeholder(
dtypes.as_dtype(data.dtype), shape=data.shape)
out = signal.stft(ph, ws, hs)
+ grad = gradients_impl.gradients(out, ph,
+ grad_ys=array_ops.ones_like(out))
- value = sess.run(out, {ph: data})
+ # For gradients, we simply verify that they compile & execute.
+ value, _ = sess.run([out, grad], {ph: data})
self.assertAllClose(expected, value, rtol=RTOL, atol=ATOL)
def testFFT(self):
diff --git a/tensorflow/compiler/tests/fifo_queue_test.py b/tensorflow/compiler/tests/fifo_queue_test.py
new file mode 100644
index 0000000000..0f64cc87cd
--- /dev/null
+++ b/tensorflow/compiler/tests/fifo_queue_test.py
@@ -0,0 +1,201 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import time
+
+from six.moves import xrange # pylint: disable=redefined-builtin
+
+from tensorflow.compiler.tests import xla_test
+from tensorflow.python.framework import dtypes as dtypes_lib
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.platform import test
+
+
+class FIFOQueueTest(xla_test.XLATestCase):
+
+ def testEnqueue(self):
+ with self.test_session(), self.test_scope():
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
+ enqueue_op = q.enqueue((10.0,))
+ enqueue_op.run()
+
+ def testEnqueueWithShape(self):
+ with self.test_session(), self.test_scope():
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2))
+ enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
+ enqueue_correct_op.run()
+ with self.assertRaises(ValueError):
+ q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
+ self.assertEqual(1, q.size().eval())
+
+ def testMultipleDequeues(self):
+ with self.test_session(), self.test_scope():
+ q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
+ self.evaluate(q.enqueue([1]))
+ self.evaluate(q.enqueue([2]))
+ self.evaluate(q.enqueue([3]))
+ a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()])
+ self.assertAllEqual(set([1, 2, 3]), set([a, b, c]))
+
+ def testQueuesDontShare(self):
+ with self.test_session(), self.test_scope():
+ q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
+ self.evaluate(q.enqueue(1))
+ q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
+ self.evaluate(q2.enqueue(2))
+ self.assertAllEqual(self.evaluate(q2.dequeue()), 2)
+ self.assertAllEqual(self.evaluate(q.dequeue()), 1)
+
+ def testEnqueueDictWithoutNames(self):
+ with self.test_session(), self.test_scope():
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
+ with self.assertRaisesRegexp(ValueError, "must have names"):
+ q.enqueue({"a": 12.0})
+
+ def testParallelEnqueue(self):
+ with self.test_session() as sess, self.test_scope():
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
+ elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+ enqueue_ops = [q.enqueue((x,)) for x in elems]
+ dequeued_t = q.dequeue()
+
+ # Run one producer thread for each element in elems.
+ def enqueue(enqueue_op):
+ sess.run(enqueue_op)
+
+ threads = [
+ self.checkedThread(target=enqueue, args=(e,)) for e in enqueue_ops
+ ]
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
+
+ # Dequeue every element using a single thread.
+ results = []
+ for _ in xrange(len(elems)):
+ results.append(dequeued_t.eval())
+ self.assertItemsEqual(elems, results)
+
+ def testParallelDequeue(self):
+ with self.test_session() as sess, self.test_scope():
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
+ elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+ enqueue_ops = [q.enqueue((x,)) for x in elems]
+ dequeued_t = q.dequeue()
+
+ # Enqueue every element using a single thread.
+ for enqueue_op in enqueue_ops:
+ enqueue_op.run()
+
+ # Run one consumer thread for each element in elems.
+ results = []
+
+ def dequeue():
+ results.append(sess.run(dequeued_t))
+
+ threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
+ self.assertItemsEqual(elems, results)
+
+ def testDequeue(self):
+ with self.test_session(), self.test_scope():
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
+ elems = [10.0, 20.0, 30.0]
+ enqueue_ops = [q.enqueue((x,)) for x in elems]
+ dequeued_t = q.dequeue()
+
+ for enqueue_op in enqueue_ops:
+ enqueue_op.run()
+
+ for i in xrange(len(elems)):
+ vals = dequeued_t.eval()
+ self.assertEqual([elems[i]], vals)
+
+ def testEnqueueAndBlockingDequeue(self):
+ with self.test_session() as sess, self.test_scope():
+ q = data_flow_ops.FIFOQueue(3, dtypes_lib.float32)
+ elems = [10.0, 20.0, 30.0]
+ enqueue_ops = [q.enqueue((x,)) for x in elems]
+ dequeued_t = q.dequeue()
+
+ def enqueue():
+ # The enqueue_ops should run after the dequeue op has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ for enqueue_op in enqueue_ops:
+ sess.run(enqueue_op)
+
+ results = []
+
+ def dequeue():
+ for _ in xrange(len(elems)):
+ results.append(sess.run(dequeued_t))
+
+ enqueue_thread = self.checkedThread(target=enqueue)
+ dequeue_thread = self.checkedThread(target=dequeue)
+ enqueue_thread.start()
+ dequeue_thread.start()
+ enqueue_thread.join()
+ dequeue_thread.join()
+
+ for elem, result in zip(elems, results):
+ self.assertEqual([elem], result)
+
+ def testMultiEnqueueAndDequeue(self):
+ with self.test_session() as sess, self.test_scope():
+ q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32))
+ elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
+ enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
+ dequeued_t = q.dequeue()
+
+ for enqueue_op in enqueue_ops:
+ enqueue_op.run()
+
+ for i in xrange(len(elems)):
+ x_val, y_val = sess.run(dequeued_t)
+ x, y = elems[i]
+ self.assertEqual([x], x_val)
+ self.assertEqual([y], y_val)
+
+ def testQueueSizeEmpty(self):
+ with self.test_session(), self.test_scope():
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
+ self.assertEqual([0], q.size().eval())
+
+ def testQueueSizeAfterEnqueueAndDequeue(self):
+ with self.test_session(), self.test_scope():
+ q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
+ enqueue_op = q.enqueue((10.0,))
+ dequeued_t = q.dequeue()
+ size = q.size()
+ self.assertEqual([], size.get_shape())
+
+ enqueue_op.run()
+ self.assertEqual(1, size.eval())
+ dequeued_t.op.run()
+ self.assertEqual(0, size.eval())
+
+
+if __name__ == "__main__":
+ test.main()
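
For context on the API these tests drive: a FIFOQueue hands elements back in exactly the order they were enqueued, one element per dequeue. A minimal graph-mode sketch of that contract, outside any XLA scope (assumes a TF 1.x-era runtime where tf.FIFOQueue and tf.Session are available; illustrative only):

import tensorflow as tf

q = tf.FIFOQueue(capacity=3, dtypes=tf.float32, shapes=[()])
dequeued = q.dequeue()
with tf.Session() as sess:
  for v in [10.0, 20.0, 30.0]:
    sess.run(q.enqueue((v,)))
  # Elements come back in insertion order: 10.0, then 20.0, then 30.0.
  print([sess.run(dequeued) for _ in range(3)])
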
diff --git a/tensorflow/compiler/tests/ftrl_test.py b/tensorflow/compiler/tests/ftrl_test.py
index 8e6407dffd..1da97fd512 100644
--- a/tensorflow/compiler/tests/ftrl_test.py
+++ b/tensorflow/compiler/tests/ftrl_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
@@ -30,7 +30,7 @@ from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
-class FtrlOptimizerTest(XLATestCase):
+class FtrlOptimizerTest(xla_test.XLATestCase):
def initVariableAndGradient(self, dtype):
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
diff --git a/tensorflow/compiler/tests/function_test.py b/tensorflow/compiler/tests/function_test.py
index 8a3f4b0bdc..04fba44446 100644
--- a/tensorflow/compiler/tests/function_test.py
+++ b/tensorflow/compiler/tests/function_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
@@ -28,7 +28,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
-class FunctionTest(XLATestCase):
+class FunctionTest(xla_test.XLATestCase):
def testFunction(self):
"""Executes a simple TensorFlow function."""
diff --git a/tensorflow/compiler/tests/fused_batchnorm_test.py b/tensorflow/compiler/tests/fused_batchnorm_test.py
index 34cca512d4..132e42ac7a 100644
--- a/tensorflow/compiler/tests/fused_batchnorm_test.py
+++ b/tensorflow/compiler/tests/fused_batchnorm_test.py
@@ -22,7 +22,7 @@ from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import test_utils
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
@@ -30,7 +30,7 @@ from tensorflow.python.ops import nn
from tensorflow.python.platform import test
-class FusedBatchNormTest(XLATestCase, parameterized.TestCase):
+class FusedBatchNormTest(xla_test.XLATestCase, parameterized.TestCase):
def _reference_training(self, x, scale, offset, epsilon, data_format):
if data_format != "NHWC":
@@ -126,10 +126,6 @@ class FusedBatchNormTest(XLATestCase, parameterized.TestCase):
y_ref, mean_ref, var_ref = self._reference_training(
x_val, scale_val, offset_val, epsilon, data_format_src)
- # TODO(b/110530713): Support data format HWCN on GPU
- if self.device == "XLA_GPU" and data_format == "HWCN":
- self.skipTest("GPU does not support data format HWCN.")
-
with self.test_session() as sess, self.test_scope():
# To avoid constant folding
x_val_converted = test_utils.ConvertBetweenDataFormats(
@@ -214,10 +210,6 @@ class FusedBatchNormTest(XLATestCase, parameterized.TestCase):
grad_x_ref, grad_scale_ref, grad_offset_ref = self._reference_grad(
x_val, grad_val, scale_val, mean_val, var_val, epsilon, data_format_src)
- # TODO(b/110530713): Support data format HWCN on GPU
- if self.device == "XLA_GPU" and data_format == "HWCN":
- self.skipTest("GPU does not support data format HWCN.")
-
with self.test_session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
@@ -268,10 +260,6 @@ class FusedBatchNormTest(XLATestCase, parameterized.TestCase):
var_val = np.random.random_sample(scale_shape).astype(np.float32)
data_format_src = "NHWC"
- # TODO(b/110530713): Support data format HWCN on GPU
- if self.device == "XLA_GPU" and data_format == "HWCN":
- self.skipTest("GPU does not support data format HWCN.")
-
with self.test_session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
diff --git a/tensorflow/compiler/tests/gather_nd_op_test.py b/tensorflow/compiler/tests/gather_nd_op_test.py
index 9378b1db72..23b0aed34f 100644
--- a/tensorflow/compiler/tests/gather_nd_op_test.py
+++ b/tensorflow/compiler/tests/gather_nd_op_test.py
@@ -20,13 +20,13 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
-class GatherNdTest(XLATestCase):
+class GatherNdTest(xla_test.XLATestCase):
def _runGather(self, params, indices):
with self.test_session():
diff --git a/tensorflow/compiler/tests/gather_test.py b/tensorflow/compiler/tests/gather_test.py
index 1a8c451911..e9c8ef7c91 100644
--- a/tensorflow/compiler/tests/gather_test.py
+++ b/tensorflow/compiler/tests/gather_test.py
@@ -136,6 +136,20 @@ class GatherTest(xla_test.XLATestCase):
self.assertAllEqual(
[[7]], gather.eval(feed_dict={params: [4, 7, 2], indices: [[1]]}))
+ def testGatherPrecision(self):
+ with self.test_session() as session, self.test_scope():
+ data = np.array([[0, 0, 0, 0], [0, 2 * (1 + np.exp2(-8)), 0, 0],
+ [0, 0, 0, 0], [0.015789, 0.0985, 0.55789, 0.3842]])
+ indices = np.array([1, 2, 3, 1])
+ dtype = dtypes.float32
+ params_np = self._buildParams(data, dtype)
+ params = array_ops.placeholder(dtype=dtype)
+ indices_tf = constant_op.constant(indices)
+ gather_t = array_ops.gather(params, indices_tf)
+ gather_val = session.run(gather_t, feed_dict={params: params_np})
+ np_val = params_np[indices]
+ self.assertAllEqual(np_val, gather_val)
+
class GatherBenchmark(test.Benchmark):
"""Microbenchmarks for the gather op."""
diff --git a/tensorflow/compiler/tests/image_ops_test.py b/tensorflow/compiler/tests/image_ops_test.py
index 7cf953ef25..8b01ef96db 100644
--- a/tensorflow/compiler/tests/image_ops_test.py
+++ b/tensorflow/compiler/tests/image_ops_test.py
@@ -25,7 +25,7 @@ import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
@@ -41,7 +41,7 @@ def GenerateNumpyRandomRGB(shape):
return np.random.randint(0, 256, shape) / 256.
-class RGBToHSVTest(XLATestCase):
+class RGBToHSVTest(xla_test.XLATestCase):
def testBatch(self):
# Build an arbitrary RGB image
@@ -104,7 +104,7 @@ class RGBToHSVTest(XLATestCase):
self.assertAllCloseAccordingToType(hsv_tf, hsv_np)
-class AdjustContrastTest(XLATestCase):
+class AdjustContrastTest(xla_test.XLATestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.test_session():
@@ -168,7 +168,7 @@ class AdjustContrastTest(XLATestCase):
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
-class AdjustHueTest(XLATestCase):
+class AdjustHueTest(xla_test.XLATestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
@@ -303,7 +303,7 @@ class AdjustHueTest(XLATestCase):
self._adjustHueTf(x_np, delta_h)
-class AdjustSaturationTest(XLATestCase):
+class AdjustSaturationTest(xla_test.XLATestCase):
def _adjust_saturation(self, image, saturation_factor):
image = ops.convert_to_tensor(image, name="image")
@@ -403,7 +403,7 @@ class AdjustSaturationTest(XLATestCase):
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
-class ResizeBilinearTest(XLATestCase):
+class ResizeBilinearTest(xla_test.XLATestCase):
def _assertForwardOpMatchesExpected(self,
image_np,
diff --git a/tensorflow/compiler/tests/lrn_ops_test.py b/tensorflow/compiler/tests/lrn_ops_test.py
index 69bd8f7230..253b45902f 100644
--- a/tensorflow/compiler/tests/lrn_ops_test.py
+++ b/tensorflow/compiler/tests/lrn_ops_test.py
@@ -22,7 +22,7 @@ import copy
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
@@ -36,7 +36,7 @@ CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
# Local response normalization tests. The forward tests are copied from
# tensorflow/python/kernel_tests/lrn_op_test.py
-class LRNTest(XLATestCase):
+class LRNTest(xla_test.XLATestCase):
def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
beta=0.5):
diff --git a/tensorflow/compiler/tests/matrix_band_part_test.py b/tensorflow/compiler/tests/matrix_band_part_test.py
index 29394f9ea5..0d9f99f8a6 100644
--- a/tensorflow/compiler/tests/matrix_band_part_test.py
+++ b/tensorflow/compiler/tests/matrix_band_part_test.py
@@ -19,14 +19,14 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
-class MatrixBandPartTest(XLATestCase):
+class MatrixBandPartTest(xla_test.XLATestCase):
def _testMatrixBandPart(self, dtype, shape):
with self.test_session():
diff --git a/tensorflow/compiler/tests/matrix_triangular_solve_op_test.py b/tensorflow/compiler/tests/matrix_triangular_solve_op_test.py
index 5819b2bf2b..2bb8a97bda 100644
--- a/tensorflow/compiler/tests/matrix_triangular_solve_op_test.py
+++ b/tensorflow/compiler/tests/matrix_triangular_solve_op_test.py
@@ -22,7 +22,7 @@ import itertools
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
@@ -35,7 +35,7 @@ def MakePlaceholder(x):
return array_ops.placeholder(dtypes.as_dtype(x.dtype), shape=x.shape)
-class MatrixTriangularSolveOpTest(XLATestCase):
+class MatrixTriangularSolveOpTest(xla_test.XLATestCase):
# MatrixTriangularSolve defined for float64, float32, complex64, complex128
# (https://www.tensorflow.org/api_docs/python/tf/matrix_triangular_solve)
diff --git a/tensorflow/compiler/tests/momentum_test.py b/tensorflow/compiler/tests/momentum_test.py
index af9394e7d7..c2592c54cf 100644
--- a/tensorflow/compiler/tests/momentum_test.py
+++ b/tensorflow/compiler/tests/momentum_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
@@ -30,7 +30,7 @@ from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
-class MomentumOptimizerTest(XLATestCase):
+class MomentumOptimizerTest(xla_test.XLATestCase):
def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum):
var += accum * lr * momentum
diff --git a/tensorflow/compiler/tests/nary_ops_test.py b/tensorflow/compiler/tests/nary_ops_test.py
index e4843b169b..da08225e9f 100644
--- a/tensorflow/compiler/tests/nary_ops_test.py
+++ b/tensorflow/compiler/tests/nary_ops_test.py
@@ -22,14 +22,14 @@ import unittest
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
-class NAryOpsTest(XLATestCase):
+class NAryOpsTest(xla_test.XLATestCase):
def _testNAry(self, op, args, expected, equality_fn=None):
with self.test_session() as session:
diff --git a/tensorflow/compiler/tests/nullary_ops_test.py b/tensorflow/compiler/tests/nullary_ops_test.py
index 6f588d8ab5..2f9122645d 100644
--- a/tensorflow/compiler/tests/nullary_ops_test.py
+++ b/tensorflow/compiler/tests/nullary_ops_test.py
@@ -20,13 +20,13 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import googletest
-class NullaryOpsTest(XLATestCase):
+class NullaryOpsTest(xla_test.XLATestCase):
def _testNullary(self, op, expected):
with self.test_session() as session:
diff --git a/tensorflow/compiler/tests/placeholder_test.py b/tensorflow/compiler/tests/placeholder_test.py
index 5e6d1313bd..a75d99189b 100644
--- a/tensorflow/compiler/tests/placeholder_test.py
+++ b/tensorflow/compiler/tests/placeholder_test.py
@@ -18,14 +18,14 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
-class PlaceholderTest(XLATestCase):
+class PlaceholderTest(xla_test.XLATestCase):
def test_placeholder_with_default_default(self):
with self.test_session() as sess, self.test_scope():
diff --git a/tensorflow/compiler/tests/pooling_ops_3d_test.py b/tensorflow/compiler/tests/pooling_ops_3d_test.py
index d9285186ba..17f860db61 100644
--- a/tensorflow/compiler/tests/pooling_ops_3d_test.py
+++ b/tensorflow/compiler/tests/pooling_ops_3d_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
@@ -41,7 +41,7 @@ def _AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding):
padding=padding)
-class Pooling3DTest(XLATestCase):
+class Pooling3DTest(xla_test.XLATestCase):
def _VerifyValues(self, pool_func, input_sizes, window, strides, padding,
expected):
diff --git a/tensorflow/compiler/tests/pooling_ops_test.py b/tensorflow/compiler/tests/pooling_ops_test.py
index fe270af3d6..9fc94752ea 100644
--- a/tensorflow/compiler/tests/pooling_ops_test.py
+++ b/tensorflow/compiler/tests/pooling_ops_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
@@ -69,7 +69,7 @@ def GetTestConfigs():
return test_configs
-class PoolingTest(XLATestCase):
+class PoolingTest(xla_test.XLATestCase):
def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
data_format, expected):
@@ -288,7 +288,7 @@ class PoolingTest(XLATestCase):
expected=expected_output)
-class PoolGradTest(XLATestCase):
+class PoolGradTest(xla_test.XLATestCase):
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
diff --git a/tensorflow/compiler/tests/powersign_test.py b/tensorflow/compiler/tests/powersign_test.py
new file mode 100644
index 0000000000..5fa7706d72
--- /dev/null
+++ b/tensorflow/compiler/tests/powersign_test.py
@@ -0,0 +1,142 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for PowerSign."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+import numpy as np
+
+from tensorflow.compiler.tests import xla_test
+from tensorflow.contrib.opt.python.training import powersign
+from tensorflow.contrib.opt.python.training import sign_decay
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import resource_variable_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+
+
+def py_linear_decay_fn(decay_steps):
+ def linear_decay(step):
+ step = min(step, decay_steps)
+ return float(decay_steps - step) / decay_steps
+ return linear_decay
+
+
+def powersign_update_numpy(params,
+ g_t,
+ m,
+ lr,
+ base=math.e,
+ beta=0.9,
+ py_sign_decay_fn=None,
+ t=None):
+ m_t = beta * m + (1 - beta) * g_t
+ if py_sign_decay_fn is None:
+ sign_decayed = 1.0
+ else:
+ sign_decayed = py_sign_decay_fn(t-1)
+ multiplier = base ** (sign_decayed * np.sign(g_t) * np.sign(m_t))
+ params_t = params - lr * multiplier * g_t
+ return params_t, m_t
+
+
+class PowerSignTest(xla_test.XLATestCase):
+
+ def _testDense(self,
+ learning_rate=0.1,
+ sign_decay_fn=None,
+ py_sign_decay_fn=None,
+ base=math.e,
+ beta=0.9):
+ for dtype in self.float_types:
+ with self.test_session(), self.test_scope():
+ # Initialize variables for numpy implementation.
+ m0, m1 = 0.0, 0.0
+ var0_np = np.array([1.0, 2.0], dtype=dtype)
+ grads0_np = np.array([0.1, 0.1], dtype=dtype)
+ var1_np = np.array([3.0, 4.0], dtype=dtype)
+ grads1_np = np.array([0.01, 0.01], dtype=dtype)
+
+ var0 = resource_variable_ops.ResourceVariable(var0_np)
+ var1 = resource_variable_ops.ResourceVariable(var1_np)
+ global_step = resource_variable_ops.ResourceVariable(0, trainable=False)
+ grads0 = constant_op.constant(grads0_np)
+ grads1 = constant_op.constant(grads1_np)
+
+ opt = powersign.PowerSignOptimizer(
+ learning_rate=learning_rate,
+ base=base,
+ beta=beta,
+ sign_decay_fn=sign_decay_fn,
+ )
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
+ global_step=global_step)
+ neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
+ global_step=global_step)
+
+ variables.global_variables_initializer().run()
+ # Fetch params to validate initial values
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([3.0, 4.0], var1.eval())
+
+      # Run 7 steps of PowerSign:
+      # the first 4 steps with a positive gradient,
+      # the last 3 with a negative gradient (so sign(g) * sign(m) is -1).
+ for t in range(1, 8):
+ if t < 5:
+ update.run()
+ else:
+ neg_update.run()
+
+ var0_np, m0 = powersign_update_numpy(
+ var0_np,
+ grads0_np if t < 5 else -grads0_np,
+ m0,
+ learning_rate,
+ base=base,
+ beta=beta,
+ py_sign_decay_fn=py_sign_decay_fn,
+ t=t,
+ )
+ var1_np, m1 = powersign_update_numpy(
+ var1_np,
+ grads1_np if t < 5 else -grads1_np,
+ m1,
+ learning_rate,
+ base=base,
+ beta=beta,
+ py_sign_decay_fn=py_sign_decay_fn,
+ t=t,
+ )
+
+ # Validate updated params
+ self.assertAllCloseAccordingToType(var0_np, var0.eval())
+ self.assertAllCloseAccordingToType(var1_np, var1.eval())
+
+ def testDense(self):
+ decay_steps = 10
+ sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
+ py_sign_decay_fn = py_linear_decay_fn(decay_steps)
+ self._testDense()
+ self._testDense(learning_rate=0.1, base=10.0, beta=0.8)
+ self._testDense(
+ sign_decay_fn=sign_decay_fn, py_sign_decay_fn=py_sign_decay_fn)
+
+
+if __name__ == '__main__':
+ test.main()
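
The powersign_update_numpy helper above encodes the PowerSign rule: m_t = beta * m + (1 - beta) * g_t, then var_t = var - lr * base ** (d * sign(g_t) * sign(m_t)) * g_t, where d is the (optionally decayed) sign coefficient. A one-step standalone check with arbitrary illustrative values; when gradient and moving average agree in sign and d = 1, the step is scaled by base ** 1:

import math
import numpy as np

params = np.array([1.0, 2.0])
g = np.array([0.1, -0.1])
m = np.zeros_like(params)
beta, lr = 0.9, 0.1

m_t = beta * m + (1 - beta) * g
# On the first step sign(m_t) == sign(g), so the multiplier is e**1 ~ 2.718.
multiplier = math.e ** (np.sign(g) * np.sign(m_t))
print(params - lr * multiplier * g)  # larger step wherever the signs agree
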
diff --git a/tensorflow/compiler/tests/proximal_adagrad_test.py b/tensorflow/compiler/tests/proximal_adagrad_test.py
new file mode 100644
index 0000000000..cde87db63d
--- /dev/null
+++ b/tensorflow/compiler/tests/proximal_adagrad_test.py
@@ -0,0 +1,172 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for Proximal Adagrad optimizer."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.compiler.tests import xla_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import resource_variable_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+from tensorflow.python.training import adagrad
+from tensorflow.python.training import proximal_adagrad
+
+
+class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
+
+ def testResourceProximalAdagradwithoutRegularization(self):
+ with self.test_session(), self.test_scope():
+ var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
+ var1 = resource_variable_ops.ResourceVariable([0.0, 0.0])
+ grads0 = constant_op.constant([0.1, 0.2])
+ grads1 = constant_op.constant([0.01, 0.02])
+ opt = proximal_adagrad.ProximalAdagradOptimizer(
+ 3.0,
+ initial_accumulator_value=0.1,
+ l1_regularization_strength=0.0,
+ l2_regularization_strength=0.0)
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ self.assertAllClose([0.0, 0.0], var0.eval())
+ self.assertAllClose([0.0, 0.0], var1.eval())
+
+      # Run 3 steps of Proximal Adagrad.
+ for _ in range(3):
+ update.run()
+
+ self.assertAllClose(np.array([-2.60260963, -4.29698515]), var0.eval())
+ self.assertAllClose(np.array([-0.28432083, -0.56694895]), var1.eval())
+ opt_vars = opt.variables()
+ self.assertStartsWith(opt_vars[0].name, var0._shared_name)
+ self.assertStartsWith(opt_vars[1].name, var1._shared_name)
+ self.assertEqual(2, len(opt_vars))
+
+ def testProximalAdagradwithoutRegularization2(self):
+ with self.test_session(), self.test_scope():
+ var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
+ var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
+ grads0 = constant_op.constant([0.1, 0.2])
+ grads1 = constant_op.constant([0.01, 0.02])
+
+ opt = proximal_adagrad.ProximalAdagradOptimizer(
+ 3.0,
+ initial_accumulator_value=0.1,
+ l1_regularization_strength=0.0,
+ l2_regularization_strength=0.0)
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([4.0, 3.0], var1.eval())
+
+      # Run 3 steps of Proximal Adagrad.
+ for _ in range(3):
+ update.run()
+ self.assertAllClose(np.array([-1.60261, -2.296985]), var0.eval())
+ self.assertAllClose(np.array([3.715679, 2.433051]), var1.eval())
+
+ def testProximalAdagradWithL1(self):
+ with self.test_session(), self.test_scope():
+ var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
+ var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
+ grads0 = constant_op.constant([0.1, 0.2])
+ grads1 = constant_op.constant([0.01, 0.02])
+
+ opt = proximal_adagrad.ProximalAdagradOptimizer(
+ 3.0,
+ initial_accumulator_value=0.1,
+ l1_regularization_strength=0.001,
+ l2_regularization_strength=0.0)
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([4.0, 3.0], var1.eval())
+
+      # Run 10 steps of Proximal Adagrad.
+ for _ in range(10):
+ update.run()
+ self.assertAllClose(np.array([-6.663634, -9.190331]), var0.eval())
+ self.assertAllClose(np.array([2.959304, 1.029232]), var1.eval())
+
+ def testProximalAdagradWithL1_L2(self):
+ with self.test_session(), self.test_scope():
+ var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
+ var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
+ grads0 = constant_op.constant([0.1, 0.2])
+ grads1 = constant_op.constant([0.01, 0.02])
+
+ opt = proximal_adagrad.ProximalAdagradOptimizer(
+ 3.0,
+ initial_accumulator_value=0.1,
+ l1_regularization_strength=0.001,
+ l2_regularization_strength=2.0)
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([4.0, 3.0], var1.eval())
+
+      # Run 10 steps of Proximal Adagrad.
+ for _ in range(10):
+ update.run()
+
+ self.assertAllClose(np.array([-0.0495, -0.0995]), var0.eval())
+ self.assertAllClose(np.array([-0.0045, -0.0095]), var1.eval())
+
+ def applyOptimizer(self, opt, steps=5):
+ var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
+ var1 = resource_variable_ops.ResourceVariable([3.0, 4.0])
+ grads0 = constant_op.constant([0.1, 0.2])
+ grads1 = constant_op.constant([0.01, 0.02])
+
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([3.0, 4.0], var1.eval())
+
+ # Run ProximalAdagrad for a few steps
+ for _ in range(steps):
+ update.run()
+
+ return var0.eval(), var1.eval()
+
+ def testEquivAdagradwithoutRegularization(self):
+ with self.test_session(), self.test_scope():
+ val0, val1 = self.applyOptimizer(
+ proximal_adagrad.ProximalAdagradOptimizer(
+ 3.0,
+ initial_accumulator_value=0.1,
+ l1_regularization_strength=0.0,
+ l2_regularization_strength=0.0))
+
+ with self.test_session(), self.test_scope():
+ val2, val3 = self.applyOptimizer(
+ adagrad.AdagradOptimizer(
+ 3.0, initial_accumulator_value=0.1))
+
+ self.assertAllClose(val0, val2)
+ self.assertAllClose(val1, val3)
+
+
+if __name__ == "__main__":
+ test.main()
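
testEquivAdagradwithoutRegularization above leans on the fact that Proximal Adagrad with l1 = l2 = 0 degenerates to plain Adagrad. A numpy sketch of that shared Adagrad step (the textbook accumulator form, not the TF kernel itself; values mirror applyOptimizer above):

import numpy as np

var0 = np.array([1.0, 2.0])
grads0 = np.array([0.1, 0.2])
acc = np.full_like(var0, 0.1)  # initial_accumulator_value=0.1
lr = 3.0

for _ in range(5):
  acc += grads0 * grads0             # accumulate squared gradients
  var0 -= lr * grads0 / np.sqrt(acc)
print(var0)  # what both optimizers produce when l1 = l2 = 0
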
diff --git a/tensorflow/compiler/tests/proximal_gradient_descent_test.py b/tensorflow/compiler/tests/proximal_gradient_descent_test.py
new file mode 100644
index 0000000000..11eb768711
--- /dev/null
+++ b/tensorflow/compiler/tests/proximal_gradient_descent_test.py
@@ -0,0 +1,156 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for Proximal Gradient Descent optimizer."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.compiler.tests import xla_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import resource_variable_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+from tensorflow.python.training import gradient_descent
+from tensorflow.python.training import proximal_gradient_descent
+
+
+class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
+
+ def testResourceProximalGradientDescentwithoutRegularization(self):
+ with self.test_session(), self.test_scope():
+ var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
+ var1 = resource_variable_ops.ResourceVariable([0.0, 0.0])
+ grads0 = constant_op.constant([0.1, 0.2])
+ grads1 = constant_op.constant([0.01, 0.02])
+ opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
+ 3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ self.assertAllClose([0.0, 0.0], var0.eval())
+ self.assertAllClose([0.0, 0.0], var1.eval())
+
+      # Run 3 steps of Proximal Gradient Descent.
+ for _ in range(3):
+ update.run()
+
+ self.assertAllClose(np.array([-0.9, -1.8]), var0.eval())
+ self.assertAllClose(np.array([-0.09, -0.18]), var1.eval())
+
+ def testProximalGradientDescentwithoutRegularization2(self):
+ with self.test_session(), self.test_scope():
+ var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
+ var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
+ grads0 = constant_op.constant([0.1, 0.2])
+ grads1 = constant_op.constant([0.01, 0.02])
+
+ opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
+ 3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([4.0, 3.0], var1.eval())
+
+      # Run 3 steps of Proximal Gradient Descent.
+ for _ in range(3):
+ update.run()
+
+ self.assertAllClose(np.array([0.1, 0.2]), var0.eval())
+ self.assertAllClose(np.array([3.91, 2.82]), var1.eval())
+
+ def testProximalGradientDescentWithL1(self):
+ with self.test_session(), self.test_scope():
+ var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
+ var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
+ grads0 = constant_op.constant([0.1, 0.2])
+ grads1 = constant_op.constant([0.01, 0.02])
+
+ opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
+ 3.0, l1_regularization_strength=0.001, l2_regularization_strength=0.0)
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([4.0, 3.0], var1.eval())
+
+      # Run 10 steps of Proximal Gradient Descent.
+ for _ in range(10):
+ update.run()
+
+ self.assertAllClose(np.array([-1.988, -3.988001]), var0.eval())
+ self.assertAllClose(np.array([3.67, 2.37]), var1.eval())
+
+ def testProximalGradientDescentWithL1_L2(self):
+ with self.test_session(), self.test_scope():
+ var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
+ var1 = resource_variable_ops.ResourceVariable([4.0, 3.0])
+ grads0 = constant_op.constant([0.1, 0.2])
+ grads1 = constant_op.constant([0.01, 0.02])
+
+ opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
+ 3.0, l1_regularization_strength=0.001, l2_regularization_strength=2.0)
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([4.0, 3.0], var1.eval())
+
+      # Run 10 steps of Proximal Gradient Descent.
+ for _ in range(10):
+ update.run()
+
+ self.assertAllClose(np.array([-0.0495, -0.0995]), var0.eval())
+ self.assertAllClose(np.array([-0.0045, -0.0095]), var1.eval())
+
+ def applyOptimizer(self, opt, steps=5):
+ var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
+ var1 = resource_variable_ops.ResourceVariable([3.0, 4.0])
+ grads0 = constant_op.constant([0.1, 0.2])
+ grads1 = constant_op.constant([0.01, 0.02])
+
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([3.0, 4.0], var1.eval())
+
+    # Run ProximalGradientDescent for a few steps.
+ for _ in range(steps):
+ update.run()
+
+ return var0.eval(), var1.eval()
+
+ def testEquivGradientDescentwithoutRegularization(self):
+ with self.test_session(), self.test_scope():
+ val0, val1 = self.applyOptimizer(
+ proximal_gradient_descent.ProximalGradientDescentOptimizer(
+ 3.0,
+ l1_regularization_strength=0.0,
+ l2_regularization_strength=0.0))
+
+ with self.test_session(), self.test_scope():
+ val2, val3 = self.applyOptimizer(
+ gradient_descent.GradientDescentOptimizer(3.0))
+
+ self.assertAllClose(val0, val2)
+ self.assertAllClose(val1, val3)
+
+
+if __name__ == "__main__":
+ test.main()
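
Likewise, testEquivGradientDescentwithoutRegularization works because the proximal step is the identity when l1 = l2 = 0, leaving an ordinary gradient-descent update. A standalone check against the first test in this file (illustrative only):

import numpy as np

var0 = np.array([0.0, 0.0])
grads0 = np.array([0.1, 0.2])
lr = 3.0

for _ in range(3):
  var0 -= lr * grads0  # the proximal term drops out when l1 = l2 = 0
print(var0)  # [-0.9, -1.8], the values asserted above
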
diff --git a/tensorflow/compiler/tests/qr_op_test.py b/tensorflow/compiler/tests/qr_op_test.py
new file mode 100644
index 0000000000..93752a21db
--- /dev/null
+++ b/tensorflow/compiler/tests/qr_op_test.py
@@ -0,0 +1,112 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import itertools
+
+from absl.testing import parameterized
+import numpy as np
+
+from tensorflow.compiler.tests import xla_test
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import linalg_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
+
+
+class QrOpTest(xla_test.XLATestCase, parameterized.TestCase):
+
+ def AdjustedNorm(self, x):
+ """Computes the norm of matrices in 'x', adjusted for dimension and type."""
+ norm = np.linalg.norm(x, axis=(-2, -1))
+ return norm / (max(x.shape[-2:]) * np.finfo(x.dtype).eps)
+
+ def CompareOrthogonal(self, x, y, rank):
+ # We only compare the first 'rank' orthogonal vectors since the
+ # remainder form an arbitrary orthonormal basis for the
+ # (row- or column-) null space, whose exact value depends on
+ # implementation details. Notice that since we check that the
+ # matrices of singular vectors are unitary elsewhere, we do
+ # implicitly test that the trailing vectors of x and y span the
+ # same space.
+ x = x[..., 0:rank]
+ y = y[..., 0:rank]
+ # Q is only unique up to sign (complex phase factor for complex matrices),
+ # so we normalize the sign first.
+ sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
+ phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
+ x *= phases
+ self.assertTrue(np.all(self.AdjustedNorm(x - y) < 30.0))
+
+ def CheckApproximation(self, a, q, r):
+ # Tests that a ~= q*r.
+ precision = self.AdjustedNorm(a - np.matmul(q, r))
+ self.assertTrue(np.all(precision < 5.0))
+
+ def CheckUnitary(self, x):
+ # Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
+ xx = math_ops.matmul(x, x, adjoint_a=True)
+ identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
+ precision = self.AdjustedNorm(xx.eval() - identity.eval())
+ self.assertTrue(np.all(precision < 5.0))
+
+ def _test(self, dtype, shape, full_matrices):
+ np.random.seed(1)
+ x_np = np.random.uniform(
+ low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype)
+
+ with self.test_session() as sess:
+ x_tf = array_ops.placeholder(dtype)
+ with self.test_scope():
+ q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices)
+ q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})
+
+ q_dims = q_tf_val.shape
+ np_q = np.ndarray(q_dims, dtype)
+ np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
+ new_first_dim = np_q_reshape.shape[0]
+
+ x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
+ for i in range(new_first_dim):
+ if full_matrices:
+ np_q_reshape[i, :, :], _ = np.linalg.qr(
+ x_reshape[i, :, :], mode="complete")
+ else:
+ np_q_reshape[i, :, :], _ = np.linalg.qr(
+ x_reshape[i, :, :], mode="reduced")
+ np_q = np.reshape(np_q_reshape, q_dims)
+ self.CompareOrthogonal(np_q, q_tf_val, min(shape[-2:]))
+ self.CheckApproximation(x_np, q_tf_val, r_tf_val)
+ self.CheckUnitary(q_tf_val)
+
+ SIZES = [1, 2, 5, 10, 32, 100, 300]
+ DTYPES = [np.float32]
+ PARAMS = itertools.product(SIZES, SIZES, DTYPES)
+
+ @parameterized.parameters(*PARAMS)
+ def testQR(self, rows, cols, dtype):
+ # TODO(b/111317468): implement full_matrices=False, test other types.
+ for full_matrices in [True]:
+ # Only tests the (3, 2) case for small numbers of rows/columns.
+ for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
+ self._test(dtype, batch_dims + (rows, cols), full_matrices)
+
+
+if __name__ == "__main__":
+ test.main()
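
The phase normalization in CompareOrthogonal above is needed because a QR factorization is only unique up to a per-column sign (a complex phase in general). A small numpy illustration of the ambiguity and of dividing the phase back out (illustrative values; not part of the test):

import numpy as np

a = np.random.RandomState(0).rand(4, 3)
q, r = np.linalg.qr(a)
# Flipping the sign of a column of q (and the matching row of r) yields an
# equally valid factorization.
q2, r2 = q.copy(), r.copy()
q2[:, 0] *= -1
r2[0, :] *= -1
print(np.allclose(a, q2.dot(r2)))  # True: still a valid QR of a
# Normalizing the per-column phase, as CompareOrthogonal does, recovers q.
phases = np.sign(np.sum(q / q2, axis=0))
print(np.allclose(q2 * phases, q))  # True
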
diff --git a/tensorflow/compiler/tests/random_ops_test.py b/tensorflow/compiler/tests/random_ops_test.py
index 2e71b00ba6..14c5e7a975 100644
--- a/tensorflow/compiler/tests/random_ops_test.py
+++ b/tensorflow/compiler/tests/random_ops_test.py
@@ -22,7 +22,7 @@ import math
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
@@ -31,7 +31,7 @@ from tensorflow.python.ops.distributions import special_math
from tensorflow.python.platform import googletest
-class RandomOpsTest(XLATestCase):
+class RandomOpsTest(xla_test.XLATestCase):
"""Test cases for random-number generating operators."""
def _random_types(self):
@@ -140,10 +140,10 @@ class RandomOpsTest(XLATestCase):
def testShuffle1d(self):
with self.test_session() as sess:
with self.test_scope():
- x = math_ops.range(20)
+ x = math_ops.range(1 << 16)
shuffle = random_ops.random_shuffle(x)
result = sess.run(shuffle)
- expected = range(20)
+ expected = range(1 << 16)
# Compare sets to avoid randomness behavior changes but make sure still
# have all the values.
self.assertAllEqual(set(result), set(expected))
diff --git a/tensorflow/compiler/tests/reduce_ops_test.py b/tensorflow/compiler/tests/reduce_ops_test.py
index 7420724bdb..cea2ec816f 100644
--- a/tensorflow/compiler/tests/reduce_ops_test.py
+++ b/tensorflow/compiler/tests/reduce_ops_test.py
@@ -22,7 +22,7 @@ import functools
import itertools
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
@@ -30,7 +30,7 @@ from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
-class ReduceOpsTest(XLATestCase):
+class ReduceOpsTest(xla_test.XLATestCase):
def _testReduction(self,
tf_reduce_fn,
@@ -156,7 +156,7 @@ class ReduceOpsTest(XLATestCase):
self._testReduction(math_ops.reduce_any, np.any, np.bool, self.BOOL_DATA)
-class ReduceOpPrecisionTest(XLATestCase):
+class ReduceOpPrecisionTest(xla_test.XLATestCase):
def _testReduceSum(self,
expected_result,
diff --git a/tensorflow/compiler/tests/reduce_window_test.py b/tensorflow/compiler/tests/reduce_window_test.py
index e78a63465b..c69b6837b0 100644
--- a/tensorflow/compiler/tests/reduce_window_test.py
+++ b/tensorflow/compiler/tests/reduce_window_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
@@ -28,7 +28,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
-class ReduceWindowTest(XLATestCase):
+class ReduceWindowTest(xla_test.XLATestCase):
"""Test cases for xla.reduce_window."""
def _reduce_window(self, operand, init, reducer, **kwargs):
diff --git a/tensorflow/compiler/tests/reverse_ops_test.py b/tensorflow/compiler/tests/reverse_ops_test.py
index 18fabca28c..d01c676e7c 100644
--- a/tensorflow/compiler/tests/reverse_ops_test.py
+++ b/tensorflow/compiler/tests/reverse_ops_test.py
@@ -21,14 +21,14 @@ from __future__ import print_function
import itertools
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
-class ReverseOpsTest(XLATestCase):
+class ReverseOpsTest(xla_test.XLATestCase):
def testReverseOneDim(self):
shape = (7, 5, 9, 11)
diff --git a/tensorflow/compiler/tests/reverse_sequence_op_test.py b/tensorflow/compiler/tests/reverse_sequence_op_test.py
index 1a5d05094e..ccfa630016 100644
--- a/tensorflow/compiler/tests/reverse_sequence_op_test.py
+++ b/tensorflow/compiler/tests/reverse_sequence_op_test.py
@@ -20,13 +20,13 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
-class ReverseSequenceTest(XLATestCase):
+class ReverseSequenceTest(xla_test.XLATestCase):
def _testReverseSequence(self,
x,
diff --git a/tensorflow/compiler/tests/rmsprop_test.py b/tensorflow/compiler/tests/rmsprop_test.py
index ecdce4f052..ff8bbac911 100644
--- a/tensorflow/compiler/tests/rmsprop_test.py
+++ b/tensorflow/compiler/tests/rmsprop_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
@@ -28,33 +28,104 @@ from tensorflow.python.platform import test
from tensorflow.python.training import rmsprop
-class RmspropTest(XLATestCase):
+class RmspropTest(xla_test.XLATestCase):
+
+ def _rmsprop_update_numpy(self,
+ var,
+ g,
+ mg,
+ rms,
+ mom,
+ lr,
+ decay=0.9,
+ momentum=0.0,
+ epsilon=1e-10,
+ centered=False):
+ rms_t = rms * decay + (1 - decay) * g * g
+ denom_t = rms_t + epsilon
+ if centered:
+ mg_t = mg * decay + (1 - decay) * g
+ denom_t -= mg_t * mg_t
+ else:
+ mg_t = mg
+ mom_t = momentum * mom + lr * g / np.sqrt(denom_t, dtype=denom_t.dtype)
+ var_t = var - mom_t
+ return var_t, mg_t, rms_t, mom_t
def testBasic(self):
for dtype in self.float_types:
- with self.test_session(), self.test_scope():
- var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
- var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
- grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
- grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
- rms_opt = rmsprop.RMSPropOptimizer(3.0)
- rms_update = rms_opt.apply_gradients(
- zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
-
- # Fetch params to validate initial values
- self.assertAllClose([1.0, 2.0], var0.eval())
- self.assertAllClose([3.0, 4.0], var1.eval())
-
- # Run 3 steps of RMSProp
- for _ in range(3):
- rms_update.run()
-
- # Validate updated params
- self.assertAllCloseAccordingToType(
- np.array([2.91705132e-04, 1.00029182e+00]), var0.eval())
- self.assertAllCloseAccordingToType(
- np.array([2.89990854, 3.89990854]), var1.eval())
+ for centered in [False, True]:
+ with self.test_session(), self.test_scope():
+ # Initialize variables for numpy implementation.
+ var0_np = np.array([1.0, 2.0], dtype=dtype)
+ grads0_np = np.array([0.1, 0.1], dtype=dtype)
+ var1_np = np.array([3.0, 4.0], dtype=dtype)
+ grads1_np = np.array([0.01, 0.01], dtype=dtype)
+ mg0_np = np.array([0.0, 0.0], dtype=dtype)
+ mg1_np = np.array([0.0, 0.0], dtype=dtype)
+ rms0_np = np.array([1.0, 1.0], dtype=dtype)
+ rms1_np = np.array([1.0, 1.0], dtype=dtype)
+ mom0_np = np.array([0.0, 0.0], dtype=dtype)
+ mom1_np = np.array([0.0, 0.0], dtype=dtype)
+
+ var0 = resource_variable_ops.ResourceVariable(var0_np)
+ var1 = resource_variable_ops.ResourceVariable(var1_np)
+ grads0 = constant_op.constant(grads0_np)
+ grads1 = constant_op.constant(grads1_np)
+ learning_rate = 3.0
+ rms_opt = rmsprop.RMSPropOptimizer(learning_rate, centered=centered)
+ rms_update = rms_opt.apply_gradients(
+ zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ mg0 = rms_opt.get_slot(var0, "mg")
+ self.assertEqual(mg0 is not None, centered)
+ mg1 = rms_opt.get_slot(var1, "mg")
+ self.assertEqual(mg1 is not None, centered)
+ rms0 = rms_opt.get_slot(var0, "rms")
+ self.assertTrue(rms0 is not None)
+ rms1 = rms_opt.get_slot(var1, "rms")
+ self.assertTrue(rms1 is not None)
+ mom0 = rms_opt.get_slot(var0, "momentum")
+ self.assertTrue(mom0 is not None)
+ mom1 = rms_opt.get_slot(var1, "momentum")
+ self.assertTrue(mom1 is not None)
+
+ # Fetch params to validate initial values
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([3.0, 4.0], var1.eval())
+
+ # Run 3 steps of RMSProp
+ for _ in range(3):
+ rms_update.run()
+
+ var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
+ var0_np,
+ grads0_np,
+ mg0_np,
+ rms0_np,
+ mom0_np,
+ learning_rate,
+ centered=centered)
+ var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
+ var1_np,
+ grads1_np,
+ mg1_np,
+ rms1_np,
+ mom1_np,
+ learning_rate,
+ centered=centered)
+
+ # Validate updated params
+ if centered:
+ self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
+ self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
+ self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
+ self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
+ self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
+ self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
+ self.assertAllCloseAccordingToType(var0_np, var0.eval())
+ self.assertAllCloseAccordingToType(var1_np, var1.eval())
if __name__ == "__main__":
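
The _rmsprop_update_numpy helper added above follows the standard recurrences: rms_t = decay * rms + (1 - decay) * g**2; the centered variant additionally tracks mg_t and subtracts mg_t**2 from the denominator; then mom_t = momentum * mom + lr * g / sqrt(rms_t + epsilon) and var_t = var - mom_t. One plain (non-centered, zero-momentum) step as a standalone sanity check, using the same slot initialization as the test:

import numpy as np

var = np.array([1.0, 2.0])
g = np.array([0.1, 0.1])
rms = np.ones_like(var)  # the "rms" slot starts at 1.0
lr, decay, epsilon = 3.0, 0.9, 1e-10

rms = decay * rms + (1 - decay) * g * g
var -= lr * g / np.sqrt(rms + epsilon)
print(var)  # first-step result with momentum=0.0, centered=False
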
diff --git a/tensorflow/compiler/tests/scan_ops_test.py b/tensorflow/compiler/tests/scan_ops_test.py
index 3260e63b23..4292352e76 100644
--- a/tensorflow/compiler/tests/scan_ops_test.py
+++ b/tensorflow/compiler/tests/scan_ops_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
@@ -69,7 +69,7 @@ def handle_options(func, x, axis, exclusive, reverse):
return x
-class CumsumTest(XLATestCase):
+class CumsumTest(xla_test.XLATestCase):
valid_dtypes = [np.float32]
@@ -147,7 +147,7 @@ class CumsumTest(XLATestCase):
math_ops.cumsum(input_tensor, [0]).eval()
-class CumprodTest(XLATestCase):
+class CumprodTest(xla_test.XLATestCase):
valid_dtypes = [np.float32]
diff --git a/tensorflow/compiler/tests/scatter_nd_op_test.py b/tensorflow/compiler/tests/scatter_nd_op_test.py
index 638946e234..f606f88545 100644
--- a/tensorflow/compiler/tests/scatter_nd_op_test.py
+++ b/tensorflow/compiler/tests/scatter_nd_op_test.py
@@ -22,7 +22,7 @@ import functools
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@@ -68,7 +68,7 @@ def _NumpyUpdate(indices, updates, shape):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
-class ScatterNdTest(XLATestCase):
+class ScatterNdTest(xla_test.XLATestCase):
def _VariableRankTest(self,
np_scatter,
diff --git a/tensorflow/compiler/tests/segment_reduction_ops_test.py b/tensorflow/compiler/tests/segment_reduction_ops_test.py
index 4a9c0e7471..772c20fd42 100644
--- a/tensorflow/compiler/tests/segment_reduction_ops_test.py
+++ b/tensorflow/compiler/tests/segment_reduction_ops_test.py
@@ -21,26 +21,40 @@ from __future__ import print_function
import functools
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
+from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
-class SegmentReductionOpsTest(XLATestCase):
+class SegmentReductionOpsTest(xla_test.XLATestCase):
"""Test cases for segment reduction ops."""
- def UnsortedSegmentSum(self, data, indices, num_segments):
+ def _segmentReduction(self, op, data, indices, num_segments):
with self.test_session() as sess, self.test_scope():
d = array_ops.placeholder(data.dtype, shape=data.shape)
if isinstance(indices, int):
i = array_ops.placeholder(np.int32, shape=[])
else:
i = array_ops.placeholder(indices.dtype, shape=indices.shape)
- return sess.run(
- math_ops.unsorted_segment_sum(d, i, num_segments),
- {d: data,
- i: indices})
+ return sess.run(op(d, i, num_segments), {d: data, i: indices})
+
+ def _unsortedSegmentSum(self, data, indices, num_segments):
+ return self._segmentReduction(math_ops.unsorted_segment_sum, data, indices,
+ num_segments)
+
+ def _unsortedSegmentProd(self, data, indices, num_segments):
+ return self._segmentReduction(math_ops.unsorted_segment_prod, data, indices,
+ num_segments)
+
+ def _unsortedSegmentMin(self, data, indices, num_segments):
+ return self._segmentReduction(math_ops.unsorted_segment_min, data, indices,
+ num_segments)
+
+ def _unsortedSegmentMax(self, data, indices, num_segments):
+ return self._segmentReduction(math_ops.unsorted_segment_max, data, indices,
+ num_segments)
def testUnsortedSegmentSum0DIndices1DData(self):
for dtype in self.numeric_types:
@@ -49,14 +63,14 @@ class SegmentReductionOpsTest(XLATestCase):
[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 1, 2, 3, 4, 5],
[0, 0, 0, 0, 0, 0]],
dtype=dtype),
- self.UnsortedSegmentSum(
+ self._unsortedSegmentSum(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype), 2, 4))
def testUnsortedSegmentSum1DIndices1DData(self):
for dtype in self.numeric_types:
self.assertAllClose(
np.array([1, 3, 2, 9], dtype=dtype),
- self.UnsortedSegmentSum(
+ self._unsortedSegmentSum(
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([3, 0, 2, 1, 3, 3], dtype=np.int32), 4))
@@ -64,7 +78,7 @@ class SegmentReductionOpsTest(XLATestCase):
for dtype in self.numeric_types:
self.assertAllClose(
np.array([6, 3, 0, 6], dtype=dtype),
- self.UnsortedSegmentSum(
+ self._unsortedSegmentSum(
np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
@@ -76,7 +90,7 @@ class SegmentReductionOpsTest(XLATestCase):
dtype=dtype)
indices = np.array([8, 1, 0, 3, 7], dtype=np.int32)
num_segments = 10
- y = self.UnsortedSegmentSum(data, indices, num_segments)
+ y = self._unsortedSegmentSum(data, indices, num_segments)
self.assertAllClose(
np.array(
[[30, 31, 32, 33], [20, 21, 22, 23], [0, 0, 0, 0],
@@ -92,7 +106,7 @@ class SegmentReductionOpsTest(XLATestCase):
dtype=dtype)
indices = np.array([0, 1, 2, 0, 1], dtype=np.int32)
num_segments = 4
- y = self.UnsortedSegmentSum(data, indices, num_segments)
+ y = self._unsortedSegmentSum(data, indices, num_segments)
self.assertAllClose(
np.array(
[[40, 42, 44, 46], [70, 72, 74, 76], [30, 31, 32, 33],
@@ -102,30 +116,30 @@ class SegmentReductionOpsTest(XLATestCase):
def testUnsortedSegmentSum2DIndices3DData(self):
for dtype in self.numeric_types:
data = np.array(
- [[[0, 1, 2], [10, 11, 12]], [[100, 101, 102], [110, 111, 112]],
- [[200, 201, 202], [210, 211, 212]], [[300, 301, 302],
- [310, 311, 312]]],
+ [[[0, 1, 2], [10, 11, 12]], [[100, 101, 102], [110, 111, 112]], [[
+ 200, 201, 202
+ ], [210, 211, 212]], [[300, 301, 302], [310, 311, 312]]],
dtype=dtype)
indices = np.array([[3, 5], [3, 1], [5, 0], [6, 2]], dtype=np.int32)
num_segments = 8
- y = self.UnsortedSegmentSum(data, indices, num_segments)
+ y = self._unsortedSegmentSum(data, indices, num_segments)
self.assertAllClose(
np.array(
- [[210, 211, 212], [110, 111, 112], [310, 311, 312],
- [100, 102, 104], [0, 0, 0.], [210, 212, 214], [300, 301,
- 302], [0, 0, 0]],
+ [[210, 211, 212], [110, 111, 112], [310, 311, 312], [
+ 100, 102, 104
+ ], [0, 0, 0.], [210, 212, 214], [300, 301, 302], [0, 0, 0]],
dtype=dtype), y)
def testUnsortedSegmentSum1DIndices3DData(self):
for dtype in self.numeric_types:
data = np.array(
- [[[0, 1, 2], [10, 11, 12]], [[100, 101, 102], [110, 111, 112]],
- [[200, 201, 202], [210, 211, 212]], [[300, 301, 302],
- [310, 311, 312]]],
+ [[[0, 1, 2], [10, 11, 12]], [[100, 101, 102], [110, 111, 112]], [[
+ 200, 201, 202
+ ], [210, 211, 212]], [[300, 301, 302], [310, 311, 312]]],
dtype=dtype)
indices = np.array([3, 0, 2, 5], dtype=np.int32)
num_segments = 6
- y = self.UnsortedSegmentSum(data, indices, num_segments)
+ y = self._unsortedSegmentSum(data, indices, num_segments)
self.assertAllClose(
np.array(
[[[100, 101, 102.], [110, 111, 112]], [[0, 0, 0], [0, 0, 0]],
@@ -138,10 +152,40 @@ class SegmentReductionOpsTest(XLATestCase):
data = np.ones((4, 8, 7), dtype=dtype)
indices = np.ones((3, 2), dtype=np.int32)
num_segments = 4
- self.assertRaises(ValueError,
- functools.partial(self.UnsortedSegmentSum, data,
- indices, num_segments))
+ self.assertRaises(
+ ValueError,
+ functools.partial(self._segmentReduction,
+ math_ops.unsorted_segment_sum, data, indices,
+ num_segments))
+
+ def testUnsortedSegmentOps1DIndices1DDataNegativeIndices(self):
+ """Tests for min, max, and prod ops.
+
+ These share most of their implementation with sum, so we only test basic
+ functionality.
+ """
+ for dtype in self.numeric_types:
+ self.assertAllClose(
+ np.array([8, 3, 1, 0], dtype=dtype),
+ self._unsortedSegmentProd(
+ np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
+ np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
+
+ for dtype in self.int_types | self.float_types:
+ minval = dtypes.as_dtype(dtype).min
+ maxval = dtypes.as_dtype(dtype).max
+
+ self.assertAllClose(
+ np.array([2, 3, maxval, 0], dtype=dtype),
+ self._unsortedSegmentMin(
+ np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
+ np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
+ self.assertAllClose(
+ np.array([4, 3, minval, 6], dtype=dtype),
+ self._unsortedSegmentMax(
+ np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
+ np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
-if __name__ == '__main__':
+if __name__ == "__main__":
googletest.main()
diff --git a/tensorflow/compiler/tests/slice_ops_test.py b/tensorflow/compiler/tests/slice_ops_test.py
index 305ca0c6b7..6c4890565d 100644
--- a/tensorflow/compiler/tests/slice_ops_test.py
+++ b/tensorflow/compiler/tests/slice_ops_test.py
@@ -18,14 +18,14 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
-class SliceTest(XLATestCase):
+class SliceTest(xla_test.XLATestCase):
def test1D(self):
for dtype in self.numeric_types:
@@ -110,7 +110,7 @@ class SliceTest(XLATestCase):
self.assertAllEqual([[[1, 1, 1, 1], [6, 5, 4, 3]]], result)
-class StridedSliceTest(XLATestCase):
+class StridedSliceTest(xla_test.XLATestCase):
def test1D(self):
for dtype in self.numeric_types:
diff --git a/tensorflow/compiler/tests/sort_ops_test.py b/tensorflow/compiler/tests/sort_ops_test.py
index 8ae579abda..9e2ef964a1 100644
--- a/tensorflow/compiler/tests/sort_ops_test.py
+++ b/tensorflow/compiler/tests/sort_ops_test.py
@@ -64,20 +64,29 @@ class XlaSortOpTest(xla_test.XLATestCase):
if self.device in ["XLA_CPU", "XLA_GPU"]:
return
- # Only bfloat16 is implemented.
- bfloat16 = dtypes.bfloat16.as_numpy_dtype
- if bfloat16 in self.numeric_types:
- for x in [np.arange(20)]:
+ supported_types = set(
+ [dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
+ for dtype in supported_types.intersection(self.numeric_types):
+      # Use a small input size for bfloat16. Otherwise we would get duplicate
+      # values after the conversion to bfloat16, and the expected index array
+      # would no longer be unique.
+ if dtype == dtypes.bfloat16.as_numpy_dtype:
+ array_size = 20
+ k_options = [0, 1, 2, 10, 20]
+ else:
+ array_size = 200 * 1000
+ k_options = [0, 1, 2, 10, 20, 100, 1000, 200 * 1000]
+ for x in [np.arange(array_size)]:
np.random.shuffle(x)
- for k in [0, 1, 2, 10, 20]:
+ for k in k_options:
indices = x.argsort()[::-1][:k]
def topk(v, k=k):
return nn_ops.top_k(v, k=k, sorted=True)
self._assertOpOutputMatchesExpected(
- topk, [x.astype(bfloat16)],
- expected=[x[indices].astype(bfloat16), indices])
+ topk, [x.astype(dtype)],
+ expected=[x[indices].astype(dtype), indices])
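For intuition about the comment above: bfloat16 keeps only 8 significand bits
(7 stored), so not every integer above 256 is representable, and distinct
inputs can collapse to the same value, making the expected top-k index array
ambiguous. A minimal sketch of the effect (illustrative only; assumes
TensorFlow is importable):

    import numpy as np
    from tensorflow.python.framework import dtypes

    bfloat16 = dtypes.bfloat16.as_numpy_dtype
    x = np.arange(1000)
    # Consecutive integers above 256 start mapping to the same bfloat16
    # value, so argsort indices are no longer unique.
    print(len(np.unique(x.astype(bfloat16))))  # prints fewer than 1000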
def testTopKZeros(self):
"""Tests that positive and negative zeros sort correctly."""
@@ -99,7 +108,7 @@ class XlaSortOpTest(xla_test.XLATestCase):
{p: np.array([0., -0., 0., 3., -0., -4., 0., -0.], dtype=bfloat16)})
self.assertAllEqual(
np.array([3., 0., 0., 0.], dtype=bfloat16), results[0])
- self.assertEqual(list([3, 0, 1, 2]), list(results[1]))
+ self.assertEqual(list([3, 0, 2, 6]), list(results[1]))
def testTopKInfinities(self):
"""Tests that positive and negative infinity sort correctly."""
diff --git a/tensorflow/compiler/tests/spacetobatch_op_test.py b/tensorflow/compiler/tests/spacetobatch_op_test.py
index f37c34156f..c685bc548f 100644
--- a/tensorflow/compiler/tests/spacetobatch_op_test.py
+++ b/tensorflow/compiler/tests/spacetobatch_op_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
@@ -68,7 +68,7 @@ def space_to_batch_direct(input_array, block_shape, paddings):
return permuted_reshaped_padded.reshape(output_shape)
-class SpaceToBatchTest(XLATestCase):
+class SpaceToBatchTest(xla_test.XLATestCase):
"""Tests input-output pairs for the SpaceToBatch and BatchToSpace ops."""
def _testPad(self, inputs, paddings, block_size, outputs):
@@ -149,7 +149,7 @@ class SpaceToBatchTest(XLATestCase):
self._testOne(x_np, block_size, x_out)
-class SpaceToBatchNDTest(XLATestCase):
+class SpaceToBatchNDTest(xla_test.XLATestCase):
"""Tests input-output pairs for the SpaceToBatchND and BatchToSpaceND ops."""
def _testPad(self, inputs, block_shape, paddings, outputs):
diff --git a/tensorflow/compiler/tests/sparse_to_dense_op_test.py b/tensorflow/compiler/tests/sparse_to_dense_op_test.py
new file mode 100644
index 0000000000..3db8101c4b
--- /dev/null
+++ b/tensorflow/compiler/tests/sparse_to_dense_op_test.py
@@ -0,0 +1,118 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for tensorflow.kernels.sparse_op."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.compiler.tests import xla_test
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import sparse_ops
+from tensorflow.python.platform import test
+
+
+def _SparseToDense(sparse_indices,
+ output_size,
+ sparse_values,
+ default_value,
+ validate_indices=True):
+ feed_sparse_indices = array_ops.placeholder(dtypes.int32)
+ feed_dict = {feed_sparse_indices: sparse_indices}
+ return sparse_ops.sparse_to_dense(
+ feed_sparse_indices,
+ output_size,
+ sparse_values,
+ default_value=default_value,
+ validate_indices=validate_indices).eval(feed_dict=feed_dict)
+
+
+class SparseToDenseTest(xla_test.XLATestCase):
+
+ def testInt(self):
+ with self.test_session(), self.test_scope():
+ tf_ans = _SparseToDense([1, 3], [5], 1, 0)
+ np_ans = np.array([0, 1, 0, 1, 0]).astype(np.int32)
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testFloat(self):
+ with self.test_session(), self.test_scope():
+ tf_ans = _SparseToDense([1, 3], [5], 1.0, 0.0)
+ np_ans = np.array([0, 1, 0, 1, 0]).astype(np.float32)
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testSetValue(self):
+ with self.test_session(), self.test_scope():
+ tf_ans = _SparseToDense([1, 3], [5], [1, 2], -1)
+ np_ans = np.array([-1, 1, -1, 2, -1]).astype(np.int32)
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testSetSingleValue(self):
+ with self.test_session(), self.test_scope():
+ tf_ans = _SparseToDense([1, 3], [5], 1, -1)
+ np_ans = np.array([-1, 1, -1, 1, -1]).astype(np.int32)
+ self.assertAllClose(np_ans, tf_ans)
+
+ def test2d(self):
+ # pylint: disable=bad-whitespace
+ with self.test_session(), self.test_scope():
+ tf_ans = _SparseToDense([[1, 3], [2, 0]], [3, 4], 1, -1)
+ np_ans = np.array([[-1, -1, -1, -1],
+ [-1, -1, -1, 1],
+ [ 1, -1, -1, -1]]).astype(np.int32)
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testZeroDefault(self):
+ with self.test_session():
+ x = sparse_ops.sparse_to_dense(2, [4], 7).eval()
+ self.assertAllEqual(x, [0, 0, 7, 0])
+
+ def test3d(self):
+ with self.test_session(), self.test_scope():
+ tf_ans = _SparseToDense([[1, 3, 0], [2, 0, 1]], [3, 4, 2], 1, -1)
+ np_ans = np.ones((3, 4, 2), dtype=np.int32) * -1
+ np_ans[1, 3, 0] = 1
+ np_ans[2, 0, 1] = 1
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testBadShape(self):
+ with self.test_session(), self.test_scope():
+ with self.assertRaisesWithPredicateMatch(ValueError, "must be rank 1"):
+ _SparseToDense([1, 3], [[5], [3]], 1, -1)
+
+ def testBadValue(self):
+ with self.test_session(), self.test_scope():
+ with self.assertRaisesOpError(
+ r"sparse_values has incorrect shape \[2,1\], "
+ r"should be \[\] or \[2\]"):
+ _SparseToDense([1, 3], [5], [[5], [3]], -1)
+
+ def testBadNumValues(self):
+ with self.test_session(), self.test_scope():
+ with self.assertRaisesOpError(
+ r"sparse_values has incorrect shape \[3\], should be \[\] or \[2\]"):
+ _SparseToDense([1, 3], [5], [1, 2, 3], -1)
+
+ def testBadDefault(self):
+ with self.test_session(), self.test_scope():
+ with self.assertRaisesOpError("default_value should be a scalar"):
+ _SparseToDense([1, 3], [5], [1, 2], [0])
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/compiler/tests/stack_ops_test.py b/tensorflow/compiler/tests/stack_ops_test.py
index 94342f9567..b7dd787fef 100644
--- a/tensorflow/compiler/tests/stack_ops_test.py
+++ b/tensorflow/compiler/tests/stack_ops_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
@@ -28,7 +28,7 @@ from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.platform import test
-class StackOpTest(XLATestCase):
+class StackOpTest(xla_test.XLATestCase):
def testStackPushPop(self):
with self.test_session(), self.test_scope():
diff --git a/tensorflow/compiler/tests/stateless_random_ops_test.py b/tensorflow/compiler/tests/stateless_random_ops_test.py
index abce190d83..d162675ef8 100644
--- a/tensorflow/compiler/tests/stateless_random_ops_test.py
+++ b/tensorflow/compiler/tests/stateless_random_ops_test.py
@@ -22,7 +22,7 @@ import math
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.contrib import stateless
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
@@ -30,7 +30,7 @@ from tensorflow.python.ops.distributions import special_math
from tensorflow.python.platform import test
-class StatelessRandomOpsTest(XLATestCase):
+class StatelessRandomOpsTest(xla_test.XLATestCase):
"""Test cases for stateless random-number generator operators."""
def _random_types(self):
diff --git a/tensorflow/compiler/tests/ternary_ops_test.py b/tensorflow/compiler/tests/ternary_ops_test.py
index ef047005b6..effa5a59fe 100644
--- a/tensorflow/compiler/tests/ternary_ops_test.py
+++ b/tensorflow/compiler/tests/ternary_ops_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
@@ -28,7 +28,7 @@ from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
-class TernaryOpsTest(XLATestCase):
+class TernaryOpsTest(xla_test.XLATestCase):
def _testTernary(self, op, a, b, c, expected):
with self.test_session() as session:
diff --git a/tensorflow/compiler/tests/unary_ops_test.py b/tensorflow/compiler/tests/unary_ops_test.py
index e610b63e30..5f25ff9002 100644
--- a/tensorflow/compiler/tests/unary_ops_test.py
+++ b/tensorflow/compiler/tests/unary_ops_test.py
@@ -23,7 +23,7 @@ import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
@@ -44,11 +44,16 @@ def nhwc_to_format(x, data_format):
raise ValueError("Unknown format {}".format(data_format))
-class UnaryOpsTest(XLATestCase):
+class UnaryOpsTest(xla_test.XLATestCase):
"""Test cases for unary operators."""
- def _assertOpOutputMatchesExpected(self, op, inp, expected,
- equality_test=None, rtol=1e-3, atol=1e-5):
+ def _assertOpOutputMatchesExpected(self,
+ op,
+ inp,
+ expected,
+ equality_test=None,
+ rtol=1e-3,
+ atol=1e-5):
"""Verifies that 'op' produces 'expected' when fed input 'inp' .
Args:
@@ -81,10 +86,10 @@ class UnaryOpsTest(XLATestCase):
def testAllTypeOps(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
- array_ops.diag,
- np.array([1, 2, 3, 4], dtype=dtype),
- np.array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]],
- dtype=dtype))
+ array_ops.diag, np.array([1, 2, 3, 4], dtype=dtype),
+ np.array(
+ [[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]],
+ dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.diag_part,
np.arange(36).reshape([2, 3, 2, 3]).astype(dtype),
@@ -102,8 +107,7 @@ class UnaryOpsTest(XLATestCase):
expected=np.array([[-1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
- array_ops.matrix_diag,
- np.array([[1, 2], [3, 4]], dtype=dtype),
+ array_ops.matrix_diag, np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([[[1, 0], [0, 2]], [[3, 0], [0, 4]]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.matrix_diag, np.array([1, 2, 3, 4], dtype=dtype),
@@ -115,10 +119,10 @@ class UnaryOpsTest(XLATestCase):
np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=dtype),
np.array(
- [[[[1, 0, 0], [0, 2, 0], [0, 0, 3]],
- [[4, 0, 0], [0, 5, 0], [0, 0, 6]]],
- [[[7, 0, 0], [0, 8, 0], [0, 0, 9]],
- [[10, 0, 0], [0, 11, 0], [0, 0, 12]]]],
+ [[[[1, 0, 0], [0, 2, 0], [0, 0, 3]], [[4, 0, 0], [0, 5, 0], [
+ 0, 0, 6
+ ]]], [[[7, 0, 0], [0, 8, 0], [0, 0, 9]], [[10, 0, 0], [0, 11, 0],
+ [0, 0, 12]]]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.matrix_diag_part,
@@ -159,36 +163,30 @@ class UnaryOpsTest(XLATestCase):
continue
x = np.arange(-0.90, 0.90, 0.25)
self._assertOpOutputMatchesExpected(
- math_ops.acos,
- x.astype(dtype),
- expected=np.arccos(x).astype(dtype))
+ math_ops.acos, x.astype(dtype), expected=np.arccos(x).astype(dtype))
self._assertOpOutputMatchesExpected(
- math_ops.asin,
- x.astype(dtype),
- expected=np.arcsin(x).astype(dtype))
+ math_ops.asin, x.astype(dtype), expected=np.arcsin(x).astype(dtype))
x = np.arange(-3, 3).reshape(1, 3, 2)
self._assertOpOutputMatchesExpected(
- math_ops.atan,
- x.astype(dtype),
- expected=np.arctan(x).astype(dtype))
+ math_ops.atan, x.astype(dtype), expected=np.arctan(x).astype(dtype))
self._assertOpOutputMatchesExpected(
math_ops.acosh,
np.array([1, 2, 3, 4], dtype=dtype),
- expected=np.array([0, 1.3169579, 1.76274717, 2.06343707],
- dtype=dtype))
+ expected=np.array(
+ [0, 1.3169579, 1.76274717, 2.06343707], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.asinh,
np.array([1, 2, 3, 4], dtype=dtype),
- expected=np.array([0.88137359, 1.44363548, 1.81844646, 2.09471255],
- dtype=dtype))
+ expected=np.array(
+ [0.88137359, 1.44363548, 1.81844646, 2.09471255], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.atanh,
np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype),
- expected=np.array([0.10033535, 0.20273255, 0.3095196, 0.42364893],
- dtype=dtype))
+ expected=np.array(
+ [0.10033535, 0.20273255, 0.3095196, 0.42364893], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.ceil,
@@ -198,8 +196,8 @@ class UnaryOpsTest(XLATestCase):
self._assertOpOutputMatchesExpected(
math_ops.cosh,
np.array([1, 2, 3, 4], dtype=dtype),
- expected=np.array([1.54308063, 3.76219569, 10.067662, 27.30823284],
- dtype=dtype))
+ expected=np.array(
+ [1.54308063, 3.76219569, 10.067662, 27.30823284], dtype=dtype))
# Disable float16 testing for now
if dtype != np.float16:
@@ -229,8 +227,8 @@ class UnaryOpsTest(XLATestCase):
self._assertOpOutputMatchesExpected(
math_ops.is_finite,
- np.array([[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]],
- dtype=dtype),
+ np.array(
+ [[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[0, 1, 1, 1, 1, 1, 1, 0, 0]], dtype=np.bool))
# Tests for tf.nn ops.
@@ -271,16 +269,20 @@ class UnaryOpsTest(XLATestCase):
self._assertOpOutputMatchesExpected(
math_ops.rint,
- np.array([[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
- [0.5, 1.5, 2.5, 3.5]], dtype=dtype),
- expected=np.array([[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]],
- dtype=dtype))
+ np.array(
+ [[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
+ [0.5, 1.5, 2.5, 3.5]],
+ dtype=dtype),
+ expected=np.array(
+ [[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.round,
- np.array([[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
- [0.5, 1.5, 2.5, 3.5]], dtype=dtype),
- expected=np.array([[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]],
- dtype=dtype))
+ np.array(
+ [[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
+ [0.5, 1.5, 2.5, 3.5]],
+ dtype=dtype),
+ expected=np.array(
+ [[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.rsqrt,
@@ -289,10 +291,7 @@ class UnaryOpsTest(XLATestCase):
self._assertOpOutputMatchesExpected(
math_ops.sigmoid,
- np.array(
- [[1, 1, 1, 1],
- [1, 2, 3, 4]],
- dtype=dtype),
+ np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[0.7310586, 0.7310586, 0.7310586, 0.7310586],
[0.7310586, 0.880797, 0.95257413, 0.98201376]],
@@ -306,8 +305,8 @@ class UnaryOpsTest(XLATestCase):
self._assertOpOutputMatchesExpected(
math_ops.sinh,
np.array([1, 2, 3, 4], dtype=dtype),
- expected=np.array([1.17520119, 3.62686041, 10.01787493, 27.2899172],
- dtype=dtype))
+ expected=np.array(
+ [1.17520119, 3.62686041, 10.01787493, 27.2899172], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sqrt,
@@ -317,15 +316,12 @@ class UnaryOpsTest(XLATestCase):
self._assertOpOutputMatchesExpected(
math_ops.tan,
np.array([1, 2, 3, 4], dtype=dtype),
- expected=np.array([1.55740772, -2.18503986, -0.14254654, 1.15782128],
- dtype=dtype))
+ expected=np.array(
+ [1.55740772, -2.18503986, -0.14254654, 1.15782128], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.tanh,
- np.array(
- [[1, 1, 1, 1],
- [1, 2, 3, 4]],
- dtype=dtype),
+ np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[0.76159418, 0.76159418, 0.76159418, 0.76159418],
[0.76159418, 0.96402758, 0.99505478, 0.99932933]],
@@ -333,10 +329,7 @@ class UnaryOpsTest(XLATestCase):
self._assertOpOutputMatchesExpected(
nn_ops.log_softmax,
- np.array(
- [[1, 1, 1, 1],
- [1, 2, 3, 4]],
- dtype=dtype),
+ np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[-1.3862944, -1.3862944, -1.3862944, -1.3862944],
[-3.4401896, -2.4401896, -1.4401897, -0.44018969]],
@@ -370,10 +363,7 @@ class UnaryOpsTest(XLATestCase):
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
- np.array(
- [[1, 1, 1, 1],
- [1, 2, 3, 4]],
- dtype=dtype),
+ np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[0.25, 0.25, 0.25, 0.25],
[0.032058604, 0.087144323, 0.23688284, 0.64391428]],
@@ -382,8 +372,8 @@ class UnaryOpsTest(XLATestCase):
self._assertOpOutputMatchesExpected(
nn_ops.softsign,
np.array([[-2, -1, 0, 1, 2]], dtype=dtype),
- expected=np.array([[-0.66666669, -0.5, 0, 0.5, 0.66666669]],
- dtype=dtype))
+ expected=np.array(
+ [[-0.66666669, -0.5, 0, 0.5, 0.66666669]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.is_finite,
@@ -393,9 +383,78 @@ class UnaryOpsTest(XLATestCase):
[[True, False, True], [False, True, True]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
- lambda x: array_ops.quantize_and_dequantize_v2(x, -127, 127, True, 8),
+ math_ops.lgamma,
+ np.array(
+ [[1, 2, 3], [4, 5, 6], [1 / 2, 3 / 2, 5 / 2],
+ [-3 / 2, -7 / 2, -11 / 2]],
+ dtype=dtype),
+ expected=np.array(
+ [
+ [0, 0, np.log(2.0)],
+ [np.log(6.0), np.log(24.0),
+ np.log(120)],
+ [
+ np.log(np.pi) / 2,
+ np.log(np.pi) / 2 - np.log(2),
+ np.log(np.pi) / 2 - np.log(4) + np.log(3)
+ ],
+ [
+ np.log(np.pi) / 2 - np.log(3) + np.log(4),
+ np.log(np.pi) / 2 - np.log(105) + np.log(16),
+ np.log(np.pi) / 2 - np.log(10395) + np.log(64),
+ ],
+ ],
+ dtype=dtype))
+
+ self._assertOpOutputMatchesExpected(
+ math_ops.digamma,
+ np.array(
+ [[1.0, 0.5, 1 / 3.0], [0.25, 1 / 6.0, 0.125], [2.0, 3.0, 4.0],
+ [6.0, 8.0, 9.0]],
+ dtype=dtype),
+ expected=np.array(
+ [
+ [
+ -np.euler_gamma, -2 * np.log(2) - np.euler_gamma,
+ -np.pi / 2 / np.sqrt(3) - 3 * np.log(3) / 2 -
+ np.euler_gamma
+ ],
+ [
+ -np.pi / 2 - 3 * np.log(2) - np.euler_gamma,
+ -np.pi * np.sqrt(3) / 2 - 2 * np.log(2) -
+ 3 * np.log(3) / 2 - np.euler_gamma,
+ -np.pi / 2 - 4 * np.log(2) -
+ (np.pi + np.log(2 + np.sqrt(2)) - np.log(2 - np.sqrt(2)))
+ / np.sqrt(2) - np.euler_gamma
+ ],
+ [
+ 1 - np.euler_gamma, 1.5 - np.euler_gamma,
+ 11 / 6.0 - np.euler_gamma
+ ],
+ [
+ 137 / 60.0 - np.euler_gamma, 363 / 140.0 - np.euler_gamma,
+ 761 / 280.0 - np.euler_gamma
+ ],
+ ],
+ dtype=dtype))
+
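The expected values in the two blocks above are closed forms from standard
identities (gamma denotes the Euler-Mascheroni constant); for reference, in
LaTeX:

    \log\Gamma(n) = \log\,(n-1)!, \qquad
    \log\Gamma\bigl(\tfrac{1}{2}\bigr) = \tfrac{1}{2}\log\pi, \qquad
    \Gamma(x)\,\Gamma(1-x) = \frac{\pi}{\sin(\pi x)}

    \psi(1) = -\gamma, \qquad \psi(x+1) = \psi(x) + \frac{1}{x}

The negative half-integer lgamma entries follow from the reflection formula;
the digamma entries at rational arguments follow from Gauss's digamma theorem
together with the recurrence.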
+ def quantize_and_dequantize_v2(x):
+ return array_ops.quantize_and_dequantize_v2(
+ x, -127, 127, signed_input=True, num_bits=8)
+
+ self._assertOpOutputMatchesExpected(
+ quantize_and_dequantize_v2,
+ np.array([-1, -0.5, 0, 0.3], dtype=dtype),
+ expected=np.array([-1., -0.5, 0., 0.296875], dtype=dtype))
+
+ def quantize_and_dequantize_v3(x):
+ return array_ops.quantize_and_dequantize_v3(
+ x, -127, 127, num_bits=8, signed_input=True, range_given=False)
+
+ self._assertOpOutputMatchesExpected(
+ quantize_and_dequantize_v3,
np.array([-1, -0.5, 0, 0.3], dtype=dtype),
- expected=np.array([-1, -64.0 / 127, 0, 38.0 / 127], dtype=dtype))
+ expected=np.array([-1., -0.5, 0., 0.296875], dtype=dtype))
def testComplexOps(self):
for dtype in self.complex_types:
@@ -576,13 +635,13 @@ class UnaryOpsTest(XLATestCase):
for dtype in self.float_types:
self._assertOpOutputMatchesExpected(
math_ops.is_inf,
- np.array([[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]],
- dtype=dtype),
+ np.array(
+ [[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[1, 0, 0, 0, 0, 0, 0, 1, 0]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
math_ops.is_nan,
- np.array([[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]],
- dtype=dtype),
+ np.array(
+ [[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=np.bool))
def testLogicalOps(self):
@@ -599,14 +658,15 @@ class UnaryOpsTest(XLATestCase):
self._assertOpOutputMatchesExpected(
lambda x: gen_nn_ops.bias_add_grad(x, data_format="NCHW"),
- np.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]],
- dtype=np.float32),
+ np.array(
+ [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]], dtype=np.float32),
expected=np.array([10., 26.], dtype=np.float32))
def testCast(self):
shapes = [[], [4], [2, 3], [2, 0, 4]]
- types = (set([dtypes.bool, dtypes.int32, dtypes.float32]) |
- self.complex_tf_types)
+ types = (
+ set([dtypes.bool, dtypes.int32, dtypes.float32])
+ | self.complex_tf_types)
for shape in shapes:
for src_type in types:
for dst_type in types:
@@ -648,14 +708,11 @@ class UnaryOpsTest(XLATestCase):
self._assertOpOutputMatchesExpected(
rank_op, dtype(7), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
- rank_op, np.array(
- [[], []], dtype=dtype), expected=np.int32(2))
+ rank_op, np.array([[], []], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
- rank_op, np.array(
- [-1, 1], dtype=dtype), expected=np.int32(1))
+ rank_op, np.array([-1, 1], dtype=dtype), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
- rank_op, np.array(
- [[-1, 1]], dtype=dtype), expected=np.int32(2))
+ rank_op, np.array([[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op,
np.array([[-1], [1], [4]], dtype=dtype),
@@ -720,97 +777,97 @@ class UnaryOpsTest(XLATestCase):
equality_test=self.ListsAreClose)
def testDepthToSpace(self):
+
def make_op(data_format):
+
def op(x):
- return array_ops.depth_to_space(x, block_size=2,
- data_format=data_format)
+ return array_ops.depth_to_space(
+ x, block_size=2, data_format=data_format)
+
return op
for dtype in self.numeric_types:
for data_format in ["NCHW", "NHWC"]:
self._assertOpOutputMatchesExpected(
make_op(data_format),
- nhwc_to_format(np.array([[[[1, 2, 3, 4]]]], dtype=dtype),
- data_format),
- expected=nhwc_to_format(np.array([[[[1], [2]],
- [[3], [4]]]], dtype=dtype),
- data_format))
+ nhwc_to_format(
+ np.array([[[[1, 2, 3, 4]]]], dtype=dtype), data_format),
+ expected=nhwc_to_format(
+ np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
- np.array([[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]],
- dtype=dtype),
+ np.array(
+ [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]], dtype=dtype),
data_format),
expected=nhwc_to_format(
- np.array([[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]],
- dtype=dtype),
- data_format))
+ np.array(
+ [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]],
+ dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
- np.array([[[[1, 2, 3, 4],
- [5, 6, 7, 8]],
- [[9, 10, 11, 12],
- [13, 14, 15, 16]]]], dtype=dtype),
- data_format),
+ np.array(
+ [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
+ [13, 14, 15, 16]]]],
+ dtype=dtype), data_format),
expected=nhwc_to_format(
- np.array([[[[1], [2], [5], [6]],
- [[3], [4], [7], [8]],
- [[9], [10], [13], [14]],
- [[11], [12], [15], [16]]]], dtype=dtype),
- data_format))
+ np.array(
+ [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
+ [[9], [10], [13], [14]], [[11], [12], [15], [16]]]],
+ dtype=dtype), data_format))
def testSpaceToDepth(self):
+
def make_op(data_format):
+
def op(x):
- return array_ops.space_to_depth(x, block_size=2,
- data_format=data_format)
+ return array_ops.space_to_depth(
+ x, block_size=2, data_format=data_format)
+
return op
for dtype in self.numeric_types:
for data_format in ["NCHW", "NHWC"]:
self._assertOpOutputMatchesExpected(
make_op(data_format),
- nhwc_to_format(np.array([[[[1], [2]],
- [[3], [4]]]], dtype=dtype),
- data_format),
- expected=nhwc_to_format(np.array([[[[1, 2, 3, 4]]]], dtype=dtype),
- data_format))
+ nhwc_to_format(
+ np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype), data_format),
+ expected=nhwc_to_format(
+ np.array([[[[1, 2, 3, 4]]]], dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
- nhwc_to_format(np.array([[[[1, 2, 3], [4, 5, 6]],
- [[7, 8, 9], [10, 11, 12]]]], dtype=dtype),
- data_format),
+ nhwc_to_format(
+ np.array(
+ [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]],
+ dtype=dtype), data_format),
expected=nhwc_to_format(
- np.array([[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]],
- dtype=dtype),
+ np.array(
+ [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]], dtype=dtype),
data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
- nhwc_to_format(np.array([[[[1], [2], [5], [6]],
- [[3], [4], [7], [8]],
- [[9], [10], [13], [14]],
- [[11], [12], [15], [16]]]], dtype=dtype),
- data_format),
+ nhwc_to_format(
+ np.array(
+ [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
+ [[9], [10], [13], [14]], [[11], [12], [15], [16]]]],
+ dtype=dtype), data_format),
expected=nhwc_to_format(
- np.array([[[[1, 2, 3, 4],
- [5, 6, 7, 8]],
- [[9, 10, 11, 12],
- [13, 14, 15, 16]]]], dtype=dtype),
- data_format))
+ np.array(
+ [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
+ [13, 14, 15, 16]]]],
+ dtype=dtype), data_format))
def _assertSoftplusMatchesExpected(self, features, dtype):
features = np.array(features, dtype=dtype)
zero = np.asarray(0).astype(dtype)
expected = np.logaddexp(zero, features)
self._assertOpOutputMatchesExpected(
- nn_ops.softplus, features, expected=expected,
- rtol=1e-6,
- atol=9.1e-6)
+ nn_ops.softplus, features, expected=expected, rtol=1e-6, atol=9.1e-6)
def testSoftplus(self):
for dtype in self.float_types:
@@ -824,9 +881,10 @@ class UnaryOpsTest(XLATestCase):
one = dtype(1)
ten = dtype(10)
self._assertSoftplusMatchesExpected([
- log_eps, log_eps - one, log_eps + one, log_eps - ten,
- log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
- -log_eps - ten, -log_eps + ten], dtype)
+ log_eps, log_eps - one, log_eps + one, log_eps - ten, log_eps + ten,
+ -log_eps, -log_eps - one, -log_eps + one, -log_eps - ten,
+ -log_eps + ten
+ ], dtype)
if __name__ == "__main__":
diff --git a/tensorflow/compiler/tests/variable_ops_test.py b/tensorflow/compiler/tests/variable_ops_test.py
index bd616f2a20..dd2c252d38 100644
--- a/tensorflow/compiler/tests/variable_ops_test.py
+++ b/tensorflow/compiler/tests/variable_ops_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
@@ -37,7 +37,7 @@ from tensorflow.python.platform import googletest
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
-class VariableOpsTest(XLATestCase):
+class VariableOpsTest(xla_test.XLATestCase):
"""Test cases for resource variable operators."""
def testOneWriteOneOutput(self):
@@ -435,7 +435,7 @@ class StridedSliceAssignChecker(object):
self.test.assertAllEqual(val, valnp)
-class SliceAssignTest(XLATestCase):
+class SliceAssignTest(xla_test.XLATestCase):
def testSliceAssign(self):
for dtype in self.numeric_types:
diff --git a/tensorflow/compiler/tests/while_test.py b/tensorflow/compiler/tests/while_test.py
index f79eb27435..b637cf31cf 100644
--- a/tensorflow/compiler/tests/while_test.py
+++ b/tensorflow/compiler/tests/while_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
@@ -29,7 +29,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
-class WhileTest(XLATestCase):
+class WhileTest(xla_test.XLATestCase):
def testSingletonLoopHandrolled(self):
# Define a function for the loop body
diff --git a/tensorflow/compiler/tests/xla_device_test.py b/tensorflow/compiler/tests/xla_device_test.py
index f0b010fa67..06d977b93c 100644
--- a/tensorflow/compiler/tests/xla_device_test.py
+++ b/tensorflow/compiler/tests/xla_device_test.py
@@ -20,14 +20,14 @@ from __future__ import print_function
import numpy as np
-from tensorflow.compiler.tests.xla_test import XLATestCase
+from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.platform import test
-class XlaDeviceTest(XLATestCase):
+class XlaDeviceTest(xla_test.XLATestCase):
def testCopies(self):
"""Tests that copies onto and off XLA devices work."""
diff --git a/tensorflow/compiler/tf2xla/BUILD b/tensorflow/compiler/tf2xla/BUILD
index a7b9cc6c81..ff002d15b0 100644
--- a/tensorflow/compiler/tf2xla/BUILD
+++ b/tensorflow/compiler/tf2xla/BUILD
@@ -139,12 +139,14 @@ cc_library(
"xla_op_registry.cc",
"xla_resource.cc",
"xla_cpu_backend.cc",
+ "legacy_flags/backend_registration_flags.cc",
] + if_cuda_is_configured([
"xla_gpu_backend.cc",
]),
hdrs = [
"const_analysis.h",
"graph_compiler.h",
+ "legacy_flags/backend_registration_flags.h",
"xla_compilation_device.h",
"xla_compiler.h",
"xla_context.h",
@@ -162,18 +164,24 @@ cc_library(
":sharding_util",
":tf2xla_util",
"//tensorflow/compiler/tf2xla/lib:util",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/client:client_library",
"//tensorflow/compiler/xla/client:local_client",
+ "//tensorflow/compiler/xla/client/lib:arithmetic",
+ "//tensorflow/compiler/xla/client/lib:constants",
+ "//tensorflow/compiler/xla/client/lib:numeric",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/compiler/xla/client/xla_client:xla_computation",
+ "//tensorflow/compiler/xla/legacy_flags:parse_flags_from_env",
"//tensorflow/core:core_cpu",
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
+ "//tensorflow/core:framework_internal",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"//tensorflow/core:protos_all_cc",
@@ -198,7 +206,7 @@ cc_library(
],
visibility = [":friends"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:core_cpu_internal",
@@ -281,6 +289,7 @@ tf_cc_test(
deps = [
":tf2xla",
":tf2xla_proto",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla/client:client_library",
@@ -323,7 +332,7 @@ tf_cc_test(
"//tensorflow/cc:ops",
"//tensorflow/cc:resource_variable_ops",
"//tensorflow/compiler/tf2xla/kernels:xla_ops",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla/client:client_library",
@@ -360,6 +369,7 @@ tf_cc_test(
],
deps = [
":common",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/core:framework",
"//tensorflow/core:test",
diff --git a/tensorflow/compiler/tf2xla/functionalize_control_flow.cc b/tensorflow/compiler/tf2xla/functionalize_control_flow.cc
index 140dad61d9..6cc95149a1 100644
--- a/tensorflow/compiler/tf2xla/functionalize_control_flow.cc
+++ b/tensorflow/compiler/tf2xla/functionalize_control_flow.cc
@@ -166,6 +166,27 @@ StatusOr<Node*> AddNode(const NodeDef& node_def, Graph* graph) {
return inserted_node;
}
+// Check that the graph has no cycle containing the given node.
+Status CheckNoCycleContains(const Node* node, const int num_nodes) {
+ std::vector<const Node*> ready;
+ ready.push_back(node);
+ std::vector<bool> visited(num_nodes);
+ while (!ready.empty()) {
+ const Node* current_node = ready.back();
+ ready.pop_back();
+ visited[current_node->id()] = true;
+ for (const Edge* out : current_node->out_edges()) {
+ if (out->dst() == node) {
+ return errors::Internal("Detect a cycle: Node \"", node->name(), "\"(",
+ node->def().op(), ") feeds into itself.");
+ } else if (!visited[out->dst()->id()]) {
+ ready.push_back(out->dst());
+ }
+ }
+ }
+ return Status::OK();
+}
+
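The helper above is an iterative depth-first walk over out-edges starting from
'node'; a cycle through 'node' exists iff the walk reaches 'node' again. The
same algorithm as a standalone Python sketch (an illustrative restatement, not
a TensorFlow API):

    def cycle_contains(node, out_edges):
        # out_edges maps a node id to the ids of its successors.
        ready = [node]
        visited = set()
        while ready:
            current = ready.pop()
            visited.add(current)
            for succ in out_edges.get(current, ()):
                if succ == node:
                    return True   # walked back to the start: cycle found
                if succ not in visited:
                    ready.append(succ)
        return False

    assert cycle_contains(0, {0: [1], 1: [0]})      # 0 -> 1 -> 0
    assert not cycle_contains(0, {0: [1], 1: []})   # no path back to 0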
StatusOr<Node*> BuildArgNode(Graph* graph, DataType type, int index) {
NodeDef arg_def;
NodeDefBuilder builder(strings::StrCat(kArgOp, index), kArgOp);
@@ -1407,6 +1428,10 @@ StatusOr<Node*> FunctionalizeCond::ConvertToXlaIf(
TF_RETURN_IF_ERROR(
AddInputEdges(cond_arg_nodes, switch_cluster.predicate_edge, if_node));
TF_RETURN_IF_ERROR(AddOutputEdges(merge_nodes, if_node));
+ // Check that the if_node doesn't feed into itself.
+ TF_RETURN_WITH_CONTEXT_IF_ERROR(
+ CheckNoCycleContains(if_node, graph_->num_node_ids()),
+ "ConvertToXlaIf failed.");
return if_node;
}
@@ -1506,6 +1531,16 @@ Status FunctionalizeControlFlow(const FunctionLibraryDefinition* lookup_library,
worklist.push_back(frame->parent);
}
}
+  // There should be no cycles at this point, since all while loops have been
+  // removed from the graph.
+ // Check that the newly added XlaWhile nodes don't feed into themselves.
+ for (const Node* node : graph->op_nodes()) {
+ if (node->def().op() == "XlaWhile") {
+ TF_RETURN_WITH_CONTEXT_IF_ERROR(
+ CheckNoCycleContains(node, graph->num_node_ids()),
+ "FunctionalizeLoop failed.");
+ }
+ }
  // FunctionalizeControlFlow is invoked for every function, so the loops'
  // bodies and conditionals that were extracted into functions will be handled
diff --git a/tensorflow/compiler/tf2xla/functionalize_control_flow_test.cc b/tensorflow/compiler/tf2xla/functionalize_control_flow_test.cc
index 14977a908a..aae2f8ee5a 100644
--- a/tensorflow/compiler/tf2xla/functionalize_control_flow_test.cc
+++ b/tensorflow/compiler/tf2xla/functionalize_control_flow_test.cc
@@ -29,6 +29,7 @@ limitations under the License.
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph_constructor.h"
#include "tensorflow/core/graph/graph_def_builder.h"
+#include "tensorflow/core/graph/validate.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"
@@ -1012,5 +1013,60 @@ TEST(FunctionalizeControlFlow, Complex) {
}
}
+TEST(FunctionalizeControlFlow, Cycle) {
+ std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
+ // -----------------------------------------------------
+ // | |
+ // | v
+ // less -> switch_1 --> add -> merge_1 -> identity -> switch_2
+ // | ^ |
+ // | | v
+ // --------> one -------------------------> add_2 ---> merge_2
+ {
+ Scope scope = Scope::NewRootScope().ExitOnError();
+
+ auto x = ops::Placeholder(scope.WithOpName("x"), DT_INT32);
+ auto y = ops::Placeholder(scope.WithOpName("y"), DT_INT32);
+ auto less = ops::Less(scope.WithOpName("cond/Less"), y, x);
+ auto switch_1 = ops::Switch(scope.WithOpName("cond/Switch"), x, less);
+ auto two =
+ ops::Const<int32>(scope.WithOpName("cond/two")
+ .WithControlDependencies(switch_1.output_true),
+ 2);
+ auto mul = ops::Multiply(scope.WithOpName("cond/true/mul"),
+ switch_1.output_true, two);
+ auto one =
+ ops::Const<int32>(scope.WithOpName("cond/one")
+ .WithControlDependencies(switch_1.output_false),
+ 1);
+ auto add = ops::Add(scope.WithOpName("cond/false/add"),
+ switch_1.output_false, one);
+
+ auto merge_1 = ops::Merge(scope.WithOpName("cond/Merge"),
+ std::initializer_list<Input>{add, mul});
+ auto identity =
+ ops::Identity(scope.WithOpName("cond/Merge/identity"), merge_1.output);
+ auto switch_2 =
+ ops::Switch(scope.WithOpName("grad/cond/Switch"), identity, less);
+ auto add_2 = ops::Add(scope.WithOpName("cond_2/false/add"),
+ switch_2.output_false, one);
+ auto mul_2 = ops::Multiply(scope.WithOpName("cond_2/true/mul"),
+ switch_2.output_true, two);
+ auto merge_2 = ops::Merge(scope.WithOpName("cond_2/Merge"),
+ std::initializer_list<Input>{add_2, mul_2});
+ TF_ASSERT_OK(scope.ToGraph(graph.get()));
+ }
+  // No cycle exists before control flow is functionalized.
+ TF_EXPECT_OK(graph::ValidateGraphHasNoCycle(*graph));
+ FunctionLibraryDefinition library(OpRegistry::Global(), {});
+ // switch_1 and switch_2 have the same switch depth. They are replaced by a
+ // single XlaIf node during FunctionalizeControlFlow, resulting in a cycle:
+ // less -> XlaIf <--> identity.
+ Status status = FunctionalizeControlFlow(graph.get(), &library);
+ EXPECT_FALSE(status.ok());
+  EXPECT_TRUE(str_util::StrContains(status.error_message(), "Detected a cycle"))
+ << status.error_message();
+}
+
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/graph_compiler.cc b/tensorflow/compiler/tf2xla/graph_compiler.cc
index 212f6f3966..e1cea03865 100644
--- a/tensorflow/compiler/tf2xla/graph_compiler.cc
+++ b/tensorflow/compiler/tf2xla/graph_compiler.cc
@@ -29,6 +29,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/xla/client/client_library.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/executor.h"
#include "tensorflow/core/common_runtime/function.h"
@@ -39,6 +40,7 @@ limitations under the License.
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
+#include "tensorflow/core/graph/validate.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
@@ -87,6 +89,8 @@ Status PrepareArguments(XlaOpKernelContext* ctx, Graph* graph,
}
} // namespace
Status GraphCompiler::Compile() {
+ // Check that the graph has no illegal cycles.
+ TF_RETURN_IF_ERROR(graph::ValidateGraphHasNoCycle(*graph_));
// Maintain a mapping from node id to node outputs.
using NodeOutputs = std::vector<TensorValue>;
std::vector<NodeOutputs> output_registry(graph_->num_node_ids());
@@ -157,9 +161,8 @@ Status GraphCompiler::Compile() {
outputs.resize(n->num_outputs());
for (int o = 0; o < n->num_outputs(); ++o) {
outputs[o] = op_context.release_output(o);
- if (*op_context.is_output_dead() || outputs[o].tensor == nullptr) {
+ if (outputs[o].tensor == nullptr) {
return errors::Internal("Missing xla_context ", o, "-th output from ",
- (*op_context.is_output_dead() ? "(dead)" : ""),
SummarizeNode(*n));
}
}
@@ -227,7 +230,7 @@ Status GraphCompiler::CompileFunctionalNode(Node* n,
XlaContext& context = XlaContext::Get(op_context);
auto* b = context.builder();
- auto output_handle = b->Call(*result.computation, handles);
+ auto output_handle = xla::Call(b, *result.computation, handles);
// The output handle of `Call` computation is a tuple type. Unzip it so
// that it can fit into future computations.
int computation_output = 0;
@@ -236,7 +239,7 @@ Status GraphCompiler::CompileFunctionalNode(Node* n,
xla_op_context.SetConstantOutput(i, result.outputs[i].constant_value);
} else {
xla_op_context.SetOutput(
- i, b->GetTupleElement(output_handle, computation_output));
+ i, xla::GetTupleElement(output_handle, computation_output));
++computation_output;
}
}
diff --git a/tensorflow/compiler/tf2xla/kernels/BUILD b/tensorflow/compiler/tf2xla/kernels/BUILD
index 659ff7321b..d88a34dfd9 100644
--- a/tensorflow/compiler/tf2xla/kernels/BUILD
+++ b/tensorflow/compiler/tf2xla/kernels/BUILD
@@ -58,6 +58,7 @@ tf_kernel_library(
"pack_op.cc",
"pad_op.cc",
"pooling_ops.cc",
+ "qr_op.cc",
"quantize_and_dequantize_op.cc",
"random_ops.cc",
"reduce_window_op.cc",
@@ -82,6 +83,7 @@ tf_kernel_library(
"sort_ops.cc",
"spacetobatch_op.cc",
"spacetodepth_op.cc",
+ "sparse_to_dense_op.cc",
"split_op.cc",
"stack_ops.cc",
"stateless_random_ops.cc",
@@ -106,6 +108,7 @@ tf_kernel_library(
"//tensorflow/compiler/tf2xla:xla_compiler",
"//tensorflow/compiler/tf2xla/lib:batch_dot",
"//tensorflow/compiler/tf2xla/lib:cholesky",
+ "//tensorflow/compiler/tf2xla/lib:qr",
"//tensorflow/compiler/tf2xla/lib:random",
"//tensorflow/compiler/tf2xla/lib:scatter",
"//tensorflow/compiler/tf2xla/lib:triangular_solve",
@@ -113,6 +116,7 @@ tf_kernel_library(
"//tensorflow/compiler/tf2xla/lib:while_loop",
"//tensorflow/compiler/tf2xla/ops:xla_ops",
"//tensorflow/compiler/xla:array4d",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
@@ -120,6 +124,10 @@ tf_kernel_library(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/client:client_library",
"//tensorflow/compiler/xla/client/lib:arithmetic",
+ "//tensorflow/compiler/xla/client/lib:constants",
+ "//tensorflow/compiler/xla/client/lib:math",
+ "//tensorflow/compiler/xla/client/lib:numeric",
+ "//tensorflow/compiler/xla/client/lib:prng",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/core:framework",
"//tensorflow/core:image_ops_op_lib",
@@ -155,7 +163,7 @@ tf_kernel_library(
"//tensorflow/compiler/tf2xla:common",
"//tensorflow/compiler/tf2xla:xla_compiler",
"//tensorflow/compiler/tf2xla/ops:xla_ops",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
@@ -171,7 +179,7 @@ tf_kernel_library(
"//tensorflow/compiler/tf2xla:common",
"//tensorflow/compiler/tf2xla:xla_compiler",
"//tensorflow/compiler/tf2xla/ops:xla_ops",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
@@ -206,6 +214,7 @@ tf_kernel_library(
":index_ops_kernel_argmax_float_2d",
"//tensorflow/compiler/tf2xla:common",
"//tensorflow/compiler/tf2xla:xla_compiler",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla/client:client_library",
"//tensorflow/compiler/xla/client/lib:arithmetic",
diff --git a/tensorflow/compiler/tf2xla/kernels/aggregate_ops.cc b/tensorflow/compiler/tf2xla/kernels/aggregate_ops.cc
index 1e59868621..e335328280 100644
--- a/tensorflow/compiler/tf2xla/kernels/aggregate_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/aggregate_ops.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
namespace tensorflow {
namespace {
@@ -31,7 +32,7 @@ class AddNOp : public XlaOpKernel {
xla::XlaOp sum = ctx->Input(0);
for (int i = 1; i < ctx->num_inputs(); ++i) {
- sum = ctx->builder()->Add(sum, ctx->Input(i));
+ sum = xla::Add(sum, ctx->Input(i));
}
ctx->SetOutput(0, sum);
diff --git a/tensorflow/compiler/tf2xla/kernels/batch_matmul_op.cc b/tensorflow/compiler/tf2xla/kernels/batch_matmul_op.cc
index b0ba25b998..4cfe946b2e 100644
--- a/tensorflow/compiler/tf2xla/kernels/batch_matmul_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/batch_matmul_op.cc
@@ -28,11 +28,10 @@ class BatchMatMulOp : public XlaOpKernel {
}
void Compile(XlaOpKernelContext* ctx) override {
- auto result = BatchDot(ctx->builder(), ctx->Input(0), ctx->Input(1),
+ auto result = BatchDot(ctx->Input(0), ctx->Input(1),
/*transpose_x=*/adj_x_, /*transpose_y=*/adj_y_,
/*conjugate_x=*/adj_x_, /*conjugate_y=*/adj_y_);
- OP_REQUIRES_OK(ctx, result.status());
- ctx->SetOutput(0, result.ValueOrDie());
+ ctx->SetOutput(0, result);
}
private:
diff --git a/tensorflow/compiler/tf2xla/kernels/batch_norm_op.cc b/tensorflow/compiler/tf2xla/kernels/batch_norm_op.cc
index 93fbc40461..c4af79281d 100644
--- a/tensorflow/compiler/tf2xla/kernels/batch_norm_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/batch_norm_op.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
@@ -49,8 +50,6 @@ class FusedBatchNormOp : public XlaOpKernel {
OP_REQUIRES_OK(ctx,
DataTypeToPrimitiveType(ctx->input_type(1), &scale_type));
- xla::XlaBuilder* builder = ctx->builder();
-
xla::XlaOp input = ctx->Input(0);
TensorShape input_shape = ctx->InputShape(0);
@@ -60,30 +59,30 @@ class FusedBatchNormOp : public XlaOpKernel {
// TODO(b/69928690): support mixed precision in the XLA batch normalization
// operators. As a workaround, cast everything to the statistics type (which
// may be more precise than the input type).
- input = builder->ConvertElementType(input, scale_type);
+ input = xla::ConvertElementType(input, scale_type);
if (is_training_) {
- xla::XlaOp output = builder->BatchNormTraining(
+ xla::XlaOp output = xla::BatchNormTraining(
input, ctx->Input(1), ctx->Input(2), epsilon_, feature_index);
// In training mode, outputs the normalized value as well as the
// calculated mean and variance.
- ctx->SetOutput(0, builder->ConvertElementType(
- builder->GetTupleElement(output, 0), input_type));
- ctx->SetOutput(1, builder->GetTupleElement(output, 1));
- ctx->SetOutput(2, builder->GetTupleElement(output, 2));
+ ctx->SetOutput(0, xla::ConvertElementType(xla::GetTupleElement(output, 0),
+ input_type));
+ ctx->SetOutput(1, xla::GetTupleElement(output, 1));
+ ctx->SetOutput(2, xla::GetTupleElement(output, 2));
// Output 3 and 4 for "FusedBatchNorm" are currently marked as "reserved
// space 1 & 2". They are used to pass the per-batch mean and
// variance to the gradient. Here we maintain the same behavior by setting
// them to the mean and variance calculated by BatchNormTraining.
- ctx->SetOutput(3, builder->GetTupleElement(output, 1));
- ctx->SetOutput(4, builder->GetTupleElement(output, 2));
+ ctx->SetOutput(3, xla::GetTupleElement(output, 1));
+ ctx->SetOutput(4, xla::GetTupleElement(output, 2));
} else {
- xla::XlaOp output = builder->BatchNormInference(
+ xla::XlaOp output = xla::BatchNormInference(
input, ctx->Input(1), ctx->Input(2), ctx->Input(3), ctx->Input(4),
epsilon_, feature_index);
- ctx->SetOutput(0, builder->ConvertElementType(output, input_type));
+ ctx->SetOutput(0, xla::ConvertElementType(output, input_type));
// Directly send input to output as mean and variance in inference mode.
ctx->SetOutput(1, ctx->Input(3));
ctx->SetOutput(2, ctx->Input(4));
@@ -144,12 +143,12 @@ class FusedBatchNormGradOp : public XlaOpKernel {
xla::XlaOp offset_backprop;
if (is_training_) {
xla::XlaOp output =
- b->BatchNormGrad(activations, scale, mean, var, grad_backprop,
- epsilon_, feature_index);
+ xla::BatchNormGrad(activations, scale, mean, var, grad_backprop,
+ epsilon_, feature_index);
- x_backprop = b->GetTupleElement(output, 0);
- scale_backprop = b->GetTupleElement(output, 1);
- offset_backprop = b->GetTupleElement(output, 2);
+ x_backprop = xla::GetTupleElement(output, 0);
+ scale_backprop = xla::GetTupleElement(output, 1);
+ offset_backprop = xla::GetTupleElement(output, 2);
} else {
// Reduce over all dimensions except the feature dim.
std::vector<int64> reduction_dims(input_dims - 1);
@@ -166,35 +165,35 @@ class FusedBatchNormGradOp : public XlaOpKernel {
auto converted =
XlaHelpers::ConvertElementType(b, grad_backprop, accumulation_type);
auto reduce =
- b->Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
- *ctx->GetOrCreateAdd(accumulation_type), reduction_dims);
+ xla::Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
+ *ctx->GetOrCreateAdd(accumulation_type), reduction_dims);
offset_backprop = XlaHelpers::ConvertElementType(b, reduce, scale_dtype);
// scratch1 = rsqrt(pop_var + epsilon)
auto neg_half = XlaHelpers::FloatLiteral(b, scale_dtype, -0.5);
- auto scratch1 =
- b->Pow(b->Add(var, b->ConstantR0<float>(epsilon_)), neg_half);
+ auto scratch1 = xla::Pow(
+ xla::Add(var, xla::ConstantR0<float>(b, epsilon_)), neg_half);
// scratch2 = sum(y_backprop * (x - mean))
auto mul =
- b->Mul(grad_backprop, b->Sub(activations, mean, {feature_index}));
+ xla::Mul(grad_backprop, xla::Sub(activations, mean, {feature_index}));
converted = XlaHelpers::ConvertElementType(b, mul, accumulation_type);
reduce =
- b->Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
- *ctx->GetOrCreateAdd(accumulation_type), reduction_dims);
+ xla::Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
+ *ctx->GetOrCreateAdd(accumulation_type), reduction_dims);
auto scratch2 = XlaHelpers::ConvertElementType(b, reduce, scale_dtype);
x_backprop =
- b->Mul(grad_backprop, b->Mul(scratch1, scale), {feature_index});
- scale_backprop = b->Mul(scratch1, scratch2);
+ xla::Mul(grad_backprop, xla::Mul(scratch1, scale), {feature_index});
+ scale_backprop = xla::Mul(scratch1, scratch2);
}
ctx->SetOutput(0,
XlaHelpers::ConvertElementType(b, x_backprop, input_dtype));
ctx->SetOutput(1, scale_backprop);
ctx->SetOutput(2, offset_backprop);
- ctx->SetConstantOutput(3, Tensor(scale_dtype, {}));
- ctx->SetConstantOutput(4, Tensor(scale_dtype, {}));
+ ctx->SetConstantOutput(3, Tensor());
+ ctx->SetConstantOutput(4, Tensor());
}
private:
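In the inference branch above the population statistics are constants, so the
gradient reduces to three reductions over the non-feature dimensions. A NumPy
restatement of those formulas (a sketch assuming a channels-last layout, not
the actual kernel):

    import numpy as np

    def fused_batch_norm_grad_inference(dy, x, scale, mean, var, eps):
        axes = tuple(range(x.ndim - 1))              # all non-feature dims
        offset_backprop = dy.sum(axis=axes)          # sum(dy)
        scratch1 = (var + eps) ** -0.5               # rsqrt(pop_var + epsilon)
        scratch2 = (dy * (x - mean)).sum(axis=axes)  # sum(dy * (x - mean))
        x_backprop = dy * (scratch1 * scale)
        scale_backprop = scratch1 * scratch2
        return x_backprop, scale_backprop, offset_backprop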
diff --git a/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc b/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc
index 642278ab99..26130fd9e7 100644
--- a/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
namespace tensorflow {
namespace {
@@ -45,7 +46,6 @@ void BatchToSpace(XlaOpKernelContext* ctx, const xla::XlaOp& input,
", 2] instead of ",
xla::ShapeUtil::HumanString(crops.shape())));
- xla::XlaBuilder* b = ctx->builder();
const int64 batch_size = input_shape[0];
// Compute the product of the block_shape values.
@@ -72,7 +72,7 @@ void BatchToSpace(XlaOpKernelContext* ctx, const xla::XlaOp& input,
reshaped_shape[block_rank] = batch_size / block_num_elems;
std::copy(input_shape.begin() + 1, input_shape.end(),
reshaped_shape.begin() + block_rank + 1);
- xla::XlaOp reshaped = b->Reshape(input, reshaped_shape);
+ xla::XlaOp reshaped = xla::Reshape(input, reshaped_shape);
// 2. Permute dimensions of `reshaped` to produce `permuted` of shape
// [batch / prod(block_shape),
@@ -90,7 +90,7 @@ void BatchToSpace(XlaOpKernelContext* ctx, const xla::XlaOp& input,
}
std::iota(permutation.begin() + 1 + block_rank * 2, permutation.end(),
1 + block_rank * 2);
- xla::XlaOp permuted = b->Transpose(reshaped, permutation);
+ xla::XlaOp permuted = xla::Transpose(reshaped, permutation);
// 3. Reshape `permuted` to produce `reshaped_permuted` of shape
// [batch / prod(block_shape),
@@ -110,7 +110,8 @@ void BatchToSpace(XlaOpKernelContext* ctx, const xla::XlaOp& input,
std::copy(remainder_shape.begin(), remainder_shape.end(),
reshaped_permuted_shape.begin() + 1 + block_rank);
- xla::XlaOp reshaped_permuted = b->Reshape(permuted, reshaped_permuted_shape);
+ xla::XlaOp reshaped_permuted =
+ xla::Reshape(permuted, reshaped_permuted_shape);
// 4. Crop the start and end of dimensions `[1, ..., M]` of
// `reshaped_permuted` according to `crops` to produce the output of shape:
@@ -138,7 +139,7 @@ void BatchToSpace(XlaOpKernelContext* ctx, const xla::XlaOp& input,
" end: ", crop_end, " size ", reshaped_permuted_shape[1 + i]));
}
xla::XlaOp output =
- b->Slice(reshaped_permuted, start_indices, end_indices, strides);
+ xla::Slice(reshaped_permuted, start_indices, end_indices, strides);
ctx->SetOutput(0, output);
}
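The four numbered steps above are plain reshape/transpose/slice operations; a
NumPy sketch for a single spatial dimension (a hypothetical helper with
block_rank = 1, not the kernel itself):

    import numpy as np

    def batch_to_space_1d(x, block, crop_start, crop_end):
        b, w, c = x.shape                                    # b % block == 0
        reshaped = x.reshape(block, b // block, w, c)        # step 1
        permuted = reshaped.transpose(1, 2, 0, 3)            # step 2
        merged = permuted.reshape(b // block, w * block, c)  # step 3
        return merged[:, crop_start:w * block - crop_end, :]  # step 4

    x = np.arange(8).reshape(2, 2, 2)
    print(batch_to_space_1d(x, block=2, crop_start=0, crop_end=0).shape)
    # (1, 4, 2)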
diff --git a/tensorflow/compiler/tf2xla/kernels/bcast_ops.cc b/tensorflow/compiler/tf2xla/kernels/bcast_ops.cc
index ee2c920453..ba3b1c9dab 100644
--- a/tensorflow/compiler/tf2xla/kernels/bcast_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/bcast_ops.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/bcast.h"
diff --git a/tensorflow/compiler/tf2xla/kernels/bias_ops.cc b/tensorflow/compiler/tf2xla/kernels/bias_ops.cc
index 9d677f4266..e9b2c0b16d 100644
--- a/tensorflow/compiler/tf2xla/kernels/bias_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/bias_ops.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/tensor_format.h"
@@ -60,8 +61,7 @@ class BiasOp : public XlaOpKernel {
"of the input tensor: ",
bias_shape.DebugString(), " vs. ", input_shape.DebugString()));
- xla::XlaOp result =
- ctx->builder()->Add(ctx->Input(0), ctx->Input(1), {feature_dim});
+ xla::XlaOp result = xla::Add(ctx->Input(0), ctx->Input(1), {feature_dim});
ctx->SetOutput(0, result);
}
@@ -109,8 +109,8 @@ class BiasAddGradOp : public XlaOpKernel {
auto converted =
XlaHelpers::ConvertElementType(b, ctx->Input(0), accumulation_type);
auto reduce =
- b->Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
- *ctx->GetOrCreateAdd(accumulation_type), reduce_dims);
+ xla::Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
+ *ctx->GetOrCreateAdd(accumulation_type), reduce_dims);
ctx->SetOutput(0, XlaHelpers::ConvertElementType(b, reduce, input_type(0)));
}
diff --git a/tensorflow/compiler/tf2xla/kernels/binary_ops.cc b/tensorflow/compiler/tf2xla/kernels/binary_ops.cc
index fee939bdea..d6d4ae8937 100644
--- a/tensorflow/compiler/tf2xla/kernels/binary_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/binary_ops.cc
@@ -41,18 +41,19 @@ namespace {
const BCast& broadcast_helper, \
const std::vector<int64>& extend_dimensions) override { \
xla::XlaBuilder* b = ctx->builder(); \
+ (void)b; \
return HLO; \
} \
}; \
REGISTER_XLA_OP(Name(#NAME), NAME##Op)
-XLA_MAKE_BINARY(Add, b->Add(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(Sub, b->Sub(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(Mul, b->Mul(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(Div, b->Div(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(Add, xla::Add(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(Sub, xla::Sub(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(Mul, xla::Mul(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(Div, xla::Div(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(Atan2, b->Atan2(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(Complex, b->Complex(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(Atan2, xla::Atan2(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(Complex, xla::Complex(lhs, rhs, extend_dimensions));
// Implementation of FloorDiv. Pseudo-code:
// if ((x < 0) != (y < 0)) {
@@ -67,13 +68,13 @@ static xla::XlaOp FloorDivImpl(xla::XlaBuilder* b, DataType dtype, xla::XlaOp x,
std::tie(x, y) = XlaBinaryOp::Broadcast(b, x, y, broadcast_helper);
auto zero = XlaHelpers::Zero(b, dtype);
auto one = XlaHelpers::One(b, dtype);
- auto different_sign = b->Ne(b->Lt(x, zero), b->Lt(y, zero));
- auto abs_x = b->Abs(x);
- auto abs_y = b->Abs(y);
- auto t = b->Neg(b->Sub(b->Add(abs_x, abs_y), one));
- auto result = b->Select(different_sign, b->Div(t, abs_y), b->Div(x, y));
+ auto different_sign = xla::Ne(xla::Lt(x, zero), xla::Lt(y, zero));
+ auto abs_x = xla::Abs(x);
+ auto abs_y = xla::Abs(y);
+ auto t = xla::Neg(xla::Sub(xla::Add(abs_x, abs_y), one));
+ auto result = xla::Select(different_sign, xla::Div(t, abs_y), xla::Div(x, y));
if (DataTypeIsFloating(dtype)) {
- result = b->Floor(result);
+ result = xla::Floor(result);
}
return result;
}
@@ -87,76 +88,78 @@ static xla::XlaOp FloorModImpl(xla::XlaBuilder* b, DataType dtype, xla::XlaOp x,
xla::XlaOp y, const BCast& broadcast_helper) {
std::tie(x, y) = XlaBinaryOp::Broadcast(b, x, y, broadcast_helper);
auto zero = XlaHelpers::Zero(b, dtype);
- auto same_sign = b->Eq(b->Lt(x, zero), b->Lt(y, zero));
- auto trunc_mod = b->Rem(x, y);
- return b->Select(same_sign, trunc_mod, b->Rem(b->Add(trunc_mod, y), y));
+ auto same_sign = xla::Eq(xla::Lt(x, zero), xla::Lt(y, zero));
+ auto trunc_mod = xla::Rem(x, y);
+ return xla::Select(same_sign, trunc_mod, xla::Rem(xla::Add(trunc_mod, y), y));
}
XLA_MAKE_BINARY(FloorMod,
FloorModImpl(b, input_type(0), lhs, rhs, broadcast_helper));
-XLA_MAKE_BINARY(BitwiseAnd, b->And(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(BitwiseOr, b->Or(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(BitwiseXor, b->Xor(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(BitwiseAnd, xla::And(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(BitwiseOr, xla::Or(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(BitwiseXor, xla::Xor(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(LeftShift, b->ShiftLeft(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(LeftShift, xla::ShiftLeft(lhs, rhs, extend_dimensions));
XLA_MAKE_BINARY(RightShift,
(DataTypeIsUnsigned(ctx->input_type(0))
- ? b->ShiftRightLogical(lhs, rhs, extend_dimensions)
- : b->ShiftRightArithmetic(lhs, rhs, extend_dimensions)));
-
-XLA_MAKE_BINARY(LogicalAnd, b->And(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(LogicalOr, b->Or(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(Mod, b->Rem(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(Maximum, b->Max(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(Minimum, b->Min(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(RealDiv, b->Div(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(ReciprocalGrad, b->Neg(b->Mul(rhs, b->Mul(lhs, lhs))));
+ ? xla::ShiftRightLogical(lhs, rhs, extend_dimensions)
+ : xla::ShiftRightArithmetic(lhs, rhs, extend_dimensions)));
+
+XLA_MAKE_BINARY(LogicalAnd, xla::And(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(LogicalOr, xla::Or(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(Mod, xla::Rem(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(Maximum, xla::Max(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(Minimum, xla::Min(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(RealDiv, xla::Div(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(ReciprocalGrad, xla::Neg(xla::Mul(rhs, xla::Mul(lhs, lhs))));
XLA_MAKE_BINARY(
RsqrtGrad,
- b->Mul(b->Pow(lhs, XlaHelpers::IntegerLiteral(b, input_type(0), 3)),
- b->Div(rhs, XlaHelpers::IntegerLiteral(b, input_type(0), -2)),
- extend_dimensions));
-XLA_MAKE_BINARY(SqrtGrad,
- b->Div(b->Mul(rhs,
- XlaHelpers::FloatLiteral(b, input_type(0), 0.5)),
- lhs, extend_dimensions));
+ xla::Mul(xla::Pow(lhs, XlaHelpers::IntegerLiteral(b, input_type(0), 3)),
+ xla::Div(rhs, XlaHelpers::IntegerLiteral(b, input_type(0), -2)),
+ extend_dimensions));
+XLA_MAKE_BINARY(
+ SqrtGrad,
+ xla::Div(xla::Mul(rhs, XlaHelpers::FloatLiteral(b, input_type(0), 0.5)),
+ lhs, extend_dimensions));
static xla::XlaOp Square(xla::XlaBuilder* builder, const xla::XlaOp& x) {
- return builder->Mul(x, x);
+ return xla::Mul(x, x);
}
XLA_MAKE_BINARY(SquaredDifference,
- Square(b, b->Sub(lhs, rhs, extend_dimensions)));
+ Square(b, xla::Sub(lhs, rhs, extend_dimensions)));
-XLA_MAKE_BINARY(TruncateDiv, b->Div(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(TruncateMod, b->Rem(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(TruncateDiv, xla::Div(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(TruncateMod, xla::Rem(lhs, rhs, extend_dimensions));
// Comparison ops
-XLA_MAKE_BINARY(Equal, b->Eq(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(NotEqual, b->Ne(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(Greater, b->Gt(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(GreaterEqual, b->Ge(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(Less, b->Lt(lhs, rhs, extend_dimensions));
-XLA_MAKE_BINARY(LessEqual, b->Le(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(Equal, xla::Eq(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(NotEqual, xla::Ne(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(Greater, xla::Gt(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(GreaterEqual, xla::Ge(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(Less, xla::Lt(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(LessEqual, xla::Le(lhs, rhs, extend_dimensions));
// Non-linear ops
XLA_MAKE_BINARY(SigmoidGrad,
- b->Mul(b->Mul(rhs, lhs),
- b->Sub(XlaHelpers::One(b, input_type(0)), lhs)));
+ xla::Mul(xla::Mul(rhs, lhs),
+ xla::Sub(XlaHelpers::One(b, input_type(0)), lhs)));
XLA_MAKE_BINARY(SoftplusGrad,
- b->Div(lhs, b->Add(b->Exp(b->Neg(rhs)),
- XlaHelpers::One(b, input_type(1)))));
+ xla::Div(lhs, xla::Add(xla::Exp(xla::Neg(rhs)),
+ XlaHelpers::One(b, input_type(1)))));
// softsigngrad(gradients, features) = gradients / (1 + abs(features)) ** 2
XLA_MAKE_BINARY(SoftsignGrad,
- b->Div(lhs, Square(b, b->Add(XlaHelpers::One(b, input_type(0)),
- b->Abs(rhs)))));
+ xla::Div(lhs,
+ Square(b, xla::Add(XlaHelpers::One(b, input_type(0)),
+ xla::Abs(rhs)))));
-XLA_MAKE_BINARY(TanhGrad, b->Mul(rhs, b->Sub(XlaHelpers::One(b, input_type(0)),
- b->Mul(lhs, lhs))));
+XLA_MAKE_BINARY(TanhGrad,
+ xla::Mul(rhs, xla::Sub(XlaHelpers::One(b, input_type(0)),
+ xla::Mul(lhs, lhs))));
-XLA_MAKE_BINARY(Pow, b->Pow(lhs, rhs, extend_dimensions));
+XLA_MAKE_BINARY(Pow, xla::Pow(lhs, rhs, extend_dimensions));
#undef XLA_MAKE_BINARY
@@ -169,12 +172,13 @@ class ApproximateEqualOp : public XlaOpKernel {
// Returns true element-wise where |x - y| < tolerance_.
void Compile(XlaOpKernelContext* ctx) override {
xla::XlaBuilder* b = ctx->builder();
- auto abs = b->Abs(b->Sub(ctx->Input(0), ctx->Input(1)));
+ auto abs = xla::Abs(xla::Sub(ctx->Input(0), ctx->Input(1)));
auto abs_shape = b->GetShape(abs);
OP_REQUIRES_OK(ctx, abs_shape.status());
auto abs_type = abs_shape.ValueOrDie().element_type();
- auto result = b->Lt(
- abs, b->ConvertElementType(b->ConstantR0<float>(tolerance_), abs_type));
+ auto result =
+ xla::Lt(abs, xla::ConvertElementType(
+ xla::ConstantR0<float>(b, tolerance_), abs_type));
ctx->SetOutput(0, result);
}
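The (void)b; statement added above exists because most HLO expressions no longer reference the builder after this migration. Roughly, one instantiation of the macro now expands to something like the following simplified sketch (the real macro also pastes the class and constructor names and passes the broadcast shapes, elided here):

    class AddOp : public XlaBinaryOp {
     public:
      xla::XlaOp Computation(
          XlaOpKernelContext* ctx, const xla::XlaOp& lhs, /* ... */
          const xla::XlaOp& rhs, /* ... */ const BCast& broadcast_helper,
          const std::vector<int64>& extend_dimensions) override {
        xla::XlaBuilder* b = ctx->builder();
        (void)b;  // Silences -Wunused-variable for ops that never touch b.
        return xla::Add(lhs, rhs, extend_dimensions);
      }
    };
    REGISTER_XLA_OP(Name("Add"), AddOp);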
diff --git a/tensorflow/compiler/tf2xla/kernels/bucketize_op.cc b/tensorflow/compiler/tf2xla/kernels/bucketize_op.cc
index ca9a6b4068..efbdb76eaa 100644
--- a/tensorflow/compiler/tf2xla/kernels/bucketize_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/bucketize_op.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
@@ -36,22 +37,22 @@ class BucketizeOp : public XlaOpKernel {
const DataType dtype = context->input_type(0);
xla::XlaOp input = context->Input(0);
- xla::XlaOp boundaries = builder->ConstantR1<float>(boundaries_);
+ xla::XlaOp boundaries = xla::ConstantR1<float>(builder, boundaries_);
// TODO(phawkins): the following behavior matches the behavior of the core
// Bucketize kernel. However, comparing an int32 or int64 against float may
// lead to inaccurate bucketing due to rounding.
if (dtype == DT_DOUBLE) {
- input = builder->ConvertElementType(input, xla::F64);
- boundaries = builder->ConvertElementType(boundaries, xla::F64);
+ input = xla::ConvertElementType(input, xla::F64);
+ boundaries = xla::ConvertElementType(boundaries, xla::F64);
} else {
- input = builder->ConvertElementType(input, xla::F32);
+ input = xla::ConvertElementType(input, xla::F32);
}
- xla::XlaOp comparison = builder->ConvertElementType(
- builder->Ge(builder->Broadcast(input, {1}), boundaries,
- /*broadcast_dimensions=*/{0}),
- xla::S32);
- xla::XlaOp buckets = builder->Reduce(
- comparison, /*init_value=*/builder->ConstantR0<int32>(0),
+ xla::XlaOp comparison =
+ xla::ConvertElementType(xla::Ge(xla::Broadcast(input, {1}), boundaries,
+ /*broadcast_dimensions=*/{0}),
+ xla::S32);
+ xla::XlaOp buckets = xla::Reduce(
+ comparison, /*init_value=*/xla::ConstantR0<int32>(builder, 0),
/*computation=*/xla::CreateScalarAddComputation(xla::S32, builder),
/*dimensions_to_reduce=*/{0});
context->SetOutput(0, buckets);
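A worked example of the comparison-plus-reduce above, with hypothetical values boundaries_ = {0, 10, 100} and a single input element 50:

    // Broadcast(input, {1}) -> shape [1, 1]; Ge(..., boundaries, {0})
    // broadcasts to shape [3, 1]:   [50 >= 0, 50 >= 10, 50 >= 100]
    //                             = [true,    true,     false]
    // ConvertElementType(..., S32) = [1, 1, 0]
    // Reduce(+, init 0, dim 0)     = 2,  i.e. 50 lands in bucket 2.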
diff --git a/tensorflow/compiler/tf2xla/kernels/cast_op.cc b/tensorflow/compiler/tf2xla/kernels/cast_op.cc
index e9d98c7685..62eebf762b 100644
--- a/tensorflow/compiler/tf2xla/kernels/cast_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/cast_op.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/primitive_util.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
@@ -40,14 +41,14 @@ class CastOp : public XlaOpKernel {
if (src_dtype_ == dst_dtype_) {
output = input;
} else if (dst_dtype_ == DT_BOOL) {
- output = builder->Ne(input, XlaHelpers::Zero(builder, src_dtype_));
+ output = xla::Ne(input, XlaHelpers::Zero(builder, src_dtype_));
} else if (xla::primitive_util::IsComplexType(src_type_) &&
!xla::primitive_util::IsComplexType(dst_type_)) {
// As in cast_op.h, we replicate the numpy behavior of truncating the
// imaginary part.
- output = builder->ConvertElementType(builder->Real(input), dst_type_);
+ output = xla::ConvertElementType(xla::Real(input), dst_type_);
} else {
- output = builder->ConvertElementType(input, dst_type_);
+ output = xla::ConvertElementType(input, dst_type_);
}
ctx->SetOutput(0, output);
@@ -72,7 +73,6 @@ class BitcastOp : public XlaOpKernel {
}
void Compile(XlaOpKernelContext* ctx) override {
- xla::XlaBuilder* builder = ctx->builder();
xla::XlaOp input = ctx->Input(0);
xla::XlaOp output;
@@ -92,7 +92,7 @@ class BitcastOp : public XlaOpKernel {
xla::primitive_util::BitWidth(dst_type_),
errors::Unimplemented(
"Only bitcasts between equally sized types supported."));
- output = builder->BitcastConvertType(input, dst_type_);
+ output = xla::BitcastConvertType(input, dst_type_);
}
ctx->SetOutput(0, output);
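A concrete illustration of the BitcastConvertType path above (values are an assumed example): a bitcast between equally sized types reinterprets the bytes rather than converting the value.

    // F32 -> S32 (both 32 bits wide) is accepted:
    //   1.0f has bit pattern 0x3F800000, so the output is 1065353216.
    // F32 -> S64 would fail the BitWidth check above with Unimplemented.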
diff --git a/tensorflow/compiler/tf2xla/kernels/categorical_op.cc b/tensorflow/compiler/tf2xla/kernels/categorical_op.cc
index 835a7f5689..1784e712b5 100644
--- a/tensorflow/compiler/tf2xla/kernels/categorical_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/categorical_op.cc
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
@@ -65,24 +66,22 @@ class CategoricalOp : public XlaOpKernel {
DataTypeToPrimitiveType(input_type(0), &uniform_xla_type));
xla::Shape uniform_shape =
xla::ShapeUtil::MakeShape(uniform_xla_type, uniform_shape_array);
- auto uniforms = builder->RngUniform(
- XlaHelpers::Zero(builder, input_type(0)),
- XlaHelpers::One(builder, input_type(0)), uniform_shape);
+ auto uniforms =
+ xla::RngUniform(XlaHelpers::Zero(builder, input_type(0)),
+ XlaHelpers::One(builder, input_type(0)), uniform_shape);
// Use the Gumbel-max trick to generate categorical samples.
// See:
// https://hips.seas.harvard.edu/blog/2013/04/06/the-gumbel-max-trick-for-discrete-distributions/
// TODO(b/68769470): Switch to using a cumulative sum approach.
- auto softmax_entries =
- builder->Sub(logits, builder->Log(builder->Neg(builder->Log(uniforms))),
- /*broadcast_dimensions=*/{0, 2});
-
- TensorShape softmax_shape(uniform_shape_array);
- xla::XlaOp argmax;
- OP_REQUIRES_OK(
- ctx,
- XlaHelpers::ArgMax(builder, ctx, softmax_entries, softmax_shape,
- input_type(0), output_type(0), /*axis=*/2, &argmax));
+ auto softmax_entries = xla::Sub(logits, xla::Log(-xla::Log(uniforms)),
+ /*broadcast_dimensions=*/{0, 2});
+
+ xla::PrimitiveType xla_output_type;
+ OP_REQUIRES_OK(ctx,
+ DataTypeToPrimitiveType(output_type(0), &xla_output_type));
+ xla::XlaOp argmax =
+ XlaHelpers::ArgMax(softmax_entries, xla_output_type, /*axis=*/2);
ctx->SetOutput(0, argmax);
}
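A minimal sketch of the Gumbel-max sampling identity the comment above relies on, using the same free functions; the shapes, axis, and S64 output type are hypothetical:

    // argmax_i(logits_i - log(-log(U_i))) with U_i ~ Uniform(0, 1) is an
    // exact sample from the categorical distribution softmax(logits).
    xla::XlaOp u = xla::RngUniform(xla::ConstantR0<float>(builder, 0.0f),
                                   xla::ConstantR0<float>(builder, 1.0f),
                                   uniform_shape);
    xla::XlaOp perturbed = xla::Sub(logits, xla::Log(-xla::Log(u)),
                                    /*broadcast_dimensions=*/{0, 2});
    xla::XlaOp sample = XlaHelpers::ArgMax(perturbed, xla::S64, /*axis=*/2);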
diff --git a/tensorflow/compiler/tf2xla/kernels/cholesky_op.cc b/tensorflow/compiler/tf2xla/kernels/cholesky_op.cc
index fe6651793d..9fcbc86adc 100644
--- a/tensorflow/compiler/tf2xla/kernels/cholesky_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/cholesky_op.cc
@@ -24,12 +24,7 @@ class CholeskyOp : public XlaOpKernel {
public:
explicit CholeskyOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
- auto result = Cholesky(ctx->builder(), ctx->Input(0));
- if (!result.ok()) {
- ctx->SetStatus(result.status());
- return;
- }
- ctx->SetOutput(0, result.ValueOrDie());
+ ctx->SetOutput(0, Cholesky(ctx->Input(0)));
}
};
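Presumably this simplification is safe because, in the new client API, op-construction errors are recorded on the XlaBuilder rather than returned eagerly; a sketch of the assumed flow:

    // Cholesky() now returns a plain XlaOp; an invalid operand does not
    // fail here but marks the builder as errored.
    //   xla::XlaOp l = Cholesky(ctx->Input(0));
    //   ctx->SetOutput(0, l);
    //   // The first recorded error surfaces later, e.g. from
    //   // XlaBuilder::Build().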
diff --git a/tensorflow/compiler/tf2xla/kernels/clip_by_value_op.cc b/tensorflow/compiler/tf2xla/kernels/clip_by_value_op.cc
index a00bc912f9..4e6d33304c 100644
--- a/tensorflow/compiler/tf2xla/kernels/clip_by_value_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/clip_by_value_op.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
@@ -29,7 +30,6 @@ class ClipByValueOp : public XlaOpKernel {
const TensorShape min_shape = ctx->InputShape(1);
const TensorShape max_shape = ctx->InputShape(2);
- xla::XlaBuilder* builder = ctx->builder();
auto input = ctx->Input(0);
auto min = ctx->Input(1);
auto max = ctx->Input(2);
@@ -45,13 +45,13 @@ class ClipByValueOp : public XlaOpKernel {
if (shape != min_shape) {
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(min_shape), shape_error());
- min = builder->Broadcast(min, shape.dim_sizes());
+ min = xla::Broadcast(min, shape.dim_sizes());
}
if (shape != max_shape) {
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(max_shape), shape_error());
- max = builder->Broadcast(max, shape.dim_sizes());
+ max = xla::Broadcast(max, shape.dim_sizes());
}
- ctx->SetOutput(0, builder->Clamp(min, input, max));
+ ctx->SetOutput(0, xla::Clamp(min, input, max));
}
};
diff --git a/tensorflow/compiler/tf2xla/kernels/concat_op.cc b/tensorflow/compiler/tf2xla/kernels/concat_op.cc
index 78285affa1..e3a32a5c0e 100644
--- a/tensorflow/compiler/tf2xla/kernels/concat_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/concat_op.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
@@ -88,7 +89,7 @@ class ConcatBaseOp : public XlaOpKernel {
"] = ", in_shape.DebugString()));
if (in_shape.dims() == 0) {
// Inputs that come in as scalars must be reshaped to 1-vectors.
- input_data.push_back(ctx->builder()->Reshape(handle, {1}));
+ input_data.push_back(xla::Reshape(handle, {1}));
} else {
input_data.push_back(handle);
}
@@ -96,7 +97,7 @@ class ConcatBaseOp : public XlaOpKernel {
}
VLOG(1) << "Concat dim " << concat_dim << " equivalent to " << axis;
- ctx->SetOutput(0, ctx->builder()->ConcatInDim(input_data, axis));
+ ctx->SetOutput(0, xla::ConcatInDim(ctx->builder(), input_data, axis));
}
private:
diff --git a/tensorflow/compiler/tf2xla/kernels/const_op.cc b/tensorflow/compiler/tf2xla/kernels/const_op.cc
index 59d06c654d..f4360d8c3f 100644
--- a/tensorflow/compiler/tf2xla/kernels/const_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/const_op.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/tensor.pb.h"
@@ -53,41 +54,41 @@ class ConstOp : public XlaOpKernel {
switch (proto_.dtype()) {
case DT_BOOL:
if (proto_.bool_val_size() == 1) {
- ctx->SetOutput(0,
- b->Broadcast(b->ConstantR0<bool>(proto_.bool_val(0)),
- shape.dim_sizes()));
+ ctx->SetOutput(
+ 0, xla::Broadcast(xla::ConstantR0<bool>(b, proto_.bool_val(0)),
+ shape.dim_sizes()));
return;
}
break;
case DT_FLOAT:
if (proto_.float_val_size() == 1) {
- ctx->SetOutput(
- 0, b->Broadcast(b->ConstantR0<float>(proto_.float_val(0)),
- shape.dim_sizes()));
+ ctx->SetOutput(0, xla::Broadcast(xla::ConstantR0<float>(
+ b, proto_.float_val(0)),
+ shape.dim_sizes()));
return;
}
break;
case DT_DOUBLE:
if (proto_.double_val_size() == 1) {
- ctx->SetOutput(
- 0, b->Broadcast(b->ConstantR0<double>(proto_.double_val(0)),
- shape.dim_sizes()));
+ ctx->SetOutput(0, xla::Broadcast(xla::ConstantR0<double>(
+ b, proto_.double_val(0)),
+ shape.dim_sizes()));
return;
}
break;
case DT_INT32:
if (proto_.int_val_size() == 1) {
- ctx->SetOutput(0,
- b->Broadcast(b->ConstantR0<int32>(proto_.int_val(0)),
- shape.dim_sizes()));
+ ctx->SetOutput(
+ 0, xla::Broadcast(xla::ConstantR0<int32>(b, proto_.int_val(0)),
+ shape.dim_sizes()));
return;
}
break;
case DT_INT64:
if (proto_.int64_val_size() == 1) {
- ctx->SetOutput(
- 0, b->Broadcast(b->ConstantR0<int64>(proto_.int64_val(0)),
- shape.dim_sizes()));
+ ctx->SetOutput(0, xla::Broadcast(xla::ConstantR0<int64>(
+ b, proto_.int64_val(0)),
+ shape.dim_sizes()));
return;
}
break;
diff --git a/tensorflow/compiler/tf2xla/kernels/conv_ops.cc b/tensorflow/compiler/tf2xla/kernels/conv_ops.cc
index 627bad12f3..48ac4867ed 100644
--- a/tensorflow/compiler/tf2xla/kernels/conv_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/conv_ops.cc
@@ -18,6 +18,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -51,8 +53,8 @@ xla::XlaOp CreateExpandedZero(const TensorShape& filter_shape, DataType dtype,
xla::XlaBuilder* builder) {
TensorShape expanded_filter_shape =
ExpandedFilterShapeForDepthwiseConvolution(filter_shape);
- return builder->Broadcast(XlaHelpers::Zero(builder, dtype),
- expanded_filter_shape.dim_sizes());
+ return xla::Broadcast(XlaHelpers::Zero(builder, dtype),
+ expanded_filter_shape.dim_sizes());
}
// Create a mask for depthwise convolution that will make a normal convolution
@@ -95,32 +97,27 @@ xla::XlaOp CreateExpandedFilterMask(const TensorShape& filter_shape,
// Create an M-sized linspace and an M*N-sized linspace that will be
// broadcasted into perpendicular dimensions and compared.
- xla::XlaOp input_feature_iota;
- // DT_INT32 Iota will always return status::OK().
- TF_CHECK_OK(XlaHelpers::Iota(builder, DataType::DT_INT32, input_feature,
- &input_feature_iota));
- xla::XlaOp expanded_feature_iota;
- TF_CHECK_OK(XlaHelpers::Iota(builder, DataType::DT_INT32,
- input_feature * depthwise_multiplier,
- &expanded_feature_iota));
+ xla::XlaOp input_feature_iota = xla::Iota(builder, xla::S32, input_feature);
+ xla::XlaOp expanded_feature_iota =
+ xla::Iota(builder, xla::S32, input_feature * depthwise_multiplier);
// Divide the M*N sized linspace by the depthwise_multiplier to create
// [0 0 1 1 2 2] in the example in the function comment.
expanded_feature_iota =
- builder->Div(expanded_feature_iota,
- XlaHelpers::IntegerLiteral(builder, DataType::DT_INT32,
- depthwise_multiplier));
+ xla::Div(expanded_feature_iota,
+ XlaHelpers::IntegerLiteral(builder, DataType::DT_INT32,
+ depthwise_multiplier));
// Broadcast the M*N linspace to [H, W, ..., M, M*N].
auto expanded_feature_broadcast_dims = expanded_filter_shape.dim_sizes();
expanded_feature_broadcast_dims.pop_back();
- auto broadcasted_expanded_feature_iota = builder->Broadcast(
- expanded_feature_iota, expanded_feature_broadcast_dims);
+ auto broadcasted_expanded_feature_iota =
+ xla::Broadcast(expanded_feature_iota, expanded_feature_broadcast_dims);
// Compare the broadcasted linspace to the input feature linspace in the
// input feature dimension to create a diagonal predicate.
- return builder->Eq(broadcasted_expanded_feature_iota, input_feature_iota,
- {expanded_filter_shape.dims() - 2});
+ return xla::Eq(broadcasted_expanded_feature_iota, input_feature_iota,
+ {expanded_filter_shape.dims() - 2});
}
// Expands a filter of shape [H, W, ..., M, N] to [H, W, ..., M, M*N] by adding
@@ -142,16 +139,16 @@ xla::XlaOp ExpandFilterForDepthwiseConvolution(const TensorShape& filter_shape,
implicit_broadcast_filter_shape.dims() - 1,
depthwise_multiplier * input_feature);
auto implicit_broadcast_filter =
- builder->Reshape(filter, implicit_broadcast_filter_shape.dim_sizes());
+ xla::Reshape(filter, implicit_broadcast_filter_shape.dim_sizes());
// Broadcast the filter to [H, W, ..., M, M*N].
auto expanded_zero = CreateExpandedZero(filter_shape, dtype, builder);
- auto expanded_filter = builder->Add(implicit_broadcast_filter, expanded_zero);
+ auto expanded_filter = xla::Add(implicit_broadcast_filter, expanded_zero);
// If the filter mask is set, choose the broadcasted filter, otherwise,
// choose zero.
- return builder->Select(CreateExpandedFilterMask(filter_shape, builder),
- expanded_filter, expanded_zero);
+ return xla::Select(CreateExpandedFilterMask(filter_shape, builder),
+ expanded_filter, expanded_zero);
}
// Inverse of ExpandFilterForDepthwiseConvolution.
@@ -162,17 +159,17 @@ xla::XlaOp ContractFilterForDepthwiseBackprop(XlaOpKernelContext* ctx,
xla::XlaBuilder* builder) {
TensorShape expanded_filter_shape =
ExpandedFilterShapeForDepthwiseConvolution(filter_shape);
- auto masked_expanded_filter = builder->Select(
+ auto masked_expanded_filter = xla::Select(
CreateExpandedFilterMask(filter_shape, builder), filter_backprop,
CreateExpandedZero(filter_shape, dtype, builder));
- return builder->Reshape(
+ return xla::Reshape(
// This reduce does not need inputs to be converted with
// XlaHelpers::SumAccumulationType() since the ExpandedFilterMask with
// ExpandedZero guarantees that only one element is non zero, so there
// cannot be accumulated precision error.
- builder->Reduce(masked_expanded_filter, XlaHelpers::Zero(builder, dtype),
- *ctx->GetOrCreateAdd(dtype),
- {expanded_filter_shape.dims() - 2}),
+ xla::Reduce(masked_expanded_filter, XlaHelpers::Zero(builder, dtype),
+ *ctx->GetOrCreateAdd(dtype),
+ {expanded_filter_shape.dims() - 2}),
filter_shape.dim_sizes());
}
@@ -289,8 +286,8 @@ class ConvOp : public XlaOpKernel {
}
xla::XlaOp conv =
- b->ConvGeneralDilated(ctx->Input(0), filter, window_strides, padding,
- lhs_dilation, rhs_dilation, dims);
+ xla::ConvGeneralDilated(ctx->Input(0), filter, window_strides, padding,
+ lhs_dilation, rhs_dilation, dims);
ctx->SetOutput(0, conv);
}
@@ -435,11 +432,11 @@ class ConvBackpropInputOp : public XlaOpKernel {
}
// Mirror the filter in the spatial dimensions.
- xla::XlaOp mirrored_weights = b->Rev(filter, kernel_spatial_dims);
+ xla::XlaOp mirrored_weights = xla::Rev(filter, kernel_spatial_dims);
// activation gradients
// = gradients (with padding and dilation) <conv> mirrored_weights
- xla::XlaOp in_backprop = b->ConvGeneralDilated(
+ xla::XlaOp in_backprop = xla::ConvGeneralDilated(
out_backprop, mirrored_weights, /*window_strides=*/ones, padding,
lhs_dilation, rhs_dilation, dnums);
@@ -638,8 +635,8 @@ class ConvBackpropFilterOp : public XlaOpKernel {
// This is done by specifying the window dilation factors in the
// convolution HLO below.
auto filter_backprop =
- b->ConvGeneralDilated(activations, gradients, window_strides, padding,
- /*lhs_dilation=*/ones, rhs_dilation, dnums);
+ xla::ConvGeneralDilated(activations, gradients, window_strides, padding,
+ /*lhs_dilation=*/ones, rhs_dilation, dnums);
if (depthwise_) {
filter_backprop = ContractFilterForDepthwiseBackprop(
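A worked example of the iota arithmetic in CreateExpandedFilterMask above, assuming M = 3 input features and depthwise_multiplier N = 2:

    xla::XlaOp input_feature_iota = xla::Iota(builder, xla::S32, 3);
    // -> [0, 1, 2]
    xla::XlaOp expanded_feature_iota = xla::Iota(builder, xla::S32, 6);
    // -> [0, 1, 2, 3, 4, 5]
    expanded_feature_iota = xla::Div(
        expanded_feature_iota,
        XlaHelpers::IntegerLiteral(builder, DataType::DT_INT32, 2));
    // -> [0, 0, 1, 1, 2, 2]; comparing this against input_feature_iota in
    // the second-to-last dimension keeps expanded filter column j*N+k only
    // for input feature j, i.e. the block-diagonal depthwise layout.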
diff --git a/tensorflow/compiler/tf2xla/kernels/cross_op.cc b/tensorflow/compiler/tf2xla/kernels/cross_op.cc
index 7fcd4170fb..500a564f3f 100644
--- a/tensorflow/compiler/tf2xla/kernels/cross_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/cross_op.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
namespace tensorflow {
namespace {
@@ -58,21 +59,21 @@ class CrossOp : public XlaOpKernel {
auto in1 = ctx->Input(1);
starts.back() = 0;
limits.back() = 1;
- auto u1 = b->Slice(in0, starts, limits, strides);
- auto v1 = b->Slice(in1, starts, limits, strides);
+ auto u1 = xla::Slice(in0, starts, limits, strides);
+ auto v1 = xla::Slice(in1, starts, limits, strides);
starts.back() = 1;
limits.back() = 2;
- auto u2 = b->Slice(in0, starts, limits, strides);
- auto v2 = b->Slice(in1, starts, limits, strides);
+ auto u2 = xla::Slice(in0, starts, limits, strides);
+ auto v2 = xla::Slice(in1, starts, limits, strides);
starts.back() = 2;
limits.back() = 3;
- auto u3 = b->Slice(in0, starts, limits, strides);
- auto v3 = b->Slice(in1, starts, limits, strides);
+ auto u3 = xla::Slice(in0, starts, limits, strides);
+ auto v3 = xla::Slice(in1, starts, limits, strides);
- auto s1 = b->Sub(b->Mul(u2, v3), b->Mul(u3, v2));
- auto s2 = b->Sub(b->Mul(u3, v1), b->Mul(u1, v3));
- auto s3 = b->Sub(b->Mul(u1, v2), b->Mul(u2, v1));
- auto output = b->ConcatInDim({s1, s2, s3}, in0_shape.dims() - 1);
+ auto s1 = xla::Sub(xla::Mul(u2, v3), xla::Mul(u3, v2));
+ auto s2 = xla::Sub(xla::Mul(u3, v1), xla::Mul(u1, v3));
+ auto s3 = xla::Sub(xla::Mul(u1, v2), xla::Mul(u2, v1));
+ auto output = xla::ConcatInDim(b, {s1, s2, s3}, in0_shape.dims() - 1);
ctx->SetOutput(0, output);
}
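For reference, the slices and products above compute the standard cross-product components along the last dimension:

    // s1 = u2*v3 - u3*v2
    // s2 = u3*v1 - u1*v3
    // s3 = u1*v2 - u2*v1
    // which ConcatInDim then reassembles into a length-3 last dimension.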
diff --git a/tensorflow/compiler/tf2xla/kernels/cwise_ops.cc b/tensorflow/compiler/tf2xla/kernels/cwise_ops.cc
index 01aa1a83e7..9ff3e02228 100644
--- a/tensorflow/compiler/tf2xla/kernels/cwise_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/cwise_ops.cc
@@ -96,18 +96,16 @@ void XlaBinaryOp::Compile(XlaOpKernelContext* ctx) {
// First reshape the inputs, which should be a metadata-only
// operation since we are flattening the dimensions in order.
- auto lhs_shaped = builder->Reshape(lhs, broadcast_helper.x_reshape());
- auto rhs_shaped = builder->Reshape(rhs, broadcast_helper.y_reshape());
+ auto lhs_shaped = xla::Reshape(lhs, broadcast_helper.x_reshape());
+ auto rhs_shaped = xla::Reshape(rhs, broadcast_helper.y_reshape());
// Next broadcast the necessary input dimensions. We rely on the
// XLA optimizer to be smart about the fact that we are asking
// it to broadcast size 1 on some of these dimensions, to avoid
// adding complexity to this code.
- auto lhs_broadcast =
- builder->Broadcast(lhs_shaped, broadcast_helper.x_bcast());
+ auto lhs_broadcast = xla::Broadcast(lhs_shaped, broadcast_helper.x_bcast());
int lhs_size = broadcast_helper.x_bcast().size();
- auto rhs_broadcast =
- builder->Broadcast(rhs_shaped, broadcast_helper.y_bcast());
+ auto rhs_broadcast = xla::Broadcast(rhs_shaped, broadcast_helper.y_bcast());
int rhs_size = broadcast_helper.y_bcast().size();
// Now reshape them to the correct output shape. After the
@@ -122,15 +120,15 @@ void XlaBinaryOp::Compile(XlaOpKernelContext* ctx) {
lhs_reorder.push_back(i);
lhs_reorder.push_back(i + lhs_size);
}
- auto lhs_output = builder->Reshape(lhs_broadcast, lhs_reorder,
- broadcast_helper.output_shape());
+ auto lhs_output =
+ xla::Reshape(lhs_broadcast, lhs_reorder, broadcast_helper.output_shape());
std::vector<int64> rhs_reorder;
for (int i = 0; i < rhs_size; ++i) {
rhs_reorder.push_back(i);
rhs_reorder.push_back(i + rhs_size);
}
- auto rhs_output = builder->Reshape(rhs_broadcast, rhs_reorder,
- broadcast_helper.output_shape());
+ auto rhs_output =
+ xla::Reshape(rhs_broadcast, rhs_reorder, broadcast_helper.output_shape());
return {lhs_output, rhs_output};
}
diff --git a/tensorflow/compiler/tf2xla/kernels/depthtospace_op.cc b/tensorflow/compiler/tf2xla/kernels/depthtospace_op.cc
index 23243f6246..f314920025 100644
--- a/tensorflow/compiler/tf2xla/kernels/depthtospace_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/depthtospace_op.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
@@ -50,7 +51,6 @@ class DepthToSpaceOp : public XlaOpKernel {
const gtl::InlinedVector<int64, 4> input_shape =
input_tensor_shape.dim_sizes();
- xla::XlaBuilder* b = ctx->builder();
xla::XlaOp input = ctx->Input(0);
int feature_dim = GetTensorFeatureDimIndex(input_rank, data_format_);
@@ -130,7 +130,7 @@ class DepthToSpaceOp : public XlaOpKernel {
") is not divisible by square of the block size (",
block_size_, ")"));
- xla::XlaOp reshaped = b->Reshape(input, reshaped_shape);
+ xla::XlaOp reshaped = xla::Reshape(input, reshaped_shape);
// 2. Permute dimensions of `reshaped` to produce
// `permuted_reshaped` of shape:
@@ -141,7 +141,7 @@ class DepthToSpaceOp : public XlaOpKernel {
// input_shape[2],
// block_size_,
// depth / (block_size_ * block_size_)]
- xla::XlaOp permuted_reshaped = b->Transpose(reshaped, transpose_order);
+ xla::XlaOp permuted_reshaped = xla::Transpose(reshaped, transpose_order);
// 3. Reshape `permuted_reshaped` to flatten `block_shape` into the
// batch dimension, producing an output tensor of shape:
@@ -151,7 +151,7 @@ class DepthToSpaceOp : public XlaOpKernel {
// input_shape[2] * block_size_,
// depth / (block_size_ * block_size_)]
//
- xla::XlaOp output = b->Reshape(permuted_reshaped, output_shape);
+ xla::XlaOp output = xla::Reshape(permuted_reshaped, output_shape);
ctx->SetOutput(0, output);
}
diff --git a/tensorflow/compiler/tf2xla/kernels/diag_op.cc b/tensorflow/compiler/tf2xla/kernels/diag_op.cc
index 931705ba83..6dec414c53 100644
--- a/tensorflow/compiler/tf2xla/kernels/diag_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/diag_op.cc
@@ -18,6 +18,9 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -25,10 +28,10 @@ namespace tensorflow {
namespace {
// Create a diagonal / batch diagonal matrix with 'input' on the diagonal.
-xla::StatusOr<xla::XlaOp> CreateDiagonal(
- const xla::XlaOp& input, int64 last_dim_size,
- tensorflow::gtl::ArraySlice<int64> other_dims, XlaOpKernelContext* ctx,
- xla::XlaBuilder* builder) {
+xla::XlaOp CreateDiagonal(xla::XlaOp input, int64 last_dim_size,
+ gtl::ArraySlice<int64> other_dims,
+ xla::PrimitiveType element_type) {
+ xla::XlaBuilder* builder = input.builder();
// Create two matrices that have the following forms, and compare them:
//
// [[0, 0, 0, 0] [[0, 1, 2, 3]
@@ -38,16 +41,14 @@ xla::StatusOr<xla::XlaOp> CreateDiagonal(
//
// This produces a predicate matrix of the right size, with "true" on the
// diagonal.
- xla::XlaOp iota;
- TF_RETURN_IF_ERROR(
- XlaHelpers::Iota(builder, DataType::DT_INT32, last_dim_size, &iota));
- xla::XlaOp iota_broadcast = builder->Broadcast(iota, {last_dim_size});
- xla::XlaOp mask = builder->Eq(iota_broadcast, iota, {0});
+ xla::XlaOp iota = xla::Iota(builder, xla::S32, last_dim_size);
+ xla::XlaOp iota_broadcast = xla::Broadcast(iota, {last_dim_size});
+ xla::XlaOp mask = xla::Eq(iota_broadcast, iota, {0});
// If this is a batched diagonal, broadcast the mask across the other
// dimensions.
if (!other_dims.empty()) {
- mask = builder->Broadcast(mask, other_dims);
+ mask = xla::Broadcast(mask, other_dims);
}
// Broadcast the input, and then use the mask computed above to select the
@@ -64,18 +65,15 @@ xla::StatusOr<xla::XlaOp> CreateDiagonal(
std::vector<int64> broadcast_dims(other_dims.begin(), other_dims.end());
broadcast_dims.push_back(1LL);
broadcast_dims.push_back(last_dim_size);
- xla::XlaOp input_broadcast = builder->Reshape(input, broadcast_dims);
+ xla::XlaOp input_broadcast = xla::Reshape(input, broadcast_dims);
broadcast_dims[broadcast_dims.size() - 2] = last_dim_size;
- xla::PrimitiveType element_type;
- TF_RETURN_IF_ERROR(
- DataTypeToPrimitiveType(ctx->input_type(0), &element_type));
auto broadcast_shape =
xla::ShapeUtil::MakeShape(element_type, broadcast_dims);
- xla::XlaOp zeros = Zeros(builder, broadcast_shape);
+ xla::XlaOp zeros = xla::Zeros(builder, broadcast_shape);
- input_broadcast = builder->Add(input_broadcast, zeros);
- return builder->Select(mask, input_broadcast, zeros);
+ input_broadcast = xla::Add(input_broadcast, zeros);
+ return xla::Select(mask, input_broadcast, zeros);
}
class DiagOp : public XlaOpKernel {
@@ -83,8 +81,6 @@ class DiagOp : public XlaOpKernel {
explicit DiagOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
- xla::XlaBuilder* builder = ctx->builder();
-
OP_REQUIRES(ctx, ctx->num_inputs() >= 1,
errors::InvalidArgument("Diag op must have at an input"));
const TensorShape input_shape = ctx->InputShape(0);
@@ -104,19 +100,17 @@ class DiagOp : public XlaOpKernel {
// Flattens the input to 1D.
int64 size = input_shape.num_elements();
- input = builder->Reshape(input, {size});
+ input = xla::Reshape(input, {size});
// Create an R2 with the R1 diagonal.
- auto diag_or_status =
- CreateDiagonal(input, size, /*other_dims=*/{}, ctx, builder);
- OP_REQUIRES_OK(ctx, diag_or_status.status());
- xla::XlaOp diag = diag_or_status.ValueOrDie();
+ xla::XlaOp diag =
+ CreateDiagonal(input, size, /*other_dims=*/{}, ctx->input_xla_type(0));
// Reshapes to the final shape.
std::vector<int64> new_dims(dims.size() * 2);
std::copy(dims.begin(), dims.end(), new_dims.begin());
std::copy(dims.begin(), dims.end(), new_dims.begin() + dims.size());
- diag = builder->Reshape(diag, new_dims);
+ diag = xla::Reshape(diag, new_dims);
ctx->SetOutput(0, diag);
}
@@ -170,21 +164,21 @@ class DiagPartOp : public XlaOpKernel {
// Flattens the input to 1D.
int64 size = input_shape.num_elements();
- diag = builder->Reshape(diag, {size});
+ diag = xla::Reshape(diag, {size});
// Adds padding after the last element of 'new_size'.
xla::PaddingConfig config;
auto* dim = config.add_dimensions();
dim->set_edge_padding_high(new_size);
auto zero = XlaHelpers::Zero(builder, input_type(0));
- diag = builder->Pad(diag, zero, config);
+ diag = xla::Pad(diag, zero, config);
// Reshapes so the diagonal is now in the first column.
- diag = builder->Reshape(diag, {new_size, new_size + 1});
+ diag = xla::Reshape(diag, {new_size, new_size + 1});
// Slices out the first column and reshapes to the final shape.
- diag = builder->Slice(diag, {0, 0}, {new_size, 1}, {1, 1});
- diag = builder->Reshape(diag, new_dims);
+ diag = xla::Slice(diag, {0, 0}, {new_size, 1}, {1, 1});
+ diag = xla::Reshape(diag, new_dims);
ctx->SetOutput(0, diag);
}
@@ -197,8 +191,6 @@ class MatrixDiagOp : public XlaOpKernel {
explicit MatrixDiagOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
- xla::XlaBuilder* builder = ctx->builder();
-
OP_REQUIRES(ctx, ctx->num_inputs() >= 1,
errors::InvalidArgument("MatrixDiag op must have at an input"));
const TensorShape input_shape = ctx->InputShape(0);
@@ -208,17 +200,15 @@ class MatrixDiagOp : public XlaOpKernel {
errors::InvalidArgument("Expected 1 <= dims, got shape ",
input_shape.DebugString()));
- xla::XlaOp diag = ctx->Input(0);
int last_dim = dims.size() - 1;
int64 last_dim_size = input_shape.dim_size(last_dim);
tensorflow::gtl::ArraySlice<int64> other_dims(dims);
other_dims.pop_back();
- auto diag_or_status =
- CreateDiagonal(diag, last_dim_size, other_dims, ctx, builder);
- OP_REQUIRES_OK(ctx, diag_or_status.status());
- diag = diag_or_status.ValueOrDie();
+ xla::XlaOp input = ctx->Input(0);
+ xla::XlaOp diag = CreateDiagonal(input, last_dim_size, other_dims,
+ ctx->input_xla_type(0));
ctx->SetOutput(0, diag);
}
};
@@ -265,7 +255,7 @@ class MatrixDiagPartOp : public XlaOpKernel {
// Collapses the last two dimensions.
std::vector<int64> flattened_dims(dims.begin(), dims.end() - 1);
flattened_dims.back() *= dims.back();
- diag = builder->Reshape(diag, flattened_dims);
+ diag = xla::Reshape(diag, flattened_dims);
// Slices or pads the last dimension to 'target_size'.
int64 actual_size = flattened_dims.back();
@@ -276,13 +266,13 @@ class MatrixDiagPartOp : public XlaOpKernel {
auto* dim = config.mutable_dimensions(flattened_dims.size() - 1);
dim->set_edge_padding_high(target_size - actual_size);
auto zero = XlaHelpers::Zero(builder, input_type(0));
- diag = builder->Pad(diag, zero, config);
+ diag = xla::Pad(diag, zero, config);
} else if (actual_size > target_size) {
std::vector<int64> start(flattened_dims.size(), 0);
std::vector<int64> limits(flattened_dims.begin(), flattened_dims.end());
std::vector<int64> strides(flattened_dims.size(), 1);
limits[flattened_dims.size() - 1] = target_size;
- diag = builder->Slice(diag, start, limits, strides);
+ diag = xla::Slice(diag, start, limits, strides);
}
// Reshape so the target values are in the first position of the last
@@ -290,18 +280,18 @@ class MatrixDiagPartOp : public XlaOpKernel {
std::vector<int64> unflattened_dims(dims.begin(), dims.end());
dims[last_dim - 1] = smaller_dim_size;
dims[last_dim] = last_dim_size + 1;
- diag = builder->Reshape(diag, dims);
+ diag = xla::Reshape(diag, dims);
// Slices out the first column and reshapes to the final shape.
std::vector<int64> start(dims.size(), 0);
std::vector<int64> limits(dims.begin(), dims.end());
std::vector<int64> strides(dims.size(), 1);
limits[last_dim] = 1;
- diag = builder->Slice(diag, start, limits, strides);
+ diag = xla::Slice(diag, start, limits, strides);
// Collapses away the last dimension.
dims.pop_back();
- diag = builder->Reshape(diag, dims);
+ diag = xla::Reshape(diag, dims);
ctx->SetOutput(0, diag);
}
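A small illustration of CreateDiagonal for input = [a, b, c], so last_dim_size = 3 with no batch dims:

    // iota           = [0, 1, 2]
    // iota_broadcast = Broadcast(iota, {3})  -> [[0, 1, 2],
    //                                            [0, 1, 2],
    //                                            [0, 1, 2]]
    // mask           = Eq(iota_broadcast, iota, {0})
    //                -> true exactly where row index == column index.
    // Reshape(input, {1, 3}) added to zeros([3, 3]) broadcasts [a, b, c]
    // into every row, and Select(mask, rows, zeros) yields
    //   [[a, 0, 0],
    //    [0, b, 0],
    //    [0, 0, c]].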
diff --git a/tensorflow/compiler/tf2xla/kernels/dynamic_slice_ops.cc b/tensorflow/compiler/tf2xla/kernels/dynamic_slice_ops.cc
index 0419de78b2..3b86ea34c9 100644
--- a/tensorflow/compiler/tf2xla/kernels/dynamic_slice_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/dynamic_slice_ops.cc
@@ -57,8 +57,8 @@ class DynamicUpdateSliceOp : public XlaOpKernel {
input_shape.DebugString(), "; update shape is ",
update_shape.DebugString()));
- xla::XlaOp result = ctx->builder()->DynamicUpdateSlice(
- ctx->Input(0), ctx->Input(1), ctx->Input(2));
+ xla::XlaOp result =
+ xla::DynamicUpdateSlice(ctx->Input(0), ctx->Input(1), ctx->Input(2));
ctx->SetOutput(0, result);
}
};
diff --git a/tensorflow/compiler/tf2xla/kernels/dynamic_stitch_op.cc b/tensorflow/compiler/tf2xla/kernels/dynamic_stitch_op.cc
index dd4a169087..958231505b 100644
--- a/tensorflow/compiler/tf2xla/kernels/dynamic_stitch_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/dynamic_stitch_op.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
@@ -150,8 +151,7 @@ class DynamicStitchOp : public XlaOpKernel {
if (new_shape == data_shapes[input_num]) {
input[input_num] = handle;
} else {
- input[input_num] =
- ctx->builder()->Reshape(handle, new_shape.dim_sizes());
+ input[input_num] = xla::Reshape(handle, new_shape.dim_sizes());
}
}
@@ -175,10 +175,10 @@ class DynamicStitchOp : public XlaOpKernel {
// And place it in the concat list in the place indicated by
// the index.
to_concat[index_num] =
- ctx->builder()->Slice(expression, slice_start, slice_limit, stride);
+ xla::Slice(expression, slice_start, slice_limit, stride);
}
- ctx->SetOutput(0, ctx->builder()->ConcatInDim(to_concat, 0));
+ ctx->SetOutput(0, xla::ConcatInDim(ctx->builder(), to_concat, 0));
}
private:
diff --git a/tensorflow/compiler/tf2xla/kernels/elu_op.cc b/tensorflow/compiler/tf2xla/kernels/elu_op.cc
index 493781a1e6..81f42e504e 100644
--- a/tensorflow/compiler/tf2xla/kernels/elu_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/elu_op.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/no_op.h"
@@ -34,9 +34,9 @@ class EluOp : public XlaOpKernel {
void Compile(XlaOpKernelContext* ctx) override {
xla::XlaBuilder* b = ctx->builder();
const auto zero = XlaHelpers::Zero(b, input_type(0));
- const auto pred = b->Gt(ctx->Input(0), zero);
- const auto expm1 = b->Expm1(ctx->Input(0));
- ctx->SetOutput(0, b->Select(pred, ctx->Input(0), expm1));
+ const auto pred = xla::Gt(ctx->Input(0), zero);
+ const auto expm1 = xla::Expm1(ctx->Input(0));
+ ctx->SetOutput(0, xla::Select(pred, ctx->Input(0), expm1));
}
};
@@ -51,9 +51,9 @@ class EluGradOp : public XlaOpKernel {
const auto one = XlaHelpers::One(b, input_type(0));
const auto grad = ctx->Input(0);
const auto activation = ctx->Input(1);
- const auto exp_grad = b->Mul(grad, b->Add(activation, one));
- const auto pred = b->Gt(activation, zero);
- ctx->SetOutput(0, b->Select(pred, grad, exp_grad));
+ const auto exp_grad = xla::Mul(grad, xla::Add(activation, one));
+ const auto pred = xla::Gt(activation, zero);
+ ctx->SetOutput(0, xla::Select(pred, grad, exp_grad));
}
};
@@ -71,10 +71,10 @@ class SeluOp : public XlaOpKernel {
1.0507009873554804934193349852946);
const auto scale_alpha = XlaHelpers::FloatLiteral(b, input_type(0),
1.7580993408473768599402175208123);
- const auto pred = b->Gt(ctx->Input(0), zero);
- const auto expm1 = b->Expm1(ctx->Input(0));
- ctx->SetOutput(0, b->Select(pred, b->Mul(scale, ctx->Input(0)),
- b->Mul(scale_alpha, expm1)));
+ const auto pred = xla::Gt(ctx->Input(0), zero);
+ const auto expm1 = xla::Expm1(ctx->Input(0));
+ ctx->SetOutput(0, xla::Select(pred, xla::Mul(scale, ctx->Input(0)),
+ xla::Mul(scale_alpha, expm1)));
}
};
@@ -92,10 +92,10 @@ class SeluGradOp : public XlaOpKernel {
1.7580993408473768599402175208123);
const auto grad = ctx->Input(0);
const auto activation = ctx->Input(1);
- const auto lin_grad = b->Mul(grad, scale);
- const auto exp_grad = b->Mul(grad, b->Add(activation, scale_alpha));
- const auto pred = b->Gt(activation, zero);
- ctx->SetOutput(0, b->Select(pred, lin_grad, exp_grad));
+ const auto lin_grad = xla::Mul(grad, scale);
+ const auto exp_grad = xla::Mul(grad, xla::Add(activation, scale_alpha));
+ const auto pred = xla::Gt(activation, zero);
+ ctx->SetOutput(0, xla::Select(pred, lin_grad, exp_grad));
}
};
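The closed forms behind the four kernels above, for reference:

    // elu(x)   = x            if x > 0, else exp(x) - 1
    // elu'(x)  = 1            if x > 0, else exp(x) = elu(x) + 1,
    //            which is why EluGradOp multiplies grad by (activation + 1).
    // selu(x)  = scale * x    if x > 0, else scale_alpha * (exp(x) - 1)
    // selu'(x) = scale        if x > 0, else activation + scale_alpha.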
diff --git a/tensorflow/compiler/tf2xla/kernels/extract_image_patches_op.cc b/tensorflow/compiler/tf2xla/kernels/extract_image_patches_op.cc
index 6df01cabbf..65d42a302f 100644
--- a/tensorflow/compiler/tf2xla/kernels/extract_image_patches_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/extract_image_patches_op.cc
@@ -17,6 +17,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
@@ -110,13 +112,11 @@ class ExtractImagePatchesOp : public XlaOpKernel {
// Builds an identity matrix as a broadcast equality of iotas.
// iota = np.arange(np.prod(ksize) * depth)
// filter = np.equal(np.reshape(iota, [-1, 1]), iota).astype(np.float32)
- xla::XlaOp iota;
- TF_CHECK_OK(XlaHelpers::Iota(builder, DataType::DT_INT32,
- kernel_size * depth, &iota));
+ xla::XlaOp iota = xla::Iota(builder, xla::S32, kernel_size * depth);
- auto lhs = builder->Reshape(iota, lhs_shape);
- auto filter = builder->ConvertElementType(
- builder->Eq(lhs, iota, {num_spatial_dims + 1}), type);
+ auto lhs = xla::Reshape(iota, lhs_shape);
+ auto filter = xla::ConvertElementType(
+ xla::Eq(lhs, iota, {num_spatial_dims + 1}), type);
xla::ConvolutionDimensionNumbers dims;
std::vector<int64> window_strides(num_spatial_dims);
@@ -148,8 +148,8 @@ class ExtractImagePatchesOp : public XlaOpKernel {
}
xla::XlaOp conv =
- builder->ConvGeneralDilated(ctx->Input(0), filter, window_strides,
- padding, lhs_dilation, rhs_dilation, dims);
+ xla::ConvGeneralDilated(ctx->Input(0), filter, window_strides, padding,
+ lhs_dilation, rhs_dilation, dims);
ctx->SetOutput(0, conv);
}
diff --git a/tensorflow/compiler/tf2xla/kernels/fake_quantize_ops.cc b/tensorflow/compiler/tf2xla/kernels/fake_quantize_ops.cc
index 8f0de0a524..2fd1a34741 100644
--- a/tensorflow/compiler/tf2xla/kernels/fake_quantize_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/fake_quantize_ops.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
@@ -49,20 +50,20 @@ void XlaNudge(xla::XlaBuilder* b, const DataType data_type,
const float quant_min_value, const float quant_max_value,
xla::XlaOp* nudged_min, xla::XlaOp* nudged_max,
xla::XlaOp* scale) {
- *scale = b->Div(b->Sub(max, min),
- XlaHelpers::FloatLiteral(b, data_type,
- quant_max_value - quant_min_value));
+ *scale = xla::Div(xla::Sub(max, min),
+ XlaHelpers::FloatLiteral(
+ b, data_type, quant_max_value - quant_min_value));
xla::XlaOp quant_min =
XlaHelpers::FloatLiteral(b, data_type, quant_min_value);
- xla::XlaOp zero_point_from_min = b->Sub(quant_min, b->Div(min, *scale));
+ xla::XlaOp zero_point_from_min = xla::Sub(quant_min, xla::Div(min, *scale));
xla::XlaOp quant_max =
XlaHelpers::FloatLiteral(b, data_type, quant_max_value);
xla::XlaOp nudged_zero_point =
- b->Select(b->Le(zero_point_from_min, quant_min), quant_min,
- b->Select(b->Ge(zero_point_from_min, quant_max), quant_max,
- b->Round(zero_point_from_min)));
- *nudged_min = b->Mul(b->Sub(quant_min, nudged_zero_point), *scale);
- *nudged_max = b->Mul(b->Sub(quant_max, nudged_zero_point), *scale);
+ xla::Select(xla::Le(zero_point_from_min, quant_min), quant_min,
+ xla::Select(xla::Ge(zero_point_from_min, quant_max),
+ quant_max, xla::Round(zero_point_from_min)));
+ *nudged_min = xla::Mul(xla::Sub(quant_min, nudged_zero_point), *scale);
+ *nudged_max = xla::Mul(xla::Sub(quant_max, nudged_zero_point), *scale);
}
xla::XlaOp Quantize(xla::XlaBuilder* b, const xla::XlaOp& input,
@@ -71,14 +72,14 @@ xla::XlaOp Quantize(xla::XlaBuilder* b, const xla::XlaOp& input,
const xla::XlaOp& nudged_input_max,
const xla::XlaOp& input_scale) {
xla::XlaOp one = XlaHelpers::FloatLiteral(b, data_type, 1.0f);
- xla::XlaOp inv_scale = b->Div(one, input_scale);
+ xla::XlaOp inv_scale = xla::Div(one, input_scale);
xla::XlaOp half = XlaHelpers::FloatLiteral(b, data_type, 0.5f);
- xla::XlaOp clamped = b->Clamp(nudged_input_min, input, nudged_input_max);
- xla::XlaOp clamped_shifted = b->Sub(clamped, nudged_input_min);
+ xla::XlaOp clamped = xla::Clamp(nudged_input_min, input, nudged_input_max);
+ xla::XlaOp clamped_shifted = xla::Sub(clamped, nudged_input_min);
xla::XlaOp rounded =
- b->Floor(b->Add(b->Mul(clamped_shifted, inv_scale), half));
- return b->Add(b->Mul(rounded, input_scale), nudged_input_min);
+ xla::Floor(xla::Add(xla::Mul(clamped_shifted, inv_scale), half));
+ return xla::Add(xla::Mul(rounded, input_scale), nudged_input_min);
}
class FakeQuantWithMinMaxArgsOp : public XlaOpKernel {
@@ -163,11 +164,11 @@ class FakeQuantWithMinMaxArgsGradOp : public XlaOpKernel {
xla::XlaOp nudged_input_max =
XlaHelpers::FloatLiteral(b, data_type, nudged_input_max_);
- xla::XlaOp between_nudged_min_max =
- b->And(b->Le(nudged_input_min, input), b->Le(input, nudged_input_max));
- xla::XlaOp zeroes = b->Broadcast(XlaHelpers::Zero(b, data_type),
- gradient_shape.dim_sizes());
- xla::XlaOp output = b->Select(between_nudged_min_max, gradient, zeroes);
+ xla::XlaOp between_nudged_min_max = xla::And(
+ xla::Le(nudged_input_min, input), xla::Le(input, nudged_input_max));
+ xla::XlaOp zeroes = xla::Broadcast(XlaHelpers::Zero(b, data_type),
+ gradient_shape.dim_sizes());
+ xla::XlaOp output = xla::Select(between_nudged_min_max, gradient, zeroes);
ctx->SetOutput(0, output);
}
@@ -249,25 +250,25 @@ class FakeQuantWithMinMaxVarsGradOp : public XlaOpKernel {
XlaNudge(b, data_type, input_min, input_max, quant_min_, quant_max_,
&nudged_input_min, &nudged_input_max, &input_scale);
- xla::XlaOp between_nudged_min_max =
- b->And(b->Le(nudged_input_min, input), b->Le(input, nudged_input_max));
+ xla::XlaOp between_nudged_min_max = xla::And(
+ xla::Le(nudged_input_min, input), xla::Le(input, nudged_input_max));
xla::XlaOp zero = XlaHelpers::Zero(b, data_type);
- xla::XlaOp zeroes = b->Broadcast(zero, gradient_shape.dim_sizes());
- xla::XlaOp output0 = b->Select(between_nudged_min_max, gradient, zeroes);
+ xla::XlaOp zeroes = xla::Broadcast(zero, gradient_shape.dim_sizes());
+ xla::XlaOp output0 = xla::Select(between_nudged_min_max, gradient, zeroes);
ctx->SetOutput(0, output0);
- xla::XlaOp below_min = b->Lt(input, nudged_input_min);
- xla::XlaOp select1 = b->Select(below_min, gradient, zeroes);
- xla::XlaOp reduce1 = b->ReduceAll(
+ xla::XlaOp below_min = xla::Lt(input, nudged_input_min);
+ xla::XlaOp select1 = xla::Select(below_min, gradient, zeroes);
+ xla::XlaOp reduce1 = xla::ReduceAll(
XlaHelpers::ConvertElementType(b, select1, accumulation_type),
XlaHelpers::Zero(b, accumulation_type),
*ctx->GetOrCreateAdd(accumulation_type));
xla::XlaOp output1 = XlaHelpers::ConvertElementType(b, reduce1, data_type);
ctx->SetOutput(1, output1);
- xla::XlaOp above_max = b->Gt(input, nudged_input_max);
- xla::XlaOp select2 = b->Select(above_max, gradient, zeroes);
- xla::XlaOp reduce2 = b->ReduceAll(
+ xla::XlaOp above_max = xla::Gt(input, nudged_input_max);
+ xla::XlaOp select2 = xla::Select(above_max, gradient, zeroes);
+ xla::XlaOp reduce2 = xla::ReduceAll(
XlaHelpers::ConvertElementType(b, select2, accumulation_type),
XlaHelpers::Zero(b, accumulation_type),
*ctx->GetOrCreateAdd(accumulation_type));
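A numeric walk-through of XlaNudge above, using a hypothetical 8-bit range [0, 255] with min = -0.1 and max = 1.0:

    // scale               = (1.0 - (-0.1)) / 255      ~  0.0043137
    // zero_point_from_min = 0 - (-0.1 / 0.0043137)    ~  23.18
    // nudged_zero_point   = Round(23.18)              =  23
    // nudged_min          = (0 - 23)   * scale        ~ -0.09922
    // nudged_max          = (255 - 23) * scale        ~  1.00078
    // The nudge makes 0.0 exactly representable on the quantization grid.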
diff --git a/tensorflow/compiler/tf2xla/kernels/fft_ops.cc b/tensorflow/compiler/tf2xla/kernels/fft_ops.cc
index 933924cad1..b2b00e51e3 100644
--- a/tensorflow/compiler/tf2xla/kernels/fft_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/fft_ops.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -62,8 +63,7 @@ class GenericFftOp : public XlaOpKernel {
}
}
- xla::XlaBuilder* b = ctx->builder();
- xla::XlaOp fft = b->Fft(ctx->Input(0), fft_type_, fft_length);
+ xla::XlaOp fft = xla::Fft(ctx->Input(0), fft_type_, fft_length);
ctx->SetOutput(0, fft);
}
diff --git a/tensorflow/compiler/tf2xla/kernels/fill_op.cc b/tensorflow/compiler/tf2xla/kernels/fill_op.cc
index e4467a0fb1..95faa1d058 100644
--- a/tensorflow/compiler/tf2xla/kernels/fill_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/fill_op.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/register_types.h"
@@ -59,11 +60,11 @@ class FillOp : public XlaOpKernel {
xla::XlaOp data = ctx->Input(1);
if (value_shape.dims() > 0) {
CHECK_EQ(value_shape.dims(), 1);
- data = ctx->builder()->Reshape(data, {});
+ data = xla::Reshape(data, {});
}
// Emit the actual computation, which broadcasts the scalar to the
// desired shape.
- auto result = ctx->builder()->Broadcast(data, broadcast);
+ auto result = xla::Broadcast(data, broadcast);
ctx->SetOutput(0, result);
}
diff --git a/tensorflow/compiler/tf2xla/kernels/gather_op.cc b/tensorflow/compiler/tf2xla/kernels/gather_op.cc
index d13e25bcdd..5f041be5df 100644
--- a/tensorflow/compiler/tf2xla/kernels/gather_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/gather_op.cc
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -75,8 +76,8 @@ Status XlaGather(const xla::XlaOp& input, const TensorShape& input_shape,
out_shape.AppendShape(indices_shape_no_index_vectors);
out_shape.AppendShape(input_shape_post_axis);
- *gather_output = builder->Broadcast(XlaHelpers::Zero(builder, dtype),
- out_shape.dim_sizes());
+ *gather_output =
+ xla::Broadcast(XlaHelpers::Zero(builder, dtype), out_shape.dim_sizes());
return Status::OK();
}
@@ -142,7 +143,7 @@ Status XlaGather(const xla::XlaOp& input, const TensorShape& input_shape,
dim_numbers.add_gather_dims_to_operand_dims(i);
}
- *gather_output = builder->Gather(input, indices, dim_numbers, window_bounds);
+ *gather_output = xla::Gather(input, indices, dim_numbers, window_bounds);
return Status::OK();
}
diff --git a/tensorflow/compiler/tf2xla/kernels/if_op.cc b/tensorflow/compiler/tf2xla/kernels/if_op.cc
index d48c6eea75..f5fcf3cacd 100644
--- a/tensorflow/compiler/tf2xla/kernels/if_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/if_op.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
namespace tensorflow {
@@ -199,13 +200,13 @@ void XlaIfOp::Compile(XlaOpKernelContext* ctx) {
}
}
- xla::XlaOp outputs =
- b->Conditional(ctx->Input(0), b->Tuple(inputs), *then_result.computation,
- b->Tuple(inputs), *else_result.computation);
+ xla::XlaOp outputs = xla::Conditional(
+ ctx->Input(0), xla::Tuple(b, inputs), *then_result.computation,
+ xla::Tuple(b, inputs), *else_result.computation);
// Sets non-variable outputs.
for (int i = 0; i < output_types_.size(); ++i) {
if (ctx->input_type(i) != DT_RESOURCE) {
- xla::XlaOp output_handle = b->GetTupleElement(outputs, i);
+ xla::XlaOp output_handle = xla::GetTupleElement(outputs, i);
if (VLOG_IS_ON(2)) {
LOG(INFO) << "Setting output " << i;
auto shape_or = b->GetShape(output_handle);
@@ -233,7 +234,7 @@ void XlaIfOp::Compile(XlaOpKernelContext* ctx) {
OP_REQUIRES_OK(ctx,
resource->SetFromPack(
arguments[update.input_index].tensor_array_gradients,
- b->GetTupleElement(outputs, pos), b));
+ xla::GetTupleElement(outputs, pos), b));
}
VLOG(2) << "If variable: pos: " << update.input_index
<< " name: " << resource->name()
diff --git a/tensorflow/compiler/tf2xla/kernels/image_ops.cc b/tensorflow/compiler/tf2xla/kernels/image_ops.cc
index 1568b33679..cb4caf7bcb 100644
--- a/tensorflow/compiler/tf2xla/kernels/image_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/image_ops.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
namespace tensorflow {
namespace {
@@ -32,23 +33,26 @@ std::array<xla::XlaOp, 3> RGBToHSV(XlaOpKernelContext* ctx, xla::XlaBuilder* b,
auto red = rgb[0];
auto green = rgb[1];
auto blue = rgb[2];
- auto value = b->Max(b->Max(red, green), blue);
- auto minimum = b->Min(b->Min(red, green), blue);
- auto range = b->Sub(value, minimum);
-
- auto zeros = b->Broadcast(zero, shape.dim_sizes());
- auto saturation = b->Select(b->Gt(value, zero), b->Div(range, value), zeros);
-
- auto norm = b->Div(XlaHelpers::FloatLiteral(b, dtype, 1.0 / 6.0), range);
-
- auto hue = b->Select(b->Eq(green, value),
- b->Add(b->Mul(norm, b->Sub(blue, red)),
- XlaHelpers::FloatLiteral(b, dtype, 2.0 / 6.0)),
- b->Add(b->Mul(norm, b->Sub(red, green)),
- XlaHelpers::FloatLiteral(b, dtype, 4.0 / 6.0)));
- hue = b->Select(b->Eq(red, value), b->Mul(norm, b->Sub(green, blue)), hue);
- hue = b->Select(b->Gt(range, zero), hue, zeros);
- hue = b->Select(b->Lt(hue, zero), b->Add(hue, one), hue);
+ auto value = xla::Max(xla::Max(red, green), blue);
+ auto minimum = xla::Min(xla::Min(red, green), blue);
+ auto range = xla::Sub(value, minimum);
+
+ auto zeros = xla::Broadcast(zero, shape.dim_sizes());
+ auto saturation =
+ xla::Select(xla::Gt(value, zero), xla::Div(range, value), zeros);
+
+ auto norm = xla::Div(XlaHelpers::FloatLiteral(b, dtype, 1.0 / 6.0), range);
+
+ auto hue =
+ xla::Select(xla::Eq(green, value),
+ xla::Add(xla::Mul(norm, xla::Sub(blue, red)),
+ XlaHelpers::FloatLiteral(b, dtype, 2.0 / 6.0)),
+ xla::Add(xla::Mul(norm, xla::Sub(red, green)),
+ XlaHelpers::FloatLiteral(b, dtype, 4.0 / 6.0)));
+ hue = xla::Select(xla::Eq(red, value), xla::Mul(norm, xla::Sub(green, blue)),
+ hue);
+ hue = xla::Select(xla::Gt(range, zero), hue, zeros);
+ hue = xla::Select(xla::Lt(hue, zero), xla::Add(hue, one), hue);
return {hue, saturation, value};
}
@@ -66,15 +70,15 @@ std::array<xla::XlaOp, 3> HSVToRGB(xla::XlaBuilder* b,
auto four = XlaHelpers::FloatLiteral(b, dtype, 4.0);
auto six = XlaHelpers::FloatLiteral(b, dtype, 6.0);
- auto dh = b->Mul(hue, six);
- auto dr = b->Clamp(zero, b->Sub(b->Abs(b->Sub(dh, three)), one), one);
- auto dg = b->Clamp(zero, b->Sub(two, b->Abs(b->Sub(dh, two))), one);
- auto db = b->Clamp(zero, b->Sub(two, b->Abs(b->Sub(dh, four))), one);
- auto one_minus_s = b->Sub(one, saturation);
+ auto dh = xla::Mul(hue, six);
+ auto dr = xla::Clamp(zero, xla::Sub(xla::Abs(xla::Sub(dh, three)), one), one);
+ auto dg = xla::Clamp(zero, xla::Sub(two, xla::Abs(xla::Sub(dh, two))), one);
+ auto db = xla::Clamp(zero, xla::Sub(two, xla::Abs(xla::Sub(dh, four))), one);
+ auto one_minus_s = xla::Sub(one, saturation);
- auto red = b->Mul(b->Add(one_minus_s, b->Mul(saturation, dr)), value);
- auto green = b->Mul(b->Add(one_minus_s, b->Mul(saturation, dg)), value);
- auto blue = b->Mul(b->Add(one_minus_s, b->Mul(saturation, db)), value);
+ auto red = xla::Mul(xla::Add(one_minus_s, xla::Mul(saturation, dr)), value);
+ auto green = xla::Mul(xla::Add(one_minus_s, xla::Mul(saturation, dg)), value);
+ auto blue = xla::Mul(xla::Add(one_minus_s, xla::Mul(saturation, db)), value);
return {red, green, blue};
}
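// Quick numeric check of the two conversions above (commentary only):
// RGBToHSV on pure red (1, 0, 0): value = 1, minimum = 0, range = 1,
// saturation = 1/1 = 1, norm = (1/6)/1; red == value, so
// hue = norm * (green - blue) = 0, giving HSV (0, 1, 1).
// HSVToRGB on (0, 1, 1): dh = 0, dr = clamp(0, |0-3|-1, 1) = 1,
// dg = clamp(0, 2-|0-2|, 1) = 0, db = clamp(0, 2-|0-4|, 1) = 0,
// one_minus_s = 0, so (red, green, blue) = (1, 0, 0); the round trip
// reproduces the input exactly.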
@@ -97,21 +101,21 @@ class RGBToHSVOp : public XlaOpKernel {
xla::XlaBuilder* b = context->builder();
xla::XlaOp input = context->Input(0);
- xla::XlaOp red =
- b->SliceInDim(input, /*start_index=*/0, /*limit_index=*/1, /*stride=*/1,
- /*dimno=*/channel_dim);
- xla::XlaOp green =
- b->SliceInDim(input, /*start_index=*/1, /*limit_index=*/2, /*stride=*/1,
- /*dimno=*/channel_dim);
- xla::XlaOp blue =
- b->SliceInDim(input, /*start_index=*/2, /*limit_index=*/3, /*stride=*/1,
- /*dimno=*/channel_dim);
+ xla::XlaOp red = xla::SliceInDim(input, /*start_index=*/0,
+ /*limit_index=*/1, /*stride=*/1,
+ /*dimno=*/channel_dim);
+ xla::XlaOp green = xla::SliceInDim(input, /*start_index=*/1,
+ /*limit_index=*/2, /*stride=*/1,
+ /*dimno=*/channel_dim);
+ xla::XlaOp blue = xla::SliceInDim(input, /*start_index=*/2,
+ /*limit_index=*/3, /*stride=*/1,
+ /*dimno=*/channel_dim);
TensorShape channel_shape = input_shape;
channel_shape.set_dim(channel_dim, 1);
auto hsv = RGBToHSV(context, b, {red, green, blue}, context->input_type(0),
channel_shape);
- context->SetOutput(0, b->ConcatInDim(hsv, channel_dim));
+ context->SetOutput(0, xla::ConcatInDim(b, hsv, channel_dim));
}
};
REGISTER_XLA_OP(Name("RGBToHSV"), RGBToHSVOp);
@@ -134,20 +138,20 @@ class HSVToRGBOp : public XlaOpKernel {
xla::XlaBuilder* b = context->builder();
xla::XlaOp input = context->Input(0);
- xla::XlaOp hue =
- b->SliceInDim(input, /*start_index=*/0, /*limit_index=*/1, /*stride=*/1,
- /*dimno=*/channel_dim);
- xla::XlaOp saturation =
- b->SliceInDim(input, /*start_index=*/1, /*limit_index=*/2, /*stride=*/1,
- /*dimno=*/channel_dim);
- xla::XlaOp value =
- b->SliceInDim(input, /*start_index=*/2, /*limit_index=*/3, /*stride=*/1,
- /*dimno=*/channel_dim);
+ xla::XlaOp hue = xla::SliceInDim(input, /*start_index=*/0,
+ /*limit_index=*/1, /*stride=*/1,
+ /*dimno=*/channel_dim);
+ xla::XlaOp saturation = xla::SliceInDim(input, /*start_index=*/1,
+ /*limit_index=*/2, /*stride=*/1,
+ /*dimno=*/channel_dim);
+ xla::XlaOp value = xla::SliceInDim(input, /*start_index=*/2,
+ /*limit_index=*/3, /*stride=*/1,
+ /*dimno=*/channel_dim);
auto rgb = HSVToRGB(context->builder(), {hue, saturation, value},
context->input_type(0));
- context->SetOutput(0, b->ConcatInDim(rgb, channel_dim));
+ context->SetOutput(0, xla::ConcatInDim(b, rgb, channel_dim));
}
};
REGISTER_XLA_OP(Name("HSVToRGB"), HSVToRGBOp);
@@ -182,18 +186,20 @@ class AdjustContrastOpV2 : public XlaOpKernel {
const DataType accumulation_type = XlaHelpers::SumAccumulationType(type);
auto converted =
XlaHelpers::ConvertElementType(b, input, accumulation_type);
- auto reduce = b->Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
- *context->GetOrCreateAdd(accumulation_type),
- {height_dim, width_dim});
+ auto reduce = xla::Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
+ *context->GetOrCreateAdd(accumulation_type),
+ {height_dim, width_dim});
auto output = XlaHelpers::ConvertElementType(b, reduce, type);
- output = b->Div(output, XlaHelpers::FloatLiteral(b, type, height * width));
+ output =
+ xla::Div(output, XlaHelpers::FloatLiteral(b, type, height * width));
std::vector<int64> broadcast_dims(input_shape.dims() - 2);
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
broadcast_dims.back() = channel_dim;
- output = b->Add(b->Mul(input, factor),
- b->Mul(output, b->Sub(XlaHelpers::One(b, type), factor)),
- broadcast_dims);
+ output =
+ xla::Add(xla::Mul(input, factor),
+ xla::Mul(output, xla::Sub(XlaHelpers::One(b, type), factor)),
+ broadcast_dims);
context->SetOutput(0, output);
}
};
@@ -226,26 +232,26 @@ class AdjustSaturationOp : public XlaOpKernel {
DataType type = context->input_type(0);
- xla::XlaOp red =
- b->SliceInDim(input, /*start_index=*/0, /*limit_index=*/1, /*stride=*/1,
- /*dimno=*/channel_dim);
- xla::XlaOp green =
- b->SliceInDim(input, /*start_index=*/1, /*limit_index=*/2, /*stride=*/1,
- /*dimno=*/channel_dim);
- xla::XlaOp blue =
- b->SliceInDim(input, /*start_index=*/2, /*limit_index=*/3, /*stride=*/1,
- /*dimno=*/channel_dim);
+ xla::XlaOp red = xla::SliceInDim(input, /*start_index=*/0,
+ /*limit_index=*/1, /*stride=*/1,
+ /*dimno=*/channel_dim);
+ xla::XlaOp green = xla::SliceInDim(input, /*start_index=*/1,
+ /*limit_index=*/2, /*stride=*/1,
+ /*dimno=*/channel_dim);
+ xla::XlaOp blue = xla::SliceInDim(input, /*start_index=*/2,
+ /*limit_index=*/3, /*stride=*/1,
+ /*dimno=*/channel_dim);
TensorShape channel_shape = input_shape;
channel_shape.set_dim(channel_dim, 1);
auto hsv = RGBToHSV(context, b, {red, green, blue}, context->input_type(0),
channel_shape);
- hsv[1] = b->Clamp(XlaHelpers::Zero(b, type), b->Mul(hsv[1], scale),
- XlaHelpers::One(b, type));
+ hsv[1] = xla::Clamp(XlaHelpers::Zero(b, type), xla::Mul(hsv[1], scale),
+ XlaHelpers::One(b, type));
auto rgb = HSVToRGB(context->builder(), hsv, context->input_type(0));
- context->SetOutput(0, b->ConcatInDim(rgb, channel_dim));
+ context->SetOutput(0, xla::ConcatInDim(b, rgb, channel_dim));
}
};
REGISTER_XLA_OP(Name("AdjustSaturation"), AdjustSaturationOp);
@@ -276,15 +282,15 @@ class AdjustHueOp : public XlaOpKernel {
DataType type = context->input_type(0);
- xla::XlaOp red =
- b->SliceInDim(input, /*start_index=*/0, /*limit_index=*/1, /*stride=*/1,
- /*dimno=*/channel_dim);
- xla::XlaOp green =
- b->SliceInDim(input, /*start_index=*/1, /*limit_index=*/2, /*stride=*/1,
- /*dimno=*/channel_dim);
- xla::XlaOp blue =
- b->SliceInDim(input, /*start_index=*/2, /*limit_index=*/3, /*stride=*/1,
- /*dimno=*/channel_dim);
+ xla::XlaOp red = xla::SliceInDim(input, /*start_index=*/0,
+ /*limit_index=*/1, /*stride=*/1,
+ /*dimno=*/channel_dim);
+ xla::XlaOp green = xla::SliceInDim(input, /*start_index=*/1,
+ /*limit_index=*/2, /*stride=*/1,
+ /*dimno=*/channel_dim);
+ xla::XlaOp blue = xla::SliceInDim(input, /*start_index=*/2,
+ /*limit_index=*/3, /*stride=*/1,
+ /*dimno=*/channel_dim);
TensorShape channel_shape = input_shape;
channel_shape.set_dim(channel_dim, 1);
auto hsv = RGBToHSV(context, b, {red, green, blue}, context->input_type(0),
@@ -294,12 +300,13 @@ class AdjustHueOp : public XlaOpKernel {
auto one = XlaHelpers::One(b, type);
auto& hue = hsv[0];
- hue = b->Rem(b->Add(hsv[0], delta), one);
- hue = b->Select(b->Lt(hue, zero), b->Rem(b->Add(one, hue), one), hue);
+ hue = xla::Rem(xla::Add(hsv[0], delta), one);
+ hue =
+ xla::Select(xla::Lt(hue, zero), xla::Rem(xla::Add(one, hue), one), hue);
auto rgb = HSVToRGB(context->builder(), hsv, context->input_type(0));
- context->SetOutput(0, b->ConcatInDim(rgb, channel_dim));
+ context->SetOutput(0, xla::ConcatInDim(b, rgb, channel_dim));
}
};
REGISTER_XLA_OP(Name("AdjustHue"), AdjustHueOp);
diff --git a/tensorflow/compiler/tf2xla/kernels/image_resize_ops.cc b/tensorflow/compiler/tf2xla/kernels/image_resize_ops.cc
index 79d3a6979c..d6bf92fb3d 100644
--- a/tensorflow/compiler/tf2xla/kernels/image_resize_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/image_resize_ops.cc
@@ -18,6 +18,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/array4d.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/lib/math/math_util.h"
@@ -127,48 +129,41 @@ const int64 kMax2DKernelSize = 16;
xla::XlaOp MakeBilinearResizeKernel(xla::XlaBuilder* builder,
gtl::ArraySlice<int64> kernel_size,
int64 channels) {
- xla::XlaOp channels_iota;
- // DT_INT32 Iota will always return status::OK().
- TF_CHECK_OK(
- XlaHelpers::Iota(builder, DataType::DT_INT32, channels, &channels_iota));
-
- auto diag = builder->ConvertElementType(
- builder->Eq(
- builder->Broadcast(channels_iota, {2 * kernel_size[0] - 1,
+ xla::XlaOp channels_iota = xla::Iota(builder, xla::S32, channels);
+
+ auto diag = xla::ConvertElementType(
+ xla::Eq(xla::Broadcast(channels_iota, {2 * kernel_size[0] - 1,
2 * kernel_size[1] - 1, channels}),
- channels_iota, /*broadcast_dimensions=*/{2}),
+ channels_iota, /*broadcast_dimensions=*/{2}),
xla::PrimitiveType::F32);
- return builder->Mul(
- builder->Mul(diag,
- builder->ConstantR1<float>(Make1DKernel(kernel_size[1])),
- /*broadcast_dimensions=*/{1}),
- builder->ConstantR1<float>(Make1DKernel(kernel_size[0])),
+ return xla::Mul(
+ xla::Mul(diag,
+ xla::ConstantR1<float>(builder, Make1DKernel(kernel_size[1])),
+ /*broadcast_dimensions=*/{1}),
+ xla::ConstantR1<float>(builder, Make1DKernel(kernel_size[0])),
/*broadcast_dimensions=*/{0});
}
xla::XlaOp MakeBilinearResizeKernelInDim(xla::XlaBuilder* builder,
gtl::ArraySlice<int64> kernel_size,
int64 channels, int64 dim) {
- xla::XlaOp channels_iota;
- // DT_INT32 Iota will always return status::OK().
- TF_CHECK_OK(
- XlaHelpers::Iota(builder, DataType::DT_INT32, channels, &channels_iota));
-
- auto diag = builder->ConvertElementType(
- builder->Eq(builder->Broadcast(
- channels_iota,
- {dim == 0 ? (2 * kernel_size[0] - 1) : 1,
- dim == 1 ? (2 * kernel_size[1] - 1) : 1, channels}),
- channels_iota, /*broadcast_dimensions=*/{2}),
+ xla::XlaOp channels_iota = xla::Iota(builder, xla::S32, channels);
+
+ auto diag = xla::ConvertElementType(
+ xla::Eq(
+ xla::Broadcast(channels_iota,
+ {dim == 0 ? (2 * kernel_size[0] - 1) : 1,
+ dim == 1 ? (2 * kernel_size[1] - 1) : 1, channels}),
+ channels_iota, /*broadcast_dimensions=*/{2}),
xla::PrimitiveType::F32);
if (dim == 1) {
- return builder->Mul(
- diag, builder->ConstantR1<float>(Make1DKernel(kernel_size[1])),
+ return xla::Mul(
+ diag, xla::ConstantR1<float>(builder, Make1DKernel(kernel_size[1])),
/*broadcast_dimensions=*/{1});
}
- return builder->Mul(diag,
- builder->ConstantR1<float>(Make1DKernel(kernel_size[0])),
- /*broadcast_dimensions=*/{0});
+ return xla::Mul(diag,
+ xla::ConstantR1<float>(builder, Make1DKernel(kernel_size[0])),
+ /*broadcast_dimensions=*/{0});
}
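// Note on the Iota/Eq construction above: Broadcast *prepends* its sizes, so
// broadcasting the length-`channels` iota by {2*k0-1, 2*k1-1, channels}
// yields a rank-4 array whose (i, j, a, c) entry is c; comparing it against
// the iota mapped along dimension 2 (entry value a) leaves 1 exactly where
// a == c. The result is a per-channel identity mask, so after multiplying in
// the 1-D triangle kernels the separable bilinear filter never mixes
// channels when handed to ConvGeneralDilated below.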
xla::XlaOp ResizeUsingDilationAndConvolution(xla::XlaBuilder* builder,
@@ -208,7 +203,7 @@ xla::XlaOp ResizeUsingDilationAndConvolution(xla::XlaBuilder* builder,
if (dims.kernel_size[0] * dims.kernel_size[1] < kMax2DKernelSize) {
xla::XlaOp kernel =
MakeBilinearResizeKernel(builder, dims.kernel_size, channels);
- output = builder->ConvGeneralDilated(
+ output = xla::ConvGeneralDilated(
input, kernel, dims.stride,
/*padding=*/
{{dims.kernel_size[0] - 1, dims.kernel_size[0] - 1},
@@ -218,7 +213,7 @@ xla::XlaOp ResizeUsingDilationAndConvolution(xla::XlaBuilder* builder,
} else {
xla::XlaOp kernel0 =
MakeBilinearResizeKernelInDim(builder, dims.kernel_size, channels, 0);
- output = builder->ConvGeneralDilated(
+ output = xla::ConvGeneralDilated(
input, kernel0, {dims.stride[0], 1},
/*padding=*/
{{dims.kernel_size[0] - 1, dims.kernel_size[0] - 1}, {0, 0}},
@@ -226,7 +221,7 @@ xla::XlaOp ResizeUsingDilationAndConvolution(xla::XlaBuilder* builder,
/*rhs_dilation=*/{1, 1}, dimension_numbers);
xla::XlaOp kernel1 =
MakeBilinearResizeKernelInDim(builder, dims.kernel_size, channels, 1);
- output = builder->ConvGeneralDilated(
+ output = xla::ConvGeneralDilated(
output, kernel1, {1, dims.stride[1]},
/*padding=*/
{{0, 0}, {dims.kernel_size[1] - 1, dims.kernel_size[1] - 1}},
@@ -238,8 +233,8 @@ xla::XlaOp ResizeUsingDilationAndConvolution(xla::XlaBuilder* builder,
// size > 1 dimension.
for (int i = 0; i < num_spatial_dims; ++i) {
if (in_size[i] == 1 && out_size[i] > 1) {
- output = builder->Add(output, builder->ConstantR1<float>(out_size[i], 0),
- /*broadcast_dimensions=*/{1 + i});
+ output = xla::Add(output, xla::ConstantR1<float>(builder, out_size[i], 0),
+ /*broadcast_dimensions=*/{1 + i});
}
}
return output;
@@ -279,12 +274,12 @@ xla::XlaOp ResizeUsingDilationAndConvolutionGradOp(xla::XlaBuilder* builder,
for (int i = 0; i < num_spatial_dims; ++i) {
if (in_size[i] == 1 && grad_size[i] > 1) {
kernel =
- builder->Add(kernel, builder->ConstantR1<float>(grad_size[i], 0),
- /*broadcast_dimensions=*/{i});
+ xla::Add(kernel, xla::ConstantR1<float>(builder, grad_size[i], 0),
+ /*broadcast_dimensions=*/{i});
}
}
- output = builder->ConvGeneralDilated(
+ output = xla::ConvGeneralDilated(
grad, kernel, /*window_strides=*/dims.kernel_size,
/*padding=*/
{{dims.kernel_size[0] - 1, dims.kernel_size[0] - 1},
@@ -302,23 +297,23 @@ xla::XlaOp ResizeUsingDilationAndConvolutionGradOp(xla::XlaBuilder* builder,
// gradient contributions in that dimension.
if (in_size[0] == 1 && grad_size[0] > 1) {
kernel0 =
- builder->Add(kernel0, builder->ConstantR1<float>(grad_size[0], 0),
- /*broadcast_dimensions=*/{0});
+ xla::Add(kernel0, xla::ConstantR1<float>(builder, grad_size[0], 0),
+ /*broadcast_dimensions=*/{0});
}
if (in_size[1] == 1 && grad_size[1] > 1) {
kernel1 =
- builder->Add(kernel0, builder->ConstantR1<float>(grad_size[1], 0),
- /*broadcast_dimensions=*/{1});
+          xla::Add(kernel1, xla::ConstantR1<float>(builder, grad_size[1], 0),
+ /*broadcast_dimensions=*/{1});
}
- output = builder->ConvGeneralDilated(
+ output = xla::ConvGeneralDilated(
grad, kernel0, /*window_strides=*/{dims.kernel_size[0], 1},
/*padding=*/
{{dims.kernel_size[0] - 1, dims.kernel_size[0] - 1}, {0, 0}},
/*lhs_dilation=*/{dims.stride[0], 1},
/*rhs_dilation=*/{1, 1}, dimension_numbers);
- output = builder->ConvGeneralDilated(
+ output = xla::ConvGeneralDilated(
output, kernel1, /*window_strides=*/{1, dims.kernel_size[1]},
/*padding=*/
{{0, 0}, {dims.kernel_size[1] - 1, dims.kernel_size[1] - 1}},
@@ -337,7 +332,7 @@ xla::XlaOp ResizeUsingDilationAndConvolutionGradOp(xla::XlaBuilder* builder,
}
}
if (pad_output) {
- output = builder->Pad(output, builder->ConstantR0<float>(0.0f), padding);
+ output = xla::Pad(output, xla::ConstantR0<float>(builder, 0.0f), padding);
}
return output;
}
@@ -393,13 +388,13 @@ class ResizeBilinearOp : public XlaOpKernel {
}
}
if (slice_input) {
- input = b->Slice(input, {0, 0, 0, 0},
- {batch, slice_size[0], slice_size[1], channels},
- {1, 1, 1, 1});
+ input = xla::Slice(input, {0, 0, 0, 0},
+ {batch, slice_size[0], slice_size[1], channels},
+ {1, 1, 1, 1});
}
// Output is always type float.
- input = b->ConvertElementType(input, xla::F32);
+ input = xla::ConvertElementType(input, xla::F32);
// Special Case:
// Instead of doing a ResizeUsingDilationAndConvolution directly,
@@ -529,7 +524,7 @@ class ResizeBilinearGradOp : public XlaOpKernel {
}
}
- output = b->ConvertElementType(output, output_type_);
+ output = xla::ConvertElementType(output, output_type_);
ctx->SetOutput(0, output);
}
diff --git a/tensorflow/compiler/tf2xla/kernels/index_ops.cc b/tensorflow/compiler/tf2xla/kernels/index_ops.cc
index 36eb4c7545..f396474858 100644
--- a/tensorflow/compiler/tf2xla/kernels/index_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/index_ops.cc
@@ -60,19 +60,15 @@ void XlaArgMinMaxOp::Compile(XlaOpKernelContext* ctx) {
input_shape.DebugString()));
DataType index_type = output_type(0);
+ xla::PrimitiveType index_xla_type;
+ OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(index_type, &index_xla_type));
- xla::XlaBuilder* b = ctx->builder();
xla::XlaOp input = ctx->Input(0);
-
xla::XlaOp output;
if (is_min_) {
- OP_REQUIRES_OK(ctx,
- XlaHelpers::ArgMin(b, ctx, input, input_shape, input_type(0),
- index_type, axis, &output));
+ output = XlaHelpers::ArgMin(input, index_xla_type, axis);
} else {
- OP_REQUIRES_OK(ctx,
- XlaHelpers::ArgMax(b, ctx, input, input_shape, input_type(0),
- index_type, axis, &output));
+ output = XlaHelpers::ArgMax(input, index_xla_type, axis);
}
ctx->SetOutput(0, output);
diff --git a/tensorflow/compiler/tf2xla/kernels/index_ops_cpu.cc b/tensorflow/compiler/tf2xla/kernels/index_ops_cpu.cc
index 2c2d88486f..22a45b2a11 100644
--- a/tensorflow/compiler/tf2xla/kernels/index_ops_cpu.cc
+++ b/tensorflow/compiler/tf2xla/kernels/index_ops_cpu.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
@@ -76,14 +77,15 @@ class ArgMaxCustomCallOp : public XlaOpKernel {
// XLA passes <out> to the function, so it is not included here.
std::vector<xla::XlaOp> args;
args.push_back(ctx->Input(0));
- args.push_back(b.ConstantLiteral(
- *xla::Literal::CreateR1<int64>(input_shape.dim_sizes())));
+ args.push_back(xla::ConstantLiteral(
+ &b, *xla::LiteralUtil::CreateR1<int64>(input_shape.dim_sizes())));
if (input_shape.dims() > 1) {
// Don't bother passing the output shape and dim for the 1d case, since
// the shape is always a scalar and the dim is always 0.
- args.push_back(b.ConstantLiteral(
- *xla::Literal::CreateR1<int64>(output_shape.dim_sizes())));
- args.push_back(b.ConstantLiteral(*xla::Literal::CreateR0<int32>(dim)));
+ args.push_back(xla::ConstantLiteral(
+ &b, *xla::LiteralUtil::CreateR1<int64>(output_shape.dim_sizes())));
+ args.push_back(
+ xla::ConstantLiteral(&b, *xla::LiteralUtil::CreateR0<int32>(dim)));
}
xla::Shape xla_shape =
@@ -94,10 +96,12 @@ class ArgMaxCustomCallOp : public XlaOpKernel {
xla::XlaOp output;
switch (input_shape.dims()) {
case 1:
- output = b.CustomCall("argmax_float_1d_xla_impl", args, xla_shape);
+ output =
+ xla::CustomCall(&b, "argmax_float_1d_xla_impl", args, xla_shape);
break;
case 2:
- output = b.CustomCall("argmax_float_2d_xla_impl", args, xla_shape);
+ output =
+ xla::CustomCall(&b, "argmax_float_2d_xla_impl", args, xla_shape);
break;
default:
OP_REQUIRES(ctx, false,
diff --git a/tensorflow/compiler/tf2xla/kernels/l2loss_op.cc b/tensorflow/compiler/tf2xla/kernels/l2loss_op.cc
index 1decf7d72d..9e64711051 100644
--- a/tensorflow/compiler/tf2xla/kernels/l2loss_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/l2loss_op.cc
@@ -39,12 +39,12 @@ class L2LossOp : public XlaOpKernel {
const DataType accumulation_type = XlaHelpers::SumAccumulationType(dtype);
auto t =
XlaHelpers::ConvertElementType(b, ctx->Input(0), accumulation_type);
- auto square = b->Mul(t, t);
- auto reduce = b->Reduce(square, XlaHelpers::Zero(b, accumulation_type),
- *ctx->GetOrCreateAdd(accumulation_type), dims);
+ auto square = xla::Mul(t, t);
+ auto reduce = xla::Reduce(square, XlaHelpers::Zero(b, accumulation_type),
+ *ctx->GetOrCreateAdd(accumulation_type), dims);
auto deconverted = XlaHelpers::ConvertElementType(b, reduce, dtype);
auto two = XlaHelpers::IntegerLiteral(b, dtype, 2);
- ctx->SetOutput(0, b->Div(deconverted, two));
+ ctx->SetOutput(0, xla::Div(deconverted, two));
}
};
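The rewritten body computes sum(x*x)/2 in the accumulation type. A reference-only check of the arithmetic:

#include <vector>

float L2LossRef(const std::vector<float>& x) {
  float acc = 0.0f;                // accumulate, as the Reduce over all dims
  for (float v : x) acc += v * v;  // xla::Mul(t, t)
  return acc / 2.0f;               // xla::Div(deconverted, two)
}
// L2LossRef({3.0f, 4.0f}) == 12.5f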
diff --git a/tensorflow/compiler/tf2xla/kernels/listdiff_op.cc b/tensorflow/compiler/tf2xla/kernels/listdiff_op.cc
index 0388b4c830..2fb072f827 100644
--- a/tensorflow/compiler/tf2xla/kernels/listdiff_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/listdiff_op.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/lib/core/errors.h"
@@ -90,8 +91,10 @@ class ListDiffOp : public XlaOpKernel {
idx_output.push_back(i);
}
- context->SetOutput(0, context->builder()->ConstantR1<Tval>(val_output));
- context->SetOutput(1, context->builder()->ConstantR1<Tidx>(idx_output));
+ context->SetOutput(0,
+ xla::ConstantR1<Tval>(context->builder(), val_output));
+ context->SetOutput(1,
+ xla::ConstantR1<Tidx>(context->builder(), idx_output));
return Status::OK();
}
diff --git a/tensorflow/compiler/tf2xla/kernels/lrn_ops.cc b/tensorflow/compiler/tf2xla/kernels/lrn_ops.cc
index 39fbf98a62..dc934543cb 100644
--- a/tensorflow/compiler/tf2xla/kernels/lrn_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/lrn_ops.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
namespace tensorflow {
@@ -50,8 +51,8 @@ class LRNOp : public XlaOpKernel {
auto accumulation_type = XlaHelpers::SumAccumulationType(input_type(0));
auto converted =
XlaHelpers::ConvertElementType(builder, input, accumulation_type);
- auto squared = builder->Mul(converted, converted);
- auto reduce = builder->ReduceWindow(
+ auto squared = xla::Mul(converted, converted);
+ auto reduce = xla::ReduceWindow(
squared, XlaHelpers::Zero(builder, accumulation_type),
*ctx->GetOrCreateAdd(accumulation_type),
/* window_dimensions = */ {1, 1, 1, depth_radius_ * 2 + 1},
@@ -59,12 +60,12 @@ class LRNOp : public XlaOpKernel {
auto sqr_sum =
XlaHelpers::ConvertElementType(builder, reduce, input_type(0));
- auto scale = builder->Pow(
- builder->Add(builder->ConstantR0<float>(bias_),
- builder->Mul(builder->ConstantR0<float>(alpha_), sqr_sum)),
- builder->ConstantR0<float>(-beta_));
+ auto scale = xla::Pow(
+ xla::Add(xla::ConstantR0<float>(builder, bias_),
+ xla::Mul(xla::ConstantR0<float>(builder, alpha_), sqr_sum)),
+ xla::ConstantR0<float>(builder, -beta_));
- ctx->SetOutput(0, builder->Mul(input, scale));
+ ctx->SetOutput(0, xla::Mul(input, scale));
}
private:
@@ -138,8 +139,8 @@ class LRNGradOp : public XlaOpKernel {
auto accumulation_type = XlaHelpers::SumAccumulationType(input_type(0));
auto converted =
XlaHelpers::ConvertElementType(builder, in_image, accumulation_type);
- auto squared = builder->Mul(converted, converted);
- auto reduce = builder->ReduceWindow(
+ auto squared = xla::Mul(converted, converted);
+ auto reduce = xla::ReduceWindow(
squared, XlaHelpers::Zero(builder, accumulation_type),
*ctx->GetOrCreateAdd(accumulation_type),
/* window_dimensions = */ {1, 1, 1, depth_radius_ * 2 + 1},
@@ -148,17 +149,17 @@ class LRNGradOp : public XlaOpKernel {
XlaHelpers::ConvertElementType(builder, reduce, input_type(0));
auto norm =
- builder->Add(builder->ConstantR0<float>(bias_),
- builder->Mul(builder->ConstantR0<float>(alpha_), sqr_sum));
+ xla::Add(xla::ConstantR0<float>(builder, bias_),
+ xla::Mul(xla::ConstantR0<float>(builder, alpha_), sqr_sum));
- auto dy = builder->Mul(
- builder->Mul(builder->ConstantR0<float>(-2.0f * alpha_ * beta_),
- builder->Div(out_image, norm)),
+ auto dy = xla::Mul(
+ xla::Mul(xla::ConstantR0<float>(builder, -2.0f * alpha_ * beta_),
+ xla::Div(out_image, norm)),
in_grads);
auto converted_dy =
XlaHelpers::ConvertElementType(builder, dy, accumulation_type);
- auto dy_reduce = builder->ReduceWindow(
+ auto dy_reduce = xla::ReduceWindow(
converted_dy, XlaHelpers::Zero(builder, accumulation_type),
*ctx->GetOrCreateAdd(accumulation_type),
/* window_dimensions = */ {1, 1, 1, depth_radius_ * 2 + 1},
@@ -166,10 +167,10 @@ class LRNGradOp : public XlaOpKernel {
auto dy_reduced =
XlaHelpers::ConvertElementType(builder, dy_reduce, input_type(0));
- xla::XlaOp gradients = builder->Add(
- builder->Mul(in_image, dy_reduced),
- builder->Mul(in_grads,
- builder->Pow(norm, builder->ConstantR0<float>(-beta_))));
+ xla::XlaOp gradients = xla::Add(
+ xla::Mul(in_image, dy_reduced),
+ xla::Mul(in_grads,
+ xla::Pow(norm, xla::ConstantR0<float>(builder, -beta_))));
ctx->SetOutput(0, gradients);
}
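Both hunks in this file keep the standard local-response-normalization arithmetic: a zero-padded sum of squares over a depth window of 2*depth_radius+1, then scale = (bias + alpha*sqr_sum)^(-beta). A depth-only reference of the forward pass, assuming the NHWC layout the window dimensions above imply:

#include <algorithm>
#include <cmath>
#include <vector>

std::vector<float> LrnRef(const std::vector<float>& in, int radius,
                          float bias, float alpha, float beta) {
  const int depth = static_cast<int>(in.size());
  std::vector<float> out(depth);
  for (int d = 0; d < depth; ++d) {
    float sqr_sum = 0.0f;  // zero-initialized, like the ReduceWindow above
    for (int k = std::max(0, d - radius);
         k <= std::min(depth - 1, d + radius); ++k) {
      sqr_sum += in[k] * in[k];
    }
    out[d] = in[d] * std::pow(bias + alpha * sqr_sum, -beta);
  }
  return out;
}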
diff --git a/tensorflow/compiler/tf2xla/kernels/matmul_op.cc b/tensorflow/compiler/tf2xla/kernels/matmul_op.cc
index 6949b296f4..844080b8cf 100644
--- a/tensorflow/compiler/tf2xla/kernels/matmul_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/matmul_op.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
@@ -70,15 +71,15 @@ class MatMulOp : public XlaOpKernel {
xla::XlaOp b = ctx->Input(1);
if (is_sparse_) {
if (a_type_ == DT_BFLOAT16) {
- a = ctx->builder()->ConvertElementType(a, xla::F32);
+ a = xla::ConvertElementType(a, xla::F32);
}
if (b_type_ == DT_BFLOAT16) {
- b = ctx->builder()->ConvertElementType(b, xla::F32);
+ b = xla::ConvertElementType(b, xla::F32);
}
}
- auto lhs = (transpose_a_) ? ctx->builder()->Transpose(a, {1, 0}) : a;
- auto rhs = (transpose_b_) ? ctx->builder()->Transpose(b, {1, 0}) : b;
- ctx->SetOutput(0, ctx->builder()->Dot(lhs, rhs));
+ auto lhs = (transpose_a_) ? xla::Transpose(a, {1, 0}) : a;
+ auto rhs = (transpose_b_) ? xla::Transpose(b, {1, 0}) : b;
+ ctx->SetOutput(0, xla::Dot(lhs, rhs));
}
private:
diff --git a/tensorflow/compiler/tf2xla/kernels/matrix_band_part_op.cc b/tensorflow/compiler/tf2xla/kernels/matrix_band_part_op.cc
index fbd5dc0fda..e06c87db7a 100644
--- a/tensorflow/compiler/tf2xla/kernels/matrix_band_part_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/matrix_band_part_op.cc
@@ -16,6 +16,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
@@ -50,6 +52,7 @@ class MatrixBandPartOp : public XlaOpKernel {
xla::XlaOp num_upper = context->Input(2);
DataType input_type = context->input_type(0);
DataType index_type = context->input_type(1);
+ xla::PrimitiveType index_xla_type = context->input_xla_type(1);
TensorShape batch_shape = input_shape;
batch_shape.RemoveLastDims(2);
@@ -58,33 +61,29 @@ class MatrixBandPartOp : public XlaOpKernel {
// Compute 'offset', which is how many diagonals we are above/below the
// diagonal.
- xla::XlaOp iota_m;
- OP_REQUIRES_OK(context, XlaHelpers::Iota(builder, index_type, m, &iota_m));
+ xla::XlaOp iota_m = xla::Iota(builder, index_xla_type, m);
+ xla::XlaOp iota_n = xla::Iota(builder, index_xla_type, n);
- xla::XlaOp iota_n;
- OP_REQUIRES_OK(context, XlaHelpers::Iota(builder, index_type, n, &iota_n));
-
- auto offset = builder->Sub(builder->Broadcast(iota_n, {m}), iota_m,
- /*broadcast_dimensions=*/{0});
+ auto offset = xla::Sub(xla::Broadcast(iota_n, {m}), iota_m,
+ /*broadcast_dimensions=*/{0});
// If num_lower or num_upper are negative, include all lower/upper
// diagonals.
auto zero_index = XlaHelpers::Zero(builder, index_type);
- num_lower = builder->Select(
- builder->Lt(num_lower, zero_index),
- XlaHelpers::IntegerLiteral(builder, index_type, m), num_lower);
- num_upper = builder->Select(
- builder->Lt(num_upper, zero_index),
- XlaHelpers::IntegerLiteral(builder, index_type, n), num_upper);
+ num_lower = xla::Select(xla::Lt(num_lower, zero_index),
+ XlaHelpers::IntegerLiteral(builder, index_type, m),
+ num_lower);
+ num_upper = xla::Select(xla::Lt(num_upper, zero_index),
+ XlaHelpers::IntegerLiteral(builder, index_type, n),
+ num_upper);
- auto indicator = builder->And(builder->Le(builder->Neg(num_lower), offset),
- builder->Le(offset, num_upper));
- indicator = builder->Broadcast(indicator, batch_shape.dim_sizes());
+ auto indicator = xla::And(xla::Le(xla::Neg(num_lower), offset),
+ xla::Le(offset, num_upper));
+ indicator = xla::Broadcast(indicator, batch_shape.dim_sizes());
auto zero_input = XlaHelpers::Zero(builder, input_type);
- auto output = builder->Select(
- indicator, input,
- builder->Broadcast(zero_input, input_shape.dim_sizes()));
+ auto output = xla::Select(
+ indicator, input, xla::Broadcast(zero_input, input_shape.dim_sizes()));
context->SetOutput(0, output);
}
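Worked example of the rewritten mask logic for m = n = 3, num_lower = 1, num_upper = 0. The Sub/Broadcast above builds offset(i, j) = j - i:

     0  1  2
    -1  0  1
    -2 -1  0

and the indicator keeps -num_lower <= offset <= num_upper:

     1  0  0
     1  1  0
     0  1  1

so the final Select retains the main diagonal plus one subdiagonal and zeroes everything else.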
diff --git a/tensorflow/compiler/tf2xla/kernels/matrix_set_diag_op.cc b/tensorflow/compiler/tf2xla/kernels/matrix_set_diag_op.cc
index db53f6fef8..e2ab4b83cf 100644
--- a/tensorflow/compiler/tf2xla/kernels/matrix_set_diag_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/matrix_set_diag_op.cc
@@ -16,6 +16,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
namespace tensorflow {
@@ -61,14 +63,11 @@ class MatrixSetDiagOp : public XlaOpKernel {
auto zero = XlaHelpers::Zero(builder, context->input_type(0));
// Create an indicator tensor that is true only on the diagonal.
- xla::XlaOp iota_m;
- OP_REQUIRES_OK(context, XlaHelpers::Iota(builder, DT_INT32, m, &iota_m));
- xla::XlaOp iota_n;
- OP_REQUIRES_OK(context, XlaHelpers::Iota(builder, DT_INT32, n, &iota_n));
- auto indicator = builder->Eq(iota_m,
- builder->Broadcast(iota_n, {m}),
- /*broadcast_dimensions=*/{0});
- indicator = builder->Broadcast(indicator, batch_shape.dim_sizes());
+ xla::XlaOp iota_m = xla::Iota(builder, xla::S32, m);
+ xla::XlaOp iota_n = xla::Iota(builder, xla::S32, n);
+ auto indicator = xla::Eq(iota_m, xla::Broadcast(iota_n, {m}),
+ /*broadcast_dimensions=*/{0});
+ indicator = xla::Broadcast(indicator, batch_shape.dim_sizes());
// Broadcast diag up to the input shape. Use an implicit broadcast (Add)
// because we need to broadcast on the right.
@@ -77,10 +76,10 @@ class MatrixSetDiagOp : public XlaOpKernel {
if (min_dim != m) {
diag_broadcast_dims.back() = rank - 1;
}
- diag = builder->Add(diag, builder->Broadcast(zero, input_shape.dim_sizes()),
- /*broadcast_dimensions=*/diag_broadcast_dims);
+ diag = xla::Add(diag, xla::Broadcast(zero, input_shape.dim_sizes()),
+ /*broadcast_dimensions=*/diag_broadcast_dims);
- auto output = builder->Select(indicator, diag, input);
+ auto output = xla::Select(indicator, diag, input);
context->SetOutput(0, output);
}
diff --git a/tensorflow/compiler/tf2xla/kernels/matrix_triangular_solve_op.cc b/tensorflow/compiler/tf2xla/kernels/matrix_triangular_solve_op.cc
index eaed931464..f4def11d08 100644
--- a/tensorflow/compiler/tf2xla/kernels/matrix_triangular_solve_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/matrix_triangular_solve_op.cc
@@ -30,13 +30,9 @@ class MatrixTriangularSolveOp : public XlaOpKernel {
void Compile(XlaOpKernelContext* ctx) override {
auto result = TriangularSolve(
- ctx->builder(), ctx->Input(0), ctx->Input(1), /*left_side=*/true,
+ ctx->Input(0), ctx->Input(1), /*left_side=*/true,
/*lower=*/lower_, /*transpose_a=*/adjoint_, /*conjugate_a=*/adjoint_);
- if (!result.ok()) {
- ctx->SetStatus(result.status());
- return;
- }
- ctx->SetOutput(0, result.ValueOrDie());
+ ctx->SetOutput(0, result);
}
private:
diff --git a/tensorflow/compiler/tf2xla/kernels/mirror_pad_op.cc b/tensorflow/compiler/tf2xla/kernels/mirror_pad_op.cc
index c3326b4d11..529959dbd9 100644
--- a/tensorflow/compiler/tf2xla/kernels/mirror_pad_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/mirror_pad_op.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/util/mirror_pad_mode.h"
namespace tensorflow {
@@ -32,16 +33,16 @@ class MirrorPadOp : public XlaOpKernel {
xla::XlaOp accum = t;
for (int64 dimno = xla::ShapeUtil::Rank(original_shape) - 1; dimno >= 0;
--dimno) {
- auto t_rev = b->Rev(accum, {dimno});
+ auto t_rev = xla::Rev(accum, {dimno});
TF_ASSIGN_OR_RETURN(int64 lhs_padding,
pad_literal.GetIntegralAsS64({dimno, 0}));
TF_ASSIGN_OR_RETURN(int64 rhs_padding,
pad_literal.GetIntegralAsS64({dimno, 1}));
int64 dim_size = original_shape.dimensions(dimno);
- auto lhs_pad = b->SliceInDim(t_rev, dim_size - 1 - lhs_padding,
- dim_size - 1, 1, dimno);
- auto rhs_pad = b->SliceInDim(t_rev, 1, 1 + rhs_padding, 1, dimno);
- accum = b->ConcatInDim({lhs_pad, accum, rhs_pad}, dimno);
+ auto lhs_pad = xla::SliceInDim(t_rev, dim_size - 1 - lhs_padding,
+ dim_size - 1, 1, dimno);
+ auto rhs_pad = xla::SliceInDim(t_rev, 1, 1 + rhs_padding, 1, dimno);
+ accum = xla::ConcatInDim(b, {lhs_pad, accum, rhs_pad}, dimno);
}
return accum;
}
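Worked example of the loop above for one dimension of [1, 2, 3, 4] with paddings (2, 1): t_rev = [4, 3, 2, 1]; lhs_pad slices t_rev over [dim_size-1-2, dim_size-1) giving [3, 2]; rhs_pad slices t_rev over [1, 2) giving [3]; the ConcatInDim yields [3, 2, 1, 2, 3, 4, 3]. The reflection excludes the border element, matching REFLECT-mode MirrorPad semantics.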
diff --git a/tensorflow/compiler/tf2xla/kernels/pack_op.cc b/tensorflow/compiler/tf2xla/kernels/pack_op.cc
index aecaabb6dc..3aed47de26 100644
--- a/tensorflow/compiler/tf2xla/kernels/pack_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/pack_op.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
@@ -76,11 +77,10 @@ class PackOp : public XlaOpKernel {
for (int i = 0; i < num; ++i) {
// Reshape the inputs to have an extra dimension of size 1.
- reshaped_inputs[i] =
- ctx->builder()->Reshape(values[i], child_shape.dim_sizes());
+ reshaped_inputs[i] = xla::Reshape(values[i], child_shape.dim_sizes());
}
- ctx->SetOutput(0, ctx->builder()->ConcatInDim(reshaped_inputs, axis));
+ ctx->SetOutput(0, xla::ConcatInDim(ctx->builder(), reshaped_inputs, axis));
}
private:
diff --git a/tensorflow/compiler/tf2xla/kernels/pad_op.cc b/tensorflow/compiler/tf2xla/kernels/pad_op.cc
index 17b85338f7..89fd610bc6 100644
--- a/tensorflow/compiler/tf2xla/kernels/pad_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/pad_op.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/register_types.h"
@@ -74,11 +75,10 @@ class PadOp : public XlaOpKernel {
if (ctx->num_inputs() == 3) {
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(ctx->InputShape(2)),
errors::InvalidArgument("constant_values must be a scalar."));
- ctx->SetOutput(0,
- ctx->builder()->Pad(ctx->Input(0), ctx->Input(2), config));
+ ctx->SetOutput(0, xla::Pad(ctx->Input(0), ctx->Input(2), config));
} else {
auto zero = XlaHelpers::Zero(ctx->builder(), input_type(0));
- ctx->SetOutput(0, ctx->builder()->Pad(ctx->Input(0), zero, config));
+ ctx->SetOutput(0, xla::Pad(ctx->Input(0), zero, config));
}
}
};
diff --git a/tensorflow/compiler/tf2xla/kernels/pooling_ops.cc b/tensorflow/compiler/tf2xla/kernels/pooling_ops.cc
index eb8b5b130f..12d9cb9bac 100644
--- a/tensorflow/compiler/tf2xla/kernels/pooling_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/pooling_ops.cc
@@ -20,7 +20,9 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
@@ -61,6 +63,9 @@ class PoolingOp : public XlaOpKernel {
Padding padding;
OP_REQUIRES_OK(ctx, ctx->GetAttr("padding", &padding));
padding_ = (padding == VALID) ? xla::Padding::kValid : xla::Padding::kSame;
+
+ OP_REQUIRES_OK(
+ ctx, DataTypeToPrimitiveType(reduction_type_, &xla_reduction_type_));
}
int num_dims() const { return num_spatial_dims_ + 2; }
@@ -113,8 +118,8 @@ class PoolingOp : public XlaOpKernel {
xla::XlaBuilder* const b = ctx->builder();
auto input =
XlaHelpers::ConvertElementType(b, ctx->Input(0), reduction_type_);
- auto reduce = ctx->builder()->ReduceWindow(
- input, InitValue(b), *Reduction(ctx), ksize, stride, padding_);
+ auto reduce = xla::ReduceWindow(input, InitValue(b), *Reduction(ctx), ksize,
+ stride, padding_);
auto pooled = XlaHelpers::ConvertElementType(b, reduce, input_type(0));
ctx->SetOutput(0,
PostProcessOutput(ctx, pooled, input_type(0), input_shape));
@@ -127,6 +132,7 @@ class PoolingOp : public XlaOpKernel {
xla::Padding padding_;
TensorFormat data_format_ = FORMAT_NHWC;
DataType reduction_type_;
+ xla::PrimitiveType xla_reduction_type_;
};
class MaxPoolOp : public PoolingOp {
@@ -136,7 +142,7 @@ class MaxPoolOp : public PoolingOp {
/*reduction_type=*/ctx->input_type(0)) {}
xla::XlaOp InitValue(xla::XlaBuilder* b) override {
- return XlaHelpers::MinValue(b, reduction_type_);
+ return xla::MinValue(b, xla_reduction_type_);
}
const xla::XlaComputation* Reduction(XlaOpKernelContext* ctx) override {
@@ -190,7 +196,7 @@ static xla::XlaOp AvgPoolDivideByCount(
auto divisor =
XlaHelpers::IntegerLiteral(ctx->builder(), dtype, window_size);
- return ctx->builder()->Div(output, divisor);
+ return xla::Div(output, divisor);
} else {
// For SAME padding, the padding shouldn't be included in the
// counts. We use another ReduceWindow to find the right counts.
@@ -212,18 +218,18 @@ static xla::XlaOp AvgPoolDivideByCount(
// Build a matrix of all 1s, with the same width/height as the input.
const DataType accumulation_type = XlaHelpers::SumAccumulationType(dtype);
- auto ones = ctx->builder()->Broadcast(
+ auto ones = xla::Broadcast(
XlaHelpers::One(ctx->builder(), accumulation_type), input_dim_sizes);
// Perform a ReduceWindow with the same window size, strides, and padding
// to count the number of contributions to each result element.
- auto reduce = ctx->builder()->ReduceWindow(
+ auto reduce = xla::ReduceWindow(
ones, XlaHelpers::Zero(ctx->builder(), accumulation_type),
*ctx->GetOrCreateAdd(accumulation_type), window_ksize, window_stride,
xla::Padding::kSame);
auto counts = XlaHelpers::ConvertElementType(ctx->builder(), reduce, dtype);
- return ctx->builder()->Div(output, counts, window_dims);
+ return xla::Div(output, counts, window_dims);
}
}
@@ -235,7 +241,7 @@ class AvgPoolOp : public PoolingOp {
XlaHelpers::SumAccumulationType(ctx->input_type(0))) {}
xla::XlaOp InitValue(xla::XlaBuilder* b) override {
- return XlaHelpers::Zero(b, reduction_type_);
+ return xla::Zero(b, xla_reduction_type_);
}
const xla::XlaComputation* Reduction(XlaOpKernelContext* ctx) override {
@@ -347,9 +353,9 @@ class MaxPoolGradOp : public XlaOpKernel {
xla::XlaOp init_value = XlaHelpers::Zero(ctx->builder(), input_type(2));
auto select = CreateScalarGeComputation(element_type, ctx->builder());
auto scatter = CreateScalarAddComputation(element_type, ctx->builder());
- xla::XlaOp gradients = ctx->builder()->SelectAndScatter(
- input, select, ksize_, stride_, xla_padding, out_backprop, init_value,
- scatter);
+ xla::XlaOp gradients =
+ xla::SelectAndScatter(input, select, ksize_, stride_, xla_padding,
+ out_backprop, init_value, scatter);
ctx->SetOutput(0, gradients);
}
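// Note on the SelectAndScatter above: `select` (scalar Ge) picks, per pooling
// window, a position holding the window maximum; `scatter` (scalar add) then
// accumulates the corresponding out_backprop value there, starting from
// init_value = 0. Overlapping windows therefore sum their contributions,
// which is exactly the max-pool gradient: e.g. a 2x2 window over [1 5; 3 2]
// with incoming gradient 10 routes all 10 to the position of the 5.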
@@ -485,12 +491,12 @@ class AvgPoolGradOp : public XlaOpKernel {
}
auto zero = XlaHelpers::Zero(b, dtype);
- auto padded_gradients = b->Pad(out_backprop_div, zero, padding_config);
+ auto padded_gradients = xla::Pad(out_backprop_div, zero, padding_config);
// in_backprop = padded_gradients <conv> ones
std::vector<int64> ones(num_dims(), 1LL);
auto accumulation_type = XlaHelpers::SumAccumulationType(dtype);
- auto in_backprop = b->ReduceWindow(
+ auto in_backprop = xla::ReduceWindow(
XlaHelpers::ConvertElementType(b, padded_gradients, accumulation_type),
XlaHelpers::Zero(b, accumulation_type),
*ctx->GetOrCreateAdd(accumulation_type), ksize_,
@@ -614,58 +620,61 @@ class MaxPoolGradGradOp : public XlaOpKernel {
auto b = ctx->builder();
- auto sixteen = b->ConstantR0<uint32>(16);
+ auto sixteen = xla::ConstantR0<uint32>(b, 16);
// in (f32) -> round to bf16 -> f32 for correct bitwidth -> 16-high-bit u32
- auto in_hi = b->BitcastConvertType(
- b->ConvertElementType(b->ConvertElementType(input, xla::BF16),
- xla::F32),
+ auto in_hi = xla::BitcastConvertType(
+ xla::ConvertElementType(xla::ConvertElementType(input, xla::BF16),
+ xla::F32),
xla::U32);
- auto bp_int = b->BitcastConvertType(out_backprop, xla::U32);
- auto bp_hi = b->ShiftRightLogical(bp_int, sixteen);
- auto bp_lo = b->ShiftRightLogical(b->ShiftLeft(bp_int, sixteen), sixteen);
- auto in_hi_bp_hi = b->Add(in_hi, bp_hi); // Want an unsigned add.
- auto in_hi_bp_lo = b->Add(in_hi, bp_lo); // Want an unsigned add.
-
- auto init_value = XlaHelpers::MinValue(b, DT_FLOAT);
+ auto bp_int = xla::BitcastConvertType(out_backprop, xla::U32);
+ auto bp_hi = xla::ShiftRightLogical(bp_int, sixteen);
+ auto bp_lo =
+ xla::ShiftRightLogical(xla::ShiftLeft(bp_int, sixteen), sixteen);
+ auto in_hi_bp_hi = xla::Add(in_hi, bp_hi); // Want an unsigned add.
+ auto in_hi_bp_lo = xla::Add(in_hi, bp_lo); // Want an unsigned add.
+
+ auto init_value = xla::MinValue(b, xla::F32);
// We will reduce by taking the maximal value up to 16 bits (ignoring the lo
// 16 bits of packed-in hi/lo backprop value).
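    // How the packing works: ConvertElementType(input, BF16) and back keeps
    // only the 16 high bits of each f32, so in_hi has a zero low half-word.
    // Adding bp_hi (backprop >> 16) or bp_lo (backprop's low 16 bits) stores
    // half of the gradient's bits there without carrying into the high
    // half-word, which still orders like the rounded input. The comparator
    // below masks off the low 16 bits before comparing, so each ReduceWindow
    // selects the lane of the window maximum, and the surviving low
    // half-words are reassembled into the full gradient via grads_hi and
    // grads_lo further down.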
auto rb = b->CreateSubBuilder("GreaterOrEqOf_ByFirst16Bits");
{
// F32 parameters to satisfy lowering type restriction for reduce opcode.
const xla::Shape scalar = xla::ShapeUtil::MakeShape(xla::F32, {});
- auto lhs = rb->Parameter(0, scalar, "lhs");
- auto rhs = rb->Parameter(1, scalar, "rhs");
- auto sixteen = rb->ConstantR0<int32>(16);
- auto lhs_criteria = rb->ShiftLeft(
- rb->ShiftRightLogical(rb->BitcastConvertType(lhs, xla::S32), sixteen),
- sixteen);
- auto rhs_criteria = rb->ShiftLeft(
- rb->ShiftRightLogical(rb->BitcastConvertType(rhs, xla::S32), sixteen),
- sixteen);
+ auto lhs = xla::Parameter(rb.get(), 0, scalar, "lhs");
+ auto rhs = xla::Parameter(rb.get(), 1, scalar, "rhs");
+ auto sixteen = xla::ConstantR0<int32>(rb.get(), 16);
+ auto lhs_criteria =
+ xla::ShiftLeft(xla::ShiftRightLogical(
+ xla::BitcastConvertType(lhs, xla::S32), sixteen),
+ sixteen);
+ auto rhs_criteria =
+ xla::ShiftLeft(xla::ShiftRightLogical(
+ xla::BitcastConvertType(rhs, xla::S32), sixteen),
+ sixteen);
// Must use a F32 comparison, because S32 would not work for negatives.
- rb->Select(rb->Ge(rb->BitcastConvertType(lhs_criteria, xla::F32),
- rb->BitcastConvertType(rhs_criteria, xla::F32)),
- lhs, rhs);
+ xla::Select(xla::Ge(xla::BitcastConvertType(lhs_criteria, xla::F32),
+ xla::BitcastConvertType(rhs_criteria, xla::F32)),
+ lhs, rhs);
}
auto reduce = rb->BuildAndNoteError();
xla::Padding xla_padding =
(padding_ == VALID) ? xla::Padding::kValid : xla::Padding::kSame;
auto pooled_hi =
- b->ReduceWindow(b->BitcastConvertType(in_hi_bp_hi, xla::F32),
- init_value, reduce, ksize_, stride_, xla_padding);
+ xla::ReduceWindow(xla::BitcastConvertType(in_hi_bp_hi, xla::F32),
+ init_value, reduce, ksize_, stride_, xla_padding);
auto pooled_lo =
- b->ReduceWindow(b->BitcastConvertType(in_hi_bp_lo, xla::F32),
- init_value, reduce, ksize_, stride_, xla_padding);
+ xla::ReduceWindow(xla::BitcastConvertType(in_hi_bp_lo, xla::F32),
+ init_value, reduce, ksize_, stride_, xla_padding);
auto grads_hi =
- b->ShiftLeft(b->BitcastConvertType(pooled_hi, xla::U32), sixteen);
- auto grads_lo = b->ShiftRightLogical(
- b->ShiftLeft(b->BitcastConvertType(pooled_lo, xla::U32), sixteen),
+ xla::ShiftLeft(xla::BitcastConvertType(pooled_hi, xla::U32), sixteen);
+ auto grads_lo = xla::ShiftRightLogical(
+ xla::ShiftLeft(xla::BitcastConvertType(pooled_lo, xla::U32), sixteen),
sixteen);
- auto grads = b->Add(grads_hi, grads_lo); // Want an unsigned add.
+ auto grads = xla::Add(grads_hi, grads_lo); // Want an unsigned add.
xla::PrimitiveType element_type;
OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(input_type(2), &element_type));
- ctx->SetOutput(0, b->BitcastConvertType(grads, element_type));
+ ctx->SetOutput(0, xla::BitcastConvertType(grads, element_type));
}
protected:
diff --git a/tensorflow/compiler/tf2xla/kernels/qr_op.cc b/tensorflow/compiler/tf2xla/kernels/qr_op.cc
new file mode 100644
index 0000000000..de9068a640
--- /dev/null
+++ b/tensorflow/compiler/tf2xla/kernels/qr_op.cc
@@ -0,0 +1,47 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/tf2xla/lib/qr.h"
+#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
+#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+
+namespace tensorflow {
+namespace {
+
+class QROp : public XlaOpKernel {
+ public:
+ explicit QROp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
+ bool full_matrices;
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("full_matrices", &full_matrices));
+ OP_REQUIRES(
+ ctx, full_matrices,
+ errors::Unimplemented("full_matrices=False case of QR decomposition is "
+ "not implemented in TF/XLA"));
+ }
+ void Compile(XlaOpKernelContext* ctx) override {
+ auto result = QRDecomposition(ctx->Input(0));
+ if (!result.ok()) {
+ ctx->SetStatus(result.status());
+ return;
+ }
+ ctx->SetOutput(0, result.ValueOrDie().q);
+ ctx->SetOutput(1, result.ValueOrDie().r);
+ }
+};
+
+REGISTER_XLA_OP(Name("Qr").TypeConstraint("T", kFloatTypes), QROp);
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/kernels/quantize_and_dequantize_op.cc b/tensorflow/compiler/tf2xla/kernels/quantize_and_dequantize_op.cc
index 661cd5923e..e88221e4f4 100644
--- a/tensorflow/compiler/tf2xla/kernels/quantize_and_dequantize_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/quantize_and_dequantize_op.cc
@@ -13,10 +13,13 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
@@ -28,82 +31,115 @@ class QuantizeAndDequantizeOp : public XlaOpKernel {
: XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("signed_input", &signed_input_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("range_given", &range_given_));
- OP_REQUIRES_OK(ctx, ctx->GetAttr("num_bits", &num_bits_));
- OP_REQUIRES(ctx, num_bits_ > 0 && num_bits_ < (signed_input_ ? 62 : 63),
- errors::InvalidArgument("num_bits is out of range: ", num_bits_,
- " with signed_input_ ", signed_input_));
}
void Compile(XlaOpKernelContext* ctx) override {
xla::XlaOp input = ctx->Input(0);
const DataType data_type = ctx->input_type(0);
- // Comments taken from semantics description at
- // https://www.tensorflow.org/versions/r1.0/api_docs/cc/class/tensorflow/ops/quantize-and-dequantize
- //
- // ... we find m such that
- //
- // m = max(abs(input_min), abs(input_max)) if range_given is true,
- // m = max(abs(min_elem(input)),
- // abs(max_elem(input))) otherwise.
+ xla::PrimitiveType xla_type;
+ OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(data_type, &xla_type));
+
xla::XlaBuilder* b = ctx->builder();
- xla::XlaOp input_min, input_max;
+
+ // The implementation follows
+ // tensorflow/core/kernels/quantize_and_dequantize_op.h closely.
+ xla::XlaOp min_range, max_range;
if (range_given_) {
- double input_min_value, input_max_value;
- OP_REQUIRES_OK(ctx, ctx->ConstantInputAsFloatScalar(1, &input_min_value));
- OP_REQUIRES_OK(ctx, ctx->ConstantInputAsFloatScalar(2, &input_max_value));
- input_min = XlaHelpers::FloatLiteral(b, data_type, input_min_value);
- input_max = XlaHelpers::FloatLiteral(b, data_type, input_max_value);
+ min_range = ctx->Input(1);
+ max_range = ctx->Input(2);
} else {
const xla::XlaComputation* fmax = ctx->GetOrCreateMax(data_type);
const xla::XlaComputation* fmin = ctx->GetOrCreateMin(data_type);
- input_min =
- b->ReduceAll(input, XlaHelpers::MaxValue(b, data_type), *fmin);
- input_max =
- b->ReduceAll(input, XlaHelpers::MinValue(b, data_type), *fmax);
+ min_range = ReduceAll(input, xla::MaxValue(b, xla_type), *fmin);
+ max_range = ReduceAll(input, xla::MinValue(b, xla_type), *fmax);
}
- xla::XlaOp m = b->Max(b->Abs(input_min), b->Abs(input_max));
-
- // Next, we choose our fixed-point quantization buckets, [min_fixed,
- // max_fixed]. If signed_input is true, this is
- //
- // [min_fixed, max_fixed ] = [-((1 << (num_bits - 1)) - 1),
- // (1 << (num_bits - 1)) - 1].
- //
- // Otherwise, if signed_input is false, the fixed-point range is
- //
- // [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
- int64 min_fixed, max_fixed;
+
+ xla::XlaOp num_bits;
+ if (num_bits_ < 0) {
+ OP_REQUIRES(
+ ctx, ctx->num_inputs() == 4,
+ errors::Internal("Expected 4 inputs to QuantizeAndDequantize"));
+ num_bits = ctx->Input(3);
+ } else {
+ num_bits = xla::ConstantR0<int32>(b, num_bits_);
+ }
+
+ const xla::XlaOp zero = XlaHelpers::Zero(b, data_type);
+ const xla::XlaOp one = XlaHelpers::One(b, data_type);
+ const xla::XlaOp two = XlaHelpers::FloatLiteral(b, data_type, 2.0);
+ const xla::XlaOp half = XlaHelpers::FloatLiteral(b, data_type, 0.5);
+
+ // Calculate the range for the simulated integer quantization:
+ // e.g. [-128,127] for signed = true, num_bits = 8,
+ // or [0, 255] for signed = false, num_bits = 8.
+ // We do this in floating point for hardware that does not have 64-bit
+ // integer support.
+ xla::XlaOp min_quantized, max_quantized;
if (signed_input_) {
- min_fixed = -((1LL << (num_bits_ - 1)) - 1);
- max_fixed = (1LL << (num_bits_ - 1)) - 1;
+ min_quantized =
+ -Pow(two, ConvertElementType(num_bits - xla::ConstantR0<int32>(b, 1),
+ xla_type));
+ max_quantized =
+ Pow(two, ConvertElementType(num_bits - xla::ConstantR0<int32>(b, 1),
+ xla_type)) -
+ one;
} else {
- min_fixed = 0;
- max_fixed = (1LL << num_bits_) - 1;
+ min_quantized = zero;
+ max_quantized = Pow(two, ConvertElementType(num_bits, xla_type)) - one;
}
- // From this we compute our scaling factor, s:
- //
- // s = (max_fixed - min_fixed) / (2 * m).
- xla::XlaOp s =
- b->Div(XlaHelpers::FloatLiteral(b, data_type, max_fixed - min_fixed),
- b->Mul(XlaHelpers::FloatLiteral(b, data_type, 2.0), m));
+ // Determine the maximum scaling factor that would scale
+ // [min_range, max_range] to not exceed [min_quantized, max_quantized],
+ // while keeping 0 unchanged.
+ xla::XlaOp scale_from_min_side =
+ Select(Gt(min_quantized * min_range, zero), min_quantized / min_range,
+ xla::MaxFiniteValue(b, xla_type));
+ xla::XlaOp scale_from_max_side =
+ Select(Gt(max_quantized * max_range, zero), max_quantized / max_range,
+ xla::MaxFiniteValue(b, xla_type));
- // Now we can quantize and dequantize the elements of our tensor. An element
- // e is transformed into e':
- //
- // e' = (e * s).round_to_nearest() / s.
- xla::XlaOp result = b->Div(b->Round(b->Mul(input, s)), s);
+    // Note: the side of the range that determines the scale is kept
+    // unchanged; the other side is recomputed from the chosen scale.
+ xla::XlaOp cond = Lt(scale_from_min_side, scale_from_max_side);
+ xla::XlaOp scale = Select(cond, scale_from_min_side, scale_from_max_side);
+ xla::XlaOp inverse_scale =
+ Select(cond, min_range / min_quantized, max_range / max_quantized);
+ min_range = Select(cond, min_range, min_quantized * inverse_scale);
+ max_range = Select(cond, max_quantized * inverse_scale, max_range);
+ if (range_given_) {
+ // Note: The clamping here is to avoid overflow in the quantized type.
+      // The semantics of the op do not guarantee clamping to the specified
+      // min_range and max_range, because we may have changed either of them
+      // above.
+      // There is no need to clamp to min_range and max_range when
+      // range_given_ == false, since in that case they were measured from the
+      // tensor itself.
+ input = Clamp(min_range, input, max_range);
+ }
+ xla::XlaOp result =
+ Floor((input - min_range) * scale + half) * inverse_scale + min_range;
ctx->SetOutput(0, result);
}
- int64 num_bits_;
+ protected:
+ int64 num_bits_ = -1;
bool signed_input_;
bool range_given_;
};
-REGISTER_XLA_OP(Name("QuantizeAndDequantizeV2"), QuantizeAndDequantizeOp);
+class QuantizeAndDequantizeV2Op : public QuantizeAndDequantizeOp {
+ public:
+ explicit QuantizeAndDequantizeV2Op(OpKernelConstruction* ctx)
+ : QuantizeAndDequantizeOp(ctx) {
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("num_bits", &num_bits_));
+ OP_REQUIRES(ctx, num_bits_ > 0 && num_bits_ < (signed_input_ ? 62 : 63),
+ errors::InvalidArgument("num_bits is out of range: ", num_bits_,
+ " with signed_input_ ", signed_input_));
+ }
+};
+
+REGISTER_XLA_OP(Name("QuantizeAndDequantizeV2"), QuantizeAndDequantizeV2Op);
+REGISTER_XLA_OP(Name("QuantizeAndDequantizeV3"), QuantizeAndDequantizeOp);
} // namespace
} // namespace tensorflow
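
Aside: the scale selection built above is easier to check on scalars. Below is a minimal host-side C++ sketch of the same arithmetic; it is not part of the patch, QuantDequantScalar is a hypothetical name, and it assumes the range_given path so the clamp applies.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <limits>

// Hypothetical scalar model of the QuantizeAndDequantize lowering above
// (range_given case). Mirrors the Select/Floor graph built by the kernel.
double QuantDequantScalar(double x, double min_range, double max_range,
                          int num_bits, bool signed_input) {
  // Simulated integer range, e.g. [-128, 127] for signed 8-bit.
  const double min_q = signed_input ? -std::pow(2.0, num_bits - 1) : 0.0;
  const double max_q =
      std::pow(2.0, signed_input ? num_bits - 1 : num_bits) - 1.0;
  // Largest scale keeping [min_range, max_range] inside [min_q, max_q].
  const double inf = std::numeric_limits<double>::max();
  const double s_min = min_q * min_range > 0 ? min_q / min_range : inf;
  const double s_max = max_q * max_range > 0 ? max_q / max_range : inf;
  const bool from_min = s_min < s_max;
  const double scale = from_min ? s_min : s_max;
  const double inv_scale = from_min ? min_range / min_q : max_range / max_q;
  // Recompute the non-determining side of the range from the chosen scale.
  if (from_min) max_range = max_q * inv_scale;
  else min_range = min_q * inv_scale;
  x = std::min(std::max(x, min_range), max_range);  // clamp (range_given)
  return std::floor((x - min_range) * scale + 0.5) * inv_scale + min_range;
}

int main() {
  // 8-bit signed quantization of 0.3 over [-1, 1] lands on 38/127 ~ 0.2992.
  std::printf("%f\n", QuantDequantScalar(0.3, -1.0, 1.0, 8, true));
  return 0;
}
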
diff --git a/tensorflow/compiler/tf2xla/kernels/random_ops.cc b/tensorflow/compiler/tf2xla/kernels/random_ops.cc
index be83834e86..607cad798a 100644
--- a/tensorflow/compiler/tf2xla/kernels/random_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/random_ops.cc
@@ -26,6 +26,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
@@ -46,8 +48,8 @@ class RandomUniformOp : public XlaOpKernel {
OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(dtype, shape, &xla_shape));
xla::XlaBuilder* b = ctx->builder();
- xla::XlaOp result = b->RngUniform(XlaHelpers::Zero(b, dtype),
- XlaHelpers::One(b, dtype), xla_shape);
+ xla::XlaOp result = xla::RngUniform(XlaHelpers::Zero(b, dtype),
+ XlaHelpers::One(b, dtype), xla_shape);
ctx->SetOutput(0, result);
}
@@ -72,57 +74,121 @@ class RandomShuffleOp : public XlaOpKernel {
for (tensorflow::TensorShapeDim dimension : input_shape) {
num_elements *= dimension.size;
}
+
if (num_elements <= 1 || n <= 1) {
// No shuffling is required, so copy input directly to output
ctx->SetOutput(0, input);
- } else {
- // Generate the random swaps for the indices.
- auto swaps_shape = xla::ShapeUtil::MakeShape(xla::S32, {n});
- auto swaps =
- builder->RngUniform(builder->ConstantR0<int32>(0),
- builder->ConstantR0<int32>(n), swaps_shape);
-
- // Generate range(n) as the initial value for the indices to be swapped.
- xla::XlaOp indices;
- TF_CHECK_OK(XlaHelpers::Iota(builder, DataType::DT_INT32, n, &indices));
-
- // Swap the indices at i and swaps[i].
- auto swap_body_fn = [&](xla::XlaOp i,
- gtl::ArraySlice<xla::XlaOp> loop_vars,
- xla::XlaBuilder* builder)
- -> xla::StatusOr<std::vector<xla::XlaOp>> {
- auto swaps = loop_vars[0];
- auto indices = loop_vars[1];
- i = builder->Reshape(i, {1});
- // temp = indices[i]
- auto temp = builder->DynamicSlice(indices, i, {1});
- // swap_index = swaps[i]
- auto swap_index = builder->DynamicSlice(swaps, i, {1});
- // swap_value = indices[swaps[i]]
- auto swap_value = builder->DynamicSlice(indices, swap_index, {1});
- // indices[i] = indices[swaps[i]]
- indices = builder->DynamicUpdateSlice(indices, swap_value, i);
- // indices[swaps[i]] = temp
- indices = builder->DynamicUpdateSlice(indices, temp, swap_index);
- return std::vector<xla::XlaOp>{swaps, indices};
- };
- // for i in range(n):
- auto swap_loop_result =
- XlaForEachIndex(n, xla::S32, swap_body_fn, {swaps, indices},
- "indices_swap_loop", builder)
- .ValueOrDie();
- auto swapped_indices = swap_loop_result[1];
-
- // Gather the data using the swapped indices as the shuffled order.
- auto indices_tensor_shape = TensorShape({n});
- DataType type = ctx->expected_output_dtype(0);
- xla::XlaOp gather;
- OP_REQUIRES_OK(ctx, XlaGather(input, input_shape, swapped_indices,
- indices_tensor_shape,
- /*axis=*/0, /*indices_are_nd=*/false, type,
- DT_INT32, builder, &gather));
- ctx->SetOutput(0, gather);
+ return;
+ }
+
+ if (input_shape.dims() == 1) {
+ // For R1s, shuffle values by sorting instead of the obvious Fisher-Yates
+ // algorithm. Fisher-Yates is simple to implement and correct, but not
+      // easily parallelizable. On a sufficiently parallel architecture, it is
+      // faster to sort many times than to run Fisher-Yates once.
+
+      // Shuffle values by assigning each value a random key and sorting the
+      // keys. Keys can collide, causing detectable patterns in the shuffled
+      // output. Collisions translate into more ascending sub-sequences in the
+ // shuffled output than would be expected by chance. To avoid collisions,
+ // the number of possible key values must be sufficiently large.
+
+ // How are more than 2^32 keys created? In each loop iteration, the
+ // algorithm sorts by random keys. Conceptually, the earlier iterations
+ // are sorting on the lower-order bits of larger keys that are never
+ // actually assembled.
+
+ // The expected number of collisions is n - d + d(1 - 1/d)^n, where d is
+ // the number of possible keys and n is the number of values. If d = n^2,
+ // then the limit as n goes to infinity is 1/2. If d = n^3, then the limit
+ // as n goes to infinity is zero.
+
+ // This implementation ensures that the key-space is greater than or equal
+ // to the cube of the number of values. The risk of collisions can be
+ // further reduced by increasing Exponent at the expense of
+ // performance.
+
+ // For Exponent = 2, the expected number of collisions per shuffle is
+ // maximized at n = floor((2^32-1)^(1/2)) = 65535 where the expectation is
+ // about 1/2.
+
+ // For Exponent = 3, the expected number of collisions per shuffle is
+ // maximized at n = floor((2^32-1)^(1/3)) = 1625 where the expectation is
+ // about 1/3255.
+
+ // For Exponent = 4, the expected number of collisions per shuffle is
+ // maximized at n = floor((2^32-1)^(1/4)) = 255 where the expectation is
+ // about 1/132622.
+ constexpr int Exponent = 3;
+ const int rounds = static_cast<int>(
+ std::ceil(Exponent * std::log(num_elements) / std::log(kuint32max)));
+
+ const xla::Shape key_shape =
+ xla::ShapeUtil::MakeShape(xla::U32, {num_elements});
+ xla::XlaOp zero = xla::ConstantR0(builder, 0U);
+
+      // Unfortunately, xla::RngUniform gives values in the half-open interval
+ // rather than the closed interval, so instead of 2^32 possible keys there
+ // are only 2^32 - 1 (kuint32max).
+ xla::XlaOp max_value = xla::ConstantR0(builder, kuint32max);
+
+ xla::XlaOp curr = input;
+ for (int i = 0; i < rounds; ++i) {
+ xla::XlaOp keys = xla::RngUniform(zero, max_value, key_shape);
+ xla::XlaOp sorted = xla::Sort(keys, curr);
+ curr = xla::GetTupleElement(sorted, 1);
+ }
+
+ ctx->SetOutput(0, curr);
+ return;
}
+
+ // The Fisher-Yates algorithm.
+
+ // Generate the random swaps for the indices.
+ auto swaps_shape = xla::ShapeUtil::MakeShape(xla::S32, {n});
+ auto swaps =
+ xla::RngUniform(xla::ConstantR0<int32>(builder, 0),
+ xla::ConstantR0<int32>(builder, n), swaps_shape);
+
+ // Generate range(n) as the initial value for the indices to be swapped.
+ xla::XlaOp indices = xla::Iota(builder, xla::S32, n);
+
+ // Swap the indices at i and swaps[i].
+ auto swap_body_fn = [&](xla::XlaOp i, gtl::ArraySlice<xla::XlaOp> loop_vars,
+ xla::XlaBuilder* builder)
+ -> xla::StatusOr<std::vector<xla::XlaOp>> {
+ auto swaps = loop_vars[0];
+ auto indices = loop_vars[1];
+ i = xla::Reshape(i, {1});
+ // temp = indices[i]
+ auto temp = xla::DynamicSlice(indices, i, {1});
+ // swap_index = swaps[i]
+ auto swap_index = xla::DynamicSlice(swaps, i, {1});
+ // swap_value = indices[swaps[i]]
+ auto swap_value = xla::DynamicSlice(indices, swap_index, {1});
+ // indices[i] = indices[swaps[i]]
+ indices = xla::DynamicUpdateSlice(indices, swap_value, i);
+ // indices[swaps[i]] = temp
+ indices = xla::DynamicUpdateSlice(indices, temp, swap_index);
+ return std::vector<xla::XlaOp>{swaps, indices};
+ };
+ // for i in range(n):
+ auto swap_loop_result =
+ XlaForEachIndex(n, xla::S32, swap_body_fn, {swaps, indices},
+ "indices_swap_loop", builder)
+ .ValueOrDie();
+ auto swapped_indices = swap_loop_result[1];
+
+ // Gather the data using the swapped indices as the shuffled order.
+ auto indices_tensor_shape = TensorShape({n});
+ DataType type = ctx->expected_output_dtype(0);
+ xla::XlaOp gather;
+ OP_REQUIRES_OK(ctx, XlaGather(input, input_shape, swapped_indices,
+ indices_tensor_shape,
+ /*axis=*/0, /*indices_are_nd=*/false, type,
+ DT_INT32, builder, &gather));
+ ctx->SetOutput(0, gather);
}
private:
@@ -153,7 +219,7 @@ class RandomUniformIntOp : public XlaOpKernel {
auto minval = ctx->Input(1);
auto maxval = ctx->Input(2);
- ctx->SetOutput(0, ctx->builder()->RngUniform(minval, maxval, xla_shape));
+ ctx->SetOutput(0, xla::RngUniform(minval, maxval, xla_shape));
}
private:
@@ -179,8 +245,8 @@ class RandomStandardNormalOp : public XlaOpKernel {
xla::XlaBuilder* b = ctx->builder();
// Normal distribution with a mean of 0 and a standard deviation of 1:
- xla::XlaOp result = b->RngNormal(XlaHelpers::Zero(b, dtype),
- XlaHelpers::One(b, dtype), xla_shape);
+ xla::XlaOp result = xla::RngNormal(XlaHelpers::Zero(b, dtype),
+ XlaHelpers::One(b, dtype), xla_shape);
ctx->SetOutput(0, result);
}
@@ -209,10 +275,8 @@ class TruncatedNormalOp : public XlaOpKernel {
xla::XlaOp one = XlaHelpers::FloatLiteral(b, dtype, 1.0);
xla::XlaOp min_positive =
XlaHelpers::FloatLiteral(b, dtype, std::numeric_limits<float>::min());
- auto uniform = b->RngUniform(min_positive, one, xla_shape);
- auto truncated_normal_or_status = TruncatedNormal(dtype, uniform, b);
- OP_REQUIRES_OK(ctx, truncated_normal_or_status.status());
- ctx->SetOutput(0, truncated_normal_or_status.ValueOrDie());
+ auto uniform = xla::RngUniform(min_positive, one, xla_shape);
+ ctx->SetOutput(0, TruncatedNormal(uniform));
}
};
@@ -221,5 +285,5 @@ REGISTER_XLA_OP(Name("TruncatedNormal")
.TypeConstraint("dtype", DT_FLOAT),
TruncatedNormalOp);
-} // anonymous namespace
+} // namespace
} // namespace tensorflow
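
Aside: the collision figures quoted in the R1 shuffle comment follow from n - d + d(1 - 1/d)^n with d = kuint32max^rounds. A small C++ check, not part of the patch, using a cancellation-safe rewrite of that formula:

#include <cmath>
#include <cstdio>

// Expected number of key collisions when n values draw keys uniformly from
// d possibilities: n - d + d*(1 - 1/d)^n, computed via expm1/log1p so the
// near-total cancellation between n and d*(...)^n does not destroy accuracy.
double ExpectedCollisions(double n, double d) {
  return n + d * std::expm1(n * std::log1p(-1.0 / d));
}

int main() {
  const double kKeys = 4294967295.0;  // 2^32 - 1 usable keys per round
  for (int exponent = 2; exponent <= 4; ++exponent) {
    // Worst case named in the comment: n = floor(kKeys^(1/exponent)).
    const double n = std::floor(std::pow(kKeys, 1.0 / exponent));
    // Number of sorting rounds the kernel would choose for this n.
    const int rounds = static_cast<int>(
        std::ceil(exponent * std::log(n) / std::log(kKeys)));
    const double d = std::pow(kKeys, rounds);  // effective key space
    // Prints roughly 1/2, 1/3255, and 1/132622, matching the comment.
    std::printf("Exponent=%d n=%.0f rounds=%d E[collisions]=%g\n",
                exponent, n, rounds, ExpectedCollisions(n, d));
  }
  return 0;
}
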
diff --git a/tensorflow/compiler/tf2xla/kernels/reduce_window_op.cc b/tensorflow/compiler/tf2xla/kernels/reduce_window_op.cc
index 08894489ac..76bd1e62aa 100644
--- a/tensorflow/compiler/tf2xla/kernels/reduce_window_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/reduce_window_op.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -98,10 +99,10 @@ class ReduceWindowOp : public XlaOpKernel {
{
std::unique_ptr<xla::XlaBuilder> cb =
builder->CreateSubBuilder("wrapper");
- auto x = cb->Parameter(0, scalar_shape, "x");
- auto y = cb->Parameter(1, scalar_shape, "y");
- auto outputs = cb->Call(*reducer.computation, {x, y});
- cb->GetTupleElement(outputs, 0);
+ auto x = xla::Parameter(cb.get(), 0, scalar_shape, "x");
+ auto y = xla::Parameter(cb.get(), 1, scalar_shape, "y");
+ auto outputs = xla::Call(cb.get(), *reducer.computation, {x, y});
+ xla::GetTupleElement(outputs, 0);
xla::StatusOr<xla::XlaComputation> result = cb->Build();
OP_REQUIRES_OK(context, result.status());
wrapper = std::move(result.ValueOrDie());
@@ -112,7 +113,7 @@ class ReduceWindowOp : public XlaOpKernel {
padding[i] = {padding_low_[i], padding_high_[i]};
}
- xla::XlaOp output = builder->ReduceWindowWithGeneralPadding(
+ xla::XlaOp output = xla::ReduceWindowWithGeneralPadding(
context->Input(0), context->Input(1), wrapper, window_dimensions_,
window_strides_, padding);
context->SetOutput(0, output);
diff --git a/tensorflow/compiler/tf2xla/kernels/reduction_ops.cc b/tensorflow/compiler/tf2xla/kernels/reduction_ops.cc
index 0f42563779..be7f2bce8c 100644
--- a/tensorflow/compiler/tf2xla/kernels/reduction_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/reduction_ops.cc
@@ -19,7 +19,9 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
namespace tensorflow {
@@ -31,11 +33,11 @@ class SumOp : public XlaReductionOp {
: XlaReductionOp(ctx,
XlaHelpers::SumAccumulationType(ctx->input_type(0))) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
- return XlaHelpers::Zero(builder, reduction_type_);
+ return xla::Zero(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
- builder->Add(scalar_lhs, scalar_rhs);
+ xla::Add(scalar_lhs, scalar_rhs);
}
};
@@ -48,12 +50,12 @@ class ProdOp : public XlaReductionOp {
XlaHelpers::SumAccumulationType(ctx->input_type(0))) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
- return XlaHelpers::One(builder, reduction_type_);
+ return xla::One(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
- builder->Mul(scalar_lhs, scalar_rhs);
+ xla::Mul(scalar_lhs, scalar_rhs);
}
};
@@ -66,12 +68,12 @@ class MinOp : public XlaReductionOp {
: XlaReductionOp(ctx, ctx->input_type(0)) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
- return XlaHelpers::MaxValue(builder, reduction_type_);
+ return xla::MaxValue(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
- builder->Min(scalar_lhs, scalar_rhs);
+ xla::Min(scalar_lhs, scalar_rhs);
}
};
@@ -83,12 +85,12 @@ class MaxOp : public XlaReductionOp {
: XlaReductionOp(ctx, ctx->input_type(0)) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
- return XlaHelpers::MinValue(builder, reduction_type_);
+ return xla::MinValue(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
- builder->Max(scalar_lhs, scalar_rhs);
+ xla::Max(scalar_lhs, scalar_rhs);
}
};
@@ -101,11 +103,11 @@ class MeanOp : public XlaReductionOp {
XlaHelpers::SumAccumulationType(ctx->input_type(0))) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
- return XlaHelpers::Zero(builder, reduction_type_);
+ return xla::Zero(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
- builder->Add(scalar_lhs, scalar_rhs);
+ xla::Add(scalar_lhs, scalar_rhs);
}
xla::XlaOp BuildFinalizer(xla::XlaBuilder* builder,
@@ -113,7 +115,7 @@ class MeanOp : public XlaReductionOp {
int64 num_elements_reduced) override {
auto divisor = XlaHelpers::IntegerLiteral(builder, input_type(0),
num_elements_reduced);
- return builder->Div(reduce_output, divisor);
+ return reduce_output / divisor;
}
};
@@ -126,12 +128,12 @@ class AllOp : public XlaReductionOp {
: XlaReductionOp(ctx, ctx->input_type(0)) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
- return builder->ConstantR0<bool>(true);
+ return xla::ConstantR0<bool>(builder, true);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
- builder->And(scalar_lhs, scalar_rhs);
+ xla::And(scalar_lhs, scalar_rhs);
}
};
@@ -143,12 +145,12 @@ class AnyOp : public XlaReductionOp {
: XlaReductionOp(ctx, ctx->input_type(0)) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
- return builder->ConstantR0<bool>(false);
+ return xla::ConstantR0<bool>(builder, false);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
- builder->Or(scalar_lhs, scalar_rhs);
+ xla::Or(scalar_lhs, scalar_rhs);
}
};
diff --git a/tensorflow/compiler/tf2xla/kernels/reduction_ops.h b/tensorflow/compiler/tf2xla/kernels/reduction_ops.h
index 2ecfb854a1..8333f9b288 100644
--- a/tensorflow/compiler/tf2xla/kernels/reduction_ops.h
+++ b/tensorflow/compiler/tf2xla/kernels/reduction_ops.h
@@ -64,6 +64,7 @@ class XlaReductionOp : public XlaOpKernel {
protected:
DataType reduction_type_;
+ xla::PrimitiveType xla_reduction_type_;
};
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc b/tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc
index 44510c731e..ed1d1c6610 100644
--- a/tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc
+++ b/tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc
@@ -19,7 +19,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
namespace tensorflow {
@@ -31,6 +32,8 @@ XlaReductionOp::XlaReductionOp(OpKernelConstruction* ctx,
OP_REQUIRES_OK(ctx, ctx->MatchSignature({dt, DT_INT32}, {dt}));
OP_REQUIRES_OK(ctx, ctx->GetAttr("keep_dims", &keep_dims_));
+ OP_REQUIRES_OK(
+ ctx, DataTypeToPrimitiveType(reduction_type_, &xla_reduction_type_));
}
// Unless BuildFinalizer is overridden the reduction has no
@@ -101,20 +104,20 @@ void XlaReductionOp::Compile(XlaOpKernelContext* ctx) {
xla::PrimitiveType type;
TF_CHECK_OK(DataTypeToPrimitiveType(reduction_type_, &type));
- auto data = b->ConvertElementType(ctx->Input(0), type);
+ auto data = xla::ConvertElementType(ctx->Input(0), type);
// Call virtual method to get the initial value.
- auto initial = b->ConvertElementType(InitialValue(b), type);
+ auto initial = xla::ConvertElementType(InitialValue(b), type);
// Make two scalar parameters of the desired type for the lambda.
- auto rx = r.Parameter(0, xla::ShapeUtil::MakeShape(type, {}), "x");
- auto ry = r.Parameter(1, xla::ShapeUtil::MakeShape(type, {}), "y");
+ auto rx = xla::Parameter(&r, 0, xla::ShapeUtil::MakeShape(type, {}), "x");
+ auto ry = xla::Parameter(&r, 1, xla::ShapeUtil::MakeShape(type, {}), "y");
// Call virtual method to build the reduction lambda.
BuildReducer(&r, rx, ry);
xla::XlaComputation reduction_computation = r.Build().ConsumeValueOrDie();
- auto reduce = b->Reduce(data, initial, reduction_computation, xla_axes);
+ auto reduce = xla::Reduce(data, initial, reduction_computation, xla_axes);
auto deconverted = XlaHelpers::ConvertElementType(b, reduce, input_type(0));
auto finalized = BuildFinalizer(b, deconverted, num_elements_reduced);
- auto result = keep_dims_ ? b->Reshape(finalized, final_shape) : finalized;
+ auto result = keep_dims_ ? xla::Reshape(finalized, final_shape) : finalized;
ctx->SetOutput(0, result);
}
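
Aside: the InitialValue/BuildReducer/BuildFinalizer virtuals compose into one generic fold: convert to the accumulation type, fold every element into the initial value, then finalize (Mean divides by the element count). A scalar-semantics C++ sketch, not part of the patch, with hypothetical names and a 1-D input:

#include <cstdio>
#include <functional>
#include <vector>

// Hypothetical scalar model of XlaReductionOp::Compile for a 1-D input:
// fold the data into `init` with `reducer`, then apply `finalize`.
double Reduce1D(const std::vector<double>& data, double init,
                const std::function<double(double, double)>& reducer,
                const std::function<double(double, int)>& finalize) {
  double acc = init;
  for (double v : data) acc = reducer(acc, v);
  return finalize(acc, static_cast<int>(data.size()));
}

int main() {
  const std::vector<double> x = {1.0, 2.0, 3.0, 4.0};
  // SumOp: init = 0, reducer = +, no finalizer.
  double sum = Reduce1D(x, 0.0, [](double a, double b) { return a + b; },
                        [](double r, int) { return r; });
  // MeanOp: same fold as Sum, but the finalizer divides by the count.
  double mean = Reduce1D(x, 0.0, [](double a, double b) { return a + b; },
                         [](double r, int n) { return r / n; });
  std::printf("sum=%g mean=%g\n", sum, mean);  // sum=10 mean=2.5
  return 0;
}
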
diff --git a/tensorflow/compiler/tf2xla/kernels/relu_op.cc b/tensorflow/compiler/tf2xla/kernels/relu_op.cc
index ba7d484d53..f4b804e546 100644
--- a/tensorflow/compiler/tf2xla/kernels/relu_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/relu_op.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/no_op.h"
@@ -34,7 +34,7 @@ class ReluOp : public XlaOpKernel {
void Compile(XlaOpKernelContext* ctx) override {
xla::XlaBuilder* builder = ctx->builder();
auto zero = XlaHelpers::Zero(builder, input_type(0));
- ctx->SetOutput(0, builder->Max(zero, ctx->Input(0)));
+ ctx->SetOutput(0, xla::Max(zero, ctx->Input(0)));
}
};
@@ -46,7 +46,7 @@ class Relu6Op : public XlaOpKernel {
xla::XlaBuilder* builder = ctx->builder();
auto zero = XlaHelpers::Zero(builder, input_type(0));
auto six = XlaHelpers::IntegerLiteral(builder, input_type(0), 6);
- ctx->SetOutput(0, builder->Clamp(zero, ctx->Input(0), six));
+ ctx->SetOutput(0, xla::Clamp(zero, ctx->Input(0), six));
}
};
@@ -59,9 +59,9 @@ class ReluGradOp : public XlaOpKernel {
xla::XlaBuilder* b = ctx->builder();
const TensorShape shape = ctx->InputShape(0);
const auto zero =
- b->Broadcast(XlaHelpers::Zero(b, input_type(0)), shape.dim_sizes());
- const auto pred = b->Gt(ctx->Input(1), zero);
- ctx->SetOutput(0, b->Select(pred, ctx->Input(0), zero));
+ xla::Broadcast(XlaHelpers::Zero(b, input_type(0)), shape.dim_sizes());
+ const auto pred = xla::Gt(ctx->Input(1), zero);
+ ctx->SetOutput(0, xla::Select(pred, ctx->Input(0), zero));
}
};
@@ -74,12 +74,12 @@ class Relu6GradOp : public XlaOpKernel {
xla::XlaBuilder* b = ctx->builder();
const TensorShape shape = ctx->InputShape(0);
const auto zero =
- b->Broadcast(XlaHelpers::Zero(b, input_type(0)), shape.dim_sizes());
- const auto six = b->Broadcast(
+ xla::Broadcast(XlaHelpers::Zero(b, input_type(0)), shape.dim_sizes());
+ const auto six = xla::Broadcast(
XlaHelpers::IntegerLiteral(b, input_type(0), 6), shape.dim_sizes());
- auto out =
- b->Select(b->And(b->Lt(ctx->Input(1), six), b->Gt(ctx->Input(1), zero)),
- ctx->Input(0), zero);
+ auto out = xla::Select(
+ xla::And(xla::Lt(ctx->Input(1), six), xla::Gt(ctx->Input(1), zero)),
+ ctx->Input(0), zero);
ctx->SetOutput(0, out);
}
};
diff --git a/tensorflow/compiler/tf2xla/kernels/reshape_op.cc b/tensorflow/compiler/tf2xla/kernels/reshape_op.cc
index af4d64b159..354fec9be7 100644
--- a/tensorflow/compiler/tf2xla/kernels/reshape_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/reshape_op.cc
@@ -19,7 +19,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
@@ -90,8 +91,7 @@ class ReshapeOp : public XlaOpKernel {
VLOG(1) << "Reshape " << input_shape.DebugString() << " "
<< shape.DebugString();
- ctx->SetOutput(0,
- ctx->builder()->Reshape(ctx->Input(0), shape.dim_sizes()));
+ ctx->SetOutput(0, xla::Reshape(ctx->Input(0), shape.dim_sizes()));
}
};
diff --git a/tensorflow/compiler/tf2xla/kernels/retval_op.cc b/tensorflow/compiler/tf2xla/kernels/retval_op.cc
index a711278638..5be70a4ded 100644
--- a/tensorflow/compiler/tf2xla/kernels/retval_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/retval_op.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -62,15 +63,24 @@ class RetvalOp : public XlaOpKernel {
OP_REQUIRES_OK(ctx, tc.AddConstRetval(index_, dtype_, literal));
} else {
TensorShape shape = ctx->InputShape(0);
- TensorShape representation_shape =
- tc.is_entry_computation()
- ? tc.RepresentationShape(shape, ctx->input_type(0))
- : shape;
+ ctx->SetStatus(is_constant.status());
+ TensorShape representation_shape;
+ if (tc.is_entry_computation()) {
+ xla::StatusOr<TensorShape> shape_or_status =
+ tc.RepresentationShape(shape, ctx->input_type(0));
+ if (!shape_or_status.ok()) {
+ ctx->SetStatus(shape_or_status.status());
+ return;
+ } else {
+ representation_shape = shape_or_status.ValueOrDie();
+ }
+ } else {
+ representation_shape = shape;
+ }
xla::XlaOp output = input;
if (tc.is_entry_computation()) {
- output =
- ctx->builder()->Reshape(input, representation_shape.dim_sizes());
+ output = xla::Reshape(input, representation_shape.dim_sizes());
} else {
// The core from which a return value is returned depends on the
// device assignment of the input to the retval. Since we can't change
@@ -78,8 +88,8 @@ class RetvalOp : public XlaOpKernel {
// introduce an operator here, even if the shape does not change.
// TODO(b/76097077): propagate device assignments onto arguments and
// return values of functions, and then reshape unconditionally.
- output = ctx->builder()->GetTupleElement(
- ctx->builder()->Tuple({output}), 0);
+ output =
+ xla::GetTupleElement(xla::Tuple(ctx->builder(), {output}), 0);
}
tc.AddRetval(index_, dtype_, shape, output);
}
diff --git a/tensorflow/compiler/tf2xla/kernels/reverse_op.cc b/tensorflow/compiler/tf2xla/kernels/reverse_op.cc
index 2872a3c4d4..ec15b4cc7a 100644
--- a/tensorflow/compiler/tf2xla/kernels/reverse_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/reverse_op.cc
@@ -19,7 +19,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
@@ -62,7 +63,7 @@ class ReverseOp : public XlaOpKernel {
}
}
- ctx->SetOutput(0, ctx->builder()->Rev(ctx->Input(0), dimensions));
+ ctx->SetOutput(0, xla::Rev(ctx->Input(0), dimensions));
}
};
@@ -100,7 +101,7 @@ class ReverseV2Op : public XlaOpKernel {
x_shape.dims(), ")."));
}
- ctx->SetOutput(0, ctx->builder()->Rev(ctx->Input(0), axes));
+ ctx->SetOutput(0, xla::Rev(ctx->Input(0), axes));
}
};
diff --git a/tensorflow/compiler/tf2xla/kernels/reverse_sequence_op.cc b/tensorflow/compiler/tf2xla/kernels/reverse_sequence_op.cc
index 5d1c052684..c810456f94 100644
--- a/tensorflow/compiler/tf2xla/kernels/reverse_sequence_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/reverse_sequence_op.cc
@@ -17,6 +17,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
@@ -85,103 +87,96 @@ class ReverseSequenceOp : public XlaOpKernel {
auto condition_builder =
builder->CreateSubBuilder("reverse_sequence_condition");
{
- auto param = condition_builder->Parameter(0, tuple_shape, "param");
- auto i = condition_builder->GetTupleElement(param, 0);
- condition_builder->Lt(
- i, XlaHelpers::IntegerLiteral(condition_builder.get(), seq_lens_type,
- batch_size));
+ auto param =
+ xla::Parameter(condition_builder.get(), 0, tuple_shape, "param");
+ auto i = xla::GetTupleElement(param, 0);
+ xla::Lt(i, XlaHelpers::IntegerLiteral(condition_builder.get(),
+ seq_lens_type, batch_size));
}
auto condition = condition_builder->Build();
OP_REQUIRES_OK(context, condition.status());
auto body_builder = builder->CreateSubBuilder("reverse_sequence_body");
{
- auto param = body_builder->Parameter(0, tuple_shape, "param");
- auto i = body_builder->GetTupleElement(param, 0);
- auto seq_lens = body_builder->GetTupleElement(param, 1);
- auto output = body_builder->GetTupleElement(param, 2);
+ auto param = xla::Parameter(body_builder.get(), 0, tuple_shape, "param");
+ auto i = xla::GetTupleElement(param, 0);
+ auto seq_lens = xla::GetTupleElement(param, 1);
+ auto output = xla::GetTupleElement(param, 2);
// seq_len is the sequence length of the current batch element (rank 1)
- auto seq_len = body_builder->DynamicSlice(
- seq_lens, body_builder->Reshape(i, {1}), {1});
+ auto seq_len = xla::DynamicSlice(seq_lens, xla::Reshape(i, {1}), {1});
// Indices is the offset of the batch element in the input.
- auto batch_element_indices = body_builder->Broadcast(
- XlaHelpers::Zero(body_builder.get(), seq_lens_type),
- {input_shape.dims()});
- batch_element_indices = body_builder->DynamicUpdateSlice(
- batch_element_indices, body_builder->Reshape(i, {1}),
- body_builder->Reshape(
- XlaHelpers::IntegerLiteral(body_builder.get(), seq_lens_type,
- batch_dim_),
- {1}));
+ auto batch_element_indices =
+ xla::Broadcast(XlaHelpers::Zero(body_builder.get(), seq_lens_type),
+ {input_shape.dims()});
+ batch_element_indices = xla::DynamicUpdateSlice(
+ batch_element_indices, xla::Reshape(i, {1}),
+ xla::Reshape(XlaHelpers::IntegerLiteral(body_builder.get(),
+ seq_lens_type, batch_dim_),
+ {1}));
// Slice out the current batch element and pad it out in the sequence
// dimension.
TensorShape slice_shape = input_shape;
slice_shape.set_dim(batch_dim_, 1);
slice_shape.set_dim(seq_dim_, max_seq_len);
- auto slice = body_builder->DynamicSlice(output, batch_element_indices,
- slice_shape.dim_sizes());
+ auto slice = xla::DynamicSlice(output, batch_element_indices,
+ slice_shape.dim_sizes());
auto padding_config = xla::MakeNoPaddingConfig(slice_shape.dims());
padding_config.mutable_dimensions(seq_dim_)->set_edge_padding_high(
slice_shape.dim_size(seq_dim_));
- slice = body_builder->Pad(
- slice, XlaHelpers::Zero(body_builder.get(), input_type),
- padding_config);
+ slice = xla::Pad(slice, XlaHelpers::Zero(body_builder.get(), input_type),
+ padding_config);
// Now slice out the reversed sequence from its actual start.
// sequence_start_indices is the offset of the start of the reversed
// sequence in the input. The slice will go into the padding, however, we
// will mask off these elements and replace them with elements from the
// original input so their values do not matter.
- auto sequence_start_indices = body_builder->Broadcast(
- XlaHelpers::Zero(body_builder.get(), seq_lens_type),
- {slice_shape.dims()});
- sequence_start_indices = body_builder->DynamicUpdateSlice(
+ auto sequence_start_indices =
+ xla::Broadcast(XlaHelpers::Zero(body_builder.get(), seq_lens_type),
+ {slice_shape.dims()});
+ sequence_start_indices = xla::DynamicUpdateSlice(
sequence_start_indices,
- body_builder->Sub(XlaHelpers::IntegerLiteral(
- body_builder.get(), seq_lens_type, max_seq_len),
- seq_len),
- body_builder->Reshape(
- XlaHelpers::IntegerLiteral(body_builder.get(), seq_lens_type,
- seq_dim_),
- {1}));
- slice = body_builder->DynamicSlice(slice, sequence_start_indices,
- slice_shape.dim_sizes());
+ xla::Sub(XlaHelpers::IntegerLiteral(body_builder.get(), seq_lens_type,
+ max_seq_len),
+ seq_len),
+ xla::Reshape(XlaHelpers::IntegerLiteral(body_builder.get(),
+ seq_lens_type, seq_dim_),
+ {1}));
+ slice = xla::DynamicSlice(slice, sequence_start_indices,
+ slice_shape.dim_sizes());
// Shift the reversed sequence to the left.
- output = body_builder->DynamicUpdateSlice(output, slice,
- batch_element_indices);
+ output = xla::DynamicUpdateSlice(output, slice, batch_element_indices);
- body_builder->Tuple(
- {body_builder->Add(
- i, XlaHelpers::One(body_builder.get(), seq_lens_type)),
+ xla::Tuple(
+ body_builder.get(),
+ {xla::Add(i, XlaHelpers::One(body_builder.get(), seq_lens_type)),
seq_lens, output});
}
auto body = body_builder->Build();
OP_REQUIRES_OK(context, body.status());
- auto loop_output = builder->While(
+ auto loop_output = xla::While(
condition.ValueOrDie(), body.ValueOrDie(),
- builder->Tuple({XlaHelpers::Zero(builder, seq_lens_type), seq_lens,
- builder->Rev(input, {seq_dim_})}));
- auto output = builder->GetTupleElement(loop_output, 2);
+ xla::Tuple(builder, {XlaHelpers::Zero(builder, seq_lens_type), seq_lens,
+ xla::Rev(input, {seq_dim_})}));
+ auto output = xla::GetTupleElement(loop_output, 2);
// Mask out elements after the sequence length.
- xla::XlaOp iota;
- OP_REQUIRES_OK(
- context, XlaHelpers::Iota(builder, seq_lens_type, max_seq_len, &iota));
+ xla::XlaOp iota =
+ xla::Iota(builder, seq_lens_xla_shape.element_type(), max_seq_len);
std::vector<int64> dims(input_shape.dims(), 1);
dims[batch_dim_] = batch_size;
- auto mask = builder->Lt(iota, builder->Reshape(seq_lens, dims), {seq_dim_});
+ auto mask = xla::Lt(iota, xla::Reshape(seq_lens, dims), {seq_dim_});
// Broadcast the mask up to the input shape.
- mask =
- builder->Or(mask, builder->Broadcast(builder->ConstantR0<bool>(false),
- input_shape.dim_sizes()));
+ mask = xla::Or(mask, xla::Broadcast(xla::ConstantR0<bool>(builder, false),
+ input_shape.dim_sizes()));
- output = builder->Select(mask, output, input);
+ output = xla::Select(mask, output, input);
context->SetOutput(0, output);
}
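
Aside: what the while-loop-plus-mask above computes, as a plain C++ reference for a [batch, time] input, not part of the patch. Within each row, only the first seq_lens[b] elements are reversed; the tail is left in place.

#include <cstdio>
#include <utility>
#include <vector>

// Reference semantics of ReverseSequence with batch_dim = 0, seq_dim = 1.
std::vector<std::vector<int>> ReverseSequence(
    std::vector<std::vector<int>> x, const std::vector<int>& seq_lens) {
  for (size_t b = 0; b < x.size(); ++b) {
    int lo = 0, hi = seq_lens[b] - 1;
    while (lo < hi) std::swap(x[b][lo++], x[b][hi--]);
  }
  return x;
}

int main() {
  auto out = ReverseSequence({{1, 2, 3, 4}, {5, 6, 7, 8}}, {3, 2});
  for (const auto& row : out) {
    for (int v : row) std::printf("%d ", v);  // 3 2 1 4 / 6 5 7 8
    std::printf("\n");
  }
  return 0;
}
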
diff --git a/tensorflow/compiler/tf2xla/kernels/scan_ops.cc b/tensorflow/compiler/tf2xla/kernels/scan_ops.cc
index 1819fb5433..27ab3e1bf5 100644
--- a/tensorflow/compiler/tf2xla/kernels/scan_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/scan_ops.cc
@@ -20,7 +20,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/register_types.h"
@@ -100,7 +101,7 @@ class ScanOp : public XlaOpKernel {
init = XlaHelpers::One(builder, dtype);
reducer = ctx->GetOrCreateMul(dtype);
}
- auto output = builder->ReduceWindowWithGeneralPadding(
+ auto output = xla::ReduceWindowWithGeneralPadding(
XlaHelpers::ConvertElementType(builder, ctx->Input(0), dtype), init,
*reducer, window_dims, window_strides, padding);
output =
@@ -110,12 +111,12 @@ class ScanOp : public XlaOpKernel {
// of all the input elements. Slice off this extra "last" element.
if (exclusive_) {
if (reverse_) {
- output = builder->SliceInDim(output, 1, input_shape.dim_size(axis) + 1,
- 1, axis);
+ output =
+ xla::SliceInDim(output, 1, input_shape.dim_size(axis) + 1, 1, axis);
} else {
output =
- builder->SliceInDim(output, 0, input_shape.dim_size(axis), 1, axis);
+ xla::SliceInDim(output, 0, input_shape.dim_size(axis), 1, axis);
}
}
ctx->SetOutput(0, output);
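
Aside: the exclusive/reverse slicing above implements the usual cumulative-sum variants. A 1-D reference sketch in plain C++, not part of the patch:

#include <algorithm>
#include <cstdio>
#include <vector>

// Reference semantics for cumsum along a 1-D axis: exclusive shifts the
// running total so element i is excluded from its own output, and reverse
// runs the accumulation from the back.
std::vector<int> CumSum(std::vector<int> x, bool exclusive, bool reverse) {
  if (reverse) std::reverse(x.begin(), x.end());
  std::vector<int> out(x.size());
  int acc = 0;  // init value; the cumprod path would start from 1
  for (size_t i = 0; i < x.size(); ++i) {
    if (exclusive) { out[i] = acc; acc += x[i]; }
    else { acc += x[i]; out[i] = acc; }
  }
  if (reverse) std::reverse(out.begin(), out.end());
  return out;
}

int main() {
  for (int v : CumSum({1, 2, 3}, /*exclusive=*/true, /*reverse=*/false))
    std::printf("%d ", v);  // 0 1 3
  std::printf("\n");
  return 0;
}
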
diff --git a/tensorflow/compiler/tf2xla/kernels/scatter_nd_op.cc b/tensorflow/compiler/tf2xla/kernels/scatter_nd_op.cc
index f2c63b4f90..14709bb6cb 100644
--- a/tensorflow/compiler/tf2xla/kernels/scatter_nd_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/scatter_nd_op.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -103,8 +104,8 @@ class ScatterNdOp : public XlaOpKernel {
updates_shape));
xla::XlaBuilder* builder = context->builder();
- auto buffer = builder->Broadcast(XlaHelpers::Zero(builder, dtype),
- buffer_shape.dim_sizes());
+ auto buffer = xla::Broadcast(XlaHelpers::Zero(builder, dtype),
+ buffer_shape.dim_sizes());
auto indices = context->Input(0);
auto updates = context->Input(1);
auto result =
diff --git a/tensorflow/compiler/tf2xla/kernels/segment_reduction_ops.cc b/tensorflow/compiler/tf2xla/kernels/segment_reduction_ops.cc
index 664078ca16..e2ac7da2c2 100644
--- a/tensorflow/compiler/tf2xla/kernels/segment_reduction_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/segment_reduction_ops.cc
@@ -14,20 +14,30 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/tf2xla/lib/scatter.h"
+#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
namespace tensorflow {
namespace {
-class UnsortedSegmentSum : public XlaOpKernel {
+class UnsortedSegmentReduce : public XlaOpKernel {
public:
- explicit UnsortedSegmentSum(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
- OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_));
+ explicit UnsortedSegmentReduce(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
+ DataType dtype;
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype));
+ OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(dtype, &type_));
}
+  // The value used to initialize each element of the output.
+ virtual xla::XlaOp InitialValue(xla::XlaBuilder* builder) = 0;
+
+ // A function to combine two scalars with the same index (e.g., sum).
+ virtual xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) = 0;
+
void Compile(XlaOpKernelContext* ctx) override {
// output = unsorted_segment_sum(data, indices, num_segments)
// Compute a tensor such that:
@@ -50,28 +60,28 @@ class UnsortedSegmentSum : public XlaOpKernel {
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntScalar(2, &num_segments));
OP_REQUIRES(ctx, data_shape.dims() >= indices_shape.dims(),
- errors::InvalidArgument(
- "UnsortedSegmentSum requires that indices' rank be"
- " less than or equal to data's rank."));
+ errors::InvalidArgument(type_string(),
+ " requires that indices' rank be"
+ " less than or equal to data's rank."));
// Validate that indices.shape is a prefix of data.shape.
for (int d = 0; d < indices_shape.dims(); ++d) {
- OP_REQUIRES(ctx, (data_shape.dim_size(d) == indices_shape.dim_size(d)),
- errors::InvalidArgument(
- "UnsortedSegmentSum requires indices shape to be prefix"
- " of data_shape, but dimension ",
- d, " differs ", data_shape.dim_size(d), " vs. ",
- indices_shape.dim_size(d)));
+ OP_REQUIRES(
+ ctx, (data_shape.dim_size(d) == indices_shape.dim_size(d)),
+ errors::InvalidArgument(type_string(),
+ " requires indices shape to be prefix"
+ " of data_shape, but dimension ",
+ d, " differs ", data_shape.dim_size(d),
+ " vs. ", indices_shape.dim_size(d)));
}
xla::XlaBuilder* builder = ctx->builder();
TensorShape buffer_shape = data_shape;
buffer_shape.RemoveDimRange(0, indices_shape.dims());
buffer_shape.InsertDim(0, num_segments);
- auto buffer = builder->Broadcast(XlaHelpers::Zero(builder, dtype_),
- buffer_shape.dim_sizes());
+ auto buffer =
+ xla::Broadcast(InitialValue(builder), buffer_shape.dim_sizes());
- auto combiner = [](xla::XlaOp a, xla::XlaOp b, xla::XlaBuilder* builder) {
- return builder->Add(a, b);
- };
+ auto combiner = [this](xla::XlaOp a, xla::XlaOp b,
+ xla::XlaBuilder* builder) { return Combine(a, b); };
auto result = XlaScatter(buffer, /*updates=*/data, indices,
/*indices_are_vectors=*/false, combiner, builder);
@@ -79,13 +89,73 @@ class UnsortedSegmentSum : public XlaOpKernel {
ctx->SetOutput(0, result.ValueOrDie());
}
- private:
- DataType dtype_;
+ protected:
+ xla::PrimitiveType type_;
+};
+
+class UnsortedSegmentSum : public UnsortedSegmentReduce {
+ public:
+ explicit UnsortedSegmentSum(OpKernelConstruction* ctx)
+ : UnsortedSegmentReduce(ctx) {}
+
+ xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
+ return xla::Zero(builder, type_);
+ };
+ xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) override { return a + b; };
};
REGISTER_XLA_OP(
Name("UnsortedSegmentSum").CompileTimeConstInput("num_segments"),
UnsortedSegmentSum);
+class UnsortedSegmentProd : public UnsortedSegmentReduce {
+ public:
+ explicit UnsortedSegmentProd(OpKernelConstruction* ctx)
+ : UnsortedSegmentReduce(ctx) {}
+
+ xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
+ return xla::One(builder, type_);
+ };
+ xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) override { return a * b; };
+};
+
+REGISTER_XLA_OP(
+ Name("UnsortedSegmentProd").CompileTimeConstInput("num_segments"),
+ UnsortedSegmentProd);
+
+class UnsortedSegmentMin : public UnsortedSegmentReduce {
+ public:
+ explicit UnsortedSegmentMin(OpKernelConstruction* ctx)
+ : UnsortedSegmentReduce(ctx) {}
+
+ xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
+ return xla::MaxFiniteValue(builder, type_);
+ };
+ xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) override {
+ return xla::Min(a, b);
+ };
+};
+
+REGISTER_XLA_OP(
+ Name("UnsortedSegmentMin").CompileTimeConstInput("num_segments"),
+ UnsortedSegmentMin);
+
+class UnsortedSegmentMax : public UnsortedSegmentReduce {
+ public:
+ explicit UnsortedSegmentMax(OpKernelConstruction* ctx)
+ : UnsortedSegmentReduce(ctx) {}
+
+ xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
+ return xla::MinFiniteValue(builder, type_);
+ };
+ xla::XlaOp Combine(xla::XlaOp a, xla::XlaOp b) override {
+ return xla::Max(a, b);
+ };
+};
+
+REGISTER_XLA_OP(
+ Name("UnsortedSegmentMax").CompileTimeConstInput("num_segments"),
+ UnsortedSegmentMax);
+
} // namespace
} // namespace tensorflow
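
Aside: reference semantics for the UnsortedSegmentReduce family, sketched in plain C++ for 1-D data and not part of the patch: every output slot starts at InitialValue, and each element is combined into the slot named by its segment id.

#include <cstdio>
#include <vector>

// Reference model of the Sum case; Prod/Min/Max differ only in the initial
// value (1, max-finite, min-finite) and the combine step (*, min, max).
std::vector<float> UnsortedSegmentSum(const std::vector<float>& data,
                                      const std::vector<int>& segment_ids,
                                      int num_segments) {
  std::vector<float> out(num_segments, 0.0f);  // InitialValue for Sum
  for (size_t i = 0; i < data.size(); ++i)
    out[segment_ids[i]] += data[i];  // Combine for Sum
  return out;
}

int main() {
  auto out = UnsortedSegmentSum({1, 2, 3, 4}, {0, 1, 0, 1}, 2);
  std::printf("%g %g\n", out[0], out[1]);  // 4 6
  return 0;
}
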
diff --git a/tensorflow/compiler/tf2xla/kernels/select_op.cc b/tensorflow/compiler/tf2xla/kernels/select_op.cc
index f9f48164d6..5c010c9df2 100644
--- a/tensorflow/compiler/tf2xla/kernels/select_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/select_op.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/kernels/bounds_check.h"
@@ -40,8 +41,6 @@ class SelectOp : public XlaOpKernel {
"'then' and 'else' must have the same size. but received: ",
then_shape.DebugString(), " vs. ", else_shape.DebugString()));
- xla::XlaBuilder* builder = ctx->builder();
-
auto cond_handle = ctx->Input(0);
auto then_handle = ctx->Input(1);
auto else_handle = ctx->Input(2);
@@ -69,14 +68,14 @@ class SelectOp : public XlaOpKernel {
const auto dim_sizes = then_shape.dim_sizes();
gtl::ArraySlice<int64> bdims = dim_sizes;
bdims.pop_front();
- cond_handle = builder->Broadcast(cond_handle, bdims);
+ cond_handle = xla::Broadcast(cond_handle, bdims);
std::vector<int64> dim_order(then_shape.dims());
dim_order[0] = then_shape.dims() - 1;
std::iota(dim_order.begin() + 1, dim_order.end(), 0);
- cond_handle = builder->Transpose(cond_handle, dim_order);
+ cond_handle = xla::Transpose(cond_handle, dim_order);
}
- ctx->SetOutput(0, builder->Select(cond_handle, then_handle, else_handle));
+ ctx->SetOutput(0, xla::Select(cond_handle, then_handle, else_handle));
}
private:
diff --git a/tensorflow/compiler/tf2xla/kernels/sendrecv_ops.cc b/tensorflow/compiler/tf2xla/kernels/sendrecv_ops.cc
index 9ce01d0d44..6281d6c653 100644
--- a/tensorflow/compiler/tf2xla/kernels/sendrecv_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/sendrecv_ops.cc
@@ -45,7 +45,7 @@ void SendOp::Compile(XlaOpKernelContext* ctx) {
XlaCompiler* compiler = XlaContext::Get(ctx).compiler();
xla::ChannelHandle channel;
OP_REQUIRES_OK(ctx, compiler->GetChannelHandle(tensor_name_, &channel));
- ctx->builder()->Send(ctx->Input(0), channel);
+ xla::Send(ctx->Input(0), channel);
}
REGISTER_XLA_OP(Name("XlaSend"), SendOp);
@@ -76,7 +76,7 @@ void RecvOp::Compile(XlaOpKernelContext* ctx) {
XlaCompiler* compiler = XlaContext::Get(ctx).compiler();
xla::ChannelHandle channel;
OP_REQUIRES_OK(ctx, compiler->GetChannelHandle(tensor_name_, &channel));
- ctx->SetOutput(0, ctx->builder()->Recv(shape_, channel));
+ ctx->SetOutput(0, xla::Recv(ctx->builder(), shape_, channel));
}
REGISTER_XLA_OP(Name("XlaRecv"), RecvOp);
diff --git a/tensorflow/compiler/tf2xla/kernels/sequence_ops.cc b/tensorflow/compiler/tf2xla/kernels/sequence_ops.cc
index bc3d0bf5df..25a5bcbe1d 100644
--- a/tensorflow/compiler/tf2xla/kernels/sequence_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/sequence_ops.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
diff --git a/tensorflow/compiler/tf2xla/kernels/shape_op.cc b/tensorflow/compiler/tf2xla/kernels/shape_op.cc
index d59720bef7..5798823cd5 100644
--- a/tensorflow/compiler/tf2xla/kernels/shape_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/shape_op.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/kernels/bounds_check.h"
@@ -147,7 +148,7 @@ class ExpandDimsOp : public XlaOpKernel {
dim = std::min<int32>(dim, existing_dims_size);
new_shape.emplace(new_shape.begin() + dim, 1);
- ctx->SetOutput(0, ctx->builder()->Reshape(ctx->Input(0), new_shape));
+ ctx->SetOutput(0, xla::Reshape(ctx->Input(0), new_shape));
}
};
REGISTER_XLA_OP(Name("ExpandDims").CompileTimeConstInput("dim"), ExpandDimsOp);
@@ -204,7 +205,7 @@ class SqueezeOp : public XlaOpKernel {
}
}
- ctx->SetOutput(0, ctx->builder()->Reshape(ctx->Input(0), new_shape));
+ ctx->SetOutput(0, xla::Reshape(ctx->Input(0), new_shape));
}
private:
@@ -221,7 +222,7 @@ class ZerosLikeOp : public XlaOpKernel {
const TensorShape input_shape = ctx->InputShape(0);
auto zero = XlaHelpers::Zero(ctx->builder(), input_type(0));
- ctx->SetOutput(0, ctx->builder()->Broadcast(zero, input_shape.dim_sizes()));
+ ctx->SetOutput(0, xla::Broadcast(zero, input_shape.dim_sizes()));
}
};
@@ -235,7 +236,7 @@ class OnesLikeOp : public XlaOpKernel {
const TensorShape input_shape = ctx->InputShape(0);
auto one = XlaHelpers::One(ctx->builder(), input_type(0));
- ctx->SetOutput(0, ctx->builder()->Broadcast(one, input_shape.dim_sizes()));
+ ctx->SetOutput(0, xla::Broadcast(one, input_shape.dim_sizes()));
}
};
diff --git a/tensorflow/compiler/tf2xla/kernels/slice_op.cc b/tensorflow/compiler/tf2xla/kernels/slice_op.cc
index be1e97bf26..1864584ade 100644
--- a/tensorflow/compiler/tf2xla/kernels/slice_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/slice_op.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
@@ -92,8 +93,7 @@ class SliceOp : public XlaOpKernel {
limits.push_back(begin[i] + size[i]);
}
std::vector<int64> strides(begin.size(), 1);
- ctx->SetOutput(
- 0, ctx->builder()->Slice(ctx->Input(0), begin, limits, strides));
+ ctx->SetOutput(0, xla::Slice(ctx->Input(0), begin, limits, strides));
} else {
// `begin` is not a compile-time constant.
for (int i = 0; i < input_dims; ++i) {
@@ -106,8 +106,7 @@ class SliceOp : public XlaOpKernel {
input_shape.dim_size(i), "], but ",
"got ", size[i]));
}
- ctx->SetOutput(
- 0, ctx->builder()->DynamicSlice(ctx->Input(0), ctx->Input(1), size));
+ ctx->SetOutput(0, xla::DynamicSlice(ctx->Input(0), ctx->Input(1), size));
}
}
};
diff --git a/tensorflow/compiler/tf2xla/kernels/softmax_op.cc b/tensorflow/compiler/tf2xla/kernels/softmax_op.cc
index bbf5ee8b12..a71fbcd901 100644
--- a/tensorflow/compiler/tf2xla/kernels/softmax_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/softmax_op.cc
@@ -15,9 +15,12 @@ limitations under the License.
// XLA-specific Ops for softmax.
+#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
@@ -41,6 +44,7 @@ class SoftmaxOp : public XlaOpKernel {
const int kClassDim = 1;
const DataType type = input_type(0);
+ const xla::PrimitiveType xla_type = ctx->input_xla_type(0);
auto logits = ctx->Input(0);
xla::XlaBuilder* const b = ctx->builder();
@@ -48,24 +52,27 @@ class SoftmaxOp : public XlaOpKernel {
// Find the max in each batch, resulting in a tensor of shape [batch]
auto logits_max =
- b->Reduce(logits, XlaHelpers::MinValue(b, type), max_func, {kClassDim});
+ xla::Reduce(logits, xla::MinValue(b, xla_type), max_func, {kClassDim});
// Subtract the max in batch b from every element in batch b. Broadcasts
// along the batch dimension.
- auto shifted_logits = b->Sub(logits, logits_max, {kBatchDim});
- auto exp_shifted = b->Exp(shifted_logits);
+ auto shifted_logits = xla::Sub(logits, logits_max, {kBatchDim});
+ auto exp_shifted = xla::Exp(shifted_logits);
const DataType accumulation_type = XlaHelpers::SumAccumulationType(type);
+ xla::PrimitiveType xla_accumulation_type;
+ OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(accumulation_type,
+ &xla_accumulation_type));
auto converted =
- XlaHelpers::ConvertElementType(b, exp_shifted, accumulation_type);
+ xla::ConvertElementType(exp_shifted, xla_accumulation_type);
auto reduce =
- b->Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
- *ctx->GetOrCreateAdd(accumulation_type), {kClassDim});
+ xla::Reduce(converted, xla::Zero(b, xla_accumulation_type),
+ *ctx->GetOrCreateAdd(accumulation_type), {kClassDim});
auto sum = XlaHelpers::ConvertElementType(b, reduce, type);
auto softmax =
log_
// softmax = shifted_logits - log(sum(exp(shifted_logits)))
- ? b->Sub(shifted_logits, b->Log(sum), {kBatchDim})
+ ? xla::Sub(shifted_logits, xla::Log(sum), {kBatchDim})
// softmax = exp(shifted_logits) / sum(exp(shifted_logits))
- : b->Div(exp_shifted, sum, {kBatchDim});
+ : xla::Div(exp_shifted, sum, {kBatchDim});
ctx->SetOutput(0, softmax);
}
@@ -77,8 +84,8 @@ REGISTER_XLA_OP(Name("Softmax"), SoftmaxOp);
REGISTER_XLA_OP(Name("LogSoftmax"), SoftmaxOp);
std::pair<xla::XlaOp, xla::XlaOp> CrossEntropyWithLogits(
- XlaOpKernelContext* ctx, DataType type, const xla::XlaOp& logits,
- const xla::XlaOp& labels) {
+ XlaOpKernelContext* ctx, DataType type, xla::PrimitiveType xla_type,
+ xla::XlaOp logits, xla::XlaOp labels) {
const xla::XlaComputation& max_func = *ctx->GetOrCreateMax(type);
const int kBatchDim = 0;
@@ -87,43 +94,44 @@ std::pair<xla::XlaOp, xla::XlaOp> CrossEntropyWithLogits(
xla::XlaBuilder* b = ctx->builder();
// Find the max in each batch, resulting in a tensor of shape [batch]
auto logits_max =
- b->Reduce(logits, XlaHelpers::MinValue(b, type), max_func, {kClassDim});
+ xla::Reduce(logits, xla::MinValue(b, xla_type), max_func, {kClassDim});
// Subtract the max in batch b from every element in batch b.
// Broadcasts along the batch dimension.
- auto shifted_logits = b->Sub(logits, logits_max, {kBatchDim});
+ auto shifted_logits = xla::Sub(logits, logits_max, {kBatchDim});
// exp(logits - max_logits)
- auto exp_shifted_logits = b->Exp(shifted_logits);
+ auto exp_shifted_logits = xla::Exp(shifted_logits);
// sum_{class} (exp(logits - max_logits))
const DataType accumulation_type = XlaHelpers::SumAccumulationType(type);
auto converted =
XlaHelpers::ConvertElementType(b, exp_shifted_logits, accumulation_type);
- auto reduce = b->Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
- *ctx->GetOrCreateAdd(accumulation_type), {kClassDim});
+ auto reduce =
+ xla::Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
+ *ctx->GetOrCreateAdd(accumulation_type), {kClassDim});
auto sum_exp = XlaHelpers::ConvertElementType(b, reduce, type);
// log(sum(exp(logits - max_logits)))
- auto log_sum_exp = b->Log(sum_exp);
+ auto log_sum_exp = xla::Log(sum_exp);
// sum(-labels *
// ((logits - max_logits) - log(sum(exp(logits - max_logits)))))
// along classes
// (The subtraction broadcasts along the batch dimension.)
- auto sub = b->Sub(shifted_logits, log_sum_exp, {kBatchDim});
- auto mul = b->Mul(b->Neg(labels), sub);
+ auto sub = xla::Sub(shifted_logits, log_sum_exp, {kBatchDim});
+ auto mul = xla::Mul(xla::Neg(labels), sub);
auto sum =
- b->Reduce(XlaHelpers::ConvertElementType(b, mul, accumulation_type),
- XlaHelpers::Zero(b, accumulation_type),
- *ctx->GetOrCreateAdd(accumulation_type), {kClassDim});
+ xla::Reduce(XlaHelpers::ConvertElementType(b, mul, accumulation_type),
+ XlaHelpers::Zero(b, accumulation_type),
+ *ctx->GetOrCreateAdd(accumulation_type), {kClassDim});
auto loss = XlaHelpers::ConvertElementType(b, sum, type);
// backprop: prob - labels, where
// prob = exp(logits - max_logits) / sum(exp(logits - max_logits))
// (where the division broadcasts along the batch dimension)
xla::XlaOp backprop =
- b->Sub(b->Div(exp_shifted_logits, sum_exp, {kBatchDim}), labels);
+ xla::Sub(xla::Div(exp_shifted_logits, sum_exp, {kBatchDim}), labels);
return {loss, backprop};
}
@@ -146,12 +154,13 @@ class SoftmaxXentWithLogitsOp : public XlaOpKernel {
// check that "labels" is a matrix too.
const DataType type = input_type(0);
+ const xla::PrimitiveType xla_type = ctx->input_xla_type(0);
auto logits = ctx->Input(0);
auto labels = ctx->Input(1);
xla::XlaOp loss, backprop;
std::tie(loss, backprop) =
- CrossEntropyWithLogits(ctx, type, logits, labels);
+ CrossEntropyWithLogits(ctx, type, xla_type, logits, labels);
ctx->SetOutput(0, loss);
ctx->SetOutput(1, backprop);
}
@@ -187,8 +196,9 @@ class SparseSoftmaxXentWithLogitsOp : public XlaOpKernel {
int64 batch_size = logits_shape.dim_size(0);
int64 depth = logits_shape.dim_size(1);
- DataType logits_type = input_type(0);
- DataType indices_type = input_type(1);
+ const DataType logits_type = input_type(0);
+ const xla::PrimitiveType xla_logits_type = ctx->input_xla_type(0);
+ const DataType indices_type = input_type(1);
xla::XlaOp indices = ctx->Input(1);
@@ -206,20 +216,18 @@ class SparseSoftmaxXentWithLogitsOp : public XlaOpKernel {
// Builds a vector of {batch_size} that is 0 if the index is in range, or
// NaN otherwise; then add that vector to the labels to force out-of-range
// values to NaNs.
- xla::XlaOp nan_or_zero = builder->Select(
- builder->And(
- builder->Le(XlaHelpers::Zero(builder, indices_type), indices),
- builder->Lt(indices, XlaHelpers::IntegerLiteral(
- builder, indices_type, depth))),
- builder->Broadcast(XlaHelpers::Zero(builder, logits_type),
- {batch_size}),
- builder->Broadcast(XlaHelpers::FloatLiteral(builder, logits_type, NAN),
- {batch_size}));
- labels = builder->Add(labels, nan_or_zero, {0});
+ xla::XlaOp nan_or_zero = xla::Select(
+ xla::And(xla::Le(XlaHelpers::Zero(builder, indices_type), indices),
+ xla::Lt(indices, XlaHelpers::IntegerLiteral(
+ builder, indices_type, depth))),
+ xla::Broadcast(XlaHelpers::Zero(builder, logits_type), {batch_size}),
+ xla::Broadcast(XlaHelpers::FloatLiteral(builder, logits_type, NAN),
+ {batch_size}));
+ labels = xla::Add(labels, nan_or_zero, {0});
xla::XlaOp loss, backprop;
- std::tie(loss, backprop) =
- CrossEntropyWithLogits(ctx, logits_type, ctx->Input(0), labels);
+ std::tie(loss, backprop) = CrossEntropyWithLogits(
+ ctx, logits_type, xla_logits_type, ctx->Input(0), labels);
ctx->SetOutput(0, loss);
ctx->SetOutput(1, backprop);
}
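
The nan_or_zero select in the sparse kernel is a per-row bounds guard; its scalar semantics, sketched (illustrative only):

#include <cmath>
#include <cstdint>

// Mirrors the xla::Select above: contributes 0 when the class index lies in
// [0, depth), NaN otherwise. Added to the one-hot labels, this forces the
// loss and backprop of out-of-range rows to NaN instead of reading garbage.
float NanOrZero(std::int64_t index, std::int64_t depth) {
  return (0 <= index && index < depth) ? 0.0f : NAN;
}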
diff --git a/tensorflow/compiler/tf2xla/kernels/sort_ops.cc b/tensorflow/compiler/tf2xla/kernels/sort_ops.cc
index 204ae84582..faaf8964ff 100644
--- a/tensorflow/compiler/tf2xla/kernels/sort_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/sort_ops.cc
@@ -25,8 +25,7 @@ class XlaSortOp : public XlaOpKernel {
explicit XlaSortOp(OpKernelConstruction* context) : XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
- xla::XlaBuilder* const b = context->builder();
- context->SetOutput(0, b->Sort(context->Input(0)));
+ context->SetOutput(0, xla::Sort(context->Input(0)));
}
};
diff --git a/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc b/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc
index ec077924b5..8a8525efa1 100644
--- a/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
namespace tensorflow {
namespace {
@@ -73,7 +74,7 @@ void SpaceToBatch(XlaOpKernelContext* ctx, const xla::XlaOp& input,
"The product of the block dimensions must be positive"));
xla::XlaOp padded =
- b->Pad(input, XlaHelpers::Zero(b, input_dtype), padding_config);
+ xla::Pad(input, XlaHelpers::Zero(b, input_dtype), padding_config);
// 2. Reshape `padded` to `reshaped_padded` of shape:
//
@@ -100,7 +101,7 @@ void SpaceToBatch(XlaOpKernelContext* ctx, const xla::XlaOp& input,
std::copy(remainder_shape.begin(), remainder_shape.end(),
reshaped_padded_shape.begin() + 1 + 2 * block_rank);
- xla::XlaOp reshaped_padded = b->Reshape(padded, reshaped_padded_shape);
+ xla::XlaOp reshaped_padded = xla::Reshape(padded, reshaped_padded_shape);
// 3. Permute dimensions of `reshaped_padded` to produce
// `permuted_reshaped_padded` of shape:
@@ -120,7 +121,7 @@ void SpaceToBatch(XlaOpKernelContext* ctx, const xla::XlaOp& input,
std::iota(permutation.begin() + 1 + block_rank * 2, permutation.end(),
1 + block_rank * 2);
xla::XlaOp permuted_reshaped_padded =
- b->Transpose(reshaped_padded, permutation);
+ xla::Transpose(reshaped_padded, permutation);
// 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the
// batch dimension, producing an output tensor of shape:
@@ -140,7 +141,7 @@ void SpaceToBatch(XlaOpKernelContext* ctx, const xla::XlaOp& input,
std::copy(remainder_shape.begin(), remainder_shape.end(),
output_shape.begin() + 1 + block_rank);
- xla::XlaOp output = b->Reshape(permuted_reshaped_padded, output_shape);
+ xla::XlaOp output = xla::Reshape(permuted_reshaped_padded, output_shape);
ctx->SetOutput(0, output);
}
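
A worked shape example for the four steps above (values chosen for illustration): input [1, 4, 4, 1], block_shape [2, 2], zero padding.

#include <cstdio>

int main() {
  // 1. Pad: a no-op with zero padding       -> [1, 4, 4, 1]
  // 2. Reshape, splitting each spatial dim:
  //    [1, 4/2, 2, 4/2, 2, 1]               -> [1, 2, 2, 2, 2, 1]
  // 3. Transpose block dims to the front;
  //    for this example the permutation is {2, 4, 0, 1, 3, 5}
  //                                         -> [2, 2, 1, 2, 2, 1]
  // 4. Reshape, folding blocks into batch:
  //    [1 * 2 * 2, 2, 2, 1]                 -> [4, 2, 2, 1]
  std::printf("SpaceToBatch output shape: [4, 2, 2, 1]\n");
  return 0;
}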
diff --git a/tensorflow/compiler/tf2xla/kernels/spacetodepth_op.cc b/tensorflow/compiler/tf2xla/kernels/spacetodepth_op.cc
index 4c5886ee2a..47d282fe9e 100644
--- a/tensorflow/compiler/tf2xla/kernels/spacetodepth_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/spacetodepth_op.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
@@ -50,7 +51,6 @@ class SpaceToDepthOp : public XlaOpKernel {
const gtl::InlinedVector<int64, 4> input_shape =
input_tensor_shape.dim_sizes();
- xla::XlaBuilder* b = ctx->builder();
xla::XlaOp input = ctx->Input(0);
int feature_dim = GetTensorFeatureDimIndex(input_rank, data_format_);
@@ -135,7 +135,7 @@ class SpaceToDepthOp : public XlaOpKernel {
// input_shape[1] / block_size_, block_size_,
// input_shape[2] / block_size_, block_size_,
// depth]
- xla::XlaOp reshaped = b->Reshape(input, reshaped_shape);
+ xla::XlaOp reshaped = xla::Reshape(input, reshaped_shape);
// 2. Permute dimensions of `reshaped` to produce
// `permuted_reshaped` of shape:
@@ -145,7 +145,7 @@ class SpaceToDepthOp : public XlaOpKernel {
// input_shape[2] / block_size_,
// block_size_, block_size_,
// depth]
- xla::XlaOp permuted_reshaped = b->Transpose(reshaped, transpose_order);
+ xla::XlaOp permuted_reshaped = xla::Transpose(reshaped, transpose_order);
// 3. Reshape `permuted_reshaped` to flatten `block_shape` into the
// batch dimension, producing an output tensor of shape:
@@ -155,7 +155,7 @@ class SpaceToDepthOp : public XlaOpKernel {
// input_shape[2] / block_size_,
// block_size_ * block_size_ * depth]
//
- xla::XlaOp output = b->Reshape(permuted_reshaped, output_shape);
+ xla::XlaOp output = xla::Reshape(permuted_reshaped, output_shape);
ctx->SetOutput(0, output);
}
diff --git a/tensorflow/compiler/tf2xla/kernels/sparse_to_dense_op.cc b/tensorflow/compiler/tf2xla/kernels/sparse_to_dense_op.cc
new file mode 100644
index 0000000000..e831dc30a9
--- /dev/null
+++ b/tensorflow/compiler/tf2xla/kernels/sparse_to_dense_op.cc
@@ -0,0 +1,88 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/tf2xla/lib/scatter.h"
+#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
+#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+
+namespace tensorflow {
+namespace {
+
+// Operator to convert sparse representations to dense.
+class SparseToDenseOp : public XlaOpKernel {
+ public:
+ explicit SparseToDenseOp(OpKernelConstruction* context)
+ : XlaOpKernel(context) {}
+
+ void Compile(XlaOpKernelContext* context) override {
+ // sparse_indices
+ const TensorShape indices_shape = context->InputShape(0);
+ OP_REQUIRES(context, indices_shape.dims() <= 2,
+ errors::InvalidArgument(
+ "sparse_indices should be a scalar, vector, or matrix, "
+ "got shape ",
+ indices_shape.DebugString()));
+ const int64 num_elems =
+ indices_shape.dims() > 0 ? indices_shape.dim_size(0) : 1;
+ const int64 num_dims =
+ indices_shape.dims() > 1 ? indices_shape.dim_size(1) : 1;
+
+ // output_shape
+ TensorShape output_shape;
+ OP_REQUIRES_OK(context, context->ConstantInputAsShape(1, &output_shape));
+ OP_REQUIRES(context, output_shape.dims() == num_dims,
+ errors::InvalidArgument(
+ "output_shape has incorrect number of elements: ",
+ output_shape.num_elements(), " should be: ", num_dims));
+
+ // sparse_values
+ const TensorShape sparse_values_shape = context->InputShape(2);
+ const int64 num_values = sparse_values_shape.num_elements();
+ OP_REQUIRES(
+ context,
+ sparse_values_shape.dims() == 0 ||
+ (sparse_values_shape.dims() == 1 && num_values == num_elems),
+ errors::InvalidArgument("sparse_values has incorrect shape ",
+ sparse_values_shape.DebugString(),
+ ", should be [] or [", num_elems, "]"));
+
+ // default_value
+ const TensorShape default_value_shape = context->InputShape(3);
+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(default_value_shape),
+ errors::InvalidArgument("default_value should be a scalar."));
+
+ xla::XlaOp indices = context->Input(0);
+ xla::XlaOp sparse_values = context->Input(2);
+ xla::XlaOp default_value = context->Input(3);
+
+ if (sparse_values_shape.dims() == 0 && num_elems != 1) {
+ sparse_values = xla::Broadcast(sparse_values, {num_elems});
+ }
+ xla::XlaBuilder* builder = context->builder();
+ auto buffer = xla::Broadcast(default_value, output_shape.dim_sizes());
+
+ auto result = XlaScatter(buffer, sparse_values, indices,
+ /*indices_are_vectors=*/num_dims > 1,
+ /*combiner=*/{}, builder);
+ context->SetOutput(0, builder->ReportErrorOrReturn(result));
+ }
+};
+
+REGISTER_XLA_OP(Name("SparseToDense").CompileTimeConstInput("output_shape"),
+ SparseToDenseOp);
+
+} // namespace
+
+} // namespace tensorflow
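
For the 1-D case, the scatter above has simple host-side semantics; a sketch (with the empty combiner, later writes simply overwrite):

#include <cstddef>
#include <cstdint>
#include <vector>

// Start from a buffer filled with default_value, then write each sparse
// value at its index. Assumes indices are in range and values has already
// been broadcast to one element per index, as in the kernel above.
std::vector<float> SparseToDense1D(const std::vector<std::int64_t>& indices,
                                   const std::vector<float>& values,
                                   float default_value,
                                   std::int64_t output_size) {
  std::vector<float> dense(output_size, default_value);
  for (std::size_t i = 0; i < indices.size(); ++i) {
    dense[indices[i]] = values[i];
  }
  return dense;
}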
diff --git a/tensorflow/compiler/tf2xla/kernels/split_op.cc b/tensorflow/compiler/tf2xla/kernels/split_op.cc
index 9b54058541..242638f981 100644
--- a/tensorflow/compiler/tf2xla/kernels/split_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/split_op.cc
@@ -19,7 +19,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
@@ -98,7 +99,7 @@ class SplitOp : public XlaOpKernel {
// Slice out the ith split from the split dimension.
begin[split_dim] = i * slice_size;
limits[split_dim] = (i + 1) * slice_size;
- ctx->SetOutput(i, ctx->builder()->Slice(input, begin, limits, strides));
+ ctx->SetOutput(i, xla::Slice(input, begin, limits, strides));
}
}
};
@@ -199,7 +200,7 @@ class SplitVOp : public XlaOpKernel {
// Slice out the ith split from the split dimension.
limits[split_dim] = begin[split_dim] + slice_size;
- ctx->SetOutput(i, ctx->builder()->Slice(input, begin, limits, strides));
+ ctx->SetOutput(i, xla::Slice(input, begin, limits, strides));
begin[split_dim] = limits[split_dim];
}
}
diff --git a/tensorflow/compiler/tf2xla/kernels/stack_ops.cc b/tensorflow/compiler/tf2xla/kernels/stack_ops.cc
index 0fb05a2be7..df91900570 100644
--- a/tensorflow/compiler/tf2xla/kernels/stack_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/stack_ops.cc
@@ -23,7 +23,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/register_types.h"
@@ -144,24 +144,25 @@ class StackPushOp : public XlaOpKernel {
// Initializes the Stack, if the element shape was not already known.
OP_REQUIRES_OK(ctx, MaybeInitializeStack(b, resource, dtype_, elem_shape));
- xla::XlaOp ta = b->GetTupleElement(resource->value(), 0);
- xla::XlaOp index = b->GetTupleElement(resource->value(), 1);
+ xla::XlaOp ta = xla::GetTupleElement(resource->value(), 0);
+ xla::XlaOp index = xla::GetTupleElement(resource->value(), 1);
xla::XlaOp value = ctx->Input(1);
// start_indices of the DynamicUpdateSlice are [index, 0, 0, ..., 0].
auto start_indices =
- b->Pad(b->Reshape(index, {1}), b->ConstantR0<int32>(0),
- xla::MakeEdgePaddingConfig({{0, elem_shape.dims()}}));
+ xla::Pad(xla::Reshape(index, {1}), xla::ConstantR0<int32>(b, 0),
+ xla::MakeEdgePaddingConfig({{0, elem_shape.dims()}}));
TensorShape slice_shape = elem_shape;
slice_shape.InsertDim(0, 1LL);
- auto update = b->Reshape(value, slice_shape.dim_sizes());
+ auto update = xla::Reshape(value, slice_shape.dim_sizes());
// TODO(phawkins): We don't check the index is in bounds --- there is no
// error mechanism in XLA.
- OP_REQUIRES_OK(ctx, resource->SetValue(b->Tuple(
- {b->DynamicUpdateSlice(ta, update, start_indices),
- b->Add(index, b->ConstantR0<int32>(1))})));
+ OP_REQUIRES_OK(ctx,
+ resource->SetValue(xla::Tuple(
+ b, {xla::DynamicUpdateSlice(ta, update, start_indices),
+ xla::Add(index, xla::ConstantR0<int32>(b, 1))})));
ctx->SetOutput(0, value);
}
@@ -197,27 +198,27 @@ class StackPopOp : public XlaOpKernel {
OP_REQUIRES_OK(ctx, GetStackShape(b, resource, &stack_shape));
xla::XlaOp state = resource->value();
- xla::XlaOp ta = b->GetTupleElement(state, 0);
- xla::XlaOp index = b->GetTupleElement(state, 1);
+ xla::XlaOp ta = xla::GetTupleElement(state, 0);
+ xla::XlaOp index = xla::GetTupleElement(state, 1);
- index = b->Sub(index, b->ConstantR0<int32>(1));
- OP_REQUIRES_OK(ctx, resource->SetValue(b->Tuple({ta, index})));
+ index = xla::Sub(index, xla::ConstantR0<int32>(b, 1));
+ OP_REQUIRES_OK(ctx, resource->SetValue(xla::Tuple(b, {ta, index})));
// start_indices of the DynamicSlice are [index, 0, 0, ..., 0].
auto start_indices =
- b->Pad(b->Reshape(index, {1}), b->ConstantR0<int32>(0),
- xla::MakeEdgePaddingConfig({{0, stack_shape.dims() - 1}}));
+ xla::Pad(xla::Reshape(index, {1}), xla::ConstantR0<int32>(b, 0),
+ xla::MakeEdgePaddingConfig({{0, stack_shape.dims() - 1}}));
auto slice_shape = stack_shape.dim_sizes();
slice_shape[0] = 1LL;
// TODO(phawkins): We don't check the index is in bounds --- there is no
// error mechanism in XLA.
- xla::XlaOp read = b->DynamicSlice(ta, start_indices, slice_shape);
+ xla::XlaOp read = xla::DynamicSlice(ta, start_indices, slice_shape);
// Remove the leading '1' dimension.
std::vector<int64> value_shape(slice_shape.begin() + 1, slice_shape.end());
- ctx->SetOutput(0, b->Reshape(read, value_shape));
+ ctx->SetOutput(0, xla::Reshape(read, value_shape));
}
private:
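
Both stack kernels build the slice origin with the same Pad-of-Reshape idiom; in plain C++ it amounts to (sketch):

#include <cstdint>
#include <vector>

// Equivalent of xla::Pad(xla::Reshape(index, {1}), 0,
// xla::MakeEdgePaddingConfig({{0, n}})): the vector [index, 0, ..., 0] with
// n trailing zeros, used as the start coordinate of Dynamic(Update)Slice.
std::vector<std::int32_t> StartIndices(std::int32_t index, int n) {
  std::vector<std::int32_t> start(1 + n, 0);
  start[0] = index;
  return start;
}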
diff --git a/tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc b/tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc
index 0367501433..cc4b13d3b9 100644
--- a/tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc
@@ -20,7 +20,11 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
-#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/math.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/lib/prng.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
@@ -30,134 +34,6 @@ limitations under the License.
namespace tensorflow {
namespace {
-// Rotates a 32-bit integer 'v' left by 'distance' bits.
-xla::XlaOp RotateLeftS32(xla::XlaBuilder* builder, const xla::XlaOp& v,
- int distance) {
- return builder->Or(
- builder->ShiftLeft(v, builder->ConstantR0<int>(distance)),
- builder->ShiftRightLogical(v, builder->ConstantR0<int>(32 - distance)));
-}
-
-using ThreeFry2x32State = std::array<xla::XlaOp, 2>;
-
-// Implements the ThreeFry counter-based PRNG algorithm.
-// Salmon et al. SC 2011. Parallel random numbers: as easy as 1, 2, 3.
-// http://www.thesalmons.org/john/random123/papers/random123sc11.pdf
-ThreeFry2x32State ThreeFry2x32(xla::XlaBuilder* builder,
- ThreeFry2x32State input, ThreeFry2x32State key) {
- // Rotation distances specified by the Threefry2x32 algorithm.
- constexpr std::array<int, 8> rotations = {13, 15, 26, 6, 17, 29, 16, 24};
- ThreeFry2x32State x;
-
- std::array<xla::XlaOp, 3> ks;
- // 0x1BD11BDA is a parity constant specified by the ThreeFry2x32 algorithm.
- ks[2] = builder->ConstantR0<int32>(0x1BD11BDA);
- for (int i = 0; i < 2; ++i) {
- ks[i] = key[i];
- x[i] = input[i];
- ks[2] = builder->Xor(ks[2], key[i]);
- }
-
- x[0] = builder->Add(x[0], ks[0]);
- x[1] = builder->Add(x[1], ks[1]);
-
- // Performs a single round of the Threefry2x32 algorithm, with a rotation
- // amount 'rotation'.
- auto round = [builder](ThreeFry2x32State v, int rotation) {
- v[0] = builder->Add(v[0], v[1]);
- v[1] = RotateLeftS32(builder, v[1], rotation);
- v[1] = builder->Xor(v[0], v[1]);
- return v;
- };
-
- // There are no known statistical flaws with 13 rounds of Threefry2x32.
- // We are conservative and use 20 rounds.
- x = round(x, rotations[0]);
- x = round(x, rotations[1]);
- x = round(x, rotations[2]);
- x = round(x, rotations[3]);
- x[0] = builder->Add(x[0], ks[1]);
- x[1] = builder->Add(builder->Add(x[1], ks[2]), builder->ConstantR0<int32>(1));
-
- x = round(x, rotations[4]);
- x = round(x, rotations[5]);
- x = round(x, rotations[6]);
- x = round(x, rotations[7]);
- x[0] = builder->Add(x[0], ks[2]);
- x[1] = builder->Add(builder->Add(x[1], ks[0]), builder->ConstantR0<int32>(2));
-
- x = round(x, rotations[0]);
- x = round(x, rotations[1]);
- x = round(x, rotations[2]);
- x = round(x, rotations[3]);
- x[0] = builder->Add(x[0], ks[0]);
- x[1] = builder->Add(builder->Add(x[1], ks[1]), builder->ConstantR0<int32>(3));
-
- x = round(x, rotations[4]);
- x = round(x, rotations[5]);
- x = round(x, rotations[6]);
- x = round(x, rotations[7]);
- x[0] = builder->Add(x[0], ks[1]);
- x[1] = builder->Add(builder->Add(x[1], ks[2]), builder->ConstantR0<int32>(4));
-
- x = round(x, rotations[0]);
- x = round(x, rotations[1]);
- x = round(x, rotations[2]);
- x = round(x, rotations[3]);
- x[0] = builder->Add(x[0], ks[2]);
- x[1] = builder->Add(builder->Add(x[1], ks[0]), builder->ConstantR0<int32>(5));
-
- return x;
-}
-
-// Returns a tensor of 'shape' random values uniformly distributed in the range
-// [minval, maxval)
-xla::XlaOp RandomUniform(xla::XlaBuilder* builder, const xla::XlaOp& seed,
- const TensorShape& shape, double minval,
- double maxval) {
- // Split the seed into two 32-bit scalars to form a key.
- auto seed0 = builder->Reshape(builder->Slice(seed, {0}, {1}, {1}), {});
- auto seed1 = builder->Reshape(builder->Slice(seed, {1}, {2}, {1}), {});
- ThreeFry2x32State key = {seed0, seed1};
- const int64 size = shape.num_elements();
-
- const int64 half_size = MathUtil::CeilOfRatio<int64>(size, 2);
- const bool size_is_odd = (half_size * 2 != size);
-
- // Fill the generator inputs with unique counter values.
- ThreeFry2x32State inputs;
- TF_CHECK_OK(XlaHelpers::Iota(builder, DT_INT32, half_size, &inputs[0]));
- inputs[1] = builder->Add(inputs[0], builder->ConstantR0<int32>(half_size));
- ThreeFry2x32State outputs = ThreeFry2x32(builder, inputs, key);
-
- if (size_is_odd) {
- outputs[1] = builder->Slice(outputs[1], {0}, {half_size - 1}, {1});
- }
-
- auto bits =
- builder->Reshape(builder->ConcatInDim(outputs, 0), shape.dim_sizes());
-
- // Form 23 random mantissa bits, with a leading 1 bit. The leading 1 bit
- // forces the random bits into the mantissa.
- constexpr int kFloatBits = 32;
- constexpr int kMantissaBits = 23;
- bits = builder->Or(
- builder->ShiftRightLogical(
- bits, builder->ConstantR0<int32>(kFloatBits - kMantissaBits)),
- builder->ConstantR0<int32>(bit_cast<int32>(1.0f)));
- auto floats = builder->BitcastConvertType(bits, xla::F32);
-
- // We have a floating point number in the range [1.0, 2.0).
- // Subtract 1.0f to shift to the range [0.0, 1.0)
- floats = builder->Sub(floats, builder->ConstantR0<float>(1.0f));
- // Multiply and add to shift to the range [minval, maxval).
- floats = builder->Mul(floats, builder->ConstantR0<float>(maxval - minval));
- floats = builder->Add(floats, builder->ConstantR0<float>(minval));
- return floats;
-}
-
-} // namespace
-
class StatelessRandomUniformOp : public XlaOpKernel {
public:
explicit StatelessRandomUniformOp(OpKernelConstruction* ctx)
@@ -174,7 +50,17 @@ class StatelessRandomUniformOp : public XlaOpKernel {
errors::InvalidArgument("seed must have shape [2], not ",
seed_shape.DebugString()));
xla::XlaOp seed = ctx->Input(1);
- ctx->SetOutput(0, RandomUniform(builder, seed, shape, 0.0, 1.0));
+
+ xla::Shape xla_shape;
+ OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(DT_FLOAT, shape, &xla_shape));
+
+ auto seed0 = xla::Reshape(xla::Slice(seed, {0}, {1}, {1}), {});
+ auto seed1 = xla::Reshape(xla::Slice(seed, {1}, {2}, {1}), {});
+
+ auto uniform = xla::StatelessRngUniform(
+ {seed0, seed1}, xla_shape, xla::ConstantR0<float>(builder, 0.0),
+ xla::ConstantR0<float>(builder, 1.0));
+ ctx->SetOutput(0, uniform);
}
private:
@@ -203,14 +89,20 @@ class StatelessRandomNormalOp : public XlaOpKernel {
seed_shape.DebugString()));
xla::XlaOp seed = ctx->Input(1);
xla::XlaBuilder* builder = ctx->builder();
- auto uniform =
- RandomUniform(builder, seed, shape, std::nextafter(-1.0f, 0.0f), 1.0);
+ xla::Shape xla_shape;
+ OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(DT_FLOAT, shape, &xla_shape));
+
+ auto seed0 = xla::Reshape(xla::Slice(seed, {0}, {1}, {1}), {});
+ auto seed1 = xla::Reshape(xla::Slice(seed, {1}, {2}, {1}), {});
+
+ auto uniform = xla::StatelessRngUniform(
+ {seed0, seed1}, xla_shape,
+ xla::ConstantR0<float>(builder, std::nextafter(-1.0f, 0.0f)),
+ xla::ConstantR0<float>(builder, 1.0));
// Convert uniform distribution to normal distribution by computing
// sqrt(2) * erfinv(x)
- auto erfinv_or_status = ErfInv(uniform);
- OP_REQUIRES_OK(ctx, erfinv_or_status.status());
- auto normal = builder->Mul(builder->ConstantR0<float>(std::sqrt(2.0)),
- erfinv_or_status.ValueOrDie());
+ auto normal =
+ xla::ScalarLike(uniform, std::sqrt(2.0)) * xla::ErfInv(uniform);
ctx->SetOutput(0, normal);
}
@@ -231,8 +123,6 @@ class StatelessTruncatedNormalOp : public XlaOpKernel {
: XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
- const DataType dtype = output_type(0);
-
TensorShape shape;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsShape(0, &shape));
@@ -241,13 +131,19 @@ class StatelessTruncatedNormalOp : public XlaOpKernel {
errors::InvalidArgument("seed must have shape [2], not ",
seed_shape.DebugString()));
xla::XlaOp seed = ctx->Input(1);
- xla::XlaBuilder* b = ctx->builder();
+ xla::XlaBuilder* builder = ctx->builder();
+
+ auto seed0 = xla::Reshape(xla::Slice(seed, {0}, {1}, {1}), {});
+ auto seed1 = xla::Reshape(xla::Slice(seed, {1}, {2}, {1}), {});
- auto uniform =
- RandomUniform(b, seed, shape, std::numeric_limits<float>::min(), 1.0);
- auto truncated_normal_or_status = TruncatedNormal(dtype, uniform, b);
- OP_REQUIRES_OK(ctx, truncated_normal_or_status.status());
- ctx->SetOutput(0, truncated_normal_or_status.ValueOrDie());
+ xla::Shape xla_shape;
+ OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(DT_FLOAT, shape, &xla_shape));
+ auto uniform = xla::StatelessRngUniform(
+ {seed0, seed1}, xla_shape,
+ xla::ConstantR0<float>(builder, std::numeric_limits<float>::min()),
+ xla::ConstantR0<float>(builder, 1.0));
+
+ ctx->SetOutput(0, TruncatedNormal(uniform));
}
private:
@@ -260,4 +156,5 @@ REGISTER_XLA_OP(Name("StatelessTruncatedNormal")
.TypeConstraint("Tseed", DT_INT32),
StatelessTruncatedNormalOp);
+} // namespace
} // namespace tensorflow
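
The deleted RandomUniform converted raw bits to floats with a standard mantissa trick; sketched below in plain C++ (that xla::StatelessRngUniform lowers to something equivalent is an assumption here):

#include <cstdint>
#include <cstring>

// Move 23 random bits into the mantissa of a float whose exponent is that
// of 1.0f, giving a value in [1.0, 2.0), then shift to [0.0, 1.0).
float BitsToUnitFloat(std::uint32_t bits) {
  const std::uint32_t kOneBits = 0x3F800000u;       // bit pattern of 1.0f
  std::uint32_t mantissa = (bits >> 9) | kOneBits;  // 32 - 23 = 9
  float f;
  std::memcpy(&f, &mantissa, sizeof(f));            // bitcast
  return f - 1.0f;
}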
diff --git a/tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc b/tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc
index 55254c746e..c2165ccd86 100644
--- a/tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
@@ -92,12 +93,12 @@ class StridedSliceOp : public XlaOpKernel {
xla::XlaOp slice = ctx->Input(0);
if (!dimensions_to_reverse.empty()) {
- slice = ctx->builder()->Rev(slice, dimensions_to_reverse);
+ slice = xla::Rev(slice, dimensions_to_reverse);
}
- slice = ctx->builder()->Slice(slice, slice_begin, slice_end, slice_strides);
+ slice = xla::Slice(slice, slice_begin, slice_end, slice_strides);
- slice = ctx->builder()->Reshape(slice, final_shape.dim_sizes());
+ slice = xla::Reshape(slice, final_shape.dim_sizes());
ctx->SetOutput(0, slice);
}
@@ -171,7 +172,7 @@ class StridedSliceGradOp : public XlaOpKernel {
xla::XlaOp grad = ctx->Input(4);
// Undo any new/shrink axes.
- grad = ctx->builder()->Reshape(grad, processing_shape.dim_sizes());
+ grad = xla::Reshape(grad, processing_shape.dim_sizes());
// Pad the input gradients.
gtl::InlinedVector<int64, 4> dimensions_to_reverse;
@@ -204,9 +205,9 @@ class StridedSliceGradOp : public XlaOpKernel {
}
}
if (!dimensions_to_reverse.empty()) {
- grad = ctx->builder()->Rev(grad, dimensions_to_reverse);
+ grad = xla::Rev(grad, dimensions_to_reverse);
}
- grad = ctx->builder()->Pad(grad, zero, padding_config);
+ grad = xla::Pad(grad, zero, padding_config);
ctx->SetOutput(0, grad);
}
@@ -306,17 +307,17 @@ class StridedSliceAssignOp : public XlaOpKernel {
}
if (!dimensions_to_reverse.empty()) {
- rhs = ctx->builder()->Rev(rhs, dimensions_to_reverse);
+ rhs = xla::Rev(rhs, dimensions_to_reverse);
}
- rhs = ctx->builder()->Reshape(rhs, slice_dims);
+ rhs = xla::Reshape(rhs, slice_dims);
if (lhs_shape.dims() == 0) {
// TODO(b/38323843): DynamicUpdateSlice crashes on rank 0 inputs. Fix
// and remove this workaround.
lhs = rhs;
} else {
- lhs = ctx->builder()->DynamicUpdateSlice(
- lhs, rhs, ctx->builder()->ConstantR1<int64>(slice_begin));
+ lhs = xla::DynamicUpdateSlice(
+ lhs, rhs, xla::ConstantR1<int64>(ctx->builder(), slice_begin));
}
OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, lhs));
diff --git a/tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc b/tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc
index 9adee78a1f..26326f18b8 100644
--- a/tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc
@@ -25,7 +25,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/tf2xla/xla_resource.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/register_types.h"
@@ -123,10 +124,9 @@ xla::XlaOp DynamicAddSlice(xla::XlaBuilder* builder, const xla::XlaOp& operand,
const xla::XlaOp& update,
const gtl::ArraySlice<int64>& update_dims,
const xla::XlaOp& start_indices) {
- xla::XlaOp current =
- builder->DynamicSlice(operand, start_indices, update_dims);
- xla::XlaOp sum = builder->Add(current, update);
- return builder->DynamicUpdateSlice(operand, sum, start_indices);
+ xla::XlaOp current = xla::DynamicSlice(operand, start_indices, update_dims);
+ xla::XlaOp sum = xla::Add(current, update);
+ return xla::DynamicUpdateSlice(operand, sum, start_indices);
}
class TensorArrayOp : public XlaOpKernel {
@@ -162,7 +162,7 @@ class TensorArrayOp : public XlaOpKernel {
ta_shape.AddDim(size);
ta_shape.AppendShape(shape);
xla::XlaOp zero = XlaHelpers::Zero(b, dtype_);
- value = b->Broadcast(zero, ta_shape.dim_sizes());
+ value = xla::Broadcast(zero, ta_shape.dim_sizes());
}
XlaContext& xc = XlaContext::Get(ctx);
@@ -215,12 +215,12 @@ class TensorArrayWriteOp : public XlaOpKernel {
// start_indices of the DynamicUpdateSlice are [index, 0, 0, ..., 0].
auto start_indices =
- b->Pad(b->Reshape(index, {1}), b->ConstantR0<int32>(0),
- xla::MakeEdgePaddingConfig({{0, elem_shape.dims()}}));
+ xla::Pad(xla::Reshape(index, {1}), xla::ConstantR0<int32>(b, 0),
+ xla::MakeEdgePaddingConfig({{0, elem_shape.dims()}}));
TensorShape slice_shape = elem_shape;
slice_shape.InsertDim(0, 1LL);
- auto update = b->Reshape(value, slice_shape.dim_sizes());
+ auto update = xla::Reshape(value, slice_shape.dim_sizes());
xla::XlaOp written =
DynamicAddSlice(b, ta, update, slice_shape.dim_sizes(), start_indices);
@@ -259,17 +259,17 @@ class TensorArrayReadOp : public XlaOpKernel {
// start_indices of the DynamicSlice are [index, 0, 0, ..., 0].
auto start_indices =
- b->Pad(b->Reshape(index, {1}), b->ConstantR0<int32>(0),
- xla::MakeEdgePaddingConfig({{0, ta_shape.dims() - 1}}));
+ xla::Pad(xla::Reshape(index, {1}), xla::ConstantR0<int32>(b, 0),
+ xla::MakeEdgePaddingConfig({{0, ta_shape.dims() - 1}}));
auto slice_shape = ta_shape.dim_sizes();
slice_shape[0] = 1LL;
- xla::XlaOp read = b->DynamicSlice(ta, start_indices, slice_shape);
+ xla::XlaOp read = xla::DynamicSlice(ta, start_indices, slice_shape);
// Remove the leading '1' dimension.
std::vector<int64> value_shape(slice_shape.begin() + 1, slice_shape.end());
- ctx->SetOutput(0, b->Reshape(read, value_shape));
+ ctx->SetOutput(0, xla::Reshape(read, value_shape));
}
private:
@@ -326,7 +326,7 @@ class TensorArrayGatherOp : public XlaOpKernel {
for (auto i = 1; i < ta_shape.dims(); i++) {
end[i] = ta_shape.dim_size(i);
}
- ctx->SetOutput(0, b->Slice(ta, begin, end, strides));
+ ctx->SetOutput(0, xla::Slice(ta, begin, end, strides));
return;
}
}
@@ -391,7 +391,7 @@ class TensorArrayScatterOp : public XlaOpKernel {
}
if (scatter_all_elements_in_order) {
- ta = b->Add(ta, value);
+ ta = xla::Add(ta, value);
} else {
auto slice_dims = value_shape.dim_sizes();
slice_dims[0] = 1LL;
@@ -407,13 +407,13 @@ class TensorArrayScatterOp : public XlaOpKernel {
// Slice out part of the value.
value_starts[0] = i;
value_ends[0] = i + 1;
- auto slice = b->Slice(value, value_starts, value_ends, value_strides);
+ auto slice = xla::Slice(value, value_starts, value_ends, value_strides);
// start_indices of the DynamicUpdateSlice are [index, 0, 0, ..., 0].
- auto index = b->Slice(indices, {i}, {i + 1}, {1});
+ auto index = xla::Slice(indices, {i}, {i + 1}, {1});
auto start_indices =
- b->Pad(b->Reshape(index, {1}), b->ConstantR0<int32>(0),
- xla::MakeEdgePaddingConfig({{0, elem_shape.dims()}}));
+ xla::Pad(xla::Reshape(index, {1}), xla::ConstantR0<int32>(b, 0),
+ xla::MakeEdgePaddingConfig({{0, elem_shape.dims()}}));
ta = DynamicAddSlice(b, ta, slice, slice_dims, start_indices);
}
}
@@ -452,7 +452,7 @@ class TensorArrayConcatOp : public XlaOpKernel {
auto ta_dims = ta_shape.dim_sizes();
std::vector<int64> shape(ta_dims.begin() + 1, ta_dims.end());
shape[0] *= ta_shape.dim_size(0);
- ctx->SetOutput(0, b->Reshape(ta, shape));
+ ctx->SetOutput(0, xla::Reshape(ta, shape));
Tensor lengths(DT_INT64, {ta_dims[0]});
auto lengths_vec = lengths.vec<int64>();
@@ -522,8 +522,8 @@ class TensorArraySplitOp : public XlaOpKernel {
value_shape.DebugString(), " vs. ",
ta_shape.DebugString()));
- OP_REQUIRES_OK(ctx, resource->SetValue(b->Add(
- ta, b->Reshape(value, ta_shape.dim_sizes()))));
+ OP_REQUIRES_OK(ctx, resource->SetValue(xla::Add(
+ ta, xla::Reshape(value, ta_shape.dim_sizes()))));
ctx->SetOutput(0, flow);
}
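
DynamicAddSlice is a read-modify-write on a window of the tensor array; its 1-D semantics, sketched:

#include <cstddef>
#include <cstdint>
#include <vector>

// Slice update.size() elements at start, add update, write them back:
// the accumulate-on-write behavior TensorArrayWrite/Scatter rely on.
void DynamicAddSlice1D(std::vector<float>* operand,
                       const std::vector<float>& update, std::int64_t start) {
  for (std::size_t i = 0; i < update.size(); ++i) {
    (*operand)[start + i] += update[i];
  }
}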
diff --git a/tensorflow/compiler/tf2xla/kernels/tile_ops.cc b/tensorflow/compiler/tf2xla/kernels/tile_ops.cc
index e91075196b..c9e5694262 100644
--- a/tensorflow/compiler/tf2xla/kernels/tile_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/tile_ops.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
@@ -93,9 +94,9 @@ class TileOp : public XlaOpKernel {
if (one_dimension_is_broadcasted_without_multiple) {
// Create a constant Zero the size of the output shape to leverage binary
// operation broadcast semantics.
- auto broadcasted_zero = ctx->builder()->Broadcast(
+ auto broadcasted_zero = xla::Broadcast(
XlaHelpers::Zero(ctx->builder(), ctx->input_type(0)), output_shape);
- ctx->SetOutput(0, ctx->builder()->Add(broadcasted_zero, input));
+ ctx->SetOutput(0, xla::Add(broadcasted_zero, input));
return;
}
@@ -103,7 +104,7 @@ class TileOp : public XlaOpKernel {
// dimension. This prepends the broadcasted dimensions, so an
// input of shape [2,3,1] broadcast with multiples [5,4,3] will
// end up with shape [5,4,3,2,3,1].
- auto broadcasted = ctx->builder()->Broadcast(input, multiples_array);
+ auto broadcasted = xla::Broadcast(input, multiples_array);
// Now flatten and reshape. The broadcasted dimensions are
// paired with the original dimensions so in the above example
// we flatten [0,3,1,4,2,5] then reshape to [10,12,3].
@@ -112,8 +113,7 @@ class TileOp : public XlaOpKernel {
flattened.push_back(i);
flattened.push_back(i + output_shape.size());
}
- xla::XlaOp output =
- ctx->builder()->Reshape(broadcasted, flattened, output_shape);
+ xla::XlaOp output = xla::Reshape(broadcasted, flattened, output_shape);
ctx->SetOutput(0, output);
}
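
A worked example of the broadcast-then-reshape path above (illustrative): input shape [2, 3], multiples [5, 4].

#include <cstdio>

int main() {
  // Broadcast prepends the multiples:   [5, 4, 2, 3]
  // Interleaved reshape order pairs each multiple with its original dim:
  //   {0, 2, 1, 3}, i.e. (5 with 2) and (4 with 3)
  // Collapsed output shape:             [5 * 2, 4 * 3] = [10, 12]
  std::printf("Tile output shape: [10, 12]\n");
  return 0;
}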
diff --git a/tensorflow/compiler/tf2xla/kernels/topk_op.cc b/tensorflow/compiler/tf2xla/kernels/topk_op.cc
index cbe3c8aaff..1ddcb08c8e 100644
--- a/tensorflow/compiler/tf2xla/kernels/topk_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/topk_op.cc
@@ -16,8 +16,9 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/no_op.h"
@@ -51,108 +52,33 @@ class TopKOp : public XlaOpKernel {
errors::Unimplemented("TopK is implemented for 1-D inputs, got shape ",
input_shape.DebugString()));
- const int64 n = input_shape.dim_size(0);
- OP_REQUIRES(context, n < (1 << 16),
- errors::Unimplemented(
- "TopK is implemented for sizes up to 2**16, got shape ",
- input_shape.DebugString()));
-
xla::XlaBuilder* const b = context->builder();
if (input_shape.dim_size(0) < k) {
k = input_shape.dim_size(0);
}
- const xla::XlaOp input_bf16 = context->Input(0);
- xla::XlaOp iota_s32;
- OP_REQUIRES_OK(context, XlaHelpers::Iota(b, DT_INT32, n, &iota_s32));
-
- // TODO(b/73891930): add a key-value sort to HLO, rather than using
- // bit-packing tricks here.
-
- xla::XlaOp zero = b->ConstantR0<int32>(0);
-
- // max can either be 0x7FFFFFFF or 0x80000000. Neither choice is totally
- // ideal. The implications of the choice are:
- //
- // 0x7FFFFFFF
- // 1. +0.0 > -0.0
- // 2. The elements of the inputs and outputs are bitwise identical.
- // 3. The sort is unstable since a later +0.0 will appear before an earlier
- // -0.0.
- //
- // 0x80000000
- // 1. +0.0 == -0.0
- // 2. All -0.0 in the input are replaced with +0.0 in the output.
- // 3. The sort is stable.
- xla::XlaOp max = b->ConstantR0<int32>(0x80000000);
- xla::XlaOp index_mask = b->ConstantR0<int32>(0x0000FFFF);
- xla::XlaOp value_mask = b->ConstantR0<int32>(0xFFFF0000);
-
- // Convert to from bf16 to f32. The lower 16-bits are zero due to the
- // definition of bf16.
- xla::XlaOp input_f32 = b->ConvertElementType(input_bf16, xla::F32);
-
- // Negate the input to reverse sort it. The lower 16-bits are zero, because
- // negating a float is just inverting the high-bit.
- xla::XlaOp negative_input_f32 = b->Neg(input_f32);
-
- // Convert to a sign magnitude integer. The lower 16-bits are zero, since
- // bitcast convert doesn't change any bits.
- xla::XlaOp negative_input_sm32 =
- b->BitcastConvertType(negative_input_f32, xla::S32);
-
- // Convert from sign magnitude integer to two's complement integer. The
- // lower 16-bits are zero on both sides of the select. On the false side,
- // the value is unchanged, and on the true side, the lower 16-bits of max
- // are all zero, so the lower 16-bits of the result of the subtraction will
- // also be zero.
- xla::XlaOp negative_input_s32 =
- b->Select(b->Lt(negative_input_sm32, zero),
- b->Sub(max, negative_input_sm32), negative_input_sm32);
-
- // In order for the Or with iota_s32 to work properly, the lower 16-bits
- // of negative_input_s32 must be zero.
-
- // Pack elements as:
- // * upper 16 bits are the value
- // * lower 16 bits are the index.
- xla::XlaOp packed_s32 = b->Or(negative_input_s32, iota_s32);
-
- // TODO(phawkins): use a more efficient algorithm that does not require a
- // full sort.
- xla::XlaOp sorted_s32 = b->Slice(b->Sort(packed_s32),
- /*start_indices=*/{0},
- /*limit_indices=*/{k},
- /*strides=*/{1});
-
- // Unpack the value/index.
- xla::XlaOp indices_s32 = b->And(sorted_s32, index_mask);
- xla::XlaOp negative_values_s32 = b->And(sorted_s32, value_mask);
-
- // Convert from two's complement integer to sign magnitude integer.
- xla::XlaOp negative_values_sm32 =
- b->Select(b->Lt(negative_values_s32, zero),
- b->Sub(max, negative_values_s32), negative_values_s32);
-
- xla::XlaOp negative_values_f32 =
- b->BitcastConvertType(negative_values_sm32, xla::F32);
-
- // Negate the values to get back the original inputs.
- xla::XlaOp values_f32 = b->Neg(negative_values_f32);
-
- // Convert from f32 to bf16.
- xla::XlaOp values_bf16 = b->ConvertElementType(values_f32, xla::BF16);
-
- context->SetOutput(0, values_bf16);
- context->SetOutput(1, indices_s32);
+ const xla::XlaOp input = context->Input(0);
+ xla::XlaOp iota_s32 = xla::Iota(b, xla::S32, input_shape.dim_size(0));
+ xla::XlaOp sort_result = xla::Sort(xla::Neg(input), iota_s32);
+ xla::XlaOp values =
+ xla::Neg(xla::Slice(xla::GetTupleElement(sort_result, 0),
+ /*start_indices=*/{0},
+ /*limit_indices=*/{k},
+ /*strides=*/{1}));
+ xla::XlaOp indices = xla::Slice(xla::GetTupleElement(sort_result, 1),
+ /*start_indices=*/{0},
+ /*limit_indices=*/{k},
+ /*strides=*/{1});
+ context->SetOutput(0, values);
+ context->SetOutput(1, indices);
}
private:
bool sorted_;
};
-REGISTER_XLA_OP(
- Name("TopKV2").CompileTimeConstInput("k").TypeConstraint("T", DT_BFLOAT16),
- TopKOp);
+REGISTER_XLA_OP(Name("TopKV2").CompileTimeConstInput("k").TypeConstraint(
+ "T", {DT_UINT32, DT_INT32, DT_FLOAT, DT_BFLOAT16}),
+ TopKOp);
} // namespace
} // namespace tensorflow
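
A host-side analogue of the new lowering (a sketch, not the XLA implementation): order the indices by key descending, carrying the iota along, then keep the first k of each.

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// xla::Sort(Neg(input), iota) sorts ascending by -input, i.e. descending by
// input, with the iota as the carried value; slicing [0, k) then yields the
// top-k values and their indices.
void TopK(const std::vector<float>& input, std::int64_t k,
          std::vector<float>* values, std::vector<std::int32_t>* indices) {
  std::vector<std::int32_t> order(input.size());
  std::iota(order.begin(), order.end(), 0);
  std::stable_sort(order.begin(), order.end(),
                   [&](std::int32_t a, std::int32_t b) {
                     return input[a] > input[b];
                   });
  values->clear();
  indices->clear();
  for (std::int64_t i = 0;
       i < k && i < static_cast<std::int64_t>(order.size()); ++i) {
    indices->push_back(order[i]);
    values->push_back(input[order[i]]);
  }
}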
diff --git a/tensorflow/compiler/tf2xla/kernels/training_ops.cc b/tensorflow/compiler/tf2xla/kernels/training_ops.cc
index 34caefa050..98df730249 100644
--- a/tensorflow/compiler/tf2xla/kernels/training_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/training_ops.cc
@@ -16,8 +16,10 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/kernels/cwise_ops.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/math.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/no_op.h"
@@ -31,7 +33,6 @@ class ResourceApplyGradientDescent : public XlaOpKernel {
: XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
xla::XlaOp handle;
- xla::XlaBuilder* b = ctx->builder();
DataType type = ctx->input_type(1);
TensorShape var_shape;
OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, type, &var_shape, &handle));
@@ -48,7 +49,7 @@ class ResourceApplyGradientDescent : public XlaOpKernel {
var_shape.DebugString(), " vs ",
delta_shape.DebugString()));
- handle = b->Sub(handle, b->Mul(ctx->Input(1), ctx->Input(2)));
+ handle = handle - ctx->Input(1) * ctx->Input(2);
OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, handle));
}
};
@@ -56,6 +57,64 @@ REGISTER_XLA_OP(
Name("ResourceApplyGradientDescent").TypeConstraint("T", kFloatTypes),
ResourceApplyGradientDescent);
+xla::XlaOp ProximalGradientDescentUpdate(xla::XlaOp var, xla::XlaOp lr,
+ xla::XlaOp l1, xla::XlaOp l2,
+ xla::XlaOp grad) {
+ xla::XlaOp one = xla::ScalarLike(lr, 1.0);
+ xla::XlaOp zero = xla::ScalarLike(lr, 0.0);
+ xla::XlaOp prox_var = var - grad * lr;
+ xla::XlaOp l1_gt_zero = xla::Sign(prox_var) *
+ xla::Max(xla::Abs(prox_var) - lr * l1, zero) /
+ (one + lr * l2);
+ xla::XlaOp l1_le_zero = prox_var / (one + lr * l2);
+ return xla::Select(xla::Gt(l1, zero), l1_gt_zero, l1_le_zero);
+}
+
+class ResourceApplyProximalGradientDescent : public XlaOpKernel {
+ public:
+ explicit ResourceApplyProximalGradientDescent(OpKernelConstruction* ctx)
+ : XlaOpKernel(ctx) {
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_));
+ }
+
+ void Compile(XlaOpKernelContext* ctx) override {
+ xla::XlaOp var;
+ TensorShape var_shape;
+ OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &var_shape, &var));
+
+ TensorShape alpha_shape = ctx->InputShape(1);
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(alpha_shape),
+ errors::InvalidArgument("alpha is not a scalar: ",
+ alpha_shape.DebugString()));
+ TensorShape l1_shape = ctx->InputShape(2);
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(l1_shape),
+ errors::InvalidArgument("l1 is not a scalar: ",
+ l1_shape.DebugString()));
+ TensorShape l2_shape = ctx->InputShape(3);
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(l2_shape),
+ errors::InvalidArgument("l2 is not a scalar: ",
+ l2_shape.DebugString()));
+ TensorShape delta_shape = ctx->InputShape(4);
+ OP_REQUIRES(
+ ctx, var_shape.IsSameSize(delta_shape),
+ errors::InvalidArgument("var and delta do not have the same shape: ",
+ var_shape.DebugString(), " vs ",
+ delta_shape.DebugString()));
+ xla::XlaOp alpha = ctx->Input(1);
+ xla::XlaOp l1 = ctx->Input(2);
+ xla::XlaOp l2 = ctx->Input(3);
+ xla::XlaOp delta = ctx->Input(4);
+ var = ProximalGradientDescentUpdate(var, alpha, l1, l2, delta);
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var));
+ }
+
+ private:
+ DataType dtype_;
+};
+REGISTER_XLA_OP(Name("ResourceApplyProximalGradientDescent")
+ .TypeConstraint("T", kFloatTypes),
+ ResourceApplyProximalGradientDescent);
+
class ResourceApplyMomentum : public XlaOpKernel {
public:
explicit ResourceApplyMomentum(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
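
In scalar form, the ProximalGradientDescentUpdate helper added above is a gradient step followed by soft-thresholding (l1) and shrinkage (l2); a sketch:

#include <algorithm>
#include <cmath>

float ProximalStep(float var, float lr, float l1, float l2, float grad) {
  const float prox_var = var - grad * lr;  // plain gradient step
  if (l1 > 0.0f) {
    // Soft-threshold toward zero by lr * l1, then shrink by 1 / (1 + lr*l2).
    const float shrunk = std::max(std::fabs(prox_var) - lr * l1, 0.0f);
    return std::copysign(shrunk, prox_var) / (1.0f + lr * l2);
  }
  return prox_var / (1.0f + lr * l2);
}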
@@ -63,8 +122,6 @@ class ResourceApplyMomentum : public XlaOpKernel {
}
void Compile(XlaOpKernelContext* ctx) override {
- xla::XlaBuilder* b = ctx->builder();
-
DataType type = ctx->input_type(2);
TensorShape var_shape, accum_shape;
@@ -97,14 +154,13 @@ class ResourceApplyMomentum : public XlaOpKernel {
xla::XlaOp grad = ctx->Input(3);
xla::XlaOp momentum = ctx->Input(4);
- accum = b->Add(b->Mul(accum, momentum), grad);
+ accum = accum * momentum + grad;
if (use_nesterov_) {
// See https://github.com/tensorflow/tensorflow/pull/2798 for an
// explanation of the reparameterization used here.
- var = b->Sub(
- var, b->Add(b->Mul(grad, lr), b->Mul(b->Mul(accum, momentum), lr)));
+ var = var - (grad * lr + accum * momentum * lr);
} else {
- var = b->Sub(var, b->Mul(accum, lr));
+ var = var - accum * lr;
}
OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, var));
OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, type, accum));
@@ -121,8 +177,6 @@ class ResourceApplyAdagrad : public XlaOpKernel {
explicit ResourceApplyAdagrad(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
- xla::XlaBuilder* b = ctx->builder();
-
DataType type = ctx->input_type(2);
TensorShape var_shape, accum_shape;
@@ -149,10 +203,8 @@ class ResourceApplyAdagrad : public XlaOpKernel {
xla::XlaOp lr = ctx->Input(2);
xla::XlaOp grad = ctx->Input(3);
- accum = b->Add(accum, b->Pow(grad, XlaHelpers::FloatLiteral(b, type, 2.0)));
- var = b->Sub(
- var, b->Mul(b->Mul(grad, lr),
- b->Pow(accum, XlaHelpers::FloatLiteral(b, type, -0.5))));
+ accum = accum + xla::Square(grad);
+ var = var - grad * lr * xla::Rsqrt(accum);
OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, var));
OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, type, accum));
}
@@ -160,6 +212,139 @@ class ResourceApplyAdagrad : public XlaOpKernel {
REGISTER_XLA_OP(Name("ResourceApplyAdagrad").TypeConstraint("T", kFloatTypes),
ResourceApplyAdagrad);
+class ResourceApplyProximalAdagrad : public XlaOpKernel {
+ public:
+ explicit ResourceApplyProximalAdagrad(OpKernelConstruction* ctx)
+ : XlaOpKernel(ctx) {
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_));
+ }
+
+ void Compile(XlaOpKernelContext* ctx) override {
+ TensorShape var_shape, accum_shape;
+ xla::XlaOp var, accum;
+ OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &var_shape, &var));
+ OP_REQUIRES_OK(ctx,
+ ctx->ReadVariableInput(1, dtype_, &accum_shape, &accum));
+
+ OP_REQUIRES(ctx, var_shape.IsSameSize(accum_shape),
+ errors::InvalidArgument(
+ "var and accum do not have the same shape",
+ var_shape.DebugString(), " ", accum_shape.DebugString()));
+
+ TensorShape lr_shape = ctx->InputShape(2);
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape),
+ errors::InvalidArgument("lr is not a scalar: ",
+ lr_shape.DebugString()));
+ TensorShape l1_shape = ctx->InputShape(3);
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(l1_shape),
+ errors::InvalidArgument("l1 is not a scalar: ",
+ l1_shape.DebugString()));
+ TensorShape l2_shape = ctx->InputShape(4);
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(l2_shape),
+ errors::InvalidArgument("l2 is not a scalar: ",
+ l2_shape.DebugString()));
+ TensorShape grad_shape = ctx->InputShape(5);
+ OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape),
+ errors::InvalidArgument(
+ "var and grad do not have the same shape: ",
+ var_shape.DebugString(), " vs ", grad_shape.DebugString()));
+
+ xla::XlaOp lr = ctx->Input(2);
+ xla::XlaOp l1 = ctx->Input(3);
+ xla::XlaOp l2 = ctx->Input(4);
+ xla::XlaOp grad = ctx->Input(5);
+ accum = accum + xla::Square(grad);
+ // Adagrad learning rate.
+ xla::XlaOp adagrad_lr = lr * xla::Rsqrt(accum);
+ var = ProximalGradientDescentUpdate(var, adagrad_lr, l1, l2, grad);
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var));
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, dtype_, accum));
+ }
+
+ private:
+ DataType dtype_;
+};
+REGISTER_XLA_OP(
+ Name("ResourceApplyProximalAdagrad").TypeConstraint("T", kFloatTypes),
+ ResourceApplyProximalAdagrad);
+
+class ResourceApplyAdagradDA : public XlaOpKernel {
+ public:
+ explicit ResourceApplyAdagradDA(OpKernelConstruction* ctx)
+ : XlaOpKernel(ctx) {
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_));
+ }
+
+ void Compile(XlaOpKernelContext* ctx) override {
+ TensorShape var_shape, accum_shape, squared_accum_shape;
+ xla::XlaOp var, accum, squared_accum;
+ OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &var_shape, &var));
+ OP_REQUIRES_OK(ctx,
+ ctx->ReadVariableInput(1, dtype_, &accum_shape, &accum));
+ OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(2, dtype_, &squared_accum_shape,
+ &squared_accum));
+ OP_REQUIRES(ctx, var_shape.IsSameSize(accum_shape),
+ errors::InvalidArgument(
+ "var and accum do not have the same shape",
+ var_shape.DebugString(), " ", accum_shape.DebugString()));
+ OP_REQUIRES(
+ ctx, var_shape.IsSameSize(squared_accum_shape),
+ errors::InvalidArgument(
+ "var and squared accum do not have the same shape",
+ var_shape.DebugString(), " ", squared_accum_shape.DebugString()));
+
+ TensorShape grad_shape = ctx->InputShape(3);
+ TensorShape lr_shape = ctx->InputShape(4);
+ TensorShape l1_shape = ctx->InputShape(5);
+ TensorShape l2_shape = ctx->InputShape(6);
+ TensorShape global_step_shape = ctx->InputShape(7);
+
+ OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape),
+ errors::InvalidArgument(
+ "var and grad do not have the same shape",
+ var_shape.DebugString(), " ", grad_shape.DebugString()));
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape),
+ errors::InvalidArgument("lr is not a scalar: ",
+ lr_shape.DebugString()));
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(l1_shape),
+ errors::InvalidArgument("l1 is not a scalar: ",
+ l1_shape.DebugString()));
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(l2_shape),
+ errors::InvalidArgument("l2 is not a scalar: ",
+ l2_shape.DebugString()));
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(global_step_shape),
+ errors::InvalidArgument("global step is not a scalar: ",
+ global_step_shape.DebugString()));
+
+ xla::XlaOp grad = ctx->Input(3);
+ xla::XlaOp lr = ctx->Input(4);
+ xla::XlaOp l1 = ctx->Input(5);
+ xla::XlaOp l2 = ctx->Input(6);
+ xla::XlaBuilder* const b = ctx->builder();
+ xla::XlaOp global_step =
+ XlaHelpers::ConvertElementType(b, ctx->Input(7), dtype_);
+
+ accum = accum + grad;
+ squared_accum = squared_accum + xla::Square(grad);
+ xla::XlaOp zero = xla::ScalarLike(lr, 0.0);
+ xla::XlaOp denominator = global_step * lr * l2 + xla::Sqrt(squared_accum);
+ xla::XlaOp l1_le_zero = -lr * accum / denominator;
+ xla::XlaOp l1_gt_zero = -lr * xla::Sign(accum) *
+ xla::Max(xla::Abs(accum) - global_step * l1, zero) /
+ denominator;
+
+ var = xla::Select(xla::Gt(l1, zero), l1_gt_zero, l1_le_zero);
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var));
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, dtype_, accum));
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(2, dtype_, squared_accum));
+ }
+
+ private:
+ DataType dtype_;
+};
+REGISTER_XLA_OP(Name("ResourceApplyAdagradDA").TypeConstraint("T", kFloatTypes),
+ ResourceApplyAdagradDA);
+
class ResourceApplyAdam : public XlaOpKernel {
public:
explicit ResourceApplyAdam(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
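
Scalar form of the AdagradDA update above (sketch): the variable is recomputed in closed form from the accumulated gradients rather than updated incrementally.

#include <algorithm>
#include <cmath>

float AdagradDaStep(float* accum, float* squared_accum, float grad, float lr,
                    float l1, float l2, float global_step) {
  *accum += grad;
  *squared_accum += grad * grad;
  const float denom = global_step * lr * l2 + std::sqrt(*squared_accum);
  if (l1 > 0.0f) {
    const float shrunk =
        std::max(std::fabs(*accum) - global_step * l1, 0.0f);
    return -lr * std::copysign(1.0f, *accum) * shrunk / denom;
  }
  return -lr * *accum / denom;
}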
@@ -227,17 +412,12 @@ class ResourceApplyAdam : public XlaOpKernel {
// variable <- variable - alpha * m_t / (sqrt(v_t) + epsilon)
xla::XlaBuilder* b = ctx->builder();
- xla::XlaOp half = XlaHelpers::FloatLiteral(b, dtype_, 0.5);
xla::XlaOp one = XlaHelpers::FloatLiteral(b, dtype_, 1.0);
- xla::XlaOp two = XlaHelpers::FloatLiteral(b, dtype_, 2.0);
- xla::XlaOp alpha =
- b->Div(b->Mul(lr, b->Pow(b->Sub(one, beta2_power), half)),
- b->Sub(one, beta1_power));
- m = b->Add(m, b->Mul(b->Sub(grad, m), b->Sub(one, beta1)));
- v = b->Add(v, b->Mul(b->Sub(b->Pow(grad, two), v), b->Sub(one, beta2)));
- var =
- b->Sub(var, b->Div(b->Mul(m, alpha), b->Add(b->Pow(v, half), epsilon)));
+ xla::XlaOp alpha = lr * xla::Sqrt(one - beta2_power) / (one - beta1_power);
+ m = m + (grad - m) * (one - beta1);
+ v = v + (xla::Square(grad) - v) * (one - beta2);
+ var = var - m * alpha / (xla::Sqrt(v) + epsilon);
OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var));
OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, dtype_, m));
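
The rewritten Adam body maps directly onto the scalar update; a sketch:

#include <cmath>

void AdamStep(float* var, float* m, float* v, float lr, float beta1,
              float beta2, float beta1_power, float beta2_power,
              float epsilon, float grad) {
  // Bias-corrected step size, as in the alpha expression above.
  const float alpha =
      lr * std::sqrt(1.0f - beta2_power) / (1.0f - beta1_power);
  *m += (grad - *m) * (1.0f - beta1);         // first moment
  *v += (grad * grad - *v) * (1.0f - beta2);  // second moment
  *var -= *m * alpha / (std::sqrt(*v) + epsilon);
}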
@@ -250,38 +430,112 @@ class ResourceApplyAdam : public XlaOpKernel {
REGISTER_XLA_OP(Name("ResourceApplyAdam").TypeConstraint("T", kFloatTypes),
ResourceApplyAdam);
-class ResourceApplyRMSProp : public XlaOpKernel {
+class ResourceApplyAdaMax : public XlaOpKernel {
public:
- explicit ResourceApplyRMSProp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
+ explicit ResourceApplyAdaMax(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_));
+ }
void Compile(XlaOpKernelContext* ctx) override {
- xla::XlaBuilder* b = ctx->builder();
+ TensorShape var_shape, m_shape, v_shape;
+ xla::XlaOp var, m, v;
+ OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &var_shape, &var));
+ OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(1, dtype_, &m_shape, &m));
+ OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(2, dtype_, &v_shape, &v));
- DataType type = ctx->input_type(3);
+ TensorShape beta1_power_shape = ctx->InputShape(3);
+ TensorShape lr_shape = ctx->InputShape(4);
+ TensorShape beta1_shape = ctx->InputShape(5);
+ TensorShape beta2_shape = ctx->InputShape(6);
+ TensorShape epsilon_shape = ctx->InputShape(7);
+ TensorShape grad_shape = ctx->InputShape(8);
- TensorShape var_shape, ms_shape, mom_shape;
- xla::XlaOp var, ms, mom;
- OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, type, &var_shape, &var));
- OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(1, type, &ms_shape, &ms));
- OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(2, type, &mom_shape, &mom));
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(beta1_power_shape),
+ errors::InvalidArgument("beta1_power is not a scalar: ",
+ beta1_power_shape.DebugString()));
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape),
+ errors::InvalidArgument("lr is not a scalar : ",
+ lr_shape.DebugString()));
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(beta1_shape),
+ errors::InvalidArgument("beta1 is not a scalar: ",
+ beta1_shape.DebugString()));
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(beta2_shape),
+ errors::InvalidArgument("beta2 is not a scalar: ",
+ beta2_shape.DebugString()));
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(epsilon_shape),
+ errors::InvalidArgument("epsilon is not a scalar: ",
+ epsilon_shape.DebugString()));
+ OP_REQUIRES(ctx, var_shape.IsSameSize(m_shape),
+ errors::InvalidArgument("var and m do not have the same shape",
+ var_shape.DebugString(), " ",
+ m_shape.DebugString()));
+ OP_REQUIRES(ctx, var_shape.IsSameSize(v_shape),
+ errors::InvalidArgument("var and v do not have the same shape",
+ var_shape.DebugString(), " ",
+ v_shape.DebugString()));
+ OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape),
+ errors::InvalidArgument(
+ "var and grad do not have the same shape",
+ var_shape.DebugString(), " ", grad_shape.DebugString()));
- TensorShape lr_shape = ctx->InputShape(3);
+ xla::XlaOp beta1_power = ctx->Input(3);
+ xla::XlaOp lr = ctx->Input(4);
+ xla::XlaOp beta1 = ctx->Input(5);
+ xla::XlaOp beta2 = ctx->Input(6);
+ xla::XlaOp epsilon = ctx->Input(7);
+ xla::XlaOp grad = ctx->Input(8);
+
+ xla::XlaOp one = xla::ScalarLike(lr, 1.0);
+ m = beta1 * m + (one - beta1) * grad;
+ v = xla::Max(beta2 * v, xla::Abs(grad));
+ var = var - lr / (one - beta1_power) * (m / (v + epsilon));
+
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var));
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, dtype_, m));
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(2, dtype_, v));
+ }
+
+ private:
+ DataType dtype_;
+};
+REGISTER_XLA_OP(Name("ResourceApplyAdaMax").TypeConstraint("T", kFloatTypes),
+ ResourceApplyAdaMax);
+
+class ResourceApplyRMSProp : public XlaOpKernel {
+ public:
+ explicit ResourceApplyRMSProp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_));
+ }
+
+ void Compile(XlaOpKernelContext* ctx) override {
+ TensorShape var_shape, ms_shape, mom_shape, mg_shape;
+ xla::XlaOp var, ms, mom, mg;
+ OP_REQUIRES_OK(ctx,
+ ctx->ReadVariableInput("var", dtype_, &var_shape, &var));
+ if (centered_) {
+ OP_REQUIRES_OK(ctx, ctx->ReadVariableInput("mg", dtype_, &mg_shape, &mg));
+ }
+ OP_REQUIRES_OK(ctx, ctx->ReadVariableInput("ms", dtype_, &ms_shape, &ms));
+ OP_REQUIRES_OK(ctx,
+ ctx->ReadVariableInput("mom", dtype_, &mom_shape, &mom));
+
+ TensorShape lr_shape = ctx->InputShape("lr");
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape),
errors::InvalidArgument("lr is not a scalar: ",
lr_shape.DebugString()));
- TensorShape rho_shape = ctx->InputShape(4);
+ TensorShape rho_shape = ctx->InputShape("rho");
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(rho_shape),
errors::InvalidArgument("rho is not a scalar: ",
rho_shape.DebugString()));
- TensorShape momentum_shape = ctx->InputShape(5);
+ TensorShape momentum_shape = ctx->InputShape("momentum");
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(momentum_shape),
errors::InvalidArgument("momentum is not a scalar: ",
momentum_shape.DebugString()));
- TensorShape epsilon_shape = ctx->InputShape(6);
+ TensorShape epsilon_shape = ctx->InputShape("epsilon");
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(epsilon_shape),
errors::InvalidArgument("epsilon is not a scalar: ",
epsilon_shape.DebugString()));
- TensorShape grad_shape = ctx->InputShape(7);
+ TensorShape grad_shape = ctx->InputShape("grad");
// var should be the same shape as mom and ms.
OP_REQUIRES(ctx, var_shape.IsSameSize(ms_shape),
@@ -297,11 +551,11 @@ class ResourceApplyRMSProp : public XlaOpKernel {
"var and grad do not have the same shape",
var_shape.DebugString(), " ", grad_shape.DebugString()));
- xla::XlaOp lr = ctx->Input(3);
- xla::XlaOp rho = ctx->Input(4);
- xla::XlaOp momentum = ctx->Input(5);
- xla::XlaOp epsilon = ctx->Input(6);
- xla::XlaOp grad = ctx->Input(7);
+ xla::XlaOp lr = ctx->Input("lr");
+ xla::XlaOp rho = ctx->Input("rho");
+ xla::XlaOp momentum = ctx->Input("momentum");
+ xla::XlaOp epsilon = ctx->Input("epsilon");
+ xla::XlaOp grad = ctx->Input("grad");
// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
@@ -320,25 +574,46 @@ class ResourceApplyRMSProp : public XlaOpKernel {
// ms <- grad**2 * (1 - rho) + ms * rho
//
// Which is the equation listed above.
- xla::XlaOp new_ms = b->Add(
- ms,
- b->Mul(b->Sub(b->Pow(grad, XlaHelpers::FloatLiteral(b, type, 2.0)), ms),
- b->Sub(XlaHelpers::FloatLiteral(b, type, 1.0), rho)));
- xla::XlaOp new_mom =
- b->Add(b->Mul(mom, momentum),
- b->Mul(b->Mul(grad, lr),
- b->Pow(b->Add(new_ms, epsilon),
- XlaHelpers::FloatLiteral(b, type, -0.5))));
- xla::XlaOp new_var = b->Sub(var, new_mom);
-
- OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, new_var));
- OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, type, new_ms));
- OP_REQUIRES_OK(ctx, ctx->AssignVariable(2, type, new_mom));
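+ // For the centered variant (ResourceApplyCenteredRMSProp below), mg
+ // additionally tracks a moving average of the gradients,
+ //   mg <- rho * mg_{t-1} + (1 - rho) * grad
+ // and new_ms - mg^2 (an estimate of the gradient variance) replaces
+ // new_ms in the denominator.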
+ xla::XlaOp one = xla::ScalarLike(ms, 1.0);
+ xla::XlaOp new_ms = xla::Square(grad) * (one - rho) + ms * rho;
+ xla::XlaOp denominator;
+ if (centered_) {
+ mg = grad * (one - rho) + mg * rho;
+ denominator = new_ms - xla::Square(mg) + epsilon;
+ } else {
+ denominator = new_ms + epsilon;
+ }
+ xla::XlaOp new_mom = mom * momentum + grad * lr * xla::Rsqrt(denominator);
+ xla::XlaOp new_var = var - new_mom;
+
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable("var", dtype_, new_var));
+ if (centered_) {
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable("mg", dtype_, mg));
+ }
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable("ms", dtype_, new_ms));
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable("mom", dtype_, new_mom));
}
+
+ protected:
+ bool centered_ = false;
+
+ private:
+ DataType dtype_;
};
REGISTER_XLA_OP(Name("ResourceApplyRMSProp").TypeConstraint("T", kFloatTypes),
ResourceApplyRMSProp);
+class ResourceApplyCenteredRMSProp : public ResourceApplyRMSProp {
+ public:
+ explicit ResourceApplyCenteredRMSProp(OpKernelConstruction* ctx)
+ : ResourceApplyRMSProp(ctx) {
+ centered_ = true;
+ }
+};
+REGISTER_XLA_OP(
+ Name("ResourceApplyCenteredRMSProp").TypeConstraint("T", kFloatTypes),
+ ResourceApplyCenteredRMSProp);
+
void CompileFtrl(XlaOpKernelContext* ctx, DataType dtype,
bool has_l2_shrinkage) {
xla::XlaBuilder* b = ctx->builder();
@@ -424,21 +699,18 @@ void CompileFtrl(XlaOpKernelContext* ctx, DataType dtype,
xla::XlaOp two = XlaHelpers::FloatLiteral(b, dtype, 2.0);
xla::XlaOp grad_to_use;
if (has_l2_shrinkage) {
- grad_to_use = b->Add(grad, b->Mul(two, b->Mul(l2_shrinkage, var)));
+ grad_to_use = grad + two * l2_shrinkage * var;
} else {
grad_to_use = grad;
}
- xla::XlaOp new_accum = b->Add(accum, b->Pow(grad_to_use, two));
- xla::XlaOp new_accum_lr_pow = b->Pow(new_accum, b->Neg(lr_power));
- xla::XlaOp accum_lr_pow = b->Pow(accum, b->Neg(lr_power));
- linear = b->Add(
- linear,
- b->Sub(grad_to_use,
- b->Mul(b->Div(b->Sub(new_accum_lr_pow, accum_lr_pow), lr), var)));
- xla::XlaOp linear_clipped = b->Clamp(b->Neg(l1), linear, l1);
- xla::XlaOp quadratic = b->Add(b->Div(new_accum_lr_pow, lr), b->Mul(two, l2));
- var = b->Div(b->Sub(linear_clipped, linear), quadratic);
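+ // FTRL-Proximal update (McMahan et al., "Ad Click Prediction: a View
+ // from the Trenches", KDD 2013):
+ //   new_accum <- accum + grad_to_use^2
+ //   linear <- linear + grad_to_use
+ //             - (new_accum^(-lr_power) - accum^(-lr_power)) / lr * var
+ //   var <- (clamp(linear, -l1, l1) - linear)
+ //          / (new_accum^(-lr_power) / lr + 2 * l2)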
+ xla::XlaOp new_accum = accum + xla::Square(grad_to_use);
+ xla::XlaOp new_accum_lr_pow = xla::Pow(new_accum, -lr_power);
+ xla::XlaOp accum_lr_pow = xla::Pow(accum, -lr_power);
+ linear = linear + grad_to_use - (new_accum_lr_pow - accum_lr_pow) / lr * var;
+ xla::XlaOp linear_clipped = xla::Clamp(-l1, linear, l1);
+ xla::XlaOp quadratic = new_accum_lr_pow / lr + two * l2;
+ var = (linear_clipped - linear) / quadratic;
accum = new_accum;
OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype, var));
@@ -478,5 +750,176 @@ class ResourceApplyFtrlV2 : public XlaOpKernel {
REGISTER_XLA_OP(Name("ResourceApplyFtrlV2").TypeConstraint("T", kFloatTypes),
ResourceApplyFtrlV2);
+class ResourceApplyAdadelta : public XlaOpKernel {
+ public:
+ explicit ResourceApplyAdadelta(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_));
+ }
+
+ void Compile(XlaOpKernelContext* ctx) override {
+ TensorShape var_shape, accum_shape, accum_update_shape;
+ xla::XlaOp var, accum, accum_update;
+ OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &var_shape, &var));
+ OP_REQUIRES_OK(ctx,
+ ctx->ReadVariableInput(1, dtype_, &accum_shape, &accum));
+ OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(2, dtype_, &accum_update_shape,
+ &accum_update));
+
+ TensorShape lr_shape = ctx->InputShape(3);
+ TensorShape rho_shape = ctx->InputShape(4);
+ TensorShape epsilon_shape = ctx->InputShape(5);
+ TensorShape grad_shape = ctx->InputShape(6);
+
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape),
+ errors::InvalidArgument("lr is not a scalar: ",
+ lr_shape.DebugString()));
+
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(rho_shape),
+ errors::InvalidArgument("rho is not a scalar: ",
+ rho_shape.DebugString()));
+
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(epsilon_shape),
+ errors::InvalidArgument("epsilon is not a scalar: ",
+ epsilon_shape.DebugString()));
+
+ OP_REQUIRES(ctx, var_shape.IsSameSize(accum_shape),
+ errors::InvalidArgument(
+ "var and accum do not have the same shape",
+ var_shape.DebugString(), " ", accum_shape.DebugString()));
+
+ OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape),
+ errors::InvalidArgument(
+ "var and grad do not have the same shape",
+ var_shape.DebugString(), " ", grad_shape.DebugString()));
+
+ xla::XlaOp lr = ctx->Input(3);
+ xla::XlaOp rho = ctx->Input(4);
+ xla::XlaOp epsilon = ctx->Input(5);
+ xla::XlaOp grad = ctx->Input(6);
+
+ xla::XlaBuilder* b = ctx->builder();
+ xla::XlaOp neg_half = XlaHelpers::FloatLiteral(b, dtype_, -0.5);
+ xla::XlaOp half = XlaHelpers::FloatLiteral(b, dtype_, 0.5);
+ xla::XlaOp one = XlaHelpers::FloatLiteral(b, dtype_, 1.0);
+ xla::XlaOp two = XlaHelpers::FloatLiteral(b, dtype_, 2.0);
+
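+ // Adadelta update (Zeiler, "ADADELTA: An Adaptive Learning Rate
+ // Method", 2012):
+ //   accum <- rho * accum + (1 - rho) * grad^2
+ //   update <- sqrt(accum_update + epsilon) / sqrt(accum + epsilon) * grad
+ //   accum_update <- rho * accum_update + (1 - rho) * update^2
+ //   var <- var - lr * update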
+ accum = rho * accum + (one - rho) * xla::Pow(grad, two);
+ xla::XlaOp update = xla::Pow(accum_update + epsilon, half) *
+ xla::Pow(accum + epsilon, neg_half) * grad;
+ accum_update = rho * accum_update + (one - rho) * xla::Pow(update, two);
+ var = var - update * lr;
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var));
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, dtype_, accum));
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(2, dtype_, accum_update));
+ }
+
+ private:
+ DataType dtype_;
+};
+REGISTER_XLA_OP(Name("ResourceApplyAdadelta").TypeConstraint("T", kFloatTypes),
+ ResourceApplyAdadelta);
+
+class ResourceApplySignBase : public XlaOpKernel {
+ public:
+ explicit ResourceApplySignBase(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_));
+ }
+
+ void Compile(XlaOpKernelContext* ctx) override {
+ TensorShape var_shape, m_shape;
+ xla::XlaOp var, m;
+ OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &var_shape, &var));
+ OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(1, dtype_, &m_shape, &m));
+ OP_REQUIRES(ctx, var_shape.IsSameSize(m_shape),
+ errors::InvalidArgument("var and m do not have the same shape",
+ var_shape.DebugString(), " ",
+ m_shape.DebugString()));
+ TensorShape grad_shape = ctx->InputShape(6);
+ OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape),
+ errors::InvalidArgument(
+ "var and grad do not have the same shape",
+ var_shape.DebugString(), " ", grad_shape.DebugString()));
+ CheckScalarParams(ctx);
+
+ xla::XlaOp lr = ctx->Input(2);
+ xla::XlaOp alpha = ctx->Input(3);
+ xla::XlaOp sign_decay = ctx->Input(4);
+ xla::XlaOp beta = ctx->Input(5);
+ xla::XlaOp grad = ctx->Input(6);
+
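+ // Shared AddSign/PowerSign update (Bello et al., "Neural Optimizer
+ // Search with Reinforcement Learning", 2017):
+ //   m <- beta * m + (1 - beta) * grad
+ //   decay <- sign(grad) * sign(m) * sign_decay
+ //   var <- var - lr * scale(alpha, decay) * grad
+ // where scale(alpha, decay) is alpha + decay for AddSign and
+ // exp(alpha * decay) for PowerSign (whose third input is `logbase`).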
+ m = m * beta + grad * (xla::ScalarLike(beta, 1.0) - beta);
+ xla::XlaOp decay = xla::Sign(grad) * xla::Sign(m) * sign_decay;
+
+ xla::XlaOp grad_scale = ComputeGradientScale(alpha, decay);
+ var = var - lr * grad_scale * grad;
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var));
+ OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, dtype_, m));
+ }
+
+ virtual void CheckScalarParams(XlaOpKernelContext* ctx) {
+ TensorShape lr_shape = ctx->InputShape(2);
+ TensorShape sign_decay_shape = ctx->InputShape(4);
+ TensorShape beta_shape = ctx->InputShape(5);
+
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape),
+ errors::InvalidArgument("lr is not a scalar: ",
+ lr_shape.DebugString()));
+
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(sign_decay_shape),
+ errors::InvalidArgument("sign_decay is not a scalar: ",
+ sign_decay_shape.DebugString()));
+
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(beta_shape),
+ errors::InvalidArgument("beta is not a scalar: ",
+ beta_shape.DebugString()));
+ }
+
+ virtual xla::XlaOp ComputeGradientScale(xla::XlaOp alpha,
+ xla::XlaOp decay) = 0;
+
+ private:
+ DataType dtype_;
+};
+
+class ResourceApplyAddSign : public ResourceApplySignBase {
+ public:
+ explicit ResourceApplyAddSign(OpKernelConstruction* ctx)
+ : ResourceApplySignBase(ctx) {}
+
+ void CheckScalarParams(XlaOpKernelContext* ctx) override {
+ ResourceApplySignBase::CheckScalarParams(ctx);
+ TensorShape alpha_shape = ctx->InputShape(3);
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(alpha_shape),
+ errors::InvalidArgument("alpha is not a scalar: ",
+ alpha_shape.DebugString()));
+ }
+
+ xla::XlaOp ComputeGradientScale(xla::XlaOp alpha, xla::XlaOp decay) override {
+ return alpha + decay;
+ }
+};
+REGISTER_XLA_OP(Name("ResourceApplyAddSign").TypeConstraint("T", kFloatTypes),
+ ResourceApplyAddSign);
+
+class ResourceApplyPowerSign : public ResourceApplySignBase {
+ public:
+ explicit ResourceApplyPowerSign(OpKernelConstruction* ctx)
+ : ResourceApplySignBase(ctx) {}
+
+ void CheckScalarParams(XlaOpKernelContext* ctx) override {
+ ResourceApplySignBase::CheckScalarParams(ctx);
+ TensorShape logbase_shape = ctx->InputShape(3);
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(logbase_shape),
+ errors::InvalidArgument("logbase is not a scalar: ",
+ logbase_shape.DebugString()));
+ }
+
+ xla::XlaOp ComputeGradientScale(xla::XlaOp alpha, xla::XlaOp decay) override {
+ return xla::Exp(alpha * decay);
+ }
+};
+REGISTER_XLA_OP(Name("ResourceApplyPowerSign").TypeConstraint("T", kFloatTypes),
+ ResourceApplyPowerSign);
+
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/kernels/transpose_op.cc b/tensorflow/compiler/tf2xla/kernels/transpose_op.cc
index ef5aae81a8..6c721c48fe 100644
--- a/tensorflow/compiler/tf2xla/kernels/transpose_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/transpose_op.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/kernels/bounds_check.h"
@@ -84,12 +85,12 @@ class TransposeOp : public XlaOpKernel {
if (dims <= 1 || is_identity) {
transposed = ctx->Input(0);
} else {
- transposed = ctx->builder()->Transpose(ctx->Input(0), transposed_order);
+ transposed = xla::Transpose(ctx->Input(0), transposed_order);
}
// Conjugate the transposed result if this is ConjugateTransposeOp.
if (conjugate_) {
- ctx->SetOutput(0, ctx->builder()->Conj(transposed));
+ ctx->SetOutput(0, xla::Conj(transposed));
} else {
ctx->SetOutput(0, transposed);
}
@@ -146,7 +147,7 @@ class InvertPermutationOp : public XlaOpKernel {
output[d] = i;
}
- ctx->SetOutput(0, ctx->builder()->ConstantR1<int32>(output));
+ ctx->SetOutput(0, xla::ConstantR1<int32>(ctx->builder(), output));
}
};
diff --git a/tensorflow/compiler/tf2xla/kernels/unary_ops.cc b/tensorflow/compiler/tf2xla/kernels/unary_ops.cc
index a39e5dcfc5..4bb31f4117 100644
--- a/tensorflow/compiler/tf2xla/kernels/unary_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/unary_ops.cc
@@ -21,21 +21,21 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/client_library.h"
#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/math.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
namespace tensorflow {
namespace {
-// A subclass of a TlaUnaryOp must build the lambda computation that
-// describes the scalar->scalar function to apply to each element of
-// the input.
#define XLAJIT_MAKE_UNARY(NAME, COMPUTATION) \
class NAME##Op : public XlaOpKernel { \
public: \
explicit NAME##Op(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} \
void Compile(XlaOpKernelContext* ctx) { \
xla::XlaBuilder* b = ctx->builder(); \
+ (void)b; \
xla::XlaOp x = ctx->Input(0); \
xla::XlaOp y = COMPUTATION; \
ctx->SetOutput(0, y); \
@@ -43,122 +43,100 @@ namespace {
}; \
REGISTER_XLA_OP(Name(#NAME), NAME##Op);
-XLAJIT_MAKE_UNARY(ComplexAbs, b->Abs(x));
+XLAJIT_MAKE_UNARY(ComplexAbs, xla::Abs(x));
-XLAJIT_MAKE_UNARY(Angle, b->Atan2(b->Imag(x), b->Real(x)));
+XLAJIT_MAKE_UNARY(Angle, xla::Atan2(xla::Imag(x), xla::Real(x)));
-XLAJIT_MAKE_UNARY(Conj, b->Conj(x));
+XLAJIT_MAKE_UNARY(Conj, xla::Conj(x));
// Return x if x>0, otherwise -x.
-XLAJIT_MAKE_UNARY(Abs, b->Abs(x));
+XLAJIT_MAKE_UNARY(Abs, xla::Abs(x));
// acos(x) = 2 * atan(sqrt(1 - x^2) / (1 + x))
-XLAJIT_MAKE_UNARY(
- Acos,
- b->Mul(XlaHelpers::FloatLiteral(b, input_type(0), 2.0),
- b->Atan2(b->Pow(b->Sub(XlaHelpers::One(b, input_type(0)),
- b->Mul(x, x)),
- XlaHelpers::FloatLiteral(b, input_type(0), 0.5)),
- b->Add(XlaHelpers::One(b, input_type(0)), x))));
+XLAJIT_MAKE_UNARY(Acos,
+ xla::ScalarLike(x, 2.0) *
+ xla::Atan2(xla::Sqrt(xla::ScalarLike(x, 1.0) - x * x),
+ xla::ScalarLike(x, 1.0) + x));
// acosh(x) = log(x + sqrt(x^2 - 1))
// = log(x + sqrt((x+1)*(x-1)))
-XLAJIT_MAKE_UNARY(
- Acosh,
- b->Log(b->Add(x,
- b->Pow(b->Mul(b->Add(x, XlaHelpers::One(b, input_type(0))),
- b->Sub(x, XlaHelpers::One(b, input_type(0)))),
- XlaHelpers::FloatLiteral(b, input_type(0), 0.5)))));
+XLAJIT_MAKE_UNARY(Acosh,
+ xla::Log(x + xla::Sqrt((x + xla::ScalarLike(x, 1.0)) *
+ (x - xla::ScalarLike(x, 1.0)))));
// asin(x) = 2 * atan(x / (1 + sqrt(1 - x^2)))
XLAJIT_MAKE_UNARY(
- Asin,
- b->Mul(XlaHelpers::FloatLiteral(b, input_type(0), 2.0),
- b->Atan2(x, b->Add(XlaHelpers::One(b, input_type(0)),
- b->Pow(b->Sub(XlaHelpers::One(b, input_type(0)),
- b->Mul(x, x)),
- XlaHelpers::FloatLiteral(b, input_type(0),
- 0.5))))));
+ Asin, xla::ScalarLike(x, 2.0) *
+ xla::Atan2(x, xla::ScalarLike(x, 1.0) +
+ xla::Sqrt(xla::ScalarLike(x, 1.0) - x * x)));
// asinh(x) = log(x + sqrt(x^2 + 1))
-XLAJIT_MAKE_UNARY(
- Asinh,
- b->Log(b->Add(x, b->Pow(b->Add(b->Mul(x, x),
- XlaHelpers::One(b, input_type(0))),
- XlaHelpers::FloatLiteral(b, input_type(0), 0.5)))));
+XLAJIT_MAKE_UNARY(Asinh,
+ xla::Log(x + xla::Sqrt(x * x + xla::ScalarLike(x, 1.0))));
-XLAJIT_MAKE_UNARY(Atan, b->Atan2(x, XlaHelpers::One(b, input_type(0))));
+XLAJIT_MAKE_UNARY(Atan, xla::Atan2(x, xla::ScalarLike(x, 1.0)));
// atanh(x) = 0.5 * log((1 + x) / (1 - x))
+XLAJIT_MAKE_UNARY(Atanh, xla::Log((xla::ScalarLike(x, 1.0) + x) /
+ (xla::ScalarLike(x, 1.0) - x)) *
+ xla::ScalarLike(x, 0.5));
+XLAJIT_MAKE_UNARY(Ceil, xla::Ceil(x));
+XLAJIT_MAKE_UNARY(Cos, xla::Cos(x));
+XLAJIT_MAKE_UNARY(Cosh, (xla::Exp(x) + xla::Exp(-x)) * xla::ScalarLike(x, 0.5));
+XLAJIT_MAKE_UNARY(Sin, xla::Sin(x));
+XLAJIT_MAKE_UNARY(Exp, xla::Exp(x));
+
+XLAJIT_MAKE_UNARY(Expm1, xla::Expm1(x));
+
+XLAJIT_MAKE_UNARY(Floor, xla::Floor(x));
+XLAJIT_MAKE_UNARY(IsFinite, xla::IsFinite(x));
XLAJIT_MAKE_UNARY(
- Atanh, b->Mul(b->Log(b->Div(b->Add(XlaHelpers::One(b, input_type(0)), x),
- b->Sub(XlaHelpers::One(b, input_type(0)), x))),
- XlaHelpers::FloatLiteral(b, input_type(0), 0.5)));
-XLAJIT_MAKE_UNARY(Ceil, b->Ceil(x));
-XLAJIT_MAKE_UNARY(Cos, b->Cos(x));
-XLAJIT_MAKE_UNARY(Cosh,
- b->Mul(b->Add(b->Exp(x), b->Exp(b->Neg(x))),
- XlaHelpers::FloatLiteral(b, input_type(0), 0.5)));
-XLAJIT_MAKE_UNARY(Sin, b->Sin(x));
-XLAJIT_MAKE_UNARY(Exp, b->Exp(x));
-
-XLAJIT_MAKE_UNARY(Expm1, b->Expm1(x));
-
-XLAJIT_MAKE_UNARY(Floor, b->Floor(x));
-XLAJIT_MAKE_UNARY(IsFinite, b->IsFinite(x));
-XLAJIT_MAKE_UNARY(IsInf, b->Eq(b->Abs(x),
- XlaHelpers::FloatLiteral(
- b, input_type(0),
- std::numeric_limits<double>::infinity())));
-XLAJIT_MAKE_UNARY(IsNan, b->Ne(x, x));
+ IsInf,
+ xla::Eq(xla::Abs(x),
+ xla::ScalarLike(x, std::numeric_limits<double>::infinity())));
+XLAJIT_MAKE_UNARY(IsNan, xla::Ne(x, x));
// Return 1/x
-XLAJIT_MAKE_UNARY(Inv, b->Div(XlaHelpers::One(b, input_type(0)), x));
-XLAJIT_MAKE_UNARY(Reciprocal, b->Div(XlaHelpers::One(b, input_type(0)), x));
-XLAJIT_MAKE_UNARY(Log, b->Log(x));
+XLAJIT_MAKE_UNARY(Inv, xla::ScalarLike(x, 1.0) / x);
+XLAJIT_MAKE_UNARY(Reciprocal, xla::ScalarLike(x, 1.0) / x);
+XLAJIT_MAKE_UNARY(Log, xla::Log(x));
-XLAJIT_MAKE_UNARY(Log1p, b->Log1p(x));
+XLAJIT_MAKE_UNARY(Log1p, xla::Log1p(x));
-XLAJIT_MAKE_UNARY(Invert, b->Not(x));
-XLAJIT_MAKE_UNARY(LogicalNot, b->Not(x));
-XLAJIT_MAKE_UNARY(Neg, b->Neg(x));
+XLAJIT_MAKE_UNARY(Invert, xla::Not(x));
+XLAJIT_MAKE_UNARY(LogicalNot, xla::Not(x));
+XLAJIT_MAKE_UNARY(Neg, -x);
// Implements banker's rounding: numbers that are equidistant from two
// integers are rounded towards even.
-static xla::XlaOp Round(xla::XlaBuilder* b, DataType dtype,
- const xla::XlaOp& x) {
- auto half = XlaHelpers::FloatLiteral(b, dtype, 0.5);
- auto one = XlaHelpers::FloatLiteral(b, dtype, 1.0);
- auto two = XlaHelpers::FloatLiteral(b, dtype, 2.0);
-
- auto round_val = b->Floor(x);
- auto fraction = b->Sub(x, round_val);
- auto nearest_even_int =
- b->Sub(round_val, b->Mul(two, b->Floor(b->Mul(half, x))));
- auto is_odd = b->Eq(nearest_even_int, one);
- return b->Select(
- b->Or(b->Gt(fraction, half), b->And(b->Eq(fraction, half), is_odd)),
- b->Add(round_val, one), round_val);
+xla::XlaOp RoundToEven(xla::XlaOp x) {
+ auto half = xla::ScalarLike(x, 0.5);
+ auto one = xla::ScalarLike(x, 1.0);
+ auto two = xla::ScalarLike(x, 2.0);
+
+ auto round_val = xla::Floor(x);
+ auto fraction = x - round_val;
+ auto nearest_even_int = round_val - two * xla::Floor(half * x);
+ auto is_odd = xla::Eq(nearest_even_int, one);
+ return xla::Select(xla::Or(xla::Gt(fraction, half),
+ xla::And(xla::Eq(fraction, half), is_odd)),
+ round_val + one, round_val);
}
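+// For example, RoundToEven(0.5) == 0, RoundToEven(1.5) == 2, and
+// RoundToEven(2.5) == 2, while RoundToEven(0.6) == 1 as usual.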
-XLAJIT_MAKE_UNARY(Rint, Round(b, input_type(0), x));
-XLAJIT_MAKE_UNARY(Round, Round(b, input_type(0), x));
+XLAJIT_MAKE_UNARY(Rint, RoundToEven(x));
+XLAJIT_MAKE_UNARY(Round, RoundToEven(x));
-XLAJIT_MAKE_UNARY(Rsqrt,
- b->Pow(x, XlaHelpers::FloatLiteral(b, input_type(0), -0.5)));
+XLAJIT_MAKE_UNARY(Rsqrt, xla::Rsqrt(x));
// Expresses sigmoid as a rescaled tanh: sigmoid(x) == (tanh(x/2) + 1) / 2.
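+// This follows from tanh(x/2) = (1 - exp(-x)) / (1 + exp(-x)), so
+// (tanh(x/2) + 1) / 2 = 1 / (1 + exp(-x)) = sigmoid(x).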
-static xla::XlaOp Sigmoid(xla::XlaBuilder* b, DataType dtype,
- const xla::XlaOp& x) {
- auto half = XlaHelpers::FloatLiteral(b, dtype, 0.5);
- return b->Add(half, b->Mul(half, b->Tanh(b->Mul(half, x))));
+xla::XlaOp Sigmoid(xla::XlaOp x) {
+ auto half = xla::ScalarLike(x, 0.5);
+ return half + half * xla::Tanh(half * x);
}
-XLAJIT_MAKE_UNARY(Sigmoid, Sigmoid(b, input_type(0), x));
+XLAJIT_MAKE_UNARY(Sigmoid, Sigmoid(x));
// Returns 0 if x is 0, -1 if x < 0 and 1 if x > 0.
-XLAJIT_MAKE_UNARY(Sign, b->Sign(x));
-XLAJIT_MAKE_UNARY(Sinh,
- b->Mul(b->Sub(b->Exp(x), b->Exp(b->Neg(x))),
- XlaHelpers::FloatLiteral(b, input_type(0), 0.5)));
+XLAJIT_MAKE_UNARY(Sign, xla::Sign(x));
+XLAJIT_MAKE_UNARY(Sinh, (xla::Exp(x) - xla::Exp(-x)) * xla::ScalarLike(x, 0.5));
// softplus(x) = log(1 + exp(x))
//
@@ -168,22 +146,18 @@ XLAJIT_MAKE_UNARY(Sinh,
//
// This is equivalent to:
// max(x, 0) + log1p(exp(-abs(x)))
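+// The rewrite is numerically stable: for x > 0,
+// log(1 + exp(x)) = x + log(1 + exp(-x)), and for x <= 0 it is already
+// log1p(exp(x)), so the exponential never overflows.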
-XLAJIT_MAKE_UNARY(Softplus,
- b->Add(b->Max(x, XlaHelpers::Zero(b, input_type(0))),
- b->Log1p(b->Exp(b->Neg(b->Abs(x))))));
+XLAJIT_MAKE_UNARY(Softplus, xla::Max(x, xla::ScalarLike(x, 0.0)) +
+ xla::Log1p(xla::Exp(-xla::Abs(x))));
// softsign(x) = x / (abs(x) + 1)
-XLAJIT_MAKE_UNARY(Softsign,
- b->Div(x,
- b->Add(b->Abs(x), XlaHelpers::One(b, input_type(0)))));
-XLAJIT_MAKE_UNARY(Sqrt,
- b->Pow(x, XlaHelpers::FloatLiteral(b, input_type(0), 0.5)));
-XLAJIT_MAKE_UNARY(Square, b->Mul(x, x));
-XLAJIT_MAKE_UNARY(Tan, b->Div(b->Sin(x), b->Cos(x)));
-XLAJIT_MAKE_UNARY(Tanh, b->Tanh(x));
-
-XLAJIT_MAKE_UNARY(Real, b->Real(x));
-XLAJIT_MAKE_UNARY(Imag, b->Imag(x));
+XLAJIT_MAKE_UNARY(Softsign, x / (xla::Abs(x) + xla::ScalarLike(x, 1.0)));
+XLAJIT_MAKE_UNARY(Sqrt, xla::Sqrt(x));
+XLAJIT_MAKE_UNARY(Square, x * x);
+XLAJIT_MAKE_UNARY(Tan, xla::Sin(x) / xla::Cos(x));
+XLAJIT_MAKE_UNARY(Tanh, xla::Tanh(x));
+
+XLAJIT_MAKE_UNARY(Real, xla::Real(x));
+XLAJIT_MAKE_UNARY(Imag, xla::Imag(x));
#undef XLAJIT_MAKE_UNARY
@@ -193,17 +167,10 @@ class ErfOp : public XlaOpKernel {
public:
explicit ErfOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
- xla::XlaBuilder* b = ctx->builder();
- xla::PrimitiveType primitive_type;
- xla::XlaOp one = XlaHelpers::One(b, input_type(0));
xla::XlaOp x = ctx->Input(0);
- xla::XlaOp abs_x = b->Abs(x);
-
- OP_REQUIRES_OK(ctx,
- DataTypeToPrimitiveType(input_type(0), &primitive_type));
-
- auto y = b->Select(b->Gt(abs_x, one), b->Sub(one, Erfc(x, primitive_type)),
- Erf(x, primitive_type));
+ xla::XlaOp one = xla::ScalarLike(x, 1.0);
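+ // The Erf approximation is most accurate near zero and Erfc in the
+ // tails, so for |x| > 1 compute erf(x) as 1 - erfc(x).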
+ auto y =
+ xla::Select(xla::Gt(xla::Abs(x), one), one - xla::Erfc(x), xla::Erf(x));
ctx->SetOutput(0, y);
}
};
@@ -213,21 +180,60 @@ class ErfcOp : public XlaOpKernel {
public:
explicit ErfcOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
- xla::XlaBuilder* b = ctx->builder();
- xla::XlaOp one = XlaHelpers::One(b, input_type(0));
xla::XlaOp x = ctx->Input(0);
- xla::XlaOp abs_x = b->Abs(x);
-
- xla::PrimitiveType primitive_type;
- OP_REQUIRES_OK(ctx,
- DataTypeToPrimitiveType(input_type(0), &primitive_type));
-
- auto y = b->Select(b->Lt(abs_x, one), b->Sub(one, Erf(x, primitive_type)),
- Erfc(x, primitive_type));
+ xla::XlaOp one = xla::ScalarLike(x, 1.0);
+ auto y =
+ xla::Select(xla::Lt(xla::Abs(x), one), one - xla::Erf(x), xla::Erfc(x));
ctx->SetOutput(0, y);
}
};
REGISTER_XLA_OP(Name("Erfc"), ErfcOp);
+class LgammaOp : public XlaOpKernel {
+ public:
+ explicit LgammaOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
+ // Calculate lgamma using the Lanczos approximation
+ // (https://en.wikipedia.org/wiki/Lanczos_approximation).
+ void Compile(XlaOpKernelContext* ctx) override {
+ xla::XlaOp input = ctx->Input(0);
+ xla::PrimitiveType input_type = ctx->input_xla_type(0);
+
+ if (input_type == xla::F16 || input_type == xla::BF16) {
+ // The approximation works better with at least 32-bits of accuracy.
+ xla::XlaOp input_f32 = xla::ConvertElementType(input, xla::F32);
+ xla::XlaOp result_f32 = xla::Lgamma(input_f32);
+ xla::XlaOp result_x16 = xla::ConvertElementType(result_f32, input_type);
+ ctx->SetOutput(0, result_x16);
+ } else {
+ xla::XlaOp result = xla::Lgamma(input);
+ ctx->SetOutput(0, result);
+ }
+ }
+};
+REGISTER_XLA_OP(Name("Lgamma"), LgammaOp);
+
+class DigammaOp : public XlaOpKernel {
+ public:
+ explicit DigammaOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
+ // Calculate digamma using the Lanczos approximation
+ // (https://en.wikipedia.org/wiki/Lanczos_approximation).
+ void Compile(XlaOpKernelContext* ctx) override {
+ xla::XlaOp input = ctx->Input(0);
+ xla::PrimitiveType input_type = ctx->input_xla_type(0);
+
+ if (input_type == xla::F16 || input_type == xla::BF16) {
+ // The approximation works better with at least 32-bits of accuracy.
+ xla::XlaOp input_f32 = xla::ConvertElementType(input, xla::F32);
+ xla::XlaOp result_f32 = xla::Digamma(input_f32);
+ xla::XlaOp result_x16 = xla::ConvertElementType(result_f32, input_type);
+ ctx->SetOutput(0, result_x16);
+ } else {
+ xla::XlaOp result = xla::Digamma(input);
+ ctx->SetOutput(0, result);
+ }
+ }
+};
+REGISTER_XLA_OP(Name("Digamma"), DigammaOp);
+
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/kernels/unpack_op.cc b/tensorflow/compiler/tf2xla/kernels/unpack_op.cc
index f87586ba57..f951127bb9 100644
--- a/tensorflow/compiler/tf2xla/kernels/unpack_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/unpack_op.cc
@@ -22,7 +22,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
@@ -74,10 +75,9 @@ class UnpackOp : public XlaOpKernel {
for (int i = 0; i < num; ++i) {
start_indices[axis] = i;
limit_indices[axis] = i + 1;
- auto slice = ctx->builder()->Slice(input, start_indices, limit_indices,
- strides);
+ auto slice = xla::Slice(input, start_indices, limit_indices, strides);
// Reshape to drop the 'axis' dimension.
- auto result = ctx->builder()->Reshape(slice, output_shape.dim_sizes());
+ auto result = xla::Reshape(slice, output_shape.dim_sizes());
ctx->SetOutput(i, result);
}
}
diff --git a/tensorflow/compiler/tf2xla/kernels/variable_ops.cc b/tensorflow/compiler/tf2xla/kernels/variable_ops.cc
index ad51396bdf..bb27b5d56f 100644
--- a/tensorflow/compiler/tf2xla/kernels/variable_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/variable_ops.cc
@@ -20,7 +20,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/types.h"
@@ -33,8 +33,8 @@ class VarIsInitializedOp : public XlaOpKernel {
void Compile(XlaOpKernelContext* ctx) override {
XlaResource* variable;
OP_REQUIRES_OK(ctx, ctx->GetResourceInput(0, &variable));
- ctx->SetOutput(0,
- ctx->builder()->ConstantR0<bool>(variable->initialized()));
+ ctx->SetOutput(
+ 0, xla::ConstantR0<bool>(ctx->builder(), variable->initialized()));
}
};
REGISTER_XLA_OP(Name("VarIsInitializedOp"), VarIsInitializedOp);
@@ -96,7 +96,7 @@ class AssignAddVariableOp : public XlaOpKernel {
xla::XlaOp handle;
OP_REQUIRES_OK(ctx,
ctx->ReadVariableInput(0, type, /*shape=*/nullptr, &handle));
- handle = ctx->builder()->Add(handle, ctx->Input(1));
+ handle = xla::Add(handle, ctx->Input(1));
OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, handle));
}
};
@@ -112,7 +112,7 @@ class AssignSubVariableOp : public XlaOpKernel {
xla::XlaOp handle;
OP_REQUIRES_OK(ctx,
ctx->ReadVariableInput(0, type, /*shape=*/nullptr, &handle));
- handle = ctx->builder()->Sub(handle, ctx->Input(1));
+ handle = xla::Sub(handle, ctx->Input(1));
OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, handle));
}
};
@@ -191,7 +191,7 @@ class ResourceScatterAddOp : public ResourceScatterOp {
private:
static xla::XlaOp Combine(const xla::XlaOp& x, const xla::XlaOp& y,
xla::XlaBuilder* builder) {
- return builder->Add(x, y);
+ return xla::Add(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterAdd"), ResourceScatterAddOp);
@@ -204,7 +204,7 @@ class ResourceScatterSubOp : public ResourceScatterOp {
private:
static xla::XlaOp Combine(const xla::XlaOp& x, const xla::XlaOp& y,
xla::XlaBuilder* builder) {
- return builder->Sub(x, y);
+ return xla::Sub(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterSub"), ResourceScatterSubOp);
@@ -217,7 +217,7 @@ class ResourceScatterMulOp : public ResourceScatterOp {
private:
static xla::XlaOp Combine(const xla::XlaOp& x, const xla::XlaOp& y,
xla::XlaBuilder* builder) {
- return builder->Mul(x, y);
+ return xla::Mul(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterMul"), ResourceScatterMulOp);
@@ -230,7 +230,7 @@ class ResourceScatterDivOp : public ResourceScatterOp {
private:
static xla::XlaOp Combine(const xla::XlaOp& x, const xla::XlaOp& y,
xla::XlaBuilder* builder) {
- return builder->Div(x, y);
+ return xla::Div(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterDiv"), ResourceScatterDivOp);
@@ -243,7 +243,7 @@ class ResourceScatterMinOp : public ResourceScatterOp {
private:
static xla::XlaOp Combine(const xla::XlaOp& x, const xla::XlaOp& y,
xla::XlaBuilder* builder) {
- return builder->Min(x, y);
+ return xla::Min(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterMin"), ResourceScatterMinOp);
@@ -256,7 +256,7 @@ class ResourceScatterMaxOp : public ResourceScatterOp {
private:
static xla::XlaOp Combine(const xla::XlaOp& x, const xla::XlaOp& y,
xla::XlaBuilder* builder) {
- return builder->Max(x, y);
+ return xla::Max(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterMax"), ResourceScatterMaxOp);
@@ -286,7 +286,7 @@ class ResourceScatterNdAddOp : public ResourceScatterOp {
private:
static xla::XlaOp Combine(const xla::XlaOp& x, const xla::XlaOp& y,
xla::XlaBuilder* builder) {
- return builder->Add(x, y);
+ return xla::Add(x, y);
}
};
REGISTER_XLA_OP(Name("ResourceScatterNdAdd"), ResourceScatterNdAddOp);
diff --git a/tensorflow/compiler/tf2xla/kernels/while_op.cc b/tensorflow/compiler/tf2xla/kernels/while_op.cc
index 5467c5d994..9413a30a6c 100644
--- a/tensorflow/compiler/tf2xla/kernels/while_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/while_op.cc
@@ -22,7 +22,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -246,7 +246,7 @@ void XlaWhileOp::Compile(XlaOpKernelContext* ctx) {
}
}
- xla::XlaOp init = builder->Tuple(inputs);
+ xla::XlaOp init = xla::Tuple(builder, inputs);
VLOG(1) << "Building while loop";
@@ -255,22 +255,21 @@ void XlaWhileOp::Compile(XlaOpKernelContext* ctx) {
{
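+ // xla::While requires the condition computation to return a scalar
+ // PRED, but the TF cond function returns a tuple; wrap it and extract
+ // element 0.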
std::unique_ptr<xla::XlaBuilder> cb =
builder->CreateSubBuilder("cond_wrapper");
- auto inputs = cb->Parameter(0, cond_input_shape, "inputs");
- auto outputs = cb->Call(*cond.computation, {inputs});
- cb->GetTupleElement(outputs, 0);
+ auto inputs = xla::Parameter(cb.get(), 0, cond_input_shape, "inputs");
+ auto outputs = xla::Call(cb.get(), *cond.computation, {inputs});
+ xla::GetTupleElement(outputs, 0);
xla::StatusOr<xla::XlaComputation> result = cb->Build();
OP_REQUIRES_OK(ctx, result.status());
cond_wrapper = std::move(result.ValueOrDie());
}
- xla::XlaOp while_result =
- builder->While(cond_wrapper, *body.computation, init);
+ xla::XlaOp while_result = xla::While(cond_wrapper, *body.computation, init);
// Sets non-variable outputs.
for (int i = 0; i < ctx->num_outputs(); ++i) {
if (ctx->input_type(i) != DT_RESOURCE) {
ctx->SetOutput(body.input_mapping[i],
- builder->GetTupleElement(while_result, i));
+ xla::GetTupleElement(while_result, i));
}
}
@@ -284,7 +283,7 @@ void XlaWhileOp::Compile(XlaOpKernelContext* ctx) {
OP_REQUIRES_OK(ctx,
resource->SetFromPack(
arguments[update.input_index].tensor_array_gradients,
- builder->GetTupleElement(while_result, pos), builder));
+ xla::GetTupleElement(while_result, pos), builder));
}
VLOG(2) << "Loop-carried variable: pos: " << update.input_index
<< " name: " << resource->name() << " modified: " << update.modified
diff --git a/tensorflow/compiler/tf2xla/legacy_flags/backend_registration_flags.cc b/tensorflow/compiler/tf2xla/legacy_flags/backend_registration_flags.cc
new file mode 100644
index 0000000000..661505021f
--- /dev/null
+++ b/tensorflow/compiler/tf2xla/legacy_flags/backend_registration_flags.cc
@@ -0,0 +1,63 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for the XLA bridge's backend registration modules.
+
+#include <mutex> // NOLINT
+#include <vector>
+
+#include "tensorflow/compiler/tf2xla/legacy_flags/backend_registration_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace tensorflow {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static BackendRegistrationFlags* flags;
+static std::vector<Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new BackendRegistrationFlags;
+ flags->tf_enable_prng_ops_gpu = false;
+ flag_list = new std::vector<Flag>({
+ Flag("tf_enable_prng_ops_gpu", &flags->tf_enable_prng_ops_gpu,
+ "Whether to enable PRNG ops: [RandomStandardNormal | RandomUniform "
+ "| RandomUniformInt | TruncatedNormal] on GPU."),
+ });
+ xla::legacy_flags::ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with the XLA bridge's
+// backend registration modules.
+void AppendBackendRegistrationFlags(std::vector<Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the BackendRegistrationFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+BackendRegistrationFlags* GetBackendRegistrationFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/legacy_flags/backend_registration_flags.h b/tensorflow/compiler/tf2xla/legacy_flags/backend_registration_flags.h
new file mode 100644
index 0000000000..861c923dd5
--- /dev/null
+++ b/tensorflow/compiler/tf2xla/legacy_flags/backend_registration_flags.h
@@ -0,0 +1,49 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_TF2XLA_LEGACY_FLAGS_BACKEND_REGISTRATION_FLAGS_H_
+#define TENSORFLOW_COMPILER_TF2XLA_LEGACY_FLAGS_BACKEND_REGISTRATION_FLAGS_H_
+
+// Legacy flags for the XLA bridge's backend registration modules.
+
+#include <vector>
+
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace tensorflow {
+namespace legacy_flags {
+
+// Append to *append_to flag definitions associated with the XLA bridge's
+// backend registration modules.
+void AppendBackendRegistrationFlags(std::vector<tensorflow::Flag>* append_to);
+
+// The values of flags associated with the XLA bridge's backend registration
+// module.
+typedef struct {
+ // Whether to enable the PRNG ops (RandomStandardNormal, RandomUniform,
+ // RandomUniformInt, TruncatedNormal) on the GPU backend.
+ // TODO(b/32333178): Remove this flag or set its default to true.
+ bool tf_enable_prng_ops_gpu;
+} BackendRegistrationFlags;
+
+// Return a pointer to the BackendRegistrationFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+BackendRegistrationFlags* GetBackendRegistrationFlags();
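+//
+// Example use (hypothetical call site):
+//   if (tensorflow::legacy_flags::GetBackendRegistrationFlags()
+//           ->tf_enable_prng_ops_gpu) {
+//     // ... register the GPU PRNG kernels ...
+//   }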
+
+} // namespace legacy_flags
+} // namespace tensorflow
+
+#endif // TENSORFLOW_COMPILER_TF2XLA_LEGACY_FLAGS_BACKEND_REGISTRATION_FLAGS_H_
diff --git a/tensorflow/compiler/tf2xla/lib/BUILD b/tensorflow/compiler/tf2xla/lib/BUILD
index 04c600698c..30039e256a 100644
--- a/tensorflow/compiler/tf2xla/lib/BUILD
+++ b/tensorflow/compiler/tf2xla/lib/BUILD
@@ -40,10 +40,11 @@ cc_library(
":triangular_solve",
":util",
":while_loop",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
+ "//tensorflow/compiler/xla/client/lib:constants",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/compiler/xla/client/xla_client:xla_computation",
"//tensorflow/core:lib",
@@ -58,20 +59,43 @@ cc_library(
"//tensorflow/compiler/tf2xla:xla_compiler",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
- "//tensorflow/compiler/xla/client/lib:arithmetic",
+ "//tensorflow/compiler/xla/client/lib:constants",
+ "//tensorflow/compiler/xla/client/lib:math",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/core:protos_all_cc",
],
)
cc_library(
+ name = "qr",
+ srcs = ["qr.cc"],
+ hdrs = ["qr.h"],
+ deps = [
+ ":batch_dot",
+ ":util",
+ ":while_loop",
+ "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:status_macros",
+ "//tensorflow/compiler/xla:statusor",
+ "//tensorflow/compiler/xla/client/lib:arithmetic",
+ "//tensorflow/compiler/xla/client/lib:constants",
+ "//tensorflow/compiler/xla/client/lib:math",
+ "//tensorflow/compiler/xla/client/lib:numeric",
+ "//tensorflow/compiler/xla/client/xla_client:xla_builder",
+ "//tensorflow/compiler/xla/client/xla_client:xla_computation",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
name = "scatter",
srcs = ["scatter.cc"],
hdrs = ["scatter.h"],
deps = [
":util",
":while_loop",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
@@ -90,11 +114,13 @@ cc_library(
deps = [
":batch_dot",
":util",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
+ "//tensorflow/compiler/xla/client/lib:constants",
+ "//tensorflow/compiler/xla/client/lib:numeric",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/compiler/xla/client/xla_client:xla_computation",
"//tensorflow/core:lib",
@@ -108,7 +134,7 @@ xla_test(
deps = [
":triangular_solve",
"//tensorflow/compiler/xla:array2d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:test",
@@ -130,6 +156,7 @@ cc_library(
srcs = ["util.cc"],
hdrs = ["util.h"],
deps = [
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
@@ -148,7 +175,7 @@ xla_test(
":batch_dot",
":util",
"//tensorflow/compiler/xla:array2d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:test",
diff --git a/tensorflow/compiler/tf2xla/lib/batch_dot.cc b/tensorflow/compiler/tf2xla/lib/batch_dot.cc
index ee0bb91a6b..3c4eec081b 100644
--- a/tensorflow/compiler/tf2xla/lib/batch_dot.cc
+++ b/tensorflow/compiler/tf2xla/lib/batch_dot.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include <memory>
#include <vector>
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/statusor.h"
@@ -25,91 +26,94 @@ limitations under the License.
namespace tensorflow {
-xla::StatusOr<xla::XlaOp> BatchDot(xla::XlaBuilder* builder, xla::XlaOp x,
- xla::XlaOp y, bool transpose_x,
- bool transpose_y, bool conjugate_x,
- bool conjugate_y) {
- TF_ASSIGN_OR_RETURN(xla::Shape x_shape, builder->GetShape(x));
- TF_ASSIGN_OR_RETURN(xla::Shape y_shape, builder->GetShape(y));
-
- // Check that both tensors have the same number of dimensions. There must be
- // at least two (the batch dimensions can be empty).
- if (xla::ShapeUtil::Rank(x_shape) != xla::ShapeUtil::Rank(y_shape)) {
- return errors::InvalidArgument(
- "Arguments to BatchedDot have different ranks: ",
- xla::ShapeUtil::HumanString(x_shape), " vs. ",
- xla::ShapeUtil::HumanString(y_shape));
- }
- const int ndims = xla::ShapeUtil::Rank(x_shape);
- if (ndims < 2) {
- return errors::InvalidArgument(
- "Arguments to BatchedDot must have rank >= 2: ", ndims);
- }
-
- // The batch dimensions must be equal and the matrix dimensions must be
- // valid.
- std::vector<int64> batch_dimension_numbers;
- for (int i = 0; i < ndims - 2; ++i) {
- if (x_shape.dimensions(i) != y_shape.dimensions(i)) {
+xla::XlaOp BatchDot(xla::XlaOp x, xla::XlaOp y, bool transpose_x,
+ bool transpose_y, bool conjugate_x, bool conjugate_y) {
+ xla::XlaBuilder* builder = x.builder();
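+ // ReportErrorOrReturn evaluates the lambda; on failure it records the
+ // error on the builder (surfacing it at Build() time) and returns an
+ // invalid XlaOp, so BatchDot can return a plain XlaOp rather than a
+ // StatusOr.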
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape x_shape, builder->GetShape(x));
+ TF_ASSIGN_OR_RETURN(xla::Shape y_shape, builder->GetShape(y));
+
+ // Check that both tensors have the same number of dimensions. There must
+ // be at least two (the batch dimensions can be empty).
+ if (xla::ShapeUtil::Rank(x_shape) != xla::ShapeUtil::Rank(y_shape)) {
return errors::InvalidArgument(
- "Dimension ", i, " of inputs to BatchedDot must be equal: ",
- xla::ShapeUtil::HumanString(x_shape), " vs ",
+ "Arguments to BatchedDot have different ranks: ",
+ xla::ShapeUtil::HumanString(x_shape), " vs. ",
xla::ShapeUtil::HumanString(y_shape));
}
- batch_dimension_numbers.push_back(i);
- }
-
- int x_inner_dim = transpose_x ? (ndims - 2) : (ndims - 1);
- int y_inner_dim = transpose_y ? (ndims - 1) : (ndims - 2);
- if (x_shape.dimensions(x_inner_dim) != y_shape.dimensions(y_inner_dim)) {
- return errors::InvalidArgument(
- "Dimensions ", x_inner_dim, " and ", y_inner_dim,
- " of arguments to BatchedDot must be equal: ",
- xla::ShapeUtil::HumanString(x_shape), " transpose: ", transpose_x,
- " vs. ", xla::ShapeUtil::HumanString(y_shape),
- " transpose: ", transpose_y);
- }
-
- // Check for zero lhs/rhs dim size.
- if (xla::ShapeUtil::IsZeroElementArray(x_shape) ||
- xla::ShapeUtil::IsZeroElementArray(y_shape)) {
- std::vector<int64> dimensions(batch_dimension_numbers.size());
- for (int i = 0; i < batch_dimension_numbers.size(); ++i) {
- dimensions[i] = x_shape.dimensions(batch_dimension_numbers[i]);
+ const int ndims = xla::ShapeUtil::Rank(x_shape);
+ if (ndims < 2) {
+ return errors::InvalidArgument(
+ "Arguments to BatchedDot must have rank >= 2: ", ndims);
+ }
+
+ // The batch dimensions must be equal and the matrix dimensions must be
+ // valid.
+ std::vector<int64> batch_dimension_numbers;
+ for (int i = 0; i < ndims - 2; ++i) {
+ if (x_shape.dimensions(i) != y_shape.dimensions(i)) {
+ return errors::InvalidArgument(
+ "Dimension ", i, " of inputs to BatchedDot must be equal: ",
+ xla::ShapeUtil::HumanString(x_shape), " vs ",
+ xla::ShapeUtil::HumanString(y_shape));
+ }
+ batch_dimension_numbers.push_back(i);
+ }
+
+ int x_inner_dim = transpose_x ? (ndims - 2) : (ndims - 1);
+ int y_inner_dim = transpose_y ? (ndims - 1) : (ndims - 2);
+ if (x_shape.dimensions(x_inner_dim) != y_shape.dimensions(y_inner_dim)) {
+ return errors::InvalidArgument(
+ "Dimensions ", x_inner_dim, " and ", y_inner_dim,
+ " of arguments to BatchedDot must be equal: ",
+ xla::ShapeUtil::HumanString(x_shape), " transpose: ", transpose_x,
+ " vs. ", xla::ShapeUtil::HumanString(y_shape),
+ " transpose: ", transpose_y);
+ }
+
+ // Check for zero lhs/rhs dim size.
+ if (xla::ShapeUtil::IsZeroElementArray(x_shape) ||
+ xla::ShapeUtil::IsZeroElementArray(y_shape)) {
+ std::vector<int64> dimensions(batch_dimension_numbers.size());
+ for (int i = 0; i < batch_dimension_numbers.size(); ++i) {
+ dimensions[i] = x_shape.dimensions(batch_dimension_numbers[i]);
+ }
+ int x_outer_dim = transpose_x ? (ndims - 1) : (ndims - 2);
+ int y_outer_dim = transpose_y ? (ndims - 2) : (ndims - 1);
+ dimensions.push_back(x_shape.dimensions(x_outer_dim));
+ dimensions.push_back(y_shape.dimensions(y_outer_dim));
+ return xla::Broadcast(
+ xla::ConstantLiteral(builder,
+ xla::LiteralUtil::Zero(x_shape.element_type())),
+ dimensions);
+ }
+
+ if (x_shape.element_type() == xla::C64 && conjugate_x) {
+ x = xla::Conj(x);
+ }
+ if (y_shape.element_type() == xla::C64 && conjugate_y) {
+ y = xla::Conj(y);
+ }
+
+ // If there are no batch dimensions, use a regular Dot.
+ // TODO(b/69062148) Remove this code when Dot emitters can be passed
+ // dimensions to transpose directly (i.e. without requiring a Transpose
+ // HLO).
+ if (batch_dimension_numbers.empty()) {
+ auto lhs = transpose_x ? xla::Transpose(x, {1, 0}) : x;
+ auto rhs = transpose_y ? xla::Transpose(y, {1, 0}) : y;
+ return xla::Dot(lhs, rhs);
+ }
+
+ xla::DotDimensionNumbers dot_dnums;
+ dot_dnums.add_lhs_contracting_dimensions(x_inner_dim);
+ dot_dnums.add_rhs_contracting_dimensions(y_inner_dim);
+ for (auto batch_dimension_number : batch_dimension_numbers) {
+ dot_dnums.add_lhs_batch_dimensions(batch_dimension_number);
+ dot_dnums.add_rhs_batch_dimensions(batch_dimension_number);
}
- int x_outer_dim = transpose_x ? (ndims - 1) : (ndims - 2);
- int y_outer_dim = transpose_y ? (ndims - 2) : (ndims - 1);
- dimensions.push_back(x_shape.dimensions(x_outer_dim));
- dimensions.push_back(y_shape.dimensions(y_outer_dim));
- return builder->Broadcast(
- builder->ConstantLiteral(xla::Literal::Zero(x_shape.element_type())),
- dimensions);
- }
-
- if (x_shape.element_type() == xla::C64 && conjugate_x) {
- x = builder->Conj(x);
- }
- if (y_shape.element_type() == xla::C64 && conjugate_y) {
- y = builder->Conj(y);
- }
-
- // If there are no batch dimensions, use a regular Dot.
- // TODO(b/69062148) Remove this code when Dot emitters can be passed
- // dimensions to transpose directly (i.e. without requiring a Transpose HLO).
- if (batch_dimension_numbers.empty()) {
- auto lhs = transpose_x ? builder->Transpose(x, {1, 0}) : x;
- auto rhs = transpose_y ? builder->Transpose(y, {1, 0}) : y;
- return builder->Dot(lhs, rhs);
- }
-
- xla::DotDimensionNumbers dot_dnums;
- dot_dnums.add_lhs_contracting_dimensions(x_inner_dim);
- dot_dnums.add_rhs_contracting_dimensions(y_inner_dim);
- for (auto batch_dimension_number : batch_dimension_numbers) {
- dot_dnums.add_lhs_batch_dimensions(batch_dimension_number);
- dot_dnums.add_rhs_batch_dimensions(batch_dimension_number);
- }
- return builder->DotGeneral(x, y, dot_dnums);
+ return xla::DotGeneral(x, y, dot_dnums);
+ });
}
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/lib/batch_dot.h b/tensorflow/compiler/tf2xla/lib/batch_dot.h
index 1acc72033b..d07a9486f1 100644
--- a/tensorflow/compiler/tf2xla/lib/batch_dot.h
+++ b/tensorflow/compiler/tf2xla/lib/batch_dot.h
@@ -43,10 +43,9 @@ namespace tensorflow {
// It is computed as:
//
// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
-xla::StatusOr<xla::XlaOp> BatchDot(xla::XlaBuilder* builder, xla::XlaOp x,
- xla::XlaOp y, bool transpose_x,
- bool transpose_y, bool conjugate_x = false,
- bool conjugate_y = false);
+xla::XlaOp BatchDot(xla::XlaOp x, xla::XlaOp y, bool transpose_x = false,
+ bool transpose_y = false, bool conjugate_x = false,
+ bool conjugate_y = false);
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/lib/cholesky.cc b/tensorflow/compiler/tf2xla/lib/cholesky.cc
index 20925118bf..35b137aa2c 100644
--- a/tensorflow/compiler/tf2xla/lib/cholesky.cc
+++ b/tensorflow/compiler/tf2xla/lib/cholesky.cc
@@ -22,7 +22,9 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/lib/triangular_solve.h"
#include "tensorflow/compiler/tf2xla/lib/util.h"
#include "tensorflow/compiler/tf2xla/lib/while_loop.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/statusor.h"
@@ -47,178 +49,163 @@ namespace {
// l[..., j+1:, j] = (a[..., j+1:, j] - np.dot(l[..., j+1:, :j], row_t)) /
// l[..., j, j]
// return l
-xla::StatusOr<xla::XlaOp> CholeskyUnblocked(xla::XlaBuilder* builder,
- const xla::XlaOp& a) {
- TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
- const int n_dims = xla::ShapeUtil::Rank(a_shape);
- const int64 n = xla::ShapeUtil::GetDimension(a_shape, -1);
- gtl::ArraySlice<int64> major_dims(xla::AsInt64Slice(a_shape.dimensions()),
- /*pos=*/0,
- /*len=*/n_dims - 2);
-
- xla::XlaOp l = Zeros(builder, a_shape);
-
- // Construct the for loop body to iterate over rows.
- auto body_fn = [&](xla::XlaOp i, gtl::ArraySlice<xla::XlaOp> loop_vars,
- xla::XlaBuilder* body_builder)
- -> xla::StatusOr<std::vector<xla::XlaOp>> {
- xla::Shape col_shape;
- xla::Shape row_shape;
- for (int64 d : major_dims) {
- row_shape.add_dimensions(d);
- col_shape.add_dimensions(d);
- }
- row_shape.add_dimensions(1);
- row_shape.add_dimensions(n);
- row_shape.set_element_type(a_shape.element_type());
- auto mask_zeros_row = Zeros(body_builder, row_shape);
-
- col_shape.add_dimensions(n);
- col_shape.add_dimensions(1);
- col_shape.set_element_type(a_shape.element_type());
- auto mask_zeros_col = Zeros(body_builder, col_shape);
-
- std::vector<int32> mask_vector(n);
- std::iota(mask_vector.begin(), mask_vector.end(), 0);
- auto mask_range = body_builder->ConstantR1<int32>(mask_vector);
- auto mask_range_row = body_builder->Broadcast(
- body_builder->Reshape(mask_range, {0}, {1, n}), major_dims);
- auto mask_range_col = body_builder->Broadcast(
- body_builder->Reshape(mask_range, {0}, {n, 1}), major_dims);
- auto body_a = loop_vars[0];
- auto body_l = loop_vars[1];
-
- // row = l[..., i, :i]
- // select the whole i-th row, then mask out all columns past i-1
- auto zero = body_builder->ConstantR0<int32>(0);
- TF_ASSIGN_OR_RETURN(auto l_i, DynamicSliceInMinorDims(body_builder, body_l,
- {i, zero}, {1, n}));
- auto row = body_builder->Select(body_builder->Ge(mask_range_row, i),
- mask_zeros_row, l_i);
- // a[..., i, i]
- TF_ASSIGN_OR_RETURN(auto a_ii, DynamicSliceInMinorDims(body_builder, body_a,
- {i, i}, {1, 1}));
- // np.dot(row, np.swapaxes(row, -1, -2))
- xla::XlaOp diag_dot;
- TF_ASSIGN_OR_RETURN(diag_dot, BatchDot(body_builder, row, row,
- /*transpose_x=*/false,
- /*transpose_y=*/true));
- // l[..., i, i] = np.sqrt(a[..., i, i] - np.dot(row,
- // np.swapaxes(row, -1, -2)))
- auto l_ii = body_builder->Pow(
- body_builder->Sub(a_ii, diag_dot),
- FloatLiteral(body_builder, a_shape.element_type(), 0.5));
-
- // a[..., i+1:, i]
- // select the whole i-th column, then mask out all rows above i+1
+xla::XlaOp CholeskyUnblocked(xla::XlaOp a) {
+ xla::XlaBuilder* builder = a.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
+ const int n_dims = xla::ShapeUtil::Rank(a_shape);
+ const int64 n = xla::ShapeUtil::GetDimension(a_shape, -1);
+ gtl::ArraySlice<int64> major_dims(xla::AsInt64Slice(a_shape.dimensions()),
+ /*pos=*/0,
+ /*len=*/n_dims - 2);
+
+ xla::XlaOp l = xla::ZerosLike(a);
+
+ // Construct the for loop body to iterate over rows.
+ auto body_fn = [&](xla::XlaOp i, gtl::ArraySlice<xla::XlaOp> loop_vars,
+ xla::XlaBuilder* body_builder)
+ -> xla::StatusOr<std::vector<xla::XlaOp>> {
+ xla::Shape col_shape;
+ xla::Shape row_shape;
+ for (int64 d : major_dims) {
+ row_shape.add_dimensions(d);
+ col_shape.add_dimensions(d);
+ }
+ row_shape.add_dimensions(1);
+ row_shape.add_dimensions(n);
+ row_shape.set_element_type(a_shape.element_type());
+ auto mask_zeros_row = xla::Zeros(body_builder, row_shape);
+
+ col_shape.add_dimensions(n);
+ col_shape.add_dimensions(1);
+ col_shape.set_element_type(a_shape.element_type());
+ auto mask_zeros_col = xla::Zeros(body_builder, col_shape);
+
+ std::vector<int32> mask_vector(n);
+ std::iota(mask_vector.begin(), mask_vector.end(), 0);
+ auto mask_range = xla::ConstantR1<int32>(body_builder, mask_vector);
+ auto mask_range_row =
+ xla::Broadcast(xla::Reshape(mask_range, {0}, {1, n}), major_dims);
+ auto mask_range_col =
+ xla::Broadcast(xla::Reshape(mask_range, {0}, {n, 1}), major_dims);
+ auto body_a = loop_vars[0];
+ auto body_l = loop_vars[1];
+
+ // row = l[..., i, :i]
+ // select the whole i-th row, then mask out all columns past i-1
+ auto zero = xla::ConstantR0<int32>(body_builder, 0);
+ auto l_i = DynamicSliceInMinorDims(body_l, {i, zero}, {1, n});
+ auto row = xla::Select(xla::Ge(mask_range_row, i), mask_zeros_row, l_i);
+ // a[..., i, i]
+ auto a_ii = DynamicSliceInMinorDims(body_a, {i, i}, {1, 1});
+ // np.dot(row, np.swapaxes(row, -1, -2))
+ auto diag_dot = BatchDot(row, row,
+ /*transpose_x=*/false,
+ /*transpose_y=*/true);
+ // l[..., i, i] = np.sqrt(a[..., i, i] - np.dot(row,
+ // np.swapaxes(row, -1, -2)))
+ auto l_ii =
+ xla::Pow(a_ii - diag_dot,
+ FloatLiteral(body_builder, a_shape.element_type(), 0.5));
+
+ // a[..., i+1:, i]
+ // select the whole i-th column, then mask out all rows above i+1
+ auto a_0i = DynamicSliceInMinorDims(body_a, {i}, {1});
+ auto a_ip1i =
+ xla::Select(xla::Le(mask_range_col, i), mask_zeros_col, a_0i);
+
+ // l[..., i+1:, i] = (a[..., i+1:, i] - np.dot(l[..., i+1:, :i], r.T)) /
+ // l[..., i, i]
+ // The columns in [i, n] are zeroed out in `row`, so we just have to
+ // zero out rows above i+1 after the BatchDot. np.dot(l[..., :, :i],
+ // r.T)
+ auto dot = BatchDot(body_l, row,
+ /*transpose_x=*/false,
+ /*transpose_y=*/true);
+ // np.dot(l[..., i+1:, :i], r.T)
+ auto dot_ip1 =
+ xla::Select(xla::Le(mask_range_col, i), mask_zeros_col, dot);
+
+ body_l =
+ DynamicUpdateSliceInMinorDims(body_l, (a_ip1i - dot_ip1) / l_ii, {i});
+ // Assign the diagonal after the rest of the column because otherwise the
+ // column assign will wrap around and overwrite the diagonal assign.
+ body_l = DynamicUpdateSliceInMinorDims(body_l, l_ii, {i, i});
+
+ return std::vector<xla::XlaOp>{body_a, body_l};
+ };
+
TF_ASSIGN_OR_RETURN(
- auto a_0i, DynamicSliceInMinorDims(body_builder, body_a, {i}, {1}));
- auto a_ip1i = body_builder->Select(body_builder->Le(mask_range_col, i),
- mask_zeros_col, a_0i);
-
- // l[..., i+1:, i] = (a[..., i+1:, i] - np.dot(l[..., i+1:, :i], r.T)) /
- // l[..., i, i]
- // The columns in [i, n] are zeroed out in `row`, so we just have to
- // zero out rows above i+1 after the BatchDot. np.dot(l[..., :, :i],
- // r.T)
- TF_ASSIGN_OR_RETURN(auto dot, BatchDot(body_builder, body_l, row,
- /*transpose_x=*/false,
- /*transpose_y=*/true));
- // np.dot(l[..., i+1:, :i], r.T)
- auto dot_ip1 = body_builder->Select(body_builder->Le(mask_range_col, i),
- mask_zeros_col, dot);
-
- auto col_update =
- body_builder->Div(body_builder->Sub(a_ip1i, dot_ip1), l_ii);
- TF_ASSIGN_OR_RETURN(body_l, DynamicUpdateSliceInMinorDims(
- body_builder, body_l, col_update, {i}));
- // Assign the diagonal after the rest of the column because otherwise the
- // column assign will wrap around and overwrite the diagonal assign.
- TF_ASSIGN_OR_RETURN(body_l, DynamicUpdateSliceInMinorDims(
- body_builder, body_l, l_ii, {i, i}));
-
- return std::vector<xla::XlaOp>{body_a, body_l};
- };
-
- TF_ASSIGN_OR_RETURN(
- auto cholesky_while,
- XlaForEachIndex(n, xla::S32, body_fn, {a, l}, "unblocked", builder));
-
- return cholesky_while[1];
+ auto cholesky_while,
+ XlaForEachIndex(n, xla::S32, body_fn, {a, l}, "unblocked", builder));
+
+ return cholesky_while[1];
+ });
}
} // namespace
-xla::StatusOr<xla::XlaOp> Cholesky(xla::XlaBuilder* builder, xla::XlaOp a,
- int64 block_size) {
- TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
- const int ndims = xla::ShapeUtil::Rank(a_shape);
- if (ndims < 2) {
- return errors::InvalidArgument(
- "Arguments to Cholesky must have rank >= 2: ", ndims);
- }
-
- const int64 n = xla::ShapeUtil::GetDimension(a_shape, -1);
- if (n != xla::ShapeUtil::GetDimension(a_shape, -2)) {
- return errors::InvalidArgument(
- "Arguments to Cholesky must be square matrices: ",
- xla::ShapeUtil::HumanString(a_shape));
- }
-
- if (block_size < 1) {
- return errors::InvalidArgument(
- "block_size argument to Cholesky must be >= 1; got ", block_size);
- }
-
- // Blocked left-looking Cholesky factorization.
- // Algorithm 1 from
- // Haidar, Azzam, et al. "High-performance Cholesky factorization for GPU-only
- // execution." Proceedings of General Purpose GPUs. ACM, 2017.
- xla::XlaOp l = Zeros(builder, a_shape);
- for (int64 i = 0; i < n; i += block_size) {
- int64 k = std::min(block_size, n - i);
- if (i > 0) {
- // TODO(phawkins): consider implementing SYRK for the diagonal part of
- // the panel.
- // a[i:, i:i+k] -= np.dot(l[i:, :i], np.transpose(l[i:i+k, :i]))
- TF_ASSIGN_OR_RETURN(auto lhs,
- SliceInMinorDims(builder, l, {i, 0}, {n, i}));
- TF_ASSIGN_OR_RETURN(auto rhs,
- SliceInMinorDims(builder, l, {i, 0}, {i + k, i}));
- TF_ASSIGN_OR_RETURN(auto delta,
- BatchDot(builder, lhs, rhs, /*transpose_x=*/false,
- /*transpose_y=*/true, /*conjugate_x=*/false,
- /*conjugate_y=*/false));
- TF_ASSIGN_OR_RETURN(auto before,
- SliceInMinorDims(builder, a, {i, i}, {n, i + k}));
- TF_ASSIGN_OR_RETURN(
- a, UpdateSliceInMinorDims(builder, a, builder->Sub(before, delta),
- {i, i}));
+xla::XlaOp Cholesky(xla::XlaOp a, int64 block_size) {
+ xla::XlaBuilder* builder = a.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
+ const int ndims = xla::ShapeUtil::Rank(a_shape);
+ if (ndims < 2) {
+ return errors::InvalidArgument(
+ "Arguments to Cholesky must have rank >= 2: ", ndims);
+ }
+
+ const int64 n = xla::ShapeUtil::GetDimension(a_shape, -1);
+ if (n != xla::ShapeUtil::GetDimension(a_shape, -2)) {
+ return errors::InvalidArgument(
+ "Arguments to Cholesky must be square matrices: ",
+ xla::ShapeUtil::HumanString(a_shape));
+ }
+
+ if (block_size < 1) {
+ return errors::InvalidArgument(
+ "block_size argument to Cholesky must be >= 1; got ", block_size);
}
- // l[i:i+k, i:i+k] = cholesky_unblocked(a[i:i+k, i:i+k])
- TF_ASSIGN_OR_RETURN(auto x,
- SliceInMinorDims(builder, a, {i, i}, {i + k, i + k}));
- TF_ASSIGN_OR_RETURN(auto factorized, CholeskyUnblocked(builder, x));
- TF_ASSIGN_OR_RETURN(l,
- UpdateSliceInMinorDims(builder, l, factorized, {i, i}));
-
- if (i + k < n) {
- // l[i+k:, i:i+k] = trsm_right_transpose(l[i:i+k, i:i+k], a[i+k:, i:i+k])
- TF_ASSIGN_OR_RETURN(auto panel,
- SliceInMinorDims(builder, a, {i + k, i}, {n, i + k}));
- TF_ASSIGN_OR_RETURN(auto update,
- TriangularSolve(builder, factorized, panel,
- /*left_side=*/false,
- /*lower=*/true,
- /*transpose_a=*/true,
- /*conjugate_a=*/false,
- /*block_size=*/block_size));
- TF_ASSIGN_OR_RETURN(
- l, UpdateSliceInMinorDims(builder, l, update, {i + k, i}));
+ // Blocked left-looking Cholesky factorization.
+ // Algorithm 1 from
+ // Haidar, Azzam, et al. "High-performance Cholesky factorization for
+ // GPU-only execution." Proceedings of General Purpose GPUs. ACM, 2017.
+ xla::XlaOp l = xla::ZerosLike(a);
+ for (int64 i = 0; i < n; i += block_size) {
+ int64 k = std::min(block_size, n - i);
+ if (i > 0) {
+ // TODO(phawkins): consider implementing SYRK for the diagonal part of
+ // the panel.
+ // a[i:, i:i+k] -= np.dot(l[i:, :i], np.transpose(l[i:i+k, :i]))
+ auto lhs = SliceInMinorDims(l, {i, 0}, {n, i});
+ auto rhs = SliceInMinorDims(l, {i, 0}, {i + k, i});
+ auto delta = BatchDot(lhs, rhs, /*transpose_x=*/false,
+ /*transpose_y=*/true);
+ auto before = SliceInMinorDims(a, {i, i}, {n, i + k});
+ a = UpdateSliceInMinorDims(a, before - delta, {i, i});
+ }
+
+ // l[i:i+k, i:i+k] = cholesky_unblocked(a[i:i+k, i:i+k])
+ auto x = SliceInMinorDims(a, {i, i}, {i + k, i + k});
+ auto factorized = CholeskyUnblocked(x);
+ l = UpdateSliceInMinorDims(l, factorized, {i, i});
+
+ if (i + k < n) {
+ // l[i+k:, i:i+k] =
+ // trsm_right_transpose(l[i:i+k, i:i+k], a[i+k:, i:i+k])
+ auto panel = SliceInMinorDims(a, {i + k, i}, {n, i + k});
+ auto update = TriangularSolve(factorized, panel,
+ /*left_side=*/false,
+ /*lower=*/true,
+ /*transpose_a=*/true,
+ /*conjugate_a=*/false,
+ /*block_size=*/block_size);
+ l = UpdateSliceInMinorDims(l, update, {i + k, i});
+ }
}
- }
- return l;
+ return l;
+ });
}
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/lib/cholesky.h b/tensorflow/compiler/tf2xla/lib/cholesky.h
index 20fca7969e..0f6e0e9d15 100644
--- a/tensorflow/compiler/tf2xla/lib/cholesky.h
+++ b/tensorflow/compiler/tf2xla/lib/cholesky.h
@@ -30,8 +30,7 @@ namespace tensorflow {
// TODO(phawkins): check for negative values on the diagonal and return an
// error, instead of silently yielding NaNs.
// TODO(znado): handle the complex Hermitian case
-xla::StatusOr<xla::XlaOp> Cholesky(xla::XlaBuilder* builder, xla::XlaOp a,
- int64 block_size = 256);
+xla::XlaOp Cholesky(xla::XlaOp a, int64 block_size = 256);
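+
+// Example (an illustrative sketch, not part of this change; `a_shape` is a
+// placeholder for a [..., n, n] shape): with the builder-style API above,
+// errors surface when the computation is built rather than at call time:
+//   xla::XlaBuilder b("cholesky");
+//   auto a = xla::Parameter(&b, 0, a_shape, "a");
+//   auto l = Cholesky(a);           // no StatusOr to unwrap
+//   auto computation = b.Build();   // invalid arguments are reported here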
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/lib/qr.cc b/tensorflow/compiler/tf2xla/lib/qr.cc
new file mode 100644
index 0000000000..9c8ac7af25
--- /dev/null
+++ b/tensorflow/compiler/tf2xla/lib/qr.cc
@@ -0,0 +1,387 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/tf2xla/lib/qr.h"
+
+#include <memory>
+#include <vector>
+
+#include "tensorflow/compiler/tf2xla/lib/batch_dot.h"
+#include "tensorflow/compiler/tf2xla/lib/util.h"
+#include "tensorflow/compiler/tf2xla/lib/while_loop.h"
+#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/math.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/compiler/xla/status_macros.h"
+#include "tensorflow/compiler/xla/statusor.h"
+#include "tensorflow/core/lib/core/errors.h"
+
+namespace tensorflow {
+
+namespace {
+
+// Computes a Householder reflection of the form:
+// H = I - tau v v.T.
+// such that
+// H . ( x1 ) = ( x1 )
+// ( x2 ) = ( x2 )
+// ( ... ) = ( ... )
+// ( xk ) = ( beta )
+// ( ... ) ( 0 )
+// ( ... ) ( 0 )
+// Unlike the usual formulation, we allow the caller to supply 'k' rather than
+// only providing the relevant part of 'x' to maintain XLA's static shape
+// invariant. In addition, the implementation supports batching.
+// Pseudo-code, without batching:
+// alpha = x[k]
+// x_copy = np.copy(x)
+// x_copy[:k+1] = 0
+// xnorm = norm2(x_copy)
+// if xnorm == 0:
+// beta = alpha
+// tau = 0
+// v = np.zeros_like(x)
+// else:
+// beta = - np.sign(alpha) * dlapy2(alpha, xnorm)
+// tau = (beta - alpha) / beta
+// v = x / (alpha - beta)
+// v[k] = 1
+// return (v, tau, beta)
+// TODO(phawkins): LAPACK's xLARFG implementation has code for handling
+// overflows in the norm/beta calculations. Perhaps do the same here.
+xla::Status House(xla::XlaOp x, xla::XlaOp k, gtl::ArraySlice<int64> batch_dims,
+ const int64 m, xla::XlaOp* v, xla::XlaOp* tau,
+ xla::XlaOp* beta) {
+ xla::XlaBuilder* const builder = x.builder();
+ TF_ASSIGN_OR_RETURN(xla::Shape x_shape, builder->GetShape(x));
+ const xla::PrimitiveType type = x_shape.element_type();
+
+ std::vector<int64> batch_dim_ids(batch_dims.size());
+ std::iota(batch_dim_ids.begin(), batch_dim_ids.end(), 0);
+ const int64 minor_dim = batch_dims.size();
+
+ xla::XlaOp zero = xla::ScalarLike(x, 0.0);
+ xla::XlaOp one = xla::ScalarLike(x, 1.0);
+
+ // alpha = x[k]
+ xla::XlaOp alpha =
+ xla::Reshape(DynamicSliceInMinorDims(x, {k}, {1}), batch_dims);
+
+ // Compute x[k+1:] (padded with zeros in elements 0..k)
+ xla::XlaOp iota = xla::Iota(builder, xla::S32, m);
+ xla::XlaOp x_after_k =
+ xla::Mul(x, xla::ConvertElementType(xla::Gt(iota, k), type),
+ /*broadcast_dimensions=*/{minor_dim});
+
+ // sigma = np.dot(x[k+1:], x[k+1:])
+ auto sigma =
+ xla::Reduce(x_after_k * x_after_k, zero,
+ xla::CreateScalarAddComputation(type, builder), {minor_dim});
+ // mu = np.sqrt(x[k]*x[k] + sigma)
+ auto mu = xla::Sqrt(xla::Square(alpha) + sigma);
+
+ auto sigma_is_zero = xla::Eq(sigma, zero);
+
+ *beta = xla::Select(sigma_is_zero, alpha, -xla::Sign(alpha) * mu);
+ *tau = xla::Select(sigma_is_zero, xla::Broadcast(zero, batch_dims),
+ (*beta - alpha) / *beta);
+ auto divisor = xla::Select(sigma_is_zero, xla::Broadcast(one, batch_dims),
+ alpha - *beta);
+
+ auto e_k = xla::Broadcast(xla::ConvertElementType(xla::Eq(iota, k), type),
+ std::vector<int64>(batch_dims.size(), 1));
+
+ // Form v as [0, 0, ..., 1] ++ x[k+1:] / divisor
+ // If sigma is zero, x[k+1:] is zero, so use any non-zero divisor.
+ *v = e_k +
+ xla::Div(x_after_k, divisor, /*broadcast_dimensions=*/batch_dim_ids);
+ return Status::OK();
+}
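+
+// For intuition, a worked instance of the pseudo-code above (illustrative
+// only): for x = [3, 4] and k = 0, alpha = 3 and sigma = 16, so
+// mu = sqrt(9 + 16) = 5, beta = -5, tau = (beta - alpha) / beta = 8/5, and
+// v = [1, 4 / (3 - (-5))] = [1, 0.5]. Then
+// H = I - tau v v.T = [[-0.6, -0.8], [-0.8, 0.6]], and
+// np.dot(H, x) = [-5, 0] = [beta, 0], as promised.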
+
+// Householder QR decomposition. Algorithm 5.2.1 from Golub and Van
+// Loan "Matrix Computations", 4th Edition. This is an unblocked implementation
+// used as an inner routine of the blocked implementation.
+// Algorithm is adapted slightly so the shapes inside the loop are static, at
+// the cost of some redundant computation. Since this is used as an inner block
+// kernel, accumulates the Householder transformations (vs, taus) rather than
+// the matrix q.
+// Equivalent Python code, without batching:
+// def qr(a):
+// m = a.shape[0]
+// n = a.shape[1]
+// vs = np.zeros([m, n])
+// taus = np.zeros([n])
+// for j in xrange(min(m, n)):
+// v, tau, beta = house(a[:, j], j)
+// # Unusually, we apply the Householder transformation to the entirety of
+// # a, wasting FLOPs to maintain the static shape invariant that XLA
+// # requires. For columns that precede j this has no effect.
+// a[:, :] -= tau * np.dot(v[:, np.newaxis],
+// np.dot(v[np.newaxis, :], a[:, :]))
+// # Form column j explicitly rather than relying on the precision of the
+// # Householder update.
+// a[j, j] = beta
+// a[j+1:, j] = np.zeros([m - j - 1], dtype=a.dtype)
+// vs[:, j] = v
+// taus[j] = tau
+//   return (a, vs, taus)
+struct QRBlockResult {
+ // The factored R value
+ xla::XlaOp r;
+
+  // Representation of the Householder matrices I - tau v v.T
+ xla::XlaOp taus; // Shape: [..., n]
+ xla::XlaOp vs; // Shape: [..., m, n]
+};
+xla::StatusOr<QRBlockResult> QRBlock(xla::XlaOp a) {
+ xla::XlaBuilder* builder = a.builder();
+ TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
+ const int num_dims = xla::ShapeUtil::Rank(a_shape);
+ if (num_dims < 2) {
+ return errors::InvalidArgument("Arguments to QR must have rank >= 2: ",
+ num_dims);
+ }
+ xla::PrimitiveType type = a_shape.element_type();
+
+ const int64 m = xla::ShapeUtil::GetDimension(a_shape, -2);
+ const int64 n = xla::ShapeUtil::GetDimension(a_shape, -1);
+
+ const int64 num_batch_dims = num_dims - 2;
+ std::vector<int64> batch_dims(num_batch_dims);
+ for (int i = 0; i < num_batch_dims; ++i) {
+ batch_dims[i] = xla::ShapeUtil::GetDimension(a_shape, i);
+ }
+
+ std::vector<int64> batch_dim_indices(num_batch_dims);
+ std::iota(batch_dim_indices.begin(), batch_dim_indices.end(), 0);
+
+ auto qr_body_fn =
+ [&](xla::XlaOp j, gtl::ArraySlice<xla::XlaOp> values,
+ xla::XlaBuilder* builder) -> xla::StatusOr<std::vector<xla::XlaOp>> {
+ auto a = values[0];
+ auto vs = values[1];
+ auto taus = values[2];
+
+ // v, beta = house(a[:, j], j)
+ auto x = DynamicSliceInMinorDims(a, {j}, {1});
+ xla::XlaOp v, tau, beta;
+ TF_RETURN_IF_ERROR(House(xla::Collapse(x, {num_dims - 2, num_dims - 1}), j,
+ batch_dims, m, &v, &tau, &beta));
+
+ std::vector<int64> shape = batch_dims;
+ shape.push_back(1);
+ shape.push_back(m);
+ auto v_broadcast = xla::Reshape(v, shape);
+ // a[:, :] -= tau * np.dot(v[:, np.newaxis],
+ // np.dot(v[np.newaxis, :], a[:, :]))
+ auto vva = BatchDot(v_broadcast, a);
+ vva = BatchDot(v_broadcast, vva, /*transpose_x=*/true);
+ a = a - xla::Mul(tau, vva,
+ /*broadcast_dimensions=*/batch_dim_indices);
+
+    // It is more precise to populate column 'j' explicitly, rather than
+    // computing it implicitly by applying the Householder transformation.
+    // a[j,j] = beta
+    // a[j+1:,j] = np.zeros([m-j-1], dtype=a.dtype)
+ auto iota = xla::Reshape(xla::Iota(a.builder(), xla::S32, m), {m, 1});
+ auto predecessor_mask = xla::ConvertElementType(xla::Lt(iota, j), type);
+ auto mask = xla::Broadcast(xla::ConvertElementType(xla::Eq(iota, j), type),
+ std::vector<int64>(batch_dims.size(), 1));
+ auto new_x =
+ xla::Mul(x, predecessor_mask,
+ /*broadcast_dimensions=*/{num_dims - 2, num_dims - 1}) +
+ xla::Mul(beta, mask, /*broadcast_dimensions=*/batch_dim_indices);
+ a = DynamicUpdateSliceInMinorDims(a, new_x, {j});
+
+ // vs[:, j] = v
+ vs = DynamicUpdateSliceInMinorDims(
+ vs, xla::Reshape(v, ConcatVectors(batch_dims, {m, 1})), {j});
+ // taus[j] = tau
+ taus = DynamicUpdateSliceInMinorDims(
+ taus, xla::Reshape(tau, ConcatVectors(batch_dims, {1})), {j});
+ return std::vector<xla::XlaOp>{a, vs, taus};
+ };
+
+ auto vs = xla::Zeros(builder, xla::ShapeUtil::MakeShape(
+ type, ConcatVectors(batch_dims, {m, n})));
+ auto taus = xla::Zeros(
+ builder, xla::ShapeUtil::MakeShape(type, ConcatVectors(batch_dims, {n})));
+
+ TF_ASSIGN_OR_RETURN(auto values,
+ XlaForEachIndex(std::min(m, n), xla::S32, qr_body_fn,
+ {a, vs, taus}, "qr", builder));
+
+ QRBlockResult result;
+ result.r = values[0];
+ result.vs = values[1];
+ result.taus = values[2];
+ return result;
+}
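+
+// (Illustrative note: the block's implicit Q is the product
+// H_0 * H_1 * ... * H_{min(m,n)-1} with H_j = I - taus[j] * vs[:, j] *
+// vs[:, j].T, so np.dot(Q, r) reconstructs the input block up to roundoff.)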
+
+// Computes W and Y such that I + W * Y.T is equivalent to the sequence of
+// Householder transformations given by vs and taus.
+// Golub and van Loan, "Matrix Computations", algorithm 5.1.2.
+// Y = np.zeros([m, n])
+// W = np.zeros([m, n])
+// Y[:, 0] = vs[:, 0]
+// W[:, 0] = -taus[0] * vs[:, 0]
+// for j in xrange(1, n):
+// v = vs[:, j]
+// z = -taus[j] * v - taus[j] * np.dot(W, np.dot(Y.T, v))
+// W[:, j] = z
+// Y[:, j] = v
+// return W
+// There is no need to return Y since at termination of the loop it is equal to
+// vs.
+xla::StatusOr<xla::XlaOp> ComputeWYRepresentation(
+ xla::PrimitiveType type, gtl::ArraySlice<int64> batch_dims, xla::XlaOp vs,
+ xla::XlaOp taus, int64 m, int64 n) {
+ std::vector<int64> batch_dim_indices(batch_dims.size());
+ std::iota(batch_dim_indices.begin(), batch_dim_indices.end(), 0);
+ int64 n_index = batch_dims.size() + 1;
+
+ auto body_fn =
+ [&](xla::XlaOp j, gtl::ArraySlice<xla::XlaOp> values,
+ xla::XlaBuilder* builder) -> xla::StatusOr<std::vector<xla::XlaOp>> {
+ auto w = values[0];
+ auto y = values[1];
+ const auto vs = values[2];
+ const auto taus = values[3];
+
+    // Want j values in the range [1, n).
+ j = j + xla::ConstantR0<int32>(builder, 1);
+ // vs has shape [..., m, 1]
+ auto v = DynamicSliceInMinorDims(vs, {j}, {1});
+ // beta has shape [..., 1]
+ auto beta = DynamicSliceInMinorDims(taus, {j}, {1});
+
+ // yv has shape [..., n, 1]
+ auto yv = BatchDot(y, v, /*transpose_x=*/true);
+ // wyv has shape [..., m, 1]
+ auto wyv = BatchDot(w, yv);
+
+ auto z = xla::Mul(
+ -beta, v + wyv,
+ /*broadcast_dimensions=*/ConcatVectors(batch_dim_indices, {n_index}));
+
+ w = DynamicUpdateSliceInMinorDims(w, z, {j});
+ y = DynamicUpdateSliceInMinorDims(y, v, {j});
+
+ return std::vector<xla::XlaOp>{w, y, vs, taus};
+ };
+
+ xla::XlaBuilder* builder = vs.builder();
+ auto w = xla::Zeros(builder, xla::ShapeUtil::MakeShape(
+ type, ConcatVectors(batch_dims, {m, n})));
+ auto y = w;
+ auto v = SliceInMinorDims(vs, {0}, {1});
+ auto beta = SliceInMinorDims(taus, {0}, {1});
+ y = UpdateSliceInMinorDims(y, v, {0});
+ auto bv = xla::Mul(
+ -beta, v,
+ /*broadcast_dimensions=*/ConcatVectors(batch_dim_indices, {n_index}));
+ w = UpdateSliceInMinorDims(w, bv, {0});
+
+ TF_ASSIGN_OR_RETURN(
+ auto values, XlaForEachIndex(n - 1, xla::S32, body_fn, {w, y, vs, taus},
+ "wy", builder));
+ return values[0];
+}
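+
+// A quick sanity check of the representation (illustrative, single-reflector
+// case): after initialization, Y = [v0] and W = [-taus[0] * v0], so
+// I + np.dot(W, Y.T) = I - taus[0] * np.outer(v0, v0) = H_0; each loop
+// iteration appends one column while preserving I + W Y.T = H_0 ... H_j.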
+
+} // namespace
+
+// Block Householder QR Factorization. Algorithm 5.2.2 of Golub and van Loan.
+// def qr_blocked(a, block_size):
+// m = a.shape[0]
+// n = a.shape[1]
+// q = np.eye(m)
+// for i in xrange(0, min(m, n), block_size):
+//     k = min(block_size, min(m, n) - i)
+// (a, vs, taus) = qr(a[i:, i:i+k])
+// y = vs
+// w = ComputeWYRepresentation(vs, taus, m-i, k)
+//     a[i:, i+k:] += np.dot(y, np.dot(w.T, a[i:, i+k:]))
+// q[:, i:] += np.dot(q[:, i:], np.dot(w, y.T))
+// return (q, a)
+// TODO(phawkins): consider using UT transformations (in the form I - V U V')
+// rather than WY transformations.
+xla::StatusOr<QRDecompositionResult> QRDecomposition(xla::XlaOp a,
+ int64 block_size) {
+ xla::XlaBuilder* builder = a.builder();
+ TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
+ const int num_dims = xla::ShapeUtil::Rank(a_shape);
+ if (num_dims < 2) {
+ return errors::InvalidArgument("Arguments to QR must have rank >= 2: ",
+ num_dims);
+ }
+ xla::PrimitiveType type = a_shape.element_type();
+
+ const int64 m = xla::ShapeUtil::GetDimension(a_shape, -2);
+ const int64 n = xla::ShapeUtil::GetDimension(a_shape, -1);
+ const int64 p = std::min(m, n);
+
+ if (block_size < 1) {
+ return errors::InvalidArgument(
+ "block_size argument to QR must be >= 1; got ", block_size);
+ }
+
+ const int64 num_batch_dims = num_dims - 2;
+ std::vector<int64> batch_dims(num_batch_dims);
+ for (int i = 0; i < num_batch_dims; ++i) {
+ batch_dims[i] = xla::ShapeUtil::GetDimension(a_shape, i);
+ }
+
+ auto q = xla::Broadcast(xla::IdentityMatrix(builder, type, m, m), batch_dims);
+ for (int64 i = 0; i < p; i += block_size) {
+ int64 k = std::min(block_size, p - i);
+
+ auto a_block = SliceInMinorDims(a, {i, i}, {m, i + k});
+ TF_ASSIGN_OR_RETURN(auto qr_block, QRBlock(a_block));
+
+ a = UpdateSliceInMinorDims(a, qr_block.r, {i, i});
+
+ // Compute the I-WY block representation of a product of Householder
+ // matrices.
+ TF_ASSIGN_OR_RETURN(auto w,
+ ComputeWYRepresentation(type, batch_dims, qr_block.vs,
+ qr_block.taus, m - i, k));
+ auto y = qr_block.vs;
+
+ // a[i:, i+k:] += np.dot(Y, np.dot(W.T, a[i:, i+k:]))
+ auto a_panel = SliceInMinorDims(a, {i, i + k}, {m, n});
+ auto a_update = BatchDot(w, a_panel, /*transpose_x=*/true);
+ a_update = BatchDot(y, a_update);
+ a_panel = a_panel + a_update;
+ a = UpdateSliceInMinorDims(a, a_panel, {i, i + k});
+
+    // q[:, i:] += np.dot(np.dot(q[:, i:], W), Y.T)
+ auto q_panel = SliceInMinorDims(q, {0, i}, {m, m});
+ auto q_update = BatchDot(q_panel, w);
+ q_update =
+ BatchDot(q_update, y, /*transpose_x=*/false, /*transpose_y=*/true);
+ q_panel = q_panel + q_update;
+ q = UpdateSliceInMinorDims(q, q_panel, {0, i});
+ }
+ QRDecompositionResult result;
+ result.q = q;
+ result.r = a;
+ return result;
+}
+
+} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/lib/qr.h b/tensorflow/compiler/tf2xla/lib/qr.h
new file mode 100644
index 0000000000..3aa6a9b075
--- /dev/null
+++ b/tensorflow/compiler/tf2xla/lib/qr.h
@@ -0,0 +1,40 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_TF2XLA_LIB_QR_H_
+#define TENSORFLOW_COMPILER_TF2XLA_LIB_QR_H_
+
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+
+namespace tensorflow {
+
+// Computes the QR decompositions of a batch of matrices. That is,
+// given a (batched) matrix a, computes an orthonormal matrix Q and an
+// upper-triangular matrix R such that a = QR.
+// `a` must be a (batched) matrix of size [..., m, n].
+// The algorithm implements a blocked QR decomposition; `block_size` is
+// the block size to use.
+// TODO(phawkins): handle the complex case.
+struct QRDecompositionResult {
+ xla::XlaOp q;
+ xla::XlaOp r;
+};
+
+xla::StatusOr<QRDecompositionResult> QRDecomposition(xla::XlaOp a,
+ int64 block_size = 128);
+
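+// Example (an illustrative sketch, not part of this header): in a
+// Status-returning context,
+//   TF_ASSIGN_OR_RETURN(auto qr, QRDecomposition(a));
+//   // qr.q: [..., m, m] orthogonal factor; qr.r: [..., m, n] upper
+//   // triangular; BatchDot(qr.q, qr.r) reconstructs `a` up to roundoff.
+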
+} // namespace tensorflow
+
+#endif // TENSORFLOW_COMPILER_TF2XLA_LIB_QR_H_
diff --git a/tensorflow/compiler/tf2xla/lib/random.cc b/tensorflow/compiler/tf2xla/lib/random.cc
index 4a2516244a..8ff10fbd3f 100644
--- a/tensorflow/compiler/tf2xla/lib/random.cc
+++ b/tensorflow/compiler/tf2xla/lib/random.cc
@@ -19,13 +19,14 @@ limitations under the License.
#include <limits>
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
-#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/math.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/status_macros.h"
namespace tensorflow {
-xla::StatusOr<xla::XlaOp> TruncatedNormal(const DataType dtype,
- const xla::XlaOp& uniform,
- xla::XlaBuilder* builder) {
+
+xla::XlaOp TruncatedNormal(xla::XlaOp uniform) {
auto normal_cdf = [](double x) {
return (1.0 + std::erf(x / std::sqrt(2.0))) / 2.0;
};
@@ -40,18 +41,15 @@ xla::StatusOr<xla::XlaOp> TruncatedNormal(const DataType dtype,
const double kBetaNormalCdf = normal_cdf(kBeta);
const double kZ = kBetaNormalCdf - kAlphaNormalCdf;
- xla::XlaOp one = XlaHelpers::FloatLiteral(builder, dtype, 1.0);
- xla::XlaOp two = XlaHelpers::FloatLiteral(builder, dtype, 2.0);
- xla::XlaOp sqrt_2 = XlaHelpers::FloatLiteral(builder, dtype, std::sqrt(2.0));
-
- xla::XlaOp z = XlaHelpers::FloatLiteral(builder, dtype, kZ);
- xla::XlaOp alpha_normal_cdf =
- XlaHelpers::FloatLiteral(builder, dtype, kAlphaNormalCdf);
+ xla::XlaOp one = xla::ScalarLike(uniform, 1.0);
+ xla::XlaOp two = xla::ScalarLike(uniform, 2.0);
+ xla::XlaOp sqrt_2 = xla::ScalarLike(uniform, std::sqrt(2.0));
+ xla::XlaOp z = xla::ScalarLike(uniform, kZ);
+ xla::XlaOp alpha_normal_cdf = xla::ScalarLike(uniform, kAlphaNormalCdf);
+ auto p = alpha_normal_cdf + z * uniform;
// probit(p) = sqrt(2) * erfinv(2*p-1)
- auto p = builder->Add(alpha_normal_cdf, builder->Mul(z, uniform));
- auto erfinv_input = builder->Sub(builder->Mul(p, two), one);
- TF_ASSIGN_OR_RETURN(auto erfinv_or_status, ErfInv(erfinv_input));
- return builder->Mul(sqrt_2, erfinv_or_status);
+ return sqrt_2 * xla::ErfInv(two * p - one);
}
+
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/lib/random.h b/tensorflow/compiler/tf2xla/lib/random.h
index 18c873dba5..2c573fd85b 100644
--- a/tensorflow/compiler/tf2xla/lib/random.h
+++ b/tensorflow/compiler/tf2xla/lib/random.h
@@ -21,15 +21,15 @@ limitations under the License.
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
+
// Builds an array filled with values sampled from a truncated normal
// distribution such that no values are greater than two or less than negative
// two.
//
// The "uniform" parameter must be an array of random numbers distributed in
// (0,1).
-xla::StatusOr<xla::XlaOp> TruncatedNormal(DataType dtype,
- const xla::XlaOp& uniform,
- xla::XlaBuilder* builder);
+xla::XlaOp TruncatedNormal(xla::XlaOp uniform);
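+// (Illustrative note: this is the inverse-CDF method restricted to [-2, 2]:
+// with Phi the standard normal CDF and u ~ U(0, 1), the result is
+// probit(Phi(-2) + (Phi(2) - Phi(-2)) * u), i.e. a standard normal sample
+// conditioned to lie in [-2, 2].)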
+
} // namespace tensorflow
#endif // TENSORFLOW_COMPILER_TF2XLA_LIB_RANDOM_H_
diff --git a/tensorflow/compiler/tf2xla/lib/scatter.cc b/tensorflow/compiler/tf2xla/lib/scatter.cc
index d5a27abb25..6a5be1c2be 100644
--- a/tensorflow/compiler/tf2xla/lib/scatter.cc
+++ b/tensorflow/compiler/tf2xla/lib/scatter.cc
@@ -21,7 +21,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/lib/util.h"
#include "tensorflow/compiler/tf2xla/lib/while_loop.h"
#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/util.h"
@@ -97,8 +98,8 @@ xla::StatusOr<xla::XlaOp> XlaScatter(
buffer_shape_post_axes.end());
// Construct the initial values of the loop-carried Tensors.
- auto flat_indices = builder->Reshape(indices, flat_indices_shape);
- auto flat_updates = builder->Reshape(updates, flat_updates_shape);
+ auto flat_indices = xla::Reshape(indices, flat_indices_shape);
+ auto flat_updates = xla::Reshape(updates, flat_updates_shape);
auto init = {flat_indices, flat_updates, buffer};
// Constructs the loop body. The implementation of scatter is essentially:
@@ -112,46 +113,44 @@ xla::StatusOr<xla::XlaOp> XlaScatter(
auto updates = loop_vars[1];
auto buffer = loop_vars[2];
- auto zero_index = body_builder->ConstantLiteral(
- xla::Literal::Zero(indices_shape.element_type()));
+ auto zero_index = xla::ConstantLiteral(
+ body_builder, xla::LiteralUtil::Zero(indices_shape.element_type()));
// Slice the i-th index from the indices array.
xla::XlaOp index;
- auto indices_offset = body_builder->Reshape(i, {1});
+ auto indices_offset = xla::Reshape(i, {1});
if (indices_are_vectors) {
- indices_offset = body_builder->Pad(indices_offset, zero_index,
- xla::MakeEdgePaddingConfig({{0, 1}}));
+ indices_offset = xla::Pad(indices_offset, zero_index,
+ xla::MakeEdgePaddingConfig({{0, 1}}));
- index = body_builder->DynamicSlice(indices, indices_offset,
- {1, num_index_dims});
- index = body_builder->Collapse(index, {0, 1});
+ index = xla::DynamicSlice(indices, indices_offset, {1, num_index_dims});
+ index = xla::Collapse(index, {0, 1});
} else {
- index = body_builder->DynamicSlice(indices, indices_offset, {1});
+ index = xla::DynamicSlice(indices, indices_offset, {1});
}
// Discard updates with negative indices, since some users expect this.
- auto index_in_range =
- body_builder->ReduceAll(body_builder->Le(zero_index, index),
- body_builder->ConstantR0<bool>(true),
- xla::CreateScalarAndComputation(body_builder));
+ auto index_in_range = xla::ReduceAll(
+ xla::Le(zero_index, index), xla::ConstantR0<bool>(body_builder, true),
+ xla::CreateScalarAndComputation(body_builder));
// Make the index in bounds to prevent implementation defined behavior.
- index = body_builder->Max(index, zero_index);
- index = body_builder->Pad(
+ index = xla::Max(index, zero_index);
+ index = xla::Pad(
index, zero_index,
xla::MakeEdgePaddingConfig({{0, buffer_shape_post_axes.size()}}));
// Slice the i-th index from the updates array.
- auto updates_offset = body_builder->Reshape(i, {1});
- updates_offset = body_builder->Pad(
+ auto updates_offset = xla::Reshape(i, {1});
+ updates_offset = xla::Pad(
updates_offset, zero_index,
xla::MakeEdgePaddingConfig({{0, buffer_shape_post_axes.size()}}));
std::vector<int64> flat_updates_slice_shape({1});
flat_updates_slice_shape.insert(flat_updates_slice_shape.end(),
buffer_shape_post_axes.begin(),
buffer_shape_post_axes.end());
- auto update = body_builder->DynamicSlice(updates, updates_offset,
- flat_updates_slice_shape);
+ auto update =
+ xla::DynamicSlice(updates, updates_offset, flat_updates_slice_shape);
// Unflatten the major (iteration) dimensions of the slice to their
// original shape.
@@ -159,20 +158,19 @@ xla::StatusOr<xla::XlaOp> XlaScatter(
updates_slice_shape.insert(updates_slice_shape.end(),
buffer_shape_post_axes.begin(),
buffer_shape_post_axes.end());
- update = body_builder->Reshape(update, updates_slice_shape);
+ update = xla::Reshape(update, updates_slice_shape);
// Apply the update to the buffer. If there is a combiner, use it to merge
// the current values with the update.
- auto current_value =
- body_builder->DynamicSlice(buffer, index, updates_slice_shape);
+ auto current_value = xla::DynamicSlice(buffer, index, updates_slice_shape);
if (combiner) {
update = combiner(current_value, update, body_builder);
}
// Use the current value instead of the update if the index is out of
// bounds.
- update = body_builder->Select(index_in_range, update, current_value);
+ update = xla::Select(index_in_range, update, current_value);
// Apply the update.
- buffer = body_builder->DynamicUpdateSlice(buffer, update, index);
+ buffer = xla::DynamicUpdateSlice(buffer, update, index);
return std::vector<xla::XlaOp>{indices, updates, buffer};
};
diff --git a/tensorflow/compiler/tf2xla/lib/triangular_solve.cc b/tensorflow/compiler/tf2xla/lib/triangular_solve.cc
index b4503601f9..e405f8dfaa 100644
--- a/tensorflow/compiler/tf2xla/lib/triangular_solve.cc
+++ b/tensorflow/compiler/tf2xla/lib/triangular_solve.cc
@@ -20,7 +20,10 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/lib/batch_dot.h"
#include "tensorflow/compiler/tf2xla/lib/util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/statusor.h"
@@ -29,619 +32,582 @@ limitations under the License.
namespace tensorflow {
-xla::StatusOr<xla::XlaOp> TriangularSolve(xla::XlaBuilder* builder,
- const xla::XlaOp& a, xla::XlaOp b,
- bool left_side, bool lower,
- bool transpose_a, bool conjugate_a,
- int64 block_size) {
- TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
- TF_ASSIGN_OR_RETURN(xla::Shape b_shape, builder->GetShape(b));
- if (xla::ShapeUtil::Rank(a_shape) != xla::ShapeUtil::Rank(b_shape)) {
- return errors::InvalidArgument(
- "Arguments to TriangularSolve have different ranks: ",
- xla::ShapeUtil::HumanString(a_shape), " vs. ",
- xla::ShapeUtil::HumanString(b_shape));
- }
- const int ndims = xla::ShapeUtil::Rank(a_shape);
- if (ndims < 2) {
- return errors::InvalidArgument(
- "Arguments to TriangularSolve must have rank >= 2: ", ndims);
- }
- // The batch dimensions must be equal.
- std::vector<int64> batch_dimensions;
- for (int i = 0; i < ndims - 2; ++i) {
- int64 a_size = a_shape.dimensions(i);
- int64 b_size = b_shape.dimensions(i);
- if (a_size != b_size) {
+xla::XlaOp TriangularSolve(xla::XlaOp a, xla::XlaOp b, bool left_side,
+ bool lower, bool transpose_a, bool conjugate_a,
+ int64 block_size) {
+ xla::XlaBuilder* builder = a.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
+ TF_ASSIGN_OR_RETURN(xla::Shape b_shape, builder->GetShape(b));
+ if (xla::ShapeUtil::Rank(a_shape) != xla::ShapeUtil::Rank(b_shape)) {
return errors::InvalidArgument(
- "Batch dimensions of arguments to TriangularSolve must be equal: ",
- xla::ShapeUtil::HumanString(a_shape), " vs ",
+ "Arguments to TriangularSolve have different ranks: ",
+ xla::ShapeUtil::HumanString(a_shape), " vs. ",
xla::ShapeUtil::HumanString(b_shape));
}
- batch_dimensions.push_back(a_size);
- }
-
- if (xla::ShapeUtil::GetDimension(a_shape, -1) !=
- xla::ShapeUtil::GetDimension(a_shape, -2)) {
- return errors::InvalidArgument(
- "The 'a' arguments to TriangularSolve must be square matrices: ",
- xla::ShapeUtil::HumanString(a_shape));
- }
- const int64 m = xla::ShapeUtil::GetDimension(b_shape, -2);
- const int64 n = xla::ShapeUtil::GetDimension(b_shape, -1);
- if ((left_side ? m : n) != xla::ShapeUtil::GetDimension(a_shape, -1)) {
- return errors::InvalidArgument(
- "Arguments to TriangularSolve have incompatible matrix shapes: ",
- xla::ShapeUtil::HumanString(a_shape), " vs ",
- xla::ShapeUtil::HumanString(b_shape));
- }
-
- if (block_size < 1) {
- return errors::InvalidArgument(
- "block_size argument to TriangularSolve must be >= 1; got ",
- block_size);
- }
-
- std::map<int, xla::XlaComputation> base_computations;
- auto get_base_triangular_solve =
- [&](int k) -> xla::StatusOr<xla::XlaComputation*> {
- xla::XlaComputation& computation = base_computations[k];
- if (computation.IsNull()) {
- std::unique_ptr<xla::XlaBuilder> sub = builder->CreateSubBuilder(
- tensorflow::strings::StrCat("trsm_base_", k));
-
- auto a_param = sub->Parameter(
- 0,
- xla::ShapeUtil::MakeShape(
- b_shape.element_type(),
- PrependMajorDims(sub.get(), batch_dimensions, {k, k})),
- "a");
-
- std::array<int64, 2> b_lastd;
- if (left_side) {
- b_lastd = {k, n};
- } else {
- b_lastd = {m, k};
- }
- auto b_param = sub->Parameter(
- 1,
- xla::ShapeUtil::MakeShape(
- b_shape.element_type(),
- PrependMajorDims(sub.get(), batch_dimensions, b_lastd)),
- "b");
-
- // We use a left-looking or right-looking subroutine on the block diagonal
- // in the lower=true cases, while falling back to a recursive call in
- // others. The left-looking and right-looking subroutines are written with
- // a While loop and so yields much faster compile times. Moreover, they
- // can give higher performance on smaller (sub)problems.
- if (left_side && lower) {
- TF_RETURN_IF_ERROR(TriangularSolveLeftLooking(sub.get(), a_param,
- b_param, transpose_a,
- conjugate_a)
- .status());
- } else if (!left_side && lower) {
- TF_RETURN_IF_ERROR(TriangularSolveRightLooking(sub.get(), a_param,
- b_param, transpose_a,
- conjugate_a)
- .status());
- } else {
- TF_RETURN_IF_ERROR(TriangularSolve(sub.get(), a_param, b_param,
- left_side, lower, transpose_a,
- conjugate_a,
- /*block_size=*/1)
- .status());
+ const int ndims = xla::ShapeUtil::Rank(a_shape);
+ if (ndims < 2) {
+ return errors::InvalidArgument(
+ "Arguments to TriangularSolve must have rank >= 2: ", ndims);
+ }
+ // The batch dimensions must be equal.
+ std::vector<int64> batch_dimensions;
+ for (int i = 0; i < ndims - 2; ++i) {
+ int64 a_size = a_shape.dimensions(i);
+ int64 b_size = b_shape.dimensions(i);
+ if (a_size != b_size) {
+ return errors::InvalidArgument(
+ "Batch dimensions of arguments to TriangularSolve must be equal: ",
+ xla::ShapeUtil::HumanString(a_shape), " vs ",
+ xla::ShapeUtil::HumanString(b_shape));
}
+ batch_dimensions.push_back(a_size);
+ }
- TF_ASSIGN_OR_RETURN(computation, sub->Build());
+ if (xla::ShapeUtil::GetDimension(a_shape, -1) !=
+ xla::ShapeUtil::GetDimension(a_shape, -2)) {
+ return errors::InvalidArgument(
+ "The 'a' arguments to TriangularSolve must be square matrices: ",
+ xla::ShapeUtil::HumanString(a_shape));
}
- return &computation;
- };
-
- xla::XlaOp output = Zeros(builder, b_shape);
-
- // Right-looking blocked triangular solve.
- // For an explanation of the algorithm, see the TRSM discussion in:
- // Goto, Kazushige, and Robert Van De Geijn. "High-performance implementation
- // of the level-3 BLAS." ACM Transactions on Mathematical Software (TOMS) 35.1
- // (2008): 4.
-
- // In the code comments below, T = lambda x: np.swapaxes(x, -1, -2) if
- // conjugate_a is False, or T = lambda x: np.conj(np.swapaxes(x, -1, -2)) if
- // conjugate_a is True.
-
- if (!left_side && lower == transpose_a) {
- // for i in range(0, a.shape[-1], block_size):
- for (int64 i = 0; i < n; i += block_size) {
- int64 k = std::min(block_size, n - i);
-
- // output[..., :, i:i+k] = triangular_solve(
- // a[..., i:i+k, i:i+k], b[..., :, i:i+k], ..., block_size=1)
- TF_ASSIGN_OR_RETURN(auto a_slice,
- SliceInMinorDims(builder, a, {i, i}, {i + k, i + k}));
- TF_ASSIGN_OR_RETURN(auto b_slice,
- SliceInMinorDims(builder, b, {0, i}, {m, i + k}));
- xla::XlaOp update;
- if (k > 1) {
- TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
- get_base_triangular_solve(k));
- update = builder->Call(*solve, {a_slice, b_slice});
- } else {
- TF_ASSIGN_OR_RETURN(auto a_slice_conj,
- MaybeConjugate(builder, a_slice, conjugate_a));
- update = builder->Div(b_slice, a_slice_conj);
+ const int64 m = xla::ShapeUtil::GetDimension(b_shape, -2);
+ const int64 n = xla::ShapeUtil::GetDimension(b_shape, -1);
+ if ((left_side ? m : n) != xla::ShapeUtil::GetDimension(a_shape, -1)) {
+ return errors::InvalidArgument(
+ "Arguments to TriangularSolve have incompatible matrix shapes: ",
+ xla::ShapeUtil::HumanString(a_shape), " vs ",
+ xla::ShapeUtil::HumanString(b_shape));
+ }
+
+ if (block_size < 1) {
+ return errors::InvalidArgument(
+ "block_size argument to TriangularSolve must be >= 1; got ",
+ block_size);
+ }
+
+ std::map<int, xla::XlaComputation> base_computations;
+ auto get_base_triangular_solve =
+ [&](int k) -> xla::StatusOr<xla::XlaComputation*> {
+ xla::XlaComputation& computation = base_computations[k];
+ if (computation.IsNull()) {
+ std::unique_ptr<xla::XlaBuilder> sub = builder->CreateSubBuilder(
+ tensorflow::strings::StrCat("trsm_base_", k));
+
+ auto a_param = xla::Parameter(
+ sub.get(), 0,
+ xla::ShapeUtil::MakeShape(b_shape.element_type(),
+ ConcatVectors(batch_dimensions, {k, k})),
+ "a");
+
+ std::array<int64, 2> b_lastd;
+ if (left_side) {
+ b_lastd = {k, n};
+ } else {
+ b_lastd = {m, k};
+ }
+ auto b_param = xla::Parameter(
+ sub.get(), 1,
+ xla::ShapeUtil::MakeShape(b_shape.element_type(),
+ ConcatVectors(batch_dimensions, b_lastd)),
+ "b");
+
+ // We use a left-looking or right-looking subroutine on the block
+ // diagonal in the lower=true cases, while falling back to a recursive
+ // call in others. The left-looking and right-looking subroutines are
+      // written with a While loop and so yield much faster compile times.
+ // Moreover, they can give higher performance on smaller (sub)problems.
+ if (left_side && lower) {
+ TriangularSolveLeftLooking(a_param, b_param, transpose_a,
+ conjugate_a);
+ } else if (!left_side && lower) {
+ TriangularSolveRightLooking(a_param, b_param, transpose_a,
+ conjugate_a);
+ } else {
+ TriangularSolve(a_param, b_param, left_side, lower, transpose_a,
+ conjugate_a,
+ /*block_size=*/1);
+ }
+
+ TF_ASSIGN_OR_RETURN(computation, sub->Build());
}
- TF_ASSIGN_OR_RETURN(
- output, UpdateSliceInMinorDims(builder, output, update, {0, i}));
-
- // if i + k < a.shape[-1]:
- // a_slice_2 = a[..., i+k:, i:i+k] if lower else a[..., i:i+k, i+k:]
- // a_slice_2 = T(a_slice_2) if transpose_a else a_slice_2
- // b[..., :, i+k:] -= np.matmul(output[..., :, i:i+k], a_slice_2)
- if (i + k < n) {
- xla::XlaOp a_slice_2;
+ return &computation;
+ };
+
+ xla::XlaOp output = xla::ZerosLike(b);
+
+ // Right-looking blocked triangular solve.
+ // For an explanation of the algorithm, see the TRSM discussion in:
+ // Goto, Kazushige, and Robert Van De Geijn. "High-performance
+ // implementation of the level-3 BLAS." ACM Transactions on Mathematical
+ // Software (TOMS) 35.1 (2008): 4.
+
+ // In the code comments below, T = lambda x: np.swapaxes(x, -1, -2) if
+ // conjugate_a is False, or T = lambda x: np.conj(np.swapaxes(x, -1, -2)) if
+ // conjugate_a is True.
+
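+    // The four cases below differ only in the sweep direction and in whether
+    // row blocks or column blocks of the output are solved for (an
+    // illustrative summary):
+    //   !left_side && lower == transpose_a: forward sweep, column blocks
+    //   left_side  && lower != transpose_a: forward sweep, row blocks
+    //   !left_side && lower != transpose_a: backward sweep, column blocks
+    //   left_side  && lower == transpose_a: backward sweep, row blocks
+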
+ if (!left_side && lower == transpose_a) {
+ // for i in range(0, a.shape[-1], block_size):
+ for (int64 i = 0; i < n; i += block_size) {
+ int64 k = std::min(block_size, n - i);
+
+ // output[..., :, i:i+k] = triangular_solve(
+ // a[..., i:i+k, i:i+k],
+ // b[..., :, i:i+k] - np.matmul(output[..., :, :i],
+ // a[..., :i, i:i+k]),
+ // ..., block_size=1)
+ auto a_slice = SliceInMinorDims(a, {i, i}, {i + k, i + k});
+ auto b_slice = SliceInMinorDims(b, {0, i}, {m, i + k});
+
+ // Note that we multiply with the full output, since this is faster
+ // than slicing, and output[..., :, i:] = 0
+ xla::XlaOp a_prev;
if (lower) {
- TF_ASSIGN_OR_RETURN(
- a_slice_2, SliceInMinorDims(builder, a, {i + k, i}, {n, i + k}));
+ a_prev = SliceInMinorDims(a, {i, 0}, {i + k, n});
} else {
- TF_ASSIGN_OR_RETURN(
- a_slice_2, SliceInMinorDims(builder, a, {i, i + k}, {i + k, n}));
+ a_prev = SliceInMinorDims(a, {0, i}, {n, i + k});
}
-
- TF_ASSIGN_OR_RETURN(auto b_update,
- BatchDot(builder, update, a_slice_2,
- /*transpose_x=*/false,
- /*transpose_y=*/transpose_a,
- /*conjugate_x=*/false,
- /*conjugate_y=*/conjugate_a));
- TF_ASSIGN_OR_RETURN(auto b_slice_2,
- SliceInMinorDims(builder, b, {0, i + k}, {m, n}));
- b_update = builder->Sub(b_slice_2, b_update);
- TF_ASSIGN_OR_RETURN(
- b, UpdateSliceInMinorDims(builder, b, b_update, {0, i + k}));
+ auto prev_contribution = BatchDot(output, a_prev,
+ /*transpose_x=*/false,
+ /*transpose_y=*/transpose_a,
+ /*conjugate_x=*/false,
+ /*conjugate_y=*/conjugate_a);
+ auto to_solve = b_slice - prev_contribution;
+
+ xla::XlaOp update;
+ if (k > 1) {
+ TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
+ get_base_triangular_solve(k));
+ update = xla::Call(builder, *solve, {a_slice, to_solve});
+ } else {
+ auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
+ update = to_solve / a_slice_conj;
+ }
+ output = UpdateSliceInMinorDims(output, update, {0, i});
}
- }
- } else if (left_side && lower != transpose_a) {
- // for i in range(0, a.shape[-1], block_size):
- for (int64 i = 0; i < m; i += block_size) {
- int64 k = std::min(block_size, m - i);
-
- // output[..., i:i+k, :] = triangular_solve(
- // a[..., i:i+k, i:i+k], b[..., i:i+k, :], ..., block_size=1)
- TF_ASSIGN_OR_RETURN(auto a_slice,
- SliceInMinorDims(builder, a, {i, i}, {i + k, i + k}));
- TF_ASSIGN_OR_RETURN(auto b_slice,
- SliceInMinorDims(builder, b, {i, 0}, {i + k, n}));
- xla::XlaOp update;
- if (k > 1) {
- TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
- get_base_triangular_solve(k));
- update = builder->Call(*solve, {a_slice, b_slice});
- } else {
- TF_ASSIGN_OR_RETURN(auto a_slice_conj,
- MaybeConjugate(builder, a_slice, conjugate_a));
- update = builder->Div(b_slice, a_slice_conj);
- }
- TF_ASSIGN_OR_RETURN(
- output, UpdateSliceInMinorDims(builder, output, update, {i, 0}));
-
- // if i + k < a.shape[-1]:
- // a_slice_2 = a[..., i+k:, i:i+k] if lower else a[..., i:i+k, i+k:]
- // a_slice_2 = T(a_slice_2) if transpose_a else a_slice_2
- // b[..., i+k:, :] -= np.matmul(a_slice_2, output[..., i:i+k, :])
- if (i + k < m) {
- xla::XlaOp a_slice_2;
+ } else if (left_side && lower != transpose_a) {
+ // for i in range(0, a.shape[-1], block_size):
+ for (int64 i = 0; i < m; i += block_size) {
+ int64 k = std::min(block_size, m - i);
+
+ // output[..., i:i+k, :] = triangular_solve(
+ // a[..., i:i+k, i:i+k],
+ // b[..., i:i+k, :] - np.matmul(a[..., i:i+k, :i],
+ // output[..., :i, :]),
+ // ..., block_size=1)
+ auto a_slice = SliceInMinorDims(a, {i, i}, {i + k, i + k});
+ auto b_slice = SliceInMinorDims(b, {i, 0}, {i + k, n});
+
+ xla::XlaOp a_prev;
if (lower) {
- TF_ASSIGN_OR_RETURN(
- a_slice_2, SliceInMinorDims(builder, a, {i + k, i}, {m, i + k}));
+ a_prev = SliceInMinorDims(a, {i, 0}, {i + k, m});
} else {
- TF_ASSIGN_OR_RETURN(
- a_slice_2, SliceInMinorDims(builder, a, {i, i + k}, {i + k, m}));
+ a_prev = SliceInMinorDims(a, {0, i}, {m, i + k});
}
-
- TF_ASSIGN_OR_RETURN(auto b_update, BatchDot(builder, a_slice_2, update,
- /*transpose_x=*/transpose_a,
- /*transpose_y=*/false,
- /*conjugate_x=*/conjugate_a,
- /*conjugate_y=*/false));
- TF_ASSIGN_OR_RETURN(auto b_slice_2,
- SliceInMinorDims(builder, b, {i + k, 0}, {m, n}));
- b_update = builder->Sub(b_slice_2, b_update);
- TF_ASSIGN_OR_RETURN(
- b, UpdateSliceInMinorDims(builder, b, b_update, {i + k, 0}));
- }
- }
- } else if (!left_side && lower != transpose_a) {
- // for i in reversed(range(0, a.shape[-1], block_size)):
- const int64 last_blk_ix = xla::RoundUpToNearest(n, block_size) - block_size;
- for (int64 i = last_blk_ix; i >= 0; i -= block_size) {
- int64 k = std::min(block_size, n - i);
-
- // output[..., :, i:i+k] triangular_solve(
- // a[..., i:i+k, i:i+k], b[..., :, i:i+k], ..., block_size=1)
- TF_ASSIGN_OR_RETURN(auto a_slice,
- SliceInMinorDims(builder, a, {i, i}, {i + k, i + k}));
- TF_ASSIGN_OR_RETURN(auto b_slice,
- SliceInMinorDims(builder, b, {0, i}, {m, i + k}));
- xla::XlaOp update;
- if (k > 1) {
- TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
- get_base_triangular_solve(k));
- update = builder->Call(*solve, {a_slice, b_slice});
- } else {
- TF_ASSIGN_OR_RETURN(auto a_slice_conj,
- MaybeConjugate(builder, a_slice, conjugate_a));
- update = builder->Div(b_slice, a_slice_conj);
+ auto prev_contribution = BatchDot(a_prev, output,
+ /*transpose_x=*/transpose_a,
+ /*transpose_y=*/false,
+ /*conjugate_x=*/conjugate_a,
+ /*conjugate_y=*/false);
+ auto to_solve = b_slice - prev_contribution;
+
+ xla::XlaOp update;
+ if (k > 1) {
+ TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
+ get_base_triangular_solve(k));
+ update = xla::Call(builder, *solve, {a_slice, to_solve});
+ } else {
+ auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
+ update = to_solve / a_slice_conj;
+ }
+ output = UpdateSliceInMinorDims(output, update, {i, 0});
}
- TF_ASSIGN_OR_RETURN(
- output, UpdateSliceInMinorDims(builder, output, update, {0, i}));
-
- // if i - k >= 0:
- // a_slice_2 = a[..., i:i+k, :i] if lower else a[..., :i, i:i+k]
- // a_slice_2 = T(a_slice_2) if transpose_a else a_slice_2
- // b[..., :, :i] -= np.matmul(out[..., :, i:i+k], a_slice_2)
- if (i - k >= 0) {
- xla::XlaOp a_slice_2;
+ } else if (!left_side && lower != transpose_a) {
+ // for i in reversed(range(0, a.shape[-1], block_size)):
+ const int64 last_blk_ix =
+ xla::RoundUpToNearest(n, block_size) - block_size;
+ for (int64 i = last_blk_ix; i >= 0; i -= block_size) {
+ int64 k = std::min(block_size, n - i);
+
+        // output[..., :, i:i+k] = triangular_solve(
+        //     a[..., i:i+k, i:i+k],
+        //     b[..., :, i:i+k] - np.matmul(output[..., :, i+k:],
+        //                                  a[..., i+k:, i:i+k]),
+        //     ..., block_size=1)
+ auto a_slice = SliceInMinorDims(a, {i, i}, {i + k, i + k});
+ auto b_slice = SliceInMinorDims(b, {0, i}, {m, i + k});
+
+ xla::XlaOp a_prev;
if (lower) {
- TF_ASSIGN_OR_RETURN(a_slice_2,
- SliceInMinorDims(builder, a, {i, 0}, {i + k, i}));
+ a_prev = SliceInMinorDims(a, {0, i}, {n, i + k});
} else {
- TF_ASSIGN_OR_RETURN(a_slice_2,
- SliceInMinorDims(builder, a, {0, i}, {i, i + k}));
+ a_prev = SliceInMinorDims(a, {i, 0}, {i + k, n});
}
-
- TF_ASSIGN_OR_RETURN(auto b_update,
- BatchDot(builder, update, a_slice_2,
- /*transpose_x=*/false,
- /*transpose_y=*/transpose_a,
- /*conjugate_x=*/false,
- /*conjugate_y=*/conjugate_a));
- TF_ASSIGN_OR_RETURN(auto b_slice_2,
- SliceInMinorDims(builder, b, {0, 0}, {m, i}));
- b_update = builder->Sub(b_slice_2, b_update);
- TF_ASSIGN_OR_RETURN(
- b, UpdateSliceInMinorDims(builder, b, b_update, {0, 0}));
- }
- }
- } else { // left_side && lower == transpose_a
- // for i in reversed(range(0, a.shape[-1], block_size)):
- const int64 last_blk_ix = xla::RoundUpToNearest(m, block_size) - block_size;
- for (int64 i = last_blk_ix; i >= 0; i -= block_size) {
- int64 k = std::min(block_size, m - i);
-
- // output[..., i:i+k, :] triangular_solve(
- // a[..., i:i+k, i:i+k], b[..., i:i+k, :], ..., block_size=1)
- TF_ASSIGN_OR_RETURN(auto a_slice,
- SliceInMinorDims(builder, a, {i, i}, {i + k, i + k}));
- TF_ASSIGN_OR_RETURN(auto b_slice,
- SliceInMinorDims(builder, b, {i, 0}, {i + k, n}));
- xla::XlaOp update;
- if (k > 1) {
- TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
- get_base_triangular_solve(k));
- update = builder->Call(*solve, {a_slice, b_slice});
- } else {
- TF_ASSIGN_OR_RETURN(auto a_slice_conj,
- MaybeConjugate(builder, a_slice, conjugate_a));
- update = builder->Div(b_slice, a_slice_conj);
+ auto prev_contribution = BatchDot(output, a_prev,
+ /*transpose_x=*/false,
+ /*transpose_y=*/transpose_a,
+ /*conjugate_x=*/false,
+ /*conjugate_y=*/conjugate_a);
+ auto to_solve = b_slice - prev_contribution;
+
+ xla::XlaOp update;
+ if (k > 1) {
+ TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
+ get_base_triangular_solve(k));
+ update = xla::Call(builder, *solve, {a_slice, to_solve});
+ } else {
+ auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
+ update = to_solve / a_slice_conj;
+ }
+ output = UpdateSliceInMinorDims(output, update, {0, i});
}
- TF_ASSIGN_OR_RETURN(
- output, UpdateSliceInMinorDims(builder, output, update, {i, 0}));
-
- // if i - k >= 0:
- // a_slice_2 = a[..., i:i+k, :i] if lower else a[..., :i, i:i+k]
- // a_slice_2 = T(a_slice_2) if transpose_a else a_slice_2
- // b[..., :i, :] -= np.matmul(a_slice_2, out[..., i:i+k, :])
- if (i - k >= 0) {
- xla::XlaOp a_slice_2;
+ } else { // left_side && lower == transpose_a
+ // for i in reversed(range(0, a.shape[-1], block_size)):
+ const int64 last_blk_ix =
+ xla::RoundUpToNearest(m, block_size) - block_size;
+ for (int64 i = last_blk_ix; i >= 0; i -= block_size) {
+ int64 k = std::min(block_size, m - i);
+
+        // output[..., i:i+k, :] = triangular_solve(
+        //     a[..., i:i+k, i:i+k],
+        //     b[..., i:i+k, :] - np.matmul(a[..., i:i+k, i+k:],
+        //                                  output[..., i+k:, :]),
+        //     ..., block_size=1)
+ auto a_slice = SliceInMinorDims(a, {i, i}, {i + k, i + k});
+ auto b_slice = SliceInMinorDims(b, {i, 0}, {i + k, n});
+
+ xla::XlaOp a_prev;
if (lower) {
- TF_ASSIGN_OR_RETURN(a_slice_2,
- SliceInMinorDims(builder, a, {i, 0}, {i + k, i}));
+ a_prev = SliceInMinorDims(a, {0, i}, {m, i + k});
} else {
- TF_ASSIGN_OR_RETURN(a_slice_2,
- SliceInMinorDims(builder, a, {0, i}, {i, i + k}));
+ a_prev = SliceInMinorDims(a, {i, 0}, {i + k, m});
}
-
- TF_ASSIGN_OR_RETURN(auto b_update, BatchDot(builder, a_slice_2, update,
- /*transpose_x=*/transpose_a,
- /*transpose_y=*/false,
- /*conjugate_x=*/conjugate_a,
- /*conjugate_y=*/false));
- TF_ASSIGN_OR_RETURN(auto b_slice_2,
- SliceInMinorDims(builder, b, {0, 0}, {i, n}));
- b_update = builder->Sub(b_slice_2, b_update);
- TF_ASSIGN_OR_RETURN(
- b, UpdateSliceInMinorDims(builder, b, b_update, {0, 0}));
+ auto prev_contribution = BatchDot(a_prev, output,
+ /*transpose_x=*/transpose_a,
+ /*transpose_y=*/false,
+ /*conjugate_x=*/conjugate_a,
+ /*conjugate_y=*/false);
+ auto to_solve = b_slice - prev_contribution;
+
+ xla::XlaOp update;
+ if (k > 1) {
+ TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
+ get_base_triangular_solve(k));
+ update = xla::Call(builder, *solve, {a_slice, to_solve});
+ } else {
+ auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
+ update = to_solve / a_slice_conj;
+ }
+ output = UpdateSliceInMinorDims(output, update, {i, 0});
}
}
- }
- return output;
+ return output;
+ });
}
-xla::StatusOr<xla::XlaOp> TriangularSolveLeftLooking(xla::XlaBuilder* builder,
- const xla::XlaOp& a,
- const xla::XlaOp& b,
- bool transpose_a,
- bool conjugate_a) {
- TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
- TF_ASSIGN_OR_RETURN(xla::Shape b_shape, builder->GetShape(b));
- const int64 m = xla::ShapeUtil::GetDimension(b_shape, -2);
- const int64 n = xla::ShapeUtil::GetDimension(b_shape, -1);
- const int64 ndims = xla::ShapeUtil::Rank(a_shape);
-
- std::vector<int64> batch_dimensions;
- for (int i = 0; i < ndims - 2; ++i) {
- int64 a_size = a_shape.dimensions(i);
- batch_dimensions.push_back(a_size);
- }
-
- // The main computation is performed in a While loop.
-
- // Allocate the output and set its first or last row,
- // output = np.zeros_like(b)
- // if transpose_a:
- // output[..., m-1:, :] = b[..., m-1:, :] / a[..., m-1:, m-1:]
- // else:
- // output[..., :1, :] = b[..., :1, :] / a[..., :1, :1]
- xla::XlaOp output = Zeros(builder, b_shape);
- {
- auto i = transpose_a ? m - 1 : 0;
- TF_ASSIGN_OR_RETURN(auto a_slice,
- SliceInMinorDims(builder, a, {i, i}, {i + 1, i + 1}));
- TF_ASSIGN_OR_RETURN(auto b_slice,
- SliceInMinorDims(builder, b, {i, 0}, {i + 1, n}));
- TF_ASSIGN_OR_RETURN(auto a_slice_conj,
- MaybeConjugate(builder, a_slice, conjugate_a));
- auto update = builder->Div(b_slice, a_slice_conj);
- TF_ASSIGN_OR_RETURN(
- output, UpdateSliceInMinorDims(builder, output, update, {i, 0}));
- }
-
- // Construct the initial loop carry tuple,
- // if transpose_a:
- // init = (m-2, output, a, b)
- // else:
- // init = (1, output, a, b)
- std::vector<xla::Shape> tuple_shapes = {
- // The loop iteration counter is a scalar, incremented each iteration.
- xla::ShapeUtil::MakeShape(xla::S32, {}),
- // The output has the shape of b, with one row updated each iteration.
- b_shape,
- // The coefficient matrix a is a loop invariant.
- a_shape,
- // The right-hand-side matrix b is a loop invariant.
- b_shape};
- xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(tuple_shapes);
- auto init_i = builder->ConstantR0<int32>(transpose_a ? m - 2 : 1);
- auto init = builder->Tuple({init_i, output, a, b});
-
- // Construct the loop condition function,
- // def cond_fun(loop_carry):
- // i, output, a, b = loop_carry
- // return i >= 0 if transpose_a else i < m
- std::unique_ptr<xla::XlaBuilder> condb =
- builder->CreateSubBuilder("TriangularSolveLeftLookingWhileCond");
- {
- auto i = condb->GetTupleElement(
- condb->Parameter(0, tuple_shape,
- "TriangularSolveLeftLookingWhileTuple"),
- 0);
+xla::XlaOp TriangularSolveLeftLooking(xla::XlaOp a, xla::XlaOp b,
+ bool transpose_a, bool conjugate_a) {
+ xla::XlaBuilder* builder = a.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
+ TF_ASSIGN_OR_RETURN(xla::Shape b_shape, builder->GetShape(b));
+ const int64 m = xla::ShapeUtil::GetDimension(b_shape, -2);
+ const int64 n = xla::ShapeUtil::GetDimension(b_shape, -1);
+ const int64 ndims = xla::ShapeUtil::Rank(a_shape);
+
+ std::vector<int64> batch_dimensions;
+ int64 num_batches = 1;
+ for (int i = 0; i < ndims - 2; ++i) {
+ int64 a_size = a_shape.dimensions(i);
+ batch_dimensions.push_back(a_size);
+ num_batches = num_batches * a_size;
+ }
+
+ // Rescale the input to be unit triangular
+ auto diag = Diagonal(a);
+ xla::XlaOp scaled_a;
+ std::vector<int64> broadcast_dimensions(ndims - 1);
+ std::iota(broadcast_dimensions.begin(), broadcast_dimensions.end(), 0);
if (transpose_a) {
- condb->Ge(i, condb->ConstantR0<int32>(0));
+ scaled_a = Div(a, diag, broadcast_dimensions);
} else {
- condb->Lt(i, condb->ConstantR0<int32>(m));
+ // Broadcast over the rows
+ broadcast_dimensions[ndims - 2] = ndims - 1;
+ scaled_a = Div(a, diag, broadcast_dimensions);
}
- }
- TF_ASSIGN_OR_RETURN(auto cond, condb->Build());
-
- // Construct the loop body function,
- // def body_fun(loop_carry):
- // i, output, a, b = loop_carry
- // if transpose_a:
- // a_row = np.swapaxes(a[..., i+1:, i:i+1], -1 -2)
- // else:
- // a_row = a[..., i:i+1, :i]
- // result_row = b[..., i:i+1, :] - np.matmul(a_row, output[..., :, :])
- // output[..., i:i+1, :] = result_row / a[..., i:i+1, i:i+1]
- // if transpose_a:
- // return (i - 1, output, a, b)
- // else:
- // return (i + 1, output, a, b)
- // We have to do some extra FLOPs propagating zeros in the matrix multiply
- // because we can't have the size of its arguments depend on the loop counter.
- std::unique_ptr<xla::XlaBuilder> bodyb =
- builder->CreateSubBuilder("TriangularSolveLeftLookingWhileBody");
- {
- auto input_tuple = bodyb->Parameter(0, tuple_shape,
- "TriangularSolveLeftLookingWhileTuple");
- // i, output, a, b = loop_carry
- auto i = bodyb->GetTupleElement(input_tuple, 0);
- auto body_out = bodyb->GetTupleElement(input_tuple, 1);
- auto body_a = bodyb->GetTupleElement(input_tuple, 2);
- auto body_b = bodyb->GetTupleElement(input_tuple, 3);
- auto zero = bodyb->ConstantR0<int32>(0);
+ // The main computation is performed in a While loop.
- // We'd like to implement this:
- // if transpose_a:
- // a_row = T(a[..., i+1:, i:i+1])
- // result_row = (b[..., i:i+1, :]
- // - np.matmul(a_row, body_out[..., i+1:, :]))
- // else:
- // result_row = (b[..., i:i+1, :]
- // - np.matmul(a[..., i:i+1, :i], body_out[..., :i, :]))
- // But since we can't have intermediate array sizes depend on the loop
- // counter, we instead exploit the fact that we initialized the output to
- // all zeros and use that as zero-padding (doing unnecessary FLOPs).
- xla::XlaOp a_row;
- if (transpose_a) {
- TF_ASSIGN_OR_RETURN(a_row, DynamicSliceInMinorDims(bodyb.get(), body_a,
- {zero, i}, {m, 1}));
- } else {
- TF_ASSIGN_OR_RETURN(a_row, DynamicSliceInMinorDims(bodyb.get(), body_a,
- {i, zero}, {1, m}));
+ // Allocate the output and set its first or last row,
+ // output = np.zeros_like(b)
+ // if transpose_a:
+ // output[..., m-1:, :] = b[..., m-1:, :] / a[..., m-1:, m-1:]
+ // else:
+ // output[..., :1, :] = b[..., :1, :] / a[..., :1, :1]
+ xla::XlaOp output = xla::ZerosLike(b);
+ {
+ auto i = transpose_a ? m - 1 : 0;
+ auto a_slice = SliceInMinorDims(scaled_a, {i, i}, {i + 1, i + 1});
+ auto b_slice = SliceInMinorDims(b, {i, 0}, {i + 1, n});
+ auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
+ auto update = b_slice / a_slice_conj;
+ output = UpdateSliceInMinorDims(output, update, {i, 0});
}
- TF_ASSIGN_OR_RETURN(auto b_update, BatchDot(bodyb.get(), a_row, body_out,
- /*transpose_x=*/transpose_a,
- /*transpose_y=*/false,
- /*conjugate_x=*/conjugate_a,
- /*conjugate_y=*/false));
- TF_ASSIGN_OR_RETURN(
- auto result_row_slice,
- DynamicSliceInMinorDims(bodyb.get(), body_b, {i, zero}, {1, n}));
- auto result_row = bodyb->Sub(result_row_slice, b_update);
-
- // body_out[..., i:i+1, :] = result_row / a[..., i:i+1, i:i+1]
- TF_ASSIGN_OR_RETURN(auto a_elt, DynamicSliceInMinorDims(bodyb.get(), body_a,
- {i, i}, {1, 1}));
- TF_ASSIGN_OR_RETURN(auto a_elt_conj,
- MaybeConjugate(bodyb.get(), a_elt, conjugate_a));
- auto div_result = bodyb->Div(result_row, a_elt_conj);
- TF_ASSIGN_OR_RETURN(body_out,
- DynamicUpdateSliceInMinorDims(bodyb.get(), body_out,
- div_result, {i, zero}));
+ // Construct the initial loop carry tuple,
// if transpose_a:
- // return (i - 1, body_out, a, b)
+ // init = (m-2, output, a, b)
// else:
- // return (i + 1, body_out, a, b)
- auto next_i = bodyb->Add(i, bodyb->ConstantR0<int32>(transpose_a ? -1 : 1));
- bodyb->Tuple({next_i, body_out, body_a, body_b});
- }
- TF_ASSIGN_OR_RETURN(auto body, bodyb->Build());
-
- // Construct the While loop and return the result,
- // return while_loop(cond_fun, body_fun, init)[1]
- auto triangular_solve_left_looking_while = builder->While(cond, body, init);
- return builder->GetTupleElement(triangular_solve_left_looking_while, 1);
+ // init = (1, output, a, b)
+ std::vector<xla::Shape> tuple_shapes = {
+        // The loop iteration counter is a scalar, stepped by one each iteration.
+ xla::ShapeUtil::MakeShape(xla::S32, {}),
+ // The output has the shape of b, with one row updated each iteration.
+ b_shape,
+ // The coefficient matrix a is a loop invariant.
+ a_shape,
+ // The right-hand-side matrix b is a loop invariant.
+ b_shape};
+ xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(tuple_shapes);
+ auto init_i = xla::ConstantR0<int32>(builder, transpose_a ? m - 2 : 1);
+ auto init = xla::Tuple(builder, {init_i, output, scaled_a, b});
+
+ // Construct the loop condition function,
+ // def cond_fun(loop_carry):
+ // i, output, a, b = loop_carry
+ // return i >= 0 if transpose_a else i < m
+ std::unique_ptr<xla::XlaBuilder> condb =
+ builder->CreateSubBuilder("TriangularSolveLeftLookingWhileCond");
+ {
+ auto i = xla::GetTupleElement(
+ xla::Parameter(condb.get(), 0, tuple_shape,
+ "TriangularSolveLeftLookingWhileTuple"),
+ 0);
+ if (transpose_a) {
+ xla::Ge(i, xla::ConstantR0<int32>(condb.get(), 0));
+ } else {
+ xla::Lt(i, xla::ConstantR0<int32>(condb.get(), m));
+ }
+ }
+ TF_ASSIGN_OR_RETURN(auto cond, condb->Build());
+
+ // Construct the loop body function,
+ // def body_fun(loop_carry):
+ // i, output, a, b = loop_carry
+ // if transpose_a:
+  //     a_row = np.swapaxes(a[..., i+1:, i:i+1], -1, -2)
+ // else:
+ // a_row = a[..., i:i+1, :i]
+ // result_row = b[..., i:i+1, :] - np.matmul(a_row, output[..., :, :])
+ // output[..., i:i+1, :] = result_row / a[..., i:i+1, i:i+1]
+ // if transpose_a:
+ // return (i - 1, output, a, b)
+ // else:
+ // return (i + 1, output, a, b)
+ // We have to do some extra FLOPs propagating zeros in the matrix multiply
+ // because we can't have the size of its arguments depend on the loop
+ // counter.
+ std::unique_ptr<xla::XlaBuilder> bodyb =
+ builder->CreateSubBuilder("TriangularSolveLeftLookingWhileBody");
+ {
+ auto input_tuple = xla::Parameter(bodyb.get(), 0, tuple_shape,
+ "TriangularSolveLeftLookingWhileTuple");
+
+ // i, output, a, b = loop_carry
+ auto i = xla::GetTupleElement(input_tuple, 0);
+ auto body_out = xla::GetTupleElement(input_tuple, 1);
+ auto body_a = xla::GetTupleElement(input_tuple, 2);
+ auto body_b = xla::GetTupleElement(input_tuple, 3);
+ auto zero = xla::ConstantR0<int32>(bodyb.get(), 0);
+
+ // We'd like to implement this:
+ // if transpose_a:
+ // a_row = T(a[..., i+1:, i:i+1])
+ // result_row = (b[..., i:i+1, :]
+ // - np.matmul(a_row, body_out[..., i+1:, :]))
+ // else:
+ // result_row = (b[..., i:i+1, :]
+ // - np.matmul(a[..., i:i+1, :i], body_out[..., :i, :]))
+ // But since we can't have intermediate array sizes depend on the loop
+ // counter, we instead exploit the fact that we initialized the output to
+ // all zeros and use that as zero-padding (doing unnecessary FLOPs).
+ xla::XlaOp a_row;
+ if (transpose_a) {
+ a_row = DynamicSliceInMinorDims(body_a, {zero, i}, {m, 1});
+ } else {
+ a_row = DynamicSliceInMinorDims(body_a, {i, zero}, {1, m});
+ }
+ auto b_update = BatchDot(a_row, body_out,
+ /*transpose_x=*/transpose_a,
+ /*transpose_y=*/false,
+ /*conjugate_x=*/conjugate_a,
+ /*conjugate_y=*/false);
+ auto result_row_slice =
+ DynamicSliceInMinorDims(body_b, {i, zero}, {1, n});
+ auto result_row = result_row_slice - b_update;
+
+ // body_out[..., i:i+1, :] = result_row
+ body_out = DynamicUpdateSliceInMinorDims(body_out, result_row, {i, zero});
+
+ // if transpose_a:
+ // return (i - 1, body_out, a, b)
+ // else:
+ // return (i + 1, body_out, a, b)
+ auto next_i = xla::Add(
+ i, xla::ConstantR0<int32>(bodyb.get(), transpose_a ? -1 : 1));
+ xla::Tuple(bodyb.get(), {next_i, body_out, body_a, body_b});
+ }
+ TF_ASSIGN_OR_RETURN(auto body, bodyb->Build());
+
+ // Construct the While loop and return the result,
+ // return while_loop(cond_fun, body_fun, init)[1]
+ auto triangular_solve_left_looking_while = xla::While(cond, body, init);
+ output = xla::GetTupleElement(triangular_solve_left_looking_while, 1);
+ auto scaling = MaybeConjugate(diag, conjugate_a);
+ // Broadcast over the columns
+ broadcast_dimensions[ndims - 2] = ndims - 2;
+ return Div(output, scaling, broadcast_dimensions);
+ });
}
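
For reference, the numpy pseudocode in the comments above is ordinary forward substitution. A minimal standalone C++ sketch of the non-transposed, unbatched case (illustrative only; the XLA version additionally rescales to a unit diagonal, handles batches, and zero-pads so slice sizes never depend on the loop counter):

    #include <cstddef>
    #include <vector>

    // Solves L * x = b, where L is an m x m lower-triangular matrix and b is
    // m x n, both row-major. One row of x is produced per iteration, matching
    // the left-looking loop above.
    std::vector<double> SolveLowerTriangular(const std::vector<double>& L,
                                             const std::vector<double>& b,
                                             std::size_t m, std::size_t n) {
      std::vector<double> x(m * n, 0.0);
      for (std::size_t i = 0; i < m; ++i) {
        for (std::size_t col = 0; col < n; ++col) {
          // result_row = b[i, :] - np.matmul(L[i, :i], x[:i, :])
          double acc = b[i * n + col];
          for (std::size_t k = 0; k < i; ++k) {
            acc -= L[i * m + k] * x[k * n + col];
          }
          // x[i, :] = result_row / L[i, i]
          x[i * n + col] = acc / L[i * m + i];
        }
      }
      return x;
    }
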
-xla::StatusOr<xla::XlaOp> TriangularSolveRightLooking(xla::XlaBuilder* builder,
- const xla::XlaOp& a,
- const xla::XlaOp& b,
- bool transpose_a,
- bool conjugate_a) {
- TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
- TF_ASSIGN_OR_RETURN(xla::Shape b_shape, builder->GetShape(b));
- const int64 m = xla::ShapeUtil::GetDimension(b_shape, -2);
- const int64 n = xla::ShapeUtil::GetDimension(b_shape, -1);
- const int64 ndims = xla::ShapeUtil::Rank(a_shape);
-
- std::vector<int64> batch_dimensions;
- for (int i = 0; i < ndims - 2; ++i) {
- int64 a_size = a_shape.dimensions(i);
- batch_dimensions.push_back(a_size);
- }
-
- // The main computation is performed in a While loop.
- xla::XlaOp output = Zeros(builder, b_shape);
-
- // Construct the initial loop carry tuple,
- // if transpose_a:
- // init = (0, output, a, b)
- // else:
- // init = (n-1, output, a, b)
- std::vector<xla::Shape> tuple_shapes = {
- // The loop iteration counter is a scalar, incremented each iteration.
- xla::ShapeUtil::MakeShape(xla::S32, {}),
- // The output has the shape of b, with one row updated each iteration.
- b_shape,
- // The coefficient matrix a is a loop invariant.
- a_shape,
- // The right-hand-side matrix b is a loop invariant.
- b_shape};
- xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(tuple_shapes);
- auto init_i = builder->ConstantR0<int32>(transpose_a ? 0 : n - 1);
- auto init = builder->Tuple({init_i, output, a, b});
-
- // Construct the loop condition function,
- // def cond_fun(loop_carry):
- // i, output, a, b = loop_carry
- // return i < n if transpose_a else i >= 0
- std::unique_ptr<xla::XlaBuilder> condb =
- builder->CreateSubBuilder("TriangularSolveRightLookingWhileCond");
- {
- auto i = condb->GetTupleElement(
- condb->Parameter(0, tuple_shape,
- "TriangularSolveRightLookingWhileTuple"),
- 0);
+xla::XlaOp TriangularSolveRightLooking(xla::XlaOp a, xla::XlaOp b,
+ bool transpose_a, bool conjugate_a) {
+ xla::XlaBuilder* builder = a.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
+ TF_ASSIGN_OR_RETURN(xla::Shape b_shape, builder->GetShape(b));
+ const int64 m = xla::ShapeUtil::GetDimension(b_shape, -2);
+ const int64 n = xla::ShapeUtil::GetDimension(b_shape, -1);
+ const int64 ndims = xla::ShapeUtil::Rank(a_shape);
+
+ std::vector<int64> batch_dimensions;
+ int64 num_batches = 1;
+ for (int i = 0; i < ndims - 2; ++i) {
+ int64 a_size = a_shape.dimensions(i);
+ batch_dimensions.push_back(a_size);
+ num_batches = num_batches * a_size;
+ }
+
+ // Rescale the input to be unit triangular
+ auto diag = Diagonal(a);
+ xla::XlaOp scaled_a;
+ std::vector<int64> broadcast_dimensions(ndims - 1);
+ std::iota(broadcast_dimensions.begin(), broadcast_dimensions.end(), 0);
if (transpose_a) {
- condb->Lt(i, condb->ConstantR0<int32>(n));
+ // Broadcast over the rows
+ broadcast_dimensions[ndims - 2] = ndims - 1;
+ scaled_a = Div(a, diag, broadcast_dimensions);
} else {
- condb->Ge(i, condb->ConstantR0<int32>(0));
+ scaled_a = Div(a, diag, broadcast_dimensions);
}
- }
- TF_ASSIGN_OR_RETURN(auto cond, condb->Build());
-
- // Construct the loop body function,
- // def body_fun(loop_carry):
- // i, output, a, b = loop_carry
- // if transpose_a:
- // a_row = np.swapaxes(a[..., :, i:i+1], -1 -2)
- // else:
- // a_row = a[..., :, i:i+1]
- // result_row = b[..., :, i:i+1] - np.matmul(output, a_row)
- // output[..., :, i:i+1] = result_row / a[..., i:i+1, i:i+1]
- // if transpose_a:
- // return (i - 1, output, a, b)
- // else:
- // return (i + 1, output, a, b)
- // We have to do some extra FLOPs propagating zeros in the matrix multiply
- // because we can't have the size of its arguments depend on the loop counter.
- std::unique_ptr<xla::XlaBuilder> bodyb =
- builder->CreateSubBuilder("TriangularSolveRightLookingWhileBody");
- {
- auto input_tuple = bodyb->Parameter(
- 0, tuple_shape, "TriangularSolveRightLookingWhileTuple");
-
- // i, output, a, b = loop_carry
- auto i = bodyb->GetTupleElement(input_tuple, 0);
- auto body_out = bodyb->GetTupleElement(input_tuple, 1);
- auto body_a = bodyb->GetTupleElement(input_tuple, 2);
- auto body_b = bodyb->GetTupleElement(input_tuple, 3);
- auto zero = bodyb->ConstantR0<int32>(0);
-
- // We'd like to implement b[..., :, i:i+1] - np.matmul(output, a[..., :,
- // i:i+1]) But since we can't have intermediate array sizes depend on the
- // loop counter, we instead exploit the fact that we initialized the output
- // to all zeros and use that as zero-padding (doing unnecessary FLOPs).
- TF_ASSIGN_OR_RETURN(auto b_update, BatchDot(bodyb.get(), body_out, body_a,
- /*transpose_x=*/false,
- /*transpose_y=*/transpose_a,
- /*conjugate_x=*/false,
- /*conjugate_y=*/conjugate_a));
- // result = b - np.matmul(output, a)
- auto result = bodyb->Sub(body_b, b_update);
- // result_row = result[..., :, i:i+1]
- TF_ASSIGN_OR_RETURN(
- auto result_row,
- DynamicSliceInMinorDims(bodyb.get(), result, {zero, i}, {m, 1}));
-
- // body_out[..., :, i:i+1] = result_row / a[..., i:i+1, i:i+1]
- TF_ASSIGN_OR_RETURN(auto a_ii, DynamicSliceInMinorDims(bodyb.get(), body_a,
- {i, i}, {1, 1}));
- TF_ASSIGN_OR_RETURN(auto a_ii_conj,
- MaybeConjugate(bodyb.get(), a_ii, conjugate_a));
- auto div_result = bodyb->Div(result_row, a_ii_conj);
- TF_ASSIGN_OR_RETURN(body_out,
- DynamicUpdateSliceInMinorDims(bodyb.get(), body_out,
- div_result, {zero, i}));
+ // The main computation is performed in a While loop.
+ xla::XlaOp output = xla::ZerosLike(b);
+
+ // Construct the initial loop carry tuple,
// if transpose_a:
- // return (i + 1, body_out, a, b)
+ // init = (0, output, a, b)
// else:
- // return (i - 1, body_out, a, b)
- auto next_i = bodyb->Add(i, bodyb->ConstantR0<int32>(transpose_a ? 1 : -1));
- bodyb->Tuple({next_i, body_out, body_a, body_b});
- }
- TF_ASSIGN_OR_RETURN(auto body, bodyb->Build());
-
- // Construct the While loop and return the result,
- // return while_loop(cond_fun, body_fun, init)[1]
- auto triangular_solve_left_looking_while = builder->While(cond, body, init);
- return builder->GetTupleElement(triangular_solve_left_looking_while, 1);
+ // init = (n-1, output, a, b)
+ std::vector<xla::Shape> tuple_shapes = {
+        // The loop iteration counter is a scalar, stepped by one each iteration.
+ xla::ShapeUtil::MakeShape(xla::S32, {}),
+        // The output has the shape of b, with one column updated each iteration.
+ b_shape,
+ // The coefficient matrix a is a loop invariant.
+ a_shape,
+ // The right-hand-side matrix b is a loop invariant.
+ b_shape};
+ xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(tuple_shapes);
+ auto init_i = xla::ConstantR0<int32>(builder, transpose_a ? 0 : n - 1);
+ auto init = xla::Tuple(builder, {init_i, output, scaled_a, b});
+
+ // Construct the loop condition function,
+ // def cond_fun(loop_carry):
+ // i, output, a, b = loop_carry
+ // return i < n if transpose_a else i >= 0
+ std::unique_ptr<xla::XlaBuilder> condb =
+ builder->CreateSubBuilder("TriangularSolveRightLookingWhileCond");
+ {
+ auto i = xla::GetTupleElement(
+ xla::Parameter(condb.get(), 0, tuple_shape,
+ "TriangularSolveRightLookingWhileTuple"),
+ 0);
+ if (transpose_a) {
+ xla::Lt(i, xla::ConstantR0<int32>(condb.get(), n));
+ } else {
+ xla::Ge(i, xla::ConstantR0<int32>(condb.get(), 0));
+ }
+ }
+ TF_ASSIGN_OR_RETURN(auto cond, condb->Build());
+
+ // Construct the loop body function,
+ // def body_fun(loop_carry):
+ // i, output, a, b = loop_carry
+ // if transpose_a:
+ // a_row = np.swapaxes(a[..., :, i:i+1], -1, -2)
+ // else:
+ // a_row = a[..., :, i:i+1]
+ // result_row = b[..., :, i:i+1] - np.matmul(output, a_row)
+ // output[..., :, i:i+1] = result_row / a[..., i:i+1, i:i+1]
+    //   if transpose_a:
+    //     return (i + 1, output, a, b)
+    //   else:
+    //     return (i - 1, output, a, b)
+ // We have to do some extra FLOPs propagating zeros in the matrix multiply
+ // because we can't have the size of its arguments depend on the loop
+ // counter.
+ std::unique_ptr<xla::XlaBuilder> bodyb =
+ builder->CreateSubBuilder("TriangularSolveRightLookingWhileBody");
+ {
+ auto input_tuple = xla::Parameter(
+ bodyb.get(), 0, tuple_shape, "TriangularSolveRightLookingWhileTuple");
+
+ // i, output, a, b = loop_carry
+ auto i = xla::GetTupleElement(input_tuple, 0);
+ auto body_out = xla::GetTupleElement(input_tuple, 1);
+ auto body_a = xla::GetTupleElement(input_tuple, 2);
+ auto body_b = xla::GetTupleElement(input_tuple, 3);
+ auto zero = xla::ConstantR0<int32>(bodyb.get(), 0);
+
+      // result_row = b[..., :, i:i+1] - np.matmul(output, a[..., :, i:i+1])
+      // (a transposed row of a replaces the column when transpose_a is set)
+ auto body_b_slice = DynamicSliceInMinorDims(body_b, {zero, i}, {m, 1});
+ xla::XlaOp a_slice;
+ if (transpose_a) {
+ a_slice = DynamicSliceInMinorDims(body_a, {i, zero}, {1, n});
+ } else {
+ a_slice = DynamicSliceInMinorDims(body_a, {zero, i}, {n, 1});
+ }
+ auto b_update = body_b_slice - BatchDot(body_out, a_slice,
+ /*transpose_x=*/false,
+ /*transpose_y=*/transpose_a,
+ /*conjugate_x=*/false,
+ /*conjugate_y=*/conjugate_a);
+
+ // body_out[..., :, i:i+1] = b_update
+ body_out = DynamicUpdateSliceInMinorDims(body_out, b_update, {zero, i});
+
+ // if transpose_a:
+ // return (i + 1, body_out, a, b)
+ // else:
+ // return (i - 1, body_out, a, b)
+ auto next_i = xla::Add(
+ i, xla::ConstantR0<int32>(bodyb.get(), transpose_a ? 1 : -1));
+ xla::Tuple(bodyb.get(), {next_i, body_out, body_a, body_b});
+ }
+ TF_ASSIGN_OR_RETURN(auto body, bodyb->Build());
+
+ // Construct the While loop and return the result,
+ // return while_loop(cond_fun, body_fun, init)[1]
+    auto triangular_solve_right_looking_while = xla::While(cond, body, init);
+    output = xla::GetTupleElement(triangular_solve_right_looking_while, 1);
+ auto scaling = MaybeConjugate(diag, conjugate_a);
+ // Broadcast over the rows
+ broadcast_dimensions[ndims - 2] = ndims - 1;
+ return Div(output, scaling, broadcast_dimensions);
+ });
}
} // namespace tensorflow
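
Both solvers rely on the same rescaling identity, worth spelling out once. With D = diag(A) and \tilde{A} = A D^{-1} (so \tilde{A} is unit triangular),

    A x = b   <=>   \tilde{A} (D x) = b,

so the while loop can solve \tilde{A} y = b without a per-row division, and the trailing Div(output, scaling, ...) recovers x = D^{-1} y. The transposed paths scale on the other side, A = D \tilde{A}, to the same effect.
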
diff --git a/tensorflow/compiler/tf2xla/lib/triangular_solve.h b/tensorflow/compiler/tf2xla/lib/triangular_solve.h
index 540c26b247..7eb9238014 100644
--- a/tensorflow/compiler/tf2xla/lib/triangular_solve.h
+++ b/tensorflow/compiler/tf2xla/lib/triangular_solve.h
@@ -57,23 +57,15 @@ namespace tensorflow {
//
// Uses a blocked algorithm if `block_size` is > 1; if block_size == 1 then no
// blocking is used.
-xla::StatusOr<xla::XlaOp> TriangularSolve(xla::XlaBuilder* builder,
- const xla::XlaOp& a, xla::XlaOp b,
- bool left_side, bool lower,
- bool transpose_a, bool conjugate_a,
- int64 block_size = 256);
+xla::XlaOp TriangularSolve(xla::XlaOp a, xla::XlaOp b, bool left_side,
+ bool lower, bool transpose_a, bool conjugate_a,
+ int64 block_size = 128);
-xla::StatusOr<xla::XlaOp> TriangularSolveLeftLooking(xla::XlaBuilder* builder,
- const xla::XlaOp& a,
- const xla::XlaOp& b,
- bool transpose_a,
- bool conjugate_a);
+xla::XlaOp TriangularSolveLeftLooking(xla::XlaOp a, xla::XlaOp b,
+ bool transpose_a, bool conjugate_a);
-xla::StatusOr<xla::XlaOp> TriangularSolveRightLooking(xla::XlaBuilder* builder,
- const xla::XlaOp& a,
- const xla::XlaOp& b,
- bool transpose_a,
- bool conjugate_a);
+xla::XlaOp TriangularSolveRightLooking(xla::XlaOp a, xla::XlaOp b,
+ bool transpose_a, bool conjugate_a);
} // namespace tensorflow
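
These signature changes reflect the commit-wide migration from StatusOr-returning free functions that take an xla::XlaBuilder* to XlaOp-returning functions that record errors on the operands' builder. A minimal sketch of the resulting calling convention (a_shape and b_shape stand for previously constructed xla::Shape values):

    xla::XlaBuilder builder("triangular_solve_example");
    xla::XlaOp a = xla::Parameter(&builder, 0, a_shape, "a");
    xla::XlaOp b = xla::Parameter(&builder, 1, b_shape, "b");
    TriangularSolve(a, b, /*left_side=*/true, /*lower=*/true,
                    /*transpose_a=*/false, /*conjugate_a=*/false);
    // Any error reported inside TriangularSolve surfaces here, at build time.
    xla::StatusOr<xla::XlaComputation> computation = builder.Build();
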
diff --git a/tensorflow/compiler/tf2xla/lib/triangular_solve_test.cc b/tensorflow/compiler/tf2xla/lib/triangular_solve_test.cc
index 87ea4763f7..f1bff6037b 100644
--- a/tensorflow/compiler/tf2xla/lib/triangular_solve_test.cc
+++ b/tensorflow/compiler/tf2xla/lib/triangular_solve_test.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/array2d.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
@@ -85,11 +85,10 @@ XLA_TEST_F(TriangularSolveTest, SimpleRightLowerTranspose) {
xla::XlaOp a, b;
auto a_data = CreateR2Parameter<float>(AValsLower(), 0, "a", &builder, &a);
auto b_data = CreateR2Parameter<float>(BValsRight(), 1, "b", &builder, &b);
- auto result = TriangularSolve(&builder, a, b,
- /*left_side=*/false, /*lower=*/true,
- /*transpose_a=*/true, /*conjugate_a=*/false,
- /*block_size=*/2);
- TF_ASSERT_OK(result.status());
+ TriangularSolve(a, b,
+ /*left_side=*/false, /*lower=*/true,
+ /*transpose_a=*/true, /*conjugate_a=*/false,
+ /*block_size=*/2);
xla::Array2D<float> expected({
{0.5, 0.08333334, 0.04629629, 0.03367003},
@@ -107,11 +106,10 @@ XLA_TEST_F(TriangularSolveTest, SimpleRightLowerNotranspose) {
xla::XlaOp a, b;
auto a_data = CreateR2Parameter<float>(AValsLower(), 0, "a", &builder, &a);
auto b_data = CreateR2Parameter<float>(BValsRight(), 1, "b", &builder, &b);
- auto result = TriangularSolve(&builder, a, b,
- /*left_side=*/false, /*lower=*/true,
- /*transpose_a=*/false, /*conjugate_a=*/false,
- /*block_size=*/2);
- TF_ASSERT_OK(result.status());
+ TriangularSolve(a, b,
+ /*left_side=*/false, /*lower=*/true,
+ /*transpose_a=*/false, /*conjugate_a=*/false,
+ /*block_size=*/2);
xla::Array2D<float> expected({
{-0.16414141, -0.06902357, -0.07070707, 0.36363636},
@@ -129,11 +127,10 @@ XLA_TEST_F(TriangularSolveTest, SimpleRightUpperTranspose) {
xla::XlaOp a, b;
auto a_data = CreateR2Parameter<float>(AValsUpper(), 0, "a", &builder, &a);
auto b_data = CreateR2Parameter<float>(BValsRight(), 1, "b", &builder, &b);
- auto result = TriangularSolve(&builder, a, b,
- /*left_side=*/false, /*lower=*/false,
- /*transpose_a=*/true, /*conjugate_a=*/false,
- /*block_size=*/2);
- TF_ASSERT_OK(result.status());
+ TriangularSolve(a, b,
+ /*left_side=*/false, /*lower=*/false,
+ /*transpose_a=*/true, /*conjugate_a=*/false,
+ /*block_size=*/2);
xla::Array2D<float> expected({
{-0.16414141, -0.06902357, -0.07070707, 0.36363636},
@@ -151,11 +148,10 @@ XLA_TEST_F(TriangularSolveTest, SimpleRightUpperNotranspose) {
xla::XlaOp a, b;
auto a_data = CreateR2Parameter<float>(AValsUpper(), 0, "a", &builder, &a);
auto b_data = CreateR2Parameter<float>(BValsRight(), 1, "b", &builder, &b);
- auto result = TriangularSolve(&builder, a, b,
- /*left_side=*/false, /*lower=*/false,
- /*transpose_a=*/false, /*conjugate_a=*/false,
- /*block_size=*/2);
- TF_ASSERT_OK(result.status());
+ TriangularSolve(a, b,
+ /*left_side=*/false, /*lower=*/false,
+ /*transpose_a=*/false, /*conjugate_a=*/false,
+ /*block_size=*/2);
xla::Array2D<float> expected({
{0.5, 0.08333334, 0.04629629, 0.03367003},
@@ -173,11 +169,10 @@ XLA_TEST_F(TriangularSolveTest, SimpleLeftLowerTranspose) {
xla::XlaOp a, b;
auto a_data = CreateR2Parameter<float>(AValsLower(), 0, "a", &builder, &a);
auto b_data = CreateR2Parameter<float>(BValsLeft(), 1, "b", &builder, &b);
- auto result = TriangularSolve(&builder, a, b,
- /*left_side=*/true, /*lower=*/true,
- /*transpose_a=*/true, /*conjugate_a=*/false,
- /*block_size=*/2);
- TF_ASSERT_OK(result.status());
+ TriangularSolve(a, b,
+ /*left_side=*/true, /*lower=*/true,
+ /*transpose_a=*/true, /*conjugate_a=*/false,
+ /*block_size=*/2);
xla::Array2D<float> expected({
{-0.89646465, -0.69444444, -0.49242424},
@@ -196,11 +191,10 @@ XLA_TEST_F(TriangularSolveTest, SimpleLeftLowerNotranspose) {
xla::XlaOp a, b;
auto a_data = CreateR2Parameter<float>(AValsLower(), 0, "a", &builder, &a);
auto b_data = CreateR2Parameter<float>(BValsLeft(), 1, "b", &builder, &b);
- auto result = TriangularSolve(&builder, a, b,
- /*left_side=*/true, /*lower=*/true,
- /*transpose_a=*/false, /*conjugate_a=*/false,
- /*block_size=*/2);
- TF_ASSERT_OK(result.status());
+ TriangularSolve(a, b,
+ /*left_side=*/true, /*lower=*/true,
+ /*transpose_a=*/false, /*conjugate_a=*/false,
+ /*block_size=*/2);
xla::Array2D<float> expected({
{0.5, 1.0, 1.5},
@@ -219,11 +213,10 @@ XLA_TEST_F(TriangularSolveTest, SimpleLeftUpperTranspose) {
xla::XlaOp a, b;
auto a_data = CreateR2Parameter<float>(AValsUpper(), 0, "a", &builder, &a);
auto b_data = CreateR2Parameter<float>(BValsLeft(), 1, "b", &builder, &b);
- auto result = TriangularSolve(&builder, a, b,
- /*left_side=*/true, /*lower=*/false,
- /*transpose_a=*/true, /*conjugate_a=*/false,
- /*block_size=*/2);
- TF_ASSERT_OK(result.status());
+ TriangularSolve(a, b,
+ /*left_side=*/true, /*lower=*/false,
+ /*transpose_a=*/true, /*conjugate_a=*/false,
+ /*block_size=*/2);
xla::Array2D<float> expected({
{0.5, 1.0, 1.5},
@@ -242,11 +235,10 @@ XLA_TEST_F(TriangularSolveTest, SimpleLeftUpperNotranspose) {
xla::XlaOp a, b;
auto a_data = CreateR2Parameter<float>(AValsUpper(), 0, "a", &builder, &a);
auto b_data = CreateR2Parameter<float>(BValsLeft(), 1, "b", &builder, &b);
- auto result = TriangularSolve(&builder, a, b,
- /*left_side=*/true, /*lower=*/false,
- /*transpose_a=*/false, /*conjugate_a=*/false,
- /*block_size=*/2);
- TF_ASSERT_OK(result.status());
+ TriangularSolve(a, b,
+ /*left_side=*/true, /*lower=*/false,
+ /*transpose_a=*/false, /*conjugate_a=*/false,
+ /*block_size=*/2);
xla::Array2D<float> expected({
{-0.89646465, -0.69444444, -0.49242424},
@@ -267,11 +259,10 @@ XLA_TEST_F(TriangularSolveTest, SimpleRightLowerTransposeConjugate) {
CreateR2Parameter<complex64>(AValsLowerComplex(), 0, "a", &builder, &a);
auto b_data =
CreateR2Parameter<complex64>(BValsRightComplex(), 1, "b", &builder, &b);
- auto result = TriangularSolve(&builder, a, b,
- /*left_side=*/false, /*lower=*/true,
- /*transpose_a=*/true, /*conjugate_a=*/true,
- /*block_size=*/2);
- TF_ASSERT_OK(result.status());
+ TriangularSolve(a, b,
+ /*left_side=*/false, /*lower=*/true,
+ /*transpose_a=*/true, /*conjugate_a=*/true,
+ /*block_size=*/2);
xla::Array2D<complex64> expected({
{0.5, complex64(0.08333333, 0.08333333),
@@ -295,11 +286,10 @@ XLA_TEST_F(TriangularSolveTest, SimpleLeftUpperTransposeNoconjugate) {
CreateR2Parameter<complex64>(AValsUpperComplex(), 0, "a", &builder, &a);
auto b_data =
CreateR2Parameter<complex64>(BValsLeftComplex(), 1, "b", &builder, &b);
- auto result = TriangularSolve(&builder, a, b,
- /*left_side=*/true, /*lower=*/false,
- /*transpose_a=*/true, /*conjugate_a=*/false,
- /*block_size=*/2);
- TF_ASSERT_OK(result.status());
+ TriangularSolve(a, b,
+ /*left_side=*/true, /*lower=*/false,
+ /*transpose_a=*/true, /*conjugate_a=*/false,
+ /*block_size=*/2);
xla::Array2D<complex64> expected({
{0.5, 1., 1.5},
@@ -323,10 +313,9 @@ XLA_TEST_F(TriangularSolveLeftLookingTest, Simple) {
xla::XlaOp a, b;
auto a_data = CreateR2Parameter<float>(AValsLower(), 0, "a", &builder, &a);
auto b_data = CreateR2Parameter<float>(BValsLeft(), 1, "b", &builder, &b);
- auto result = TriangularSolveLeftLooking(&builder, a, b,
- /*transpose_a=*/false,
- /*conjugate_a=*/false);
- TF_ASSERT_OK(result.status());
+ TriangularSolveLeftLooking(a, b,
+ /*transpose_a=*/false,
+ /*conjugate_a=*/false);
xla::Array2D<float> expected({
{0.5, 1.0, 1.5},
@@ -345,10 +334,9 @@ XLA_TEST_F(TriangularSolveLeftLookingTest, NonzeroUpperTriangle) {
xla::XlaOp a, b;
auto a_data = CreateR2Parameter<float>(AValsFull(), 0, "a", &builder, &a);
auto b_data = CreateR2Parameter<float>(BValsLeft(), 1, "b", &builder, &b);
- auto result = TriangularSolveLeftLooking(&builder, a, b,
- /*transpose_a=*/false,
- /*conjugate_a=*/false);
- TF_ASSERT_OK(result.status());
+ TriangularSolveLeftLooking(a, b,
+ /*transpose_a=*/false,
+ /*conjugate_a=*/false);
xla::Array2D<float> expected({
{0.5, 1.0, 1.5},
diff --git a/tensorflow/compiler/tf2xla/lib/util.cc b/tensorflow/compiler/tf2xla/lib/util.cc
index d9ff7e6259..a6f5d346cb 100644
--- a/tensorflow/compiler/tf2xla/lib/util.cc
+++ b/tensorflow/compiler/tf2xla/lib/util.cc
@@ -18,6 +18,8 @@ limitations under the License.
#include <memory>
#include <vector>
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@@ -28,8 +30,9 @@ limitations under the License.
namespace tensorflow {
xla::XlaOp Zeros(xla::XlaBuilder* builder, const xla::Shape& shape) {
- return builder->Broadcast(
- builder->ConstantLiteral(xla::Literal::Zero(shape.element_type())),
+ return xla::Broadcast(
+ xla::ConstantLiteral(builder,
+ xla::LiteralUtil::Zero(shape.element_type())),
xla::AsInt64Slice(shape.dimensions()));
}
@@ -37,19 +40,19 @@ xla::XlaOp FloatLiteral(xla::XlaBuilder* builder, xla::PrimitiveType type,
double value) {
switch (type) {
case xla::F16:
- return builder->ConstantR0<xla::half>(static_cast<xla::half>(value));
+ return xla::ConstantR0<xla::half>(builder, static_cast<xla::half>(value));
break;
case xla::BF16:
- return builder->ConstantR0<bfloat16>(static_cast<bfloat16>(value));
+ return xla::ConstantR0<bfloat16>(builder, static_cast<bfloat16>(value));
break;
case xla::F32:
- return builder->ConstantR0<float>(static_cast<float>(value));
+ return xla::ConstantR0<float>(builder, static_cast<float>(value));
break;
case xla::F64:
- return builder->ConstantR0<double>(value);
+ return xla::ConstantR0<double>(builder, value);
break;
case xla::C64:
- return builder->ConstantR0<xla::complex64>(value);
+ return xla::ConstantR0<xla::complex64>(builder, value);
break;
default:
LOG(FATAL) << "unhandled element type " << type;
@@ -61,31 +64,31 @@ xla::XlaOp IntegerLiteral(xla::XlaBuilder* builder, xla::PrimitiveType type,
xla::Literal literal;
switch (type) {
case xla::U8:
- literal = std::move(*xla::Literal::CreateR0<uint8>(value));
+ literal = std::move(*xla::LiteralUtil::CreateR0<uint8>(value));
break;
case xla::U32:
- literal = std::move(*xla::Literal::CreateR0<uint32>(value));
+ literal = std::move(*xla::LiteralUtil::CreateR0<uint32>(value));
break;
case xla::U64:
- literal = std::move(*xla::Literal::CreateR0<uint64>(value));
+ literal = std::move(*xla::LiteralUtil::CreateR0<uint64>(value));
break;
case xla::S8:
- literal = std::move(*xla::Literal::CreateR0<int8>(value));
+ literal = std::move(*xla::LiteralUtil::CreateR0<int8>(value));
break;
case xla::S32:
- literal = std::move(*xla::Literal::CreateR0<int32>(value));
+ literal = std::move(*xla::LiteralUtil::CreateR0<int32>(value));
break;
case xla::S64:
- literal = std::move(*xla::Literal::CreateR0<int64>(value));
+ literal = std::move(*xla::LiteralUtil::CreateR0<int64>(value));
break;
case xla::F32:
- literal = std::move(*xla::Literal::CreateR0<float>(value));
+ literal = std::move(*xla::LiteralUtil::CreateR0<float>(value));
break;
case xla::F64:
- literal = std::move(*xla::Literal::CreateR0<double>(value));
+ literal = std::move(*xla::LiteralUtil::CreateR0<double>(value));
break;
case xla::C64:
- literal = std::move(*xla::Literal::CreateR0<complex64>(value));
+ literal = std::move(*xla::LiteralUtil::CreateR0<complex64>(value));
break;
case xla::PRED:
LOG(FATAL) << "pred element type is not integral";
@@ -94,11 +97,11 @@ xla::XlaOp IntegerLiteral(xla::XlaBuilder* builder, xla::PrimitiveType type,
LOG(FATAL) << "u16/s16 literals not yet implemented";
case xla::BF16:
literal = std::move(
- *xla::Literal::CreateR0<bfloat16>(static_cast<bfloat16>(value)));
+ *xla::LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(value)));
break;
case xla::F16:
- literal = std::move(
- *xla::Literal::CreateR0<xla::half>(static_cast<xla::half>(value)));
+ literal = std::move(*xla::LiteralUtil::CreateR0<xla::half>(
+ static_cast<xla::half>(value)));
break;
case xla::TUPLE:
LOG(FATAL) << "tuple element type is not integral";
@@ -107,134 +110,140 @@ xla::XlaOp IntegerLiteral(xla::XlaBuilder* builder, xla::PrimitiveType type,
default:
LOG(FATAL) << "unhandled element type " << type;
}
- return builder->ConstantLiteral(literal);
+ return xla::ConstantLiteral(builder, literal);
}
-xla::StatusOr<xla::XlaOp> SliceInMinorDims(xla::XlaBuilder* builder,
- const xla::XlaOp& x,
- gtl::ArraySlice<int64> start,
- gtl::ArraySlice<int64> end) {
- TF_RET_CHECK(start.size() == end.size());
- int64 n_minor_dims = start.size();
-
- TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
-
- const int64 n_dims = xla::ShapeUtil::Rank(shape);
- TF_RET_CHECK(n_minor_dims <= n_dims);
- gtl::ArraySlice<int64> major_dims(xla::AsInt64Slice(shape.dimensions()),
- /*pos=*/0,
- /*len=*/n_dims - n_minor_dims);
-
- // Prepends 0s in the major dim
- std::vector<int64> padded_start(n_dims, 0);
- std::copy(start.begin(), start.end(),
- padded_start.begin() + major_dims.size());
-
- // Prepends the shape of the major dims.
- std::vector<int64> padded_end(n_dims);
- std::copy(major_dims.begin(), major_dims.end(), padded_end.begin());
- std::copy(end.begin(), end.end(), padded_end.begin() + major_dims.size());
-
- std::vector<int64> strides(n_dims, 1);
- return builder->Slice(x, padded_start, padded_end, strides);
+xla::XlaOp SliceInMinorDims(xla::XlaOp x, gtl::ArraySlice<int64> start,
+ gtl::ArraySlice<int64> end) {
+ xla::XlaBuilder* builder = x.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_RET_CHECK(start.size() == end.size());
+ int64 n_minor_dims = start.size();
+
+ TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
+
+ const int64 n_dims = xla::ShapeUtil::Rank(shape);
+ TF_RET_CHECK(n_minor_dims <= n_dims);
+ gtl::ArraySlice<int64> major_dims(xla::AsInt64Slice(shape.dimensions()),
+ /*pos=*/0,
+ /*len=*/n_dims - n_minor_dims);
+
+ // Prepends 0s in the major dim
+ std::vector<int64> padded_start(n_dims, 0);
+ std::copy(start.begin(), start.end(),
+ padded_start.begin() + major_dims.size());
+
+ // Prepends the shape of the major dims.
+ std::vector<int64> padded_end(n_dims);
+ std::copy(major_dims.begin(), major_dims.end(), padded_end.begin());
+ std::copy(end.begin(), end.end(), padded_end.begin() + major_dims.size());
+
+ std::vector<int64> strides(n_dims, 1);
+ return xla::Slice(x, padded_start, padded_end, strides);
+ });
}
-std::vector<int64> PrependMajorDims(xla::XlaBuilder* builder,
- const gtl::ArraySlice<int64>& major_dims,
- const gtl::ArraySlice<int64>& indices) {
- std::vector<int64> output(indices.size() + major_dims.size());
- std::copy(major_dims.begin(), major_dims.end(), output.begin());
- std::copy(indices.begin(), indices.end(), output.begin() + major_dims.size());
+std::vector<int64> ConcatVectors(gtl::ArraySlice<int64> xs,
+ gtl::ArraySlice<int64> ys) {
+ std::vector<int64> output(xs.size() + ys.size());
+ std::copy(xs.begin(), xs.end(), output.begin());
+ std::copy(ys.begin(), ys.end(), output.begin() + xs.size());
return output;
}
-xla::StatusOr<xla::XlaOp> DynamicSliceInMinorDims(
- xla::XlaBuilder* builder, const xla::XlaOp& x,
- const std::vector<xla::XlaOp>& starts,
- const gtl::ArraySlice<int64>& sizes) {
- TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
- const int64 n_dims = xla::ShapeUtil::Rank(shape);
- int64 n_minor_dims = starts.size();
- TF_RET_CHECK(n_minor_dims == sizes.size());
- TF_RET_CHECK(n_minor_dims <= n_dims);
- gtl::ArraySlice<int64> major_dims(xla::AsInt64Slice(shape.dimensions()),
- /*pos=*/0,
- /*len=*/n_dims - sizes.size());
- TF_ASSIGN_OR_RETURN(auto padded_starts,
- PrependZerosInMajorDims(builder, x, starts));
- auto padded_sizes = PrependMajorDims(builder, major_dims, sizes);
- return builder->DynamicSlice(x, padded_starts, padded_sizes);
+xla::XlaOp DynamicSliceInMinorDims(xla::XlaOp x,
+ gtl::ArraySlice<xla::XlaOp> starts,
+ gtl::ArraySlice<int64> sizes) {
+ xla::XlaBuilder* builder = x.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
+ const int64 n_dims = xla::ShapeUtil::Rank(shape);
+ int64 n_minor_dims = starts.size();
+ TF_RET_CHECK(n_minor_dims == sizes.size());
+ TF_RET_CHECK(n_minor_dims <= n_dims);
+ gtl::ArraySlice<int64> major_dims(xla::AsInt64Slice(shape.dimensions()),
+ /*pos=*/0,
+ /*len=*/n_dims - sizes.size());
+ auto padded_starts = PrependZerosInMajorDims(x, starts);
+ auto padded_sizes = ConcatVectors(major_dims, sizes);
+ return xla::DynamicSlice(x, padded_starts, padded_sizes);
+ });
}
-xla::StatusOr<xla::XlaOp> UpdateSlice(xla::XlaBuilder* builder,
- const xla::XlaOp& x,
- const xla::XlaOp& update,
- gtl::ArraySlice<int64> start) {
- // TODO(phawkins): make int64 work on all backends, remove the int32 cast.
- std::vector<int32> start_as_int32(start.begin(), start.end());
- auto start_constant = builder->ConstantR1<int32>(start_as_int32);
- TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
- const int64 n_dims = xla::ShapeUtil::Rank(shape);
- TF_ASSIGN_OR_RETURN(xla::Shape start_constant_shape,
- builder->GetShape(start_constant));
- const int64 start_length =
- xla::ShapeUtil::GetDimension(start_constant_shape, -1);
- TF_RET_CHECK(start_length == n_dims);
- return builder->DynamicUpdateSlice(x, update, start_constant);
+xla::XlaOp UpdateSlice(xla::XlaOp x, xla::XlaOp update,
+ gtl::ArraySlice<int64> start) {
+ xla::XlaBuilder* builder = x.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ // TODO(phawkins): make int64 work on all backends, remove the int32 cast.
+ std::vector<int32> start_as_int32(start.begin(), start.end());
+ auto start_constant = xla::ConstantR1<int32>(builder, start_as_int32);
+ TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
+ const int64 n_dims = xla::ShapeUtil::Rank(shape);
+ TF_ASSIGN_OR_RETURN(xla::Shape start_constant_shape,
+ builder->GetShape(start_constant));
+ const int64 start_length =
+ xla::ShapeUtil::GetDimension(start_constant_shape, -1);
+ TF_RET_CHECK(start_length == n_dims);
+ return xla::DynamicUpdateSlice(x, update, start_constant);
+ });
}
-xla::StatusOr<xla::XlaOp> UpdateSliceInMinorDims(xla::XlaBuilder* builder,
- const xla::XlaOp& x,
- const xla::XlaOp& update,
- gtl::ArraySlice<int64> start) {
- TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
- const int64 n_dims = xla::ShapeUtil::Rank(shape);
- const int64 n_minor_dims = start.size();
- TF_RET_CHECK(n_minor_dims <= n_dims);
- std::vector<int64> padded_start(n_dims, 0);
- std::copy(start.begin(), start.end(),
- padded_start.begin() + (n_dims - n_minor_dims));
- return UpdateSlice(builder, x, update, padded_start);
+xla::XlaOp UpdateSliceInMinorDims(xla::XlaOp x, xla::XlaOp update,
+ gtl::ArraySlice<int64> start) {
+ xla::XlaBuilder* builder = x.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
+ const int64 n_dims = xla::ShapeUtil::Rank(shape);
+ const int64 n_minor_dims = start.size();
+ TF_RET_CHECK(n_minor_dims <= n_dims);
+ std::vector<int64> padded_start(n_dims, 0);
+ std::copy(start.begin(), start.end(),
+ padded_start.begin() + (n_dims - n_minor_dims));
+ return UpdateSlice(x, update, padded_start);
+ });
}
-xla::StatusOr<xla::XlaOp> DynamicUpdateSliceInMinorDims(
- xla::XlaBuilder* builder, const xla::XlaOp& x, const xla::XlaOp& update,
- const std::vector<xla::XlaOp>& starts) {
- TF_ASSIGN_OR_RETURN(auto padded_starts,
- PrependZerosInMajorDims(builder, x, starts));
- return builder->DynamicUpdateSlice(x, update, padded_starts);
+xla::XlaOp DynamicUpdateSliceInMinorDims(xla::XlaOp x, xla::XlaOp update,
+ gtl::ArraySlice<xla::XlaOp> starts) {
+ auto padded_starts = PrependZerosInMajorDims(x, starts);
+ return xla::DynamicUpdateSlice(x, update, padded_starts);
}
-xla::StatusOr<xla::XlaOp> PrependZerosInMajorDims(
- xla::XlaBuilder* builder, const xla::XlaOp& x,
- const std::vector<xla::XlaOp>& starts) {
- TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
- const int64 n_dims = xla::ShapeUtil::Rank(shape);
- auto zero = builder->Reshape(builder->ConstantR0<int32>(0), {1});
- std::vector<xla::XlaOp> padded_starts(n_dims, zero);
- for (int i = 0; i < starts.size(); ++i) {
- padded_starts[n_dims - starts.size() + i] =
- builder->Reshape(starts[i], {1});
- }
- return builder->ConcatInDim(padded_starts, 0);
+xla::XlaOp PrependZerosInMajorDims(xla::XlaOp x,
+ gtl::ArraySlice<xla::XlaOp> starts) {
+ xla::XlaBuilder* builder = x.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
+ const int64 n_dims = xla::ShapeUtil::Rank(shape);
+ auto zero = xla::Reshape(xla::ConstantR0<int32>(builder, 0), {1});
+ std::vector<xla::XlaOp> padded_starts(n_dims, zero);
+ for (int i = 0; i < starts.size(); ++i) {
+ padded_starts[n_dims - starts.size() + i] = xla::Reshape(starts[i], {1});
+ }
+ return xla::ConcatInDim(builder, padded_starts, 0);
+ });
}
-xla::StatusOr<xla::XlaOp> TransposeInMinorDims(xla::XlaBuilder* builder,
- const xla::XlaOp& x) {
- TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
- const int64 n_dims = xla::ShapeUtil::Rank(shape);
- TF_RET_CHECK(n_dims >= 2);
- std::vector<int64> permutation(n_dims);
- std::iota(permutation.begin(), permutation.end(), 0);
- std::swap(permutation[n_dims - 1], permutation[n_dims - 2]);
- return builder->Transpose(x, permutation);
+xla::XlaOp TransposeInMinorDims(xla::XlaOp x) {
+ xla::XlaBuilder* builder = x.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
+ const int64 n_dims = xla::ShapeUtil::Rank(shape);
+ TF_RET_CHECK(n_dims >= 2);
+ std::vector<int64> permutation(n_dims);
+ std::iota(permutation.begin(), permutation.end(), 0);
+ std::swap(permutation[n_dims - 1], permutation[n_dims - 2]);
+ return xla::Transpose(x, permutation);
+ });
}
-xla::StatusOr<xla::XlaOp> MaybeConjugate(xla::XlaBuilder* builder,
- const xla::XlaOp& x, bool conjugate) {
- TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
- auto perform_conj = shape.element_type() == xla::C64 && conjugate;
- return perform_conj ? builder->Conj(x) : x;
+xla::XlaOp MaybeConjugate(xla::XlaOp x, bool conjugate) {
+ xla::XlaBuilder* builder = x.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
+ auto perform_conj = shape.element_type() == xla::C64 && conjugate;
+ return perform_conj ? xla::Conj(x) : x;
+ });
}
} // namespace tensorflow
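
To make the padding these minor-dims helpers perform concrete, here is a small standalone sketch of the start/end construction in SliceInMinorDims (PadMinorDims is a hypothetical name, for illustration only):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct PaddedBounds {
      std::vector<int64_t> start, end;
    };

    // Mirrors SliceInMinorDims: zeros for the major start indices, the full
    // extent for the major end indices, then the caller's minor-dim bounds.
    PaddedBounds PadMinorDims(const std::vector<int64_t>& dims,
                              const std::vector<int64_t>& start,
                              const std::vector<int64_t>& end) {
      const std::size_t n_major = dims.size() - start.size();
      PaddedBounds out;
      out.start.assign(dims.size(), 0);
      out.end.assign(dims.begin(), dims.end());
      for (std::size_t i = 0; i < start.size(); ++i) {
        out.start[n_major + i] = start[i];
        out.end[n_major + i] = end[i];
      }
      return out;
    }

    // PadMinorDims({2, 5, 5}, {1, 0}, {3, 2}) yields start = {0, 1, 0} and
    // end = {2, 3, 2}, i.e. x[:, 1:3, 0:2] in numpy notation.
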
diff --git a/tensorflow/compiler/tf2xla/lib/util.h b/tensorflow/compiler/tf2xla/lib/util.h
index 3c120a2548..6cb6c088e9 100644
--- a/tensorflow/compiler/tf2xla/lib/util.h
+++ b/tensorflow/compiler/tf2xla/lib/util.h
@@ -23,9 +23,6 @@ limitations under the License.
namespace tensorflow {
-// Returns a zero-filled tensor with shape `shape`.
-xla::XlaOp Zeros(xla::XlaBuilder* builder, const xla::Shape& shape);
-
// Returns a floating point scalar constant of 'type' with 'value'.
// If 'type' is complex, returns a real value with zero imaginary component.
xla::XlaOp FloatLiteral(xla::XlaBuilder* builder, xla::PrimitiveType type,
@@ -33,7 +30,7 @@ xla::XlaOp FloatLiteral(xla::XlaBuilder* builder, xla::PrimitiveType type,
// Makes a 1D tensor [0, ..., x, y] from two tensors x and y with zeros
// prepended until the array is length n_dims.
-xla::XlaOp PrependZerosInMajorDims(xla::XlaBuilder* builder,
+xla::XlaOp PrependZerosInMajorDims(xla::XlaOp x,
gtl::ArraySlice<xla::XlaOp> starts);
// Returns an integer scalar constant of 'type' with 'value'.
@@ -41,54 +38,43 @@ xla::XlaOp PrependZerosInMajorDims(xla::XlaBuilder* builder,
xla::XlaOp IntegerLiteral(xla::XlaBuilder* builder, xla::PrimitiveType type,
int64 value);
-// Builds a vector of zeros of length rank(x) with the last two values being
+// Builds a vector of zeros of length rank(x) with the last values being
// those in `starts`.
-xla::StatusOr<xla::XlaOp> PrependZerosInMajorDims(
- xla::XlaBuilder* builder, const xla::XlaOp& x,
- const std::vector<xla::XlaOp>& starts);
+xla::XlaOp PrependZerosInMajorDims(xla::XlaOp x,
+ gtl::ArraySlice<xla::XlaOp> starts);
// Performs a slice in the minor dimensions of a Tensor.
-xla::StatusOr<xla::XlaOp> SliceInMinorDims(xla::XlaBuilder* builder,
- const xla::XlaOp& x,
- gtl::ArraySlice<int64> start,
- gtl::ArraySlice<int64> end);
+xla::XlaOp SliceInMinorDims(xla::XlaOp x, gtl::ArraySlice<int64> start,
+ gtl::ArraySlice<int64> end);
-// Builds a 1-d vector out of a concatenation of `major_dims` and `starts`.
-std::vector<int64> PrependMajorDims(xla::XlaBuilder* builder,
- const gtl::ArraySlice<int64>& major_dims,
- const gtl::ArraySlice<int64>& indices);
+// Returns the concatenation of `xs` and `ys`.
+std::vector<int64> ConcatVectors(gtl::ArraySlice<int64> xs,
+ gtl::ArraySlice<int64> ys);
// Performs a dynamic slice in the minor dimensions of a Tensor.
-xla::StatusOr<xla::XlaOp> DynamicSliceInMinorDims(
- xla::XlaBuilder* builder, const xla::XlaOp& x,
- const std::vector<xla::XlaOp>& starts, const gtl::ArraySlice<int64>& sizes);
+xla::XlaOp DynamicSliceInMinorDims(xla::XlaOp x,
+ gtl::ArraySlice<xla::XlaOp> starts,
+ gtl::ArraySlice<int64> sizes);
// Updates a slice of 'x', i.e.,
// x[start[0], ..., start[n]] = update
-xla::StatusOr<xla::XlaOp> UpdateSlice(xla::XlaBuilder* builder,
- const xla::XlaOp& x,
- const xla::XlaOp& update,
- gtl::ArraySlice<int64> start);
+xla::XlaOp UpdateSlice(xla::XlaOp x, xla::XlaOp update,
+ gtl::ArraySlice<int64> start);
// Updates a slice of 'x', where 'start' contains a list of minor dimensions:
// x[..., start[0], ..., start[n]] = update
-xla::StatusOr<xla::XlaOp> UpdateSliceInMinorDims(xla::XlaBuilder* builder,
- const xla::XlaOp& x,
- const xla::XlaOp& update,
- gtl::ArraySlice<int64> start);
+xla::XlaOp UpdateSliceInMinorDims(xla::XlaOp x, xla::XlaOp update,
+ gtl::ArraySlice<int64> start);
-xla::StatusOr<xla::XlaOp> DynamicUpdateSliceInMinorDims(
- xla::XlaBuilder* builder, const xla::XlaOp& x, const xla::XlaOp& update,
- const std::vector<xla::XlaOp>& starts);
+xla::XlaOp DynamicUpdateSliceInMinorDims(xla::XlaOp x, xla::XlaOp update,
+ gtl::ArraySlice<xla::XlaOp> starts);
// Transposes a stack of matrices `x` by swapping the last two dimensions.
-xla::StatusOr<xla::XlaOp> TransposeInMinorDims(xla::XlaBuilder* builder,
- const xla::XlaOp& x);
+xla::XlaOp TransposeInMinorDims(xla::XlaOp x);
// Applies a complex conjugation operation if `a` is complex and `conjugate_a`
// is true, otherwise returns its argument.
-xla::StatusOr<xla::XlaOp> MaybeConjugate(xla::XlaBuilder* builder,
- const xla::XlaOp& x, bool conjugate);
+xla::XlaOp MaybeConjugate(xla::XlaOp x, bool conjugate);
} // namespace tensorflow
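
As a worked example for TransposeInMinorDims above: the permutation it applies is the identity with the last two axes swapped, sketched standalone below (the helper name is illustrative).

    #include <cstdint>
    #include <numeric>
    #include <utility>
    #include <vector>

    // For n_dims >= 2 returns {0, 1, ..., n_dims - 3, n_dims - 1, n_dims - 2};
    // e.g. rank 4 gives {0, 1, 3, 2}: batch dims fixed, matrix dims swapped.
    std::vector<int64_t> MinorDimsTransposePermutation(int64_t n_dims) {
      std::vector<int64_t> permutation(n_dims);
      std::iota(permutation.begin(), permutation.end(), 0);
      std::swap(permutation[n_dims - 1], permutation[n_dims - 2]);
      return permutation;
    }
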
diff --git a/tensorflow/compiler/tf2xla/lib/util_test.cc b/tensorflow/compiler/tf2xla/lib/util_test.cc
index 265b39402c..442fe92c34 100644
--- a/tensorflow/compiler/tf2xla/lib/util_test.cc
+++ b/tensorflow/compiler/tf2xla/lib/util_test.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/lib/batch_dot.h"
#include "tensorflow/compiler/xla/array2d.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
@@ -70,8 +70,7 @@ XLA_TEST_F(UtilTest, Simple2dLookup) {
auto a_data = CreateR2Parameter<float>(BValsRight(), 0, "a", &builder, &a);
auto x_data = CreateR0Parameter<int>(2, 1, "x", &builder, &x);
auto y_data = CreateR0Parameter<int>(1, 2, "y", &builder, &y);
- auto result = DynamicSliceInMinorDims(&builder, a, {x, y}, {1, 1});
- TF_ASSERT_OK(result.status());
+ DynamicSliceInMinorDims(a, {x, y}, {1, 1});
ComputeAndCompareR2<float>(&builder, {{10}},
{a_data.get(), x_data.get(), y_data.get()},
@@ -86,10 +85,8 @@ XLA_TEST_F(UtilTest, Simple3dLookup) {
CreateR3Parameter<float>(BatchedAValsFull(), 0, "a", &builder, &a);
auto index_data = CreateR0Parameter<int>(1, 1, "index", &builder, &index);
- TF_ASSERT_OK_AND_ASSIGN(
- auto l_index,
- DynamicSliceInMinorDims(&builder, a,
- {index, builder.ConstantR0<int32>(0)}, {1, 4}));
+ DynamicSliceInMinorDims(a, {index, xla::ConstantR0<int32>(&builder, 0)},
+ {1, 4});
ComputeAndCompareR3<float>(&builder, {{{3, 6, 0, 1}}, {{24, 61, 82, 48}}},
{a_data.get(), index_data.get()});
@@ -104,8 +101,7 @@ XLA_TEST_F(UtilTest, SimpleSliceUpdate) {
auto x_data = CreateR0Parameter<int>(2, 2, "x", &builder, &x);
auto y_data = CreateR0Parameter<int>(1, 3, "y", &builder, &y);
- auto result = DynamicUpdateSliceInMinorDims(&builder, a, b, {x, y});
- TF_ASSERT_OK(result.status());
+ DynamicUpdateSliceInMinorDims(a, b, {x, y});
xla::Array2D<float> expected(
{{{2, 0, 1, 2}, {3, 6, 0, 1}, {4, 9, 1, -10}, {5, 8, 10, 11}}});
@@ -128,13 +124,9 @@ XLA_TEST_F(UtilTest, RowBatchDot) {
// Select {{3, 6, 0, 1}, {24, 61, 82, 48}} out of BatchedAValsFull().
auto index_data = CreateR0Parameter<int>(1, 2, "index", &builder, &index);
- TF_ASSERT_OK_AND_ASSIGN(
- auto l_index,
- DynamicSliceInMinorDims(&builder, a,
- {index, builder.ConstantR0<int32>(0)}, {1, n}));
- TF_ASSERT_OK_AND_ASSIGN(
- auto dot, BatchDot(&builder, l_index, row,
- /*transpose_x=*/false, /*transpose_y=*/true));
+ auto l_index = DynamicSliceInMinorDims(
+ a, {index, xla::ConstantR0<int32>(&builder, 0)}, {1, n});
+ BatchDot(l_index, row, /*transpose_x=*/false, /*transpose_y=*/true);
ComputeAndCompareR3<float>(&builder, {{{33}}, {{292}}},
{a_data.get(), row_data.get(), index_data.get()});
diff --git a/tensorflow/compiler/tf2xla/lib/while_loop.cc b/tensorflow/compiler/tf2xla/lib/while_loop.cc
index 09ce594930..574e70ddee 100644
--- a/tensorflow/compiler/tf2xla/lib/while_loop.cc
+++ b/tensorflow/compiler/tf2xla/lib/while_loop.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/lib/while_loop.h"
#include "tensorflow/compiler/tf2xla/lib/util.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@@ -39,7 +40,7 @@ xla::StatusOr<std::vector<xla::XlaOp>> XlaWhileLoop(
xla::XlaBuilder* builder) {
std::vector<xla::XlaOp> elements(arity);
for (int i = 0; i < arity; ++i) {
- elements[i] = builder->GetTupleElement(tuple, i);
+ elements[i] = xla::GetTupleElement(tuple, i);
}
return elements;
};
@@ -48,7 +49,8 @@ xla::StatusOr<std::vector<xla::XlaOp>> XlaWhileLoop(
std::unique_ptr<xla::XlaBuilder> cond_builder =
builder->CreateSubBuilder(strings::StrCat(name, "_condition"));
{
- auto parameter = cond_builder->Parameter(0, tuple_shape, "parameter");
+ auto parameter =
+ xla::Parameter(cond_builder.get(), 0, tuple_shape, "parameter");
TF_RETURN_IF_ERROR(
condition_function(unpack_tuple(parameter, arity, cond_builder.get()),
@@ -61,7 +63,8 @@ xla::StatusOr<std::vector<xla::XlaOp>> XlaWhileLoop(
std::unique_ptr<xla::XlaBuilder> body_builder =
builder->CreateSubBuilder(strings::StrCat(name, "_body"));
{
- auto parameter = body_builder->Parameter(0, tuple_shape, "parameter");
+ auto parameter =
+ xla::Parameter(body_builder.get(), 0, tuple_shape, "parameter");
TF_ASSIGN_OR_RETURN(
auto result,
@@ -69,11 +72,11 @@ xla::StatusOr<std::vector<xla::XlaOp>> XlaWhileLoop(
body_builder.get()));
TF_RET_CHECK(result.size() == initial_values.size());
- body_builder->Tuple(result);
+ xla::Tuple(body_builder.get(), result);
}
TF_ASSIGN_OR_RETURN(auto body, body_builder->Build());
- auto outputs = builder->While(cond, body, builder->Tuple(initial_values));
+ auto outputs = xla::While(cond, body, xla::Tuple(builder, initial_values));
return unpack_tuple(outputs, arity, builder);
}
@@ -86,9 +89,8 @@ xla::StatusOr<std::vector<xla::XlaOp>> XlaForEachIndex(
auto while_cond_fn =
[&](gtl::ArraySlice<xla::XlaOp> values,
xla::XlaBuilder* cond_builder) -> xla::StatusOr<xla::XlaOp> {
- return cond_builder->Lt(
- values[0],
- IntegerLiteral(cond_builder, num_iterations_type, num_iterations));
+ return xla::Lt(values[0], IntegerLiteral(cond_builder, num_iterations_type,
+ num_iterations));
};
auto while_body_fn = [&](gtl::ArraySlice<xla::XlaOp> values,
xla::XlaBuilder* body_builder)
@@ -97,9 +99,10 @@ xla::StatusOr<std::vector<xla::XlaOp>> XlaForEachIndex(
std::vector<xla::XlaOp> updated_values;
updated_values.reserve(values.size());
- updated_values.push_back(body_builder->Add(
+ updated_values.push_back(xla::Add(
iteration,
- body_builder->ConstantLiteral(xla::Literal::One(num_iterations_type))));
+ xla::ConstantLiteral(body_builder,
+ xla::LiteralUtil::One(num_iterations_type))));
values.remove_prefix(1);
TF_ASSIGN_OR_RETURN(std::vector<xla::XlaOp> body_outputs,
@@ -111,8 +114,8 @@ xla::StatusOr<std::vector<xla::XlaOp>> XlaForEachIndex(
std::vector<xla::XlaOp> values;
values.reserve(initial_values.size() + 1);
- values.push_back(
- builder->ConstantLiteral(xla::Literal::Zero(num_iterations_type)));
+ values.push_back(xla::ConstantLiteral(
+ builder, xla::LiteralUtil::Zero(num_iterations_type)));
values.insert(values.end(), initial_values.begin(), initial_values.end());
TF_ASSIGN_OR_RETURN(values, XlaWhileLoop(while_cond_fn, while_body_fn, values,
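
Assuming while_loop.h declares XlaWhileLoop with the argument order visible here (condition function, body function, initial values, then a name and builder), a caller that counts to ten while accumulating a running sum would look roughly like this, inside a function that returns a Status and with builder an xla::XlaBuilder* in scope:

    auto cond_fn = [](gtl::ArraySlice<xla::XlaOp> values,
                      xla::XlaBuilder* cond_builder)
        -> xla::StatusOr<xla::XlaOp> {
      // Keep looping while the counter, values[0], is below ten.
      return xla::Lt(values[0], xla::ConstantR0<int32>(cond_builder, 10));
    };
    auto body_fn = [](gtl::ArraySlice<xla::XlaOp> values,
                      xla::XlaBuilder* body_builder)
        -> xla::StatusOr<std::vector<xla::XlaOp>> {
      auto one = xla::ConstantR0<int32>(body_builder, 1);
      // Advance the counter and fold its old value into the running sum.
      return std::vector<xla::XlaOp>{xla::Add(values[0], one),
                                     xla::Add(values[1], values[0])};
    };
    auto zero = xla::ConstantR0<int32>(builder, 0);
    TF_ASSIGN_OR_RETURN(
        auto results,
        XlaWhileLoop(cond_fn, body_fn, {zero, zero}, "count_to_ten", builder));
    // results[1] now holds 0 + 1 + ... + 9 = 45.
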
diff --git a/tensorflow/compiler/tf2xla/literal_util.cc b/tensorflow/compiler/tf2xla/literal_util.cc
index b43405a1a4..2fb66913ad 100644
--- a/tensorflow/compiler/tf2xla/literal_util.cc
+++ b/tensorflow/compiler/tf2xla/literal_util.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
namespace tensorflow {
diff --git a/tensorflow/compiler/tf2xla/literal_util.h b/tensorflow/compiler/tf2xla/literal_util.h
index ab7e861f33..0610a57029 100644
--- a/tensorflow/compiler/tf2xla/literal_util.h
+++ b/tensorflow/compiler/tf2xla/literal_util.h
@@ -18,7 +18,7 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_TF2XLA_LITERAL_UTIL_H_
#define TENSORFLOW_COMPILER_TF2XLA_LITERAL_UTIL_H_
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/status.h"
diff --git a/tensorflow/compiler/tf2xla/literal_util_test.cc b/tensorflow/compiler/tf2xla/literal_util_test.cc
index f3d6787daa..a3404c2b3d 100644
--- a/tensorflow/compiler/tf2xla/literal_util_test.cc
+++ b/tensorflow/compiler/tf2xla/literal_util_test.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/tensor_testutil.h"
@@ -27,7 +28,7 @@ TEST(LiteralUtil, LiteralToHostTensor) {
{
std::vector<int64> int64_values = {1, 2, 3};
std::unique_ptr<xla::Literal> int64_values_literal =
- xla::Literal::CreateR1(gtl::ArraySlice<int64>(int64_values));
+ xla::LiteralUtil::CreateR1(gtl::ArraySlice<int64>(int64_values));
Tensor host_tensor;
EXPECT_EQ("Cannot convert literal of type S64 to tensor of type int32",
LiteralToHostTensor(*int64_values_literal, DT_INT32, &host_tensor)
@@ -48,7 +49,7 @@ TEST(LiteralUtil, LiteralToHostTensor) {
Tensor host_tensor;
std::vector<int32> int32_values = {10, 11};
std::unique_ptr<xla::Literal> int32_values_literal =
- xla::Literal::CreateR1(gtl::ArraySlice<int32>(int32_values));
+ xla::LiteralUtil::CreateR1(gtl::ArraySlice<int32>(int32_values));
EXPECT_TRUE(
LiteralToHostTensor(*int32_values_literal, DT_INT32, &host_tensor)
.ok());
diff --git a/tensorflow/compiler/tf2xla/tf2xla_test.cc b/tensorflow/compiler/tf2xla/tf2xla_test.cc
index 84c133ffab..f0b30dcf4e 100644
--- a/tensorflow/compiler/tf2xla/tf2xla_test.cc
+++ b/tensorflow/compiler/tf2xla/tf2xla_test.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "tensorflow/compiler/xla/client/client_library.h"
#include "tensorflow/compiler/xla/client/local_client.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/core/framework/attr_value.pb.h"
@@ -73,8 +74,8 @@ TEST(ConvertGraphDefToXla, Sum) {
TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
// Set up arguments.
- auto x_literal = xla::Literal::CreateR0<int32>(10);
- auto y_literal = xla::Literal::CreateR0<int32>(32);
+ auto x_literal = xla::LiteralUtil::CreateR0<int32>(10);
+ auto y_literal = xla::LiteralUtil::CreateR0<int32>(32);
auto x_global_or = client->TransferToServer(*x_literal);
auto y_global_or = client->TransferToServer(*y_literal);
TF_EXPECT_OK(x_global_or.status());
diff --git a/tensorflow/compiler/tf2xla/xla_compiler.cc b/tensorflow/compiler/tf2xla/xla_compiler.cc
index 9c8e56a17e..319cbc74e9 100644
--- a/tensorflow/compiler/tf2xla/xla_compiler.cc
+++ b/tensorflow/compiler/tf2xla/xla_compiler.cc
@@ -28,6 +28,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_compilation_device.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/xla/client/client_library.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/executor.h"
#include "tensorflow/core/common_runtime/function.h"
@@ -230,10 +231,13 @@ Status XlaCompiler::XLAShapeForArgument(const XlaCompiler::Argument& arg,
case XlaCompiler::Argument::kConstant:
LOG(FATAL) << "Unreachable case";
case XlaCompiler::Argument::kParameter: {
- TensorShape shape =
- is_entry_computation
- ? options_.shape_representation_fn(arg.shape, arg.type)
- : arg.shape;
+ TensorShape shape;
+ if (is_entry_computation) {
+ TF_ASSIGN_OR_RETURN(
+ shape, options_.shape_representation_fn(arg.shape, arg.type));
+ } else {
+ shape = arg.shape;
+ }
return TensorShapeToXLAShape(arg.type, shape, xla_shape);
}
case XlaCompiler::Argument::kResource: {
@@ -241,8 +245,9 @@ Status XlaCompiler::XLAShapeForArgument(const XlaCompiler::Argument& arg,
switch (arg.resource_kind) {
case XlaResource::kVariable: {
- TensorShape representation_shape =
- options_.shape_representation_fn(arg.shape, arg.type);
+ TF_ASSIGN_OR_RETURN(
+ TensorShape representation_shape,
+ options_.shape_representation_fn(arg.shape, arg.type));
return TensorShapeToXLAShape(arg.type, representation_shape,
xla_shape);
}
@@ -338,9 +343,9 @@ Status BuildComputation(
const std::vector<int>& arg_cores,
const std::vector<XlaContext::Retval>& retvals,
const std::vector<std::unique_ptr<XlaResource>>& resources,
- bool return_updated_values_for_all_resources, xla::XlaBuilder* builder,
- xla::XlaComputation* computation, int* num_computation_outputs,
- int* num_nonconst_outputs,
+ bool return_updated_values_for_all_resources, bool always_return_tuple,
+ xla::XlaBuilder* builder, xla::XlaComputation* computation,
+ int* num_computation_outputs, int* num_nonconst_outputs,
std::vector<XlaCompiler::OutputDescription>* outputs,
std::vector<XlaCompiler::ResourceUpdate>* resource_updates) {
std::vector<xla::XlaOp> elems;
@@ -384,13 +389,14 @@ Status BuildComputation(
const XlaCompiler::Argument& arg = args[resource->arg_num()];
const int core = arg_cores[resource->arg_num()];
DCHECK_LT(resource->arg_num(), arg_cores.size());
- bool modified = resource->value() != resource->initial_value();
+ bool modified = !resource->value().IsIdenticalTo(resource->initial_value());
// TensorArray gradients were modified if their values changed or there are
// any newly created gradients.
for (const auto& grad : resource->tensor_array_gradients()) {
- modified = modified ||
- grad.second->value() != grad.second->initial_value() ||
- arg.tensor_array_gradients.count(grad.first) == 0;
+ modified =
+ modified ||
+ !grad.second->value().IsIdenticalTo(grad.second->initial_value()) ||
+ arg.tensor_array_gradients.count(grad.first) == 0;
}
if (return_updated_values_for_all_resources || modified) {
resource_updates->emplace_back();
@@ -415,7 +421,7 @@ Status BuildComputation(
// create a tuple/get-tuple-element combination so that sharding
// assignment will be placed on this value, which will cause the resource
// update to be returned from the same device that provided the resource.
- handle = builder->GetTupleElement(builder->Tuple({handle}), 0);
+ handle = xla::GetTupleElement(xla::Tuple(builder, {handle}), 0);
elems.push_back(handle);
}
@@ -424,7 +430,9 @@ Status BuildComputation(
*num_computation_outputs = elems.size();
// Builds the XLA computation.
- builder->Tuple(elems);
+ if (always_return_tuple || elems.size() != 1) {
+ xla::Tuple(builder, elems);
+ }
builder->ClearOpMetadata();
xla::StatusOr<xla::XlaComputation> computation_status = builder->Build();
@@ -551,16 +559,16 @@ Status XlaCompiler::BuildArguments(
}
xla::XlaScopedShardingAssignment assign_tuple_sharding(builder,
tuple_sharding);
- tuple = builder->Parameter(0, (*input_shapes)[0], "arg_tuple");
+ tuple = xla::Parameter(builder, 0, (*input_shapes)[0], "arg_tuple");
} else {
- tuple = builder->Parameter(0, (*input_shapes)[0], "arg_tuple");
+ tuple = xla::Parameter(builder, 0, (*input_shapes)[0], "arg_tuple");
}
for (std::vector<int>::size_type i = 0; i < input_mapping->size(); ++i) {
const int core = (*arg_cores)[input_mapping->at(i)];
xla::XlaScopedShardingAssignment assign_sharding(
builder, core == -1 ? tensorflow::gtl::optional<xla::OpSharding>()
: xla::sharding_builder::AssignDevice(core));
- arg_handles[i] = builder->GetTupleElement(tuple, i);
+ arg_handles[i] = xla::GetTupleElement(tuple, i);
}
} else {
for (std::vector<int>::size_type i = 0; i < input_mapping->size(); ++i) {
@@ -568,8 +576,8 @@ Status XlaCompiler::BuildArguments(
xla::XlaScopedShardingAssignment assign_sharding(
builder, core == -1 ? tensorflow::gtl::optional<xla::OpSharding>()
: xla::sharding_builder::AssignDevice(core));
- arg_handles[i] =
- builder->Parameter(i, (*input_shapes)[i], strings::StrCat("arg", i));
+ arg_handles[i] = xla::Parameter(builder, i, (*input_shapes)[i],
+ strings::StrCat("arg", i));
}
}
@@ -600,7 +608,7 @@ Status XlaCompiler::BuildArguments(
// return values of functions, and then reshape unconditionally.
if (is_entry_computation) {
arg_expression.set_handle(
- builder->Reshape(arg_handles[i], arg.shape.dim_sizes()));
+ xla::Reshape(arg_handles[i], arg.shape.dim_sizes()));
} else {
arg_expression.set_handle(arg_handles[i]);
}
@@ -660,20 +668,17 @@ Status XlaCompiler::CompileSingleOp(
namespace {
// Check that the ops of all non-functional nodes have been registered.
-string ValidateFunctionDef(const FunctionDef* fdef,
+Status ValidateFunctionDef(const FunctionDef* fdef,
const FunctionLibraryDefinition& flib_def) {
- std::vector<string> invalid_ops;
for (const NodeDef& node : fdef->node_def()) {
const string& op = node.op();
if (op == FunctionLibraryDefinition::kGradientOp || flib_def.Find(op)) {
continue;
}
const OpDef* op_def;
- if (!OpRegistry::Global()->LookUpOpDef(op, &op_def).ok()) {
- invalid_ops.push_back(op);
- }
+ TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUpOpDef(op, &op_def));
}
- return tensorflow::str_util::Join(invalid_ops, ", ");
+ return Status::OK();
}
// Check that the graph doesn't have any invalid nodes (e.g. incompatible with
@@ -681,35 +686,33 @@ string ValidateFunctionDef(const FunctionDef* fdef,
Status ValidateGraph(const Graph* graph,
const FunctionLibraryDefinition& flib_def,
const DeviceType& device_type, const string& name) {
- std::vector<string> invalid_ops;
+ auto maybe_error = [&](const string& op, const Status& s) -> Status {
+ if (!s.ok()) {
+ return errors::InvalidArgument(strings::StrCat(
+ "Detected unsupported operations when trying to compile graph ", name,
+ " on ", device_type.type_string(), ": ", op, " (", s.error_message(),
+ ")"));
+ }
+ return Status::OK();
+ };
+
for (const Node* node : graph->nodes()) {
if (node->type_string() == FunctionLibraryDefinition::kGradientOp) {
continue;
}
const FunctionDef* fdef = flib_def.Find(node->def().op());
+ Status s;
if (fdef) {
- string error_msg = ValidateFunctionDef(fdef, flib_def);
- if (!error_msg.empty()) {
- invalid_ops.push_back(
- strings::StrCat(node->def().op(), ":{", error_msg, "}"));
- }
+ s = ValidateFunctionDef(fdef, flib_def);
+ TF_RETURN_IF_ERROR(maybe_error(node->def().op(), s));
continue;
}
const OpDef* op_def;
- if (!OpRegistry::Global()->LookUpOpDef(node->def().op(), &op_def).ok()) {
- invalid_ops.push_back(node->def().op());
- continue;
- }
+ s = OpRegistry::Global()->LookUpOpDef(node->def().op(), &op_def);
+ TF_RETURN_IF_ERROR(maybe_error(node->def().op(), s));
TF_RETURN_IF_ERROR(ValidateNodeDef(node->def(), *op_def));
- if (!FindKernelDef(device_type, node->def(), nullptr, nullptr).ok()) {
- invalid_ops.push_back(node->def().op());
- }
- }
- if (!invalid_ops.empty()) {
- return errors::InvalidArgument(strings::StrCat(
- "Detected unsupported operations when trying to compile graph ", name,
- " on ", device_type.type_string(), ":",
- tensorflow::str_util::Join(invalid_ops, ", ")));
+ s = FindKernelDef(device_type, node->def(), nullptr, nullptr);
+ TF_RETURN_IF_ERROR(maybe_error(node->def().op(), s));
}
return Status::OK();
}
@@ -767,9 +770,10 @@ Status XlaCompiler::CompileGraph(const XlaCompiler::CompileOptions& options,
result->outputs.resize(context->retvals().size());
TF_RETURN_IF_ERROR(BuildComputation(
args, arg_cores, context->retvals(), context->resources(),
- options.return_updated_values_for_all_resources, &builder,
- result->computation.get(), &num_computation_outputs,
- &num_nonconst_outputs, &result->outputs, &result->resource_updates));
+ options.return_updated_values_for_all_resources,
+ options.always_return_tuple, &builder, result->computation.get(),
+ &num_computation_outputs, &num_nonconst_outputs, &result->outputs,
+ &result->resource_updates));
VLOG(2) << "Outputs: total: " << context->retvals().size()
<< " nonconstant: " << num_nonconst_outputs;
diff --git a/tensorflow/compiler/tf2xla/xla_compiler.h b/tensorflow/compiler/tf2xla/xla_compiler.h
index 6be74957c6..079c99797e 100644
--- a/tensorflow/compiler/tf2xla/xla_compiler.h
+++ b/tensorflow/compiler/tf2xla/xla_compiler.h
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_compilation_device.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/local_client.h"
+#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/function.h"
@@ -169,6 +170,11 @@ class XlaCompiler {
// computation.
bool resolve_compile_time_constants = true;
+  // If `always_return_tuple` is true, then the output of a computation will
+  // always be a tuple. Otherwise, a single-element output will not be wrapped
+  // in a tuple.
+ bool always_return_tuple = true;
+
// True when compiling the entry computation, false for subcomputations
// (while, call, etc.)
bool is_entry_computation = true;
@@ -237,7 +243,8 @@ class XlaCompiler {
std::shared_ptr<xla::XlaComputation> computation;
};
- typedef std::function<TensorShape(const TensorShape&, DataType)>
+ typedef std::function<xla::StatusOr<TensorShape>(const TensorShape&,
+ DataType)>
ShapeRepresentationFn;
struct Options {
// Name of the compilation device to use. It must be set by the caller.
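[Editor's note] A short sketch of how a caller might exercise the two additions above, the StatusOr-returning ShapeRepresentationFn and the always_return_tuple flag (values illustrative):

XlaCompiler::Options options;
// The representation function may now report failure instead of CHECK-ing:
options.shape_representation_fn =
    [](const TensorShape& shape, DataType) -> xla::StatusOr<TensorShape> {
  return shape;  // identity representation
};

XlaCompiler::CompileOptions compile_options;
compile_options.always_return_tuple = false;  // a single retval stays untupled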
diff --git a/tensorflow/compiler/tf2xla/xla_compiler_test.cc b/tensorflow/compiler/tf2xla/xla_compiler_test.cc
index 613230452b..6f76816a86 100644
--- a/tensorflow/compiler/tf2xla/xla_compiler_test.cc
+++ b/tensorflow/compiler/tf2xla/xla_compiler_test.cc
@@ -23,7 +23,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/client_library.h"
#include "tensorflow/compiler/xla/client/local_client.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/tests/literal_test_util.h"
@@ -206,9 +206,9 @@ TEST_F(XlaCompilerTest, Simple) {
// Tests that the generated computation works.
std::unique_ptr<xla::Literal> param0_literal =
- xla::Literal::CreateR1<int32>({7, 42});
+ xla::LiteralUtil::CreateR1<int32>({7, 42});
std::unique_ptr<xla::Literal> param1_literal =
- xla::Literal::CreateR1<int32>({-3, 101});
+ xla::LiteralUtil::CreateR1<int32>({-3, 101});
std::unique_ptr<xla::GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
std::unique_ptr<xla::GlobalData> param1_data =
@@ -222,9 +222,9 @@ TEST_F(XlaCompilerTest, Simple) {
client_->Transfer(*actual).ConsumeValueOrDie();
std::unique_ptr<xla::Literal> expected0 =
- xla::Literal::CreateR1<int32>({4, 143});
+ xla::LiteralUtil::CreateR1<int32>({4, 143});
std::unique_ptr<xla::Literal> expected_literal =
- xla::Literal::MakeTuple({expected0.get()});
+ xla::LiteralUtil::MakeTuple({expected0.get()});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(*expected_literal, *actual_literal));
}
@@ -306,7 +306,7 @@ TEST_F(XlaCompilerTest, ConstantOutputs) {
// Tests that the generated computation works.
std::unique_ptr<xla::Literal> param0_literal =
- xla::Literal::CreateR1<int32>({7, 42});
+ xla::LiteralUtil::CreateR1<int32>({7, 42});
std::unique_ptr<xla::GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
@@ -317,9 +317,9 @@ TEST_F(XlaCompilerTest, ConstantOutputs) {
client_->Transfer(*actual).ConsumeValueOrDie();
std::unique_ptr<xla::Literal> expected0 =
- xla::Literal::CreateR1<int32>({-7, -42});
+ xla::LiteralUtil::CreateR1<int32>({-7, -42});
std::unique_ptr<xla::Literal> expected_literal =
- xla::Literal::MakeTuple({expected0.get()});
+ xla::LiteralUtil::MakeTuple({expected0.get()});
EXPECT_TRUE(
xla::LiteralTestUtil::Equal(*expected_literal, *actual_literal));
}
@@ -341,7 +341,7 @@ TEST_F(XlaCompilerTest, ConstantOutputs) {
// Tests that the generated computation works.
std::unique_ptr<xla::Literal> param0_literal =
- xla::Literal::CreateR1<int32>({7, 42});
+ xla::LiteralUtil::CreateR1<int32>({7, 42});
std::unique_ptr<xla::GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
@@ -351,11 +351,12 @@ TEST_F(XlaCompilerTest, ConstantOutputs) {
std::unique_ptr<xla::Literal> actual_literal =
client_->Transfer(*actual).ConsumeValueOrDie();
- std::unique_ptr<xla::Literal> expected0 = xla::Literal::CreateR0<int32>(7);
+ std::unique_ptr<xla::Literal> expected0 =
+ xla::LiteralUtil::CreateR0<int32>(7);
std::unique_ptr<xla::Literal> expected1 =
- xla::Literal::CreateR1<int32>({-7, -42});
+ xla::LiteralUtil::CreateR1<int32>({-7, -42});
std::unique_ptr<xla::Literal> expected =
- xla::Literal::MakeTuple({expected0.get(), expected1.get()});
+ xla::LiteralUtil::MakeTuple({expected0.get(), expected1.get()});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(*expected, *actual_literal));
}
}
@@ -569,11 +570,11 @@ TEST_F(XlaCompilerTest, CanPassTensorArraysToAndFromComputation) {
// Tests that the generated computation works.
std::unique_ptr<xla::Literal> input_base =
- xla::Literal::CreateR1<int32>({7, 42});
+ xla::LiteralUtil::CreateR1<int32>({7, 42});
std::unique_ptr<xla::Literal> input_grad2 =
- xla::Literal::CreateR1<int32>({-3, 101});
+ xla::LiteralUtil::CreateR1<int32>({-3, 101});
std::unique_ptr<xla::Literal> input =
- xla::Literal::MakeTuple({input_base.get(), input_grad2.get()});
+ xla::LiteralUtil::MakeTuple({input_base.get(), input_grad2.get()});
std::unique_ptr<xla::GlobalData> param0_data =
client_->TransferToServer(*input).ConsumeValueOrDie();
@@ -583,17 +584,18 @@ TEST_F(XlaCompilerTest, CanPassTensorArraysToAndFromComputation) {
std::unique_ptr<xla::Literal> actual_literal =
client_->Transfer(*actual).ConsumeValueOrDie();
- std::unique_ptr<xla::Literal> output_read = xla::Literal::CreateR0<int32>(42);
+ std::unique_ptr<xla::Literal> output_read =
+ xla::LiteralUtil::CreateR0<int32>(42);
std::unique_ptr<xla::Literal> output_base =
- xla::Literal::CreateR1<int32>({7, 42});
+ xla::LiteralUtil::CreateR1<int32>({7, 42});
std::unique_ptr<xla::Literal> output_grad1 =
- xla::Literal::CreateR1<int32>({0, 1});
+ xla::LiteralUtil::CreateR1<int32>({0, 1});
std::unique_ptr<xla::Literal> output_grad2 =
- xla::Literal::CreateR1<int32>({-3, 101});
- std::unique_ptr<xla::Literal> output_resource = xla::Literal::MakeTuple(
+ xla::LiteralUtil::CreateR1<int32>({-3, 101});
+ std::unique_ptr<xla::Literal> output_resource = xla::LiteralUtil::MakeTuple(
{output_base.get(), output_grad1.get(), output_grad2.get()});
std::unique_ptr<xla::Literal> expected_literal =
- xla::Literal::MakeTuple({output_read.get(), output_resource.get()});
+ xla::LiteralUtil::MakeTuple({output_read.get(), output_resource.get()});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(*expected_literal, *actual_literal));
}
@@ -796,9 +798,9 @@ TEST_F(XlaCompilerTest, Variables) {
// Tests that the generated computation works.
std::unique_ptr<xla::Literal> param0_literal =
- xla::Literal::CreateR1<int32>({7, 42});
+ xla::LiteralUtil::CreateR1<int32>({7, 42});
std::unique_ptr<xla::Literal> param1_literal =
- xla::Literal::CreateR1<int32>({-3, 101});
+ xla::LiteralUtil::CreateR1<int32>({-3, 101});
std::unique_ptr<xla::GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
std::unique_ptr<xla::GlobalData> param1_data =
@@ -812,11 +814,11 @@ TEST_F(XlaCompilerTest, Variables) {
client_->Transfer(*actual).ConsumeValueOrDie();
std::unique_ptr<xla::Literal> expected0 =
- xla::Literal::CreateR1<int32>({5, 144});
+ xla::LiteralUtil::CreateR1<int32>({5, 144});
std::unique_ptr<xla::Literal> expected1 =
- xla::Literal::CreateR1<int32>({4, 143});
+ xla::LiteralUtil::CreateR1<int32>({4, 143});
std::unique_ptr<xla::Literal> expected_literal =
- xla::Literal::MakeTuple({expected0.get(), expected1.get()});
+ xla::LiteralUtil::MakeTuple({expected0.get(), expected1.get()});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(*expected_literal, *actual_literal));
}
@@ -884,9 +886,9 @@ TEST_F(XlaCompilerTest, VariableRepresentationShapeFunction) {
// Tests that the generated computation works.
std::unique_ptr<xla::Literal> param0_literal =
- xla::Literal::CreateR2<int32>({{4, 55}, {1, -3}});
+ xla::LiteralUtil::CreateR2<int32>({{4, 55}, {1, -3}});
std::unique_ptr<xla::Literal> param1_literal =
- xla::Literal::CreateR1<int32>({22, 11, 33, 404});
+ xla::LiteralUtil::CreateR1<int32>({22, 11, 33, 404});
std::unique_ptr<xla::GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
std::unique_ptr<xla::GlobalData> param1_data =
@@ -900,11 +902,11 @@ TEST_F(XlaCompilerTest, VariableRepresentationShapeFunction) {
client_->Transfer(*actual).ConsumeValueOrDie();
std::unique_ptr<xla::Literal> expected0 =
- xla::Literal::CreateR2<int32>({{27, 67}, {35, 402}});
+ xla::LiteralUtil::CreateR2<int32>({{27, 67}, {35, 402}});
std::unique_ptr<xla::Literal> expected1 =
- xla::Literal::CreateR1<int32>({26, 66, 34, 401});
+ xla::LiteralUtil::CreateR1<int32>({26, 66, 34, 401});
std::unique_ptr<xla::Literal> expected_literal =
- xla::Literal::MakeTuple({expected0.get(), expected1.get()});
+ xla::LiteralUtil::MakeTuple({expected0.get(), expected1.get()});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(*expected_literal, *actual_literal));
}
@@ -953,9 +955,9 @@ TEST_F(XlaCompilerTest, ArgRetvalShapeRepresentationFunction) {
// Tests that the generated computation works.
std::unique_ptr<xla::Literal> param0_literal =
- xla::Literal::CreateR1<int32>({4, 55, 1, -3});
+ xla::LiteralUtil::CreateR1<int32>({4, 55, 1, -3});
std::unique_ptr<xla::Literal> param1_literal =
- xla::Literal::CreateR1<int32>({22, 11, 33, 404});
+ xla::LiteralUtil::CreateR1<int32>({22, 11, 33, 404});
std::unique_ptr<xla::GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
std::unique_ptr<xla::GlobalData> param1_data =
@@ -969,11 +971,11 @@ TEST_F(XlaCompilerTest, ArgRetvalShapeRepresentationFunction) {
client_->Transfer(*actual).ConsumeValueOrDie();
std::unique_ptr<xla::Literal> expected0 =
- xla::Literal::CreateR1<int32>({27, 67, 35, 402});
+ xla::LiteralUtil::CreateR1<int32>({27, 67, 35, 402});
std::unique_ptr<xla::Literal> expected1 =
- xla::Literal::CreateR1<int32>({26, 66, 34, 401});
+ xla::LiteralUtil::CreateR1<int32>({26, 66, 34, 401});
std::unique_ptr<xla::Literal> expected_literal =
- xla::Literal::MakeTuple({expected0.get(), expected1.get()});
+ xla::LiteralUtil::MakeTuple({expected0.get(), expected1.get()});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(*expected_literal, *actual_literal));
}
@@ -1021,8 +1023,7 @@ TEST_F(XlaCompilerTest, FunctionWithInvalidOp) {
status = compiler.CompileGraph(XlaCompiler::CompileOptions(), "fill",
std::move(graph), args, &result);
ASSERT_FALSE(status.ok());
- EXPECT_TRUE(
- str_util::StrContains(status.error_message(), "FillFn:{InvalidOp}"))
+ EXPECT_TRUE(str_util::StrContains(status.error_message(), "InvalidOp"))
<< status.error_message();
}
diff --git a/tensorflow/compiler/tf2xla/xla_context.cc b/tensorflow/compiler/tf2xla/xla_context.cc
index 67174b251d..0dea366476 100644
--- a/tensorflow/compiler/tf2xla/xla_context.cc
+++ b/tensorflow/compiler/tf2xla/xla_context.cc
@@ -27,7 +27,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/client_library.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
@@ -66,8 +66,8 @@ XlaContext::XlaContext(
XlaCompiler* compiler, xla::XlaBuilder* builder,
bool allow_cpu_custom_calls, bool resolve_compile_time_constants,
bool is_entry_computation,
- const std::function<TensorShape(const TensorShape&, DataType)>*
- shape_representation_fn)
+ const std::function<xla::StatusOr<TensorShape>(
+ const TensorShape&, DataType)>* shape_representation_fn)
: compiler_(compiler),
builder_(builder),
allow_cpu_custom_calls_(allow_cpu_custom_calls),
@@ -119,8 +119,8 @@ Status XlaContext::CreateResource(
return Status::OK();
}
-TensorShape XlaContext::RepresentationShape(const TensorShape& shape,
- DataType type) const {
+xla::StatusOr<TensorShape> XlaContext::RepresentationShape(
+ const TensorShape& shape, DataType type) const {
return (*shape_representation_fn_)(shape, type);
}
@@ -131,9 +131,11 @@ const xla::XlaComputation* XlaContext::GetOrCreateMax(const DataType type) {
xla::XlaBuilder b("max<" + type_string + ">");
xla::PrimitiveType xla_type;
TF_CHECK_OK(DataTypeToPrimitiveType(type, &xla_type));
- auto x = b.Parameter(0, xla::ShapeUtil::MakeShape(xla_type, {}), "x");
- auto y = b.Parameter(1, xla::ShapeUtil::MakeShape(xla_type, {}), "y");
- b.Max(x, y);
+ auto x =
+ xla::Parameter(&b, 0, xla::ShapeUtil::MakeShape(xla_type, {}), "x");
+ auto y =
+ xla::Parameter(&b, 1, xla::ShapeUtil::MakeShape(xla_type, {}), "y");
+ xla::Max(x, y);
return b.Build().ConsumeValueOrDie();
});
}
@@ -145,9 +147,11 @@ const xla::XlaComputation* XlaContext::GetOrCreateMin(const DataType type) {
xla::XlaBuilder b("min<" + type_string + ">");
xla::PrimitiveType xla_type;
TF_CHECK_OK(DataTypeToPrimitiveType(type, &xla_type));
- auto x = b.Parameter(0, xla::ShapeUtil::MakeShape(xla_type, {}), "x");
- auto y = b.Parameter(1, xla::ShapeUtil::MakeShape(xla_type, {}), "y");
- b.Min(x, y);
+ auto x =
+ xla::Parameter(&b, 0, xla::ShapeUtil::MakeShape(xla_type, {}), "x");
+ auto y =
+ xla::Parameter(&b, 1, xla::ShapeUtil::MakeShape(xla_type, {}), "y");
+ xla::Min(x, y);
return b.Build().ConsumeValueOrDie();
});
}
@@ -159,9 +163,11 @@ const xla::XlaComputation* XlaContext::GetOrCreateAdd(const DataType type) {
xla::XlaBuilder b("add<" + type_string + ">");
xla::PrimitiveType xla_type;
TF_CHECK_OK(DataTypeToPrimitiveType(type, &xla_type));
- auto x = b.Parameter(0, xla::ShapeUtil::MakeShape(xla_type, {}), "x");
- auto y = b.Parameter(1, xla::ShapeUtil::MakeShape(xla_type, {}), "y");
- b.Add(x, y);
+ auto x =
+ xla::Parameter(&b, 0, xla::ShapeUtil::MakeShape(xla_type, {}), "x");
+ auto y =
+ xla::Parameter(&b, 1, xla::ShapeUtil::MakeShape(xla_type, {}), "y");
+ xla::Add(x, y);
return b.Build().ConsumeValueOrDie();
});
}
@@ -173,9 +179,11 @@ const xla::XlaComputation* XlaContext::GetOrCreateMul(const DataType type) {
xla::XlaBuilder b("mul<" + type_string + ">");
xla::PrimitiveType xla_type;
TF_CHECK_OK(DataTypeToPrimitiveType(type, &xla_type));
- auto x = b.Parameter(0, xla::ShapeUtil::MakeShape(xla_type, {}), "x");
- auto y = b.Parameter(1, xla::ShapeUtil::MakeShape(xla_type, {}), "y");
- b.Mul(x, y);
+ auto x =
+ xla::Parameter(&b, 0, xla::ShapeUtil::MakeShape(xla_type, {}), "x");
+ auto y =
+ xla::Parameter(&b, 1, xla::ShapeUtil::MakeShape(xla_type, {}), "y");
+ xla::Mul(x, y);
return b.Build().ConsumeValueOrDie();
});
}
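[Editor's note] These hunks migrate from XlaBuilder member calls to the free-function builder API; a standalone sketch of the new style, mirroring GetOrCreateMax above:

xla::XlaBuilder b("max<f32>");
auto x = xla::Parameter(&b, 0, xla::ShapeUtil::MakeShape(xla::F32, {}), "x");
auto y = xla::Parameter(&b, 1, xla::ShapeUtil::MakeShape(xla::F32, {}), "y");
xla::Max(x, y);  // ops are free functions taking XlaOp/XlaBuilder arguments
xla::XlaComputation max_computation = b.Build().ConsumeValueOrDie();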
diff --git a/tensorflow/compiler/tf2xla/xla_context.h b/tensorflow/compiler/tf2xla/xla_context.h
index 5960daaefd..38d8cd653c 100644
--- a/tensorflow/compiler/tf2xla/xla_context.h
+++ b/tensorflow/compiler/tf2xla/xla_context.h
@@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
+#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
@@ -47,8 +48,8 @@ class XlaContext : public ResourceBase {
XlaContext(XlaCompiler* compiler, xla::XlaBuilder* builder,
bool allow_cpu_custom_calls, bool resolve_compile_time_constants,
bool is_entry_computation,
- const std::function<TensorShape(const TensorShape&, DataType)>*
- shape_representation_fn);
+ const std::function<xla::StatusOr<TensorShape>(
+ const TensorShape&, DataType)>* shape_representation_fn);
// Virtual method defined by ResourceBase.
string DebugString() override;
@@ -101,8 +102,8 @@ class XlaContext : public ResourceBase {
// Returns the XLA shape to be used to represent a variable of TF `shape`
// and `type`, or of an argument or return value of a top-level computation.
- TensorShape RepresentationShape(const TensorShape& shape,
- DataType type) const;
+ xla::StatusOr<TensorShape> RepresentationShape(const TensorShape& shape,
+ DataType type) const;
// Get an XLA lambda to compute Max. This is cached in the
// XlaContext since it may be used by multiple Ops. There is a
@@ -160,7 +161,7 @@ class XlaContext : public ResourceBase {
// should be represented in XLA. Parameters/return values will be shaped
// according to this function, and reshaped back to/from their declared shapes
// for computations. Must be non-null.
- const std::function<TensorShape(const TensorShape&, DataType)>*
+ const std::function<xla::StatusOr<TensorShape>(const TensorShape&, DataType)>*
shape_representation_fn_;
// Cache of prebuilt computations indexed by their type.
diff --git a/tensorflow/compiler/tf2xla/xla_cpu_backend.cc b/tensorflow/compiler/tf2xla/xla_cpu_backend.cc
index ead229aacc..23d04d43b3 100644
--- a/tensorflow/compiler/tf2xla/xla_cpu_backend.cc
+++ b/tensorflow/compiler/tf2xla/xla_cpu_backend.cc
@@ -31,6 +31,10 @@ bool CpuOpFilter(KernelDef* kdef) {
DT_FLOAT);
return true;
}
+ // TODO(b/26783907): The CPU backend currently does not implement sort.
+ if (kdef->op() == "XlaSort" || kdef->op() == "TopKV2") {
+ return false;
+ }
if (kdef->op() == "Const") {
AddDtypeToKernalDefConstraint("dtype", DT_STRING, kdef);
}
diff --git a/tensorflow/compiler/tf2xla/xla_gpu_backend.cc b/tensorflow/compiler/tf2xla/xla_gpu_backend.cc
index 62168b6483..dc98d4fda6 100644
--- a/tensorflow/compiler/tf2xla/xla_gpu_backend.cc
+++ b/tensorflow/compiler/tf2xla/xla_gpu_backend.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+#include "tensorflow/compiler/tf2xla/legacy_flags/backend_registration_flags.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
@@ -22,8 +23,16 @@ namespace tensorflow {
bool GpuOpFilter(KernelDef* kdef) {
// TODO(b/31361304): The GPU backend does not parallelize PRNG ops, leading to
// slow code.
- if (kdef->op() == "RandomStandardNormal" || kdef->op() == "RandomUniform" ||
- kdef->op() == "RandomUniformInt" || kdef->op() == "TruncatedNormal") {
+ legacy_flags::BackendRegistrationFlags* flags =
+ legacy_flags::GetBackendRegistrationFlags();
+ VLOG(2) << "flags->tf_enable_prng_ops_gpu: " << flags->tf_enable_prng_ops_gpu;
+ if (!flags->tf_enable_prng_ops_gpu &&
+ (kdef->op() == "RandomStandardNormal" || kdef->op() == "RandomUniform" ||
+ kdef->op() == "RandomUniformInt" || kdef->op() == "TruncatedNormal")) {
+ return false;
+ }
+ // TODO(b/26783907): The GPU backend currently does not implement sort.
+ if (kdef->op() == "XlaSort" || kdef->op() == "TopKV2") {
return false;
}
if (kdef->op() == "Const") {
diff --git a/tensorflow/compiler/tf2xla/xla_helpers.cc b/tensorflow/compiler/tf2xla/xla_helpers.cc
index 93cd340485..4d1b3b1a13 100644
--- a/tensorflow/compiler/tf2xla/xla_helpers.cc
+++ b/tensorflow/compiler/tf2xla/xla_helpers.cc
@@ -23,6 +23,9 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
+#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/framework/tensor.h"
@@ -33,103 +36,71 @@ namespace tensorflow {
namespace {
-Status ArgMinMax(xla::XlaBuilder* builder, XlaOpKernelContext* ctx,
- const xla::XlaOp& input, const TensorShape& input_shape,
- DataType input_type, DataType output_type, int axis,
- bool is_min, xla::XlaOp* argminmax) {
- xla::XlaOp init_value;
- const xla::XlaComputation* reducer;
- if (is_min) {
- init_value = XlaHelpers::MaxValue(builder, input_type);
- reducer = ctx->GetOrCreateMin(input_type);
- } else {
- init_value = XlaHelpers::MinValue(builder, input_type);
- reducer = ctx->GetOrCreateMax(input_type);
- }
-
- xla::PrimitiveType xla_output_type;
- TF_RETURN_IF_ERROR(DataTypeToPrimitiveType(output_type, &xla_output_type));
-
- xla::XlaOp input_max = builder->Reduce(input, init_value, *reducer,
- /*dimensions_to_reduce=*/{axis});
- std::vector<int64> broadcast_dims(input_shape.dims() - 1);
- std::iota(broadcast_dims.begin(), broadcast_dims.begin() + axis, 0);
- std::iota(broadcast_dims.begin() + axis, broadcast_dims.end(), axis + 1);
- // Compute a mask that has 1s for elements equal to the maximum.
- xla::XlaOp partial_mask = builder->ConvertElementType(
- builder->Eq(input, input_max, broadcast_dims), xla_output_type);
-
- // In order to make identity elements for a bitwise And, we:
- // Left shift the 1 to the leftmost bit, yielding 0x10...0
- // Arithmetic right shift the 1 back to the rightmost bit, yielding
- // 0xFF...F
- int32 bits_in_type =
- xla::ShapeUtil::ByteSizeOfPrimitiveType(xla_output_type) * 8 - 1;
- xla::XlaOp shift_amount =
- XlaHelpers::IntegerLiteral(builder, output_type, bits_in_type);
- xla::XlaOp full_mask = builder->ShiftRightArithmetic(
- builder->ShiftLeft(partial_mask, shift_amount), shift_amount);
-
- // And with the vector [0, 1, 2, ...] to convert each 0xFF...F into its
- // index.
- xla::XlaOp iota;
-
- const int64 axis_size = input_shape.dim_size(axis);
- TF_RETURN_IF_ERROR(XlaHelpers::Iota(builder, output_type, axis_size, &iota));
- xla::XlaOp product =
- builder->And(full_mask, iota, /*broadcast_dimensions=*/{axis});
-
- // If there are multiple maximum elements, choose the one with the highest
- // index.
- xla::XlaOp output =
- builder->Reduce(product, XlaHelpers::MinValue(builder, output_type),
- *ctx->GetOrCreateMax(output_type),
- /*dimensions_to_reduce=*/{axis});
- *argminmax = output;
- return Status::OK();
+xla::XlaOp ArgMinMax(xla::XlaOp input, xla::PrimitiveType output_type, int axis,
+ bool is_min) {
+ xla::XlaBuilder* builder = input.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape input_shape, builder->GetShape(input));
+ xla::XlaOp init_value;
+ xla::XlaComputation reducer;
+ if (is_min) {
+ init_value = xla::MaxValue(builder, input_shape.element_type());
+ reducer =
+ xla::CreateScalarMinComputation(input_shape.element_type(), builder);
+ } else {
+ init_value = xla::MinValue(builder, input_shape.element_type());
+ reducer =
+ xla::CreateScalarMaxComputation(input_shape.element_type(), builder);
+ }
+
+ xla::XlaOp input_max = xla::Reduce(input, init_value, reducer,
+ /*dimensions_to_reduce=*/{axis});
+ std::vector<int64> broadcast_dims(xla::ShapeUtil::Rank(input_shape) - 1);
+ std::iota(broadcast_dims.begin(), broadcast_dims.begin() + axis, 0);
+ std::iota(broadcast_dims.begin() + axis, broadcast_dims.end(), axis + 1);
+ // Compute a mask that has 1s for elements equal to the maximum.
+ xla::XlaOp partial_mask = xla::ConvertElementType(
+ xla::Eq(input, input_max, broadcast_dims), output_type);
+
+ // In order to make identity elements for a bitwise And, we:
+    // Left shift the 1 into the leftmost (sign) bit, yielding 0x80...0
+    // Arithmetic right shift back to the rightmost bit, sign-extending to
+    // 0xFF...F
+ int32 bits_in_type =
+ xla::ShapeUtil::ByteSizeOfPrimitiveType(output_type) * 8 - 1;
+ xla::XlaOp shift_amount =
+ xla::ConstantR0WithType(builder, output_type, bits_in_type);
+ xla::XlaOp full_mask = xla::ShiftRightArithmetic(
+ xla::ShiftLeft(partial_mask, shift_amount), shift_amount);
+
+ // And with the vector [0, 1, 2, ...] to convert each 0xFF...F into its
+ // index.
+
+ const int64 axis_size = xla::ShapeUtil::GetDimension(input_shape, axis);
+ xla::XlaOp iota = xla::Iota(builder, output_type, axis_size);
+ xla::XlaOp product =
+ xla::And(full_mask, iota, /*broadcast_dimensions=*/{axis});
+
+ // If there are multiple maximum elements, choose the one with the highest
+ // index.
+ return xla::Reduce(product, xla::MinValue(builder, output_type),
+ xla::CreateScalarMaxComputation(output_type, builder),
+ /*dimensions_to_reduce=*/{axis});
+ });
}
} // namespace
-xla::XlaOp XlaHelpers::MinValue(xla::XlaBuilder* b, DataType data_type) {
- xla::PrimitiveType type;
- TF_CHECK_OK(DataTypeToPrimitiveType(data_type, &type));
- return b->ConstantLiteral(xla::Literal::MinValue(type));
-}
-
-xla::XlaOp XlaHelpers::MaxValue(xla::XlaBuilder* b, DataType data_type) {
- xla::PrimitiveType type;
- TF_CHECK_OK(DataTypeToPrimitiveType(data_type, &type));
- return b->ConstantLiteral(xla::Literal::MaxValue(type));
-}
-
xla::XlaOp XlaHelpers::Zero(xla::XlaBuilder* b, DataType data_type) {
xla::PrimitiveType type;
TF_CHECK_OK(DataTypeToPrimitiveType(data_type, &type));
- return b->ConstantLiteral(xla::Literal::Zero(type));
+ return xla::ConstantLiteral(b, xla::LiteralUtil::Zero(type));
}
xla::XlaOp XlaHelpers::One(xla::XlaBuilder* b, DataType data_type) {
xla::PrimitiveType type;
TF_CHECK_OK(DataTypeToPrimitiveType(data_type, &type));
- return b->ConstantLiteral(xla::Literal::One(type));
-}
-
-xla::XlaOp XlaHelpers::Epsilon(xla::XlaBuilder* b, DataType data_type) {
- switch (data_type) {
- case DT_HALF:
- return b->ConstantR0<Eigen::half>(
- static_cast<Eigen::half>(Eigen::NumTraits<Eigen::half>::epsilon()));
- case DT_BFLOAT16:
- return b->ConstantR0<bfloat16>(bfloat16::epsilon());
- case DT_FLOAT:
- return b->ConstantR0<float>(std::numeric_limits<float>::epsilon());
- case DT_DOUBLE:
- return b->ConstantR0<double>(std::numeric_limits<double>::epsilon());
- default:
- LOG(FATAL) << "Unsupported type in XlaHelpers::Epsilon: "
- << DataTypeString(data_type);
- }
+ return xla::ConstantLiteral(b, xla::LiteralUtil::One(type));
}
xla::XlaOp XlaHelpers::IntegerLiteral(xla::XlaBuilder* b, DataType data_type,
@@ -177,45 +148,14 @@ static Tensor MakeLinspaceTensor(const TensorShape& shape, int64 depth) {
return linspace;
}
-Status XlaHelpers::ArgMax(xla::XlaBuilder* builder, XlaOpKernelContext* ctx,
- const xla::XlaOp& input,
- const TensorShape& input_shape, DataType input_type,
- DataType output_type, int axis, xla::XlaOp* argmax) {
- return ArgMinMax(builder, ctx, input, input_shape, input_type, output_type,
- axis, /*is_min=*/false, argmax);
-}
-
-Status XlaHelpers::ArgMin(xla::XlaBuilder* builder, XlaOpKernelContext* ctx,
- const xla::XlaOp& input,
- const TensorShape& input_shape, DataType input_type,
- DataType output_type, int axis, xla::XlaOp* argmin) {
- return ArgMinMax(builder, ctx, input, input_shape, input_type, output_type,
- axis, /*is_min=*/true, argmin);
+xla::XlaOp XlaHelpers::ArgMax(xla::XlaOp input, xla::PrimitiveType output_type,
+ int axis) {
+ return ArgMinMax(input, output_type, axis, /*is_min=*/false);
}
-Status XlaHelpers::Iota(xla::XlaBuilder* builder, DataType dtype, int64 size,
- xla::XlaOp* iota) {
- TensorShape linspace_shape({size});
- Tensor linspace;
- switch (dtype) {
- case DT_UINT8:
- linspace = MakeLinspaceTensor<uint8>(linspace_shape, size);
- break;
- case DT_INT32:
- linspace = MakeLinspaceTensor<int32>(linspace_shape, size);
- break;
- case DT_INT64:
- linspace = MakeLinspaceTensor<int64>(linspace_shape, size);
- break;
- default:
- return errors::InvalidArgument("Invalid argument type ",
- DataTypeString(dtype));
- }
- xla::BorrowingLiteral linspace_literal;
- TF_RETURN_IF_ERROR(HostTensorToBorrowingLiteral(linspace, &linspace_literal));
-
- *iota = builder->ConstantLiteral(linspace_literal);
- return Status::OK();
+xla::XlaOp XlaHelpers::ArgMin(xla::XlaOp input, xla::PrimitiveType output_type,
+ int axis) {
+ return ArgMinMax(input, output_type, axis, /*is_min=*/true);
}
Status XlaHelpers::OneHot(xla::XlaBuilder* builder, int64 depth, int axis,
@@ -256,17 +196,19 @@ Status XlaHelpers::OneHot(xla::XlaBuilder* builder, int64 depth, int axis,
std::vector<int64> broadcast_dims(indices_shape.dims());
std::iota(broadcast_dims.begin(), broadcast_dims.begin() + axis, 0);
std::iota(broadcast_dims.begin() + axis, broadcast_dims.end(), axis + 1);
- xla::XlaOp one_hot_bool = builder->Eq(
- indices, builder->ConstantLiteral(linspace_literal), broadcast_dims);
+ xla::XlaOp one_hot_bool = xla::Eq(
+ indices, xla::ConstantLiteral(builder, linspace_literal), broadcast_dims);
// Selects the user-provided off_value and on_value values.
- *one_hot = builder->Select(
- one_hot_bool, builder->Broadcast(on_value, output_shape.dim_sizes()),
- builder->Broadcast(off_value, output_shape.dim_sizes()));
+ *one_hot = xla::Select(one_hot_bool,
+ xla::Broadcast(on_value, output_shape.dim_sizes()),
+ xla::Broadcast(off_value, output_shape.dim_sizes()));
return Status::OK();
}
DataType XlaHelpers::SumAccumulationType(const DataType& dtype) {
+  // Upcast 16-bit sum reductions to 32-bit to reduce the precision loss from
+  // repeated floating-point additions.
if (dtype == DT_BFLOAT16 || dtype == DT_HALF) {
return DT_FLOAT;
}
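[Editor's note] For illustration, the upcast above means a reduced-precision sum accumulates at 32 bits (a sketch, not part of the patch):

DataType a = XlaHelpers::SumAccumulationType(DT_BFLOAT16);  // -> DT_FLOAT
DataType b = XlaHelpers::SumAccumulationType(DT_HALF);      // -> DT_FLOAT
DataType c = XlaHelpers::SumAccumulationType(DT_INT32);     // unchanged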
@@ -278,7 +220,7 @@ xla::XlaOp XlaHelpers::ConvertElementType(xla::XlaBuilder* const builder,
const DataType new_element_type) {
xla::PrimitiveType convert_to;
TF_CHECK_OK(DataTypeToPrimitiveType(new_element_type, &convert_to));
- return builder->ConvertElementType(operand, convert_to);
+ return xla::ConvertElementType(operand, convert_to);
}
} // end namespace tensorflow
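[Editor's note] The shift trick in ArgMinMax above can be checked with ordinary integers; a self-contained sketch (plain C++, not XLA; relies on two's-complement arithmetic shifts, which mainstream compilers provide):

#include <cstdint>
#include <cstdio>

int main() {
  for (int32_t v : {0, 1}) {
    // Move the low bit into the sign position, then arithmetic-shift it back:
    // 1 becomes 0xFFFFFFFF (an all-ones And identity mask), 0 stays 0.
    int32_t mask = static_cast<int32_t>(static_cast<uint32_t>(v) << 31) >> 31;
    std::printf("%d -> 0x%08X\n", v, static_cast<unsigned>(mask));
  }
  return 0;
}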
diff --git a/tensorflow/compiler/tf2xla/xla_helpers.h b/tensorflow/compiler/tf2xla/xla_helpers.h
index c3fdc5252e..d6ca4ab934 100644
--- a/tensorflow/compiler/tf2xla/xla_helpers.h
+++ b/tensorflow/compiler/tf2xla/xla_helpers.h
@@ -28,14 +28,6 @@ namespace tensorflow {
// Helper methods for building XLA computations.
class XlaHelpers {
public:
- // Returns a handle representing the minimum value of a scalar
- // element of data_type.
- static xla::XlaOp MinValue(xla::XlaBuilder* b, DataType data_type);
-
- // Returns a handle representing the maximum value of a scalar
- // element of data_type.
- static xla::XlaOp MaxValue(xla::XlaBuilder* b, DataType data_type);
-
// Returns a handle representing the zero value of a scalar
// element of data_type.
static xla::XlaOp Zero(xla::XlaBuilder* b, DataType data_type);
@@ -44,10 +36,6 @@ class XlaHelpers {
// element of data_type.
static xla::XlaOp One(xla::XlaBuilder* b, DataType data_type);
- // Returns the machine epsilon for floating-point type `data_type`, i.e.,
- // the difference between 1.0 and the next representable value.
- static xla::XlaOp Epsilon(xla::XlaBuilder* b, DataType data_type);
-
// Returns a handle representing the given value of an integer scalar
// element of data_type.
// Note that unlike One and Zero, does not work on boolean types.
@@ -65,25 +53,15 @@ class XlaHelpers {
gtl::ArraySlice<int64> shape,
xla::Literal* output);
- // Sets `argmax` to the argmax of `input` along `axis`. `input_shape` and
- // `input_dtype` are the shape and dtype of `input` respectively, and
- // `output_type` is the dtype to use for `argmax`.
- static Status ArgMax(xla::XlaBuilder* builder, XlaOpKernelContext* ctx,
- const xla::XlaOp& input, const TensorShape& input_shape,
- DataType input_type, DataType output_type, int axis,
- xla::XlaOp* argmax);
-
- // Sets `argmin` to the argmin of `input` along `axis`. `input_shape` and
- // `input_dtype` are the shape and dtype of `input` respectively, and
- // `output_type` is the dtype to use for `argmin`.
- static Status ArgMin(xla::XlaBuilder* builder, XlaOpKernelContext* ctx,
- const xla::XlaOp& input, const TensorShape& input_shape,
- DataType input_type, DataType output_type, int axis,
- xla::XlaOp* argmin);
-
- // Sets *iota to a rank 1 tensor with values [0, 1, 2, ...] of `dtype`.
- static Status Iota(xla::XlaBuilder* builder, DataType dtype, int64 size,
- xla::XlaOp* iota);
+ // Returns the argmax of `input` along `axis`. `output_type` is the type to
+ // use for the output.
+ static xla::XlaOp ArgMax(xla::XlaOp input, xla::PrimitiveType output_type,
+ int axis);
+
+ // Returns the argmin of `input` along `axis`. `output_type` is the type to
+ // use for the output.
+ static xla::XlaOp ArgMin(xla::XlaOp input, xla::PrimitiveType output_type,
+ int axis);
// Converts `indices` into a one-hot representation. `depth` is the size
// of the new axis to add. `axis` is the position at which to add the new
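[Editor's note] Callers of the reworked helpers now get an XlaOp back directly, with errors surfaced through the builder; a sketch assuming `input` is an existing xla::XlaOp:

xla::XlaOp argmax = XlaHelpers::ArgMax(input, xla::S32, /*axis=*/1);
xla::XlaOp argmin = XlaHelpers::ArgMin(input, xla::S32, /*axis=*/0);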
diff --git a/tensorflow/compiler/tf2xla/xla_op_kernel.cc b/tensorflow/compiler/tf2xla/xla_op_kernel.cc
index b58959bd6c..e8eafb3819 100644
--- a/tensorflow/compiler/tf2xla/xla_op_kernel.cc
+++ b/tensorflow/compiler/tf2xla/xla_op_kernel.cc
@@ -19,7 +19,10 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/literal_util.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
+#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
namespace tensorflow {
@@ -63,10 +66,32 @@ const xla::XlaOp& XlaOpKernelContext::Input(int index) {
return GetComputationFromTensor(context_->input(index));
}
+const xla::XlaOp& XlaOpKernelContext::Input(StringPiece name) {
+ return GetComputationFromTensor(GetInputTensorByName(name));
+}
+
TensorShape XlaOpKernelContext::InputShape(int index) {
return context_->input(index).shape();
}
+TensorShape XlaOpKernelContext::InputShape(StringPiece name) {
+ return GetInputTensorByName(name).shape();
+}
+
+DataType XlaOpKernelContext::input_type(int index) const {
+ return context_->input(index).dtype();
+}
+
+xla::PrimitiveType XlaOpKernelContext::input_xla_type(int index) {
+ xla::PrimitiveType type;
+ Status status = DataTypeToPrimitiveType(input_type(index), &type);
+ if (!status.ok()) {
+ SetStatus(status);
+ return xla::PRIMITIVE_TYPE_INVALID;
+ }
+ return type;
+}
+
Status XlaOpKernelContext::ConstantInput(int index,
xla::Literal* constant_literal) {
return ConstantInputReshaped(
@@ -128,7 +153,7 @@ Status XlaOpKernelContext::ConstantInputReshaped(
xla::XlaOp handle = expression->handle();
if (new_shape != tensor.shape()) {
// Reshape the handle to the desired shape.
- handle = builder()->Reshape(handle, new_shape.dim_sizes());
+ handle = xla::Reshape(handle, new_shape.dim_sizes());
}
// The XLA layout is specified minor to major, and TensorFlow's minor
@@ -315,10 +340,11 @@ Status XlaOpKernelContext::ConstantInputList(
return Status::OK();
}
-Status XlaOpKernelContext::ReadVariableInput(int index, DataType type,
- TensorShape* shape,
- xla::XlaOp* value) {
- const Tensor& tensor = context_->input(index);
+namespace {
+
+Status ReadVariableInputTensor(const Tensor& tensor, DataType type,
+ const OpKernelContext* ctx, TensorShape* shape,
+ xla::XlaOp* value) {
const XlaExpression* expression = CastExpressionFromTensor(tensor);
XlaResource* variable = expression->resource();
TF_RET_CHECK(variable != nullptr);
@@ -336,18 +362,34 @@ Status XlaOpKernelContext::ReadVariableInput(int index, DataType type,
*shape = variable->shape();
}
- XlaContext& xla_context = XlaContext::Get(context_);
- TensorShape representation_shape =
- xla_context.RepresentationShape(variable->shape(), variable->type());
+ XlaContext& xla_context = XlaContext::Get(ctx);
+ TF_ASSIGN_OR_RETURN(
+ TensorShape representation_shape,
+ xla_context.RepresentationShape(variable->shape(), variable->type()));
if (representation_shape == variable->shape()) {
*value = variable->value();
} else {
- *value =
- builder()->Reshape(variable->value(), variable->shape().dim_sizes());
+ *value = xla::Reshape(variable->value(), variable->shape().dim_sizes());
}
return Status::OK();
}
+} // namespace
+
+Status XlaOpKernelContext::ReadVariableInput(int index, DataType type,
+ TensorShape* shape,
+ xla::XlaOp* value) {
+ return ReadVariableInputTensor(context_->input(index), type, context_, shape,
+ value);
+}
+
+Status XlaOpKernelContext::ReadVariableInput(StringPiece name, DataType type,
+ TensorShape* shape,
+ xla::XlaOp* value) {
+ return ReadVariableInputTensor(GetInputTensorByName(name), type, context_,
+ shape, value);
+}
+
Status XlaOpKernelContext::GetVariableTypeAndShape(int index, DataType* type,
TensorShape* shape) const {
const Tensor& tensor = context_->input(index);
@@ -394,7 +436,7 @@ void XlaOpKernelContext::SetConstantOutput(int index, const Tensor& constant) {
xla::BorrowingLiteral literal;
OP_REQUIRES_OK(context_, HostTensorToBorrowingLiteral(constant, &literal));
- xla::XlaOp handle = builder()->ConstantLiteral(literal);
+ xla::XlaOp handle = xla::ConstantLiteral(builder(), literal);
CHECK(handle.valid());
// Make the Tensor that will refer to the expression.
@@ -438,17 +480,17 @@ Status XlaOpKernelContext::GetResourceInput(int index, XlaResource** resource) {
return Status::OK();
}
-Status XlaOpKernelContext::AssignVariable(int input_index, DataType type,
- xla::XlaOp handle) {
- TF_RET_CHECK(handle.valid());
+namespace {
- const XlaExpression* expression =
- CastExpressionFromTensor(context_->input(input_index));
+Status AssignVariableTensor(const Tensor& tensor, DataType type,
+ const OpKernelContext* ctx, xla::XlaOp handle,
+ xla::XlaBuilder* builder) {
+ const XlaExpression* expression = CastExpressionFromTensor(tensor);
XlaResource* variable = expression->resource();
TF_RET_CHECK(variable != nullptr);
TF_RET_CHECK(variable->kind() == XlaResource::kVariable);
- auto shape_or_status = builder()->GetShape(handle);
+ auto shape_or_status = builder->GetShape(handle);
if (!shape_or_status.ok()) {
return shape_or_status.status();
}
@@ -458,15 +500,31 @@ Status XlaOpKernelContext::AssignVariable(int input_index, DataType type,
TF_RETURN_IF_ERROR(variable->SetTypeAndShape(type, shape));
- XlaContext& xla_context = XlaContext::Get(context_);
- TensorShape representation_shape =
- xla_context.RepresentationShape(shape, type);
+ XlaContext& xla_context = XlaContext::Get(ctx);
+ TF_ASSIGN_OR_RETURN(TensorShape representation_shape,
+ xla_context.RepresentationShape(shape, type));
if (shape != representation_shape) {
- handle = builder()->Reshape(handle, representation_shape.dim_sizes());
+ handle = xla::Reshape(handle, representation_shape.dim_sizes());
}
return variable->SetValue(handle);
}
+} // namespace
+
+Status XlaOpKernelContext::AssignVariable(int input_index, DataType type,
+ xla::XlaOp handle) {
+ TF_RET_CHECK(handle.valid());
+ return AssignVariableTensor(context_->input(input_index), type, context_,
+ handle, builder());
+}
+
+Status XlaOpKernelContext::AssignVariable(StringPiece name, DataType type,
+ xla::XlaOp handle) {
+ TF_RET_CHECK(handle.valid());
+ return AssignVariableTensor(GetInputTensorByName(name), type, context_,
+ handle, builder());
+}
+
XlaCompiler* XlaOpKernelContext::compiler() const {
return XlaContext::Get(context_).compiler();
}
@@ -506,6 +564,12 @@ const xla::XlaComputation* XlaOpKernelContext::GetOrCreateMul(
return XlaContext::Get(context_).GetOrCreateMul(type);
}
+const Tensor& XlaOpKernelContext::GetInputTensorByName(StringPiece name) {
+ const Tensor* tensor;
+ CHECK(context_->input(name, &tensor).ok());
+ return *tensor;
+}
+
XlaOpKernel::XlaOpKernel(OpKernelConstruction* context) : OpKernel(context) {}
void XlaOpKernel::Compute(OpKernelContext* context) {
diff --git a/tensorflow/compiler/tf2xla/xla_op_kernel.h b/tensorflow/compiler/tf2xla/xla_op_kernel.h
index 667dc262ca..6203cffd80 100644
--- a/tensorflow/compiler/tf2xla/xla_op_kernel.h
+++ b/tensorflow/compiler/tf2xla/xla_op_kernel.h
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/platform/macros.h"
@@ -66,16 +67,26 @@ class XlaOpKernelContext {
// Returns the number of inputs to the operator.
int num_inputs() const { return context_->num_inputs(); }
- // Returns the type of input 'index'.
- DataType input_type(int index) { return context_->input(index).dtype(); }
+ // Returns the type of input `index`.
+ DataType input_type(int index) const;
- // Returns the shape of input 'index'.
+ // Returns the type of input `index` as an xla::PrimitiveType. If the type
+ // is not representable as an XLA type, sets an error status and returns
+ // xla::PRIMITIVE_TYPE_INVALID.
+ xla::PrimitiveType input_xla_type(int index);
+
+ // Returns the shape of input `index`.
TensorShape InputShape(int index);
- // Returns input 'index' as a XlaOp. Unlike
+ // Returns the shape of input `name`.
+ TensorShape InputShape(StringPiece name);
+
+  // Returns input `index` as an XlaOp. Unlike
// OpKernelContext::Input returns a symbolic value rather than a concrete
// Tensor.
const xla::XlaOp& Input(int index);
+  // Returns input `name` as an XlaOp.
+ const xla::XlaOp& Input(StringPiece name);
// Returns true if all inputs are the same shape, otherwise sets the
// status to a non-OK value and returns false.
@@ -90,13 +101,13 @@ class XlaOpKernelContext {
// Helper methods for constant inputs.
- // Evaluates input 'index' and stores it in '*constant_literal'. If the
+ // Evaluates input `index` and stores it in `*constant_literal`. If the
// expression cannot be evaluated, e.g., because it depends on unbound
// parameters, returns a non-OK status.
Status ConstantInput(int index, xla::Literal* constant_literal);
- // Evaluates input 'index', reshapes it to 'new_shape' if new_shape !=
- // InputShape(index), and stores it in '*constant_literal'. If the input
+ // Evaluates input `index`, reshapes it to `new_shape` if new_shape !=
+ // InputShape(index), and stores it in `*constant_literal`. If the input
// cannot be evaluated, e.g., because it depends on unbound parameters,
// returns a non-Ok status. If InputShape(index).num_elements() !=
// new_shape.num_elements(), returns an error status.
@@ -131,17 +142,17 @@ class XlaOpKernelContext {
return context_->expected_output_dtype(index);
}
- // Sets output 'index' to the XlaOp 'handle'.
+ // Sets output `index` to the XlaOp `handle`.
// All outputs should be set using SetOutput and SetConstantOutput, not
// via the underlying OpKernelContext.
void SetOutput(int index, const xla::XlaOp& handle);
- // Sets output 'index' to compile-time constant 'host_tensor', where
- // 'host_tensor' is a tensor in host memory. It is preferable to use
+ // Sets output `index` to compile-time constant `host_tensor`, where
+ // `host_tensor` is a tensor in host memory. It is preferable to use
// SetConstantOutput where possible.
void SetConstantOutput(int index, const Tensor& host_tensor);
- // Sets output 'index' to an invalid value.
+ // Sets output `index` to an invalid value.
// Any subsequent attempt to consume this output will cause an error.
void SetInvalidOutput(int index);
@@ -151,10 +162,10 @@ class XlaOpKernelContext {
// Variables
- // Sets '*resource' to the resource associated with input `index`.
+ // Sets `*resource` to the resource associated with input `index`.
Status GetResourceInput(int index, XlaResource** resource);
- // Sets output 'index' to be a reference to resource 'resource'.
+ // Sets output `index` to be a reference to resource `resource`.
void SetResourceOutput(int index, XlaResource* resource);
// Sets `*type` and `*shape` to the current type and shape of a variable's
@@ -163,17 +174,23 @@ class XlaOpKernelContext {
TensorShape* shape) const;
// Reads the current value of the resource variable referred to by input
- // 'index'. If `shape` is not nullptr, sets `*shape` to the shape of the
+ // `index`. If `shape` is not nullptr, sets `*shape` to the shape of the
// variable. Returns an error if the variable has not been initialized, or if
// its type does not match `type`.
Status ReadVariableInput(int index, DataType type, TensorShape* shape,
xla::XlaOp* value);
+  // Reads the current value of the resource variable referred to by input
+ // `name`.
+ Status ReadVariableInput(StringPiece name, DataType type, TensorShape* shape,
+ xla::XlaOp* value);
// Assigns the value `handle` to the variable referenced by input
// `input_index`. The variable must be of `type`. Returns an error if the
// variable has been initialized with a different type or with a
// different shape.
Status AssignVariable(int input_index, DataType type, xla::XlaOp handle);
+ // Assigns the value `handle` to the variable referenced by input `name`.
+ Status AssignVariable(StringPiece name, DataType type, xla::XlaOp handle);
// Helper routines for the OP_REQUIRES macros
void CtxFailure(const Status& s);
@@ -221,6 +238,9 @@ class XlaOpKernelContext {
const xla::XlaComputation* GetOrCreateMul(const DataType type);
private:
+ // Returns the tensor of input `name`.
+ const Tensor& GetInputTensorByName(StringPiece name);
+
OpKernelContext* const context_;
};
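[Editor's note] A hypothetical kernel body exercising the name-based and typed accessors declared above (the input names "x" and "var" and the DT_FLOAT type are illustrative):

void Compile(XlaOpKernelContext* ctx) override {
  // Inputs can now be addressed by name as well as by positional index.
  xla::XlaOp x = ctx->Input("x");
  TensorShape x_shape = ctx->InputShape("x");
  // Sets an error status and yields PRIMITIVE_TYPE_INVALID on failure.
  xla::PrimitiveType type = ctx->input_xla_type(0);

  TensorShape var_shape;
  xla::XlaOp var_value;
  OP_REQUIRES_OK(ctx, ctx->ReadVariableInput("var", DT_FLOAT, &var_shape,
                                             &var_value));
  OP_REQUIRES_OK(ctx, ctx->AssignVariable("var", DT_FLOAT, x));
}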
diff --git a/tensorflow/compiler/tf2xla/xla_op_registry.cc b/tensorflow/compiler/tf2xla/xla_op_registry.cc
index ee6da6a67a..46785bc1f0 100644
--- a/tensorflow/compiler/tf2xla/xla_op_registry.cc
+++ b/tensorflow/compiler/tf2xla/xla_op_registry.cc
@@ -240,6 +240,7 @@ void XlaOpRegistry::RegisterCompilationKernels() {
// a) the types supported by the backend, and
// b) the types allowed by the OpDef, and
// c) the type constraints.
+ bool unsatisfiable_type_constraint = false;
for (const string& type_attr : type_attrs) {
KernelDef::AttrConstraint* attr_constraint = kdef->add_constraint();
attr_constraint->set_name(type_attr);
@@ -276,7 +277,14 @@ void XlaOpRegistry::RegisterCompilationKernels() {
if (op_registration->allow_resource_types) {
allowed_values->add_type(DT_RESOURCE);
}
+ // Don't build KernelDefs that have unsatisfiable type constraints.
+ if (allowed_values->type().empty()) {
+ unsatisfiable_type_constraint = true;
+ break;
+ }
}
+ if (unsatisfiable_type_constraint) continue;
+
if (backend.second.op_filter != nullptr &&
!backend.second.op_filter(kdef.get())) {
continue;
diff --git a/tensorflow/compiler/tf2xla/xla_op_registry.h b/tensorflow/compiler/tf2xla/xla_op_registry.h
index 2d4593ea49..fc14834ca6 100644
--- a/tensorflow/compiler/tf2xla/xla_op_registry.h
+++ b/tensorflow/compiler/tf2xla/xla_op_registry.h
@@ -279,7 +279,7 @@ class XlaOpRegistrar {
#define REGISTER_XLA_OP_UNIQ(CTR, BUILDER, OP) \
static ::tensorflow::XlaOpRegistrar xla_op_registrar__body__##CTR##__object( \
- XlaOpRegistrationBuilder::BUILDER.Build( \
+ ::tensorflow::XlaOpRegistrationBuilder::BUILDER.Build( \
[](::tensorflow::OpKernelConstruction* context) \
-> ::tensorflow::OpKernel* { return new OP(context); }));
diff --git a/tensorflow/compiler/tf2xla/xla_op_registry_test.cc b/tensorflow/compiler/tf2xla/xla_op_registry_test.cc
index 266cbc4395..7b3b15b1af 100644
--- a/tensorflow/compiler/tf2xla/xla_op_registry_test.cc
+++ b/tensorflow/compiler/tf2xla/xla_op_registry_test.cc
@@ -82,5 +82,38 @@ TEST(XlaOpRegistryTest, XlaOpRegistrationWithOverride) {
}
}
+// A dummy generic OpKernel for all backends.
+class DummyInfeasibleTypeConstraintOp : public XlaOpKernel {
+ public:
+ explicit DummyInfeasibleTypeConstraintOp(OpKernelConstruction* ctx)
+ : XlaOpKernel(ctx) {}
+ void Compile(XlaOpKernelContext* ctx) override {
+ LOG(FATAL) << "unreachable";
+ }
+};
+
+REGISTER_OP("DummyInfeasibleTypeConstraintOp")
+ .Attr("T: {float, string}")
+ .Input("input: T")
+ .Output("output: T")
+ .Doc(R"doc(
+A dummy Op.
+
+input: dummy input.
+output: dummy output.
+)doc");
+REGISTER_XLA_OP(
+ Name("DummyInfeasibleTypeConstraintOp").TypeConstraint("T", DT_STRING),
+ DummyInfeasibleTypeConstraintOp);
+
+TEST(XlaOpRegistryTest, OpWithInfeasibleTypeConstraintIsNotRegistered) {
+ XlaOpRegistry::RegisterCompilationKernels();
+ auto registered_kernels = GetAllRegisteredKernels().kernel();
+ for (const auto& kernels : registered_kernels) {
+ // The operator should not be registered.
+ EXPECT_NE(kernels.op(), "DummyInfeasibleTypeConstraintOp");
+ }
+}
+
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/xla_resource.cc b/tensorflow/compiler/tf2xla/xla_resource.cc
index 540c65c597..baea814965 100644
--- a/tensorflow/compiler/tf2xla/xla_resource.cc
+++ b/tensorflow/compiler/tf2xla/xla_resource.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/sharding_util.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
namespace tensorflow {
@@ -89,16 +90,16 @@ Status XlaResource::SetZeroValue(xla::XlaBuilder* builder) {
}
switch (kind_) {
case kVariable: {
- value_ = builder->Broadcast(XlaHelpers::Zero(builder, type_),
- shape_.dim_sizes());
+ value_ =
+ xla::Broadcast(XlaHelpers::Zero(builder, type_), shape_.dim_sizes());
break;
}
case kTensorArray: {
TensorShape ta_shape;
ta_shape.AddDim(tensor_array_size_);
ta_shape.AppendShape(shape_);
- value_ = builder->Broadcast(XlaHelpers::Zero(builder, type_),
- ta_shape.dim_sizes());
+ value_ = xla::Broadcast(XlaHelpers::Zero(builder, type_),
+ ta_shape.dim_sizes());
break;
}
case kStack: {
@@ -106,9 +107,9 @@ Status XlaResource::SetZeroValue(xla::XlaBuilder* builder) {
ta_shape.AddDim(tensor_array_size_);
ta_shape.AppendShape(shape_);
value_ =
- builder->Tuple({builder->Broadcast(XlaHelpers::Zero(builder, type_),
- ta_shape.dim_sizes()),
- builder->ConstantR0<int32>(0)});
+ xla::Tuple(builder, {xla::Broadcast(XlaHelpers::Zero(builder, type_),
+ ta_shape.dim_sizes()),
+ xla::ConstantR0<int32>(builder, 0)});
break;
}
@@ -130,8 +131,8 @@ Status XlaResource::GetOrCreateTensorArrayGradient(const string& source,
TensorShape ta_shape;
ta_shape.AddDim(tensor_array_size_);
ta_shape.AppendShape(shape_);
- xla::XlaOp gradient_value = builder->Broadcast(
- XlaHelpers::Zero(builder, type_), ta_shape.dim_sizes());
+ xla::XlaOp gradient_value =
+ xla::Broadcast(XlaHelpers::Zero(builder, type_), ta_shape.dim_sizes());
gradient.reset(
new XlaResource(/*kind=*/kTensorArray, /*arg_num=*/-1,
/*name=*/strings::StrCat("TensorArrayGrad: ", name_),
@@ -152,7 +153,7 @@ Status XlaResource::Pack(xla::XlaOp* pack, xla::XlaBuilder* builder) const {
for (const auto& gradient : tensor_array_gradients_) {
elems.push_back(gradient.second->value_);
}
- *pack = builder->Tuple(elems);
+ *pack = xla::Tuple(builder, elems);
}
return Status::OK();
}
@@ -168,7 +169,7 @@ Status XlaResource::SetFromPack(const std::set<string>& gradient_sources,
} else {
TF_RET_CHECK(kind_ == kTensorArray);
int pos = 0;
- auto v = builder->GetTupleElement(pack, pos++);
+ auto v = xla::GetTupleElement(pack, pos++);
if (!initialized()) {
initial_value_ = v;
}
@@ -178,7 +179,7 @@ Status XlaResource::SetFromPack(const std::set<string>& gradient_sources,
XlaResource* gradient;
TF_RETURN_IF_ERROR(
GetOrCreateTensorArrayGradient(source, builder, &gradient));
- auto v = builder->GetTupleElement(pack, pos++);
+ auto v = xla::GetTupleElement(pack, pos++);
if (!gradient->initialized()) {
gradient->initial_value_ = v;
}
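These hunks follow the broader migration from XlaBuilder member functions to free functions in the xla namespace. A minimal before/after sketch, with builder, type, and dims standing in for the resource's builder, element type, and dimension sizes:

  // Before: operations are methods on the builder.
  xla::XlaOp zero = builder->Broadcast(XlaHelpers::Zero(builder, type), dims);
  xla::XlaOp packed = builder->Tuple({zero, builder->ConstantR0<int32>(0)});

  // After: free functions. Ops with operands infer the builder from them;
  // operand-less ops such as constants take the builder explicitly.
  xla::XlaOp zero = xla::Broadcast(XlaHelpers::Zero(builder, type), dims);
  xla::XlaOp packed =
      xla::Tuple(builder, {zero, xla::ConstantR0<int32>(builder, 0)});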
diff --git a/tensorflow/compiler/xla/BUILD b/tensorflow/compiler/xla/BUILD
index 4525197146..f1c383fd9e 100644
--- a/tensorflow/compiler/xla/BUILD
+++ b/tensorflow/compiler/xla/BUILD
@@ -142,30 +142,15 @@ cc_library(
cc_library(
name = "statusor",
- srcs = ["statusor.cc"],
hdrs = [
"statusor.h",
- "statusor_internals.h",
],
visibility = ["//visibility:public"],
deps = [
":status",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
- ],
-)
-
-tf_cc_test(
- name = "statusor_test",
- size = "small",
- srcs = ["statusor_test.cc"],
- deps = [
- ":statusor",
- ":test",
- ":types",
- "//tensorflow/core:lib",
- "//tensorflow/core:test",
- "//tensorflow/core:test_main",
+ "//tensorflow/stream_executor",
],
)
@@ -175,6 +160,7 @@ cc_library(
hdrs = [
"iterator_util.h",
"map_util.h",
+ "overflow_util.h",
"ptr_util.h",
"util.h",
],
@@ -250,7 +236,7 @@ cc_library(
":types",
":util",
":xla_data_proto",
- "//tensorflow/core:framework_internal",
+ "//tensorflow/core:framework",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"//tensorflow/core:regexp_internal",
@@ -268,6 +254,7 @@ tf_cc_test(
":types",
":util",
":xla_data_proto",
+ "//tensorflow/core:lib",
"//tensorflow/core:test_main",
],
)
@@ -295,9 +282,9 @@ tf_cc_test(
)
cc_library(
- name = "literal_util",
- srcs = ["literal_util.cc"],
- hdrs = ["literal_util.h"],
+ name = "literal",
+ srcs = ["literal.cc"],
+ hdrs = ["literal.h"],
visibility = ["//visibility:public"],
deps = [
":array2d",
@@ -314,11 +301,12 @@ cc_library(
)
tf_cc_test(
- name = "literal_util_test",
- srcs = ["literal_util_test.cc"],
+ name = "literal_test",
+ srcs = ["literal_test.cc"],
deps = [
":array3d",
":array4d",
+ ":literal",
":literal_util",
":shape_util",
":test",
@@ -331,6 +319,26 @@ tf_cc_test(
)
cc_library(
+ name = "literal_util",
+ srcs = ["literal_util.cc"],
+ hdrs = ["literal_util.h"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":array2d",
+ ":array3d",
+ ":array4d",
+ ":literal",
+ ":shape_util",
+ ":sparse_index_array",
+ ":status_macros",
+ ":types",
+ ":util",
+ ":xla_data_proto",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
name = "error_spec",
hdrs = ["error_spec.h"],
)
@@ -341,6 +349,7 @@ cc_library(
hdrs = ["literal_comparison.h"],
deps = [
":error_spec",
+ ":literal",
":literal_util",
":util",
"//tensorflow/core:lib",
@@ -472,7 +481,7 @@ cc_library(
hdrs = ["packed_literal_reader.h"],
visibility = [":internal"],
deps = [
- ":literal_util",
+ ":literal",
":shape_util",
":status_macros",
":statusor",
@@ -503,7 +512,7 @@ cc_library(
hdrs = ["text_literal_reader.h"],
visibility = [":internal"],
deps = [
- ":literal_util",
+ ":literal",
":shape_util",
":status_macros",
":statusor",
@@ -519,7 +528,7 @@ tf_cc_test(
name = "text_literal_reader_test",
srcs = ["text_literal_reader_test.cc"],
deps = [
- ":literal_util",
+ ":literal",
":shape_util",
":test",
":text_literal_reader",
@@ -536,7 +545,7 @@ cc_library(
hdrs = ["text_literal_writer.h"],
visibility = [":internal"],
deps = [
- ":literal_util",
+ ":literal",
":shape_util",
":status_macros",
":types",
@@ -549,6 +558,7 @@ tf_cc_test(
name = "text_literal_writer_test",
srcs = ["text_literal_writer_test.cc"],
deps = [
+ ":literal",
":literal_util",
":test",
":test_helpers",
@@ -621,6 +631,7 @@ cc_library(
":array2d",
":array3d",
":array4d",
+ ":literal_util",
":util",
":window_util",
":xla_data_proto",
@@ -641,7 +652,7 @@ tf_cc_test(
":array2d",
":array3d",
":array4d",
- ":literal_util",
+ ":literal",
":reference_util",
":test",
":util",
diff --git a/tensorflow/compiler/xla/client/BUILD b/tensorflow/compiler/xla/client/BUILD
index 8f08d3b2e0..25666cad40 100644
--- a/tensorflow/compiler/xla/client/BUILD
+++ b/tensorflow/compiler/xla/client/BUILD
@@ -65,7 +65,7 @@ cc_library(
deps = [
":global_data",
"//tensorflow/compiler/xla:execution_options_util",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:service_interface",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
diff --git a/tensorflow/compiler/xla/client/client.cc b/tensorflow/compiler/xla/client/client.cc
index 3d596a6e65..3a157c69cd 100644
--- a/tensorflow/compiler/xla/client/client.cc
+++ b/tensorflow/compiler/xla/client/client.cc
@@ -20,7 +20,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/execution_options_util.h"
#include "tensorflow/compiler/xla/legacy_flags/debug_options_flags.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
diff --git a/tensorflow/compiler/xla/client/client.h b/tensorflow/compiler/xla/client/client.h
index 68f0d0ac78..69d4d300ca 100644
--- a/tensorflow/compiler/xla/client/client.h
+++ b/tensorflow/compiler/xla/client/client.h
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/global_data.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo.pb.h"
#include "tensorflow/compiler/xla/service_interface.h"
#include "tensorflow/compiler/xla/statusor.h"
diff --git a/tensorflow/compiler/xla/client/lib/BUILD b/tensorflow/compiler/xla/client/lib/BUILD
index d49d959a6c..77ba474cf6 100644
--- a/tensorflow/compiler/xla/client/lib/BUILD
+++ b/tensorflow/compiler/xla/client/lib/BUILD
@@ -13,11 +13,18 @@ filegroup(
]),
)
+load("//tensorflow/compiler/xla/tests:build_defs.bzl", "xla_test")
+load("//tensorflow/compiler/xla/tests:build_defs.bzl", "generate_backend_suites")
+
+# Generate test_suites for all backends, named "${backend}_tests".
+generate_backend_suites()
+
cc_library(
name = "arithmetic",
srcs = ["arithmetic.cc"],
hdrs = ["arithmetic.h"],
deps = [
+ ":constants",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:types",
@@ -29,12 +36,113 @@ cc_library(
)
cc_library(
+ name = "constants",
+ srcs = ["constants.cc"],
+ hdrs = ["constants.h"],
+ deps = [
+ "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/compiler/xla:util",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/client/xla_client:xla_builder",
+ ],
+)
+
+xla_test(
+ name = "constants_test",
+ srcs = ["constants_test.cc"],
+ tags = ["enable_for_xla_interpreter"],
+ deps = [
+ ":constants",
+ "//tensorflow/compiler/xla:test",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/client/xla_client:xla_builder",
+ "//tensorflow/compiler/xla/tests:client_library_test_base",
+ "//tensorflow/compiler/xla/tests:xla_internal_test_main",
+ ],
+)
+
+cc_library(
+ name = "math",
+ srcs = ["math.cc"],
+ hdrs = ["math.h"],
+ deps = [
+ ":constants",
+ "//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:status_macros",
+ "//tensorflow/compiler/xla/client/xla_client:xla_builder",
+ ],
+)
+
+xla_test(
+ name = "math_test",
+ srcs = ["math_test.cc"],
+ tags = ["enable_for_xla_interpreter"],
+ deps = [
+ ":math",
+ "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:test",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/client/xla_client:xla_builder",
+ "//tensorflow/compiler/xla/tests:client_library_test_base",
+ "//tensorflow/compiler/xla/tests:xla_internal_test_main",
+ ],
+)
+
+cc_library(
+ name = "numeric",
+ srcs = ["numeric.cc"],
+ hdrs = ["numeric.h"],
+ deps = [
+ ":arithmetic",
+ ":constants",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/client/xla_client:xla_builder",
+ "//tensorflow/core:lib",
+ ],
+)
+
+xla_test(
+ name = "numeric_test",
+ srcs = ["numeric_test.cc"],
+ tags = ["enable_for_xla_interpreter"],
+ deps = [
+ ":numeric",
+ "//tensorflow/compiler/xla:test",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/client/xla_client:xla_builder",
+ "//tensorflow/compiler/xla/tests:client_library_test_base",
+ "//tensorflow/compiler/xla/tests:xla_internal_test_main",
+ ],
+)
+
+cc_library(
+ name = "prng",
+ srcs = ["prng.cc"],
+ hdrs = ["prng.h"],
+ deps = [
+ ":constants",
+ ":math",
+ ":numeric",
+ "//tensorflow/compiler/xla:util",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/client/xla_client:xla_builder",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
name = "testing",
srcs = ["testing.cc"],
hdrs = ["testing.h"],
deps = [
"//tensorflow/compiler/xla:execution_options_util",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:types",
diff --git a/tensorflow/compiler/xla/client/lib/arithmetic.cc b/tensorflow/compiler/xla/client/lib/arithmetic.cc
index 8e875bf352..978fc40f34 100644
--- a/tensorflow/compiler/xla/client/lib/arithmetic.cc
+++ b/tensorflow/compiler/xla/client/lib/arithmetic.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include <string>
+#include "tensorflow/compiler/xla/client/lib/constants.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -42,8 +43,8 @@ XlaComputation CreateScalarComputation(const string& name, PrimitiveType type,
}
const Shape scalar = ShapeUtil::MakeShape(type, {});
- auto lhs = b->Parameter(0, scalar, "lhs");
- auto rhs = b->Parameter(1, scalar, "rhs");
+ auto lhs = Parameter(b.get(), 0, scalar, "lhs");
+ auto rhs = Parameter(b.get(), 1, scalar, "rhs");
generator(b.get(), lhs, rhs);
return b->BuildAndNoteError();
}
@@ -55,7 +56,7 @@ XlaComputation CreateScalarAddComputation(PrimitiveType type,
return CreateScalarComputation(
"add", type, builder,
[](XlaBuilder* b, const XlaOp& lhs, const XlaOp& rhs) {
- return b->Add(lhs, rhs);
+ return Add(lhs, rhs);
});
}
@@ -64,17 +65,15 @@ XlaComputation CreateScalarMultiplyComputation(PrimitiveType type,
return CreateScalarComputation(
"mul", type, builder,
[](XlaBuilder* b, const XlaOp& lhs, const XlaOp& rhs) {
- return b->Mul(lhs, rhs);
+ return Mul(lhs, rhs);
});
}
XlaComputation CreateScalarGeComputation(PrimitiveType type,
XlaBuilder* builder) {
- return CreateScalarComputation(
- "ge", type, builder,
- [](XlaBuilder* b, const XlaOp& lhs, const XlaOp& rhs) {
- return b->Ge(lhs, rhs);
- });
+ return CreateScalarComputation("ge", type, builder,
+ [](XlaBuilder* b, const XlaOp& lhs,
+ const XlaOp& rhs) { return Ge(lhs, rhs); });
}
XlaComputation CreateScalarMaxComputation(PrimitiveType type,
@@ -82,7 +81,7 @@ XlaComputation CreateScalarMaxComputation(PrimitiveType type,
return CreateScalarComputation(
"max", type, builder,
[](XlaBuilder* b, const XlaOp& lhs, const XlaOp& rhs) {
- return b->Max(lhs, rhs);
+ return Max(lhs, rhs);
});
}
@@ -91,7 +90,7 @@ XlaComputation CreateScalarMinComputation(PrimitiveType type,
return CreateScalarComputation(
"min", type, builder,
[](XlaBuilder* b, const XlaOp& lhs, const XlaOp& rhs) {
- return b->Min(lhs, rhs);
+ return Min(lhs, rhs);
});
}
@@ -99,156 +98,27 @@ XlaComputation CreateScalarAndComputation(XlaBuilder* builder) {
return CreateScalarComputation(
"and", PRED, builder,
[](XlaBuilder* b, const XlaOp& lhs, const XlaOp& rhs) {
- return b->And(lhs, rhs);
+ return And(lhs, rhs);
});
}
XlaComputation CreateScalarOrComputation(XlaBuilder* builder) {
- return CreateScalarComputation(
- "or", PRED, builder,
- [](XlaBuilder* b, const XlaOp& lhs, const XlaOp& rhs) {
- return b->Or(lhs, rhs);
- });
+ return CreateScalarComputation("or", PRED, builder,
+ [](XlaBuilder* b, const XlaOp& lhs,
+ const XlaOp& rhs) { return Or(lhs, rhs); });
}
-StatusOr<XlaOp> Any(const XlaOp& predicates, XlaBuilder* builder) {
- auto f = builder->ConstantR0<bool>(false);
- XlaComputation logical_or = CreateScalarOrComputation(builder);
- TF_ASSIGN_OR_RETURN(const Shape& predicates_shape,
- builder->GetShape(predicates));
- std::vector<int64> all_dimensions(ShapeUtil::Rank(predicates_shape));
- std::iota(all_dimensions.begin(), all_dimensions.end(), 0);
- return builder->Reduce(predicates, f, logical_or, all_dimensions);
-}
-
-namespace {
-XlaOp FloatLiteral(XlaBuilder* b, PrimitiveType data_type, float value) {
- return b->ConvertElementType(b->ConstantR0(value), data_type);
-}
-
-// Polynomials for computing erf/erfc. Originally from cephes.
-// Note we use float for compatibility across devices, at the cost of some
-// precision for 64 bit computations.
-//
-// Coefficients are in descending order.
-std::array<float, 9> kErfcPCoefficient = {
- 2.46196981473530512524E-10, 5.64189564831068821977E-1,
- 7.46321056442269912687E0, 4.86371970985681366614E1,
- 1.96520832956077098242E2, 5.26445194995477358631E2,
- 9.34528527171957607540E2, 1.02755188689515710272E3,
- 5.57535335369399327526E2};
-std::array<float, 9> kErfcQCoefficient = {
- 1.00000000000000000000E0, 1.32281951154744992508E1,
- 8.67072140885989742329E1, 3.54937778887819891062E2,
- 9.75708501743205489753E2, 1.82390916687909736289E3,
- 2.24633760818710981792E3, 1.65666309194161350182E3,
- 5.57535340817727675546E2};
-std::array<float, 6> kErfcRCoefficient = {
- 5.64189583547755073984E-1, 1.27536670759978104416E0,
- 5.01905042251180477414E0, 6.16021097993053585195E0,
- 7.40974269950448939160E0, 2.97886665372100240670E0};
-std::array<float, 7> kErfcSCoefficient = {
- 1.00000000000000000000E0, 2.26052863220117276590E0,
- 9.39603524938001434673E0, 1.20489539808096656605E1,
- 1.70814450747565897222E1, 9.60896809063285878198E0,
- 3.36907645100081516050E0};
-std::array<float, 5> kErfTCoefficient = {
- 9.60497373987051638749E0, 9.00260197203842689217E1,
- 2.23200534594684319226E3, 7.00332514112805075473E3,
- 5.55923013010394962768E4};
-std::array<float, 6> kErfUCoefficient = {
- 1.00000000000000000000E0, 3.35617141647503099647E1,
- 5.21357949780152679795E2, 4.59432382970980127987E3,
- 2.26290000613890934246E4, 4.92673942608635921086E4};
-} // namespace
-
-// Evaluate the polynomial given coefficients and `x`.
-// N.B. Coefficients should be supplied in decreasing order.
-XlaOp EvaluatePolynomial(const XlaOp& x,
- tensorflow::gtl::ArraySlice<float> coefficients,
- PrimitiveType data_type) {
- XlaBuilder* b = x.builder();
- XlaOp poly = FloatLiteral(b, data_type, 0.0);
- for (float c : coefficients) {
- poly = b->Add(b->Mul(poly, x), FloatLiteral(b, data_type, c));
- }
- return poly;
-}
-
-// Compute an approximation of the error function complement (1 - erf(x)).
-XlaOp Erfc(const XlaOp& x, PrimitiveType data_type) {
- XlaBuilder* b = x.builder();
- XlaOp zero = FloatLiteral(b, data_type, 0.0);
- XlaOp two = FloatLiteral(b, data_type, 2.0);
- XlaOp eight = FloatLiteral(b, data_type, 8.0);
-
- XlaOp abs_x = b->Abs(x);
- XlaOp z = b->Exp(b->Mul(b->Neg(x), x));
-
- XlaOp pp = EvaluatePolynomial(abs_x, kErfcPCoefficient, data_type);
- XlaOp pq = EvaluatePolynomial(abs_x, kErfcQCoefficient, data_type);
- XlaOp pr = EvaluatePolynomial(abs_x, kErfcRCoefficient, data_type);
- XlaOp ps = EvaluatePolynomial(abs_x, kErfcSCoefficient, data_type);
-
- XlaOp y = b->Select(b->Lt(abs_x, eight), b->Div(b->Mul(z, pp), pq),
- b->Div(b->Mul(z, pr), ps));
-
- return b->Select(b->Lt(x, zero), b->Sub(two, y), y);
-}
-
-// Compute a polynomial approximation of the error function.
-XlaOp Erf(const XlaOp& x, PrimitiveType data_type) {
- XlaBuilder* b = x.builder();
- XlaOp z = b->Mul(x, x);
- XlaOp pt = EvaluatePolynomial(z, kErfTCoefficient, data_type);
- XlaOp pu = EvaluatePolynomial(z, kErfUCoefficient, data_type);
- return b->Div(b->Mul(x, pt), pu);
-}
-
-// Approximation for the inverse error function from
-// Giles, M., "Approximating the erfinv function".
-// The approximation has the form:
-// w = -log((1 - x) * (1 + x))
-// if ( w < 5 ) {
-// w = w - 2.5
-// p = sum_{i=1}^n lq[i]*w^i
-// } else {
-// w = sqrt(w) - 3
-// p = sum_{i=1}^n gq[i]*w^i
-// }
-// return p*x
-StatusOr<XlaOp> ErfInv(const XlaOp& x) {
- XlaBuilder* b = x.builder();
- TF_ASSIGN_OR_RETURN(Shape shape, b->GetShape(x));
- constexpr int kDegree = 9;
- constexpr std::array<float, 9> w_less_than_5_constants = {
- 2.81022636e-08f, 3.43273939e-07f, -3.5233877e-06f,
- -4.39150654e-06f, 0.00021858087f, -0.00125372503f,
- -0.00417768164f, 0.246640727f, 1.50140941f};
- constexpr std::array<float, 9> w_greater_than_5_constants = {
- -0.000200214257f, 0.000100950558f, 0.00134934322f,
- -0.00367342844f, 0.00573950773f, -0.0076224613f,
- 0.00943887047f, 1.00167406f, 2.83297682f};
-
- auto one = b->ConstantR0<float>(1.0);
- auto w = b->Neg(b->Log(b->Mul(b->Sub(one, x), b->Add(one, x))));
-
- auto lt = b->Lt(w, b->ConstantR0<float>(5.0));
- auto coefficient = [&](int i) {
- return b->Select(
- lt,
- b->Broadcast(b->ConstantR0<float>(w_less_than_5_constants[i]),
- AsInt64Slice(shape.dimensions())),
- b->Broadcast(b->ConstantR0<float>(w_greater_than_5_constants[i]),
- AsInt64Slice(shape.dimensions())));
- };
- w = b->Select(lt, b->Sub(w, b->ConstantR0<float>(2.5f)),
- b->Sub(b->SqrtF32(w), b->ConstantR0<float>(3.0f)));
- auto p = coefficient(0);
- for (int i = 1; i < kDegree; ++i) {
- p = b->Add(coefficient(i), b->Mul(p, w));
- }
- return b->Mul(p, x);
+XlaOp Any(XlaOp predicates) {
+ XlaBuilder* builder = predicates.builder();
+ return builder->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ auto f = ConstantR0<bool>(builder, false);
+ XlaComputation logical_or = CreateScalarOrComputation(builder);
+ TF_ASSIGN_OR_RETURN(const Shape& predicates_shape,
+ builder->GetShape(predicates));
+ std::vector<int64> all_dimensions(ShapeUtil::Rank(predicates_shape));
+ std::iota(all_dimensions.begin(), all_dimensions.end(), 0);
+ return Reduce(predicates, f, logical_or, all_dimensions);
+ });
}
} // namespace xla
diff --git a/tensorflow/compiler/xla/client/lib/arithmetic.h b/tensorflow/compiler/xla/client/lib/arithmetic.h
index 33a8254274..d0b916e8c8 100644
--- a/tensorflow/compiler/xla/client/lib/arithmetic.h
+++ b/tensorflow/compiler/xla/client/lib/arithmetic.h
@@ -53,22 +53,7 @@ XlaComputation CreateScalarOrComputation(XlaBuilder* builder);
// Returns whether any predicate in "predicates" is set.
//
// Note: if predicates is zero-sized, Any() vacuously returns false.
-StatusOr<XlaOp> Any(const XlaOp& predicates, XlaBuilder* builder);
-
-// Evaluate the polynomial given coefficients and `x`.
-// N.B. Coefficients should be supplied in decreasing order.
-XlaOp EvaluatePolynomial(const XlaOp& x,
- tensorflow::gtl::ArraySlice<float> coefficients,
- PrimitiveType data_type);
-
-// Compute an approximation of the error function complement (1 - erf(x)).
-XlaOp Erfc(const XlaOp& x, PrimitiveType data_type);
-
-// Compute an approximation of the error function.
-XlaOp Erf(const XlaOp& x, PrimitiveType data_type);
-
-// Compute an approximation of the inverse of the error function.
-StatusOr<XlaOp> ErfInv(const XlaOp& x);
+XlaOp Any(XlaOp predicates);
} // namespace xla
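Any() now returns an XlaOp directly instead of StatusOr<XlaOp>: failures are recorded on the builder via ReportErrorOrReturn and surface when the computation is built. A minimal usage sketch under that contract (the builder name is arbitrary):

  XlaBuilder b("any_example");
  auto preds = ConstantR1<bool>(&b, {false, true, false});
  Any(preds);                    // no TF_ASSIGN_OR_RETURN unwrapping needed
  auto computation = b.Build();  // any deferred error is returned here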
diff --git a/tensorflow/compiler/xla/client/lib/constants.cc b/tensorflow/compiler/xla/client/lib/constants.cc
new file mode 100644
index 0000000000..031d62e4ff
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/constants.cc
@@ -0,0 +1,103 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+
+#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/util.h"
+
+namespace xla {
+
+XlaOp Zero(XlaBuilder* builder, PrimitiveType type) {
+ return ConstantLiteral(builder, LiteralUtil::Zero(type));
+}
+
+XlaOp Zeros(XlaBuilder* builder, const Shape& shape) {
+ return Broadcast(Zero(builder, shape.element_type()),
+ AsInt64Slice(shape.dimensions()));
+}
+
+XlaOp ZerosLike(XlaOp prototype) {
+ XlaBuilder* builder = prototype.builder();
+ return builder->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(prototype));
+ return Zeros(builder, shape);
+ });
+}
+
+XlaOp One(XlaBuilder* builder, PrimitiveType type) {
+ return ConstantLiteral(builder, LiteralUtil::One(type));
+}
+
+XlaOp Epsilon(XlaBuilder* builder, PrimitiveType type) {
+ switch (type) {
+ case F16:
+ return ConstantR0<Eigen::half>(
+ builder,
+ static_cast<Eigen::half>(Eigen::NumTraits<Eigen::half>::epsilon()));
+ case BF16:
+ return ConstantR0<bfloat16>(builder, bfloat16::epsilon());
+ case F32:
+ return ConstantR0<float>(builder, std::numeric_limits<float>::epsilon());
+ case F64:
+ return ConstantR0<double>(builder,
+ std::numeric_limits<double>::epsilon());
+ default:
+ return builder->ReportError(InvalidArgument(
+ "Invalid type for Epsilon (%s).", PrimitiveType_Name(type).c_str()));
+ }
+}
+
+XlaOp MinValue(XlaBuilder* builder, PrimitiveType type) {
+ return ConstantLiteral(builder, LiteralUtil::MinValue(type));
+}
+
+XlaOp MinFiniteValue(XlaBuilder* builder, PrimitiveType type) {
+ switch (type) {
+ case F16:
+ return ConstantR0<Eigen::half>(builder,
+ Eigen::NumTraits<Eigen::half>::lowest());
+ case BF16:
+ return ConstantR0<bfloat16>(builder, bfloat16::lowest());
+ case F32:
+ return ConstantR0<float>(builder, -std::numeric_limits<float>::max());
+ case F64:
+ return ConstantR0<double>(builder, -std::numeric_limits<double>::max());
+ default:
+ return MinValue(builder, type);
+ }
+}
+
+XlaOp MaxValue(XlaBuilder* builder, PrimitiveType type) {
+ return ConstantLiteral(builder, LiteralUtil::MaxValue(type));
+}
+
+XlaOp MaxFiniteValue(XlaBuilder* builder, PrimitiveType type) {
+ switch (type) {
+ case F16:
+ return ConstantR0<Eigen::half>(builder,
+ Eigen::NumTraits<Eigen::half>::highest());
+ case BF16:
+ return ConstantR0<bfloat16>(builder, bfloat16::highest());
+ case F32:
+ return ConstantR0<float>(builder, std::numeric_limits<float>::max());
+ case F64:
+ return ConstantR0<double>(builder, std::numeric_limits<double>::max());
+ default:
+ return MaxValue(builder, type);
+ }
+}
+
+} // namespace xla
diff --git a/tensorflow/compiler/xla/client/lib/constants.h b/tensorflow/compiler/xla/client/lib/constants.h
new file mode 100644
index 0000000000..b47f5243f0
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/constants.h
@@ -0,0 +1,124 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_CLIENT_LIB_CONSTANTS_H_
+#define TENSORFLOW_COMPILER_XLA_CLIENT_LIB_CONSTANTS_H_
+
+#include <type_traits>
+
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/primitive_util.h"
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+
+namespace xla {
+
+// Returns scalar 'value' as a scalar of 'type'. Unlike ConstantR0, 'type' is
+// determined at C++ run-time, rather than C++ compile-time.
+// If 'value' is floating point but 'type' is not, or if 'value' is complex but
+// 'type' is not, an error will be returned. This is to catch accidental
+// truncation; in such cases, use an explicit cast.
+template <typename T>
+XlaOp ConstantR0WithType(XlaBuilder* builder, PrimitiveType type, T value) {
+ if (std::is_floating_point<T>::value &&
+ !(primitive_util::IsFloatingPointType(type) ||
+ primitive_util::IsComplexType(type))) {
+ return builder->ReportError(InvalidArgument(
+ "Invalid cast from floating point type to %s in ConstantR0WithType.",
+ PrimitiveType_Name(type).c_str()));
+ }
+ if (std::is_same<T, complex64>::value &&
+ !primitive_util::IsComplexType(type)) {
+ return builder->ReportError(InvalidArgument(
+ "Invalid cast from complex type to %s in ConstantR0WithType.",
+ PrimitiveType_Name(type).c_str()));
+ }
+ switch (type) {
+ case F16:
+ return ConstantR0<half>(builder, static_cast<half>(value));
+ case BF16:
+ return ConstantR0<bfloat16>(builder, static_cast<bfloat16>(value));
+ case F32:
+ return ConstantR0<float>(builder, static_cast<float>(value));
+ case F64:
+ return ConstantR0<double>(builder, static_cast<double>(value));
+ case C64:
+ return ConstantR0<complex64>(builder, static_cast<complex64>(value));
+ case U8:
+ return ConstantR0<uint8>(builder, static_cast<uint8>(value));
+ case U32:
+ return ConstantR0<uint32>(builder, static_cast<uint32>(value));
+ case U64:
+ return ConstantR0<uint64>(builder, static_cast<uint64>(value));
+ case S8:
+ return ConstantR0<int8>(builder, static_cast<int8>(value));
+ case S32:
+ return ConstantR0<int32>(builder, static_cast<int32>(value));
+ case S64:
+ return ConstantR0<int64>(builder, static_cast<int64>(value));
+ default:
+ return builder->ReportError(
+ InvalidArgument("Invalid type for ConstantR0WithType (%s).",
+ PrimitiveType_Name(type).c_str()));
+ }
+}
+
+// Returns a scalar containing 'value' cast to the same run-time type as
+// 'prototype'.
+// If 'value' is floating point but 'prototype' is not, or if 'value' is
+// complex but 'prototype' is not, an error will be returned.
+template <typename T>
+XlaOp ScalarLike(XlaOp prototype, T value) {
+ XlaBuilder* builder = prototype.builder();
+ return builder->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(prototype));
+ return ConstantR0WithType(builder, shape.element_type(), value);
+ });
+}
+
+// Returns a scalar with value '0' of 'type'.
+XlaOp Zero(XlaBuilder* builder, PrimitiveType type);
+
+// Returns a zero-filled tensor with shape `shape`.
+XlaOp Zeros(XlaBuilder* builder, const Shape& shape);
+
+// Returns a zero-filled tensor with the same shape as `prototype`.
+XlaOp ZerosLike(XlaOp prototype);
+
+// Returns a scalar with value '1' of 'type'.
+XlaOp One(XlaBuilder* builder, PrimitiveType type);
+
+// Returns the machine epsilon for floating-point type `type`, i.e.,
+// the difference between 1.0 and the next representable value.
+XlaOp Epsilon(XlaBuilder* builder, PrimitiveType type);
+
+// Returns the minimum representable finite or infinite value for 'type'.
+// Returns '-inf' for floating-point types.
+XlaOp MinValue(XlaBuilder* builder, PrimitiveType type);
+
+// Returns the minimum representable finite value for 'type'. For a floating
+// point type, this is equal to -MaxFiniteValue().
+XlaOp MinFiniteValue(XlaBuilder* builder, PrimitiveType type);
+
+// Returns the maximum representable finite or infinite value for 'type'.
+// Returns 'inf' for floating-point types.
+XlaOp MaxValue(XlaBuilder* builder, PrimitiveType type);
+
+// Returns the maximum representable finite value for 'type'.
+XlaOp MaxFiniteValue(XlaBuilder* builder, PrimitiveType type);
+
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_CLIENT_LIB_CONSTANTS_H_
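A short usage sketch of the new helpers (the builder name is arbitrary):

  XlaBuilder b("constants_example");
  auto x = ConstantR1<float>(&b, {1.f, 2.f, 3.f});
  auto half = ScalarLike(x, 0.5);               // F32 scalar; type taken from x
  auto eps = Epsilon(&b, F32);                  // numeric_limits<float>::epsilon()
  auto bad = ConstantR0WithType(&b, S32, 0.5);  // reports an "Invalid cast" error

The last line mirrors the guard in ConstantR0WithType: a floating-point C++ value may not be silently truncated into an integral XLA type; an explicit cast is required instead.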
diff --git a/tensorflow/compiler/xla/client/lib/constants_test.cc b/tensorflow/compiler/xla/client/lib/constants_test.cc
new file mode 100644
index 0000000000..f1e3439862
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/constants_test.cc
@@ -0,0 +1,159 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/test.h"
+#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
+#include "tensorflow/compiler/xla/tests/test_macros.h"
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+
+namespace xla {
+namespace {
+
+using ConstantsTest = ClientLibraryTestBase;
+
+using ::testing::HasSubstr;
+
+XLA_TEST_F(ConstantsTest, ConstantR0WithTypeS32) {
+ XlaBuilder builder(TestName());
+ ConstantR0WithType(&builder, xla::S32, 4);
+ ComputeAndCompareR0<int32>(&builder, 4, {});
+}
+
+XLA_TEST_F(ConstantsTest, ConstantR0WithTypeS32DoesNotAcceptFloats) {
+ XlaBuilder builder(TestName());
+ ConstantR0WithType(&builder, xla::S32, 4.5);
+ auto statusor = builder.Build();
+ ASSERT_FALSE(statusor.ok());
+ EXPECT_THAT(statusor.status().error_message(), HasSubstr("Invalid cast"));
+}
+
+XLA_TEST_F(ConstantsTest, ConstantR0WithTypeF32) {
+ XlaBuilder builder(TestName());
+ ConstantR0WithType(&builder, xla::F32, -7);
+ ComputeAndCompareR0<float>(&builder, -7, {});
+ ConstantR0WithType(&builder, xla::F32, 0.5);
+ ComputeAndCompareR0<float>(&builder, 0.5, {});
+}
+
+XLA_TEST_F(ConstantsTest, ScalarLikeS32) {
+ XlaBuilder builder(TestName());
+ ScalarLike(ConstantR0<int32>(&builder, 42), -3);
+ ComputeAndCompareR0<int32>(&builder, -3, {});
+}
+
+XLA_TEST_F(ConstantsTest, ScalarLikeF32) {
+ XlaBuilder builder(TestName());
+ ScalarLike(ConstantR0<float>(&builder, 42.75), -3.2);
+ ComputeAndCompareR0<float>(&builder, -3.2, {});
+}
+
+XLA_TEST_F(ConstantsTest, ZeroS32) {
+ XlaBuilder builder(TestName());
+ Zero(&builder, S32);
+ ComputeAndCompareR0<int32>(&builder, 0, {});
+}
+
+XLA_TEST_F(ConstantsTest, ZeroF32) {
+ XlaBuilder builder(TestName());
+ Zero(&builder, F32);
+ ComputeAndCompareR0<float>(&builder, 0.0, {});
+}
+
+XLA_TEST_F(ConstantsTest, ZerosS32) {
+ XlaBuilder builder(TestName());
+ Zeros(&builder, ShapeUtil::MakeShape(S32, {2, 2}));
+ ComputeAndCompareR2<int32>(&builder, {{0, 0}, {0, 0}}, {});
+}
+
+XLA_TEST_F(ConstantsTest, ZerosLikeF32) {
+ XlaBuilder builder(TestName());
+ ZerosLike(ConstantR1<float>(&builder, {1., 2., 3.}));
+ ComputeAndCompareR1<float>(&builder, {0., 0., 0.}, {});
+}
+
+XLA_TEST_F(ConstantsTest, OneS32) {
+ XlaBuilder builder(TestName());
+ One(&builder, S32);
+ ComputeAndCompareR0<int32>(&builder, 1, {});
+}
+
+XLA_TEST_F(ConstantsTest, OneF32) {
+ XlaBuilder builder(TestName());
+ One(&builder, F32);
+ ComputeAndCompareR0<float>(&builder, 1., {});
+}
+
+XLA_TEST_F(ConstantsTest, EpsilonF32) {
+ XlaBuilder builder(TestName());
+ Epsilon(&builder, F32);
+ ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::epsilon(),
+ {});
+}
+
+XLA_TEST_F(ConstantsTest, MinFiniteValueS32) {
+ XlaBuilder builder(TestName());
+ MinFiniteValue(&builder, S32);
+ ComputeAndCompareR0<int32>(&builder, std::numeric_limits<int32>::min(), {});
+}
+
+XLA_TEST_F(ConstantsTest, MaxFiniteValueS32) {
+ XlaBuilder builder(TestName());
+ MaxFiniteValue(&builder, S32);
+ ComputeAndCompareR0<int32>(&builder, std::numeric_limits<int32>::max(), {});
+}
+
+XLA_TEST_F(ConstantsTest, MinFiniteValueF32) {
+ XlaBuilder builder(TestName());
+ MinFiniteValue(&builder, F32);
+ ComputeAndCompareR0<float>(&builder, -std::numeric_limits<float>::max(), {});
+}
+
+XLA_TEST_F(ConstantsTest, MaxFiniteValueF32) {
+ XlaBuilder builder(TestName());
+ MaxFiniteValue(&builder, F32);
+ ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::max(), {});
+}
+
+XLA_TEST_F(ConstantsTest, MinValueS32) {
+ XlaBuilder builder(TestName());
+ MinValue(&builder, S32);
+ ComputeAndCompareR0<int32>(&builder, std::numeric_limits<int32>::min(), {});
+}
+
+XLA_TEST_F(ConstantsTest, MaxValueS32) {
+ XlaBuilder builder(TestName());
+ MaxValue(&builder, S32);
+ ComputeAndCompareR0<int32>(&builder, std::numeric_limits<int32>::max(), {});
+}
+
+XLA_TEST_F(ConstantsTest, MinValueF32) {
+ XlaBuilder builder(TestName());
+ MinValue(&builder, F32);
+ ComputeAndCompareR0<float>(&builder, -std::numeric_limits<float>::infinity(),
+ {});
+}
+
+XLA_TEST_F(ConstantsTest, MaxValueF32) {
+ XlaBuilder builder(TestName());
+ MaxValue(&builder, F32);
+ ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::infinity(),
+ {});
+}
+
+} // namespace
+} // namespace xla
diff --git a/tensorflow/compiler/xla/client/lib/math.cc b/tensorflow/compiler/xla/client/lib/math.cc
new file mode 100644
index 0000000000..2a7ac1d716
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/math.cc
@@ -0,0 +1,272 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/client/lib/math.h"
+
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/compiler/xla/status_macros.h"
+
+namespace xla {
+
+XlaOp Sqrt(XlaOp operand) { return Pow(operand, ScalarLike(operand, 0.5)); }
+
+XlaOp Rsqrt(XlaOp operand) { return Pow(operand, ScalarLike(operand, -0.5)); }
+
+XlaOp Square(XlaOp operand) { return Pow(operand, ScalarLike(operand, 2.0)); }
+
+XlaOp Reciprocal(XlaOp operand) {
+ return Pow(operand, ScalarLike(operand, -1.0));
+}
+
+namespace {
+
+// Polynomials for computing erf/erfc. Originally from cephes.
+// Note we use float for compatibility across devices, at the cost of some
+// precision for 64 bit computations.
+//
+// Coefficients are in descending order.
+std::array<float, 9> kErfcPCoefficient = {
+ 2.46196981473530512524E-10, 5.64189564831068821977E-1,
+ 7.46321056442269912687E0, 4.86371970985681366614E1,
+ 1.96520832956077098242E2, 5.26445194995477358631E2,
+ 9.34528527171957607540E2, 1.02755188689515710272E3,
+ 5.57535335369399327526E2};
+std::array<float, 9> kErfcQCoefficient = {
+ 1.00000000000000000000E0, 1.32281951154744992508E1,
+ 8.67072140885989742329E1, 3.54937778887819891062E2,
+ 9.75708501743205489753E2, 1.82390916687909736289E3,
+ 2.24633760818710981792E3, 1.65666309194161350182E3,
+ 5.57535340817727675546E2};
+std::array<float, 6> kErfcRCoefficient = {
+ 5.64189583547755073984E-1, 1.27536670759978104416E0,
+ 5.01905042251180477414E0, 6.16021097993053585195E0,
+ 7.40974269950448939160E0, 2.97886665372100240670E0};
+std::array<float, 7> kErfcSCoefficient = {
+ 1.00000000000000000000E0, 2.26052863220117276590E0,
+ 9.39603524938001434673E0, 1.20489539808096656605E1,
+ 1.70814450747565897222E1, 9.60896809063285878198E0,
+ 3.36907645100081516050E0};
+std::array<float, 5> kErfTCoefficient = {
+ 9.60497373987051638749E0, 9.00260197203842689217E1,
+ 2.23200534594684319226E3, 7.00332514112805075473E3,
+ 5.55923013010394962768E4};
+std::array<float, 6> kErfUCoefficient = {
+ 1.00000000000000000000E0, 3.35617141647503099647E1,
+ 5.21357949780152679795E2, 4.59432382970980127987E3,
+ 2.26290000613890934246E4, 4.92673942608635921086E4};
+} // namespace
+
+// Evaluate the polynomial given coefficients and `x`.
+// N.B. Coefficients should be supplied in decreasing order.
+XlaOp EvaluatePolynomial(XlaOp x,
+ tensorflow::gtl::ArraySlice<float> coefficients) {
+ XlaOp poly = ScalarLike(x, 0.0);
+ for (float c : coefficients) {
+ poly = poly * x + ScalarLike(x, c);
+ }
+ return poly;
+}
+
+// Compute an approximation of the error function complement (1 - erf(x)).
+XlaOp Erfc(XlaOp x) {
+ XlaOp abs_x = Abs(x);
+ XlaOp z = Exp(-x * x);
+
+ XlaOp pp = EvaluatePolynomial(abs_x, kErfcPCoefficient);
+ XlaOp pq = EvaluatePolynomial(abs_x, kErfcQCoefficient);
+ XlaOp pr = EvaluatePolynomial(abs_x, kErfcRCoefficient);
+ XlaOp ps = EvaluatePolynomial(abs_x, kErfcSCoefficient);
+
+ XlaOp y = Select(Lt(abs_x, ScalarLike(x, 8.0)), z * pp / pq, z * pr / ps);
+
+ return Select(Lt(x, ScalarLike(x, 0.0)), ScalarLike(x, 2.0) - y, y);
+}
+
+// Compute a polynomial approximation of the error function.
+XlaOp Erf(XlaOp x) {
+ XlaOp z = x * x;
+ XlaOp pt = EvaluatePolynomial(z, kErfTCoefficient);
+ XlaOp pu = EvaluatePolynomial(z, kErfUCoefficient);
+ return x * pt / pu;
+}
+
+// Approximation for the inverse error function from
+// Giles, M., "Approximating the erfinv function".
+// The approximation has the form:
+// w = -log((1 - x) * (1 + x))
+// if ( w < 5 ) {
+// w = w - 2.5
+// p = sum_{i=1}^n lq[i]*w^i
+// } else {
+// w = sqrt(w) - 3
+// p = sum_{i=1}^n gq[i]*w^i
+// }
+// return p*x
+XlaOp ErfInv(XlaOp x) {
+ XlaBuilder* b = x.builder();
+ return b->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ TF_ASSIGN_OR_RETURN(Shape shape, b->GetShape(x));
+ constexpr int kDegree = 9;
+ constexpr std::array<float, 9> w_less_than_5_constants = {
+ 2.81022636e-08f, 3.43273939e-07f, -3.5233877e-06f,
+ -4.39150654e-06f, 0.00021858087f, -0.00125372503f,
+ -0.00417768164f, 0.246640727f, 1.50140941f};
+ constexpr std::array<float, 9> w_greater_than_5_constants = {
+ -0.000200214257f, 0.000100950558f, 0.00134934322f,
+ -0.00367342844f, 0.00573950773f, -0.0076224613f,
+ 0.00943887047f, 1.00167406f, 2.83297682f};
+
+ auto one = ScalarLike(x, 1.0);
+ auto w = -Log((one - x) * (one + x));
+
+ auto lt = Lt(w, ScalarLike(x, 5.0));
+ auto coefficient = [&](int i) {
+ return Select(lt,
+ Broadcast(ScalarLike(x, w_less_than_5_constants[i]),
+ AsInt64Slice(shape.dimensions())),
+ Broadcast(ScalarLike(x, w_greater_than_5_constants[i]),
+ AsInt64Slice(shape.dimensions())));
+ };
+ w = Select(lt, w - ScalarLike(x, 2.5), Sqrt(w) - ScalarLike(x, 3.0));
+ auto p = coefficient(0);
+ for (int i = 1; i < kDegree; ++i) {
+ p = coefficient(i) + p * w;
+ }
+ return p * x;
+ });
+}
+
+namespace {
+// Coefficients for the Lanczos approximation of the gamma function. The
+// coefficients are uniquely determined by the choice of g and n (kLanczosGamma
+// and kLanczosCoefficients.size() + 1). The coefficients below correspond to
+// [7, 9]. [5, 7], [7, 9], [9, 10], and [607/128.0, 15] were evaluated and [7,
+// 9] seemed to be the least sensitive to the quality of the log function. In
+// particular, [5, 7] is the only choice where -1.5e-5 <= lgamma(2) <= 1.5e-5
+// for a particularly inaccurate log function.
+static constexpr double kLanczosGamma = 7; // aka g
+static constexpr double kBaseLanczosCoeff = 0.99999999999980993227684700473478;
+static constexpr std::array<double, 8> kLanczosCoefficients = {
+ 676.520368121885098567009190444019, -1259.13921672240287047156078755283,
+ 771.3234287776530788486528258894, -176.61502916214059906584551354,
+ 12.507343278686904814458936853, -0.13857109526572011689554707,
+ 9.984369578019570859563e-6, 1.50563273514931155834e-7};
+} // namespace
+
+// Compute the Lgamma function using Lanczos' approximation from "A Precision
+// Approximation of the Gamma Function". SIAM Journal on Numerical Analysis
+// series B. Vol. 1:
+// lgamma(z + 1) = (log(2) + log(pi)) / 2 + (z + 1/2) * log(t(z)) - t(z) + A(z)
+// t(z) = z + kLanczosGamma + 1/2
+// A(z) = kBaseLanczosCoeff + sigma(k = 1, n, kLanczosCoefficients[i] / (z + k))
+xla::XlaOp Lgamma(xla::XlaOp input) {
+ xla::XlaOp one_half = xla::ScalarLike(input, 0.5);
+ xla::XlaOp one = xla::ScalarLike(input, 1);
+
+ xla::XlaOp pi = xla::ScalarLike(input, M_PI);
+ xla::XlaOp log_pi = xla::ScalarLike(input, std::log(M_PI));
+ xla::XlaOp log_sqrt_two_pi =
+ xla::ScalarLike(input, (std::log(2) + std::log(M_PI)) / 2);
+
+ xla::XlaOp lanczos_gamma_plus_one_half =
+ xla::ScalarLike(input, kLanczosGamma + 0.5);
+ xla::XlaOp log_lanczos_gamma_plus_one_half =
+ xla::ScalarLike(input, std::log(kLanczosGamma + 0.5));
+
+ xla::XlaOp base_lanczos_coeff = xla::ScalarLike(input, kBaseLanczosCoeff);
+
+ // If the input is less than 0.5 use Gauss's reflection formula:
+ // gamma(x) = pi / sin(pi * x) * gamma(1 - x)
+ xla::XlaOp need_to_reflect = xla::Lt(xla::Real(input), one_half);
+ xla::XlaOp z = xla::Select(need_to_reflect, -input, input - one);
+
+ xla::XlaOp x = base_lanczos_coeff;
+ for (int i = 0; i < kLanczosCoefficients.size(); ++i) {
+ xla::XlaOp lanczos_coefficient =
+ xla::ScalarLike(input, kLanczosCoefficients[i]);
+ xla::XlaOp index = xla::ScalarLike(input, i);
+ x = x + lanczos_coefficient / (z + index + one);
+ }
+
+ // To improve accuracy on platforms with less-precise log implementations,
+ // compute log(lanczos_gamma_plus_one_half) at compile time and use log1p on
+ // the device.
+ // log(t) = log(kLanczosGamma + 0.5 + z)
+ // = log(kLanczosGamma + 0.5) + log1p(z / (kLanczosGamma + 0.5))
+ xla::XlaOp t = lanczos_gamma_plus_one_half + z;
+ xla::XlaOp log_t = log_lanczos_gamma_plus_one_half +
+ xla::Log1p(z / lanczos_gamma_plus_one_half);
+
+ xla::XlaOp log_y = log_sqrt_two_pi + (z + one_half) * log_t - t + xla::Log(x);
+
+ xla::XlaOp reflection = log_pi - xla::Log(xla::Sin(pi * input)) - log_y;
+ xla::XlaOp result = xla::Select(need_to_reflect, reflection, log_y);
+ return result;
+}
+
+// Compute the Digamma function using Lanczos' approximation from "A Precision
+// Approximation of the Gamma Function". SIAM Journal on Numerical Analysis
+// series B. Vol. 1:
+// digamma(z + 1) = log(t(z)) + A'(z) / A(z) - kLanczosGamma / t(z)
+// t(z) = z + kLanczosGamma + 1/2
+// A(z) = kBaseLanczosCoeff + sigma(k = 1, n, kLanczosCoefficients[i] / (z + k))
+// A'(z) = sigma(k = 1, n, kLanczosCoefficients[i] / (z + k) / (z + k))
+xla::XlaOp Digamma(xla::XlaOp input) {
+ xla::XlaOp zero = xla::ScalarLike(input, 0);
+ xla::XlaOp one_half = xla::ScalarLike(input, 0.5);
+ xla::XlaOp one = xla::ScalarLike(input, 1);
+
+ xla::XlaOp pi = xla::ScalarLike(input, M_PI);
+
+ xla::XlaOp lanczos_gamma = xla::ScalarLike(input, kLanczosGamma);
+ xla::XlaOp lanczos_gamma_plus_one_half =
+ xla::ScalarLike(input, kLanczosGamma + 0.5);
+ xla::XlaOp log_lanczos_gamma_plus_one_half =
+ xla::ScalarLike(input, std::log(kLanczosGamma + 0.5));
+
+ xla::XlaOp base_lanczos_coeff = xla::ScalarLike(input, kBaseLanczosCoeff);
+
+ // If the input is less than 0.5 use Gauss's reflection formula:
+ // digamma(x) = digamma(1 - x) - pi * cot(pi * x)
+ xla::XlaOp need_to_reflect = xla::Lt(xla::Real(input), one_half);
+ xla::XlaOp z = xla::Select(need_to_reflect, -input, input - one);
+
+ xla::XlaOp num = zero;
+ xla::XlaOp denom = base_lanczos_coeff;
+ for (int i = 0; i < kLanczosCoefficients.size(); ++i) {
+ xla::XlaOp lanczos_coefficient =
+ xla::ScalarLike(input, kLanczosCoefficients[i]);
+ xla::XlaOp index = xla::ScalarLike(input, i);
+ num = num - lanczos_coefficient / ((z + index + one) * (z + index + one));
+ denom = denom + lanczos_coefficient / (z + index + one);
+ }
+
+ // To improve accuracy on platforms with less-precise log implementations,
+ // compute log(lanczos_gamma_plus_one_half) at compile time and use log1p on
+ // the device.
+ // log(t) = log(kLanczosGamma + 0.5 + z)
+ // = log(kLanczosGamma + 0.5) + log1p(z / (kLanczosGamma + 0.5))
+ xla::XlaOp t = lanczos_gamma_plus_one_half + z;
+ xla::XlaOp log_t = log_lanczos_gamma_plus_one_half +
+ xla::Log1p(z / lanczos_gamma_plus_one_half);
+
+ xla::XlaOp y = log_t + num / denom - lanczos_gamma / t;
+ xla::XlaOp reflection = y - pi * xla::Cos(pi * input) / xla::Sin(pi * input);
+ xla::XlaOp result = xla::Select(need_to_reflect, reflection, y);
+ return result;
+}
+
+} // namespace xla
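EvaluatePolynomial above is Horner's rule applied over XlaOps. A plain-C++ reference sketch of the same recurrence, useful for checking coefficient tables by hand (this helper is illustrative, not part of the commit):

  #include <vector>

  // Coefficients in decreasing order {c_n, ..., c_1, c_0}: the loop computes
  // (((0*x + c_n)*x + ...)*x + c_1)*x + c_0.
  float HornerReference(float x, const std::vector<float>& coefficients) {
    float poly = 0.0f;
    for (float c : coefficients) {
      poly = poly * x + c;
    }
    return poly;
  }

  // Example: {1, -3, 2} encodes x^2 - 3x + 2, so HornerReference(2.f, ...) == 0.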
diff --git a/tensorflow/compiler/xla/client/lib/math.h b/tensorflow/compiler/xla/client/lib/math.h
new file mode 100644
index 0000000000..e4c79b5f52
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/math.h
@@ -0,0 +1,57 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_CLIENT_LIB_MATH_H_
+#define TENSORFLOW_COMPILER_XLA_CLIENT_LIB_MATH_H_
+
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+
+namespace xla {
+
+// Computes the square root of 'operand'.
+XlaOp Sqrt(XlaOp operand);
+
+// Computes the reciprocal of the square root of 'operand'.
+XlaOp Rsqrt(XlaOp operand);
+
+// Computes the square of 'operand'.
+XlaOp Square(XlaOp operand);
+
+// Computes the reciprocal of 'operand'.
+XlaOp Reciprocal(XlaOp operand);
+
+// Evaluates a polynomial given coefficients and `x`.
+// N.B. Coefficients should be supplied in decreasing order.
+XlaOp EvaluatePolynomial(XlaOp x,
+ tensorflow::gtl::ArraySlice<float> coefficients);
+
+// Computes an approximation of the error function complement (1 - erf(x)).
+XlaOp Erfc(XlaOp x);
+
+// Computes an approximation of the error function.
+XlaOp Erf(XlaOp x);
+
+// Computes an approximation of the inverse of the error function.
+XlaOp ErfInv(XlaOp x);
+
+// Computes an approximation of the lgamma function.
+XlaOp Lgamma(XlaOp input);
+
+// Computes an approximation of the digamma function.
+XlaOp Digamma(XlaOp input);
+
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_CLIENT_LIB_MATH_H_
diff --git a/tensorflow/compiler/xla/client/lib/math_test.cc b/tensorflow/compiler/xla/client/lib/math_test.cc
new file mode 100644
index 0000000000..1df287d7db
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/math_test.cc
@@ -0,0 +1,140 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/client/lib/math.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/test.h"
+#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
+#include "tensorflow/compiler/xla/tests/test_macros.h"
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+
+namespace xla {
+namespace {
+
+class MathTest : public ClientLibraryTestBase {
+ public:
+ ErrorSpec error_spec_{0.0001};
+};
+
+XLA_TEST_F(MathTest, SqrtF32) {
+ XlaBuilder builder(TestName());
+ Literal zero_literal = LiteralUtil::Zero(PrimitiveType::F32);
+
+ std::unique_ptr<GlobalData> zero_data =
+ client_->TransferToServer(zero_literal).ConsumeValueOrDie();
+
+ XlaOp zero = Parameter(&builder, 0, zero_literal.shape(), "zero");
+ Sqrt(zero);
+
+ ComputeAndCompareR0<float>(&builder, 0.0f, {zero_data.get()}, error_spec_);
+}
+
+XLA_TEST_F(MathTest, SquareTenValues) {
+ XlaBuilder builder(TestName());
+ auto x = ConstantR1<float>(
+ &builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
+ Square(x);
+
+ std::vector<float> expected = {4.41, 6.76, 6.76, 16., 4.41,
+ 5.29, 25., 0.81, 5.76, 2.56};
+ ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
+}
+
+XLA_TEST_F(MathTest, ReciprocalTenValues) {
+ XlaBuilder builder(TestName());
+ auto x = ConstantR1<float>(
+ &builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
+ Reciprocal(x);
+
+ std::vector<float> expected = {
+ 0.47619048, -0.38461538, 0.38461538, -0.25, 0.47619048,
+ 0.43478261, -0.2, -1.11111111, -0.41666667, 0.625};
+ ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
+}
+
+XLA_TEST_F(MathTest, SqrtZeroes) {
+ XlaBuilder builder(TestName());
+ auto x = ConstantR1<float>(&builder, {0.0, -0.0});
+ Sqrt(x);
+
+ ComputeAndCompareR1<float>(&builder, {0, 0}, {}, error_spec_);
+}
+
+XLA_TEST_F(MathTest, SqrtSixValues) {
+ XlaBuilder builder(TestName());
+ auto x = ConstantR1<float>(&builder, {16.0, 1.0, 1024.0, 0.16, 0.2, 12345});
+ Sqrt(x);
+
+ std::vector<float> expected = {4, 1, 32, 0.4, 0.4472, 111.1080};
+ ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
+}
+
+XLA_TEST_F(MathTest, Lgamma) {
+ XlaBuilder builder(TestName());
+ auto x = ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.5, 1.5,
+ 2.5, -1.5, -3.5, -5.5});
+ Lgamma(x);
+
+ std::vector<float> expected = {
+ 0,
+ 0,
+ static_cast<float>(std::log(2)),
+ static_cast<float>(std::log(6)),
+ static_cast<float>(std::log(24)),
+ static_cast<float>(std::log(120)),
+ static_cast<float>(std::log(M_PI) / 2),
+ static_cast<float>(std::log(M_PI) / 2 - std::log(2)),
+ static_cast<float>(std::log(M_PI) / 2 - std::log(4) + std::log(3)),
+ static_cast<float>(std::log(M_PI) / 2 - std::log(3) + std::log(4)),
+ static_cast<float>(std::log(M_PI) / 2 - std::log(105) + std::log(16)),
+ static_cast<float>(std::log(M_PI) / 2 - std::log(10395) + std::log(64))};
+ error_spec_ = ErrorSpec{0.001};
+ ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
+}
+
+XLA_TEST_F(MathTest, Digamma) {
+ XlaBuilder builder(TestName());
+ auto x = ConstantR1<float>(&builder, {1.0, 0.5, 1 / 3.0, 0.25, 1 / 6.0, 0.125,
+ 2.0, 3.0, 4.0, 6.0, 8.0, 9.0});
+ Digamma(x);
+
+ constexpr double euler_mascheroni =
+ 0.57721566490153286060651209008240243104215933593992;
+ std::vector<float> expected = {
+ static_cast<float>(-euler_mascheroni),
+ static_cast<float>(-2 * std::log(2) - euler_mascheroni),
+ static_cast<float>(-M_PI / 2 / std::sqrt(3) - 3 * std::log(3) / 2 -
+ euler_mascheroni),
+ static_cast<float>(-M_PI / 2 - 3 * std::log(2) - euler_mascheroni),
+ static_cast<float>(-M_PI * std::sqrt(3) / 2 - 2 * std::log(2) -
+ 3 * std::log(3) / 2 - euler_mascheroni),
+ static_cast<float>(
+ -M_PI / 2 - 4 * std::log(2) -
+ (M_PI + std::log(2 + std::sqrt(2)) - std::log(2 - std::sqrt(2))) /
+ std::sqrt(2) -
+ euler_mascheroni),
+ static_cast<float>(1 - euler_mascheroni),
+ static_cast<float>(1.5 - euler_mascheroni),
+ static_cast<float>(11 / 6.0 - euler_mascheroni),
+ static_cast<float>(137 / 60.0 - euler_mascheroni),
+ static_cast<float>(363 / 140.0 - euler_mascheroni),
+ static_cast<float>(761 / 280.0 - euler_mascheroni)};
+ ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
+}
+
+} // namespace
+} // namespace xla
diff --git a/tensorflow/compiler/xla/client/lib/numeric.cc b/tensorflow/compiler/xla/client/lib/numeric.cc
new file mode 100644
index 0000000000..cdbeb189f4
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/numeric.cc
@@ -0,0 +1,104 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <numeric>
+#include <vector>
+
+#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/core/lib/gtl/array_slice.h"
+
+namespace xla {
+
+namespace {
+
+template <typename T>
+XlaOp MakeIota(XlaBuilder* builder, int64 size) {
+ std::vector<T> values(size);
+ for (int64 i = 0; i < size; ++i) {
+ values[i] = static_cast<T>(i);
+ }
+ return ConstantR1<T>(builder, values);
+}
+
+} // namespace
+
+XlaOp Iota(XlaBuilder* builder, PrimitiveType type, int64 size) {
+ switch (type) {
+ case S8:
+ return MakeIota<int8>(builder, size);
+ case S16:
+ return MakeIota<int16>(builder, size);
+ case S32:
+ return MakeIota<int32>(builder, size);
+ case S64:
+ return MakeIota<int64>(builder, size);
+ case U8:
+ return MakeIota<uint8>(builder, size);
+ case U16:
+ return MakeIota<uint16>(builder, size);
+ case U32:
+ return MakeIota<uint32>(builder, size);
+ case U64:
+ return MakeIota<uint64>(builder, size);
+ case BF16:
+ return MakeIota<bfloat16>(builder, size);
+ case F16:
+ return MakeIota<half>(builder, size);
+ case F32:
+ return MakeIota<float>(builder, size);
+ case F64:
+ return MakeIota<double>(builder, size);
+ case C64:
+ return MakeIota<complex64>(builder, size);
+ default:
+ return builder->ReportError(
+ InvalidArgument("Unimplemented type for Iota: %s.",
+ PrimitiveType_Name(type).c_str()));
+ }
+}
+
+XlaOp IdentityMatrix(XlaBuilder* builder, PrimitiveType type, int64 m,
+ int64 n) {
+ auto a = Iota(builder, type, m);
+ auto b = Iota(builder, type, n);
+ auto indicator = Eq(a, Broadcast(b, {m}), /*broadcast_dimensions=*/{0});
+ return ConvertElementType(indicator, type);
+}
+
+XlaOp Diagonal(XlaOp x) {
+ XlaBuilder* builder = x.builder();
+ return builder->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
+ const int64 n_dims = ShapeUtil::Rank(shape);
+ TF_RET_CHECK(n_dims >= 2);
+ const int64 n = shape.dimensions(n_dims - 1);
+ const int64 m = shape.dimensions(n_dims - 2);
+ tensorflow::gtl::ArraySlice<int64> major_dims(
+ AsInt64Slice(shape.dimensions()), /*pos=*/0, /*len=*/n_dims - 2);
+ auto a = Iota(builder, U32, n);
+ auto b = Iota(builder, U32, m);
+ auto indicator = Eq(a, Broadcast(b, {n}), /*broadcast_dimensions=*/{0});
+ auto mask = Broadcast(indicator, major_dims);
+ XlaComputation add =
+ CreateScalarAddComputation(shape.element_type(), builder);
+ auto diag = Reduce(Select(mask, x, Zeros(builder, shape)), ScalarLike(x, 0),
+ add, {n_dims - 1});
+ return diag;
+ });
+}
+
+} // namespace xla
diff --git a/tensorflow/compiler/xla/client/lib/numeric.h b/tensorflow/compiler/xla/client/lib/numeric.h
new file mode 100644
index 0000000000..3ec084636b
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/numeric.h
@@ -0,0 +1,37 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_CLIENT_LIB_NUMERIC_H_
+#define TENSORFLOW_COMPILER_XLA_CLIENT_LIB_NUMERIC_H_
+
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+
+namespace xla {
+
+// Returns a rank 1 tensor of `type` containing values [0, 1, 2, ...].
+XlaOp Iota(XlaBuilder* builder, PrimitiveType type, int64 size);
+
+// Returns an m x n matrix with 1s on the diagonal elements, zeros everywhere
+// else.
+XlaOp IdentityMatrix(XlaBuilder* builder, PrimitiveType type, int64 m, int64 n);
+
+// Returns the diagonals of the last two dimensions of 'x', one per batch
+// element.
+XlaOp Diagonal(XlaOp x);
+
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_CLIENT_LIB_NUMERIC_H_
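
A minimal usage sketch for these helpers, assuming the builder setup used in the test file below (`BuildNumericExample` is an illustrative name, not part of this change):

#include "tensorflow/compiler/xla/client/lib/numeric.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"

namespace xla {

// Enqueues the three helpers onto one builder.
void BuildNumericExample(XlaBuilder* builder) {
  XlaOp iota = Iota(builder, S32, 3);              // [0, 1, 2]
  XlaOp eye = IdentityMatrix(builder, F32, 3, 4);  // 3x4, ones on the diagonal
  XlaOp diag = Diagonal(eye);                      // diagonals of last 2 dims
  (void)iota;
  (void)diag;
}

}  // namespace xla
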
diff --git a/tensorflow/compiler/xla/client/lib/numeric_test.cc b/tensorflow/compiler/xla/client/lib/numeric_test.cc
new file mode 100644
index 0000000000..bc8a73e9d7
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/numeric_test.cc
@@ -0,0 +1,37 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/test.h"
+#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
+#include "tensorflow/compiler/xla/tests/test_macros.h"
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+
+namespace xla {
+namespace {
+
+using NumericTest = ClientLibraryTestBase;
+
+XLA_TEST_F(NumericTest, Iota) {
+ XlaBuilder builder(TestName());
+ Iota(&builder, S32, 10);
+
+ ComputeAndCompareR1<int32>(&builder, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, {});
+}
+
+} // namespace
+} // namespace xla
diff --git a/tensorflow/compiler/xla/client/lib/prng.cc b/tensorflow/compiler/xla/client/lib/prng.cc
new file mode 100644
index 0000000000..299a6ac2b6
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/prng.cc
@@ -0,0 +1,150 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <cmath>
+
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/math.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/util.h"
+#include "tensorflow/core/lib/core/casts.h"
+
+namespace xla {
+namespace {
+
+// Rotates a 32-bit integer 'v' left by 'distance' bits.
+XlaOp RotateLeftS32(XlaOp v, int distance) {
+ return (v << ConstantR0<int32>(v.builder(), distance)) |
+ ShiftRightLogical(v, ConstantR0<int32>(v.builder(), 32 - distance));
+}
+
+using ThreeFry2x32State = std::array<XlaOp, 2>;
+
+// Implements the ThreeFry counter-based PRNG algorithm.
+// Salmon et al. SC 2011. Parallel random numbers: as easy as 1, 2, 3.
+// http://www.thesalmons.org/john/random123/papers/random123sc11.pdf
+ThreeFry2x32State ThreeFry2x32(ThreeFry2x32State input, ThreeFry2x32State key) {
+ XlaBuilder* builder = input[0].builder();
+ // Rotation distances specified by the Threefry2x32 algorithm.
+ constexpr std::array<int, 8> rotations = {13, 15, 26, 6, 17, 29, 16, 24};
+ ThreeFry2x32State x;
+
+ std::array<XlaOp, 3> ks;
+ // 0x1BD11BDA is a parity constant specified by the ThreeFry2x32 algorithm.
+ ks[2] = ConstantR0<int32>(builder, 0x1BD11BDA);
+ for (int i = 0; i < 2; ++i) {
+ ks[i] = key[i];
+ x[i] = input[i];
+ ks[2] = ks[2] ^ key[i];
+ }
+
+ x[0] = x[0] + ks[0];
+ x[1] = x[1] + ks[1];
+
+ // Performs a single round of the Threefry2x32 algorithm, with a rotation
+ // amount 'rotation'.
+ auto round = [builder](ThreeFry2x32State v, int rotation) {
+ v[0] = v[0] + v[1];
+ v[1] = RotateLeftS32(v[1], rotation);
+ v[1] = v[0] ^ v[1];
+ return v;
+ };
+
+ // There are no known statistical flaws with 13 rounds of Threefry2x32.
+ // We are conservative and use 20 rounds.
+ x = round(x, rotations[0]);
+ x = round(x, rotations[1]);
+ x = round(x, rotations[2]);
+ x = round(x, rotations[3]);
+ x[0] = x[0] + ks[1];
+ x[1] = x[1] + ks[2] + ConstantR0<int32>(builder, 1);
+
+ x = round(x, rotations[4]);
+ x = round(x, rotations[5]);
+ x = round(x, rotations[6]);
+ x = round(x, rotations[7]);
+ x[0] = x[0] + ks[2];
+ x[1] = x[1] + ks[0] + ConstantR0<int32>(builder, 2);
+
+ x = round(x, rotations[0]);
+ x = round(x, rotations[1]);
+ x = round(x, rotations[2]);
+ x = round(x, rotations[3]);
+ x[0] = x[0] + ks[0];
+ x[1] = x[1] + ks[1] + ConstantR0<int32>(builder, 3);
+
+ x = round(x, rotations[4]);
+ x = round(x, rotations[5]);
+ x = round(x, rotations[6]);
+ x = round(x, rotations[7]);
+ x[0] = x[0] + ks[1];
+ x[1] = x[1] + ks[2] + ConstantR0<int32>(builder, 4);
+
+ x = round(x, rotations[0]);
+ x = round(x, rotations[1]);
+ x = round(x, rotations[2]);
+ x = round(x, rotations[3]);
+ x[0] = x[0] + ks[2];
+ x[1] = x[1] + ks[0] + ConstantR0<int32>(builder, 5);
+
+ return x;
+}
+
+} // namespace
+
+XlaOp StatelessRngUniform(std::array<XlaOp, 2> seeds, const Shape& shape,
+ XlaOp minval, XlaOp maxval) {
+ XlaBuilder* builder = seeds[0].builder();
+ if (shape.element_type() != F32) {
+ return builder->ReportError(Unimplemented(
+ "Types other than F32 are not implemented by StatelessRngUniform."));
+ }
+ ThreeFry2x32State key = seeds;
+ const int64 size = ShapeUtil::ElementsIn(shape);
+
+ const int64 half_size = CeilOfRatio<int64>(size, 2);
+ const bool size_is_odd = (half_size * 2 != size);
+
+ // Fill the generator inputs with unique counter values.
+ ThreeFry2x32State inputs;
+ inputs[0] = Iota(builder, S32, half_size);
+ inputs[1] = inputs[0] + ConstantR0<int32>(builder, half_size);
+ ThreeFry2x32State outputs = ThreeFry2x32(inputs, key);
+
+ if (size_is_odd) {
+ outputs[1] = Slice(outputs[1], {0}, {half_size - 1}, {1});
+ }
+
+ auto bits = Reshape(ConcatInDim(builder, outputs, 0),
+ AsInt64Slice(shape.dimensions()));
+
+  // Keep the top 23 random bits as the mantissa and OR in the bit pattern of
+  // 1.0f, which pins the exponent so the result is a float in [1.0, 2.0).
+ constexpr int kFloatBits = 32;
+ constexpr int kMantissaBits = 23;
+ bits = ShiftRightLogical(
+ bits, ConstantR0<int32>(builder, kFloatBits - kMantissaBits)) |
+ ConstantR0<int32>(builder, tensorflow::bit_cast<int32>(1.0f));
+ auto floats = BitcastConvertType(bits, F32);
+
+ // We have a floating point number in the range [1.0, 2.0).
+ // Subtract 1.0f to shift to the range [0.0, 1.0)
+ floats = floats - ConstantR0<float>(builder, 1.0f);
+ // Multiply and add to shift to the range [minval, maxval).
+ return floats * (maxval - minval) + minval;
+}
+
+} // namespace xla
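
The shift-and-OR conversion above is easiest to verify on the host; a standalone sketch of the same trick in plain standard C++ (names are illustrative, not part of this change):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Maps 32 random bits to a float in [0.0, 1.0): keep the top 23 bits as the
// mantissa, OR in the bit pattern of 1.0f to pin the exponent, bitcast to a
// float in [1.0, 2.0), then subtract 1.0f.
float BitsToUnitFloat(uint32_t bits) {
  const uint32_t kOneBits = 0x3F800000u;        // bit pattern of 1.0f
  const uint32_t mantissa = bits >> (32 - 23);  // top 23 bits -> mantissa
  const uint32_t float_bits = kOneBits | mantissa;
  float result;
  std::memcpy(&result, &float_bits, sizeof(result));  // portable bitcast
  return result - 1.0f;
}

int main() {
  std::printf("%f\n", BitsToUnitFloat(0u));           // prints 0.000000
  std::printf("%f\n", BitsToUnitFloat(0xFFFFFFFFu));  // just below 1.0
  return 0;
}
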
diff --git a/tensorflow/compiler/xla/client/lib/prng.h b/tensorflow/compiler/xla/client/lib/prng.h
new file mode 100644
index 0000000000..ac86390239
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/prng.h
@@ -0,0 +1,34 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_CLIENT_LIB_PRNG_H_
+#define TENSORFLOW_COMPILER_XLA_CLIENT_LIB_PRNG_H_
+
+#include <array>
+
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+
+namespace xla {
+
+// Returns a tensor of the given shape filled with random values uniformly
+// distributed in the range [minval, maxval). Requires two 32-bit integer
+// seeds. Currently only shapes with element type F32 are implemented.
+XlaOp StatelessRngUniform(std::array<XlaOp, 2> seeds, const Shape& shape,
+ XlaOp minval, XlaOp maxval);
+
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_CLIENT_LIB_PRNG_H_
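
A sketch of calling this API from client code, using the constants helpers from this library (`BuildUniformExample` and the literal seed values are illustrative):

#include <array>

#include "tensorflow/compiler/xla/client/lib/constants.h"
#include "tensorflow/compiler/xla/client/lib/prng.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/shape_util.h"

namespace xla {

void BuildUniformExample(XlaBuilder* builder) {
  // The two 32-bit seeds form the ThreeFry key; fixed here for illustration.
  std::array<XlaOp, 2> seeds = {ConstantR0<int32>(builder, 42),
                                ConstantR0<int32>(builder, 7)};
  // 128 floats uniformly distributed in [0, 1).
  XlaOp u = StatelessRngUniform(seeds, ShapeUtil::MakeShape(F32, {128}),
                                ConstantR0<float>(builder, 0.0f),
                                ConstantR0<float>(builder, 1.0f));
  (void)u;
}

}  // namespace xla
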
diff --git a/tensorflow/compiler/xla/client/lib/testing.cc b/tensorflow/compiler/xla/client/lib/testing.cc
index 3380af9f30..534c509868 100644
--- a/tensorflow/compiler/xla/client/lib/testing.cc
+++ b/tensorflow/compiler/xla/client/lib/testing.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/execution_options_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/tests/test_utils.h"
@@ -48,15 +48,15 @@ int64 DataSizeOfShape(const Shape& shape) {
// Creates an XlaOp for an op that generates fake data with the given shape.
XlaOp BuildFakeDataOpOnDevice(const Shape& shape, XlaBuilder* builder) {
if (ShapeUtil::IsArray(shape)) {
- return builder->Broadcast(
- builder->ConstantLiteral(Literal::One(shape.element_type())),
+ return Broadcast(
+ ConstantLiteral(builder, LiteralUtil::One(shape.element_type())),
AsInt64Slice(shape.dimensions()));
}
std::vector<XlaOp> parts;
for (const Shape& s : shape.tuple_shapes()) {
parts.push_back(BuildFakeDataOpOnDevice(s, builder));
}
- return builder->Tuple(parts);
+ return Tuple(builder, parts);
}
std::unique_ptr<GlobalData> MakeFakeDataViaDeviceOrDie(const Shape& shape,
diff --git a/tensorflow/compiler/xla/client/xla_client/BUILD b/tensorflow/compiler/xla/client/xla_client/BUILD
index 507a2dc5f0..763653c685 100644
--- a/tensorflow/compiler/xla/client/xla_client/BUILD
+++ b/tensorflow/compiler/xla/client/xla_client/BUILD
@@ -1,7 +1,5 @@
# Description:
# The new XLA client libraries.
-#
-# This is NOT YET ready to use.
licenses(["notice"]) # Apache 2.0
@@ -41,9 +39,11 @@ cc_library(
name = "xla_builder",
srcs = ["xla_builder.cc"],
hdrs = ["xla_builder.h"],
+ visibility = ["//visibility:public"],
deps = [
":xla_computation",
"//tensorflow/compiler/xla:execution_options_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
@@ -52,6 +52,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/client:padding",
+ "//tensorflow/compiler/xla/client:sharding_builder",
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service:hlo_proto",
"//tensorflow/compiler/xla/service:shape_inference",
@@ -64,7 +65,7 @@ tf_cc_test(
srcs = ["xla_builder_test.cc"],
deps = [
":xla_builder",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:test",
diff --git a/tensorflow/compiler/xla/client/xla_client/xla_builder.cc b/tensorflow/compiler/xla/client/xla_client/xla_builder.cc
index 256667cbe0..3b4f9e1407 100644
--- a/tensorflow/compiler/xla/client/xla_client/xla_builder.cc
+++ b/tensorflow/compiler/xla/client/xla_client/xla_builder.cc
@@ -21,6 +21,7 @@ limitations under the License.
#include <string>
#include <utility>
+#include "tensorflow/compiler/xla/client/sharding_builder.h"
#include "tensorflow/compiler/xla/execution_options_util.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/service/shape_inference.h"
@@ -47,6 +48,7 @@ int64 GetUniqueId() {
// computation.
bool CanBeRoot(HloOpcode opcode) {
switch (opcode) {
+ case HloOpcode::kAfterAll:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kOutfeed:
@@ -59,6 +61,36 @@ bool CanBeRoot(HloOpcode opcode) {
} // namespace
+XlaOp operator-(const XlaOp& x) { return Neg(x); }
+XlaOp operator+(const XlaOp& x, const XlaOp& y) { return Add(x, y); }
+XlaOp operator-(const XlaOp& x, const XlaOp& y) { return Sub(x, y); }
+XlaOp operator*(const XlaOp& x, const XlaOp& y) { return Mul(x, y); }
+XlaOp operator/(const XlaOp& x, const XlaOp& y) { return Div(x, y); }
+XlaOp operator%(const XlaOp& x, const XlaOp& y) { return Rem(x, y); }
+
+XlaOp operator~(const XlaOp& x) { return Not(x); }
+XlaOp operator&(const XlaOp& x, const XlaOp& y) { return And(x, y); }
+XlaOp operator|(const XlaOp& x, const XlaOp& y) { return Or(x, y); }
+XlaOp operator^(const XlaOp& x, const XlaOp& y) { return Xor(x, y); }
+XlaOp operator<<(const XlaOp& x, const XlaOp& y) { return ShiftLeft(x, y); }
+
+XlaOp operator>>(const XlaOp& x, const XlaOp& y) {
+ XlaBuilder* builder = x.builder();
+ return builder->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
+ if (!ShapeUtil::ElementIsIntegral(shape)) {
+ return InvalidArgument(
+ "Argument to >> operator does not have an integral type (%s).",
+ ShapeUtil::HumanString(shape).c_str());
+ }
+ if (ShapeUtil::ElementIsSigned(shape)) {
+ return ShiftRightArithmetic(x, y);
+ } else {
+ return ShiftRightLogical(x, y);
+ }
+ });
+}
+
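
With these overloads, client code can combine ops infix instead of calling the named free functions; a short illustration (assume `x` and `y` are S32-typed XlaOps built on the same builder):

XlaOp z = (x + y) * x - y;   // Add, Mul, Sub
XlaOp m = (x & y) | ~x;      // And, Or, Not
XlaOp s = x >> y;            // ShiftRightArithmetic, since S32 is signed
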
StatusOr<Shape> XlaBuilder::GetShape(const XlaOp& op) const {
TF_RETURN_IF_ERROR(first_error_);
@@ -81,7 +113,7 @@ XlaBuilder::XlaBuilder(const string& computation_name)
XlaBuilder::~XlaBuilder() {}
-void XlaBuilder::NoteError(const Status& error) {
+XlaOp XlaBuilder::ReportError(const Status& error) {
CHECK(!error.ok());
if (die_immediately_on_error_) {
LOG(FATAL) << "error building computation: " << error;
@@ -91,19 +123,22 @@ void XlaBuilder::NoteError(const Status& error) {
first_error_ = error;
first_error_backtrace_.CreateCurrent(/*skip_count=*/1);
}
+ return XlaOp(this);
}
-XlaOp XlaBuilder::NoteErrorOrReturn(
- const std::function<StatusOr<XlaOp>()>& op_creator) {
+XlaOp XlaBuilder::ReportErrorOrReturn(const StatusOr<XlaOp>& op) {
if (!first_error_.ok()) {
return XlaOp(this);
}
- auto op = op_creator();
if (!op.ok()) {
- NoteError(op.status());
- return XlaOp(this);
+ return ReportError(op.status());
}
- return op.ConsumeValueOrDie();
+ return op.ValueOrDie();
+}
+
+XlaOp XlaBuilder::ReportErrorOrReturn(
+ const std::function<StatusOr<XlaOp>()>& op_creator) {
+ return ReportErrorOrReturn(op_creator());
}
StatusOr<ProgramShape> XlaBuilder::GetProgramShape(int64* root_id) const {
@@ -207,7 +242,7 @@ XlaComputation XlaBuilder::BuildAndNoteError() {
DCHECK(parent_builder_ != nullptr);
auto build_status = Build();
if (!build_status.ok()) {
- parent_builder_->NoteError(
+ parent_builder_->ReportError(
AddStatus(build_status.status(),
tensorflow::strings::StrCat("error from: ", name_)));
return {};
@@ -315,7 +350,7 @@ StatusOr<XlaOp> XlaBuilder::AddBroadcastSequence(const Shape& output_shape,
}
XlaOp XlaBuilder::UnaryOp(HloOpcode unop, const XlaOp& operand) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(*instr.mutable_shape(),
@@ -327,7 +362,7 @@ XlaOp XlaBuilder::UnaryOp(HloOpcode unop, const XlaOp& operand) {
XlaOp XlaBuilder::BinaryOp(
HloOpcode binop, const XlaOp& lhs, const XlaOp& rhs,
tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs));
TF_ASSIGN_OR_RETURN(const Shape& rhs_shape, GetShape(rhs));
@@ -383,7 +418,7 @@ XlaOp XlaBuilder::BinaryOp(
XlaOp XlaBuilder::TernaryOp(HloOpcode triop, const XlaOp& lhs, const XlaOp& rhs,
const XlaOp& ehs) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs));
TF_ASSIGN_OR_RETURN(const Shape& rhs_shape, GetShape(rhs));
@@ -430,7 +465,7 @@ XlaOp XlaBuilder::Mul(const XlaOp& lhs, const XlaOp& rhs,
}
XlaOp XlaBuilder::ConstantLiteral(const LiteralSlice& literal) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = literal.shape();
*instr.mutable_literal() = literal.ToProto();
@@ -440,7 +475,7 @@ XlaOp XlaBuilder::ConstantLiteral(const LiteralSlice& literal) {
XlaOp XlaBuilder::Call(const XlaComputation& computation,
tensorflow::gtl::ArraySlice<XlaOp> operands) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(operands));
@@ -461,7 +496,7 @@ XlaOp XlaBuilder::Call(const XlaComputation& computation,
XlaOp XlaBuilder::Parameter(int64 parameter_number, const Shape& shape,
const string& name) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
if (!parameter_numbers_.insert(parameter_number).second) {
return InvalidArgument("parameter %lld already registered",
@@ -476,7 +511,7 @@ XlaOp XlaBuilder::Parameter(int64 parameter_number, const Shape& shape,
XlaOp XlaBuilder::Broadcast(
const XlaOp& operand, tensorflow::gtl::ArraySlice<int64> broadcast_sizes) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(
const Shape& shape,
@@ -498,6 +533,14 @@ XlaOp XlaBuilder::Broadcast(
});
}
+XlaOp XlaBuilder::BroadcastInDim(
+ const XlaOp& operand, const Shape& shape,
+ const tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return InDimBroadcast(shape, operand, broadcast_dimensions);
+ });
+}
+
StatusOr<XlaOp> XlaBuilder::Reshape(const Shape& shape, const XlaOp& operand) {
TF_RETURN_IF_ERROR(first_error_);
@@ -510,7 +553,7 @@ XlaOp XlaBuilder::Slice(const XlaOp& operand,
tensorflow::gtl::ArraySlice<int64> start_indices,
tensorflow::gtl::ArraySlice<int64> limit_indices,
tensorflow::gtl::ArraySlice<int64> strides) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(
@@ -530,7 +573,7 @@ XlaOp XlaBuilder::Slice(const XlaOp& operand,
XlaOp XlaBuilder::SliceInDim(const XlaOp& operand, int64 start_index,
int64 limit_index, int64 stride, int64 dimno) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& shape, GetShape(operand));
std::vector<int64> starts(ShapeUtil::Rank(shape), 0);
std::vector<int64> limits(shape.dimensions().begin(),
@@ -545,7 +588,7 @@ XlaOp XlaBuilder::SliceInDim(const XlaOp& operand, int64 start_index,
XlaOp XlaBuilder::DynamicSlice(const XlaOp& operand, const XlaOp& start_indices,
tensorflow::gtl::ArraySlice<int64> slice_sizes) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -566,7 +609,7 @@ XlaOp XlaBuilder::DynamicSlice(const XlaOp& operand, const XlaOp& start_indices,
XlaOp XlaBuilder::DynamicUpdateSlice(const XlaOp& operand, const XlaOp& update,
const XlaOp& start_indices) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -584,7 +627,7 @@ XlaOp XlaBuilder::DynamicUpdateSlice(const XlaOp& operand, const XlaOp& update,
XlaOp XlaBuilder::ConcatInDim(tensorflow::gtl::ArraySlice<XlaOp> operands,
int64 dimension) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
std::vector<const Shape*> operand_shape_ptrs;
@@ -603,7 +646,7 @@ XlaOp XlaBuilder::ConcatInDim(tensorflow::gtl::ArraySlice<XlaOp> operands,
XlaOp XlaBuilder::Pad(const XlaOp& operand, const XlaOp& padding_value,
const PaddingConfig& padding_config) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -624,7 +667,7 @@ XlaOp XlaBuilder::Pad(const XlaOp& operand, const XlaOp& padding_value,
XlaOp XlaBuilder::Reshape(const XlaOp& operand,
tensorflow::gtl::ArraySlice<int64> dimensions,
tensorflow::gtl::ArraySlice<int64> new_sizes) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(const Shape& shape,
ShapeInference::InferReshapeShape(
@@ -638,7 +681,7 @@ XlaOp XlaBuilder::Reshape(const XlaOp& operand,
XlaOp XlaBuilder::Reshape(const XlaOp& operand,
tensorflow::gtl::ArraySlice<int64> new_sizes) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, GetShape(operand));
std::vector<int64> dimensions(shape.dimensions_size());
std::iota(dimensions.begin(), dimensions.end(), 0);
@@ -648,7 +691,7 @@ XlaOp XlaBuilder::Reshape(const XlaOp& operand,
XlaOp XlaBuilder::Collapse(const XlaOp& operand,
tensorflow::gtl::ArraySlice<int64> dimensions) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
if (dimensions.size() <= 1) {
      // Not collapsing anything; trivially return the operand rather than
      // enqueueing a no-op reshape.
@@ -690,21 +733,29 @@ XlaOp XlaBuilder::Collapse(const XlaOp& operand,
}
void XlaBuilder::Trace(const string& tag, const XlaOp& operand) {
- NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = ShapeUtil::MakeNil();
- *instr.mutable_literal() = Literal::CreateR1U8(tag)->ToProto();
+ *instr.mutable_literal() = LiteralUtil::CreateR1U8(tag)->ToProto();
return AddInstruction(std::move(instr), HloOpcode::kTrace, {operand});
});
}
XlaOp XlaBuilder::Select(const XlaOp& pred, const XlaOp& on_true,
const XlaOp& on_false) {
- return TernaryOp(HloOpcode::kSelect, pred, on_true, on_false);
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ TF_ASSIGN_OR_RETURN(const Shape& true_shape, GetShape(on_true));
+ TF_ASSIGN_OR_RETURN(const Shape& false_shape, GetShape(on_false));
+ TF_RET_CHECK(ShapeUtil::IsTuple(true_shape) ==
+ ShapeUtil::IsTuple(false_shape));
+ HloOpcode opcode = ShapeUtil::IsTuple(true_shape) ? HloOpcode::kTupleSelect
+ : HloOpcode::kSelect;
+ return TernaryOp(opcode, pred, on_true, on_false);
+ });
}
XlaOp XlaBuilder::Tuple(tensorflow::gtl::ArraySlice<XlaOp> elements) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(elements));
@@ -718,7 +769,7 @@ XlaOp XlaBuilder::Tuple(tensorflow::gtl::ArraySlice<XlaOp> elements) {
}
XlaOp XlaBuilder::GetTupleElement(const XlaOp& tuple_data, int64 index) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& tuple_shape, GetShape(tuple_data));
if (!ShapeUtil::IsTuple(tuple_shape)) {
@@ -767,7 +818,7 @@ XlaOp XlaBuilder::Lt(const XlaOp& lhs, const XlaOp& rhs,
}
XlaOp XlaBuilder::Dot(const XlaOp& lhs, const XlaOp& rhs) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs));
DotDimensionNumbers dimension_numbers;
@@ -780,7 +831,7 @@ XlaOp XlaBuilder::Dot(const XlaOp& lhs, const XlaOp& rhs) {
XlaOp XlaBuilder::DotGeneral(const XlaOp& lhs, const XlaOp& rhs,
const DotDimensionNumbers& dimension_numbers) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs));
TF_ASSIGN_OR_RETURN(const Shape& rhs_shape, GetShape(rhs));
@@ -859,7 +910,7 @@ XlaOp XlaBuilder::ConvWithGeneralDimensions(
const XlaOp& lhs, const XlaOp& rhs,
tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
const ConvolutionDimensionNumbers& dimension_numbers) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs));
TF_ASSIGN_OR_RETURN(const Shape& rhs_shape, GetShape(rhs));
@@ -905,7 +956,7 @@ XlaOp XlaBuilder::ConvGeneralDilated(
tensorflow::gtl::ArraySlice<int64> lhs_dilation,
tensorflow::gtl::ArraySlice<int64> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs));
TF_ASSIGN_OR_RETURN(const Shape& rhs_shape, GetShape(rhs));
@@ -992,7 +1043,7 @@ StatusOr<Window> XlaBuilder::MakeWindow(
XlaOp XlaBuilder::Fft(const XlaOp& operand, const FftType fft_type,
const tensorflow::gtl::ArraySlice<int64> fft_length) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(
@@ -1009,23 +1060,98 @@ XlaOp XlaBuilder::Fft(const XlaOp& operand, const FftType fft_type,
}
XlaOp XlaBuilder::Infeed(const Shape& shape, const string& config) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
if (!LayoutUtil::HasLayout(shape)) {
return InvalidArgument("Given shape to Infeed must have a layout");
}
- *instr.mutable_shape() = shape;
+ const Shape infeed_instruction_shape =
+ ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()});
+ *instr.mutable_shape() = infeed_instruction_shape;
instr.set_infeed_config(config);
- return AddInstruction(std::move(instr), HloOpcode::kInfeed);
+
+ if (ShapeUtil::IsArray(shape) && sharding() &&
+ sharding()->type() == OpSharding::Type::OpSharding_Type_OTHER) {
+ // TODO(b/110793772): Support tiled array-shaped infeeds.
+ return InvalidArgument(
+ "Tiled sharding is not yet supported for array-shaped infeeds");
+ }
+
+ if (sharding() &&
+ sharding()->type() == OpSharding::Type::OpSharding_Type_REPLICATED) {
+ return InvalidArgument(
+ "Replicated sharding is not yet supported for infeeds");
+ }
+
+ // The sharding is set by the client according to the data tuple shape.
+ // However, the shape of the infeed instruction is a tuple containing the
+ // data and a token. For tuple sharding type, the sharding must be changed
+ // to accommodate the token.
+ XlaOp infeed;
+ if (sharding() &&
+ sharding()->type() == OpSharding::Type::OpSharding_Type_TUPLE) {
+ // TODO(b/80000000): Remove this when clients have been updated to handle
+ // tokens.
+ OpSharding infeed_instruction_sharding = *sharding();
+ // Arbitrarily assign the token to device 0.
+ *infeed_instruction_sharding.add_tuple_shardings() =
+ sharding_builder::AssignDevice(0);
+ XlaScopedShardingAssignment scoped_sharding(this,
+ infeed_instruction_sharding);
+ TF_ASSIGN_OR_RETURN(infeed,
+ AddInstruction(std::move(instr), HloOpcode::kInfeed));
+ } else {
+ TF_ASSIGN_OR_RETURN(infeed,
+ AddInstruction(std::move(instr), HloOpcode::kInfeed));
+ }
+
+    // The infeed instruction produces a tuple of the infeed data and a
+    // token. Return an XlaOp containing just the data.
+ // TODO(b/80000000): Remove this when clients have been updated to handle
+ // tokens.
+ HloInstructionProto infeed_data;
+ *infeed_data.mutable_shape() = shape;
+ infeed_data.set_tuple_index(0);
+ return AddInstruction(std::move(infeed_data), HloOpcode::kGetTupleElement,
+ {infeed});
+ });
+}
+
+XlaOp XlaBuilder::InfeedWithToken(const XlaOp& token, const Shape& shape,
+ const string& config) {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ HloInstructionProto instr;
+ if (!LayoutUtil::HasLayout(shape)) {
+ return InvalidArgument("Given shape to Infeed must have a layout");
+ }
+ const Shape infeed_instruction_shape =
+ ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()});
+ *instr.mutable_shape() = infeed_instruction_shape;
+ instr.set_infeed_config(config);
+
+ if (ShapeUtil::IsArray(shape) && sharding() &&
+ sharding()->type() == OpSharding::Type::OpSharding_Type_OTHER) {
+ // TODO(b/110793772): Support tiled array-shaped infeeds.
+ return InvalidArgument(
+ "Tiled sharding is not yet supported for array-shaped infeeds");
+ }
+
+ if (sharding() &&
+ sharding()->type() == OpSharding::Type::OpSharding_Type_REPLICATED) {
+ return InvalidArgument(
+ "Replicated sharding is not yet supported for infeeds");
+ }
+
+ return AddInstruction(std::move(instr), HloOpcode::kInfeed, {token});
});
}
void XlaBuilder::Outfeed(const XlaOp& operand, const Shape& shape_with_layout,
const string& outfeed_config) {
- NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
- *instr.mutable_shape() = ShapeUtil::MakeNil();
+ *instr.mutable_shape() = ShapeUtil::MakeTokenShape();
// Check and set outfeed shape.
if (!LayoutUtil::HasLayout(shape_with_layout)) {
@@ -1042,14 +1168,80 @@ void XlaBuilder::Outfeed(const XlaOp& operand, const Shape& shape_with_layout,
instr.set_outfeed_config(outfeed_config);
- return AddInstruction(std::move(instr), HloOpcode::kOutfeed, {operand});
+ TF_RETURN_IF_ERROR(
+ AddInstruction(std::move(instr), HloOpcode::kOutfeed, {operand})
+ .status());
+
+ // The outfeed instruction produces a token. However, existing users expect
+ // a nil shape (empty tuple). This should only be relevant if the outfeed is
+ // the root of a computation.
+ // TODO(b/80000000): Remove this when clients have been updated to handle
+ // tokens.
+ HloInstructionProto tuple_instr;
+ *tuple_instr.mutable_shape() = ShapeUtil::MakeNil();
+
+ // The dummy tuple should have no sharding.
+ {
+ XlaScopedShardingAssignment scoped_sharding(this, OpSharding());
+ TF_ASSIGN_OR_RETURN(
+ XlaOp empty_tuple,
+ AddInstruction(std::move(tuple_instr), HloOpcode::kTuple, {}));
+ return empty_tuple;
+ }
+ });
+}
+
+XlaOp XlaBuilder::OutfeedWithToken(const XlaOp& operand, const XlaOp& token,
+ const Shape& shape_with_layout,
+ const string& outfeed_config) {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ HloInstructionProto instr;
+
+ *instr.mutable_shape() = ShapeUtil::MakeTokenShape();
+
+ // Check and set outfeed shape.
+ if (!LayoutUtil::HasLayout(shape_with_layout)) {
+ return InvalidArgument("Given shape to Outfeed must have a layout");
+ }
+ TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
+ if (!ShapeUtil::Compatible(operand_shape, shape_with_layout)) {
+ return InvalidArgument(
+ "Outfeed shape %s must be compatible with operand shape %s",
+ ShapeUtil::HumanStringWithLayout(shape_with_layout).c_str(),
+ ShapeUtil::HumanStringWithLayout(operand_shape).c_str());
+ }
+ *instr.mutable_outfeed_shape() = shape_with_layout;
+
+ instr.set_outfeed_config(outfeed_config);
+
+ return AddInstruction(std::move(instr), HloOpcode::kOutfeed,
+ {operand, token});
+ });
+}
+
+XlaOp XlaBuilder::CreateToken() {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ HloInstructionProto instr;
+ *instr.mutable_shape() = ShapeUtil::MakeTokenShape();
+ return AddInstruction(std::move(instr), HloOpcode::kAfterAll);
+ });
+}
+
+XlaOp XlaBuilder::AfterAll(tensorflow::gtl::ArraySlice<XlaOp> tokens) {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ if (tokens.empty()) {
+ return InvalidArgument("AfterAll requires at least one operand");
+ }
+ HloInstructionProto instr;
+ *instr.mutable_shape() = ShapeUtil::MakeTokenShape();
+ return AddInstruction(std::move(instr), HloOpcode::kAfterAll, tokens);
});
}
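
Taken together, the token-based entry points let a client thread explicit ordering through side-effecting ops. A sketch, where `data_shape` stands for any Shape with a layout and the empty config strings are illustrative:

XlaOp token = builder->CreateToken();
// InfeedWithToken yields a (data, token) tuple; unpack both halves.
XlaOp infeed = builder->InfeedWithToken(token, data_shape, /*config=*/"");
XlaOp data = builder->GetTupleElement(infeed, 0);
XlaOp token2 = builder->GetTupleElement(infeed, 1);
// OutfeedWithToken consumes a token and produces a fresh one.
XlaOp token3 = builder->OutfeedWithToken(data, token2, data_shape,
                                         /*outfeed_config=*/"");
// AfterAll joins one or more tokens into a single ordering point.
XlaOp joined = builder->AfterAll({token3});
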
XlaOp XlaBuilder::CustomCall(const string& call_target_name,
tensorflow::gtl::ArraySlice<XlaOp> operands,
const Shape& shape) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
if (tensorflow::str_util::StartsWith(call_target_name, "$")) {
return InvalidArgument(
@@ -1066,7 +1258,7 @@ XlaOp XlaBuilder::CustomCall(const string& call_target_name,
XlaOp XlaBuilder::HostCompute(tensorflow::gtl::ArraySlice<XlaOp> operands,
const string& channel_name,
int64 cost_estimate_ns, const Shape& shape) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape;
instr.set_channel_name(channel_name);
@@ -1221,7 +1413,7 @@ XlaOp XlaBuilder::IsFinite(const XlaOp& operand) {
XlaOp XlaBuilder::Transpose(const XlaOp& operand,
tensorflow::gtl::ArraySlice<int64> permutation) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(
@@ -1236,7 +1428,7 @@ XlaOp XlaBuilder::Transpose(const XlaOp& operand,
XlaOp XlaBuilder::Rev(const XlaOp& operand,
tensorflow::gtl::ArraySlice<int64> dimensions) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(
@@ -1249,13 +1441,31 @@ XlaOp XlaBuilder::Rev(const XlaOp& operand,
});
}
-XlaOp XlaBuilder::Sort(const XlaOp& operand) {
- return UnaryOp(HloOpcode::kSort, operand);
-}
-
-XlaOp XlaBuilder::SqrtF32(const XlaOp& operand) {
- return BinaryOp(HloOpcode::kPower, operand, ConstantR0<float>(0.5),
- /*broadcast_dimensions=*/{});
+XlaOp XlaBuilder::Sort(XlaOp keys, tensorflow::gtl::optional<XlaOp> values,
+ int64 dimension) {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ HloInstructionProto instr;
+ std::vector<const Shape*> operand_shape_ptrs;
+ TF_ASSIGN_OR_RETURN(const Shape& keys_shape, GetShape(keys));
+ operand_shape_ptrs.push_back(&keys_shape);
+ Shape values_shape;
+ if (values.has_value()) {
+ TF_ASSIGN_OR_RETURN(values_shape, GetShape(*values));
+ operand_shape_ptrs.push_back(&values_shape);
+ }
+ TF_ASSIGN_OR_RETURN(*instr.mutable_shape(),
+ ShapeInference::InferVariadicOpShape(
+ HloOpcode::kSort, operand_shape_ptrs));
+ if (dimension == -1) {
+ TF_ASSIGN_OR_RETURN(const Shape& keys_shape, GetShape(keys));
+ dimension = ShapeUtil::Rank(keys_shape) - 1;
+ }
+ instr.add_dimensions(dimension);
+ return values.has_value()
+ ? AddInstruction(std::move(instr), HloOpcode::kSort,
+ {keys, *values})
+ : AddInstruction(std::move(instr), HloOpcode::kSort, {keys});
+ });
}
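
A sketch of the new Sort signature (assume `keys` and `values` are same-shaped rank-2 XlaOps; the argument defaults follow the header declaration, which is not shown in this hunk):

// Keys only: dimension == -1 sorts along the last dimension.
XlaOp sorted_keys = builder->Sort(keys, tensorflow::gtl::nullopt, -1);
// Keys plus values: the result is a (sorted keys, reordered values) tuple.
XlaOp pair = builder->Sort(keys, values, /*dimension=*/0);
XlaOp sorted_k = builder->GetTupleElement(pair, 0);
XlaOp sorted_v = builder->GetTupleElement(pair, 1);
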
XlaOp XlaBuilder::Pow(const XlaOp& lhs, const XlaOp& rhs,
@@ -1265,7 +1475,7 @@ XlaOp XlaBuilder::Pow(const XlaOp& lhs, const XlaOp& rhs,
XlaOp XlaBuilder::ConvertElementType(const XlaOp& operand,
PrimitiveType new_element_type) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(
@@ -1277,7 +1487,7 @@ XlaOp XlaBuilder::ConvertElementType(const XlaOp& operand,
XlaOp XlaBuilder::BitcastConvertType(const XlaOp& operand,
PrimitiveType new_element_type) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(
@@ -1288,16 +1498,6 @@ XlaOp XlaBuilder::BitcastConvertType(const XlaOp& operand,
});
}
-XlaOp XlaBuilder::SquareF32(const XlaOp& operand) {
- return BinaryOp(HloOpcode::kPower, operand, ConstantR0<float>(2.0),
- /*broadcast_dimensions=*/{});
-}
-
-XlaOp XlaBuilder::ReciprocalF32(const XlaOp& operand) {
- return BinaryOp(HloOpcode::kPower, operand, ConstantR0<float>(-1.0),
- /*broadcast_dimensions=*/{});
-}
-
XlaOp XlaBuilder::Neg(const XlaOp& operand) {
return UnaryOp(HloOpcode::kNegate, operand);
}
@@ -1311,13 +1511,12 @@ XlaOp XlaBuilder::Map(tensorflow::gtl::ArraySlice<XlaOp> operands,
const XlaComputation& computation,
tensorflow::gtl::ArraySlice<int64> dimensions,
tensorflow::gtl::ArraySlice<XlaOp> static_operands) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
if (!static_operands.empty()) {
return Unimplemented("static_operands is not supported in Map");
}
HloInstructionProto instr;
-
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(operands));
c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
@@ -1329,16 +1528,32 @@ XlaOp XlaBuilder::Map(tensorflow::gtl::ArraySlice<XlaOp> operands,
ShapeInference::InferMapShape(operand_shape_ptrs, called_program_shape,
dimensions));
+ const Shape& output_shape = instr.shape();
+ const int64 output_rank = ShapeUtil::Rank(output_shape);
AddCalledComputation(computation, &instr);
+ std::vector<XlaOp> new_operands(operands.begin(), operands.end());
+ for (XlaOp& new_operand : new_operands) {
+ TF_ASSIGN_OR_RETURN(Shape shape, GetShape(new_operand));
+ const int64 rank = ShapeUtil::Rank(shape);
+ if (rank != output_rank) {
+ TF_ASSIGN_OR_RETURN(new_operand,
+ InDimBroadcast(output_shape, new_operand, {}));
+ TF_ASSIGN_OR_RETURN(shape, GetShape(new_operand));
+ }
+ if (!ShapeUtil::SameDimensions(output_shape, shape)) {
+ TF_ASSIGN_OR_RETURN(new_operand,
+ AddBroadcastSequence(output_shape, new_operand));
+ }
+ }
- return AddInstruction(std::move(instr), HloOpcode::kMap, operands);
+ return AddInstruction(std::move(instr), HloOpcode::kMap, new_operands);
});
}
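
The broadcast loop above means Map now accepts mixed-rank operands; a sketch, where `mul` is an assumed scalar XlaComputation multiplying its two parameters:

XlaOp vec = builder->ConstantR1<float>({1.0f, 2.0f, 3.0f});
XlaOp scalar = builder->ConstantR0<float>(2.0f);
// The rank-0 operand is broadcast to the inferred rank-1 output shape
// before the kMap instruction is enqueued.
XlaOp doubled = builder->Map({vec, scalar}, mul, /*dimensions=*/{0});
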
XlaOp XlaBuilder::RngOp(RandomDistribution distribution,
tensorflow::gtl::ArraySlice<XlaOp> parameters,
const Shape& shape) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
// Check the number of parameters per RNG distribution.
@@ -1376,7 +1591,7 @@ XlaOp XlaBuilder::RngUniform(const XlaOp& a, const XlaOp& b,
XlaOp XlaBuilder::While(const XlaComputation& condition,
const XlaComputation& body, const XlaOp& init) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
// Infer shape.
@@ -1398,7 +1613,7 @@ XlaOp XlaBuilder::While(const XlaComputation& condition,
XlaOp XlaBuilder::Gather(const XlaOp& input, const XlaOp& gather_indices,
const GatherDimensionNumbers& dimension_numbers,
tensorflow::gtl::ArraySlice<int64> window_bounds) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& input_shape, GetShape(input));
@@ -1423,7 +1638,7 @@ XlaOp XlaBuilder::Conditional(const XlaOp& predicate, const XlaOp& true_operand,
const XlaComputation& true_computation,
const XlaOp& false_operand,
const XlaComputation& false_computation) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& predicate_shape, GetShape(predicate));
@@ -1455,13 +1670,14 @@ XlaOp XlaBuilder::Reduce(
const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation,
tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(const Shape& init_shape, GetShape(init_value));
TF_ASSIGN_OR_RETURN(const ProgramShape& called_program_shape,
computation.GetProgramShape());
+
TF_ASSIGN_OR_RETURN(*instr.mutable_shape(),
ShapeInference::InferReduceShape(
operand_shape, init_shape, dimensions_to_reduce,
@@ -1480,7 +1696,7 @@ XlaOp XlaBuilder::Reduce(
XlaOp XlaBuilder::ReduceAll(const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
std::vector<int64> all_dimnos(ShapeUtil::Rank(operand_shape));
std::iota(all_dimnos.begin(), all_dimnos.end(), 0);
@@ -1493,7 +1709,7 @@ XlaOp XlaBuilder::ReduceWindow(
const XlaComputation& computation,
tensorflow::gtl::ArraySlice<int64> window_dimensions,
tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -1516,7 +1732,7 @@ XlaOp XlaBuilder::ReduceWindowWithGeneralPadding(
tensorflow::gtl::ArraySlice<int64> window_dimensions,
tensorflow::gtl::ArraySlice<int64> window_strides,
tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -1540,7 +1756,7 @@ XlaOp XlaBuilder::ReduceWindowWithGeneralPadding(
XlaOp XlaBuilder::BatchNormTraining(const XlaOp& operand, const XlaOp& scale,
const XlaOp& offset, float epsilon,
int64 feature_index) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -1563,7 +1779,7 @@ XlaOp XlaBuilder::BatchNormInference(const XlaOp& operand, const XlaOp& scale,
const XlaOp& offset, const XlaOp& mean,
const XlaOp& variance, float epsilon,
int64 feature_index) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -1588,7 +1804,7 @@ XlaOp XlaBuilder::BatchNormGrad(const XlaOp& operand, const XlaOp& scale,
const XlaOp& batch_mean, const XlaOp& batch_var,
const XlaOp& grad_output, float epsilon,
int64 feature_index) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -1612,7 +1828,7 @@ XlaOp XlaBuilder::BatchNormGrad(const XlaOp& operand, const XlaOp& scale,
XlaOp XlaBuilder::CrossReplicaSum(
const XlaOp& operand,
tensorflow::gtl::ArraySlice<int64> replica_group_ids) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& shape, GetShape(operand));
const Shape& scalar_shape = ShapeUtil::MakeShape(shape.element_type(), {});
auto b = CreateSubBuilder("sum");
@@ -1628,11 +1844,7 @@ XlaOp XlaBuilder::CrossReplicaSum(
const XlaOp& operand, const XlaComputation& computation,
tensorflow::gtl::ArraySlice<int64> replica_group_ids,
const tensorflow::gtl::optional<ChannelHandle>& channel_id) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
- if (channel_id.has_value()) {
- return Unimplemented("channel_id is not supported in AllReduce");
- }
-
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(
@@ -1642,6 +1854,10 @@ XlaOp XlaBuilder::CrossReplicaSum(
instr.add_replica_group_ids(replica_group_id);
}
+ if (channel_id.has_value()) {
+ instr.set_all_reduce_id(channel_id->handle());
+ }
+
AddCalledComputation(computation, &instr);
return AddInstruction(std::move(instr), HloOpcode::kCrossReplicaSum,
@@ -1655,7 +1871,7 @@ XlaOp XlaBuilder::SelectAndScatter(
tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
const XlaOp& source, const XlaOp& init_value,
const XlaComputation& scatter) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
return SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides,
@@ -1672,7 +1888,7 @@ XlaOp XlaBuilder::SelectAndScatterWithGeneralPadding(
tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
const XlaOp& source, const XlaOp& init_value,
const XlaComputation& scatter) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -1700,7 +1916,7 @@ XlaOp XlaBuilder::SelectAndScatterWithGeneralPadding(
XlaOp XlaBuilder::ReducePrecision(const XlaOp& operand, const int exponent_bits,
const int mantissa_bits) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(*instr.mutable_shape(),
@@ -1714,20 +1930,51 @@ XlaOp XlaBuilder::ReducePrecision(const XlaOp& operand, const int exponent_bits,
}
void XlaBuilder::Send(const XlaOp& operand, const ChannelHandle& handle) {
- NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
- HloInstructionProto instr;
+ ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ // Send HLO takes two operands: a data operand and a token. Generate the
+ // token to pass into the send.
+ // TODO(b/80000000): Remove this when clients have been updated to handle
+ // tokens.
+ HloInstructionProto token_instr;
+ *token_instr.mutable_shape() = ShapeUtil::MakeTokenShape();
+ TF_ASSIGN_OR_RETURN(XlaOp token, AddInstruction(std::move(token_instr),
+ HloOpcode::kAfterAll, {}));
+
+ // Send instruction produces a tuple of {aliased operand, U32 context,
+ // token}.
+ HloInstructionProto send_instr;
+ TF_ASSIGN_OR_RETURN(const Shape& shape, GetShape(operand));
+ *send_instr.mutable_shape() = ShapeUtil::MakeTupleShape(
+ {shape, ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()});
+ send_instr.set_channel_id(handle.handle());
+ TF_ASSIGN_OR_RETURN(XlaOp send,
+ AddInstruction(std::move(send_instr), HloOpcode::kSend,
+ {operand, token}));
+
+ HloInstructionProto send_done_instr;
+ *send_done_instr.mutable_shape() = ShapeUtil::MakeTokenShape();
+ send_done_instr.set_channel_id(handle.handle());
+ return AddInstruction(std::move(send_done_instr), HloOpcode::kSendDone,
+ {send});
+ });
+}
- // Send instruction produces a tuple of {aliased operand, U32 context}.
+XlaOp XlaBuilder::SendWithToken(const XlaOp& operand, const XlaOp& token,
+ const ChannelHandle& handle) {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ // Send instruction produces a tuple of {aliased operand, U32 context,
+ // token}.
+ HloInstructionProto send_instr;
TF_ASSIGN_OR_RETURN(const Shape& shape, GetShape(operand));
- *instr.mutable_shape() =
- ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeShape(U32, {})});
- instr.set_channel_id(handle.handle());
- TF_ASSIGN_OR_RETURN(
- XlaOp send,
- AddInstruction(std::move(instr), HloOpcode::kSend, {operand}));
+ *send_instr.mutable_shape() = ShapeUtil::MakeTupleShape(
+ {shape, ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()});
+ send_instr.set_channel_id(handle.handle());
+ TF_ASSIGN_OR_RETURN(XlaOp send,
+ AddInstruction(std::move(send_instr), HloOpcode::kSend,
+ {operand, token}));
HloInstructionProto send_done_instr;
- *send_done_instr.mutable_shape() = ShapeUtil::MakeNil();
+ *send_done_instr.mutable_shape() = ShapeUtil::MakeTokenShape();
send_done_instr.set_channel_id(handle.handle());
return AddInstruction(std::move(send_done_instr), HloOpcode::kSendDone,
{send});
@@ -1735,18 +1982,60 @@ void XlaBuilder::Send(const XlaOp& operand, const ChannelHandle& handle) {
}
XlaOp XlaBuilder::Recv(const Shape& shape, const ChannelHandle& handle) {
- return NoteErrorOrReturn([&]() -> StatusOr<XlaOp> {
- HloInstructionProto instr;
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ // Recv HLO takes a single token operand. Generate the token to pass into
+ // the Recv and RecvDone instructions.
+ // TODO(b/80000000): Remove this when clients have been updated to handle
+ // tokens.
+ HloInstructionProto token_instr;
+ *token_instr.mutable_shape() = ShapeUtil::MakeTokenShape();
+ TF_ASSIGN_OR_RETURN(XlaOp token, AddInstruction(std::move(token_instr),
+ HloOpcode::kAfterAll, {}));
+
+ // Recv instruction produces a tuple of {receive buffer, U32 context,
+ // token}.
+ HloInstructionProto recv_instr;
+ *recv_instr.mutable_shape() = ShapeUtil::MakeTupleShape(
+ {shape, ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()});
+ recv_instr.set_channel_id(handle.handle());
+ TF_ASSIGN_OR_RETURN(XlaOp recv, AddInstruction(std::move(recv_instr),
+ HloOpcode::kRecv, {token}));
- // Recv instruction produces a tuple of {receive buffer, U32 context}.
- *instr.mutable_shape() =
- ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeShape(U32, {})});
- instr.set_channel_id(handle.handle());
- TF_ASSIGN_OR_RETURN(XlaOp recv,
- AddInstruction(std::move(instr), HloOpcode::kRecv, {}));
+ HloInstructionProto recv_done_instr;
+ *recv_done_instr.mutable_shape() =
+ ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()});
+ recv_done_instr.set_channel_id(handle.handle());
+ TF_ASSIGN_OR_RETURN(XlaOp recv_done,
+ AddInstruction(std::move(recv_done_instr),
+ HloOpcode::kRecvDone, {recv}));
+
+    // The RecvDone instruction produces a tuple of the data and a token.
+    // Return an XlaOp containing just the data.
+ // TODO(b/80000000): Remove this when clients have been updated to handle
+ // tokens.
+ HloInstructionProto recv_data;
+ *recv_data.mutable_shape() = shape;
+ recv_data.set_tuple_index(0);
+ return AddInstruction(std::move(recv_data), HloOpcode::kGetTupleElement,
+ {recv_done});
+ });
+}
+
+XlaOp XlaBuilder::RecvWithToken(const XlaOp& token, const Shape& shape,
+ const ChannelHandle& handle) {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ // Recv instruction produces a tuple of {receive buffer, U32 context,
+ // token}.
+ HloInstructionProto recv_instr;
+ *recv_instr.mutable_shape() = ShapeUtil::MakeTupleShape(
+ {shape, ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()});
+ recv_instr.set_channel_id(handle.handle());
+ TF_ASSIGN_OR_RETURN(XlaOp recv, AddInstruction(std::move(recv_instr),
+ HloOpcode::kRecv, {token}));
HloInstructionProto recv_done_instr;
- *recv_done_instr.mutable_shape() = shape;
+ *recv_done_instr.mutable_shape() =
+ ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()});
recv_done_instr.set_channel_id(handle.handle());
return AddInstruction(std::move(recv_done_instr), HloOpcode::kRecvDone,
{recv});
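
A sketch of the token-threaded send/receive pair (assume `handle` is a ChannelHandle obtained from the client, `data_shape` the transferred Shape, and `sender`/`receiver` separate builders):

// Producer computation.
XlaOp t0 = sender->CreateToken();
XlaOp send_token = sender->SendWithToken(data, t0, handle);

// Consumer computation: RecvWithToken returns a (data, token) tuple.
XlaOp t1 = receiver->CreateToken();
XlaOp recv = receiver->RecvWithToken(t1, data_shape, handle);
XlaOp received = receiver->GetTupleElement(recv, 0);
XlaOp next_token = receiver->GetTupleElement(recv, 1);
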
@@ -1990,4 +2279,526 @@ StatusOr<const HloInstructionProto*> XlaBuilder::LookUpInstruction(
return &instructions_[op.handle()];
}
+// Enqueues a "retrieve parameter value" instruction for a parameter that was
+// passed to the computation.
+XlaOp Parameter(XlaBuilder* builder, int64 parameter_number, const Shape& shape,
+ const string& name) {
+ return builder->Parameter(parameter_number, shape, name);
+}
+
+// Enqueues a constant with the value of the given literal onto the
+// computation.
+XlaOp ConstantLiteral(XlaBuilder* builder, const LiteralSlice& literal) {
+ return builder->ConstantLiteral(literal);
+}
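
These wrappers recover the builder from their first argument (or take it explicitly), so computations can be written in a free-function style; a sketch mirroring the testing.cc change above:

XlaBuilder b("free_function_style");
XlaOp p = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4}), "p");
XlaOp one = ConstantLiteral(&b, LiteralUtil::One(F32));
// Broadcast recovers the builder from 'one'; operator+ is the overload
// defined in xla_builder.cc.
XlaOp result = Broadcast(one, {4}) + p;
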
+
+XlaOp Broadcast(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> broadcast_sizes) {
+ return operand.builder()->Broadcast(operand, broadcast_sizes);
+}
+
+XlaOp BroadcastInDim(
+ const XlaOp& operand, const Shape& shape,
+ const tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return operand.builder()->BroadcastInDim(operand, shape,
+ broadcast_dimensions);
+}
+
+XlaOp Pad(const XlaOp& operand, const XlaOp& padding_value,
+ const PaddingConfig& padding_config) {
+ return operand.builder()->Pad(operand, padding_value, padding_config);
+}
+
+XlaOp Reshape(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> dimensions,
+ tensorflow::gtl::ArraySlice<int64> new_sizes) {
+ return operand.builder()->Reshape(operand, dimensions, new_sizes);
+}
+
+XlaOp Reshape(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> new_sizes) {
+ return operand.builder()->Reshape(operand, new_sizes);
+}
+
+XlaOp Collapse(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> dimensions) {
+ return operand.builder()->Collapse(operand, dimensions);
+}
+
+XlaOp Slice(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> start_indices,
+ tensorflow::gtl::ArraySlice<int64> limit_indices,
+ tensorflow::gtl::ArraySlice<int64> strides) {
+ return operand.builder()->Slice(operand, start_indices, limit_indices,
+ strides);
+}
+
+XlaOp SliceInDim(const XlaOp& operand, int64 start_index, int64 limit_index,
+ int64 stride, int64 dimno) {
+ return operand.builder()->SliceInDim(operand, start_index, limit_index,
+ stride, dimno);
+}
+
+XlaOp DynamicSlice(const XlaOp& operand, const XlaOp& start_indices,
+ tensorflow::gtl::ArraySlice<int64> slice_sizes) {
+ return operand.builder()->DynamicSlice(operand, start_indices, slice_sizes);
+}
+
+XlaOp DynamicUpdateSlice(const XlaOp& operand, const XlaOp& update,
+ const XlaOp& start_indices) {
+ return operand.builder()->DynamicUpdateSlice(operand, update, start_indices);
+}
+
+XlaOp ConcatInDim(XlaBuilder* builder,
+ tensorflow::gtl::ArraySlice<XlaOp> operands,
+ int64 dimension) {
+ return builder->ConcatInDim(operands, dimension);
+}
+
+void Trace(const string& tag, const XlaOp& operand) {
+ return operand.builder()->Trace(tag, operand);
+}
+
+XlaOp Select(const XlaOp& pred, const XlaOp& on_true, const XlaOp& on_false) {
+ return pred.builder()->Select(pred, on_true, on_false);
+}
+
+XlaOp Tuple(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> elements) {
+ return builder->Tuple(elements);
+}
+
+XlaOp GetTupleElement(const XlaOp& tuple_data, int64 index) {
+ return tuple_data.builder()->GetTupleElement(tuple_data, index);
+}
+
+XlaOp Eq(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Eq(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Ne(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Ne(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Ge(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Ge(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Gt(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Gt(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Lt(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Lt(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Le(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Le(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Dot(const XlaOp& lhs, const XlaOp& rhs) {
+ return lhs.builder()->Dot(lhs, rhs);
+}
+
+XlaOp DotGeneral(const XlaOp& lhs, const XlaOp& rhs,
+ const DotDimensionNumbers& dimension_numbers) {
+ return lhs.builder()->DotGeneral(lhs, rhs, dimension_numbers);
+}
+
+XlaOp Conv(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding) {
+ return lhs.builder()->Conv(lhs, rhs, window_strides, padding);
+}
+
+XlaOp ConvWithGeneralPadding(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding) {
+ return lhs.builder()->ConvWithGeneralPadding(lhs, rhs, window_strides,
+ padding);
+}
+
+XlaOp ConvWithGeneralDimensions(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
+ const ConvolutionDimensionNumbers& dimension_numbers) {
+ return lhs.builder()->ConvWithGeneralDimensions(lhs, rhs, window_strides,
+ padding, dimension_numbers);
+}
+
+XlaOp ConvGeneral(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ const ConvolutionDimensionNumbers& dimension_numbers) {
+ return lhs.builder()->ConvGeneral(lhs, rhs, window_strides, padding,
+ dimension_numbers);
+}
+
+XlaOp ConvGeneralDilated(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ tensorflow::gtl::ArraySlice<int64> lhs_dilation,
+ tensorflow::gtl::ArraySlice<int64> rhs_dilation,
+ const ConvolutionDimensionNumbers& dimension_numbers) {
+ return lhs.builder()->ConvGeneralDilated(lhs, rhs, window_strides, padding,
+ lhs_dilation, rhs_dilation,
+ dimension_numbers);
+}
+
+XlaOp Fft(const XlaOp& operand, FftType fft_type,
+ tensorflow::gtl::ArraySlice<int64> fft_length) {
+ return operand.builder()->Fft(operand, fft_type, fft_length);
+}
+
+XlaOp Infeed(XlaBuilder* builder, const Shape& shape, const string& config) {
+ return builder->Infeed(shape, config);
+}
+
+void Outfeed(const XlaOp& operand, const Shape& shape_with_layout,
+ const string& outfeed_config) {
+ return operand.builder()->Outfeed(operand, shape_with_layout, outfeed_config);
+}
+
+XlaOp Call(XlaBuilder* builder, const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<XlaOp> operands) {
+ return builder->Call(computation, operands);
+}
+
+XlaOp CustomCall(XlaBuilder* builder, const string& call_target_name,
+ tensorflow::gtl::ArraySlice<XlaOp> operands,
+ const Shape& shape) {
+ return builder->CustomCall(call_target_name, operands, shape);
+}
+
+XlaOp HostCompute(XlaBuilder* builder,
+ tensorflow::gtl::ArraySlice<XlaOp> operands,
+ const string& channel_name, int64 cost_estimate_ns,
+ const Shape& shape) {
+ return builder->HostCompute(operands, channel_name, cost_estimate_ns, shape);
+}
+
+XlaOp Complex(const XlaOp& real, const XlaOp& imag,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return real.builder()->Complex(real, imag, broadcast_dimensions);
+}
+
+XlaOp Conj(const XlaOp& operand) { return operand.builder()->Conj(operand); }
+
+XlaOp Add(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Add(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Sub(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Sub(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Mul(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Mul(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Div(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Div(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Rem(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Rem(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Max(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Max(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Min(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Min(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp And(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->And(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Or(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Or(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Xor(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Xor(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Not(const XlaOp& operand) { return operand.builder()->Not(operand); }
+
+XlaOp ShiftLeft(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->ShiftLeft(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp ShiftRightArithmetic(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->ShiftRightArithmetic(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp ShiftRightLogical(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->ShiftRightLogical(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp Reduce(const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce) {
+ return operand.builder()->Reduce(operand, init_value, computation,
+ dimensions_to_reduce);
+}
+
+XlaOp ReduceAll(const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation) {
+ return operand.builder()->ReduceAll(operand, init_value, computation);
+}
+
+XlaOp ReduceWindow(const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> window_dimensions,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ Padding padding) {
+ return operand.builder()->ReduceWindow(operand, init_value, computation,
+ window_dimensions, window_strides,
+ padding);
+}
+
+XlaOp ReduceWindowWithGeneralPadding(
+ const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> window_dimensions,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding) {
+ return operand.builder()->ReduceWindowWithGeneralPadding(
+ operand, init_value, computation, window_dimensions, window_strides,
+ padding);
+}
+
+XlaOp CrossReplicaSum(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> replica_group_ids) {
+ return operand.builder()->CrossReplicaSum(operand, replica_group_ids);
+}
+
+XlaOp CrossReplicaSum(
+ const XlaOp& operand, const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> replica_group_ids,
+ const tensorflow::gtl::optional<ChannelHandle>& channel_id) {
+ return operand.builder()->CrossReplicaSum(operand, computation,
+ replica_group_ids, channel_id);
+}
+
+XlaOp SelectAndScatter(const XlaOp& operand, const XlaComputation& select,
+ tensorflow::gtl::ArraySlice<int64> window_dimensions,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ Padding padding, const XlaOp& source,
+ const XlaOp& init_value, const XlaComputation& scatter) {
+ return operand.builder()->SelectAndScatter(operand, select, window_dimensions,
+ window_strides, padding, source,
+ init_value, scatter);
+}
+
+XlaOp SelectAndScatterWithGeneralPadding(
+ const XlaOp& operand, const XlaComputation& select,
+ tensorflow::gtl::ArraySlice<int64> window_dimensions,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ const XlaOp& source, const XlaOp& init_value,
+ const XlaComputation& scatter) {
+ return operand.builder()->SelectAndScatterWithGeneralPadding(
+ operand, select, window_dimensions, window_strides, padding, source,
+ init_value, scatter);
+}
+
+XlaOp Abs(const XlaOp& operand) { return operand.builder()->Abs(operand); }
+
+XlaOp Atan2(const XlaOp& y, const XlaOp& x,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return y.builder()->Atan2(y, x, broadcast_dimensions);
+}
+
+XlaOp Exp(const XlaOp& operand) { return operand.builder()->Exp(operand); }
+
+XlaOp Expm1(const XlaOp& operand) { return operand.builder()->Expm1(operand); }
+
+XlaOp Floor(const XlaOp& operand) { return operand.builder()->Floor(operand); }
+
+XlaOp Ceil(const XlaOp& operand) { return operand.builder()->Ceil(operand); }
+
+XlaOp Round(const XlaOp& operand) { return operand.builder()->Round(operand); }
+
+XlaOp Log(const XlaOp& operand) { return operand.builder()->Log(operand); }
+
+XlaOp Log1p(const XlaOp& operand) { return operand.builder()->Log1p(operand); }
+
+XlaOp Sign(const XlaOp& operand) { return operand.builder()->Sign(operand); }
+
+XlaOp Clz(const XlaOp& operand) { return operand.builder()->Clz(operand); }
+
+XlaOp Cos(const XlaOp& operand) { return operand.builder()->Cos(operand); }
+
+XlaOp Sin(const XlaOp& operand) { return operand.builder()->Sin(operand); }
+
+XlaOp Tanh(const XlaOp& operand) { return operand.builder()->Tanh(operand); }
+
+XlaOp Real(const XlaOp& operand) { return operand.builder()->Real(operand); }
+
+XlaOp Imag(const XlaOp& operand) { return operand.builder()->Imag(operand); }
+
+XlaOp Pow(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ return lhs.builder()->Pow(lhs, rhs, broadcast_dimensions);
+}
+
+XlaOp IsFinite(const XlaOp& operand) {
+ return operand.builder()->IsFinite(operand);
+}
+
+XlaOp ConvertElementType(const XlaOp& operand, PrimitiveType new_element_type) {
+ return operand.builder()->ConvertElementType(operand, new_element_type);
+}
+
+XlaOp BitcastConvertType(const XlaOp& operand, PrimitiveType new_element_type) {
+ return operand.builder()->BitcastConvertType(operand, new_element_type);
+}
+
+XlaOp Neg(const XlaOp& operand) { return operand.builder()->Neg(operand); }
+
+XlaOp Transpose(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> permutation) {
+ return operand.builder()->Transpose(operand, permutation);
+}
+
+XlaOp Rev(const XlaOp& operand, tensorflow::gtl::ArraySlice<int64> dimensions) {
+ return operand.builder()->Rev(operand, dimensions);
+}
+
+XlaOp Sort(XlaOp keys, tensorflow::gtl::optional<XlaOp> values,
+ int64 dimension) {
+ return keys.builder()->Sort(keys, std::move(values), dimension);
+}
+
+XlaOp Clamp(const XlaOp& min, const XlaOp& operand, const XlaOp& max) {
+ return min.builder()->Clamp(min, operand, max);
+}
+
+XlaOp Map(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> operands,
+ const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> dimensions,
+ tensorflow::gtl::ArraySlice<XlaOp> static_operands) {
+ return builder->Map(operands, computation, dimensions, static_operands);
+}
+
+XlaOp RngNormal(const XlaOp& mu, const XlaOp& sigma, const Shape& shape) {
+ return mu.builder()->RngNormal(mu, sigma, shape);
+}
+
+XlaOp RngUniform(const XlaOp& a, const XlaOp& b, const Shape& shape) {
+ return a.builder()->RngUniform(a, b, shape);
+}
+
+XlaOp While(const XlaComputation& condition, const XlaComputation& body,
+ const XlaOp& init) {
+ return init.builder()->While(condition, body, init);
+}
+
+XlaOp Conditional(const XlaOp& predicate, const XlaOp& true_operand,
+ const XlaComputation& true_computation,
+ const XlaOp& false_operand,
+ const XlaComputation& false_computation) {
+ return predicate.builder()->Conditional(predicate, true_operand,
+ true_computation, false_operand,
+ false_computation);
+}
+
+XlaOp ReducePrecision(const XlaOp& operand, const int exponent_bits,
+ const int mantissa_bits) {
+ return operand.builder()->ReducePrecision(operand, exponent_bits,
+ mantissa_bits);
+}
+
+XlaOp Gather(const XlaOp& input, const XlaOp& gather_indices,
+ const GatherDimensionNumbers& dimension_numbers,
+ tensorflow::gtl::ArraySlice<int64> window_bounds) {
+ return input.builder()->Gather(input, gather_indices, dimension_numbers,
+ window_bounds);
+}
+
+void Send(const XlaOp& operand, const ChannelHandle& handle) {
+ return operand.builder()->Send(operand, handle);
+}
+
+XlaOp Recv(XlaBuilder* builder, const Shape& shape,
+ const ChannelHandle& handle) {
+ return builder->Recv(shape, handle);
+}
+
+XlaOp SendWithToken(const XlaOp& operand, const XlaOp& token,
+ const ChannelHandle& handle) {
+ return operand.builder()->SendWithToken(operand, token, handle);
+}
+
+XlaOp RecvWithToken(const XlaOp& token, const Shape& shape,
+ const ChannelHandle& handle) {
+ return token.builder()->RecvWithToken(token, shape, handle);
+}
+
+XlaOp InfeedWithToken(const XlaOp& token, const Shape& shape,
+ const string& config) {
+ return token.builder()->InfeedWithToken(token, shape, config);
+}
+
+XlaOp OutfeedWithToken(const XlaOp& operand, const XlaOp& token,
+ const Shape& shape_with_layout,
+ const string& outfeed_config) {
+ return operand.builder()->OutfeedWithToken(operand, token, shape_with_layout,
+ outfeed_config);
+}
+
+XlaOp CreateToken(XlaBuilder* builder) { return builder->CreateToken(); }
+
+XlaOp AfterAll(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> tokens) {
+ return builder->AfterAll(tokens);
+}
+
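
A minimal sketch of how the token-based ops defined above compose (the helper name is an illustrative assumption, not part of this patch). Note that RecvWithToken yields a (data, token) tuple, matching the RecvDone shape change at the top of this diff:

#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"

// Sketch: thread an execution-ordering token through Recv and AfterAll.
xla::XlaOp RecvThenJoin(xla::XlaBuilder* b, const xla::Shape& shape,
                        const xla::ChannelHandle& handle) {
  xla::XlaOp token = xla::CreateToken(b);
  // RecvWithToken returns a tuple of {received data, result token}.
  xla::XlaOp recvd = xla::RecvWithToken(token, shape, handle);
  xla::XlaOp data = xla::GetTupleElement(recvd, 0);
  xla::XlaOp out_token = xla::GetTupleElement(recvd, 1);
  // AfterAll joins token operands into a single token-shaped dependency.
  (void)xla::AfterAll(b, {out_token});
  return data;
}
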
+XlaOp BatchNormTraining(const XlaOp& operand, const XlaOp& scale,
+ const XlaOp& offset, float epsilon,
+ int64 feature_index) {
+ return operand.builder()->BatchNormTraining(operand, scale, offset, epsilon,
+ feature_index);
+}
+
+XlaOp BatchNormInference(const XlaOp& operand, const XlaOp& scale,
+ const XlaOp& offset, const XlaOp& mean,
+ const XlaOp& variance, float epsilon,
+ int64 feature_index) {
+ return operand.builder()->BatchNormInference(
+ operand, scale, offset, mean, variance, epsilon, feature_index);
+}
+
+XlaOp BatchNormGrad(const XlaOp& operand, const XlaOp& scale,
+ const XlaOp& batch_mean, const XlaOp& batch_var,
+ const XlaOp& grad_output, float epsilon,
+ int64 feature_index) {
+ return operand.builder()->BatchNormGrad(operand, scale, batch_mean, batch_var,
+ grad_output, epsilon, feature_index);
+}
+
} // namespace xla
diff --git a/tensorflow/compiler/xla/client/xla_client/xla_builder.h b/tensorflow/compiler/xla/client/xla_client/xla_builder.h
index f18306fff0..2be6f4a553 100644
--- a/tensorflow/compiler/xla/client/xla_client/xla_builder.h
+++ b/tensorflow/compiler/xla/client/xla_client/xla_builder.h
@@ -18,10 +18,12 @@ limitations under the License.
#include <map>
#include <string>
+#include <type_traits>
#include <utility>
#include "tensorflow/compiler/xla/client/padding.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo.pb.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
@@ -46,22 +48,25 @@ class XlaBuilder;
// instruction as an operand.
class XlaOp {
public:
- XlaOp() : handle_(-1), builder_(nullptr) {}
- ~XlaOp() {}
-
- XlaBuilder* builder() const { return builder_; }
-
- bool operator==(const XlaOp& rhs) const {
- return handle_ == rhs.handle_ && builder_ == rhs.builder_;
+ XlaOp() : handle_(-1), builder_(nullptr) {
+ static_assert(std::is_trivially_destructible<XlaOp>::value,
+ "XlaOp should be trivially destructible");
}
+ ~XlaOp() = default;
- bool operator!=(const XlaOp& rhs) const {
- return handle_ != rhs.handle_ || builder_ != rhs.builder_;
- }
+ XlaBuilder* builder() const { return builder_; }
// Returns true if the XlaOp represents valid, non-erroneous value.
bool valid() const { return handle_ >= 0; }
+ // Returns true if the XlaOp was created by the XlaOp() constructor and
+ // not returned by a builder.
+ bool IsUninitialized() const { return builder_ == nullptr; }
+
+ bool IsIdenticalTo(const XlaOp& rhs) const {
+ return handle_ == rhs.handle_ && builder_ == rhs.builder_;
+ }
+
friend std::ostream& operator<<(std::ostream& out, const XlaOp& op) {
out << op.handle();
return out;
@@ -84,6 +89,30 @@ class XlaOp {
XlaBuilder* builder_;
};
+// Arithmetic operator overloads for the XlaOp type.
+XlaOp operator-(const XlaOp& x);
+XlaOp operator+(const XlaOp& x, const XlaOp& y);
+XlaOp operator-(const XlaOp& x, const XlaOp& y);
+XlaOp operator*(const XlaOp& x, const XlaOp& y);
+XlaOp operator/(const XlaOp& x, const XlaOp& y);
+XlaOp operator%(const XlaOp& x, const XlaOp& y);
+
+// Bitwise operator overloads for the XlaOp type.
+XlaOp operator~(const XlaOp& x);
+XlaOp operator&(const XlaOp& x, const XlaOp& y);
+XlaOp operator|(const XlaOp& x, const XlaOp& y);
+XlaOp operator^(const XlaOp& x, const XlaOp& y);
+XlaOp operator<<(const XlaOp& x, const XlaOp& y);
+// Performs a right arithmetic shift if 'x' is a signed type, otherwise performs
+// a right logical shift.
+XlaOp operator>>(const XlaOp& x, const XlaOp& y);
+
+// We don't overload the relational operators (==, !=, <, <=, >, >=) because the
+// semantics might be surprising since their result types are usually 'bool'.
+// Further, programmers may expect == to be a structural equality.
+// We also choose not to overload any of the mutating operators (e.g., +=, -=)
+// because the semantics might be misleading: XLA computations are immutable.
+
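As a quick sketch of the overloads declared above (assuming integer-typed ops x and y of compatible shape from the same builder), each operator builds the same HLO as the corresponding named free function:

xla::XlaOp OperatorSugar(const xla::XlaOp& x, const xla::XlaOp& y) {
  xla::XlaOp a = x + y;   // same as Add(x, y)
  xla::XlaOp b = ~x & y;  // same as And(Not(x), y)
  xla::XlaOp c = x >> y;  // arithmetic shift iff x is signed, else logical
  return a * b - c;       // same as Sub(Mul(a, b), c)
}
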
// A convenient interface for building up computations.
//
// Thread-compatible.
@@ -130,6 +159,93 @@ class XlaBuilder {
die_immediately_on_error_ = enabled;
}
+ // Default dimension numbers used for a 2D convolution.
+ static constexpr int64 kConvBatchDimension = 0;
+ static constexpr int64 kConvFeatureDimension = 1;
+ static constexpr int64 kConvFirstSpatialDimension = 2;
+ static constexpr int64 kConvSecondSpatialDimension = 3;
+ static constexpr int64 kConvKernelOutputDimension = 0;
+ static constexpr int64 kConvKernelInputDimension = 1;
+ static constexpr int64 kConvKernelFirstSpatialDimension = 2;
+ static constexpr int64 kConvKernelSecondSpatialDimension = 3;
+
+ // Creates a default ConvolutionDimensionNumbers. For a 2D convolution, for
+ // the input operand {batch, feature, height, width} = {0, 1, 2, 3} and for
+ // the kernel operand
+ // {output_feature, input_feature, height, width} = {0, 1, 2, 3}.
+ static ConvolutionDimensionNumbers CreateDefaultConvDimensionNumbers(
+ int num_spatial_dims = 2);
+
+ // Returns an error if the convolution dimension numbers have conflicts.
+ static Status Validate(const ConvolutionDimensionNumbers& dnum);
+
+ // Returns a new XlaBuilder whose resultant Computation is used only by this
+ // XlaBuilder. The sub-XlaBuilder has the same die_immediately_on_error
+ // behavior as the parent.
+ std::unique_ptr<XlaBuilder> CreateSubBuilder(const string& computation_name);
+
+ // Builds the computation with the requested operations, or returns a non-ok
+ // status. Note that all ops that have been enqueued will be moved to the
+ // computation being returned.
+ StatusOr<XlaComputation> Build();
+
+ // Builds the computation with the requested operations, or notes an error in
+ // the parent XlaBuilder and returns an empty computation if building failed.
+  // This function is intended to be used where the returned XlaComputation is
+  // only used by the parent XlaBuilder and hence further operations on the
+  // returned XlaComputation will simply be errored out if an error occurred
+  // while building this computation. If the built computation is to be used by
+  // an XlaBuilder other than the parent XlaBuilder then Build() should be used
+  // instead.
+ XlaComputation BuildAndNoteError();
+
+ // Returns a subgraph that roots on the given root. If the root is not a
+ // compile-time constant (see `IsConstant`), returns an error.
+ //
+ // This will copy the needed ops/computations to the subgraph.
+ StatusOr<XlaComputation> BuildConstantSubGraph(const XlaOp& root_op) const;
+
+ // Returns the first error that was encountered while building the
+ // computation. When an error is encountered, by default we return a vacuous
+ // XlaOp and inform the user of the error that occurred while
+ // building the computation when they make a final call to Build().
+ //
+ // See also set_die_immediately_on_error().
+ Status first_error() const { return first_error_; }
+
+ // Returns the shape of the given op.
+ StatusOr<Shape> GetShape(const XlaOp& op) const;
+
+ // Returns the (inferred) result for the current computation's shape.
+ StatusOr<ProgramShape> GetProgramShape() const;
+
+ // Reports an error to the builder, by
+ // * storing it internally and capturing a backtrace if it's the first error
+ // (this deferred value will be produced on the call to
+ // Build()/GetShape()/...)
+ // * dying if die_immediately_on_error_ is true.
+ // Returns an XlaOp with an invalid handle but a valid builder. This value can
+ // be returned in place of a value in APIs that return an XlaOp.
+ XlaOp ReportError(const Status& error);
+
+ // A helper function that converts a StatusOr<XlaOp> into an XlaOp.
+ // If the Status was an error, reports the error to builder and returns an
+ // invalid XlaOp handle.
+ XlaOp ReportErrorOrReturn(const StatusOr<XlaOp>& op);
+
+ // A helper function that runs a function that returns a StatusOr<XlaOp> and
+ // returns an XlaOp.
+ XlaOp ReportErrorOrReturn(const std::function<StatusOr<XlaOp>()>& op_creator);
+
+ // Returns true if 'operand' is a compile-time constant. A compile-time
+ // constant does not depend on any parameters, or on stateful operators such
+ // as `RngNormal` or `Infeed`.
+ //
+ // This tests whether a computation is a compile-time constant without
+ // evaluating the computation.
+ StatusOr<bool> IsConstant(const XlaOp& operand) const;
+
+ private:
// Enqueues a "retrieve parameter value" instruction for a parameter that was
// passed to the computation.
XlaOp Parameter(int64 parameter_number, const Shape& shape,
@@ -202,6 +318,27 @@ class XlaBuilder {
XlaOp Broadcast(const XlaOp& operand,
tensorflow::gtl::ArraySlice<int64> broadcast_sizes);
+ // Performs in-dimension-style broadcast.
+ //
+  // Operand specifies the input to be broadcast. "shape" is the expected
+  // output shape. "broadcast_dimensions" are the dimensions to broadcast
+  // into. Dimension numbers in broadcast_dimensions map to individual
+  // dimensions of the operand, and specify which dimension of the output
+  // shape each operand dimension is broadcast into.
+  // e.g.
+  // Say operand = [1, 2], i.e., a 1D tensor with 2 elements,
+  // and the dimensions of shape are [2,2].
+  // Specifying {1} as broadcast_dimension will generate output
+ // [1 , 2]
+ // [1 , 2]
+ // On the other hand, specifying {0} as broadcast_dimension
+ // will generate output
+ // [1 , 1]
+ // [2 , 2]
+ XlaOp BroadcastInDim(
+ const XlaOp& operand, const Shape& shape,
+ const tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+
// Enqueues a pad operation onto the computation that pads the given value on
// the edges as well as between the elements of the input. padding_config
// specifies the padding amount for each dimension.
@@ -350,26 +487,6 @@ class XlaBuilder {
XlaOp DotGeneral(const XlaOp& lhs, const XlaOp& rhs,
const DotDimensionNumbers& dimension_numbers);
- // Default dimension numbers used for a 2D convolution.
- static constexpr int64 kConvBatchDimension = 0;
- static constexpr int64 kConvFeatureDimension = 1;
- static constexpr int64 kConvFirstSpatialDimension = 2;
- static constexpr int64 kConvSecondSpatialDimension = 3;
- static constexpr int64 kConvKernelOutputDimension = 0;
- static constexpr int64 kConvKernelInputDimension = 1;
- static constexpr int64 kConvKernelFirstSpatialDimension = 2;
- static constexpr int64 kConvKernelSecondSpatialDimension = 3;
-
- // Creates a default ConvolutionDimensionNumbers. For a 2D convolution, for
- // the input operand {batch, feature, height, width} = {0, 1, 2, 3} and for
- // the kernel operand
- // {output_feature, input_feature, height, width} = {0, 1, 2, 3}.
- static ConvolutionDimensionNumbers CreateDefaultConvDimensionNumbers(
- int num_spatial_dims = 2);
-
- // Returns an error if the convolution dimension numbers have conflicts.
- static Status Validate(const ConvolutionDimensionNumbers& dnum);
-
// Enqueues a convolution instruction onto the computation, which uses the
// default convolution dimension numbers.
XlaOp Conv(const XlaOp& lhs, const XlaOp& rhs,
@@ -416,6 +533,8 @@ class XlaBuilder {
// Enqueues an infeed instruction onto the computation, which writes data of
// the given shape to the infeed buffer of the device.
XlaOp Infeed(const Shape& shape, const string& config = "");
+ XlaOp InfeedWithToken(const XlaOp& token, const Shape& shape,
+ const string& config = "");
// Enqueues an outfeed instruction onto the computation. This instruction
// generates outgoing data transfers for the given data.
@@ -425,6 +544,9 @@ class XlaBuilder {
// will occur.
void Outfeed(const XlaOp& operand, const Shape& shape_with_layout,
const string& outfeed_config);
+ XlaOp OutfeedWithToken(const XlaOp& operand, const XlaOp& token,
+ const Shape& shape_with_layout,
+ const string& outfeed_config);
// Enqueues a call instruction onto the computation.
XlaOp Call(const XlaComputation& computation,
@@ -635,16 +757,6 @@ class XlaBuilder {
// Enqueues an imaginary-part instruction onto the computation.
XlaOp Imag(const XlaOp& operand);
- // Enqueues a float32 sqrt instruction onto the computation.
- // (float32 is specified as there is an implicit float32 0.5f constant
- // exponent).
- XlaOp SqrtF32(const XlaOp& operand);
-
- // Enqueues a float32 square instruction onto the computation.
- // (float32 is specified as there is an implicit float32 2.0f constant
- // exponent).
- XlaOp SquareF32(const XlaOp& operand);
-
// Enqueues a lhs^rhs computation onto the computation.
XlaOp Pow(const XlaOp& lhs, const XlaOp& rhs,
tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
@@ -667,14 +779,6 @@ class XlaBuilder {
XlaOp BitcastConvertType(const XlaOp& operand,
PrimitiveType new_element_type);
- // Enqueues a float32 reciprocal instruction onto the computation.
- // (float32 is specified as there is an implicit float32 -1.0f constant
- // exponent).
- //
- // TODO(b/34468990) axe F32 suffix, can be determined by reflecting on the
- // shape of the operand.
- XlaOp ReciprocalF32(const XlaOp& operand);
-
// Enqueues a negate instruction onto the computation.
XlaOp Neg(const XlaOp& operand);
@@ -689,7 +793,24 @@ class XlaBuilder {
tensorflow::gtl::ArraySlice<int64> dimensions);
// Enqueues a sort (as increasing order) instruction onto the computation.
- XlaOp Sort(const XlaOp& operand);
+ // If only keys are provided:
+  // * If the keys are a rank-1 tensor (an array), the result is a sorted array
+  // of keys, in ascending order.
+  // * If the keys have higher rank, the keys are sorted along the provided
+  // dimension. For example, for a rank-2 tensor (a matrix) of keys, a dimension
+  // value of 0 will independently sort every column, and a dimension value of 1
+  // will independently sort each row. If no dimension number is provided, then
+  // the last dimension is chosen by default.
+  //
+  // If both keys and values are provided:
+  // * The keys and the values must be tensors with the same dimensions. The
+  // element types of the tensors may be different.
+  // * The result is a tuple that consists of a sorted tensor of keys (along the
+  // provided dimension, as above) as the first element, and a tensor with their
+  // corresponding values as the second element.
+ XlaOp Sort(XlaOp keys,
+ tensorflow::gtl::optional<XlaOp> values = tensorflow::gtl::nullopt,
+ int64 dimension = -1);
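
For example, sorting a rank-1 key tensor together with a values tensor via the corresponding free function (a sketch; the constants are used purely for illustration):

void SortExample() {
  xla::XlaBuilder b("sort_example");
  auto keys = xla::ConstantR1<float>(&b, {3.0f, 1.0f, 2.0f});
  auto vals = xla::ConstantR1<xla::int32>(&b, {30, 10, 20});
  auto sorted = xla::Sort(keys, vals);                 // sorts along last dim
  auto sorted_keys = xla::GetTupleElement(sorted, 0);  // {1.0, 2.0, 3.0}
  auto sorted_vals = xla::GetTupleElement(sorted, 1);  // {10, 20, 30}
  (void)sorted_keys; (void)sorted_vals;
}
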
// Enqueues a clamp instruction onto the computation.
XlaOp Clamp(const XlaOp& min, const XlaOp& operand, const XlaOp& max);
@@ -730,19 +851,23 @@ class XlaBuilder {
// Enqueues a Send node onto the computation, to send the given operand to
// a Recv instruction that shares the same channel handle.
void Send(const XlaOp& operand, const ChannelHandle& handle);
+ XlaOp SendWithToken(const XlaOp& operand, const XlaOp& token,
+ const ChannelHandle& handle);
+
+ // Enqueues an AfterAll operation with no operands producing a token-shaped
+ // value.
+ XlaOp CreateToken();
+
+  // Enqueues an AfterAll operation that produces a token-shaped value which
+  // depends on all of the given token-shaped operands.
+ XlaOp AfterAll(tensorflow::gtl::ArraySlice<XlaOp> tokens);
// Enqueues a Recv node onto the computation. The data comes from a Send
// instruction that shares the same channel handle and its shape must
// be the same as the given shape.
XlaOp Recv(const Shape& shape, const ChannelHandle& handle);
-
- // Returns true if 'operand' is a compile-time constant. A compile-time
- // constant does not depend on any parameters, or on stateful operators such
- // as `RngNormal` or `Infeed`.
- //
- // This tests whether a computation is a compile-time constant without
- // evaluating the computation.
- StatusOr<bool> IsConstant(const XlaOp& operand) const;
+ XlaOp RecvWithToken(const XlaOp& token, const Shape& shape,
+ const ChannelHandle& handle);
// Normalizes operand across spatial and batch dimensions for each feature.
//
@@ -782,47 +907,6 @@ class XlaBuilder {
const XlaOp& grad_output, float epsilon,
int64 feature_index);
- // Returns a new XlaBuilder whose resultant Computation is used only by this
- // XlaBuilder. The sub-XlaBuilder has the same die_immediately_on_error
- // behavior as the parent.
- std::unique_ptr<XlaBuilder> CreateSubBuilder(const string& computation_name);
-
- // Builds the computation with the requested operations, or returns a non-ok
- // status. Note that all ops that have been enqueued will be moved to the
- // computation being returned.
- StatusOr<XlaComputation> Build();
-
- // Builds the computation with the requested operations, or notes an error in
- // the parent XlaBuilder and returns an empty computation if building failed.
- // This function is intended to be used where the returned XlaComputation is
- // only used by the parent XlaBuilder and hence further operation on the
- // returned XlaComputation will simply be error'ed out if an error occurred
- // while building this computation. If the built computation is to be used by
- // a XlaBuilder other than the parent XlaBuilder then Build() should be used
- // instead.
- XlaComputation BuildAndNoteError();
-
- // Returns a subgraph that roots on the given root. If the root is not a
- // compile-time constant (see `IsConstant`), returns an error.
- //
- // This will copy the needed ops/computations to the subgraph.
- StatusOr<XlaComputation> BuildConstantSubGraph(const XlaOp& root_op) const;
-
- // Returns the first error that was encountered while building the
- // computation. When an error is encountered, by default we return a vacuous
- // XlaOp and inform the user of the error that occurred while
- // building the computation when they make a final call to Build().
- //
- // See also set_die_immediately_on_error().
- Status first_error() const { return first_error_; }
-
- // Returns the shape of the given op.
- StatusOr<Shape> GetShape(const XlaOp& op) const;
-
- // Returns the (inferred) result for the current computation's shape.
- StatusOr<ProgramShape> GetProgramShape() const;
-
- private:
StatusOr<XlaOp> AddInstruction(
HloInstructionProto&& instr, HloOpcode opcode,
tensorflow::gtl::ArraySlice<XlaOp> operands = {});
@@ -830,14 +914,6 @@ class XlaBuilder {
void AddCalledComputation(const XlaComputation& computation,
HloInstructionProto* instr);
- // Notes that the error occurred by:
- // * storing it internally and capturing a backtrace if it's the first error
- // (this deferred value will be produced on the call to Build())
- // * dying if die_immediately_on_error_ is true
- void NoteError(const Status& error);
-
- XlaOp NoteErrorOrReturn(const std::function<StatusOr<XlaOp>()>& op_creator);
-
StatusOr<const HloInstructionProto*> LookUpInstruction(const XlaOp& op) const;
// Internal helper method that does the building for an arbitrary unary op.
@@ -933,16 +1009,1032 @@ class XlaBuilder {
bool die_immediately_on_error_ = false;
XlaBuilder* parent_builder_{nullptr};
+
+ friend XlaOp Parameter(XlaBuilder* builder, int64 parameter_number,
+ const Shape& shape, const string& name);
+ friend XlaOp ConstantLiteral(XlaBuilder* builder,
+ const LiteralSlice& literal);
+ template <typename NativeT>
+ friend XlaOp ConstantR0(XlaBuilder* builder, NativeT value);
+ template <typename NativeT>
+ friend XlaOp ConstantR1(XlaBuilder* builder,
+ tensorflow::gtl::ArraySlice<NativeT> values);
+ friend XlaOp ConstantR1(XlaBuilder* builder,
+ const tensorflow::core::Bitmap& values);
+ template <typename NativeT>
+ friend XlaOp ConstantR2(
+ XlaBuilder* builder,
+ std::initializer_list<std::initializer_list<NativeT>> values);
+ template <typename NativeT>
+ friend XlaOp ConstantFromArrayWithLayout(XlaBuilder* builder,
+ const Array<NativeT>& values,
+ const Layout& layout);
+ template <typename NativeT>
+ friend XlaOp ConstantFromArray(XlaBuilder* builder,
+ const Array<NativeT>& values);
+ template <typename NativeT>
+ friend XlaOp ConstantR2FromArray2DWithLayout(XlaBuilder* builder,
+ const Array2D<NativeT>& values,
+ const Layout& layout);
+ template <typename NativeT>
+ friend XlaOp ConstantR2FromArray2D(XlaBuilder* builder,
+ const Array2D<NativeT>& values);
+ template <typename NativeT>
+ friend XlaOp ConstantR3FromArray3DWithLayout(XlaBuilder* builder,
+ const Array3D<NativeT>& values,
+ const Layout& layout);
+ template <typename NativeT>
+ friend XlaOp ConstantR3FromArray3D(XlaBuilder* builder,
+ const Array3D<NativeT>& values);
+ template <typename NativeT>
+ friend XlaOp ConstantR4FromArray4DWithLayout(XlaBuilder* builder,
+ const Array4D<NativeT>& values,
+ const Layout& layout);
+ template <typename NativeT>
+ friend XlaOp ConstantR4FromArray4D(XlaBuilder* builder,
+ const Array4D<NativeT>& values);
+
+ template <typename NativeT>
+ friend XlaOp ConstantR1(XlaBuilder* builder, int64 length, NativeT value);
+
+ friend XlaOp Broadcast(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> broadcast_sizes);
+
+ friend XlaOp BroadcastInDim(
+ const XlaOp& operand, const Shape& shape,
+ const tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+
+ friend XlaOp Pad(const XlaOp& operand, const XlaOp& padding_value,
+ const PaddingConfig& padding_config);
+
+ friend XlaOp Reshape(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> dimensions,
+ tensorflow::gtl::ArraySlice<int64> new_sizes);
+
+ friend XlaOp Reshape(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> new_sizes);
+
+ friend XlaOp Collapse(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> dimensions);
+
+ friend XlaOp Slice(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> start_indices,
+ tensorflow::gtl::ArraySlice<int64> limit_indices,
+ tensorflow::gtl::ArraySlice<int64> strides);
+
+ friend XlaOp SliceInDim(const XlaOp& operand, int64 start_index,
+ int64 limit_index, int64 stride, int64 dimno);
+
+ friend XlaOp DynamicSlice(const XlaOp& operand, const XlaOp& start_indices,
+ tensorflow::gtl::ArraySlice<int64> slice_sizes);
+
+ friend XlaOp DynamicUpdateSlice(const XlaOp& operand, const XlaOp& update,
+ const XlaOp& start_indices);
+
+ friend XlaOp ConcatInDim(XlaBuilder* builder,
+ tensorflow::gtl::ArraySlice<XlaOp> operands,
+ int64 dimension);
+
+ friend void Trace(const string& tag, const XlaOp& operand);
+
+ friend XlaOp Select(const XlaOp& pred, const XlaOp& on_true,
+ const XlaOp& on_false);
+ friend XlaOp Tuple(XlaBuilder* builder,
+ tensorflow::gtl::ArraySlice<XlaOp> elements);
+ friend XlaOp GetTupleElement(const XlaOp& tuple_data, int64 index);
+ friend XlaOp Eq(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Ne(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Ge(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Gt(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Lt(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Le(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Dot(const XlaOp& lhs, const XlaOp& rhs);
+ friend XlaOp DotGeneral(const XlaOp& lhs, const XlaOp& rhs,
+ const DotDimensionNumbers& dimension_numbers);
+ friend XlaOp Conv(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ Padding padding);
+ friend XlaOp ConvWithGeneralPadding(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding);
+ friend XlaOp ConvWithGeneralDimensions(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
+ const ConvolutionDimensionNumbers& dimension_numbers);
+ friend XlaOp ConvGeneral(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ const ConvolutionDimensionNumbers& dimension_numbers);
+ friend XlaOp ConvGeneralDilated(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ tensorflow::gtl::ArraySlice<int64> lhs_dilation,
+ tensorflow::gtl::ArraySlice<int64> rhs_dilation,
+ const ConvolutionDimensionNumbers& dimension_numbers);
+ friend XlaOp Fft(const XlaOp& operand, FftType fft_type,
+ tensorflow::gtl::ArraySlice<int64> fft_length);
+ friend XlaOp Infeed(XlaBuilder* builder, const Shape& shape,
+ const string& config);
+ friend void Outfeed(const XlaOp& operand, const Shape& shape_with_layout,
+ const string& outfeed_config);
+ friend XlaOp Call(XlaBuilder* builder, const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<XlaOp> operands);
+ friend XlaOp CustomCall(XlaBuilder* builder, const string& call_target_name,
+ tensorflow::gtl::ArraySlice<XlaOp> operands,
+ const Shape& shape);
+ friend XlaOp HostCompute(XlaBuilder* builder,
+ tensorflow::gtl::ArraySlice<XlaOp> operands,
+ const string& channel_name, int64 cost_estimate_ns,
+ const Shape& shape);
+ friend XlaOp Complex(const XlaOp& real, const XlaOp& imag,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Conj(const XlaOp& operand);
+ friend XlaOp Add(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Sub(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Mul(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Div(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Rem(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Max(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Min(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp And(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Or(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Xor(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Not(const XlaOp& operand);
+ friend XlaOp ShiftLeft(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp ShiftRightArithmetic(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp ShiftRightLogical(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Reduce(const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce);
+ friend XlaOp ReduceAll(const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation);
+ friend XlaOp ReduceWindow(
+ const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> window_dimensions,
+ tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding);
+ friend XlaOp ReduceWindowWithGeneralPadding(
+ const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> window_dimensions,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding);
+ friend XlaOp CrossReplicaSum(
+ const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> replica_group_ids);
+ friend XlaOp CrossReplicaSum(
+ const XlaOp& operand, const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> replica_group_ids,
+ const tensorflow::gtl::optional<ChannelHandle>& channel_id);
+ friend XlaOp SelectAndScatter(
+ const XlaOp& operand, const XlaComputation& select,
+ tensorflow::gtl::ArraySlice<int64> window_dimensions,
+ tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
+ const XlaOp& source, const XlaOp& init_value,
+ const XlaComputation& scatter);
+ friend XlaOp SelectAndScatterWithGeneralPadding(
+ const XlaOp& operand, const XlaComputation& select,
+ tensorflow::gtl::ArraySlice<int64> window_dimensions,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ const XlaOp& source, const XlaOp& init_value,
+ const XlaComputation& scatter);
+ friend XlaOp Abs(const XlaOp& operand);
+ friend XlaOp Atan2(const XlaOp& y, const XlaOp& x,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp Exp(const XlaOp& operand);
+ friend XlaOp Expm1(const XlaOp& operand);
+ friend XlaOp Floor(const XlaOp& operand);
+ friend XlaOp Ceil(const XlaOp& operand);
+ friend XlaOp Round(const XlaOp& operand);
+ friend XlaOp Log(const XlaOp& operand);
+ friend XlaOp Log1p(const XlaOp& operand);
+ friend XlaOp Sign(const XlaOp& operand);
+ friend XlaOp Clz(const XlaOp& operand);
+ friend XlaOp Cos(const XlaOp& operand);
+ friend XlaOp Sin(const XlaOp& operand);
+ friend XlaOp Tanh(const XlaOp& operand);
+ friend XlaOp Real(const XlaOp& operand);
+ friend XlaOp Imag(const XlaOp& operand);
+ friend XlaOp Pow(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp IsFinite(const XlaOp& operand);
+ friend XlaOp ConvertElementType(const XlaOp& operand,
+ PrimitiveType new_element_type);
+ friend XlaOp BitcastConvertType(const XlaOp& operand,
+ PrimitiveType new_element_type);
+ friend XlaOp Neg(const XlaOp& operand);
+ friend XlaOp Transpose(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> permutation);
+ friend XlaOp Rev(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> dimensions);
+ friend XlaOp Sort(XlaOp keys, tensorflow::gtl::optional<XlaOp> values,
+ int64 dimension);
+ friend XlaOp Clamp(const XlaOp& min, const XlaOp& operand, const XlaOp& max);
+ friend XlaOp Map(XlaBuilder* builder,
+ tensorflow::gtl::ArraySlice<XlaOp> operands,
+ const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> dimensions,
+ tensorflow::gtl::ArraySlice<XlaOp> static_operands);
+ friend XlaOp RngNormal(const XlaOp& mu, const XlaOp& sigma,
+ const Shape& shape);
+ friend XlaOp RngUniform(const XlaOp& a, const XlaOp& b, const Shape& shape);
+ friend XlaOp While(const XlaComputation& condition,
+ const XlaComputation& body, const XlaOp& init);
+ friend XlaOp Conditional(const XlaOp& predicate, const XlaOp& true_operand,
+ const XlaComputation& true_computation,
+ const XlaOp& false_operand,
+ const XlaComputation& false_computation);
+ friend XlaOp ReducePrecision(const XlaOp& operand, const int exponent_bits,
+ const int mantissa_bits);
+ friend XlaOp Gather(const XlaOp& input, const XlaOp& gather_indices,
+ const GatherDimensionNumbers& dimension_numbers,
+ tensorflow::gtl::ArraySlice<int64> window_bounds);
+ friend void Send(const XlaOp& operand, const ChannelHandle& handle);
+ friend XlaOp Recv(XlaBuilder* builder, const Shape& shape,
+ const ChannelHandle& handle);
+ friend XlaOp BatchNormTraining(const XlaOp& operand, const XlaOp& scale,
+ const XlaOp& offset, float epsilon,
+ int64 feature_index);
+ friend XlaOp BatchNormInference(const XlaOp& operand, const XlaOp& scale,
+ const XlaOp& offset, const XlaOp& mean,
+ const XlaOp& variance, float epsilon,
+ int64 feature_index);
+ friend XlaOp BatchNormGrad(const XlaOp& operand, const XlaOp& scale,
+ const XlaOp& batch_mean, const XlaOp& batch_var,
+ const XlaOp& grad_output, float epsilon,
+ int64 feature_index);
+ friend XlaOp SendWithToken(const XlaOp& operand, const XlaOp& token,
+ const ChannelHandle& handle);
+ friend XlaOp RecvWithToken(const XlaOp& token, const Shape& shape,
+ const ChannelHandle& handle);
+ friend XlaOp InfeedWithToken(const XlaOp& token, const Shape& shape,
+ const string& config);
+ friend XlaOp OutfeedWithToken(const XlaOp& operand, const XlaOp& token,
+ const Shape& shape_with_layout,
+ const string& outfeed_config);
+ friend XlaOp CreateToken(XlaBuilder* builder);
+ friend XlaOp AfterAll(XlaBuilder* builder,
+ tensorflow::gtl::ArraySlice<XlaOp> tokens);
+};
+
+// RAII-style object: sets the current sharding assignment in builder on
+// construction, and sets back to the previous assignment on destruction.
+class XlaScopedShardingAssignment {
+ public:
+ XlaScopedShardingAssignment(xla::XlaBuilder* builder,
+ tensorflow::gtl::optional<OpSharding> sharding)
+ : builder_(builder), prev_sharding_(builder->sharding()) {
+ SetSharding(sharding);
+ }
+
+ XlaScopedShardingAssignment(const XlaScopedShardingAssignment&) = delete;
+ XlaScopedShardingAssignment& operator=(const XlaScopedShardingAssignment&) =
+ delete;
+
+ ~XlaScopedShardingAssignment() { SetSharding(prev_sharding_); }
+
+ private:
+ void SetSharding(const tensorflow::gtl::optional<OpSharding>& sharding) {
+ if (sharding.has_value()) {
+ builder_->SetSharding(sharding.value());
+ } else {
+ builder_->ClearSharding();
+ }
+ }
+
+ xla::XlaBuilder* const builder_;
+ tensorflow::gtl::optional<OpSharding> prev_sharding_;
};
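
A usage sketch for the RAII helper above (the OpSharding value is an illustrative assumption; producing real sharding assignments is outside this diff):

void ShardedRegion(xla::XlaBuilder* b, const xla::OpSharding& sharding,
                   const xla::XlaOp& x) {
  xla::XlaOp u = x + x;  // built with whatever sharding was already set
  {
    xla::XlaScopedShardingAssignment scope(b, sharding);
    xla::XlaOp v = u * u;  // built with `sharding` attached
    (void)v;
  }
  // The destructor has restored the previous sharding assignment here.
}
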
+// Free functions for building XlaOps. The intention is that these will
+// become the public API for building XlaOps rather than calling methods on
+// XlaBuilder directly.
+
+// Enqueues a "retrieve parameter value" instruction for a parameter that was
+// passed to the computation.
+XlaOp Parameter(XlaBuilder* builder, int64 parameter_number, const Shape& shape,
+ const string& name);
+
+// Enqueues a constant with the value of the given literal onto the
+// computation.
+XlaOp ConstantLiteral(XlaBuilder* builder, const LiteralSlice& literal);
+
+// Enqueues a constant onto the computation. Methods are templated on the
+// native host type (NativeT) which corresponds to a specific XLA
+// PrimitiveType as given in the following table:
+//
+// Native Type PrimitiveType
+// -----------------------------
+// bool PRED
+// int32 S32
+// int64 S64
+// uint32 U32
+// uint64 U64
+// float F32
+// double F64
+//
+// Note: not all primitive types defined in xla_data.proto have a
+// corresponding native type yet.
+template <typename NativeT>
+XlaOp ConstantR0(XlaBuilder* builder, NativeT value);
+template <typename NativeT>
+XlaOp ConstantR1(XlaBuilder* builder,
+ tensorflow::gtl::ArraySlice<NativeT> values);
+XlaOp ConstantR1(XlaBuilder* builder, const tensorflow::core::Bitmap& values);
+template <typename NativeT>
+XlaOp ConstantR2(XlaBuilder* builder,
+ std::initializer_list<std::initializer_list<NativeT>> values);
+template <typename NativeT>
+XlaOp ConstantFromArrayWithLayout(XlaBuilder* builder,
+ const Array<NativeT>& values,
+ const Layout& layout);
+template <typename NativeT>
+XlaOp ConstantFromArray(XlaBuilder* builder, const Array<NativeT>& values);
+template <typename NativeT>
+XlaOp ConstantR2FromArray2DWithLayout(XlaBuilder* builder,
+ const Array2D<NativeT>& values,
+ const Layout& layout);
+template <typename NativeT>
+XlaOp ConstantR2FromArray2D(XlaBuilder* builder,
+ const Array2D<NativeT>& values);
+template <typename NativeT>
+XlaOp ConstantR3FromArray3DWithLayout(XlaBuilder* builder,
+ const Array3D<NativeT>& values,
+ const Layout& layout);
+template <typename NativeT>
+XlaOp ConstantR3FromArray3D(XlaBuilder* builder,
+ const Array3D<NativeT>& values);
+template <typename NativeT>
+XlaOp ConstantR4FromArray4DWithLayout(XlaBuilder* builder,
+ const Array4D<NativeT>& values,
+ const Layout& layout);
+template <typename NativeT>
+XlaOp ConstantR4FromArray4D(XlaBuilder* builder,
+ const Array4D<NativeT>& values);
+
+// Enqueues a rank one constant (vector) onto the computation. The vector has
+// size 'length' and every element has the value 'value'.
+template <typename NativeT>
+XlaOp ConstantR1(XlaBuilder* builder, int64 length, NativeT value);
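
A few representative instantiations, following the native-type table above (a sketch; the builder argument is assumed to be valid):

void ConstantsExample(xla::XlaBuilder* b) {
  auto scalar = xla::ConstantR0<float>(b, 3.5f);                // F32 scalar
  auto vec = xla::ConstantR1<xla::int64>(b, {1, 2, 3});         // S64 vector
  auto mat = xla::ConstantR2<bool>(b, {{true, false},
                                       {false, true}});         // PRED 2x2
  auto filled = xla::ConstantR1<float>(b, /*length=*/4, 0.0f);  // {0,0,0,0}
  (void)scalar; (void)vec; (void)mat; (void)filled;
}
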
+
+// Adds dimensions to an array by duplicating the data in the array.
+//
+// The new dimensions are inserted on the left, i.e. if
+// broadcast_sizes has values {a0, ..., aN} and the operand shape
+// has dimensions {b0, ..., bM} then the shape of the output has
+// dimensions {a0, ..., aN, b0, ..., bM}.
+//
+// The new dimensions index into copies of the operand, i.e.
+//
+// output[i0, ..., iN, j0, ..., jM] = operand[j0, ..., jM]
+XlaOp Broadcast(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> broadcast_sizes);
+
+// Performs in-dimension-style broadcast.
+//
+// Operand specifies the input to be broadcast. "shape" is the expected
+// output shape. "broadcast_dimensions" are the dimensions to broadcast
+// into. Dimension numbers in broadcast_dimensions map to individual
+// dimensions of the operand, and specify which dimension of the output
+// shape each operand dimension is broadcast into.
+// e.g.
+// Say operand = [1, 2], i.e., a 1D tensor with 2 elements,
+// and the dimensions of shape are [2,2].
+// Specifying {1} as broadcast_dimension will generate output
+// [1 , 2]
+// [1 , 2]
+// On the other hand, specifying {0} as broadcast_dimension
+// will generate output
+// [1 , 1]
+// [2 , 2]
+XlaOp BroadcastInDim(
+ const XlaOp& operand, const Shape& shape,
+ const tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
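
Translating the worked example above into code (a sketch; ShapeUtil::MakeShape comes from the standard XLA headers, which this diff does not show):

void BroadcastInDimExample(xla::XlaBuilder* b) {
  auto v = xla::ConstantR1<float>(b, {1.0f, 2.0f});
  xla::Shape out = xla::ShapeUtil::MakeShape(xla::F32, {2, 2});
  auto rows = xla::BroadcastInDim(v, out, {1});  // [[1, 2], [1, 2]]
  auto cols = xla::BroadcastInDim(v, out, {0});  // [[1, 1], [2, 2]]
  (void)rows; (void)cols;
}
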
+
+// Enqueues a pad operation onto the computation that pads the given value on
+// the edges as well as between the elements of the input. padding_config
+// specifies the padding amount for each dimension.
+XlaOp Pad(const XlaOp& operand, const XlaOp& padding_value,
+ const PaddingConfig& padding_config);
+
+// Enqueues an operation onto the computation that flattens the operand based
+// on the dimension order (major/slowest-varying to minor/fastest-varying)
+// given, followed by reshaping it into the shape with the given dimension
+// sizes (also major to minor). Conceptually, this is a limited form of
+// "shape casting".
+XlaOp Reshape(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> dimensions,
+ tensorflow::gtl::ArraySlice<int64> new_sizes);
+
+// Enqueues an operation onto the computation that collapses the operand, from
+// first to last dimension (C order), then reshapes it to the given dimension
+// sizes. Conceptually, this is a limited form of "shape casting".
+XlaOp Reshape(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> new_sizes);
+
+// Wrapper for Reshape.
+// Enqueues an operation to collapse the provided dimensions; e.g. an
+// operand with dimensions {x=256, y=2, z=2, p=32} can be collapsed to
+// {x=1024, y=32} by collapsing dims {0, 1, 2}. Collapsing dimensions must
+// be a consecutive, in-order subsequence of the operand dimensions.
+//
+// Note that collapsing a single dimension does nothing:
+//
+// {256} collapsing {0} => {256}
+// {1} collapsing {0} => {1}
+//
+// Collapsing multiple dimensions produces a single result dimension:
+//
+// {256, 2} collapsing {0,1} => {512}
+// {256, 2, 3} collapsing {0,1} => {512, 3}
+//
+// This could potentially cause data to be moved -- it provides a more
+// structured form of reshaping than an arbitrary Reshape operation.
+XlaOp Collapse(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> dimensions);
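
For instance, the shape transformation described above looks like this (a sketch; `op` is assumed to be f32[256, 2, 2, 32]):

xla::XlaOp CollapseExample(const xla::XlaOp& op) {
  // Collapsing the consecutive dims {0, 1, 2} yields f32[1024, 32].
  return xla::Collapse(op, {0, 1, 2});
}
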
+
+// Enqueues a slice operation onto the computation that slices the operand
+// from the start indices to the limit indices; e.g.
+//
+// x
+// [ 0 1 2 3 ]
+// y [ 4 5 6 7 ] => slice(start={1, 1}, limit={2, 3}) => [ 5 6 ]
+// [ 8 9 a b ]
+//
+// Note that "limit" means up-to-but-not-including; i.e. [start, limit) in 1D
+// range notation.
+// The strides parameter determines the stride over the slice.
+XlaOp Slice(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> start_indices,
+ tensorflow::gtl::ArraySlice<int64> limit_indices,
+ tensorflow::gtl::ArraySlice<int64> strides);
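
The diagram above corresponds to the following call (a sketch; `m` is assumed to be the 3x4 matrix shown):

xla::XlaOp SliceExample(const xla::XlaOp& m) {
  // start={1,1}, limit={2,3}, strides={1,1} selects the submatrix [[5, 6]].
  return xla::Slice(m, {1, 1}, {2, 3}, {1, 1});
}
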
+
+// Enqueues a slice operation in a given dimension, taking all other
+// dimensions as they are; e.g. if dimno is 1 from start_index 2 to
+// limit_index 4 by 1, and the shape is f32[7,8,9], this call is short-hand
+// for:
+//
+// array[:, 2:4:1, :]
+XlaOp SliceInDim(const XlaOp& operand, int64 start_index, int64 limit_index,
+ int64 stride, int64 dimno);
+
+// Enqueues a slice operation onto the computation that slices the 'operand'
+// from dynamic start indices which are passed in 'start_indices'.
+// The size of the slice in each dimension is passed in 'slice_sizes',
+// which specify the end point of exclusive slice intervals in each
+// dimension [start, start + size).
+// The shape of 'start_indices' must have rank == 1, with dimension size
+// equal to the rank of the 'operand'.
+// Slice index calculations are computed modulo input dimension sizes to
+// prevent dynamic start indices from generating out-of-bound array accesses.
+XlaOp DynamicSlice(const XlaOp& operand, const XlaOp& start_indices,
+ tensorflow::gtl::ArraySlice<int64> slice_sizes);
+
+// Enqueues a dynamic update slice operation onto the computation, which
+// updates a slice of 'operand' with 'update' at dynamic 'start_indices'.
+// The shape of 'update' determines the shape of the slice of 'operand'
+// which is updated.
+// The indices specified in 'start_indices' specify the offset of the slice
+// of 'operand' which is updated.
+//
+// update = {10, 11} // calculated at runtime.
+// [1 2 3] start = {1, 1} // calculated at runtime. [1 2 3 ]
+// [4 5 6] => DynamicUpdateslice(data, update, start) => [4 10 11]
+// [7 8 9] [7 8 9 ]
+//
+// The shape of 'start_indices' must have rank == 1, with dimension size
+// equal to the rank of the 'operand'.
+// Slice index calculations are computed modulo update dimension sizes to
+// prevent dynamic start indices from generating out-of-bound array accesses.
+XlaOp DynamicUpdateSlice(const XlaOp& operand, const XlaOp& update,
+ const XlaOp& start_indices);
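
The runtime example above, expressed as ops (a sketch; `data` is assumed to be the 3x3 matrix shown, with constant stand-ins for the runtime values):

xla::XlaOp DynamicUpdateSliceExample(xla::XlaBuilder* b,
                                     const xla::XlaOp& data) {
  auto update = xla::ConstantR2<float>(b, {{10.0f, 11.0f}});  // 1x2 update
  auto start = xla::ConstantR1<xla::int32>(b, {1, 1});        // rank-1 offsets
  return xla::DynamicUpdateSlice(data, update, start);
}
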
+
+// Enqueues a concatenate instruction onto the computation. 'operands' must
+// have >= 1 entry.
+XlaOp ConcatInDim(XlaBuilder* builder,
+ tensorflow::gtl::ArraySlice<XlaOp> operands, int64 dimension);
+
+// Enqueues a tracing operation onto the computation; the computation will emit
+// a logging message with the operand.
+void Trace(const string& tag, const XlaOp& operand);
+
+// Enqueues a conditional-move-like select operation onto the computation;
+// predicated on pred, selects between on_true and on_false.
+XlaOp Select(const XlaOp& pred, const XlaOp& on_true, const XlaOp& on_false);
+
+// Enqueues a tuple-creation instruction onto the computation.
+XlaOp Tuple(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> elements);
+
+// Enqueues a tuple-element-get instruction onto the computation.
+XlaOp GetTupleElement(const XlaOp& tuple_data, int64 index);
+
+// Enqueues an equal-to comparison instruction onto the computation.
+XlaOp Eq(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues a not-equal comparison instruction onto the computation.
+XlaOp Ne(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues a greater-or-equal comparison instruction onto the computation.
+XlaOp Ge(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues a greater-than comparison instruction onto the computation.
+XlaOp Gt(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues a less-than comparison instruction onto the computation.
+XlaOp Lt(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues a less-or-equal comparison instruction onto the computation.
+XlaOp Le(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues a dot instruction onto the computation.
+XlaOp Dot(const XlaOp& lhs, const XlaOp& rhs);
+
+// Enqueues a general dot instruction onto the computation.
+XlaOp DotGeneral(const XlaOp& lhs, const XlaOp& rhs,
+ const DotDimensionNumbers& dimension_numbers);
+
+// Enqueues a convolution instruction onto the computation, which uses the
+// default convolution dimension numbers.
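+//
+// A minimal sketch (illustrative only). The default dimension numbers treat
+// the lhs as [batch, feature, spatial...] and the rhs as
+// [output_feature, input_feature, spatial...]:
+//
+//   auto img = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {1, 1, 28, 28}), "i");
+//   auto ker = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {32, 1, 5, 5}), "k");
+//   auto out = Conv(img, ker, /*window_strides=*/{1, 1}, Padding::kSame);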
+XlaOp Conv(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding);
+
+// Enqueues a convolution instruction onto the computation, with the caller
+// provided padding configuration in the format returned by MakePadding().
+XlaOp ConvWithGeneralPadding(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding);
+
+// Enqueues a convolution instruction onto the computation, with the caller
+// provided dimension numbers configuration.
+XlaOp ConvWithGeneralDimensions(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
+ const ConvolutionDimensionNumbers& dimension_numbers);
+
+// Enqueues a convolution instruction onto the computation, with the caller
+// provided padding configuration as well as the dimension numbers.
+XlaOp ConvGeneral(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ const ConvolutionDimensionNumbers& dimension_numbers);
+
+// Enqueues a convolution instruction onto the computation, with the caller
+// provided padding configuration, dilation factors and dimension numbers.
+XlaOp ConvGeneralDilated(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ tensorflow::gtl::ArraySlice<int64> lhs_dilation,
+ tensorflow::gtl::ArraySlice<int64> rhs_dilation,
+ const ConvolutionDimensionNumbers& dimension_numbers);
+
+// Enqueues an FFT instruction onto the computation, of the given type and
+// with the given FFT length.
+XlaOp Fft(const XlaOp& operand, FftType fft_type,
+ tensorflow::gtl::ArraySlice<int64> fft_length);
+
+// Enqueues an infeed instruction onto the computation, which writes data of
+// the given shape to the infeed buffer of the device.
+XlaOp Infeed(XlaBuilder* builder, const Shape& shape,
+ const string& config = "");
+
+// Variant of Infeed which takes a token-shaped operand and produces a
+// two-element tuple containing the data value and a token-shaped value.
+// Tokens are used for ordering side-effecting operations.
+// TODO(b/110532604): Replace all uses of the non-token form with this variant.
+XlaOp InfeedWithToken(const XlaOp& token, const Shape& shape,
+ const string& config = "");
+
+// Enqueues an outfeed instruction onto the computation. This instruction
+// generates outgoing data transfers for the given data.
+//
+// shape_with_layout communicates the laid out shape that we want to outfeed
+// -- if !ShapeUtil::Compatible(GetShape(operand), shape_with_layout) an error
+// will occur.
+void Outfeed(const XlaOp& operand, const Shape& shape_with_layout,
+ const string& outfeed_config);
+
+// Variant of Outfeed which takes a token-shaped operand and produces a
+// token-shaped value. Tokens are used for ordering side-effecting operations.
+// TODO(b/110532604): Replace all uses of the non-token form with this variant.
+XlaOp OutfeedWithToken(const XlaOp& operand, const XlaOp& token,
+ const Shape& shape_with_layout,
+ const string& outfeed_config);
+
+// Enqueues a call instruction onto the computation.
+XlaOp Call(XlaBuilder* builder, const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<XlaOp> operands);
+
+// Enqueues a custom call instruction onto the computation.
+// During code generation, a call instruction is emitted which targets a
+// symbol with the name |call_target_name|. The |operands| are passed to the
+// call instruction. |shape| is the resultant shape.
+XlaOp CustomCall(XlaBuilder* builder, const string& call_target_name,
+ tensorflow::gtl::ArraySlice<XlaOp> operands,
+ const Shape& shape);
+
+// Enqueues a pseudo-op to represent host-side computation data-dependencies.
+// During code generation, host send and receive operations will be generated
+// to transfer |operands| to the host and a single result of |shape| back to
+// the device. Host send/recv operations are emitted using |channel_name|.
+// Dataflow dependencies and the |cost_estimate_ns| field may be used in HLO
+// instruction scheduling.
+XlaOp HostCompute(XlaBuilder* builder,
+ tensorflow::gtl::ArraySlice<XlaOp> operands,
+ const string& channel_name, int64 cost_estimate_ns,
+ const Shape& shape);
+
+// The following methods enqueue element-wise binary arithmetic operations
+// onto the computation. The shapes of the operands have to match unless one
+// of the operands is a scalar, or an explicit broadcast dimension is given
+// (see g3doc for more details).
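+//
+// For example (illustrative only; mirrors the builder tests), an s32[2, 4]
+// operand can be added to an s32[2, 4, 6] operand by matching its two
+// dimensions to dimensions 0 and 1 of the larger shape:
+//
+//   auto x = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 4, 6}), "x");
+//   auto y = Parameter(&b, 1, ShapeUtil::MakeShape(S32, {2, 4}), "y");
+//   auto sum = Add(x, y, /*broadcast_dimensions=*/{0, 1});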
+
+// Enqueues a complex compose instruction onto the computation.
+XlaOp Complex(const XlaOp& real, const XlaOp& imag,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues a complex conjugate instruction onto the computation.
+XlaOp Conj(const XlaOp& operand);
+
+// Enqueues an add instruction onto the computation.
+XlaOp Add(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues a subtract instruction onto the computation.
+XlaOp Sub(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues a multiply instruction onto the computation.
+XlaOp Mul(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues a divide instruction onto the computation.
+XlaOp Div(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues a remainder instruction onto the computation.
+XlaOp Rem(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues a max instruction onto the computation.
+XlaOp Max(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues a min instruction onto the computation.
+XlaOp Min(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Element-wise logical operators
+XlaOp And(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+XlaOp Or(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+XlaOp Xor(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+XlaOp Not(const XlaOp& operand);
+
+XlaOp ShiftLeft(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+XlaOp ShiftRightArithmetic(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+XlaOp ShiftRightLogical(
+ const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Reduces an array among the provided dimensions, given "computation" as a
+// reduction operator.
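+//
+// For example (illustrative only), summing over dimension 0 of an f32[8, 16]
+// `operand` with a scalar add computation yields an f32[16] result:
+//
+//   XlaBuilder add_b("add");
+//   Add(Parameter(&add_b, 0, ShapeUtil::MakeShape(F32, {}), "a"),
+//       Parameter(&add_b, 1, ShapeUtil::MakeShape(F32, {}), "b"));
+//   XlaComputation add = add_b.Build().ConsumeValueOrDie();
+//   auto sum = Reduce(operand, ConstantR0<float>(&b, 0.0f), add,
+//                     /*dimensions_to_reduce=*/{0});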
+XlaOp Reduce(const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce);
+
+// Convenience wrapper around the above that reduces all the dimensions in the
+// operand shape.
+XlaOp ReduceAll(const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation);
+
+// Enqueues a windowed reduce instruction onto the computation.
+XlaOp ReduceWindow(const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> window_dimensions,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ Padding padding);
+
+// As ReduceWindow(), but the padding is given in the format
+// returned by MakePadding().
+XlaOp ReduceWindowWithGeneralPadding(
+ const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> window_dimensions,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding);
+
+// Returns the sum of the operand value within each subgroup of replicas. All
+// replicas supply one input to the sum and all replicas receive the resulting
+// sum for each subgroup.
+XlaOp CrossReplicaSum(
+ const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> replica_group_ids = {});
+
+// Enqueues an operation that does an AllReduce of the operand across cores.
+// Here AllReduce means doing a reduction on the input operand across cores
+// and then broadcasting the reduction result to those cores. The reduction
+// function is defined by `computation`, which should be a commutative
+// computation on scalars, e.g., add, min, or max. The way that AllReduce is
+// applied is configured by:
+//
+// - `replica_group_ids`: maps replica ids to subgroup ids. If empty, all
+// replicas belong to one group. AllReduce will be applied within subgroups.
+// For example, with 4 replicas, replica_group_ids={0,1,0,1} means replicas 0
+// and 2 are in subgroup 0, and replicas 1 and 3 are in subgroup 1.
+//
+// - `channel_id`: for AllReduce nodes from different models, if they have the
+// same channel_id, they will be 'AllReduce'd together. If empty, AllReduce
+// will not be applied across models.
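+//
+// A minimal sketch (illustrative only; assumes 4 replicas and a scalar `add`
+// computation built as in the Reduce example above):
+//
+//   // Sum within subgroups {0, 2} and {1, 3} independently.
+//   auto sum = CrossReplicaSum(operand, add,
+//                              /*replica_group_ids=*/{0, 1, 0, 1});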
+//
+// TODO(b/79737069): Rename this to AllReduce when it's ready to use.
+XlaOp CrossReplicaSum(const XlaOp& operand, const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> replica_group_ids = {},
+ const tensorflow::gtl::optional<ChannelHandle>&
+ channel_id = tensorflow::gtl::nullopt);
+
+// Enqueues an operation that scatters the `source` array to the selected
+// indices of each window.
+XlaOp SelectAndScatter(const XlaOp& operand, const XlaComputation& select,
+ tensorflow::gtl::ArraySlice<int64> window_dimensions,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ Padding padding, const XlaOp& source,
+ const XlaOp& init_value, const XlaComputation& scatter);
+
+// As SelectAndScatter(), but the padding is given in the format
+// returned by MakePadding().
+XlaOp SelectAndScatterWithGeneralPadding(
+ const XlaOp& operand, const XlaComputation& select,
+ tensorflow::gtl::ArraySlice<int64> window_dimensions,
+ tensorflow::gtl::ArraySlice<int64> window_strides,
+ tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ const XlaOp& source, const XlaOp& init_value,
+ const XlaComputation& scatter);
+
+// Enqueues an abs instruction onto the computation.
+XlaOp Abs(const XlaOp& operand);
+
+// Enqueues an atan2 instruction onto the computation.
+XlaOp Atan2(const XlaOp& y, const XlaOp& x,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues an exp instruction onto the computation.
+XlaOp Exp(const XlaOp& operand);
+
+// Enqueues an expm1 instruction onto the computation.
+XlaOp Expm1(const XlaOp& operand);
+
+// Enqueues a floor instruction onto the computation.
+XlaOp Floor(const XlaOp& operand);
+
+// Enqueues a ceil instruction onto the computation.
+XlaOp Ceil(const XlaOp& operand);
+
+// Enqueues a round instruction onto the computation, rounding to the nearest
+// integer, with half-way cases rounding away from zero.
+XlaOp Round(const XlaOp& operand);
+
+// Enqueues a log instruction (natural logarithm) onto the computation.
+XlaOp Log(const XlaOp& operand);
+
+// Enqueues a log1p instruction (log(x+1)) onto the computation.
+XlaOp Log1p(const XlaOp& operand);
+
+// Enqueues a sign instruction onto the computation.
+XlaOp Sign(const XlaOp& operand);
+
+// Enqueues a count leading zeros instruction onto the computation.
+XlaOp Clz(const XlaOp& operand);
+
+// Enqueues a cosine instruction onto the computation.
+XlaOp Cos(const XlaOp& operand);
+
+// Enqueues a sine instruction onto the computation.
+XlaOp Sin(const XlaOp& operand);
+
+// Enqueues a tanh instruction onto the computation.
+XlaOp Tanh(const XlaOp& operand);
+
+// Enqueues a real-part instruction onto the computation.
+XlaOp Real(const XlaOp& operand);
+
+// Enqueues an imaginary-part instruction onto the computation.
+XlaOp Imag(const XlaOp& operand);
+
+// Enqueues a lhs^rhs (power) instruction onto the computation.
+XlaOp Pow(const XlaOp& lhs, const XlaOp& rhs,
+ tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+
+// Enqueues an operator that tests if the operand's values are finite, i.e.,
+// not Inf or NaN. Defined only for floating-point types. Returns an array of
+// booleans with the same shape where entries are true iff the corresponding
+// entry was finite.
+XlaOp IsFinite(const XlaOp& operand);
+
+// Enqueues a convert instruction onto the computation that changes the
+// element type of the operand array to primitive_type.
+XlaOp ConvertElementType(const XlaOp& operand, PrimitiveType new_element_type);
+
+// Enqueues a no-op instruction onto the computation that changes
+// the element type of the operand array to primitive_type. The
+// bit-widths of the source and destination element types must be
+// identical.
+XlaOp BitcastConvertType(const XlaOp& operand, PrimitiveType new_element_type);
+
+// Enqueues a negate instruction onto the computation.
+XlaOp Neg(const XlaOp& operand);
+
+// Enqueues a transpose instruction onto the computation.
+XlaOp Transpose(const XlaOp& operand,
+ tensorflow::gtl::ArraySlice<int64> permutation);
+
+// Enqueues a reverse instruction onto the computation. The order of the
+// elements in the given dimensions is reversed (i.e., the element at index i
+// is moved to index dimension_size - 1 - i).
+XlaOp Rev(const XlaOp& operand, tensorflow::gtl::ArraySlice<int64> dimensions);
+
+// Enqueues a sort (as increasing order) instruction onto the computation.
+// If only keys are provided:
+// * If the keys are a rank-1 tensor (an array), the result is a sorted array
+// of keys, in ascending order.
+// * If the keys have higher rank, the keys are sorted along the provided
+// dimension. For example, for a rank-2 tensor (a matrix) of keys, a dimension
+// value of 0 will independently sort every column, and a dimension value of 1
+// will independently sort each row. If no dimension number is provided, then
+// the last dimension is chosen by default.
+//
+// If both keys and values are provided:
+// * The keys and the values must be tensors with the same dimensions. The
+// element types of the tensors may be different.
+// * The result is a tuple that consists of a sorted tensor of keys (along the
+// provided dimension, as above) as the first element, and a tensor with their
+// corresponding values as the second element.
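+//
+// For example (illustrative only), sorting each row of a matrix of keys and
+// reordering a same-shaped matrix of values along with it:
+//
+//   auto keys = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 8}), "k");
+//   auto vals = Parameter(&b, 1, ShapeUtil::MakeShape(S32, {2, 8}), "v");
+//   auto sorted = Sort(keys, vals, /*dimension=*/1);  // (keys, values) tuple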
+XlaOp Sort(XlaOp keys,
+ tensorflow::gtl::optional<XlaOp> values = tensorflow::gtl::nullopt,
+ int64 dimension = -1);
+
+// Enqueues a clamp instruction onto the computation.
+XlaOp Clamp(const XlaOp& min, const XlaOp& operand, const XlaOp& max);
+
+// Enqueues a map instruction onto the computation.
+XlaOp Map(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> operands,
+ const XlaComputation& computation,
+ tensorflow::gtl::ArraySlice<int64> dimensions,
+ tensorflow::gtl::ArraySlice<XlaOp> static_operands = {});
+
+// Enqueues an N(mu, sigma) random number generation instruction onto the
+// computation.
+XlaOp RngNormal(const XlaOp& mu, const XlaOp& sigma, const Shape& shape);
+
+// Enqueues a U(a, b) random number generation instruction onto the
+// computation. Returns values in the semi-open interval [a, b).
+XlaOp RngUniform(const XlaOp& a, const XlaOp& b, const Shape& shape);
+
+// Enqueues a while node onto the computation.
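+//
+// A minimal sketch (illustrative only): count a scalar from 0 up to 10. The
+// condition and body computations each take the loop state as their single
+// parameter.
+//
+//   XlaBuilder cond_b("cond"), body_b("body");
+//   Lt(Parameter(&cond_b, 0, ShapeUtil::MakeShape(S32, {}), "s"),
+//      ConstantR0<int32>(&cond_b, 10));
+//   Add(Parameter(&body_b, 0, ShapeUtil::MakeShape(S32, {}), "s"),
+//       ConstantR0<int32>(&body_b, 1));
+//   auto loop = While(cond_b.Build().ConsumeValueOrDie(),
+//                     body_b.Build().ConsumeValueOrDie(),
+//                     ConstantR0<int32>(&b, 0));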
+XlaOp While(const XlaComputation& condition, const XlaComputation& body,
+ const XlaOp& init);
+
+// Enqueues a conditional node onto the computation.
+XlaOp Conditional(const XlaOp& predicate, const XlaOp& true_operand,
+ const XlaComputation& true_computation,
+ const XlaOp& false_operand,
+ const XlaComputation& false_computation);
+
+// Enqueues a ReducePrecision node onto the computation.
+XlaOp ReducePrecision(const XlaOp& operand, const int exponent_bits,
+ const int mantissa_bits);
+
+// Enqueues a Gather node onto the computation.
+XlaOp Gather(const XlaOp& input, const XlaOp& gather_indices,
+ const GatherDimensionNumbers& dimension_numbers,
+ tensorflow::gtl::ArraySlice<int64> window_bounds);
+
+// Enqueues a Send node onto the computation, to send the given operand to
+// a Recv instruction that shares the same channel handle.
+void Send(const XlaOp& operand, const ChannelHandle& handle);
+
+// Variant of Send which takes a token-shaped operand and produces a
+// token-shaped value. Tokens are used for ordering side-effecting operations.
+// TODO(b/110532604): Replace all uses of the non-token form with this variant.
+XlaOp SendWithToken(const XlaOp& operand, const XlaOp& token,
+ const ChannelHandle& handle);
+
+// Enqueues a Recv node onto the computation. The data comes from a Send
+// instruction that shares the same channel handle and its shape must
+// be the same as the given shape.
+XlaOp Recv(XlaBuilder* builder, const Shape& shape,
+ const ChannelHandle& handle);
+
+// Variant of Recv which takes a token-shaped operand and produces a two-element
+// tuple containing the data value and a token-shaped value. Tokens are used
+// for ordering side-effecting operations.
+// TODO(b/110532604): Replace all uses of the non-token form with this variant.
+XlaOp RecvWithToken(const XlaOp& token, const Shape& shape,
+ const ChannelHandle& handle);
+
+// Enqueues an operation (AfterAll) with no operands that produces a
+// token-shaped value. Tokens are used for ordering side-effecting operations.
+// This is a separate method from AfterAll to facilitate the removal of
+// operand-less AfterAll instructions.
+// TODO(b/110532604): Remove this function when all tokens are derived from a
+// single token generated or passed into the entry computation.
+XlaOp CreateToken(XlaBuilder* builder);
+
+// Enqueues an AfterAll instruction which produces a token-shaped value and
+// takes a variadic number of token-shaped operands. The number of operands must
+// be greater than zero. Used for joining tokens.
+XlaOp AfterAll(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> tokens);
+
+// Normalizes operand across spatial and batch dimensions for each feature.
+//
+// Returns a tuple (normalized, batch_mean, batch_var) where `normalized`
+// is the normalized result and batch_mean and batch_var are the mean and
+// variance, respectively, across batch for the operand.
+XlaOp BatchNormTraining(const XlaOp& operand, const XlaOp& scale,
+ const XlaOp& offset, float epsilon,
+ int64 feature_index);
+
+// Normalizes operand across spatial and batch dimensions for each feature.
+//
+// `BatchNormInference` is equivalent to calling `BatchNormTraining` without
+// computing `mean` and `variance` for each batch inside the operation. It
+// uses the input `mean` and `variance` instead as estimated values. The
+// purpose of this op is to reduce latency in inference, hence the name
+// `BatchNormInference`.
+//
+// The output has the same shape as `operand`, and contains the normalized
+// values for each batch.
+XlaOp BatchNormInference(const XlaOp& operand, const XlaOp& scale,
+ const XlaOp& offset, const XlaOp& mean,
+ const XlaOp& variance, float epsilon,
+ int64 feature_index);
+
+// Calculates the gradients of a batch norm op.
+//
+// The inputs `batch_mean` and `batch_var` represent the mean and variance
+// across the batch.
+//
+// Returns a tuple of three elements:
+// - grad_operand: Gradient with respect to input `operand`
+// - grad_offset: Gradient with respect to input `offset`
+// - grad_scale: Gradient with respect to input `scale`
+XlaOp BatchNormGrad(const XlaOp& operand, const XlaOp& scale,
+ const XlaOp& batch_mean, const XlaOp& batch_var,
+ const XlaOp& grad_output, float epsilon,
+ int64 feature_index);
+
+// Implementation details below this point.
+
template <typename NativeT>
XlaOp XlaBuilder::ConstantR0(NativeT value) {
- return ConstantLiteral(*Literal::CreateR0<NativeT>(value));
+ return ConstantLiteral(*LiteralUtil::CreateR0<NativeT>(value));
}
template <typename NativeT>
XlaOp XlaBuilder::ConstantR1(tensorflow::gtl::ArraySlice<NativeT> values) {
- return ConstantLiteral(*Literal::CreateR1<NativeT>(values));
+ return ConstantLiteral(*LiteralUtil::CreateR1<NativeT>(values));
}
template <typename NativeT>
@@ -954,44 +2046,44 @@ XlaOp XlaBuilder::ConstantR1(int64 length, NativeT value) {
}
inline XlaOp XlaBuilder::ConstantR1(const tensorflow::core::Bitmap& values) {
- return ConstantLiteral(*Literal::CreateR1(values));
+ return ConstantLiteral(*LiteralUtil::CreateR1(values));
}
template <typename NativeT>
XlaOp XlaBuilder::ConstantR2(
std::initializer_list<std::initializer_list<NativeT>> values) {
- return ConstantLiteral(*Literal::CreateR2<NativeT>(values));
+ return ConstantLiteral(*LiteralUtil::CreateR2<NativeT>(values));
}
template <typename NativeT>
XlaOp XlaBuilder::ConstantFromArrayWithLayout(const Array<NativeT>& values,
const Layout& layout) {
return ConstantLiteral(
- *Literal::CreateFromArrayWithLayout<NativeT>(values, layout));
+ *LiteralUtil::CreateFromArrayWithLayout<NativeT>(values, layout));
}
template <typename NativeT>
XlaOp XlaBuilder::ConstantFromArray(const Array<NativeT>& values) {
- return ConstantLiteral(*Literal::CreateFromArray<NativeT>(values));
+ return ConstantLiteral(*LiteralUtil::CreateFromArray<NativeT>(values));
}
template <typename NativeT>
XlaOp XlaBuilder::ConstantR2FromArray2DWithLayout(
const Array2D<NativeT>& values, const Layout& layout) {
return ConstantLiteral(
- *Literal::CreateFromArrayWithLayout<NativeT>(values, layout));
+ *LiteralUtil::CreateFromArrayWithLayout<NativeT>(values, layout));
}
template <typename NativeT>
XlaOp XlaBuilder::ConstantR2FromArray2D(const Array2D<NativeT>& values) {
- return ConstantLiteral(*Literal::CreateR2FromArray2D<NativeT>(values));
+ return ConstantLiteral(*LiteralUtil::CreateR2FromArray2D<NativeT>(values));
}
template <typename NativeT>
XlaOp XlaBuilder::ConstantR3FromArray3DWithLayout(
const Array3D<NativeT>& values, const Layout& layout) {
return ConstantLiteral(
- *Literal::CreateR3FromArray3DWithLayout<NativeT>(values, layout));
+ *LiteralUtil::CreateR3FromArray3DWithLayout<NativeT>(values, layout));
}
template <typename NativeT>
@@ -1010,34 +2102,96 @@ XlaOp XlaBuilder::ConstantR4FromArray4D(const Array4D<NativeT>& values) {
return ConstantFromArray(values);
}
-// RAII-style object: sets the current sharding assignment in builder on
-// construction, and sets back to the previous assignment on destruction.
-class XlaScopedShardingAssignment {
- public:
- XlaScopedShardingAssignment(xla::XlaBuilder* builder,
- tensorflow::gtl::optional<OpSharding> sharding)
- : builder_(builder), prev_sharding_(builder->sharding()) {
- SetSharding(sharding);
- }
+// Free function template implementations.
- XlaScopedShardingAssignment(const XlaScopedShardingAssignment&) = delete;
- XlaScopedShardingAssignment& operator=(const XlaScopedShardingAssignment&) =
- delete;
+template <typename NativeT>
+XlaOp ConstantR0(XlaBuilder* builder, NativeT value) {
+ return ConstantLiteral(builder, *LiteralUtil::CreateR0<NativeT>(value));
+}
- ~XlaScopedShardingAssignment() { SetSharding(prev_sharding_); }
+template <typename NativeT>
+XlaOp ConstantR1(XlaBuilder* builder,
+ tensorflow::gtl::ArraySlice<NativeT> values) {
+ return ConstantLiteral(builder, *LiteralUtil::CreateR1<NativeT>(values));
+}
- private:
- void SetSharding(const tensorflow::gtl::optional<OpSharding>& sharding) {
- if (sharding.has_value()) {
- builder_->SetSharding(sharding.value());
- } else {
- builder_->ClearSharding();
- }
- }
+template <typename NativeT>
+XlaOp ConstantR1(XlaBuilder* builder, int64 length, NativeT value) {
+ Literal literal(ShapeUtil::MakeShape(
+ primitive_util::NativeToPrimitiveType<NativeT>(), {length}));
+ literal.PopulateWithValue(value);
+ return ConstantLiteral(builder, literal);
+}
- xla::XlaBuilder* const builder_;
- tensorflow::gtl::optional<OpSharding> prev_sharding_;
-};
+inline XlaOp ConstantR1(XlaBuilder* builder,
+ const tensorflow::core::Bitmap& values) {
+ return ConstantLiteral(builder, *LiteralUtil::CreateR1(values));
+}
+
+template <typename NativeT>
+XlaOp ConstantR2(XlaBuilder* builder,
+ std::initializer_list<std::initializer_list<NativeT>> values) {
+ return ConstantLiteral(builder, *LiteralUtil::CreateR2<NativeT>(values));
+}
+
+template <typename NativeT>
+XlaOp ConstantFromArrayWithLayout(XlaBuilder* builder,
+ const Array<NativeT>& values,
+ const Layout& layout) {
+ return ConstantLiteral(
+ builder,
+ *LiteralUtil::CreateFromArrayWithLayout<NativeT>(values, layout));
+}
+
+template <typename NativeT>
+XlaOp ConstantFromArray(XlaBuilder* builder, const Array<NativeT>& values) {
+ return ConstantLiteral(builder,
+ *LiteralUtil::CreateFromArray<NativeT>(values));
+}
+
+template <typename NativeT>
+XlaOp ConstantR2FromArray2DWithLayout(XlaBuilder* builder,
+ const Array2D<NativeT>& values,
+ const Layout& layout) {
+ return ConstantLiteral(
+ builder,
+ *LiteralUtil::CreateFromArrayWithLayout<NativeT>(values, layout));
+}
+
+template <typename NativeT>
+XlaOp ConstantR2FromArray2D(XlaBuilder* builder,
+ const Array2D<NativeT>& values) {
+ return ConstantLiteral(builder,
+ *LiteralUtil::CreateR2FromArray2D<NativeT>(values));
+}
+
+template <typename NativeT>
+XlaOp ConstantR3FromArray3DWithLayout(XlaBuilder* builder,
+ const Array3D<NativeT>& values,
+ const Layout& layout) {
+ return ConstantLiteral(
+ builder,
+ *LiteralUtil::CreateR3FromArray3DWithLayout<NativeT>(values, layout));
+}
+
+template <typename NativeT>
+XlaOp ConstantR3FromArray3D(XlaBuilder* builder,
+ const Array3D<NativeT>& values) {
+ return ConstantFromArray(builder, values);
+}
+
+template <typename NativeT>
+XlaOp ConstantR4FromArray4DWithLayout(XlaBuilder* builder,
+ const Array4D<NativeT>& values,
+ const Layout& layout) {
+ return ConstantFromArrayWithLayout(builder, values, layout);
+}
+
+template <typename NativeT>
+XlaOp ConstantR4FromArray4D(XlaBuilder* builder,
+ const Array4D<NativeT>& values) {
+ return ConstantFromArray(builder, values);
+}
} // namespace xla
diff --git a/tensorflow/compiler/xla/client/xla_client/xla_builder_test.cc b/tensorflow/compiler/xla/client/xla_client/xla_builder_test.cc
index 0680b38f3a..3b8beb2c78 100644
--- a/tensorflow/compiler/xla/client/xla_client/xla_builder_test.cc
+++ b/tensorflow/compiler/xla/client/xla_client/xla_builder_test.cc
@@ -53,16 +53,86 @@ class XlaBuilderTest : public ::testing::Test {
TEST_F(XlaBuilderTest, OnePlusTwo) {
XlaBuilder b(TestName());
- b.Add(b.ConstantR0<float>(1.0), b.ConstantR0<float>(2.0));
+ Add(ConstantR0<float>(&b, 1.0), ConstantR0<float>(&b, 2.0));
TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(&b));
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Add(op::Constant(), op::Constant()));
}
+TEST_F(XlaBuilderTest, UnaryOperatorsBuildExpectedHLO) {
+ auto test_unary_operator =
+ [&](std::function<XlaOp(XlaOp)> op,
+ ::testing::Matcher<const ::xla::HloInstruction*> matches_pattern) {
+ XlaBuilder b(TestName());
+ op(ConstantR0<int32>(&b, 1));
+ TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(&b));
+ auto root = module->entry_computation()->root_instruction();
+ EXPECT_THAT(root, matches_pattern);
+ };
+ test_unary_operator([](XlaOp x) { return -x; }, op::Negate(op::Constant()));
+ test_unary_operator([](XlaOp x) { return ~x; }, op::Not(op::Constant()));
+}
+
+TEST_F(XlaBuilderTest, BinaryOperatorsBuildExpectedHLO) {
+ auto test_binary_operator =
+ [&](std::function<XlaOp(XlaOp, XlaOp)> op,
+ ::testing::Matcher<const ::xla::HloInstruction*> matches_pattern) {
+ XlaBuilder b(TestName());
+ op(ConstantR0<int32>(&b, 1), ConstantR0<int32>(&b, 2));
+ TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(&b));
+ auto root = module->entry_computation()->root_instruction();
+ EXPECT_THAT(root, matches_pattern);
+ };
+
+ test_binary_operator([](XlaOp x, XlaOp y) { return x + y; },
+ op::Add(op::Constant(), op::Constant()));
+ test_binary_operator([](XlaOp x, XlaOp y) { return x - y; },
+ op::Subtract(op::Constant(), op::Constant()));
+ test_binary_operator([](XlaOp x, XlaOp y) { return x * y; },
+ op::Multiply(op::Constant(), op::Constant()));
+ test_binary_operator([](XlaOp x, XlaOp y) { return x / y; },
+ op::Divide(op::Constant(), op::Constant()));
+
+ test_binary_operator([](XlaOp x, XlaOp y) { return x & y; },
+ op::And(op::Constant(), op::Constant()));
+ test_binary_operator([](XlaOp x, XlaOp y) { return x | y; },
+ op::Or(op::Constant(), op::Constant()));
+ test_binary_operator([](XlaOp x, XlaOp y) { return x ^ y; },
+ op::Xor(op::Constant(), op::Constant()));
+ test_binary_operator([](XlaOp x, XlaOp y) { return x << y; },
+ op::ShiftLeft(op::Constant(), op::Constant()));
+ test_binary_operator(
+ [](XlaOp x, XlaOp y) { return x >> y; },
+ op::ShiftRightArithmetic(op::Constant(), op::Constant()));
+
+ auto test_unsigned_binary_operator =
+ [&](std::function<XlaOp(XlaOp, XlaOp)> op,
+ ::testing::Matcher<const ::xla::HloInstruction*> matches_pattern) {
+ XlaBuilder b(TestName());
+ op(ConstantR0<uint32>(&b, 1), ConstantR0<uint32>(&b, 2));
+ TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(&b));
+ auto root = module->entry_computation()->root_instruction();
+ EXPECT_THAT(root, matches_pattern);
+ };
+ test_unsigned_binary_operator(
+ [](XlaOp x, XlaOp y) { return x >> y; },
+ op::ShiftRightLogical(op::Constant(), op::Constant()));
+}
+
+TEST_F(XlaBuilderTest, ShiftRightOperatorOnNonIntegerProducesError) {
+ XlaBuilder b(TestName());
+ ConstantR0<float>(&b, 1) >> ConstantR0<float>(&b, 2);
+ auto statusor = b.Build();
+ ASSERT_FALSE(statusor.ok());
+ EXPECT_THAT(
+ statusor.status().error_message(),
+ HasSubstr("Argument to >> operator does not have an integral type"));
+}
+
TEST_F(XlaBuilderTest, ParamPlusConstantHasScalarBroadcast) {
XlaBuilder b(TestName());
- auto x = b.Parameter(0, ShapeUtil::MakeShape(F32, {3, 5}), "x");
- b.Add(x, b.ConstantR0<float>(1.0));
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {3, 5}), "x");
+ Add(x, ConstantR0<float>(&b, 1.0));
TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(&b));
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Add(op::Parameter(), op::Broadcast(op::Constant())));
@@ -72,9 +142,9 @@ TEST_F(XlaBuilderTest, ParamPlusParamHasBroadcast) {
XlaBuilder b(TestName());
const auto& x_shape = ShapeUtil::MakeShape(S32, {2, 4, 6});
const auto& y_shape = ShapeUtil::MakeShape(S32, {2, 4});
- auto x = b.Parameter(0, x_shape, "x");
- auto y = b.Parameter(1, y_shape, "y");
- auto add = b.Add(x, y, /*broadcast_dimensions=*/{0, 1});
+ auto x = Parameter(&b, 0, x_shape, "x");
+ auto y = Parameter(&b, 1, y_shape, "y");
+ auto add = Add(x, y, /*broadcast_dimensions=*/{0, 1});
TF_ASSERT_OK_AND_ASSIGN(auto add_shape, b.GetShape(add));
EXPECT_TRUE(ShapeUtil::Equal(add_shape, x_shape));
@@ -86,8 +156,8 @@ TEST_F(XlaBuilderTest, ParamPlusParamHasBroadcast) {
TEST_F(XlaBuilderTest, XPlusX) {
XlaBuilder b(TestName());
- auto x = b.Parameter(0, ShapeUtil::MakeShape(S32, {1, 3, 5, 7}), "x");
- b.Add(x, x);
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {1, 3, 5, 7}), "x");
+ Add(x, x);
TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(&b));
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Add(op::Parameter(0), op::Parameter(0)));
@@ -95,9 +165,9 @@ TEST_F(XlaBuilderTest, XPlusX) {
TEST_F(XlaBuilderTest, ShapeInferenceError) {
XlaBuilder b(TestName());
- auto x = b.Parameter(0, ShapeUtil::MakeShape(U32, {2, 4, 6}), "x");
- auto y = b.Parameter(1, ShapeUtil::MakeShape(U32, {2, 4}), "y");
- b.Add(x, y);
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(U32, {2, 4, 6}), "x");
+ auto y = Parameter(&b, 1, ShapeUtil::MakeShape(U32, {2, 4}), "y");
+ Add(x, y);
auto statusor = BuildHloModule(&b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().error_message(), HasSubstr("shape inference"));
@@ -105,12 +175,12 @@ TEST_F(XlaBuilderTest, ShapeInferenceError) {
TEST_F(XlaBuilderTest, ParameterAlreadyRegistered) {
XlaBuilder b_call("add");
- b_call.Parameter(0, ShapeUtil::MakeShape(PRED, {}), "x");
+ Parameter(&b_call, 0, ShapeUtil::MakeShape(PRED, {}), "x");
XlaBuilder b(TestName());
- auto x = b.Parameter(0, ShapeUtil::MakeShape(PRED, {}), "x");
- auto y = b.Parameter(0, ShapeUtil::MakeShape(PRED, {}), "y");
- b.Add(x, y);
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(PRED, {}), "x");
+ auto y = Parameter(&b, 0, ShapeUtil::MakeShape(PRED, {}), "y");
+ Add(x, y);
auto statusor = BuildHloModule(&b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().error_message(),
@@ -119,16 +189,16 @@ TEST_F(XlaBuilderTest, ParameterAlreadyRegistered) {
TEST_F(XlaBuilderTest, Call) {
XlaBuilder b_call("the_only_to_apply");
- auto p0 = b_call.Parameter(0, ShapeUtil::MakeShape(F32, {}), "p0");
- auto p1 = b_call.Parameter(1, ShapeUtil::MakeShape(F32, {}), "p1");
- b_call.Add(p0, p1);
+ auto p0 = Parameter(&b_call, 0, ShapeUtil::MakeShape(F32, {}), "p0");
+ auto p1 = Parameter(&b_call, 1, ShapeUtil::MakeShape(F32, {}), "p1");
+ Add(p0, p1);
TF_ASSERT_OK_AND_ASSIGN(auto call, b_call.Build());
XlaBuilder b(TestName());
- auto x = b.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = b.Parameter(1, ShapeUtil::MakeShape(F32, {}), "y");
- auto one = b.ConstantR0<float>(1);
- auto two = b.ConstantR0<float>(2);
- b.Add(b.Call(call, {x, y}), b.Call(call, {one, two}));
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {}), "y");
+ auto one = ConstantR0<float>(&b, 1);
+ auto two = ConstantR0<float>(&b, 2);
+ Add(Call(&b, call, {x, y}), Call(&b, call, {one, two}));
TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(&b));
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Add(op::Call(op::Parameter(), op::Parameter()),
@@ -137,9 +207,9 @@ TEST_F(XlaBuilderTest, Call) {
TEST_F(XlaBuilderTest, BinopHasDegenerateBroadcast) {
XlaBuilder b(TestName());
- auto x = b.Parameter(0, ShapeUtil::MakeShape(F32, {1, 2, 3}), "x");
- auto y = b.Parameter(1, ShapeUtil::MakeShape(F32, {1, 2, 1}), "y");
- b.Add(x, y);
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {1, 2, 3}), "x");
+ auto y = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {1, 2, 1}), "y");
+ Add(x, y);
TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(&b));
// Expected:
@@ -158,9 +228,9 @@ TEST_F(XlaBuilderTest, BinopHasDegenerateBroadcast) {
TEST_F(XlaBuilderTest, BinopHasInDimAndDegenerateBroadcast) {
XlaBuilder b(TestName());
- auto x = b.Parameter(0, ShapeUtil::MakeShape(F32, {2, 3}), "x");
- auto y = b.Parameter(1, ShapeUtil::MakeShape(F32, {2, 1, 4}), "y");
- b.Add(x, y, /*broadcast_dimensions=*/{0, 1});
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 3}), "x");
+ auto y = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {2, 1, 4}), "y");
+ Add(x, y, /*broadcast_dimensions=*/{0, 1});
TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(&b));
// The binary operation has in-dim broadcast and degenerate broadcast, should
@@ -183,9 +253,10 @@ TEST_F(XlaBuilderTest, BinopHasInDimAndDegenerateBroadcast) {
TEST_F(XlaBuilderTest, OperandFromWrongBuilder) {
XlaBuilder b1("b1");
- auto p0 = b1.Parameter(0, ShapeUtil::MakeShape(F32, {}), "p0");
+ auto p0 = Parameter(&b1, 0, ShapeUtil::MakeShape(F32, {}), "p0");
XlaBuilder builder("main");
- builder.Add(p0, p0);
+ auto p = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "p");
+ Add(p, p0);
auto statusor = builder.Build();
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(
@@ -196,8 +267,8 @@ TEST_F(XlaBuilderTest, OperandFromWrongBuilder) {
TEST_F(XlaBuilderTest, ReshapeDefaultOrder) {
XlaBuilder b(TestName());
- auto x = b.Parameter(0, ShapeUtil::MakeShape(F32, {2, 3, 5, 7}), "x");
- b.Reshape(x, /*new_sizes=*/{6, 35});
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 3, 5, 7}), "x");
+ Reshape(x, /*new_sizes=*/{6, 35});
TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(&b));
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Reshape(op::Parameter()));
@@ -205,8 +276,8 @@ TEST_F(XlaBuilderTest, ReshapeDefaultOrder) {
TEST_F(XlaBuilderTest, ReshapeHasTranspose) {
XlaBuilder b(TestName());
- auto x = b.Parameter(0, ShapeUtil::MakeShape(F32, {2, 3, 5, 7}), "x");
- b.Reshape(x, /*dimensions=*/{3, 2, 1, 0}, /*new_sizes=*/{6, 35});
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 3, 5, 7}), "x");
+ Reshape(x, /*dimensions=*/{3, 2, 1, 0}, /*new_sizes=*/{6, 35});
TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(&b));
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Reshape(op::Transpose(op::Parameter())));
@@ -214,12 +285,39 @@ TEST_F(XlaBuilderTest, ReshapeHasTranspose) {
TEST_F(XlaBuilderTest, Transpose) {
XlaBuilder b(TestName());
- auto x = b.Parameter(0, ShapeUtil::MakeShape(F32, {5, 7}), "x");
- b.Transpose(x, /*permutation=*/{1, 0});
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}), "x");
+ Transpose(x, /*permutation=*/{1, 0});
TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(&b));
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Transpose(op::Parameter()));
}
+TEST_F(XlaBuilderTest, ReportError) {
+ XlaBuilder b(TestName());
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}), "x");
+ Add(b.ReportError(InvalidArgument("a test error")), x);
+ auto statusor = b.Build();
+ ASSERT_FALSE(statusor.ok());
+ EXPECT_THAT(statusor.status().error_message(), HasSubstr("a test error"));
+}
+
+TEST_F(XlaBuilderTest, ReportErrorOrReturnHandlesNonErrors) {
+ XlaBuilder b(TestName());
+ StatusOr<XlaOp> op(ConstantR0<float>(&b, 1.0));
+ Add(b.ReportErrorOrReturn(op), ConstantR0<float>(&b, 2.0));
+ TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(&b));
+ auto root = module->entry_computation()->root_instruction();
+ EXPECT_THAT(root, op::Add(op::Constant(), op::Constant()));
+}
+
+TEST_F(XlaBuilderTest, ReportErrorOrReturnHandlesErrors) {
+ XlaBuilder b(TestName());
+ StatusOr<XlaOp> op(InvalidArgument("a test error"));
+ Add(b.ReportErrorOrReturn(op), ConstantR0<float>(&b, 2.0));
+ auto statusor = b.Build();
+ ASSERT_FALSE(statusor.ok());
+ EXPECT_THAT(statusor.status().error_message(), HasSubstr("a test error"));
+}
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/layout_util.cc b/tensorflow/compiler/xla/layout_util.cc
index 3f059cac30..15eeb2ea13 100644
--- a/tensorflow/compiler/xla/layout_util.cc
+++ b/tensorflow/compiler/xla/layout_util.cc
@@ -248,6 +248,12 @@ Layout CreateDefaultLayoutForRank(int64 rank) {
}
}
+ if (layout.format() == SPARSE) {
+ if (!layout.padded_dimensions().empty()) {
+ return InvalidArgument("Sparse layout has padded dimensions");
+ }
+ }
+
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/literal.cc b/tensorflow/compiler/xla/literal.cc
new file mode 100644
index 0000000000..5db124b5a2
--- /dev/null
+++ b/tensorflow/compiler/xla/literal.cc
@@ -0,0 +1,1967 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/literal.h"
+
+#include <algorithm>
+#include <cstring>
+#include <functional>
+#include <limits>
+#include <numeric>
+#include <vector>
+
+#include "tensorflow/compiler/xla/index_util.h"
+#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/compiler/xla/status_macros.h"
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/compiler/xla/util.h"
+#include "tensorflow/core/lib/core/casts.h"
+#include "tensorflow/core/lib/core/errors.h"
+#include "tensorflow/core/lib/hash/hash.h"
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/lib/strings/strcat.h"
+#include "tensorflow/core/lib/strings/stringprintf.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/types.h"
+
+using tensorflow::strings::Printf;
+using tensorflow::strings::StrCat;
+
+namespace xla {
+
+namespace {
+
+constexpr bool kLittleEndian = __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__;
+
+// Converts between little and big endian.
+//
+// Precondition: size % 2 == 0 (elements in the array are 16 bits long)
+void ConvertEndianShort(string* bytes) {
+ CHECK_EQ(bytes->size() % 2, 0);
+ for (int64 i = 0; i < bytes->size(); i += 2) {
+ std::swap((*bytes)[i], (*bytes)[i + 1]);
+ }
+}
+
+void ConvertEndianShort(char* bytes, int64 size) {
+ CHECK_EQ(size % 2, 0);
+ for (int64 i = 0; i < size; i += 2) {
+ std::swap(bytes[i], bytes[i + 1]);
+ }
+}
+
+} // namespace
+
+LiteralBase::~LiteralBase() {}
+
+std::ostream& operator<<(std::ostream& out, const Literal& literal) {
+ out << literal.ToString();
+ return out;
+}
+
+Literal::StrideConfig::StrideConfig(
+ const Shape& source_shape, const Shape& dest_shape,
+ tensorflow::gtl::ArraySlice<int64> dimensions)
+ : dimensions(dimensions),
+ base(dimensions.size(), 0),
+ step(dimensions.size(), 1) {
+ if (!dimensions.empty()) {
+ // Selects the shape with the largest minor dimension as the one upon
+ // which to run the tight stride loop.
+ if (dimensions[LayoutUtil::Minor(source_shape.layout(), 0)] >=
+ dimensions[LayoutUtil::Minor(dest_shape.layout(), 0)]) {
+ minor_dimension = LayoutUtil::Minor(source_shape.layout(), 0);
+ dest_stride = IndexUtil::GetDimensionStride(dest_shape, minor_dimension);
+ } else {
+ minor_dimension = LayoutUtil::Minor(dest_shape.layout(), 0);
+ source_stride =
+ IndexUtil::GetDimensionStride(source_shape, minor_dimension);
+ }
+ minor_loop_size = dimensions[minor_dimension];
+ step[minor_dimension] = minor_loop_size;
+ }
+}
+
+Literal::Literal(const Shape& shape)
+ : Literal(shape, /*allocate_arrays=*/true) {}
+
+void Literal::SetPiece(const Shape& shape, Piece* piece, bool allocate_arrays) {
+ if (ShapeUtil::IsTuple(shape)) {
+ for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
+ const Shape& subshape = shape.tuple_shapes(i);
+
+ auto child_piece = Piece();
+ child_piece.set_subshape(&subshape);
+
+ SetPiece(subshape, &child_piece, allocate_arrays);
+
+ piece->emplace_back(std::move(child_piece));
+ }
+ } else if (ShapeUtil::IsArray(shape)) {
+ if (allocate_arrays) {
+ if (LayoutUtil::IsSparseArray(shape)) {
+ // For sparse arrays, the buffer must be of the size of the maximum
+ // number of sparse elements possible.
+ const int64 max_sparse_elements =
+ LayoutUtil::MaxSparseElements(shape.layout());
+ piece->set_buffer(
+ new char[max_sparse_elements *
+ ShapeUtil::ByteSizeOfPrimitiveType(shape.element_type())]);
+ piece->set_sparse_indices(
+ new SparseIndexArray(max_sparse_elements, ShapeUtil::Rank(shape)));
+ } else {
+ piece->set_buffer(new char[piece->size_bytes()]);
+ }
+ }
+ } else {
+ // If the shape is neither an array nor a tuple, then it must be
+ // zero-sized, and no buffer needs to be allocated for it.
+ CHECK_EQ(piece->size_bytes(), 0);
+ }
+}
+
+Literal::Literal(const Shape& shape, bool allocate_arrays)
+ : LiteralBase(), shape_(MakeUnique<Shape>(shape)) {
+ CHECK(LayoutUtil::HasLayout(*shape_));
+ root_piece_ = new Piece();
+ root_piece_->set_subshape(shape_.get());
+ CHECK(&root_piece_->subshape() == shape_.get());
+
+ SetPiece(*shape_, root_piece_, allocate_arrays);
+}
+
+Literal::~Literal() {
+ if (root_piece_ != nullptr) {
+ DeallocateBuffers();
+ delete root_piece_;
+ }
+}
+
+void Literal::DeallocateBuffers() {
+ root_piece_->ForEachMutableSubpiece(
+ [&](const ShapeIndex& index, Piece* piece) {
+ if (piece->buffer() != nullptr) {
+ delete[] piece->buffer();
+ delete piece->sparse_indices();
+ }
+ });
+}
+
+Literal::Literal(Literal&& other) : LiteralBase() { *this = std::move(other); }
+
+Literal& Literal::operator=(Literal&& other) {
+ DCHECK(&other.root_piece_->subshape() == other.shape_.get());
+ using std::swap;
+ swap(shape_, other.shape_);
+ swap(root_piece_, other.root_piece_);
+ DCHECK(&root_piece_->subshape() == shape_.get());
+
+ return *this;
+}
+
+std::unique_ptr<Literal> LiteralBase::CreateFromShape(const Shape& shape) {
+ auto literal = MakeUnique<Literal>(shape);
+ literal->root_piece_->ForEachMutableSubpiece(
+ [&](const ShapeIndex& index, Piece* piece) {
+ if (ShapeUtil::IsArray(piece->subshape())) {
+ memset(piece->untyped_data(), 0, piece->size_bytes());
+ }
+ });
+ return literal;
+}
+
+const SparseIndexArray* LiteralBase::sparse_indices(
+ const ShapeIndex& shape_index) const {
+ return piece(shape_index).sparse_indices();
+}
+
+SparseIndexArray* Literal::sparse_indices(const ShapeIndex& shape_index) {
+ return piece(shape_index).sparse_indices();
+}
+
+template <typename NativeT>
+Status Literal::CopySliceFromInternal(
+ const LiteralBase& src_literal, tensorflow::gtl::ArraySlice<int64> src_base,
+ tensorflow::gtl::ArraySlice<int64> dest_base,
+ tensorflow::gtl::ArraySlice<int64> copy_size) {
+ TF_RET_CHECK(ShapeUtil::Rank(src_literal.shape()) == src_base.size());
+ TF_RET_CHECK(ShapeUtil::Rank(shape()) == dest_base.size());
+
+ auto linear_index = [](const Shape& shape,
+ tensorflow::gtl::ArraySlice<int64> multi_index) {
+ return IndexUtil::MultidimensionalIndexToLinearIndex(shape, multi_index);
+ };
+
+ if (ShapeUtil::Rank(src_literal.shape()) == 0 ||
+ ShapeUtil::Rank(shape()) == 0) {
+ // If any of the two shapes are scalars, we can just call the StridedCopy()
+ // directly, and we know we will be copying only one value.
+ TF_RET_CHECK(copy_size.empty());
+ StridedCopy(data<NativeT>(), linear_index(shape(), dest_base), 0,
+ src_literal.data<NativeT>(),
+ linear_index(src_literal.shape(), src_base), 0, 1);
+ } else if (!ShapeUtil::IsZeroElementArray(shape()) &&
+ !ShapeUtil::IsZeroElementArray(src_literal.shape())) {
+ // Perform the copy if neither src nor dest has dimensions with zero
+ // elements; otherwise it's a no-op.
+ TF_RET_CHECK(src_base.size() == dest_base.size());
+ TF_RET_CHECK(src_base.size() == copy_size.size());
+
+ // Scan the source from minor, stepping in copy size blocks, then within
+ // the index enumeration functor, do a strided copy advancing source index
+ // by one (walking through the minor dimension), and destination index by
+ // proper stride size at the matching dimension.
+ DimensionVector src_indexes(src_base.size(), 0);
+ DimensionVector dest_indexes(dest_base.size(), 0);
+ Literal::StrideConfig stride_config(src_literal.shape(), shape(),
+ copy_size);
+
+ auto copy_proc = [&](tensorflow::gtl::ArraySlice<int64> indexes) {
+ // Map from multi-dimensional index, to source index.
+ std::transform(indexes.begin(), indexes.end(), src_base.begin(),
+ src_indexes.begin(), std::plus<int64>());
+ // Map from multi-dimensional index, to destination index.
+ std::transform(indexes.begin(), indexes.end(), dest_base.begin(),
+ dest_indexes.begin(), std::plus<int64>());
+
+ int64 src_index = linear_index(src_literal.shape(), src_indexes);
+ int64 dest_index = linear_index(shape(), dest_indexes);
+
+ // `this->` is needed to workaround MSVC bug: #16882
+ StridedCopy(this->data<NativeT>(), dest_index, stride_config.dest_stride,
+ src_literal.data<NativeT>(), src_index,
+ stride_config.source_stride, stride_config.minor_loop_size);
+ return true;
+ };
+
+ ShapeUtil::ForEachIndex(src_literal.shape(), stride_config.base,
+ stride_config.dimensions, stride_config.step,
+ copy_proc);
+ }
+ return Status::OK();
+}
+
+Status Literal::CopyElementFrom(const LiteralSlice& src_literal,
+ tensorflow::gtl::ArraySlice<int64> src_index,
+ tensorflow::gtl::ArraySlice<int64> dest_index) {
+ DCHECK_EQ(shape().element_type(), src_literal.shape().element_type());
+ const int64 src_linear_index = IndexUtil::MultidimensionalIndexToLinearIndex(
+ src_literal.shape(), src_index);
+ const int64 dest_linear_index =
+ IndexUtil::MultidimensionalIndexToLinearIndex(shape(), dest_index);
+ const int64 primitive_size =
+ ShapeUtil::ByteSizeOfPrimitiveType(shape().element_type());
+
+ char* dest_address =
+ static_cast<char*>(untyped_data()) + dest_linear_index * primitive_size;
+ const char* source_address =
+ static_cast<const char*>(src_literal.untyped_data()) +
+ src_linear_index * primitive_size;
+ if (dest_address != source_address) {
+ memcpy(dest_address, source_address, primitive_size);
+ }
+ return Status::OK();
+}
+
+/* static */ StatusOr<std::unique_ptr<Literal>> Literal::CreateFromProto(
+ const LiteralProto& proto) {
+ if (!proto.has_shape()) {
+ return InvalidArgument("LiteralProto has no shape");
+ }
+ if (!LayoutUtil::HasLayout(proto.shape())) {
+ return InvalidArgument("LiteralProto has no layout");
+ }
+
+ auto literal = MakeUnique<Literal>(proto.shape());
+
+ TF_RETURN_IF_ERROR(literal->root_piece_->ForEachMutableSubpieceWithStatus(
+ [&](const ShapeIndex& index, Piece* piece) {
+ const LiteralProto* proto_element = &proto;
+ for (int64 i : index) {
+ CHECK(i < proto_element->tuple_literals_size());
+ proto_element = &proto_element->tuple_literals(i);
+ }
+
+ if (ShapeUtil::IsTuple(piece->subshape())) {
+ if (proto_element->tuple_literals_size() !=
+ ShapeUtil::TupleElementCount(piece->subshape())) {
+ return InvalidArgument(
+ "Expected %lld tuple elements in LiteralProto, has %d",
+ ShapeUtil::TupleElementCount(piece->subshape()),
+ proto_element->tuple_literals_size());
+ }
+ return Status::OK();
+ }
+ if (piece->subshape().element_type() == TOKEN) {
+ return Status::OK();
+ }
+
+ CHECK(ShapeUtil::IsArray(piece->subshape()));
+ TF_RETURN_IF_ERROR(piece->CopyFromProto(*proto_element));
+
+ return Status::OK();
+ }));
+
+ return std::move(literal);
+}
+
+std::vector<Literal> Literal::DecomposeTuple() {
+ CHECK(ShapeUtil::IsTuple(shape()));
+ std::vector<Literal> elements;
+ for (int i = 0; i < ShapeUtil::TupleElementCount(shape()); ++i) {
+ elements.push_back(Literal(ShapeUtil::GetSubshape(shape(), {i}),
+ /*allocate_arrays=*/false));
+ Literal& element = elements.back();
+ element.root_piece_->ForEachMutableSubpiece(
+ [&](const ShapeIndex& index, Piece* dest_piece) {
+ ShapeIndex src_index = {i};
+ for (int64 j : index) {
+ src_index.push_back(j);
+ }
+ Piece& src_piece = piece(src_index);
+
+ // Move the respective buffer and sparse indices over to the element
+ // Literal.
+ dest_piece->set_buffer(src_piece.buffer());
+ src_piece.set_buffer(nullptr);
+ dest_piece->set_sparse_indices(src_piece.sparse_indices());
+ src_piece.set_sparse_indices(nullptr);
+ });
+ }
+ // Set this literal to be nil-shaped.
+ *this = Literal();
+ return elements;
+}
+
+namespace {
+
+// Copies the elements in 'src' to 'dest'. The shape and layout of the data in
+// the array slices are indicated by dest_shape and src_shape respectively.
+template <typename NativeT>
+void CopyElementsBetween(tensorflow::gtl::MutableArraySlice<NativeT> dest,
+ tensorflow::gtl::ArraySlice<NativeT> src,
+ const Shape& dest_shape, const Shape& src_shape) {
+ CHECK(ShapeUtil::Compatible(dest_shape, src_shape));
+ if (ShapeUtil::IsZeroElementArray(dest_shape)) {
+ return;
+ }
+ std::vector<int64> index(ShapeUtil::Rank(dest_shape));
+ do {
+ dest[IndexUtil::MultidimensionalIndexToLinearIndex(dest_shape, index)] =
+ src[IndexUtil::MultidimensionalIndexToLinearIndex(src_shape, index)];
+ } while (IndexUtil::BumpIndices(dest_shape, &index));
+}
+
+} // namespace
+
+Status LiteralBase::Piece::CopyFrom(const LiteralBase::Piece& src) {
+ CHECK(subshape_ != nullptr);
+ CHECK(src.subshape_ != nullptr);
+ if (ShapeUtil::Equal(subshape(), src.subshape())) {
+ // If the layouts are equal it's faster just to memcpy.
+ memcpy(buffer(), src.buffer(), src.size_bytes());
+ } else {
+ TF_RET_CHECK(ShapeUtil::Compatible(src.subshape(), subshape()));
+ std::vector<int64> origin(ShapeUtil::Rank(subshape()), 0);
+ switch (subshape().element_type()) {
+#define COPY_ELEMENTS(XLA_T, NATIVE_T) \
+ case (XLA_T): \
+ CopyElementsBetween<NATIVE_T>(data<NATIVE_T>(), src.data<NATIVE_T>(), \
+ subshape(), src.subshape()); \
+ break;
+ COPY_ELEMENTS(U8, uint8);
+ COPY_ELEMENTS(U16, uint16);
+ COPY_ELEMENTS(U32, uint32);
+ COPY_ELEMENTS(U64, uint64);
+ COPY_ELEMENTS(S8, int8);
+ COPY_ELEMENTS(S16, int16);
+ COPY_ELEMENTS(S32, int32);
+ COPY_ELEMENTS(S64, int64);
+ COPY_ELEMENTS(F16, half);
+ COPY_ELEMENTS(BF16, bfloat16);
+ COPY_ELEMENTS(F32, float);
+ COPY_ELEMENTS(F64, double);
+ COPY_ELEMENTS(C64, complex64);
+ COPY_ELEMENTS(PRED, bool);
+#undef COPY_ELEMENTS
+ default:
+ return Unimplemented(
+ "Copying a Literal object with element type %s is not implemented.",
+ PrimitiveType_Name(subshape().element_type()).c_str());
+ }
+ }
+ return Status::OK();
+}
+
+Status Literal::CopyFrom(const LiteralSlice& src_literal,
+ const ShapeIndex& dest_shape_index,
+ const ShapeIndex& src_shape_index) {
+ const Shape& dest_subshape =
+ ShapeUtil::GetSubshape(shape(), dest_shape_index);
+ const Shape& src_subshape =
+ ShapeUtil::GetSubshape(src_literal.shape(), src_shape_index);
+ if (!ShapeUtil::Compatible(dest_subshape, src_subshape)) {
+ return InvalidArgument(
+ "Destination subshape incompatible with source subshape: %s vs %s",
+ ShapeUtil::HumanString(dest_subshape).c_str(),
+ ShapeUtil::HumanString(src_subshape).c_str());
+ }
+ return root_piece_->ForEachMutableSubpieceWithStatus(
+ [&](const ShapeIndex& index, Piece* piece) {
+ if (!ShapeUtil::IsArray(piece->subshape())) {
+ return Status::OK();
+ }
+
+ // Determine if this index is in the part of this literal that we want
+ // to copy over from src_literal.
+ bool in_subtree_to_copy = true;
+ for (int i = 0; i < dest_shape_index.size(); ++i) {
+ if (index[i] != dest_shape_index[i]) {
+ in_subtree_to_copy = false;
+ break;
+ }
+ }
+ if (!in_subtree_to_copy) {
+ return Status::OK();
+ }
+ // Construct the index of the corresponding piece in the source literal.
+ ShapeIndex src_piece_index = src_shape_index;
+ for (int64 i = dest_shape_index.size(); i < index.size(); ++i) {
+ src_piece_index.push_back(index[i]);
+ }
+ TF_RETURN_IF_ERROR(piece->CopyFrom(src_literal.piece(src_piece_index)));
+ return Status::OK();
+ });
+}
+
+Status Literal::MoveFrom(Literal&& src_literal,
+ const ShapeIndex& dest_shape_index) {
+ const Shape& dest_subshape =
+ ShapeUtil::GetSubshape(shape(), dest_shape_index);
+ if (!ShapeUtil::Equal(dest_subshape, src_literal.shape())) {
+ return InvalidArgument(
+ "Destination subshape not equal to source shape: %s vs %s",
+ ShapeUtil::HumanString(dest_subshape).c_str(),
+ ShapeUtil::HumanString(src_literal.shape()).c_str());
+ }
+
+ src_literal.root_piece_->ForEachSubpiece(
+ [&](const ShapeIndex& src_index, const Piece& src_piece) {
+ if (!ShapeUtil::IsArray(src_piece.subshape())) {
+ return;
+ }
+
+ ShapeIndex dest_index = dest_shape_index;
+ for (int64 i : src_index) {
+ dest_index.push_back(i);
+ }
+ Piece& dest_piece = piece(dest_index);
+ delete[] dest_piece.buffer();
+ dest_piece.set_buffer(src_piece.buffer());
+ delete dest_piece.sparse_indices();
+ dest_piece.set_sparse_indices(src_piece.sparse_indices());
+ });
+
+ src_literal.shape_ = MakeUnique<Shape>(ShapeUtil::MakeNil());
+ delete src_literal.root_piece_;
+ src_literal.root_piece_ = new LiteralBase::Piece();
+ src_literal.root_piece_->set_subshape(src_literal.shape_.get());
+
+ return Status::OK();
+}
+
+Status Literal::CopySliceFrom(const LiteralSlice& src_literal,
+ tensorflow::gtl::ArraySlice<int64> src_base,
+ tensorflow::gtl::ArraySlice<int64> dest_base,
+ tensorflow::gtl::ArraySlice<int64> copy_size) {
+ TF_RET_CHECK(ShapeUtil::IsArray(shape())) << ShapeUtil::HumanString(shape());
+ TF_RET_CHECK(ShapeUtil::IsArray(src_literal.shape()))
+ << ShapeUtil::HumanString(src_literal.shape());
+ TF_RET_CHECK(ShapeUtil::SameElementType(src_literal.shape(), shape()));
+
+ switch (shape().element_type()) {
+ case U8:
+ return CopySliceFromInternal<uint8>(src_literal, src_base, dest_base,
+ copy_size);
+ case U16:
+ return CopySliceFromInternal<uint16>(src_literal, src_base, dest_base,
+ copy_size);
+ case U32:
+ return CopySliceFromInternal<uint32>(src_literal, src_base, dest_base,
+ copy_size);
+ case U64:
+ return CopySliceFromInternal<uint64>(src_literal, src_base, dest_base,
+ copy_size);
+ case S8:
+ return CopySliceFromInternal<int8>(src_literal, src_base, dest_base,
+ copy_size);
+ case S16:
+ return CopySliceFromInternal<int16>(src_literal, src_base, dest_base,
+ copy_size);
+ case S32:
+ return CopySliceFromInternal<int32>(src_literal, src_base, dest_base,
+ copy_size);
+ case S64:
+ return CopySliceFromInternal<int64>(src_literal, src_base, dest_base,
+ copy_size);
+ case F16:
+ return CopySliceFromInternal<half>(src_literal, src_base, dest_base,
+ copy_size);
+ case BF16:
+ return CopySliceFromInternal<bfloat16>(src_literal, src_base, dest_base,
+ copy_size);
+ case F32:
+ return CopySliceFromInternal<float>(src_literal, src_base, dest_base,
+ copy_size);
+ case F64:
+ return CopySliceFromInternal<double>(src_literal, src_base, dest_base,
+ copy_size);
+ case C64:
+ return CopySliceFromInternal<complex64>(src_literal, src_base, dest_base,
+ copy_size);
+ case PRED:
+ return CopySliceFromInternal<bool>(src_literal, src_base, dest_base,
+ copy_size);
+ default:
+ break;
+ }
+ return Unimplemented(
+ "Copying a slice from a Literal object with element type %d is not "
+ "implemented.",
+ shape().element_type());
+}
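+
+// Illustrative usage (editor's sketch, hypothetical literals): copy a 2x2
+// window of `src` starting at {1, 1} into `dest` at {0, 0}:
+//   TF_RETURN_IF_ERROR(dest.CopySliceFrom(src, /*src_base=*/{1, 1},
+//                                         /*dest_base=*/{0, 0},
+//                                         /*copy_size=*/{2, 2}));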
+
+void Literal::PopulateR1(const tensorflow::core::Bitmap& values) {
+ CHECK(ShapeUtil::IsArray(shape()));
+ CHECK_EQ(ShapeUtil::Rank(shape()), 1);
+ CHECK_EQ(element_count(), values.bits());
+ CHECK_EQ(shape().element_type(), PRED);
+ for (int64 i = 0; i < static_cast<int64>(values.bits()); ++i) {
+ Set({i}, values.get(i));
+ }
+}
+
+std::unique_ptr<Literal> LiteralBase::Relayout(
+ const Layout& new_layout, const ShapeIndex& shape_index) const {
+ // Create new shape with 'new_layout' set at the given shape index.
+ Shape new_shape = shape();
+ Shape* subshape = ShapeUtil::GetMutableSubshape(&new_shape, shape_index);
+ TF_CHECK_OK(LayoutUtil::ValidateLayoutForShape(new_layout, *subshape));
+ *subshape->mutable_layout() = new_layout;
+ auto result = MakeUnique<Literal>(new_shape);
+ TF_CHECK_OK(result->CopyFrom(*this));
+ return result;
+}
+
+std::unique_ptr<Literal> LiteralBase::Relayout(
+ const Shape& shape_with_layout) const {
+ CHECK(ShapeUtil::Compatible(shape_with_layout, shape()))
+ << "Given shape_with_layout " << ShapeUtil::HumanString(shape_with_layout)
+ << " not compatible with literal shape "
+ << ShapeUtil::HumanString(shape());
+ std::unique_ptr<Literal> result = CreateFromShape(shape_with_layout);
+ ShapeUtil::ForEachSubshape(
+ result->shape(),
+ [this, &result](const Shape& subshape, const ShapeIndex& index) {
+ if (ShapeUtil::IsArray(subshape)) {
+ TF_CHECK_OK(result->CopyFrom(*this,
+ /*dest_shape_index=*/index,
+ /*src_shape_index=*/index));
+ }
+ });
+ return result;
+}
+
+StatusOr<std::unique_ptr<Literal>> LiteralBase::Broadcast(
+ const Shape& result_shape,
+ tensorflow::gtl::ArraySlice<int64> dimensions) const {
+ if (!ShapeUtil::IsArray(shape())) {
+ return InvalidArgument("Broadcast only supports arrays.");
+ }
+
+ for (int64 i = 0; i < dimensions.size(); i++) {
+ TF_RET_CHECK(shape().dimensions(i) ==
+ result_shape.dimensions(dimensions[i]));
+ }
+
+ std::unique_ptr<Literal> result = MakeUnique<Literal>(result_shape);
+
+ // scratch_source_index is temporary storage space for the computed index into
+ // the input literal. We put it here to avoid allocating an std::vector in
+ // every iteration of ShapeUtil::ForEachIndex.
+ std::vector<int64> scratch_source_index(shape().dimensions_size());
+
+ char* dest_data = static_cast<char*>(result->untyped_data());
+ const char* source_data = static_cast<const char*>(untyped_data());
+ const int64 primitive_size =
+ ShapeUtil::ByteSizeOfPrimitiveType(shape().element_type());
+
+ ShapeUtil::ForEachIndex(
+ result_shape, [&](tensorflow::gtl::ArraySlice<int64> output_index) {
+ for (int64 i = 0; i < dimensions.size(); ++i) {
+ scratch_source_index[i] = output_index[dimensions[i]];
+ }
+ int64 dest_index = IndexUtil::MultidimensionalIndexToLinearIndex(
+ result_shape, output_index);
+ int64 source_index = IndexUtil::MultidimensionalIndexToLinearIndex(
+ shape(), scratch_source_index);
+ memcpy(dest_data + primitive_size * dest_index,
+ source_data + primitive_size * source_index, primitive_size);
+ return true;
+ });
+
+ return std::move(result);
+}
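+
+// Illustrative usage (editor's sketch, hypothetical literal): broadcast an
+// F32[2] into F32[2,3], mapping source dimension 0 onto result dimension 0
+// so each source element repeats across the new dimension:
+//   TF_ASSIGN_OR_RETURN(auto broadcasted,
+//       literal.Broadcast(ShapeUtil::MakeShape(F32, {2, 3}), {0}));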
+
+StatusOr<std::unique_ptr<Literal>> LiteralBase::Reshape(
+ tensorflow::gtl::ArraySlice<int64> dimensions) const {
+ if (!ShapeUtil::IsArray(shape())) {
+ return InvalidArgument("Reshape does not support tuples.");
+ }
+ std::unique_ptr<Literal> output;
+ if (!LayoutUtil::IsMonotonicWithDim0Major(shape().layout())) {
+ output =
+ Relayout(LayoutUtil::GetDefaultLayoutForRank(ShapeUtil::Rank(shape())));
+ } else {
+ output = CloneToUnique();
+ }
+ // Because the layout is monotonic, we can simply reuse the same sequence of
+ // values without changing their order.
+ *output->mutable_shape_do_not_use() =
+ ShapeUtil::MakeShape(shape().element_type(), dimensions);
+
+ int64 elements_before = ShapeUtil::ElementsIn(shape());
+ int64 elements_after = ShapeUtil::ElementsIn(output->shape());
+ if (elements_before != elements_after) {
+ return InvalidArgument(
+ "Shapes before and after Literal::Reshape have different numbers "
+ "of elements: %s vs %s.",
+ ShapeUtil::HumanString(shape()).c_str(),
+ ShapeUtil::HumanString(output->shape()).c_str());
+ }
+ return std::move(output);
+}
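+
+// Illustrative usage (editor's sketch, hypothetical literal): a
+// monotonic-layout F32[2,3] can be reshaped to F32[6]; the flat sequence of
+// values is reused unchanged:
+//   TF_ASSIGN_OR_RETURN(auto flat, literal.Reshape({6}));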
+
+std::unique_ptr<Literal> LiteralBase::Transpose(
+ tensorflow::gtl::ArraySlice<int64> permutation) const {
+ CHECK(ShapeUtil::IsArray(shape())) << "Tuple is not supported for transpose";
+ CHECK(IsPermutation(permutation, ShapeUtil::Rank(shape())))
+ << "Given permutation is not a permutation of dimension numbers";
+ // To transpose the array, we just permute the dimensions and layout, and
+ // do a straight memory copy of the raw data set.
+ // This is considerably faster than iterating over every array element using
+ // the EachCell<>() and Set<>() APIs.
+ std::vector<int64> inverse_permutation = InversePermutation(permutation);
+ Shape permuted_shape =
+ ShapeUtil::PermuteDimensions(inverse_permutation, shape());
+ // Replace the layout with one affine to this shape, such that a
+ // transpose operation can be performed by leaving the flat values
+ // representation intact.
+ // For example, consider the shape F32[11,8]{1,0} under a {1,0} permutation.
+ // The shape with affine layout resulting from that operation will be
+ // F32[8,11]{0,1}, since it leaves the original most-minor dimension (the
+ // one of size 8) as the most-minor dimension.
+ //
+ // Essentially, letting MinMaj(Di) be the position of dimension Di within
+ // the minor-to-major vector, and T(Di) the index that the original
+ // dimension Di has within the transposed array, a layout is affine if
+ // MinMaj(Di) == TMinMaj(T(Di)), where TMinMaj() is the minor-to-major
+ // vector of the affine layout.
+ CHECK(LayoutUtil::IsDenseArray(permuted_shape));
+ Layout* layout = permuted_shape.mutable_layout();
+ layout->clear_minor_to_major();
+ for (auto index : LayoutUtil::MinorToMajor(shape())) {
+ layout->add_minor_to_major(inverse_permutation[index]);
+ }
+ auto new_literal = MakeUnique<Literal>(permuted_shape);
+ DCHECK_EQ(ShapeUtil::ByteSizeOf(new_literal->shape()),
+ ShapeUtil::ByteSizeOf(shape()));
+ std::memcpy(new_literal->untyped_data(), untyped_data(), size_bytes());
+ return new_literal;
+}
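+
+// Illustrative usage (editor's sketch, hypothetical literal): swap the two
+// dimensions of a matrix; only the shape and layout change, and the flat
+// data is memcpy'd as-is:
+//   auto transposed = literal.Transpose({1, 0});  // F32[11,8] -> F32[8,11]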
+
+template <typename NativeT>
+std::unique_ptr<Literal> LiteralBase::SliceInternal(
+ const Shape& result_shape,
+ tensorflow::gtl::ArraySlice<int64> start_indices) const {
+ auto result_literal = MakeUnique<Literal>(result_shape);
+ DimensionVector new_indices(ShapeUtil::Rank(result_shape));
+ result_literal->EachCell<NativeT>(
+ [&](tensorflow::gtl::ArraySlice<int64> indices, NativeT /*value*/) {
+ for (int64 i = 0; i < ShapeUtil::Rank(result_shape); ++i) {
+ new_indices[i] = indices[i] + start_indices[i];
+ }
+ NativeT value = Get<NativeT>(new_indices);
+ result_literal->Set<NativeT>(indices, value);
+ });
+ return result_literal;
+}
+
+std::unique_ptr<Literal> LiteralBase::Slice(
+ tensorflow::gtl::ArraySlice<int64> start_indices,
+ tensorflow::gtl::ArraySlice<int64> limit_indices) const {
+ CHECK(ShapeUtil::IsArray(shape())) << "tuple is not supported for slice";
+
+ DimensionVector result_dimensions;
+ for (int64 dnum = 0; dnum < ShapeUtil::Rank(shape()); ++dnum) {
+ CHECK_GE(start_indices[dnum], 0);
+ CHECK_LE(limit_indices[dnum], shape().dimensions(dnum))
+ << "dnum = " << dnum;
+ int64 dimension = limit_indices[dnum] - start_indices[dnum];
+ CHECK_GE(dimension, 0) << "dnum = " << dnum;
+ result_dimensions.push_back(dimension);
+ }
+ const auto result_shape =
+ ShapeUtil::MakeShapeWithLayout(shape().element_type(), result_dimensions,
+ LayoutUtil::MinorToMajor(shape()));
+ switch (result_shape.element_type()) {
+ case F32:
+ return SliceInternal<float>(result_shape, start_indices);
+ case BF16:
+ return SliceInternal<bfloat16>(result_shape, start_indices);
+ case C64:
+ return SliceInternal<complex64>(result_shape, start_indices);
+ case S32:
+ return SliceInternal<int32>(result_shape, start_indices);
+ case U32:
+ return SliceInternal<uint32>(result_shape, start_indices);
+ default:
+ LOG(FATAL) << "not yet implemented: "
+ << PrimitiveType_Name(result_shape.element_type());
+ }
+}
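+
+// Illustrative usage (editor's sketch, hypothetical literal): take rows 1
+// and 2 of an F32[4,5] literal:
+//   auto rows = literal.Slice(/*start_indices=*/{1, 0},
+//                             /*limit_indices=*/{3, 5});  // F32[2,5]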
+
+Literal LiteralBase::Clone() const {
+ Literal result(shape());
+ TF_CHECK_OK(result.CopyFrom(*this));
+ return result;
+}
+
+std::unique_ptr<Literal> LiteralBase::CloneToUnique() const {
+ auto result = MakeUnique<Literal>(shape());
+ TF_CHECK_OK(result->CopyFrom(*this));
+ return result;
+}
+
+string LiteralBase::GetAsString(tensorflow::gtl::ArraySlice<int64> multi_index,
+ const ShapeIndex& shape_index) const {
+ const Shape& subshape = ShapeUtil::GetSubshape(shape(), shape_index);
+ CHECK(LayoutUtil::IsDenseArray(subshape));
+ switch (subshape.element_type()) {
+ case PRED:
+ return Get<bool>(multi_index, shape_index) ? "true" : "false";
+ case S8:
+ return StrCat(Get<int8>(multi_index, shape_index));
+ case S16:
+ return StrCat(Get<int16>(multi_index, shape_index));
+ case S32:
+ return StrCat(Get<int32>(multi_index, shape_index));
+ case S64:
+ return StrCat(Get<int64>(multi_index, shape_index));
+ case U8:
+ return StrCat(Get<uint8>(multi_index, shape_index));
+ case U16:
+ return StrCat(Get<uint16>(multi_index, shape_index));
+ case U32:
+ return StrCat(Get<uint32>(multi_index, shape_index));
+ case U64:
+ return StrCat(Get<uint64>(multi_index, shape_index));
+ case F16:
+ return StrCat(static_cast<float>(Get<half>(multi_index, shape_index)));
+ case F32:
+ return StrCat(Get<float>(multi_index, shape_index));
+ case BF16:
+ return StrCat(
+ static_cast<float>(Get<bfloat16>(multi_index, shape_index)));
+ case F64:
+ return StrCat(Get<double>(multi_index, shape_index));
+ case C64: {
+ complex64 c = Get<complex64>(multi_index, shape_index);
+ return StrCat("(", c.real(), ", ", c.imag(), ")");
+ }
+ default:
+ LOG(FATAL) << PrimitiveType_Name(subshape.element_type());
+ }
+}
+
+string LiteralBase::GetSparseElementAsString(
+ int64 sparse_element_number, const ShapeIndex& shape_index) const {
+ const Shape& subshape = ShapeUtil::GetSubshape(shape(), shape_index);
+ CHECK(LayoutUtil::IsSparseArray(subshape));
+ switch (subshape.element_type()) {
+ case PRED:
+ return GetSparseElement<bool>(sparse_element_number, shape_index)
+ ? "true"
+ : "false";
+ case S8:
+ return StrCat(GetSparseElement<int8>(sparse_element_number, shape_index));
+ case S16:
+ return StrCat(
+ GetSparseElement<int16>(sparse_element_number, shape_index));
+ case S32:
+ return StrCat(
+ GetSparseElement<int32>(sparse_element_number, shape_index));
+ case S64:
+ return StrCat(
+ GetSparseElement<int64>(sparse_element_number, shape_index));
+ case U8:
+ return StrCat(
+ GetSparseElement<uint8>(sparse_element_number, shape_index));
+ case U16:
+ return StrCat(
+ GetSparseElement<uint16>(sparse_element_number, shape_index));
+ case U32:
+ return StrCat(
+ GetSparseElement<uint32>(sparse_element_number, shape_index));
+ case U64:
+ return StrCat(
+ GetSparseElement<uint64>(sparse_element_number, shape_index));
+ case F16:
+ return StrCat(static_cast<float>(
+ GetSparseElement<half>(sparse_element_number, shape_index)));
+ case F32:
+ return StrCat(
+ GetSparseElement<float>(sparse_element_number, shape_index));
+ case BF16:
+ return StrCat(static_cast<float>(
+ GetSparseElement<bfloat16>(sparse_element_number, shape_index)));
+ case F64:
+ return StrCat(
+ GetSparseElement<double>(sparse_element_number, shape_index));
+ case C64: {
+ complex64 c =
+ GetSparseElement<complex64>(sparse_element_number, shape_index);
+ return StrCat("(", c.real(), ", ", c.imag(), ")");
+ }
+ default:
+ LOG(FATAL) << "Invalid element type for sparse arrays: "
+ << PrimitiveType_Name(subshape.element_type());
+ }
+}
+
+StatusOr<int64> LiteralBase::GetIntegralAsS64(
+ tensorflow::gtl::ArraySlice<int64> multi_index) const {
+ CHECK(LayoutUtil::IsDenseArray(shape()));
+ switch (shape().element_type()) {
+ case PRED:
+ return Get<bool>(multi_index);
+ case U8:
+ return Get<uint8>(multi_index);
+ case S32:
+ return Get<int32>(multi_index);
+ case S64:
+ return Get<int64>(multi_index);
+ case U32:
+ return Get<uint32>(multi_index);
+ case U64:
+ return Get<uint64>(multi_index);
+ default:
+ return FailedPrecondition(
+ "Array element type is not integral: %s",
+ PrimitiveType_Name(shape().element_type()).c_str());
+ }
+}
+
+size_t LiteralBase::Hash() const {
+ using tensorflow::Hash64;
+ using tensorflow::Hash64Combine;
+
+ size_t hash_value = ShapeUtil::Hash(shape());
+
+ ShapeUtil::ForEachSubshape(
+ shape(), [&](const Shape& subshape, const ShapeIndex& index) {
+ if (!ShapeUtil::IsArray(subshape)) {
+ return;
+ }
+
+ CHECK(LayoutUtil::IsDense(subshape.layout()));
+ hash_value = Hash64Combine(
+ hash_value, Hash64(static_cast<const char*>(untyped_data(index)),
+ size_bytes(index)));
+ });
+
+ return hash_value;
+}
+
+Status Literal::SetIntegralAsS64(tensorflow::gtl::ArraySlice<int64> multi_index,
+ int64 value) {
+ CHECK(LayoutUtil::IsDenseArray(shape()));
+ switch (shape().element_type()) {
+ case PRED:
+ Set<bool>(multi_index, value);
+ break;
+ case U8:
+ Set<uint8>(multi_index, value);
+ break;
+ case S32:
+ Set<int32>(multi_index, value);
+ break;
+ case S64:
+ Set<int64>(multi_index, value);
+ break;
+ case U32:
+ Set<uint32>(multi_index, value);
+ break;
+ case U64:
+ Set<uint64>(multi_index, value);
+ break;
+ default:
+ return FailedPrecondition(
+ "Array element type is not integral: %s",
+ PrimitiveType_Name(shape().element_type()).c_str());
+ }
+ return Status::OK();
+}
+
+tensorflow::gtl::ArraySlice<int64> LiteralBase::GetSparseIndex(
+ int64 sparse_element_number, const ShapeIndex& shape_index) const {
+ const Piece& p = piece(shape_index);
+ CHECK_GE(sparse_element_number, 0);
+ CHECK_LT(sparse_element_number, p.sparse_indices()->index_count());
+ return p.sparse_indices()->At(sparse_element_number);
+}
+
+void Literal::SortSparseElements(const ShapeIndex& shape_index) {
+ piece(shape_index).SortSparseElements();
+}
+
+void LiteralBase::Piece::SortSparseElements() {
+ switch (subshape().element_type()) {
+ case PRED:
+ SortSparseElementsInternal<bool>();
+ break;
+ case S8:
+ SortSparseElementsInternal<int8>();
+ break;
+ case U8:
+ SortSparseElementsInternal<uint8>();
+ break;
+ case S16:
+ SortSparseElementsInternal<int16>();
+ break;
+ case U16:
+ SortSparseElementsInternal<uint16>();
+ break;
+ case S32:
+ SortSparseElementsInternal<int32>();
+ break;
+ case U32:
+ SortSparseElementsInternal<uint32>();
+ break;
+ case S64:
+ SortSparseElementsInternal<int64>();
+ break;
+ case U64:
+ SortSparseElementsInternal<uint64>();
+ break;
+ case F32:
+ SortSparseElementsInternal<float>();
+ break;
+ case F64:
+ SortSparseElementsInternal<double>();
+ break;
+ case C64:
+ SortSparseElementsInternal<complex64>();
+ break;
+ case F16:
+ SortSparseElementsInternal<half>();
+ break;
+ case BF16:
+ SortSparseElementsInternal<bfloat16>();
+ break;
+ default:
+ LOG(FATAL) << "Element type not valid for sparse array: "
+ << PrimitiveType_Name(subshape().element_type());
+ }
+}
+
+template <typename NativeT>
+void LiteralBase::Piece::SortSparseElementsInternal() {
+ CHECK(LayoutUtil::IsSparseArray(subshape()));
+ int64 num_elements = sparse_indices()->index_count();
+ auto values = data<NativeT>();
+ CHECK_LE(num_elements, values.size());
+ sparse_indices()->SortWithValues(
+ tensorflow::gtl::MutableArraySlice<NativeT>(values.data(), num_elements));
+}
+
+namespace {
+
+void ToStringHelper(const LiteralBase& literal, const ShapeIndex& shape_index,
+ bool print_layout, std::vector<string>* pieces) {
+ const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);
+ CHECK(LayoutUtil::HasLayout(literal.shape()));
+ CHECK(LayoutUtil::HasLayout(subshape));
+
+ auto shape_to_string = [print_layout](const Shape& shape) {
+ if (print_layout) {
+ return ShapeUtil::HumanStringWithLayout(shape);
+ } else {
+ return ShapeUtil::HumanString(shape);
+ }
+ };
+
+ // TODO(b/32894291): refactor this code to reduce code duplication.
+ if (ShapeUtil::IsTuple(subshape)) {
+ pieces->push_back(shape_to_string(subshape));
+ pieces->push_back(" (\n");
+ std::vector<string> tuple_pieces;
+ for (int i = 0; i < ShapeUtil::TupleElementCount(subshape); ++i) {
+ ShapeIndex element_index = shape_index;
+ element_index.push_back(i);
+ std::vector<string> element_pieces;
+ ToStringHelper(literal, element_index, print_layout, &element_pieces);
+ tuple_pieces.push_back(tensorflow::str_util::Join(element_pieces, ""));
+ }
+ pieces->push_back(tensorflow::str_util::Join(tuple_pieces, ",\n"));
+ pieces->push_back("\n)");
+ return;
+ }
+
+ if (ShapeUtil::IsToken(subshape)) {
+ pieces->push_back("token");
+ return;
+ }
+
+ if (LayoutUtil::IsSparseArray(subshape)) {
+ pieces->push_back(shape_to_string(subshape));
+ pieces->push_back("{");
+ int64 rank = ShapeUtil::Rank(subshape);
+ int64 num_elements = literal.sparse_element_count();
+ for (int64 i = 0; i < num_elements; ++i) {
+ if (i > 0) {
+ pieces->push_back(", ");
+ }
+ if (rank == 1) {
+ pieces->push_back(StrCat(literal.GetSparseIndex(i)[0]));
+ pieces->push_back(": ");
+ } else {
+ pieces->push_back("[");
+ pieces->push_back(
+ tensorflow::str_util::Join(literal.GetSparseIndex(i), ", "));
+ pieces->push_back("]: ");
+ }
+ pieces->push_back(literal.GetSparseElementAsString(i));
+ }
+ pieces->push_back("}");
+ return;
+ }
+
+ CHECK(LayoutUtil::IsDenseArray(subshape));
+
+ auto element_to_string =
+ [&](tensorflow::gtl::ArraySlice<int64> indices) -> string {
+ PrimitiveType element_type = subshape.element_type();
+ if (element_type == PRED) {
+ // We display predicates in a densely packed form.
+ return literal.Get<bool>(indices, shape_index) ? "1" : "0";
+ }
+ return ((!indices.empty() && indices.back() > 0) ? ", " : "") +
+ literal.GetAsString(indices, shape_index);
+ };
+
+ if (ShapeUtil::Rank(subshape) == 0) {
+ pieces->push_back(literal.GetAsString({}, shape_index));
+ } else if (ShapeUtil::Rank(subshape) == 1) {
+ pieces->push_back("{");
+ for (int64 i0 = 0; i0 < subshape.dimensions(0); ++i0) {
+ pieces->push_back(element_to_string({i0}));
+ }
+ pieces->push_back("}");
+ } else if (ShapeUtil::Rank(subshape) == 2) {
+ pieces->push_back(shape_to_string(subshape));
+ pieces->push_back(" {\n");
+ for (int64 i0 = 0; i0 < subshape.dimensions(0); ++i0) {
+ pieces->push_back(" { ");
+ for (int64 i1 = 0; i1 < subshape.dimensions(1); ++i1) {
+ pieces->push_back(element_to_string({i0, i1}));
+ }
+ pieces->push_back(" ");
+ pieces->push_back(i0 == subshape.dimensions(0) - 1 ? "}\n" : "},\n");
+ }
+ pieces->push_back("}");
+ } else if (ShapeUtil::Rank(subshape) == 3) {
+ pieces->push_back(shape_to_string(subshape));
+ pieces->push_back(" {\n");
+ for (int64 i0 = 0; i0 < subshape.dimensions(0); ++i0) {
+ pieces->push_back(i0 > 0 ? ",\n{" : "{");
+ for (int64 i1 = 0; i1 < subshape.dimensions(1); ++i1) {
+ pieces->push_back(i1 > 0 ? ",\n { " : " { ");
+ for (int64 i2 = 0; i2 < subshape.dimensions(2); ++i2) {
+ pieces->push_back(element_to_string({i0, i1, i2}));
+ }
+ pieces->push_back(" }");
+ }
+ pieces->push_back(" }");
+ }
+ pieces->push_back("\n}");
+ } else if (ShapeUtil::Rank(subshape) == 4) {
+ pieces->push_back(shape_to_string(subshape));
+ pieces->push_back(" {\n");
+ for (int64 i0 = 0; i0 < subshape.dimensions(0); ++i0) {
+ pieces->push_back(Printf(" { /*i0=%lld*/\n", i0));
+ for (int64 i1 = 0; i1 < subshape.dimensions(1); ++i1) {
+ pieces->push_back(Printf(" { /*i1=%lld*/\n", i1));
+ for (int64 i2 = 0; i2 < subshape.dimensions(2); ++i2) {
+ pieces->push_back(" {");
+ for (int64 i3 = 0; i3 < subshape.dimensions(3); ++i3) {
+ pieces->push_back(element_to_string({i0, i1, i2, i3}));
+ }
+ pieces->push_back(i2 == subshape.dimensions(2) - 1 ? "}\n" : "},\n");
+ }
+ pieces->push_back(i1 == subshape.dimensions(1) - 1 ? " }\n"
+ : " },\n");
+ }
+ pieces->push_back(i0 == subshape.dimensions(0) - 1 ? " }\n" : " },\n");
+ }
+ pieces->push_back("}");
+ } else if (ShapeUtil::Rank(subshape) == 5) {
+ pieces->push_back(shape_to_string(subshape));
+ pieces->push_back(" {\n");
+ for (int64 i0 = 0; i0 < subshape.dimensions(0); ++i0) {
+ pieces->push_back(Printf(" { /*i0=%lld*/\n", i0));
+ for (int64 i1 = 0; i1 < subshape.dimensions(1); ++i1) {
+ pieces->push_back(Printf(" { /*i1=%lld*/\n", i1));
+ for (int64 i2 = 0; i2 < subshape.dimensions(2); ++i2) {
+ pieces->push_back(Printf(" { /*i2=%lld*/\n", i2));
+ for (int64 i3 = 0; i3 < subshape.dimensions(3); ++i3) {
+ pieces->push_back(" {");
+ for (int64 i4 = 0; i4 < subshape.dimensions(4); ++i4) {
+ pieces->push_back(element_to_string({i0, i1, i2, i3, i4}));
+ }
+ pieces->push_back(i3 == subshape.dimensions(3) - 1 ? "}\n"
+ : "},\n");
+ }
+ pieces->push_back(i2 == subshape.dimensions(2) - 1 ? " }\n"
+ : " },\n");
+ }
+ pieces->push_back(i1 == subshape.dimensions(1) - 1 ? " }\n"
+ : " },\n");
+ }
+ pieces->push_back(i0 == subshape.dimensions(0) - 1 ? " }\n" : " },\n");
+ }
+ pieces->push_back("}");
+ } else {
+ pieces->push_back(shape_to_string(subshape));
+ pieces->push_back(" {");
+ literal.EachCellAsString(
+ [&](tensorflow::gtl::ArraySlice<int64> indices, const string& value) {
+ pieces->push_back(" ");
+ pieces->push_back(value);
+ });
+ pieces->push_back("}");
+ }
+}
+
+} // namespace
+
+int64 LiteralBase::sparse_element_count() const {
+ CHECK(LayoutUtil::IsSparseArray(shape()));
+ return sparse_indices()->index_count();
+}
+
+string LiteralBase::ToString(bool print_layout) const {
+ std::vector<string> pieces;
+ CHECK(LayoutUtil::HasLayout(this->shape()));
+ ToStringHelper(*this, {}, print_layout, &pieces);
+ return tensorflow::str_util::Join(pieces, "");
+}
+
+void LiteralBase::EachCellAsString(
+ const std::function<void(tensorflow::gtl::ArraySlice<int64> indices,
+ const string& value)>& per_cell) const {
+ if (ShapeUtil::IsZeroElementArray(shape())) {
+ return;
+ }
+ std::vector<int64> indices = IndexUtil::LinearIndexToMultidimensionalIndex(
+ shape(), /*linear_index=*/0);
+ do {
+ per_cell(indices, GetAsString(indices));
+ } while (IndexUtil::BumpIndices(shape(), &indices));
+}
+
+namespace {
+template <typename NativeSrcT, typename NativeDestT, typename ConverterType>
+std::unique_ptr<Literal> ConvertBetweenNativeTypesWithConverter(
+ const LiteralBase& src_literal, const ConverterType& converter) {
+ CHECK(ShapeUtil::IsArray(src_literal.shape()));
+ auto result_literal = MakeUnique<Literal>(ShapeUtil::ChangeElementType(
+ src_literal.shape(),
+ primitive_util::NativeToPrimitiveType<NativeDestT>()));
+ auto src_data = src_literal.data<NativeSrcT>();
+ auto dest_data = result_literal->template data<NativeDestT>();
+ int64 num_elements = src_literal.element_count();
+
+ for (int64 i = 0; i < num_elements; ++i) {
+ dest_data[i] = converter(src_data[i]);
+ }
+ return result_literal;
+}
+
+template <typename NativeSrcT, typename NativeDestT>
+std::unique_ptr<Literal> ConvertBetweenNativeTypes(
+ const LiteralBase& src_literal) {
+ auto converter = [](NativeSrcT src) { return static_cast<NativeDestT>(src); };
+ return ConvertBetweenNativeTypesWithConverter<NativeSrcT, NativeDestT>(
+ src_literal, converter);
+}
+
+template <typename NativeSrcT, typename NativeDestT>
+typename std::enable_if<(sizeof(NativeSrcT) == sizeof(NativeDestT)),
+ std::unique_ptr<Literal>>::type
+BitcastBetweenNativeTypes(const LiteralBase& src_literal) {
+ auto converter = [](NativeSrcT src) {
+ return tensorflow::bit_cast<NativeDestT>(src);
+ };
+ return ConvertBetweenNativeTypesWithConverter<NativeSrcT, NativeDestT>(
+ src_literal, converter);
+}
+
+// This overload is here to make the compiler happy. bit_cast has a static
+// check that the types are the same size. This overload should never be
+// used because the source and destination types are checked for identical
+// sizes higher up.
+template <typename NativeSrcT, typename NativeDestT>
+typename std::enable_if<(sizeof(NativeSrcT) != sizeof(NativeDestT)),
+ std::unique_ptr<Literal>>::type
+BitcastBetweenNativeTypes(const LiteralBase& src_literal) {
+ LOG(FATAL) << "Invalid bitcast between types of different sizes.";
+}
+
+template <PrimitiveType primitive_src_type>
+std::unique_ptr<Literal> ConvertToC64(const LiteralBase& src_literal) {
+ CHECK(ShapeUtil::IsArray(src_literal.shape()));
+ auto result_literal = MakeUnique<Literal>(
+ ShapeUtil::ChangeElementType(src_literal.shape(), C64));
+ using NativeSrcT =
+ typename primitive_util::PrimitiveTypeToNative<primitive_src_type>::type;
+ tensorflow::gtl::ArraySlice<NativeSrcT> src_data =
+ src_literal.data<NativeSrcT>();
+ tensorflow::gtl::MutableArraySlice<complex64> dest_data =
+ result_literal->data<complex64>();
+ int64 num_elements = src_literal.element_count();
+ for (int64 i = 0; i < num_elements; ++i) {
+ dest_data[i] = complex64(static_cast<float>(src_data[i]), 0);
+ }
+ return result_literal;
+}
+
+template <PrimitiveType primitive_src_type, PrimitiveType primitive_dest_type>
+std::unique_ptr<Literal> ConvertIfTypesMatch(const LiteralBase& src_literal,
+ bool bitcast) {
+ CHECK_EQ(primitive_src_type, src_literal.shape().element_type());
+ if (bitcast) {
+ return BitcastBetweenNativeTypes<
+ typename primitive_util::PrimitiveTypeToNative<
+ primitive_src_type>::type,
+ typename primitive_util::PrimitiveTypeToNative<
+ primitive_dest_type>::type>(src_literal);
+ } else {
+ return ConvertBetweenNativeTypes<
+ typename primitive_util::PrimitiveTypeToNative<
+ primitive_src_type>::type,
+ typename primitive_util::PrimitiveTypeToNative<
+ primitive_dest_type>::type>(src_literal);
+ }
+}
+
+template <PrimitiveType primitive_src_type>
+StatusOr<std::unique_ptr<Literal>> ConvertIfDestTypeMatches(
+ const LiteralBase& src_literal, PrimitiveType primitive_dest_type,
+ bool bitcast) {
+ switch (primitive_dest_type) {
+#define CONVERT_IF_TYPES_MATCH(type) \
+ case (type): \
+ return ConvertIfTypesMatch<primitive_src_type, (type)>(src_literal, \
+ bitcast);
+ CONVERT_IF_TYPES_MATCH(PRED)
+ CONVERT_IF_TYPES_MATCH(S8)
+ CONVERT_IF_TYPES_MATCH(S32)
+ CONVERT_IF_TYPES_MATCH(S64)
+ CONVERT_IF_TYPES_MATCH(U8)
+ CONVERT_IF_TYPES_MATCH(U32)
+ CONVERT_IF_TYPES_MATCH(U64)
+ CONVERT_IF_TYPES_MATCH(F16)
+ CONVERT_IF_TYPES_MATCH(F32)
+ CONVERT_IF_TYPES_MATCH(F64)
+ CONVERT_IF_TYPES_MATCH(BF16)
+#undef CONVERT_IF_TYPES_MATCH
+ case C64:
+ if (!bitcast) {
+ return ConvertToC64<primitive_src_type>(src_literal);
+ }
+ break;
+ // Other types are not yet supported.
+ default:
+ break;
+ }
+ return Unimplemented(
+ "Converting from type %s to type %s is not implemented.",
+ PrimitiveType_Name(src_literal.shape().element_type()).c_str(),
+ PrimitiveType_Name(primitive_dest_type).c_str());
+}
+
+StatusOr<std::unique_ptr<Literal>> ConvertSwitch(
+ const LiteralBase& literal, PrimitiveType primitive_dest_type,
+ bool bitcast) {
+ TF_RET_CHECK(ShapeUtil::IsArray(literal.shape()));
+ if (literal.shape().element_type() == primitive_dest_type) {
+ return literal.CloneToUnique();
+ }
+ switch (literal.shape().element_type()) {
+#define CONVERT_IF_DEST_TYPE_MATCHES(type) \
+ case (type): \
+ return ConvertIfDestTypeMatches<(type)>(literal, primitive_dest_type, \
+ bitcast);
+ CONVERT_IF_DEST_TYPE_MATCHES(PRED)
+ CONVERT_IF_DEST_TYPE_MATCHES(S8)
+ CONVERT_IF_DEST_TYPE_MATCHES(S32)
+ CONVERT_IF_DEST_TYPE_MATCHES(S64)
+ CONVERT_IF_DEST_TYPE_MATCHES(U8)
+ CONVERT_IF_DEST_TYPE_MATCHES(U32)
+ CONVERT_IF_DEST_TYPE_MATCHES(U64)
+ CONVERT_IF_DEST_TYPE_MATCHES(F16)
+ CONVERT_IF_DEST_TYPE_MATCHES(F32)
+ CONVERT_IF_DEST_TYPE_MATCHES(F64)
+ CONVERT_IF_DEST_TYPE_MATCHES(BF16)
+#undef CONVERT_IF_DEST_TYPE_MATCHES
+ // Other types are not yet supported.
+ default:
+ return Unimplemented(
+ "%s from type %s to type %s is not implemented.",
+ (bitcast ? "Bitcast converting" : "Converting"),
+ PrimitiveType_Name(literal.shape().element_type()).c_str(),
+ PrimitiveType_Name(primitive_dest_type).c_str());
+ }
+}
+
+} // namespace
+
+StatusOr<std::unique_ptr<Literal>> LiteralBase::Convert(
+ PrimitiveType primitive_dest_type) const {
+ return ConvertSwitch(*this, primitive_dest_type, /*bitcast=*/false);
+}
+
+StatusOr<std::unique_ptr<Literal>> LiteralBase::BitcastConvert(
+ PrimitiveType primitive_dest_type) const {
+ if (primitive_util::BitWidth(shape().element_type()) !=
+ primitive_util::BitWidth(primitive_dest_type)) {
+ return InvalidArgument(
+ "Cannot bitcast convert from %s to %s, bit widths are different: %d != "
+ "%d",
+ PrimitiveType_Name(shape().element_type()).c_str(),
+ PrimitiveType_Name(primitive_dest_type).c_str(),
+ primitive_util::BitWidth(shape().element_type()),
+ primitive_util::BitWidth(primitive_dest_type));
+ }
+ return ConvertSwitch(*this, primitive_dest_type, /*bitcast=*/true);
+}
+
+StatusOr<std::unique_ptr<Literal>> LiteralBase::ConvertToShape(
+ const Shape& dest_shape, bool round_f32_to_bf16) const {
+ if (!ShapeUtil::IsTuple(dest_shape)) {
+ if (round_f32_to_bf16 && shape().element_type() == F32 &&
+ dest_shape.element_type() == BF16) {
+ auto converter = [](float src) {
+ return tensorflow::bfloat16::round_to_bfloat16(src);
+ };
+ return ConvertBetweenNativeTypesWithConverter<float, bfloat16>(*this,
+ converter);
+ }
+ return Convert(dest_shape.element_type());
+ }
+ std::vector<Literal> elements;
+ for (int i = 0; i < ShapeUtil::TupleElementCount(shape()); ++i) {
+ auto element = LiteralSlice(*this, {i});
+ TF_ASSIGN_OR_RETURN(
+ auto new_element,
+ element.ConvertToShape(ShapeUtil::GetSubshape(dest_shape, {i})));
+ elements.push_back(std::move(*new_element));
+ }
+ auto converted = MakeUnique<Literal>();
+ *converted = Literal::MoveIntoTuple(&elements);
+ return std::move(converted);
+}
+
+/* static */ Literal Literal::MoveIntoTuple(
+ tensorflow::gtl::MutableArraySlice<Literal> elements) {
+ std::vector<Shape> element_shapes;
+ for (const Literal& element : elements) {
+ element_shapes.push_back(element.shape());
+ }
+ Literal literal(ShapeUtil::MakeTupleShape(element_shapes),
+ /*allocate_arrays=*/false);
+ for (int i = 0; i < elements.size(); ++i) {
+ TF_CHECK_OK(
+ literal.MoveFrom(std::move(elements[i]), /*dest_shape_index=*/{i}));
+ }
+ return literal;
+}
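+
+// Illustrative usage (editor's sketch; MakeTwoLiterals is hypothetical):
+// move two array literals into a fresh two-element tuple; the sources are
+// left in a valid but nil state:
+//   std::vector<Literal> elements = MakeTwoLiterals();  // hypothetical
+//   Literal tuple = Literal::MoveIntoTuple(&elements);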
+
+template <typename NativeT>
+bool LiteralBase::Piece::EqualElementsInternal(
+ const LiteralBase::Piece& other, std::vector<int64>* multi_index) const {
+ if (multi_index->size() == ShapeUtil::Rank(subshape())) {
+ return (Get<NativeT>(*multi_index) == other.Get<NativeT>(*multi_index));
+ }
+ for (int64 i = 0; i < subshape().dimensions(multi_index->size()); ++i) {
+ multi_index->push_back(i);
+ if (!EqualElementsInternal<NativeT>(other, multi_index)) {
+ return false;
+ }
+ multi_index->pop_back();
+ }
+ return true;
+}
+
+bool LiteralBase::Piece::EqualElements(const LiteralBase::Piece& other) const {
+ DCHECK(ShapeUtil::Compatible(subshape(), other.subshape()));
+
+ std::vector<int64> multi_index;
+ switch (subshape().element_type()) {
+ case PRED:
+ return EqualElementsInternal<bool>(other, &multi_index);
+ case U8:
+ return EqualElementsInternal<uint8>(other, &multi_index);
+ case S32:
+ return EqualElementsInternal<int32>(other, &multi_index);
+ case S64:
+ return EqualElementsInternal<int64>(other, &multi_index);
+ case U32:
+ return EqualElementsInternal<uint32>(other, &multi_index);
+ case U64:
+ return EqualElementsInternal<uint64>(other, &multi_index);
+ case F32:
+ return EqualElementsInternal<float>(other, &multi_index);
+ case F64:
+ return EqualElementsInternal<double>(other, &multi_index);
+ case F16:
+ return EqualElementsInternal<half>(other, &multi_index);
+ case BF16:
+ return EqualElementsInternal<bfloat16>(other, &multi_index);
+ case C64:
+ return EqualElementsInternal<complex64>(other, &multi_index);
+ default:
+ LOG(FATAL) << "Unimplemented: LiteralBase::Piece::EqualElements for type "
+ << PrimitiveType_Name(subshape().element_type());
+ }
+}
+
+bool LiteralBase::operator==(const LiteralBase& other) const {
+ if (!ShapeUtil::Compatible(shape(), other.shape())) {
+ return false;
+ }
+
+ return root_piece().ForEachSubpieceWithBool(
+ [&](const ShapeIndex& index, const Piece& piece) {
+ if (!ShapeUtil::IsArray(piece.subshape())) {
+ return true;
+ }
+
+ const Piece& other_piece = other.piece(index);
+ if (!piece.EqualElements(other_piece)) {
+ return false;
+ }
+ return true;
+ });
+}
+
+namespace {
+
+template <typename NativeT>
+static bool AllElementsEqualValue(tensorflow::gtl::ArraySlice<NativeT> data,
+ NativeT value) {
+ for (int64 i = 0; i < data.size(); ++i) {
+ if (data[i] != value) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+bool LiteralBase::IsAll(int8 value) const {
+ return root_piece().ForEachSubpieceWithBool([&](const ShapeIndex& index,
+ const Piece& piece) {
+ if (!ShapeUtil::IsArray(piece.subshape())) {
+ return true;
+ }
+
+ auto piece_is_all = [&]() {
+ switch (shape().element_type()) {
+ case U8:
+ if (value >= 0) {
+ return AllElementsEqualValue<uint8>(piece.data<uint8>(), value);
+ }
+ return false;
+ case U32:
+ if (value >= 0) {
+ return AllElementsEqualValue<uint32>(piece.data<uint32>(), value);
+ }
+ return false;
+ case U64:
+ if (value >= 0) {
+ return AllElementsEqualValue<uint64>(piece.data<uint64>(), value);
+ }
+ return false;
+ case S8:
+ return AllElementsEqualValue<int8>(piece.data<int8>(), value);
+ case S32:
+ return AllElementsEqualValue<int32>(piece.data<int32>(), value);
+ case S64:
+ return AllElementsEqualValue<int64>(piece.data<int64>(), value);
+ case F32:
+ return AllElementsEqualValue<float>(piece.data<float>(), value);
+ case F64:
+ return AllElementsEqualValue<double>(piece.data<double>(), value);
+ case F16:
+ return AllElementsEqualValue<half>(piece.data<half>(),
+ static_cast<half>(value));
+ case BF16:
+ return AllElementsEqualValue<bfloat16>(piece.data<bfloat16>(),
+ static_cast<bfloat16>(value));
+ case PRED:
+ if (value == 0) {
+ return AllElementsEqualValue<bool>(piece.data<bool>(), false);
+ }
+ if (value == 1) {
+ return AllElementsEqualValue<bool>(piece.data<bool>(), true);
+ }
+ return false;
+ default:
+ return false;
+ }
+ };
+
+ if (!piece_is_all()) {
+ return false;
+ }
+ return true;
+ });
+}
+
+bool LiteralBase::IsAllFloat(float value) const {
+ return root_piece().ForEachSubpieceWithBool(
+ [&](const ShapeIndex& index, const Piece& piece) {
+ if (!ShapeUtil::IsArray(piece.subshape())) {
+ return true;
+ }
+
+ auto piece_is_all = [&]() {
+ switch (shape().element_type()) {
+ case F32:
+ return AllElementsEqualValue<float>(piece.data<float>(), value);
+ case F64:
+ return AllElementsEqualValue<double>(piece.data<double>(), value);
+ case F16:
+ return AllElementsEqualValue<half>(piece.data<half>(),
+ static_cast<half>(value));
+ case BF16:
+ return AllElementsEqualValue<bfloat16>(
+ piece.data<bfloat16>(), static_cast<bfloat16>(value));
+ default:
+ return false;
+ }
+ };
+ if (!piece_is_all()) {
+ return false;
+ }
+ return true;
+ });
+}
+
+bool LiteralBase::IsAllComplex(complex64 value) const {
+ switch (shape().element_type()) {
+ case C64:
+ return AllElementsEqualValue<complex64>(root_piece().data<complex64>(),
+ value);
+ default:
+ return false;
+ }
+}
+
+bool LiteralBase::IsAllFirst() const {
+ return root_piece().ForEachSubpieceWithBool(
+ [&](const ShapeIndex& index, const Piece& piece) {
+ if (!ShapeUtil::IsArray(piece.subshape())) {
+ return true;
+ }
+
+ // Empty shapes are not all the first element since there is no first
+ // element.
+ if (ShapeUtil::IsZeroElementArray(piece.subshape())) {
+ return false;
+ }
+ auto piece_is_all = [&]() {
+ switch (piece.subshape().element_type()) {
+ case PRED: {
+ auto data = piece.data<bool>();
+ return AllElementsEqualValue<bool>(data, data[0]);
+ }
+ // 8 bit types
+ case S8: {
+ auto data = piece.data<int8>();
+ return AllElementsEqualValue<int8>(data, data[0]);
+ }
+ case U8: {
+ auto data = piece.data<uint8>();
+ return AllElementsEqualValue<uint8>(data, data[0]);
+ }
+ // 16 bit types
+ case BF16: {
+ auto data = piece.data<bfloat16>();
+ return AllElementsEqualValue<bfloat16>(data, data[0]);
+ }
+ case F16: {
+ auto data = piece.data<half>();
+ return AllElementsEqualValue<half>(data, data[0]);
+ }
+ case S16: {
+ auto data = piece.data<int16>();
+ return AllElementsEqualValue<int16>(data, data[0]);
+ }
+ case U16: {
+ auto data = piece.data<uint16>();
+ return AllElementsEqualValue<uint16>(data, data[0]);
+ }
+ // 32 bit types
+ case F32: {
+ auto data = piece.data<float>();
+ return AllElementsEqualValue<float>(data, data[0]);
+ }
+ case U32: {
+ auto data = piece.data<uint32>();
+ return AllElementsEqualValue<uint32>(data, data[0]);
+ }
+ case S32: {
+ auto data = piece.data<int32>();
+ return AllElementsEqualValue<int32>(data, data[0]);
+ }
+ // 64 bit types
+ case C64: {
+ auto data = piece.data<complex64>();
+ return AllElementsEqualValue<complex64>(data, data[0]);
+ }
+ case F64: {
+ auto data = piece.data<double>();
+ return AllElementsEqualValue<double>(data, data[0]);
+ }
+ case S64: {
+ auto data = piece.data<int64>();
+ return AllElementsEqualValue<int64>(data, data[0]);
+ }
+ case U64: {
+ auto data = piece.data<uint64>();
+ return AllElementsEqualValue<uint64>(data, data[0]);
+ }
+ default:
+ return false;
+ }
+ };
+
+ if (!piece_is_all()) {
+ return false;
+ }
+ return true;
+ });
+}
+
+bool LiteralBase::IsZero(tensorflow::gtl::ArraySlice<int64> indices) const {
+ CHECK(ShapeUtil::IsArray(shape()));
+ switch (shape().element_type()) {
+ case U8:
+ return Get<uint8>(indices) == 0;
+ case U32:
+ return Get<uint32>(indices) == 0;
+ case U64:
+ return Get<uint64>(indices) == 0;
+ case S8:
+ return Get<int8>(indices) == 0;
+ case S32:
+ return Get<int32>(indices) == 0;
+ case S64:
+ return Get<int64>(indices) == 0;
+ case F32:
+ return Get<float>(indices) == 0.0f;
+ case F64:
+ return Get<double>(indices) == 0.0;
+ case C64:
+ return Get<complex64>(indices) == complex64(0.0f, 0.0f);
+ case F16:
+ return Get<half>(indices) == static_cast<half>(0.0f);
+ case BF16:
+ return Get<bfloat16>(indices) == static_cast<bfloat16>(0.0f);
+ case PRED:
+ return Get<bool>(indices) == false;
+ default:
+ LOG(FATAL) << "Input literal must be an array.";
+ }
+}
+
+namespace {
+
+template <typename RepeatedFieldT, typename NativeT>
+void CopyToRepeatedField(RepeatedFieldT* dest,
+ const tensorflow::gtl::ArraySlice<NativeT> src) {
+ *dest = RepeatedFieldT(src.begin(), src.end());
+}
+
+} // namespace
+
+void LiteralBase::Piece::WriteToProto(LiteralProto* proto) const {
+ *proto->mutable_shape() = subshape();
+ switch (subshape().element_type()) {
+ case PRED:
+ CopyToRepeatedField(proto->mutable_preds(), data<bool>());
+ break;
+ case U8:
+ proto->set_u8s(static_cast<const unsigned char*>(data<uint8>().data()),
+ element_count());
+ break;
+ case U32:
+ CopyToRepeatedField(proto->mutable_u32s(), data<uint32>());
+ break;
+ case U64:
+ CopyToRepeatedField(proto->mutable_u64s(), data<uint64>());
+ break;
+ case S32:
+ CopyToRepeatedField(proto->mutable_s32s(), data<int32>());
+ break;
+ case S64:
+ CopyToRepeatedField(proto->mutable_s64s(), data<int64>());
+ break;
+ case F16:
+ *proto->mutable_f16s() = string(
+ reinterpret_cast<const char*>(data<half>().data()), size_bytes());
+ if (!kLittleEndian) {
+ ConvertEndianShort(proto->mutable_f16s());
+ }
+ break;
+ case BF16:
+ *proto->mutable_bf16s() = string(
+ reinterpret_cast<const char*>(data<bfloat16>().data()), size_bytes());
+ if (!kLittleEndian) {
+ ConvertEndianShort(proto->mutable_bf16s());
+ }
+ break;
+ case F32:
+ CopyToRepeatedField(proto->mutable_f32s(), data<float>());
+ break;
+ case F64:
+ CopyToRepeatedField(proto->mutable_f64s(), data<double>());
+ break;
+ case C64:
+ for (complex64 value : data<complex64>()) {
+ proto->add_c64s(value.real());
+ proto->add_c64s(value.imag());
+ }
+ break;
+ case TUPLE:
+ case TOKEN:
+ // Nothing to do but assign the shape, which is done above.
+ return;
+ default:
+ LOG(FATAL) << "Unhandled primitive type " << subshape().element_type();
+ }
+}
+
+const void* LiteralBase::Piece::untyped_data() const {
+ CHECK(ShapeUtil::IsArray(subshape())) << ShapeUtil::HumanString(subshape());
+ return buffer();
+}
+
+void* LiteralBase::Piece::untyped_data() {
+ CHECK(ShapeUtil::IsArray(subshape())) << ShapeUtil::HumanString(subshape());
+ return buffer();
+}
+
+namespace {
+
+template <typename RepeatedFieldT, typename NativeT>
+Status CopyFromRepeatedField(tensorflow::gtl::MutableArraySlice<NativeT> dest,
+ const RepeatedFieldT& src) {
+ if (dest.size() != src.size()) {
+ return InvalidArgument(
+ "Expected %lu elements in LiteralProto repeated field, has %d",
+ dest.size(), src.size());
+ }
+ std::copy(src.begin(), src.end(), dest.begin());
+ return Status::OK();
+}
+
+} // namespace
+
+Status LiteralBase::Piece::CopyFromProto(const LiteralProto& proto) {
+ // These conditions should have been checked in Literal::CreateFromProto.
+ TF_RET_CHECK(proto.has_shape());
+ TF_RET_CHECK(LayoutUtil::HasLayout(proto.shape()));
+ TF_RET_CHECK(ShapeUtil::Equal(proto.shape(), subshape()));
+
+ switch (subshape().element_type()) {
+ case PRED:
+ TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<bool>(), proto.preds()));
+ break;
+ case U8: {
+ auto u8_data = data<uint8>();
+ TF_RET_CHECK(proto.u8s().size() == u8_data.size());
+ std::copy(proto.u8s().begin(), proto.u8s().end(), u8_data.begin());
+ } break;
+ case S32:
+ TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<int32>(), proto.s32s()));
+ break;
+ case S64:
+ TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<int64>(), proto.s64s()));
+ break;
+ case U32:
+ TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<uint32>(), proto.u32s()));
+ break;
+ case U64:
+ TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<uint64>(), proto.u64s()));
+ break;
+ case F16: {
+ const string& s(proto.f16s());
+ TF_RET_CHECK(data<half>().size() * sizeof(half) == s.size());
+ memcpy(untyped_data(), s.data(), s.size());
+ if (!kLittleEndian) {
+ ConvertEndianShort(reinterpret_cast<char*>(untyped_data()), s.size());
+ }
+ } break;
+
+ case BF16: {
+ const string& s(proto.bf16s());
+ TF_RET_CHECK(data<bfloat16>().size() * sizeof(bfloat16) == s.size());
+ memcpy(untyped_data(), s.data(), s.size());
+ if (!kLittleEndian) {
+ ConvertEndianShort(reinterpret_cast<char*>(untyped_data()), s.size());
+ }
+ } break;
+ case F32:
+ TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<float>(), proto.f32s()));
+ break;
+ case F64:
+ TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<double>(), proto.f64s()));
+ break;
+ case C64: {
+ auto complex_data = data<complex64>();
+ TF_RET_CHECK(proto.c64s_size() == complex_data.size() * 2);
+ for (int64 i = 0; i < complex_data.size(); ++i) {
+ complex_data[i] = complex64{proto.c64s(i * 2), proto.c64s(i * 2 + 1)};
+ }
+ } break;
+ case TUPLE:
+ LOG(FATAL) << "Should not be called on tuple shapes: "
+ << ShapeUtil::HumanString(subshape());
+ break;
+ default:
+ LOG(FATAL) << "Unhandled primitive type " << subshape().element_type();
+ }
+ return Status::OK();
+}
+
+LiteralProto LiteralBase::ToProto() const {
+ LiteralProto proto;
+ root_piece().ForEachSubpiece(
+ [&](const ShapeIndex& index, const Piece& piece) {
+ LiteralProto* proto_piece = &proto;
+ for (int64 i : index) {
+ while (proto_piece->tuple_literals_size() <= i) {
+ proto_piece->add_tuple_literals();
+ }
+ proto_piece = proto_piece->mutable_tuple_literals(i);
+ }
+ piece.WriteToProto(proto_piece);
+ });
+
+ if (LayoutUtil::IsSparseArray(shape())) {
+ CopyToRepeatedField(proto.mutable_sparse_indices(),
+ sparse_indices()->data());
+ }
+
+ return proto;
+}
+
+const void* LiteralBase::untyped_data(const ShapeIndex& shape_index) const {
+ return piece(shape_index).untyped_data();
+}
+
+void* Literal::untyped_data(const ShapeIndex& shape_index) {
+ return piece(shape_index).untyped_data();
+}
+
+int64 LiteralBase::size_bytes(const ShapeIndex& shape_index) const {
+ return piece(shape_index).size_bytes();
+}
+
+string LiteralBase::GetR1U8AsString() const {
+ CHECK(ShapeUtil::IsArray(shape()));
+ CHECK_EQ(ShapeUtil::Rank(shape()), 1);
+ CHECK_EQ(shape().element_type(), U8);
+ return string(tensorflow::bit_cast<const char*>(data<uint8>().data()),
+ ShapeUtil::ElementsIn(shape()));
+}
+
+void BorrowingLiteral::BuildPieceSubtree(const Shape& shape, Piece* piece) {
+ CHECK(ShapeUtil::IsTuple(shape));
+ for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
+ const Shape& subshape = shape.tuple_shapes(i);
+
+ auto child_piece = Piece();
+ child_piece.set_subshape(&subshape);
+
+ if (ShapeUtil::IsTuple(subshape)) {
+ BuildPieceSubtree(subshape, &child_piece);
+ }
+
+ piece->emplace_back(std::move(child_piece));
+ }
+}
+
+LiteralSlice::LiteralSlice(const LiteralBase& literal)
+ : LiteralBase(), root_piece_(&literal.root_piece()) {}
+
+LiteralSlice::LiteralSlice(const LiteralBase& literal,
+ const ShapeIndex& view_root)
+ : LiteralBase(), root_piece_(&literal.piece(view_root)) {}
+
+BorrowingLiteral::BorrowingLiteral(const char* src_buf_ptr, const Shape& shape)
+ : LiteralBase(), shape_(MakeUnique<Shape>(shape)) {
+ CHECK(ShapeUtil::IsArray(*shape_));
+ CHECK(LayoutUtil::HasLayout(*shape_));
+
+ root_piece_ = Piece();
+ root_piece_.set_buffer(const_cast<char*>(src_buf_ptr));
+ root_piece_.set_subshape(shape_.get());
+}
+
+BorrowingLiteral::BorrowingLiteral(
+ tensorflow::gtl::ArraySlice<const char*> src_buf_ptrs, const Shape& shape)
+ : LiteralBase(), shape_(MakeUnique<Shape>(shape)) {
+ CHECK(ShapeUtil::IsTuple(*shape_));
+ CHECK(!ShapeUtil::IsNestedTuple(*shape_));
+ CHECK_EQ(src_buf_ptrs.size(), ShapeUtil::TupleElementCount(*shape_));
+ root_piece_ = Piece();
+ root_piece_.set_subshape(shape_.get());
+ BuildPieceSubtree(*shape_, &root_piece_);
+
+ for (int i = 0; i < src_buf_ptrs.size(); ++i) {
+ const auto& src_shape = shape_->tuple_shapes(i);
+ CHECK(ShapeUtil::IsArray(src_shape));
+ root_piece_.child(i).set_buffer(const_cast<char*>(src_buf_ptrs[i]));
+ }
+}
+
+} // namespace xla
diff --git a/tensorflow/compiler/xla/literal.h b/tensorflow/compiler/xla/literal.h
new file mode 100644
index 0000000000..dd67dfa8d4
--- /dev/null
+++ b/tensorflow/compiler/xla/literal.h
@@ -0,0 +1,1152 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LITERAL_H_
+#define TENSORFLOW_COMPILER_XLA_LITERAL_H_
+
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <ostream>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "tensorflow/compiler/xla/array2d.h"
+#include "tensorflow/compiler/xla/array3d.h"
+#include "tensorflow/compiler/xla/array4d.h"
+#include "tensorflow/compiler/xla/index_util.h"
+#include "tensorflow/compiler/xla/layout_util.h"
+#include "tensorflow/compiler/xla/primitive_util.h"
+#include "tensorflow/compiler/xla/ptr_util.h"
+#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/compiler/xla/sparse_index_array.h"
+#include "tensorflow/compiler/xla/status_macros.h"
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/compiler/xla/util.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/lib/core/bitmap.h"
+#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
+#include "tensorflow/core/lib/gtl/array_slice.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
+#include "tensorflow/core/platform/protobuf.h"
+#include "tensorflow/core/platform/types.h"
+
+namespace xla {
+
+// Forward declare the Literal and LiteralSlice classes to be used by the
+// creation methods in the base class.
+class Literal;
+class LiteralSlice;
+
+// Abstract base class for literals.
+class LiteralBase {
+ public:
+ virtual ~LiteralBase() = 0;
+
+ // Literals are equal if they have compatible shapes and the same data
+ // values. Layout is not compared.
+ bool operator==(const LiteralBase& other) const;
+ bool operator!=(const LiteralBase& other) const { return !(*this == other); }
+
+ // Returns the shape of the literal.
+ const Shape& shape() const { return root_piece().subshape(); }
+
+ // Serialize to proto.
+ LiteralProto ToProto() const;
+
+ // Returns an ArraySlice of the array for this literal for the given NativeT
+ // (e.g., float). CHECKs if the subshape of the literal at the given
+ // ShapeIndex is not an array. See primitive_util.h for the mapping from XLA
+ // type to native type.
+ template <typename NativeT>
+ tensorflow::gtl::ArraySlice<NativeT> data(
+ const ShapeIndex& shape_index = {}) const;
+
+ // Returns a const pointer to the sparse index array. Returns nullptr if the
+ // literal is not a sparse array.
+ const SparseIndexArray* sparse_indices(
+ const ShapeIndex& shape_index = {}) const;
+
+ // Returns a const pointer to (or size of) the underlying buffer holding the
+ // array at the given shape index. CHECKs if the subshape of the literal at
+ // the given ShapeIndex is not an array.
+ const void* untyped_data(const ShapeIndex& shape_index = {}) const;
+ int64 size_bytes(const ShapeIndex& shape_index = {}) const;
+
+ // Returns this literal's data as a string. This literal must be a rank-1 U8
+ // array.
+ string GetR1U8AsString() const;
+
+ // Returns a string representation of the literal value.
+ // Warning: this function can take minutes for multi-million element Literals.
+ string ToString(bool print_layout = false) const;
+
+ // Gets an element in the literal at the given index. The multi_index is
+ // CHECKed against the dimension sizes.
+ template <typename NativeT>
+ NativeT Get(tensorflow::gtl::ArraySlice<int64> multi_index,
+ const ShapeIndex& shape_index) const;
+ // Overloads of Get for array literals. CHECKs if the literal is not a
+ // dense array.
+ template <typename NativeT>
+ NativeT Get(tensorflow::gtl::ArraySlice<int64> multi_index) const;
+
+ // Returns the element value at index (0, ..., 0), however many zeroes are
+ // required for that index.
+ template <typename NativeT>
+ NativeT GetFirstElement() const;
+
+ // As Get(), but determines the correct type and converts the value
+ // into text.
+ string GetAsString(tensorflow::gtl::ArraySlice<int64> multi_index,
+ const ShapeIndex& shape_index = {}) const;
+ // As GetSparseElement(), but determines the correct type and converts the
+ // value into text.
+ string GetSparseElementAsString(int64 sparse_element_number,
+ const ShapeIndex& shape_index = {}) const;
+ // As Get(), but determines the correct type and converts the value into
+ // int64. This literal must be an array.
+ StatusOr<int64> GetIntegralAsS64(
+ tensorflow::gtl::ArraySlice<int64> multi_index) const;
+
+ // Returns the multi-index of the element in a sparse literal at the given
+ // sparse element number. The sparse element number is the position within
+ // the sparse array's list of (index, value) pairs, and is checked against the
+ // total number of (index, value) pairs in the sparse array.
+ tensorflow::gtl::ArraySlice<int64> GetSparseIndex(
+ int64 sparse_element_number, const ShapeIndex& shape_index = {}) const;
+
+ // Returns the value of the element in a sparse literal at the given sparse
+ // element number. The sparse element number is the position within the
+ // sparse array's list of (index, value) pairs, and is checked against the
+ // total number of (index, value) pairs in the sparse array.
+ template <typename NativeT>
+ NativeT GetSparseElement(int64 sparse_element_number,
+ const ShapeIndex& shape_index = {}) const;
+
+ // Invokes the "per cell" callback for each element in the provided
+ // literal with the element's indices and a string representation of
+ // the element's value.
+ //
+ // This function is useful if you want a polymorphic representation
+ // of the tensor's elements (turning them into strings for something
+ // like a representation in a protobuf).
+ //
+ // This literal must have a dense layout.
+ void EachCellAsString(
+ const std::function<void(tensorflow::gtl::ArraySlice<int64> indices,
+ const string& value)>& per_cell) const;
+ template <typename NativeT>
+ void EachCell(std::function<void(tensorflow::gtl::ArraySlice<int64> indices,
+ NativeT value)>
+ per_cell) const;
+
+ // Returns whether every element in this literal is equal to value.
+ //
+ // value is an int8 because we expect this to be called with small
+ // compile-time constants (0, -1, etc.) and so that whatever value you pass
+ // can be represented exactly by floating-point types as small as 16 bits.
+ //
+ // If value doesn't fit in this literal's type, returns false. Values of 1/0
+ // are considered equal to true/false; other values are not considered equal
+ // to true. Returns false if this literal is not array-shaped.
+ bool IsAll(int8 value) const;
+
+ // Like IsAll(const Literal&, int8), except we check whether the literal is
+ // equal to a particular floating-point number.
+ //
+ // If the literal is not a floating-point value, this always returns false.
+ //
+ // This casts value to the type of literal, then compares using ==. The
+ // usual admonishments about floating-point equality checks apply. We expect
+ // you to use this to check for values that can be expressed precisely as a
+ // float, e.g. -0.5. Returns false if this literal is not array-shaped.
+ bool IsAllFloat(float value) const;
+
+ // Like IsAll(const Literal&, int8), except we check whether the literal is
+ // equal to a particular complex number.
+ //
+ // If the literal is not a complex value, this always returns false.
+ //
+ // This casts value to the type of literal, then compares using ==. The usual
+ // admonishments about floating-point equality checks apply. We expect you to
+ // use this to check for complex values that can be expressed precisely as
+ // float pairs e.g. (-0.5, 1.0).
+ //
+ // This literal must have a dense layout.
+ bool IsAllComplex(complex64 value) const;
+
+ // Returns whether this literal consists entirely of copies of its first
+ // element.
+ bool IsAllFirst() const;
+
+ // Returns whether this literal is zero at the specified index. This literal
+ // must be an array with a dense layout.
+ bool IsZero(tensorflow::gtl::ArraySlice<int64> indices) const;
+
+ // Returns the count of the elements in the array at the given shape index in
+ // this literal.
+ int64 element_count(const ShapeIndex& index = {}) const {
+ return ShapeUtil::ElementsIn(ShapeUtil::GetSubshape(shape(), index));
+ }
+
+ // Returns the count of the elements in the sparse array at the given shape
+ // index in this literal, which will be no larger than
+ // LayoutUtil::MaxSparseElements(GetSubshape(shape(), index).layout()).
+ int64 sparse_element_count() const;
+
+ // Compute a hash for this literal. This literal must not be a sparse tensor
+ // or a tuple containing a sparse tensor.
+ size_t Hash() const;
+
+ // Converts this literal to the given shape. Returns an error if the
+ // conversion is not possible.
+ //
+ // round_f32_to_bf16: if true, converting F32 elements to BF16 uses rounding
+ // instead of truncation; otherwise, truncation is used.
+ //
+ // TODO(b/69266521): remove the round_f32_to_bf16 flag when rounding becomes
+ // the default behavior.
+ StatusOr<std::unique_ptr<Literal>> ConvertToShape(
+ const Shape& dest_shape, bool round_f32_to_bf16 = false) const;
+
+ // Converts this literal to another primitive type using a bitcast
+ // conversion. The to and from primitive types must have the same bit
+ // width. Returns an error if the conversion is not possible. This literal
+ // must be array-shaped.
+ StatusOr<std::unique_ptr<Literal>> BitcastConvert(
+ PrimitiveType primitive_dest_type) const;
+
+ // Converts this literal to another primitive type. Returns an error if the
+ // conversion is not possible. This literal must be array-shaped.
+ StatusOr<std::unique_ptr<Literal>> Convert(
+ PrimitiveType primitive_dest_type) const;
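+ //
+ // Example (illustrative sketch) converting an S32 literal to F32:
+ //
+ //   StatusOr<std::unique_ptr<Literal>> converted = s32_literal.Convert(F32);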
+
+ // Clones the underlying buffers into a new Literal, or new
+ // std::unique_ptr<Literal>.
+ Literal Clone() const;
+ std::unique_ptr<Literal> CloneToUnique() const;
+
+ // TODO(b/67651157): The methods below which perform computation on Literals
+ // (Reshape, Slice, etc) should be moved elsewhere, and perhaps combined with
+ // evaluator code which operates on Literals.
+ //
+ // Creates a new literal with the same value as this literal, but
+ // conforming to new_layout; e.g. a literal matrix that was in {0,
+ // 1} minor-to-major dimension layout can be re-layed-out as {1, 0}
+ // minor-to-major dimension layout and the value in the cell at any given
+ // logical index (i0, i1) will be the same.
+ //
+ // For tuple shaped literals, shape_index should be used to select the inner
+ // array that the new layout applies to.
+ //
+ // Note: this is useful when the client wants to ensure that a value placed in
+ // the XLA allocation tracker has a particular layout, e.g. for efficiency
+ // or to avoid unimplemented operation/layout combinations.
+ std::unique_ptr<Literal> Relayout(const Layout& new_layout,
+ const ShapeIndex& shape_index = {}) const;
+
+ // An overload of Relayout which changes the layout of the entire shape rather
+ // than being limited to a single array within the shape.
+ std::unique_ptr<Literal> Relayout(const Shape& shape_with_layout) const;
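+ //
+ // Example (illustrative sketch) re-laying out a matrix literal as {0, 1}
+ // minor-to-major:
+ //
+ //   auto relaid = literal.Relayout(LayoutUtil::MakeLayout({0, 1}));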
+
+ // Creates a new literal by reshaping this literal to have the given
+ // dimensions. The total number of elements must not change; the
+ // implementation currently only supports monotonic dim0-major layouts.
+ // This literal must be an array.
+ StatusOr<std::unique_ptr<Literal>> Reshape(
+ tensorflow::gtl::ArraySlice<int64> dimensions) const;
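+ //
+ // Example (illustrative sketch) reshaping an f32[4] literal into f32[2,2]:
+ //
+ //   StatusOr<std::unique_ptr<Literal>> reshaped = literal.Reshape({2, 2});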
+
+ // Creates a new literal by broadcasting this literal with `dimensions` to
+ // yield a literal of shape `result_shape`.
+ StatusOr<std::unique_ptr<Literal>> Broadcast(
+ const Shape& result_shape,
+ tensorflow::gtl::ArraySlice<int64> dimensions) const;
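+ //
+ // Example (illustrative sketch) broadcasting an f32[2] literal into an
+ // f32[3,2] result, mapping its single dimension onto result dimension 1:
+ //
+ //   auto broadcasted =
+ //       literal.Broadcast(ShapeUtil::MakeShape(F32, {3, 2}), {1});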
+
+ // Creates a new literal by reordering the dimensions of this literal.
+ // The given `permutation` must be a permutation of the dimension numbers
+ // in the original literal, and it specifies the order of the new dimensions
+ // in the result literal (i.e., new_order[i] = old_order[permutation[i]]).
+ // For example, a transpose call on a literal of shape [3 x 8 x 4] and
+ // `permutation` = {2, 0, 1} returns a new literal of shape [4 x 3 x 8].
+ // This literal must be an array.
+ std::unique_ptr<Literal> Transpose(
+ tensorflow::gtl::ArraySlice<int64> permutation) const;
+
+ // Creates a sub-array from this literal by extracting the indices
+ // [start_index, limit_index) of each dimension. The result literal has the
+ // same rank and layout as this literal. The number of indices in
+ // start_indices and limit_indices must be the rank of the literal, and the
+ // indices follow the order of the dimensions.
+ // This literal must be an array.
+ std::unique_ptr<Literal> Slice(
+ tensorflow::gtl::ArraySlice<int64> start_indices,
+ tensorflow::gtl::ArraySlice<int64> limit_indices) const;
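+ //
+ // Example (illustrative sketch) extracting the top-left 2x2 corner of a
+ // matrix literal:
+ //
+ //   auto corner = matrix.Slice({0, 0}, {2, 2});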
+
+ // Creates a literal with a prepended dimension with bound "times"; e.g. a
+ // f32[3x2] with times=4 will produce a f32[4x3x2] with the 3x2 from this
+ // literal replicated four times.
+ // This literal must be an array.
+ template <typename NativeT>
+ std::unique_ptr<Literal> Replicate(int64 times) const;
+
+ // Creates a new Literal object with the shape specified as parameter.
+ // The content of the literal values is the default value of the primitive
+ // type of literal itself (0 for numeric types, and false for predicates).
+ //
+ // Note: It's an antipattern to use this method then immediately call
+ // Literal::Populate on the result (since that results in zero initialization,
+ // then reinitialization). Consider whether a call to MakeUnique<Literal>(shape),
+ // followed by a call to Literal::Populate, can be used instead.
+ static std::unique_ptr<Literal> CreateFromShape(const Shape& shape);
+
+ protected:
+ // A data structure representing a subshape at a particular ShapeIndex within
+ // the literal. For array-shaped ShapeIndexes, this data structure holds the
+ // pointer to the memory allocated for the array data.
+ class Piece {
+ public:
+ // Returns the buffer holding the array data for this piece as an array
+ // slice. This piece must be array-shaped.
+ template <typename NativeT>
+ tensorflow::gtl::ArraySlice<NativeT> data() const;
+ template <typename NativeT>
+ tensorflow::gtl::MutableArraySlice<NativeT> data();
+
+ // Returns the buffer holding the array data for this piece as a void*. This
+ // piece must be array-shaped.
+ void* untyped_data();
+ const void* untyped_data() const;
+
+ // Gets or sets an element in the array at the given index. The multi_index
+ // is CHECKed against the dimension sizes of the array. This piece must be
+ // array-shaped.
+ template <typename NativeT>
+ NativeT Get(tensorflow::gtl::ArraySlice<int64> index) const;
+ template <typename NativeT>
+ void Set(tensorflow::gtl::ArraySlice<int64> index, NativeT value);
+
+ // Gets/sets the buffer holding the array data.
+ char* buffer() const { return buffer_; }
+ void set_buffer(char* buffer) { buffer_ = buffer; }
+
+ // The array of multi-indices that provide the locations of non-zero
+ // elements in a sparse array. Only used if
+ // LayoutUtil::IsSparseArray(shape()) is true.
+ SparseIndexArray* sparse_indices() const { return sparse_indices_; }
+ void set_sparse_indices(SparseIndexArray* sparse_indices) {
+ sparse_indices_ = sparse_indices;
+ }
+
+ // Gets or sets the subshape of this piece. This reference points to a
+ // subshape within the shape in the containing Literal (Literal::shape_).
+ const Shape& subshape() const { return *subshape_; }
+ void set_subshape(const Shape* subshape) { subshape_ = subshape; }
+
+ // Returns the size in bytes of the buffer holding the array data.
+ int64 size_bytes() const { return ShapeUtil::ByteSizeOf(subshape()); }
+
+ // Returns the number of elements in this piece's array.
+ int64 element_count() const {
+ // If this is a sparse array, use the number of elements represented by
+ // the indices in the associated SparseIndexArray.
+ return LayoutUtil::IsSparseArray(subshape())
+ ? sparse_indices()->index_count()
+ : ShapeUtil::ElementsIn(subshape());
+ }
+
+ // Returns the child piece at 'index' of this piece.
+ Piece& child(int64 index) { return children_[index]; }
+
+ // Adds a child piece to this piece's children.
+ void emplace_back(Piece child_piece) {
+ children_.emplace_back(std::move(child_piece));
+ }
+
+ // Returns the number of child pieces of this piece.
+ int64 children_size() { return children_.size(); }
+
+ // Visitor functions that recursively traverse the piece and call the
+ // given function at each child piece. The function has the type:
+ // void (const ShapeIndex& index, const Piece& piece)
+ template <typename Fn>
+ void ForEachSubpiece(const Fn& func) const {
+ ShapeIndex index;
+ return ForEachHelper(
+ [&func](const ShapeIndex& index, const Piece& piece) {
+ func(index, piece);
+ return Status::OK();
+ },
+ *this, &index)
+ .IgnoreError();
+ }
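+ //
+ // Example (illustrative sketch) counting the array-shaped subpieces:
+ //
+ //   int64 num_arrays = 0;
+ //   piece.ForEachSubpiece(
+ //       [&](const ShapeIndex& index, const Piece& piece) {
+ //         if (ShapeUtil::IsArray(piece.subshape())) {
+ //           ++num_arrays;
+ //         }
+ //       });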
+ // Same as above, but the function has the type:
+ // Status (const ShapeIndex& index, const Piece& piece)
+ // The first non-OK return value is returned by the function.
+ template <typename Fn>
+ Status ForEachSubpieceWithStatus(const Fn& func) const {
+ ShapeIndex index;
+ return ForEachHelper(func, *this, &index);
+ }
+ // Same as above, but the function has the type:
+ // bool (const ShapeIndex& index, const Piece& piece)
+ // The first non-true return value is returned by the function.
+ template <typename Fn>
+ bool ForEachSubpieceWithBool(const Fn& func) const {
+ ShapeIndex index;
+ return ForEachHelperBool(func, *this, &index);
+ }
+ // Same as above, but the function has the type:
+ // void (const ShapeIndex& index, Piece* piece)
+ template <typename Fn>
+ void ForEachMutableSubpiece(const Fn& func) {
+ ShapeIndex index;
+ return ForEachMutableHelper(
+ [&func](const ShapeIndex& index, Piece* piece) {
+ func(index, piece);
+ return Status::OK();
+ },
+ const_cast<xla::LiteralBase::Piece*>(this), &index)
+ .IgnoreError();
+ }
+ // Same as above, but the function has the type:
+ // Status (const ShapeIndex& index, Piece* piece)
+ // The first non-OK return value is returned by the function.
+ template <typename Fn>
+ Status ForEachMutableSubpieceWithStatus(const Fn& func) {
+ ShapeIndex index;
+ return ForEachMutableHelper(
+ func, const_cast<xla::LiteralBase::Piece*>(this), &index);
+ }
+
+ // Returns true if this piece and 'other' contain the same data. This piece
+ // and 'other' must be array-shaped and compatible.
+ bool EqualElements(const Piece& other) const;
+
+ // Writes the shape and data (if array-shaped) into the given proto.
+ void WriteToProto(LiteralProto* proto) const;
+
+ // Copy the data from 'src' into this piece's buffer. Shapes of this piece
+ // and src must be compatible.
+ Status CopyFrom(const Piece& src);
+
+ // Copies the data from the given proto into this piece. The shape of this
+ // piece must be equal (not just compatible) to the shape of the proto.
+ Status CopyFromProto(const LiteralProto& proto);
+
+ // Sorts the elements in a sparse array.
+ void SortSparseElements();
+
+ private:
+ // Helpers for traversing the piece via ForEachSubpiece rooted at 'index'.
+ // The first non-OK (or non-true) value is returned by the function.
+ // The callable 'func' has the same signature as described above in
+ // ForEachSubpiece*.
+ template <typename Fn>
+ Status ForEachHelper(const Fn& func, const Piece& piece,
+ ShapeIndex* index) const {
+ TF_RETURN_IF_ERROR(func(*index, piece));
+ for (int64 i = 0; i < piece.children_.size(); ++i) {
+ index->push_back(i);
+ TF_RETURN_IF_ERROR(ForEachHelper(func, piece.children_[i], index));
+ index->pop_back();
+ }
+ return Status::OK();
+ }
+ template <typename Fn>
+ bool ForEachHelperBool(const Fn& func, const Piece& piece,
+ ShapeIndex* index) const {
+ if (!func(*index, piece)) {
+ return false;
+ }
+ for (int64 i = 0; i < piece.children_.size(); ++i) {
+ index->push_back(i);
+ if (!ForEachHelperBool(func, piece.children_[i], index)) {
+ return false;
+ }
+ index->pop_back();
+ }
+ return true;
+ }
+ template <typename Fn>
+ Status ForEachMutableHelper(const Fn& func, Piece* piece,
+ ShapeIndex* index) {
+ TF_RETURN_IF_ERROR(func(*index, piece));
+ for (int64 i = 0; i < piece->children_.size(); ++i) {
+ index->push_back(i);
+ TF_RETURN_IF_ERROR(
+ ForEachMutableHelper(func, &piece->children_[i], index));
+ index->pop_back();
+ }
+ return Status::OK();
+ }
+
+ // Recursive helper for EqualElements.
+ template <typename NativeT>
+ bool EqualElementsInternal(const Piece& other,
+ std::vector<int64>* multi_index) const;
+
+ // Helper for SortSparseElements that has the element type as a template
+ // parameter.
+ template <typename NativeT>
+ void SortSparseElementsInternal();
+
+ // For array-shaped pieces, this is the buffer holding the literal data.
+ char* buffer_ = nullptr;
+
+ // For sparse arrays, this is the array of indices.
+ SparseIndexArray* sparse_indices_ = nullptr;
+
+ // The shape of this piece. This points into the shape of the containing Literal
+ // (Literal::shape_).
+ const Shape* subshape_ = nullptr;
+
+ // Children pieces for tuple shaped pieces.
+ std::vector<Piece> children_ = {};
+ }; // class Piece
+
+ const Piece& piece(const ShapeIndex& shape_index) const {
+ Piece* piece = &const_cast<Piece&>(root_piece());
+ for (const auto i : shape_index) {
+ DCHECK_GE(i, 0);
+ DCHECK_LT(i, piece->children_size());
+ piece = &piece->child(i);
+ }
+ return *piece;
+ }
+
+ // Returns the piece at the root of the shape.
+ virtual const Piece& root_piece() const = 0;
+
+ // LiteralSlice and Literal must access Pieces of other Literals.
+ friend class Literal;
+ friend class LiteralSlice;
+ friend class BorrowingLiteral;
+
+ private:
+ template <typename NativeT>
+ std::unique_ptr<Literal> SliceInternal(
+ const Shape& result_shape,
+ tensorflow::gtl::ArraySlice<int64> start_indices) const;
+};
+
+// Class representing literal values in XLA.
+//
+ // The underlying buffer and shape are always owned by this class.
+class Literal : public LiteralBase {
+ public:
+ Literal() : Literal(ShapeUtil::MakeNil()) {}
+
+ // Create a literal of the given shape. The literal is allocated sufficient
+ // memory to hold the shape. Memory is uninitialized.
+ explicit Literal(const Shape& shape);
+ virtual ~Literal();
+
+ // Literals are moveable, but not copyable. To copy a literal use
+ // Literal::Clone or Literal::CloneToUnique. This prevents inadvertent copies
+ // of literals which can be expensive.
+ Literal(const Literal& other) = delete;
+ Literal& operator=(const Literal& other) = delete;
+ Literal(Literal&& other);
+ // 'allocate_arrays' indicates whether to allocate memory for the arrays in
+ // the shape. If false, buffer pointers inside of the Literal::Pieces are set
+ // to nullptr.
+ Literal(const Shape& shape, bool allocate_arrays);
+ Literal& operator=(Literal&& other);
+
+ // TODO(b/67651157): Remove this accessor. Literal users should not be able to
+ // mutate the shape as this can produce malformed Literals.
+ Shape* mutable_shape_do_not_use() { return shape_.get(); }
+
+ // Returns a MutableArraySlice view of the array for this literal for the
+ // given NativeT (e.g., float). CHECKs if the subshape of the literal at the
+ // given ShapeIndex is not an array. See primitive_util.h for the mapping from
+ // XLA type to native type.
+ template <typename NativeT>
+ tensorflow::gtl::MutableArraySlice<NativeT> data(
+ const ShapeIndex& shape_index = {});
+ // Unhide const method from parent class.
+ using LiteralBase::data;
+
+ // Returns a pointer to the sparse index array. Returns nullptr if the literal
+ // is not a sparse array.
+ SparseIndexArray* sparse_indices(const ShapeIndex& shape_index = {});
+
+ // Returns a pointer to the underlying buffer holding the array at the given
+ // shape index. CHECKs if the subshape of the literal at the given ShapeIndex
+ // is not an array.
+ void* untyped_data(const ShapeIndex& shape_index = {});
+ // Unhide const method from parent class.
+ using LiteralBase::untyped_data;
+
+ // Populates a literal with a sparse layout with the given indices and values.
+ // Each index in the indices array is CHECKed against the dimensions in the
+ // literal's shape. If sort is true, then the indices and values will be
+ // sorted. If sort is false, then the indices and values are assumed to
+ // already be in sorted order. See CreateSparse for an example of how data
+ // are populated.
+ template <typename NativeT>
+ void PopulateSparse(SparseIndexArray indices,
+ tensorflow::gtl::ArraySlice<NativeT> values,
+ bool sort = true);
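+ //
+ // Example (illustrative sketch; assumes a sparse s64[10] literal with room
+ // for at least two elements, and uses the SparseIndexArray constructor
+ // exercised in the tests):
+ //
+ //   Array2D<int64> index_array = {{1}, {3}};
+ //   std::vector<int64> values = {7, 8};
+ //   literal.PopulateSparse<int64>(
+ //       SparseIndexArray(/*max_indices=*/10, index_array), values);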
+
+ // Copy values from 'src_literal' rooted at 'src_shape_index' into this
+ // literal rooted at 'dest_shape_index'. The subshape of this literal rooted
+ // at 'dest_shape_index' must be compatible with the subshape of 'src_literal'
+ // rooted at 'src_shape_index', but need not be arrays.
+ Status CopyFrom(const LiteralSlice& src_literal,
+ const ShapeIndex& dest_shape_index = {},
+ const ShapeIndex& src_shape_index = {});
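+ //
+ // Example (illustrative sketch) copying tuple element 0 of 'src' into tuple
+ // element 1 of 'dest':
+ //
+ //   TF_CHECK_OK(dest.CopyFrom(src, /*dest_shape_index=*/{1},
+ //                             /*src_shape_index=*/{0}));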
+
+ // Returns a vector containing the tuple elements of this Literal as separate
+ // Literals. This Literal must be tuple-shaped and can be a nested tuple. The
+ // elements are moved into the new Literals; no data is copied. Upon return
+ // this Literal is set to a nil shape (empty tuple).
+ std::vector<Literal> DecomposeTuple();
+
+ // Similar to CopyFrom, but with move semantics. The subshape of this literal
+ // rooted at 'dest_shape_index' must be *equal* to the shape of 'src_literal'
+ // (layouts and shapes must match), but need not be arrays. The memory
+ // allocated in this literal for the subshape at dest_shape_index is
+ // deallocated, and the respective buffers are replaced with those in
+ // src_literal. Upon return, src_literal is set to a nil shape (empty tuple).
+ Status MoveFrom(Literal&& src_literal,
+ const ShapeIndex& dest_shape_index = {});
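+ //
+ // Example (illustrative sketch) moving all of 'src' into tuple element 0 of
+ // 'dest':
+ //
+ //   TF_CHECK_OK(dest.MoveFrom(std::move(src), /*dest_shape_index=*/{0}));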
+
+ // Copies the values from src_literal, starting at src_base shape indexes,
+ // to this literal, starting at dest_base, where the copy size in each
+ // dimension is specified by copy_size.
+ // The src_literal and this literal must have the same primitive type,
+ // src_base+copy_size must fit within the source literal dimensions, and
+ // dest_base+copy_size must fit within the destination literal dimensions.
+ // Note: if either src_literal or this literal contains dimensions with zero
+ // elements, then copy_size must be 0 in those dimensions and the
+ // corresponding base indices must be 0.
+ // This literal and 'src_literal' must be arrays.
+ Status CopySliceFrom(const LiteralSlice& src_literal,
+ tensorflow::gtl::ArraySlice<int64> src_base,
+ tensorflow::gtl::ArraySlice<int64> dest_base,
+ tensorflow::gtl::ArraySlice<int64> copy_size);
+
+ // Copies one element from src_literal[src_index] to (*this)[dest_index].
+ Status CopyElementFrom(const LiteralSlice& src_literal,
+ tensorflow::gtl::ArraySlice<int64> src_index,
+ tensorflow::gtl::ArraySlice<int64> dest_index);
+
+ // Sets an element in the literal at the given index. The multi_index is
+ // CHECKed against the dimension sizes.
+ template <typename NativeT>
+ void Set(tensorflow::gtl::ArraySlice<int64> multi_index,
+ const ShapeIndex& shape_index, NativeT value);
+ // Overloads of Set for array literals. CHECKs if the literal is not
+ // array-shaped and dense.
+ template <typename NativeT>
+ void Set(tensorflow::gtl::ArraySlice<int64> multi_index, NativeT value);
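+ //
+ // Example (illustrative sketch; assumes a dense f32[2,2] literal):
+ //
+ //   literal.Set<float>({0, 1}, 2.5f);
+ //   CHECK_EQ(literal.Get<float>({0, 1}), 2.5f);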
+
+ // Appends the given element to the literal. If the elements are not appended
+ // in sorted order, then SortSparseElements should be called before calling
+ // other methods. This literal must have a sparse layout.
+ template <typename NativeT>
+ void AppendSparseElement(tensorflow::gtl::ArraySlice<int64> multi_index,
+ NativeT value, const ShapeIndex& shape_index = {});
+
+ // Sorts the elements in a sparse array.
+ void SortSparseElements(const ShapeIndex& shape_index = {});
+
+ // As Set(), but truncates `value` to the literal element type before storing.
+ // This literal must be an array.
+ Status SetIntegralAsS64(tensorflow::gtl::ArraySlice<int64> multi_index,
+ int64 value);
+
+ // Populate this literal with the given values. Examples:
+ //
+ // // Populate with floats.
+ // Array2D<float> float_values = ...
+ // literal.PopulateR2FromArray2D(values);
+ //
+ // // Populate with int32s.
+ // literal.PopulateR2<int32>({{1, 2}, {3, 4}});
+ //
+ // The shape and element type of this literal must match the given values. For
+ // example, in the call above to literal.PopulateR2(), 'literal' must be a 2x2
+ // array of S32.
+ template <typename NativeT>
+ void PopulateR1(tensorflow::gtl::ArraySlice<NativeT> values);
+ void PopulateR1(const tensorflow::core::Bitmap& values);
+ template <typename NativeT>
+ void PopulateR2(std::initializer_list<std::initializer_list<NativeT>> values);
+ template <typename NativeT>
+ void PopulateFromArray(const Array<NativeT>& values);
+ template <typename NativeT>
+ void PopulateR2FromArray2D(const Array2D<NativeT>& values);
+ template <typename NativeT>
+ void PopulateR3FromArray3D(const Array3D<NativeT>& values);
+ template <typename NativeT>
+ void PopulateR4FromArray4D(const Array4D<NativeT>& values);
+
+ // Populates literal values by calling the generator function for every cell
+ // in this literal object.
+ //
+ // generator must be a callable of the type
+ // NativeT(tensorflow::gtl::ArraySlice<int64> indexes) or compatible.
+ //
+ // This literal must have a dense layout.
+ template <typename NativeT, typename FnType>
+ Status Populate(const FnType& generator);
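+ //
+ // Example (illustrative sketch; assumes a dense rank-2 f32 literal):
+ //
+ //   TF_CHECK_OK(literal.Populate<float>(
+ //       [](tensorflow::gtl::ArraySlice<int64> indexes) {
+ //         return static_cast<float>(indexes[0] + indexes[1]);
+ //       }));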
+
+ // A parallel version of Populate(). This can be used if the generator is
+ // thread-safe and the values for the shape's different elements are
+ // independent.
+ template <typename NativeT, typename FnType>
+ Status PopulateParallel(const FnType& generator);
+
+ // Fills this literal with the given value.
+ template <typename NativeT>
+ void PopulateWithValue(NativeT value);
+
+ // This operation is the inverse of DecomposeTuple. The given elements are
+ // moved into the tuple elements of a new tuple-shaped Literal which is
+ // returned. Upon return, each of the Literals in 'elements' is set to a nil
+ // shape (empty tuple).
+ static Literal MoveIntoTuple(
+ tensorflow::gtl::MutableArraySlice<Literal> elements);
+
+ // Deserializes a Literal from the given proto.
+ static StatusOr<std::unique_ptr<Literal>> CreateFromProto(
+ const LiteralProto& proto);
+
+ private:
+ // Recursively sets the subshapes and buffers of all subpieces rooted at
+ // 'piece'. If 'allocate_array' is true, memory is allocated for the arrays in
+ // the shape.
+ void SetPiece(const Shape& shape, Piece* piece, bool allocate_arrays);
+
+ // Returns the piece at the given ShapeIndex.
+ Piece& piece(const ShapeIndex& shape_index) {
+ return const_cast<Piece&>(LiteralBase::piece(shape_index));
+ }
+
+ Piece& root_piece() const override { return *root_piece_; };
+
+ // Internal template helper for the Literal::CopySliceFrom(), matching its
+ // arguments one by one.
+ template <typename NativeT>
+ Status CopySliceFromInternal(const LiteralBase& src_literal,
+ tensorflow::gtl::ArraySlice<int64> src_base,
+ tensorflow::gtl::ArraySlice<int64> dest_base,
+ tensorflow::gtl::ArraySlice<int64> copy_size);
+
+ // Utility structure which is used to create the optimal configuration for
+ // a ShapeUtil::ForEachIndex() scan across two literals.
+ struct StrideConfig {
+ StrideConfig(const Shape& source_shape, const Shape& dest_shape,
+ tensorflow::gtl::ArraySlice<int64> dimensions);
+
+ // The dimensions of the stride operation. Essentially every dimension
+ // will be iterated from base[i] to base[i]+dimensions[i], in step[i]
+ // steps.
+ tensorflow::gtl::ArraySlice<int64> dimensions;
+ DimensionVector base;
+ DimensionVector step;
+ int64 minor_dimension = 0;
+ // The stride sizes for source and destination. One of the two (the one
+ // looping through its most minor dimension) will be 1, while the other
+ // will be the stride size at the dimension matching the most minor
+ // dimension of the other shape being scanned.
+ int64 dest_stride = 1;
+ int64 source_stride = 1;
+ // The size of the inner loop on the most minor dimension.
+ int64 minor_loop_size = 1;
+ };
+
+ // The Literal class always owns the shape. The parent class borrows this shape.
+ std::unique_ptr<Shape> shape_;
+
+ Piece* root_piece_ = nullptr;
+
+ // Implementation details shared between Populate() and PopulateParallel().
+ template <typename NativeT, typename FnType>
+ Status PopulateInternal(const FnType& generator, bool parallel);
+
+ // Deallocate the buffers held by this literal.
+ void DeallocateBuffers();
+
+ friend class LiteralBase;
+};
+std::ostream& operator<<(std::ostream& out, const Literal& literal);
+
+// A read-only view of a Literal. A LiteralSlice contains pointers to shape and
+// literal buffers always owned by others.
+class LiteralSlice : public LiteralBase {
+ public:
+ LiteralSlice() : LiteralBase() {}
+
+ // Implicit conversion constructors.
+ LiteralSlice(const LiteralBase& literal);
+ LiteralSlice(const LiteralBase& literal, const ShapeIndex& view_root);
+
+ private:
+ const Piece& root_piece() const override { return *root_piece_; };
+
+ const Piece* root_piece_; // Not owned.
+};
+
+// A read-only Literal where the underlying buffers are never owned by this
+// class.
+class BorrowingLiteral : public LiteralBase {
+ public:
+ BorrowingLiteral() : LiteralBase() {}
+
+ // 'src_buf_ptr' is not owned by this class and must outlive the
+ // lifetime of this class. It points to an appropriately sized buffer with
+ // data interpreted as indicated by 'shape'.
+ // This constructor is only used for array shapes.
+ BorrowingLiteral(const char* src_buf_ptr, const Shape& shape);
+ // Similar to the above, except used for constructing non-nested tuples.
+ BorrowingLiteral(tensorflow::gtl::ArraySlice<const char*> src_buf_ptrs,
+ const Shape& shape);
+ // TODO(b/79707221): add constructors for nested tuples as well.
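+
+ // Example (illustrative sketch) viewing caller-owned memory as an f32[2,2]
+ // literal; 'buf' must outlive the view:
+ //
+ //   float buf[4] = {1, 2, 3, 4};
+ //   BorrowingLiteral view(reinterpret_cast<const char*>(buf),
+ //                         ShapeUtil::MakeShape(F32, {2, 2}));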
+
+ private:
+ // Recursively builds the subtree for the given piece and sets the subshapes
+ // of the given piece with the given shape.
+ void BuildPieceSubtree(const Shape& shape, Piece* piece);
+
+ // Accessor for the root piece of this literal.
+ const Piece& root_piece() const override { return root_piece_; };
+ Piece root_piece_;
+
+ // Shape of this literal. Stored as a unique_ptr such that the (default)
+ // move construction of this class is trivially correct: the pointer to the
+ // Shape that root_piece_ stores will still point to the correct address.
+ std::unique_ptr<Shape> shape_;
+};
+
+template <typename NativeT>
+tensorflow::gtl::ArraySlice<NativeT> LiteralBase::Piece::data() const {
+ CHECK(ShapeUtil::IsArray(subshape())) << ShapeUtil::HumanString(subshape());
+ CHECK_EQ(subshape().element_type(),
+ primitive_util::NativeToPrimitiveType<NativeT>())
+ << "Attempting to access "
+ << PrimitiveType_Name(primitive_util::NativeToPrimitiveType<NativeT>())
+ << " type, but literal element type is "
+ << PrimitiveType_Name(subshape().element_type());
+ return tensorflow::gtl::ArraySlice<NativeT>(
+ reinterpret_cast<const NativeT*>(buffer()), element_count());
+}
+
+template <typename NativeT>
+tensorflow::gtl::MutableArraySlice<NativeT> LiteralBase::Piece::data() {
+ CHECK(ShapeUtil::IsArray(subshape())) << ShapeUtil::HumanString(subshape());
+ CHECK_EQ(subshape().element_type(),
+ primitive_util::NativeToPrimitiveType<NativeT>())
+ << "Attempting to access "
+ << PrimitiveType_Name(primitive_util::NativeToPrimitiveType<NativeT>())
+ << " type, but literal element type is "
+ << PrimitiveType_Name(subshape().element_type());
+ return tensorflow::gtl::MutableArraySlice<NativeT>(
+ reinterpret_cast<NativeT*>(buffer()), element_count());
+}
+
+template <typename NativeT>
+NativeT LiteralBase::Piece::Get(
+ tensorflow::gtl::ArraySlice<int64> multi_index) const {
+ CHECK(LayoutUtil::IsDenseArray(subshape()));
+ return data<NativeT>()[IndexUtil::MultidimensionalIndexToLinearIndex(
+ subshape(), multi_index)];
+}
+
+template <typename NativeT>
+void LiteralBase::Piece::Set(tensorflow::gtl::ArraySlice<int64> multi_index,
+ NativeT value) {
+ CHECK(LayoutUtil::IsDenseArray(subshape()));
+ data<NativeT>()[IndexUtil::MultidimensionalIndexToLinearIndex(
+ subshape(), multi_index)] = value;
+}
+
+template <typename NativeT>
+tensorflow::gtl::ArraySlice<NativeT> LiteralBase::data(
+ const ShapeIndex& shape_index) const {
+ return piece(shape_index).data<NativeT>();
+}
+
+template <typename NativeT>
+tensorflow::gtl::MutableArraySlice<NativeT> Literal::data(
+ const ShapeIndex& shape_index) {
+ return piece(shape_index).data<NativeT>();
+}
+
+template <typename NativeT>
+inline NativeT LiteralBase::Get(tensorflow::gtl::ArraySlice<int64> multi_index,
+ const ShapeIndex& shape_index) const {
+ return piece(shape_index).Get<NativeT>(multi_index);
+}
+
+template <typename NativeT>
+inline NativeT LiteralBase::Get(
+ tensorflow::gtl::ArraySlice<int64> multi_index) const {
+ return root_piece().Get<NativeT>(multi_index);
+}
+
+template <typename NativeT>
+inline void Literal::Set(tensorflow::gtl::ArraySlice<int64> multi_index,
+ const ShapeIndex& shape_index, NativeT value) {
+ return piece(shape_index).Set<NativeT>(multi_index, value);
+}
+
+template <typename NativeT>
+inline void Literal::Set(tensorflow::gtl::ArraySlice<int64> multi_index,
+ NativeT value) {
+ return root_piece().Set<NativeT>(multi_index, value);
+}
+
+template <typename NativeT>
+NativeT LiteralBase::GetFirstElement() const {
+ return data<NativeT>().at(0);
+}
+
+template <typename NativeT>
+NativeT LiteralBase::GetSparseElement(int64 sparse_element_number,
+ const ShapeIndex& shape_index) const {
+ CHECK(
+ LayoutUtil::IsSparseArray(ShapeUtil::GetSubshape(shape(), shape_index)));
+ return data<NativeT>(shape_index)[sparse_element_number];
+}
+
+template <typename NativeT>
+void Literal::AppendSparseElement(
+ tensorflow::gtl::ArraySlice<int64> multi_index, NativeT value,
+ const ShapeIndex& shape_index) {
+ Piece& p = piece(shape_index);
+ const Shape& subshape = p.subshape();
+ CHECK(LayoutUtil::IsSparseArray(subshape));
+ int64 rank = ShapeUtil::Rank(subshape);
+ CHECK_EQ(multi_index.size(), rank);
+ int64 last_element = p.sparse_indices()->index_count();
+ CHECK_LT(last_element, LayoutUtil::MaxSparseElements(subshape.layout()));
+ p.sparse_indices()->Append(multi_index);
+ CHECK_LT(last_element, p.data<NativeT>().size());
+ p.data<NativeT>()[last_element] = value;
+}
+
+template <typename NativeT>
+void LiteralBase::EachCell(
+ std::function<void(tensorflow::gtl::ArraySlice<int64> indices,
+ NativeT value)>
+ per_cell) const {
+ if (ShapeUtil::IsZeroElementArray(shape())) {
+ return;
+ }
+ std::vector<int64> indices(ShapeUtil::Rank(shape()), 0);
+ do {
+ per_cell(indices, Get<NativeT>(indices));
+ } while (IndexUtil::BumpIndices(shape(), &indices));
+}
+
+template <typename NativeT>
+inline void Literal::PopulateR1(tensorflow::gtl::ArraySlice<NativeT> values) {
+ CHECK(ShapeUtil::IsArray(shape()));
+ CHECK_EQ(ShapeUtil::Rank(shape()), 1);
+ CHECK_EQ(ShapeUtil::ElementsIn(shape()), values.size());
+ CHECK_EQ(shape().element_type(),
+ primitive_util::NativeToPrimitiveType<NativeT>());
+ for (int64 i = 0; i < values.size(); ++i) {
+ Set({i}, values[i]);
+ }
+}
+
+template <typename NativeT>
+void Literal::PopulateR2(
+ std::initializer_list<std::initializer_list<NativeT>> values) {
+ CHECK(ShapeUtil::IsArray(shape()));
+ CHECK_EQ(ShapeUtil::Rank(shape()), 2);
+ CHECK_EQ(shape().element_type(),
+ primitive_util::NativeToPrimitiveType<NativeT>());
+
+ const int64 dim0_size = values.size();
+ const int64 dim1_size = values.begin()->size();
+ CHECK_EQ(dim0_size, shape().dimensions(0));
+ CHECK_EQ(dim1_size, shape().dimensions(1));
+
+ int64 dim0 = 0;
+ for (auto inner_list : values) {
+ int64 dim1 = 0;
+ for (auto value : inner_list) {
+ Set({dim0, dim1}, value);
+ ++dim1;
+ }
+ CHECK_EQ(dim1_size, dim1);
+ ++dim0;
+ }
+}
+
+template <typename NativeT>
+void Literal::PopulateFromArray(const Array<NativeT>& values) {
+ CHECK(ShapeUtil::IsArray(shape()));
+ CHECK_EQ(shape().element_type(),
+ primitive_util::NativeToPrimitiveType<NativeT>());
+ CHECK_EQ(ShapeUtil::Rank(shape()), values.num_dimensions());
+ for (int dim = 0; dim < values.num_dimensions(); ++dim) {
+ CHECK_EQ(values.dim(dim), shape().dimensions(dim));
+ }
+ values.Each([this](tensorflow::gtl::ArraySlice<int64> indices,
+ NativeT value) { this->Set(indices, value); });
+}
+
+template <typename NativeT>
+void Literal::PopulateR2FromArray2D(const Array2D<NativeT>& values) {
+ PopulateFromArray(values);
+}
+
+template <typename NativeT>
+void Literal::PopulateR3FromArray3D(const Array3D<NativeT>& values) {
+ PopulateFromArray(values);
+}
+
+template <typename NativeT>
+void Literal::PopulateR4FromArray4D(const Array4D<NativeT>& values) {
+ PopulateFromArray(values);
+}
+
+template <typename NativeT>
+void Literal::PopulateSparse(SparseIndexArray indices,
+ tensorflow::gtl::ArraySlice<NativeT> values,
+ bool sort) {
+ CHECK(LayoutUtil::IsSparseArray(shape()));
+ int rank = ShapeUtil::Rank(shape());
+ CHECK_EQ(indices.rank(), rank);
+ int64 max_elements = LayoutUtil::MaxSparseElements(shape().layout());
+ CHECK_LE(indices.max_indices(), max_elements);
+ int64 num_elements = values.size();
+ CHECK_LE(num_elements, max_elements);
+ CHECK_EQ(num_elements, indices.index_count());
+ auto root_data = root_piece().data<NativeT>();
+ // Piece::data() returns an ArraySlice of size equal to the number of indices
+ // in the SparseIndexArray. So there is no need to adjust the size of the data
+ // here. It is enough to just copy the incoming values into the data buffer.
+ std::copy(values.begin(), values.end(), root_data.begin());
+ *this->root_piece().sparse_indices() = std::move(indices);
+ if (sort) {
+ auto root_data = this->root_piece().data<NativeT>();
+ this->root_piece().sparse_indices()->SortWithValues(root_data);
+ }
+ DCHECK(this->root_piece().sparse_indices()->Validate(shape()));
+}
+
+template <typename NativeT, typename FnType>
+Status Literal::PopulateInternal(const FnType& generator, bool parallel) {
+ const Shape& this_shape = shape();
+ const int64 rank = ShapeUtil::Rank(this_shape);
+ TF_RET_CHECK(LayoutUtil::IsDenseArray(this_shape));
+ TF_RET_CHECK(this_shape.element_type() ==
+ primitive_util::NativeToPrimitiveType<NativeT>());
+ tensorflow::gtl::MutableArraySlice<NativeT> literal_data = data<NativeT>();
+ if (rank > 0) {
+ StrideConfig stride_config(this_shape, this_shape,
+ AsInt64Slice(this_shape.dimensions()));
+ int64 minor_dimension_size =
+ ShapeUtil::GetDimension(this_shape, stride_config.minor_dimension);
+
+ auto init_function = [&](tensorflow::gtl::ArraySlice<int64> indexes) {
+ DimensionVector minor_scan_indexes(rank, 0);
+ const int64 index =
+ IndexUtil::MultidimensionalIndexToLinearIndex(shape(), indexes);
+ std::copy(indexes.begin(), indexes.end(), minor_scan_indexes.begin());
+ for (int64 i = 0; i < minor_dimension_size; ++i) {
+ minor_scan_indexes[stride_config.minor_dimension] = i;
+ literal_data.at(index + i) = generator(minor_scan_indexes);
+ }
+ };
+ if (parallel) {
+ ShapeUtil::ForEachIndexParallel(this_shape, stride_config.base,
+ stride_config.dimensions,
+ stride_config.step, init_function);
+ } else {
+ ShapeUtil::ForEachIndex(
+ this_shape, stride_config.base, stride_config.dimensions,
+ stride_config.step,
+ [&init_function](tensorflow::gtl::ArraySlice<int64> indexes) {
+ init_function(indexes);
+ return true;
+ });
+ }
+ } else {
+ // For scalars.
+ literal_data.at(0) = generator({});
+ }
+ return Status::OK();
+}
+template <typename NativeT, typename FnType>
+Status Literal::Populate(const FnType& generator) {
+ return PopulateInternal<NativeT>(generator, /*parallel=*/false);
+}
+
+template <typename NativeT, typename FnType>
+Status Literal::PopulateParallel(const FnType& generator) {
+ return PopulateInternal<NativeT>(generator, /*parallel=*/true);
+}
+
+template <typename NativeT>
+void Literal::PopulateWithValue(NativeT value) {
+ CHECK(ShapeUtil::IsArray(shape()));
+ CHECK_EQ(shape().element_type(),
+ primitive_util::NativeToPrimitiveType<NativeT>());
+ for (NativeT& element : data<NativeT>()) {
+ element = value;
+ }
+}
+
+template <typename NativeT>
+std::unique_ptr<Literal> LiteralBase::Replicate(int64 times) const {
+ DimensionVector bounds = {times};
+ bounds.reserve(shape().dimensions_size() + 1);
+ for (int64 bound : shape().dimensions()) {
+ bounds.push_back(bound);
+ }
+ auto literal =
+ MakeUnique<Literal>(ShapeUtil::MakeShape(shape().element_type(), bounds));
+ int64 elements = ShapeUtil::ElementsIn(literal->shape());
+ if (elements == 0) {
+ return literal;
+ }
+
+ DimensionVector output_indices(bounds.size(), 0);
+ tensorflow::gtl::ArraySlice<int64> input_indices = output_indices;
+ input_indices.remove_prefix(1);
+
+ bool done = false;
+ while (!done) {
+ const auto element = Get<NativeT>(input_indices);
+ literal->Set<NativeT>(output_indices, element);
+
+ done = true;
+ for (int n = 0; n < output_indices.size(); ++n) {
+ ++output_indices[n];
+ if (output_indices[n] < bounds[n]) {
+ done = false;
+ break;
+ }
+ output_indices[n] = 0;
+ }
+ }
+ return literal;
+}
+
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LITERAL_H_
diff --git a/tensorflow/compiler/xla/literal_comparison.cc b/tensorflow/compiler/xla/literal_comparison.cc
index 2125ab7c61..94993cc874 100644
--- a/tensorflow/compiler/xla/literal_comparison.cc
+++ b/tensorflow/compiler/xla/literal_comparison.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include <cmath>
#include <vector>
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/casts.h"
#include "tensorflow/core/lib/strings/strcat.h"
@@ -217,7 +218,7 @@ class NearComparator {
return Printf(
"actual %s, expected %s, index %s, rel error %8.3g, abs error %8.3g",
FpValueToString(actual).c_str(), FpValueToString(expected).c_str(),
- Literal::MultiIndexAsString(
+ LiteralUtil::MultiIndexAsString(
IndexUtil::LinearIndexToMultidimensionalIndex(shape,
linear_index))
.c_str(),
@@ -722,7 +723,7 @@ Status Equal(const LiteralSlice& expected, const LiteralSlice& actual) {
return AppendStatus(result,
tensorflow::strings::Printf(
"\nat index: %s\nexpected: %s\nactual: %s",
- Literal::MultiIndexAsString(multi_index).c_str(),
+ LiteralUtil::MultiIndexAsString(multi_index).c_str(),
ToStringTruncated(expected).c_str(),
ToStringTruncated(actual).c_str()));
}
diff --git a/tensorflow/compiler/xla/literal_comparison.h b/tensorflow/compiler/xla/literal_comparison.h
index 00a13e3619..9e5bf7c1d0 100644
--- a/tensorflow/compiler/xla/literal_comparison.h
+++ b/tensorflow/compiler/xla/literal_comparison.h
@@ -20,7 +20,7 @@ limitations under the License.
#define TENSORFLOW_COMPILER_XLA_LITERAL_COMPARISON_H_
#include "tensorflow/compiler/xla/error_spec.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/lib/core/status.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/literal_util_test.cc b/tensorflow/compiler/xla/literal_test.cc
index 493d807591..e8f919950f 100644
--- a/tensorflow/compiler/xla/literal_util_test.cc
+++ b/tensorflow/compiler/xla/literal_test.cc
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include <vector>
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/array3d.h"
#include "tensorflow/compiler/xla/array4d.h"
#include "tensorflow/compiler/xla/layout_util.h"
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/types.h"
@@ -76,11 +77,11 @@ class LiteralUtilTest : public ::testing::Test {
layout_r4_dim0minor_ = LayoutUtil::MakeLayout({0, 1, 2, 3});
literal_r4_2x2x3x3_dim0major_ =
- Literal::CreateR4FromArray4DWithLayout<float>(arr4d,
- layout_r4_dim0major_);
+ LiteralUtil::CreateR4FromArray4DWithLayout<float>(arr4d,
+ layout_r4_dim0major_);
literal_r4_2x2x3x3_dim0minor_ =
- Literal::CreateR4FromArray4DWithLayout<float>(arr4d,
- layout_r4_dim0minor_);
+ LiteralUtil::CreateR4FromArray4DWithLayout<float>(arr4d,
+ layout_r4_dim0minor_);
}
Layout layout_r2_dim0major_;
@@ -94,47 +95,47 @@ class LiteralUtilTest : public ::testing::Test {
};
TEST_F(LiteralUtilTest, LiteralScalarToString) {
- auto true_lit = Literal::CreateR0<bool>(true);
+ auto true_lit = LiteralUtil::CreateR0<bool>(true);
ASSERT_EQ("true", true_lit->ToString());
- auto false_lit = Literal::CreateR0<bool>(false);
+ auto false_lit = LiteralUtil::CreateR0<bool>(false);
ASSERT_EQ("false", false_lit->ToString());
- auto u32_lit = Literal::CreateR0<uint32>(42);
+ auto u32_lit = LiteralUtil::CreateR0<uint32>(42);
ASSERT_EQ("42", u32_lit->ToString());
- auto s32_lit = Literal::CreateR0<int32>(-999);
+ auto s32_lit = LiteralUtil::CreateR0<int32>(-999);
ASSERT_EQ("-999", s32_lit->ToString());
- auto f32_lit = Literal::CreateR0<float>(3.14f);
+ auto f32_lit = LiteralUtil::CreateR0<float>(3.14f);
ASSERT_EQ("3.14", f32_lit->ToString());
- auto f16_lit = Literal::CreateR0<half>(static_cast<half>(0.5f));
+ auto f16_lit = LiteralUtil::CreateR0<half>(static_cast<half>(0.5f));
ASSERT_EQ("0.5", f16_lit->ToString());
- auto c64_lit = Literal::CreateR0<complex64>({3.14f, 2.78f});
+ auto c64_lit = LiteralUtil::CreateR0<complex64>({3.14f, 2.78f});
ASSERT_EQ("(3.14, 2.78)", c64_lit->ToString());
- auto bf16_lit = Literal::CreateR0<bfloat16>(static_cast<bfloat16>(0.5f));
+ auto bf16_lit = LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(0.5f));
ASSERT_EQ("0.5", bf16_lit->ToString());
// 3.14 will be truncated to 3.125 in bfloat16 format.
auto bf16_lit_truncated =
- Literal::CreateR0<bfloat16>(static_cast<bfloat16>(3.14f));
+ LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(3.14f));
ASSERT_EQ("3.125", bf16_lit_truncated->ToString());
auto bf16_lit_truncated2 =
- Literal::CreateR0<bfloat16>(static_cast<bfloat16>(9.001f));
+ LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(9.001f));
ASSERT_EQ("9", bf16_lit_truncated2->ToString());
}
TEST_F(LiteralUtilTest, LiteralVectorToString) {
- auto pred_vec = Literal::CreateR1<bool>({true, false, true});
+ auto pred_vec = LiteralUtil::CreateR1<bool>({true, false, true});
ASSERT_EQ("{101}", pred_vec->ToString());
}
TEST_F(LiteralUtilTest, R2ToString) {
- const auto literal = Literal::CreateR2({{1, 2}, {3, 4}, {5, 6}});
+ const auto literal = LiteralUtil::CreateR2({{1, 2}, {3, 4}, {5, 6}});
const string expected = R"(s32[3,2] {
{ 1, 2 },
{ 3, 4 },
@@ -144,7 +145,8 @@ TEST_F(LiteralUtilTest, R2ToString) {
}
TEST_F(LiteralUtilTest, R3ToString) {
- const auto literal = Literal::CreateR3({{{1}, {2}}, {{3}, {4}}, {{5}, {6}}});
+ const auto literal =
+ LiteralUtil::CreateR3({{{1}, {2}}, {{3}, {4}}, {{5}, {6}}});
const string expected = R"(s32[3,2,1] {
{ { 1 },
{ 2 } },
@@ -157,9 +159,9 @@ TEST_F(LiteralUtilTest, R3ToString) {
}
TEST_F(LiteralUtilTest, TupleToString) {
- auto scalar = Literal::CreateR0<float>(1.0);
- auto matrix = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
- auto tuple = Literal::MakeTuple({scalar.get(), matrix.get()});
+ auto scalar = LiteralUtil::CreateR0<float>(1.0);
+ auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ auto tuple = LiteralUtil::MakeTuple({scalar.get(), matrix.get()});
const string expected = R"((f32[], f32[2,2]) (
1,
f32[2,2] {
@@ -182,7 +184,7 @@ TEST_F(LiteralUtilTest, CreateR3FromArray3d) {
});
// clang-format on
- auto literal = Literal::CreateR3FromArray3D(array_3d);
+ auto literal = LiteralUtil::CreateR3FromArray3D(array_3d);
EXPECT_THAT(literal->shape().dimensions(), ElementsAre(2, 3, 2));
string result = literal->ToString();
const string expected = R"(f32[2,3,2] {
@@ -205,7 +207,7 @@ TEST_F(LiteralUtilTest, CreateSparse) {
{3, 5, 6},
};
std::vector<int64> values = {7, 8, 9, 10};
- auto literal = Literal::CreateSparse<int64>(
+ auto literal = LiteralUtil::CreateSparse<int64>(
dimensions, SparseIndexArray(indices.n1() + 3, indices), values);
Array2D<int64> expected_indices = {
@@ -224,7 +226,7 @@ TEST_F(LiteralUtilTest, CreateSparse) {
TEST_F(LiteralUtilTest, LiteralR4F32ProjectedStringifies) {
// clang-format off
- auto literal = Literal::CreateR4Projected<float>({
+ auto literal = LiteralUtil::CreateR4Projected<float>({
{1, 2},
{1001, 1002},
{2001, 2002},
@@ -284,7 +286,7 @@ TEST_F(LiteralUtilTest, LiteralR4F32Stringifies) {
TEST_F(LiteralUtilTest, EachCellR2F32) {
// clang-format off
- auto literal = Literal::CreateR2<float>({
+ auto literal = LiteralUtil::CreateR2<float>({
{3.1f, 4.2f},
{9.3f, 12.4f},
});
@@ -303,26 +305,27 @@ TEST_F(LiteralUtilTest, EachCellR2F32) {
TEST_F(LiteralUtilTest, ScalarEquality) {
// Test equality with scalars.
- auto f32_42 = Literal::CreateR0<float>(42.0);
- auto f32_42_clone = Literal::CreateR0<float>(42.0);
+ auto f32_42 = LiteralUtil::CreateR0<float>(42.0);
+ auto f32_42_clone = LiteralUtil::CreateR0<float>(42.0);
EXPECT_EQ(*f32_42, *f32_42);
EXPECT_EQ(*f32_42, *f32_42_clone);
- auto f32_123 = Literal::CreateR0<float>(123.0);
+ auto f32_123 = LiteralUtil::CreateR0<float>(123.0);
EXPECT_NE(*f32_42, *f32_123);
- auto f64_42 = Literal::CreateR0<double>(42.0);
+ auto f64_42 = LiteralUtil::CreateR0<double>(42.0);
EXPECT_NE(*f32_42, *f64_42);
}
TEST_F(LiteralUtilTest, NonScalarEquality) {
// Test equality with nonscalars.
- auto matrix = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
- auto matrix_clone = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
- auto matrix_different = Literal::CreateR2<float>({{4.0, 3.0}, {1.0, 2.0}});
- auto vector_literal = Literal::CreateR1<float>({1.0, 2.0, 3.0, 4.0});
- auto scalar = Literal::CreateR0<float>(1.0);
+ auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ auto matrix_clone = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ auto matrix_different =
+ LiteralUtil::CreateR2<float>({{4.0, 3.0}, {1.0, 2.0}});
+ auto vector_literal = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, 4.0});
+ auto scalar = LiteralUtil::CreateR0<float>(1.0);
Literal nil(ShapeUtil::MakeNil());
EXPECT_EQ(*matrix, *matrix);
@@ -335,19 +338,19 @@ TEST_F(LiteralUtilTest, NonScalarEquality) {
}
TEST_F(LiteralUtilTest, TokenEquality) {
- auto token0 = Literal::CreateToken();
- auto token1 = Literal::CreateToken();
- auto scalar = Literal::CreateR0<float>(1.0);
+ auto token0 = LiteralUtil::CreateToken();
+ auto token1 = LiteralUtil::CreateToken();
+ auto scalar = LiteralUtil::CreateR0<float>(1.0);
EXPECT_EQ(*token0, *token1);
EXPECT_NE(*token0, *scalar);
- EXPECT_EQ(*Literal::MakeTuple({token0.get()}),
- *Literal::MakeTuple({token0.get()}));
- EXPECT_EQ(*Literal::MakeTuple({token0.get(), scalar.get()}),
- *Literal::MakeTuple({token1.get(), scalar.get()}));
- EXPECT_NE(*Literal::MakeTuple({token0.get(), scalar.get()}),
- *Literal::MakeTuple({scalar.get(), token1.get()}));
+ EXPECT_EQ(*LiteralUtil::MakeTuple({token0.get()}),
+ *LiteralUtil::MakeTuple({token0.get()}));
+ EXPECT_EQ(*LiteralUtil::MakeTuple({token0.get(), scalar.get()}),
+ *LiteralUtil::MakeTuple({token1.get(), scalar.get()}));
+ EXPECT_NE(*LiteralUtil::MakeTuple({token0.get(), scalar.get()}),
+ *LiteralUtil::MakeTuple({scalar.get(), token1.get()}));
}
TEST_F(LiteralUtilTest, DifferentLayoutEquality) {
@@ -371,43 +374,46 @@ TEST_F(LiteralUtilTest, DifferentLayoutEquality) {
TEST_F(LiteralUtilTest, TupleEquality) {
// Test equality with tuples.
- auto scalar = Literal::CreateR0<float>(1.0);
- auto matrix = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
- auto tuple1 = Literal::MakeTuple({scalar.get(), matrix.get()});
+ auto scalar = LiteralUtil::CreateR0<float>(1.0);
+ auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ auto tuple1 = LiteralUtil::MakeTuple({scalar.get(), matrix.get()});
// Tuple with the same elements. One element is shared with the original
// tuple, the other is a clone of the element in the original tuple.
- auto scalar_clone = Literal::CreateR0<float>(1.0);
- auto tuple2 = Literal::MakeTuple({scalar_clone.get(), matrix.get()});
+ auto scalar_clone = LiteralUtil::CreateR0<float>(1.0);
+ auto tuple2 = LiteralUtil::MakeTuple({scalar_clone.get(), matrix.get()});
EXPECT_EQ(*tuple1, *tuple2);
// Tuple with elements reversed.
- auto reversed_tuple = Literal::MakeTuple({matrix.get(), scalar.get()});
+ auto reversed_tuple = LiteralUtil::MakeTuple({matrix.get(), scalar.get()});
EXPECT_NE(*tuple1, *reversed_tuple);
// Tuple with different value.
- auto scalar_42 = Literal::CreateR0<float>(42.0);
- auto different_tuple = Literal::MakeTuple({scalar_42.get(), matrix.get()});
+ auto scalar_42 = LiteralUtil::CreateR0<float>(42.0);
+ auto different_tuple =
+ LiteralUtil::MakeTuple({scalar_42.get(), matrix.get()});
EXPECT_NE(*tuple1, *different_tuple);
}
TEST_F(LiteralUtilTest, C64Equality) {
// Test equality with tuples.
- auto vector = Literal::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
+ auto vector = LiteralUtil::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
// Tuple with the same elements. One element is shared with the original
// tuple, the other is a clone of the element in the original tuple.
- auto vector_clone = Literal::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
+ auto vector_clone =
+ LiteralUtil::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
EXPECT_EQ(*vector, *vector_clone);
- auto vector_reversed = Literal::CreateR1<complex64>({{3.0, 4.0}, {1.0, 2.0}});
+ auto vector_reversed =
+ LiteralUtil::CreateR1<complex64>({{3.0, 4.0}, {1.0, 2.0}});
EXPECT_NE(*vector, *vector_reversed);
}
TEST_F(LiteralUtilTest, IsAllTuple) {
- auto element1 = Literal::CreateR0<float>(0.0);
- auto element2 = Literal::CreateR2<float>({{0.0, 0.0}, {0.0, 0.0}});
- auto tuple = Literal::MakeTuple({element1.get(), element1.get()});
+ auto element1 = LiteralUtil::CreateR0<float>(0.0);
+ auto element2 = LiteralUtil::CreateR2<float>({{0.0, 0.0}, {0.0, 0.0}});
+ auto tuple = LiteralUtil::MakeTuple({element1.get(), element1.get()});
// Tuples should always return false for IsAll.
EXPECT_FALSE(tuple->IsAll(0));
@@ -416,140 +422,141 @@ TEST_F(LiteralUtilTest, IsAllTuple) {
// Verifies that CreateFromShape works for tuples.
TEST_F(LiteralUtilTest, CreateFromShapeTuple) {
- auto scalar = Literal::CreateR0<float>(0.0);
- auto matrix = Literal::CreateR2<int32>({{0, 0}, {0, 0}});
- auto tuple = Literal::MakeTuple({scalar.get(), matrix.get()});
+ auto scalar = LiteralUtil::CreateR0<float>(0.0);
+ auto matrix = LiteralUtil::CreateR2<int32>({{0, 0}, {0, 0}});
+ auto tuple = LiteralUtil::MakeTuple({scalar.get(), matrix.get()});
auto x = Literal::CreateFromShape(tuple->shape());
EXPECT_EQ(*tuple, *x);
}
TEST_F(LiteralUtilTest, IsAll) {
- EXPECT_TRUE(Literal::CreateR0<bool>(false)->IsAll(0));
- EXPECT_TRUE(Literal::CreateR0<bool>(true)->IsAll(1));
- EXPECT_FALSE(Literal::CreateR0<bool>(false)->IsAll(1));
- EXPECT_FALSE(Literal::CreateR0<bool>(false)->IsAll(2));
- EXPECT_FALSE(Literal::CreateR0<bool>(true)->IsAll(0));
- EXPECT_FALSE(Literal::CreateR0<bool>(true)->IsAll(2));
- EXPECT_FALSE(Literal::CreateR0<bool>(true)->IsAll(-1));
+ EXPECT_TRUE(LiteralUtil::CreateR0<bool>(false)->IsAll(0));
+ EXPECT_TRUE(LiteralUtil::CreateR0<bool>(true)->IsAll(1));
+ EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false)->IsAll(1));
+ EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false)->IsAll(2));
+ EXPECT_FALSE(LiteralUtil::CreateR0<bool>(true)->IsAll(0));
+ EXPECT_FALSE(LiteralUtil::CreateR0<bool>(true)->IsAll(2));
+ EXPECT_FALSE(LiteralUtil::CreateR0<bool>(true)->IsAll(-1));
// We shouldn't reinterpret int8_min as an unsigned type and then decide that
// it is equal to 255.
auto int8_min = std::numeric_limits<int8>::min();
- EXPECT_FALSE(Literal::CreateR0<uint8>(255)->IsAll(int8_min));
+ EXPECT_FALSE(LiteralUtil::CreateR0<uint8>(255)->IsAll(int8_min));
- EXPECT_TRUE(Literal::CreateR0<float>(42.0)->IsAll(42));
- EXPECT_FALSE(Literal::CreateR0<float>(42.0001)->IsAll(42));
+ EXPECT_TRUE(LiteralUtil::CreateR0<float>(42.0)->IsAll(42));
+ EXPECT_FALSE(LiteralUtil::CreateR0<float>(42.0001)->IsAll(42));
- EXPECT_TRUE(Literal::CreateR1<int>({100, 100, 100})->IsAll(100));
- EXPECT_FALSE(Literal::CreateR1<double>({100, 100, 100.001})->IsAll(100));
+ EXPECT_TRUE(LiteralUtil::CreateR1<int>({100, 100, 100})->IsAll(100));
+ EXPECT_FALSE(LiteralUtil::CreateR1<double>({100, 100, 100.001})->IsAll(100));
- EXPECT_TRUE(Literal::CreateR2<uint64>({{8, 8}, {8, 8}})->IsAll(8));
- EXPECT_FALSE(Literal::CreateR2<uint64>({{8, 8}, {8, 9}})->IsAll(8));
- EXPECT_FALSE(Literal::CreateR2<uint64>({{9, 8}, {8, 8}})->IsAll(8));
+ EXPECT_TRUE(LiteralUtil::CreateR2<uint64>({{8, 8}, {8, 8}})->IsAll(8));
+ EXPECT_FALSE(LiteralUtil::CreateR2<uint64>({{8, 8}, {8, 9}})->IsAll(8));
+ EXPECT_FALSE(LiteralUtil::CreateR2<uint64>({{9, 8}, {8, 8}})->IsAll(8));
half h8(8.0f);
half h9(9.0f);
- EXPECT_TRUE(Literal::CreateR2<half>({{h8}, {h8}})->IsAll(8));
- EXPECT_FALSE(Literal::CreateR2<half>({{h8}, {h9}})->IsAll(8));
- EXPECT_FALSE(Literal::CreateR2<half>({{h9}, {h8}})->IsAll(8));
+ EXPECT_TRUE(LiteralUtil::CreateR2<half>({{h8}, {h8}})->IsAll(8));
+ EXPECT_FALSE(LiteralUtil::CreateR2<half>({{h8}, {h9}})->IsAll(8));
+ EXPECT_FALSE(LiteralUtil::CreateR2<half>({{h9}, {h8}})->IsAll(8));
bfloat16 b8(8.0f);
bfloat16 b9(9.0f);
- EXPECT_TRUE(Literal::CreateR2<bfloat16>({{b8}, {b8}})->IsAll(8));
- EXPECT_FALSE(Literal::CreateR2<bfloat16>({{b8}, {b9}})->IsAll(8));
- EXPECT_FALSE(Literal::CreateR2<bfloat16>({{b9}, {b8}})->IsAll(8));
+ EXPECT_TRUE(LiteralUtil::CreateR2<bfloat16>({{b8}, {b8}})->IsAll(8));
+ EXPECT_FALSE(LiteralUtil::CreateR2<bfloat16>({{b8}, {b9}})->IsAll(8));
+ EXPECT_FALSE(LiteralUtil::CreateR2<bfloat16>({{b9}, {b8}})->IsAll(8));
// 9.001 will be truncated to 9.0
bfloat16 b91(9.001f);
bfloat16 b90(9.00f);
- EXPECT_TRUE(Literal::CreateR2<bfloat16>({{b91}, {b90}})->IsAll(9.0));
+ EXPECT_TRUE(LiteralUtil::CreateR2<bfloat16>({{b91}, {b90}})->IsAll(9.0));
complex64 c8_9 = {8, 9};
- EXPECT_FALSE(Literal::CreateR2<complex64>({{c8_9}, {c8_9}})->IsAll(8));
+ EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c8_9}})->IsAll(8));
auto uint64_max = std::numeric_limits<uint64>::max();
- EXPECT_FALSE(Literal::CreateR2<uint64>(
+ EXPECT_FALSE(LiteralUtil::CreateR2<uint64>(
{{uint64_max, uint64_max}, {uint64_max, uint64_max}})
->IsAll(-1));
}
TEST_F(LiteralUtilTest, IsAllFloat) {
// IsAllFloat always returns false when the literal is not floating-point.
- EXPECT_FALSE(Literal::CreateR0<bool>(false)->IsAllFloat(0));
- EXPECT_FALSE(Literal::CreateR0<int8>(0)->IsAllFloat(0));
- EXPECT_FALSE(Literal::CreateR0<uint8>(0)->IsAllFloat(0));
- EXPECT_FALSE(Literal::CreateR0<int>(0)->IsAllFloat(0));
-
- EXPECT_TRUE(Literal::CreateR0<float>(0)->IsAllFloat(0));
- EXPECT_TRUE(Literal::CreateR0<float>(.5)->IsAllFloat(.5));
- EXPECT_TRUE(Literal::CreateR0<float>(-.5)->IsAllFloat(-.5));
- EXPECT_FALSE(Literal::CreateR0<float>(-.5)->IsAllFloat(-.49));
+ EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false)->IsAllFloat(0));
+ EXPECT_FALSE(LiteralUtil::CreateR0<int8>(0)->IsAllFloat(0));
+ EXPECT_FALSE(LiteralUtil::CreateR0<uint8>(0)->IsAllFloat(0));
+ EXPECT_FALSE(LiteralUtil::CreateR0<int>(0)->IsAllFloat(0));
+
+ EXPECT_TRUE(LiteralUtil::CreateR0<float>(0)->IsAllFloat(0));
+ EXPECT_TRUE(LiteralUtil::CreateR0<float>(.5)->IsAllFloat(.5));
+ EXPECT_TRUE(LiteralUtil::CreateR0<float>(-.5)->IsAllFloat(-.5));
+ EXPECT_FALSE(LiteralUtil::CreateR0<float>(-.5)->IsAllFloat(-.49));
EXPECT_FALSE(
- Literal::CreateR2<float>({{0, 0, 0}, {0, .1, 0}})->IsAllFloat(0));
- EXPECT_TRUE(
- Literal::CreateR2<float>({{.5, .5, .5}, {.5, .5, .5}})->IsAllFloat(.5));
-
- EXPECT_TRUE(Literal::CreateR0<double>(0)->IsAllFloat(0));
- EXPECT_TRUE(Literal::CreateR0<double>(.5)->IsAllFloat(.5));
- EXPECT_TRUE(Literal::CreateR0<double>(-.5)->IsAllFloat(-.5));
- EXPECT_FALSE(Literal::CreateR0<double>(-.5)->IsAllFloat(-.49));
+ LiteralUtil::CreateR2<float>({{0, 0, 0}, {0, .1, 0}})->IsAllFloat(0));
+ EXPECT_TRUE(LiteralUtil::CreateR2<float>({{.5, .5, .5}, {.5, .5, .5}})
+ ->IsAllFloat(.5));
+
+ EXPECT_TRUE(LiteralUtil::CreateR0<double>(0)->IsAllFloat(0));
+ EXPECT_TRUE(LiteralUtil::CreateR0<double>(.5)->IsAllFloat(.5));
+ EXPECT_TRUE(LiteralUtil::CreateR0<double>(-.5)->IsAllFloat(-.5));
+ EXPECT_FALSE(LiteralUtil::CreateR0<double>(-.5)->IsAllFloat(-.49));
EXPECT_FALSE(
- Literal::CreateR2<double>({{0, 0, 0}, {0, .1, 0}})->IsAllFloat(0));
+ LiteralUtil::CreateR2<double>({{0, 0, 0}, {0, .1, 0}})->IsAllFloat(0));
}
TEST_F(LiteralUtilTest, IsAllComplex) {
// IsAllComplex always returns false when the literal is not complex.
- EXPECT_FALSE(Literal::CreateR0<bool>(false)->IsAllComplex(0));
- EXPECT_FALSE(Literal::CreateR0<int8>(0)->IsAllComplex(0));
- EXPECT_FALSE(Literal::CreateR0<uint8>(0)->IsAllComplex(0));
- EXPECT_FALSE(Literal::CreateR0<int>(0)->IsAllComplex(0));
- EXPECT_FALSE(Literal::CreateR0<float>(0)->IsAllComplex(0));
- EXPECT_FALSE(Literal::CreateR0<double>(0)->IsAllComplex(0));
+ EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false)->IsAllComplex(0));
+ EXPECT_FALSE(LiteralUtil::CreateR0<int8>(0)->IsAllComplex(0));
+ EXPECT_FALSE(LiteralUtil::CreateR0<uint8>(0)->IsAllComplex(0));
+ EXPECT_FALSE(LiteralUtil::CreateR0<int>(0)->IsAllComplex(0));
+ EXPECT_FALSE(LiteralUtil::CreateR0<float>(0)->IsAllComplex(0));
+ EXPECT_FALSE(LiteralUtil::CreateR0<double>(0)->IsAllComplex(0));
complex64 c8_9 = {8, 9};
complex64 c7_9 = {7, 9};
- EXPECT_TRUE(Literal::CreateR2<complex64>({{c8_9}, {c8_9}})
+ EXPECT_TRUE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c8_9}})
->IsAllComplex({8.0f, 9.0f}));
- EXPECT_FALSE(Literal::CreateR2<complex64>({{c7_9}, {c8_9}})
+ EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c7_9}, {c8_9}})
->IsAllComplex({8.0f, 9.0f}));
- EXPECT_FALSE(Literal::CreateR2<complex64>({{c8_9}, {c7_9}})
+ EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c7_9}})
->IsAllComplex({8.0f, 9.0f}));
}
TEST_F(LiteralUtilTest, IsAllFirst) {
  // IsAllFirst returns whether every element equals the first element.
- EXPECT_FALSE(Literal::CreateR1<bool>({false, true})->IsAllFirst());
- EXPECT_TRUE(Literal::CreateR1<bool>({false, false})->IsAllFirst());
- EXPECT_FALSE(Literal::CreateR1<int8>({1, 1, 2})->IsAllFirst());
- EXPECT_TRUE(Literal::CreateR1<int8>({5, 5, 5, 5})->IsAllFirst());
- EXPECT_FALSE(Literal::CreateR1<uint8>({1, 1, 2})->IsAllFirst());
- EXPECT_TRUE(Literal::CreateR1<int32>({5, 5, 5, 5})->IsAllFirst());
- EXPECT_FALSE(Literal::CreateR1<int32>({1, 1, 2})->IsAllFirst());
- EXPECT_TRUE(Literal::CreateR1<uint32>({5, 5, 5, 5})->IsAllFirst());
- EXPECT_FALSE(Literal::CreateR1<uint32>({1, 1, 2})->IsAllFirst());
+ EXPECT_FALSE(LiteralUtil::CreateR1<bool>({false, true})->IsAllFirst());
+ EXPECT_TRUE(LiteralUtil::CreateR1<bool>({false, false})->IsAllFirst());
+ EXPECT_FALSE(LiteralUtil::CreateR1<int8>({1, 1, 2})->IsAllFirst());
+ EXPECT_TRUE(LiteralUtil::CreateR1<int8>({5, 5, 5, 5})->IsAllFirst());
+ EXPECT_FALSE(LiteralUtil::CreateR1<uint8>({1, 1, 2})->IsAllFirst());
+ EXPECT_TRUE(LiteralUtil::CreateR1<int32>({5, 5, 5, 5})->IsAllFirst());
+ EXPECT_FALSE(LiteralUtil::CreateR1<int32>({1, 1, 2})->IsAllFirst());
+ EXPECT_TRUE(LiteralUtil::CreateR1<uint32>({5, 5, 5, 5})->IsAllFirst());
+ EXPECT_FALSE(LiteralUtil::CreateR1<uint32>({1, 1, 2})->IsAllFirst());
complex64 c8_9 = {8, 9};
complex64 c7_9 = {7, 9};
- EXPECT_TRUE(Literal::CreateR2<complex64>({{c8_9}, {c8_9}})->IsAllFirst());
- EXPECT_FALSE(Literal::CreateR2<complex64>({{c7_9}, {c8_9}})->IsAllFirst());
+ EXPECT_TRUE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c8_9}})->IsAllFirst());
+ EXPECT_FALSE(
+ LiteralUtil::CreateR2<complex64>({{c7_9}, {c8_9}})->IsAllFirst());
}
TEST_F(LiteralUtilTest, IsZero) {
- auto scalar_zero = Literal::CreateR0<float>(0.0f);
- auto scalar_one = Literal::CreateR0<float>(1.0f);
+ auto scalar_zero = LiteralUtil::CreateR0<float>(0.0f);
+ auto scalar_one = LiteralUtil::CreateR0<float>(1.0f);
EXPECT_TRUE(scalar_zero->IsZero({}));
EXPECT_FALSE(scalar_one->IsZero({}));
- auto array = Literal::CreateR2<uint32>({{1, 2, 0, 3}, {1, 0, 1, 2}});
+ auto array = LiteralUtil::CreateR2<uint32>({{1, 2, 0, 3}, {1, 0, 1, 2}});
EXPECT_FALSE(array->IsZero({0, 1}));
EXPECT_TRUE(array->IsZero({0, 2}));
EXPECT_TRUE(array->IsZero({1, 1}));
EXPECT_FALSE(array->IsZero({1, 2}));
- auto complex_zero = Literal::CreateR0<complex64>(0.0f);
- auto complex_nonzero = Literal::CreateR0<complex64>(0.5f);
+ auto complex_zero = LiteralUtil::CreateR0<complex64>(0.0f);
+ auto complex_nonzero = LiteralUtil::CreateR0<complex64>(0.5f);
EXPECT_TRUE(complex_zero->IsZero({}));
EXPECT_FALSE(complex_nonzero->IsZero({}));
}
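// A minimal usage sketch of the renamed factory API exercised by the tests
// above, assuming the post-rename headers: the creation helpers are now
// static members of LiteralUtil, while predicates such as IsAll() and
// IsZero() stay methods on the resulting Literal. The function name below is
// illustrative only, not part of this change.
#include "tensorflow/compiler/xla/literal_util.h"
namespace xla {
void PredicateSketch() {
  auto lit = LiteralUtil::CreateR2<int32>({{0, 0}, {0, 7}});
  bool all_zero = lit->IsAll(0);          // false: the element at {1, 1} is 7.
  bool zero_at_01 = lit->IsZero({0, 1});  // true: that element is 0.
  (void)all_zero;
  (void)zero_at_01;
}
}  // namespace xla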
@@ -563,7 +570,7 @@ TYPED_TEST_CASE(LiteralUtilTestTemplated, TestedTypes);
TYPED_TEST(LiteralUtilTestTemplated, Relayout2x2) {
// Make a non-integer for floating point types.
TypeParam half = TypeParam(1) / TypeParam(2);
- auto data = Literal::CreateR2<TypeParam>({{half, 2}, {3, 4}});
+ auto data = LiteralUtil::CreateR2<TypeParam>({{half, 2}, {3, 4}});
const Layout layout01 = LayoutUtil::MakeLayout({0, 1});
const Layout layout10 = LayoutUtil::MakeLayout({1, 0});
@@ -577,7 +584,7 @@ TYPED_TEST(LiteralUtilTestTemplated, Relayout2x2) {
}
TEST_F(LiteralUtilTest, ReshapeR0) {
- auto original = Literal::CreateR0<float>(1.7f);
+ auto original = LiteralUtil::CreateR0<float>(1.7f);
auto reshape = original->Reshape(/*dimensions=*/{}).ConsumeValueOrDie();
EXPECT_EQ(*original, *reshape);
}
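// Reshape returns a StatusOr, which is why the test above unwraps it with
// ConsumeValueOrDie(). A minimal sketch under the same post-rename API; the
// function name is illustrative:
#include "tensorflow/compiler/xla/literal_util.h"
namespace xla {
void ReshapeSketch() {
  auto row = LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f, 4.0f});
  // Reinterpret the 4-element vector as a 2x2 matrix.
  auto matrix = row->Reshape(/*dimensions=*/{2, 2}).ConsumeValueOrDie();
}
}  // namespace xla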
@@ -585,13 +592,13 @@ TEST_F(LiteralUtilTest, ReshapeR0) {
TEST_F(LiteralUtilTest, ReshapeR4) {
// clang-format off
// F32[1x3x2x4]
- auto original = Literal::CreateR4WithLayout<float>({{
+ auto original = LiteralUtil::CreateR4WithLayout<float>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}}, layout_r4_dim0major_);
// F32[1x3x4x2]
- auto expected = Literal::CreateR3WithLayout<float>({
+ auto expected = LiteralUtil::CreateR3WithLayout<float>({
{{10, 11}, {12, 13}, {14, 15}, {16, 17}},
{{18, 19}, {20, 21}, {22, 23}, {24, 25}},
{{26, 27}, {28, 29}, {30, 31}, {32, 33}},
@@ -605,13 +612,13 @@ TEST_F(LiteralUtilTest, ReshapeR4) {
TEST_F(LiteralUtilTest, ReshapeR4Dim0Minor) {
// clang-format off
// F32[1x3x2x4]
- auto original = Literal::CreateR4WithLayout<float>({{
+ auto original = LiteralUtil::CreateR4WithLayout<float>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}}, layout_r4_dim0minor_);
// F32[1x3x4x2]
- auto expected = Literal::CreateR3WithLayout<float>({
+ auto expected = LiteralUtil::CreateR3WithLayout<float>({
{{10, 11}, {12, 13}, {14, 15}, {16, 17}},
{{18, 19}, {20, 21}, {22, 23}, {24, 25}},
{{26, 27}, {28, 29}, {30, 31}, {32, 33}},
@@ -623,7 +630,7 @@ TEST_F(LiteralUtilTest, ReshapeR4Dim0Minor) {
}
TEST_F(LiteralUtilTest, TransposeR0) {
- auto original = Literal::CreateR0<float>(1.7f);
+ auto original = LiteralUtil::CreateR0<float>(1.7f);
auto reshape = original->Transpose(/*permutation=*/{});
EXPECT_EQ(*original, *reshape);
}
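// Unlike Reshape, Transpose returns the new literal directly rather than a
// StatusOr, as the R0 test above dereferences the result without unwrapping.
// A hedged sketch with an illustrative function name:
#include "tensorflow/compiler/xla/literal_util.h"
namespace xla {
void TransposeSketch() {
  auto m = LiteralUtil::CreateR2<int32>({{1, 2}, {3, 4}});
  auto mt = m->Transpose(/*permutation=*/{1, 0});  // {{1, 3}, {2, 4}}
}
}  // namespace xla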
@@ -631,7 +638,7 @@ TEST_F(LiteralUtilTest, TransposeR0) {
TEST_F(LiteralUtilTest, TransposeR4) {
// clang-format off
// F32[1x3x2x4]
- auto original = Literal::CreateR4<float>({{
+ auto original = LiteralUtil::CreateR4<float>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
@@ -659,7 +666,7 @@ TEST_F(LiteralUtilTest, TestR4RelayoutEquivalence) {
TEST_F(LiteralUtilTest, TestR2LinearLayout) {
// Test expected memory layout of R2 dim0-minor (column-major) literal.
- auto mat_dim0minor = Literal::CreateR2WithLayout<int32>(
+ auto mat_dim0minor = LiteralUtil::CreateR2WithLayout<int32>(
{{1, 2, 3}, {4, 5, 6}}, layout_r2_dim0minor_);
EXPECT_EQ(mat_dim0minor->element_count(), 6);
EXPECT_THAT(mat_dim0minor->data<int32>(), ElementsAre(1, 4, 2, 5, 3, 6));
@@ -670,7 +677,7 @@ TEST_F(LiteralUtilTest, TestR2LinearLayout) {
ElementsAre(1, 2, 3, 4, 5, 6));
// Test expected memory layout of R2 created with dim0-major (row-major).
- auto mat_dim0major = Literal::CreateR2WithLayout<int32>(
+ auto mat_dim0major = LiteralUtil::CreateR2WithLayout<int32>(
{{1, 2, 3}, {4, 5, 6}}, layout_r2_dim0major_);
EXPECT_EQ(mat_dim0major->element_count(), 6);
EXPECT_THAT(mat_dim0major->data<int32>(), ElementsAre(1, 2, 3, 4, 5, 6));
@@ -695,8 +702,8 @@ TEST_F(LiteralUtilTest, TestR3LinearLayout) {
{10, 11, 12},
},
}); // clang-format on
- auto lit_dim0minor =
- Literal::CreateR3FromArray3DWithLayout<int>(arr3d, layout_r3_dim0minor_);
+ auto lit_dim0minor = LiteralUtil::CreateR3FromArray3DWithLayout<int>(
+ arr3d, layout_r3_dim0minor_);
EXPECT_EQ(lit_dim0minor->element_count(), 12);
std::vector<int> expected_dim0minor{1, 7, 4, 10, 2, 8, 5, 11, 3, 9, 6, 12};
@@ -710,8 +717,8 @@ TEST_F(LiteralUtilTest, TestR3LinearLayout) {
testing::ElementsAreArray(expected_dim0major));
// Test expected memory layout of R3 created with dim0-major (row-major).
- auto lit_dim0major =
- Literal::CreateR3FromArray3DWithLayout<int>(arr3d, layout_r3_dim0major_);
+ auto lit_dim0major = LiteralUtil::CreateR3FromArray3DWithLayout<int>(
+ arr3d, layout_r3_dim0major_);
EXPECT_EQ(lit_dim0major->element_count(), 12);
EXPECT_THAT(lit_dim0major->data<int32>(),
testing::ElementsAreArray(expected_dim0major));
@@ -723,28 +730,28 @@ TEST_F(LiteralUtilTest, TestR3LinearLayout) {
}
TEST_F(LiteralUtilTest, SliceR0S32) {
- auto input = Literal::CreateR0<int32>(1);
+ auto input = LiteralUtil::CreateR0<int32>(1);
auto result = input->Slice({}, {});
EXPECT_EQ(*input, *result);
}
TEST_F(LiteralUtilTest, SliceR1F32) {
- auto input = Literal::CreateR1<float>({1.0, 2.0, 3.0, 4.0, 5.0});
+ auto input = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, 4.0, 5.0});
auto result = input->Slice({3}, {4});
- auto expected = Literal::CreateR1<float>({4.0});
+ auto expected = LiteralUtil::CreateR1<float>({4.0});
EXPECT_EQ(*expected, *result);
}
TEST_F(LiteralUtilTest, SliceR2U32) {
- auto input_3x4 =
- Literal::CreateR2<uint32>({{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
+ auto input_3x4 = LiteralUtil::CreateR2<uint32>(
+ {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
auto result = input_3x4->Slice({0, 2}, {2, 4});
- auto expected = Literal::CreateR2<uint32>({{3, 4}, {7, 8}});
+ auto expected = LiteralUtil::CreateR2<uint32>({{3, 4}, {7, 8}});
EXPECT_EQ(*expected, *result);
}
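// Slice takes per-dimension start indices (inclusive) and limit indices
// (exclusive), as the expected value above shows. A minimal sketch assuming
// the same API; the names are illustrative:
#include "tensorflow/compiler/xla/literal_util.h"
namespace xla {
void SliceSketch() {
  auto m = LiteralUtil::CreateR2<uint32>({{1, 2, 3}, {4, 5, 6}});
  // Rows [0, 1) and columns [1, 3) of m, i.e. {{2, 3}}.
  auto s = m->Slice({0, 1}, {1, 3});
}
}  // namespace xla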
TEST_F(LiteralUtilTest, SliceR3U32Full) {
- auto input_2x3x2 = Literal::CreateR3<uint32>(
+ auto input_2x3x2 = LiteralUtil::CreateR3<uint32>(
{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}});
auto result = input_2x3x2->Slice({0, 0, 0}, {2, 3, 2});
EXPECT_EQ(*input_2x3x2, *result);
@@ -753,21 +760,21 @@ TEST_F(LiteralUtilTest, SliceR3U32Full) {
TEST_F(LiteralUtilTest, PopulateR1S64) {
Literal output(ShapeUtil::MakeShape(S64, {1}));
output.PopulateR1<int64>({77});
- auto expected = Literal::CreateR1<int64>({77});
+ auto expected = LiteralUtil::CreateR1<int64>({77});
EXPECT_EQ(output, *expected);
}
TEST_F(LiteralUtilTest, PopulateR1U64) {
Literal output(ShapeUtil::MakeShape(U64, {2}));
output.PopulateR1<uint64>({{77, 88}});
- auto expected = Literal::CreateR1<uint64>({{77, 88}});
+ auto expected = LiteralUtil::CreateR1<uint64>({{77, 88}});
EXPECT_EQ(output, *expected);
}
TEST_F(LiteralUtilTest, PopulateR1C64) {
Literal output(ShapeUtil::MakeShape(C64, {1}));
output.PopulateR1<complex64>({{77, 88}});
- auto expected = Literal::CreateR1<complex64>({{77, 88}});
+ auto expected = LiteralUtil::CreateR1<complex64>({{77, 88}});
EXPECT_EQ(output, *expected);
}
@@ -775,7 +782,7 @@ TEST_F(LiteralUtilTest, PopulateR2C64) {
Literal output(ShapeUtil::MakeShape(C64, {2, 2}));
output.PopulateR2<complex64>({{{7, 8}, {9, 10}}, {{1, 2}, {3, 4}}});
auto expected =
- Literal::CreateR2<complex64>({{{7, 8}, {9, 10}}, {{1, 2}, {3, 4}}});
+ LiteralUtil::CreateR2<complex64>({{{7, 8}, {9, 10}}, {{1, 2}, {3, 4}}});
EXPECT_EQ(output, *expected);
}
@@ -783,7 +790,7 @@ TEST_F(LiteralUtilTest, PopulateWithValueR0BF16) {
Literal output(ShapeUtil::MakeShape(BF16, {}));
bfloat16 h(0.25f);
output.PopulateWithValue<bfloat16>(h);
- auto expected = Literal::CreateR0<bfloat16>(h);
+ auto expected = LiteralUtil::CreateR0<bfloat16>(h);
EXPECT_EQ(output, *expected);
}
@@ -791,7 +798,7 @@ TEST_F(LiteralUtilTest, PopulateWithValueR1BF16) {
Literal output(ShapeUtil::MakeShape(BF16, {3}));
bfloat16 h(0.5f);
output.PopulateWithValue<bfloat16>(h);
- auto expected = Literal::CreateR1<bfloat16>({h, h, h});
+ auto expected = LiteralUtil::CreateR1<bfloat16>({h, h, h});
EXPECT_EQ(output, *expected);
}
@@ -799,28 +806,28 @@ TEST_F(LiteralUtilTest, PopulateWithValueR2BF16) {
Literal output(ShapeUtil::MakeShape(BF16, {2, 2}));
bfloat16 h(2.0f);
output.PopulateWithValue<bfloat16>(h);
- auto expected = Literal::CreateR2<bfloat16>({{h, h}, {h, h}});
+ auto expected = LiteralUtil::CreateR2<bfloat16>({{h, h}, {h, h}});
EXPECT_EQ(output, *expected);
}
TEST_F(LiteralUtilTest, PopulateWithValueR0F32) {
Literal output(ShapeUtil::MakeShape(F32, {}));
output.PopulateWithValue<float>(2.5f);
- auto expected = Literal::CreateR0<float>(2.5f);
+ auto expected = LiteralUtil::CreateR0<float>(2.5f);
EXPECT_EQ(output, *expected);
}
TEST_F(LiteralUtilTest, PopulateWithValueR1S64) {
Literal output(ShapeUtil::MakeShape(S64, {3}));
output.PopulateWithValue<int64>(-7);
- auto expected = Literal::CreateR1<int64>({-7, -7, -7});
+ auto expected = LiteralUtil::CreateR1<int64>({-7, -7, -7});
EXPECT_EQ(output, *expected);
}
TEST_F(LiteralUtilTest, PopulateWithValueR2U64) {
Literal output(ShapeUtil::MakeShape(U64, {2, 2}));
output.PopulateWithValue<uint64>(42);
- auto expected = Literal::CreateR2<uint64>({{42, 42}, {42, 42}});
+ auto expected = LiteralUtil::CreateR2<uint64>({{42, 42}, {42, 42}});
EXPECT_EQ(output, *expected);
}
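// The Populate* tests build an uninitialized Literal from a Shape and then
// fill it in place; only the expected side goes through the LiteralUtil
// factories. A sketch under the same assumptions:
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
namespace xla {
void PopulateSketch() {
  Literal out(ShapeUtil::MakeShape(S32, {4}));
  out.PopulateWithValue<int32>(3);  // out now holds {3, 3, 3, 3}.
  auto expected = LiteralUtil::CreateR1<int32>({3, 3, 3, 3});
}
}  // namespace xla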
@@ -828,7 +835,7 @@ TEST_F(LiteralUtilTest, PopulateWithValueR2C64) {
Literal output(ShapeUtil::MakeShape(C64, {2, 2}));
output.PopulateWithValue<complex64>({4, 2});
auto expected =
- Literal::CreateR2<complex64>({{{4, 2}, {4, 2}}, {{4, 2}, {4, 2}}});
+ LiteralUtil::CreateR2<complex64>({{{4, 2}, {4, 2}}, {{4, 2}, {4, 2}}});
EXPECT_EQ(output, *expected);
}
@@ -836,7 +843,7 @@ TEST_F(LiteralUtilTest, PopulateWithValueR0F16) {
Literal output(ShapeUtil::MakeShape(F16, {}));
half h(0.25f);
output.PopulateWithValue<half>(h);
- auto expected = Literal::CreateR0<half>(h);
+ auto expected = LiteralUtil::CreateR0<half>(h);
EXPECT_EQ(output, *expected);
}
@@ -844,7 +851,7 @@ TEST_F(LiteralUtilTest, PopulateWithValueR1F16) {
Literal output(ShapeUtil::MakeShape(F16, {3}));
half h(0.5f);
output.PopulateWithValue<half>(h);
- auto expected = Literal::CreateR1<half>({h, h, h});
+ auto expected = LiteralUtil::CreateR1<half>({h, h, h});
EXPECT_EQ(output, *expected);
}
@@ -852,15 +859,15 @@ TEST_F(LiteralUtilTest, PopulateWithValueR2F16) {
Literal output(ShapeUtil::MakeShape(F16, {2, 2}));
half h(2.0f);
output.PopulateWithValue<half>(h);
- auto expected = Literal::CreateR2<half>({{h, h}, {h, h}});
+ auto expected = LiteralUtil::CreateR2<half>({{h, h}, {h, h}});
EXPECT_EQ(output, *expected);
}
TEST_F(LiteralUtilTest, ReplicateR2U32) {
- auto input =
- Literal::CreateR2<uint32>({{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
+ auto input = LiteralUtil::CreateR2<uint32>(
+ {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
auto output = input->Replicate<uint32>(3);
- auto expected = Literal::CreateR3<uint32>(
+ auto expected = LiteralUtil::CreateR3<uint32>(
{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}});
@@ -914,12 +921,12 @@ TEST_F(LiteralUtilTest, CopySliceFrom) {
}
TEST_F(LiteralUtilTest, CopyFromScalars) {
- auto zero = Literal::CreateR0<uint32>(0);
- auto nine = Literal::CreateR0<uint32>(9);
+ auto zero = LiteralUtil::CreateR0<uint32>(0);
+ auto nine = LiteralUtil::CreateR0<uint32>(9);
TF_EXPECT_OK(zero->CopyFrom(*nine));
EXPECT_EQ(*zero, *nine);
- auto vect = Literal::CreateR1<uint32>({3, 4, 9, 12, 5, 17, 21});
+ auto vect = LiteralUtil::CreateR1<uint32>({3, 4, 9, 12, 5, 17, 21});
TF_EXPECT_OK(zero->CopySliceFrom(*vect, {5}, {}, {}));
EXPECT_EQ(zero->Get<uint32>({}), 17);
TF_EXPECT_OK(vect->CopySliceFrom(*zero, {}, {4}, {}));
@@ -928,13 +935,13 @@ TEST_F(LiteralUtilTest, CopyFromScalars) {
TEST_F(LiteralUtilTest, CopyFromAndToZeroElement) {
const Shape empty_r1_shape = ShapeUtil::MakeShape(F32, {0});
- const auto const_nine = Literal::CreateR1<float>({9});
+ const auto const_nine = LiteralUtil::CreateR1<float>({9});
const auto const_empty = Literal::CreateFromShape(empty_r1_shape);
{
// Source contains dimension with zero elements.
const auto empty = Literal::CreateFromShape(empty_r1_shape);
- auto nine = Literal::CreateR1<float>({9});
+ auto nine = LiteralUtil::CreateR1<float>({9});
TF_EXPECT_OK(nine->CopySliceFrom(*empty, {0}, {0}, {0}));
EXPECT_EQ(*nine, *const_nine);
@@ -943,7 +950,7 @@ TEST_F(LiteralUtilTest, CopyFromAndToZeroElement) {
{
// Copy 0 element to destination with zero elements.
const auto empty = Literal::CreateFromShape(empty_r1_shape);
- auto nine = Literal::CreateR1<float>({9});
+ auto nine = LiteralUtil::CreateR1<float>({9});
TF_EXPECT_OK(empty->CopySliceFrom(*nine, {0}, {0}, {0}));
EXPECT_EQ(*empty, *const_empty);
@@ -958,16 +965,16 @@ TEST_F(LiteralUtilTest, CopyFromNilShape) {
}
TEST_F(LiteralUtilTest, CopyFromArrays) {
- auto scalar_42 = Literal::CreateR0<float>(42.0);
- auto scalar_123 = Literal::CreateR0<float>(123.0);
+ auto scalar_42 = LiteralUtil::CreateR0<float>(42.0);
+ auto scalar_123 = LiteralUtil::CreateR0<float>(123.0);
EXPECT_NE(*scalar_42, *scalar_123);
TF_ASSERT_OK(scalar_42->CopyFrom(*scalar_123, /*dest_shape_index=*/{},
/*src_shape_index=*/{}));
EXPECT_EQ(*scalar_42, *scalar_123);
EXPECT_EQ(scalar_42->Get<float>({}), 123.0f);
- auto matrix_1234 = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
- auto matrix_5678 = Literal::CreateR2<float>({{5.0, 6.0}, {7.0, 8.0}});
+ auto matrix_1234 = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ auto matrix_5678 = LiteralUtil::CreateR2<float>({{5.0, 6.0}, {7.0, 8.0}});
EXPECT_NE(*matrix_1234, *matrix_5678);
EXPECT_EQ(matrix_1234->Get<float>({0, 0}), 1.0f);
TF_ASSERT_OK(matrix_1234->CopyFrom(*matrix_5678, /*dest_shape_index=*/{},
@@ -977,19 +984,19 @@ TEST_F(LiteralUtilTest, CopyFromArrays) {
}
TEST_F(LiteralUtilTest, CopyFromTuples) {
- auto matrix = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
Literal nil_literal(ShapeUtil::MakeNil());
- auto nested_tuple = Literal::MakeTuple(
+ auto nested_tuple = LiteralUtil::MakeTuple(
{matrix.get(),
- Literal::MakeTuple({Literal::CreateR0<int32>(42).get(),
- Literal::CreateR1<double>({23.0, 44.0}).get(),
- &nil_literal})
+ LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<int32>(42).get(),
+ LiteralUtil::CreateR1<double>({23.0, 44.0}).get(), &nil_literal})
.get()});
  // Create a tuple of the same shape as the inner tuple of nested_tuple, but
  // with different values.
- auto tuple = Literal::MakeTuple({Literal::CreateR0<int32>(-5).get(),
- Literal::CreateR1<double>({2.0, 4.0}).get(),
- &nil_literal});
+ auto tuple = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<int32>(-5).get(),
+ LiteralUtil::CreateR1<double>({2.0, 4.0}).get(), &nil_literal});
EXPECT_EQ(*matrix, LiteralSlice(*nested_tuple, {0}));
EXPECT_EQ(nested_tuple->Get<int32>({}, {1, 0}), 42);
@@ -1010,8 +1017,8 @@ TEST_F(LiteralUtilTest, CopyFromTuples) {
EXPECT_EQ(nested_tuple->Get<double>({1}, {1, 1}), 4.0);
}
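// CopyFrom copies a compatible subtree from one literal into another;
// dest_shape_index and src_shape_index select tuple elements on either side,
// with the empty index meaning the whole literal. A minimal sketch assuming
// the post-rename API:
#include "tensorflow/compiler/xla/literal_util.h"
namespace xla {
void CopyFromSketch() {
  auto a = LiteralUtil::CreateR0<float>(1.0f);
  auto b = LiteralUtil::CreateR0<float>(2.0f);
  Status s = a->CopyFrom(*b, /*dest_shape_index=*/{}, /*src_shape_index=*/{});
  // On success, a->Get<float>({}) == 2.0f.
}
}  // namespace xla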
TEST_F(LiteralUtilTest, CopyBetweenSameTuple) {
- auto tuple = Literal::MakeTuple(
- {Literal::CreateR0<int32>(-2).get(), Literal::CreateR0<int32>(4).get()});
+ auto tuple = LiteralUtil::MakeTuple({LiteralUtil::CreateR0<int32>(-2).get(),
+ LiteralUtil::CreateR0<int32>(4).get()});
EXPECT_EQ(tuple->Get<int32>({}, {0}), -2);
EXPECT_EQ(tuple->Get<int32>({}, {1}), 4);
@@ -1025,8 +1032,8 @@ TEST_F(LiteralUtilTest, CopyBetweenSameTuple) {
}
TEST_F(LiteralUtilTest, CopyFromDifferentShapes) {
- auto matrix = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
- auto vector = Literal::CreateR1<float>({5.0, 7.0});
+ auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ auto vector = LiteralUtil::CreateR1<float>({5.0, 7.0});
Status status = matrix->CopyFrom(*vector);
ASSERT_FALSE(status.ok());
ASSERT_THAT(status.error_message(),
@@ -1051,7 +1058,7 @@ TEST_F(LiteralUtilTest, F16) {
half h1(1.0f);
half h2(2.0f);
- auto m2 = Literal::CreateR2<half>({{h1, h2}, {h2, h1}});
+ auto m2 = LiteralUtil::CreateR2<half>({{h1, h2}, {h2, h1}});
Literal* l2 = m2.get();
const char* d2 = reinterpret_cast<const char*>(l2->data<half>().data());
EXPECT_EQ(d2[0], 0);
@@ -1150,12 +1157,12 @@ TEST_F(LiteralUtilTest, PopulateParallel) {
TEST_F(LiteralUtilTest, ConvertR4) {
// clang-format off
- auto original = Literal::CreateR4WithLayout<int8>({{
+ auto original = LiteralUtil::CreateR4WithLayout<int8>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}}, layout_r4_dim0major_);
- auto expected = Literal::CreateR4WithLayout<uint32>({{
+ auto expected = LiteralUtil::CreateR4WithLayout<uint32>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
@@ -1169,42 +1176,42 @@ TEST_F(LiteralUtilTest, ConvertR4) {
TEST_F(LiteralUtilTest, ConvertIfTypesMatch) {
// clang-format off
- auto s8 = Literal::CreateR4WithLayout<int8>({{
+ auto s8 = LiteralUtil::CreateR4WithLayout<int8>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
- auto s32 = Literal::CreateR4WithLayout<int32>({{
+ auto s32 = LiteralUtil::CreateR4WithLayout<int32>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
- auto u32 = Literal::CreateR4WithLayout<uint32>({{
+ auto u32 = LiteralUtil::CreateR4WithLayout<uint32>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
- auto s64 = Literal::CreateR4WithLayout<int64>({{
+ auto s64 = LiteralUtil::CreateR4WithLayout<int64>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
- auto u64 = Literal::CreateR4WithLayout<uint64>({{
+ auto u64 = LiteralUtil::CreateR4WithLayout<uint64>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
- auto pred = Literal::CreateR4WithLayout<bool>({{
+ auto pred = LiteralUtil::CreateR4WithLayout<bool>({{
{{true, false, true, false}, {false, true, false, true}},
{{false, true, false, true}, {true, false, true, false}},
{{true, false, true, false}, {false, true, false, true}},
}}, layout_r4_dim0major_);
- auto int32_pred = Literal::CreateR4WithLayout<int32>({{
+ auto int32_pred = LiteralUtil::CreateR4WithLayout<int32>({{
{{1, 0, 1, 0}, {0, 1, 0, 1}},
{{0, 1, 0, 1}, {1, 0, 1, 0}},
{{1, 0, 1, 0}, {0, 1, 0, 1}},
}}, layout_r4_dim0major_);
- auto f16 = Literal::CreateR4WithLayout<half>({{
+ auto f16 = LiteralUtil::CreateR4WithLayout<half>({{
{{half(10.0), half(0.0), half(12.0), half(0.0)},
{half(0.0), half(15.0), half(0.0), half(17.0)}},
{{half(0.0), half(19.0), half(0.0), half(21.0)},
@@ -1212,7 +1219,7 @@ TEST_F(LiteralUtilTest, ConvertIfTypesMatch) {
{{half(26.0), half(0.0), half(28.0), half(0.0)},
{half(0.0), half(31.0), half(0.0), half(33.0)}},
}}, layout_r4_dim0major_);
- auto bf16 = Literal::CreateR4WithLayout<bfloat16>({{
+ auto bf16 = LiteralUtil::CreateR4WithLayout<bfloat16>({{
{{bfloat16(10.0), bfloat16(0.0), bfloat16(12.0), bfloat16(0.0)},
{bfloat16(0.0), bfloat16(15.0), bfloat16(0.0), bfloat16(17.0)}},
{{bfloat16(0.0), bfloat16(19.0), bfloat16(0.0), bfloat16(21.0)},
@@ -1220,17 +1227,17 @@ TEST_F(LiteralUtilTest, ConvertIfTypesMatch) {
{{bfloat16(26.0), bfloat16(0.0), bfloat16(28.0), bfloat16(0.0)},
{bfloat16(0.0), bfloat16(31.0), bfloat16(0.0), bfloat16(33.0)}},
}}, layout_r4_dim0major_);
- auto f32 = Literal::CreateR4WithLayout<float>({{
+ auto f32 = LiteralUtil::CreateR4WithLayout<float>({{
{{10.0f, 0.0f, 12.0f, 0.0f}, {0.0f, 15.0f, 0.0f, 17.0f}},
{{0.0f, 19.0f, 0.0f, 21.0f}, {22.0f, 0.0f, 24.0f, 0.0f}},
{{26.0f, 0.0f, 28.0f, 0.0f}, {0.0f, 31.0f, 0.0f, 33.0f}},
}}, layout_r4_dim0major_);
- auto f64 = Literal::CreateR4WithLayout<double>({{
+ auto f64 = LiteralUtil::CreateR4WithLayout<double>({{
{{10.0, 0.0, 12.0, 0.0}, {0.0, 15.0, 0.0, 17.0}},
{{0.0, 19.0, 0.0, 21.0}, {22.0, 0.0, 24.0, 0.0}},
{{26.0, 0.0, 28.0, 0.0}, {0.0, 31.0, 0.0, 33.0}},
}}, layout_r4_dim0major_);
- auto c64 = Literal::CreateR4WithLayout<complex64>({{
+ auto c64 = LiteralUtil::CreateR4WithLayout<complex64>({{
{{10.0f, 0.0f, 12.0f, 0.0f}, {0.0f, 15.0f, 0.0f, 17.0f}},
{{0.0f, 19.0f, 0.0f, 21.0f}, {22.0f, 0.0f, 24.0f, 0.0f}},
{{26.0f, 0.0f, 28.0f, 0.0f}, {0.0f, 31.0f, 0.0f, 33.0f}},
@@ -1302,18 +1309,18 @@ TEST_F(LiteralUtilTest, ConvertIfTypesMatch) {
}
TEST_F(LiteralUtilTest, BitcastConvert) {
- auto original =
- Literal::CreateR1<uint32>({tensorflow::bit_cast<uint32>(2.5f),
- tensorflow::bit_cast<uint32>(-42.25f),
- tensorflow::bit_cast<uint32>(100.f), 0xbeef});
- auto expected = Literal::CreateR1<float>(
+ auto original = LiteralUtil::CreateR1<uint32>(
+ {tensorflow::bit_cast<uint32>(2.5f),
+ tensorflow::bit_cast<uint32>(-42.25f),
+ tensorflow::bit_cast<uint32>(100.f), 0xbeef});
+ auto expected = LiteralUtil::CreateR1<float>(
{2.5f, -42.25f, 100.0f, tensorflow::bit_cast<float>(0xbeef)});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> converted,
original->BitcastConvert(F32));
}
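// BitcastConvert reinterprets the bits as another primitive type of the same
// width and returns a StatusOr; converting between types of different widths
// fails, which the next test checks. A sketch with illustrative names:
#include "tensorflow/compiler/xla/literal_util.h"
namespace xla {
void BitcastSketch() {
  auto bits = LiteralUtil::CreateR0<uint32>(0x3f800000);  // Bits of 1.0f.
  auto as_f32 = bits->BitcastConvert(F32).ConsumeValueOrDie();
  // as_f32->Get<float>({}) == 1.0f.
}
}  // namespace xla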
TEST_F(LiteralUtilTest, BitcastConvertBetweenInvalidTypes) {
- auto literal = Literal::CreateR0<uint32>(1234);
+ auto literal = LiteralUtil::CreateR0<uint32>(1234);
Status status = literal->BitcastConvert(F64).status();
EXPECT_NE(Status::OK(), status);
EXPECT_TRUE(tensorflow::str_util::StrContains(status.error_message(),
@@ -1348,7 +1355,7 @@ TEST_F(LiteralUtilTest, ToProto_f16) {
half h1(1.0f);
half h2(2.0f);
- auto m = Literal::CreateR2<half>({{h1, h2}, {h2, h1}});
+ auto m = LiteralUtil::CreateR2<half>({{h1, h2}, {h2, h1}});
Literal* l = m.get();
EXPECT_EQ(4, ShapeUtil::ElementsIn(l->shape()));
EXPECT_EQ(4, l->data<half>().size());
@@ -1391,10 +1398,10 @@ TEST_F(LiteralUtilTest, CopyFromProto_f16) {
}
TEST_F(LiteralUtilTest, LiteralSliceTest) {
- auto scalar = Literal::CreateR0<float>(1.0);
- auto matrix = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
- auto tuple = Literal::MakeTuple({scalar.get(), matrix.get()});
- auto nested_tuple = Literal::MakeTuple({tuple.get(), scalar.get()});
+ auto scalar = LiteralUtil::CreateR0<float>(1.0);
+ auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ auto tuple = LiteralUtil::MakeTuple({scalar.get(), matrix.get()});
+ auto nested_tuple = LiteralUtil::MakeTuple({tuple.get(), scalar.get()});
Literal nil(ShapeUtil::MakeNil());
EXPECT_EQ(LiteralSlice(*scalar, {}), *scalar);
@@ -1413,10 +1420,10 @@ TEST_F(LiteralUtilTest, LiteralSliceTest) {
}
TEST_F(LiteralUtilTest, MutatingLiteralSlice) {
- auto scalar = Literal::CreateR0<float>(1.0);
- auto matrix = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
- auto tuple = Literal::MakeTuple({scalar.get(), matrix.get()});
- auto nested_tuple = Literal::MakeTuple({tuple.get(), scalar.get()});
+ auto scalar = LiteralUtil::CreateR0<float>(1.0);
+ auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ auto tuple = LiteralUtil::MakeTuple({scalar.get(), matrix.get()});
+ auto nested_tuple = LiteralUtil::MakeTuple({tuple.get(), scalar.get()});
// Verify that changing the underlying data beneath the view changes the
// data of the view itself.
const auto nested_tuple_view = LiteralSlice(*nested_tuple);
@@ -1436,15 +1443,16 @@ TEST_F(LiteralUtilTest, MutatingLiteralSlice) {
}
TEST_F(LiteralUtilTest, LiteralSliceOfALiteralSlice) {
- auto scalar = Literal::CreateR0<float>(1.0);
- auto matrix = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
- auto tuple = Literal::MakeTuple({scalar.get(), matrix.get()});
- auto nested_tuple = Literal::MakeTuple({tuple.get(), scalar.get()});
+ auto scalar = LiteralUtil::CreateR0<float>(1.0);
+ auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ auto tuple = LiteralUtil::MakeTuple({scalar.get(), matrix.get()});
+ auto nested_tuple = LiteralUtil::MakeTuple({tuple.get(), scalar.get()});
const auto nested_tuple_view = LiteralSlice(*nested_tuple);
const auto tuple_view = LiteralSlice(nested_tuple_view, /*view_root=*/{0});
const auto matrix_view = LiteralSlice(tuple_view, /*view_root=*/{1});
- EXPECT_EQ(matrix_view, *Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}));
+ EXPECT_EQ(matrix_view,
+ *LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}));
}
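// LiteralSlice is a non-owning view over a literal (or over another view);
// the shape index passed to the constructor picks which subtree to expose.
// A minimal sketch under the same assumptions:
#include "tensorflow/compiler/xla/literal_util.h"
namespace xla {
void LiteralSliceSketch() {
  auto scalar = LiteralUtil::CreateR0<float>(1.0f);
  auto matrix = LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {3.0f, 4.0f}});
  auto tuple = LiteralUtil::MakeTuple({scalar.get(), matrix.get()});
  LiteralSlice matrix_view(*tuple, /*view_root=*/{1});  // Views the matrix.
}
}  // namespace xla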
TEST_F(LiteralUtilTest, BorrowingLiteralFromOneBufferPtr) {
@@ -1488,7 +1496,7 @@ TEST_F(LiteralUtilTest, BorrowingLiteralFromMultipleBufferPtrs) {
TEST_F(LiteralUtilTest, LiteralMove) {
std::unique_ptr<Literal> matrix =
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
Literal literal(std::move(*matrix));
EXPECT_TRUE(
@@ -1501,11 +1509,11 @@ TEST_F(LiteralUtilTest, LiteralMove) {
TEST_F(LiteralUtilTest, DecomposeTuple) {
Literal nil_literal(ShapeUtil::MakeNil());
- auto nested_tuple = Literal::MakeTuple(
- {Literal::CreateR2<int32>({{1, 2}, {3, 4}}).get(),
- Literal::MakeTuple({Literal::CreateR0<int32>(42).get(),
- Literal::CreateR1<double>({23.0, 44.0}).get(),
- &nil_literal})
+ auto nested_tuple = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR2<int32>({{1, 2}, {3, 4}}).get(),
+ LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<int32>(42).get(),
+ LiteralUtil::CreateR1<double>({23.0, 44.0}).get(), &nil_literal})
.get(),
&nil_literal});
@@ -1542,13 +1550,13 @@ TEST_F(LiteralUtilTest, DecomposeEmptyTuple) {
TEST_F(LiteralUtilTest, MoveIntoTuple) {
std::vector<Literal> elements;
- elements.push_back(std::move(*Literal::CreateR0<float>(1.0)));
- elements.push_back(std::move(*Literal::CreateR1<int32>({4, 8})));
- elements.push_back(std::move(
- *Literal::MakeTuple({Literal::CreateR0<int32>(42).get(),
- Literal::CreateR1<double>({23.0, 44.0}).get()})
+ elements.push_back(std::move(*LiteralUtil::CreateR0<float>(1.0)));
+ elements.push_back(std::move(*LiteralUtil::CreateR1<int32>({4, 8})));
+ elements.push_back(std::move(*LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<int32>(42).get(),
+ LiteralUtil::CreateR1<double>({23.0, 44.0}).get()})
- ));
+ ));
Literal literal = Literal::MoveIntoTuple(&elements);
ASSERT_TRUE(ShapeUtil::IsTuple(literal.shape()));
@@ -1577,7 +1585,7 @@ TEST_F(LiteralUtilTest, LiteralMoveAssignment) {
EXPECT_TRUE(ShapeUtil::Equal(ShapeUtil::MakeNil(), literal.shape()));
std::unique_ptr<Literal> matrix =
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
literal = std::move(*matrix);
EXPECT_TRUE(
@@ -1590,7 +1598,7 @@ TEST_F(LiteralUtilTest, LiteralMoveAssignment) {
TEST_F(LiteralUtilTest, LiteralSliceCopy) {
std::unique_ptr<Literal> matrix =
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
const auto matrix_view = LiteralSlice(*matrix);
LiteralSlice matrix_view_copy(matrix_view);
@@ -1601,9 +1609,9 @@ TEST_F(LiteralUtilTest, LiteralSliceCopy) {
}
TEST_F(LiteralUtilTest, GetSetTuple) {
- auto tuple = Literal::MakeTuple(
- {Literal::CreateR0<float>(42.0).get(),
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}).get()});
+ auto tuple = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<float>(42.0).get(),
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}).get()});
EXPECT_EQ(tuple->Get<float>(/*multi_index=*/{}, /*shape_index=*/{0}), 42.0);
tuple->Set<float>(/*multi_index=*/{}, /*shape_index=*/{0}, -5.0);
EXPECT_EQ(tuple->Get<float>(/*multi_index=*/{}, /*shape_index=*/{0}), -5.0);
@@ -1644,20 +1652,20 @@ TEST_F(LiteralUtilTest, CreateFromShapeZeroInitialized) {
TEST_F(LiteralUtilTest, ProtoRoundTrip) {
// Test serializing then deserializing a Literal through a proto.
- auto one_f32 = Literal::CreateR0<float>(1.0);
- auto two_f32 = Literal::CreateR0<float>(2.0);
- auto vector_int8 = Literal::CreateR1<int8>({-128, 0, 2, 4, 7, 56, 127});
- auto vector_c64 = Literal::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
- auto vector_bfloat16 = Literal::CreateR1<bfloat16>(
+ auto one_f32 = LiteralUtil::CreateR0<float>(1.0);
+ auto two_f32 = LiteralUtil::CreateR0<float>(2.0);
+ auto vector_int8 = LiteralUtil::CreateR1<int8>({-128, 0, 2, 4, 7, 56, 127});
+ auto vector_c64 = LiteralUtil::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
+ auto vector_bfloat16 = LiteralUtil::CreateR1<bfloat16>(
{bfloat16{-1.0}, bfloat16{2.0}, bfloat16{-3.0}});
auto vector_half =
- Literal::CreateR1<half>({half{10.0}, half{20.0}, half{-30.0}});
+ LiteralUtil::CreateR1<half>({half{10.0}, half{20.0}, half{-30.0}});
auto matrix_pred =
- Literal::CreateR2<bool>({{true, false, true}, {false, false, true}});
- auto tuple = Literal::MakeTuple(
+ LiteralUtil::CreateR2<bool>({{true, false, true}, {false, false, true}});
+ auto tuple = LiteralUtil::MakeTuple(
{one_f32.get(), vector_half.get(), matrix_pred.get(), matrix_pred.get()});
Literal nil_literal(ShapeUtil::MakeNil());
- auto nested_tuple = Literal::MakeTuple(
+ auto nested_tuple = LiteralUtil::MakeTuple(
{tuple.get(), vector_bfloat16.get(), tuple.get(), &nil_literal});
auto to_from_proto = [](const Literal& literal) -> Literal {
@@ -1790,8 +1798,8 @@ TEST_F(LiteralUtilTest, InvalidProtoTooManyTupleElements) {
}
TEST_F(LiteralUtilTest, SortSparseElements) {
- auto literal =
- Literal::CreateSparse<float>({10, 10, 10}, SparseIndexArray(10, 3), {});
+ auto literal = LiteralUtil::CreateSparse<float>({10, 10, 10},
+ SparseIndexArray(10, 3), {});
literal->AppendSparseElement<float>({2, 3, 4}, 2.0);
literal->AppendSparseElement<float>({3, 4, 5}, 3.0);
literal->AppendSparseElement<float>({1, 2, 3}, 1.0);
@@ -1805,21 +1813,22 @@ TEST_F(LiteralUtilTest, GetSparseElementAsString) {
SparseIndexArray indices(10, {{1, 2, 3}, {2, 3, 4}, {3, 4, 5}});
ASSERT_EQ(
- Literal::CreateSparse<bool>(dimensions, indices, {true, false, true})
+ LiteralUtil::CreateSparse<bool>(dimensions, indices, {true, false, true})
->GetSparseElementAsString(1),
"false");
- ASSERT_EQ(Literal::CreateSparse<int64>(dimensions, indices, {1, 2, 3})
+ ASSERT_EQ(LiteralUtil::CreateSparse<int64>(dimensions, indices, {1, 2, 3})
->GetSparseElementAsString(1),
tensorflow::strings::StrCat(int64{2}));
- ASSERT_EQ(Literal::CreateSparse<double>(dimensions, indices, {1.0, 2.0, 3.0})
- ->GetSparseElementAsString(1),
- tensorflow::strings::StrCat(double{2.0}));
- ASSERT_EQ(Literal::CreateSparse<half>(dimensions, indices,
- {half{1.0}, half{2.0}, half{3.0}})
+ ASSERT_EQ(
+ LiteralUtil::CreateSparse<double>(dimensions, indices, {1.0, 2.0, 3.0})
+ ->GetSparseElementAsString(1),
+ tensorflow::strings::StrCat(double{2.0}));
+ ASSERT_EQ(LiteralUtil::CreateSparse<half>(dimensions, indices,
+ {half{1.0}, half{2.0}, half{3.0}})
->GetSparseElementAsString(1),
tensorflow::strings::StrCat(static_cast<float>(half{2.0})));
ASSERT_EQ(
- Literal::CreateSparse<complex64>(
+ LiteralUtil::CreateSparse<complex64>(
dimensions, indices,
std::vector<complex64>{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}})
->GetSparseElementAsString(1),
@@ -1827,33 +1836,36 @@ TEST_F(LiteralUtilTest, GetSparseElementAsString) {
}
TEST_F(LiteralUtilTest, BroadcastVectorToMatrix0) {
- std::unique_ptr<Literal> literal = Literal::CreateR1<int64>({1, 2});
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR1<int64>({1, 2});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<Literal> broadcasted_literal,
literal->Broadcast(
/*result_shape=*/ShapeUtil::MakeShape(S64, {2, 2}),
/*dimensions=*/{0}));
- EXPECT_EQ(*broadcasted_literal, *Literal::CreateR2<int64>({{1, 1}, {2, 2}}));
+ EXPECT_EQ(*broadcasted_literal,
+ *LiteralUtil::CreateR2<int64>({{1, 1}, {2, 2}}));
}
TEST_F(LiteralUtilTest, BroadcastVectorToMatrix1) {
- std::unique_ptr<Literal> literal = Literal::CreateR1<int64>({1, 2});
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR1<int64>({1, 2});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<Literal> broadcasted_literal,
literal->Broadcast(
/*result_shape=*/ShapeUtil::MakeShape(S64, {2, 2}),
/*dimensions=*/{1}));
- EXPECT_EQ(*broadcasted_literal, *Literal::CreateR2<int64>({{1, 2}, {1, 2}}));
+ EXPECT_EQ(*broadcasted_literal,
+ *LiteralUtil::CreateR2<int64>({{1, 2}, {1, 2}}));
}
TEST_F(LiteralUtilTest, BroadcastScalarToMatrix) {
- std::unique_ptr<Literal> literal = Literal::CreateR0<int32>(9);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR0<int32>(9);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<Literal> broadcasted_literal,
literal->Broadcast(
/*result_shape=*/ShapeUtil::MakeShape(S32, {2, 2}),
/*dimensions=*/{}));
- EXPECT_EQ(*broadcasted_literal, *Literal::CreateR2<int32>({{9, 9}, {9, 9}}));
+ EXPECT_EQ(*broadcasted_literal,
+ *LiteralUtil::CreateR2<int32>({{9, 9}, {9, 9}}));
}
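// Broadcast expands a literal to result_shape; dimensions maps each source
// dimension to a result dimension, and is empty for a scalar source. A
// hedged sketch assuming the same API:
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
namespace xla {
void BroadcastSketch() {
  auto v = LiteralUtil::CreateR1<int64>({1, 2});
  // Source dim 0 maps to result dim 0, so values replicate along columns:
  // the result is {{1, 1}, {2, 2}}.
  auto m = v->Broadcast(/*result_shape=*/ShapeUtil::MakeShape(S64, {2, 2}),
                        /*dimensions=*/{0})
               .ConsumeValueOrDie();
}
}  // namespace xla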
} // namespace
diff --git a/tensorflow/compiler/xla/literal_util.cc b/tensorflow/compiler/xla/literal_util.cc
index 7c6a181b0a..548fbe8a83 100644
--- a/tensorflow/compiler/xla/literal_util.cc
+++ b/tensorflow/compiler/xla/literal_util.cc
@@ -43,25 +43,6 @@ namespace xla {
namespace {
-constexpr bool kLittleEndian = __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__;
-
-// Converts between little and big endian.
-//
-// Precondition: size % 2 == 0 (elements in the array are 16 bits long)
-void ConvertEndianShort(string* bytes) {
-  CHECK_EQ(bytes->size() % 2, 0);
- for (int64 i = 0; i < bytes->size(); i += 2) {
- std::swap((*bytes)[i], (*bytes)[i + 1]);
- }
-}
-
-void ConvertEndianShort(char* bytes, int64 size) {
-  CHECK_EQ(size % 2, 0);
- for (int64 i = 0; i < size; i += 2) {
- std::swap(bytes[i], bytes[i + 1]);
- }
-}
-
// Return a literal with all arrays of type FromNativeT converted to type
// ToNativeT in the given literal.
template <typename FromNativeT, typename ToNativeT>
@@ -103,505 +84,54 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
} // namespace
-LiteralBase::~LiteralBase() {}
-
-std::ostream& operator<<(std::ostream& out, const Literal& literal) {
- out << literal.ToString();
- return out;
-}
-
-Literal::StrideConfig::StrideConfig(
- const Shape& source_shape, const Shape& dest_shape,
- tensorflow::gtl::ArraySlice<int64> dimensions)
- : dimensions(dimensions),
- base(dimensions.size(), 0),
- step(dimensions.size(), 1) {
- if (!dimensions.empty()) {
- // Selects the shape with the largest minor dimension as the one upon
- // which to run the tight stride loop.
- if (dimensions[LayoutUtil::Minor(source_shape.layout(), 0)] >=
- dimensions[LayoutUtil::Minor(dest_shape.layout(), 0)]) {
- minor_dimension = LayoutUtil::Minor(source_shape.layout(), 0);
- dest_stride = IndexUtil::GetDimensionStride(dest_shape, minor_dimension);
- } else {
- minor_dimension = LayoutUtil::Minor(dest_shape.layout(), 0);
- source_stride =
- IndexUtil::GetDimensionStride(source_shape, minor_dimension);
- }
- minor_loop_size = dimensions[minor_dimension];
- step[minor_dimension] = minor_loop_size;
- }
-}
-
-Literal::Literal(const Shape& shape)
- : Literal(shape, /*allocate_arrays=*/true) {}
-
-void Literal::SetPiece(const Shape& shape, Piece* piece, bool allocate_arrays) {
- if (ShapeUtil::IsTuple(shape)) {
- for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
- const Shape& subshape = shape.tuple_shapes(i);
-
- auto child_piece = Piece();
- child_piece.set_subshape(&subshape);
-
- SetPiece(subshape, &child_piece, allocate_arrays);
-
- piece->emplace_back(std::move(child_piece));
- }
- } else if (ShapeUtil::IsArray(shape)) {
- if (allocate_arrays) {
- if (LayoutUtil::IsSparseArray(shape)) {
- // For sparse arrays, the buffer must be of the size of the maximum
- // number of sparse elements possible.
- const int64 max_sparse_elements =
- LayoutUtil::MaxSparseElements(shape.layout());
- piece->set_buffer(
- new char[max_sparse_elements *
- ShapeUtil::ByteSizeOfPrimitiveType(shape.element_type())]);
- piece->set_sparse_indices(
- new SparseIndexArray(max_sparse_elements, ShapeUtil::Rank(shape)));
- } else {
- piece->set_buffer(new char[piece->size_bytes()]);
- }
- }
- } else {
- // If the shape is neither an array nor tuple, then it must be
- // zero-sized. Otherwise, some memory needs to be allocated for it.
- CHECK_EQ(piece->size_bytes(), 0);
- }
-}
-
-Literal::Literal(const Shape& shape, bool allocate_arrays)
- : LiteralBase(), shape_(MakeUnique<Shape>(shape)) {
- CHECK(LayoutUtil::HasLayout(*shape_));
- root_piece_ = new Piece();
- root_piece_->set_subshape(shape_.get());
- CHECK(&root_piece_->subshape() == shape_.get());
-
- SetPiece(*shape_, root_piece_, allocate_arrays);
-}
-
-Literal::~Literal() {
- if (root_piece_ != nullptr) {
- DeallocateBuffers();
- delete root_piece_;
- }
-}
-
-void Literal::DeallocateBuffers() {
- root_piece_->ForEachMutableSubpiece(
- [&](const ShapeIndex& index, Piece* piece) {
- if (piece->buffer() != nullptr) {
- delete[] piece->buffer();
- delete piece->sparse_indices();
- }
- });
-}
-
-Literal::Literal(Literal&& other) : LiteralBase() { *this = std::move(other); }
-
-Literal& Literal::operator=(Literal&& other) {
- DCHECK(&other.root_piece_->subshape() == other.shape_.get());
- using std::swap;
- swap(shape_, other.shape_);
- swap(root_piece_, other.root_piece_);
- DCHECK(&root_piece_->subshape() == shape_.get());
-
- return *this;
-}
-
-std::unique_ptr<Literal> LiteralBase::CreateFromShape(const Shape& shape) {
- auto literal = MakeUnique<Literal>(shape);
- literal->root_piece_->ForEachMutableSubpiece(
- [&](const ShapeIndex& index, Piece* piece) {
- if (ShapeUtil::IsArray(piece->subshape())) {
- memset(piece->untyped_data(), 0, piece->size_bytes());
- }
- });
- return literal;
-}
-
-const SparseIndexArray* LiteralBase::sparse_indices(
- const ShapeIndex& shape_index) const {
- return piece(shape_index).sparse_indices();
-}
-
-SparseIndexArray* Literal::sparse_indices(const ShapeIndex& shape_index) {
- return piece(shape_index).sparse_indices();
-}
-
-/* static */ std::unique_ptr<Literal> Literal::CreateFromDimensions(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateFromDimensions(
PrimitiveType primitive_type,
tensorflow::gtl::ArraySlice<int64> dimensions) {
- return CreateFromShape(ShapeUtil::MakeShape(primitive_type, dimensions));
+ return Literal::CreateFromShape(
+ ShapeUtil::MakeShape(primitive_type, dimensions));
}
-/* static */ std::unique_ptr<Literal> Literal::ConvertBF16ToF32(
+/* static */ std::unique_ptr<Literal> LiteralUtil::ConvertBF16ToF32(
const LiteralSlice& bf16_literal) {
return ConvertType<bfloat16, float>(bf16_literal);
}
-/* static */ std::unique_ptr<Literal> Literal::ConvertF32ToBF16(
+/* static */ std::unique_ptr<Literal> LiteralUtil::ConvertF32ToBF16(
const LiteralSlice& f32_literal) {
return ConvertType<float, bfloat16>(f32_literal);
}
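// After the rename the conversion helpers are static on LiteralUtil and take
// a LiteralSlice, so they accept owned literals and views alike. A minimal
// sketch under those assumptions:
#include "tensorflow/compiler/xla/literal_util.h"
namespace xla {
void ConvertSketch() {
  auto f32 = LiteralUtil::CreateR1<float>({1.0f, 2.0f});
  auto bf16 = LiteralUtil::ConvertF32ToBF16(LiteralSlice(*f32));
  // 1.0f and 2.0f are exactly representable in bfloat16, so converting back
  // recovers the original values.
  auto back = LiteralUtil::ConvertBF16ToF32(LiteralSlice(*bf16));
}
}  // namespace xla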
-template <typename NativeT>
-Status Literal::CopySliceFromInternal(
- const LiteralBase& src_literal, tensorflow::gtl::ArraySlice<int64> src_base,
- tensorflow::gtl::ArraySlice<int64> dest_base,
- tensorflow::gtl::ArraySlice<int64> copy_size) {
- TF_RET_CHECK(ShapeUtil::Rank(src_literal.shape()) == src_base.size());
- TF_RET_CHECK(ShapeUtil::Rank(shape()) == dest_base.size());
-
- auto linear_index = [](const Shape& shape,
- tensorflow::gtl::ArraySlice<int64> multi_index) {
- return IndexUtil::MultidimensionalIndexToLinearIndex(shape, multi_index);
- };
-
- if (ShapeUtil::Rank(src_literal.shape()) == 0 ||
- ShapeUtil::Rank(shape()) == 0) {
- // If any of the two shapes are scalars, we can just call the StridedCopy()
- // directly, and we know we will be copying only one value.
- TF_RET_CHECK(copy_size.empty());
- StridedCopy(data<NativeT>(), linear_index(shape(), dest_base), 0,
- src_literal.data<NativeT>(),
- linear_index(src_literal.shape(), src_base), 0, 1);
- } else if (!ShapeUtil::IsZeroElementArray(shape()) &&
- !ShapeUtil::IsZeroElementArray(src_literal.shape())) {
- // Perform copy if neither src nor dest has dimensions with zero element,
- // otherwise it's a no-op.
- TF_RET_CHECK(src_base.size() == dest_base.size());
- TF_RET_CHECK(src_base.size() == copy_size.size());
-
- // Scan the source from minor, stepping in copy size blocks, then within
-    // the index enumeration functor, do a strided copy advancing source index
- // by one (walking through the minor dimension), and destination index by
- // proper stride size at the matching dimension.
- DimensionVector src_indexes(src_base.size(), 0);
- DimensionVector dest_indexes(dest_base.size(), 0);
- Literal::StrideConfig stride_config(src_literal.shape(), shape(),
- copy_size);
-
- auto copy_proc = [&](tensorflow::gtl::ArraySlice<int64> indexes) {
- // Map from multi-dimensional index, to source index.
- std::transform(indexes.begin(), indexes.end(), src_base.begin(),
- src_indexes.begin(), std::plus<int64>());
- // Map from multi-dimensional index, to destination index.
- std::transform(indexes.begin(), indexes.end(), dest_base.begin(),
- dest_indexes.begin(), std::plus<int64>());
-
- int64 src_index = linear_index(src_literal.shape(), src_indexes);
- int64 dest_index = linear_index(shape(), dest_indexes);
-
- // `this->` is needed to workaround MSVC bug: #16882
- StridedCopy(this->data<NativeT>(), dest_index, stride_config.dest_stride,
- src_literal.data<NativeT>(), src_index,
- stride_config.source_stride, stride_config.minor_loop_size);
- return true;
- };
-
- ShapeUtil::ForEachIndex(src_literal.shape(), stride_config.base,
- stride_config.dimensions, stride_config.step,
- copy_proc);
- }
- return Status::OK();
-}
-
-Status Literal::CopyElementFrom(const LiteralSlice& src_literal,
- tensorflow::gtl::ArraySlice<int64> src_index,
- tensorflow::gtl::ArraySlice<int64> dest_index) {
- DCHECK_EQ(shape().element_type(), src_literal.shape().element_type());
- const int64 src_linear_index = IndexUtil::MultidimensionalIndexToLinearIndex(
- src_literal.shape(), src_index);
- const int64 dest_linear_index =
- IndexUtil::MultidimensionalIndexToLinearIndex(shape(), dest_index);
- const int64 primitive_size =
- ShapeUtil::ByteSizeOfPrimitiveType(shape().element_type());
-
- char* dest_address =
- static_cast<char*>(untyped_data()) + dest_linear_index * primitive_size;
- const char* source_address =
- static_cast<const char*>(src_literal.untyped_data()) +
- src_linear_index * primitive_size;
- if (dest_address != source_address) {
- memcpy(dest_address, source_address, primitive_size);
- }
- return Status::OK();
-}
-
-/* static */ std::unique_ptr<Literal> Literal::CreateToken() {
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateToken() {
return MakeUnique<Literal>(ShapeUtil::MakeTokenShape());
}
-std::vector<Literal> Literal::DecomposeTuple() {
- CHECK(ShapeUtil::IsTuple(shape()));
- std::vector<Literal> elements;
- for (int i = 0; i < ShapeUtil::TupleElementCount(shape()); ++i) {
- elements.push_back(Literal(ShapeUtil::GetSubshape(shape(), {i}),
- /*allocate_arrays=*/false));
- Literal& element = elements.back();
- element.root_piece_->ForEachMutableSubpiece(
- [&](const ShapeIndex& index, Piece* dest_piece) {
- ShapeIndex src_index = {i};
- for (int64 j : index) {
- src_index.push_back(j);
- }
- Piece& src_piece = piece(src_index);
-
- // Move the respective buffer and sparse indices over to the element
- // Literal.
- dest_piece->set_buffer(src_piece.buffer());
- src_piece.set_buffer(nullptr);
- dest_piece->set_sparse_indices(src_piece.sparse_indices());
- src_piece.set_sparse_indices(nullptr);
- });
- }
- // Set this literal to be nil-shaped.
- *this = Literal();
- return elements;
-}
-
-/* static */ Literal Literal::MoveIntoTuple(
- tensorflow::gtl::MutableArraySlice<Literal> elements) {
- std::vector<Shape> element_shapes;
- for (const Literal& element : elements) {
- element_shapes.push_back(element.shape());
- }
- Literal literal(ShapeUtil::MakeTupleShape(element_shapes),
- /*allocate_arrays=*/false);
- for (int i = 0; i < elements.size(); ++i) {
- TF_CHECK_OK(
- literal.MoveFrom(std::move(elements[i]), /*dest_shape_index=*/{i}));
- }
- return literal;
-}
-
-namespace {
-
-// Copies the elements in 'src' to 'dest'. The shape and layout of the data in
-// the array slices are indicated by dest_shape and src_shape respectively.
-template <typename NativeT>
-void CopyElementsBetween(tensorflow::gtl::MutableArraySlice<NativeT> dest,
- tensorflow::gtl::ArraySlice<NativeT> src,
- const Shape& dest_shape, const Shape& src_shape) {
- CHECK(ShapeUtil::Compatible(dest_shape, src_shape));
- if (ShapeUtil::IsZeroElementArray(dest_shape)) {
- return;
- }
- std::vector<int64> index(ShapeUtil::Rank(dest_shape));
- do {
- dest[IndexUtil::MultidimensionalIndexToLinearIndex(dest_shape, index)] =
- src[IndexUtil::MultidimensionalIndexToLinearIndex(src_shape, index)];
- } while (IndexUtil::BumpIndices(dest_shape, &index));
-}
-
-} // namespace
-
-Status LiteralBase::Piece::CopyFrom(const LiteralBase::Piece& src) {
- CHECK(subshape_ != nullptr);
- CHECK(src.subshape_ != nullptr);
- if (ShapeUtil::Equal(subshape(), src.subshape())) {
- // If the layouts are equal it's faster just to memcpy.
- memcpy(buffer(), src.buffer(), src.size_bytes());
- } else {
- TF_RET_CHECK(ShapeUtil::Compatible(src.subshape(), subshape()));
- std::vector<int64> origin(ShapeUtil::Rank(subshape()), 0);
- switch (subshape().element_type()) {
-#define COPY_ELEMENTS(XLA_T, NATIVE_T) \
- case (XLA_T): \
- CopyElementsBetween<NATIVE_T>(data<NATIVE_T>(), src.data<NATIVE_T>(), \
- subshape(), src.subshape()); \
- break;
- COPY_ELEMENTS(U8, uint8);
- COPY_ELEMENTS(U16, uint16);
- COPY_ELEMENTS(U32, uint32);
- COPY_ELEMENTS(U64, uint64);
- COPY_ELEMENTS(S8, int8);
- COPY_ELEMENTS(S16, int16);
- COPY_ELEMENTS(S32, int32);
- COPY_ELEMENTS(S64, int64);
- COPY_ELEMENTS(F16, half);
- COPY_ELEMENTS(BF16, bfloat16);
- COPY_ELEMENTS(F32, float);
- COPY_ELEMENTS(F64, double);
- COPY_ELEMENTS(C64, complex64);
- COPY_ELEMENTS(PRED, bool);
-#undef COPY_ELEMENTS
- default:
- return Unimplemented(
- "Copying a Literal object with element type %s is not implemented.",
- PrimitiveType_Name(subshape().element_type()).c_str());
- }
- }
- return Status::OK();
-}
-
-Status Literal::CopyFrom(const LiteralSlice& src_literal,
- const ShapeIndex& dest_shape_index,
- const ShapeIndex& src_shape_index) {
- const Shape& dest_subshape =
- ShapeUtil::GetSubshape(shape(), dest_shape_index);
- const Shape& src_subshape =
- ShapeUtil::GetSubshape(src_literal.shape(), src_shape_index);
- if (!ShapeUtil::Compatible(dest_subshape, src_subshape)) {
- return InvalidArgument(
- "Destination subshape incompatible with source subshape: %s vs %s",
- ShapeUtil::HumanString(dest_subshape).c_str(),
- ShapeUtil::HumanString(src_subshape).c_str());
- }
- return root_piece_->ForEachMutableSubpieceWithStatus(
- [&](const ShapeIndex& index, Piece* piece) {
- if (!ShapeUtil::IsArray(piece->subshape())) {
- return Status::OK();
- }
-
- // Determine if this index is in the part of this literal that we want
- // to copy over from src_literal.
- bool in_subtree_to_copy = true;
- for (int i = 0; i < dest_shape_index.size(); ++i) {
- if (index[i] != dest_shape_index[i]) {
- in_subtree_to_copy = false;
- break;
- }
- }
- if (!in_subtree_to_copy) {
- return Status::OK();
- }
- // Construct the index of the corresponding piece in the source literal.
- ShapeIndex src_piece_index = src_shape_index;
- for (int64 i = dest_shape_index.size(); i < index.size(); ++i) {
- src_piece_index.push_back(index[i]);
- }
- TF_RETURN_IF_ERROR(piece->CopyFrom(src_literal.piece(src_piece_index)));
- return Status::OK();
- });
-}
-
-Status Literal::MoveFrom(Literal&& src_literal,
- const ShapeIndex& dest_shape_index) {
- const Shape& dest_subshape =
- ShapeUtil::GetSubshape(shape(), dest_shape_index);
- if (!ShapeUtil::Equal(dest_subshape, src_literal.shape())) {
- return InvalidArgument(
- "Destination subshape not equal to source shape: %s vs %s",
- ShapeUtil::HumanString(dest_subshape).c_str(),
- ShapeUtil::HumanString(src_literal.shape()).c_str());
- }
-
- src_literal.root_piece_->ForEachSubpiece(
- [&](const ShapeIndex& src_index, const Piece& src_piece) {
- if (!ShapeUtil::IsArray(src_piece.subshape())) {
- return;
- }
-
- ShapeIndex dest_index = dest_shape_index;
- for (int64 i : src_index) {
- dest_index.push_back(i);
- }
- Piece& dest_piece = piece(dest_index);
- delete[] dest_piece.buffer();
- dest_piece.set_buffer(src_piece.buffer());
- delete dest_piece.sparse_indices();
- dest_piece.set_sparse_indices(src_piece.sparse_indices());
- });
-
- src_literal.shape_ = MakeUnique<Shape>(ShapeUtil::MakeNil());
- delete src_literal.root_piece_;
- src_literal.root_piece_ = new LiteralBase::Piece();
- src_literal.root_piece_->set_subshape(src_literal.shape_.get());
-
- return Status::OK();
-}
-
-Status Literal::CopySliceFrom(const LiteralSlice& src_literal,
- tensorflow::gtl::ArraySlice<int64> src_base,
- tensorflow::gtl::ArraySlice<int64> dest_base,
- tensorflow::gtl::ArraySlice<int64> copy_size) {
- TF_RET_CHECK(ShapeUtil::IsArray(shape())) << ShapeUtil::HumanString(shape());
- TF_RET_CHECK(ShapeUtil::IsArray(src_literal.shape()))
- << ShapeUtil::HumanString(src_literal.shape());
- TF_RET_CHECK(ShapeUtil::SameElementType(src_literal.shape(), shape()));
-
- switch (shape().element_type()) {
- case U8:
- return CopySliceFromInternal<uint8>(src_literal, src_base, dest_base,
- copy_size);
- case U16:
- return CopySliceFromInternal<uint16>(src_literal, src_base, dest_base,
- copy_size);
- case U32:
- return CopySliceFromInternal<uint32>(src_literal, src_base, dest_base,
- copy_size);
- case U64:
- return CopySliceFromInternal<uint64>(src_literal, src_base, dest_base,
- copy_size);
- case S8:
- return CopySliceFromInternal<int8>(src_literal, src_base, dest_base,
- copy_size);
- case S16:
- return CopySliceFromInternal<int16>(src_literal, src_base, dest_base,
- copy_size);
- case S32:
- return CopySliceFromInternal<int32>(src_literal, src_base, dest_base,
- copy_size);
- case S64:
- return CopySliceFromInternal<int64>(src_literal, src_base, dest_base,
- copy_size);
- case F16:
- return CopySliceFromInternal<half>(src_literal, src_base, dest_base,
- copy_size);
- case BF16:
- return CopySliceFromInternal<bfloat16>(src_literal, src_base, dest_base,
- copy_size);
- case F32:
- return CopySliceFromInternal<float>(src_literal, src_base, dest_base,
- copy_size);
- case F64:
- return CopySliceFromInternal<double>(src_literal, src_base, dest_base,
- copy_size);
- case C64:
- return CopySliceFromInternal<complex64>(src_literal, src_base, dest_base,
- copy_size);
- case PRED:
- return CopySliceFromInternal<bool>(src_literal, src_base, dest_base,
- copy_size);
- default:
- break;
- }
- return Unimplemented(
- "Copying a slice from a Literal object with element type %d is not "
- "implemented.",
- shape().element_type());
-}
-
-/* static */ Literal Literal::Zero(PrimitiveType primitive_type) {
+/* static */ Literal LiteralUtil::Zero(PrimitiveType primitive_type) {
switch (primitive_type) {
case U8:
- return std::move(*Literal::CreateR0<uint8>(0));
+ return std::move(*LiteralUtil::CreateR0<uint8>(0));
case U32:
- return std::move(*Literal::CreateR0<uint32>(0));
+ return std::move(*LiteralUtil::CreateR0<uint32>(0));
case U64:
- return std::move(*Literal::CreateR0<uint64>(0));
+ return std::move(*LiteralUtil::CreateR0<uint64>(0));
case S8:
- return std::move(*Literal::CreateR0<int8>(0));
+ return std::move(*LiteralUtil::CreateR0<int8>(0));
case S32:
- return std::move(*Literal::CreateR0<int32>(0));
+ return std::move(*LiteralUtil::CreateR0<int32>(0));
case S64:
- return std::move(*Literal::CreateR0<int64>(0));
+ return std::move(*LiteralUtil::CreateR0<int64>(0));
case F16:
- return std::move(*Literal::CreateR0<half>(static_cast<half>(0.0f)));
+ return std::move(*LiteralUtil::CreateR0<half>(static_cast<half>(0.0f)));
case BF16:
return std::move(
- *Literal::CreateR0<bfloat16>(static_cast<bfloat16>(0.0f)));
+ *LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(0.0f)));
case F32:
- return std::move(*Literal::CreateR0<float>(0));
+ return std::move(*LiteralUtil::CreateR0<float>(0));
case F64:
- return std::move(*Literal::CreateR0<double>(0));
+ return std::move(*LiteralUtil::CreateR0<double>(0));
case C64:
- return std::move(*Literal::CreateR0<complex64>(0));
+ return std::move(*LiteralUtil::CreateR0<complex64>(0));
case PRED:
- return std::move(*Literal::CreateR0<bool>(false));
+ return std::move(*LiteralUtil::CreateR0<bool>(false));
case S16:
case U16:
LOG(FATAL) << "u16/s16 literals not yet implemented";
@@ -614,33 +144,33 @@ Status Literal::CopySliceFrom(const LiteralSlice& src_literal,
}
}
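// Zero() and One() return a Literal by value, moving out of the unique_ptr
// that the CreateR0 helpers produce, as the switch above shows. A minimal
// usage sketch assuming the post-rename API:
#include "tensorflow/compiler/xla/literal_util.h"
namespace xla {
void ScalarFactorySketch() {
  Literal zero = LiteralUtil::Zero(F32);  // Scalar 0.0f.
  Literal one = LiteralUtil::One(S32);    // Scalar int32 holding 1.
}
}  // namespace xla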
-/* static */ Literal Literal::One(PrimitiveType primitive_type) {
+/* static */ Literal LiteralUtil::One(PrimitiveType primitive_type) {
switch (primitive_type) {
case U8:
- return std::move(*Literal::CreateR0<uint8>(1));
+ return std::move(*LiteralUtil::CreateR0<uint8>(1));
case U32:
- return std::move(*Literal::CreateR0<uint32>(1));
+ return std::move(*LiteralUtil::CreateR0<uint32>(1));
case U64:
- return std::move(*Literal::CreateR0<uint64>(1));
+ return std::move(*LiteralUtil::CreateR0<uint64>(1));
case S8:
- return std::move(*Literal::CreateR0<int8>(1));
+ return std::move(*LiteralUtil::CreateR0<int8>(1));
case S32:
- return std::move(*Literal::CreateR0<int32>(1));
+ return std::move(*LiteralUtil::CreateR0<int32>(1));
case S64:
- return std::move(*Literal::CreateR0<int64>(1));
+ return std::move(*LiteralUtil::CreateR0<int64>(1));
case F16:
- return std::move(*Literal::CreateR0<half>(static_cast<half>(1.0f)));
+ return std::move(*LiteralUtil::CreateR0<half>(static_cast<half>(1.0f)));
case BF16:
return std::move(
- *Literal::CreateR0<bfloat16>(static_cast<bfloat16>(1.0f)));
+ *LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(1.0f)));
case F32:
- return std::move(*Literal::CreateR0<float>(1));
+ return std::move(*LiteralUtil::CreateR0<float>(1));
case F64:
- return std::move(*Literal::CreateR0<double>(1));
+ return std::move(*LiteralUtil::CreateR0<double>(1));
case C64:
- return std::move(*Literal::CreateR0<complex64>(1));
+ return std::move(*LiteralUtil::CreateR0<complex64>(1));
case PRED:
- return std::move(*Literal::CreateR0<bool>(true));
+ return std::move(*LiteralUtil::CreateR0<bool>(true));
case S16:
case U16:
LOG(FATAL) << "u16/s16 literals not yet implemented";
@@ -653,44 +183,44 @@ Status Literal::CopySliceFrom(const LiteralSlice& src_literal,
}
}
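
For orientation: the renamed factories are drop-in replacements for the old Literal statics. A minimal usage sketch, assuming the post-refactor header layout shown at the bottom of this diff:

    #include "tensorflow/compiler/xla/literal_util.h"

    // Scalar constants via the renamed LiteralUtil factories.
    void MakeScalarConstants() {
      xla::Literal zero = xla::LiteralUtil::Zero(xla::F32);  // scalar 0.0f
      xla::Literal one = xla::LiteralUtil::One(xla::F32);    // scalar 1.0f
      // Per both switches above, S16/U16 would LOG(FATAL): still unimplemented.
    }
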
-/* static */ Literal Literal::MinValue(PrimitiveType primitive_type) {
+/* static */ Literal LiteralUtil::MinValue(PrimitiveType primitive_type) {
switch (primitive_type) {
case U8:
return std::move(
- *Literal::CreateR0<uint8>(std::numeric_limits<uint8>::min()));
+ *LiteralUtil::CreateR0<uint8>(std::numeric_limits<uint8>::min()));
case U32:
return std::move(
- *Literal::CreateR0<uint32>(std::numeric_limits<uint32>::min()));
+ *LiteralUtil::CreateR0<uint32>(std::numeric_limits<uint32>::min()));
case U64:
return std::move(
- *Literal::CreateR0<uint64>(std::numeric_limits<uint64>::min()));
+ *LiteralUtil::CreateR0<uint64>(std::numeric_limits<uint64>::min()));
case S8:
return std::move(
- *Literal::CreateR0<int8>(std::numeric_limits<int8>::min()));
+ *LiteralUtil::CreateR0<int8>(std::numeric_limits<int8>::min()));
case S32:
return std::move(
- *Literal::CreateR0<int32>(std::numeric_limits<int32>::min()));
+ *LiteralUtil::CreateR0<int32>(std::numeric_limits<int32>::min()));
case S64:
return std::move(
- *Literal::CreateR0<int64>(std::numeric_limits<int64>::min()));
+ *LiteralUtil::CreateR0<int64>(std::numeric_limits<int64>::min()));
case F32:
- return std::move(
- *Literal::CreateR0<float>(-std::numeric_limits<float>::infinity()));
+ return std::move(*LiteralUtil::CreateR0<float>(
+ -std::numeric_limits<float>::infinity()));
case F64:
- return std::move(
- *Literal::CreateR0<double>(-std::numeric_limits<double>::infinity()));
+ return std::move(*LiteralUtil::CreateR0<double>(
+ -std::numeric_limits<double>::infinity()));
case C64:
LOG(FATAL) << "C64 element type has no minimum value";
case PRED:
- return std::move(*Literal::CreateR0<bool>(false));
+ return std::move(*LiteralUtil::CreateR0<bool>(false));
case S16:
case U16:
LOG(FATAL) << "u16/s16 literals not yet implemented";
case F16:
- return std::move(*Literal::CreateR0<half>(
+ return std::move(*LiteralUtil::CreateR0<half>(
static_cast<half>(-std::numeric_limits<float>::infinity())));
case BF16:
- return std::move(*Literal::CreateR0<bfloat16>(
+ return std::move(*LiteralUtil::CreateR0<bfloat16>(
static_cast<bfloat16>(-std::numeric_limits<float>::infinity())));
case TUPLE:
LOG(FATAL) << "tuple element type has no minimum value";
@@ -701,42 +231,42 @@ Status Literal::CopySliceFrom(const LiteralSlice& src_literal,
}
}
-/* static */ Literal Literal::MaxValue(PrimitiveType primitive_type) {
+/* static */ Literal LiteralUtil::MaxValue(PrimitiveType primitive_type) {
switch (primitive_type) {
case U8:
return std::move(
- *Literal::CreateR0<uint8>(std::numeric_limits<uint8>::max()));
+ *LiteralUtil::CreateR0<uint8>(std::numeric_limits<uint8>::max()));
case U32:
return std::move(
- *Literal::CreateR0<uint32>(std::numeric_limits<uint32>::max()));
+ *LiteralUtil::CreateR0<uint32>(std::numeric_limits<uint32>::max()));
case U64:
return std::move(
- *Literal::CreateR0<uint64>(std::numeric_limits<uint64>::max()));
+ *LiteralUtil::CreateR0<uint64>(std::numeric_limits<uint64>::max()));
case S8:
return std::move(
- *Literal::CreateR0<int8>(std::numeric_limits<int8>::max()));
+ *LiteralUtil::CreateR0<int8>(std::numeric_limits<int8>::max()));
case S32:
return std::move(
- *Literal::CreateR0<int32>(std::numeric_limits<int32>::max()));
+ *LiteralUtil::CreateR0<int32>(std::numeric_limits<int32>::max()));
case S64:
return std::move(
- *Literal::CreateR0<int64>(std::numeric_limits<int64>::max()));
+ *LiteralUtil::CreateR0<int64>(std::numeric_limits<int64>::max()));
case F32:
- return std::move(
- *Literal::CreateR0<float>(std::numeric_limits<float>::infinity()));
+ return std::move(*LiteralUtil::CreateR0<float>(
+ std::numeric_limits<float>::infinity()));
case F64:
- return std::move(
- *Literal::CreateR0<double>(std::numeric_limits<double>::infinity()));
+ return std::move(*LiteralUtil::CreateR0<double>(
+ std::numeric_limits<double>::infinity()));
case PRED:
- return std::move(*Literal::CreateR0<bool>(true));
+ return std::move(*LiteralUtil::CreateR0<bool>(true));
case S16:
case U16:
LOG(FATAL) << "u16/s16 literals not yet implemented";
case F16:
- return std::move(*Literal::CreateR0<half>(
+ return std::move(*LiteralUtil::CreateR0<half>(
static_cast<half>(std::numeric_limits<float>::infinity())));
case BF16:
- return std::move(*Literal::CreateR0<bfloat16>(
+ return std::move(*LiteralUtil::CreateR0<bfloat16>(
static_cast<bfloat16>(std::numeric_limits<float>::infinity())));
case TUPLE:
LOG(FATAL) << "tuple element type has no maximum value";
@@ -747,7 +277,7 @@ Status Literal::CopySliceFrom(const LiteralSlice& src_literal,
}
}
-/* static */ std::unique_ptr<Literal> Literal::CreateR1(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR1(
const tensorflow::core::Bitmap& values) {
auto literal = MakeUnique<Literal>(
ShapeUtil::MakeShape(PRED, {static_cast<int64>(values.bits())}));
@@ -755,17 +285,7 @@ Status Literal::CopySliceFrom(const LiteralSlice& src_literal,
return literal;
}
-void Literal::PopulateR1(const tensorflow::core::Bitmap& values) {
- CHECK(ShapeUtil::IsArray(shape()));
- CHECK_EQ(ShapeUtil::Rank(shape()), 1);
- CHECK_EQ(element_count(), values.bits());
- CHECK_EQ(shape().element_type(), PRED);
- for (int64 i = 0; i < static_cast<int64>(values.bits()); ++i) {
- Set({i}, values.get(i));
- }
-}
-
-/* static */ std::unique_ptr<Literal> Literal::CreateR1U8(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR1U8(
tensorflow::StringPiece value) {
auto literal = MakeUnique<Literal>(
ShapeUtil::MakeShape(U8, {static_cast<int64>(value.size())}));
@@ -775,116 +295,13 @@ void Literal::PopulateR1(const tensorflow::core::Bitmap& values) {
return literal;
}
-/* static */ std::unique_ptr<Literal> Literal::CreateR2F32Linspace(float from,
- float to,
- int64 rows,
- int64 cols) {
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR2F32Linspace(
+ float from, float to, int64 rows, int64 cols) {
auto value = MakeLinspaceArray2D(from, to, rows, cols);
return CreateR2FromArray2D(*value);
}
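
Usage sketch for the reflowed linspace factory. The value spacing comes from MakeLinspaceArray2D (defined elsewhere, not in this diff), which fills the rows-by-cols array with evenly spaced values from `from` to `to` in row-major order:

    #include "tensorflow/compiler/xla/literal_util.h"

    // A 3x2 F32 literal whose six values step evenly from 0.0 to 1.0.
    void LinspaceExample() {
      auto lin = xla::LiteralUtil::CreateR2F32Linspace(
          /*from=*/0.0f, /*to=*/1.0f, /*rows=*/3, /*cols=*/2);
    }
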
-std::unique_ptr<Literal> LiteralBase::Relayout(
- const Layout& new_layout, const ShapeIndex& shape_index) const {
- // Create new shape with 'new_layout' set at the given shape index.
- Shape new_shape = shape();
- Shape* subshape = ShapeUtil::GetMutableSubshape(&new_shape, shape_index);
- TF_CHECK_OK(LayoutUtil::ValidateLayoutForShape(new_layout, *subshape));
- *subshape->mutable_layout() = new_layout;
- auto result = MakeUnique<Literal>(new_shape);
- TF_CHECK_OK(result->CopyFrom(*this));
- return result;
-}
-
-std::unique_ptr<Literal> LiteralBase::Relayout(
- const Shape& shape_with_layout) const {
- CHECK(ShapeUtil::Compatible(shape_with_layout, shape()))
- << "Given shape_with_layout " << ShapeUtil::HumanString(shape_with_layout)
- << " not compatible with literal shape "
- << ShapeUtil::HumanString(shape());
- std::unique_ptr<Literal> result = CreateFromShape(shape_with_layout);
- ShapeUtil::ForEachSubshape(
- result->shape(),
- [this, &result](const Shape& subshape, const ShapeIndex& index) {
- if (ShapeUtil::IsArray(subshape)) {
- TF_CHECK_OK(result->CopyFrom(*this,
- /*dest_shape_index=*/index,
- /*src_shape_index=*/index));
- }
- });
- return result;
-}
-
-StatusOr<std::unique_ptr<Literal>> LiteralBase::Broadcast(
- const Shape& result_shape,
- tensorflow::gtl::ArraySlice<int64> dimensions) const {
- if (!ShapeUtil::IsArray(shape())) {
- return InvalidArgument("Broadcast only supports arrays.");
- }
-
- for (int64 i = 0; i < dimensions.size(); i++) {
- TF_RET_CHECK(shape().dimensions(i) ==
- result_shape.dimensions(dimensions[i]));
- }
-
- std::unique_ptr<Literal> result = MakeUnique<Literal>(result_shape);
-
- // scratch_source_index is temporary storage space for the computed index into
- // the input literal. We put it here to avoid allocating an std::vector in
- // every iteration of ShapeUtil::ForEachIndex.
- std::vector<int64> scratch_source_index(shape().dimensions_size());
-
- char* dest_data = static_cast<char*>(result->untyped_data());
- const char* source_data = static_cast<const char*>(untyped_data());
- const int64 primitive_size =
- ShapeUtil::ByteSizeOfPrimitiveType(shape().element_type());
-
- ShapeUtil::ForEachIndex(
- result_shape, [&](tensorflow::gtl::ArraySlice<int64> output_index) {
- for (int64 i = 0; i < dimensions.size(); ++i) {
- scratch_source_index[i] = output_index[dimensions[i]];
- }
- int64 dest_index = IndexUtil::MultidimensionalIndexToLinearIndex(
- result_shape, output_index);
- int64 source_index = IndexUtil::MultidimensionalIndexToLinearIndex(
- shape(), scratch_source_index);
- memcpy(dest_data + primitive_size * dest_index,
- source_data + primitive_size * source_index, primitive_size);
- return true;
- });
-
- return std::move(result);
-}
-
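
In the removed Broadcast above, the `dimensions` argument maps source dimension i to output dimension dimensions[i] (that is what the TF_RET_CHECK enforces), and the scratch_source_index loop inverts that mapping for every output element. The same index rule as a standalone sketch (the helper name is mine, not from this diff):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Source index for one output element: source dim i reads output dim
    // dimensions[i], mirroring the scratch_source_index loop above.
    std::vector<int64_t> SourceIndexFor(const std::vector<int64_t>& output_index,
                                        const std::vector<int64_t>& dimensions) {
      std::vector<int64_t> source(dimensions.size());
      for (std::size_t i = 0; i < dimensions.size(); ++i) {
        source[i] = output_index[dimensions[i]];
      }
      return source;
    }
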
-StatusOr<std::unique_ptr<Literal>> LiteralBase::Reshape(
- tensorflow::gtl::ArraySlice<int64> dimensions) const {
- if (!ShapeUtil::IsArray(shape())) {
- return InvalidArgument("Reshape does not support tuples.");
- }
- std::unique_ptr<Literal> output;
- if (!LayoutUtil::IsMonotonicWithDim0Major(shape().layout())) {
- output =
- Relayout(LayoutUtil::GetDefaultLayoutForRank(ShapeUtil::Rank(shape())));
- } else {
- output = CloneToUnique();
- }
- // Because the layout is monotonic, we can simply reuse the same sequence of
- // values without changing their order.
- *output->mutable_shape_do_not_use() =
- ShapeUtil::MakeShape(shape().element_type(), dimensions);
-
- int64 elements_before = ShapeUtil::ElementsIn(shape());
- int64 elements_after = ShapeUtil::ElementsIn(output->shape());
- if (elements_before != elements_after) {
- return InvalidArgument(
- "Shapes before and after Literal::Reshape have different numbers "
- "of elements: %s vs %s.",
- ShapeUtil::HumanString(shape()).c_str(),
- ShapeUtil::HumanString(output->shape()).c_str());
- }
- return std::move(output);
-}
-
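
Usage sketch for the removed (relocated) Reshape. CreateR1 here is the templated rank-1 factory and ValueOrDie() the usual StatusOr accessor; both are assumed rather than shown in these hunks:

    // With a row-major layout, the flat value sequence is reused unchanged.
    void ReshapeExample() {
      auto v = xla::LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5, 6});
      auto m = v->Reshape({2, 3}).ValueOrDie();  // {{1, 2, 3}, {4, 5, 6}}
      // Reshape({4, 2}) would return InvalidArgument: 8 != 6 elements.
    }
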
-/* static */ std::unique_ptr<Literal> Literal::ReshapeSlice(
+/* static */ std::unique_ptr<Literal> LiteralUtil::ReshapeSlice(
tensorflow::gtl::ArraySlice<int64> new_dimensions,
tensorflow::gtl::ArraySlice<int64> minor_to_major,
const LiteralSlice& literal) {
@@ -956,575 +373,64 @@ StatusOr<std::unique_ptr<Literal>> LiteralBase::Reshape(
return new_literal;
}
-std::unique_ptr<Literal> LiteralBase::Transpose(
- tensorflow::gtl::ArraySlice<int64> permutation) const {
- CHECK(ShapeUtil::IsArray(shape())) << "Tuple is not supported for transpose";
- CHECK(IsPermutation(permutation, ShapeUtil::Rank(shape())))
- << "Given permutation is not a permutation of dimension numbers";
- // To transpose the array, we just permute the dimensions and layout, and
- // do a straight memory copy of the raw data set.
- // This is considerably faster than iterating over every array element using
- // the EachCell<>() and Set<>() APIs.
- std::vector<int64> inverse_permutation = InversePermutation(permutation);
- Shape permuted_shape =
- ShapeUtil::PermuteDimensions(inverse_permutation, shape());
- // Replace the layout with one affine to this shape, so that the transpose
- // can be performed by leaving the flat representation of the values intact.
- // For example, consider the shape F32[11,8]{1,0} under a {1,0} permutation.
- // The shape with the affine layout resulting from that operation is
- // F32[8,11]{0,1}: the dimension that was most minor (the one of size 8)
- // remains the most minor.
- //
- // Essentially, let MinMaj(Di) be the position of dimension Di within the
- // minor-to-major vector, and let T(Di) be the index the original Di
- // dimension takes in the transposed array; then a layout is affine if
- // MinMaj(Di) == TMinMaj(T(Di)), where TMinMaj() is the minor-to-major
- // vector of the affine layout.
- CHECK(LayoutUtil::IsDenseArray(permuted_shape));
- Layout* layout = permuted_shape.mutable_layout();
- layout->clear_minor_to_major();
- for (auto index : LayoutUtil::MinorToMajor(shape())) {
- layout->add_minor_to_major(inverse_permutation[index]);
- }
- auto new_literal = MakeUnique<Literal>(permuted_shape);
- DCHECK_EQ(ShapeUtil::ByteSizeOf(new_literal->shape()),
- ShapeUtil::ByteSizeOf(shape()));
- std::memcpy(new_literal->untyped_data(), untyped_data(), size_bytes());
- return new_literal;
-}
-
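
The loop just above rewrites each dimension id in the old minor-to-major list through the inverse permutation; in the worked example from the comment, F32[11,8]{1,0} under permutation {1,0} (its own inverse) yields F32[8,11]{0,1}, so the byte buffer can be memcpy'd unchanged. The remap as a standalone sketch (the helper name is mine):

    #include <cstdint>
    #include <vector>

    // New minor-to-major order after a transpose: each old dimension id is
    // rewritten through the inverse permutation, preserving relative order.
    std::vector<int64_t> PermutedMinorToMajor(
        const std::vector<int64_t>& minor_to_major,
        const std::vector<int64_t>& inverse_permutation) {
      std::vector<int64_t> result;
      result.reserve(minor_to_major.size());
      for (int64_t dim : minor_to_major) {
        result.push_back(inverse_permutation[dim]);
      }
      return result;  // e.g. {1,0} with inverse {1,0} -> {0,1}
    }
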
-template <typename NativeT>
-std::unique_ptr<Literal> LiteralBase::SliceInternal(
- const Shape& result_shape,
- tensorflow::gtl::ArraySlice<int64> start_indices) const {
- auto result_literal = MakeUnique<Literal>(result_shape);
- DimensionVector new_indices(ShapeUtil::Rank(result_shape));
- result_literal->EachCell<NativeT>(
- [&](tensorflow::gtl::ArraySlice<int64> indices, NativeT /*value*/) {
- for (int64 i = 0; i < ShapeUtil::Rank(result_shape); ++i) {
- new_indices[i] = indices[i] + start_indices[i];
- }
- NativeT value = Get<NativeT>(new_indices);
- result_literal->Set<NativeT>(indices, value);
- });
- return result_literal;
-}
-
-std::unique_ptr<Literal> LiteralBase::Slice(
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices) const {
- CHECK(ShapeUtil::IsArray(shape())) << "tuple is not supported for slice";
-
- DimensionVector result_dimensions;
- for (int64 dnum = 0; dnum < ShapeUtil::Rank(shape()); ++dnum) {
- CHECK_GE(start_indices[dnum], 0);
- CHECK_LE(limit_indices[dnum], shape().dimensions(dnum))
- << "dnum = " << dnum;
- int64 dimension = limit_indices[dnum] - start_indices[dnum];
- CHECK_GE(dimension, 0) << "dnum = " << dnum;
- result_dimensions.push_back(dimension);
- }
- const auto result_shape =
- ShapeUtil::MakeShapeWithLayout(shape().element_type(), result_dimensions,
- LayoutUtil::MinorToMajor(shape()));
- switch (result_shape.element_type()) {
- case F32:
- return SliceInternal<float>(result_shape, start_indices);
- case BF16:
- return SliceInternal<bfloat16>(result_shape, start_indices);
- case C64:
- return SliceInternal<complex64>(result_shape, start_indices);
- case S32:
- return SliceInternal<int32>(result_shape, start_indices);
- case U32:
- return SliceInternal<uint32>(result_shape, start_indices);
- default:
- LOG(FATAL) << "not yet implemented: "
- << PrimitiveType_Name(result_shape.element_type());
- }
-}
-
-Literal LiteralBase::Clone() const {
- Literal result(shape());
- TF_CHECK_OK(result.CopyFrom(*this));
- return result;
-}
-
-std::unique_ptr<Literal> LiteralBase::CloneToUnique() const {
- auto result = MakeUnique<Literal>(shape());
- TF_CHECK_OK(result->CopyFrom(*this));
- return result;
-}
-
-string LiteralBase::GetAsString(tensorflow::gtl::ArraySlice<int64> multi_index,
- const ShapeIndex& shape_index) const {
- const Shape& subshape = ShapeUtil::GetSubshape(shape(), shape_index);
- CHECK(LayoutUtil::IsDenseArray(subshape));
- switch (subshape.element_type()) {
- case PRED:
- return Get<bool>(multi_index, shape_index) ? "true" : "false";
- case S8:
- return StrCat(Get<int8>(multi_index, shape_index));
- case S16:
- return StrCat(Get<int16>(multi_index, shape_index));
- case S32:
- return StrCat(Get<int32>(multi_index, shape_index));
- case S64:
- return StrCat(Get<int64>(multi_index, shape_index));
- case U8:
- return StrCat(Get<uint8>(multi_index, shape_index));
- case U16:
- return StrCat(Get<uint16>(multi_index, shape_index));
- case U32:
- return StrCat(Get<uint32>(multi_index, shape_index));
- case U64:
- return StrCat(Get<uint64>(multi_index, shape_index));
- case F16:
- return StrCat(static_cast<float>(Get<half>(multi_index, shape_index)));
- case F32:
- return StrCat(Get<float>(multi_index, shape_index));
- case BF16:
- return StrCat(
- static_cast<float>(Get<bfloat16>(multi_index, shape_index)));
- case F64:
- return StrCat(Get<double>(multi_index, shape_index));
- case C64: {
- complex64 c = Get<complex64>(multi_index, shape_index);
- return StrCat("(", c.real(), ", ", c.imag(), ")");
- }
- default:
- LOG(FATAL) << PrimitiveType_Name(subshape.element_type());
- }
-}
-
-string LiteralBase::GetSparseElementAsString(
- int64 sparse_element_number, const ShapeIndex& shape_index) const {
- const Shape& subshape = ShapeUtil::GetSubshape(shape(), shape_index);
- CHECK(LayoutUtil::IsSparseArray(subshape));
- switch (subshape.element_type()) {
- case PRED:
- return GetSparseElement<bool>(sparse_element_number, shape_index)
- ? "true"
- : "false";
- case S8:
- return StrCat(GetSparseElement<int8>(sparse_element_number, shape_index));
- case S16:
- return StrCat(
- GetSparseElement<int16>(sparse_element_number, shape_index));
- case S32:
- return StrCat(
- GetSparseElement<int32>(sparse_element_number, shape_index));
- case S64:
- return StrCat(
- GetSparseElement<int64>(sparse_element_number, shape_index));
- case U8:
- return StrCat(
- GetSparseElement<uint8>(sparse_element_number, shape_index));
- case U16:
- return StrCat(
- GetSparseElement<uint16>(sparse_element_number, shape_index));
- case U32:
- return StrCat(
- GetSparseElement<uint32>(sparse_element_number, shape_index));
- case U64:
- return StrCat(
- GetSparseElement<uint64>(sparse_element_number, shape_index));
- case F16:
- return StrCat(static_cast<float>(
- GetSparseElement<half>(sparse_element_number, shape_index)));
- case F32:
- return StrCat(
- GetSparseElement<float>(sparse_element_number, shape_index));
- case BF16:
- return StrCat(static_cast<float>(
- GetSparseElement<bfloat16>(sparse_element_number, shape_index)));
- case F64:
- return StrCat(
- GetSparseElement<double>(sparse_element_number, shape_index));
- case C64: {
- complex64 c =
- GetSparseElement<complex64>(sparse_element_number, shape_index);
- return StrCat("(", c.real(), ", ", c.imag(), ")");
- }
- default:
- LOG(FATAL) << "Invalid element type for sparse arrays: "
- << PrimitiveType_Name(subshape.element_type());
- }
-}
-
-StatusOr<int64> LiteralBase::GetIntegralAsS64(
- tensorflow::gtl::ArraySlice<int64> multi_index) const {
- CHECK(LayoutUtil::IsDenseArray(shape()));
- switch (shape().element_type()) {
- case PRED:
- return Get<bool>(multi_index);
- case U8:
- return Get<uint8>(multi_index);
- case S32:
- return Get<int32>(multi_index);
- case S64:
- return Get<int64>(multi_index);
- case U32:
- return Get<uint32>(multi_index);
- case U64:
- return Get<uint64>(multi_index);
- default:
- return FailedPrecondition(
- "Array element type is not integral: %s",
- PrimitiveType_Name(shape().element_type()).c_str());
- }
-}
-
-size_t LiteralBase::Hash() const {
- using tensorflow::Hash64;
- using tensorflow::Hash64Combine;
-
- size_t hash_value = ShapeUtil::Hash(shape());
-
- ShapeUtil::ForEachSubshape(
- shape(), [&](const Shape& subshape, const ShapeIndex& index) {
- if (!ShapeUtil::IsArray(subshape)) {
- return;
- }
-
- CHECK(LayoutUtil::IsDense(subshape.layout()));
- hash_value = Hash64Combine(
- hash_value, Hash64(static_cast<const char*>(untyped_data(index)),
- size_bytes(index)));
- });
-
- return hash_value;
-}
-
-Status Literal::SetIntegralAsS64(tensorflow::gtl::ArraySlice<int64> multi_index,
- int64 value) {
- CHECK(LayoutUtil::IsDenseArray(shape()));
- switch (shape().element_type()) {
- case PRED:
- Set<bool>(multi_index, value);
- break;
- case U8:
- Set<uint8>(multi_index, value);
- break;
- case S32:
- Set<int32>(multi_index, value);
- break;
- case S64:
- Set<int64>(multi_index, value);
- break;
- case U32:
- Set<uint32>(multi_index, value);
- break;
- case U64:
- Set<uint64>(multi_index, value);
- break;
- default:
- return FailedPrecondition(
- "Array element type is not integral: %s",
- PrimitiveType_Name(shape().element_type()).c_str());
- }
- return Status::OK();
-}
-
-tensorflow::gtl::ArraySlice<int64> LiteralBase::GetSparseIndex(
- int64 sparse_element_number, const ShapeIndex& shape_index) const {
- const Piece& p = piece(shape_index);
- CHECK_GE(sparse_element_number, 0);
- CHECK_LT(sparse_element_number, p.sparse_indices()->index_count());
- return p.sparse_indices()->At(sparse_element_number);
-}
-
-void Literal::SortSparseElements(const ShapeIndex& shape_index) {
- piece(shape_index).SortSparseElements();
-}
-
-Literal LiteralBase::GetFirstScalarLiteral() const {
- CHECK(ShapeUtil::IsArray(shape()));
- CHECK_GT(ShapeUtil::ElementsIn(shape()), 0);
- switch (shape().element_type()) {
+/* static */ Literal LiteralUtil::GetFirstScalarLiteral(
+ const LiteralSlice& literal) {
+ CHECK(ShapeUtil::IsArray(literal.shape()));
+ CHECK_GT(ShapeUtil::ElementsIn(literal.shape()), 0);
+ switch (literal.shape().element_type()) {
case PRED:
- return std::move(*Literal::CreateR0<bool>(GetFirstElement<bool>()));
+ return std::move(
+ *LiteralUtil::CreateR0<bool>(literal.GetFirstElement<bool>()));
// 8 bit types.
case S8:
- return std::move(*Literal::CreateR0<int8>(GetFirstElement<int8>()));
+ return std::move(
+ *LiteralUtil::CreateR0<int8>(literal.GetFirstElement<int8>()));
case U8:
- return std::move(*Literal::CreateR0<uint8>(GetFirstElement<uint8>()));
+ return std::move(
+ *LiteralUtil::CreateR0<uint8>(literal.GetFirstElement<uint8>()));
// 16 bit types.
case BF16:
- return std::move(
- *Literal::CreateR0<bfloat16>(GetFirstElement<bfloat16>()));
+ return std::move(*LiteralUtil::CreateR0<bfloat16>(
+ literal.GetFirstElement<bfloat16>()));
case F16:
- return std::move(*Literal::CreateR0<half>(GetFirstElement<half>()));
+ return std::move(
+ *LiteralUtil::CreateR0<half>(literal.GetFirstElement<half>()));
case S16:
- return std::move(*Literal::CreateR0<int16>(GetFirstElement<int16>()));
+ return std::move(
+ *LiteralUtil::CreateR0<int16>(literal.GetFirstElement<int16>()));
case U16:
- return std::move(*Literal::CreateR0<uint16>(GetFirstElement<uint16>()));
+ return std::move(
+ *LiteralUtil::CreateR0<uint16>(literal.GetFirstElement<uint16>()));
// 32 bit types.
case F32:
- return std::move(*Literal::CreateR0<float>(GetFirstElement<float>()));
+ return std::move(
+ *LiteralUtil::CreateR0<float>(literal.GetFirstElement<float>()));
case S32:
- return std::move(*Literal::CreateR0<int32>(GetFirstElement<int32>()));
+ return std::move(
+ *LiteralUtil::CreateR0<int32>(literal.GetFirstElement<int32>()));
case U32:
- return std::move(*Literal::CreateR0<uint32>(GetFirstElement<uint32>()));
+ return std::move(
+ *LiteralUtil::CreateR0<uint32>(literal.GetFirstElement<uint32>()));
// 64 bit types.
case C64:
- return std::move(
- *Literal::CreateR0<complex64>(GetFirstElement<complex64>()));
+ return std::move(*LiteralUtil::CreateR0<complex64>(
+ literal.GetFirstElement<complex64>()));
case F64:
- return std::move(*Literal::CreateR0<double>(GetFirstElement<double>()));
- case S64:
- return std::move(*Literal::CreateR0<int64>(GetFirstElement<int64>()));
- case U64:
- return std::move(*Literal::CreateR0<uint64>(GetFirstElement<uint64>()));
- default:
- LOG(FATAL) << "Unhandled primitive type " << shape().element_type();
- }
-}
-
-void LiteralBase::Piece::SortSparseElements() {
- switch (subshape().element_type()) {
- case PRED:
- SortSparseElementsInternal<bool>();
- break;
- case S8:
- SortSparseElementsInternal<int8>();
- break;
- case U8:
- SortSparseElementsInternal<uint8>();
- break;
- case S16:
- SortSparseElementsInternal<int16>();
- break;
- case U16:
- SortSparseElementsInternal<uint16>();
- break;
- case S32:
- SortSparseElementsInternal<int32>();
- break;
- case U32:
- SortSparseElementsInternal<uint32>();
- break;
+ return std::move(
+ *LiteralUtil::CreateR0<double>(literal.GetFirstElement<double>()));
case S64:
- SortSparseElementsInternal<int64>();
- break;
+ return std::move(
+ *LiteralUtil::CreateR0<int64>(literal.GetFirstElement<int64>()));
case U64:
- SortSparseElementsInternal<uint64>();
- break;
- case F32:
- SortSparseElementsInternal<float>();
- break;
- case F64:
- SortSparseElementsInternal<double>();
- break;
- case C64:
- SortSparseElementsInternal<complex64>();
- break;
- case F16:
- SortSparseElementsInternal<half>();
- break;
- case BF16:
- SortSparseElementsInternal<bfloat16>();
- break;
+ return std::move(
+ *LiteralUtil::CreateR0<uint64>(literal.GetFirstElement<uint64>()));
default:
- LOG(FATAL) << "Element type not valid for sparse array: "
- << PrimitiveType_Name(subshape().element_type());
- }
-}
-
-template <typename NativeT>
-void LiteralBase::Piece::SortSparseElementsInternal() {
- CHECK(LayoutUtil::IsSparseArray(subshape()));
- int64 num_elements = sparse_indices()->index_count();
- auto values = data<NativeT>();
- CHECK_LE(num_elements, values.size());
- sparse_indices()->SortWithValues(
- tensorflow::gtl::MutableArraySlice<NativeT>(values.data(), num_elements));
-}
-
-namespace {
-
-void ToStringHelper(const LiteralBase& literal, const ShapeIndex& shape_index,
- bool print_layout, std::vector<string>* pieces) {
- const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);
- CHECK(LayoutUtil::HasLayout(literal.shape()));
- CHECK(LayoutUtil::HasLayout(subshape));
-
- auto shape_to_string = [print_layout](const Shape& shape) {
- if (print_layout) {
- return ShapeUtil::HumanStringWithLayout(shape);
- } else {
- return ShapeUtil::HumanString(shape);
- }
- };
-
- // TODO(b/32894291): refactor this code to reduce code duplication.
- if (ShapeUtil::IsTuple(subshape)) {
- pieces->push_back(shape_to_string(subshape));
- pieces->push_back(" (\n");
- std::vector<string> tuple_pieces;
- for (int i = 0; i < ShapeUtil::TupleElementCount(subshape); ++i) {
- ShapeIndex element_index = shape_index;
- element_index.push_back(i);
- std::vector<string> element_pieces;
- ToStringHelper(literal, element_index, print_layout, &element_pieces);
- tuple_pieces.push_back(tensorflow::str_util::Join(element_pieces, ""));
- }
- pieces->push_back(tensorflow::str_util::Join(tuple_pieces, ",\n"));
- pieces->push_back("\n)");
- return;
- }
-
- if (ShapeUtil::IsToken(subshape)) {
- pieces->push_back("token");
- return;
- }
-
- if (LayoutUtil::IsSparseArray(subshape)) {
- pieces->push_back(shape_to_string(subshape));
- pieces->push_back("{");
- int64 rank = ShapeUtil::Rank(subshape);
- int64 num_elements = literal.sparse_element_count();
- for (int64 i = 0; i < num_elements; ++i) {
- if (i > 0) {
- pieces->push_back(", ");
- }
- if (rank == 1) {
- pieces->push_back(StrCat(literal.GetSparseIndex(i)[0]));
- pieces->push_back(": ");
- } else {
- pieces->push_back("[");
- pieces->push_back(
- tensorflow::str_util::Join(literal.GetSparseIndex(i), ", "));
- pieces->push_back("]: ");
- }
- pieces->push_back(literal.GetSparseElementAsString(i));
- }
- pieces->push_back("}");
- return;
- }
-
- CHECK(LayoutUtil::IsDenseArray(subshape));
-
- auto element_to_string =
- [&](tensorflow::gtl::ArraySlice<int64> indices) -> string {
- PrimitiveType element_type = subshape.element_type();
- if (element_type == PRED) {
- // We display predicates in a densely packed form.
- return literal.Get<bool>(indices, shape_index) ? "1" : "0";
- }
- return ((!indices.empty() && indices.back() > 0) ? ", " : "") +
- literal.GetAsString(indices, shape_index);
- };
-
- if (ShapeUtil::Rank(subshape) == 0) {
- pieces->push_back(literal.GetAsString({}, shape_index));
- } else if (ShapeUtil::Rank(subshape) == 1) {
- pieces->push_back("{");
- for (int64 i0 = 0; i0 < subshape.dimensions(0); ++i0) {
- pieces->push_back(element_to_string({i0}));
- }
- pieces->push_back("}");
- } else if (ShapeUtil::Rank(subshape) == 2) {
- pieces->push_back(shape_to_string(subshape));
- pieces->push_back(" {\n");
- for (int64 i0 = 0; i0 < subshape.dimensions(0); ++i0) {
- pieces->push_back(" { ");
- for (int64 i1 = 0; i1 < subshape.dimensions(1); ++i1) {
- pieces->push_back(element_to_string({i0, i1}));
- }
- pieces->push_back(" ");
- pieces->push_back(i0 == subshape.dimensions(0) - 1 ? "}\n" : "},\n");
- }
- pieces->push_back("}");
- } else if (ShapeUtil::Rank(subshape) == 3) {
- pieces->push_back(shape_to_string(subshape));
- pieces->push_back(" {\n");
- for (int64 i0 = 0; i0 < subshape.dimensions(0); ++i0) {
- pieces->push_back(i0 > 0 ? ",\n{" : "{");
- for (int64 i1 = 0; i1 < subshape.dimensions(1); ++i1) {
- pieces->push_back(i1 > 0 ? ",\n { " : " { ");
- for (int64 i2 = 0; i2 < subshape.dimensions(2); ++i2) {
- pieces->push_back(element_to_string({i0, i1, i2}));
- }
- pieces->push_back(" }");
- }
- pieces->push_back(" }");
- }
- pieces->push_back("\n}");
- } else if (ShapeUtil::Rank(subshape) == 4) {
- pieces->push_back(shape_to_string(subshape));
- pieces->push_back(" {\n");
- for (int64 i0 = 0; i0 < subshape.dimensions(0); ++i0) {
- pieces->push_back(Printf(" { /*i0=%lld*/\n", i0));
- for (int64 i1 = 0; i1 < subshape.dimensions(1); ++i1) {
- pieces->push_back(Printf(" { /*i1=%lld*/\n", i1));
- for (int64 i2 = 0; i2 < subshape.dimensions(2); ++i2) {
- pieces->push_back(" {");
- for (int64 i3 = 0; i3 < subshape.dimensions(3); ++i3) {
- pieces->push_back(element_to_string({i0, i1, i2, i3}));
- }
- pieces->push_back(i2 == subshape.dimensions(2) - 1 ? "}\n" : "},\n");
- }
- pieces->push_back(i1 == subshape.dimensions(1) - 1 ? " }\n"
- : " },\n");
- }
- pieces->push_back(i0 == subshape.dimensions(0) - 1 ? " }\n" : " },\n");
- }
- pieces->push_back("}");
- } else if (ShapeUtil::Rank(subshape) == 5) {
- pieces->push_back(shape_to_string(subshape));
- pieces->push_back(" {\n");
- for (int64 i0 = 0; i0 < subshape.dimensions(0); ++i0) {
- pieces->push_back(Printf(" { /*i0=%lld*/\n", i0));
- for (int64 i1 = 0; i1 < subshape.dimensions(1); ++i1) {
- pieces->push_back(Printf(" { /*i1=%lld*/\n", i1));
- for (int64 i2 = 0; i2 < subshape.dimensions(2); ++i2) {
- pieces->push_back(Printf(" { /*i2=%lld*/\n", i2));
- for (int64 i3 = 0; i3 < subshape.dimensions(3); ++i3) {
- pieces->push_back(" {");
- for (int64 i4 = 0; i4 < subshape.dimensions(4); ++i4) {
- pieces->push_back(element_to_string({i0, i1, i2, i3, i4}));
- }
- pieces->push_back(i3 == subshape.dimensions(3) - 1 ? "}\n"
- : "},\n");
- }
- pieces->push_back(i2 == subshape.dimensions(2) - 1 ? " }\n"
- : " },\n");
- }
- pieces->push_back(i1 == subshape.dimensions(1) - 1 ? " }\n"
- : " },\n");
- }
- pieces->push_back(i0 == subshape.dimensions(0) - 1 ? " }\n" : " },\n");
- }
- pieces->push_back("}");
- } else {
- pieces->push_back(shape_to_string(subshape));
- pieces->push_back(" {");
- literal.EachCellAsString(
- [&](tensorflow::gtl::ArraySlice<int64> indices, const string& value) {
- pieces->push_back(" ");
- pieces->push_back(value);
- });
- pieces->push_back("}");
+ LOG(FATAL) << "Unhandled primitive type "
+ << literal.shape().element_type();
}
}
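
Usage sketch for the new free-function form of GetFirstScalarLiteral. The Literal argument binds to the LiteralSlice parameter through the converting constructor shown near the end of this file's diff; CreateR1 is the templated rank-1 factory (assumed):

    void FirstScalarExample() {
      auto v = xla::LiteralUtil::CreateR1<float>({3.5f, 7.0f, 9.0f});
      // Previously a member: v->GetFirstScalarLiteral(). Now a static taking
      // the literal as an argument:
      xla::Literal first = xla::LiteralUtil::GetFirstScalarLiteral(*v);  // 3.5f
    }
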
-} // namespace
-
-int64 LiteralBase::sparse_element_count() const {
- CHECK(LayoutUtil::IsSparseArray(shape()));
- return sparse_indices()->index_count();
-}
-
-string LiteralBase::ToString(bool print_layout) const {
- std::vector<string> pieces;
- CHECK(LayoutUtil::HasLayout(this->shape()));
- ToStringHelper(*this, {}, print_layout, &pieces);
- return tensorflow::str_util::Join(pieces, "");
-}
-
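
For reference, the rank-2 branch of the removed ToStringHelper produces output in the shape-then-rows form sketched below. CreateR2 is the templated rank-2 factory (assumed, not shown in these hunks):

    #include <string>

    void ToStringExample() {
      auto m = xla::LiteralUtil::CreateR2<float>({{1, 2}, {3, 4}});
      std::string s = m->ToString(/*print_layout=*/false);
      // Roughly:
      //   f32[2,2] {
      //     { 1, 2 },
      //     { 3, 4 }
      //   }
    }
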
-/* static */ std::unique_ptr<Literal> Literal::MakeTuple(
+/* static */ std::unique_ptr<Literal> LiteralUtil::MakeTuple(
tensorflow::gtl::ArraySlice<const Literal*> elements) {
std::vector<Shape> element_shapes;
for (const auto* element : elements) {
@@ -1537,7 +443,7 @@ string LiteralBase::ToString(bool print_layout) const {
return literal;
}
-/* static */ std::unique_ptr<Literal> Literal::MakeTupleFromSlices(
+/* static */ std::unique_ptr<Literal> LiteralUtil::MakeTupleFromSlices(
tensorflow::gtl::ArraySlice<LiteralSlice> elements) {
std::vector<Shape> element_shapes;
for (const auto& element : elements) {
@@ -1550,7 +456,7 @@ string LiteralBase::ToString(bool print_layout) const {
return literal;
}
-/* static */ std::unique_ptr<Literal> Literal::MakeTupleOwned(
+/* static */ std::unique_ptr<Literal> LiteralUtil::MakeTupleOwned(
std::vector<std::unique_ptr<Literal>> elements) {
std::vector<Shape> element_shapes;
element_shapes.reserve(elements.size());
@@ -1565,818 +471,9 @@ string LiteralBase::ToString(bool print_layout) const {
return literal;
}
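
The three tuple factories differ only in how they take their elements, per the signatures above: MakeTuple copies from raw pointers, MakeTupleFromSlices copies from LiteralSlices, and MakeTupleOwned moves the unique_ptrs in. A sketch, reusing the scalar factories above:

    #include <memory>
    #include <utility>
    #include <vector>

    void TupleExample() {
      auto a = xla::LiteralUtil::CreateR0<float>(1.0f);
      auto b = xla::LiteralUtil::CreateR0<int32_t>(2);
      // Copies the element data:
      auto t1 = xla::LiteralUtil::MakeTuple({a.get(), b.get()});
      // Moves the elements instead:
      std::vector<std::unique_ptr<xla::Literal>> elems;
      elems.push_back(std::move(a));
      elems.push_back(std::move(b));
      auto t2 = xla::LiteralUtil::MakeTupleOwned(std::move(elems));
    }
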
-void LiteralBase::EachCellAsString(
- const std::function<void(tensorflow::gtl::ArraySlice<int64> indices,
- const string& value)>& per_cell) const {
- if (ShapeUtil::IsZeroElementArray(shape())) {
- return;
- }
- std::vector<int64> indices = IndexUtil::LinearIndexToMultidimensionalIndex(
- shape(), /*linear_index=*/0);
- do {
- per_cell(indices, GetAsString(indices));
- } while (IndexUtil::BumpIndices(shape(), &indices));
-}
-
-namespace {
-template <typename NativeSrcT, typename NativeDestT, typename ConverterType>
-std::unique_ptr<Literal> ConvertBetweenNativeTypesWithConverter(
- const LiteralBase& src_literal, const ConverterType& converter) {
- CHECK(ShapeUtil::IsArray(src_literal.shape()));
- auto result_literal = MakeUnique<Literal>(ShapeUtil::ChangeElementType(
- src_literal.shape(),
- primitive_util::NativeToPrimitiveType<NativeDestT>()));
- auto src_data = src_literal.data<NativeSrcT>();
- auto dest_data = result_literal->template data<NativeDestT>();
- int64 num_elements = src_literal.element_count();
-
- for (int64 i = 0; i < num_elements; ++i) {
- dest_data[i] = converter(src_data[i]);
- }
- return result_literal;
-}
-
-template <typename NativeSrcT, typename NativeDestT>
-std::unique_ptr<Literal> ConvertBetweenNativeTypes(
- const LiteralBase& src_literal) {
- auto converter = [](NativeSrcT src) { return static_cast<NativeDestT>(src); };
- return ConvertBetweenNativeTypesWithConverter<NativeSrcT, NativeDestT>(
- src_literal, converter);
-}
-
-template <typename NativeSrcT, typename NativeDestT>
-typename std::enable_if<(sizeof(NativeSrcT) == sizeof(NativeDestT)),
- std::unique_ptr<Literal>>::type
-BitcastBetweenNativeTypes(const LiteralBase& src_literal) {
- auto converter = [](NativeSrcT src) {
- return tensorflow::bit_cast<NativeDestT>(src);
- };
- return ConvertBetweenNativeTypesWithConverter<NativeSrcT, NativeDestT>(
- src_literal, converter);
-}
-
-// This enable_if overload exists only to keep the compiler happy: bit_cast
-// has a static check that the types are the same size. It should never be
-// selected in practice, because the source and destination types are checked
-// for identical sizes higher up.
-template <typename NativeSrcT, typename NativeDestT>
-typename std::enable_if<(sizeof(NativeSrcT) != sizeof(NativeDestT)),
- std::unique_ptr<Literal>>::type
-BitcastBetweenNativeTypes(const LiteralBase& src_literal) {
- LOG(FATAL) << "Invalid bitcast between types of different sizes.";
-}
-
-template <PrimitiveType primitive_src_type>
-std::unique_ptr<Literal> ConvertToC64(const LiteralBase& src_literal) {
- CHECK(ShapeUtil::IsArray(src_literal.shape()));
- auto result_literal = MakeUnique<Literal>(
- ShapeUtil::ChangeElementType(src_literal.shape(), C64));
- using NativeSrcT =
- typename primitive_util::PrimitiveTypeToNative<primitive_src_type>::type;
- tensorflow::gtl::ArraySlice<NativeSrcT> src_data =
- src_literal.data<NativeSrcT>();
- tensorflow::gtl::MutableArraySlice<complex64> dest_data =
- result_literal->data<complex64>();
- int64 num_elements = src_literal.element_count();
- for (int64 i = 0; i < num_elements; ++i) {
- dest_data[i] = complex64(static_cast<float>(src_data[i]), 0);
- }
- return result_literal;
-}
-
-template <PrimitiveType primitive_src_type, PrimitiveType primitive_dest_type>
-std::unique_ptr<Literal> ConvertIfTypesMatch(const LiteralBase& src_literal,
- bool bitcast) {
- CHECK_EQ(primitive_src_type, src_literal.shape().element_type());
- if (bitcast) {
- return BitcastBetweenNativeTypes<
- typename primitive_util::PrimitiveTypeToNative<
- primitive_src_type>::type,
- typename primitive_util::PrimitiveTypeToNative<
- primitive_dest_type>::type>(src_literal);
- } else {
- return ConvertBetweenNativeTypes<
- typename primitive_util::PrimitiveTypeToNative<
- primitive_src_type>::type,
- typename primitive_util::PrimitiveTypeToNative<
- primitive_dest_type>::type>(src_literal);
- }
-}
-
-template <PrimitiveType primitive_src_type>
-StatusOr<std::unique_ptr<Literal>> ConvertIfDestTypeMatches(
- const LiteralBase& src_literal, PrimitiveType primitive_dest_type,
- bool bitcast) {
- switch (primitive_dest_type) {
-#define CONVERT_IF_TYPES_MATCH(type) \
- case (type): \
- return ConvertIfTypesMatch<primitive_src_type, (type)>(src_literal, \
- bitcast);
- CONVERT_IF_TYPES_MATCH(PRED)
- CONVERT_IF_TYPES_MATCH(S8)
- CONVERT_IF_TYPES_MATCH(S32)
- CONVERT_IF_TYPES_MATCH(S64)
- CONVERT_IF_TYPES_MATCH(U8)
- CONVERT_IF_TYPES_MATCH(U32)
- CONVERT_IF_TYPES_MATCH(U64)
- CONVERT_IF_TYPES_MATCH(F16)
- CONVERT_IF_TYPES_MATCH(F32)
- CONVERT_IF_TYPES_MATCH(F64)
- CONVERT_IF_TYPES_MATCH(BF16)
-#undef CONVERT_IF_TYPES_MATCH
- case C64:
- if (!bitcast) {
- return ConvertToC64<primitive_src_type>(src_literal);
- }
- break;
- // Other types are not yet supported.
- default:
- break;
- }
- return Unimplemented(
- "Converting from type %s to type %s is not implemented.",
- PrimitiveType_Name(src_literal.shape().element_type()).c_str(),
- PrimitiveType_Name(primitive_dest_type).c_str());
-}
-
-StatusOr<std::unique_ptr<Literal>> ConvertSwitch(
- const LiteralBase& literal, PrimitiveType primitive_dest_type,
- bool bitcast) {
- TF_RET_CHECK(ShapeUtil::IsArray(literal.shape()));
- if (literal.shape().element_type() == primitive_dest_type) {
- return literal.CloneToUnique();
- }
- switch (literal.shape().element_type()) {
-#define CONVERT_IF_DEST_TYPE_MATCHES(type) \
- case (type): \
- return ConvertIfDestTypeMatches<(type)>(literal, primitive_dest_type, \
- bitcast);
- CONVERT_IF_DEST_TYPE_MATCHES(PRED)
- CONVERT_IF_DEST_TYPE_MATCHES(S8)
- CONVERT_IF_DEST_TYPE_MATCHES(S32)
- CONVERT_IF_DEST_TYPE_MATCHES(S64)
- CONVERT_IF_DEST_TYPE_MATCHES(U8)
- CONVERT_IF_DEST_TYPE_MATCHES(U32)
- CONVERT_IF_DEST_TYPE_MATCHES(U64)
- CONVERT_IF_DEST_TYPE_MATCHES(F16)
- CONVERT_IF_DEST_TYPE_MATCHES(F32)
- CONVERT_IF_DEST_TYPE_MATCHES(F64)
- CONVERT_IF_DEST_TYPE_MATCHES(BF16)
-#undef CONVERT_IF_DEST_TYPE_MATCHES
- // Other types are not yet supported.
- default:
- return Unimplemented(
- "%s from type %s to type %s is not implemented.",
- (bitcast ? "Bitcast converting" : "Converting"),
- PrimitiveType_Name(literal.shape().element_type()).c_str(),
- PrimitiveType_Name(primitive_dest_type).c_str());
- }
-}
-
-} // namespace
-
-StatusOr<std::unique_ptr<Literal>> LiteralBase::Convert(
- PrimitiveType primitive_dest_type) const {
- return ConvertSwitch(*this, primitive_dest_type, /*bitcast=*/false);
-}
-
-StatusOr<std::unique_ptr<Literal>> LiteralBase::BitcastConvert(
- PrimitiveType primitive_dest_type) const {
- if (primitive_util::BitWidth(shape().element_type()) !=
- primitive_util::BitWidth(primitive_dest_type)) {
- return InvalidArgument(
- "Cannot bitcast convert from %s to %s, bit widths are different: %d != "
- "%d",
- PrimitiveType_Name(shape().element_type()).c_str(),
- PrimitiveType_Name(primitive_dest_type).c_str(),
- primitive_util::BitWidth(shape().element_type()),
- primitive_util::BitWidth(primitive_dest_type));
- }
- return ConvertSwitch(*this, primitive_dest_type, /*bitcast=*/true);
-}
-
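
Convert and BitcastConvert share ConvertSwitch and differ only in the bitcast flag: Convert reinterprets values (static_cast semantics), while BitcastConvert reinterprets bits and insists on equal bit widths. A sketch, with ValueOrDie() assumed for brevity:

    void ConvertExample() {
      auto f = xla::LiteralUtil::CreateR0<float>(1.0f);
      auto as_value = f->Convert(xla::S32).ValueOrDie();        // holds 1
      auto as_bits = f->BitcastConvert(xla::S32).ValueOrDie();  // 0x3F800000
      // f->BitcastConvert(xla::F64) fails: 32-bit vs 64-bit widths differ.
    }
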
-StatusOr<std::unique_ptr<Literal>> LiteralBase::ConvertToShape(
- const Shape& dest_shape, bool round_f32_to_bf16) const {
- if (!ShapeUtil::IsTuple(dest_shape)) {
- if (round_f32_to_bf16 && shape().element_type() == F32 &&
- dest_shape.element_type() == BF16) {
- auto converter = [](float src) {
- return tensorflow::bfloat16::round_to_bfloat16(src);
- };
- return ConvertBetweenNativeTypesWithConverter<float, bfloat16>(*this,
- converter);
- }
- return Convert(dest_shape.element_type());
- }
- std::vector<Literal> elements;
- for (int i = 0; i < ShapeUtil::TupleElementCount(shape()); ++i) {
- auto element = LiteralSlice(*this, {i});
- TF_ASSIGN_OR_RETURN(
- auto new_element,
- element.ConvertToShape(ShapeUtil::GetSubshape(dest_shape, {i})));
- elements.push_back(std::move(*new_element));
- }
- auto converted = MakeUnique<Literal>();
- *converted = Literal::MoveIntoTuple(&elements);
- return std::move(converted);
-}
-
-template <typename NativeT>
-bool LiteralBase::Piece::EqualElementsInternal(
- const LiteralBase::Piece& other, std::vector<int64>* multi_index) const {
- if (multi_index->size() == ShapeUtil::Rank(subshape())) {
- return (Get<NativeT>(*multi_index) == other.Get<NativeT>(*multi_index));
- }
- for (int64 i = 0; i < subshape().dimensions(multi_index->size()); ++i) {
- multi_index->push_back(i);
- if (!EqualElementsInternal<NativeT>(other, multi_index)) {
- return false;
- }
- multi_index->pop_back();
- }
- return true;
-}
-
-bool LiteralBase::Piece::EqualElements(const LiteralBase::Piece& other) const {
- DCHECK(ShapeUtil::Compatible(subshape(), other.subshape()));
-
- std::vector<int64> multi_index;
- switch (subshape().element_type()) {
- case PRED:
- return EqualElementsInternal<bool>(other, &multi_index);
- case U8:
- return EqualElementsInternal<uint8>(other, &multi_index);
- case S32:
- return EqualElementsInternal<int32>(other, &multi_index);
- case S64:
- return EqualElementsInternal<int64>(other, &multi_index);
- case U32:
- return EqualElementsInternal<uint32>(other, &multi_index);
- case U64:
- return EqualElementsInternal<uint64>(other, &multi_index);
- case F32:
- return EqualElementsInternal<float>(other, &multi_index);
- case F64:
- return EqualElementsInternal<double>(other, &multi_index);
- case F16:
- return EqualElementsInternal<half>(other, &multi_index);
- case BF16:
- return EqualElementsInternal<bfloat16>(other, &multi_index);
- case C64:
- return EqualElementsInternal<complex64>(other, &multi_index);
- default:
- LOG(FATAL) << "Unimplemented: LiteralBase::Piece::EqualElements for type "
- << PrimitiveType_Name(subshape().element_type());
- }
-}
-
-bool LiteralBase::operator==(const LiteralBase& other) const {
- if (!ShapeUtil::Compatible(shape(), other.shape())) {
- return false;
- }
-
- return root_piece().ForEachSubpieceWithBool(
- [&](const ShapeIndex& index, const Piece& piece) {
- if (!ShapeUtil::IsArray(piece.subshape())) {
- return true;
- }
-
- const Piece& other_piece = other.piece(index);
- if (!piece.EqualElements(other_piece)) {
- return false;
- }
- return true;
- });
-}
-
-namespace {
-
-template <typename NativeT>
-static bool AllElementsEqualValue(tensorflow::gtl::ArraySlice<NativeT> data,
- NativeT value) {
- for (int64 i = 0; i < data.size(); ++i) {
- if (data[i] != value) {
- return false;
- }
- }
- return true;
-}
-
-} // namespace
-
-bool LiteralBase::IsAll(int8 value) const {
- return root_piece().ForEachSubpieceWithBool([&](const ShapeIndex& index,
- const Piece& piece) {
- if (!ShapeUtil::IsArray(piece.subshape())) {
- return true;
- }
-
- auto piece_is_all = [&]() {
- switch (shape().element_type()) {
- case U8:
- if (value >= 0) {
- return AllElementsEqualValue<uint8>(piece.data<uint8>(), value);
- }
- return false;
- case U32:
- if (value >= 0) {
- return AllElementsEqualValue<uint32>(piece.data<uint32>(), value);
- }
- return false;
- case U64:
- if (value >= 0) {
- return AllElementsEqualValue<uint64>(piece.data<uint64>(), value);
- }
- return false;
- case S8:
- return AllElementsEqualValue<int8>(piece.data<int8>(), value);
- case S32:
- return AllElementsEqualValue<int32>(piece.data<int32>(), value);
- case S64:
- return AllElementsEqualValue<int64>(piece.data<int64>(), value);
- case F32:
- return AllElementsEqualValue<float>(piece.data<float>(), value);
- case F64:
- return AllElementsEqualValue<double>(piece.data<double>(), value);
- case F16:
- return AllElementsEqualValue<half>(piece.data<half>(),
- static_cast<half>(value));
- case BF16:
- return AllElementsEqualValue<bfloat16>(piece.data<bfloat16>(),
- static_cast<bfloat16>(value));
- case PRED:
- if (value == 0) {
- return AllElementsEqualValue<bool>(piece.data<bool>(), false);
- }
- if (value == 1) {
- return AllElementsEqualValue<bool>(piece.data<bool>(), true);
- }
- return false;
- default:
- return false;
- }
- return false;
- };
-
- if (!piece_is_all()) {
- return false;
- }
- return true;
- });
-}
-
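
IsAll compares every element of every array subpiece against an int8 value, returning false for negative values against unsigned element types (the `value >= 0` guards above). A sketch, with the templated CreateR1 factory assumed:

    void IsAllExample() {
      auto a = xla::LiteralUtil::CreateR1<int32_t>({5, 5, 5});
      bool all_fives = a->IsAll(5);  // true
      auto u = xla::LiteralUtil::CreateR1<uint32_t>({7});
      bool negative = u->IsAll(-1);  // false: negative vs unsigned never matches
    }
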
-bool LiteralBase::IsAllFloat(float value) const {
- return root_piece().ForEachSubpieceWithBool(
- [&](const ShapeIndex& index, const Piece& piece) {
- if (!ShapeUtil::IsArray(piece.subshape())) {
- return true;
- }
-
- auto piece_is_all = [&]() {
- switch (shape().element_type()) {
- case F32:
- return AllElementsEqualValue<float>(piece.data<float>(), value);
- case F64:
- return AllElementsEqualValue<double>(piece.data<double>(), value);
- case F16:
- return AllElementsEqualValue<half>(piece.data<half>(),
- static_cast<half>(value));
- case BF16:
- return AllElementsEqualValue<bfloat16>(
- piece.data<bfloat16>(), static_cast<bfloat16>(value));
- default:
- return false;
- }
- };
- if (!piece_is_all()) {
- return false;
- }
- return true;
- });
-}
-
-bool LiteralBase::IsAllComplex(complex64 value) const {
- switch (shape().element_type()) {
- case C64:
- return AllElementsEqualValue<complex64>(root_piece().data<complex64>(),
- value);
- default:
- return false;
- }
-}
-
-bool LiteralBase::IsAllFirst() const {
- return root_piece().ForEachSubpieceWithBool(
- [&](const ShapeIndex& index, const Piece& piece) {
- if (!ShapeUtil::IsArray(piece.subshape())) {
- return true;
- }
-
- // Empty shapes are not all the first element since there is no first
- // element.
- if (ShapeUtil::IsZeroElementArray(piece.subshape())) {
- return false;
- }
- auto piece_is_all = [&]() {
- switch (piece.subshape().element_type()) {
- case PRED: {
- auto data = piece.data<bool>();
- return AllElementsEqualValue<bool>(data, data[0]);
- }
- // 8 bit types
- case S8: {
- auto data = piece.data<int8>();
- return AllElementsEqualValue<int8>(data, data[0]);
- }
- case U8: {
- auto data = piece.data<uint8>();
- return AllElementsEqualValue<uint8>(data, data[0]);
- }
- // 16 bit types
- case BF16: {
- auto data = piece.data<bfloat16>();
- return AllElementsEqualValue<bfloat16>(data, data[0]);
- }
- case F16: {
- auto data = piece.data<half>();
- return AllElementsEqualValue<half>(data, data[0]);
- }
- case S16: {
- auto data = piece.data<int16>();
- return AllElementsEqualValue<int16>(data, data[0]);
- }
- case U16: {
- auto data = piece.data<uint16>();
- return AllElementsEqualValue<uint16>(data, data[0]);
- }
- // 32 bit types
- case F32: {
- auto data = piece.data<float>();
- return AllElementsEqualValue<float>(data, data[0]);
- }
- case U32: {
- auto data = piece.data<uint32>();
- return AllElementsEqualValue<uint32>(data, data[0]);
- }
- case S32: {
- auto data = piece.data<int32>();
- return AllElementsEqualValue<int32>(data, data[0]);
- }
- // 64 bit types
- case C64: {
- auto data = piece.data<complex64>();
- return AllElementsEqualValue<complex64>(data, data[0]);
- }
- case F64: {
- auto data = piece.data<double>();
- return AllElementsEqualValue<double>(data, data[0]);
- }
- case S64: {
- auto data = piece.data<int64>();
- return AllElementsEqualValue<int64>(data, data[0]);
- }
- case U64: {
- auto data = piece.data<uint64>();
- return AllElementsEqualValue<uint64>(data, data[0]);
- }
- default:
- return false;
- }
- };
-
- if (!piece_is_all()) {
- return false;
- }
- return true;
- });
-}
-
-bool LiteralBase::IsZero(tensorflow::gtl::ArraySlice<int64> indices) const {
- CHECK(ShapeUtil::IsArray(shape()));
- switch (shape().element_type()) {
- case U8:
- return Get<uint8>(indices) == 0;
- case U32:
- return Get<uint32>(indices) == 0;
- case U64:
- return Get<uint64>(indices) == 0;
- case S8:
- return Get<int8>(indices) == 0;
- case S32:
- return Get<int32>(indices) == 0;
- case S64:
- return Get<int64>(indices) == 0;
- case F32:
- return Get<float>(indices) == 0.0f;
- case F64:
- return Get<double>(indices) == 0.0;
- case C64:
- return Get<complex64>(indices) == complex64(0.0f, 0.0f);
- case F16:
- return Get<half>(indices) == static_cast<half>(0.0f);
- case BF16:
- return Get<bfloat16>(indices) == static_cast<bfloat16>(0.0f);
- case PRED:
- return Get<bool>(indices) == false;
- default:
- LOG(FATAL) << "Input literal must be an array.";
- }
-}
-
-namespace {
-
-template <typename RepeatedFieldT, typename NativeT>
-void CopyToRepeatedField(RepeatedFieldT* dest,
- const tensorflow::gtl::ArraySlice<NativeT> src) {
- *dest = RepeatedFieldT(src.begin(), src.end());
-}
-
-} // namespace
-
-void LiteralBase::Piece::WriteToProto(LiteralProto* proto) const {
- *proto->mutable_shape() = subshape();
- switch (subshape().element_type()) {
- case PRED:
- CopyToRepeatedField(proto->mutable_preds(), data<bool>());
- break;
- case U8:
- proto->set_u8s(static_cast<const unsigned char*>(data<uint8>().data()),
- element_count());
- break;
- case U32:
- CopyToRepeatedField(proto->mutable_u32s(), data<uint32>());
- break;
- case U64:
- CopyToRepeatedField(proto->mutable_u64s(), data<uint64>());
- break;
- case S32:
- CopyToRepeatedField(proto->mutable_s32s(), data<int32>());
- break;
- case S64:
- CopyToRepeatedField(proto->mutable_s64s(), data<int64>());
- break;
- case F16:
- *proto->mutable_f16s() = string(
- reinterpret_cast<const char*>(data<half>().data()), size_bytes());
- if (!kLittleEndian) {
- ConvertEndianShort(proto->mutable_f16s());
- }
- break;
- case BF16:
- *proto->mutable_bf16s() = string(
- reinterpret_cast<const char*>(data<bfloat16>().data()), size_bytes());
- if (!kLittleEndian) {
- ConvertEndianShort(proto->mutable_bf16s());
- }
- break;
- case F32:
- CopyToRepeatedField(proto->mutable_f32s(), data<float>());
- break;
- case F64:
- CopyToRepeatedField(proto->mutable_f64s(), data<double>());
- break;
- case C64:
- for (complex64 value : data<complex64>()) {
- proto->add_c64s(value.real());
- proto->add_c64s(value.imag());
- }
- break;
- case TUPLE:
- // Nothing to do: the shape was already assigned above.
- return;
- default:
- LOG(FATAL) << "Unhandled primitive type " << subshape().element_type();
- }
-}
-
-const void* LiteralBase::Piece::untyped_data() const {
- CHECK(ShapeUtil::IsArray(subshape())) << ShapeUtil::HumanString(subshape());
- return buffer();
-}
-
-void* LiteralBase::Piece::untyped_data() {
- CHECK(ShapeUtil::IsArray(subshape())) << ShapeUtil::HumanString(subshape());
- return buffer();
-}
-
-namespace {
-
-template <typename RepeatedFieldT, typename NativeT>
-Status CopyFromRepeatedField(tensorflow::gtl::MutableArraySlice<NativeT> dest,
- const RepeatedFieldT& src) {
- if (dest.size() != src.size()) {
- return InvalidArgument(
- "Expected %lu elements in LiteralProto repeated field, has %d",
- dest.size(), src.size());
- }
- std::copy(src.begin(), src.end(), dest.begin());
- return Status::OK();
-}
-
-} // namespace
-
-Status LiteralBase::Piece::CopyFromProto(const LiteralProto& proto) {
- // These conditions should have been checked in Literal::CreateFromProto.
- TF_RET_CHECK(proto.has_shape());
- TF_RET_CHECK(LayoutUtil::HasLayout(proto.shape()));
- TF_RET_CHECK(ShapeUtil::Equal(proto.shape(), subshape()));
-
- switch (subshape().element_type()) {
- case PRED:
- TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<bool>(), proto.preds()));
- break;
- case U8: {
- auto u8_data = data<uint8>();
- TF_RET_CHECK(proto.u8s().size() == u8_data.size());
- std::copy(proto.u8s().begin(), proto.u8s().end(), u8_data.begin());
- } break;
- case S32:
- TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<int32>(), proto.s32s()));
- break;
- case S64:
- TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<int64>(), proto.s64s()));
- break;
- case U32:
- TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<uint32>(), proto.u32s()));
- break;
- case U64:
- TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<uint64>(), proto.u64s()));
- break;
- case F16: {
- const string& s(proto.f16s());
- TF_RET_CHECK(data<half>().size() * sizeof(half) == s.size());
- memcpy(untyped_data(), s.data(), s.size());
- if (!kLittleEndian) {
- ConvertEndianShort(reinterpret_cast<char*>(untyped_data()), s.size());
- }
- } break;
-
- case BF16: {
- const string& s(proto.bf16s());
- TF_RET_CHECK(data<bfloat16>().size() * sizeof(bfloat16) == s.size());
- memcpy(untyped_data(), s.data(), s.size());
- if (!kLittleEndian) {
- ConvertEndianShort(reinterpret_cast<char*>(untyped_data()), s.size());
- }
- } break;
- case F32:
- TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<float>(), proto.f32s()));
- break;
- case F64:
- TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<double>(), proto.f64s()));
- break;
- case C64: {
- auto complex_data = data<complex64>();
- TF_RET_CHECK(proto.c64s_size() == complex_data.size() * 2);
- for (int64 i = 0; i < complex_data.size(); ++i) {
- complex_data[i] = complex64{proto.c64s(i * 2), proto.c64s(i * 2 + 1)};
- }
- } break;
- case TUPLE:
- LOG(FATAL) << "Should not be called on tuple shapes: "
- << ShapeUtil::HumanString(subshape());
- break;
- default:
- LOG(FATAL) << "Unhandled primitive type " << subshape().element_type();
- }
- return Status::OK();
-}
-
-LiteralProto LiteralBase::ToProto() const {
- LiteralProto proto;
- root_piece().ForEachSubpiece(
- [&](const ShapeIndex& index, const Piece& piece) {
- LiteralProto* proto_piece = &proto;
- for (int64 i : index) {
- while (proto_piece->tuple_literals_size() <= i) {
- proto_piece->add_tuple_literals();
- }
- proto_piece = proto_piece->mutable_tuple_literals(i);
- }
- piece.WriteToProto(proto_piece);
- });
-
- if (LayoutUtil::IsSparseArray(shape())) {
- CopyToRepeatedField(proto.mutable_sparse_indices(),
- sparse_indices()->data());
- }
-
- return proto;
-}
-
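
WriteToProto and CopyFromProto above are exact inverses per element type, so ToProto and CreateFromProto round-trip. A sketch (ValueOrDie() assumed; operator==, shown later in this diff, compares values and ignores layout):

    void ProtoRoundTrip(const xla::Literal& literal) {
      xla::LiteralProto proto = literal.ToProto();
      auto restored = xla::Literal::CreateFromProto(proto).ValueOrDie();
      CHECK(*restored == literal);  // same values; layout is not compared
    }
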
-/* static */
-StatusOr<std::unique_ptr<Literal>> Literal::CreateFromProto(
- const LiteralProto& proto) {
- if (!proto.has_shape()) {
- return InvalidArgument("LiteralProto has no shape");
- }
- if (!LayoutUtil::HasLayout(proto.shape())) {
- return InvalidArgument("LiteralProto has no layout");
- }
-
- auto literal = MakeUnique<Literal>(proto.shape());
-
- TF_RETURN_IF_ERROR(literal->root_piece_->ForEachMutableSubpieceWithStatus(
- [&](const ShapeIndex& index, Piece* piece) {
- const LiteralProto* proto_element = &proto;
- for (int64 i : index) {
- CHECK(i < proto_element->tuple_literals_size());
- proto_element = &proto_element->tuple_literals(i);
- }
-
- if (ShapeUtil::IsTuple(piece->subshape())) {
- if (proto_element->tuple_literals_size() !=
- ShapeUtil::TupleElementCount(piece->subshape())) {
- return InvalidArgument(
- "Expected %lld tuple elements in LiteralProto, has %d",
- ShapeUtil::TupleElementCount(piece->subshape()),
- proto_element->tuple_literals_size());
- }
- return Status::OK();
- }
-
- CHECK(ShapeUtil::IsArray(piece->subshape()));
- TF_RETURN_IF_ERROR(piece->CopyFromProto(*proto_element));
-
- return Status::OK();
- }));
-
- return std::move(literal);
-}
-
-/* static */ string Literal::MultiIndexAsString(
+/* static */ string LiteralUtil::MultiIndexAsString(
tensorflow::gtl::ArraySlice<int64> multi_index) {
return StrCat("{", tensorflow::str_util::Join(multi_index, ","), "}");
}
-const void* LiteralBase::untyped_data(const ShapeIndex& shape_index) const {
- return piece(shape_index).untyped_data();
-}
-
-void* Literal::untyped_data(const ShapeIndex& shape_index) {
- return piece(shape_index).untyped_data();
-}
-
-int64 LiteralBase::size_bytes(const ShapeIndex& shape_index) const {
- return piece(shape_index).size_bytes();
-}
-
-string LiteralBase::GetR1U8AsString() const {
- CHECK(ShapeUtil::IsArray(shape()));
- CHECK_EQ(ShapeUtil::Rank(shape()), 1);
- CHECK_EQ(shape().element_type(), U8);
- return string(tensorflow::bit_cast<const char*>(data<uint8>().data()),
- ShapeUtil::ElementsIn(shape()));
-}
-
-void BorrowingLiteral::BuildPieceSubtree(const Shape& shape, Piece* piece) {
- CHECK(ShapeUtil::IsTuple(shape));
- for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
- const Shape& subshape = shape.tuple_shapes(i);
-
- auto child_piece = Piece();
- child_piece.set_subshape(&subshape);
-
- if (ShapeUtil::IsTuple(subshape)) {
- BuildPieceSubtree(subshape, &child_piece);
- }
-
- piece->emplace_back(std::move(child_piece));
- }
-}
-
-LiteralSlice::LiteralSlice(const LiteralBase& literal)
- : LiteralBase(), root_piece_(&literal.root_piece()) {}
-
-LiteralSlice::LiteralSlice(const LiteralBase& literal,
- const ShapeIndex& view_root)
- : LiteralBase(), root_piece_(&literal.piece(view_root)) {}
-
-BorrowingLiteral::BorrowingLiteral(const char* src_buf_ptr, const Shape& shape)
- : LiteralBase(), shape_(MakeUnique<Shape>(shape)) {
- CHECK(ShapeUtil::IsArray(*shape_));
- CHECK(LayoutUtil::HasLayout(*shape_));
-
- root_piece_ = Piece();
- root_piece_.set_buffer(const_cast<char*>(src_buf_ptr));
- root_piece_.set_subshape(shape_.get());
-}
-
-BorrowingLiteral::BorrowingLiteral(
- tensorflow::gtl::ArraySlice<const char*> src_buf_ptrs, const Shape& shape)
- : LiteralBase(), shape_(MakeUnique<Shape>(shape)) {
- CHECK(ShapeUtil::IsTuple(*shape_));
- CHECK(!ShapeUtil::IsNestedTuple(*shape_));
- CHECK_EQ(src_buf_ptrs.size(), ShapeUtil::TupleElementCount(*shape_));
- root_piece_ = Piece();
- root_piece_.set_subshape(shape_.get());
- BuildPieceSubtree(*shape_, &root_piece_);
-
- for (int i = 0; i < src_buf_ptrs.size(); ++i) {
- const auto& src_shape = shape_->tuple_shapes(i);
- CHECK(ShapeUtil::IsArray(src_shape));
- root_piece_.child(i).set_buffer(const_cast<char*>(src_buf_ptrs[i]));
- }
-}
-
} // namespace xla
diff --git a/tensorflow/compiler/xla/literal_util.h b/tensorflow/compiler/xla/literal_util.h
index 37ca8ea9f1..e3737a9d00 100644
--- a/tensorflow/compiler/xla/literal_util.h
+++ b/tensorflow/compiler/xla/literal_util.h
@@ -32,6 +32,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/array4d.h"
#include "tensorflow/compiler/xla/index_util.h"
#include "tensorflow/compiler/xla/layout_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/primitive_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -51,679 +52,12 @@ limitations under the License.
namespace xla {
-// Forward declare the Literal and LiteralSlice classes to be used by the
-// creation methods in the base class.
-class Literal;
-class LiteralSlice;
-
-// Abstract base class for literals.
-class LiteralBase {
+class LiteralUtil {
public:
- virtual ~LiteralBase() = 0;
-
- // Literals are equal if they have compatible shapes and the same data
- // values. Layout is not compared.
- bool operator==(const LiteralBase& other) const;
- bool operator!=(const LiteralBase& other) const { return !(*this == other); }
-
- // Returns the shape of the literal.
- const Shape& shape() const { return root_piece().subshape(); }
-
- // Serialize to proto.
- LiteralProto ToProto() const;
-
- // Returns an ArraySlice of the array for this literal for the given NativeT
- // (e.g., float). CHECKs if the subshape of the literal at the given
- // ShapeIndex is not array. See primitive_util.h for the mapping from XLA type
- // to native type.
- template <typename NativeT>
- tensorflow::gtl::ArraySlice<NativeT> data(
- const ShapeIndex& shape_index = {}) const;
-
- // Returns a const pointer to the sparse index array. Returns nullptr if the
- // literal is not a sparse array.
- const SparseIndexArray* sparse_indices(
- const ShapeIndex& shape_index = {}) const;
-
- // Returns a const pointer to (or size of) the underlying buffer holding the
- // array at the given shape index. CHECKs if the subshape of the literal at
- // the given ShapeIndex is not array.
- const void* untyped_data(const ShapeIndex& shape_index = {}) const;
- int64 size_bytes(const ShapeIndex& shape_index = {}) const;
-
- // Returns this literal's data as a string. This literal must be a rank-1 U8
- // array.
- string GetR1U8AsString() const;
-
- // Returns a string representation of the literal value.
- // Warning: this function can take minutes for multi-million element Literals.
- string ToString(bool print_layout = false) const;
-
- // Gets an element in the literal at the given index. The multi_index is
- // CHECKed against the dimension sizes.
- template <typename NativeT>
- NativeT Get(tensorflow::gtl::ArraySlice<int64> multi_index,
- const ShapeIndex& shape_index) const;
- // Overloads of Get for array literals. CHECKs if the literal is not
- // array-shaped and dense.
- template <typename NativeT>
- NativeT Get(tensorflow::gtl::ArraySlice<int64> multi_index) const;
-
- // Returns the element value at index (0, ..., 0), however many zeroes are
- // required for that index.
- template <typename NativeT>
- NativeT GetFirstElement() const;
-
- // As Get(), but determines the correct type and converts the value
- // into text.
- string GetAsString(tensorflow::gtl::ArraySlice<int64> multi_index,
- const ShapeIndex& shape_index = {}) const;
- // As GetSparseElement(), but determines the correct type and converts the
- // value into text.
- string GetSparseElementAsString(int64 sparse_element_number,
- const ShapeIndex& shape_index = {}) const;
- // As Get(), but determines the correct type and converts the value into
- // int64. This literal must be an array.
- StatusOr<int64> GetIntegralAsS64(
- tensorflow::gtl::ArraySlice<int64> multi_index) const;
-
- // Returns the multi-index of the element in a sparse literal at the given
-  // sparse element number. The sparse element number is the position within
- // the sparse array's list of (index, value) pairs, and is checked against the
- // total number of (index, value) pairs in the sparse array.
- tensorflow::gtl::ArraySlice<int64> GetSparseIndex(
- int64 sparse_element_number, const ShapeIndex& shape_index = {}) const;
-
- // Returns the value of the element in a sparse literal at the given sparse
-  // element number. The sparse element number is the position within the
- // sparse array's list of (index, value) pairs, and is checked against the
- // total number of (index, value) pairs in the sparse array.
- template <typename NativeT>
- NativeT GetSparseElement(int64 sparse_element_number,
- const ShapeIndex& shape_index = {}) const;
-
- // Invokes the "per cell" callback for each element in the provided
- // literal with the element's indices and a string representation of
- // the element's value.
- //
-  // This function is useful if you want a polymorphic representation
-  // of the tensor's elements (e.g., turning each element into a string
-  // for representation in a protobuf).
- //
- // This literal must have a dense layout.
- void EachCellAsString(
- const std::function<void(tensorflow::gtl::ArraySlice<int64> indices,
- const string& value)>& per_cell) const;
- template <typename NativeT>
- void EachCell(std::function<void(tensorflow::gtl::ArraySlice<int64> indices,
- NativeT value)>
- per_cell) const;
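A hedged usage sketch of the visitor above (assuming the usual XLA headers are on the include path; the function name is illustrative, not part of the API): log every cell of a dense literal together with its multi-index.

  void LogAllCells(const xla::LiteralSlice& literal) {
    literal.EachCellAsString(
        [](tensorflow::gtl::ArraySlice<xla::int64> indices,
           const std::string& value) {
          // Pairs each element's multi-index with its text representation.
          LOG(INFO) << xla::LiteralUtil::MultiIndexAsString(indices) << " = "
                    << value;
        });
  }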
-
- // Returns whether every element in this literal is equal to value.
- //
- // value is an int8 because we expect this to be called with small
- // compile-time constants (0, -1, etc.) and so that whatever value you pass
- // can be represented exactly by floating-point types as small as 16 bits.
- //
- // If value doesn't fit in this literal's type, returns false. Values of 1/0
- // are considered equal to true/false; other values are not considered equal
-  // to true. Also, if this literal is not array-shaped, false is returned.
- bool IsAll(int8 value) const;
-
- // Like IsAll(const Literal&, int8), except we check whether the literal is
- // equal to a particular floating-point number.
- //
- // If the literal is not a floating-point value, this always returns false.
- //
- // This casts value to the type of literal, then compares using ==. The usual
- // admonishments about floating-point equality checks apply. We expect you to
- // use this to check for values that can be expressed precisely as a float,
-  // e.g. -0.5. Also, if this literal is not array-shaped, false is returned.
- bool IsAllFloat(float value) const;
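A minimal sketch of how these predicates are typically used (the helper names here are illustrative only):

  bool IsAllZeros(const xla::LiteralSlice& literal) {
    return literal.IsAll(0);  // false for non-array shapes, per the contract
  }
  bool IsAllNegativeHalf(const xla::LiteralSlice& literal) {
    // Exact floating-point comparison, as the comment above warns.
    return literal.IsAllFloat(-0.5f);
  }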
-
- // Like IsAll(const Literal&, int8), except we check whether the literal is
- // equal to a particular complex number.
- //
- // If the literal is not a complex value, this always returns false.
- //
- // This casts value to the type of literal, then compares using ==. The usual
- // admonishments about floating-point equality checks apply. We expect you to
- // use this to check for complex values that can be expressed precisely as
- // float pairs e.g. (-0.5, 1.0).
- //
- // This literal must have a dense layout.
- bool IsAllComplex(complex64 value) const;
-
- // Literal consists entirely of the first element of the literal.
- bool IsAllFirst() const;
-
- // Returns whether this literal is zero at the specified index. This literal
- // must be an array with a dense layout.
- bool IsZero(tensorflow::gtl::ArraySlice<int64> indices) const;
-
- // Returns the count of the elements in the array at the given shape index in
- // this literal.
- int64 element_count(const ShapeIndex& index = {}) const {
- return ShapeUtil::ElementsIn(ShapeUtil::GetSubshape(shape(), index));
- }
-
- // Returns the count of the elements in the sparse array at the given shape
- // index in this literal, which will be no larger than
-  // LayoutUtil::MaxSparseElements(ShapeUtil::GetSubshape(shape(), index).layout()).
- int64 sparse_element_count() const;
-
- // Compute a hash for this literal. This literal must not be a sparse tensor
- // or a tuple containing a sparse tensor.
- size_t Hash() const;
-
-  // Converts this literal to the given shape. Returns an error if the
- // conversion is not possible.
- //
- // round_f32_to_bf16: if true, converting F32 elements to BF16 uses rounding
- // instead of truncation; otherwise, truncation is used.
- //
-  // TODO(b/69266521): remove the round_f32_to_bf16 flag when rounding becomes
- // the default behavior.
- StatusOr<std::unique_ptr<Literal>> ConvertToShape(
- const Shape& dest_shape, bool round_f32_to_bf16 = false) const;
-
- // Converts this literal to another primitive type using a bitcast
- // conversion. The to and from primitive types must have the same bit
- // width. Returns an error if the conversion is not possible. This literal
- // must be array-shaped.
- StatusOr<std::unique_ptr<Literal>> BitcastConvert(
- PrimitiveType primitive_dest_type) const;
-
- // Converts this literal to another primitive type. Returns an error if the
- // conversion is not possible. This literal must be array-shaped.
- StatusOr<std::unique_ptr<Literal>> Convert(
- PrimitiveType primitive_dest_type) const;
+ LiteralUtil() = delete;
// Returns a literal scalar representing the first element.
- Literal GetFirstScalarLiteral() const;
-
- // Clones the underlying buffers into a new Literal, or new
- // std::unique_ptr<Literal>.
- Literal Clone() const;
- std::unique_ptr<Literal> CloneToUnique() const;
-
- // TODO(b/67651157): The methods below which perform computation on Literals
- // (Reshape, Slice, etc) should be moved elsewhere, and perhaps combined with
- // evaluator code which operates on Literals.
- //
-  // Creates a new value equivalent to this literal, but conforming to
-  // new_layout; e.g. a literal matrix that was in {0,
- // 1} minor-to-major dimension layout can be re-layed-out as {1, 0}
- // minor-to-major dimension layout and the value in the cell at any given
- // logical index (i0, i1) will be the same.
- //
- // For tuple shaped literals, shape_index should be used to select the inner
- // array that the new layout applies to.
- //
- // Note: this is useful when the client wants to ensure that a value placed in
- // the XLA allocation tracker has a particular layout; for efficiency
- // purposes or avoiding unimplemented operation/layout combinations.
- std::unique_ptr<Literal> Relayout(const Layout& new_layout,
- const ShapeIndex& shape_index = {}) const;
-
- // An overload of Relayout which changes the layout of the entire shape rather
- // than being limited to a single array within the shape.
- std::unique_ptr<Literal> Relayout(const Shape& shape_with_layout) const;
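A minimal sketch, assuming a rank-2 array literal: re-lay it out from {0, 1} to {1, 0} minor-to-major without changing any logical value.

  std::unique_ptr<xla::Literal> WithSwappedLayout(
      const xla::LiteralBase& literal) {
    return literal.Relayout(xla::LayoutUtil::MakeLayout({1, 0}));
  }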
-
- // Creates a new literal by reshaping this literal to have the given
-  // dimensions. The total number of elements must not change; the
- // implementation currently only supports monotonic dim0-major layouts.
- // This literal must be an array.
- StatusOr<std::unique_ptr<Literal>> Reshape(
- tensorflow::gtl::ArraySlice<int64> dimensions) const;
-
- // Creates a new literal by broadcasting this literal with `dimensions` to
- // yield a literal of shape `result_shape`.
- StatusOr<std::unique_ptr<Literal>> Broadcast(
- const Shape& result_shape,
- tensorflow::gtl::ArraySlice<int64> dimensions) const;
-
- // Creates a new literal by reordering the dimensions of this literal.
- // The given `permutation` must be a permutation of the dimension numbers
- // in the original literal, and it specifies the order of the new dimensions
- // in the result literal (i.e., new_order[i] = old_order[permutation[i]]).
- // For example, a transpose call on a literal of shape [3 x 8 x 4] and
- // `permutation` = {2, 0, 1} returns a new literal of shape [4 x 3 x 8].
- // This literal must be an array.
- std::unique_ptr<Literal> Transpose(
- tensorflow::gtl::ArraySlice<int64> permutation) const;
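The example in the comment above, as code: permutation {2, 0, 1} maps a [3 x 8 x 4] literal to [4 x 3 x 8] (the function name is illustrative).

  std::unique_ptr<xla::Literal> RotateDims(const xla::LiteralBase& literal) {
    return literal.Transpose({2, 0, 1});  // [3 x 8 x 4] -> [4 x 3 x 8]
  }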
-
- // Creates a sub-array from this literal by extracting the indices
- // [start_index, limit_index) of each dimension. The result literal has the
- // same rank and layout as for the given literal. The number of indices in
- // start_indices and limit_indices must be the rank of the literal, and the
- // indices follow the order of the dimensions.
- // This literal must be an array.
- std::unique_ptr<Literal> Slice(
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices) const;
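An illustrative sketch: extract the top-left 2x2 sub-array of a rank-2 literal; start and limit indices follow dimension order, as documented above.

  std::unique_ptr<xla::Literal> TopLeft2x2(const xla::LiteralBase& literal) {
    return literal.Slice(/*start_indices=*/{0, 0}, /*limit_indices=*/{2, 2});
  }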
-
- // Creates a literal with a prepended dimension with bound "times"; e.g. a
- // f32[3x2] with times=4 will produce a f32[4x3x2] with the 3x2 from this
- // literal replicated four times.
- // This literal must be an array.
- template <typename NativeT>
- std::unique_ptr<Literal> Replicate(int64 times) const;
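The doc comment's example as a sketch: replicating a f32[3x2] literal four times yields f32[4x3x2].

  std::unique_ptr<xla::Literal> FourCopies(const xla::LiteralBase& literal) {
    return literal.Replicate<float>(4);  // prepends a dimension of bound 4
  }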
-
- // Creates a new Literal object with the shape specified as parameter.
- // The content of the literal values is the default value of the primitive
- // type of literal itself (0 for numeric types, and false for predicates).
- //
- // Note: It's an antipattern to use this method then immediately call
-  // Literal::Populate on the result (since that results in zero initialization,
-  // then reinitialization). Consider whether a call to MakeUnique<Literal>(shape),
-  // followed by a call to Literal::Populate, can be used instead.
- static std::unique_ptr<Literal> CreateFromShape(const Shape& shape);
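A sketch of the preferred pattern from the note above; `shape` is assumed to be an array-shaped F32 Shape supplied by the caller.

  auto literal = xla::MakeUnique<xla::Literal>(shape);  // memory uninitialized
  literal->PopulateWithValue<float>(1.0f);  // one pass, no redundant zeroing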
-
- protected:
- // A data structure representing a subshape at a particular ShapeIndex within
- // the literal. For array-shaped ShapeIndexes, this data structure holds the
- // pointer to the memory allocated for the array data.
- class Piece {
- public:
- // Returns the buffer holding the array data for this piece as an array
- // slice. This piece must be array-shaped.
- template <typename NativeT>
- tensorflow::gtl::ArraySlice<NativeT> data() const;
- template <typename NativeT>
- tensorflow::gtl::MutableArraySlice<NativeT> data();
-
- // Returns the buffer holding the array data for this piece as a void*. This
- // piece must be array-shaped.
- void* untyped_data();
- const void* untyped_data() const;
-
- // Gets or sets an element in the array at the given index. The multi_index
- // is CHECKed against the dimension sizes of the array. This piece must be
- // array-shaped.
- template <typename NativeT>
- NativeT Get(tensorflow::gtl::ArraySlice<int64> index) const;
- template <typename NativeT>
- void Set(tensorflow::gtl::ArraySlice<int64> index, NativeT value);
-
- // Gets/sets the buffer holding the array data.
- char* buffer() const { return buffer_; }
- void set_buffer(char* buffer) { buffer_ = buffer; }
-
- // The array of multi-indices that provide the locations of non-zero
- // elements in a sparse array. Only used if
- // LayoutUtil::IsSparseArray(shape()) is true.
- SparseIndexArray* sparse_indices() const { return sparse_indices_; }
- void set_sparse_indices(SparseIndexArray* sparse_indices) {
- sparse_indices_ = sparse_indices;
- }
-
- // Gets or sets the subshape of this piece. This reference points to a
- // subshape within the shape in the containing Literal (Literal::shape_).
- const Shape& subshape() const { return *subshape_; }
- void set_subshape(const Shape* subshape) { subshape_ = subshape; }
-
- // Returns the size in bytes of the buffer holding the array data.
- int64 size_bytes() const { return ShapeUtil::ByteSizeOf(subshape()); }
-
- // Returns the number of elements in this piece's array.
- int64 element_count() const {
- // If this is a sparse array, use the number of elements represented by
- // the indices in the associated SparseIndexArray.
- return LayoutUtil::IsSparseArray(subshape())
- ? sparse_indices()->index_count()
- : ShapeUtil::ElementsIn(subshape());
- }
-
- // Returns the child piece at 'index' of this piece.
- Piece& child(int64 index) { return children_[index]; }
-
- // Adds a child piece to this piece's children.
- void emplace_back(Piece child_piece) {
- children_.emplace_back(std::move(child_piece));
- }
-
- // Returns the size of children pieces of this piece.
- int64 children_size() { return children_.size(); }
-
-    // Visitor functions that recursively traverse the piece and call the
- // given function at each child piece. The function has the type:
- // void (const ShapeIndex& index, const Piece& piece)
- template <typename Fn>
- void ForEachSubpiece(const Fn& func) const {
- ShapeIndex index;
- return ForEachHelper(
- [&func](const ShapeIndex& index, const Piece& piece) {
- func(index, piece);
- return Status::OK();
- },
- *this, &index)
- .IgnoreError();
- }
- // Same as above, but the function has the type:
- // Status (const ShapeIndex& index, const Piece& piece)
- // The first non-OK return value is returned by the function.
- template <typename Fn>
- Status ForEachSubpieceWithStatus(const Fn& func) const {
- ShapeIndex index;
- return ForEachHelper(func, *this, &index);
- }
- // Same as above, but the function has the type:
-    //   bool (const ShapeIndex& index, const Piece& piece)
- // The first non-true return value is returned by the function.
- template <typename Fn>
- bool ForEachSubpieceWithBool(const Fn& func) const {
- ShapeIndex index;
- return ForEachHelperBool(func, *this, &index);
- }
- // Same as above, but the function has the type:
-    //   void (const ShapeIndex& index, Piece* piece)
- template <typename Fn>
- void ForEachMutableSubpiece(const Fn& func) {
- ShapeIndex index;
- return ForEachMutableHelper(
- [&func](const ShapeIndex& index, Piece* piece) {
- func(index, piece);
- return Status::OK();
- },
- const_cast<xla::LiteralBase::Piece*>(this), &index)
- .IgnoreError();
- }
- // Same as above, but the function has the type:
-    //   Status (const ShapeIndex& index, Piece* piece)
- // The first non-OK return value is returned by the function.
- template <typename Fn>
- Status ForEachMutableSubpieceWithStatus(const Fn& func) {
- ShapeIndex index;
- return ForEachMutableHelper(
- func, const_cast<xla::LiteralBase::Piece*>(this), &index);
- }
-
- // Returns true if this piece and 'other' contain the same data. This piece
- // and 'other' must be array-shaped and compatible.
- bool EqualElements(const Piece& other) const;
-
- // Writes the shape and data (if array-shaped) into the given proto.
- void WriteToProto(LiteralProto* proto) const;
-
- // Copy the data from 'src' into this piece's buffer. Shapes of this piece
- // and src must be compatible.
- Status CopyFrom(const Piece& src);
-
- // Copies the data from the given proto into this piece. The shape of this
- // piece must be equal (not just compatible) to the shape of the proto.
- Status CopyFromProto(const LiteralProto& proto);
-
- // Sorts the elements in a sparse array.
- void SortSparseElements();
-
- private:
- // Helpers for traversing the piece via ForEachSubpiece rooted at 'index'.
- // The first non-OK (or non-true) value is returned by the function.
- // The callable 'func' has the same signature as described above in
- // ForEachSubpiece*.
- template <typename Fn>
- Status ForEachHelper(const Fn& func, const Piece& piece,
- ShapeIndex* index) const {
- TF_RETURN_IF_ERROR(func(*index, piece));
- for (int64 i = 0; i < piece.children_.size(); ++i) {
- index->push_back(i);
- TF_RETURN_IF_ERROR(ForEachHelper(func, piece.children_[i], index));
- index->pop_back();
- }
- return Status::OK();
- }
- template <typename Fn>
- bool ForEachHelperBool(const Fn& func, const Piece& piece,
- ShapeIndex* index) const {
- if (!func(*index, piece)) {
- return false;
- }
- for (int64 i = 0; i < piece.children_.size(); ++i) {
- index->push_back(i);
- if (!ForEachHelperBool(func, piece.children_[i], index)) {
- return false;
- }
- index->pop_back();
- }
- return true;
- }
- template <typename Fn>
- Status ForEachMutableHelper(const Fn& func, Piece* piece,
- ShapeIndex* index) {
- TF_RETURN_IF_ERROR(func(*index, piece));
- for (int64 i = 0; i < piece->children_.size(); ++i) {
- index->push_back(i);
- TF_RETURN_IF_ERROR(
- ForEachMutableHelper(func, &piece->children_[i], index));
- index->pop_back();
- }
- return Status::OK();
- }
-
- // Recursive helper for EqualElements.
- template <typename NativeT>
- bool EqualElementsInternal(const Piece& other,
- std::vector<int64>* multi_index) const;
-
- // Helper for SortSparseElements that has the element type as a template
- // parameter.
- template <typename NativeT>
- void SortSparseElementsInternal();
-
- // For array-shaped pieces, this is the buffer holding the literal data.
- char* buffer_ = nullptr;
-
- // For sparse arrays, this is the array of indices.
- SparseIndexArray* sparse_indices_ = nullptr;
-
-    // The shape of this piece. This points into the shape of the containing
- // (Literal::shape_).
- const Shape* subshape_ = nullptr;
-
- // Children pieces for tuple shaped pieces.
- std::vector<Piece> children_ = {};
- }; // class Piece
-
- const Piece& piece(const ShapeIndex& shape_index) const {
- Piece* piece = &const_cast<Piece&>(root_piece());
- for (const auto i : shape_index) {
- DCHECK_GE(i, 0);
- DCHECK_LT(i, piece->children_size());
- piece = &piece->child(i);
- }
- return *piece;
- }
-
- // Returns the piece at the root of the shape.
- virtual const Piece& root_piece() const = 0;
-
- // LiteralSlice and Literal must access Pieces of other Literals.
- friend class Literal;
- friend class LiteralSlice;
- friend class BorrowingLiteral;
-
- private:
- template <typename NativeT>
- std::unique_ptr<Literal> SliceInternal(
- const Shape& result_shape,
- tensorflow::gtl::ArraySlice<int64> start_indices) const;
-};
-
-// Class representing literal values in XLA.
-//
-// The underlying buffer and shape is always owned by this class.
-class Literal : public LiteralBase {
- public:
- Literal() : Literal(ShapeUtil::MakeNil()) {}
-
- // Create a literal of the given shape. The literal is allocated sufficient
- // memory to hold the shape. Memory is uninitialized.
- explicit Literal(const Shape& shape);
- virtual ~Literal();
-
- // Literals are moveable, but not copyable. To copy a literal use
- // Literal::Clone or Literal::CloneToUnique. This prevents inadvertent copies
-  // of literals, which can be expensive.
- Literal(const Literal& other) = delete;
- Literal& operator=(const Literal& other) = delete;
- Literal(Literal&& other);
- // 'allocate_arrays' indicates whether to allocate memory for the arrays in
- // the shape. If false, buffer pointers inside of the Literal::Pieces are set
- // to nullptr.
- Literal(const Shape& shape, bool allocate_arrays);
- Literal& operator=(Literal&& other);
-
- // TODO(b/67651157): Remove this accessor. Literal users should not be able to
- // mutate the shape as this can produce malformed Literals.
- Shape* mutable_shape_do_not_use() { return shape_.get(); }
-
- // Returns a MutableArraySlice view of the array for this literal for the
- // given NativeT (e.g., float). CHECKs if the subshape of the literal at the
- // given ShapeIndex is not array. See primitive_util.h for the mapping from
- // XLA type to native type.
- template <typename NativeT>
- tensorflow::gtl::MutableArraySlice<NativeT> data(
- const ShapeIndex& shape_index = {});
- // Unhide const method from parent class.
- using LiteralBase::data;
-
- // Returns a pointer to the sparse index array. Returns nullptr if the literal
- // is not a sparse array.
- SparseIndexArray* sparse_indices(const ShapeIndex& shape_index = {});
-
- // Returns a pointer to the underlying buffer holding the array at the given
- // shape index. CHECKs if the subshape of the literal at the given ShapeIndex
- // is not array.
- void* untyped_data(const ShapeIndex& shape_index = {});
- // Unhide const method from parent class.
- using LiteralBase::untyped_data;
-
- // Populates a literal with a sparse layout with the given indices and values.
- // Each index in the indices array is CHECKed against the dimensions in the
- // literal's shape. If sort is true, then the indices and values will be
- // sorted. If sort is false, then the indices and values are assumed to
- // already be in sorted order. See CreateSparse for an example of how data
- // are populated.
- template <typename NativeT>
- void PopulateSparse(SparseIndexArray indices,
- tensorflow::gtl::ArraySlice<NativeT> values,
- bool sort = true);
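A hedged sparse-population sketch (all names are illustrative, and the SparseIndexArray constructor arguments are assumptions): append two nonzero entries and let PopulateSparse sort them.

  xla::SparseIndexArray indices(/*max_indices=*/16, /*rank=*/2);
  indices.Append({1, 2});
  indices.Append({7, 0});
  sparse_literal.PopulateSparse<float>(std::move(indices), {3.0f, 4.0f},
                                       /*sort=*/true);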
-
- // Copy values from 'src_literal' rooted at 'src_shape_index' into this
- // literal rooted at 'dest_shape_index'. The subshape of this literal rooted
- // at 'dest_shape_index' must be compatible with the subshape of 'src_literal'
- // rooted at 'src_shape_index', but need not be arrays.
- Status CopyFrom(const LiteralSlice& src_literal,
- const ShapeIndex& dest_shape_index = {},
- const ShapeIndex& src_shape_index = {});
-
-  // Similar to CopyFrom, but with move semantics. The subshape of this literal
- // rooted at 'dest_shape_index' must be *equal* to the shape 'src_literal'
- // (layouts and shapes must match), but need not be arrays. The memory
- // allocated in this literal for the subshape at dest_shape_index is
- // deallocated, and the respective buffers are replaced with those in
- // src_literal. Upon return, src_literal is set to a nil shape (empty tuple).
- Status MoveFrom(Literal&& src_literal,
- const ShapeIndex& dest_shape_index = {});
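A sketch contrasting the two (inside a Status-returning function; `dest` is assumed tuple-shaped, and all names are illustrative): CopyFrom duplicates the source buffers, while MoveFrom steals them and leaves `src` nil-shaped.

  TF_RETURN_IF_ERROR(dest.CopyFrom(src_slice, /*dest_shape_index=*/{0}));
  TF_RETURN_IF_ERROR(dest.MoveFrom(std::move(src), /*dest_shape_index=*/{1}));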
-
- // Copies the values from src_literal, starting at src_base shape indexes,
- // to this literal, starting at dest_base, where the copy size in each
- // dimension is specified by copy_size.
- // The src_literal and this literal must have the same primitive type,
- // src_base+copy_size must fit the source literal dimensions, as well as
- // dest_base+copy_size must fit the destination literal dimensions.
-  // Note: if either src_literal or this literal contains dimensions with zero
-  // elements, then copy_size must be 0 in those dimensions and the
-  // corresponding base indices must be 0.
- // This literal and 'src_literal' must be arrays.
- Status CopySliceFrom(const LiteralSlice& src_literal,
- tensorflow::gtl::ArraySlice<int64> src_base,
- tensorflow::gtl::ArraySlice<int64> dest_base,
- tensorflow::gtl::ArraySlice<int64> copy_size);
-
- // Copies one element from src_literal[src_index] to (*this)[dest_index].
- Status CopyElementFrom(const LiteralSlice& src_literal,
- tensorflow::gtl::ArraySlice<int64> src_index,
- tensorflow::gtl::ArraySlice<int64> dest_index);
-
- // Sets an element in the literal at the given index. The multi_index is
- // CHECKed against the dimension sizes.
- template <typename NativeT>
- void Set(tensorflow::gtl::ArraySlice<int64> multi_index,
- const ShapeIndex& shape_index, NativeT value);
- // Overloads of Set for array literals. CHECKs if the literal is not
- // array-shaped and dense.
- template <typename NativeT>
- void Set(tensorflow::gtl::ArraySlice<int64> multi_index, NativeT value);
-
- // Appends the given element to the literal. If the elements are not appended
- // in sorted order, then SortSparseElements should be called before calling
- // other methods. This literal must have a sparse layout.
- template <typename NativeT>
- void AppendSparseElement(tensorflow::gtl::ArraySlice<int64> multi_index,
- NativeT value, const ShapeIndex& shape_index = {});
-
- // Sorts the elements in a sparse array.
- void SortSparseElements(const ShapeIndex& shape_index = {});
-
- // As Set(), but truncates `value` to the literal element type before storing.
- // This literal must be an array.
- Status SetIntegralAsS64(tensorflow::gtl::ArraySlice<int64> multi_index,
- int64 value);
-
- // Populate this literal with the given values. Examples:
- //
- // // Populate with floats.
- // Array2D<float> float_values = ...
- // literal.PopulateR2FromArray2D(values);
- //
- // // Populate with int32s.
- // literal.PopulateR2<int32>({{1, 2}, {3, 4}});
- //
-  // The shape and element type of this literal must match the given values. For
- // example, in the call above to literal.PopulateR2(), 'literal' must be a 2x2
- // array of S32.
- template <typename NativeT>
- void PopulateR1(tensorflow::gtl::ArraySlice<NativeT> values);
- void PopulateR1(const tensorflow::core::Bitmap& values);
- template <typename NativeT>
- void PopulateR2(std::initializer_list<std::initializer_list<NativeT>> values);
- template <typename NativeT>
- void PopulateFromArray(const Array<NativeT>& values);
- template <typename NativeT>
- void PopulateR2FromArray2D(const Array2D<NativeT>& values);
- template <typename NativeT>
- void PopulateR3FromArray3D(const Array3D<NativeT>& values);
- template <typename NativeT>
- void PopulateR4FromArray4D(const Array4D<NativeT>& values);
-
- // Populates literal values by calling the generator function for every cell
- // in this literal object.
- //
- // generator must be a callable of the type
- // NativeT(tensorflow::gtl::ArraySlice<int64> indexes) or compatible.
- //
- // This literal must have a dense layout.
- template <typename NativeT, typename FnType>
- Status Populate(const FnType& generator);
-
- // A parallel version of Populate(). This can be used if the generator is
- // thread-safe and the values for the shape's different elements are
- // independent.
- template <typename NativeT, typename FnType>
- Status PopulateParallel(const FnType& generator);
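A minimal generator sketch: fill a dense f32[2x3] literal with the sum of each cell's indices. PopulateParallel is a drop-in replacement when the generator is thread-safe.

  xla::Literal literal(xla::ShapeUtil::MakeShape(xla::F32, {2, 3}));
  TF_CHECK_OK(literal.Populate<float>(
      [](tensorflow::gtl::ArraySlice<xla::int64> indexes) {
        return static_cast<float>(indexes[0] + indexes[1]);
      }));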
-
- // Fills this literal with the given value.
- template <typename NativeT>
- void PopulateWithValue(NativeT value);
-
- // Factory methods below.
- //
-
- // Serialize from a proto.
- static StatusOr<std::unique_ptr<Literal>> CreateFromProto(
- const LiteralProto& proto);
+ static Literal GetFirstScalarLiteral(const LiteralSlice& literal);
// Creates a new literal of a given rank. To minimize ambiguity (for users
// and the compiler) these CreateR[0-2] methods should explicitly specify the
@@ -889,7 +223,7 @@ class Literal : public LiteralBase {
// As above, but intended to be invoked with move semantics; i.e.
//
// std::vector<std::unique_ptr<Literal>> elements = ...;
- // auto result = Literal::MakeTupleOwned(std::move(elements));
+ // auto result = LiteralUtil::MakeTupleOwned(std::move(elements));
//
// This would have been declared as an overload, but there is ambiguity
// in invocation between the above signature and this one.
@@ -899,7 +233,7 @@ class Literal : public LiteralBase {
// This overload lets you pass a braced list of unique_ptr<Literal>s to
// MakeTupleOwned:
//
- // Literal::MakeTupleOwned(Literal::CreateR1(...), ...).
+ // LiteralUtil::MakeTupleOwned(LiteralUtil::CreateR1(...), ...).
//
// Simply relying on the MakeTupleOwned(std::vector<unique_ptr<Literal>>)
// overload doesn't work because std::initializer_list's elements are always
@@ -920,19 +254,6 @@ class Literal : public LiteralBase {
// Create a constant token literal. Token types have no value.
static std::unique_ptr<Literal> CreateToken();
- // Returns a vector containing the tuple elements of this Literal as separate
- // Literals. This Literal must be tuple-shaped and can be a nested tuple. The
- // elements are moved into the new Literals; no data is copied. Upon return
-  // this Literal is set to a nil shape (empty tuple).
- std::vector<Literal> DecomposeTuple();
-
- // This operation is the inverse of DecomposeTuple. The given elements are
- // moved into the tuple elements of a new tuple-shaped Literal which is
- // returned. Upon return, each of the Literals in 'elements' is set to a nil
- // shape (empty tuple).
- static Literal MoveIntoTuple(
- tensorflow::gtl::MutableArraySlice<Literal> elements);
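A round-trip sketch of the pair above: DecomposeTuple moves the elements out, MoveIntoTuple reassembles them, and each moved-from literal ends up nil-shaped.

  std::vector<xla::Literal> elements = tuple_literal.DecomposeTuple();
  xla::Literal rebuilt = xla::Literal::MoveIntoTuple(&elements);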
-
-  // Creates a new Literal object with its values having the primitive_type
// type, and with dimensions defined by the dimensions parameter.
// The content of the literal values is the default value of the primitive
@@ -1000,194 +321,12 @@ class Literal : public LiteralBase {
// dimension 1 equal to 8.
static string MultiIndexAsString(
tensorflow::gtl::ArraySlice<int64> multi_index);
-
- private:
- // Recursively sets the subshapes and buffers of all subpieces rooted at
-  // 'piece'. If 'allocate_arrays' is true, memory is allocated for the arrays in
- // the shape.
- void SetPiece(const Shape& shape, Piece* piece, bool allocate_arrays);
-
- // Returns the piece at the given ShapeIndex.
- Piece& piece(const ShapeIndex& shape_index) {
- return const_cast<Piece&>(LiteralBase::piece(shape_index));
- }
-
- Piece& root_piece() const override { return *root_piece_; };
-
- // Internal template helper for the Literal::CopySliceFrom(), matching its
- // arguments one by one.
- template <typename NativeT>
- Status CopySliceFromInternal(const LiteralBase& src_literal,
- tensorflow::gtl::ArraySlice<int64> src_base,
- tensorflow::gtl::ArraySlice<int64> dest_base,
- tensorflow::gtl::ArraySlice<int64> copy_size);
-
- // Utility structure which is used to create the optimal configuration for
- // a ShapeUtil::ForEachIndex() scan across two literals.
- struct StrideConfig {
- StrideConfig(const Shape& source_shape, const Shape& dest_shape,
- tensorflow::gtl::ArraySlice<int64> dimensions);
-
- // The dimensions of the stride operation. Essentially every dimension
- // will be iterated from base[i] to base[i]+dimensions[i], in step[i]
- // steps.
- tensorflow::gtl::ArraySlice<int64> dimensions;
- DimensionVector base;
- DimensionVector step;
- int64 minor_dimension = 0;
- // The size of the strides for source and destination. One of the two
- // (the one looping through its most minor dimension) will be 1, while
- // the other will be the stride size at the dimension matching the other
-    // shape's most minor dimension being scanned.
- int64 dest_stride = 1;
- int64 source_stride = 1;
- // The size of the inner loop on the most minor dimension.
- int64 minor_loop_size = 1;
- };
-
- // Literal class always owns the shape. The parent class borrows this shape.
- std::unique_ptr<Shape> shape_;
-
- Piece* root_piece_ = nullptr;
-
- // Implementation details shared between Populate() and PopulateParallel()
- template <typename NativeT, typename FnType>
- Status PopulateInternal(const FnType& generator, bool parallel);
-
- // Deallocate the buffers held by this literal.
- void DeallocateBuffers();
-
- friend class LiteralBase;
-};
-std::ostream& operator<<(std::ostream& out, const Literal& literal);
-
-// A read-only view of a Literal. A LiteralSlice contains pointers to shape and
-// literal buffers always owned by others.
-class LiteralSlice : public LiteralBase {
- public:
- LiteralSlice() : LiteralBase() {}
-
- // Implicit conversion constructors.
- LiteralSlice(const LiteralBase& literal);
- LiteralSlice(const LiteralBase& literal, const ShapeIndex& view_root);
-
- private:
- const Piece& root_piece() const override { return *root_piece_; };
-
- const Piece* root_piece_; // Not owned.
-};
-
-// A read-only Literal where the underlying buffers are never owned by this
-// class.
-class BorrowingLiteral : public LiteralBase {
- public:
- BorrowingLiteral() : LiteralBase() {}
-
- // 'src_buf_ptr' is not owned by this class and must outlive the
-  // lifetime of this class. It points to an appropriately sized buffer with
-  // data interpreted as indicated by 'shape'.
- // This constructor is only used for array shapes.
- BorrowingLiteral(const char* src_buf_ptr, const Shape& shape);
-  // Similar to the above, except used for constructing non-nested tuples.
- BorrowingLiteral(tensorflow::gtl::ArraySlice<const char*> src_buf_ptrs,
- const Shape& shape);
-  // TODO(b/79707221): add constructors for nested tuples as well.
-
- private:
- // Recursively builds the subtree for the given piece and sets the subshapes
- // of the given piece with the given shape.
- void BuildPieceSubtree(const Shape& shape, Piece* piece);
-
- // Accessor for the root piece of this literal.
- const Piece& root_piece() const override { return root_piece_; };
- Piece root_piece_;
-
-  // Shape of this literal. Stored as a unique_ptr so that the (default)
-  // move construction of this class is trivially correct: the pointer to the
-  // Shape that root_piece_ stores will still point to the correct address.
- std::unique_ptr<Shape> shape_;
};
-template <typename NativeT>
-tensorflow::gtl::ArraySlice<NativeT> LiteralBase::Piece::data() const {
- CHECK(ShapeUtil::IsArray(subshape())) << ShapeUtil::HumanString(subshape());
- CHECK_EQ(subshape().element_type(),
- primitive_util::NativeToPrimitiveType<NativeT>())
- << "Attempting to access "
- << PrimitiveType_Name(primitive_util::NativeToPrimitiveType<NativeT>())
- << " type, but literal element type is "
- << PrimitiveType_Name(subshape().element_type());
- return tensorflow::gtl::ArraySlice<NativeT>(
- reinterpret_cast<const NativeT*>(buffer()), element_count());
-}
-
-template <typename NativeT>
-tensorflow::gtl::MutableArraySlice<NativeT> LiteralBase::Piece::data() {
- CHECK(ShapeUtil::IsArray(subshape())) << ShapeUtil::HumanString(subshape());
- CHECK_EQ(subshape().element_type(),
- primitive_util::NativeToPrimitiveType<NativeT>())
- << "Attempting to access "
- << PrimitiveType_Name(primitive_util::NativeToPrimitiveType<NativeT>())
- << " type, but literal element type is "
- << PrimitiveType_Name(subshape().element_type());
- return tensorflow::gtl::MutableArraySlice<NativeT>(
- reinterpret_cast<NativeT*>(buffer()), element_count());
-}
-
-template <typename NativeT>
-NativeT LiteralBase::Piece::Get(
- tensorflow::gtl::ArraySlice<int64> multi_index) const {
- CHECK(LayoutUtil::IsDenseArray(subshape()));
- return data<NativeT>()[IndexUtil::MultidimensionalIndexToLinearIndex(
- subshape(), multi_index)];
-}
-
-template <typename NativeT>
-void LiteralBase::Piece::Set(tensorflow::gtl::ArraySlice<int64> multi_index,
- NativeT value) {
- CHECK(LayoutUtil::IsDenseArray(subshape()));
- data<NativeT>()[IndexUtil::MultidimensionalIndexToLinearIndex(
- subshape(), multi_index)] = value;
-}
-
-template <typename NativeT>
-tensorflow::gtl::ArraySlice<NativeT> LiteralBase::data(
- const ShapeIndex& shape_index) const {
- return piece(shape_index).data<NativeT>();
-}
-
-template <typename NativeT>
-tensorflow::gtl::MutableArraySlice<NativeT> Literal::data(
- const ShapeIndex& shape_index) {
- return piece(shape_index).data<NativeT>();
-}
-
-template <typename NativeT>
-inline NativeT LiteralBase::Get(tensorflow::gtl::ArraySlice<int64> multi_index,
- const ShapeIndex& shape_index) const {
- return piece(shape_index).Get<NativeT>(multi_index);
-}
-
-template <typename NativeT>
-inline NativeT LiteralBase::Get(
- tensorflow::gtl::ArraySlice<int64> multi_index) const {
- return root_piece().Get<NativeT>(multi_index);
-}
-
-template <typename NativeT>
-inline void Literal::Set(tensorflow::gtl::ArraySlice<int64> multi_index,
- const ShapeIndex& shape_index, NativeT value) {
- return piece(shape_index).Set<NativeT>(multi_index, value);
-}
-
-template <typename NativeT>
-inline void Literal::Set(tensorflow::gtl::ArraySlice<int64> multi_index,
- NativeT value) {
- return root_piece().Set<NativeT>(multi_index, value);
-}
+std::ostream& operator<<(std::ostream& out, const Literal& literal);
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR0(NativeT value) {
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR0(NativeT value) {
auto literal = MakeUnique<Literal>(ShapeUtil::MakeShape(
primitive_util::NativeToPrimitiveType<NativeT>(), {}));
literal->Set({}, value);
@@ -1195,7 +334,7 @@ template <typename NativeT>
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR1(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR1(
tensorflow::gtl::ArraySlice<NativeT> values) {
auto literal = MakeUnique<Literal>(
ShapeUtil::MakeShape(primitive_util::NativeToPrimitiveType<NativeT>(),
@@ -1205,7 +344,7 @@ template <typename NativeT>
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR2WithLayout(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR2WithLayout(
std::initializer_list<std::initializer_list<NativeT>> values,
const Layout& layout) {
auto literal = MakeUnique<Literal>(ShapeUtil::MakeShapeWithLayout(
@@ -1218,13 +357,13 @@ template <typename NativeT>
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR2(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR2(
std::initializer_list<std::initializer_list<NativeT>> values) {
return CreateR2WithLayout(values, LayoutUtil::GetDefaultLayoutForR2());
}
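A usage sketch for the rank-specific factories above, under the post-rename LiteralUtil spelling:

  auto scalar = xla::LiteralUtil::CreateR0<float>(42.0f);
  auto vector = xla::LiteralUtil::CreateR1<xla::int32>({1, 2, 3});
  auto matrix = xla::LiteralUtil::CreateR2<float>({{1.f, 2.f}, {3.f, 4.f}});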
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR3WithLayout(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR3WithLayout(
std::initializer_list<std::initializer_list<std::initializer_list<NativeT>>>
values,
const Layout& layout) {
@@ -1249,14 +388,14 @@ template <typename NativeT>
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR3(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR3(
std::initializer_list<std::initializer_list<std::initializer_list<NativeT>>>
values) {
return CreateR3WithLayout(values, LayoutUtil::GetDefaultLayoutForR3());
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR4WithLayout(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR4WithLayout(
std::initializer_list<std::initializer_list<
std::initializer_list<std::initializer_list<NativeT>>>>
values,
@@ -1287,7 +426,7 @@ template <typename NativeT>
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateSparse(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateSparse(
tensorflow::gtl::ArraySlice<int64> dimensions, SparseIndexArray indices,
tensorflow::gtl::ArraySlice<NativeT> values, bool sort) {
int64 num_elements = values.size();
@@ -1302,7 +441,7 @@ template <typename NativeT>
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR4(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR4(
std::initializer_list<std::initializer_list<
std::initializer_list<std::initializer_list<NativeT>>>>
values) {
@@ -1310,7 +449,7 @@ template <typename NativeT>
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateFromArrayWithLayout(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateFromArrayWithLayout(
const Array<NativeT>& values, const Layout& layout) {
auto literal = MakeUnique<Literal>(ShapeUtil::MakeShapeWithLayout(
primitive_util::NativeToPrimitiveType<NativeT>(), values.dimensions(),
@@ -1320,38 +459,40 @@ template <typename NativeT>
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateFromArray(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateFromArray(
const Array<NativeT>& values) {
return CreateFromArrayWithLayout(
values, LayoutUtil::GetDefaultLayoutForRank(values.num_dimensions()));
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR2FromArray2DWithLayout(
- const Array2D<NativeT>& values, const Layout& layout) {
+/* static */ std::unique_ptr<Literal>
+LiteralUtil::CreateR2FromArray2DWithLayout(const Array2D<NativeT>& values,
+ const Layout& layout) {
return CreateFromArrayWithLayout(values, layout);
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR2FromArray2D(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR2FromArray2D(
const Array2D<NativeT>& values) {
return CreateFromArray(values);
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR3FromArray3DWithLayout(
- const Array3D<NativeT>& values, const Layout& layout) {
+/* static */ std::unique_ptr<Literal>
+LiteralUtil::CreateR3FromArray3DWithLayout(const Array3D<NativeT>& values,
+ const Layout& layout) {
return CreateFromArrayWithLayout(values, layout);
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR3FromArray3D(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR3FromArray3D(
const Array3D<NativeT>& values) {
return CreateFromArray(values);
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR3Projected(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR3Projected(
std::initializer_list<std::initializer_list<NativeT>> values,
int64 projection) {
int64 dim0_size = projection;
@@ -1376,7 +517,7 @@ template <typename NativeT>
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR4Projected(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR4Projected(
std::initializer_list<std::initializer_list<NativeT>> values,
int64 projection_p, int64 projection_z) {
int64 dim0_size = projection_p;
@@ -1404,49 +545,21 @@ template <typename NativeT>
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR4FromArray4D(
+/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR4FromArray4D(
const Array4D<NativeT>& values) {
return CreateFromArray(values);
}
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateR4FromArray4DWithLayout(
- const Array4D<NativeT>& values, const Layout& layout) {
+/* static */ std::unique_ptr<Literal>
+LiteralUtil::CreateR4FromArray4DWithLayout(const Array4D<NativeT>& values,
+ const Layout& layout) {
return CreateFromArrayWithLayout(values, layout);
}
-template <typename NativeT>
-NativeT LiteralBase::GetFirstElement() const {
- return data<NativeT>().at(0);
-}
-
-template <typename NativeT>
-NativeT LiteralBase::GetSparseElement(int64 sparse_element_number,
- const ShapeIndex& shape_index) const {
- CHECK(
- LayoutUtil::IsSparseArray(ShapeUtil::GetSubshape(shape(), shape_index)));
- return data<NativeT>(shape_index)[sparse_element_number];
-}
-
-template <typename NativeT>
-void Literal::AppendSparseElement(
- tensorflow::gtl::ArraySlice<int64> multi_index, NativeT value,
- const ShapeIndex& shape_index) {
- Piece& p = piece(shape_index);
- const Shape& subshape = p.subshape();
- CHECK(LayoutUtil::IsSparseArray(subshape));
- int64 rank = ShapeUtil::Rank(subshape);
- CHECK_EQ(multi_index.size(), rank);
- int64 last_element = p.sparse_indices()->index_count();
- CHECK_LT(last_element, LayoutUtil::MaxSparseElements(subshape.layout()));
- p.sparse_indices()->Append(multi_index);
- CHECK_LT(last_element, p.data<NativeT>().size());
- p.data<NativeT>()[last_element] = value;
-}
-
// Returns an identity matrix (rank 2) with the given row and column count.
template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::MakeIdentityR2(int64 size) {
+/* static */ std::unique_ptr<Literal> LiteralUtil::MakeIdentityR2(int64 size) {
Array2D<NativeT> array(size, size, 0);
for (int64 i = 0; i < size; ++i) {
array(i, i) = 1;
@@ -1455,174 +568,8 @@ template <typename NativeT>
}
template <typename NativeT>
-void LiteralBase::EachCell(
- std::function<void(tensorflow::gtl::ArraySlice<int64> indices,
- NativeT value)>
- per_cell) const {
- if (ShapeUtil::IsZeroElementArray(shape())) {
- return;
- }
- std::vector<int64> indices(ShapeUtil::Rank(shape()), 0);
- do {
- per_cell(indices, Get<NativeT>(indices));
- } while (IndexUtil::BumpIndices(shape(), &indices));
-}
-
-template <typename NativeT>
-inline void Literal::PopulateR1(tensorflow::gtl::ArraySlice<NativeT> values) {
- CHECK(ShapeUtil::IsArray(shape()));
- CHECK_EQ(ShapeUtil::Rank(shape()), 1);
- CHECK_EQ(ShapeUtil::ElementsIn(shape()), values.size());
- CHECK_EQ(shape().element_type(),
- primitive_util::NativeToPrimitiveType<NativeT>());
- for (int64 i = 0; i < values.size(); ++i) {
- Set({i}, values[i]);
- }
-}
-
-template <typename NativeT>
-void Literal::PopulateR2(
- std::initializer_list<std::initializer_list<NativeT>> values) {
- CHECK(ShapeUtil::IsArray(shape()));
- CHECK_EQ(ShapeUtil::Rank(shape()), 2);
- CHECK_EQ(shape().element_type(),
- primitive_util::NativeToPrimitiveType<NativeT>());
-
- const int64 dim0_size = values.size();
- const int64 dim1_size = values.begin()->size();
- CHECK_EQ(dim0_size, shape().dimensions(0));
- CHECK_EQ(dim1_size, shape().dimensions(1));
-
- int64 dim0 = 0;
- for (auto inner_list : values) {
- int64 dim1 = 0;
- for (auto value : inner_list) {
- Set({dim0, dim1}, value);
- ++dim1;
- }
- CHECK_EQ(dim1_size, dim1);
- ++dim0;
- }
-}
-
-template <typename NativeT>
-void Literal::PopulateFromArray(const Array<NativeT>& values) {
- CHECK(ShapeUtil::IsArray(shape()));
- CHECK_EQ(shape().element_type(),
- primitive_util::NativeToPrimitiveType<NativeT>());
- CHECK_EQ(ShapeUtil::Rank(shape()), values.num_dimensions());
- for (int dim = 0; dim < values.num_dimensions(); ++dim) {
- CHECK_EQ(values.dim(dim), shape().dimensions(dim));
- }
- values.Each([this](tensorflow::gtl::ArraySlice<int64> indices,
- NativeT value) { this->Set(indices, value); });
-}
-
-template <typename NativeT>
-void Literal::PopulateR2FromArray2D(const Array2D<NativeT>& values) {
- PopulateFromArray(values);
-}
-
-template <typename NativeT>
-void Literal::PopulateR3FromArray3D(const Array3D<NativeT>& values) {
- PopulateFromArray(values);
-}
-
-template <typename NativeT>
-void Literal::PopulateR4FromArray4D(const Array4D<NativeT>& values) {
- PopulateFromArray(values);
-}
-
-template <typename NativeT>
-void Literal::PopulateSparse(SparseIndexArray indices,
- tensorflow::gtl::ArraySlice<NativeT> values,
- bool sort) {
- CHECK(LayoutUtil::IsSparseArray(shape()));
- int rank = ShapeUtil::Rank(shape());
- CHECK_EQ(indices.rank(), rank);
- int64 max_elements = LayoutUtil::MaxSparseElements(shape().layout());
- CHECK_LE(indices.max_indices(), max_elements);
- int64 num_elements = values.size();
- CHECK_LE(num_elements, max_elements);
- CHECK_EQ(num_elements, indices.index_count());
- auto root_data = root_piece().data<NativeT>();
- // Piece::data() returns an ArraySlice of size equal to the number of indices
- // in the SparseIndexArray. So there is no need to adjust the size of the data
- // here. It is enough to just copy the incoming values into the data buffer.
- std::copy(values.begin(), values.end(), root_data.begin());
- *this->root_piece().sparse_indices() = std::move(indices);
- if (sort) {
- auto root_data = this->root_piece().data<NativeT>();
- this->root_piece().sparse_indices()->SortWithValues(root_data);
- }
- DCHECK(this->root_piece().sparse_indices()->Validate(shape()));
-}
-
-template <typename NativeT, typename FnType>
-Status Literal::PopulateInternal(const FnType& generator, bool parallel) {
- const Shape& this_shape = shape();
- const int64 rank = ShapeUtil::Rank(this_shape);
- TF_RET_CHECK(LayoutUtil::IsDenseArray(this_shape));
- TF_RET_CHECK(this_shape.element_type() ==
- primitive_util::NativeToPrimitiveType<NativeT>());
- tensorflow::gtl::MutableArraySlice<NativeT> literal_data = data<NativeT>();
- if (rank > 0) {
- StrideConfig stride_config(this_shape, this_shape,
- AsInt64Slice(this_shape.dimensions()));
- int64 minor_dimension_size =
- ShapeUtil::GetDimension(this_shape, stride_config.minor_dimension);
-
- auto init_function = [&](tensorflow::gtl::ArraySlice<int64> indexes) {
- DimensionVector minor_scan_indexes(rank, 0);
- const int64 index =
- IndexUtil::MultidimensionalIndexToLinearIndex(shape(), indexes);
- std::copy(indexes.begin(), indexes.end(), minor_scan_indexes.begin());
- for (int64 i = 0; i < minor_dimension_size; ++i) {
- minor_scan_indexes[stride_config.minor_dimension] = i;
- literal_data.at(index + i) = generator(minor_scan_indexes);
- }
- };
- if (parallel) {
- ShapeUtil::ForEachIndexParallel(this_shape, stride_config.base,
- stride_config.dimensions,
- stride_config.step, init_function);
- } else {
- ShapeUtil::ForEachIndex(
- this_shape, stride_config.base, stride_config.dimensions,
- stride_config.step,
- [&init_function](tensorflow::gtl::ArraySlice<int64> indexes) {
- init_function(indexes);
- return true;
- });
- }
- } else {
- // For scalars.
- literal_data.at(0) = generator({});
- }
- return Status::OK();
-}
-template <typename NativeT, typename FnType>
-Status Literal::Populate(const FnType& generator) {
- return PopulateInternal<NativeT>(generator, /*parallel=*/false);
-}
-
-template <typename NativeT, typename FnType>
-Status Literal::PopulateParallel(const FnType& generator) {
- return PopulateInternal<NativeT>(generator, /*parallel=*/true);
-}
-
-template <typename NativeT>
-void Literal::PopulateWithValue(NativeT value) {
- CHECK(ShapeUtil::IsArray(shape()));
- CHECK_EQ(shape().element_type(),
- primitive_util::NativeToPrimitiveType<NativeT>());
- for (NativeT& element : data<NativeT>()) {
- element = value;
- }
-}
-
-template <typename NativeT>
-/* static */ std::unique_ptr<Literal> Literal::CreateFullWithDescendingLayout(
+/* static */ std::unique_ptr<Literal>
+LiteralUtil::CreateFullWithDescendingLayout(
tensorflow::gtl::ArraySlice<int64> dimensions, NativeT value) {
auto literal = MakeUnique<Literal>(ShapeUtil::MakeShapeWithDescendingLayout(
primitive_util::NativeToPrimitiveType<NativeT>(), dimensions));
@@ -1630,44 +577,9 @@ template <typename NativeT>
return literal;
}
-template <typename NativeT>
-std::unique_ptr<Literal> LiteralBase::Replicate(int64 times) const {
- DimensionVector bounds = {times};
- bounds.reserve(shape().dimensions_size() + 1);
- for (int64 bound : shape().dimensions()) {
- bounds.push_back(bound);
- }
- auto literal =
- MakeUnique<Literal>(ShapeUtil::MakeShape(shape().element_type(), bounds));
- int64 elements = ShapeUtil::ElementsIn(literal->shape());
- if (elements == 0) {
- return literal;
- }
-
- DimensionVector output_indices(bounds.size(), 0);
- tensorflow::gtl::ArraySlice<int64> input_indices = output_indices;
- input_indices.remove_prefix(1);
-
- bool done = false;
- while (!done) {
- const auto element = Get<NativeT>(input_indices);
- literal->Set<NativeT>(output_indices, element);
-
- done = true;
- for (int n = 0; n < output_indices.size(); ++n) {
- ++output_indices[n];
- if (output_indices[n] < bounds[n]) {
- done = false;
- break;
- }
- output_indices[n] = 0;
- }
- }
- return literal;
-}
-
template <PrimitiveType type, typename T>
-/* static */ StatusOr<std::unique_ptr<Literal>> Literal::CreateRandomLiteral(
+/* static */ StatusOr<std::unique_ptr<Literal>>
+LiteralUtil::CreateRandomLiteral(
const Shape& shape,
const std::function<T(tensorflow::gtl::ArraySlice<int64>)>& generator) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<type>::type;
@@ -1681,8 +593,9 @@ template <PrimitiveType type, typename T>
}
template <PrimitiveType type, typename E, typename T>
-/* static */ StatusOr<std::unique_ptr<Literal>> Literal::CreateRandomLiteral(
- const Shape& shape, E* engine, T mean, T stddev) {
+/* static */ StatusOr<std::unique_ptr<Literal>>
+LiteralUtil::CreateRandomLiteral(const Shape& shape, E* engine, T mean,
+ T stddev) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<type>::type;
std::normal_distribution<NativeT> generator(mean, stddev);
return CreateRandomLiteral<type, NativeT>(
@@ -1692,8 +605,8 @@ template <PrimitiveType type, typename E, typename T>
}
template <PrimitiveType type, typename T>
-/* static */ StatusOr<std::unique_ptr<Literal>> Literal::CreateRandomLiteral(
- const Shape& shape, T mean, T stddev) {
+/* static */ StatusOr<std::unique_ptr<Literal>>
+LiteralUtil::CreateRandomLiteral(const Shape& shape, T mean, T stddev) {
std::minstd_rand0 engine;
return CreateRandomLiteral<type>(shape, &engine, mean, stddev);
}
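A sketch of the convenience overload above: draw an f32[2x2] literal from N(0, 1); the overload seeds a std::minstd_rand0 internally.

  auto random = xla::LiteralUtil::CreateRandomLiteral<xla::F32>(
      xla::ShapeUtil::MakeShape(xla::F32, {2, 2}), /*mean=*/0.0f,
      /*stddev=*/1.0f);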
diff --git a/tensorflow/compiler/xla/overflow_util.h b/tensorflow/compiler/xla/overflow_util.h
new file mode 100644
index 0000000000..8657d3a4bf
--- /dev/null
+++ b/tensorflow/compiler/xla/overflow_util.h
@@ -0,0 +1,50 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_OVERFLOW_UTIL_H_
+#define TENSORFLOW_COMPILER_XLA_OVERFLOW_UTIL_H_
+
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
+#include "tensorflow/core/platform/types.h"
+
+namespace xla {
+
+// Multiplies two nonnegative int64 values, returning a negative value on
+// overflow.
+inline int64 MultiplyWithoutOverflow(const int64 x, const int64 y) {
+ // Multiply in uint64 rather than int64 since signed overflow is undefined.
+ // Negative values will wrap around to large unsigned values in the casts
+ // (see section 4.7 [conv.integral] of the C++14 standard).
+ const uint64 ux = x;
+ const uint64 uy = y;
+ const uint64 uxy = ux * uy;
+
+ // Check whether the product overflowed uint64. If both inputs fit in 32
+ // bits, this cheap bit test proves no overflow and skips the division below.
+ if (TF_PREDICT_FALSE((ux | uy) >> 32 != 0)) {
+ // Ensure nonnegativity. Note that negative numbers will appear "large"
+ // to the unsigned comparisons above.
+ CHECK(x >= 0 && y >= 0);
+
+ // Detect overflow by checking that division recovers the other factor.
+ if (ux != 0 && uxy / ux != uy) return -1;
+ }
+
+ // Cast back to signed. Any negative value will signal an error.
+ return static_cast<int64>(uxy);
+}
+
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_OVERFLOW_UTIL_H_
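
The new helper's contract: both operands must be nonnegative, and any product that does not fit in int64 comes back as -1. A small usage sketch with illustrative values (not part of the patch):

    #include "tensorflow/compiler/xla/overflow_util.h"

    xla::int64 small = xla::MultiplyWithoutOverflow(3, 7);  // 21
    xla::int64 large = xla::MultiplyWithoutOverflow(
        xla::int64{1} << 31, xla::int64{1} << 31);  // 1LL << 62, still fits
    xla::int64 over = xla::MultiplyWithoutOverflow(
        xla::int64{1} << 32, xla::int64{1} << 32);  // -1: exceeds int64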
diff --git a/tensorflow/compiler/xla/packed_literal_reader.cc b/tensorflow/compiler/xla/packed_literal_reader.cc
index 857aae0a79..6b7fd10d63 100644
--- a/tensorflow/compiler/xla/packed_literal_reader.cc
+++ b/tensorflow/compiler/xla/packed_literal_reader.cc
@@ -20,7 +20,7 @@ limitations under the License.
#include <utility>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
diff --git a/tensorflow/compiler/xla/packed_literal_reader.h b/tensorflow/compiler/xla/packed_literal_reader.h
index 45a9fe0127..98dccaa9a2 100644
--- a/tensorflow/compiler/xla/packed_literal_reader.h
+++ b/tensorflow/compiler/xla/packed_literal_reader.h
@@ -18,7 +18,7 @@ limitations under the License.
#include <memory>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
diff --git a/tensorflow/compiler/xla/python/BUILD b/tensorflow/compiler/xla/python/BUILD
index 83834c1ff6..fe346f9956 100644
--- a/tensorflow/compiler/xla/python/BUILD
+++ b/tensorflow/compiler/xla/python/BUILD
@@ -33,6 +33,7 @@ cc_library(
srcs = ["numpy_bridge.cc"],
hdrs = ["numpy_bridge.h"],
deps = [
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:xla_data_proto",
@@ -52,9 +53,9 @@ cc_library(
"//tensorflow/compiler/xla/client:client_library",
"//tensorflow/compiler/xla/client:executable_build_options",
"//tensorflow/compiler/xla/client:local_client",
+ "//tensorflow/compiler/xla/client/lib:math",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/compiler/xla/client/xla_client:xla_computation",
- "//tensorflow/compiler/xla/service:hlo_proto",
"//tensorflow/compiler/xla/service:shaped_buffer",
"//tensorflow/core:framework_lite",
"//tensorflow/core:lib",
@@ -70,7 +71,7 @@ tf_py_wrap_cc(
deps = [
":local_computation_builder",
":numpy_bridge",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/service:cpu_plugin",
diff --git a/tensorflow/compiler/xla/python/local_computation_builder.cc b/tensorflow/compiler/xla/python/local_computation_builder.cc
index 29062348b0..be55d50b23 100644
--- a/tensorflow/compiler/xla/python/local_computation_builder.cc
+++ b/tensorflow/compiler/xla/python/local_computation_builder.cc
@@ -14,6 +14,8 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/python/local_computation_builder.h"
+#include "tensorflow/compiler/xla/client/lib/math.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/executable_run_options.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/util.h"
@@ -174,73 +176,73 @@ StatusOr<std::unique_ptr<Literal>> CompiledLocalComputation::Execute(
GetReplicaCount());
for (int replica = 0; replica < GetReplicaCount(); ++replica) {
- pool.Schedule([this, client, replica, &arguments, &shapes_with_layout,
- &results] {
- StatusOr<int> device_ordinal_status =
- client->ReplicaNumberToDeviceOrdinal(replica);
- if (!device_ordinal_status.ok()) {
- results[replica] = device_ordinal_status.status();
- return;
- }
- const int device_ordinal = device_ordinal_status.ValueOrDie();
- VLOG(3) << "Replica " << replica
- << " mapped to device ordinal for execution: "
- << device_ordinal;
-
- // Transfer arguments in
- std::vector<ScopedShapedBuffer> scoped_buffers;
- scoped_buffers.reserve(arguments.size());
- for (int i = 0; i < arguments.size(); ++i) {
- const Literal& argument = arguments[i];
- const tensorflow::gtl::optional<Shape>& shape_with_layout =
- shapes_with_layout[i];
-
- StatusOr<ScopedShapedBuffer> pushed;
- if (shape_with_layout) {
- std::unique_ptr<Literal> relaid =
- argument.Relayout(shape_with_layout.value());
- pushed = ToBuffer(client, device_ordinal, *relaid);
- } else {
- pushed = ToBuffer(client, device_ordinal, argument);
- }
- if (!pushed.ok()) {
- results[replica] = pushed.status();
- return;
- }
-
- scoped_buffers.push_back(std::move(pushed).ValueOrDie());
- }
-
- // Execute
- std::vector<const ShapedBuffer*> argument_buffers;
- argument_buffers.reserve(scoped_buffers.size());
- for (auto& buffer : scoped_buffers) {
- argument_buffers.push_back(&buffer);
- }
-
- DeviceAssignment device_assignment =
- client->backend()
- .computation_placer()
- ->AssignDevices(GetReplicaCount(), /*computation_count=*/1)
- .ConsumeValueOrDie();
-
- ExecutableRunOptions options;
- options.set_device_ordinal(device_ordinal);
- options.set_allocator(client->backend().memory_allocator());
- options.set_intra_op_thread_pool(
- client->backend().eigen_intra_op_thread_pool_device());
- options.set_device_assignment(&device_assignment);
- StatusOr<ScopedShapedBuffer> result_buffer_status =
- executable_->Run(argument_buffers, options);
- if (!result_buffer_status.ok()) {
- results[replica] = result_buffer_status.status();
- return;
- }
-
- // Transfer result out
- results[replica] = client->ShapedBufferToLiteral(
- std::move(result_buffer_status).ValueOrDie());
- });
+ pool.Schedule(
+ [this, client, replica, &arguments, &shapes_with_layout, &results] {
+ StatusOr<int> device_ordinal_status =
+ client->ReplicaNumberToDeviceOrdinal(replica);
+ if (!device_ordinal_status.ok()) {
+ results[replica] = device_ordinal_status.status();
+ return;
+ }
+ const int device_ordinal = device_ordinal_status.ValueOrDie();
+ VLOG(3) << "Replica " << replica
+ << " mapped to device ordinal for execution: "
+ << device_ordinal;
+
+ // Transfer arguments in
+ std::vector<ScopedShapedBuffer> scoped_buffers;
+ scoped_buffers.reserve(arguments.size());
+ for (int i = 0; i < arguments.size(); ++i) {
+ const Literal& argument = arguments[i];
+ const tensorflow::gtl::optional<Shape>& shape_with_layout =
+ shapes_with_layout[i];
+
+ StatusOr<ScopedShapedBuffer> pushed;
+ if (shape_with_layout) {
+ std::unique_ptr<Literal> relaid =
+ argument.Relayout(shape_with_layout.value());
+ pushed = ToBuffer(client, device_ordinal, *relaid);
+ } else {
+ pushed = ToBuffer(client, device_ordinal, argument);
+ }
+ if (!pushed.ok()) {
+ results[replica] = pushed.status();
+ return;
+ }
+
+ scoped_buffers.push_back(std::move(pushed).ValueOrDie());
+ }
+
+ // Execute
+ std::vector<const ShapedBuffer*> argument_buffers;
+ argument_buffers.reserve(scoped_buffers.size());
+ for (auto& buffer : scoped_buffers) {
+ argument_buffers.push_back(&buffer);
+ }
+
+ DeviceAssignment device_assignment =
+ client->backend()
+ .computation_placer()
+ ->AssignDevices(GetReplicaCount(), /*computation_count=*/1)
+ .ConsumeValueOrDie();
+
+ ExecutableRunOptions options;
+ options.set_device_ordinal(device_ordinal);
+ options.set_allocator(client->backend().memory_allocator());
+ options.set_intra_op_thread_pool(
+ client->backend().eigen_intra_op_thread_pool_device());
+ options.set_device_assignment(&device_assignment);
+ StatusOr<ScopedShapedBuffer> result_buffer_status =
+ executable_->Run(argument_buffers, options);
+ if (!result_buffer_status.ok()) {
+ results[replica] = result_buffer_status.status();
+ return;
+ }
+
+ // Transfer result out
+ results[replica] = client->ShapedBufferToLiteral(
+ std::move(result_buffer_status).ValueOrDie());
+ });
}
}
@@ -341,7 +343,7 @@ StatusOr<LocalComputation*> LocalComputationBuilder::Build() {
LocalOp LocalComputationBuilder::Parameter(int64 parameter_number,
const Shape& shape,
const string& name) {
- return builder_.Parameter(parameter_number, shape, name);
+ return xla::Parameter(&builder_, parameter_number, shape, name);
}
StatusOr<Shape> LocalComputationBuilder::GetShape(const LocalOp& operand) {
@@ -354,72 +356,70 @@ StatusOr<Shape> LocalComputationBuilder::GetReturnValueShape() {
}
LocalOp LocalComputationBuilder::Infeed(const Shape& shape) {
- return builder_.Infeed(shape);
+ return xla::Infeed(&builder_, shape);
}
void LocalComputationBuilder::Outfeed(const LocalOp& operand,
const Shape& shape,
const string& outfeed_config) {
- builder_.Outfeed(operand.op(), shape, outfeed_config);
+ xla::Outfeed(operand.op(), shape, outfeed_config);
}
LocalOp LocalComputationBuilder::ConstantLiteral(const Literal& literal) {
- return builder_.ConstantLiteral(literal);
+ return xla::ConstantLiteral(&builder_, literal);
}
LocalOp LocalComputationBuilder::Broadcast(
const LocalOp& operand,
tensorflow::gtl::ArraySlice<int64> broadcast_sizes) {
- return builder_.Broadcast(operand.op(), broadcast_sizes);
+ return xla::Broadcast(operand.op(), broadcast_sizes);
}
LocalOp LocalComputationBuilder::Pad(const LocalOp& operand,
const LocalOp& padding_value,
const PaddingConfig& padding_config) {
- return builder_.Pad(operand.op(), padding_value.op(), padding_config);
+ return xla::Pad(operand.op(), padding_value.op(), padding_config);
}
LocalOp LocalComputationBuilder::Reshape(
const LocalOp& operand, tensorflow::gtl::ArraySlice<int64> dimensions,
tensorflow::gtl::ArraySlice<int64> new_sizes) {
- return builder_.Reshape(operand.op(), dimensions, new_sizes);
+ return xla::Reshape(operand.op(), dimensions, new_sizes);
}
LocalOp LocalComputationBuilder::Collapse(
const LocalOp& operand, tensorflow::gtl::ArraySlice<int64> dimensions) {
- return builder_.Collapse(operand.op(), dimensions);
+ return xla::Collapse(operand.op(), dimensions);
}
LocalOp LocalComputationBuilder::CrossReplicaSum(const LocalOp& operand) {
- return builder_.CrossReplicaSum(operand.op());
+ return xla::CrossReplicaSum(operand.op());
}
LocalOp LocalComputationBuilder::Slice(
const LocalOp& operand, tensorflow::gtl::ArraySlice<int64> start_indices,
tensorflow::gtl::ArraySlice<int64> limit_indices,
tensorflow::gtl::ArraySlice<int64> strides) {
- return builder_.Slice(operand.op(), start_indices, limit_indices, strides);
+ return xla::Slice(operand.op(), start_indices, limit_indices, strides);
}
LocalOp LocalComputationBuilder::SliceInDim(const LocalOp& operand,
int64 start_index,
int64 limit_index, int64 stride,
int64 dimno) {
- return builder_.SliceInDim(operand.op(), start_index, limit_index, stride,
- dimno);
+ return xla::SliceInDim(operand.op(), start_index, limit_index, stride, dimno);
}
LocalOp LocalComputationBuilder::DynamicSlice(
const LocalOp& operand, const LocalOp& start_indices,
tensorflow::gtl::ArraySlice<int64> slice_sizes) {
- return builder_.DynamicSlice(operand.op(), start_indices.op(), slice_sizes);
+ return xla::DynamicSlice(operand.op(), start_indices.op(), slice_sizes);
}
LocalOp LocalComputationBuilder::DynamicUpdateSlice(
const LocalOp& operand, const LocalOp& update,
const LocalOp& start_indices) {
- return builder_.DynamicUpdateSlice(operand.op(), update.op(),
- start_indices.op());
+ return xla::DynamicUpdateSlice(operand.op(), update.op(), start_indices.op());
}
LocalOp LocalComputationBuilder::ConcatInDim(
@@ -429,7 +429,7 @@ LocalOp LocalComputationBuilder::ConcatInDim(
for (const auto& op : operands) {
xla_ops.push_back(op.op());
}
- return builder_.ConcatInDim(xla_ops, dimension);
+ return xla::ConcatInDim(&builder_, xla_ops, dimension);
}
LocalOp LocalComputationBuilder::SelectAndScatterWithGeneralPadding(
@@ -439,7 +439,7 @@ LocalOp LocalComputationBuilder::SelectAndScatterWithGeneralPadding(
tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
const LocalOp& source, const LocalOp& init_value,
const LocalComputation& scatter) {
- return builder_.SelectAndScatterWithGeneralPadding(
+ return xla::SelectAndScatterWithGeneralPadding(
operand.op(), select.computation(), window_dimensions, window_strides,
padding, source.op(), init_value.op(), scatter.computation());
}
@@ -452,22 +452,22 @@ LocalOp LocalComputationBuilder::Tuple(
xla_ops.push_back(op.op());
}
- return builder_.Tuple(xla_ops);
+ return xla::Tuple(&builder_, xla_ops);
}
LocalOp LocalComputationBuilder::GetTupleElement(const LocalOp& tuple_data,
int64 index) {
- return builder_.GetTupleElement(tuple_data.op(), index);
+ return xla::GetTupleElement(tuple_data.op(), index);
}
LocalOp LocalComputationBuilder::Dot(const LocalOp& lhs, const LocalOp& rhs) {
- return builder_.Dot(lhs.op(), rhs.op());
+ return xla::Dot(lhs.op(), rhs.op());
}
LocalOp LocalComputationBuilder::DotGeneral(
const LocalOp& lhs, const LocalOp& rhs,
const DotDimensionNumbers& dimension_numbers) {
- return builder_.DotGeneral(lhs.op(), rhs.op(), dimension_numbers);
+ return xla::DotGeneral(lhs.op(), rhs.op(), dimension_numbers);
}
LocalOp LocalComputationBuilder::ConvGeneralDilated(
@@ -477,14 +477,13 @@ LocalOp LocalComputationBuilder::ConvGeneralDilated(
tensorflow::gtl::ArraySlice<int64> lhs_dilation,
tensorflow::gtl::ArraySlice<int64> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers) {
- return builder_.ConvGeneralDilated(lhs.op(), rhs.op(), window_strides,
- padding, lhs_dilation, rhs_dilation,
- dimension_numbers);
+ return xla::ConvGeneralDilated(lhs.op(), rhs.op(), window_strides, padding,
+ lhs_dilation, rhs_dilation, dimension_numbers);
}
LocalOp LocalComputationBuilder::ConvertElementType(
const LocalOp& operand, PrimitiveType new_element_type) {
- return builder_.ConvertElementType(operand.op(), new_element_type);
+ return xla::ConvertElementType(operand.op(), new_element_type);
}
LocalOp LocalComputationBuilder::Call(
@@ -495,46 +494,39 @@ LocalOp LocalComputationBuilder::Call(
for (const auto& op : operands) {
xla_ops.push_back(op.op());
}
- return builder_.Call(local_computation.computation(), xla_ops);
+ return xla::Call(&builder_, local_computation.computation(), xla_ops);
}
LocalOp LocalComputationBuilder::Transpose(
const LocalOp& operand, tensorflow::gtl::ArraySlice<int64> permutation) {
- return builder_.Transpose(operand.op(), permutation);
+ return xla::Transpose(operand.op(), permutation);
}
LocalOp LocalComputationBuilder::Rev(
const LocalOp& operand, tensorflow::gtl::ArraySlice<int64> dimensions) {
- return builder_.Rev(operand.op(), dimensions);
+ return xla::Rev(operand.op(), dimensions);
}
LocalOp LocalComputationBuilder::Map(
tensorflow::gtl::ArraySlice<LocalOp> operands,
const LocalComputation& local_computation,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<LocalOp> static_operands) {
+ tensorflow::gtl::ArraySlice<int64> dimensions) {
std::vector<XlaOp> xla_ops;
xla_ops.reserve(operands.size());
for (const auto& op : operands) {
xla_ops.push_back(op.op());
}
- std::vector<XlaOp> static_xla_ops;
- static_xla_ops.reserve(static_operands.size());
- for (const auto& op : static_operands) {
- static_xla_ops.push_back(op.op());
- }
-
- return builder_.Map(xla_ops, local_computation.computation(), dimensions,
- static_xla_ops);
+ return xla::Map(&builder_, xla_ops, local_computation.computation(),
+ dimensions);
}
LocalOp LocalComputationBuilder::Reduce(
const LocalOp& operand, const LocalOp& init_value,
const LocalComputation& local_computation,
tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce) {
- return builder_.Reduce(operand.op(), init_value.op(),
- local_computation.computation(), dimensions_to_reduce);
+ return xla::Reduce(operand.op(), init_value.op(),
+ local_computation.computation(), dimensions_to_reduce);
}
LocalOp LocalComputationBuilder::ReduceWindowWithGeneralPadding(
@@ -543,7 +535,7 @@ LocalOp LocalComputationBuilder::ReduceWindowWithGeneralPadding(
tensorflow::gtl::ArraySlice<int64> window_dimensions,
tensorflow::gtl::ArraySlice<int64> window_strides,
tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding) {
- return builder_.ReduceWindowWithGeneralPadding(
+ return xla::ReduceWindowWithGeneralPadding(
operand.op(), init_value.op(), local_computation.computation(),
window_dimensions, window_strides, padding);
}
@@ -551,27 +543,27 @@ LocalOp LocalComputationBuilder::ReduceWindowWithGeneralPadding(
LocalOp LocalComputationBuilder::RngNormal(const LocalOp& mu,
const LocalOp& sigma,
const Shape& shape) {
- return builder_.RngNormal(mu.op(), sigma.op(), shape);
+ return xla::RngNormal(mu.op(), sigma.op(), shape);
}
LocalOp LocalComputationBuilder::RngUniform(const LocalOp& a, const LocalOp& b,
const Shape& shape) {
- return builder_.RngUniform(a.op(), b.op(), shape);
+ return xla::RngUniform(a.op(), b.op(), shape);
}
LocalOp LocalComputationBuilder::While(const LocalComputation& condition,
const LocalComputation& body,
const LocalOp& init) {
- return builder_.While(condition.computation(), body.computation(), init.op());
+ return xla::While(condition.computation(), body.computation(), init.op());
}
LocalOp LocalComputationBuilder::Conditional(
const LocalOp& predicate, const LocalOp& true_operand,
const LocalComputation& true_computation, const LocalOp& false_operand,
const LocalComputation& false_computation) {
- return builder_.Conditional(
- predicate.op(), true_operand.op(), true_computation.computation(),
- false_operand.op(), false_computation.computation());
+ return xla::Conditional(predicate.op(), true_operand.op(),
+ true_computation.computation(), false_operand.op(),
+ false_computation.computation());
}
StatusOr<bool> LocalComputationBuilder::IsConstant(const LocalOp& operand) {
@@ -587,7 +579,7 @@ StatusOr<LocalComputation*> LocalComputationBuilder::BuildConstantSubGraph(
#define _FORWARD(method_name, return_sig, args_sig, args) \
return_sig LocalComputationBuilder::method_name args_sig { \
- return builder_.method_name args; \
+ return xla::method_name args; \
}
#define _FORWARD_UNOP(method_name) \
@@ -621,6 +613,7 @@ _FORWARD_BINOP(Max)
_FORWARD_BINOP(Min)
_FORWARD_BINOP(And)
_FORWARD_BINOP(Or)
+_FORWARD_BINOP(Xor)
_FORWARD_UNOP(Not)
_FORWARD_UNOP(Abs)
_FORWARD_UNOP(Exp)
@@ -634,11 +627,11 @@ _FORWARD_UNOP(Sign)
_FORWARD_UNOP(Cos)
_FORWARD_UNOP(Sin)
_FORWARD_UNOP(Tanh)
-_FORWARD_UNOP(SqrtF32)
-_FORWARD_UNOP(SquareF32)
+_FORWARD_UNOP(Sqrt)
+_FORWARD_UNOP(Square)
_FORWARD_BINOP(Pow)
_FORWARD_UNOP(IsFinite)
-_FORWARD_UNOP(ReciprocalF32)
+_FORWARD_UNOP(Reciprocal)
_FORWARD_UNOP(Neg)
_FORWARD_UNOP(Sort)
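
Every edit in this file follows the same mechanical pattern: XlaBuilder member calls become free functions in namespace xla. Ops that consume an existing XlaOp (Broadcast, Pad, Slice, Dot, ...) recover the builder from their operand, while ops that originate values or take operand lists (Parameter, ConstantLiteral, Infeed, Tuple, ConcatInDim, Call, Map) take an explicit XlaBuilder* first argument. A sketch of the two styles, with a hypothetical parameter shape:

    // Member-call style (before):
    //   auto sum = builder_.Add(builder_.Parameter(0, shape, "x"),
    //                           builder_.Parameter(1, shape, "y"));
    // Free-function style (after): Add infers the builder from its operands.
    auto x = xla::Parameter(&builder_, 0, shape, "x");
    auto y = xla::Parameter(&builder_, 1, shape, "y");
    auto sum = xla::Add(x, y);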
diff --git a/tensorflow/compiler/xla/python/local_computation_builder.h b/tensorflow/compiler/xla/python/local_computation_builder.h
index 95f0a0610b..690ff277e8 100644
--- a/tensorflow/compiler/xla/python/local_computation_builder.h
+++ b/tensorflow/compiler/xla/python/local_computation_builder.h
@@ -270,8 +270,7 @@ class LocalComputationBuilder {
LocalOp Map(tensorflow::gtl::ArraySlice<LocalOp> operands,
const LocalComputation& local_computation,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<LocalOp> static_operands);
+ tensorflow::gtl::ArraySlice<int64> dimensions);
LocalOp Reduce(const LocalOp& operand, const LocalOp& init_value,
const LocalComputation& local_computation,
@@ -333,6 +332,7 @@ class LocalComputationBuilder {
_FORWARD_BINOP(Min)
_FORWARD_BINOP(And)
_FORWARD_BINOP(Or)
+ _FORWARD_BINOP(Xor)
_FORWARD_UNOP(Not)
_FORWARD_UNOP(Abs)
_FORWARD_UNOP(Exp)
@@ -346,11 +346,11 @@ class LocalComputationBuilder {
_FORWARD_UNOP(Cos)
_FORWARD_UNOP(Sin)
_FORWARD_UNOP(Tanh)
- _FORWARD_UNOP(SqrtF32)
- _FORWARD_UNOP(SquareF32)
+ _FORWARD_UNOP(Sqrt)
+ _FORWARD_UNOP(Square)
_FORWARD_BINOP(Pow)
_FORWARD_UNOP(IsFinite)
- _FORWARD_UNOP(ReciprocalF32)
+ _FORWARD_UNOP(Reciprocal)
_FORWARD_UNOP(Neg)
_FORWARD_UNOP(Sort)
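
The header-side Map change mirrors the .cc file: the static_operands parameter is gone from the C++ wrapper, and the SWIG interface and Python client below shed it in the same patch. A sketch of the surviving four-argument form, assuming an operand and a scalar computation already in scope:

    // Map the computation over dimension 0 of one operand (sketch).
    xla::XlaOp mapped = xla::Map(&builder, /*operands=*/{input_op},
                                 /*computation=*/scalar_fn,
                                 /*dimensions=*/{0});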
diff --git a/tensorflow/compiler/xla/python/local_computation_builder.i b/tensorflow/compiler/xla/python/local_computation_builder.i
index 477df6fde2..afdea88cb7 100644
--- a/tensorflow/compiler/xla/python/local_computation_builder.i
+++ b/tensorflow/compiler/xla/python/local_computation_builder.i
@@ -109,7 +109,7 @@ limitations under the License.
// Must be included first
#include "tensorflow/python/lib/core/numpy.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
@@ -988,6 +988,7 @@ tensorflow::ImportNumpy();
%unignore xla::swig::LocalComputationBuilder::Min;
%unignore xla::swig::LocalComputationBuilder::And;
%unignore xla::swig::LocalComputationBuilder::Or;
+%unignore xla::swig::LocalComputationBuilder::Xor;
%unignore xla::swig::LocalComputationBuilder::Not;
%unignore xla::swig::LocalComputationBuilder::Abs;
%unignore xla::swig::LocalComputationBuilder::Exp;
@@ -1001,11 +1002,11 @@ tensorflow::ImportNumpy();
%unignore xla::swig::LocalComputationBuilder::Cos;
%unignore xla::swig::LocalComputationBuilder::Sin;
%unignore xla::swig::LocalComputationBuilder::Tanh;
-%unignore xla::swig::LocalComputationBuilder::SqrtF32;
-%unignore xla::swig::LocalComputationBuilder::SquareF32;
+%unignore xla::swig::LocalComputationBuilder::Sqrt;
+%unignore xla::swig::LocalComputationBuilder::Square;
%unignore xla::swig::LocalComputationBuilder::Pow;
%unignore xla::swig::LocalComputationBuilder::IsFinite;
-%unignore xla::swig::LocalComputationBuilder::ReciprocalF32;
+%unignore xla::swig::LocalComputationBuilder::Reciprocal;
%unignore xla::swig::LocalComputationBuilder::Neg;
%unignore xla::swig::LocalComputationBuilder::Sort;
%unignore xla::swig::DestructureLocalShapedBufferTuple;
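
The F32 suffixes disappear because the replacement ops are generic over floating-point element types rather than hard-coded to F32; they live in client/lib/math.h, which is why the BUILD file earlier in this patch gains the //tensorflow/compiler/xla/client/lib:math dependency. A hedged sketch, assuming x is an XlaOp of some floating-point element type:

    #include "tensorflow/compiler/xla/client/lib/math.h"

    xla::XlaOp root = xla::Sqrt(x);  // no longer spelled SqrtF32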
diff --git a/tensorflow/compiler/xla/python/numpy_bridge.cc b/tensorflow/compiler/xla/python/numpy_bridge.cc
index 68648a3a17..71351abd59 100644
--- a/tensorflow/compiler/xla/python/numpy_bridge.cc
+++ b/tensorflow/compiler/xla/python/numpy_bridge.cc
@@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/python/numpy_bridge.h"
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/core/platform/logging.h"
@@ -374,7 +375,7 @@ StatusOr<std::unique_ptr<Literal>> XlaLiteralFromPyObject(PyObject* o) {
TF_ASSIGN_OR_RETURN(auto literal, XlaLiteralFromPyObject(element));
elements.push_back(std::move(literal));
}
- return Literal::MakeTupleOwned(std::move(elements));
+ return LiteralUtil::MakeTupleOwned(std::move(elements));
} else if (PyArray_Check(o)) {
PyArrayObject* py_array = reinterpret_cast<PyArrayObject*>(o);
int rank = PyArray_NDIM(py_array);
@@ -383,7 +384,7 @@ StatusOr<std::unique_ptr<Literal>> XlaLiteralFromPyObject(PyObject* o) {
dimensions[i] = PyArray_DIM(py_array, i);
}
int np_type = PyArray_TYPE(py_array);
- auto literal = Literal::CreateFromDimensions(
+ auto literal = LiteralUtil::CreateFromDimensions(
NumpyTypeToPrimitiveType(np_type), dimensions);
TF_RETURN_IF_ERROR(
CopyNumpyArrayToLiteral(np_type, py_array, literal.get()));
diff --git a/tensorflow/compiler/xla/python/numpy_bridge.h b/tensorflow/compiler/xla/python/numpy_bridge.h
index 64f0aae0f9..a67c93a4fb 100644
--- a/tensorflow/compiler/xla/python/numpy_bridge.h
+++ b/tensorflow/compiler/xla/python/numpy_bridge.h
@@ -25,7 +25,7 @@ limitations under the License.
#include <algorithm>
#include <memory>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/python/lib/core/numpy.h"
diff --git a/tensorflow/compiler/xla/python/xla_client.py b/tensorflow/compiler/xla/python/xla_client.py
index a1fc25303c..e2b6eaa096 100644
--- a/tensorflow/compiler/xla/python/xla_client.py
+++ b/tensorflow/compiler/xla/python/xla_client.py
@@ -99,10 +99,10 @@ _UNARY_OPS = [
'Cos',
'Sin',
'Tanh',
- 'SqrtF32',
- 'SquareF32',
+ 'Sqrt',
+ 'Square',
'IsFinite',
- 'ReciprocalF32',
+ 'Reciprocal',
'Neg',
'Sort',
]
@@ -123,6 +123,7 @@ _BINARY_OPS = [
'Min',
'And',
'Or',
+ 'Xor',
'Pow',
]
@@ -460,14 +461,16 @@ class LocalComputation(object):
if self.is_compiled:
raise ValueError('Attempt to compile a compiled local XLA computation.')
+ result_shape = _wrap_shape(self.c_local_computation.GetReturnValueShape())
+
if layout_fn:
argument_shapes = [
shape.map_leaves(layout_fn) for shape in argument_shapes
]
- result_shape = _wrap_shape(self.c_local_computation.GetReturnValueShape())
result_shape = result_shape.map_leaves(layout_fn)
- compile_options = compile_options or CompileOptions()
- compile_options.result_shape = result_shape
+
+ compile_options = compile_options or CompileOptions()
+ compile_options.result_shape = result_shape
return LocalComputation(
self.c_local_computation.Compile(argument_shapes, compile_options),
is_compiled=True)
@@ -908,20 +911,19 @@ class ComputationBuilder(object):
"""
return self._client.Call(computation_to_apply.c_local_computation, operands)
- def Map(self, operands, computation_to_apply, dimensions, static_operands=()):
+ def Map(self, operands, computation_to_apply, dimensions):
"""Enqueues a map operation onto the computation.
Args:
operands: an iterable of LocalOp.
computation_to_apply: a Computation object.
dimensions: dimensions over which to map the function.
- static_operands: auxiliary arguments passed to the applied computation.
Returns:
A LocalOp representing the added Map op.
"""
return self._client.Map(operands, computation_to_apply.c_local_computation,
- dimensions, static_operands)
+ dimensions)
def Reduce(self, operand, init_value, computation_to_apply, dimensions):
"""Enqueues a reduction operation onto the computation.
diff --git a/tensorflow/compiler/xla/python/xla_client_test.py b/tensorflow/compiler/xla/python/xla_client_test.py
index 71e1d60a4e..0564ddcb85 100644
--- a/tensorflow/compiler/xla/python/xla_client_test.py
+++ b/tensorflow/compiler/xla/python/xla_client_test.py
@@ -157,6 +157,13 @@ class ComputationsWithConstantsTest(LocalComputationTest):
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
+ def testBooleanXor(self):
+ c = self._NewComputation()
+ c.Xor(
+ c.Constant(NumpyArrayBool([True, False, True, False])),
+ c.Constant(NumpyArrayBool([True, True, False, False])))
+ self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
+
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
@@ -1168,14 +1175,6 @@ class EmbeddedComputationsTest(LocalComputationTest):
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
- def DISABLED_testMapWithStaticOperands(self):
- c = self._NewComputation()
- factor = c.ConstantF32Scalar(3.0)
- c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
- self._CreateMulF32ByParamComputation(), [0],
- static_operands=[factor])
- self._ExecuteAndCompareClose(c, expected=[3.0, 6.0, 9.0, 12.0])
-
def testSelectAndScatterF32(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
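
As a sanity check on the new test: [True, False, True, False] XOR [True, True, False, False] evaluates elementwise to T^T=F, F^T=T, T^F=T, F^F=F, i.e. [False, True, True, False], exactly the asserted expectation. The already-disabled static-operands Map test is deleted outright, since the parameter it exercised no longer exists anywhere in the API.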
diff --git a/tensorflow/compiler/xla/reference_util.cc b/tensorflow/compiler/xla/reference_util.cc
index c289c84cff..6397f1f479 100644
--- a/tensorflow/compiler/xla/reference_util.cc
+++ b/tensorflow/compiler/xla/reference_util.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include <utility>
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.h"
#include "tensorflow/compiler/xla/service/hlo_evaluator.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -510,8 +511,8 @@ ReferenceUtil::ConvArray4DGeneralDimensionsDilated(
std::pair<int64, int64> lhs_dilation, std::pair<int64, int64> rhs_dilation,
ConvolutionDimensionNumbers dnums) {
HloComputation::Builder b("ConvArray4DGeneralDimensionDilated");
- auto lhs_literal = Literal::CreateR4FromArray4D<float>(lhs);
- auto rhs_literal = Literal::CreateR4FromArray4D<float>(rhs);
+ auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs);
+ auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs);
std::array<int64, 2> ordered_kernel_strides;
std::array<int64, 2> ordered_input_dimensions;
diff --git a/tensorflow/compiler/xla/reference_util_test.cc b/tensorflow/compiler/xla/reference_util_test.cc
index 9da9bc60a2..8091bed499 100644
--- a/tensorflow/compiler/xla/reference_util_test.cc
+++ b/tensorflow/compiler/xla/reference_util_test.cc
@@ -22,7 +22,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/array3d.h"
#include "tensorflow/compiler/xla/array4d.h"
#include "tensorflow/compiler/xla/client/padding.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/tests/literal_test_util.h"
@@ -53,7 +53,7 @@ class ReferenceUtilTest : public ::testing::Test {
TEST_F(ReferenceUtilTest, TransposeArray2D) {
auto result = ReferenceUtil::TransposeArray2D(*matrix_);
- auto actual_literal = Literal::CreateR2FromArray2D(*result);
+ auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{1.f, 4.f}, {2.f, 5.f}, {3.f, 6.f}},
*actual_literal, ErrorSpec(0.0001));
}
@@ -65,7 +65,7 @@ TEST_F(ReferenceUtilTest, MatmulArray2D) {
{11.f, 12.f},
});
auto result = ReferenceUtil::MatmulArray2D(*matrix_, rhs);
- auto actual_literal = Literal::CreateR2FromArray2D(*result);
+ auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{58.f, 64.f}, {139.f, 154.f}},
*actual_literal, ErrorSpec(0.0001));
}
@@ -73,7 +73,7 @@ TEST_F(ReferenceUtilTest, MatmulArray2D) {
TEST_F(ReferenceUtilTest, ReduceToColArray2D) {
auto add = [](float lhs, float rhs) { return lhs + rhs; };
auto result = ReferenceUtil::ReduceToColArray2D(*matrix_, 0.0f, add);
- auto actual_literal = Literal::CreateR1<float>(*result);
+ auto actual_literal = LiteralUtil::CreateR1<float>(*result);
LiteralTestUtil::ExpectR1Near<float>({6.f, 15.f}, *actual_literal,
ErrorSpec(0.0001));
}
@@ -81,13 +81,13 @@ TEST_F(ReferenceUtilTest, ReduceToColArray2D) {
TEST_F(ReferenceUtilTest, ReduceToRowArray2D) {
auto add = [](float lhs, float rhs) { return lhs + rhs; };
auto result = ReferenceUtil::ReduceToRowArray2D(*matrix_, 0.0f, add);
- auto actual_literal = Literal::CreateR1<float>(*result);
+ auto actual_literal = LiteralUtil::CreateR1<float>(*result);
LiteralTestUtil::ExpectR1Near<float>({5.f, 7.f, 9.f}, *actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, Reduce4Dto1DZeroSizedArray) {
- auto result = Literal::CreateR1<float>(ReferenceUtil::Reduce4DTo1D(
+ auto result = LiteralUtil::CreateR1<float>(ReferenceUtil::Reduce4DTo1D(
Array4D<float>(1, 0, 1, 1), /*init=*/0, /*dims=*/{0, 1, 2},
[](float a, float b) { return a + b; }));
LiteralTestUtil::ExpectR1Equal<float>({0}, *result);
@@ -96,7 +96,7 @@ TEST_F(ReferenceUtilTest, Reduce4Dto1DZeroSizedArray) {
TEST_F(ReferenceUtilTest, MapArray2D) {
auto identity = [](float value) { return log(exp(value)); };
auto result = ReferenceUtil::MapArray2D(*matrix_, identity);
- auto actual_literal = Literal::CreateR2FromArray2D(*result);
+ auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2NearArray2D(*matrix_, *actual_literal,
ErrorSpec(0.0001));
}
@@ -106,7 +106,7 @@ TEST_F(ReferenceUtilTest, MapWithIndexArray2D) {
return value + row + col;
};
auto result = ReferenceUtil::MapWithIndexArray2D(*matrix_, add_index);
- auto actual_literal = Literal::CreateR2FromArray2D(*result);
+ auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{1.f, 3.f, 5.f}, {5.f, 7.f, 9.f}},
*actual_literal, ErrorSpec(0.0001));
}
@@ -117,7 +117,7 @@ TEST_F(ReferenceUtilTest, MapArray4D) {
input->FillWithMultiples(1.0f);
auto multiply_by_two = [](float value) { return 2 * value; };
auto result = ReferenceUtil::MapArray4D(*input, multiply_by_two);
- auto actual_literal = Literal::CreateR4FromArray4D(*result);
+ auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result);
Array4D<float> expected(/*planes=*/2, /*depth=*/3, /*height=*/4, /*width=*/5);
expected.FillWithMultiples(2.0f);
@@ -134,7 +134,7 @@ TEST_F(ReferenceUtilTest, MapWithIndexArray4D) {
return value - (3 * 4 * 5 * plane + 4 * 5 * depth + 5 * height + width);
};
auto result = ReferenceUtil::MapWithIndexArray4D(*input, subtract_index);
- auto actual_literal = Literal::CreateR4FromArray4D(*result);
+ auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result);
Array4D<float> expected(/*planes=*/2, /*depth=*/3, /*height=*/4, /*width=*/5);
expected.Fill(0.0f);
@@ -144,7 +144,7 @@ TEST_F(ReferenceUtilTest, MapWithIndexArray4D) {
TEST_F(ReferenceUtilTest, SliceArray2D) {
auto result = ReferenceUtil::Slice2D(*matrix_, {{0, 0}}, {{2, 2}}, {{1, 1}});
- auto actual_literal = Literal::CreateR2FromArray2D(*result);
+ auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{1.f, 2.f}, {4.f, 5.f}},
*actual_literal, ErrorSpec(0.0001));
@@ -152,7 +152,7 @@ TEST_F(ReferenceUtilTest, SliceArray2D) {
TEST_F(ReferenceUtilTest, SliceStridedArray2D) {
auto result = ReferenceUtil::Slice2D(*matrix_, {{0, 0}}, {{2, 3}}, {{1, 2}});
- auto actual_literal = Literal::CreateR2FromArray2D(*result);
+ auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{1.f, 3.f}, {4.f, 6.f}},
*actual_literal, ErrorSpec(0.0001));
@@ -164,7 +164,7 @@ TEST_F(ReferenceUtilTest, SliceArray3D) {
auto result =
ReferenceUtil::Slice3D(input, {{0, 0, 0}}, {{2, 2, 2}}, {{1, 1, 1}});
- auto actual_literal = Literal::CreateR3FromArray3D(*result);
+ auto actual_literal = LiteralUtil::CreateR3FromArray3D(*result);
LiteralTestUtil::ExpectR3Near<float>(
{{{0.f, 1.f}, {4.f, 5.f}}, {{12.f, 13.f}, {16.f, 17.f}}}, *actual_literal,
@@ -177,7 +177,7 @@ TEST_F(ReferenceUtilTest, SliceStridedArray3D) {
auto result =
ReferenceUtil::Slice3D(input, {{0, 0, 0}}, {{2, 3, 4}}, {{1, 2, 2}});
- auto actual_literal = Literal::CreateR3FromArray3D(*result);
+ auto actual_literal = LiteralUtil::CreateR3FromArray3D(*result);
LiteralTestUtil::ExpectR3Near<float>(
{{{0.f, 2.f}, {8.f, 10.f}}, {{12.f, 14.f}, {20.f, 22.f}}},
@@ -190,7 +190,7 @@ TEST_F(ReferenceUtilTest, SliceArray4D) {
auto result = ReferenceUtil::Slice4D(input, {{1, 0, 0, 0}}, {{2, 2, 2, 2}},
{{1, 1, 1, 1}});
- auto actual_literal = Literal::CreateR4FromArray4D(*result);
+ auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result);
LiteralTestUtil::ExpectR4Near<float>(
{{{{60.f, 61.f}, {65.f, 66.f}}, {{80.f, 81.f}, {85.f, 86.f}}}},
@@ -203,7 +203,7 @@ TEST_F(ReferenceUtilTest, SliceStridedArray4D) {
auto result = ReferenceUtil::Slice4D(input, {{1, 0, 0, 0}}, {{2, 3, 4, 5}},
{{1, 2, 2, 2}});
- auto actual_literal = Literal::CreateR4FromArray4D(*result);
+ auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result);
LiteralTestUtil::ExpectR4Near<float>(
{{{{60.f, 62.f, 64.f}, {70.f, 72.f, 74.f}},
@@ -218,7 +218,7 @@ TEST_F(ReferenceUtilTest, ConvArray3DWithSamePadding) {
ReferenceUtil::ConvArray3D(input, weights, 1, Padding::kSame);
Array3D<float> expected = {{{17, 28, 39, 20}}};
- auto actual_literal = Literal::CreateR3FromArray3D(*actual);
+ auto actual_literal = LiteralUtil::CreateR3FromArray3D(*actual);
LiteralTestUtil::ExpectR3NearArray3D<float>(expected, *actual_literal,
ErrorSpec(0.0001));
@@ -231,7 +231,7 @@ TEST_F(ReferenceUtilTest, ConvArray3DWithValidPadding) {
ReferenceUtil::ConvArray3D(input, weights, 1, Padding::kValid);
Array3D<float> expected = {{{17, 28, 39}}};
- auto actual_literal = Literal::CreateR3FromArray3D(*actual);
+ auto actual_literal = LiteralUtil::CreateR3FromArray3D(*actual);
LiteralTestUtil::ExpectR3NearArray3D<float>(expected, *actual_literal,
ErrorSpec(0.0001));
@@ -266,7 +266,7 @@ TEST_F(ReferenceUtilTest, ConvWithSamePadding) {
}));
// clang-format on
- auto actual_literal = Literal::CreateR4FromArray4D(*actual);
+ auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual);
LiteralTestUtil::ExpectR4NearArray4D<float>(expected, *actual_literal,
ErrorSpec(0.0001));
@@ -300,7 +300,7 @@ TEST_F(ReferenceUtilTest, ConvWithValidPadding) {
}));
// clang-format on
- auto actual_literal = Literal::CreateR4FromArray4D(*actual);
+ auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual);
LiteralTestUtil::ExpectR4NearArray4D<float>(expected, *actual_literal,
ErrorSpec(0.0001));
@@ -356,7 +356,7 @@ TEST_F(ReferenceUtilTest, ConvGeneralDimensionsWithSamePadding) {
}});
// clang-format on
- auto actual_literal = Literal::CreateR4FromArray4D(*actual);
+ auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual);
LiteralTestUtil::ExpectR4NearArray4D<float>(expected, *actual_literal,
ErrorSpec(0.0001));
@@ -409,7 +409,7 @@ TEST_F(ReferenceUtilTest, ConvGeneralDimensionsWithValidPadding) {
Array4D<float> expected({{{{2514, 2685}}}});
// clang-format on
- auto actual_literal = Literal::CreateR4FromArray4D(*actual);
+ auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual);
LiteralTestUtil::ExpectR4NearArray4D<float>(expected, *actual_literal,
ErrorSpec(0.0001));
@@ -422,7 +422,7 @@ TEST_F(ReferenceUtilTest, ApplyElementwise2D) {
auto actual = ReferenceUtil::ApplyElementwise2D(
[](float x, float y, float z) { return 100 * x + 10 * y + z; }, a, b, c);
- auto actual_literal = Literal::CreateR2FromArray2D(*actual);
+ auto actual_literal = LiteralUtil::CreateR2FromArray2D(*actual);
LiteralTestUtil::ExpectR2Near({{300.f, 600.f}, {900.f, 1200.f}},
*actual_literal, ErrorSpec(0.0001));
}
diff --git a/tensorflow/compiler/xla/rpc/grpc_client_test.cc b/tensorflow/compiler/xla/rpc/grpc_client_test.cc
index d7dd9786a2..90efee50b4 100644
--- a/tensorflow/compiler/xla/rpc/grpc_client_test.cc
+++ b/tensorflow/compiler/xla/rpc/grpc_client_test.cc
@@ -85,19 +85,19 @@ TEST_F(GRPCClientTestBase, ItsAlive) {
TEST_F(GRPCClientTestBase, AxpyTenValues) {
XlaBuilder builder("axpy_10");
- auto alpha = builder.ConstantR0<float>(3.1415926535);
- auto x = builder.ConstantR1<float>(
- {-1.0, 1.0, 2.0, -2.0, -3.0, 3.0, 4.0, -4.0, -5.0, 5.0});
- auto y = builder.ConstantR1<float>(
- {5.0, -5.0, -4.0, 4.0, 3.0, -3.0, -2.0, 2.0, 1.0, -1.0});
- auto ax = builder.Mul(alpha, x);
- auto axpy = builder.Add(ax, y);
+ auto alpha = ConstantR0<float>(&builder, 3.1415926535);
+ auto x = ConstantR1<float>(
+ &builder, {-1.0, 1.0, 2.0, -2.0, -3.0, 3.0, 4.0, -4.0, -5.0, 5.0});
+ auto y = ConstantR1<float>(
+ &builder, {5.0, -5.0, -4.0, 4.0, 3.0, -3.0, -2.0, 2.0, 1.0, -1.0});
+ auto ax = Mul(alpha, x);
+ Add(ax, y);
std::vector<float> expected = {
1.85840735, -1.85840735, 2.28318531, -2.28318531, -6.42477796,
6.42477796, 10.56637061, -10.56637061, -14.70796327, 14.70796327};
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR1<float>(expected);
+ LiteralUtil::CreateR1<float>(expected);
TF_ASSERT_OK_AND_ASSIGN(auto computation, builder.Build());
TF_ASSERT_OK_AND_ASSIGN(auto result_literal, client_->ExecuteAndTransfer(
computation, {}, nullptr));
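
The rewritten test also shows the free-function idiom for roots: the final Add is not bound to a variable because XlaBuilder::Build treats the last enqueued op as the computation root. The expected vector is just alpha*x + y evaluated elementwise; for the first entry, 3.1415926535 * (-1.0) + 5.0 = 1.8584073465, which matches the listed 1.85840735 to the printed precision.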
diff --git a/tensorflow/compiler/xla/service/BUILD b/tensorflow/compiler/xla/service/BUILD
index c08960a57b..989bb759e3 100644
--- a/tensorflow/compiler/xla/service/BUILD
+++ b/tensorflow/compiler/xla/service/BUILD
@@ -32,6 +32,7 @@ tf_proto_library_py(
name = "hlo_proto", # bzl adds a _py suffix only to the OSS target.
srcs = ["hlo.proto"],
visibility = ["//visibility:public"],
+ deps = ["//tensorflow/compiler/xla:xla_data_proto_py"],
)
xla_proto_library(
@@ -135,7 +136,7 @@ cc_library(
":hlo_dce",
":hlo_pass",
":tuple_simplifier",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_tree",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:util",
@@ -181,6 +182,7 @@ tf_cc_test(
name = "shape_inference_test",
srcs = ["shape_inference_test.cc"],
deps = [
+ ":hlo",
":shape_inference",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
@@ -226,6 +228,7 @@ cc_library(
":hlo",
":hlo_query",
":shape_inference",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
@@ -243,7 +246,7 @@ tf_cc_test(
deps = [
":hlo",
":hlo_evaluator",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:reference_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status",
@@ -293,6 +296,7 @@ cc_library(
":hlo_reachability",
":name_uniquer",
"//tensorflow/compiler/xla:array",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:protobuf_util",
"//tensorflow/compiler/xla:shape_tree",
@@ -395,6 +399,7 @@ tf_cc_test(
deps = [
":hlo_matchers",
":hlo_parser",
+ "//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
],
@@ -406,7 +411,7 @@ tf_cc_test(
deps = [
":hlo",
":hlo_parser",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:protobuf_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
@@ -423,7 +428,7 @@ tf_cc_test(
srcs = ["hlo_sharding_test.cc"],
deps = [
":hlo",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:protobuf_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
@@ -452,7 +457,7 @@ tf_cc_test(
srcs = ["call_graph_test.cc"],
deps = [
":call_graph",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:test",
@@ -486,6 +491,7 @@ cc_library(
hdrs = ["call_inliner.h"],
deps = [
":call_graph",
+ ":hlo_dce",
":hlo_pass",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/core:lib",
@@ -501,7 +507,7 @@ tf_cc_test(
":hlo",
":hlo_matchers",
":hlo_pass",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:types",
@@ -520,7 +526,7 @@ tf_cc_test(
deps = [
":call_graph",
":flatten_call_graph",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:test",
@@ -796,7 +802,7 @@ cc_library(
hdrs = ["transfer_manager.h"],
deps = [
":shaped_buffer",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
@@ -959,7 +965,7 @@ tf_cc_test(
":hlo",
":hlo_ordering",
":hlo_scheduling",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:test_helpers",
@@ -1037,7 +1043,7 @@ tf_cc_test(
":hlo_ordering",
":hlo_value",
":tuple_points_to_analysis",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
@@ -1120,7 +1126,7 @@ cc_library(
hdrs = ["hlo_query.h"],
deps = [
":hlo",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
],
)
@@ -1169,6 +1175,7 @@ cc_library(
deps = [
":hlo",
":shape_inference",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
@@ -1199,6 +1206,7 @@ cc_library(
deps = [
":hlo",
":hlo_pass",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
@@ -1218,6 +1226,7 @@ cc_library(
":hlo_creation_utils",
":hlo_pass",
":while_util",
+ "//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
],
@@ -1231,8 +1240,9 @@ tf_cc_test(
":batchnorm_expander",
":hlo",
":hlo_matchers",
+ ":hlo_parser",
":hlo_pass",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:types",
@@ -1254,6 +1264,7 @@ cc_library(
":hlo_pass",
":hlo_query",
":pattern_matcher",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
@@ -1273,7 +1284,7 @@ tf_cc_test(
":hlo",
":hlo_matchers",
":hlo_pass",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:types",
@@ -1309,7 +1320,7 @@ tf_cc_test(
":hlo",
":hlo_matchers",
":hlo_pass",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:types",
@@ -1344,7 +1355,7 @@ cc_library(
":call_inliner",
":hlo",
":hlo_pass",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:types",
@@ -1360,6 +1371,7 @@ tf_cc_test(
":conditional_simplifier",
":hlo",
":hlo_matchers",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
@@ -1419,7 +1431,7 @@ tf_cc_test(
deps = [
":defuser",
":hlo_matchers",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla/tests:hlo_verified_test_base",
],
@@ -1447,7 +1459,7 @@ tf_cc_test(
deps = [
":hlo_matchers",
":implicit_broadcast_remover",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla/tests:hlo_verified_test_base",
],
@@ -1489,7 +1501,7 @@ tf_cc_test(
":hlo",
":hlo_matchers",
":tuple_simplifier",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:types",
@@ -1504,7 +1516,7 @@ cc_library(
hdrs = ["reshape_mover.h"],
deps = [
":hlo_pass",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:util",
@@ -1519,7 +1531,7 @@ tf_cc_test(
":hlo",
":hlo_matchers",
":reshape_mover",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:test_helpers",
@@ -1554,7 +1566,7 @@ tf_cc_test(
":hlo",
":hlo_matchers",
":inliner",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:util",
@@ -1571,7 +1583,7 @@ cc_library(
hdrs = ["computation_placer.h"],
deps = [
"//tensorflow/compiler/xla:array2d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status",
"//tensorflow/compiler/xla:status_macros",
@@ -1603,7 +1615,7 @@ cc_library(
hdrs = ["generic_transfer_manager.h"],
deps = [
":transfer_manager",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
@@ -1694,7 +1706,7 @@ tf_cc_test(
deps = [
":hlo",
":hlo_matchers",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:test_helpers",
@@ -1709,6 +1721,7 @@ tf_cc_binary(
deps = [
":hlo",
":hlo_graph_dumper",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
@@ -1723,7 +1736,7 @@ tf_cc_test(
srcs = ["hlo_module_test.cc"],
deps = [
":hlo",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:util",
@@ -1821,7 +1834,7 @@ tf_cc_test(
":hlo_matchers",
":hlo_ordering",
":instruction_fusion",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:test",
@@ -1858,7 +1871,7 @@ tf_cc_test(
deps = [
":hlo",
":hlo_liveness_analysis",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:test",
@@ -1919,7 +1932,7 @@ tf_cc_test(
":hlo_matchers",
":hlo_ordering",
":instruction_fusion",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:test_helpers",
@@ -1954,6 +1967,7 @@ cc_library(
":hlo_dataflow_analysis",
":logical_buffer",
":logical_buffer_analysis",
+ "//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_tree",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
@@ -1972,6 +1986,7 @@ tf_cc_test(
":hlo_matchers",
":instruction_fusion",
":tuple_points_to_analysis",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
@@ -2043,7 +2058,7 @@ tf_cc_test(
":hlo_graph_dumper",
":hlo_matchers",
":hlo_runner",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:test_helpers",
@@ -2094,6 +2109,7 @@ cc_library(
hdrs = ["hlo_verifier.h"],
deps = [
":hlo",
+ ":hlo_casting_utils",
":hlo_pass",
":shape_inference",
"//tensorflow/compiler/xla:status_macros",
@@ -2106,6 +2122,7 @@ tf_cc_test(
srcs = ["hlo_verifier_test.cc"],
deps = [
":hlo",
+ ":hlo_parser",
":hlo_verifier",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
@@ -2167,6 +2184,7 @@ tf_cc_test(
deps = [
":hlo",
":hlo_dce",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
@@ -2187,7 +2205,7 @@ tf_cc_test(
deps = [
":hlo",
":hlo_module_dce",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
@@ -2211,7 +2229,7 @@ tf_cc_test(
":hlo",
":hlo_matchers",
":layout_assignment",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_layout",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
@@ -2270,7 +2288,7 @@ cc_library(
":hlo",
":hlo_domain_map",
":hlo_pass",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:xla_data_proto",
@@ -2286,7 +2304,7 @@ tf_cc_test(
":hlo",
":hlo_cse",
":hlo_matchers",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
@@ -2308,7 +2326,7 @@ cc_library(
":hlo_evaluator",
":hlo_pass",
":hlo_query",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
"//tensorflow/core:lib",
@@ -2323,7 +2341,7 @@ tf_cc_test(
":hlo_constant_folding",
":hlo_matchers",
":hlo_pass",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:types",
@@ -2361,6 +2379,20 @@ cc_library(
)
cc_library(
+ name = "hlo_domain_verifier",
+ srcs = ["hlo_domain_verifier.cc"],
+ hdrs = ["hlo_domain_verifier.h"],
+ deps = [
+ ":hlo",
+ ":hlo_domain_map",
+ ":hlo_graph_dumper",
+ ":hlo_pass",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
name = "hlo_domain_isolator",
srcs = ["hlo_domain_isolator.cc"],
hdrs = ["hlo_domain_isolator.h"],
@@ -2379,8 +2411,8 @@ cc_library(
hdrs = ["hlo_domain_remover.h"],
deps = [
":hlo",
- ":hlo_domain_isolator",
":hlo_domain_map",
+ ":hlo_domain_verifier",
":hlo_graph_dumper",
":hlo_pass",
"//tensorflow/compiler/xla:types",
@@ -2415,7 +2447,7 @@ cc_library(
":hlo_evaluator",
":hlo_pass",
":hlo_query",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
"//tensorflow/core:lib",
@@ -2550,7 +2582,7 @@ cc_library(
hdrs = ["hlo_tfgraph_builder.h"],
deps = [
":hlo",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:xla_proto",
"//tensorflow/core:framework",
@@ -2581,7 +2613,7 @@ cc_library(
":hlo_casting_utils",
":hlo_execution_profile",
":hlo_tfgraph_builder",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:window_util",
@@ -2599,6 +2631,7 @@ tf_cc_test(
deps = [
":hlo",
":hlo_graph_dumper",
+ "//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:xla_proto",
"//tensorflow/compiler/xla/tests:test_utils",
@@ -2630,7 +2663,7 @@ tf_cc_test(
":hlo_matchers",
":shape_inference",
":transpose_folding",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:test_helpers",
@@ -2651,7 +2684,7 @@ cc_library(
deps = [
":hlo",
":hlo_pass",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:util",
@@ -2666,7 +2699,7 @@ tf_cc_test(
":hlo",
":shape_inference",
":zero_sized_hlo_elimination",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:test",
@@ -2826,6 +2859,7 @@ cc_library(
":hlo",
":hlo_creation_utils",
":tuple_util",
+ "//tensorflow/compiler/xla:literal_util",
"//tensorflow/core:lib",
],
)
@@ -2961,6 +2995,7 @@ cc_library(
":hlo",
":hlo_lexer",
":hlo_sharding_metadata",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
diff --git a/tensorflow/compiler/xla/service/algebraic_simplifier.cc b/tensorflow/compiler/xla/service/algebraic_simplifier.cc
index d8a9aba834..2205a7ec18 100644
--- a/tensorflow/compiler/xla/service/algebraic_simplifier.cc
+++ b/tensorflow/compiler/xla/service/algebraic_simplifier.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/layout_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
@@ -50,20 +51,15 @@ namespace {
namespace m = match;
-// Returns whether operand is a literal with the given value.
-bool IsLiteralWithValue(const HloInstruction* operand, int8 value) {
- return operand->opcode() == HloOpcode::kConstant &&
- operand->literal().IsAll(value);
-}
-
bool IsAll(const HloInstruction* op, int8 value) {
- if (IsLiteralWithValue(op, value)) {
- return true;
- }
- if (op->opcode() == HloOpcode::kBroadcast && IsAll(op->operand(0), value)) {
- return true;
+ switch (op->opcode()) {
+ case HloOpcode::kBroadcast:
+ return IsAll(op->operand(0), value);
+ case HloOpcode::kConstant:
+ return op->literal().IsAll(value);
+ default:
+ return false;
}
- return false;
}
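The refactored IsAll above collapses the old two-step helper into a single recursive switch: broadcasts are peeled until a constant (or anything else) is reached. A minimal standalone sketch of the same control flow, using a hypothetical simplified node type in place of HloInstruction:

struct Node {
  enum class Kind { kConstant, kBroadcast, kOther };
  Kind kind = Kind::kOther;
  int value = 0;                  // set when kind == kConstant
  const Node* operand = nullptr;  // set when kind == kBroadcast
};

// Mirrors the rewritten IsAll: recurse through broadcasts, test constants,
// reject everything else.
bool IsAllValue(const Node* op, int value) {
  switch (op->kind) {
    case Node::Kind::kBroadcast:
      return IsAllValue(op->operand, value);
    case Node::Kind::kConstant:
      return op->value == value;
    default:
      return false;
  }
}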
// Returns whether the given transpose produces a result which is bit-wise
@@ -160,9 +156,6 @@ class AlgebraicSimplifierVisitor : public DfsHloVisitorWithDefault {
Status HandleMap(HloInstruction* map) override;
- Status HandleMaximum(HloInstruction* maximum) override;
- Status HandleMinimum(HloInstruction* minimum) override;
-
// Returns whether algebraic simplification has occurred.
const bool changed() const { return changed_; }
@@ -201,8 +194,9 @@ class AlgebraicSimplifierVisitor : public DfsHloVisitorWithDefault {
// Helper method to perform and add reduction in a single dimension.
HloInstruction* AddReduce(HloInstruction* hlo, int64 dim) {
- HloInstruction* zero = computation_->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(0.0f)));
+ HloInstruction* zero =
+ computation_->AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::Zero(hlo->shape().element_type()).CloneToUnique()));
HloComputation* AddReduce_computation = GetOrCreateScalarAddComputation();
Shape shape = ShapeUtil::DeleteDimension(dim, hlo->shape());
return computation_->AddInstruction(HloInstruction::CreateReduce(
@@ -537,11 +531,15 @@ Status AlgebraicSimplifierVisitor::HandleConstant(HloInstruction* constant) {
constant, BuildTupleConstant(computation_, constant->literal()));
}
+ if (constant->shape().element_type() == TOKEN) {
+ return Status::OK();
+ }
+
// If a literal is all the same element replace it with a scalar broadcast.
if (ShapeUtil::ElementsIn(constant->shape()) > 1 &&
constant->literal().IsAllFirst()) {
- std::unique_ptr<Literal> unique_scalar =
- MakeUnique<Literal>(constant->literal().GetFirstScalarLiteral());
+ std::unique_ptr<Literal> unique_scalar = MakeUnique<Literal>(
+ LiteralUtil::GetFirstScalarLiteral(constant->literal()));
HloInstruction* scalar = computation_->AddInstruction(
HloInstruction::CreateConstant(std::move(unique_scalar)));
return ReplaceWithNewInstruction(
@@ -572,6 +570,14 @@ Status AlgebraicSimplifierVisitor::HandleSubtract(HloInstruction* sub) {
return Status::OK();
}
+namespace {
+template <typename T>
+Status InvertConstant(const HloInstruction& constant, Literal* result) {
+ return result->Populate<T>([&](tensorflow::gtl::ArraySlice<int64> indices) {
+ return T{1.0} / constant.literal().Get<T>(indices);
+ });
+}
+} // namespace
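The InvertConstant helper above fills the result element by element through Literal::Populate<T>, taking the reciprocal of the matching element of the source constant. A self-contained sketch of the same element-wise inversion over a plain vector (the Literal indexing machinery is elided):

#include <cstddef>
#include <vector>

template <typename T>
std::vector<T> InvertElements(const std::vector<T>& src) {
  std::vector<T> out(src.size());
  for (std::size_t i = 0; i < src.size(); ++i) {
    out[i] = T{1.0} / src[i];  // same per-element formula as InvertConstant
  }
  return out;
}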
Status AlgebraicSimplifierVisitor::HandleDivide(HloInstruction* divide) {
Shape* shape;
@@ -633,14 +639,31 @@ Status AlgebraicSimplifierVisitor::HandleDivide(HloInstruction* divide) {
// (Backends can do this transformation, but generally only if the constant is
// a scalar.)
if (Match(divide, m::Divide(m::NonConstant(&a), m::Constant(&b)))) {
- HloInstruction* one =
- computation_->AddInstruction(HloInstruction::CreateConstant(
- Literal::One(a->shape().element_type()).CloneToUnique()));
- HloInstruction* inverse = computation_->AddInstruction(
- HloInstruction::CreateBinary(b->shape(), HloOpcode::kDivide, one, b));
- return ReplaceWithNewInstruction(
- divide, HloInstruction::CreateBinary(divide->shape(),
- HloOpcode::kMultiply, a, inverse));
+ Literal new_literal(b->shape());
+ switch (b->shape().element_type()) {
+ case F16:
+ TF_RETURN_IF_ERROR(InvertConstant<half>(*b, &new_literal));
+ break;
+ case F32:
+ TF_RETURN_IF_ERROR(InvertConstant<float>(*b, &new_literal));
+ break;
+ case BF16:
+ TF_RETURN_IF_ERROR(InvertConstant<bfloat16>(*b, &new_literal));
+ break;
+ case F64:
+ TF_RETURN_IF_ERROR(InvertConstant<double>(*b, &new_literal));
+ break;
+ case C64:
+ TF_RETURN_IF_ERROR(InvertConstant<complex64>(*b, &new_literal));
+ break;
+ default:
+ return Status::OK();
+ }
+ auto inverse = computation_->AddInstruction(
+ HloInstruction::CreateConstant(new_literal.CloneToUnique()));
+ TF_ASSIGN_OR_RETURN(auto new_divide,
+ MakeBinaryHlo(HloOpcode::kMultiply, a, inverse));
+ return ReplaceInstruction(divide, new_divide);
}
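In effect, the block above strength-reduces a divide by a constant into a multiply by a precomputed reciprocal, and only for element types it knows how to invert (F16, BF16, F32, F64, C64); anything else falls through untouched. A small plain-C++ illustration (not XLA code) of the trade-off: the two forms can differ in the last ulp, which the pass accepts in exchange for replacing a division with a cheaper multiplication.

#include <cstdio>

int main() {
  const float b = 3.0f;
  const float inv_b = 1.0f / b;  // the pass folds this at compile time
  const float a = 10.0f;
  std::printf("a / b     = %.9g\n", a / b);
  std::printf("a * inv_b = %.9g\n", a * inv_b);
  return 0;
}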
// (A / B) / (C / D) => (A / B)*(D / C) => (A * D) / (B * C)
@@ -660,18 +683,18 @@ Status AlgebraicSimplifierVisitor::HandleDivide(HloInstruction* divide) {
if (Match(divide, m::Divide(m::Divide(m::Op(&a), m::Op(&b)), m::Op(&c)))) {
TF_ASSIGN_OR_RETURN(auto b_times_c,
MakeBinaryHlo(HloOpcode::kMultiply, b, c));
- return ReplaceWithNewInstruction(
- divide, HloInstruction::CreateBinary(divide->shape(),
- HloOpcode::kDivide, a, b_times_c));
+ TF_ASSIGN_OR_RETURN(auto new_divide,
+ MakeBinaryHlo(HloOpcode::kDivide, a, b_times_c));
+ return ReplaceInstruction(divide, new_divide);
}
// A / (B / C) => (A*C) / B
if (Match(divide, m::Divide(m::Op(&a), m::Divide(m::Op(&b), m::Op(&c))))) {
TF_ASSIGN_OR_RETURN(auto a_times_c,
MakeBinaryHlo(HloOpcode::kMultiply, a, c));
- return ReplaceWithNewInstruction(
- divide, HloInstruction::CreateBinary(divide->shape(),
- HloOpcode::kDivide, a_times_c, b));
+ TF_ASSIGN_OR_RETURN(auto new_divide,
+ MakeBinaryHlo(HloOpcode::kDivide, a_times_c, b));
+ return ReplaceInstruction(divide, new_divide);
}
return Status::OK();
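Both rewrites above rely on the real-arithmetic identities (a/b)/c = a/(b*c) and a/(b/c) = (a*c)/b, rebuilding the final divide with MakeBinaryHlo, which infers the result shape rather than reusing the original divide's. A quick numeric check of the identities (exact in real arithmetic; floating point may differ in the last ulp):

#include <cstdio>

int main() {
  const double a = 7.0, b = 3.0, c = 2.0;
  std::printf("%.17g vs %.17g\n", (a / b) / c, a / (b * c));
  std::printf("%.17g vs %.17g\n", a / (b / c), (a * c) / b);
  return 0;
}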
@@ -1071,7 +1094,7 @@ Status AlgebraicSimplifierVisitor::HandleDot(HloInstruction* dot) {
ShapeUtil::IsZeroElementArray(lhs->shape()) ||
ShapeUtil::IsZeroElementArray(rhs->shape())) {
auto zero = computation_->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f)));
return ReplaceWithNewInstruction(
dot, HloInstruction::CreateBroadcast(dot->shape(), zero, {}));
}
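The zero-element case above is sound because each output element of a dot is a sum over the contracted dimension, and when either operand has no elements that sum is empty, hence zero. A one-line reduction sketch:

#include <cstdio>

int main() {
  const int contracted = 0;  // a zero-element operand empties the reduction
  double acc = 0.0;
  for (int i = 0; i < contracted; ++i) acc += 1.0;  // never runs
  std::printf("empty dot element = %g\n", acc);     // prints 0
  return 0;
}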
@@ -1133,6 +1156,19 @@ Status AlgebraicSimplifierVisitor::HandleMultiply(HloInstruction* multiply) {
return Status::OK();
}
+ // 0*A => 0. Applied only to integral types, since 0*NaN is NaN for floats.
+ if (IsAll(lhs, 0) &&
+ primitive_util::IsIntegralType(multiply->shape().element_type()) &&
+ ReplaceInstructionIfSameShape(multiply, lhs)) {
+ return Status::OK();
+ }
+ // A*0 => 0
+ if (IsAll(rhs, 0) &&
+ primitive_util::IsIntegralType(multiply->shape().element_type()) &&
+ ReplaceInstructionIfSameShape(multiply, rhs)) {
+ return Status::OK();
+ }
+
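The integral-type restriction in the two new rules matters because IEEE-754 multiplication does not treat zero as absorbing: 0 * NaN and 0 * inf are NaN, so folding a floating-point product to the zero operand would change observable results. A minimal demonstration:

#include <cstdio>
#include <limits>

int main() {
  const float nan = std::numeric_limits<float>::quiet_NaN();
  const float inf = std::numeric_limits<float>::infinity();
  std::printf("0 * NaN = %f\n", 0.0f * nan);  // NaN, not 0
  std::printf("0 * inf = %f\n", 0.0f * inf);  // NaN, not 0
  return 0;
}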
// exp(A) * exp(B) => exp(A+B)
if (Match(multiply, m::Multiply(m::Exp(m::Op(&lhs)), m::Exp(m::Op(&rhs))))) {
auto add = computation_->AddInstruction(HloInstruction::CreateBinary(
@@ -1230,9 +1266,10 @@ bool OutputIsPermutationOfOperandElements(HloInstruction* instruction,
switch (instruction->opcode()) {
case HloOpcode::kReshape:
case HloOpcode::kReverse:
- case HloOpcode::kSort:
case HloOpcode::kTranspose:
return true;
+ case HloOpcode::kSort:
+ return !ShapeUtil::IsTuple(instruction->shape());
default:
return false;
}
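The new kSort case narrows the predicate: an array-shaped sort output contains exactly the operand's elements in a different order, but a tuple-shaped (key/value) sort output is not an array permutation of any single operand, so it is excluded. A sketch of the property being tested, on plain vectors:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> keys = {3, 1, 2};
  std::vector<int> sorted = keys;
  std::sort(sorted.begin(), sorted.end());
  // Same multiset of elements, different order: a permutation of the input.
  for (int v : sorted) std::printf("%d ", v);
  std::printf("\n");
  return 0;
}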
@@ -1496,7 +1533,7 @@ Status AlgebraicSimplifierVisitor::HandlePower(HloInstruction* power) {
CHECK(Match(power, m::Power(m::Op(&lhs), m::Op(&rhs))));
if (IsAll(rhs, 0)) {
auto one = HloInstruction::CreateConstant(
- Literal::One(power->shape().element_type()).CloneToUnique());
+ LiteralUtil::One(power->shape().element_type()).CloneToUnique());
std::unique_ptr<HloInstruction> ones;
if (ShapeUtil::IsScalar(power->shape())) {
ones = std::move(one);
@@ -1531,7 +1568,7 @@ Status AlgebraicSimplifierVisitor::HandlePower(HloInstruction* power) {
VLOG(10) << "trying transform [pow(A, -1) => 1/A]: " << power->ToString();
if (IsAll(rhs, -1)) {
auto* one = computation_->AddInstruction(HloInstruction::CreateConstant(
- Literal::One(rhs->shape().element_type()).CloneToUnique()));
+ LiteralUtil::One(rhs->shape().element_type()).CloneToUnique()));
// Explicitly broadcast scalar 1 to the output shape, to avoid implicit
// broadcast in divide HLO as we are trying to eliminate implicit
@@ -2074,10 +2111,9 @@ Status AlgebraicSimplifierVisitor::HandleConvolution(
convolution,
HloInstruction::CreateBroadcast(
convolution->shape(),
- computation_->AddInstruction(HloInstruction::CreateConvert(
- ShapeUtil::MakeShape(convolution->shape().element_type(), {}),
- computation_->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(0.0f))))),
+ computation_->AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::Zero(convolution->shape().element_type())
+ .CloneToUnique())),
{}));
}
const auto& window = convolution->window();
@@ -2249,68 +2285,6 @@ Status AlgebraicSimplifierVisitor::HandleMap(HloInstruction* map) {
return ReplaceWithNewInstruction(map, std::move(clone));
}
-Status AlgebraicSimplifierVisitor::HandleMaximum(HloInstruction* maximum) {
- // Match the following tree:
- // min_operand operand
- // \ /
- // max_operand min
- // \ /
- // max
- // where max_operand and min_operand are scalar constants.
- {
- HloInstruction* min;
- HloInstruction* max_operand;
- HloInstruction* min_operand;
- HloInstruction* operand;
-
- if (hlo_query::MatchBinaryInstructionOperandOpcode(
- HloOpcode::kMinimum, maximum,
- /*matching_operand=*/&min,
- /*other_operand=*/&max_operand) &&
- hlo_query::MatchBinaryInstructionOperand(
- hlo_query::IsScalarConstant, min,
- /*matching_operand=*/&min_operand,
- /*other_operand=*/&operand) &&
- TransformToClampIfSameShape(maximum, min, min_operand, operand, maximum,
- max_operand)) {
- return Status::OK();
- }
- }
-
- return Status::OK();
-}
-
-Status AlgebraicSimplifierVisitor::HandleMinimum(HloInstruction* minimum) {
- // Match the following tree:
- // max_operand operand
- // \ /
- // min_operand max
- // \ /
- // min
- // where max_operand and min_operand are scalar constants.
- {
- HloInstruction* max;
- HloInstruction* max_operand;
- HloInstruction* min_operand;
- HloInstruction* operand;
-
- if (hlo_query::MatchBinaryInstructionOperandOpcode(
- HloOpcode::kMaximum, minimum,
- /*matching_operand=*/&max,
- /*other_operand=*/&min_operand) &&
- hlo_query::MatchBinaryInstructionOperand(
- hlo_query::IsScalarConstant, max,
- /*matching_operand=*/&max_operand,
- /*other_operand=*/&operand) &&
- TransformToClampIfSameShape(minimum, minimum, min_operand, operand, max,
- max_operand)) {
- return Status::OK();
- }
- }
-
- return Status::OK();
-}
-
StatusOr<bool> AlgebraicSimplifier::Run(HloModule* module) {
XLA_VLOG_LINES(2,
"AlgebraicSimplifier::Run(), before:\n" + module->ToString());
diff --git a/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc b/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
index 49cc0b808b..3f0f2afadd 100644
--- a/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
+++ b/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include <utility>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -60,7 +60,7 @@ TEST_F(AlgebraicSimplifierTest, AddZero) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param0"));
HloInstruction* zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param0, zero));
@@ -74,12 +74,32 @@ TEST_F(AlgebraicSimplifierTest, AddZero) {
EXPECT_EQ(root, param0);
}
+// Test that A * 0 is simplified to 0
+TEST_F(AlgebraicSimplifierTest, MulZero) {
+ Shape r0s32 = ShapeUtil::MakeShape(S32, {});
+ HloComputation::Builder builder(TestName());
+ HloInstruction* param0 = builder.AddInstruction(
+ HloInstruction::CreateParameter(0, r0s32, "param0"));
+ HloInstruction* zero = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0)));
+ builder.AddInstruction(
+ HloInstruction::CreateBinary(r0s32, HloOpcode::kMultiply, param0, zero));
+
+ auto computation = module().AddEntryComputation(builder.Build());
+ HloInstruction* root = computation->root_instruction();
+ EXPECT_EQ(root->opcode(), HloOpcode::kMultiply);
+ AlgebraicSimplifier simplifier(/*is_layout_sensitive=*/false,
+ non_bitcasting_callback());
+ ASSERT_TRUE(simplifier.Run(&module()).ValueOrDie());
+ EXPECT_EQ(computation->root_instruction(), zero);
+}
+
// Test that Reduce(Reduce(A)) -> Reduce(A)
TEST_F(AlgebraicSimplifierTest, TwoReducesToOne) {
HloComputation::Builder builder(TestName());
// Create add computation.
HloInstruction* zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
HloComputation* add_computation = nullptr;
{
HloComputation::Builder builder(TestName() + ".add");
@@ -119,7 +139,7 @@ TEST_F(AlgebraicSimplifierTest, AddConstOnLHS) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param0"));
HloInstruction* constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.0f)));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, constant, param0));
@@ -140,9 +160,9 @@ TEST_F(AlgebraicSimplifierTest, AddReassociateMergeConstants) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param0"));
HloInstruction* constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.0f)));
HloInstruction* constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(3.14159f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0(3.14159f)));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param0, constant1));
@@ -165,7 +185,7 @@ TEST_F(AlgebraicSimplifierTest, AddBroadcastZeroR0Operand) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r2f32, "param0"));
HloInstruction* zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
HloInstruction* bcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(r2f32, zero, {0, 1}));
builder.AddInstruction(
@@ -200,9 +220,12 @@ TEST_F(AlgebraicSimplifierTest, InlineTrivialMap) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r2f32, "param0"));
HloInstruction* zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
- builder.AddInstruction(
- HloInstruction::CreateMap(r2f32, {param0, zero}, add_computation));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
+ builder.AddInstruction(HloInstruction::CreateMap(
+ r2f32,
+ {param0, builder.AddInstruction(
+ HloInstruction::CreateBroadcast(r2f32, zero, {}))},
+ add_computation));
auto computation = module().AddEntryComputation(builder.Build());
HloInstruction* root = computation->root_instruction();
@@ -211,7 +234,7 @@ TEST_F(AlgebraicSimplifierTest, InlineTrivialMap) {
non_bitcasting_callback());
ASSERT_TRUE(simplifier.Run(&module()).ValueOrDie());
root = computation->root_instruction();
- EXPECT_THAT(root, op::Add(param0, zero));
+ EXPECT_THAT(root, op::Add(param0, op::Broadcast(zero)));
}
TEST_F(AlgebraicSimplifierTest, AddBroadcastZeroR1Operand) {
@@ -220,7 +243,7 @@ TEST_F(AlgebraicSimplifierTest, AddBroadcastZeroR1Operand) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r2f32, "param0"));
HloInstruction* zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({0, 0, 0})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({0, 0, 0})));
HloInstruction* bcast =
builder.AddInstruction(HloInstruction::CreateBroadcast(r2f32, zero, {1}));
builder.AddInstruction(
@@ -239,7 +262,7 @@ TEST_F(AlgebraicSimplifierTest, AddBroadcastZeroR1Operand) {
TEST_F(AlgebraicSimplifierTest, ConstantToBroadcast) {
HloComputation::Builder builder(TestName());
builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({3.14f, 3.14f, 3.14f})));
+ LiteralUtil::CreateR1<float>({3.14f, 3.14f, 3.14f})));
auto computation = module().AddEntryComputation(builder.Build());
HloInstruction* root = computation->root_instruction();
@@ -255,7 +278,7 @@ TEST_F(AlgebraicSimplifierTest, ConstantToBroadcast) {
TEST_F(AlgebraicSimplifierTest, ConstantNotToBroadcast) {
HloComputation::Builder builder(TestName());
builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({3.14, 3.14, 4})));
+ LiteralUtil::CreateR1<float>({3.14, 3.14, 4})));
auto computation = module().AddEntryComputation(builder.Build());
HloInstruction* root = computation->root_instruction();
@@ -274,7 +297,7 @@ TEST_F(AlgebraicSimplifierTest, SubZero) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param0"));
HloInstruction* zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kSubtract, param0, zero));
@@ -295,7 +318,7 @@ TEST_F(AlgebraicSimplifierTest, SubConstCanonicalization) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param0"));
HloInstruction* constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(HloInstruction::CreateBinary(
r0f32, HloOpcode::kSubtract, param0, constant));
@@ -367,17 +390,16 @@ TEST_F(AlgebraicSimplifierTest, RhsDivOfDiv) {
// Test that (A/B)/(C/D) is simplified to (A*D)/(B*C).
TEST_F(AlgebraicSimplifierTest, DivOfDivAndDiv) {
- Shape r0f32 = ShapeUtil::MakeShape(F32, {});
Shape r2f32 = ShapeUtil::MakeShape(F32, {42, 123});
HloComputation::Builder builder(TestName());
HloInstruction* param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(0, r0f32, "param0"));
+ HloInstruction::CreateParameter(0, r2f32, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r2f32, "param1"));
HloInstruction* param2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, r2f32, "param2"));
HloInstruction* param3 = builder.AddInstruction(
- HloInstruction::CreateParameter(3, r0f32, "param3"));
+ HloInstruction::CreateParameter(3, r2f32, "param3"));
HloInstruction* div0 = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kDivide, param0, param1));
HloInstruction* div1 = builder.AddInstruction(
@@ -398,8 +420,6 @@ TEST_F(AlgebraicSimplifierTest, DivOfDivAndDiv) {
EXPECT_THAT(
computation->root_instruction(),
op::Divide(op::Multiply(param0, param3), op::Multiply(param1, param2)));
- EXPECT_TRUE(
- ShapeUtil::Compatible(computation->root_instruction()->shape(), r2f32));
}
// Test that A/exp(B) is simplified to A*exp(-B).
@@ -459,7 +479,6 @@ TEST_F(AlgebraicSimplifierTest, DivOfPower) {
// Test that broadcasting is done at the right step when simplifying A/pow(B,C)
// to A*pow(B,-C).
TEST_F(AlgebraicSimplifierTest, DivOfBroadcastingPower) {
- Shape r0f32 = ShapeUtil::MakeShape(F32, {});
Shape r1f32 = ShapeUtil::MakeShape(F32, {7});
HloComputation::Builder builder(TestName());
HloInstruction* param0 = builder.AddInstruction(
@@ -467,7 +486,7 @@ TEST_F(AlgebraicSimplifierTest, DivOfBroadcastingPower) {
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r1f32, "param1"));
HloInstruction* param2 = builder.AddInstruction(
- HloInstruction::CreateParameter(2, r0f32, "param2"));
+ HloInstruction::CreateParameter(2, r1f32, "param2"));
HloInstruction* power = builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kPower, param1, param2));
builder.AddInstruction(
@@ -484,14 +503,9 @@ TEST_F(AlgebraicSimplifierTest, DivOfBroadcastingPower) {
ASSERT_THAT(computation->root_instruction(),
op::Multiply(param0, op::Power(param1, op::Negate(param2))));
-
- const HloInstruction* negate =
- computation->root_instruction()->operand(1)->operand(1);
- const Shape& negate_shape = negate->shape();
- EXPECT_EQ(0, negate_shape.dimensions_size());
}
-// A / Const => A * (1 / Const)
+// A / Const => A * InvertedConst
TEST_F(AlgebraicSimplifierTest, DivideByConstant) {
Shape r1f32 = ShapeUtil::MakeShape(F32, {3});
HloComputation::Builder builder(TestName());
@@ -499,7 +513,7 @@ TEST_F(AlgebraicSimplifierTest, DivideByConstant) {
HloInstruction::CreateParameter(0, r1f32, "param0"));
HloInstruction* constant =
builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({0.f, 1.f, 2.f})));
+ LiteralUtil::CreateR1<float>({0.f, 1.f, 2.f})));
builder.AddInstruction(HloInstruction::CreateBinary(r1f32, HloOpcode::kDivide,
param0, constant));
@@ -510,20 +524,19 @@ TEST_F(AlgebraicSimplifierTest, DivideByConstant) {
ASSERT_TRUE(simplifier.Run(&module()).ValueOrDie());
EXPECT_THAT(computation->root_instruction(),
- op::Multiply(param0, op::Divide(op::Constant(), constant)));
+ op::Multiply(param0, op::Constant()));
}
// pow(pow(A, X), Y) => pow(A, X*Y)
TEST_F(AlgebraicSimplifierTest, PowerOfPower) {
- Shape r0f32 = ShapeUtil::MakeShape(F32, {});
Shape r1f32 = ShapeUtil::MakeShape(F32, {7});
HloComputation::Builder builder(TestName());
HloInstruction* base = builder.AddInstruction(
HloInstruction::CreateParameter(0, r1f32, "param0"));
HloInstruction* exp1 = builder.AddInstruction(
- HloInstruction::CreateParameter(1, r0f32, "param1"));
+ HloInstruction::CreateParameter(1, r1f32, "param1"));
HloInstruction* exp2 = builder.AddInstruction(
- HloInstruction::CreateParameter(2, r0f32, "param2"));
+ HloInstruction::CreateParameter(2, r1f32, "param2"));
HloInstruction* inner_power = builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kPower, base, exp1));
builder.AddInstruction(HloInstruction::CreateBinary(r1f32, HloOpcode::kPower,
@@ -540,15 +553,14 @@ TEST_F(AlgebraicSimplifierTest, PowerOfPower) {
// Don't simplify pow(pow(A, X), Y) => pow(A, X*Y) if X and Y are complex
// numbers.
TEST_F(AlgebraicSimplifierTest, PowerOfPowerComplex) {
- Shape r0c64 = ShapeUtil::MakeShape(C64, {});
Shape r1c64 = ShapeUtil::MakeShape(C64, {7});
HloComputation::Builder builder(TestName());
HloInstruction* base = builder.AddInstruction(
HloInstruction::CreateParameter(0, r1c64, "param0"));
HloInstruction* exp1 = builder.AddInstruction(
- HloInstruction::CreateParameter(1, r0c64, "param1"));
+ HloInstruction::CreateParameter(1, r1c64, "param1"));
HloInstruction* exp2 = builder.AddInstruction(
- HloInstruction::CreateParameter(2, r0c64, "param2"));
+ HloInstruction::CreateParameter(2, r1c64, "param2"));
HloInstruction* inner_power = builder.AddInstruction(
HloInstruction::CreateBinary(r1c64, HloOpcode::kPower, base, exp1));
builder.AddInstruction(HloInstruction::CreateBinary(r1c64, HloOpcode::kPower,
@@ -567,7 +579,7 @@ TEST_F(AlgebraicSimplifierTest, DivOneScalar) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param0"));
HloInstruction* one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
HloInstruction* div = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kDivide, param0, one));
@@ -588,7 +600,7 @@ TEST_F(AlgebraicSimplifierTest, DivOneArray) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r2f32, "param0"));
HloInstruction* one = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 1.0}, {1.0, 1.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 1.0}, {1.0, 1.0}})));
HloInstruction* div = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kDivide, param0, one));
@@ -868,7 +880,7 @@ TEST_F(AlgebraicSimplifierTest, Pow0Scalar) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param0"));
HloInstruction* zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kPower, param0, zero));
@@ -892,7 +904,7 @@ TEST_F(AlgebraicSimplifierTest, Pow0Vector) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r1f32, "param0"));
HloInstruction* zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kPower, param0, zero));
@@ -920,7 +932,7 @@ TEST_F(AlgebraicSimplifierTest, Pow1) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param0"));
HloInstruction* one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1)));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kPower, param0, one));
@@ -942,7 +954,7 @@ TEST_F(AlgebraicSimplifierTest, Pow2) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param0"));
HloInstruction* two = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2)));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kPower, param0, two));
@@ -964,7 +976,7 @@ TEST_F(AlgebraicSimplifierTest, PowNegative1) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param0"));
HloInstruction* negative_one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(-1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(-1)));
builder.AddInstruction(HloInstruction::CreateBinary(r0f32, HloOpcode::kPower,
param0, negative_one));
@@ -1055,7 +1067,7 @@ TEST_F(AlgebraicSimplifierTest, ZeroSizedReduceWindow) {
builder.AddInstruction(HloInstruction::CreateReduceWindow(
ShapeUtil::MakeShape(F32, {5, 2}), param,
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))),
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))),
window, add_computation));
module().AddEntryComputation(builder.Build());
HloPassFix<AlgebraicSimplifier> simplifier(/*is_layout_sensitive=*/false,
@@ -1082,7 +1094,7 @@ TEST_F(AlgebraicSimplifierTest, ZeroSizedPad) {
builder.AddInstruction(HloInstruction::CreatePad(
ShapeUtil::MakeShape(F32, {5, 2}), param,
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(0.0f))),
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f))),
padding));
module().AddEntryComputation(builder.Build());
EXPECT_THAT(module().entry_computation()->root_instruction(),
@@ -1124,7 +1136,7 @@ TEST_F(AlgebraicSimplifierTest, ReshapeBroadcast) {
TEST_F(AlgebraicSimplifierTest, ConvertBetweenSameType) {
HloComputation::Builder builder(TestName());
HloInstruction* input = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(
HloInstruction::CreateConvert(ShapeUtil::MakeShape(F32, {}), input));
@@ -1216,7 +1228,7 @@ TEST_F(AlgebraicSimplifierTest, RemoveEmptyConcatenateOperands) {
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r1f32, "param1"));
HloInstruction* empty_literal = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({})));
HloInstruction* empty_slice =
builder.AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(F32, {0}), param1, {42}, {42}, {1}));
@@ -1246,7 +1258,7 @@ TEST_F(AlgebraicSimplifierTest, OnlyEmptyConcatenateOperands) {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r1f32, "param0"));
HloInstruction* empty_literal = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({})));
HloInstruction* empty_slice =
builder.AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(F32, {0}), param0, {42}, {42}, {1}));
@@ -1416,33 +1428,6 @@ TEST_F(AlgebraicSimplifierTest, ReshapeReplacedWithBitcast) {
op::Tuple(op::Bitcast(), dimensions_wrong_reshape, layout_wrong_reshape));
}
-// Regression test for a bug in the reshape sinking transformation, where
-// moving a reshape to a scalar led to a crash.
-TEST_F(AlgebraicSimplifierTest, ReshapeToScalarNotHoistedAfterEffectiveUnary) {
- HloComputation::Builder builder(TestName());
- HloInstruction* param =
- builder.AddInstruction(HloInstruction::CreateParameter(
- 0, ShapeUtil::MakeShape(F32, {1, 1}), "param"));
- HloInstruction* reshape = builder.AddInstruction(
- HloInstruction::CreateReshape(ShapeUtil::MakeShape(F32, {}), param));
- HloInstruction* zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({1., 2., 3.})));
- builder.AddInstruction(HloInstruction::CreateBinary(
- ShapeUtil::MakeShape(F32, {3}), HloOpcode::kMaximum, reshape, zero));
- auto computation = module().AddEntryComputation(builder.Build());
-
- EXPECT_THAT(computation->root_instruction(),
- op::Maximum(op::Reshape(param), zero));
-
- AlgebraicSimplifier simplifier(/*is_layout_sensitive=*/false,
- bitcasting_callback());
-
- simplifier.Run(&module()).ValueOrDie();
-
- EXPECT_THAT(computation->root_instruction(),
- op::Maximum(op::Reshape(param), zero));
-}
-
// Regression test for a bug where if we failed to sink a reshape, we'd set the
// 'changed' bit in AlgebraicSimplifier to false.
TEST_F(AlgebraicSimplifierTest, FailureToSinkReshapeDoesntAffectChangedBit) {
@@ -1455,7 +1440,7 @@ TEST_F(AlgebraicSimplifierTest, FailureToSinkReshapeDoesntAffectChangedBit) {
builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0")),
builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{0, 0}, {0, 0}})))));
+ LiteralUtil::CreateR2<float>({{0, 0}, {0, 0}})))));
builder.AddInstruction(
HloInstruction::CreateReshape(ShapeUtil::MakeShape(F32, {4}), add));
@@ -1478,7 +1463,7 @@ TEST_F(AlgebraicSimplifierTest, FailureToSinkBroadcastDoesntAffectChangedBit) {
builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0")),
builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{0, 0}, {0, 0}})))));
+ LiteralUtil::CreateR2<float>({{0, 0}, {0, 0}})))));
builder.AddInstruction(
HloInstruction::CreateBroadcast(ShapeUtil::MakeShape(F32, {2, 2, 2}), add,
@@ -1761,7 +1746,7 @@ TEST_F(AlgebraicSimplifierTest, RemoveNoopPad) {
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 2}), "param"));
HloInstruction* zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
PaddingConfig no_padding;
for (int i = 0; i < 2; ++i) {
auto dimension = no_padding.add_dimensions();
@@ -1792,7 +1777,7 @@ TEST_F(AlgebraicSimplifierTest, NegativePadding) {
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {10, 10}), "param"));
HloInstruction* zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
PaddingConfig padding;
int64 low_padding[2] = {-1, -2};
int64 high_padding[2] = {2, -3};
@@ -2103,160 +2088,6 @@ TEST_F(AlgebraicSimplifierTest, ConvertConvToMatmul) {
EXPECT_EQ("NO_CHANGE", build_and_simplify());
}
-// Test that max(min(A, x), y) is transformed to clamp(y, A, x)
-TEST_F(AlgebraicSimplifierTest, MaxMinToClamp) {
- Shape r0f32 = ShapeUtil::MakeShape(F32, {});
- HloComputation::Builder builder(TestName());
- HloInstruction* param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(0, r0f32, "param0"));
- HloInstruction* min_value = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
- HloInstruction* max_value = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f)));
- HloInstruction* min = builder.AddInstruction(HloInstruction::CreateBinary(
- r0f32, HloOpcode::kMinimum, param0, min_value));
- builder.AddInstruction(
- HloInstruction::CreateBinary(r0f32, HloOpcode::kMaximum, min, max_value));
-
- auto module = CreateNewModule();
- auto computation = module->AddEntryComputation(builder.Build());
-
- EXPECT_THAT(computation->root_instruction(),
- op::Maximum(op::Minimum(param0, min_value), max_value));
-
- AlgebraicSimplifier simplifier(/*is_layout_sensitive=*/false,
- non_bitcasting_callback());
- ASSERT_TRUE(simplifier.Run(module).ValueOrDie());
-
- EXPECT_THAT(computation->root_instruction(),
- op::Clamp(max_value, param0, min_value));
-}
-
-// Test that min(max(A, x), y) is transformed to clamp(x, A, y) for scalar
-// values.
-TEST_F(AlgebraicSimplifierTest, MinMaxToClamp) {
- Shape r0f32 = ShapeUtil::MakeShape(F32, {});
- HloComputation::Builder builder(TestName());
- HloInstruction* param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(0, r0f32, "param0"));
- HloInstruction* min_value = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
- HloInstruction* max_value = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f)));
- HloInstruction* max = builder.AddInstruction(HloInstruction::CreateBinary(
- r0f32, HloOpcode::kMaximum, param0, max_value));
- builder.AddInstruction(
- HloInstruction::CreateBinary(r0f32, HloOpcode::kMinimum, max, min_value));
-
- auto module = CreateNewModule();
- auto computation = module->AddEntryComputation(builder.Build());
-
- EXPECT_THAT(computation->root_instruction(),
- op::Minimum(op::Maximum(param0, max_value), min_value));
-
- AlgebraicSimplifier simplifier(/*is_layout_sensitive=*/false,
- non_bitcasting_callback());
- ASSERT_TRUE(simplifier.Run(module).ValueOrDie());
-
- EXPECT_THAT(computation->root_instruction(),
- op::Clamp(max_value, param0, min_value));
-}
-
-// Test that min(max(A, x), y) is transformed to clamp(x, A, y) for
-// broadcasted scalar values.
-TEST_F(AlgebraicSimplifierTest, MinMaxWithBroadcastToClamp) {
- Shape r0f32 = ShapeUtil::MakeShape(F32, {});
- Shape r1f32 = ShapeUtil::MakeShape(F32, {100});
- HloComputation::Builder builder(TestName());
- HloInstruction* param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(0, r1f32, "param0"));
- HloInstruction* min_value = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
- HloInstruction* max_value = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f)));
- HloInstruction* max = builder.AddInstruction(HloInstruction::CreateBinary(
- r1f32, HloOpcode::kMaximum, param0, max_value));
- builder.AddInstruction(
- HloInstruction::CreateBinary(r1f32, HloOpcode::kMinimum, max, min_value));
-
- auto module = CreateNewModule();
- auto computation = module->AddEntryComputation(builder.Build());
-
- EXPECT_THAT(computation->root_instruction(),
- op::Minimum(op::Maximum(param0, max_value), min_value));
-
- AlgebraicSimplifier simplifier(/*is_layout_sensitive=*/false,
- non_bitcasting_callback());
- ASSERT_TRUE(simplifier.Run(module).ValueOrDie());
-
- EXPECT_THAT(computation->root_instruction(),
- op::Clamp(max_value, param0, min_value));
-}
-
-// Test that min(max(A, non-constant1), non-constant2) is not canonicalized to
-// clamp(non-constant1, A, non-constant2)
-TEST_F(AlgebraicSimplifierTest, MinMaxNotToClamp) {
- Shape r0f32 = ShapeUtil::MakeShape(F32, {});
- HloComputation::Builder builder(TestName());
- HloInstruction* param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(0, r0f32, "param0"));
- HloInstruction* min_value = builder.AddInstruction(
- HloInstruction::CreateParameter(1, r0f32, "param1"));
- HloInstruction* max_value = builder.AddInstruction(
- HloInstruction::CreateParameter(2, r0f32, "param2"));
- HloInstruction* max = builder.AddInstruction(HloInstruction::CreateBinary(
- r0f32, HloOpcode::kMaximum, param0, max_value));
- builder.AddInstruction(
- HloInstruction::CreateBinary(r0f32, HloOpcode::kMinimum, max, min_value));
-
- auto module = CreateNewModule();
- auto computation = module->AddEntryComputation(builder.Build());
-
- EXPECT_THAT(computation->root_instruction(),
- op::Minimum(op::Maximum(param0, max_value), min_value));
-
- AlgebraicSimplifier simplifier(/*is_layout_sensitive=*/false,
- non_bitcasting_callback());
- EXPECT_FALSE(simplifier.Run(module).ValueOrDie());
-
- EXPECT_THAT(computation->root_instruction(),
- op::Minimum(op::Maximum(param0, max_value), min_value));
-}
-
-// Test that min(f(max(A, constant1)), constant2) is not transformed to
-// clamp(constant1, A, constant2)
-TEST_F(AlgebraicSimplifierTest, MinEquationWithMaxNotToClamp) {
- Shape r0f32 = ShapeUtil::MakeShape(F32, {});
- HloComputation::Builder builder(TestName());
- HloInstruction* param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(0, r0f32, "param0"));
- HloInstruction* min_value = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
- HloInstruction* max_value = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f)));
- HloInstruction* max = builder.AddInstruction(HloInstruction::CreateBinary(
- r0f32, HloOpcode::kMaximum, param0, max_value));
- HloInstruction* fmax = builder.AddInstruction(
- HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, max, max_value));
- builder.AddInstruction(HloInstruction::CreateBinary(
- r0f32, HloOpcode::kMinimum, fmax, min_value));
-
- auto module = CreateNewModule();
- auto computation = module->AddEntryComputation(builder.Build());
-
- EXPECT_THAT(computation->root_instruction(),
- op::Minimum(op::Add(op::Maximum(param0, max_value), max_value),
- min_value));
-
- AlgebraicSimplifier simplifier(/*is_layout_sensitive=*/false,
- non_bitcasting_callback());
- EXPECT_FALSE(simplifier.Run(module).ValueOrDie());
-
- EXPECT_THAT(computation->root_instruction(),
- op::Minimum(op::Add(op::Maximum(param0, max_value), max_value),
- min_value));
-}
-
// Test that slice(broadcast(/*scalar value*/)) simplifies to a single
// broadcast.
TEST_F(AlgebraicSimplifierTest, ScalarBroadcastToSlice) {
@@ -2298,7 +2129,7 @@ TEST_F(AlgebraicSimplifierTest, ScalarBroadcastToSlice) {
TEST_F(AlgebraicSimplifierTest, ScalarBroadcastToTransposeReshape) {
HloComputation::Builder builder(TestName());
HloInstruction* forty_two = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
Shape broadcast_shape = ShapeUtil::MakeShape(F32, {4, 5, 6});
HloInstruction* broadcast = builder.AddInstruction(
@@ -2345,7 +2176,7 @@ TEST_F(AlgebraicSimplifierTest, FoldPadIntoReduceWindow) {
padding.mutable_dimensions(3)->set_edge_padding_high(2);
HloInstruction* pad_value = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(5.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(5.0f)));
HloInstruction* pad = builder.AddInstruction(HloInstruction::CreatePad(
ShapeUtil::MakeShape(F32, {1, 3, 3, 5}), operand, pad_value, padding));
@@ -2376,7 +2207,7 @@ TEST_F(AlgebraicSimplifierTest, FoldPadIntoReduceWindow) {
const Shape reduce_window_shape =
ShapeUtil::MakeShape(F32, {111, 113, 113, 115});
HloInstruction* reduce_init_value = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(5.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(5.0f)));
HloInstruction* reduce_window =
builder.AddInstruction(HloInstruction::CreateReduceWindow(
reduce_window_shape, pad, reduce_init_value, window,
@@ -2427,7 +2258,7 @@ TEST_F(AlgebraicSimplifierTest, FoldConvertedPadIntoReduceWindow) {
padding.mutable_dimensions(3)->set_edge_padding_high(2);
HloInstruction* pad_value = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(5.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(5.0f)));
HloInstruction* pad = builder.AddInstruction(HloInstruction::CreatePad(
ShapeUtil::MakeShape(BF16, {1, 3, 3, 5}), parameter, pad_value, padding));
@@ -2462,7 +2293,7 @@ TEST_F(AlgebraicSimplifierTest, FoldConvertedPadIntoReduceWindow) {
const Shape reduce_window_shape =
ShapeUtil::MakeShape(F32, {111, 113, 113, 115});
HloInstruction* reduce_init_value = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(5.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(5.0f)));
HloInstruction* reduce_window =
builder.AddInstruction(HloInstruction::CreateReduceWindow(
reduce_window_shape, convert, reduce_init_value, window,
@@ -2533,9 +2364,9 @@ TEST_F(AlgebraicSimplifierTest, IteratorInvalidation) {
HloComputation::Builder call_builder(TestName() + ".Call");
HloInstruction* zero = call_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({0.0f})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({0.0f})));
HloInstruction* one = call_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({1.0f})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1.0f})));
call_builder.AddInstruction(
HloInstruction::CreateCall(r1f32, {zero, one}, dot_computation.get()));
@@ -2551,9 +2382,9 @@ TEST_F(AlgebraicSimplifierTest, ConstantTupleBecomesTupleOfConstants) {
HloComputation::Builder builder(TestName());
const float constant_scalar = 7.3f;
std::initializer_list<float> constant_vector = {1.1f, 2.0f, 3.3f};
- std::unique_ptr<Literal> value =
- Literal::MakeTuple({Literal::CreateR0<float>(constant_scalar).get(),
- Literal::CreateR1<float>(constant_vector).get()});
+ std::unique_ptr<Literal> value = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<float>(constant_scalar).get(),
+ LiteralUtil::CreateR1<float>(constant_vector).get()});
builder.AddInstruction(HloInstruction::CreateConstant(std::move(value)));
auto computation = module().AddEntryComputation(builder.Build());
@@ -2576,8 +2407,8 @@ TEST_F(AlgebraicSimplifierTest, TrivialDynamicSlice) {
shape,
builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "slice_from")),
- builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int>({0, 0, 0}))),
+ builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<int>({0, 0, 0}))),
/*slice_sizes=*/{10, 100, 1000}));
auto computation = module().AddEntryComputation(builder.Build());
@@ -2610,8 +2441,8 @@ TEST_F(AlgebraicSimplifierTest, TrivialDynamicUpdateSlice) {
builder.AddInstruction(
HloInstruction::CreateParameter(2, slice_shape, "to_update")),
slice,
- builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int>({0, 0, 0})))));
+ builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<int>({0, 0, 0})))));
auto computation = module().AddEntryComputation(builder.Build());
AlgebraicSimplifier simplifier(/*is_layout_sensitive=*/false,
@@ -2626,7 +2457,7 @@ TEST_F(AlgebraicSimplifierTest, MergeBroadcasts) {
HloComputation::Builder builder(TestName());
Shape r2f32 = ShapeUtil::MakeShape(F32, {2, 2});
HloInstruction* input_array = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({3, 4})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({3, 4})));
HloInstruction* inner_bcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(r2f32, input_array, {1}));
Shape r3f32 = ShapeUtil::MakeShape(F32, {2, 2, 2});
@@ -2735,7 +2566,7 @@ TEST_P(PadReduceWindowEffectiveBroadcastTest, DoIt) {
HloInstruction* pad = builder.AddInstruction(HloInstruction::CreatePad(
pad_shape, input,
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(0.0f))),
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f))),
padding));
HloComputation* add_computation = nullptr;
@@ -2754,7 +2585,7 @@ TEST_P(PadReduceWindowEffectiveBroadcastTest, DoIt) {
Window window = window_util::MakeWindow(
decorate_spatials(param.reduce_window_spatials, 1, 1));
auto zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape,
ShapeInference::InferReduceWindowShape(
pad->shape(), zero->shape(), window,
@@ -2893,7 +2724,7 @@ TEST_P(DotOfConcatSimplificationTest, ConstantLHS) {
Shape lhs_shape = ShapeUtil::MakeShape(F32, {spec.m, spec.k});
auto* lhs = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2F32Linspace(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace(
/*from=*/10.0, /*to=*/10000.0, /*rows=*/spec.m, /*cols=*/spec.k)));
Shape rhs0_shape = ShapeUtil::MakeShape(F32, {k0, spec.n});
@@ -2972,7 +2803,7 @@ TEST_P(DotOfConcatSimplificationTest, ConstantRHS) {
Shape rhs_shape = ShapeUtil::MakeShape(F32, {spec.k, spec.n});
auto* rhs = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2F32Linspace(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace(
/*from=*/10.0, /*to=*/10000.0, /*rows=*/spec.k, /*cols=*/spec.n)));
DotDimensionNumbers dot_dnums;
@@ -3019,7 +2850,7 @@ TEST_F(AlgebraicSimplifierTest, DynamicUpdateSliceZeroUpdate) {
HloInstruction* const update = builder.AddInstruction(
HloInstruction::CreateParameter(1, update_shape, "update"));
HloInstruction* const start_indices = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int>({0})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int>({0})));
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
dslice_shape, operand, update, start_indices));
const HloComputation* const computation =
@@ -3068,7 +2899,7 @@ TEST_P(DotOfGatherSimplificationTest, ConstantRHS) {
int64 lhs_cols = (spec.lcd == 0) ? spec.m : (spec.k + k_increase);
Shape lhs_shape = ShapeUtil::MakeShape(F32, {lhs_rows, lhs_cols});
auto* lhs = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2F32Linspace(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace(
/*from=*/10.0, /*to=*/10000.0, /*rows=*/lhs_rows,
/*cols=*/lhs_cols)));
@@ -3076,7 +2907,7 @@ TEST_P(DotOfGatherSimplificationTest, ConstantRHS) {
int32 start_col = (spec.lcd == 0) ? spec.s : 0;
const auto start_indices =
builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<int32>({start_row, start_col})));
+ LiteralUtil::CreateR1<int32>({start_row, start_col})));
int64 slice_row_size = (spec.lcd == 0) ? spec.k : 1;
int64 slice_col_size = (spec.lcd == 0) ? 1 : spec.k;
Shape ds_shape = ShapeUtil::MakeShape(F32, {slice_row_size, slice_col_size});
@@ -3087,7 +2918,7 @@ TEST_P(DotOfGatherSimplificationTest, ConstantRHS) {
int64 rhs_cols = (spec.rcd == 0) ? spec.n : spec.k;
Shape rhs_shape = ShapeUtil::MakeShape(F32, {rhs_rows, rhs_cols});
auto* rhs = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2F32Linspace(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace(
/*from=*/10.0, /*to=*/10000.0, /*rows=*/rhs_rows,
/*cols=*/rhs_cols)));
@@ -3135,7 +2966,7 @@ TEST_P(DotOfGatherSimplificationTest, ConstantLHS) {
int64 lhs_cols = (spec.lcd == 0) ? spec.m : spec.k;
Shape lhs_shape = ShapeUtil::MakeShape(F32, {lhs_rows, lhs_cols});
auto* lhs = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2F32Linspace(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace(
/*from=*/10.0, /*to=*/10000.0, /*rows=*/lhs_rows,
/*cols=*/lhs_cols)));
@@ -3146,7 +2977,7 @@ TEST_P(DotOfGatherSimplificationTest, ConstantLHS) {
int64 rhs_cols = (spec.rcd == 0) ? spec.n : (spec.k + k_increase);
Shape rhs_shape = ShapeUtil::MakeShape(F32, {rhs_rows, rhs_cols});
auto* rhs = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2F32Linspace(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace(
/*from=*/10.0, /*to=*/10000.0, /*rows=*/rhs_rows,
/*cols=*/rhs_cols)));
@@ -3154,7 +2985,7 @@ TEST_P(DotOfGatherSimplificationTest, ConstantLHS) {
int32 start_col = (spec.rcd == 0) ? spec.s : 0;
const auto start_indices =
builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<int32>({start_row, start_col})));
+ LiteralUtil::CreateR1<int32>({start_row, start_col})));
int64 slice_row_size = (spec.rcd == 0) ? spec.k : 1;
int64 slice_col_size = (spec.rcd == 0) ? 1 : spec.k;
Shape ds_shape = ShapeUtil::MakeShape(F32, {slice_row_size, slice_col_size});
diff --git a/tensorflow/compiler/xla/service/batchnorm_expander.cc b/tensorflow/compiler/xla/service/batchnorm_expander.cc
index ec13fadbc7..c4cd60c120 100644
--- a/tensorflow/compiler/xla/service/batchnorm_expander.cc
+++ b/tensorflow/compiler/xla/service/batchnorm_expander.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include <utility>
#include <vector>
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
@@ -34,6 +35,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
+#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
@@ -41,6 +43,8 @@ namespace xla {
namespace {
+using tensorflow::gtl::optional;
+
// BatchNormExpanderVisitor traverses the HLO computation and rewrites BatchNorm
// operations into smaller operations.
class BatchNormExpanderVisitor : public DfsHloVisitorWithDefault {
@@ -97,7 +101,7 @@ class BatchNormExpanderVisitor : public DfsHloVisitorWithDefault {
add_instruction(HloInstruction::CreateConvert(
ShapeUtil::MakeShape(operand->shape().element_type(), {}),
add_instruction(HloInstruction::CreateConstant(
- Literal::CreateR0<float>(-0.5f))))),
+ LiteralUtil::CreateR0<float>(-0.5f))))),
{}));
return HloInstruction::CreateBinary(operand->shape(), HloOpcode::kPower,
operand, exponent);
@@ -113,7 +117,7 @@ class BatchNormExpanderVisitor : public DfsHloVisitorWithDefault {
add_instruction(HloInstruction::CreateConvert(
ShapeUtil::MakeShape(operand->shape().element_type(), {}),
add_instruction(HloInstruction::CreateConstant(
- Literal::CreateR0<float>(1.0 / element_count))))),
+ LiteralUtil::CreateR0<float>(1.0 / element_count))))),
{}));
return HloInstruction::CreateBinary(operand->shape(), HloOpcode::kMultiply,
operand, elem_count_recip);
@@ -200,11 +204,11 @@ Status BatchNormExpanderVisitor::HandleBatchNormTraining(
HloInstruction* offset = batch_norm->mutable_operand(2);
const Shape feature_shape = scale->shape();
- auto zero_literal = Literal::CreateR0(0.0f);
+ auto zero_literal = LiteralUtil::CreateR0(0.0f);
TF_ASSIGN_OR_RETURN(zero_literal, zero_literal->Convert(ptype));
auto zero = add(HloInstruction::CreateConstant(std::move(zero_literal)));
- auto epsilon_literal = Literal::CreateR0(batch_norm->epsilon());
+ auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal->Convert(ptype));
auto epsilon = add(HloInstruction::CreateBroadcast(
operand_shape,
@@ -288,16 +292,22 @@ Status BatchNormExpanderVisitor::HandleBatchNormTraining(
int64 instruction_count_after = computation_->instruction_count();
CHECK_EQ(instruction_count_after,
instruction_count_before + added_instructions.size());
+ const HloSharding& sharding = batch_norm->sharding();
HloSharding operand_sharding =
- batch_norm->sharding().GetAsShapeTree(batch_norm->shape()).element({0});
+ sharding.GetAsShapeTree(batch_norm->shape()).element({0});
+ optional<int64> unique_device = batch_norm->sharding_unique_device();
+ HloSharding default_sharding =
+ unique_device.has_value()
+ ? HloSharding::AssignDevice(unique_device.value())
+ : HloSharding::Replicate();
for (HloInstruction* inst : added_instructions) {
if (ShapeUtil::Equal(inst->shape(), operand_shape)) {
inst->set_sharding(operand_sharding);
} else {
- inst->set_sharding(HloSharding::Replicate());
+ inst->set_sharding(default_sharding);
}
}
- tuple->set_sharding(batch_norm->sharding());
+ tuple->set_sharding(sharding);
}
TF_CHECK_OK(ReplaceWithNewInstruction(batch_norm, std::move(tuple)));
return Status::OK();
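The sharding change above (repeated in the inference and grad handlers below) replaces the unconditional HloSharding::Replicate() default with a device assignment whenever the batch-norm op is pinned to a single device. A stand-in sketch of that decision, with simplified types in place of XLA's optional and HloSharding:

#include <cstdint>
#include <optional>

enum class Kind { kReplicated, kDevice };

struct Sharding {
  Kind kind;
  int64_t device;  // meaningful only when kind == kDevice
};

// Pin helper instructions to the op's unique device when it has one,
// otherwise fall back to replication.
Sharding DefaultShardingFor(std::optional<int64_t> unique_device) {
  if (unique_device.has_value()) {
    return {Kind::kDevice, *unique_device};
  }
  return {Kind::kReplicated, 0};
}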
@@ -320,7 +330,7 @@ Status BatchNormExpanderVisitor::HandleBatchNormInference(
HloInstruction* var = batch_norm->mutable_operand(4);
const Shape feature_shape = scale->shape();
- auto epsilon_literal = Literal::CreateR0(batch_norm->epsilon());
+ auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal->Convert(ptype));
auto epsilon = computation_->AddInstruction(HloInstruction::CreateBroadcast(
operand_shape,
@@ -388,14 +398,20 @@ Status BatchNormExpanderVisitor::HandleBatchNormInference(
CHECK_EQ(instruction_count_after,
instruction_count_before + added_instructions.size());
if (batch_norm->has_sharding()) {
+ const HloSharding& sharding = batch_norm->sharding();
+ optional<int64> unique_device = batch_norm->sharding_unique_device();
+ HloSharding default_sharding =
+ unique_device.has_value()
+ ? HloSharding::AssignDevice(unique_device.value())
+ : HloSharding::Replicate();
for (HloInstruction* inst : added_instructions) {
if (ShapeUtil::Equal(inst->shape(), operand_shape)) {
- inst->set_sharding(batch_norm->sharding());
+ inst->set_sharding(sharding);
} else {
- inst->set_sharding(HloSharding::Replicate());
+ inst->set_sharding(default_sharding);
}
}
- shifted_normalized->set_sharding(batch_norm->sharding());
+ shifted_normalized->set_sharding(sharding);
}
TF_CHECK_OK(
ReplaceWithNewInstruction(batch_norm, std::move(shifted_normalized)));
@@ -447,11 +463,11 @@ Status BatchNormExpanderVisitor::HandleBatchNormGrad(
const int64 feature_count = activation_shape.dimensions(feature_index);
const int64 elements_per_feature_int64 = size_in_elements / feature_count;
- auto zero_literal = Literal::CreateR0(0.0f);
+ auto zero_literal = LiteralUtil::CreateR0(0.0f);
TF_ASSIGN_OR_RETURN(zero_literal, zero_literal->Convert(ptype));
auto zero = add(HloInstruction::CreateConstant(std::move(zero_literal)));
- auto epsilon_literal = Literal::CreateR0(batch_norm->epsilon());
+ auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal->Convert(ptype));
auto epsilon_scalar =
add(HloInstruction::CreateConstant(std::move(epsilon_literal)));
@@ -542,7 +558,7 @@ Status BatchNormExpanderVisitor::HandleBatchNormGrad(
Mean(elements_per_feature_int64, scale_times_rsqrt_var_add_epsilon, add));
auto elements_per_feature_literal =
- Literal::CreateR0<float>(elements_per_feature_int64);
+ LiteralUtil::CreateR0<float>(elements_per_feature_int64);
TF_ASSIGN_OR_RETURN(elements_per_feature_literal,
elements_per_feature_literal->Convert(ptype));
auto elements_per_feature = add(
@@ -562,19 +578,25 @@ Status BatchNormExpanderVisitor::HandleBatchNormGrad(
auto tuple =
HloInstruction::CreateTuple({grad_activation, grad_scale, grad_beta});
if (batch_norm->has_sharding()) {
+ const HloSharding& sharding = batch_norm->sharding();
int64 instruction_count_after = computation_->instruction_count();
CHECK_EQ(instruction_count_after,
instruction_count_before + added_instructions.size());
HloSharding activation_sharding =
- batch_norm->sharding().GetAsShapeTree(batch_norm->shape()).element({0});
+ sharding.GetAsShapeTree(batch_norm->shape()).element({0});
+ auto unique_device = batch_norm->sharding_unique_device();
+ HloSharding default_sharding =
+ unique_device.has_value()
+ ? HloSharding::AssignDevice(unique_device.value())
+ : HloSharding::Replicate();
for (HloInstruction* inst : added_instructions) {
if (ShapeUtil::Equal(inst->shape(), activation_shape)) {
inst->set_sharding(activation_sharding);
} else {
- inst->set_sharding(HloSharding::Replicate());
+ inst->set_sharding(default_sharding);
}
}
- tuple->set_sharding(batch_norm->sharding());
+ tuple->set_sharding(sharding);
}
TF_CHECK_OK(ReplaceWithNewInstruction(batch_norm, std::move(tuple)));
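All three batch-norm handlers above now pick their fallback sharding the same way: if the original instruction was assigned to exactly one device, the helper instructions inherit that device; otherwise they are replicated. A minimal sketch of that selection against the HloSharding API used in this hunk (DefaultShardingFor is a hypothetical name, not part of the patch, and the surrounding XLA headers are assumed):

// Hypothetical helper mirroring the pattern repeated in the three handlers:
// prefer a maximal single-device sharding, fall back to replication.
HloSharding DefaultShardingFor(const HloInstruction* instr) {
  auto unique_device = instr->sharding_unique_device();
  return unique_device.has_value()
             ? HloSharding::AssignDevice(unique_device.value())
             : HloSharding::Replicate();
}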
diff --git a/tensorflow/compiler/xla/service/batchnorm_expander_test.cc b/tensorflow/compiler/xla/service/batchnorm_expander_test.cc
index aa36e64b07..32f785a70a 100644
--- a/tensorflow/compiler/xla/service/batchnorm_expander_test.cc
+++ b/tensorflow/compiler/xla/service/batchnorm_expander_test.cc
@@ -19,12 +19,13 @@ limitations under the License.
#include <utility>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
+#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/service/hlo_pass_fix.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
@@ -114,5 +115,33 @@ TEST_F(BatchNormExpanderTest, BatchNormGrad) {
EXPECT_EQ(root->opcode(), HloOpcode::kTuple);
}
+TEST_F(BatchNormExpanderTest, BatchNormTrainingSharding) {
+ const char* module_str = R"(
+HloModule module
+ENTRY entry {
+ %param.0 = f32[8,4] parameter(0)
+ %param.1 = f32[4] parameter(1)
+ %param.2 = f32[4] parameter(2)
+ ROOT %batch-norm-training = (f32[8,4], f32[4], f32[4])
+ batch-norm-training(f32[8,4] %param.0, f32[4] %param.1, f32[4] %param.2),
+ epsilon=0.001, feature_index=1, sharding={maximal device=1}
+})";
+
+ TF_ASSERT_OK_AND_ASSIGN(auto module, ParseHloString(module_str));
+ BatchNormExpander rewriter(/*rewrite_training_op=*/true,
+ /*rewrite_inference_op=*/true,
+ /*rewrite_grad_op=*/true);
+ ASSERT_TRUE(rewriter.Run(module.get()).ValueOrDie());
+
+ for (auto* instruction : module->entry_computation()->instructions()) {
+ if (instruction->opcode() == HloOpcode::kParameter) {
+ continue;
+ }
+ ASSERT_TRUE(instruction->has_sharding());
+ TF_ASSERT_OK_AND_ASSIGN(int device, instruction->sharding().UniqueDevice());
+ EXPECT_EQ(device, 1);
+ }
+}
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/bfloat16_propagation.cc b/tensorflow/compiler/xla/service/bfloat16_propagation.cc
index ee6b6f69b9..b21c83a07f 100644
--- a/tensorflow/compiler/xla/service/bfloat16_propagation.cc
+++ b/tensorflow/compiler/xla/service/bfloat16_propagation.cc
@@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/bfloat16_propagation.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_dce.h"
@@ -85,9 +85,9 @@ void BFloat16Propagation::RevertIfFusionInternalBF16Changes(
auto root_changes_it = changes_to_bf16_.find(root);
if (root_changes_it != changes_to_bf16_.end()) {
- for (const auto& index : root_changes_it->second) {
+ for (const auto& entry : root_changes_it->second) {
for (const HloValue* value :
- dataflow_->GetValueSet(root, index).values()) {
+ dataflow_->GetValueSet(root, entry.second).values()) {
changed_root_buffers.insert(value);
}
}
@@ -615,7 +615,6 @@ Status BFloat16Propagation::ResolveInconsistentFusions(HloModule* module) {
// (1) a is F32 but tuple is BF16
// (2) after adding conversion
// (3) after tuple simplifier and DCE.
- bool needs_tuple_simplifier = false;
for (auto computation : module->MakeComputationPostOrder()) {
auto insts = computation->MakeInstructionPostOrder();
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
@@ -629,67 +628,25 @@ Status BFloat16Propagation::ResolveInconsistentFusions(HloModule* module) {
continue;
}
ShapeTree<HloInstruction*> converted_outputs(hlo->shape());
- // Iterate through nodes in the shape tree in pre-order and initialize
- // each non-root node with a corresponding get-tuple-element. For a leaf
- // node, if its shape does not match the fusion output, create a
- // conversion node to overwrite the node value.
- for (auto it = converted_outputs.begin(); it != converted_outputs.end();
- ++it) {
- ShapeIndex output_index = it->first;
- HloInstruction*& output = it->second;
- const Shape subshape =
- ShapeUtil::GetSubshape(hlo->shape(), output_index);
- if (output_index.empty()) {
- output = fusion_root;
- } else {
- ShapeIndex parent_index = output_index;
- parent_index.pop_back();
- output = fusion_computation->AddInstruction(
- HloInstruction::CreateGetTupleElement(
- subshape, converted_outputs.element(parent_index),
- output_index.back()));
- }
- if (!ShapeUtil::IsArray(subshape)) {
- continue;
- }
- if (!ShapeUtil::Compatible(
- subshape,
- ShapeUtil::GetSubshape(fusion_root->shape(), output_index))) {
- output = fusion_computation->AddInstruction(
- HloInstruction::CreateConvert(subshape, output));
- }
- }
- // Iterate through nodes in the shape tree in reverse pre-order and create
- // a tuple instruction for each non-leaf node where the elements are the
- // values of its child nodes.
- for (auto it = converted_outputs.rbegin(); it != converted_outputs.rend();
- ++it) {
- ShapeIndex output_index = it->first;
- HloInstruction*& output = it->second;
- const Shape& subshape =
- ShapeUtil::GetSubshape(hlo->shape(), output_index);
- if (!ShapeUtil::IsTuple(subshape)) {
- continue;
- }
- std::vector<HloInstruction*> elements(
- ShapeUtil::TupleElementCount(subshape));
- ShapeIndex child_index = output_index;
- for (int64 i = 0; i < elements.size(); ++i) {
- child_index.push_back(i);
- elements[i] = converted_outputs.element(child_index);
- child_index.pop_back();
- }
- output = fusion_computation->AddInstruction(
- HloInstruction::CreateTuple(elements));
- }
- fusion_computation->set_root_instruction(converted_outputs.element({}));
- needs_tuple_simplifier |= ShapeUtil::IsTuple(hlo->shape());
+ // Deep copy the fusion root, and convert a leaf node only if its shape
+ // does not match the fusion output.
+ TF_ASSIGN_OR_RETURN(
+ HloInstruction * copy,
+ fusion_computation->DeepCopyInstructionWithCustomCopier(
+ fusion_root,
+ [hlo](HloInstruction* leaf, const ShapeIndex& leaf_index,
+ HloComputation* comp) {
+ const Shape& hlo_subshape =
+ ShapeUtil::GetSubshape(hlo->shape(), leaf_index);
+ if (ShapeUtil::Compatible(leaf->shape(), hlo_subshape)) {
+ return leaf;
+ }
+ return comp->AddInstruction(
+ HloInstruction::CreateConvert(hlo_subshape, leaf));
+ }));
+ fusion_computation->set_root_instruction(copy);
}
}
- if (needs_tuple_simplifier) {
- TupleSimplifier tuple_simplifier;
- TF_RETURN_IF_ERROR(tuple_simplifier.Run(module).status());
- }
return Status::OK();
}
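The rewrite above replaces the two hand-written shape-tree walks with a single call: DeepCopyInstructionWithCustomCopier rebuilds the tuple structure of the fusion root and invokes the copier at each leaf, which either reuses the leaf or wraps it in a kConvert. A stripped-down sketch of the same pattern (ConvertLeavesToShape is a hypothetical name; the XLA headers used in this file are assumed):

// Deep-copies `root` inside `comp`, inserting a kConvert at every array leaf
// whose element type differs from the matching leaf of `target_shape`.
StatusOr<HloInstruction*> ConvertLeavesToShape(HloComputation* comp,
                                               HloInstruction* root,
                                               const Shape& target_shape) {
  return comp->DeepCopyInstructionWithCustomCopier(
      root, [&target_shape](HloInstruction* leaf, const ShapeIndex& leaf_index,
                            HloComputation* computation) {
        const Shape& subshape =
            ShapeUtil::GetSubshape(target_shape, leaf_index);
        if (ShapeUtil::Compatible(leaf->shape(), subshape)) {
          return leaf;  // Element type already matches; reuse the leaf.
        }
        return computation->AddInstruction(
            HloInstruction::CreateConvert(subshape, leaf));
      });
}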
@@ -758,10 +715,38 @@ StatusOr<bool> BFloat16Propagation::Run(HloModule* module) {
changes_to_bf16_.clear();
changed_ = false;
+ auto computations_topological_order = module->MakeComputationPostOrder();
+
+ // Before running the propagation pass, we insert copies (kConvert to the same
+ // type) of F32 inputs to while loops. This prevents other uses of the same
+ // input from aliasing the while loop input/output, so that there is a greater
+ // chance of using BF16 inside the loop. If some of these added copies do not
+ // help, they will remain F32 after BF16 propagation and will be removed since
+ // they are no-ops.
+ for (auto computation : computations_topological_order) {
+ for (auto inst : computation->MakeInstructionPostOrder()) {
+ if (inst->opcode() != HloOpcode::kWhile) {
+ continue;
+ }
+
+ auto operand = inst->mutable_operand(0);
+ TF_ASSIGN_OR_RETURN(
+ HloInstruction * copy,
+ computation->DeepCopyInstructionWithCustomCopier(
+ operand, [](HloInstruction* leaf, const ShapeIndex& leaf_index,
+ HloComputation* comp) {
+ if (leaf->shape().element_type() != F32) {
+ return leaf;
+ }
+ return comp->AddInstruction(
+ HloInstruction::CreateConvert(leaf->shape(), leaf));
+ }));
+ TF_RETURN_IF_ERROR(operand->ReplaceUseWith(inst, copy));
+ }
+ }
+
TF_ASSIGN_OR_RETURN(dataflow_, HloDataflowAnalysis::Run(*module));
- const auto& computations_topological_order =
- module->MakeComputationPostOrder();
// The first step is a forward pass (parameters to root), where we determine
// the potential candidate instructions to use bfloat16 in the outputs that
// are not likely to cause overhead from extra explicit conversions. This is
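The copies inserted for while-loop inputs above are kConvert instructions to the same element type, i.e. pure copies: they detach the loop operand from other users of the value so the loop can switch to BF16 independently. A sketch of the per-leaf copier for F32 leaves, matching the lambda in this hunk:

// Per-leaf copier for while-loop operands: F32 leaves get a same-type
// kConvert (a copy that breaks aliasing); other leaves pass through. If the
// copy's type never changes, it stays a no-op conversion, and the clean_up
// sequence below (SkipNoopConversions, TupleSimplifier, HloDCE) deletes it.
auto while_input_copier = [](HloInstruction* leaf, const ShapeIndex& /*index*/,
                             HloComputation* comp) {
  if (leaf->shape().element_type() != F32) {
    return leaf;
  }
  return comp->AddInstruction(
      HloInstruction::CreateConvert(leaf->shape(), leaf));
};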
@@ -802,39 +787,42 @@ StatusOr<bool> BFloat16Propagation::Run(HloModule* module) {
// Apply the changes in changes_to_bf16_.
for (auto& change : changes_to_bf16_) {
- auto shape = change.first->mutable_shape();
- for (const auto& index : change.second) {
- auto subshape = ShapeUtil::GetMutableSubshape(shape, index);
+ for (const auto& entry : change.second) {
+ auto subshape = entry.first;
CHECK_EQ(subshape->element_type(), F32);
subshape->set_element_type(BF16);
changed_ = true;
}
}
+ // Removes redundant HLOs added by this pass, either when inserting
+ // de-aliasing copies to while loop inputs, or later when converting output
+ // types.
+ auto clean_up = [this, module]() {
+ TF_RETURN_IF_ERROR(SkipNoopConversions(module));
+ TupleSimplifier tuple_simplifier;
+ TF_RETURN_IF_ERROR(tuple_simplifier.Run(module).status());
+ HloDCE dce;
+ TF_RETURN_IF_ERROR(dce.Run(module).status());
+ return Status::OK();
+ };
+
if (!changed_) {
+ TF_RETURN_IF_ERROR(clean_up());
return false;
}
TF_RETURN_IF_ERROR(ResolveInconsistentFusions(module));
TF_RETURN_IF_ERROR(ResolveConvertedConstants(module));
- // This pass could have turned an F32 -> BF16 conversion to a no-op (BF16 ->
- // BF16), so we skip them now.
- TF_RETURN_IF_ERROR(SkipNoopConversions(module));
-
- {
- // We may have dead HLOs after ResolveInconsistentFusions,
- // ResolveConvertedConstants and SkipNoopConversions.
- HloDCE dce;
- TF_RETURN_IF_ERROR(dce.Run(module).status());
- }
+ TF_RETURN_IF_ERROR(clean_up());
return true;
}
PrimitiveType BFloat16Propagation::OutputTypeAfterChange(
HloInstruction* hlo, const ShapeIndex& index) const {
- PrimitiveType type_on_hlo =
- ShapeUtil::GetSubshape(hlo->shape(), index).element_type();
+ Shape* subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index);
+ const PrimitiveType type_on_hlo = subshape->element_type();
if (type_on_hlo != F32) {
return type_on_hlo;
}
@@ -842,7 +830,7 @@ PrimitiveType BFloat16Propagation::OutputTypeAfterChange(
if (it == changes_to_bf16_.end()) {
return type_on_hlo;
}
- return ContainsKey(it->second, index) ? BF16 : F32;
+ return ContainsKey(it->second, subshape) ? BF16 : F32;
}
PrimitiveType BFloat16Propagation::ValueTypeAfterChange(
@@ -856,14 +844,16 @@ void BFloat16Propagation::AddToOrRemoveFromBF16ChangeSet(
HloInstruction* hlo, const ShapeIndex& index, PrimitiveType target_type) {
if (target_type == BF16) {
auto& entry = changes_to_bf16_[hlo];
- entry.insert(index);
+ entry.emplace(ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index),
+ index);
} else {
CHECK_EQ(target_type, F32);
auto it = changes_to_bf16_.find(hlo);
if (it == changes_to_bf16_.end()) {
return;
}
- it->second.erase(index);
+ it->second.erase(
+ ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index));
}
}
diff --git a/tensorflow/compiler/xla/service/bfloat16_propagation.h b/tensorflow/compiler/xla/service/bfloat16_propagation.h
index de0355ddfc..02b8cad089 100644
--- a/tensorflow/compiler/xla/service/bfloat16_propagation.h
+++ b/tensorflow/compiler/xla/service/bfloat16_propagation.h
@@ -194,17 +194,11 @@ class BFloat16Propagation : public HloPassInterface {
// are subject to further adjustment, then finally applied to the HLOs. This
// avoids setting changed_ to true but all changes are reverted during
// adjustment.
- struct IndexHasher {
- int64 operator()(const ShapeIndex& index) const {
- int64 hash = 0;
- for (int64 i : index) {
- hash = tensorflow::Hash64Combine(hash, std::hash<int64>()(i));
- }
- return hash;
- }
- };
+ //
+ // For each HloInstruction, changes_to_bf16_ records the affected output
+ // buffers as a map keyed by the in-place Shape pointer of each subshape,
+ // with the subshape's ShapeIndex as the value.
tensorflow::gtl::FlatMap<HloInstruction*,
- tensorflow::gtl::FlatSet<ShapeIndex, IndexHasher>>
+ tensorflow::gtl::FlatMap<Shape*, ShapeIndex>>
changes_to_bf16_;
// Whether the last processed HLO module has been changed by this pass.
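Keying each per-instruction change set by Shape* removes the custom ShapeIndex hasher and lets the apply step in Run() mutate the recorded subshape pointers directly instead of re-walking every instruction's shape. The invariant is that insertion and erasure compute the key identically; a sketch of that symmetry (MarkChange is a hypothetical wrapper over changes_to_bf16_):

// Hypothetical wrapper: both directions key on the in-place subshape pointer
// obtained from GetMutableSubshape, so an erase always finds its insert.
void MarkChange(HloInstruction* hlo, const ShapeIndex& index, bool to_bf16) {
  Shape* subshape =
      ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index);
  if (to_bf16) {
    changes_to_bf16_[hlo].emplace(subshape, index);
  } else {
    auto it = changes_to_bf16_.find(hlo);
    if (it != changes_to_bf16_.end()) {
      it->second.erase(subshape);
    }
  }
}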
diff --git a/tensorflow/compiler/xla/service/bfloat16_propagation_test.cc b/tensorflow/compiler/xla/service/bfloat16_propagation_test.cc
index e2ca689c06..aeafb25ad7 100644
--- a/tensorflow/compiler/xla/service/bfloat16_propagation_test.cc
+++ b/tensorflow/compiler/xla/service/bfloat16_propagation_test.cc
@@ -133,9 +133,9 @@ TEST_F(BFloat16PropagationTest, ConvertConstantLiteral) {
array_b.FillUnique(10.0f);
HloInstruction* a = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateFromArray(array_a)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateFromArray(array_a)));
HloInstruction* b = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateFromArray(array_b)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateFromArray(array_b)));
HloInstruction* dot = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kDot, a, b));
@@ -150,10 +150,10 @@ TEST_F(BFloat16PropagationTest, ConvertConstantLiteral) {
EXPECT_EQ(dot->operand(0)->opcode(), HloOpcode::kConstant);
EXPECT_EQ(dot->operand(1)->opcode(), HloOpcode::kConstant);
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::ConvertF32ToBF16(*Literal::CreateFromArray(array_a)),
+ *LiteralUtil::ConvertF32ToBF16(*LiteralUtil::CreateFromArray(array_a)),
dot->operand(0)->literal()));
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::ConvertF32ToBF16(*Literal::CreateFromArray(array_b)),
+ *LiteralUtil::ConvertF32ToBF16(*LiteralUtil::CreateFromArray(array_b)),
dot->operand(1)->literal()));
}
@@ -240,12 +240,10 @@ TEST_F(BFloat16PropagationTest, SameValueReferencedTwice) {
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
- EXPECT_TRUE(OutputsBF16(add0));
EXPECT_TRUE(OutputsBF16(add1));
EXPECT_TRUE(OutputsBF16(lhs));
- // rhs is a get-tuple-element, which does not define a buffer, but its shape
- // should also be adjusted accordingly.
- EXPECT_TRUE(OutputsBF16(rhs));
+
+ // add0 and rhs have been eliminated by simplification and DCE.
}
// Tests that a non-fusion computation's root should not be changed.
@@ -434,7 +432,7 @@ TEST_F(BFloat16PropagationTest, SelectOverTuples) {
HloInstruction* tuple1 =
builder.AddInstruction(HloInstruction::CreateTuple({param, add1}));
HloInstruction* sel = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple0->shape(), HloOpcode::kSelect, pred, tuple0, tuple1));
+ tuple0->shape(), HloOpcode::kTupleSelect, pred, tuple0, tuple1));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, sel, 0));
HloInstruction* gte1 = builder.AddInstruction(
@@ -734,10 +732,8 @@ TEST_F(BFloat16PropagationTest, NoopConversionRemoved) {
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), add2);
- EXPECT_EQ(add2->operand(0), gte0);
- EXPECT_EQ(add2->operand(1), gte1);
- EXPECT_EQ(gte0->shape().element_type(), BF16);
- EXPECT_EQ(gte1->shape().element_type(), BF16);
+ EXPECT_EQ(add2->operand(0), add0);
+ EXPECT_EQ(add2->operand(1), add1);
EXPECT_EQ(add0->shape().element_type(), BF16);
EXPECT_EQ(add1->shape().element_type(), BF16);
}
@@ -771,8 +767,14 @@ TEST_F(BFloat16PropagationTest, TupleDomain) {
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
-
EXPECT_EQ(computation->root_instruction(), root);
+
+ // Test that BF16 was propagated through the domain instruction.
+ EXPECT_EQ(ShapeUtil::GetTupleElementShape(domain->shape(), 0).element_type(),
+ BF16);
+ EXPECT_EQ(ShapeUtil::GetTupleElementShape(domain->shape(), 1).element_type(),
+ BF16);
+
EXPECT_TRUE(OutputsBF16(a_trans));
EXPECT_TRUE(OutputsBF16(b_trans));
EXPECT_TRUE(OutputsBF16(a_gte));
@@ -781,4 +783,44 @@ TEST_F(BFloat16PropagationTest, TupleDomain) {
EXPECT_FALSE(OutputsBF16(b));
}
+// Tests that bf16 is not propagated through a domain whose input cannot be
+// changed. Below, the input of the domain is the parameter tuple, which
+// cannot be converted, so the domain instruction is not converted either.
+TEST_F(BFloat16PropagationTest, TupleDomainNoPropagation) {
+ auto builder = HloComputation::Builder(TestName());
+ Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
+ Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
+
+ HloInstruction* param = builder.AddInstruction(
+ HloInstruction::CreateParameter(0, tuple_shape, "param"));
+ HloInstruction* domain = builder.AddInstruction(
+ HloInstruction::CreateDomain(param->shape(), param, nullptr, nullptr));
+ HloInstruction* a_gte = builder.AddInstruction(
+ HloInstruction::CreateGetTupleElement(shape, domain, 0));
+ HloInstruction* b_gte = builder.AddInstruction(
+ HloInstruction::CreateGetTupleElement(shape, domain, 1));
+ HloInstruction* a_trans = builder.AddInstruction(
+ HloInstruction::CreateTranspose(shape, a_gte, {0, 1}));
+ HloInstruction* b_trans = builder.AddInstruction(
+ HloInstruction::CreateTranspose(shape, b_gte, {0, 1}));
+ HloInstruction* dot = builder.AddInstruction(
+ HloInstruction::CreateBinary(shape, HloOpcode::kDot, a_trans, b_trans));
+ HloInstruction* root = builder.AddInstruction(
+ HloInstruction::CreateBinary(shape, HloOpcode::kAdd, dot, dot));
+
+ auto module = CreateNewModule();
+ auto computation = module->AddEntryComputation(builder.Build());
+
+ EXPECT_TRUE(PropagatePrecision(module.get()));
+
+ EXPECT_EQ(computation->root_instruction(), root);
+ EXPECT_TRUE(OutputsBF16(a_trans));
+ EXPECT_TRUE(OutputsBF16(b_trans));
+ EXPECT_FALSE(OutputsBF16(a_gte));
+ EXPECT_FALSE(OutputsBF16(b_gte));
+ EXPECT_FALSE(OutputsBF16(domain));
+ EXPECT_FALSE(OutputsBF16(param));
+}
+
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/bfloat16_support.cc b/tensorflow/compiler/xla/service/bfloat16_support.cc
index 8595afca7e..23645346e6 100644
--- a/tensorflow/compiler/xla/service/bfloat16_support.cc
+++ b/tensorflow/compiler/xla/service/bfloat16_support.cc
@@ -103,6 +103,7 @@ bool BFloat16Support::EffectiveOperandPrecisionIsOutputPrecision(
case HloOpcode::kDynamicUpdateSlice:
return operand_index == 0 || operand_index == 1;
case HloOpcode::kSelect:
+ case HloOpcode::kTupleSelect:
return operand_index == 1 || operand_index == 2;
default:
break;
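The one-line addition reflects how select forwards data: select(pred, on_true, on_false) passes one of the two value operands through unchanged, so their precision is the output precision, while the PRED-typed operand 0 never affects it. kTupleSelect behaves the same at tuple granularity. A small predicate capturing just that rule (illustrative only, not the pass's actual structure):

// Whether `operand_index` of a (tuple-)select flows to the output unchanged.
bool SelectOperandFlowsToOutput(HloOpcode opcode, int64 operand_index) {
  switch (opcode) {
    case HloOpcode::kSelect:
    case HloOpcode::kTupleSelect:
      return operand_index == 1 || operand_index == 2;
    default:
      return false;
  }
}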
diff --git a/tensorflow/compiler/xla/service/buffer_assignment.cc b/tensorflow/compiler/xla/service/buffer_assignment.cc
index afe4b2e142..783e3f7e73 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment.cc
@@ -1444,8 +1444,23 @@ void BufferAssigner::BuildColocatedBufferSets(
});
} else if (opcode == HloOpcode::kCall) {
const HloInstruction* call_hlo = instruction;
- const HloInstruction* root_hlo =
- call_hlo->to_apply()->root_instruction();
+ const HloComputation* callee = call_hlo->to_apply();
+ const HloInstruction* root_hlo = callee->root_instruction();
+ for (int64 i = 0; i < call_hlo->operand_count(); i++) {
+ const HloInstruction* call_param = callee->parameter_instruction(i);
+ const HloInstruction* call_operand = call_hlo->operand(i);
+ ShapeUtil::ForEachSubshape(
+ call_operand->shape(),
+ [&](const Shape& /*subshape*/, const ShapeIndex& index) {
+ std::vector<const LogicalBuffer*> colocated_set;
+ AddBufferToColocatedSet(call_param, index, points_to_analysis,
+ &colocated_set);
+ AddBufferToColocatedSet(call_operand, index, points_to_analysis,
+ &colocated_set);
+ AddSetToColocatedBufferSets(colocated_set,
+ colocated_buffer_sets);
+ });
+ }
ShapeUtil::ForEachSubshape(
call_hlo->shape(),
[this, call_hlo, root_hlo, &points_to_analysis,
diff --git a/tensorflow/compiler/xla/service/buffer_assignment_test.cc b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
index efa4696130..bfd20921e2 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include <utility>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/buffer_value.h"
#include "tensorflow/compiler/xla/service/call_graph.h"
@@ -125,7 +125,7 @@ class BufferAssignmentTest : public HloTestBase {
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
auto value = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param, value));
return builder.Build();
@@ -142,7 +142,7 @@ class BufferAssignmentTest : public HloTestBase {
const string& name) {
auto builder = HloComputation::Builder(name);
auto const4 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int>(4)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(4)));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v4_, "x"));
auto index = builder.AddInstruction(
@@ -167,9 +167,9 @@ class BufferAssignmentTest : public HloTestBase {
const string& name) {
auto builder = HloComputation::Builder(name);
auto const1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
auto constv = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
+ LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v4_, "x"));
auto indexc = builder.AddInstruction(
@@ -290,7 +290,7 @@ static bool BuffersDistinct(const std::vector<const HloInstruction*>& a,
TEST_F(BufferAssignmentTest, ScalarConstant) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto module = CreateNewModule();
module->AddEntryComputation(builder.Build());
@@ -304,9 +304,9 @@ TEST_F(BufferAssignmentTest, BufferForConst) {
// no buffers assigned, and their consumer has a buffer.
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
+ LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto const1 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({4.1f, 4.2f, 4.3f, 4.4f})));
+ LiteralUtil::CreateR1<float>({4.1f, 4.2f, 4.3f, 4.4f})));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, const0, const1));
auto module = CreateNewModule();
@@ -327,7 +327,7 @@ TEST_F(BufferAssignmentTest, HasAllocationAt) {
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto tuple = builder.AddInstruction(
@@ -352,7 +352,7 @@ TEST_F(BufferAssignmentTest, BufferForOutputConst) {
// This computation copies a constant to output.
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
+ LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(const0->shape(), HloOpcode::kCopy, const0));
auto module = CreateNewModule();
@@ -660,7 +660,7 @@ TEST_F(BufferAssignmentTest, CannotReuseInputBufferOfReduce) {
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(f32a100x10_, HloOpcode::kExp, exp1));
auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
/*shape=*/f32vec10_,
/*operand=*/exp2,
@@ -708,9 +708,9 @@ TEST_F(BufferAssignmentTest, ExampleWhile) {
// Creates the main kernel and verifies instruction counts.
auto builder = HloComputation::Builder(TestName());
auto const3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int>(0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0)));
auto const4 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
+ LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({const3, const4}));
auto while_op = builder.AddInstruction(HloInstruction::CreateWhile(
@@ -773,11 +773,11 @@ TEST_F(BufferAssignmentTest, ExampleConditional) {
auto builder = HloComputation::Builder(TestName());
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto const1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(56.4f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.4f)));
auto const2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(12.4f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.4f)));
auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(
r0f32_, pred, const1, true_computation, const2, false_computation));
module->AddEntryComputation(builder.Build());
@@ -1094,7 +1094,7 @@ TEST_F(BufferAssignmentTest, EmbeddedComputationBuffers) {
// Allocations for the call computation should not be thread-local.
auto& call_param_alloc = GetTopLevelAllocation(*assignment, call_param);
- EXPECT_FALSE(call_param_alloc.is_entry_computation_parameter());
+ EXPECT_TRUE(call_param_alloc.is_entry_computation_parameter());
EXPECT_FALSE(call_param_alloc.maybe_live_out());
EXPECT_FALSE(call_param_alloc.is_thread_local());
@@ -1200,8 +1200,9 @@ TEST_F(BufferAssignmentTest, DISABLED_TupleConstantAsOutput) {
// Test that a tuple constant which is forwarded to the computation output
// is properly handled.
auto builder = HloComputation::Builder(TestName());
- builder.AddInstruction(HloInstruction::CreateConstant(Literal::MakeTuple(
- {Literal::CreateR0<int64>(0).get(), Literal::CreateR0<int64>(1).get()})));
+ builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::MakeTuple({LiteralUtil::CreateR0<int64>(0).get(),
+ LiteralUtil::CreateR0<int64>(1).get()})));
auto module = CreateNewModule();
module->AddEntryComputation(builder.Build());
@@ -1252,16 +1253,18 @@ TEST_F(BufferAssignmentTest, TupleCallAsOutput) {
auto assignment = RunBufferAssignment(module.get());
- EXPECT_EQ(3, assignment->Allocations().size());
+ EXPECT_EQ(2, assignment->Allocations().size());
// Buffers for call are colocated with the sub-computation.
EXPECT_EQ(GetAllocation(*assignment, call, /*index=*/{}),
GetAllocation(*assignment, sub_tuple, /*index=*/{}));
EXPECT_EQ(GetAllocation(*assignment, call, /*index=*/{0}),
GetAllocation(*assignment, sub_param, /*index=*/{}));
- // The parameter isn't aliased with anything.
+
+ // The parameter isn't aliased with the result tuple, but it is aliased with
+ // the call operand.
EXPECT_NE(GetTopLevelAllocation(*assignment, param),
GetTopLevelAllocation(*assignment, sub_tuple));
- EXPECT_NE(GetTopLevelAllocation(*assignment, param),
+ EXPECT_EQ(GetTopLevelAllocation(*assignment, param),
GetTopLevelAllocation(*assignment, sub_param));
}
@@ -1325,13 +1328,15 @@ TEST_F(BufferAssignmentTest, TupleChainedCallAsOutput) {
GetAllocation(*assignment, c_call, /*index=*/{0}));
EXPECT_EQ(GetAllocation(*assignment, c_call, /*index=*/{0}),
GetAllocation(*assignment, d_param, /*index=*/{0}));
- // The parameters aren't aliased with anything.
+
EXPECT_TRUE(BuffersDistinct({a_param}, {b_param}, *assignment));
EXPECT_TRUE(BuffersDistinct({a_param}, {c_param}, *assignment));
EXPECT_TRUE(BuffersDistinct({a_param}, {d_param}, *assignment));
- EXPECT_TRUE(BuffersDistinct({b_param}, {c_param}, *assignment));
- EXPECT_TRUE(BuffersDistinct({b_param}, {d_param}, *assignment));
- EXPECT_TRUE(BuffersDistinct({c_param}, {d_param}, *assignment));
+
+ EXPECT_EQ(GetAllocation(*assignment, b_param, /*index=*/{0}),
+ GetAllocation(*assignment, c_param, /*index=*/{0}));
+ EXPECT_EQ(GetAllocation(*assignment, c_param, /*index=*/{0}),
+ GetAllocation(*assignment, d_param, /*index=*/{0}));
}
TEST_F(BufferAssignmentTest, BitcastAsOutput) {
@@ -1365,8 +1370,9 @@ TEST_F(BufferAssignmentTest, AmbiguousBufferAsOutput) {
HloInstruction::CreateParameter(1, tuple_shape, "param1"));
auto pred_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeShape(PRED, {}), "param1"));
- auto select = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple_shape, HloOpcode::kSelect, pred_param, tuple_param0, tuple_param1));
+ auto select = builder.AddInstruction(
+ HloInstruction::CreateTernary(tuple_shape, HloOpcode::kTupleSelect,
+ pred_param, tuple_param0, tuple_param1));
auto module = CreateNewModule();
module->AddEntryComputation(builder.Build());
@@ -1583,7 +1589,7 @@ TEST_F(BufferAssignmentTest, PeakBuffersWhile) {
auto b = HloComputation::Builder(TestName() + ".cond");
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "x"));
b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
condition = module->AddEmbeddedComputation(b.Build());
}
HloComputation* body;
@@ -1646,9 +1652,9 @@ class WhileBufferAssignmentTest : public HloTestBase {
builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state"));
auto zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int>(0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0)));
auto ten = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int>(10)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(10)));
builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kLt, zero, ten));
return builder.Build();
@@ -1707,7 +1713,7 @@ TEST_F(WhileBufferAssignmentTest, TwoForwardWhileLoops) {
HloInstruction::CreateParameter(2, data_shape_, "weights1"));
auto zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto output0 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {1}));
auto output1 = builder.AddInstruction(
@@ -1850,7 +1856,7 @@ TEST_F(WhileBufferAssignmentTest, ColocatedBuffers) {
auto build_cond = [&]() {
auto builder = HloComputation::Builder("cond");
auto const4 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int>(4)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(4)));
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "x"));
builder.AddInstruction(HloInstruction::CreateBinary(
@@ -1862,7 +1868,7 @@ TEST_F(WhileBufferAssignmentTest, ColocatedBuffers) {
auto build_body = [&]() {
auto builder = HloComputation::Builder("body");
auto const9 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int>(9)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(9)));
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "x"));
builder.AddInstruction(
@@ -1874,11 +1880,15 @@ TEST_F(WhileBufferAssignmentTest, ColocatedBuffers) {
auto module = CreateNewModule();
auto builder = HloComputation::Builder("entry");
- auto infeed = builder.AddInstruction(HloInstruction::CreateInfeed(r0s32, ""));
+ auto token = builder.AddInstruction(HloInstruction::CreateToken());
+ auto infeed =
+ builder.AddInstruction(HloInstruction::CreateInfeed(r0s32, token, ""));
+ auto infeed_data = builder.AddInstruction(
+ HloInstruction::CreateGetTupleElement(r0s32, infeed, 0));
auto cond0 = module->AddEmbeddedComputation(build_cond());
auto body0 = module->AddEmbeddedComputation(build_body());
auto while0 = builder.AddInstruction(
- HloInstruction::CreateWhile(r0s32, cond0, body0, infeed));
+ HloInstruction::CreateWhile(r0s32, cond0, body0, infeed_data));
auto cond1 = module->AddEmbeddedComputation(build_cond());
auto body1 = module->AddEmbeddedComputation(build_body());
@@ -1886,7 +1896,7 @@ TEST_F(WhileBufferAssignmentTest, ColocatedBuffers) {
HloInstruction::CreateWhile(r0s32, cond1, body1, while0));
auto zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0s32, HloOpcode::kAdd, zero, zero));
auto cond2 = module->AddEmbeddedComputation(build_cond());
@@ -1909,8 +1919,8 @@ TEST_F(WhileBufferAssignmentTest, ColocatedBuffers) {
// computation, since the issue this test stresses depends on the order the
// nodes are traversed during BufferAssignment.
SequentialHloOrdering::HloModuleSequence sequence;
- sequence[module->entry_computation()] = {infeed, while0, while1, zero,
- add, while2, tuple};
+ sequence[module->entry_computation()] = {
+ token, infeed, infeed_data, while0, while1, zero, add, while2, tuple};
TF_ASSERT_OK_AND_ASSIGN(
auto assignment,
BufferAssigner::Run(
@@ -1948,7 +1958,7 @@ TEST_F(WhileBufferAssignmentTest, OneForwardBackwardWhileLoopSet) {
HloInstruction::CreateParameter(1, data_shape_, "weights0"));
auto zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto output0 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {1}));
@@ -1992,16 +2002,16 @@ TEST_F(BufferAssignmentTest, TwoCalls) {
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param"));
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param, constant1));
sub_computation = module->AddEmbeddedComputation(builder.Build(add));
}
auto builder = HloComputation::Builder(TestName());
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto constant3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
auto call1 = builder.AddInstruction(
HloInstruction::CreateCall(r0f32, {constant2}, sub_computation));
auto call2 = builder.AddInstruction(
@@ -2025,6 +2035,56 @@ TEST_F(BufferAssignmentTest, TwoCalls) {
EXPECT_TRUE(BuffersDistinct({call1}, {call2}, *assignment));
}
+TEST_F(BufferAssignmentTest, CallParamCoAllocation) {
+ const char* hlo_text = R"(
+HloModule CallParamCoAllocation
+
+Callee {
+ param0 = (f32[100],(f32[200],f32[300])) parameter(0)
+ param1 = s32[20] parameter(1)
+ ROOT constant = f32[] constant(1)
+}
+
+ENTRY Main {
+ entry_param0 = f32[100] parameter(0)
+ entry_param1 = s32[20] parameter(1)
+ custom_call = (f32[200],f32[300]) custom-call(), custom_call_target="call-target"
+ call_op0 = (f32[100],(f32[200],f32[300])) tuple(entry_param0, custom_call)
+ ROOT call_result = f32[] call(call_op0, entry_param1), to_apply=Callee
+}
+)";
+
+ TF_ASSERT_OK_AND_ASSIGN(
+ std::unique_ptr<HloModule> module,
+ HloRunner::CreateModuleFromString(
+ hlo_text, legacy_flags::GetDebugOptionsFromFlags()));
+
+ auto buffers = RunBufferAssignment(module.get());
+
+ HloComputation* main = module->entry_computation();
+ HloComputation* callee = module->GetComputationWithName("Callee");
+ EXPECT_NE(callee, nullptr);
+
+ HloInstruction* param0 = callee->parameter_instruction(0);
+ HloInstruction* param1 = callee->parameter_instruction(1);
+
+ HloInstruction* entry_param0 = main->parameter_instruction(0);
+ HloInstruction* entry_param1 = main->parameter_instruction(1);
+ HloInstruction* custom_call = main->GetInstructionWithName("custom_call");
+
+ EXPECT_EQ(GetAllocation(*buffers, entry_param0, {}),
+ GetAllocation(*buffers, param0, {0}));
+ EXPECT_EQ(GetAllocation(*buffers, entry_param1, {}),
+ GetAllocation(*buffers, param1, {}));
+
+ EXPECT_EQ(GetAllocation(*buffers, custom_call, {}),
+ GetAllocation(*buffers, param0, {1}));
+ EXPECT_EQ(GetAllocation(*buffers, custom_call, {0}),
+ GetAllocation(*buffers, param0, {1, 0}));
+ EXPECT_EQ(GetAllocation(*buffers, custom_call, {1}),
+ GetAllocation(*buffers, param0, {1, 1}));
+}
+
static bool IsPostOrderTraversal(
const std::vector<const HloInstruction*>& sequence) {
tensorflow::gtl::FlatSet<const HloInstruction*> seen_so_far;
@@ -2053,9 +2113,9 @@ TEST_F(WhileBufferAssignmentTest, WhileLoopsInterferingResultRange) {
auto builder = HloComputation::Builder(TestName());
auto zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto input0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape_, "input0"));
@@ -2137,7 +2197,7 @@ TEST_F(WhileBufferAssignmentTest, WhilesDontShareEntryParamIfLiveOut) {
HloInstruction::CreateParameter(1, data_shape_, "weights0"));
auto zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto output0 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {1}));
auto output1 = builder.AddInstruction(
diff --git a/tensorflow/compiler/xla/service/buffer_liveness_test.cc b/tensorflow/compiler/xla/service/buffer_liveness_test.cc
index f623aef67a..4a927b5767 100644
--- a/tensorflow/compiler/xla/service/buffer_liveness_test.cc
+++ b/tensorflow/compiler/xla/service/buffer_liveness_test.cc
@@ -327,11 +327,12 @@ TEST_F(BufferLivenessTest, RootInstructionIsNotLastInSequentialOrder) {
builder.AddInstruction(HloInstruction::CreateParameter(0, vec_, "param"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(vec_, HloOpcode::kAdd, param, param));
+ auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto recv = builder.AddInstruction(
- HloInstruction::CreateRecv(vec_, /*channel_id=*/0));
+ HloInstruction::CreateRecv(vec_, token, /*channel_id=*/0));
auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
auto send = builder.AddInstruction(
- HloInstruction::CreateSend(recv_done, /*channel_id=*/1));
+ HloInstruction::CreateSend(recv_done, token, /*channel_id=*/1));
auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));
auto module = CreateNewModule();
@@ -438,11 +439,13 @@ TEST_F(BufferLivenessTest, TupleConstantLiveOut) {
// computation. The buffer containing {0, 1} is copied by GetTupleElement, and
// the buffers containing {3} and 3 are dead.
auto builder = HloComputation::Builder(TestName());
- auto inner_tuple0 = Literal::MakeTuple(
- {Literal::CreateR0<int64>(0).get(), Literal::CreateR0<int64>(1).get()});
- auto inner_tuple1 = Literal::MakeTuple({Literal::CreateR0<int64>(3).get()});
+ auto inner_tuple0 =
+ LiteralUtil::MakeTuple({LiteralUtil::CreateR0<int64>(0).get(),
+ LiteralUtil::CreateR0<int64>(1).get()});
+ auto inner_tuple1 =
+ LiteralUtil::MakeTuple({LiteralUtil::CreateR0<int64>(3).get()});
auto tuple_constant = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::MakeTuple({inner_tuple0.get(), inner_tuple1.get()})));
+ LiteralUtil::MakeTuple({inner_tuple0.get(), inner_tuple1.get()})));
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
inner_tuple0->shape(), tuple_constant, 0));
@@ -490,7 +493,7 @@ TEST_F(BufferLivenessTest, IndependentTupleElements) {
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
tuple_element0_shape, tuple_param0, 0));
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
+ LiteralUtil::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
tuple_element0_shape, HloOpcode::kAdd, tuple_element0, const0));
@@ -502,7 +505,7 @@ TEST_F(BufferLivenessTest, IndependentTupleElements) {
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
tuple_element1_shape, tuple_param0, 1));
auto const1 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f})));
+ LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f})));
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
tuple_element1_shape, HloOpcode::kAdd, tuple_element1, const1));
@@ -554,7 +557,7 @@ TEST_F(BufferLivenessTest, DependentTupleElements) {
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
tuple_element0_shape, tuple_param0, 0));
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
+ LiteralUtil::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
tuple_element0_shape, HloOpcode::kAdd, tuple_element0, const0));
@@ -626,7 +629,7 @@ class FusedDynamicUpdateSliceLivenessTest : public BufferLivenessTest {
HloInstruction::CreateGetTupleElement(data_shape, tuple_param0, 1));
auto update = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({2.f, 2.f, 2.f})));
+ LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f})));
HloInstruction* slice = nullptr;
if (update_uses_tuple_element1) {
// Create a slice instruction as an additional user of 'gte1'.
@@ -637,7 +640,7 @@ class FusedDynamicUpdateSliceLivenessTest : public BufferLivenessTest {
}
// Create a DynamicUpdateSlice instruction of tuple element 1 with 'update'.
auto starts = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({2})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({2})));
auto dynamic_update_slice =
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape, gte1, update, starts));
@@ -756,7 +759,7 @@ class DynamicUpdateSliceLivenessTest : public BufferLivenessTest {
HloInstruction::CreateGetTupleElement(data_shape, tuple_param0, 1));
auto update = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({2.f, 2.f, 2.f})));
+ LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f})));
if (tuple_element1_has_two_uses) {
// Add 'gte0' and 'gte1' to create another user of 'gte1'.
@@ -765,7 +768,7 @@ class DynamicUpdateSliceLivenessTest : public BufferLivenessTest {
}
// Create a DynamicUpdateSlice instruction of tuple element 1 with 'update'.
auto starts = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({2})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({2})));
auto dynamic_update_slice =
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape, gte1, update, starts));
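A pattern recurring across these test updates: infeed, outfeed, send, and recv now take an explicit token operand, created with HloInstruction::CreateToken and threaded through the side-effecting instructions. A minimal construction sketch mirroring the calls in this hunk (shape and channel IDs arbitrary):

// Post-token construction of a recv/send pair inside a computation builder.
Shape vec = ShapeUtil::MakeShape(F32, {8});
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto recv = builder.AddInstruction(
    HloInstruction::CreateRecv(vec, token, /*channel_id=*/0));
auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
auto send = builder.AddInstruction(
    HloInstruction::CreateSend(recv_done, token, /*channel_id=*/1));
builder.AddInstruction(HloInstruction::CreateSendDone(send));

Infeed follows the same scheme but additionally becomes tuple-shaped, so its data is now read back through a get-tuple-element, as in the ColocatedBuffers update above.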
diff --git a/tensorflow/compiler/xla/service/call_graph_test.cc b/tensorflow/compiler/xla/service/call_graph_test.cc
index 1ea7d538cd..cc80b74843 100644
--- a/tensorflow/compiler/xla/service/call_graph_test.cc
+++ b/tensorflow/compiler/xla/service/call_graph_test.cc
@@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/call_graph.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@@ -82,7 +82,7 @@ class CallGraphTest : public HloTestBase {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kGt, param0, zero));
return builder.Build();
@@ -247,11 +247,11 @@ TEST_F(CallGraphTest, ComputationWithConditional) {
HloComputation::Builder builder(TestName());
HloInstruction* pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloInstruction* const1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(56.4f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.4f)));
HloInstruction* const2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(12.6f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.6f)));
HloInstruction* conditional =
builder.AddInstruction(HloInstruction::CreateConditional(
kScalarShape, pred, const1, true_computation, const2,
diff --git a/tensorflow/compiler/xla/service/call_inliner.cc b/tensorflow/compiler/xla/service/call_inliner.cc
index 482ccc5b67..256d05a73e 100644
--- a/tensorflow/compiler/xla/service/call_inliner.cc
+++ b/tensorflow/compiler/xla/service/call_inliner.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include <deque>
#include "tensorflow/compiler/xla/service/call_graph.h"
+#include "tensorflow/compiler/xla/service/hlo_dce.h"
#include "tensorflow/core/lib/core/errors.h"
namespace xla {
@@ -151,6 +152,14 @@ StatusOr<bool> CallInliner::Run(HloModule* module) {
}
return Status::OK();
}));
+ if (did_mutate) {
+ // Run DCE to remove called computations which are now unused. Otherwise,
+ // if a called computation contained send/recv instructions, the module
+ // group verifier would flag an error on finding the same channel ID used
+ // by multiple send/recv instructions.
+ TF_RETURN_IF_ERROR(HloDCE().Run(module).status());
+ }
return did_mutate;
}
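The comment above describes a concrete failure mode: inlining clones the callee's instructions into the caller, so any send/recv channel ID in the callee now appears twice in the module, once live in the caller and once in the dead callee, and the module group verifier rejects duplicate channel IDs. A sketch of the observable behavior after this change (hedged; assumes only the pass interfaces shown in this diff):

// After Run(), any computation that became unused through inlining has been
// removed by the internal HloDCE call, along with its send/recv duplicates.
TF_ASSIGN_OR_RETURN(bool did_mutate, CallInliner().Run(module));
// did_mutate is still the inliner's own mutation flag; the DCE run does not
// change its value.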
diff --git a/tensorflow/compiler/xla/service/call_inliner_test.cc b/tensorflow/compiler/xla/service/call_inliner_test.cc
index 738d00881d..ff968bca29 100644
--- a/tensorflow/compiler/xla/service/call_inliner_test.cc
+++ b/tensorflow/compiler/xla/service/call_inliner_test.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include <utility>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -48,9 +48,9 @@ TEST_F(CallInlinerTest, ControlDependenciesAreCarriedToCaller) {
// the "one" value.
HloComputation::Builder inner(TestName() + ".inner");
HloInstruction* zero = inner.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(24.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(24.0f)));
HloInstruction* one = inner.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
TF_ASSERT_OK(zero->AddControlDependencyTo(one));
auto module = CreateNewModule();
HloComputation* inner_computation =
@@ -87,7 +87,7 @@ TEST_F(CallInlinerTest, CallsWithinWhileBodiesAreInlined) {
// little trickier.
HloComputation::Builder just_false(TestName() + ".false");
just_false.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* false_computation =
module->AddEmbeddedComputation(just_false.Build());
@@ -99,7 +99,7 @@ TEST_F(CallInlinerTest, CallsWithinWhileBodiesAreInlined) {
HloComputation::Builder outer(TestName() + ".outer");
HloInstruction* init_value = outer.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
outer.AddInstruction(
HloInstruction::CreateWhile(pred, call_false, call_false, init_value));
@@ -123,9 +123,9 @@ TEST_F(CallInlinerTest, InlineWithoutRunningPass) {
HloComputation::Builder just_false(TestName() + ".false");
auto* true_constant = just_false.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<bool>({true})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<bool>({true})));
auto* false_constant = just_false.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
TF_ASSERT_OK(false_constant->AddControlDependencyTo(true_constant));
HloComputation* false_computation =
module->AddEmbeddedComputation(just_false.Build());
@@ -147,15 +147,17 @@ TEST_F(CallInlinerTest, CallToOutfeedComputationIsInlined) {
HloComputation::Builder outfeeder(TestName() + ".outfeeder");
auto value = outfeeder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
+ auto token = outfeeder.AddInstruction(HloInstruction::CreateToken());
outfeeder.AddInstruction(
- HloInstruction::CreateOutfeed(f32, value, /*outfeed_config=*/""));
+ HloInstruction::CreateOutfeed(f32, value, token, /*outfeed_config=*/""));
auto outfeed_computation = module->AddEmbeddedComputation(outfeeder.Build());
HloComputation::Builder outer(TestName() + ".outer");
outer.AddInstruction(HloInstruction::CreateCall(
- ShapeUtil::MakeNil(), /*operands=*/{}, outfeed_computation));
+ outfeed_computation->root_instruction()->shape(), /*operands=*/{},
+ outfeed_computation));
module->AddEntryComputation(outer.Build());
diff --git a/tensorflow/compiler/xla/service/computation_placer.cc b/tensorflow/compiler/xla/service/computation_placer.cc
index 7c1bacff92..d26486fcfe 100644
--- a/tensorflow/compiler/xla/service/computation_placer.cc
+++ b/tensorflow/compiler/xla/service/computation_placer.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include <utility>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status.h"
diff --git a/tensorflow/compiler/xla/service/conditional_simplifier.cc b/tensorflow/compiler/xla/service/conditional_simplifier.cc
index e9ec796121..b7be3ba605 100644
--- a/tensorflow/compiler/xla/service/conditional_simplifier.cc
+++ b/tensorflow/compiler/xla/service/conditional_simplifier.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include <utility>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/call_inliner.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
diff --git a/tensorflow/compiler/xla/service/conditional_simplifier_test.cc b/tensorflow/compiler/xla/service/conditional_simplifier_test.cc
index 868348547d..c43a31b167 100644
--- a/tensorflow/compiler/xla/service/conditional_simplifier_test.cc
+++ b/tensorflow/compiler/xla/service/conditional_simplifier_test.cc
@@ -55,7 +55,7 @@ HloComputation* ConditionalSimplifierTest::MakeConditional(HloModule* module) {
true_computation_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {}), "param"));
auto one = true_computation_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1)));
true_computation_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kAdd, param, one));
@@ -73,7 +73,7 @@ HloComputation* ConditionalSimplifierTest::MakeConditional(HloModule* module) {
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(S32, {}),
"param"));
auto forty_two = false_computation_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(42)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(42)));
false_computation_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kAdd, param, forty_two));
@@ -82,11 +82,11 @@ HloComputation* ConditionalSimplifierTest::MakeConditional(HloModule* module) {
}
auto false_instrn = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto false_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {}), "false_param"));
auto one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1)));
builder.AddInstruction(HloInstruction::CreateConditional(
ShapeUtil::MakeShape(S32, {}), false_instrn, one, true_computation,
@@ -106,7 +106,7 @@ TEST_F(ConditionalSimplifierTest, ConditionalWithControlDependency) {
HloComputation* computation = MakeConditional(&module());
auto* true_op = computation->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
TF_ASSERT_OK(
true_op->AddControlDependencyTo(computation->root_instruction()));
@@ -119,10 +119,11 @@ TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsSend) {
ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional);
auto* true_computation = conditional->true_computation();
+ auto* token = true_computation->AddInstruction(HloInstruction::CreateToken());
auto* send = true_computation->AddInstruction(HloInstruction::CreateSend(
true_computation->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true))),
- /*channel_id=*/0));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))),
+ token, /*channel_id=*/0));
true_computation->AddInstruction(HloInstruction::CreateSendDone(send));
EXPECT_FALSE(ConditionalSimplifier().Run(&module()).ValueOrDie());
}
@@ -133,8 +134,9 @@ TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsRecv) {
ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional);
auto* true_computation = conditional->true_computation();
+ auto* token = true_computation->AddInstruction(HloInstruction::CreateToken());
auto* recv = true_computation->AddInstruction(HloInstruction::CreateRecv(
- ShapeUtil::MakeShape(F32, {1}), /*channel_id=*/0));
+ ShapeUtil::MakeShape(F32, {1}), token, /*channel_id=*/0));
true_computation->AddInstruction(HloInstruction::CreateRecvDone(recv));
EXPECT_FALSE(ConditionalSimplifier().Run(&module()).ValueOrDie());
}
@@ -144,8 +146,9 @@ TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsNonRemovableInstruction) {
auto* conditional = computation->root_instruction();
ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional);
auto* false_computation = conditional->false_computation();
- false_computation->AddInstruction(
- HloInstruction::CreateInfeed(ShapeUtil::MakeShape(F32, {1}), "config"));
+ auto token = false_computation->AddInstruction(HloInstruction::CreateToken());
+ false_computation->AddInstruction(HloInstruction::CreateInfeed(
+ ShapeUtil::MakeShape(F32, {1}), token, "config"));
EXPECT_FALSE(ConditionalSimplifier().Run(&module()).ValueOrDie());
}
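The reworked tests reflect a new HLO invariant: side-effecting ops such as Send, Recv, and Infeed now take an explicit token operand that sequences them, so a token must be materialized first. A condensed sketch of the pattern, with `computation` and `operand` standing in for whatever the caller already has:

    // A token must exist (created or threaded in) before any side-effecting op.
    auto* token = computation->AddInstruction(HloInstruction::CreateToken());
    auto* send = computation->AddInstruction(
        HloInstruction::CreateSend(operand, token, /*channel_id=*/0));
    computation->AddInstruction(HloInstruction::CreateSendDone(send));

Infeed follows the same shape, as the hunks above show: CreateInfeed(shape, token, config).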
diff --git a/tensorflow/compiler/xla/service/copy_insertion.cc b/tensorflow/compiler/xla/service/copy_insertion.cc
index b0ad433d8d..ab3d846403 100644
--- a/tensorflow/compiler/xla/service/copy_insertion.cc
+++ b/tensorflow/compiler/xla/service/copy_insertion.cc
@@ -1093,8 +1093,7 @@ void MaybeDumpModule(const string& message, const HloModule& module) {
} // namespace
Status RemoveUnnecessaryCopies(
- const HloOrdering& ordering,
- const tensorflow::gtl::FlatSet<int>& copies_to_exclude, HloModule* module,
+ const HloOrdering& ordering, HloModule* module,
const HloDataflowAnalysis::FusionCanShareBufferFunction&
fusion_can_share_buffer) {
MaybeDumpModule("after adding copies to resolve interference", *module);
@@ -1108,7 +1107,6 @@ Status RemoveUnnecessaryCopies(
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCopy &&
- !ContainsKey(copies_to_exclude, instruction->unique_id()) &&
instruction->CopyElisionAllowed()) {
TF_RETURN_IF_ERROR(copy_remover.TryElideCopy(instruction).status());
}
@@ -1152,16 +1150,13 @@ StatusOr<bool> CopyInsertion::Run(HloModule* module) {
"Call graph must be flattened before copy insertion.");
}
- // Gather Ids of existing kCopy instructions in the module. We avoid removing
- // these copies (except via DCE in TupleSimplifier) because they may have been
- // added for reasons not considered by copy insertion (e.g., layout assignment).
- // Instruction id is used instead of HloInstruction* because the pointer
- // values may be recycled.
- tensorflow::gtl::FlatSet<int> existing_copies;
- for (HloComputation* computation : module->computations()) {
- for (HloInstruction* instruction : computation->instructions()) {
- if (instruction->opcode() == HloOpcode::kCopy) {
- existing_copies.insert(instruction->unique_id());
+ int64 num_existing_copies = 0;
+ if (VLOG_IS_ON(1)) {
+ for (HloComputation* computation : module->computations()) {
+ for (HloInstruction* instruction : computation->instructions()) {
+ if (instruction->opcode() == HloOpcode::kCopy) {
+ ++num_existing_copies;
+ }
}
}
}
@@ -1181,8 +1176,7 @@ StatusOr<bool> CopyInsertion::Run(HloModule* module) {
TF_DCHECK_OK(VerifyNoLiveRangeInterference(module));
DependencyHloOrdering ordering(module);
- TF_RETURN_IF_ERROR(
- RemoveUnnecessaryCopies(ordering, existing_copies, module));
+ TF_RETURN_IF_ERROR(RemoveUnnecessaryCopies(ordering, module));
TF_RETURN_IF_ERROR(AddSpecialCaseCopies(*call_graph, module));
@@ -1203,7 +1197,7 @@ StatusOr<bool> CopyInsertion::Run(HloModule* module) {
}
}
}
- VLOG(1) << "Num copies before copy-insertion: " << existing_copies.size();
+ VLOG(1) << "Num copies before copy-insertion: " << num_existing_copies;
VLOG(1) << "Num copies after copy-insertion: " << num_total_copies;
}
diff --git a/tensorflow/compiler/xla/service/copy_insertion.h b/tensorflow/compiler/xla/service/copy_insertion.h
index 6d25706089..e1973db928 100644
--- a/tensorflow/compiler/xla/service/copy_insertion.h
+++ b/tensorflow/compiler/xla/service/copy_insertion.h
@@ -21,7 +21,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_pass_interface.h"
-#include "tensorflow/core/lib/gtl/flatmap.h"
namespace xla {
@@ -79,11 +78,10 @@ class CopyInsertion : public HloPassInterface {
};
// Try to remove as many copies from the module as possible without introducing
-// live range interference. Copy instructions (identified by their unique id) in
-// the set copies_to_exclude are not considered for removal.
+// live range interference. Only copy instructions that are eligible for
+// copy elision are considered for removal.
Status RemoveUnnecessaryCopies(
- const HloOrdering& ordering,
- const tensorflow::gtl::FlatSet<int>& copies_to_exclude, HloModule* module,
+ const HloOrdering& ordering, HloModule* module,
const HloDataflowAnalysis::FusionCanShareBufferFunction&
fusion_can_share_buffer = nullptr);
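With the exclusion set gone, pre-existing copies are protected by the instructions themselves (via CopyElisionAllowed(), as the .cc change above shows) rather than by caller-supplied ids, so the entry point needs only an ordering and a module. A minimal call under the new signature, assuming `module` is already flattened:

    DependencyHloOrdering ordering(module);
    TF_RETURN_IF_ERROR(RemoveUnnecessaryCopies(ordering, module));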
diff --git a/tensorflow/compiler/xla/service/copy_insertion_test.cc b/tensorflow/compiler/xla/service/copy_insertion_test.cc
index ed1a50f516..cd735256b8 100644
--- a/tensorflow/compiler/xla/service/copy_insertion_test.cc
+++ b/tensorflow/compiler/xla/service/copy_insertion_test.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include <set>
#include "tensorflow/compiler/xla/legacy_flags/debug_options_flags.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"
@@ -108,7 +108,7 @@ TEST_F(CopyInsertionTest, SingleConstant) {
// be copied before entering the tuple.
auto builder = HloComputation::Builder(TestName());
HloInstruction* constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant}));
@@ -125,21 +125,27 @@ TEST_F(CopyInsertionTest, SingleConstant) {
}
TEST_F(CopyInsertionTest, ExistingCopiesNotRemoved) {
- // Verify that an kCopy instructions which exist in the pass before
+ // Verify that kCopy instructions which change layout and exist before
// copy-insertion remain in the graph after copy-insertion.
auto module = CreateNewModule();
auto builder = HloComputation::Builder(TestName());
- HloInstruction* constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
- HloInstruction* copy_1 = builder.AddInstruction(HloInstruction::CreateUnary(
- constant->shape(), HloOpcode::kCopy, constant));
- HloInstruction* copy_2 = builder.AddInstruction(HloInstruction::CreateUnary(
- constant->shape(), HloOpcode::kCopy, constant));
+ HloInstruction* constant =
+ builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}})));
+ auto minor_to_major = LayoutUtil::MinorToMajor(constant->shape());
+ Layout reversed_layout =
+ LayoutUtil::MakeLayoutFromMajorToMinor(minor_to_major);
+ Shape copy_shape = constant->shape();
+ *copy_shape.mutable_layout() = reversed_layout;
+ HloInstruction* copy_1 = builder.AddInstruction(
+ HloInstruction::CreateUnary(copy_shape, HloOpcode::kCopy, constant));
+ HloInstruction* copy_2 = builder.AddInstruction(
+ HloInstruction::CreateUnary(copy_shape, HloOpcode::kCopy, constant));
HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
constant->shape(), HloOpcode::kAdd, copy_1, copy_2));
- HloInstruction* add_copy = builder.AddInstruction(
- HloInstruction::CreateUnary(constant->shape(), HloOpcode::kCopy, add));
+ builder.AddInstruction(
+ HloInstruction::CreateUnary(add->shape(), HloOpcode::kCopy, add));
module->AddEntryComputation(builder.Build());
@@ -147,12 +153,11 @@ TEST_F(CopyInsertionTest, ExistingCopiesNotRemoved) {
InsertCopies(module.get());
- EXPECT_EQ(CountCopies(*module), 3);
+ EXPECT_EQ(CountCopies(*module), 2);
- EXPECT_EQ(module->entry_computation()->root_instruction(), add_copy);
- EXPECT_THAT(
- module->entry_computation()->root_instruction(),
- op::Copy(op::Add(op::Copy(op::Constant()), op::Copy(op::Constant()))));
+ EXPECT_EQ(module->entry_computation()->root_instruction(), add);
+ EXPECT_THAT(module->entry_computation()->root_instruction(),
+ op::Add(op::Copy(op::Constant()), op::Copy(op::Constant())));
}
TEST_F(CopyInsertionTest, MultipleConstantsAndParameters) {
@@ -162,9 +167,9 @@ TEST_F(CopyInsertionTest, MultipleConstantsAndParameters) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HloInstruction* constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
HloInstruction* x = builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "x"));
@@ -192,11 +197,11 @@ TEST_F(CopyInsertionTest, AmbiguousPointsToSet) {
// the computation result. Verify that copies are added properly.
auto builder = HloComputation::Builder(TestName());
HloInstruction* constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HloInstruction* constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
HloInstruction* constant3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
HloInstruction* tuple1 = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
@@ -204,9 +209,9 @@ TEST_F(CopyInsertionTest, AmbiguousPointsToSet) {
HloInstruction::CreateTuple({constant3, constant2}));
HloInstruction* pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
builder.AddInstruction(HloInstruction::CreateTernary(
- tuple1->shape(), HloOpcode::kSelect, pred, tuple1, tuple2));
+ tuple1->shape(), HloOpcode::kTupleSelect, pred, tuple1, tuple2));
EXPECT_THAT(constant1->users(), UnorderedElementsAre(tuple1));
EXPECT_THAT(constant2->users(), UnorderedElementsAre(tuple1, tuple2));
@@ -250,8 +255,9 @@ TEST_F(CopyInsertionTest, BitcastConstant) {
// The output of a bitcast is its operand (same buffer), so a bitcast
// constant feeding the result must have a copy added.
auto builder = HloComputation::Builder(TestName());
- HloInstruction* constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({1.0, 42.0})));
+ HloInstruction* constant =
+ builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<float>({1.0, 42.0})));
HloInstruction* bitcast = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2, 2}), HloOpcode::kBitcast, constant));
@@ -365,9 +371,9 @@ TEST_F(CopyInsertionTest, AmbiguousTopLevelRoot) {
// copy is added.
auto builder = HloComputation::Builder(TestName());
HloInstruction* constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HloInstruction* constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
HloInstruction* tuple1 = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
@@ -375,9 +381,9 @@ TEST_F(CopyInsertionTest, AmbiguousTopLevelRoot) {
HloInstruction::CreateTuple({constant2, constant1}));
HloInstruction* pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloInstruction* select = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple1->shape(), HloOpcode::kSelect, pred, tuple1, tuple2));
+ tuple1->shape(), HloOpcode::kTupleSelect, pred, tuple1, tuple2));
HloInstruction* gte =
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(select->shape(), {0}), select, 0));
@@ -408,7 +414,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
const Shape& loop_state_shape) {
auto builder = HloComputation::Builder(TestName() + ".Condition");
auto limit_const = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(10)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(10)));
auto loop_state = builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
auto induction_variable =
@@ -437,7 +443,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
induction_variable_shape_, loop_state, 0));
auto inc = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1)));
auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc));
// Update data GTE(1).
@@ -475,7 +481,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
induction_variable_shape_, loop_state, 0));
auto inc = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1)));
// add0 = Add(in0, 1)
auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
@@ -544,7 +550,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
induction_variable_shape_, loop_state, 0));
auto inc = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1)));
// add0 = Add(in0, 1)
auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc));
@@ -559,8 +565,9 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
}
- auto update = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
+ auto update = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
+ {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
// add1 = Add(in1, {1, 1, 1, 1, 1, 1, 1, 1})
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape_, HloOpcode::kAdd, data, update));
@@ -593,7 +600,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
auto gte0 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
induction_variable_shape_, loop_state, 0));
auto inc = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1)));
auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
gte0->shape(), HloOpcode::kAdd, gte0, inc));
@@ -603,8 +610,9 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
// GTE(GTE(loop_state, 1), 0) -> Add
auto gte10 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, gte1, 0));
- auto update10 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
+ auto update10 = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
+ {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
auto add10 = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape_, HloOpcode::kAdd, gte10, update10));
@@ -628,10 +636,11 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
bool nested = false) {
auto builder = HloComputation::Builder(TestName() + ".While");
auto induction_var_init = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0)));
- auto data_init = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f})));
+ auto data_init = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
+ {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f})));
if (nested) {
auto inner_init = builder.AddInstruction(
@@ -654,8 +663,9 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
HloInstruction* BuildWhileInstruction_InitPointsToConstant() {
auto builder = HloComputation::Builder(TestName() + ".While");
- auto data_init = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f})));
+ auto data_init = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
+ {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f})));
return BuildWhileInstructionWithCustomInit(loop_state_shape_, data_init,
&builder);
}
@@ -672,11 +682,11 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
auto builder = HloComputation::Builder(TestName() + ".While");
auto one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto v1 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, one, {1}));
auto zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto v2 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {1}));
@@ -684,9 +694,9 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
auto tuple2 = builder.AddInstruction(HloInstruction::CreateTuple({v2, v1}));
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto data_init = builder.AddInstruction(HloInstruction::CreateTernary(
- nested_tuple_shape_, HloOpcode::kSelect, pred, tuple1, tuple2));
+ nested_tuple_shape_, HloOpcode::kTupleSelect, pred, tuple1, tuple2));
return BuildWhileInstructionWithCustomInit(nested_loop_state_shape_,
data_init, &builder);
@@ -696,7 +706,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
auto builder = HloComputation::Builder(TestName() + ".While");
auto one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto one_vec = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, one, {1}));
auto data_init =
@@ -709,11 +719,12 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
HloInstruction* BuildWhileInstruction_InitPointsToInterfering() {
auto builder = HloComputation::Builder(TestName() + ".While");
auto one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto data_init = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, one, {1}));
- auto one_vec = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
+ auto one_vec = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
+ {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
// Take a reference to 'data_init' to make it interfere with while result.
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape_, HloOpcode::kAdd, data_init, one_vec));
@@ -745,7 +756,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest {
const bool nested =
ShapeUtil::Equal(loop_state_shape, nested_loop_state_shape_);
auto induction_var_init = builder->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0)));
auto condition = module_->AddEmbeddedComputation(
BuildConditionComputation(loop_state_shape));
auto body = module_->AddEmbeddedComputation(
@@ -1247,7 +1258,6 @@ TEST_F(WhileCopyInsertionTest, InitPointsToNonDistinctUsedByTwoWhileLoops) {
auto loop_init = builder.AddInstruction(
HloInstruction::CreateTuple({iter_param, data_param, data_param}));
-
// Two while loops share the same loop init tuple.
auto while_hlo1 = builder.AddInstruction(HloInstruction::CreateWhile(
loop_state_shape, condition1, body1, loop_init));
@@ -1305,7 +1315,7 @@ TEST_F(CopyInsertionTest, SwizzlingWhile) {
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "param"));
auto cond_constant = cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
cond_builder.AddInstruction(HloInstruction::CreateUnary(
cond_constant->shape(), HloOpcode::kNot, cond_constant));
HloComputation* condition =
@@ -1313,9 +1323,9 @@ TEST_F(CopyInsertionTest, SwizzlingWhile) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while = builder.AddInstruction(
@@ -1370,7 +1380,7 @@ TEST_F(CopyInsertionTest, SwizzlingWhileWithOneOp) {
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "param"));
auto cond_constant = cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
cond_builder.AddInstruction(HloInstruction::CreateUnary(
cond_constant->shape(), HloOpcode::kNot, cond_constant));
HloComputation* condition =
@@ -1378,9 +1388,9 @@ TEST_F(CopyInsertionTest, SwizzlingWhileWithOneOp) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while = builder.AddInstruction(
@@ -1430,7 +1440,7 @@ TEST_F(CopyInsertionTest, SwizzlingWhileSharedInput) {
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "param"));
auto cond_constant = cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
cond_builder.AddInstruction(HloInstruction::CreateUnary(
cond_constant->shape(), HloOpcode::kNot, cond_constant));
HloComputation* condition =
@@ -1438,7 +1448,7 @@ TEST_F(CopyInsertionTest, SwizzlingWhileSharedInput) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant, constant}));
builder.AddInstruction(
@@ -1515,7 +1525,7 @@ TEST_F(CopyInsertionTest, SequentialWhiles) {
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "param"));
auto cond_constant = cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
cond_builder.AddInstruction(HloInstruction::CreateUnary(
cond_constant->shape(), HloOpcode::kNot, cond_constant));
HloComputation* condition =
@@ -1570,14 +1580,14 @@ TEST_F(CopyInsertionTest, WhileBodyWithConstantRoot) {
body_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
body_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(123.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0)));
HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module->AddEmbeddedComputation(cond_builder.Build());
@@ -1605,8 +1615,8 @@ HloModule TokensShouldNotBeCopied
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
- %generate-token = token[] generate-token(token[] %get-tuple-element.2)
- ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %generate-token)
+ %after-all = token[] after-all(token[] %get-tuple-element.2)
+ ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
@@ -1619,7 +1629,7 @@ HloModule TokensShouldNotBeCopied
ENTRY %TokensShouldNotBeCopied () -> s32[] {
%one = s32[] constant(1)
%negative_one = s32[] negate(%one)
- %init_token = token[] generate-token()
+ %init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %negative_one, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
ROOT %root = s32[] get-tuple-element((s32[], token[]) %while), index=0
@@ -1639,7 +1649,7 @@ std::unique_ptr<HloComputation> MakeTrivialCondition(const Shape& shape) {
builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "loop_state"));
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kNot, constant));
return builder.Build();
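Two renames run through this test file. Select over tuples is now its own opcode, kTupleSelect, with kSelect reserved for array shapes, so tuple selects are built as below (operands as in the tests above); and in HLO text, token creation is spelled after-all instead of generate-token.

    // pred is a scalar PRED constant; tuple1 and tuple2 share a tuple shape.
    auto select = builder.AddInstruction(HloInstruction::CreateTernary(
        tuple1->shape(), HloOpcode::kTupleSelect, pred, tuple1, tuple2));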
diff --git a/tensorflow/compiler/xla/service/cpu/BUILD b/tensorflow/compiler/xla/service/cpu/BUILD
index b703be0f39..c45d914e93 100644
--- a/tensorflow/compiler/xla/service/cpu/BUILD
+++ b/tensorflow/compiler/xla/service/cpu/BUILD
@@ -37,6 +37,7 @@ cc_library(
srcs = ["cpu_transfer_manager.cc"],
hdrs = ["cpu_transfer_manager.h"],
deps = [
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
@@ -54,29 +55,6 @@ cc_library(
)
cc_library(
- name = "external_constant_pool",
- srcs = ["external_constant_pool.cc"],
- hdrs = ["external_constant_pool.h"],
- deps = [
- "//tensorflow/compiler/xla:literal_util",
- "//tensorflow/compiler/xla:shape_util",
- "//tensorflow/compiler/xla:util",
- "//tensorflow/core:lib",
- ],
-)
-
-tf_cc_test(
- name = "external_constant_pool_test",
- srcs = ["external_constant_pool_test.cc"],
- deps = [
- ":external_constant_pool",
- "//tensorflow/compiler/xla:shape_util",
- "//tensorflow/compiler/xla/tests:xla_internal_test_main",
- "//tensorflow/core:test",
- ],
-)
-
-cc_library(
name = "cpu_compiler",
srcs = ["cpu_compiler.cc"],
hdrs = ["cpu_compiler.h"],
@@ -95,7 +73,7 @@ cc_library(
":ir_emitter",
":parallel_task_assignment",
":simple_orc_jit",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:protobuf_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
@@ -112,7 +90,6 @@ cc_library(
"//tensorflow/compiler/xla/service:dot_decomposer",
"//tensorflow/compiler/xla/service:executable",
"//tensorflow/compiler/xla/service:flatten_call_graph",
- "//tensorflow/compiler/xla/service:gather_expander",
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service:hlo_constant_folding",
"//tensorflow/compiler/xla/service:hlo_cse",
@@ -152,7 +129,7 @@ cc_library(
"@llvm//:x86_code_gen", # fixdeps: keep
"@llvm//:x86_disassembler", # fixdeps: keep
] + select({
- "@org_tensorflow//tensorflow:linux_ppc64le": [
+ "//tensorflow:linux_ppc64le": [
"@llvm//:powerpc_disassembler",
"@llvm//:powerpc_code_gen",
],
@@ -175,7 +152,6 @@ cc_library(
":cpu_runtime",
":custom_call_target_registry",
":disassembler",
- ":external_constant_pool",
":orc_jit_memory_mapper",
":runtime_fp16",
":runtime_conv2d",
@@ -256,7 +232,6 @@ cc_library(
":cpu_options",
":cpu_runtime",
":dot_op_emitter",
- ":external_constant_pool",
":ir_emission_utils",
":ir_function",
":parallel_loop_emitter",
@@ -273,6 +248,7 @@ cc_library(
"//tensorflow/compiler/xla/service:buffer_assignment",
"//tensorflow/compiler/xla/service:elemental_ir_emitter",
"//tensorflow/compiler/xla/service:hlo",
+ "//tensorflow/compiler/xla/service:hlo_casting_utils",
"//tensorflow/compiler/xla/service:hlo_module_config",
"//tensorflow/compiler/xla/service:name_uniquer",
"//tensorflow/compiler/xla/service/llvm_ir:alias_analysis",
@@ -379,7 +355,7 @@ tf_cc_binary(
srcs = ["sample_harness.cc"],
deps = [
"//tensorflow/compiler/xla:array4d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:xla_data_proto",
@@ -741,7 +717,7 @@ tf_cc_test(
deps = [
":cpu_layout_assignment",
":target_machine_features_fake",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_layout",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
@@ -833,7 +809,7 @@ tf_cc_test(
":cpu_executable",
":parallel_task_assignment",
":target_machine_features_fake",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_layout",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
@@ -916,7 +892,7 @@ tf_cc_test(
srcs = ["cpu_copy_insertion_test.cc"],
deps = [
":cpu_copy_insertion",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:test_helpers",
diff --git a/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc b/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc
index 375b017b09..547d4c696d 100644
--- a/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc
@@ -60,11 +60,11 @@ TEST_F(ConvCanonicalizationTest, NonCanonicalToCanonical) {
auto builder = HloComputation::Builder(TestName());
// The input dimensions are in CNHW order.
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR4FromArray4D(Array4D<float>(
+ LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kInputFeatureCount, kBatchSize, kInputSize, kInputSize))));
// The kernel dimensions are in OIHW order.
auto kernel = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR4FromArray4D(Array4D<float>(
+ LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kOutputFeatureCount, kInputFeatureCount, kWindowSize, kWindowSize))));
ConvolutionDimensionNumbers dnums;
@@ -122,11 +122,11 @@ TEST_F(ConvCanonicalizationTest, CanonicalStaysTheSame) {
auto builder = HloComputation::Builder(TestName());
// The input dimensions are in NHWC order.
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR4FromArray4D(Array4D<float>(
+ LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kBatchSize, kInputSize, kInputSize, kInputFeatureCount))));
// The kernel dimensions are in HWIO order.
auto kernel = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR4FromArray4D(Array4D<float>(
+ LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kWindowSize, kWindowSize, kInputFeatureCount, kOutputFeatureCount))));
ConvolutionDimensionNumbers dnums;
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
index 52da9d6eac..29fa29d33a 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
@@ -30,6 +30,7 @@ limitations under the License.
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Object/ObjectFile.h"
@@ -38,7 +39,7 @@ limitations under the License.
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/protobuf_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
@@ -66,7 +67,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
#include "tensorflow/compiler/xla/service/dot_decomposer.h"
#include "tensorflow/compiler/xla/service/flatten_call_graph.h"
-#include "tensorflow/compiler/xla/service/gather_expander.h"
#include "tensorflow/compiler/xla/service/hlo.pb.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_constant_folding.h"
@@ -269,6 +269,7 @@ Status CpuCompiler::RunHloPasses(HloModule* module, bool is_aot_compile,
/*is_layout_sensitive=*/false,
[](const Shape&, const Shape&) { return false; },
/*enable_dot_strength_reduction=*/false);
+ pass.AddPass<HloDCE>();
// BatchNormExpander can create zero-sized ops, so zero-sized HLO
// elimination has to come after that pass.
@@ -296,8 +297,6 @@ Status CpuCompiler::RunHloPasses(HloModule* module, bool is_aot_compile,
pipeline.AddPass<HloCSE>(/*is_layout_sensitive=*/false);
pipeline.AddPass<CpuInstructionFusion>();
- pipeline.AddPass<GatherExpander>();
-
ReducePrecisionInsertion::AddPasses(
&pipeline, module->config().debug_options(),
ReducePrecisionInsertion::PassTiming::AFTER_FUSION);
@@ -306,11 +305,16 @@ Status CpuCompiler::RunHloPasses(HloModule* module, bool is_aot_compile,
module->mutable_entry_computation_layout(), &target_machine_features);
// The LayoutAssignment pass may leave behind kCopy instructions which are
// duplicates or NOPs, so remove them with algebraic simplification and CSE.
- pipeline.AddPass<HloPassFix<AlgebraicSimplifier>>(
- /*is_layout_sensitive=*/true,
- [](const Shape&, const Shape&) { return true; },
- /*enable_dot_strength_reduction=*/false);
- pipeline.AddPass<HloCSE>(/*is_layout_sensitive=*/true);
+ {
+ auto& pass = pipeline.AddPass<HloPassFix<HloPassPipeline>>(
+ "after layout assignement");
+ pass.AddPass<HloPassFix<AlgebraicSimplifier>>(
+ /*is_layout_sensitive=*/true,
+ [](const Shape&, const Shape&) { return true; },
+ /*enable_dot_strength_reduction=*/false);
+ pass.AddPass<HloDCE>();
+ pass.AddPass<HloCSE>(/*is_layout_sensitive=*/true);
+ }
pipeline.AddPass<HloElementTypeConverter>(BF16, F32);
// Outline ops in the entry computation into calls to subcomputations.
const int max_parallelism =
@@ -578,7 +582,7 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::RunBackend(
IrEmitter ir_emitter(*module, *assignment, llvm_module.get(),
std::move(instruction_to_profile_idx),
std::move(computation_to_profile_idx),
- &target_machine_features, jit->external_constant_pool());
+ &target_machine_features);
for (auto embedded_computation :
entry_computation->MakeEmbeddedComputationsList()) {
@@ -601,7 +605,13 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::RunBackend(
/*is_top_level_computation=*/true,
&module_sequence.at(entry_computation)));
- string function_name = llvm_ir::AsString(entry_function->getName());
+ string function_name = [&]() {
+ llvm::SmallVector<char, 40> function_name_vector;
+ llvm::Mangler::getNameWithPrefix(
+ function_name_vector, entry_function->getName(), jit->data_layout());
+ return string(function_name_vector.begin(), function_name_vector.end());
+ }();
+
string ir_module_string;
if (embed_ir_in_executable) {
ir_module_string = llvm_ir::DumpModuleToString(*llvm_module);
@@ -765,8 +775,7 @@ CpuCompiler::CompileAheadOfTime(std::vector<std::unique_ptr<HloModule>> modules,
IrEmitter ir_emitter(*module, *assignment, &llvm_module,
std::move(instruction_to_profile_idx),
std::move(computation_to_profile_idx),
- &target_machine_features,
- /*external_constant_pool=*/nullptr);
+ &target_machine_features);
HloComputation* computation = module->entry_computation();
for (auto embedded_computation :
computation->MakeEmbeddedComputationsList()) {
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_copy_insertion_test.cc b/tensorflow/compiler/xla/service/cpu/cpu_copy_insertion_test.cc
index a05a269417..4db7fa446e 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_copy_insertion_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_copy_insertion_test.cc
@@ -16,7 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/cpu/cpu_copy_insertion.h"
#include "tensorflow/compiler/xla/legacy_flags/debug_options_flags.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"
@@ -74,14 +74,14 @@ TEST_F(CpuCopyInsertionTest, WhileBodyWithConstantRoot) {
body_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
body_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(123.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0)));
HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module->AddEmbeddedComputation(cond_builder.Build());
@@ -114,7 +114,7 @@ TEST_F(CpuCopyInsertionTest, TupleCall) {
auto sub_param = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
auto constant = sub_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(123.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0)));
auto add = sub_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, sub_param, constant));
sub_builder.AddInstruction(
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_instruction_fusion_test.cc b/tensorflow/compiler/xla/service/cpu/cpu_instruction_fusion_test.cc
index 97e10a89a2..991b14f17d 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_instruction_fusion_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_instruction_fusion_test.cc
@@ -282,7 +282,7 @@ class OpcodeFusionTest : public InstructionFusionTest {
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "arg0"));
HloInstruction* one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, arg0, one));
return module->AddEmbeddedComputation(builder.Build());
@@ -501,8 +501,8 @@ TEST_F(OpcodeFusionTest, UnaryMapOfExp) {
HloInstruction* exp = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param0));
- builder.AddInstruction(HloInstruction::CreateMap(
- shape, {exp}, CreateAdderToOne(module.get()), /*static_operands=*/{}));
+ builder.AddInstruction(
+ HloInstruction::CreateMap(shape, {exp}, CreateAdderToOne(module.get())));
module->AddEntryComputation(builder.Build());
@@ -525,8 +525,8 @@ TEST_F(OpcodeFusionTest, BinaryMapOfExps) {
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param1));
- builder.AddInstruction(HloInstruction::CreateMap(
- shape, {exp0, exp1}, CreateMax(module.get()), /*static_operands=*/{}));
+ builder.AddInstruction(
+ HloInstruction::CreateMap(shape, {exp0, exp1}, CreateMax(module.get())));
module->AddEntryComputation(builder.Build());
@@ -595,7 +595,7 @@ TEST_F(OpcodeFusionTest, MessOfFusileNodes) {
auto pad = builder.AddInstruction(HloInstruction::CreatePad(
ShapeUtil::MakeShape(S32, {5}), idx_choice,
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(0))),
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0(0))),
padding_config));
auto slice = builder.AddInstruction(HloInstruction::CreateDynamicSlice(
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_layout_assignment_test.cc b/tensorflow/compiler/xla/service/cpu/cpu_layout_assignment_test.cc
index 429fc7b786..3681d12d8d 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_layout_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_layout_assignment_test.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/algebraic_simplifier.h"
#include "tensorflow/compiler/xla/service/computation_layout.h"
#include "tensorflow/compiler/xla/service/cpu/target_machine_features_fake.h"
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc
index b877b29581..156166bf2b 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include <utility>
#include <vector>
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/cpu/cpu_runtime.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -180,7 +181,7 @@ Status CpuTransferManager::TransferLiteralFromOutfeed(
tensorflow::gtl::ArraySlice<int64> dimensions(
tensorflow::bit_cast<const int64*>(literal_shape.dimensions().data()),
literal_shape.dimensions().size());
- *literal = std::move(*Literal::CreateFromDimensions(
+ *literal = std::move(*LiteralUtil::CreateFromDimensions(
literal_shape.element_type(), dimensions));
TF_ASSIGN_OR_RETURN(Shape received_shape,
TransferArrayBufferFromOutfeed(
@@ -211,7 +212,7 @@ Status CpuTransferManager::TransferLiteralFromOutfeed(
tensorflow::bit_cast<const int64*>(
tuple_element_shape.dimensions().data()),
tuple_element_shape.dimensions().size());
- auto empty = Literal::CreateFromDimensions(
+ auto empty = LiteralUtil::CreateFromDimensions(
tuple_element_shape.element_type(), dimensions);
int64 size = GetByteSizeRequirement(tuple_element_shape);
buffer_data.push_back({empty->untyped_data(), size});
@@ -232,7 +233,7 @@ Status CpuTransferManager::TransferLiteralFromOutfeed(
for (int64 i = 0; i < literal_shape.tuple_shapes_size(); ++i) {
*elements[i]->mutable_shape_do_not_use() = received_shape.tuple_shapes(i);
}
- *literal = std::move(*Literal::MakeTupleOwned(std::move(elements)));
+ *literal = std::move(*LiteralUtil::MakeTupleOwned(std::move(elements)));
TF_RET_CHECK(ShapeUtil::Equal(literal->shape(), literal_shape));
return Status::OK();
}
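The outfeed path shows two of the relocated factories in action: CreateFromDimensions pre-allocates a destination literal of the expected element type and dimensions, and MakeTupleOwned reassembles independently received pieces into one tuple literal. Shapes here are illustrative, and `elements` is assumed to be a vector of owned literals as in the code above:

    // Allocate an f32[2,3] literal to receive outfeed data into.
    auto literal = LiteralUtil::CreateFromDimensions(F32, {2, 3});
    // Stitch per-element literals back into a single tuple literal.
    auto tuple = LiteralUtil::MakeTupleOwned(std::move(elements));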
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
index 6dfc666f09..593575c0fd 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
@@ -39,13 +39,14 @@ class CpuTransferManager : public GenericTransferManager {
Status TransferLiteralToInfeed(se::StreamExecutor* executor,
const LiteralSlice& literal) override;
- Status TransferBufferToInfeed(se::StreamExecutor* executor, int64 size,
- const void* source) override;
Status TransferLiteralFromOutfeed(se::StreamExecutor* executor,
const Shape& literal_shape,
Literal* literal) override;
private:
+ Status TransferBufferToInfeed(se::StreamExecutor* executor, int64 size,
+ const void* source);
+
// Transfers infeed data to device. InfeedBuffer->Done() must be
// called to clean up the memory allocated for InfeedBuffer.
StatusOr<cpu::runtime::XfeedBuffer*> TransferBufferToInfeedInternal(
diff --git a/tensorflow/compiler/xla/service/cpu/external_constant_pool.cc b/tensorflow/compiler/xla/service/cpu/external_constant_pool.cc
deleted file mode 100644
index c562865591..0000000000
--- a/tensorflow/compiler/xla/service/cpu/external_constant_pool.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/compiler/xla/service/cpu/external_constant_pool.h"
-
-#include <algorithm>
-#include <cstdlib>
-#include <cstring>
-
-#include "tensorflow/compiler/xla/map_util.h"
-#include "tensorflow/compiler/xla/ptr_util.h"
-#include "tensorflow/compiler/xla/shape_util.h"
-#include "tensorflow/core/lib/gtl/flatset.h"
-
-namespace xla {
-namespace cpu {
-void ExternalConstantPool::Insert(string name, const LiteralSlice& literal,
- int64 alignment) {
- CHECK(!ShapeUtil::IsTuple(literal.shape()));
- CHECK(alignment > 0 && IsPowerOfTwo(static_cast<uint64>(alignment)));
- CHECK(entries_.find(name) == entries_.end());
-
- const int64 literal_size = ShapeUtil::ByteSizeOf(literal.shape());
- void* raw_pointer = tensorflow::port::AlignedMalloc(
- literal_size, std::max<size_t>(alignment, sizeof(void*)));
- CHECK(raw_pointer != nullptr) << "failed to allocate " << literal_size
- << " bytes with alignment of " << alignment;
-
- std::memcpy(raw_pointer, literal.untyped_data(), literal_size);
- entries_.emplace(std::move(name), static_cast<uint8*>(raw_pointer));
-}
-
-const uint8* ExternalConstantPool::Find(const string& name) {
- auto it = entries_.find(name);
- return it == entries_.end() ? nullptr : it->second.get();
-}
-} // namespace cpu
-} // namespace xla
diff --git a/tensorflow/compiler/xla/service/cpu/external_constant_pool.h b/tensorflow/compiler/xla/service/cpu/external_constant_pool.h
deleted file mode 100644
index 0677f5f0b5..0000000000
--- a/tensorflow/compiler/xla/service/cpu/external_constant_pool.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_EXTERNAL_CONSTANT_POOL_H_
-#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_EXTERNAL_CONSTANT_POOL_H_
-
-#include <memory>
-
-#include "tensorflow/compiler/xla/literal_util.h"
-#include "tensorflow/core/lib/gtl/flatmap.h"
-#include "tensorflow/core/platform/mem.h"
-
-namespace xla {
-namespace cpu {
-// An ExternalConstantPool maintains a set of constants kept external to
-// generated LLVM IR. These constants are accessed from the IR via globals with
-// extern linkage. This current incarnation of ExternalConstantPool only
-// supports the JIT CPU backend; the AOT backend is not supported.
-//
-// Implementation-wise, this is a simple wrapper around a map of strings to byte
-// buffers. This simple implementation works in a JIT scenario. This class
-// will have to become smarter if we decide to support external constant pools
-// on AOT compiles in the future.
-class ExternalConstantPool {
- public:
- // Inserts a buffer with the contents of `literal` into the constant pool with
- // the name `name`. It is an error to try to insert two constants with the
- // same `name` into the same constant pool. The buffer for literal is aligned
- // to `alignment` bytes, and `alignment` must be a power of 2.
- //
- // The constant pool copies out the contents of `literal` into a buffer it
- // owns -- it does not keep pointers to `literal`, or to memory owned by
- // `literal`.
- void Insert(string name, const LiteralSlice& literal, int64 alignment);
-
- // Find the constant with name `name` in this constant pool. If there isn't
- // such constant, return nullptr.
- const uint8* Find(const string& name);
-
- private:
- // We need to `AlignedFree` pointers allocated into `entries_` since we
- // allocate them with `AlignedMalloc`.
- struct FreeDeleter {
- void operator()(void* ptr) { tensorflow::port::AlignedFree(ptr); }
- };
-
- tensorflow::gtl::FlatMap<string, std::unique_ptr<uint8, FreeDeleter>>
- entries_;
-};
-} // namespace cpu
-} // namespace xla
-
-#endif // TENSORFLOW_COMPILER_XLA_SERVICE_CPU_EXTERNAL_CONSTANT_POOL_H_
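For reference, the deleted pool reduces to a name-keyed map of owned, aligned byte buffers. Below is a self-contained sketch of the same pattern in standard C++17; the names are illustrative, not TensorFlow API, and std::aligned_alloc stands in for tensorflow::port::AlignedMalloc/AlignedFree:

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>
    #include <memory>
    #include <string>
    #include <unordered_map>

    struct AlignedFree {
      void operator()(void* p) const { std::free(p); }
    };

    class ConstantPool {
     public:
      // Copies `size` bytes from `data` into a buffer aligned to `alignment`
      // (a power of two). The pool owns the copy, not the caller's memory.
      void Insert(std::string name, const void* data, std::size_t size,
                  std::size_t alignment) {
        // std::aligned_alloc requires size to be a multiple of the alignment.
        std::size_t padded = (size + alignment - 1) / alignment * alignment;
        auto* raw =
            static_cast<std::uint8_t*>(std::aligned_alloc(alignment, padded));
        std::memcpy(raw, data, size);
        entries_.emplace(std::move(name),
                         std::unique_ptr<std::uint8_t, AlignedFree>(raw));
      }

      // Returns the buffer registered under `name`, or nullptr if absent.
      const std::uint8_t* Find(const std::string& name) const {
        auto it = entries_.find(name);
        return it == entries_.end() ? nullptr : it->second.get();
      }

     private:
      std::unordered_map<std::string,
                         std::unique_ptr<std::uint8_t, AlignedFree>>
          entries_;
    };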
diff --git a/tensorflow/compiler/xla/service/cpu/external_constant_pool_test.cc b/tensorflow/compiler/xla/service/cpu/external_constant_pool_test.cc
deleted file mode 100644
index 9290a4e5df..0000000000
--- a/tensorflow/compiler/xla/service/cpu/external_constant_pool_test.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/compiler/xla/service/cpu/external_constant_pool.h"
-#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/core/platform/test.h"
-
-namespace xla {
-namespace cpu {
-namespace {
-class ExternalConstantPoolTest : public ::testing::Test {};
-
-template <typename T>
-T GetFromBuffer(const uint8* buffer, int64 index) {
- T result;
- std::memcpy(&result, buffer + index * sizeof(T), sizeof(T));
- return result;
-}
-
-TEST(ExternalConstantPoolTest, Basic) {
- ExternalConstantPool constant_pool;
- EXPECT_EQ(constant_pool.Find("name-0"), nullptr);
- const auto literal = Literal::CreateR2({{1, 2}, {3, 4}});
- constant_pool.Insert("name-0", *literal, 4);
- const uint8* constant = constant_pool.Find("name-0");
- ASSERT_NE(constant, nullptr);
-
- EXPECT_EQ(GetFromBuffer<int32>(constant, 0), 1);
- EXPECT_EQ(GetFromBuffer<int32>(constant, 1), 2);
- EXPECT_EQ(GetFromBuffer<int32>(constant, 2), 3);
- EXPECT_EQ(GetFromBuffer<int32>(constant, 3), 4);
-
- EXPECT_EQ(constant_pool.Find("name-1"), nullptr);
-}
-
-TEST(ExternalConstantPoolTest, RowMinorLayout) {
- ExternalConstantPool constant_pool;
- EXPECT_EQ(constant_pool.Find("name-0"), nullptr);
- const auto literal = Literal::CreateR2WithLayout(
- {{1, 2}, {3, 4}}, LayoutUtil::MakeLayout({0, 1}));
- constant_pool.Insert("name-0", *literal, 4);
- const uint8* constant = constant_pool.Find("name-0");
- ASSERT_NE(constant, nullptr);
-
- EXPECT_EQ(GetFromBuffer<int32>(constant, 0), 1);
- EXPECT_EQ(GetFromBuffer<int32>(constant, 1), 3);
- EXPECT_EQ(GetFromBuffer<int32>(constant, 2), 2);
- EXPECT_EQ(GetFromBuffer<int32>(constant, 3), 4);
-}
-
-TEST(ExternalConstantPoolTest, Alignment) {
- ExternalConstantPool constant_pool;
- EXPECT_EQ(constant_pool.Find("name-0"), nullptr);
-
- for (int i = 0; i < 8; i++) {
- int64 alignment = 1 << i;
- string name = tensorflow::strings::StrCat("name-", i);
-
- const auto literal = Literal::CreateR2({{1, 2}, {3, 4}});
- constant_pool.Insert(name, *literal, alignment);
-
- const uint8* constant = constant_pool.Find(name);
- ASSERT_NE(constant, nullptr);
- EXPECT_EQ(reinterpret_cast<intptr_t>(constant) % alignment, 0);
- }
-}
-
-} // namespace
-} // namespace cpu
-} // namespace xla
diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
index 75e8e9a835..2ad41374d3 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
@@ -48,6 +48,8 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/cpu/shape_partition.h"
#include "tensorflow/compiler/xla/service/cpu/simple_orc_jit.h"
#include "tensorflow/compiler/xla/service/elemental_ir_emitter.h"
+#include "tensorflow/compiler/xla/service/hlo_casting_utils.h"
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h"
@@ -83,8 +85,7 @@ IrEmitter::IrEmitter(
llvm::Module* llvm_module,
std::unordered_map<const HloInstruction*, int64> instruction_to_profile_idx,
std::unordered_map<const HloComputation*, int64> computation_to_profile_idx,
- const TargetMachineFeatures* target_machine_features,
- ExternalConstantPool* external_constant_pool)
+ const TargetMachineFeatures* target_machine_features)
: assignment_(assignment),
module_(llvm_module),
arch_type_(llvm::Triple(llvm_module->getTargetTriple()).getArch()),
@@ -94,8 +95,7 @@ IrEmitter::IrEmitter(
alias_analysis_(hlo_module, assignment, &llvm_module->getContext()),
hlo_module_config_(hlo_module.config()),
is_top_level_computation_(false),
- target_machine_features_(*target_machine_features),
- external_constant_pool_(external_constant_pool) {
+ target_machine_features_(*target_machine_features) {
ir_builder_.setFastMathFlags(llvm_ir::GetFastMathFlags(
/*fast_math_enabled=*/hlo_module_config_.debug_options()
.xla_enable_fast_math()));
@@ -161,45 +161,18 @@ Status IrEmitter::HandleBitcast(HloInstruction* bitcast) {
}
llvm::Constant* IrEmitter::EmitGlobalForLiteral(const Literal& literal) {
- llvm::Constant* result;
-
- // We avoid creating large constants in the LLVM IR since LLVM is not
- // efficient for large constant arrays. We still emit "small enough" constant
- // arrays into the IR, on the off chance the LLVM optimizer can do something
- // interesting with it.
- //
- // TODO(b/29904935): Remove the large constant pool.
- const int kMaxInternalConstantSizeInBytes = 128;
- if (external_constant_pool_ &&
- ByteSizeOf(literal.shape()) >= kMaxInternalConstantSizeInBytes) {
- string global_name = tensorflow::strings::StrCat(
- "constant_global_", external_global_constant_counter_++);
- llvm::GlobalVariable* result_global = new llvm::GlobalVariable(
- /*Module=*/*module_,
- /*Type=*/IrShapeType(literal.shape()),
- /*isConstant=*/true,
- /*Linkage=*/llvm::GlobalValue::ExternalLinkage,
- /*Initializer=*/nullptr,
- /*Name=*/AsStringRef(global_name));
- result_global->setAlignment(MinimumAlignmentForShape(literal.shape()));
- external_constant_pool_->Insert(global_name, literal,
- MinimumAlignmentForShape(literal.shape()));
- result = result_global;
- } else {
- llvm::Constant* initializer =
- llvm_ir::ConvertLiteralToIrConstant(literal, module_);
- llvm::GlobalVariable* result_global = new llvm::GlobalVariable(
- /*Module=*/*module_,
- /*Type=*/initializer->getType(),
- /*isConstant=*/true,
- /*Linkage=*/llvm::GlobalValue::PrivateLinkage,
- /*Initializer=*/initializer,
- /*Name=*/"");
- result_global->setAlignment(MinimumAlignmentForShape(literal.shape()));
- result = llvm::ConstantExpr::getBitCast(
- result_global, IrShapeType(literal.shape())->getPointerTo());
- }
- return result;
+ llvm::Constant* initializer =
+ llvm_ir::ConvertLiteralToIrConstant(literal, module_);
+ llvm::GlobalVariable* result_global = new llvm::GlobalVariable(
+ /*Module=*/*module_,
+ /*Type=*/initializer->getType(),
+ /*isConstant=*/true,
+ /*Linkage=*/llvm::GlobalValue::PrivateLinkage,
+ /*Initializer=*/initializer,
+ /*Name=*/"");
+ result_global->setAlignment(MinimumAlignmentForShape(literal.shape()));
+ return llvm::ConstantExpr::getBitCast(
+ result_global, IrShapeType(literal.shape())->getPointerTo());
}
Status IrEmitter::HandleConstant(HloInstruction* constant) {
@@ -306,45 +279,60 @@ Status IrEmitter::HandleGetTupleElement(HloInstruction* get_tuple_element) {
Status IrEmitter::HandleSelect(HloInstruction* select) {
auto pred = select->operand(0);
- auto on_true = select->operand(1);
- auto on_false = select->operand(2);
TF_RET_CHECK(pred->shape().element_type() == PRED);
-
- if (ShapeUtil::IsTuple(select->shape())) {
- TF_RETURN_IF_ERROR(EmitTargetAddressForOp(select));
- llvm_ir::EmitTupleSelect(
- GetIrArrayFor(select), GetIrArrayFor(pred), GetEmittedValueFor(on_true),
- GetEmittedValueFor(on_false), &ir_builder_, module_);
- return Status::OK();
- }
-
return DefaultAction(select);
}
-Status IrEmitter::HandleInfeed(HloInstruction* infeed) {
- VLOG(2) << "HandleInfeed: " << infeed->ToString();
+Status IrEmitter::HandleTupleSelect(HloInstruction* tuple_select) {
+ auto pred = tuple_select->operand(0);
+ auto on_true = tuple_select->operand(1);
+ auto on_false = tuple_select->operand(2);
+ TF_RET_CHECK(pred->shape().element_type() == PRED);
+ TF_RET_CHECK(ShapeUtil::IsScalar(pred->shape()));
+ TF_RET_CHECK(ShapeUtil::IsTuple(tuple_select->shape()));
+ TF_RETURN_IF_ERROR(EmitTargetAddressForOp(tuple_select));
+ llvm_ir::EmitTupleSelect(GetIrArrayFor(tuple_select), GetIrArrayFor(pred),
+ GetEmittedValueFor(on_true),
+ GetEmittedValueFor(on_false), &ir_builder_, module_);
+ return Status::OK();
+}
- const Shape& shape = infeed->shape();
+Status IrEmitter::HandleInfeed(HloInstruction* instruction) {
+ HloInfeedInstruction* infeed = Cast<HloInfeedInstruction>(instruction);
+ VLOG(2) << "HandleInfeed: " << infeed->ToString();
- // The infeed operation produces data (dequeued from the infeed queue) at this
- // address, which has been provided by buffer assignment.
+ // The infeed operation produces a two-element tuple containing data and a
+ // token value. HloInfeedInstruction::infeed_shape gives us the data shape.
+ const Shape& data_shape = infeed->infeed_shape();
+ DCHECK(ShapeUtil::Equal(data_shape,
+ ShapeUtil::GetTupleElementShape(infeed->shape(), 0)));
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(infeed));
- llvm_ir::IrArray infeed_array = GetIrArrayFor(infeed);
- if (ShapeUtil::IsTuple(shape)) {
- TF_RET_CHECK(!ShapeUtil::IsNestedTuple(shape));
+ // Write the tuple index table.
+ TF_ASSIGN_OR_RETURN(BufferAllocation::Slice data_slice,
+ assignment_.GetUniqueSlice(infeed, {0}));
+ llvm::Value* data_address = EmitTempBufferPointer(data_slice, data_shape);
+ TF_ASSIGN_OR_RETURN(BufferAllocation::Slice token_slice,
+ assignment_.GetUniqueSlice(infeed, {1}));
+ llvm::Value* token_address = EmitTempBufferPointer(
+ token_slice, ShapeUtil::GetTupleElementShape(infeed->shape(), 1));
+ llvm_ir::EmitTuple(GetIrArrayFor(infeed), {data_address, token_address},
+ &ir_builder_, module_);
+
+ if (ShapeUtil::IsTuple(data_shape)) {
+ TF_RET_CHECK(!ShapeUtil::IsNestedTuple(data_shape));
// For a tuple, we first copy each of the internal elements to
// their corresponding target locations. We then construct the
// tuple outer buffer containing pointers to the internal
// elements.
std::vector<llvm::Value*> tuple_element_addresses;
- for (int64 i = 0; i < shape.tuple_shapes_size(); ++i) {
+ for (int64 i = 0; i < data_shape.tuple_shapes_size(); ++i) {
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice buffer,
- assignment_.GetUniqueSlice(infeed, {i}));
+ assignment_.GetUniqueSlice(infeed, {0, i}));
const Shape& tuple_element_shape =
- ShapeUtil::GetTupleElementShape(shape, i);
+ ShapeUtil::GetTupleElementShape(data_shape, i);
// Only the outer tuple buffer's target address is obtained from
// GetEmittedValueFor, to handle the case when Infeed is the root
@@ -359,11 +347,11 @@ Status IrEmitter::HandleInfeed(HloInstruction* infeed) {
tuple_element_addresses.push_back(tuple_element_address);
}
- llvm_ir::EmitTuple(infeed_array, tuple_element_addresses, &ir_builder_,
- module_);
+ llvm_ir::EmitTuple(llvm_ir::IrArray(data_address, data_shape),
+ tuple_element_addresses, &ir_builder_, module_);
} else {
- TF_RETURN_IF_ERROR(EmitXfeedTransfer(XfeedKind::kInfeed, shape,
- GetEmittedValueFor(infeed)));
+ TF_RETURN_IF_ERROR(
+ EmitXfeedTransfer(XfeedKind::kInfeed, data_shape, data_address));
}
return Status::OK();
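The tuple plumbing added to HandleInfeed above follows XLA's usual convention: the outer buffer of a tuple is just a table of pointers to the element buffers, which is what EmitTuple writes. A purely hypothetical C++ sketch of that buffer layout for the new (data, token) infeed result; the struct, the field sizes, and the f32[16] payload shape are all invented here for illustration and are not the actual runtime structures:

#include <cstdint>

// Hypothetical layout of an infeed result buffer: a two-entry tuple
// index table pointing at the data payload and the (empty) token.
struct InfeedResult {
  void* outer[2];    // tuple index table: {&data, &token}
  float data[16];    // shape-dependent payload (f32[16] chosen arbitrarily)
  uint8_t token[1];  // the token carries no data; one byte as a placeholder
};

void WriteTupleIndexTable(InfeedResult& r) {
  r.outer[0] = r.data;   // element {0}: the dequeued infeed data
  r.outer[1] = r.token;  // element {1}: the token
}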
@@ -488,42 +476,111 @@ Status IrEmitter::HandleTuple(HloInstruction* tuple) {
return Status::OK();
}
+StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForMap(
+ HloMapInstruction* map, const llvm_ir::IrArray::Index& index) {
+ llvm::Function* mapped_ir_function =
+ FindOrDie(emitted_functions_, map->to_apply());
+ std::vector<llvm::Value*> parameter_addresses;
+ for (const HloInstruction* operand : map->operands()) {
+ const llvm_ir::IrArray& array = GetIrArrayFor(operand);
+ parameter_addresses.push_back(
+ array.EmitArrayElementAddress(index, &ir_builder_));
+ }
+ return EmitElementFunctionCall(mapped_ir_function, map->shape(),
+ parameter_addresses, "map_function");
+}
+
Status IrEmitter::HandleMap(HloInstruction* map) {
- gtl::ArraySlice<HloInstruction*> operands(map->operands());
- HloComputation* function = map->to_apply();
- // The called computation should have been emitted previously.
- llvm::Function* mapped_ir_function = FindOrDie(emitted_functions_, function);
-
- return EmitTargetElementLoop(map, [this, map, operands, mapped_ir_function](
- const llvm_ir::IrArray::Index& index) {
- std::vector<llvm::Value*> parameter_addresses;
- for (const HloInstruction* operand : operands) {
- const llvm_ir::IrArray& array = GetIrArrayFor(operand);
- parameter_addresses.push_back(
- array.EmitArrayElementAddress(index, &ir_builder_));
- }
- return EmitElementFunctionCall(mapped_ir_function, map->shape(),
- parameter_addresses, "map_function");
+ return EmitTargetElementLoop(map, [&](const llvm_ir::IrArray::Index& index) {
+ return EmitTargetElementLoopBodyForMap(Cast<HloMapInstruction>(map), index);
});
}
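The map refactoring keeps the same per-element pattern as the deleted lambda: gather one element address per operand, then call the mapped computation once per output index. In scalar C++ terms, roughly (a sketch assuming two equal-length operands; Map2 is a name invented here):

#include <vector>

// Elementwise map over two operands, the scalar analogue of
// EmitTargetElementLoopBodyForMap: one call to the mapped function
// per output index. Assumes a.size() == b.size().
std::vector<float> Map2(const std::vector<float>& a,
                        const std::vector<float>& b,
                        float (*f)(float, float)) {
  std::vector<float> out(a.size());
  for (size_t i = 0; i < a.size(); ++i) out[i] = f(a[i], b[i]);
  return out;
}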
-Status IrEmitter::HandleReduceWindow(HloInstruction* reduce_window) {
- auto operand = reduce_window->operand(0);
+StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForReduceWindow(
+ HloReduceWindowInstruction* reduce_window,
+ const llvm_ir::IrArray::Index& index) {
+ const HloInstruction* operand = reduce_window->operand(0);
const Window& window = reduce_window->window();
HloComputation* function = reduce_window->to_apply();
+ // The called computation should have been emitted previously.
+ llvm::Function* reducer_function = FindOrDie(emitted_functions_, function);
+
+ // We fold inputs into the accumulator and initialize it to
+ // the initial value on the reduce_window.
+ PrimitiveType operand_element_type = operand->shape().element_type();
+ llvm::Value* accumulator_address = llvm_ir::EmitAllocaAtFunctionEntry(
+ llvm_ir::PrimitiveTypeToIrType(operand_element_type, module_),
+ "reduce_window_accumulator_address", &ir_builder_,
+ MinimumAlignmentForPrimitiveType(operand_element_type));
+ ir_builder_.CreateStore(
+ ir_builder_.CreateLoad(GetEmittedValueFor(reduce_window->operand(1))),
+ accumulator_address);
+
+ llvm_ir::ForLoopNest loops(IrName(reduce_window, "inner"), &ir_builder_);
+ std::vector<int64> window_size;
+ for (const auto& dim : window.dimensions()) {
+ window_size.push_back(dim.size());
+ }
+ const llvm_ir::IrArray::Index window_index = loops.AddLoopsForShape(
+ ShapeUtil::MakeShape(operand_element_type, window_size), "window");
+ CHECK_EQ(window_index.size(), index.size());
+
+ SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &ir_builder_);
+
+ llvm_ir::IrArray::Index input_index(ir_builder_.getInt64Ty(), index.size());
+ llvm::Value* in_bounds_condition = nullptr;
+ for (size_t i = 0; i < index.size(); ++i) {
+ llvm::Value* strided_index = ir_builder_.CreateNSWMul(
+ index[i], ir_builder_.getInt64(window.dimensions(i).stride()));
+ input_index[i] = ir_builder_.CreateNSWSub(
+ ir_builder_.CreateNSWAdd(strided_index, window_index[i]),
+ ir_builder_.getInt64(window.dimensions(i).padding_low()));
+
+ // We must check that 0 <= input_index[i] < bound; otherwise we are in
+ // the padding and can skip the computation. The check is equivalent to
+ // input_index[i] < bound as an *unsigned* comparison, since a negative
+ // value wraps to a large positive value.
+ llvm::Value* index_condition = ir_builder_.CreateICmpULT(
+ input_index[i],
+ ir_builder_.getInt64(ShapeUtil::GetDimension(operand->shape(), i)));
+ if (in_bounds_condition == nullptr) {
+ in_bounds_condition = index_condition;
+ } else {
+ in_bounds_condition =
+ ir_builder_.CreateAnd(in_bounds_condition, index_condition);
+ }
+ }
+ CHECK(in_bounds_condition != nullptr);
+
+ llvm_ir::LlvmIfData if_data =
+ llvm_ir::EmitIfThenElse(in_bounds_condition, "in-bounds", &ir_builder_);
+ SetToFirstInsertPoint(if_data.true_block, &ir_builder_);
+
+ // We are not in the padding, so carry out the computation.
+ llvm_ir::IrArray input_array(GetIrArrayFor(operand));
+ llvm::Value* input_value_address =
+ input_array.EmitArrayElementAddress(input_index, &ir_builder_);
+ llvm::Value* result = EmitElementFunctionCall(
+ reducer_function, reduce_window->shape(),
+ {accumulator_address, input_value_address}, "reducer_function");
+ ir_builder_.CreateStore(result, accumulator_address);
+
+ SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &ir_builder_);
+ return ir_builder_.CreateLoad(accumulator_address);
+}
+
+Status IrEmitter::HandleReduceWindow(HloInstruction* reduce_window) {
TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(
- /*instruction=*/*reduce_window, /*operands=*/{operand},
+ /*instruction=*/*reduce_window,
+ /*operands=*/{reduce_window->operand(0)},
/*supported_types=*/{F32, BF16, S32}));
// TODO(b/31410564): Implement dilation for reduce-window.
- if (window_util::HasDilation(window)) {
+ if (window_util::HasDilation(reduce_window->window())) {
return Unimplemented(
"Dilation for ReduceWindow is not implemented on CPU.");
}
- // The called computation should have been emitted previously.
- llvm::Function* reducer_function = FindOrDie(emitted_functions_, function);
-
// Pseudo code for reduce window:
//
// for (coordinates O in the output)
@@ -538,73 +595,9 @@ Status IrEmitter::HandleReduceWindow(HloInstruction* reduce_window) {
// This is completely un-optimized and just here to have something
// that works.
return EmitTargetElementLoop(
- reduce_window, [this, reduce_window, operand, window,
- reducer_function](const llvm_ir::IrArray::Index& index) {
- // We fold inputs into the accumulator and initialize it to
- // the initial value on the reduce_window.
- PrimitiveType operand_element_type = operand->shape().element_type();
- llvm::Value* accumulator_address = llvm_ir::EmitAllocaAtFunctionEntry(
- llvm_ir::PrimitiveTypeToIrType(operand_element_type, module_),
- "reduce_window_accumulator_address", &ir_builder_,
- MinimumAlignmentForPrimitiveType(operand_element_type));
- ir_builder_.CreateStore(ir_builder_.CreateLoad(GetEmittedValueFor(
- reduce_window->operand(1))),
- accumulator_address);
-
- llvm_ir::ForLoopNest loops(IrName(reduce_window, "inner"),
- &ir_builder_);
- std::vector<int64> window_size;
- for (const auto& dim : window.dimensions()) {
- window_size.push_back(dim.size());
- }
- const llvm_ir::IrArray::Index window_index = loops.AddLoopsForShape(
- ShapeUtil::MakeShape(operand_element_type, window_size), "window");
- CHECK_EQ(window_index.size(), index.size());
-
- SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &ir_builder_);
-
- llvm_ir::IrArray::Index input_index(ir_builder_.getInt64Ty(),
- index.size());
- llvm::Value* in_bounds_condition = nullptr;
- for (size_t i = 0; i < index.size(); ++i) {
- llvm::Value* strided_index = ir_builder_.CreateNSWMul(
- index[i], ir_builder_.getInt64(window.dimensions(i).stride()));
- input_index[i] = ir_builder_.CreateNSWSub(
- ir_builder_.CreateNSWAdd(strided_index, window_index[i]),
- ir_builder_.getInt64(window.dimensions(i).padding_low()));
-
- // We need to check if 0 <= input_index[i] < bound, as
- // otherwise we are in the padding so that we can skip the
- // computation. That is equivalent to input_index[i] < bound
- // as an *unsigned* comparison, since a negative value will
- // wrap to a large positive value.
- llvm::Value* index_condition = ir_builder_.CreateICmpULT(
- input_index[i], ir_builder_.getInt64(ShapeUtil::GetDimension(
- operand->shape(), i)));
- if (in_bounds_condition == nullptr) {
- in_bounds_condition = index_condition;
- } else {
- in_bounds_condition =
- ir_builder_.CreateAnd(in_bounds_condition, index_condition);
- }
- }
- CHECK(in_bounds_condition != nullptr);
-
- llvm_ir::LlvmIfData if_data = llvm_ir::EmitIfThenElse(
- in_bounds_condition, "in-bounds", &ir_builder_);
- SetToFirstInsertPoint(if_data.true_block, &ir_builder_);
-
- // We are not in the padding, so carry out the computation.
- llvm_ir::IrArray input_array(GetIrArrayFor(operand));
- llvm::Value* input_value_address =
- input_array.EmitArrayElementAddress(input_index, &ir_builder_);
- llvm::Value* result = EmitElementFunctionCall(
- reducer_function, reduce_window->shape(),
- {accumulator_address, input_value_address}, "reducer_function");
- ir_builder_.CreateStore(result, accumulator_address);
-
- SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &ir_builder_);
- return ir_builder_.CreateLoad(accumulator_address);
+ reduce_window, [&](const llvm_ir::IrArray::Index& index) {
+ return EmitTargetElementLoopBodyForReduceWindow(
+ Cast<HloReduceWindowInstruction>(reduce_window), index);
});
}
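The refactored body above is the same naive algorithm the deleted lambda implemented. A self-contained 1-D sketch of it, including the unsigned-compare trick the comments describe (for a signed index i, `0 <= i && i < bound` is exactly `(uint64_t)i < bound`, since negative values wrap to huge unsigned ones). This is a minimal illustration assuming padding only on the low side; ReduceWindow1D is a name invented here:

#include <cstdint>
#include <vector>

// Naive 1-D reduce-window with stride and low padding, mirroring the
// pseudo code in the diff. Out-of-bounds window positions (the padding)
// are skipped via a single unsigned comparison.
std::vector<float> ReduceWindow1D(const std::vector<float>& in, float init,
                                  int64_t window_size, int64_t stride,
                                  int64_t padding_low,
                                  float (*reducer)(float, float)) {
  int64_t bound = static_cast<int64_t>(in.size());
  // Output size with padding_high == 0 (assumed for this sketch).
  int64_t out_size = (bound + padding_low - window_size) / stride + 1;
  std::vector<float> out(out_size, init);
  for (int64_t o = 0; o < out_size; ++o) {
    float acc = init;  // fold inputs into the accumulator
    for (int64_t w = 0; w < window_size; ++w) {
      int64_t i = o * stride + w - padding_low;  // may be negative
      if (static_cast<uint64_t>(i) < static_cast<uint64_t>(bound)) {
        acc = reducer(acc, in[i]);  // in bounds: not in the padding
      }
    }
    out[o] = acc;
  }
  return out;
}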
@@ -833,17 +826,157 @@ Status IrEmitter::HandleDot(HloInstruction* dot) {
target_machine_features_);
}
+StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForConvolution(
+ HloConvolutionInstruction* convolution,
+ const llvm_ir::IrArray::Index& index) {
+ const HloInstruction* lhs = convolution->operand(0);
+ const HloInstruction* rhs = convolution->operand(1);
+ const Window& window = convolution->window();
+
+ const ConvolutionDimensionNumbers& dnums =
+ convolution->convolution_dimension_numbers();
+ int num_spatial_dims = dnums.output_spatial_dimensions_size();
+ std::vector<llvm::Value*> output_spatial(num_spatial_dims);
+ for (int i = 0; i < num_spatial_dims; ++i) {
+ output_spatial[i] = index[dnums.output_spatial_dimensions(i)];
+ }
+ llvm::Value* output_feature = index[dnums.output_feature_dimension()];
+ llvm::Value* batch = index[dnums.output_batch_dimension()];
+
+ // We will accumulate the products into this sum to calculate the output entry
+ // at the given index.
+ PrimitiveType lhs_element_type = lhs->shape().element_type();
+ llvm::Type* lhs_llvm_type =
+ llvm_ir::PrimitiveTypeToIrType(lhs_element_type, module_);
+ llvm::Value* sum_address = llvm_ir::EmitAllocaAtFunctionEntry(
+ lhs_llvm_type, "convolution_sum_address", &ir_builder_,
+ MinimumAlignmentForPrimitiveType(lhs_element_type));
+ llvm::Value* constant_zero = llvm::Constant::getNullValue(lhs_llvm_type);
+ ir_builder_.CreateStore(constant_zero, sum_address);
+
+ llvm_ir::ForLoopNest loops(IrName(convolution, "inner"), &ir_builder_);
+ std::vector<llvm::Value*> kernel_spatial(num_spatial_dims);
+ for (int i = 0; i < num_spatial_dims; ++i) {
+ kernel_spatial[i] =
+ loops
+ .AddLoop(
+ 0, rhs->shape().dimensions(dnums.kernel_spatial_dimensions(i)),
+ tensorflow::strings::StrCat("k", i))
+ ->GetIndVarValue();
+ }
+ llvm::Value* input_feature =
+ loops
+ .AddLoop(0, lhs->shape().dimensions(dnums.input_feature_dimension()),
+ "iz")
+ ->GetIndVarValue();
+
+ SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &ir_builder_);
+
+ // Calculate the spatial index in the input array, taking striding, dilation
+ // and padding into account. An index in the padding will be out of the bounds
+ // of the array.
+ const auto calculate_input_index = [this](llvm::Value* output_index,
+ llvm::Value* kernel_index,
+ const WindowDimension& window_dim) {
+ llvm::Value* strided_index = ir_builder_.CreateNSWMul(
+ output_index, ir_builder_.getInt64(window_dim.stride()));
+ llvm::Value* dilated_kernel_index = ir_builder_.CreateNSWMul(
+ kernel_index, ir_builder_.getInt64(window_dim.window_dilation()));
+ return ir_builder_.CreateNSWSub(
+ ir_builder_.CreateNSWAdd(strided_index, dilated_kernel_index),
+ ir_builder_.getInt64(window_dim.padding_low()));
+ };
+ std::vector<llvm::Value*> input_spatial(num_spatial_dims);
+ for (int i = 0; i < num_spatial_dims; ++i) {
+ input_spatial[i] = calculate_input_index(
+ output_spatial[i], kernel_spatial[i], window.dimensions(i));
+ }
+
+ // We must check that 0 <= input dim < bound; otherwise we are in the
+ // padding and can skip the computation. The check is equivalent to input
+ // dim < bound as an *unsigned* comparison, since a negative value wraps
+ // to a large positive value. The input dim is dilated, so the bound must
+ // be dilated as well to match.
+
+ // We also need to check that the input coordinates do not fall in one of
+ // the holes created by base dilation.
+ const auto not_in_hole = [&](llvm::Value* input_index, int64 base_dilation) {
+ llvm::Value* remainder = ir_builder_.CreateSRem(
+ input_index, ir_builder_.getInt64(base_dilation));
+ return ir_builder_.CreateICmpEQ(remainder, ir_builder_.getInt64(0));
+ };
+
+ llvm::Value* in_bounds_condition = ir_builder_.getInt1(true);
+ for (int i = 0; i < num_spatial_dims; ++i) {
+ llvm::ConstantInt* input_bound =
+ ir_builder_.getInt64(window_util::DilatedBound(
+ lhs->shape().dimensions(dnums.input_spatial_dimensions(i)),
+ window.dimensions(i).base_dilation()));
+ llvm::Value* dim_in_bound =
+ ir_builder_.CreateICmpULT(input_spatial[i], input_bound);
+ llvm::Value* dim_not_in_hole =
+ not_in_hole(input_spatial[i], window.dimensions(i).base_dilation());
+ llvm::Value* dim_ok = ir_builder_.CreateAnd(dim_in_bound, dim_not_in_hole);
+ in_bounds_condition = ir_builder_.CreateAnd(in_bounds_condition, dim_ok);
+ }
+
+ // Now we need to map the dilated base coordinates back to the actual
+ // data indices on the lhs.
+ const auto undilate = [&](llvm::Value* input_index, int64 base_dilation) {
+ return ir_builder_.CreateSDiv(input_index,
+ ir_builder_.getInt64(base_dilation));
+ };
+ for (int i = 0; i < num_spatial_dims; ++i) {
+ input_spatial[i] =
+ undilate(input_spatial[i], window.dimensions(i).base_dilation());
+ }
+
+ llvm_ir::LlvmIfData if_data =
+ llvm_ir::EmitIfThenElse(in_bounds_condition, "in-bounds", &ir_builder_);
+ SetToFirstInsertPoint(if_data.true_block, &ir_builder_);
+
+ // We are not in the padding, so carry out the computation.
+ int num_dims = num_spatial_dims + 2;
+ llvm_ir::IrArray::Index input_index(ir_builder_.getInt64Ty(), num_dims);
+ for (int i = 0; i < num_spatial_dims; ++i) {
+ input_index[dnums.input_spatial_dimensions(i)] = input_spatial[i];
+ }
+ input_index[dnums.input_feature_dimension()] = input_feature;
+ input_index[dnums.input_batch_dimension()] = batch;
+
+ llvm_ir::IrArray kernel_array(GetIrArrayFor(rhs));
+ llvm_ir::IrArray::Index kernel_index(ir_builder_.getInt64Ty(), num_dims);
+ for (int i = 0; i < num_spatial_dims; ++i) {
+ kernel_index[dnums.kernel_spatial_dimensions(i)] =
+ window.dimensions(i).window_reversal()
+ ? ir_builder_.CreateNSWSub(
+ ir_builder_.getInt64(window.dimensions(i).size() - 1),
+ kernel_spatial[i])
+ : kernel_spatial[i];
+ }
+
+ kernel_index[dnums.kernel_input_feature_dimension()] = input_feature;
+ kernel_index[dnums.kernel_output_feature_dimension()] = output_feature;
+
+ llvm_ir::IrArray input_array(GetIrArrayFor(lhs));
+ llvm::Value* product = ir_builder_.CreateFMul(
+ input_array.EmitReadArrayElement(input_index, &ir_builder_),
+ kernel_array.EmitReadArrayElement(kernel_index, &ir_builder_));
+ llvm::Value* sum =
+ ir_builder_.CreateFAdd(ir_builder_.CreateLoad(sum_address), product);
+ ir_builder_.CreateStore(sum, sum_address);
+
+ SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &ir_builder_);
+ return ir_builder_.CreateLoad(sum_address);
+}
+
Status IrEmitter::HandleConvolution(HloInstruction* convolution) {
auto lhs = convolution->operand(0);
auto rhs = convolution->operand(1);
- const auto& window = convolution->window();
TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(
/*instruction=*/*convolution, /*operands=*/{lhs, rhs},
/*supported_types=*/{F16, F32, C64}));
- const ConvolutionDimensionNumbers& dnums =
- convolution->convolution_dimension_numbers();
-
// TODO(tonywy): Add PotentiallyImplementedAsMKLConvolution to support
// different data layouts.
if (PotentiallyImplementedAsEigenConvolution(*convolution,
@@ -1000,150 +1133,9 @@ Status IrEmitter::HandleConvolution(HloInstruction* convolution) {
// See the description of convolution in the XLA documentation for the pseudo
// code for convolution.
return EmitTargetElementLoop(
- convolution, [this, convolution, lhs, rhs, window,
- dnums](const llvm_ir::IrArray::Index& index) {
- int num_spatial_dims = dnums.output_spatial_dimensions_size();
- std::vector<llvm::Value*> output_spatial(num_spatial_dims);
- for (int i = 0; i < num_spatial_dims; ++i) {
- output_spatial[i] = index[dnums.output_spatial_dimensions(i)];
- }
- llvm::Value* output_feature = index[dnums.output_feature_dimension()];
- llvm::Value* batch = index[dnums.output_batch_dimension()];
-
- // We will accumulate the products into this sum to calculate
- // the output entry at the given index.
- PrimitiveType lhs_element_type = lhs->shape().element_type();
- llvm::Type* lhs_llvm_type =
- llvm_ir::PrimitiveTypeToIrType(lhs_element_type, module_);
- llvm::Value* sum_address = llvm_ir::EmitAllocaAtFunctionEntry(
- lhs_llvm_type, "convolution_sum_address", &ir_builder_,
- MinimumAlignmentForPrimitiveType(lhs_element_type));
- llvm::Value* constant_zero =
- llvm::Constant::getNullValue(lhs_llvm_type);
- ir_builder_.CreateStore(constant_zero, sum_address);
-
- llvm_ir::ForLoopNest loops(IrName(convolution, "inner"), &ir_builder_);
- std::vector<llvm::Value*> kernel_spatial(num_spatial_dims);
- for (int i = 0; i < num_spatial_dims; ++i) {
- kernel_spatial[i] =
- loops
- .AddLoop(0,
- rhs->shape().dimensions(
- dnums.kernel_spatial_dimensions(i)),
- tensorflow::strings::StrCat("k", i))
- ->GetIndVarValue();
- }
- llvm::Value* input_feature =
- loops
- .AddLoop(
- 0, lhs->shape().dimensions(dnums.input_feature_dimension()),
- "iz")
- ->GetIndVarValue();
-
- SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &ir_builder_);
-
- // Calculate the spatial index in the input array, taking striding,
- // dilation and padding into account. An index in the padding will be
- // out of the bounds of the array.
- const auto calculate_input_index =
- [this](llvm::Value* output_index, llvm::Value* kernel_index,
- const WindowDimension& window_dim) {
- llvm::Value* strided_index = ir_builder_.CreateNSWMul(
- output_index, ir_builder_.getInt64(window_dim.stride()));
- llvm::Value* dilated_kernel_index = ir_builder_.CreateNSWMul(
- kernel_index,
- ir_builder_.getInt64(window_dim.window_dilation()));
- return ir_builder_.CreateNSWSub(
- ir_builder_.CreateNSWAdd(strided_index, dilated_kernel_index),
- ir_builder_.getInt64(window_dim.padding_low()));
- };
- std::vector<llvm::Value*> input_spatial(num_spatial_dims);
- for (int i = 0; i < num_spatial_dims; ++i) {
- input_spatial[i] = calculate_input_index(
- output_spatial[i], kernel_spatial[i], window.dimensions(i));
- }
-
- // We need to check if 0 <= input dim < bound, as otherwise we are in
- // the padding so that we can skip the computation. That is equivalent
- // to input dim < bound as an *unsigned* comparison, since a negative
- // value will wrap to a large positive value. The input dim is dilated,
- // so we need to dilate the bound as well to match.
-
- // Also need to check that the input coordinates are not in one of the
- // holes created by base dilation.
- const auto not_in_hole = [&](llvm::Value* input_index,
- int64 base_dilation) {
- llvm::Value* remainder = ir_builder_.CreateSRem(
- input_index, ir_builder_.getInt64(base_dilation));
- return ir_builder_.CreateICmpEQ(remainder, ir_builder_.getInt64(0));
- };
-
- llvm::Value* in_bounds_condition = ir_builder_.getInt1(true);
- for (int i = 0; i < num_spatial_dims; ++i) {
- llvm::ConstantInt* input_bound =
- ir_builder_.getInt64(window_util::DilatedBound(
- lhs->shape().dimensions(dnums.input_spatial_dimensions(i)),
- window.dimensions(i).base_dilation()));
- llvm::Value* dim_in_bound =
- ir_builder_.CreateICmpULT(input_spatial[i], input_bound);
- llvm::Value* dim_not_in_hole = not_in_hole(
- input_spatial[i], window.dimensions(i).base_dilation());
- llvm::Value* dim_ok =
- ir_builder_.CreateAnd(dim_in_bound, dim_not_in_hole);
- in_bounds_condition =
- ir_builder_.CreateAnd(in_bounds_condition, dim_ok);
- }
-
- // Now we need to map the dilated base coordinates back to the actual
- // data indices on the lhs.
- const auto undilate = [&](llvm::Value* input_index,
- int64 base_dilation) {
- return ir_builder_.CreateSDiv(input_index,
- ir_builder_.getInt64(base_dilation));
- };
- for (int i = 0; i < num_spatial_dims; ++i) {
- input_spatial[i] =
- undilate(input_spatial[i], window.dimensions(i).base_dilation());
- }
-
- llvm_ir::LlvmIfData if_data = llvm_ir::EmitIfThenElse(
- in_bounds_condition, "in-bounds", &ir_builder_);
- SetToFirstInsertPoint(if_data.true_block, &ir_builder_);
-
- // We are not in the padding, so carry out the computation.
- int num_dims = num_spatial_dims + 2;
- llvm_ir::IrArray::Index input_index(ir_builder_.getInt64Ty(), num_dims);
- for (int i = 0; i < num_spatial_dims; ++i) {
- input_index[dnums.input_spatial_dimensions(i)] = input_spatial[i];
- }
- input_index[dnums.input_feature_dimension()] = input_feature;
- input_index[dnums.input_batch_dimension()] = batch;
-
- llvm_ir::IrArray kernel_array(GetIrArrayFor(rhs));
- llvm_ir::IrArray::Index kernel_index(ir_builder_.getInt64Ty(),
- num_dims);
- for (int i = 0; i < num_spatial_dims; ++i) {
- kernel_index[dnums.kernel_spatial_dimensions(i)] =
- window.dimensions(i).window_reversal()
- ? ir_builder_.CreateNSWSub(
- ir_builder_.getInt64(window.dimensions(i).size() - 1),
- kernel_spatial[i])
- : kernel_spatial[i];
- }
-
- kernel_index[dnums.kernel_input_feature_dimension()] = input_feature;
- kernel_index[dnums.kernel_output_feature_dimension()] = output_feature;
-
- llvm_ir::IrArray input_array(GetIrArrayFor(lhs));
- llvm::Value* product = ir_builder_.CreateFMul(
- input_array.EmitReadArrayElement(input_index, &ir_builder_),
- kernel_array.EmitReadArrayElement(kernel_index, &ir_builder_));
- llvm::Value* sum = ir_builder_.CreateFAdd(
- ir_builder_.CreateLoad(sum_address), product);
- ir_builder_.CreateStore(sum, sum_address);
-
- SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &ir_builder_);
- return ir_builder_.CreateLoad(sum_address);
+ convolution, [&](const llvm_ir::IrArray::Index& index) {
+ return EmitTargetElementLoopBodyForConvolution(
+ Cast<HloConvolutionInstruction>(convolution), index);
});
}
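For reference, the input-index arithmetic that moved into EmitTargetElementLoopBodyForConvolution is ordinary integer math. A stand-alone sketch for one spatial dimension, covering stride, window dilation, low padding, the base-dilation hole check, and the dilated bound (window_util::DilatedBound(b, d) computes (b - 1) * d + 1 for b > 0). Window reversal is omitted, base_dilation >= 1 is assumed, and Dim and InputCoordinate are names invented here:

#include <cstdint>

// One spatial dimension of the convolution index computation: map an
// (output, kernel) coordinate pair to an input coordinate.
struct Dim {
  int64_t stride, window_dilation, base_dilation, padding_low, input_bound;
};

// Returns true and sets *input if the coordinate hits real data; returns
// false if it falls in the padding or in a base-dilation hole.
bool InputCoordinate(int64_t output, int64_t kernel, const Dim& d,
                     int64_t* input) {
  int64_t dilated =
      output * d.stride + kernel * d.window_dilation - d.padding_low;
  // Dilate the bound to match the dilated coordinate space.
  int64_t dilated_bound = (d.input_bound - 1) * d.base_dilation + 1;
  // Unsigned compare handles the 0 <= dilated check for free.
  if (static_cast<uint64_t>(dilated) >=
      static_cast<uint64_t>(dilated_bound)) {
    return false;  // in the padding
  }
  if (dilated % d.base_dilation != 0) {
    return false;  // in a hole created by base dilation
  }
  *input = dilated / d.base_dilation;  // undilate back to a data index
  return true;
}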
@@ -1780,6 +1772,64 @@ StatusOr<bool> IrEmitter::EmitVectorizedReduce(
return true;
}
+StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForReduce(
+ HloReduceInstruction* reduce, const llvm_ir::IrArray::Index& index) {
+ const HloInstruction* arg = reduce->mutable_operand(0);
+ const HloInstruction* init_value = reduce->mutable_operand(1);
+ gtl::ArraySlice<int64> dimensions(reduce->dimensions());
+ HloComputation* function = reduce->to_apply();
+ // The called computation should have been emitted previously.
+ llvm::Function* reducer_function = FindOrDie(emitted_functions_, function);
+
+ // Initialize an accumulator with init_value.
+ PrimitiveType accumulator_type = reduce->shape().element_type();
+ llvm::AllocaInst* accumulator_addr = llvm_ir::EmitAllocaAtFunctionEntry(
+ llvm_ir::PrimitiveTypeToIrType(accumulator_type, module_), "accumulator",
+ &ir_builder_, MinimumAlignmentForPrimitiveType(accumulator_type));
+ llvm::Value* init_value_addr = GetEmittedValueFor(init_value);
+ llvm::Value* load_init_value = ir_builder_.CreateLoad(init_value_addr);
+ ir_builder_.CreateStore(load_init_value, accumulator_addr);
+
+ // The enclosing loops go over all the target elements. Now we have to compute
+ // the actual target element. For this, we build a new loop nest to iterate
+ // over all the reduction dimensions in the argument.
+ // AddLoopsForShapeOnDimensions will return an Index where induction Value*s
+ // are placed for each dimension in dimensions, and all the rest are nullptrs.
+ llvm_ir::ForLoopNest loops(IrName(reduce, "inner"), &ir_builder_);
+ const llvm_ir::IrArray::Index reduced_dims_index =
+ loops.AddLoopsForShapeOnDimensions(arg->shape(), dimensions,
+ "reduction_dim");
+
+ SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &ir_builder_);
+
+ // Build a full index for the input argument, using reduced_dims_index as the
+ // base. In reduced_dims_index only the reduction dimensions are filled in. We
+ // fill in the rest of the dimensions with induction Value*s taken from
+ // 'index' which iterates over the target array. See the high-level
+ // description in the XLA documentation for details.
+ llvm_ir::IrArray arg_array(GetIrArrayFor(arg));
+ llvm_ir::IrArray::Index input_index = reduced_dims_index;
+ llvm_ir::IrArray::Index::const_iterator it = index.begin();
+
+ for (size_t i = 0; i < input_index.size(); ++i) {
+ if (input_index[i] == nullptr) {
+ input_index[i] = *it++;
+ }
+ }
+ CHECK(index.end() == it);
+
+ // Apply the reduction function to the loaded value.
+ llvm::Value* input_address =
+ arg_array.EmitArrayElementAddress(input_index, &ir_builder_);
+ llvm::Value* result = EmitElementFunctionCall(
+ reducer_function, reduce->shape(), {accumulator_addr, input_address},
+ "reduce_function");
+ ir_builder_.CreateStore(result, accumulator_addr);
+
+ SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &ir_builder_);
+ return ir_builder_.CreateLoad(accumulator_addr);
+}
+
Status IrEmitter::HandleReduce(HloInstruction* reduce) {
auto arg = reduce->mutable_operand(0);
auto init_value = reduce->mutable_operand(1);
@@ -1801,61 +1851,11 @@ Status IrEmitter::HandleReduce(HloInstruction* reduce) {
}
}
- // The called computation should have been emitted previously.
- llvm::Function* reducer_function = FindOrDie(emitted_functions_, function);
- return EmitTargetElementLoop(
- reduce, [this, reduce, arg, init_value, dimensions,
- reducer_function](const llvm_ir::IrArray::Index& index) {
- // Initialize an accumulator with init_value.
- PrimitiveType accumulator_type = reduce->shape().element_type();
- llvm::AllocaInst* accumulator_addr = llvm_ir::EmitAllocaAtFunctionEntry(
- llvm_ir::PrimitiveTypeToIrType(accumulator_type, module_),
- "accumulator", &ir_builder_,
- MinimumAlignmentForPrimitiveType(accumulator_type));
- llvm::Value* init_value_addr = GetEmittedValueFor(init_value);
- llvm::Value* load_init_value = ir_builder_.CreateLoad(init_value_addr);
- ir_builder_.CreateStore(load_init_value, accumulator_addr);
-
- // The enclosing loops go over all the target elements. Now we have to
- // compute the actual target element. For this, we build a new loop nest
- // to iterate over all the reduction dimensions in the argument.
- // AddLoopsForShapeOnDimensions will return an Index where induction
- // Value*s are placed for each dimension in dimensions, and all the rest
- // are nullptrs.
- llvm_ir::ForLoopNest loops(IrName(reduce, "inner"), &ir_builder_);
- const llvm_ir::IrArray::Index reduced_dims_index =
- loops.AddLoopsForShapeOnDimensions(arg->shape(), dimensions,
- "reduction_dim");
-
- SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &ir_builder_);
-
- // Build a full index for the input argument, using reduced_dims_index
- // as the base. In reduced_dims_index only the reduction dimensions are
- // filled in. We fill in the rest of the dimensions with induction
- // Value*s taken from 'index' which iterates over the target array.
- // See the high-level description in the XLA documentation for details.
- llvm_ir::IrArray arg_array(GetIrArrayFor(arg));
- llvm_ir::IrArray::Index input_index = reduced_dims_index;
- llvm_ir::IrArray::Index::const_iterator it = index.begin();
-
- for (size_t i = 0; i < input_index.size(); ++i) {
- if (input_index[i] == nullptr) {
- input_index[i] = *it++;
- }
- }
- CHECK(index.end() == it);
-
- // Apply the reduction function to the loaded value.
- llvm::Value* input_address =
- arg_array.EmitArrayElementAddress(input_index, &ir_builder_);
- llvm::Value* result = EmitElementFunctionCall(
- reducer_function, reduce->shape(),
- {accumulator_addr, input_address}, "reduce_function");
- ir_builder_.CreateStore(result, accumulator_addr);
-
- SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &ir_builder_);
- return ir_builder_.CreateLoad(accumulator_addr);
- });
+ return EmitTargetElementLoop(reduce,
+ [&](const llvm_ir::IrArray::Index& index) {
+ return EmitTargetElementLoopBodyForReduce(
+ Cast<HloReduceInstruction>(reduce), index);
+ });
}
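The index bookkeeping in EmitTargetElementLoopBodyForReduce has two sources: the reduction dimensions are filled by the inner loop nest, and every remaining slot is spliced in from the target-element index. That merge can be sketched with plain vectors, with std::optional playing the role of the nullptr slots in IrArray::Index (FullInputIndex is a name invented here):

#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

// Merge a partially-filled input index (reduction dims set by the inner
// loop nest) with the target-element index (all remaining dims), as the
// nullptr-slot loop in EmitTargetElementLoopBodyForReduce does.
std::vector<int64_t> FullInputIndex(
    const std::vector<std::optional<int64_t>>& reduced_dims_index,
    const std::vector<int64_t>& target_index) {
  std::vector<int64_t> full(reduced_dims_index.size());
  auto it = target_index.begin();
  for (size_t i = 0; i < full.size(); ++i) {
    full[i] = reduced_dims_index[i] ? *reduced_dims_index[i] : *it++;
  }
  assert(it == target_index.end());  // every target coordinate consumed
  return full;
}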
Status IrEmitter::HandleSend(HloInstruction* send) {
@@ -2539,7 +2539,7 @@ Status IrEmitter::HandleConditional(HloInstruction* conditional) {
return Status::OK();
}
-Status IrEmitter::HandleGenerateToken(HloInstruction* gen_token) {
+Status IrEmitter::HandleAfterAll(HloInstruction* gen_token) {
TF_RET_CHECK(ByteSizeOf(gen_token->shape()) == 0);
// No code to generate, but we need to emit an address for book-keeping.
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(gen_token));
diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.h b/tensorflow/compiler/xla/service/cpu/ir_emitter.h
index e1815c1db7..419f19c24d 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_emitter.h
+++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.h
@@ -30,12 +30,12 @@ limitations under the License.
#include "llvm/IR/Value.h"
#include "llvm/Target/TargetMachine.h"
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
-#include "tensorflow/compiler/xla/service/cpu/external_constant_pool.h"
#include "tensorflow/compiler/xla/service/cpu/ir_function.h"
#include "tensorflow/compiler/xla/service/cpu/target_machine_features.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
#include "tensorflow/compiler/xla/service/hlo_module_config.h"
#include "tensorflow/compiler/xla/service/llvm_ir/alias_analysis.h"
#include "tensorflow/compiler/xla/service/llvm_ir/ir_array.h"
@@ -67,17 +67,13 @@ class IrEmitter : public DfsHloVisitorWithDefault {
// index in the profiling array.
// computation_to_profile_idx: the mapping from HLO computations to their
// index in the profiling array.
- // external_constant_pool: if non-null, points to an ExternalConstantPool
- // instance into which the Ir emitter can spill
- // constants.
IrEmitter(const HloModule& hlo_module, const BufferAssignment& assignment,
llvm::Module* llvm_module,
std::unordered_map<const HloInstruction*, int64>
instruction_to_profile_idx,
std::unordered_map<const HloComputation*, int64>
computation_to_profile_idx,
- const TargetMachineFeatures* target_machine,
- ExternalConstantPool* external_constant_pool);
+ const TargetMachineFeatures* target_machine);
~IrEmitter() override;
// Emit and return the given HLO computation as an LLVM IR
@@ -122,6 +118,7 @@ class IrEmitter : public DfsHloVisitorWithDefault {
Status HandleCopy(HloInstruction* copy) override;
Status HandleGetTupleElement(HloInstruction* get_tuple_element) override;
Status HandleSelect(HloInstruction* select) override;
+ Status HandleTupleSelect(HloInstruction* tuple_select) override;
Status HandleDot(HloInstruction* dot) override;
Status HandleConvolution(HloInstruction* convolution) override;
Status HandleFft(HloInstruction* fft) override;
@@ -150,7 +147,7 @@ class IrEmitter : public DfsHloVisitorWithDefault {
Status HandleWhile(HloInstruction* xla_while) override;
Status HandleConcatenate(HloInstruction* concatenate) override;
Status HandleConditional(HloInstruction* conditional) override;
- Status HandleGenerateToken(HloInstruction* gen_token) override;
+ Status HandleAfterAll(HloInstruction* gen_token) override;
Status FinishVisit(HloInstruction* root) override;
Status Preprocess(HloInstruction* hlo) override;
@@ -518,6 +515,17 @@ class IrEmitter : public DfsHloVisitorWithDefault {
// Returns the number of bytes within the shape.
int64 ByteSizeOf(const Shape& shape) const;
+ StatusOr<llvm::Value*> EmitTargetElementLoopBodyForMap(
+ HloMapInstruction* map, const llvm_ir::IrArray::Index& index);
+ StatusOr<llvm::Value*> EmitTargetElementLoopBodyForReduceWindow(
+ HloReduceWindowInstruction* reduce_window,
+ const llvm_ir::IrArray::Index& index);
+ StatusOr<llvm::Value*> EmitTargetElementLoopBodyForConvolution(
+ HloConvolutionInstruction* convolution,
+ const llvm_ir::IrArray::Index& index);
+ StatusOr<llvm::Value*> EmitTargetElementLoopBodyForReduce(
+ HloReduceInstruction* reduce, const llvm_ir::IrArray::Index& index);
+
enum class XfeedKind {
kInfeed,
kOutfeed,
@@ -537,9 +545,6 @@ class IrEmitter : public DfsHloVisitorWithDefault {
const TargetMachineFeatures& target_machine_features_;
- int64 external_global_constant_counter_ = 0;
- ExternalConstantPool* external_constant_pool_;
-
struct LiteralPtrHashFunctor {
size_t operator()(const Literal* literal) const { return literal->Hash(); }
};
diff --git a/tensorflow/compiler/xla/service/cpu/parallel_task_assignment_test.cc b/tensorflow/compiler/xla/service/cpu/parallel_task_assignment_test.cc
index fc2efbaf9a..36c9f74385 100644
--- a/tensorflow/compiler/xla/service/cpu/parallel_task_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/parallel_task_assignment_test.cc
@@ -110,8 +110,9 @@ TEST_F(ParallelTaskAssignmentTest, InfeedOutfeedOperationNotParallelized) {
const string hlo_string = R"(
HloModule TestTaskParallel_infeed_outfeed
ENTRY InfeedOutfeed {
- infeed0 = u32[12345678,2]{1,0} infeed()
- ROOT outfeed0 = u32[12345678,2]{1,0} outfeed(infeed0)
+ infeed0 = (u32[12345678,2]{1,0}, token[]) infeed()
+ infeed0.data = u32[12345678,2]{1,0} get-tuple-element((u32[12345678,2]{1,0}, token[]) infeed0), index=0
+ ROOT outfeed0 = token[] outfeed(infeed0.data)
}
)";
diff --git a/tensorflow/compiler/xla/service/cpu/sample_harness.cc b/tensorflow/compiler/xla/service/cpu/sample_harness.cc
index 167aa4adda..d9e8dcaed9 100644
--- a/tensorflow/compiler/xla/service/cpu/sample_harness.cc
+++ b/tensorflow/compiler/xla/service/cpu/sample_harness.cc
@@ -23,7 +23,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
@@ -38,20 +38,21 @@ int main(int argc, char** argv) {
// Transfer parameters.
std::unique_ptr<xla::Literal> param0_literal =
- xla::Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 5.5f});
+ xla::LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 5.5f});
std::unique_ptr<xla::GlobalData> param0_data =
client->TransferToServer(*param0_literal).ConsumeValueOrDie();
- std::unique_ptr<xla::Literal> param1_literal = xla::Literal::CreateR2<float>(
- {{3.1f, 4.2f, 7.3f, 9.5f}, {1.1f, 2.2f, 3.3f, 4.4f}});
+ std::unique_ptr<xla::Literal> param1_literal =
+ xla::LiteralUtil::CreateR2<float>(
+ {{3.1f, 4.2f, 7.3f, 9.5f}, {1.1f, 2.2f, 3.3f, 4.4f}});
std::unique_ptr<xla::GlobalData> param1_data =
client->TransferToServer(*param1_literal).ConsumeValueOrDie();
// Build computation.
xla::XlaBuilder builder("");
- auto p0 = builder.Parameter(0, param0_literal->shape(), "param0");
- auto p1 = builder.Parameter(1, param1_literal->shape(), "param1");
- auto add = builder.Add(p1, p0, {0});
+ auto p0 = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ auto p1 = Parameter(&builder, 1, param1_literal->shape(), "param1");
+ Add(p1, p0, {0});
xla::StatusOr<xla::XlaComputation> computation_status = builder.Build();
xla::XlaComputation computation = computation_status.ConsumeValueOrDie();
diff --git a/tensorflow/compiler/xla/service/cpu/simple_orc_jit.cc b/tensorflow/compiler/xla/service/cpu/simple_orc_jit.cc
index c4c90515ac..be772cfb7e 100644
--- a/tensorflow/compiler/xla/service/cpu/simple_orc_jit.cc
+++ b/tensorflow/compiler/xla/service/cpu/simple_orc_jit.cc
@@ -127,13 +127,6 @@ SimpleOrcJIT::SimpleOrcJIT(const llvm::TargetOptions& target_options,
}
llvm::JITSymbol SimpleOrcJIT::ResolveRuntimeSymbol(const std::string& name) {
- if (const uint8* from_constant_pool =
- external_constant_pool_.Find(string(name))) {
- return llvm::JITEvaluatedSymbol(
- reinterpret_cast<uint64_t>(from_constant_pool),
- llvm::JITSymbolFlags::None);
- }
-
void* func_addr = CustomCallTargetRegistry::Global()->Lookup(name);
if (func_addr == nullptr) {
return nullptr;
diff --git a/tensorflow/compiler/xla/service/cpu/simple_orc_jit.h b/tensorflow/compiler/xla/service/cpu/simple_orc_jit.h
index 1851a3ee0b..d74b63fcf4 100644
--- a/tensorflow/compiler/xla/service/cpu/simple_orc_jit.h
+++ b/tensorflow/compiler/xla/service/cpu/simple_orc_jit.h
@@ -29,7 +29,6 @@ limitations under the License.
#include "llvm/Target/TargetMachine.h"
#include "tensorflow/compiler/xla/service/cpu/compiler_functor.h"
#include "tensorflow/compiler/xla/service/cpu/disassembler.h"
-#include "tensorflow/compiler/xla/service/cpu/external_constant_pool.h"
#include "tensorflow/compiler/xla/types.h"
namespace xla {
@@ -91,10 +90,6 @@ class SimpleOrcJIT {
llvm::TargetMachine* target_machine() const { return target_machine_.get(); }
- ExternalConstantPool* external_constant_pool() {
- return &external_constant_pool_;
- }
-
// Creates an llvm::TargetMachine suitable for JITting code that will run on
// the current machine.
static std::unique_ptr<llvm::TargetMachine> InferTargetMachineForJIT(
@@ -112,7 +107,6 @@ class SimpleOrcJIT {
std::shared_ptr<llvm::orc::SymbolResolver> symbol_resolver_;
ObjLayerT object_layer_;
CompileLayerT compile_layer_;
- ExternalConstantPool external_constant_pool_;
};
} // namespace cpu
diff --git a/tensorflow/compiler/xla/service/cpu/tests/BUILD b/tensorflow/compiler/xla/service/cpu/tests/BUILD
index 66ae5ef0f6..b4c33e2f6c 100644
--- a/tensorflow/compiler/xla/service/cpu/tests/BUILD
+++ b/tensorflow/compiler/xla/service/cpu/tests/BUILD
@@ -40,7 +40,7 @@ tf_cc_test(
name = "cpu_fusion_test",
srcs = ["cpu_fusion_test.cc"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
@@ -82,7 +82,7 @@ tf_cc_test(
name = "cpu_noalias_test",
srcs = ["cpu_noalias_test.cc"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
@@ -128,7 +128,7 @@ tf_cc_test(
name = "cpu_infeed_test",
srcs = ["cpu_infeed_test.cc"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:test_helpers",
diff --git a/tensorflow/compiler/xla/service/cpu/tests/cpu_codegen_test.h b/tensorflow/compiler/xla/service/cpu/tests/cpu_codegen_test.h
index 7c8d07a10b..77b3a0301f 100644
--- a/tensorflow/compiler/xla/service/cpu/tests/cpu_codegen_test.h
+++ b/tensorflow/compiler/xla/service/cpu/tests/cpu_codegen_test.h
@@ -22,7 +22,7 @@ namespace xla {
namespace cpu {
// Tests that verify IR emitted by the CPU backend is as expected.
-class CpuCodegenTest : public LLVMIRGenTestBase {};
+class CpuCodegenTest : public LlvmIrGenTestBase {};
} // namespace cpu
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/cpu/tests/cpu_external_constants_test.cc b/tensorflow/compiler/xla/service/cpu/tests/cpu_external_constants_test.cc
index faac927027..00a7aa2ad2 100644
--- a/tensorflow/compiler/xla/service/cpu/tests/cpu_external_constants_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/tests/cpu_external_constants_test.cc
@@ -40,7 +40,7 @@ class CpuExternalConstantsTest : public CpuCodegenTest {
HloInstruction* constant =
builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2FromArray2D(backing_array)));
+ LiteralUtil::CreateR2FromArray2D(backing_array)));
HloInstruction* param =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "x"));
builder.AddInstruction(
@@ -56,7 +56,8 @@ class CpuExternalConstantsTest : public CpuCodegenTest {
TEST_F(CpuExternalConstantsTest, Basic) {
TestWithArray(/*rows=*/1024, /*cols=*/1024, R"(
-CHECK: @constant_global_0 = external constant [1024 x [1024 x float]], align 16
+CHECK-NOT: @constant_global_0 = external constant [1024 x [1024 x float]], align 16
+CHECK: @0 = private constant [4194304 x i8] {{.*}}, align 16
)");
}
@@ -65,7 +66,7 @@ TEST_F(CpuExternalConstantsTest, BasicNegative) {
// to externalize it.
TestWithArray(/*rows=*/4, /*cols=*/4, R"(
CHECK-NOT: @constant_global_0 = external constant [16 x float], align 8
-CHECK: @0 = private constant [16 x float] {{.*}}, align 8
+CHECK: @0 = private constant [64 x i8] {{.*}}, align 8
)");
}
} // namespace
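The updated FileCheck patterns above follow from simple size arithmetic once constants are emitted as raw byte arrays: a 1024x1024 f32 array is 1024 * 1024 * 4 = 4194304 bytes, and a 4x4 f32 array is 64 bytes. As compile-time checks (assuming the usual 4-byte float, which the asserts themselves verify):

// Sizes behind the CHECK lines above: f32 constants are now emitted as
// private i8 arrays, so the lengths are element counts times 4 bytes.
static_assert(1024u * 1024u * sizeof(float) == 4194304u, "");
static_assert(4u * 4u * sizeof(float) == 64u, "");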
diff --git a/tensorflow/compiler/xla/service/cpu/tests/cpu_fusion_test.cc b/tensorflow/compiler/xla/service/cpu/tests/cpu_fusion_test.cc
index 23e7a3de4d..d98856fdbf 100644
--- a/tensorflow/compiler/xla/service/cpu/tests/cpu_fusion_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/tests/cpu_fusion_test.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include <utility>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/cpu/cpu_instruction_fusion.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
@@ -43,8 +43,8 @@ class CpuFusionTest : public HloTestBase {
TEST_F(CpuFusionTest, FuseTwoElementwiseOps) {
auto builder = HloComputation::Builder(TestName());
- auto input_literal1 = Literal::CreateR1<float>({1.0, 2.0, 3.0});
- auto input_literal2 = Literal::CreateR1<float>({-2.0, -42.0, 2.0});
+ auto input_literal1 = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0});
+ auto input_literal2 = LiteralUtil::CreateR1<float>({-2.0, -42.0, 2.0});
Shape vshape = input_literal1->shape();
auto input1 = builder.AddInstruction(
@@ -83,7 +83,7 @@ TEST_F(CpuFusionTest, FuseTwoElementwiseOps) {
TEST_F(CpuFusionTest, FuseElementwiseOpChain) {
auto builder = HloComputation::Builder(TestName());
- auto input_literal = Literal::CreateR1<float>({-1.5, -2.5, -3.0});
+ auto input_literal = LiteralUtil::CreateR1<float>({-1.5, -2.5, -3.0});
Shape vshape = input_literal->shape();
auto input = builder.AddInstruction(
@@ -96,8 +96,11 @@ TEST_F(CpuFusionTest, FuseElementwiseOpChain) {
HloInstruction::CreateUnary(vshape, HloOpcode::kExp, ceil));
auto floor = builder.AddInstruction(
HloInstruction::CreateUnary(vshape, HloOpcode::kFloor, exp));
- auto two = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ auto two = builder.AddInstruction(HloInstruction::CreateBroadcast(
+ vshape,
+ builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))),
+ {}));
builder.AddInstruction(
HloInstruction::CreateBinary(vshape, HloOpcode::kMultiply, two, floor));
@@ -114,9 +117,9 @@ TEST_F(CpuFusionTest, FuseElementwiseOpChain) {
EXPECT_EQ(HloOpcode::kFusion, fusion_instruction->opcode());
EXPECT_EQ(HloOpcode::kMultiply,
fusion_instruction->fused_expression_root()->opcode());
- // There should be 7 fused instructions: 2 parameters and the fused
+ // There should be 8 fused instructions: 2 parameters and the fused
// operations.
- EXPECT_EQ(7, fusion_instruction->fused_instruction_count());
+ EXPECT_EQ(8, fusion_instruction->fused_instruction_count());
// Compile and execute the computation.
auto result = ExecuteAndTransfer(std::move(module), {});
@@ -131,7 +134,7 @@ TEST_F(CpuFusionTest, ElementwiseOpChainWithNonfusableInstruction) {
// middle.
auto module = CreateNewModule();
auto builder = HloComputation::Builder(TestName());
- auto input_literal = Literal::CreateR1<float>({-1.5, -2.5, -3.0});
+ auto input_literal = LiteralUtil::CreateR1<float>({-1.5, -2.5, -3.0});
Shape vshape = input_literal->shape();
auto input = builder.AddInstruction(
@@ -163,15 +166,18 @@ TEST_F(CpuFusionTest, ElementwiseOpChainWithNonfusableInstruction) {
ShapeUtil::MakeShape(F32, {6, 1}), concatenate)),
/*init_value=*/
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0))),
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))),
/*dimensions_to_reduce=*/{1}, add_f32));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(cshape, HloOpcode::kExp, reduce));
auto floor = builder.AddInstruction(
HloInstruction::CreateUnary(cshape, HloOpcode::kFloor, exp));
- auto two = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ auto two = builder.AddInstruction(HloInstruction::CreateBroadcast(
+ cshape,
+ builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))),
+ {}));
builder.AddInstruction(
HloInstruction::CreateBinary(cshape, HloOpcode::kMultiply, two, floor));
@@ -188,9 +194,9 @@ TEST_F(CpuFusionTest, ElementwiseOpChainWithNonfusableInstruction) {
EXPECT_EQ(HloOpcode::kFusion, fusion_instruction1->opcode());
EXPECT_EQ(HloOpcode::kMultiply,
fusion_instruction1->fused_expression_root()->opcode());
- // There should be 5 fused instructions in the root fusion instruction: 2
+ // There should be 6 fused instructions in the root fusion instruction: 2
// parameters, broadcast, multiply, floor, and exp.
- EXPECT_EQ(5, fusion_instruction1->fused_instruction_count())
+ EXPECT_EQ(6, fusion_instruction1->fused_instruction_count())
<< fusion_instruction1->fused_instructions_computation()->ToString();
auto fusion_instruction2 = reduce->operand(0);
@@ -225,7 +231,7 @@ TEST_F(CpuFusionTest, TestOperandOrderToAvoidDuplication) {
// operand vectors. Test for this problem by counting the number of nodes in
// each fusion instruction to ensure that negate is not duplicated.
auto builder = HloComputation::Builder(TestName());
- auto input_literal = Literal::CreateR1<float>({1.0, 2.0, 3.0});
+ auto input_literal = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0});
Shape vshape = input_literal->shape();
auto constant = builder.AddInstruction(
@@ -286,10 +292,10 @@ TEST_F(CpuFusionTest, DoNotDuplicateExpensiveOps) {
// computation. The duplication is caused by the other use of exp2 in the
// tuple.
auto builder = HloComputation::Builder(TestName());
- auto input_literal1 = Literal::CreateR1<float>({1.0, 2.0, 3.0});
- auto input_literal2 = Literal::CreateR1<float>({-2.0, -42.0, 2.0});
+ auto input_literal1 = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0});
+ auto input_literal2 = LiteralUtil::CreateR1<float>({-2.0, -42.0, 2.0});
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
Shape shape = constant->shape();
auto exp1 = builder.AddInstruction(
diff --git a/tensorflow/compiler/xla/service/cpu/tests/cpu_infeed_test.cc b/tensorflow/compiler/xla/service/cpu/tests/cpu_infeed_test.cc
index dd63b998e9..0d45918d09 100644
--- a/tensorflow/compiler/xla/service/cpu/tests/cpu_infeed_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/tests/cpu_infeed_test.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/test_helpers.h"
@@ -47,7 +47,7 @@ class InfeedTest : public ClientLibraryTestBase {
// don't use ResetDevice since it is not implemented on CPU.
ASSERT_IS_OK(client_->TransferToInfeed(literal));
XlaBuilder builder(TestName());
- builder.Infeed(literal.shape());
+ Infeed(&builder, literal.shape());
if (ShapeUtil::IsTuple(literal.shape())) {
// TODO(b/30609564): Use ComputeAndCompareLiteral instead.
ComputeAndCompareTuple(&builder, literal, {});
@@ -58,52 +58,52 @@ class InfeedTest : public ClientLibraryTestBase {
};
TEST_F(InfeedTest, SingleInfeedR0Bool) {
- TestInfeedRoundTrip(*Literal::CreateR0<bool>(true));
+ TestInfeedRoundTrip(*LiteralUtil::CreateR0<bool>(true));
}
TEST_F(InfeedTest, SingleInfeedR1U32) {
- TestInfeedRoundTrip(*Literal::CreateR1<uint32>({1, 2, 3}));
+ TestInfeedRoundTrip(*LiteralUtil::CreateR1<uint32>({1, 2, 3}));
}
TEST_F(InfeedTest, SingleInfeedR2F32) {
- TestInfeedRoundTrip(*Literal::CreateR2F32Linspace(0.0, 1.0, 128, 64));
+ TestInfeedRoundTrip(*LiteralUtil::CreateR2F32Linspace(0.0, 1.0, 128, 64));
}
TEST_F(InfeedTest, SingleInfeedR3F32) {
TestInfeedRoundTrip(
- *Literal::CreateR3({{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}},
- {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}}));
+ *LiteralUtil::CreateR3({{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}},
+ {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}}));
}
TEST_F(InfeedTest, SingleInfeedR3F32DifferentLayout) {
const Layout r3_dim0minor = LayoutUtil::MakeLayout({0, 1, 2});
const Layout r3_dim0major = LayoutUtil::MakeLayout({2, 1, 0});
- TestInfeedRoundTrip(
- *Literal::CreateR3WithLayout({{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}},
- {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}},
- r3_dim0minor));
+ TestInfeedRoundTrip(*LiteralUtil::CreateR3WithLayout(
+ {{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}},
+ {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}},
+ r3_dim0minor));
- TestInfeedRoundTrip(
- *Literal::CreateR3WithLayout({{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}},
- {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}},
- r3_dim0major));
+ TestInfeedRoundTrip(*LiteralUtil::CreateR3WithLayout(
+ {{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}},
+ {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}},
+ r3_dim0major));
}
TEST_F(InfeedTest, SingleInfeedR4S32) {
- TestInfeedRoundTrip(*Literal::CreateR4(
+ TestInfeedRoundTrip(*LiteralUtil::CreateR4(
{{{{1, -2}, {-4, 5}, {6, 7}}, {{8, 9}, {10, 11}, {12, 13}}},
{{{10, 3}, {7, -2}, {3, 6}}, {{2, 5}, {-11, 5}, {-2, -5}}}}));
}
TEST_F(InfeedTest, SingleInfeedTuple) {
TestInfeedRoundTrip(
- *Literal::MakeTuple({Literal::CreateR1<uint32>({1, 2, 3}).get(),
- Literal::CreateR0<bool>(false).get()}));
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<uint32>({1, 2, 3}).get(),
+ LiteralUtil::CreateR0<bool>(false).get()}));
}
TEST_F(InfeedTest, SingleInfeedEmptyTuple) {
- TestInfeedRoundTrip(*Literal::MakeTuple({}));
+ TestInfeedRoundTrip(*LiteralUtil::MakeTuple({}));
}
// Tests Infeed operation used in a while loop, as in the code below. The
@@ -125,8 +125,8 @@ TEST_F(InfeedTest, DISABLED_SingleInfeedInWhile) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- builder.Gt(builder.ConstantR0<float>(40.0f), prev);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ Gt(ConstantR0<float>(&builder, 40.0f), prev);
condition = builder.Build().ConsumeValueOrDie();
}
// Create a computation for the body: add the reduced value of the Infeed
@@ -134,17 +134,16 @@ TEST_F(InfeedTest, DISABLED_SingleInfeedInWhile) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto infeed = builder.Infeed(infeed_shape);
- auto addend =
- builder.Reduce(infeed, builder.ConstantR0<float>(0.0f),
- CreateScalarAddComputation(F32, &builder), {0});
- builder.Add(prev, addend);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto infeed = Infeed(&builder, infeed_shape);
+ auto addend = Reduce(infeed, ConstantR0<float>(&builder, 0.0f),
+ CreateScalarAddComputation(F32, &builder), {0});
+ Add(prev, addend);
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
- auto init = builder.ConstantR0<float>(0.0f);
- builder.While(condition, body, init);
+ auto init = ConstantR0<float>(&builder, 0.0f);
+ While(condition, body, init);
// Build and asynchronously launch the computation.
auto computation = builder.Build().ConsumeValueOrDie();
@@ -157,13 +156,16 @@ TEST_F(InfeedTest, DISABLED_SingleInfeedInWhile) {
});
// Send 5 Infeed data of shape F32[3].
- ASSERT_IS_OK(client_->TransferToInfeed(*Literal::CreateR1<float>({1, 2, 3})));
- ASSERT_IS_OK(client_->TransferToInfeed(*Literal::CreateR1<float>({4, 5, 6})));
- ASSERT_IS_OK(client_->TransferToInfeed(*Literal::CreateR1<float>({7, 8, 9})));
ASSERT_IS_OK(
- client_->TransferToInfeed(*Literal::CreateR1<float>({10, 11, 12})));
+ client_->TransferToInfeed(*LiteralUtil::CreateR1<float>({1, 2, 3})));
+ ASSERT_IS_OK(
+ client_->TransferToInfeed(*LiteralUtil::CreateR1<float>({4, 5, 6})));
ASSERT_IS_OK(
- client_->TransferToInfeed(*Literal::CreateR1<float>({13, 14, 15})));
+ client_->TransferToInfeed(*LiteralUtil::CreateR1<float>({7, 8, 9})));
+ ASSERT_IS_OK(
+ client_->TransferToInfeed(*LiteralUtil::CreateR1<float>({10, 11, 12})));
+ ASSERT_IS_OK(
+ client_->TransferToInfeed(*LiteralUtil::CreateR1<float>({13, 14, 15})));
delete computation_thread; // Joins the thread.
auto result_literal = client_->Transfer(*result).ConsumeValueOrDie();
@@ -207,8 +209,8 @@ TEST_F(InfeedTest, DISABLED_TwoInfeedsInTotalOrder) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- builder.GetTupleElement(prev, 1);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ GetTupleElement(prev, 1);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -221,44 +223,44 @@ TEST_F(InfeedTest, DISABLED_TwoInfeedsInTotalOrder) {
const auto build_body = [this, &result_shape](const Shape& infeed_shape) {
XlaComputation body;
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto infeed = builder.Infeed(infeed_shape);
- auto addend = builder.Reduce(
- builder.GetTupleElement(infeed, 0), builder.ConstantR0<float>(0.0f),
- CreateScalarAddComputation(F32, &builder), {0});
- auto result = builder.Add(builder.GetTupleElement(prev, 0), addend);
- builder.Tuple({result, builder.GetTupleElement(infeed, 1)});
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto infeed = Infeed(&builder, infeed_shape);
+ auto addend =
+ Reduce(GetTupleElement(infeed, 0), ConstantR0<float>(&builder, 0.0f),
+ CreateScalarAddComputation(F32, &builder), {0});
+ auto result = Add(GetTupleElement(prev, 0), addend);
+ Tuple(&builder, {result, GetTupleElement(infeed, 1)});
return builder.Build().ConsumeValueOrDie();
};
// Create the first while loop with infeed1_shape.
- auto init = builder.Tuple(
- {builder.ConstantR0<float>(0.0f), builder.ConstantR0<bool>(true)});
- auto while1 = builder.While(condition, build_body(infeed1_shape), init);
- auto result1 = builder.Tuple(
- {builder.GetTupleElement(while1, 0), builder.ConstantR0<bool>(true)});
+ auto init = Tuple(&builder, {ConstantR0<float>(&builder, 0.0f),
+ ConstantR0<bool>(&builder, true)});
+ auto while1 = While(condition, build_body(infeed1_shape), init);
+ auto result1 = Tuple(
+ &builder, {GetTupleElement(while1, 0), ConstantR0<bool>(&builder, true)});
// Create the second while loop with infeed2_shape. Note that the result from
// the first while loop is used as the initial value.
- auto while2 = builder.While(condition, build_body(infeed2_shape), result1);
- builder.GetTupleElement(while2, 0);
+ auto while2 = While(condition, build_body(infeed2_shape), result1);
+ GetTupleElement(while2, 0);
// Build the computation.
auto computation = builder.Build().ConsumeValueOrDie();
// Send the first 4 Infeed data of shape Tuple(F32[2], PRED).
ASSERT_IS_OK(client_->TransferToInfeed(
- *Literal::MakeTuple({Literal::CreateR1<float>({1, 2}).get(),
- Literal::CreateR0<bool>(true).get()})));
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>({1, 2}).get(),
+ LiteralUtil::CreateR0<bool>(true).get()})));
ASSERT_IS_OK(client_->TransferToInfeed(
- *Literal::MakeTuple({Literal::CreateR1<float>({3, 4}).get(),
- Literal::CreateR0<bool>(true).get()})));
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>({3, 4}).get(),
+ LiteralUtil::CreateR0<bool>(true).get()})));
ASSERT_IS_OK(client_->TransferToInfeed(
- *Literal::MakeTuple({Literal::CreateR1<float>({5, 6}).get(),
- Literal::CreateR0<bool>(true).get()})));
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>({5, 6}).get(),
+ LiteralUtil::CreateR0<bool>(true).get()})));
ASSERT_IS_OK(client_->TransferToInfeed(
- *Literal::MakeTuple({Literal::CreateR1<float>({7, 8}).get(),
- Literal::CreateR0<bool>(false).get()})));
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>({7, 8}).get(),
+ LiteralUtil::CreateR0<bool>(false).get()})));
// Asynchronously launch the execution on the device.
std::unique_ptr<GlobalData> result;
@@ -273,14 +275,14 @@ TEST_F(InfeedTest, DISABLED_TwoInfeedsInTotalOrder) {
// Infeed data, and send the rest Infeed data of shape Tuple(F32[3], PRED).
sleep(1);
ASSERT_IS_OK(client_->TransferToInfeed(
- *Literal::MakeTuple({Literal::CreateR1<float>({1, 2, 3}).get(),
- Literal::CreateR0<bool>(true).get()})));
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>({1, 2, 3}).get(),
+ LiteralUtil::CreateR0<bool>(true).get()})));
ASSERT_IS_OK(client_->TransferToInfeed(
- *Literal::MakeTuple({Literal::CreateR1<float>({7, 8, 9}).get(),
- Literal::CreateR0<bool>(false).get()})));
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>({7, 8, 9}).get(),
+ LiteralUtil::CreateR0<bool>(false).get()})));
ASSERT_IS_OK(client_->TransferToInfeed(
- *Literal::MakeTuple({Literal::CreateR1<float>({4, 5, 6}).get(),
- Literal::CreateR0<bool>(true).get()})));
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>({4, 5, 6}).get(),
+ LiteralUtil::CreateR0<bool>(true).get()})));
// Wait for the execution to be done, and transfer the result.
delete computation_thread; // Joins the thread.
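
Note: the test hunks above all follow one mechanical migration: XlaBuilder ops move from member methods to free functions that take the builder as their first argument, and literal construction moves from the Literal class to LiteralUtil. A minimal sketch of the new calling convention, assuming the XLA client headers of this era (standalone illustration, not part of the patch):

// Sketch only: illustrates the builder.Op(...) -> Op(&builder, ...) migration
// seen in the hunks above. Assumes the xla_builder.h of this commit.
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/shape_util.h"

xla::XlaComputation BuildGtForty() {
  xla::XlaBuilder builder("condition");
  const xla::Shape shape = xla::ShapeUtil::MakeShape(xla::F32, {});
  // Old style (removed by this patch):
  //   auto prev = builder.Parameter(0, shape, "prev");
  //   builder.Gt(builder.ConstantR0<float>(40.0f), prev);
  auto prev = xla::Parameter(&builder, 0, shape, "prev");
  xla::Gt(xla::ConstantR0<float>(&builder, 40.0f), prev);  // free-function style
  return builder.Build().ConsumeValueOrDie();
}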
diff --git a/tensorflow/compiler/xla/service/cpu/tests/cpu_literal_caching_test.cc b/tensorflow/compiler/xla/service/cpu/tests/cpu_literal_caching_test.cc
index 27044b1d62..90b99c828e 100644
--- a/tensorflow/compiler/xla/service/cpu/tests/cpu_literal_caching_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/tests/cpu_literal_caching_test.cc
@@ -38,7 +38,8 @@ while_body {
while_cond {
arg_cond = f32[2,3,2] parameter(0)
- ROOT unknown = pred[] infeed()
+ infeed = (pred[], token[]) infeed()
+ ROOT unknown = pred[] get-tuple-element((pred[], token[]) infeed), index=0
}
ENTRY main {
@@ -49,14 +50,14 @@ ENTRY main {
{{2, 1}, {2001, 3002}, {2001, 2002}}})
const_b = f32[2,3,2] while(f32[2,3,2] const_a), condition=while_cond, body=while_body
- out0 = () outfeed(f32[2,3,2] const_a)
- ROOT out1 = () outfeed(f32[2,3,2] const_b)
+ out0 = token[] outfeed(f32[2,3,2] const_a)
+ ROOT out1 = token[] outfeed(f32[2,3,2] const_b)
}
)";
string filecheck_pattern = R"(
-CHECK: private constant [12 x float]
-CHECK-NOT: private constant [12 x float]
+CHECK: private constant [48 x i8]
+CHECK-NOT: private constant [48 x i8]
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
@@ -84,7 +85,8 @@ while_body {
while_cond {
arg_cond = (f32[2,1]{1,0}, f32[1]{0}) parameter(0)
- ROOT unknown = pred[] infeed()
+ infeed = (pred[], token[]) infeed()
+ ROOT unknown = pred[] get-tuple-element((pred[], token[]) infeed), index=0
}
ENTRY main {
@@ -98,10 +100,10 @@ ENTRY main {
)";
string filecheck_pattern = R"(
-CHECK: private constant [1 x float]
-CHECK: private constant [2 x float]
-CHECK-NOT: private constant [1 x float]
-CHECK-NOT: private constant [2 x float]
+CHECK: private constant [4 x i8]
+CHECK: private constant [8 x i8]
+CHECK-NOT: private constant [4 x i8]
+CHECK-NOT: private constant [8 x i8]
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
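
Note: two independent changes show up in this file's expectations. First, infeed now produces a (data, token[]) tuple and outfeed returns token[], so the HLO text threads tokens explicitly instead of yielding pred[] or () directly. Second, the FileCheck patterns count bytes rather than typed elements, presumably because the CPU backend now emits constants as untyped byte arrays; the sizes are consistent with 4-byte f32 elements:

12 \times \underbrace{4}_{\mathrm{sizeof}(\texttt{f32})} = 48 \text{ bytes}, \qquad 1 \times 4 = 4 \text{ bytes}, \qquad 2 \times 4 = 8 \text{ bytes}.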
diff --git a/tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc b/tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc
index 3b6b0ed740..ccb61740f6 100644
--- a/tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include <utility>
#include "llvm/IR/Module.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/cpu/tests/cpu_codegen_test.h"
@@ -42,7 +42,7 @@ TEST_F(CpuNoAliasTest, Concat) {
HloComputation::Builder builder(TestName());
std::unique_ptr<Literal> literal =
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto param_shape = ShapeUtil::MakeShape(F32, {2, 2});
HloInstruction* param_x = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "x"));
diff --git a/tensorflow/compiler/xla/service/cpu/tests/cpu_outfeed_test.cc b/tensorflow/compiler/xla/service/cpu/tests/cpu_outfeed_test.cc
index 1ee279290b..dac416e1c7 100644
--- a/tensorflow/compiler/xla/service/cpu/tests/cpu_outfeed_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/tests/cpu_outfeed_test.cc
@@ -32,12 +32,13 @@ ENTRY main {
{{{1, 2}, {1001, 1002}, {2001, 2002}},
{{2, 1}, {2001, 3002}, {2001, 2002}}})
- ROOT out = () outfeed(f32[2,3,2] const_a)
+ outfeed = token[] outfeed(f32[2,3,2] const_a)
+ ROOT root = () tuple()
}
)";
string filecheck_pattern = R"(
-CHECK: private constant [12 x float]
+CHECK: private constant [48 x i8]
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
diff --git a/tensorflow/compiler/xla/service/defuser_test.cc b/tensorflow/compiler/xla/service/defuser_test.cc
index 32b5c5d35f..e727ba49cb 100644
--- a/tensorflow/compiler/xla/service/defuser_test.cc
+++ b/tensorflow/compiler/xla/service/defuser_test.cc
@@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/defuser.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/tests/hlo_verified_test_base.h"
@@ -124,7 +124,7 @@ TEST_F(DefuserTest, NonTrivialFusionInstruction) {
auto div = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kDivide, mul, param3));
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, constant, div));
@@ -162,7 +162,7 @@ TEST_F(DefuserTest, MultipleFusionInstructions) {
auto div = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kDivide, mul, param3));
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, constant, div));
diff --git a/tensorflow/compiler/xla/service/dfs_hlo_visitor.h b/tensorflow/compiler/xla/service/dfs_hlo_visitor.h
index 7d56d57b5f..51f16bdc94 100644
--- a/tensorflow/compiler/xla/service/dfs_hlo_visitor.h
+++ b/tensorflow/compiler/xla/service/dfs_hlo_visitor.h
@@ -19,7 +19,7 @@ limitations under the License.
#include <type_traits>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/types.h"
@@ -76,6 +76,7 @@ class DfsHloVisitorBase {
virtual Status HandleClamp(HloInstructionPtr hlo) = 0;
virtual Status HandleSelect(HloInstructionPtr hlo) = 0;
+ virtual Status HandleTupleSelect(HloInstructionPtr hlo) = 0;
virtual Status HandleMaximum(HloInstructionPtr hlo) {
return HandleElementwiseBinary(hlo);
}
@@ -246,7 +247,7 @@ class DfsHloVisitorBase {
virtual Status HandleBatchNormGrad(HloInstructionPtr hlo) = 0;
- virtual Status HandleGenerateToken(HloInstructionPtr token) = 0;
+ virtual Status HandleAfterAll(HloInstructionPtr token) = 0;
// Invoked to inform the visitor that the traversal has completed, and that
// the root was "root".
diff --git a/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h b/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h
index 6934e00a4b..0686ca74af 100644
--- a/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h
+++ b/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h
@@ -16,7 +16,7 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_DFS_HLO_VISITOR_WITH_DEFAULT_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_DFS_HLO_VISITOR_WITH_DEFAULT_H_
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/types.h"
@@ -79,6 +79,9 @@ class DfsHloVisitorWithDefaultBase
Status HandleSelect(HloInstructionPtr select) override {
return DefaultAction(select);
}
+ Status HandleTupleSelect(HloInstructionPtr tuple_select) override {
+ return DefaultAction(tuple_select);
+ }
Status HandleDot(HloInstructionPtr dot) override {
return DefaultAction(dot);
}
@@ -188,7 +191,7 @@ class DfsHloVisitorWithDefaultBase
Status HandleGather(HloInstructionPtr gather) override {
return DefaultAction(gather);
}
- Status HandleGenerateToken(HloInstructionPtr token) override {
+ Status HandleAfterAll(HloInstructionPtr token) override {
return DefaultAction(token);
}
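
Note: HandleTupleSelect slots into the usual visitor-with-default pattern: concrete visitors override only the handlers they care about and fall back to DefaultAction for everything else. A hypothetical sketch (the class and counters are illustrative, not from this patch):

// Illustrative only: a visitor that gives kTupleSelect special treatment
// and routes every other opcode through DefaultAction.
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"

class CountingVisitor : public xla::DfsHloVisitorWithDefault {
 public:
  xla::Status DefaultAction(xla::HloInstruction* hlo) override {
    ++other_ops_;
    return xla::Status::OK();
  }
  // New hook from this patch: tuple-shaped select no longer funnels
  // through HandleSelect.
  xla::Status HandleTupleSelect(xla::HloInstruction* tuple_select) override {
    ++tuple_selects_;
    return xla::Status::OK();
  }

 private:
  int other_ops_ = 0;
  int tuple_selects_ = 0;
};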
diff --git a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
index ce0951bbe1..c51632597a 100644
--- a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
@@ -468,6 +468,10 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatUnaryOp(
}
case HloOpcode::kNegate:
return ir_builder_->CreateFNeg(operand_value);
+ case HloOpcode::kReal:
+ return operand_value;
+ case HloOpcode::kImag:
+ return llvm::ConstantFP::get(operand_value->getType(), 0.0);
default:
return Unimplemented("unary floating-point op '%s'",
HloOpcodeString(op->opcode()).c_str());
@@ -1227,7 +1231,14 @@ llvm_ir::IrArray::Index ElementalIrEmitter::ElementwiseSourceIndex(
// If no implicit broadcast is needed for this operand, returns the target
// index as the source index.
- if (ShapeUtil::CompatibleIgnoringElementType(operand_shape, hlo.shape())) {
+ //
+ // `IrArray::Index` may contain a physical linear index which we can
+ // propagate to our operand only if our layouts match. "only if" is a bit
+ // strong since e.g. we can still forward the linear index if the operand
+ // shape is [5,1,1,5]{3,2,1,0} and the HLO shape is [5,1,1,5]{3,1,2,0}, but
+ // those cases are probably not worth handling here for now.
+ if (ShapeUtil::CompatibleIgnoringElementType(operand_shape, hlo.shape()) &&
+ LayoutUtil::Equal(operand_shape.layout(), hlo.shape().layout())) {
return target_index;
}
@@ -1558,19 +1569,18 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalDynamicSlice(
// TODO(b/74360564): This is implementation defined behavior, but is
// currently respected by all implementations. Change this if we ever decide
- // to oficially document different behavior.
+ // to officially document different behavior.
start_index_value =
ir_builder_->CreateSExtOrTrunc(start_index_value, index_type);
- llvm::Value* operand_dim_size =
- index_typed_const(input_hlo->shape().dimensions(i));
- llvm::Value* output_dim_size =
- index_typed_const(hlo->shape().dimensions(i));
+ int64 largest_valid_start_index =
+ input_hlo->shape().dimensions(i) - hlo->shape().dimensions(i);
+ CHECK_GE(largest_valid_start_index, 0);
+ bool is_signed = ShapeUtil::ElementIsSigned(hlo->operand(1)->shape());
start_index_value = EmitIntegralMin(
- ir_builder_->CreateSub(operand_dim_size, output_dim_size),
- EmitIntegralMax(index_typed_const(0), start_index_value,
- /*is_signed=*/true),
- /*is_signed=*/true);
+ index_typed_const(largest_valid_start_index),
+ EmitIntegralMax(index_typed_const(0), start_index_value, is_signed),
+ is_signed);
start_index_value->setName(
AsStringRef(IrName(hlo, StrCat("start_idx", i))));
@@ -1603,19 +1613,22 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalGather(
llvm::Type* index_type = index.GetType();
// This is the index into `operand` that holds the element we want to
- // generate. This index "unsafe" as in the components in here may be
- // out of bounds.
- IrArray::Index unsafe_operand_index(index_type);
-
- // First copy in the window indices to unsafe_operand_index.
- for (int64 i = 0, e = operand_shape.dimensions_size(),
- unsafe_operand_index_dim = 0;
+ // generate.
+ IrArray::Index operand_index(index_type);
+
+ // First copy in the window indices to operand_index. Also collect a mapping
+ // from operand dimension to output window dimension. Elided window dimensions
+ // map to -1.
+ std::vector<int64> operand_to_output_dim(operand_shape.dimensions_size(), -1);
+ for (int64 i = 0, e = operand_shape.dimensions_size(), operand_index_dim = 0;
i < e; i++) {
if (c_binary_search(dim_numbers.elided_window_dims(), i)) {
- unsafe_operand_index.push_back(index.GetConstantWithIndexType(0));
+ operand_index.push_back(index.GetConstantWithIndexType(0));
} else {
- unsafe_operand_index.push_back(
- index[dim_numbers.output_window_dims(unsafe_operand_index_dim++)]);
+ int64 output_window_dim =
+ dim_numbers.output_window_dims(operand_index_dim++);
+ operand_to_output_dim[i] = output_window_dim;
+ operand_index.push_back(index[output_window_dim]);
}
}
@@ -1634,20 +1647,40 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalGather(
}
}
- auto add_to_unsafe_operand_index = [&](llvm::Value* index_component,
- int64 dim) {
+ auto add_to_operand_index = [&](llvm::Value* index_component, int64 dim) {
llvm::Value* gather_dim_component_extended =
ir_builder_->CreateSExtOrTrunc(index_component, index_type);
- unsafe_operand_index[dim_numbers.gather_dims_to_operand_dims(dim)] =
- ir_builder_->CreateAdd(
- unsafe_operand_index[dim_numbers.gather_dims_to_operand_dims(dim)],
- gather_dim_component_extended);
+ int64 operand_dim = dim_numbers.gather_dims_to_operand_dims(dim);
+ int64 output_dim = operand_to_output_dim[operand_dim];
+ // If 'output_dim' is -1, it means 'operand_dim' is an elided window dim.
+ // This means we set the iteration index to 0, so for the purpose of the
+ // following calculations we can consider the output dimension size to be 1.
+ int64 output_dim_size =
+ output_dim == -1 ? 1 : output_shape.dimensions(output_dim);
+ int64 largest_valid_start_index =
+ operand_shape.dimensions(operand_dim) - output_dim_size;
+ CHECK_GE(largest_valid_start_index, 0);
+
+ // Clamp the gather index so that the gather region fits in the operand.
+ // gather_dim_component_extended_inbound =
+ // clamp(gather_dim_component_extended, 0, largest_valid_start_index);
+
+ // TODO(b/111078873): This is implementation defined behavior.
+ bool is_signed = ShapeUtil::ElementIsSigned(indices_shape);
+ auto gather_dim_component_extended_inbound = EmitIntegralMin(
+ index.GetConstantWithIndexType(largest_valid_start_index),
+ EmitIntegralMax(index.GetConstantWithIndexType(0),
+ gather_dim_component_extended, is_signed),
+ is_signed);
+
+ operand_index[operand_dim] = ir_builder_->CreateAdd(
+ operand_index[operand_dim], gather_dim_component_extended_inbound);
};
if (indices_shape.dimensions_size() == dim_numbers.index_vector_dim()) {
TF_ASSIGN_OR_RETURN(llvm::Value * gather_dim_component,
indices_generator(gather_index_index));
- add_to_unsafe_operand_index(gather_dim_component, 0);
+ add_to_operand_index(gather_dim_component, 0);
} else {
int64 index_vector_size =
indices_shape.dimensions(dim_numbers.index_vector_dim());
@@ -1656,18 +1689,10 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalGather(
index.GetConstantWithIndexType(i);
TF_ASSIGN_OR_RETURN(llvm::Value * gather_dim_component,
indices_generator(gather_index_index));
- add_to_unsafe_operand_index(gather_dim_component, i);
+ add_to_operand_index(gather_dim_component, i);
}
}
-
- IrArray::Index safe_operand_index(index_type);
- for (int64 i = 0, e = unsafe_operand_index.size(); i < e; i++) {
- safe_operand_index.push_back(ir_builder_->CreateURem(
- unsafe_operand_index[i],
- index.GetConstantWithIndexType(operand_shape.dimensions(i))));
- }
-
- return operand_generator(safe_operand_index);
+ return operand_generator(operand_index);
}
StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalDynamicUpdateSlice(
@@ -1699,19 +1724,20 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalDynamicUpdateSlice(
// TODO(b/74360564): This is implementation defined behavior, but is
// currently respected by all implementations. Change this if we ever decide
- // to oficially document different behavior.
+ // to officially document different behavior.
start_index_value =
ir_builder_->CreateSExtOrTrunc(start_index_value, index_type);
- llvm::Value* input_dim_size =
- index_typed_const(input_hlo->shape().dimensions(i));
llvm::Value* update_dim_size =
index_typed_const(update_hlo->shape().dimensions(i));
+ int64 largest_valid_start_index =
+ input_hlo->shape().dimensions(i) - update_hlo->shape().dimensions(i);
+ CHECK_GE(largest_valid_start_index, 0);
- start_index_value =
- EmitIntegralMin(ir_builder_->CreateSub(input_dim_size, update_dim_size),
- EmitIntegralMax(index_typed_const(0), start_index_value,
- /*is_signed=*/true),
- /*is_signed=*/true);
+ bool is_signed = ShapeUtil::ElementIsSigned(start_hlo->shape());
+ start_index_value = EmitIntegralMin(
+ index_typed_const(largest_valid_start_index),
+ EmitIntegralMax(index_typed_const(0), start_index_value, is_signed),
+ is_signed);
start_index_value->setName(
AsStringRef(IrName(hlo, StrCat("start_idx", i))));
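
Note: the dynamic-slice, gather, and dynamic-update-slice hunks above all converge on the same bounds policy: instead of wrapping out-of-range indices (the old URem in the gather path) or recomputing the bound in IR with a Sub, the start index is clamped into [0, operand_dim - window_dim] using the index operand's own signedness. A scalar model of the emitted min/max pair, under the implementation-defined-behavior caveat the comments already carry:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Scalar sketch of the clamp that EmitIntegralMin/EmitIntegralMax emit as
// LLVM IR; ClampStartIndex is a hypothetical helper, not part of the patch.
int64_t ClampStartIndex(int64_t start, int64_t operand_dim_size,
                        int64_t window_dim_size) {
  // The patch CHECKs this is non-negative before emitting any IR.
  const int64_t largest_valid_start_index =
      operand_dim_size - window_dim_size;
  assert(largest_valid_start_index >= 0);
  // clamp(start, 0, largest_valid_start_index): e.g. a window of 3 in a
  // dimension of 10 clamps start = 9 down to 7, so the window always fits
  // instead of wrapping around as the old URem-based lowering did.
  return std::min(largest_valid_start_index,
                  std::max<int64_t>(0, start));
}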
diff --git a/tensorflow/compiler/xla/service/elemental_ir_emitter_test.cc b/tensorflow/compiler/xla/service/elemental_ir_emitter_test.cc
index 8980d43033..addb016b04 100644
--- a/tensorflow/compiler/xla/service/elemental_ir_emitter_test.cc
+++ b/tensorflow/compiler/xla/service/elemental_ir_emitter_test.cc
@@ -57,8 +57,8 @@ ENTRY main {
}
)";
- std::unique_ptr<Literal> lhs = Literal::CreateR3<int32>({{{1}, {2}}});
- std::unique_ptr<Literal> rhs = Literal::CreateR3<int32>({{{3}, {4}}});
+ std::unique_ptr<Literal> lhs = LiteralUtil::CreateR3<int32>({{{1}, {2}}});
+ std::unique_ptr<Literal> rhs = LiteralUtil::CreateR3<int32>({{{3}, {4}}});
RunTest(hlo_text, {lhs.get(), rhs.get()});
}
} // namespace
diff --git a/tensorflow/compiler/xla/service/flatten_call_graph_test.cc b/tensorflow/compiler/xla/service/flatten_call_graph_test.cc
index d3854b40de..8f6608241e 100644
--- a/tensorflow/compiler/xla/service/flatten_call_graph_test.cc
+++ b/tensorflow/compiler/xla/service/flatten_call_graph_test.cc
@@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/flatten_call_graph.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/call_graph.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -80,7 +80,7 @@ class FlattenCallGraphTest : public HloTestBase {
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* zero = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kGt, param0, zero));
return builder.Build();
@@ -157,7 +157,7 @@ TEST_F(FlattenCallGraphTest, SharedWhileConditionAndBody) {
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(PRED, {}), "param0"));
HloInstruction* false_constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
builder.AddInstruction(
HloInstruction::CreateBinary(ShapeUtil::MakeShape(PRED, {}),
HloOpcode::kEq, param0, false_constant));
@@ -168,7 +168,7 @@ TEST_F(FlattenCallGraphTest, SharedWhileConditionAndBody) {
{
HloComputation::Builder builder(TestName() + ".entry");
HloInstruction* false_constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
builder.AddInstruction(HloInstruction::CreateWhile(
ShapeUtil::MakeShape(PRED, {}), cond_computation, cond_computation,
false_constant));
@@ -232,11 +232,11 @@ TEST_F(FlattenCallGraphTest, FlattenCallsInConditional) {
// computation in the true and false branch.
HloComputation::Builder builder(TestName());
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(56.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.0f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(12.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.0f)));
builder.AddInstruction(HloInstruction::CreateConditional(
kScalarShape, pred, constant1, sub_computation, constant2,
sub_computation));
diff --git a/tensorflow/compiler/xla/service/gather_expander.cc b/tensorflow/compiler/xla/service/gather_expander.cc
index 7cd2c9c136..e3a42d0d06 100644
--- a/tensorflow/compiler/xla/service/gather_expander.cc
+++ b/tensorflow/compiler/xla/service/gather_expander.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include <utility>
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/gather_expander.h"
#include "tensorflow/compiler/xla/service/hlo_creation_utils.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -113,7 +114,7 @@ static StatusOr<HloInstruction*> ExpandIndexVectorIntoOperandSpace(
const Shape& index_shape = index_vector->shape();
HloInstruction* zero =
computation->AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateFromDimensions(index_shape.element_type(), {1})));
+ LiteralUtil::CreateFromDimensions(index_shape.element_type(), {1})));
// We extract out individual components from the smaller index and concatenate
// them (interspersing zeros as needed) into the larger index.
diff --git a/tensorflow/compiler/xla/service/generic_transfer_manager.cc b/tensorflow/compiler/xla/service/generic_transfer_manager.cc
index 85e28a0dfe..e314a469f0 100644
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.cc
@@ -20,7 +20,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/interpreter/platform_id.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@@ -158,16 +158,10 @@ Status GenericTransferManager::TransferLiteralToInfeed(
return Unimplemented("Generic transfer to Infeed");
}
-Status GenericTransferManager::TransferBufferToInfeed(
- se::StreamExecutor* executor, int64 size, const void* source) {
- return Unimplemented("Generic transfer to Infeed");
-}
-
Status GenericTransferManager::TransferLiteralFromOutfeed(
se::StreamExecutor* executor, const Shape& literal_shape,
Literal* literal) {
- return Unimplemented(
- "Outfeed is not supported on this platform (b/30467474)");
+ return Unimplemented("Generic transfer from Outfeed");
}
Status GenericTransferManager::ResetDevices(
diff --git a/tensorflow/compiler/xla/service/generic_transfer_manager.h b/tensorflow/compiler/xla/service/generic_transfer_manager.h
index d216fe7d29..3cd002c1bf 100644
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.h
@@ -61,9 +61,6 @@ class GenericTransferManager : public TransferManager {
int64 GetByteSizeRequirement(const Shape& shape) const override;
protected:
- Status TransferBufferToInfeed(se::StreamExecutor* executor, int64 size,
- const void* source) override;
-
Status WriteSingleTupleIndexTable(
se::Stream* stream,
tensorflow::gtl::ArraySlice<se::DeviceMemoryBase> elements,
diff --git a/tensorflow/compiler/xla/service/gpu/BUILD b/tensorflow/compiler/xla/service/gpu/BUILD
index af6d298589..59172e53d3 100644
--- a/tensorflow/compiler/xla/service/gpu/BUILD
+++ b/tensorflow/compiler/xla/service/gpu/BUILD
@@ -150,7 +150,7 @@ cc_library(
":parallel_loop_emitter",
":partition_assignment",
":while_transformer",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
@@ -165,6 +165,7 @@ cc_library(
"//tensorflow/compiler/xla/service/llvm_ir:fused_ir_emitter",
"//tensorflow/compiler/xla/service/llvm_ir:ir_array",
"//tensorflow/compiler/xla/service/llvm_ir:kernel_support_library",
+ "//tensorflow/compiler/xla/service/llvm_ir:kernel_tiling",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_loop",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
"//tensorflow/compiler/xla/service/llvm_ir:loop_emitter",
@@ -199,7 +200,7 @@ cc_library(
srcs = ["elemental_ir_emitter.cc"],
hdrs = ["elemental_ir_emitter.h"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
@@ -246,6 +247,7 @@ cc_library(
"//tensorflow/compiler/xla/service:hlo_execution_profile",
"//tensorflow/compiler/xla/service:pool",
"//tensorflow/core:lib",
+ "//tensorflow/core:ptr_util",
"//tensorflow/core:stream_executor_no_cuda",
],
)
@@ -264,6 +266,7 @@ cc_library(
"infeed_thunk.cc",
"kernel_thunk.cc",
"memset_thunk.cc",
+ "outfeed_thunk.cc",
"sequential_thunk.cc",
"thunk_schedule.cc",
"tuple_thunk.cc",
@@ -281,6 +284,7 @@ cc_library(
"infeed_thunk.h",
"kernel_thunk.h",
"memset_thunk.h",
+ "outfeed_thunk.h",
"sequential_thunk.h",
"thunk.h",
"thunk_schedule.h",
@@ -288,15 +292,16 @@ cc_library(
"while_thunk.h",
],
deps = [
- ":backend_configs",
":buffer_allocations",
":cudnn_convolution_runner",
":hlo_execution_profiler",
":infeed_manager",
":ir_emission_utils",
+ ":outfeed_manager",
":partition_assignment",
":stream_assignment",
"//tensorflow/compiler/xla:array2d",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_tree",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status",
@@ -350,6 +355,7 @@ cc_library(
":cudnn_convolution_runner",
":gpu_executable",
":ir_emission_utils",
+ "//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla/service:device_memory_allocator",
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service:hlo_pass",
@@ -381,7 +387,7 @@ cc_library(
hdrs = ["cudnn_convolution_rewriter.h"],
deps = [
":ir_emission_utils",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:window_util",
"//tensorflow/compiler/xla:xla_data_proto",
@@ -442,6 +448,7 @@ cc_library(
srcs = ["multi_output_fusion.cc"],
hdrs = ["multi_output_fusion.h"],
deps = [
+ ":ir_emission_utils",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service:multi_output_fusion",
@@ -515,6 +522,7 @@ cc_library(
hdrs = ["pad_insertion.h"],
deps = [
":ir_emission_utils",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:window_util",
@@ -531,7 +539,10 @@ cc_library(
hdrs = ["gpu_transfer_manager.h"],
deps = [
":gpu_compiler",
+ ":outfeed_manager",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:shape_tree",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
@@ -622,6 +633,7 @@ cc_library(
hdrs = ["cudnn_batchnorm_rewriter.h"],
deps = [
":ir_emission_utils",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service:hlo_pass",
@@ -629,18 +641,39 @@ cc_library(
)
cc_library(
+ name = "xfeed_queue",
+ hdrs = ["xfeed_queue.h"],
+ deps = ["//tensorflow/core:lib"],
+)
+
+cc_library(
name = "infeed_manager",
srcs = ["infeed_manager.cc"],
hdrs = ["infeed_manager.h"],
deps = [
+ ":xfeed_queue",
+ "//tensorflow/compiler/xla:shape_tree",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
- "//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
],
)
cc_library(
+ name = "outfeed_manager",
+ srcs = ["outfeed_manager.cc"],
+ hdrs = ["outfeed_manager.h"],
+ deps = [
+ ":xfeed_queue",
+ "//tensorflow/compiler/xla:literal",
+ "//tensorflow/compiler/xla:shape_tree",
+ "//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:util",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
name = "gpu_layout_assignment",
srcs = ["gpu_layout_assignment.cc"],
hdrs = ["gpu_layout_assignment.h"],
@@ -714,7 +747,7 @@ cc_library(
srcs = ["while_transformer.cc"],
hdrs = ["while_transformer.h"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
@@ -769,6 +802,7 @@ cc_library(
hdrs = ["stream_executor_util.h"],
deps = [
"//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:stream_executor_no_cuda",
],
diff --git a/tensorflow/compiler/xla/service/gpu/conditional_thunk.cc b/tensorflow/compiler/xla/service/gpu/conditional_thunk.cc
index 77a48965e0..5780e0af40 100644
--- a/tensorflow/compiler/xla/service/gpu/conditional_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/conditional_thunk.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/conditional_thunk.h"
#include "tensorflow/compiler/xla/ptr_util.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
@@ -32,8 +33,11 @@ ConditionalThunk::ConditionalThunk(
predicate_buffer_index_(predicate_buffer_index),
true_operand_buffer_index_(true_operand_buffer_index),
false_operand_buffer_index_(false_operand_buffer_index),
- true_thunk_(std::move(true_thunk_sequence), hlo),
- false_thunk_(std::move(false_thunk_sequence), hlo) {}
+ // Pass nullptr as the HloInstruction* to the true_thunk_ and false_thunk_
+ // constructors because these SequentialThunks are logically "part of"
+ // this ConditionalThunk, and shouldn't be profiled separately from it.
+ true_thunk_(std::move(true_thunk_sequence), nullptr),
+ false_thunk_(std::move(false_thunk_sequence), nullptr) {}
Status ConditionalThunk::Initialize(const GpuExecutable& executable,
se::StreamExecutor* executor) {
@@ -43,7 +47,9 @@ Status ConditionalThunk::Initialize(const GpuExecutable& executable,
}
Status ConditionalThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+ const BufferAllocations& buffer_allocations, se::Stream* stream,
+ HloExecutionProfiler* profiler) {
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
// Copy the predicate value from device.
bool predicate;
se::DeviceMemoryBase predicate_address =
@@ -59,10 +65,15 @@ Status ConditionalThunk::ExecuteOnStream(
// Execute the true or the false computation depending on the value of the
// predicate.
if (predicate) {
- TF_RETURN_IF_ERROR(true_thunk_.ExecuteOnStream(buffer_allocations, stream));
+ profiler->StartHloComputation();
+ TF_RETURN_IF_ERROR(
+ true_thunk_.ExecuteOnStream(buffer_allocations, stream, profiler));
+ profiler->FinishHloComputation(hlo_instruction()->true_computation());
} else {
+ profiler->StartHloComputation();
TF_RETURN_IF_ERROR(
- false_thunk_.ExecuteOnStream(buffer_allocations, stream));
+ false_thunk_.ExecuteOnStream(buffer_allocations, stream, profiler));
+ profiler->FinishHloComputation(hlo_instruction()->false_computation());
}
return Status::OK();
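
Note: this is the shape every thunk's ExecuteOnStream takes in the hunks that follow: a new HloExecutionProfiler parameter, plus an RAII scope around the thunk's device work. A minimal sketch (MyThunk is hypothetical; the profiler calls mirror the patch):

// Sketch of the post-patch ExecuteOnStream contract; MyThunk is illustrative.
Status MyThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
                                se::Stream* stream,
                                HloExecutionProfiler* profiler) {
  // RAII: records this thunk's HLO as started/finished on `stream`.
  auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
  // ... enqueue the thunk's device work on `stream` ...
  return Status::OK();
}

ConditionalThunk additionally wraps whichever branch it executes in StartHloComputation()/FinishHloComputation(computation), which is why its nested SequentialThunks are built with a nullptr HloInstruction*: they are attributed to the conditional rather than profiled a second time.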
diff --git a/tensorflow/compiler/xla/service/gpu/conditional_thunk.h b/tensorflow/compiler/xla/service/gpu/conditional_thunk.h
index ee03865d17..aef24342c9 100644
--- a/tensorflow/compiler/xla/service/gpu/conditional_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/conditional_thunk.h
@@ -17,6 +17,7 @@ limitations under the License.
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_CONDITIONAL_THUNK_H_
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/sequential_thunk.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -50,7 +51,8 @@ class ConditionalThunk : public Thunk {
Status Initialize(const GpuExecutable& executable,
se::StreamExecutor* executor) override;
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
BufferAllocation::Slice predicate_buffer_index_;
diff --git a/tensorflow/compiler/xla/service/gpu/convolution_thunk.cc b/tensorflow/compiler/xla/service/gpu/convolution_thunk.cc
index f088112412..7833a4077e 100644
--- a/tensorflow/compiler/xla/service/gpu/convolution_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/convolution_thunk.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include <string>
#include "tensorflow/compiler/xla/service/gpu/cudnn_convolution_runner.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/strings/strcat.h"
@@ -55,7 +56,8 @@ ConvolutionThunk::ConvolutionThunk(
tensor_ops_enabled_(tensor_ops_enabled) {}
Status ConvolutionThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+ const BufferAllocations& buffer_allocations, se::Stream* stream,
+ HloExecutionProfiler* profiler) {
se::DeviceMemoryBase input_data =
buffer_allocations.GetDeviceAddress(input_buffer_);
se::DeviceMemoryBase filter_data =
@@ -68,6 +70,7 @@ Status ConvolutionThunk::ExecuteOnStream(
se::dnn::AlgorithmConfig algorithm_config(
se::dnn::AlgorithmDesc(algorithm_, tensor_ops_enabled_));
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
TF_RETURN_IF_ERROR(RunCudnnConvolution(
convolution_kind_, input_shape_, filter_shape_, output_shape_, input_data,
filter_data, output_data, scratch, window_, dim_nums_, algorithm_config,
diff --git a/tensorflow/compiler/xla/service/gpu/convolution_thunk.h b/tensorflow/compiler/xla/service/gpu/convolution_thunk.h
index 6d845025b1..d76ca6698d 100644
--- a/tensorflow/compiler/xla/service/gpu/convolution_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/convolution_thunk.h
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
#include "tensorflow/compiler/xla/service/gpu/cudnn_convolution_runner.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_executable.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/types.h"
@@ -66,7 +67,8 @@ class ConvolutionThunk : public Thunk {
// Does the convolution for the thunk on "stream".
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
class ScratchAllocator;
diff --git a/tensorflow/compiler/xla/service/gpu/copy_thunk.cc b/tensorflow/compiler/xla/service/gpu/copy_thunk.cc
index ee38c0318a..92e03f94c1 100644
--- a/tensorflow/compiler/xla/service/gpu/copy_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/copy_thunk.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/copy_thunk.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
namespace xla {
@@ -30,9 +31,11 @@ HostToDeviceCopyThunk::HostToDeviceCopyThunk(
mem_size_(mem_size) {}
Status HostToDeviceCopyThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+ const BufferAllocations& buffer_allocations, se::Stream* stream,
+ HloExecutionProfiler* profiler) {
se::DeviceMemoryBase destination_data =
buffer_allocations.GetDeviceAddress(destination_buffer_);
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
stream->ThenMemcpy(&destination_data, source_address_, mem_size_);
return Status::OK();
}
@@ -47,11 +50,13 @@ DeviceToDeviceCopyThunk::DeviceToDeviceCopyThunk(
mem_size_(mem_size) {}
Status DeviceToDeviceCopyThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+ const BufferAllocations& buffer_allocations, se::Stream* stream,
+ HloExecutionProfiler* profiler) {
se::DeviceMemoryBase destination_data =
buffer_allocations.GetDeviceAddress(destination_buffer_);
se::DeviceMemoryBase source_data =
buffer_allocations.GetDeviceAddress(source_buffer_);
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
stream->ThenMemcpy(&destination_data, source_data, mem_size_);
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/gpu/copy_thunk.h b/tensorflow/compiler/xla/service/gpu/copy_thunk.h
index 8b128386f6..91564b520a 100644
--- a/tensorflow/compiler/xla/service/gpu/copy_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/copy_thunk.h
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
@@ -40,7 +41,8 @@ class HostToDeviceCopyThunk : public Thunk {
HostToDeviceCopyThunk& operator=(const HostToDeviceCopyThunk&) = delete;
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
const void* source_address_;
@@ -63,7 +65,8 @@ class DeviceToDeviceCopyThunk : public Thunk {
DeviceToDeviceCopyThunk& operator=(const DeviceToDeviceCopyThunk&) = delete;
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
const BufferAllocation::Slice source_buffer_;
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_rewriter.cc b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_rewriter.cc
index db6924c742..6028950652 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_rewriter.cc
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_rewriter.cc
@@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_rewriter.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
@@ -66,11 +67,12 @@ Status Visitor::HandleBatchNormInference(HloInstruction* batch_norm) {
return Status::OK();
}
- HloInstruction* epsilon = computation_->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(batch_norm->epsilon())));
+ HloInstruction* epsilon =
+ computation_->AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR0(batch_norm->epsilon())));
HloInstruction* feature_index =
computation_->AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR0(batch_norm->feature_index())));
+ LiteralUtil::CreateR0(batch_norm->feature_index())));
std::vector<HloInstruction*> operands(batch_norm->operands().begin(),
batch_norm->operands().end());
@@ -101,11 +103,12 @@ Status Visitor::HandleBatchNormTraining(HloInstruction* batch_norm) {
return Status::OK();
}
- HloInstruction* epsilon = computation_->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(batch_norm->epsilon())));
+ HloInstruction* epsilon =
+ computation_->AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR0(batch_norm->epsilon())));
HloInstruction* feature_index =
computation_->AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR0(batch_norm->feature_index())));
+ LiteralUtil::CreateR0(batch_norm->feature_index())));
std::vector<HloInstruction*> operands(batch_norm->operands().begin(),
batch_norm->operands().end());
@@ -126,12 +129,17 @@ Status Visitor::HandleBatchNormTraining(HloInstruction* batch_norm) {
HloInstruction* variance_plus_epsilon =
computation_->AddInstruction(HloInstruction::CreateBinary(
inverse_stddev->shape(), HloOpcode::kPower, inverse_stddev,
- computation_->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(-2)))));
+ computation_->AddInstruction(HloInstruction::CreateBroadcast(
+ inverse_stddev->shape(),
+ computation_->AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR0<float>(-2))),
+ {}))));
HloInstruction* variance =
computation_->AddInstruction(HloInstruction::CreateBinary(
variance_plus_epsilon->shape(), HloOpcode::kSubtract,
- variance_plus_epsilon, epsilon));
+ variance_plus_epsilon,
+ computation_->AddInstruction(HloInstruction::CreateBroadcast(
+ variance_plus_epsilon->shape(), epsilon, {}))));
// Repackage the results.
std::unique_ptr<HloInstruction> new_tuple = HloInstruction::CreateTuple({
@@ -164,23 +172,29 @@ Status Visitor::HandleBatchNormGrad(HloInstruction* batch_norm) {
return Status::OK();
}
- HloInstruction* epsilon = computation_->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(batch_norm->epsilon())));
+ HloInstruction* epsilon =
+ computation_->AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR0(batch_norm->epsilon())));
HloInstruction* feature_index =
computation_->AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR0(batch_norm->feature_index())));
+ LiteralUtil::CreateR0(batch_norm->feature_index())));
// The cudnn libcall expects its input to be rsqrt(variance + epsilon), but
// the batchnorm HLO takes plain variance as input. Fix it up.
HloInstruction* var_plus_epsilon =
computation_->AddInstruction(HloInstruction::CreateBinary(
batch_norm->operand(3)->shape(), HloOpcode::kAdd,
- batch_norm->mutable_operand(3), epsilon));
+ batch_norm->mutable_operand(3),
+ computation_->AddInstruction(HloInstruction::CreateBroadcast(
+ batch_norm->operand(3)->shape(), epsilon, {}))));
HloInstruction* inverse_stddev =
computation_->AddInstruction(HloInstruction::CreateBinary(
var_plus_epsilon->shape(), HloOpcode::kPower, var_plus_epsilon,
- computation_->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(-.5)))));
+ computation_->AddInstruction(HloInstruction::CreateBroadcast(
+ var_plus_epsilon->shape(),
+ computation_->AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR0<float>(-.5))),
+ {}))));
std::vector<HloInstruction*> operands(batch_norm->operands().begin(),
batch_norm->operands().end());
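
Note: the exponents in these hunks come straight from converting between cudnn's rsqrt-of-variance convention and the HLO's plain variance; the new explicit kBroadcast nodes only make the previously implicit scalar broadcast visible. Writing s for the inverse stddev the libcall produces or consumes, v for the HLO-level variance, and \epsilon for the epsilon constant:

s = (v + \epsilon)^{-1/2} \;\Longrightarrow\; v = s^{-2} - \epsilon

so HandleBatchNormGrad feeds the libcall (v + \epsilon)^{-1/2} via kPower with exponent -0.5, and HandleBatchNormTraining recovers v from the libcall's output via kPower with exponent -2 followed by subtracting the broadcast \epsilon.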
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc
index 68099fd638..7b172812c3 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include <string>
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
@@ -99,13 +100,15 @@ CudnnBatchNormForwardInferenceThunk::CudnnBatchNormForwardInferenceThunk(
}
Status CudnnBatchNormForwardInferenceThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+ const BufferAllocations& buffer_allocations, se::Stream* stream,
+ HloExecutionProfiler* profiler) {
dnn::BatchDescriptor operand_desc;
dnn::BatchDescriptor scale_offset_desc;
std::tie(operand_desc, scale_offset_desc) =
MakeDescriptors(hlo_instruction()->shape(), feature_index_);
se::DeviceMemory<float> output(buffer_allocations.GetDeviceAddress(output_));
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
stream->ThenBatchNormalizationForward(
se::DeviceMemory<float>(buffer_allocations.GetDeviceAddress(operand_)),
se::DeviceMemory<float>(buffer_allocations.GetDeviceAddress(scale_)),
@@ -123,6 +126,7 @@ Status CudnnBatchNormForwardInferenceThunk::ExecuteOnStream(
/*is_training=*/false, //
/*var_to_inv_var=*/nullptr, //
/*inv_var_to_var=*/nullptr);
+
if (!stream->ok()) {
return InternalError("BatchNormalizationForward call failed.");
}
@@ -158,7 +162,8 @@ CudnnBatchNormForwardTrainingThunk::CudnnBatchNormForwardTrainingThunk(
}
Status CudnnBatchNormForwardTrainingThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+ const BufferAllocations& buffer_allocations, se::Stream* stream,
+ HloExecutionProfiler* profiler) {
dnn::BatchDescriptor operand_desc;
dnn::BatchDescriptor scale_offset_desc;
// The BatchNormTraining HLO outputs a tuple of three elements: output data,
@@ -175,6 +180,7 @@ Status CudnnBatchNormForwardTrainingThunk::ExecuteOnStream(
buffer_allocations.GetDeviceAddress(output_inv_stddev_));
se::DeviceMemory<float> null_device_ptr(nullptr);
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
stream->ThenBatchNormalizationForward(
se::DeviceMemory<float>(buffer_allocations.GetDeviceAddress(operand_)),
se::DeviceMemory<float>(buffer_allocations.GetDeviceAddress(scale_)),
@@ -240,7 +246,8 @@ CudnnBatchNormBackwardThunk::CudnnBatchNormBackwardThunk(
}
Status CudnnBatchNormBackwardThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+ const BufferAllocations& buffer_allocations, se::Stream* stream,
+ HloExecutionProfiler* profiler) {
dnn::BatchDescriptor operand_desc;
dnn::BatchDescriptor scale_offset_desc;
@@ -257,6 +264,7 @@ Status CudnnBatchNormBackwardThunk::ExecuteOnStream(
se::DeviceMemory<float> output_grad_offset(
buffer_allocations.GetDeviceAddress(output_grad_offset_));
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
stream->ThenBatchNormalizationBackward(
se::DeviceMemory<float>(
buffer_allocations.GetDeviceAddress(grad_output_)),
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.h b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.h
index 874f85a863..d2143b3952 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.h
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/types.h"
@@ -60,7 +61,8 @@ class CudnnBatchNormForwardInferenceThunk : public Thunk {
const CudnnBatchNormForwardInferenceThunk&) = delete;
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
BufferAllocation::Slice operand_;
@@ -90,7 +92,8 @@ class CudnnBatchNormForwardTrainingThunk : public Thunk {
const CudnnBatchNormForwardTrainingThunk&) = delete;
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
BufferAllocation::Slice operand_;
@@ -123,7 +126,8 @@ class CudnnBatchNormBackwardThunk : public Thunk {
delete;
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
BufferAllocation::Slice operand_;
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc
index 3dc98c4c93..5a63e65208 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc
@@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.h"
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/gpu/backend_configs.pb.h"
#include "tensorflow/compiler/xla/service/gpu/convolution_thunk.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
@@ -80,8 +81,7 @@ bool ShouldIncludeWinogradNonfusedAlgo(const Shape& input_shape,
const ConvolutionDimensionNumbers& dnums,
se::StreamExecutor* stream_exec) {
// Skip this check for cudnn7 and newer.
- auto version =
- stream_exec->AsDnn()->GetVersion();
+ auto version = stream_exec->AsDnn()->GetVersion();
if (version.ok() && version.ValueOrDie().major_version() >= 7) {
return true;
}
@@ -338,8 +338,8 @@ StatusOr<bool> CudnnConvolutionAlgorithmPicker::RunOnInstruction(
computation->AddInstruction(HloInstruction::CreateTuple(
{computation->AddInstruction(HloInstruction::CreateGetTupleElement(
new_call_shape.tuple_shapes(0), new_call, 0)),
- computation->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<uint8>({})))}));
+ computation->AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<uint8>({})))}));
TF_RETURN_IF_ERROR(instr->parent()->ReplaceInstruction(instr, new_tuple));
return true;
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_rewriter.cc b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_rewriter.cc
index f9dccd287d..905b5ee876 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_rewriter.cc
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_rewriter.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include <numeric>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
diff --git a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
index 27d2c3e491..e594cec2f8 100644
--- a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
@@ -29,7 +29,7 @@ limitations under the License.
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/primitive_util.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/service/llvm_ir/ir_array.h"
diff --git a/tensorflow/compiler/xla/service/gpu/fft_thunk.cc b/tensorflow/compiler/xla/service/gpu/fft_thunk.cc
index e14ee6918b..0cdddf8bcf 100644
--- a/tensorflow/compiler/xla/service/gpu/fft_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/fft_thunk.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include <string>
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/strings/strcat.h"
@@ -107,7 +108,8 @@ FftThunk::FftThunk(FftType fft_type,
output_shape_(output_shape) {}
Status FftThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) {
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) {
VLOG(3) << "FFT type: " << FftTypeToString(fft_type_);
VLOG(3) << "Input shape: " << ShapeUtil::HumanStringWithLayout(input_shape_);
VLOG(3) << "Output shape: "
@@ -116,6 +118,7 @@ Status FftThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
FftScratchAllocator scratch_allocator(buffer_allocations.device_ordinal(),
buffer_allocations.memory_allocator());
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
if (fft_plan_ == nullptr) {
const int64 fft_rank = fft_length_.size();
CHECK_LE(fft_rank, 3);
diff --git a/tensorflow/compiler/xla/service/gpu/fft_thunk.h b/tensorflow/compiler/xla/service/gpu/fft_thunk.h
index b0a22564f3..8c53be5077 100644
--- a/tensorflow/compiler/xla/service/gpu/fft_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/fft_thunk.h
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_executable.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/types.h"
@@ -72,7 +73,8 @@ class FftThunk : public Thunk {
// Does the FFT for the thunk on "stream".
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
const se::fft::Type fft_type_;
diff --git a/tensorflow/compiler/xla/service/gpu/for_thunk.cc b/tensorflow/compiler/xla/service/gpu/for_thunk.cc
index b36539e0cb..b3a3c5dcb4 100644
--- a/tensorflow/compiler/xla/service/gpu/for_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/for_thunk.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/for_thunk.h"
#include "tensorflow/compiler/xla/ptr_util.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
@@ -27,8 +28,11 @@ ForThunk::ForThunk(const int64 loop_limit,
const HloInstruction* hlo)
: Thunk(Kind::kWhile, hlo),
loop_limit_(loop_limit),
- body_thunk_sequence_(
- MakeUnique<SequentialThunk>(std::move(*body_thunk_sequence), hlo)) {}
+ body_thunk_sequence_(MakeUnique<SequentialThunk>(
+ // Pass nullptr as the HloInstruction* to the body_thunk_sequence_
+ // constructor because this SequentialThunk is logically "part of"
+ // this ForThunk, and shouldn't be profiled separately from it.
+ std::move(*body_thunk_sequence), nullptr)) {}
Status ForThunk::Initialize(const GpuExecutable& executable,
se::StreamExecutor* executor) {
@@ -37,11 +41,15 @@ Status ForThunk::Initialize(const GpuExecutable& executable,
}
Status ForThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) {
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) {
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
for (int64 i = 0; i < loop_limit_; ++i) {
+ profiler->StartHloComputation();
// Invoke loop body thunk sequence.
- TF_RETURN_IF_ERROR(
- body_thunk_sequence_->ExecuteOnStream(buffer_allocations, stream));
+ TF_RETURN_IF_ERROR(body_thunk_sequence_->ExecuteOnStream(buffer_allocations,
+ stream, profiler));
+ profiler->FinishHloComputation(hlo_instruction()->while_body());
}
return Status::OK();
}
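The ForThunk change above shows the new nested-profiling contract: a scoped profiler covers the whole loop, while each iteration is bracketed by StartHloComputation()/FinishHloComputation(), which push and pop timers on a stack. A minimal standalone sketch of that stack discipline, with a hypothetical Profiler class standing in for HloExecutionProfiler:

#include <chrono>
#include <cstdio>
#include <stack>

// Hypothetical stand-in for HloExecutionProfiler: a stack of start times, so
// an outer measurement can stay open while inner ones start and finish.
class Profiler {
 public:
  void Start() { starts_.push(Clock::now()); }
  double FinishMs() {
    Clock::time_point start = starts_.top();
    starts_.pop();
    return std::chrono::duration<double, std::milli>(Clock::now() - start)
        .count();
  }

 private:
  using Clock = std::chrono::steady_clock;
  std::stack<Clock::time_point> starts_;
};

int main() {
  Profiler profiler;
  profiler.Start();  // Covers the whole loop, like the scoped profiler above.
  for (int i = 0; i < 3; ++i) {
    profiler.Start();  // Covers one iteration, like StartHloComputation().
    // ... the body thunk sequence would execute here ...
    std::printf("iteration %d: %.3f ms\n", i, profiler.FinishMs());
  }
  std::printf("loop total: %.3f ms\n", profiler.FinishMs());
}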
diff --git a/tensorflow/compiler/xla/service/gpu/for_thunk.h b/tensorflow/compiler/xla/service/gpu/for_thunk.h
index 41ddfe0ceb..c2d39071b2 100644
--- a/tensorflow/compiler/xla/service/gpu/for_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/for_thunk.h
@@ -19,6 +19,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/sequential_thunk.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -39,7 +40,8 @@ class ForThunk : public Thunk {
Status Initialize(const GpuExecutable& executable,
se::StreamExecutor* executor) override;
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
const int64 loop_limit_;
diff --git a/tensorflow/compiler/xla/service/gpu/gemm_thunk.cc b/tensorflow/compiler/xla/service/gpu/gemm_thunk.cc
index 79fca43d02..dbc7754e25 100644
--- a/tensorflow/compiler/xla/service/gpu/gemm_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/gemm_thunk.cc
@@ -252,7 +252,8 @@ GemmThunk::GemmThunk(const BufferAllocation::Slice& lhs_buffer,
alpha_(alpha) {}
Status GemmThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) {
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) {
VLOG(2) << "Executing a GemmThunk";
se::DeviceMemoryBase lhs_data =
@@ -352,6 +353,7 @@ Status GemmThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
alpha_, stream);
};
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
bool launch_ok;
if (LayoutUtil::Minor(output_shape_.layout(), 0) == 0) {
launch_ok = launch(
diff --git a/tensorflow/compiler/xla/service/gpu/gemm_thunk.h b/tensorflow/compiler/xla/service/gpu/gemm_thunk.h
index 7a4830d64e..939c7f85e3 100644
--- a/tensorflow/compiler/xla/service/gpu/gemm_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/gemm_thunk.h
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_executable.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
@@ -48,7 +49,8 @@ class GemmThunk : public Thunk {
// Does the gemm operation for the thunk on "stream", which must be non-null.
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
// Returns true if we'll perform autotuning if run on the given stream. If
// so, we want the GPU to be quiescent during autotuning, so as not to
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc b/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc
index decfc40daf..5e5d893582 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc
@@ -354,16 +354,30 @@ void WarnIfBadPtxasVersion(const string& ptxas_path) {
return;
}
+ // We need ptxas >= 9.0 as a hard requirement, because we compile targeting
+ // PTX 6.0. An older ptxas will just fail to compile any of our code.
+ //
// ptxas 9.0 before 9.0.276 and ptxas 9.1 before 9.1.121 miscompile some
// address calculations with large offsets (e.g. "load ptr + large_constant"),
// b/70245379.
- if ((vmaj == 9 && vmin == 0 && vdot < 276) ||
- (vmaj == 9 && vmin == 1 && vdot < 121)) {
- LOG(WARNING) << "*** WARNING *** You are using ptxas " << vmaj << "."
- << vmin << "." << vdot
- << ", which is in range [9.0.0, 9.0.276) + [9.1.0, 9.1.121). "
- "These versions are known to miscompile XLA code, leading "
- "to incorrect results or invalid-address errors.";
+ //
+ // ptxas 9.1.121 miscompiles some large multioutput fusions, again in a way
+ // that appears related to address calculations. ptxas 9.2.88 appears to
+ // work, as far as we can tell.
+ if (vmaj < 9) {
+ LOG(ERROR)
+ << "You are using ptxas 8.x, but XLA requires ptxas 9.x (and strongly "
+ "prefers >= 9.2.88). Compilation of XLA kernels below will likely "
+ "fail.\n\nYou do not need to update CUDA; cherry-picking the ptxas "
+ "binary is sufficient.";
+ } else if (vmaj == 9 && (vmin < 2 || (vmin == 2 && vdot < 88))) {
+ LOG(WARNING)
+ << "*** WARNING *** You are using ptxas " << vmaj << "." << vmin << "."
+ << vdot
+ << ", which older than 9.2.88. ptxas 9.x before 9.2.88 is known to "
+ "miscompile XLA code, leading to incorrect results or "
+ "invalid-address errors.\n\nYou do not need to update to CUDA "
+ "9.2.88; cherry-picking the ptxas binary is sufficient.";
}
}
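Together with the vmaj < 9 error branch, the warning branch above is asking whether (vmaj, vmin, vdot) precedes (9, 2, 88); spelled out as independent clauses this comparison is easy to get wrong, and std::tie gives the lexicographic form directly. A self-contained sketch (OlderThan is an illustrative helper, not part of the patch):

#include <cassert>
#include <tuple>

// Illustrative helper: true iff version vmaj.vmin.vdot precedes maj.min.dot
// in lexicographic order.
bool OlderThan(int vmaj, int vmin, int vdot, int maj, int min, int dot) {
  return std::tie(vmaj, vmin, vdot) < std::tie(maj, min, dot);
}

int main() {
  assert(OlderThan(9, 1, 121, 9, 2, 88));  // 9.1.121 predates 9.2.88.
  assert(!OlderThan(9, 2, 88, 9, 2, 88));  // Equal versions are not older.
  assert(!OlderThan(9, 3, 0, 9, 2, 88));   // 9.3.0 is newer despite vdot < 88.
  return 0;
}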
@@ -391,6 +405,10 @@ void WarnIfBadDriverJITVersion() {
// - 384.x before 384.108
// - 387.x before 387.40
// - 390.x before 390.10.
+ //
+ // TODO(jlebar): This list does not cover the address-calculation bug we've
+ // observed in ptxas 9.1.121. Need to get a new safe range from nvidia
+ // corresponding to ptxas >= 9.2.88.
auto vmaj = std::get<0>(version);
auto vmin = std::get<1>(version);
if ((vmaj == 384 && vmin < 108) || //
@@ -552,8 +570,7 @@ StatusOr<std::unique_ptr<Executable>> GpuCompiler::RunBackend(
&ir_emitter_context);
{
XLA_SCOPED_LOGGING_TIMER("GpuCompiler::RunBackend - IR emission");
- TF_RETURN_IF_ERROR(
- entry_computation->root_instruction()->Accept(&ir_emitter));
+ TF_RETURN_IF_ERROR(entry_computation->Accept(&ir_emitter));
}
if (user_pre_optimization_hook_) {
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_executable.cc b/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
index f20a828bc1..0cad2958c7 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
@@ -136,18 +136,17 @@ Status GpuExecutable::ExecuteThunks(
TF_RETURN_IF_ERROR(main_stream->BlockHostUntilDone());
}
- profiler.StartOperation();
VLOG(2) << "Executing the thunk for "
<< thunk->hlo_instruction()->ToString() << " on stream "
<< stream_no;
- TF_RETURN_IF_ERROR(thunk->ExecuteOnStream(buffer_allocations, stream));
+ TF_RETURN_IF_ERROR(
+ thunk->ExecuteOnStream(buffer_allocations, stream, &profiler));
if (thunk_schedule_->Depended(thunk)) {
auto finish_event = MakeUnique<se::Event>(main_stream->parent());
finish_event->Init();
stream->ThenRecordEvent(finish_event.get());
thunk_to_finish_event[thunk] = std::move(finish_event);
}
- profiler.FinishOperation(thunk->hlo_instruction());
}
main_stream->ThenWaitFor(&sub_streams);
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_layout_assignment.cc b/tensorflow/compiler/xla/service/gpu/gpu_layout_assignment.cc
index 8bf62dde8b..09ef62c87f 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_layout_assignment.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_layout_assignment.cc
@@ -51,7 +51,7 @@ HeuristicLayoutAssignment(const HloInstruction* instr,
// H <=> Y
// W <=> X
//
- // Therefore kOutputInputYX means NHWC; kBatchDepthYX means NCHW.
+ // Therefore kOutputInputYX and kBatchDepthYX mean NCHW.
// As of today, our empirical evidence is that cudnn 7.0 is faster on V100 x
// fp16 with the mostly-NHWC layout. The heuristic may change as cudnn version
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_layout_assignment_test.cc b/tensorflow/compiler/xla/service/gpu/gpu_layout_assignment_test.cc
index e48165c142..95f78ae293 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_layout_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_layout_assignment_test.cc
@@ -132,10 +132,10 @@ TEST_F(LayoutAssignmentTest, BatchNormInference) {
HloInstruction::CreateParameter(4, aux_shape, "variance"));
auto* epsilon = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1)));
auto* feature_index =
builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR0<int64>(kFeatureIndex)));
+ LiteralUtil::CreateR0<int64>(kFeatureIndex)));
auto* batchnorm = builder.AddInstruction(HloInstruction::CreateCustomCall(
shape,
@@ -201,10 +201,10 @@ TEST_F(LayoutAssignmentTest, BatchNormTraining) {
HloInstruction::CreateParameter(2, offset_scale_shape, "offset"));
auto* epsilon = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1)));
auto* feature_index =
builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR0<int64>(kFeatureIndex)));
+ LiteralUtil::CreateR0<int64>(kFeatureIndex)));
auto* batchnorm = builder.AddInstruction(HloInstruction::CreateCustomCall(
batchnorm_shape, {operand, scale, offset, epsilon, feature_index},
@@ -278,10 +278,10 @@ TEST_F(LayoutAssignmentTest, BatchNormGrad) {
HloInstruction::CreateParameter(4, shape, "var"));
auto* epsilon = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1)));
auto* feature_index =
builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR0<int64>(kFeatureIndex)));
+ LiteralUtil::CreateR0<int64>(kFeatureIndex)));
auto* batchnorm =
builder.AddInstruction(HloInstruction::CreateCustomCall(
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc
index 7bb8df6581..6c23228976 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc
@@ -20,8 +20,10 @@ limitations under the License.
#include <vector>
#include "llvm/IR/DataLayout.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_compiler.h"
+#include "tensorflow/compiler/xla/service/gpu/outfeed_manager.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/statusor.h"
@@ -34,15 +36,14 @@ limitations under the License.
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
namespace xla {
+namespace gpu {
// TODO(b/30467474) Once GPU infeed implementation settles, consider
// folding back the cpu and gpu infeed implementations into a generic
// one if possible.
-GpuTransferManager::GpuTransferManager()
- : GenericTransferManager(
- se::cuda::kCudaPlatformId,
- /*pointer_size=*/llvm::DataLayout(gpu::GpuCompiler::kDataLayout)
- .getPointerSize(0 /* default address space */)) {}
+GpuTransferManager::GpuTransferManager(se::Platform::Id id,
+ unsigned pointer_size)
+ : GenericTransferManager(id, pointer_size) {}
Status GpuTransferManager::TransferLiteralToInfeed(
se::StreamExecutor* executor, const LiteralSlice& literal) {
@@ -50,53 +51,28 @@ Status GpuTransferManager::TransferLiteralToInfeed(
VLOG(2) << "Transferring literal to infeed with shape: "
<< ShapeUtil::HumanString(shape);
- if (!ShapeUtil::IsTuple(shape)) {
- int64 size = GetByteSizeRequirement(shape);
- return TransferBufferToInfeed(executor, size, literal.untyped_data());
- }
-
- if (ShapeUtil::IsNestedTuple(shape)) {
- return Unimplemented(
- "Infeed with a nested tuple shape is not supported: %s",
- ShapeUtil::HumanString(literal.shape()).c_str());
- }
-
// For a tuple, we transfer each of its elements to the device and
// enqueue the resulting destination device addresses with the
// infeed manager.
- std::vector<gpu::InfeedBuffer*> buffers;
- buffers.reserve(ShapeUtil::TupleElementCount(shape));
- auto cleanup = tensorflow::gtl::MakeCleanup([buffers]() {
- for (gpu::InfeedBuffer* b : buffers) {
- b->Done();
- }
- });
-
- for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
- const Shape& tuple_element_shape =
- ShapeUtil::GetTupleElementShape(shape, i);
- int64 tuple_element_size = GetByteSizeRequirement(tuple_element_shape);
- TF_ASSIGN_OR_RETURN(
- gpu::InfeedBuffer * buffer,
- TransferBufferToInfeedInternal(executor, tuple_element_size,
- literal.untyped_data({i})));
- buffers.push_back(buffer);
- }
-
- cleanup.release();
- return EnqueueBuffersToInfeed(executor, buffers);
-}
-
-Status GpuTransferManager::TransferBufferToInfeed(se::StreamExecutor* executor,
- int64 size,
- const void* source) {
- TF_ASSIGN_OR_RETURN(gpu::InfeedBuffer * buffer,
- TransferBufferToInfeedInternal(executor, size, source));
- return EnqueueBuffersToInfeed(executor, {buffer});
+ ShapeTree<InfeedBuffer> buffer_tree(shape);
+
+ TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
+ shape, [&](const Shape& literal_subshape, const ShapeIndex& index) {
+ if (ShapeUtil::IsArray(literal_subshape)) {
+ int64 tuple_element_size = GetByteSizeRequirement(literal_subshape);
+ TF_ASSIGN_OR_RETURN(
+ *buffer_tree.mutable_element(index),
+ TransferBufferToInfeedInternal(executor, tuple_element_size,
+ literal.untyped_data(index)));
+ }
+ return Status::OK();
+ }));
+
+ return EnqueueBuffersToInfeed(executor, std::move(buffer_tree));
}
Status GpuTransferManager::EnqueueBuffersToInfeed(
- se::StreamExecutor* executor, std::vector<gpu::InfeedBuffer*> buffers) {
+ se::StreamExecutor* executor, ShapeTree<InfeedBuffer> buffers) {
gpu::InfeedManager* infeed_manager = gpu::GetOrCreateInfeedManager();
se::Stream* stream = infeed_manager->GetStream(executor);
@@ -106,21 +82,18 @@ Status GpuTransferManager::EnqueueBuffersToInfeed(
// possible.
Status block_status = stream->BlockHostUntilDone();
if (!block_status.ok()) {
- for (gpu::InfeedBuffer* b : buffers) {
- b->Done();
- }
return InternalError("Failed to complete data transfer on stream %p: %s",
stream, block_status.error_message().c_str());
}
- infeed_manager->EnqueueBuffers(buffers);
+ infeed_manager->EnqueueDestination(std::move(buffers));
VLOG(2) << "Infeed data transferred";
return Status::OK();
}
-StatusOr<gpu::InfeedBuffer*> GpuTransferManager::TransferBufferToInfeedInternal(
+StatusOr<InfeedBuffer> GpuTransferManager::TransferBufferToInfeedInternal(
se::StreamExecutor* executor, int64 size, const void* source) {
if (size > std::numeric_limits<int32>::max()) {
return InvalidArgument("Infeed shape is too large: needs %lld bytes", size);
@@ -136,23 +109,84 @@ StatusOr<gpu::InfeedBuffer*> GpuTransferManager::TransferBufferToInfeedInternal(
return InternalError("Failed to obtain a stream");
}
- gpu::InfeedBuffer* buffer = new gpu::InfeedBuffer(executor, size);
- stream->ThenMemcpy(buffer->device_memory(), source, size);
+ InfeedBuffer buffer(executor, size);
+ stream->ThenMemcpy(buffer.device_memory(), source, size);
VLOG(2) << "Queued infeed data on stream " << stream;
- return buffer;
+ return std::move(buffer);
+}
+
+static std::unique_ptr<Literal> ShapeTreeToLiteral(
+ ShapeTree<std::unique_ptr<gpu::OutfeedBuffer>>* shape_tree) {
+ // This is a struct instead of a lambda for std::function-free recursion.
+ struct Helper {
+ static std::unique_ptr<Literal> helper(
+ ShapeTree<std::unique_ptr<gpu::OutfeedBuffer>>* shape_tree,
+ ShapeIndex* index) {
+ const Shape& shape = ShapeUtil::GetSubshape(shape_tree->shape(), *index);
+ if (ShapeUtil::IsArray(shape)) {
+ return (*shape_tree->mutable_element(*index))->WaitUntilAvailable();
+ }
+
+ CHECK(ShapeUtil::IsTuple(shape))
+ << ShapeUtil::HumanStringWithLayout(shape);
+ const int64 tuple_element_count = ShapeUtil::TupleElementCount(shape);
+ index->push_back(0);
+ std::vector<std::unique_ptr<Literal>> tuple_operands;
+ for (int64 i = 0; i < tuple_element_count; ++i) {
+ index->back() = i;
+ tuple_operands.push_back(helper(shape_tree, index));
+ }
+ index->pop_back();
+ return LiteralUtil::MakeTupleOwned(std::move(tuple_operands));
+ }
+ };
+ ShapeIndex index;
+ return Helper::helper(shape_tree, &index);
+}
+
+Status GpuTransferManager::TransferLiteralFromOutfeed(
+ se::StreamExecutor* /*executor*/, const Shape& literal_shape,
+ Literal* literal) {
+ ShapeTree<std::unique_ptr<gpu::OutfeedBuffer>> outfeed_buffers(
+ &literal_shape);
+
+ // First create a tree of literal buffers that the device can write to.
+ outfeed_buffers.ForEachMutableElement(
+ [&](const ShapeIndex& index,
+ std::unique_ptr<gpu::OutfeedBuffer>* buffer) {
+ const Shape& shape = ShapeUtil::GetSubshape(literal_shape, index);
+ // Do not transfer tuple index buffers.
+ if (ShapeUtil::IsTuple(shape)) {
+ return;
+ }
+ *buffer = MakeUnique<gpu::OutfeedBuffer>(GetByteSizeRequirement(shape));
+ });
+
+ // Give the tree of buffers to the outfeed manager. The device will fill it
+ // while we're waiting for it below.
+ gpu::OutfeedManager* outfeed_manager = gpu::GetOrCreateOutfeedManager();
+ outfeed_manager->EnqueueDestination(&outfeed_buffers);
+
+ // Now turn the tree of buffers back into a literal.
+ *literal = std::move(*ShapeTreeToLiteral(&outfeed_buffers));
+ return Status::OK();
}
+} // namespace gpu
} // namespace xla
-static std::unique_ptr<xla::TransferManager> CreateGpuTransferManager() {
- return xla::MakeUnique<xla::GpuTransferManager>();
+static std::unique_ptr<xla::TransferManager> CreateNVPTXTransferManager() {
+ return xla::MakeUnique<xla::gpu::GpuTransferManager>(
+ /*id=*/stream_executor::cuda::kCudaPlatformId,
+ /*pointer_size=*/llvm::DataLayout(xla::gpu::GpuCompiler::kDataLayout)
+ .getPointerSize(0 /* default address space */));
}
static bool InitModule() {
xla::TransferManager::RegisterTransferManager(
- stream_executor::cuda::kCudaPlatformId, &CreateGpuTransferManager);
+ stream_executor::cuda::kCudaPlatformId, &CreateNVPTXTransferManager);
return true;
}
static bool module_initialized = InitModule();
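ShapeTreeToLiteral above uses a local struct with a static member function so the traversal can recurse without std::function, pushing and popping a ShapeIndex as it descends into tuples. A standalone miniature of that recursion trick over a toy tree (Node, SumLeaves, and Helper are illustrative names):

#include <cstdio>
#include <memory>
#include <vector>

struct Node {
  int leaf_value = 0;                           // Used when children is empty.
  std::vector<std::unique_ptr<Node>> children;  // Non-empty for "tuples".
};

int SumLeaves(const Node& root) {
  // Local struct with a static member: recursion inside a function body
  // without the overhead or capture restrictions of std::function.
  struct Helper {
    static int Sum(const Node& n) {
      if (n.children.empty()) return n.leaf_value;
      int total = 0;
      for (const std::unique_ptr<Node>& child : n.children) {
        total += Sum(*child);
      }
      return total;
    }
  };
  return Helper::Sum(root);
}

int main() {
  Node root;
  root.children.push_back(std::make_unique<Node>());
  root.children[0]->leaf_value = 2;
  root.children.push_back(std::make_unique<Node>());
  root.children[1]->leaf_value = 3;
  std::printf("sum: %d\n", SumLeaves(root));  // Prints 5.
}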
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h
index 09f8227f50..dceeb9e2eb 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/generic_transfer_manager.h"
#include "tensorflow/compiler/xla/service/gpu/infeed_manager.h"
#include "tensorflow/compiler/xla/service/transfer_manager.h"
+#include "tensorflow/compiler/xla/shape_tree.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/macros.h"
@@ -28,33 +29,36 @@ limitations under the License.
#include "tensorflow/core/platform/types.h"
namespace xla {
+namespace gpu {
// An implementation of the XLA GenericTransferManager that
// handles GPU-specific infeed.
class GpuTransferManager : public GenericTransferManager {
public:
- GpuTransferManager();
+ GpuTransferManager(se::Platform::Id id, unsigned pointer_size);
~GpuTransferManager() override {}
Status TransferLiteralToInfeed(se::StreamExecutor* executor,
const LiteralSlice& literal) override;
- Status TransferBufferToInfeed(se::StreamExecutor* executor, int64 size,
- const void* source) override;
+ Status TransferLiteralFromOutfeed(se::StreamExecutor* executor,
+ const Shape& literal_shape,
+ Literal* literal) override;
private:
// Initiates the infeed data transfers. InfeedBuffer->Done() must be
// called to clean up the memory allocated for InfeedBuffer.
- StatusOr<gpu::InfeedBuffer*> TransferBufferToInfeedInternal(
+ StatusOr<InfeedBuffer> TransferBufferToInfeedInternal(
se::StreamExecutor* executor, int64 size, const void* source);
// Enqueues infeed data buffers with the infeed manager after their
// transfer completes.
Status EnqueueBuffersToInfeed(se::StreamExecutor* executor,
- std::vector<gpu::InfeedBuffer*> buffers);
+ ShapeTree<InfeedBuffer> buffers);
TF_DISALLOW_COPY_AND_ASSIGN(GpuTransferManager);
};
+} // namespace gpu
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TRANSFER_MANAGER_H_
diff --git a/tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.cc b/tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.cc
index daddd3738e..19420e590d 100644
--- a/tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.cc
+++ b/tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.cc
@@ -16,6 +16,8 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include <memory>
+#include <stack>
+#include <unordered_set>
#include <vector>
#include "tensorflow/compiler/xla/service/hlo_computation.h"
@@ -24,9 +26,30 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/pool.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
+#include "tensorflow/core/util/ptr_util.h"
namespace xla {
namespace gpu {
+namespace {
+void InitAndStartTimer(std::stack<std::unique_ptr<se::Timer>>* timers,
+ se::Stream* stream) {
+ timers->push(MakeUnique<se::Timer>(stream->parent()));
+ stream->InitTimer(timers->top().get()).ThenStartTimer(timers->top().get());
+}
+
+uint64 GetCyclesTaken(
+ std::stack<std::unique_ptr<se::Timer>>* timers,
+ const std::vector<Pool<se::Stream>::SmartPtr>& sub_streams,
+ se::Stream* stream, double clock_rate_ghz) {
+ CHECK_GT(timers->size(), 0);
+ stream->ThenWaitFor(&sub_streams);
+ stream->ThenStopTimer(timers->top().get());
+ stream->BlockHostUntilDone().IgnoreError();
+ double nanoseconds = timers->top()->Nanoseconds();
+ timers->pop();
+ return static_cast<uint64>(nanoseconds * clock_rate_ghz);
+}
+} // namespace
HloExecutionProfiler::HloExecutionProfiler(
bool do_profile, HloExecutionProfile* profile, se::Stream* stream,
@@ -39,11 +62,7 @@ HloExecutionProfiler::HloExecutionProfiler(
computation_(computation) {
if (do_profile_) {
clock_rate_ghz_ = stream->parent()->GetDeviceDescription().clock_rate_ghz();
- execution_timer_.reset(new se::Timer(stream->parent()));
- per_op_timer_.reset(new se::Timer(stream->parent()));
- stream->InitTimer(execution_timer_.get())
- .ThenStartTimer(execution_timer_.get());
- stream->InitTimer(per_op_timer_.get());
+ InitAndStartTimer(&timers_, stream);
}
}
@@ -51,31 +70,53 @@ void HloExecutionProfiler::FinishExecution() {
CHECK(!finished_execution_) << "Call FinishExecution only once!";
finished_execution_ = true;
if (do_profile_) {
- stream_->ThenWaitFor(&sub_streams_);
- stream_->ThenStopTimer(execution_timer_.get());
- stream_->BlockHostUntilDone().IgnoreError();
profile_->set_total_cycles_executed(
*computation_,
- static_cast<uint64>(execution_timer_->Nanoseconds() * clock_rate_ghz_));
+ GetCyclesTaken(&timers_, sub_streams_, stream_, clock_rate_ghz_));
}
}
-void HloExecutionProfiler::StartOperation() {
+void HloExecutionProfiler::StartHloComputation() {
if (do_profile_) {
- stream_->ThenStartTimer(per_op_timer_.get());
+ InitAndStartTimer(&timers_, stream_);
+ }
+}
+
+void HloExecutionProfiler::FinishHloComputation(
+ const HloComputation* computation) {
+ if (do_profile_) {
+ profile_->set_total_cycles_executed(
+ *computation,
+ GetCyclesTaken(&timers_, sub_streams_, stream_, clock_rate_ghz_));
}
}
-void HloExecutionProfiler::FinishOperation(
+void HloExecutionProfiler::StartHloInstruction() {
+ if (do_profile_) {
+ InitAndStartTimer(&timers_, stream_);
+ }
+}
+
+void HloExecutionProfiler::FinishHloInstruction(
const HloInstruction* hlo_instruction) {
if (do_profile_) {
- stream_->ThenWaitFor(&sub_streams_);
- stream_->ThenStopTimer(per_op_timer_.get());
- stream_->BlockHostUntilDone().IgnoreError();
+ hlo_instructions_.erase(hlo_instruction);
profile_->SetCyclesTakenBy(
hlo_instruction,
- static_cast<uint64>(per_op_timer_->Nanoseconds() * clock_rate_ghz_));
+ GetCyclesTaken(&timers_, sub_streams_, stream_, clock_rate_ghz_));
+ }
+}
+
+std::unique_ptr<ScopedInstructionProfiler>
+HloExecutionProfiler::MakeScopedInstructionProfiler(
+ const HloInstruction* hlo_instruction) {
+ if (do_profile_ && hlo_instruction != nullptr) {
+ // Make sure that we are not already measuring the time for the same
+ // 'hlo_instruction'.
+ CHECK(hlo_instructions_.insert(hlo_instruction).second)
+ << hlo_instruction->name();
}
+ return MakeUnique<ScopedInstructionProfiler>(this, hlo_instruction);
}
} // namespace gpu
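The unit arithmetic in GetCyclesTaken above is worth spelling out: a clock rate of R GHz is R cycles per nanosecond, so elapsed cycles are elapsed nanoseconds times R. A tiny self-contained check with made-up numbers:

#include <cstdint>
#include <cstdio>

// elapsed_cycles = elapsed_ns * clock_rate_ghz, since GHz is cycles per ns.
uint64_t CyclesTaken(double nanoseconds, double clock_rate_ghz) {
  return static_cast<uint64_t>(nanoseconds * clock_rate_ghz);
}

int main() {
  // 2000 ns on a 1.5 GHz device comes out to 3000 cycles.
  std::printf("%llu\n",
              static_cast<unsigned long long>(CyclesTaken(2000.0, 1.5)));
  return 0;
}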
diff --git a/tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h b/tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h
index c9b882ff80..6654850bef 100644
--- a/tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h
+++ b/tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h
@@ -17,6 +17,8 @@ limitations under the License.
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_HLO_EXECUTION_PROFILER_H_
#include <memory>
+#include <stack>
+#include <unordered_set>
#include <vector>
#include "tensorflow/compiler/xla/service/hlo_computation.h"
@@ -28,6 +30,8 @@ limitations under the License.
namespace xla {
namespace gpu {
+class ScopedInstructionProfiler;
+
// A helper class for profiling HLO in the course of GPU program execution.
// All of the profiling is guarded internally, to avoid the caller needing to
// have lots of conditionals sprinkled around.
@@ -43,12 +47,25 @@ class HloExecutionProfiler {
// execution timer.
void FinishExecution();
- // If profiling is enabled, starts the per-operation timer.
- void StartOperation();
+ // If profiling is enabled, starts a timer for a (sub)computation.
+ void StartHloComputation();
+
+ // If profiling is enabled, stops the timer for a (sub)computation and records
+ // the time that the computation took to execute in the profile.
+ void FinishHloComputation(const HloComputation* computation);
+
+ // If profiling is enabled, starts a per-operation timer.
+ void StartHloInstruction();
// If profiling is enabled, stops the per-operation timer and records the time
// that the hlo_instruction took to execute in the profile.
- void FinishOperation(const HloInstruction* hlo_instruction);
+ void FinishHloInstruction(const HloInstruction* hlo_instruction);
+
+ // Returns a ScopedInstructionProfiler and triggers a call to
+ // StartHloInstruction(). Once the returned ScopedInstructionProfiler goes
+ // out of scope, it triggers a call to FinishHloInstruction().
+ std::unique_ptr<ScopedInstructionProfiler> MakeScopedInstructionProfiler(
+ const HloInstruction* hlo_instruction);
private:
const bool do_profile_;
@@ -57,11 +74,36 @@ class HloExecutionProfiler {
se::Stream* stream_;
const std::vector<Pool<se::Stream>::SmartPtr>& sub_streams_;
const HloComputation* computation_;
- std::unique_ptr<se::Timer> execution_timer_;
- std::unique_ptr<se::Timer> per_op_timer_;
+ std::stack<std::unique_ptr<se::Timer>> timers_;
+ // Contains the HLO instructions for which we are currently measuring the
+ // time.
+ std::unordered_set<const HloInstruction*> hlo_instructions_;
bool finished_execution_ = false;
};
+// This class can be used within the ExecuteOnStream() implementations of
+// Thunks. It ensures that we always have a pair of matching
+// StartHloInstruction() and FinishHloInstruction() calls to the profiler.
+class ScopedInstructionProfiler {
+ public:
+ ScopedInstructionProfiler(HloExecutionProfiler* profiler,
+ const HloInstruction* hlo_instruction)
+ : profiler_(profiler), hlo_instruction_(hlo_instruction) {
+ if (hlo_instruction != nullptr) {
+ profiler->StartHloInstruction();
+ }
+ }
+ ~ScopedInstructionProfiler() {
+ if (hlo_instruction_ != nullptr) {
+ profiler_->FinishHloInstruction(hlo_instruction_);
+ }
+ }
+
+ private:
+ HloExecutionProfiler* profiler_;
+ const HloInstruction* hlo_instruction_;
+};
+
} // namespace gpu
} // namespace xla
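ScopedInstructionProfiler above is plain RAII: the constructor starts a measurement and the destructor finishes it, so early returns and error paths in a thunk's ExecuteOnStream() cannot leave an unmatched Start call. A standalone miniature of the same pattern, with illustrative names only:

#include <cstdio>

class MiniProfiler {
 public:
  void Start() { std::puts("start"); }
  void Finish() { std::puts("finish"); }
};

class ScopedMeasurement {
 public:
  explicit ScopedMeasurement(MiniProfiler* profiler) : profiler_(profiler) {
    profiler_->Start();
  }
  ~ScopedMeasurement() { profiler_->Finish(); }

 private:
  MiniProfiler* profiler_;  // Not owned.
};

int main() {
  MiniProfiler profiler;
  ScopedMeasurement measure(&profiler);  // Prints "start".
  // ... thunk body would run here ...
}  // Prints "finish" even if the body returned early or threw.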
diff --git a/tensorflow/compiler/xla/service/gpu/hlo_schedule.cc b/tensorflow/compiler/xla/service/gpu/hlo_schedule.cc
index 375709150e..19de37b0fb 100644
--- a/tensorflow/compiler/xla/service/gpu/hlo_schedule.cc
+++ b/tensorflow/compiler/xla/service/gpu/hlo_schedule.cc
@@ -100,7 +100,7 @@ GpuHloOrdering::GpuHloOrdering(
if (last_instruction_per_stream[stream_no] != nullptr) {
immediate_preds.push_back(last_instruction_per_stream[stream_no]);
}
- predecessor_map->SetReachabilityToUnion(immediate_preds, hlo);
+ predecessor_map->FastSetReachabilityToUnion(immediate_preds, hlo);
last_instruction_per_stream[stream_no] = hlo;
} else {
// Only parameters and constants don't have an assigned stream, since they
diff --git a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc
index d420863b85..6f2a7e1850 100644
--- a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc
+++ b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc
@@ -145,7 +145,7 @@ llvm::Value* HloToIrBindings::GetTypedIrValue(const HloInstruction& hlo,
llvm::Value* typed_ir_value;
if (llvm::isa<llvm::GlobalVariable>(ir_value)) {
- typed_ir_value = llvm::ConstantExpr::getBitCast(
+ typed_ir_value = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
llvm::cast<llvm::GlobalVariable>(ir_value), dest_type);
} else {
typed_ir_value =
diff --git a/tensorflow/compiler/xla/service/gpu/infeed_manager.cc b/tensorflow/compiler/xla/service/gpu/infeed_manager.cc
index ae310beefa..c5f0cdf6cd 100644
--- a/tensorflow/compiler/xla/service/gpu/infeed_manager.cc
+++ b/tensorflow/compiler/xla/service/gpu/infeed_manager.cc
@@ -15,76 +15,13 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/infeed_manager.h"
-#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
-#include "tensorflow/core/platform/logging.h"
namespace xla {
namespace gpu {
-InfeedManager::InfeedManager() : host_to_device_executor_(nullptr) {}
-
-void InfeedManager::Reset() {
- tensorflow::mutex_lock l(mu_);
- CHECK(dequeued_buffer_.empty());
- for (auto buffer : enqueued_buffer_) {
- buffer->Done();
- }
- enqueued_buffer_.clear();
-}
-
-void InfeedManager::EnqueueBuffers(const std::vector<InfeedBuffer*>& buffers) {
- tensorflow::mutex_lock l(mu_);
- bool was_empty = enqueued_buffer_.empty();
- for (gpu::InfeedBuffer* b : buffers) {
- enqueued_buffer_.push_back(b);
- }
- if (was_empty) {
- // This has the potential to suffer from the notified thread
- // immediately trying and failing to acquire mu_, but seems
- // preferable to the alternative of notifying outside the lock
- // on every enqueue.
- cv_.notify_one();
- }
-}
-
-InfeedBuffer* InfeedManager::BlockingDequeueBuffer() {
- bool became_empty = false;
- InfeedBuffer* current_buffer;
- {
- tensorflow::mutex_lock l(mu_);
- while (enqueued_buffer_.empty()) {
- cv_.wait(l);
- }
- current_buffer = enqueued_buffer_.front();
- enqueued_buffer_.pop_front();
- dequeued_buffer_.insert(current_buffer);
- if (enqueued_buffer_.empty()) {
- became_empty = true;
- }
- }
- if (became_empty) {
- for (const auto& callback : on_empty_callbacks_) {
- callback();
- }
- }
- return current_buffer;
-}
-
-void InfeedManager::ReleaseBuffers(const std::vector<InfeedBuffer*>& buffers) {
- {
- tensorflow::mutex_lock l(mu_);
- for (gpu::InfeedBuffer* b : buffers) {
- CHECK(ContainsKey(dequeued_buffer_, b));
- dequeued_buffer_.erase(b);
- }
- }
- for (gpu::InfeedBuffer* b : buffers) {
- b->Done();
- }
-}
-
se::Stream* InfeedManager::GetStream(se::StreamExecutor* executor) {
+ tensorflow::mutex_lock l(host_to_device_stream_mu_);
if (host_to_device_executor_ == nullptr) {
host_to_device_executor_ = executor;
host_to_device_stream_ = MakeUnique<se::Stream>(executor);
@@ -100,10 +37,6 @@ se::Stream* InfeedManager::GetStream(se::StreamExecutor* executor) {
return host_to_device_stream_.get();
}
-void InfeedManager::RegisterOnEmptyCallback(std::function<void()> callback) {
- on_empty_callbacks_.push_back(std::move(callback));
-}
-
InfeedManager* GetOrCreateInfeedManager() {
static InfeedManager* manager = new InfeedManager;
return manager;
diff --git a/tensorflow/compiler/xla/service/gpu/infeed_manager.h b/tensorflow/compiler/xla/service/gpu/infeed_manager.h
index a3fc15cfe3..7e418882e0 100644
--- a/tensorflow/compiler/xla/service/gpu/infeed_manager.h
+++ b/tensorflow/compiler/xla/service/gpu/infeed_manager.h
@@ -20,12 +20,9 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_INFEED_MANAGER_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_INFEED_MANAGER_H_
-#include <deque>
-#include <vector>
-
+#include "tensorflow/compiler/xla/service/gpu/xfeed_queue.h"
+#include "tensorflow/compiler/xla/shape_tree.h"
#include "tensorflow/compiler/xla/types.h"
-#include "tensorflow/core/lib/gtl/flatset.h"
-#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
namespace xla {
@@ -47,90 +44,41 @@ namespace gpu {
// the client. The client manages the memory of the buffer.
class InfeedBuffer {
public:
+ InfeedBuffer() = default;
InfeedBuffer(se::StreamExecutor* executor, int64 length)
- : executor_(executor), length_(length) {
- device_memory_ = executor_->AllocateArray<uint8>(length);
- CHECK(!device_memory_.is_null());
+ : device_memory_(executor, executor->AllocateArray<uint8>(length)),
+ length_(length) {
+ CHECK(!device_memory_->is_null());
}
- ~InfeedBuffer() { executor_->Deallocate(&device_memory_); }
-
int64 length() const { return length_; }
- // Callback to signal that this buffer is consumed. This helps the
- // client to manage memory for the infeed buffers.
- void Done() { delete this; }
-
- se::DeviceMemoryBase* device_memory() { return &device_memory_; }
+ se::DeviceMemoryBase* device_memory() { return device_memory_.ptr(); }
private:
- se::StreamExecutor* executor_; // Not owned.
- const int64 length_;
- se::DeviceMemoryBase device_memory_;
+ se::ScopedDeviceMemory<uint8> device_memory_;
+ int64 length_;
};
// Client-side class used to enqueue infeed buffers.
-class InfeedManager {
+class InfeedManager : public XfeedQueue<ShapeTree<InfeedBuffer>> {
public:
- InfeedManager();
-
- // Calls the completion callback for any enqueued buffers that have
- // not been dequeued by the runtime, and empties the infeed
- // queue. Reset may not be called while a runtime computation is
- // processing a dequeued buffer. The only safe way to ensure this
- // condition is to call Reset when no computation is taking place.
- void Reset();
-
- // Adds a set of buffers to the infeed queue atomically. buffer->Done
- // will be called when the buffer will no longer be accessed by the
- // InfeedManager, either as a result of a call to Reset or because the
- // runtime has dequeued and used the buffer.
- void EnqueueBuffers(const std::vector<InfeedBuffer*>& buffers);
-
- // Blocks until the infeed queue is non-empty, then returns the
- // buffer at the head of the queue. Adds the current buffer to the
- // to-be released set.
- InfeedBuffer* BlockingDequeueBuffer();
-
- // Releases a set of buffers from the to-be released set.
- void ReleaseBuffers(const std::vector<InfeedBuffer*>& buffers);
-
// Returns a cached stream associated with an executor. Allocates a
// new stream on the first invocation. On subsequent invocations, if
// the cached executor is not the same as the requested executor,
// returns null.
se::Stream* GetStream(se::StreamExecutor* executor);
- // Registers a callback that will be called when 'enqueued_buffer_' becomes
- // empty.
- void RegisterOnEmptyCallback(std::function<void()> callback);
-
private:
- // TODO(b/30467474): Revisit if this mutex becomes a point of
- // contention.
- tensorflow::mutex mu_;
-
- // Condition variable that is signaled every time a buffer is
- // enqueued to an empty queue.
- tensorflow::condition_variable cv_;
-
- // InfeedBuffer* queue contents are not owned, but buffer->Done must
- // be called when the buffer is no longer needed by the runtime.
- std::deque<InfeedBuffer*> enqueued_buffer_;
-
- // Buffers that are dequeued and currently being processed by the
- // runtime. Not owned.
- tensorflow::gtl::FlatSet<const InfeedBuffer*> dequeued_buffer_;
+ // Mutex for serializing the creation of host_to_device_stream_.
+ tensorflow::mutex host_to_device_stream_mu_;
// Cached host to device stream for queuing infeed data.
- std::unique_ptr<se::Stream> host_to_device_stream_;
+ std::unique_ptr<se::Stream> host_to_device_stream_
+ GUARDED_BY(host_to_device_stream_mu_);
// Executor that the host_to_device_stream belongs to. Not owned.
- se::StreamExecutor* host_to_device_executor_;
-
- // List of callbacks which will be called when 'enqueued_buffer_' becomes
- // empty.
- std::vector<std::function<void()>> on_empty_callbacks_;
+ se::StreamExecutor* host_to_device_executor_ = nullptr;
};
// Singleton creator-or-accessor: Returns the GPU infeed manager.
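GetStream now takes host_to_device_stream_mu_ on every call and lazily creates the stream on first use, returning null if a different executor is requested later. A hedged standalone sketch of that guarded lazy-initialization shape, with std::mutex standing in for tensorflow::mutex and int for se::Stream:

#include <memory>
#include <mutex>

class StreamCache {
 public:
  // Returns the cached "stream", creating it on first use. Returns nullptr if
  // a different executor is requested later, matching the contract above.
  int* GetStream(void* executor) {
    std::lock_guard<std::mutex> lock(mu_);
    if (executor_ == nullptr) {
      executor_ = executor;
      stream_ = std::make_unique<int>(0);
    } else if (executor_ != executor) {
      return nullptr;
    }
    return stream_.get();
  }

 private:
  std::mutex mu_;
  void* executor_ = nullptr;    // Not owned.
  std::unique_ptr<int> stream_; // Guarded by mu_.
};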
diff --git a/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc b/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
index ea34d5b30c..fee6d2af3b 100644
--- a/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
@@ -13,8 +13,9 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include "tensorflow/compiler/xla/service/gpu/infeed_manager.h"
#include "tensorflow/compiler/xla/service/gpu/infeed_thunk.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
+#include "tensorflow/compiler/xla/service/gpu/infeed_manager.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
@@ -22,57 +23,82 @@ namespace xla {
namespace gpu {
InfeedThunk::InfeedThunk(
- tensorflow::gtl::ArraySlice<BufferAllocation::Slice> tuple_element_buffers,
- const BufferAllocation::Slice& destination_buffer,
+ const ShapeTree<BufferAllocation::Slice>& infeed_slices,
const HloInstruction* hlo_instruction)
- : Thunk(Kind::kInfeed, hlo_instruction),
- tuple_element_buffers_(tuple_element_buffers.begin(),
- tuple_element_buffers.end()),
- destination_buffer_(destination_buffer) {}
+ : Thunk(Kind::kInfeed, hlo_instruction), infeed_slices_(infeed_slices) {}
Status InfeedThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) {
- VLOG(2) << "Infeeding to GPU ";
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) {
+ VLOG(2) << "Infeeding to GPU: " << hlo_instruction()->ToString();
+
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
+ ShapeTree<InfeedBuffer> infeed_buffers =
+ GetOrCreateInfeedManager()->BlockingGetNextDestination();
+
+ {
+ // The infeed buffer has an extra outer tuple with a token. Adjust the index
+ // accordingly.
+ ShapeIndex index = {0};
+ std::function<void(std::vector<void*>*)> copy_tuple_contents =
+ [&](std::vector<void*>* tuple_element_addresses) {
+ const Shape& shape = ShapeUtil::GetSubshape(infeed_buffers.shape(),
+ ShapeIndexView(index, 1));
+ // For the leaf buffers of the tuple copy the elements directly.
+ if (ShapeUtil::IsArray(shape)) {
+ const BufferAllocation::Slice& tuple_element_buffer =
+ infeed_slices_.element(index);
+ se::DeviceMemoryBase tuple_element_address =
+ buffer_allocations.GetDeviceAddress(tuple_element_buffer);
- se::DeviceMemoryBase destination_address =
- buffer_allocations.GetDeviceAddress(destination_buffer_);
+ InfeedBuffer* buffer =
+ infeed_buffers.mutable_element(ShapeIndexView(index, 1));
+ stream->ThenMemcpy(&tuple_element_address,
+ *(buffer->device_memory()), buffer->length());
+ tuple_element_addresses->push_back(tuple_element_address.opaque());
+ return;
+ }
+
+ const int64 tuple_element_count = ShapeUtil::TupleElementCount(shape);
+ index.push_back(0);
+ std::vector<void*> inner_tuple_element_addresses;
+ for (int64 i = 0; i < tuple_element_count; ++i) {
+ index.back() = i;
+ copy_tuple_contents(&inner_tuple_element_addresses);
+ }
+ index.pop_back();
+
+ // Create a buffer of pointers for non-leaf buffers.
+ CHECK_EQ(tuple_element_count, inner_tuple_element_addresses.size());
+ auto host_size = inner_tuple_element_addresses.size() * sizeof(void*);
+ se::DeviceMemoryBase tuple_address =
+ buffer_allocations.GetDeviceAddress(
+ infeed_slices_.element(index));
+ stream->ThenMemcpy(&tuple_address,
+ inner_tuple_element_addresses.data(), host_size);
+ tuple_element_addresses->push_back(tuple_address.opaque());
+ };
- InfeedManager* infeed_manager = GetOrCreateInfeedManager();
- std::vector<InfeedBuffer*> infeed_buffers;
- if (ShapeUtil::IsTuple(hlo_instruction()->shape())) {
- CHECK(!ShapeUtil::IsNestedTuple(hlo_instruction()->shape()));
- // Transfer the tuple elements first.
std::vector<void*> tuple_element_addresses;
- for (BufferAllocation::Slice tuple_element_buffer :
- tuple_element_buffers_) {
- se::DeviceMemoryBase tuple_element_address =
- buffer_allocations.GetDeviceAddress(tuple_element_buffer);
-
- InfeedBuffer* buffer = infeed_manager->BlockingDequeueBuffer();
- infeed_buffers.push_back(buffer);
- stream->ThenMemcpy(&tuple_element_address, *(buffer->device_memory()),
- buffer->length());
- tuple_element_addresses.push_back(tuple_element_address.opaque());
- }
- // Transfer the tuple outer buffer.
- auto host_size = tuple_element_addresses.size() * sizeof(void*);
- stream->ThenMemcpy(&destination_address, tuple_element_addresses.data(),
- host_size);
- } else {
- InfeedBuffer* buffer = infeed_manager->BlockingDequeueBuffer();
- infeed_buffers.push_back(buffer);
- stream->ThenMemcpy(&destination_address, *(buffer->device_memory()),
- buffer->length());
+ copy_tuple_contents(&tuple_element_addresses);
+ CHECK_EQ(1, tuple_element_addresses.size());
}
+ // Construct top-level tuple of infeed containing the data and the token. Use
+ // a nullptr for the token; it should never be dereferenced.
+ se::DeviceMemoryBase data_address =
+ buffer_allocations.GetDeviceAddress(infeed_slices_.element({0}));
+ void* infeed_addresses[] = {data_address.opaque(), nullptr};
+ se::DeviceMemoryBase top_level_address =
+ buffer_allocations.GetDeviceAddress(infeed_slices_.element({}));
+ stream->ThenMemcpy(&top_level_address, infeed_addresses, 2 * sizeof(void*));
+
Status block_status = stream->BlockHostUntilDone();
if (!block_status.ok()) {
return InternalError("Failed to complete data transfer on stream %p: %s",
stream, block_status.error_message().c_str());
}
- infeed_manager->ReleaseBuffers(infeed_buffers);
-
VLOG(2) << "Infeeding to GPU complete";
return Status::OK();
}
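copy_tuple_contents above is a lambda that names itself through a std::function so it can descend into nested tuples, accumulating device addresses as it unwinds. A standalone sketch of that self-referential lambda pattern over a toy tree (Tree and visit are illustrative, not XLA types):

#include <cstdio>
#include <functional>
#include <vector>

struct Tree {
  int leaf = 0;
  std::vector<Tree> children;  // Empty for leaves.
};

int main() {
  Tree t{0, {{1, {}}, {0, {{2, {}}, {3, {}}}}}};
  int sum = 0;
  // The lambda captures itself by reference via the std::function variable,
  // which is what allows the recursive call.
  std::function<void(const Tree&)> visit = [&](const Tree& node) {
    if (node.children.empty()) {
      sum += node.leaf;
      return;
    }
    for (const Tree& child : node.children) visit(child);
  };
  visit(t);
  std::printf("sum of leaves: %d\n", sum);  // Prints 6.
}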
diff --git a/tensorflow/compiler/xla/service/gpu/infeed_thunk.h b/tensorflow/compiler/xla/service/gpu/infeed_thunk.h
index 93713cb12d..59487e245b 100644
--- a/tensorflow/compiler/xla/service/gpu/infeed_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/infeed_thunk.h
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
@@ -32,23 +33,19 @@ namespace gpu {
class InfeedThunk : public Thunk {
public:
// Constructs an InfeedThunk that copies data from the on-device
- // infeed queue to the device buffer
- // `destination_buffer`. `mem_size` is the size of the data in
- // bytes.
- InfeedThunk(tensorflow::gtl::ArraySlice<BufferAllocation::Slice>
- tuple_element_buffers,
- const BufferAllocation::Slice& destination_buffer,
+ // infeed queue into the buffers in the given shape tree.
+ InfeedThunk(const ShapeTree<BufferAllocation::Slice>& infeed_slices,
const HloInstruction* hlo_instruction);
InfeedThunk(const InfeedThunk&) = delete;
InfeedThunk& operator=(const InfeedThunk&) = delete;
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
- const std::vector<BufferAllocation::Slice> tuple_element_buffers_;
- const BufferAllocation::Slice destination_buffer_;
+ const ShapeTree<BufferAllocation::Slice> infeed_slices_;
};
} // namespace gpu
diff --git a/tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc b/tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc
index 1963d9eef7..98ba162cd9 100644
--- a/tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc
+++ b/tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc
@@ -33,7 +33,7 @@ TEST_F(InstructionFusionTest,
CostlyProducerAndOperandElementReusingConsumerNotFused) {
HloComputation::Builder builder(TestName());
HloInstruction* const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(5)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0(5)));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kExp, const0));
HloInstruction* broadcast2 =
@@ -53,7 +53,7 @@ TEST_F(InstructionFusionTest,
NonCostlyProducerAndOperandElementReusingConsumerFused) {
HloComputation::Builder builder(TestName());
HloInstruction* const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(5)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0(5)));
HloInstruction* negate1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kNegate, const0));
HloInstruction* broadcast2 =
@@ -73,7 +73,7 @@ TEST_F(InstructionFusionTest,
CostlyProducerAndNonOperandElementReusingConsumerFused_Reshape) {
HloComputation::Builder builder(TestName());
HloInstruction* const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(5)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0(5)));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kExp, const0));
HloInstruction* reshape2 = builder.AddInstruction(
@@ -92,7 +92,7 @@ TEST_F(InstructionFusionTest,
CostlyProducerAndNonOperandElementReusingConsumerFused_Transpose) {
HloComputation::Builder builder(TestName());
HloInstruction* const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(5)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0(5)));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kExp, const0));
HloInstruction* transpose2 = builder.AddInstruction(
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc
index 388aa35d7d..2799baab41 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc
@@ -242,15 +242,17 @@ llvm::Value* EmitPrintf(tensorflow::StringPiece fmt,
arguments_ptr});
}
-llvm::Value* EmitShuffleDown(llvm::Value* value, llvm::Value* offset,
- llvm::IRBuilder<>* builder) {
+llvm::Value* EmitFullWarpShuffleDown(llvm::Value* value, llvm::Value* offset,
+ llvm::IRBuilder<>* builder) {
int bit_width = value->getType()->getPrimitiveSizeInBits();
+ llvm::Value* all_warps_mask = builder->getInt32(-1);
// Special case for efficiency
if (value->getType()->isFloatTy() && bit_width == 32) {
return llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::nvvm_shfl_down_f32,
- {value, offset, builder->getInt32(kWarpSize - 1)}, {}, builder);
+ llvm::Intrinsic::nvvm_shfl_sync_down_f32,
+ {all_warps_mask, value, offset, builder->getInt32(kWarpSize - 1)}, {},
+ builder);
}
// We must split values wider than 32 bits as the "shfl" instruction operates
@@ -264,10 +266,11 @@ llvm::Value* EmitShuffleDown(llvm::Value* value, llvm::Value* offset,
for (int i = 0; i < num_segments; ++i) {
x = builder->CreateInsertElement(
x,
- llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_shfl_down_i32,
- {builder->CreateExtractElement(x, i),
- offset, builder->getInt32(kWarpSize - 1)},
- {}, builder),
+ llvm_ir::EmitCallToIntrinsic(
+ llvm::Intrinsic::nvvm_shfl_sync_down_i32,
+ {all_warps_mask, builder->CreateExtractElement(x, i), offset,
+ builder->getInt32(kWarpSize - 1)},
+ {}, builder),
i);
}
return builder->CreateBitCast(
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h
index 59455f389e..9bb4c42b15 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h
+++ b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h
@@ -125,13 +125,17 @@ llvm::Value* EmitPrintf(tensorflow::StringPiece fmt,
llvm::IRBuilder<>* builder);
// Emits code to shuffle data between threads of a warp. This has the same
-// semantics as the PTX "shfl.down" instruction [0] but works for values of any
-// size. The last operand of the emitted "shfl" is `kWarpSize - 1`.
+// semantics as the PTX "shfl.sync.down" instruction but works for values that
+// aren't 32 bits in size. The last operand of the emitted "shfl" is
+// `kWarpSize - 1`.
//
-// [0]
-// http://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-shfl
-llvm::Value* EmitShuffleDown(llvm::Value* value, llvm::Value* offset,
- llvm::IRBuilder<>* builder);
+// This function emits a "full-warp" shuffle, which all threads of a warp
+// participate in. *Do not use this function from a divergent context:* You
+// can't correctly do so on both Volta and earlier GPUs.
+//
+// https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-shfl-sync
+llvm::Value* EmitFullWarpShuffleDown(llvm::Value* value, llvm::Value* offset,
+ llvm::IRBuilder<>* builder);
} // namespace gpu
} // namespace xla
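For values wider than 32 bits, EmitFullWarpShuffleDown above splits the value into 32-bit segments, shuffles each segment, and reassembles the result. A plain C++ illustration of that splitting strategy for a 64-bit value (Shuffle32 is an identity stand-in for one lane shuffle, so the sketch runs on a CPU):

#include <cstdint>
#include <cstdio>

// Stand-in for one 32-bit lane shuffle. On the GPU this is the emitted
// shfl.sync.down.b32 with a full-warp mask, which is why every lane of the
// warp must execute it.
static uint32_t Shuffle32(uint32_t v) { return v; }

uint64_t ShuffleDown64(uint64_t value) {
  uint32_t lo = static_cast<uint32_t>(value);
  uint32_t hi = static_cast<uint32_t>(value >> 32);
  lo = Shuffle32(lo);  // Each 32-bit segment crosses lanes independently.
  hi = Shuffle32(hi);
  return (static_cast<uint64_t>(hi) << 32) | lo;
}

int main() {
  std::printf("%llx\n", static_cast<unsigned long long>(
                            ShuffleDown64(0x1122334455667788ull)));
}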
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
index d38a496fea..fe83d017f4 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
@@ -94,10 +94,7 @@ Status IrEmitter::HandleConstant(HloInstruction* constant) {
<< std::endl
<< " its type: "
<< llvm_ir::DumpToString(*global_for_const->getType());
- llvm::Constant* shape_constant = llvm::ConstantExpr::getBitCast(
- global_for_const,
- llvm_ir::ShapeToIrType(literal.shape(), module_)->getPointerTo());
- bindings_.BindHloToIrValue(*constant, shape_constant);
+ bindings_.BindHloToIrValue(*constant, global_for_const);
return Status::OK();
}
@@ -194,6 +191,8 @@ bool IrEmitter::MaybeEmitDirectAtomicOperation(
HloOpcode root_opcode = computation.root_instruction()->opcode();
PrimitiveType element_type =
computation.root_instruction()->shape().element_type();
+ bool is_atomic_integral = element_type == S32 || element_type == U32 ||
+ element_type == S64 || element_type == U64;
llvm::Value* source = ir_builder_.CreateLoad(source_address, "source");
if (root_opcode == HloOpcode::kAdd) {
// NVPTX supports atomicAdd on F32 and integer types.
@@ -204,7 +203,7 @@ bool IrEmitter::MaybeEmitDirectAtomicOperation(
{output_address->getType()}, &ir_builder_);
return true;
}
- if (primitive_util::IsIntegralType(element_type)) {
+ if (is_atomic_integral) {
// integral + integral
ir_builder_.CreateAtomicRMW(llvm::AtomicRMWInst::Add, output_address,
source,
@@ -213,9 +212,8 @@ bool IrEmitter::MaybeEmitDirectAtomicOperation(
}
}
- // NVPTX supports atomicMax and atomicMin on only integer types.
- if (root_opcode == HloOpcode::kMaximum &&
- primitive_util::IsIntegralType(element_type)) {
+ // NVPTX supports atomicMax and atomicMin only on integer types.
+ if (root_opcode == HloOpcode::kMaximum && is_atomic_integral) {
// max(integral, integral)
auto opcode = primitive_util::IsSignedIntegralType(element_type)
? llvm::AtomicRMWInst::Max
@@ -225,8 +223,7 @@ bool IrEmitter::MaybeEmitDirectAtomicOperation(
return true;
}
- if (root_opcode == HloOpcode::kMinimum &&
- primitive_util::IsIntegralType(element_type)) {
+ if (root_opcode == HloOpcode::kMinimum && is_atomic_integral) {
// min(integral, integral)
auto opcode = primitive_util::IsSignedIntegralType(element_type)
? llvm::AtomicRMWInst::Min
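
The is_atomic_integral guard above tracks which reduce ops CUDA hardware can lower to a single atomic; a rough CUDA sketch of those cases (32-bit forms shown; the S64/U64 atomicMax/atomicMin variants additionally need sm_35 or newer):

// Sketch of the cases MaybeEmitDirectAtomicOperation can emit directly.
// atomicAdd covers F32 plus 32/64-bit integers; atomicMax/atomicMin exist
// only for integer types, which is why the checks above use
// is_atomic_integral rather than IsIntegralType (8/16-bit integers have
// no hardware atomics and need a CAS-style fallback instead).
__global__ void DirectAtomicCases(int* iout, unsigned* uout, float* fout,
                                  int iv, unsigned uv, float fv) {
  atomicAdd(fout, fv);  // float + float: supported
  atomicAdd(iout, iv);  // integral + integral: supported
  atomicMax(iout, iv);  // max(integral, integral): signed form
  atomicMin(uout, uv);  // min(integral, integral): unsigned form
  // No atomicMax/atomicMin overloads exist for floating point.
}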
@@ -424,24 +421,27 @@ Status IrEmitter::EmitAtomicOperationForNestedComputation(
Status IrEmitter::HandleSelect(HloInstruction* select) {
auto pred = select->operand(0);
- auto on_true = select->operand(1);
- auto on_false = select->operand(2);
TF_RET_CHECK(pred->shape().element_type() == PRED);
-
- if (ShapeUtil::IsTuple(select->shape())) {
- llvm_ir::EmitTupleSelect(GetIrArray(*select, *select),
- GetIrArray(*pred, *select),
- GetBasePointer(*on_true),
- GetBasePointer(*on_false), &ir_builder_, module_);
- return Status::OK();
- }
-
// We must not call the subclass `DefaultAction` method, lest its
// `HandleSelect` call `IrEmitter::HandleSelect` and its `DefaultAction`
// assume no handler has already been called.
return IrEmitter::DefaultAction(select);
}
+Status IrEmitter::HandleTupleSelect(HloInstruction* tuple_select) {
+ auto pred = tuple_select->operand(0);
+ auto on_true = tuple_select->operand(1);
+ auto on_false = tuple_select->operand(2);
+ TF_RET_CHECK(pred->shape().element_type() == PRED);
+ TF_RET_CHECK(ShapeUtil::IsScalar(pred->shape()));
+ TF_RET_CHECK(ShapeUtil::IsTuple(tuple_select->shape()));
+ llvm_ir::EmitTupleSelect(GetIrArray(*tuple_select, *tuple_select),
+ GetIrArray(*pred, *tuple_select),
+ GetBasePointer(*on_true), GetBasePointer(*on_false),
+ &ir_builder_, module_);
+ return Status::OK();
+}
+
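
The new HandleTupleSelect lowers kTupleSelect by selecting between tuple base pointers rather than copying elements. As a hedged host-side analogy (hypothetical types, not the XLA API):

#include <array>

// Hypothetical analogy: model a tuple as a table of buffer addresses. A
// scalar predicate picks which whole address table the result takes; no
// leaf data is copied, mirroring what EmitTupleSelect emits above.
using TupleOfBuffers = std::array<void*, 2>;

inline TupleOfBuffers TupleSelect(bool pred, const TupleOfBuffers& on_true,
                                  const TupleOfBuffers& on_false) {
  return pred ? on_true : on_false;  // one pointer-table copy
}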
namespace {
llvm::Value* Real(llvm::Value* x, llvm::IRBuilder<>* ir_builder) {
return ir_builder->CreateExtractValue(x, {0});
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter.h b/tensorflow/compiler/xla/service/gpu/ir_emitter.h
index e55dfc6dae..d2dd335f10 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter.h
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter.h
@@ -88,6 +88,7 @@ class IrEmitter : public DfsHloVisitorWithDefault {
Status HandleReduce(HloInstruction* reduce) override;
Status HandleTuple(HloInstruction* tuple) override;
Status HandleSelect(HloInstruction* select) override;
+ Status HandleTupleSelect(HloInstruction* tuple_select) override;
Status HandleFusion(HloInstruction* fusion) override;
Status HandleCall(HloInstruction* call) override;
Status HandleCustomCall(HloInstruction* custom_call) override;
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
index f6f0a45124..75bbbbe8ef 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
@@ -28,7 +28,7 @@ limitations under the License.
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor.h"
@@ -48,6 +48,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/ir_emitter_context.h"
#include "tensorflow/compiler/xla/service/gpu/kernel_thunk.h"
#include "tensorflow/compiler/xla/service/gpu/memset_thunk.h"
+#include "tensorflow/compiler/xla/service/gpu/outfeed_thunk.h"
#include "tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h"
#include "tensorflow/compiler/xla/service/gpu/partition_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/sequential_thunk.h"
@@ -79,6 +80,7 @@ namespace gpu {
namespace {
+using llvm_ir::IrArray;
using llvm_ir::IrName;
using tensorflow::gtl::ArraySlice;
using tensorflow::gtl::InlinedVector;
@@ -355,7 +357,8 @@ Status IrEmitterUnnested::DefaultAction(HloInstruction* hlo) {
unroll_factor = ComputeMaxUnrollFactor(hlo);
}
- thunk_sequence_->emplace_back(BuildKernelThunk(hlo, unroll_factor));
+ thunk_sequence_->emplace_back(BuildKernelThunk(
+ hlo, /*implements_whole_instruction=*/true, unroll_factor));
return IrEmitter::DefaultAction(hlo);
}
@@ -369,7 +372,8 @@ Status IrEmitterUnnested::HandleDot(HloInstruction* dot) {
thunk_sequence_->emplace_back(BuildGemmThunk(dot));
return Status::OK();
}
- thunk_sequence_->emplace_back(BuildKernelThunk(dot));
+ thunk_sequence_->emplace_back(
+ BuildKernelThunk(dot, /*implements_whole_instruction=*/true));
return IrEmitter::HandleDot(dot);
}
@@ -379,7 +383,8 @@ Status IrEmitterUnnested::HandleConditional(HloInstruction* conditional) {
}
Status IrEmitterUnnested::HandleConvolution(HloInstruction* convolution) {
- thunk_sequence_->emplace_back(BuildKernelThunk(convolution));
+ thunk_sequence_->emplace_back(
+ BuildKernelThunk(convolution, /*implements_whole_instruction=*/true));
return IrEmitter::HandleConvolution(convolution);
}
@@ -586,10 +591,11 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) {
}
}
CHECK(first_reduce != nullptr);
- thunks.push_back(BuildKernelThunk(fusion));
+ thunks.push_back(
+ BuildKernelThunk(fusion, /*implements_whole_instruction=*/false));
thunk_sequence_->emplace_back(
MakeUnique<SequentialThunk>(std::move(thunks), fusion));
- std::vector<llvm_ir::IrArray> parameter_arrays;
+ std::vector<IrArray> parameter_arrays;
for (HloInstruction* operand : fusion->operands()) {
parameter_arrays.push_back(GetIrArray(*operand, *fusion));
}
@@ -615,6 +621,8 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) {
output_shape_index = {i};
}
if (inst->opcode() == HloOpcode::kReduce) {
+ CHECK(IsReductionToVector(*inst))
+ << "Only reductions to vector are supported";
// Shapes, layouts and dimensions must be the same for all reduces
// inside of this fusion.
CHECK(ShapeUtil::Equal(first_reduce->shape(), inst->shape()));
@@ -658,8 +666,9 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) {
// touching the un-updated elements.
// Set up kernel thunk and fused ir emitter.
- thunk_sequence_->emplace_back(BuildKernelThunk(fusion));
- std::vector<llvm_ir::IrArray> operand_arrays;
+ thunk_sequence_->emplace_back(
+ BuildKernelThunk(fusion, /*implements_whole_instruction=*/true));
+ std::vector<IrArray> operand_arrays;
for (HloInstruction* operand : fusion->operands()) {
operand_arrays.push_back(GetIrArray(*operand, *fusion));
}
@@ -672,7 +681,7 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) {
// Array to write into. Because this is an in-place operation, this is the
// same as operand 0's array.
- llvm_ir::IrArray output_array = GetIrArray(*fusion, *fusion);
+ IrArray output_array = GetIrArray(*fusion, *fusion);
LaunchDimensions launch_dimensions = CalculateLaunchDimensions(
update_shape, ir_emitter_context_->device_description());
@@ -685,314 +694,25 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) {
fusion, operand_arrays, output_array, &elemental_emitter,
launch_dimensions, &ir_builder_);
}
+
if (ImplementedAsGemm(*fusion)) {
thunk_sequence_->emplace_back(BuildGemmThunk(fusion));
return Status::OK();
}
- CHECK(fusion->fusion_kind() == HloInstruction::FusionKind::kLoop);
- int unroll_factor = ComputeMaxUnrollFactor(fusion);
+ CHECK_EQ(fusion->fusion_kind(), HloInstruction::FusionKind::kLoop);
- thunk_sequence_->emplace_back(BuildKernelThunk(fusion, unroll_factor));
- return IrEmitter::HandleFusion(fusion);
-}
-
-namespace {
-
-// Returns the indices of the first elements of all consecutive subarrays of the
-// given array. For example:
-// ConsecutiveSegments({m, m+1, m+2, n, k, k+1}) = {0, 3, 4}
-std::vector<size_t> ConsecutiveSegments(tensorflow::gtl::ArraySlice<int64> xs) {
- std::vector<size_t> is = {0};
- for (size_t i = 1; i < xs.size(); ++i) {
- if (1 != xs[i] - xs[i - 1]) {
- is.push_back(i);
- }
- }
- return is;
-}
-
-// Merges the sequences of dimensions of the given shape which start at the
-// given indices `segs`.
-Shape MergeDimensions(tensorflow::gtl::ArraySlice<size_t> segs,
- const Shape& shape) {
- std::vector<int64> dimensions;
- for (size_t i = 1; i <= segs.size(); ++i) {
- dimensions.push_back(std::accumulate(
- shape.dimensions().begin() + segs[i - 1],
- shape.dimensions().begin() +
- (segs.size() == i ? shape.dimensions().size() : segs[i]),
- 1, std::multiplies<int64>()));
- }
- return ShapeUtil::MakeShapeWithDescendingLayout(shape.element_type(),
- dimensions);
-}
-
-// Returns whether the given shapes and permutation are a 0-2-1 transpose, and
-// if so, the normalized and rank-reduced shapes. The shapes must have the same
-// dimensions, so this considers layout only.
-//
-// This function recognizes higher-rank transposes which are elementwise
-// equivalent to a 0-2-1 transpose.
-std::tuple<bool, Shape, Shape> IsTranspose021(const Shape& a, const Shape& b) {
- CHECK(ShapeUtil::Compatible(a, b));
- std::vector<int64> perm(a.dimensions().size());
- {
- auto layout_a_orig = LayoutUtil::MinorToMajor(a);
- std::vector<int64> layout_a(layout_a_orig.rbegin(), layout_a_orig.rend());
- auto layout_b_orig = LayoutUtil::MinorToMajor(b);
- std::vector<int64> layout_b(layout_b_orig.rbegin(), layout_b_orig.rend());
- for (size_t i = 0; i < perm.size(); ++i) {
- perm[i] = PositionInContainer(layout_b, layout_a[i]);
- }
+ if (CheckAndEmitHloWithTile021(fusion)) {
+ return Status::OK();
}
- auto segs = ConsecutiveSegments(perm);
- Shape norm_a =
- ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(a);
- Shape norm_b =
- ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(b);
- if (3 == segs.size() && 0 == perm[0]) {
- Shape reduced_a = MergeDimensions(segs, norm_a);
- Shape reduced_b = ShapeUtil::MakeShapeWithDescendingLayout(
- b.element_type(),
- Permute({0, 2, 1}, AsInt64Slice(reduced_a.dimensions())));
- return std::make_tuple(true, reduced_a, reduced_b);
- }
- return std::make_tuple(false, ShapeUtil::MakeNil(), ShapeUtil::MakeNil());
-}
-
-// Returns whether the given shapes are potentially of a 0-2-1 transpose.
-// As 0-2-1 is a self-inverse permutation, which shape is input or output is
-// arbitrary.
-bool AreShapesForTranspose021(const Shape& a, const Shape& b) {
- return 3 == b.dimensions().size() &&
- ShapeUtil::Compatible(
- ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(a),
- ShapeUtil::PermuteDimensions(
- {0, 2, 1},
- ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
- b)));
-}
-
-// Emits a tiled 0-2-1 transpose, assuming both input and output lain out from
-// major to minor. The x- and y- dimensions are tiled in square tiles of edge
-// length `tile_size`. Each thread block of `tile_size` x `num_rows` threads
-// transposes one tile: each thread copies a row from the input to a shared
-// memory tile, then copies a column from the shared memory tile to the output.
-//
-// `tile_size` should usually be same as warp size.
-//
-// Returns (number of tiles = number of thread blocks needed).
-//
-// TODO(b/33320379): Here each block transposes 1 tile. It may be more efficient
-// to launch fewer blocks so each transposes many tiles, and
-// in any case, the number of blocks we can launch is limited.
-//
-// This is the same algorithm in CUDA:
-// https://github.com/tensorflow/tensorflow/blob/d2693c8a70567cc78b2e8a9ac8020d321620ca83/tensorflow/core/kernels/conv_ops_gpu_3.cu.cc#L189
-int64 EmitTranspose021Tiled(llvm_ir::IrArray input, llvm_ir::IrArray output,
- const int64 tile_size, const int64 num_rows,
- llvm::IRBuilder<>* builder) {
- // Adds `addend` to the given `dim` of `index`.
- auto offset_dim = [builder](llvm_ir::IrArray::Index index,
- llvm::Value* addend, int64 dim) {
- index[dim] = builder->CreateAdd(index[dim], addend);
- return index;
- };
- CHECK(AreShapesForTranspose021(input.GetShape(), output.GetShape()));
-
- Shape input_shape =
- ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
- input.GetShape());
- Shape output_shape =
- ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
- output.GetShape());
- input = input.CastToShape(input_shape, builder);
- output = output.CastToShape(output_shape, builder);
-
- llvm::Type* tile_type = llvm::ArrayType::get(
- llvm::ArrayType::get(input.GetElementLlvmType(), tile_size),
- // One extra here to avoid share memory bank conflict
- tile_size + 1);
- auto* tile = new llvm::GlobalVariable(
- *builder->GetInsertBlock()->getParent()->getParent(), tile_type,
- /*isConstant=*/false, llvm::GlobalValue::PrivateLinkage,
- llvm::UndefValue::get(tile_type), "tile", nullptr,
- llvm::GlobalValue::NotThreadLocal,
- /*AddressSpace=*/3 /* GPU shared memory */);
-
- // let x = threadIdx.x
- llvm::Value* x = llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x, {}, {}, builder);
- llvm_ir::AddRangeMetadata(0, num_rows * tile_size,
- static_cast<llvm::Instruction*>(x));
- x = builder->CreateIntCast(x, builder->getInt64Ty(), /*isSigned=*/true,
- "thread.id.x");
-
- // computing logical thread ids
- // logical_x = x % tile_size
- auto logical_x = builder->CreateURem(x, builder->getInt64(tile_size));
-
- // logical_y = x / tile_size
- auto logical_y = builder->CreateUDiv(x, builder->getInt64(tile_size));
-
- // `emit_cp` emits equivalent to following pseudocode:
- // if (tile_size == tile_width && tile_size == tile_height) {
- // unroll for (i in range(0, tile_size, num_rows)) {
- // emit_cp_element(index + {0, i, 0}, y + logical_y);
- // }
- // } else if (x < tile_width) {
- // tile_height_upperbound = ceil(tile_height / num_rows) * num_rows;
- // for (i in range(0, tile_height_upperbound, num_rows)) {
- // y_loc = i + logical_y;
- // if (y_loc < tile_height)
- // emit_cp_element(index + {0, i, 0}, y_loc);
- // }
- // }
- //
- // We use this to emit both the copy from input to tile and the copy from tile
- // to output.
- //
- // `index` is the origin of the row or column in the input or output array.
- //
- // `emit_cp_element(index, y)` emits code to copy a single element between the
- // tile and the input or output array, where `y` is the `y`-position in the
- // tile, whether which is row or column is a function of whether we're copying
- // from input or to output, and `index` is the index into the input or output
- // array.
- auto emit_cp_tile = [builder, tile_size, &offset_dim, num_rows, logical_x,
- logical_y](
- std::function<void(const llvm_ir::IrArray::Index&,
- llvm::Value*)>
- emit_cp_element,
- llvm::Value* tile_width, llvm::Value* tile_height,
- const llvm_ir::IrArray::Index& index,
- const string& loop_name) {
- llvm_ir::LlvmIfData if_not_last_row = llvm_ir::EmitIfThenElse(
- builder->CreateAnd(
- builder->CreateICmpEQ(builder->getInt64(tile_size), tile_width),
- builder->CreateICmpEQ(builder->getInt64(tile_size), tile_height)),
- "not_last_row", builder);
- builder->SetInsertPoint(if_not_last_row.true_block->getTerminator());
- for (int64 i = 0; i < tile_size; i += num_rows) {
- auto source_idx = offset_dim(index, builder->getInt64(i), /*dim=*/1);
- auto y_loc = builder->CreateAdd(builder->getInt64(i), logical_y);
- emit_cp_element(source_idx, y_loc);
- }
- builder->SetInsertPoint(if_not_last_row.false_block->getTerminator());
- llvm_ir::LlvmIfData if_in_tile = llvm_ir::EmitIfThenElse(
- builder->CreateICmpULT(logical_x, tile_width), "x_in_tile", builder);
- builder->SetInsertPoint(if_in_tile.true_block->getTerminator());
-
- // tile_height_upper_bound = ceil(tile_height / num_rows) * num_rows
- auto tile_height_upper_bound = builder->CreateMul(
- builder->CreateUDiv(
- builder->CreateAdd(tile_height, builder->getInt64(num_rows - 1)),
- builder->getInt64(num_rows)),
- builder->getInt64(num_rows));
-
- auto loop = llvm_ir::ForLoop::EmitForLoop(
- loop_name, builder->getInt64(0), tile_height_upper_bound,
- builder->getInt64(num_rows), builder);
- llvm_ir::SetToFirstInsertPoint(loop->GetHeaderBasicBlock(), builder);
- builder->SetInsertPoint(loop->GetBodyBasicBlock()->getTerminator());
-
- auto y_loc = builder->CreateAdd(loop->GetIndVarValue(), logical_y);
- auto if_y_in_tile = llvm_ir::EmitIfThenElse(
- builder->CreateICmpULT(y_loc, tile_height), "y_in_tile", builder);
- builder->SetInsertPoint(if_y_in_tile.true_block->getTerminator());
-
- emit_cp_element(offset_dim(index, loop->GetIndVarValue(), /*dim=*/1),
- y_loc);
- builder->SetInsertPoint(if_not_last_row.after_block->getTerminator());
- };
-
- auto input_dims_in_tiles = input_shape.dimensions();
- // Unpermuted dimensions are untiled.
- for (int i = 1; i < 3; ++i) {
- input_dims_in_tiles[i] =
- CeilOfRatio<int64>(input_dims_in_tiles[i], tile_size);
- }
- int64 num_tiles =
- std::accumulate(input_dims_in_tiles.begin(), input_dims_in_tiles.end(), 1,
- std::multiplies<int64>());
- const llvm_ir::IrArray::Index input_tile_index(
- /*linear=*/builder->CreateIntCast(
- llvm_ir::AddRangeMetadata(
- 0, num_tiles,
- static_cast<llvm::Instruction*>(llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x, {}, {},
- builder))),
- builder->getInt64Ty(), /*isSigned=*/true, "block.id.x"),
- ShapeUtil::MakeShapeWithDescendingLayout(
- PRED /*arbitrary*/, AsInt64Slice(input_dims_in_tiles)),
- builder);
- const llvm_ir::IrArray::Index input_tile_origin = ({
- llvm_ir::IrArray::Index index = input_tile_index;
- for (int i = 1; i < 3; ++i) {
- index[i] = builder->CreateMul(index[i], builder->getInt64(tile_size),
- "tile_origin." + std::to_string(i));
- }
- index;
- });
- const llvm_ir::IrArray::Index input_index =
- offset_dim(offset_dim(input_tile_origin, logical_x, /*dim=*/2), logical_y,
- /*dim=*/1);
- std::vector<llvm::Value*> tile_dims(input_shape.dimensions().size());
- // Only last row or column may not have full size.
- for (int i = 1; i < 3; ++i) {
- tile_dims[i] = builder->CreateSelect(
- builder->CreateICmpEQ(input_tile_index[i],
- builder->getInt64(input_dims_in_tiles[i] - 1)),
- builder->getInt64(input_shape.dimensions(i) -
- (input_dims_in_tiles[i] - 1) * tile_size),
- builder->getInt64(tile_size), "tile_size");
- }
-
- // Load data from input memory to shared memory tile.
- emit_cp_tile(
- // tile[y, x] = input_array[index]
- [builder, tile, &input, logical_x](const llvm_ir::IrArray::Index& index,
- llvm::Value* y) {
- builder->CreateStore(
- input.EmitReadArrayElement(index, builder, "input_element"),
- builder->CreateGEP(tile, {builder->getInt64(0), y, logical_x}));
- },
- tile_dims[2], tile_dims[1], input_index, "input");
+ int unroll_factor = ComputeMaxUnrollFactor(fusion);
- // Wait for all threads to reach this point, lest we copy a value from tile to
- // output before the other thread copies it from input to tile.
- // This is `__syncthreads` in CUDA.
- llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_barrier0, {}, {}, builder);
-
- const llvm_ir::IrArray::Index output_tile_index(
- Permute({0, 2, 1}, input_tile_index.multidim()));
- const llvm_ir::IrArray::Index output_tile_origin(
- Permute({0, 2, 1}, input_tile_origin.multidim()));
- const llvm_ir::IrArray::Index output_index =
- offset_dim(offset_dim(output_tile_origin, logical_x, /*dim=*/2),
- logical_y, /*dim=*/1);
-
- // Store data from shared memory tile to output memory.
- emit_cp_tile(
- // output_array[index] = tile[x, y]
- [builder, tile, &output, logical_x](const llvm_ir::IrArray::Index& index,
- llvm::Value* y) {
- output.EmitWriteArrayElement(
- index,
- builder->CreateLoad(
- builder->CreateGEP(tile, {builder->getInt64(0), logical_x, y}),
- "output_element"),
- builder);
- },
- tile_dims[1], tile_dims[2], output_index, "output");
-
- return num_tiles;
+ thunk_sequence_->emplace_back(BuildKernelThunk(
+ fusion, /*implements_whole_instruction=*/true, unroll_factor));
+ return IrEmitter::HandleFusion(fusion);
}
-} // namespace
-
Status IrEmitterUnnested::HandleCopy(HloInstruction* copy) {
if (ImplementedAsHostToDeviceMemcpy(ir_emitter_context_->buffer_assignment(),
*copy)) {
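
The roughly 300 lines removed in the hunk above are the tiled 0-2-1 transpose now reached through CheckAndEmitHloWithTile021. As a reference for the algorithm itself, a minimal CUDA sketch of the classic shared-memory pattern with the same parameters the removed code used (32x32 tiles, 8 thread rows, one padding column against bank conflicts); a sketch only, not the IR the emitter generates:

constexpr int kTileSize = 32;  // tile_size in the removed code
constexpr int kNumRows = 8;    // num_rows in the removed code

// Hypothetical standalone kernel: transpose the two minor dimensions of a
// row-major [depth][height][width] array. Launch with block = dim3(32, 8)
// and grid = dim3(ceil(width/32), ceil(height/32), depth).
__global__ void Transpose021(const float* in, float* out, int depth,
                             int height, int width) {
  // One extra column so column-wise reads hit distinct shared-memory
  // banks, matching the `tile_size + 1` in the removed code.
  __shared__ float tile[kTileSize][kTileSize + 1];
  int z = blockIdx.z;
  int x0 = blockIdx.x * kTileSize;
  int y0 = blockIdx.y * kTileSize;
  // Each thread copies kTileSize / kNumRows input rows into the tile.
  for (int i = threadIdx.y; i < kTileSize; i += kNumRows) {
    int x = x0 + threadIdx.x, y = y0 + i;
    if (x < width && y < height)
      tile[i][threadIdx.x] = in[(z * height + y) * width + x];
  }
  __syncthreads();  // the nvvm_barrier0 call in the removed code
  // Write columns of the tile to the [depth][width][height] output.
  for (int i = threadIdx.y; i < kTileSize; i += kNumRows) {
    int y = y0 + threadIdx.x, x = x0 + i;
    if (x < width && y < height)
      out[(z * width + x) * height + y] = tile[threadIdx.x][i];
  }
}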
@@ -1004,25 +724,7 @@ Status IrEmitterUnnested::HandleCopy(HloInstruction* copy) {
thunk_sequence_->emplace_back(BuildDeviceToDeviceCopyThunk(copy));
return Status::OK();
}
- bool is_transpose_021;
- Shape reduced_input_shape, reduced_output_shape;
- std::tie(is_transpose_021, reduced_input_shape, reduced_output_shape) =
- IsTranspose021(copy->operand(0)->shape(), copy->shape());
- if (is_transpose_021 &&
- reduced_input_shape.dimensions(1) >= kMinDimensionToTransposeTiled &&
- reduced_input_shape.dimensions(2) >= kMinDimensionToTransposeTiled) {
- thunk_sequence_->emplace_back(BuildKernelThunk(copy));
- VLOG(3) << "Emitting tiled 0-2-1 transposition";
- constexpr int64 tile_size = 32;
- constexpr int64 num_rows = 8;
- int64 num_tiles = EmitTranspose021Tiled(
- GetIrArray(*copy->operand(0), *copy)
- .CastToShape(reduced_input_shape, &ir_builder_),
- GetIrArray(*copy, *copy)
- .CastToShape(reduced_output_shape, &ir_builder_),
- tile_size, num_rows, &ir_builder_);
- UpdateLaunchDimensions(LaunchDimensions(num_tiles, num_rows * tile_size),
- LastThunk(), ir_emitter_context_->llvm_module());
+ if (CheckAndEmitHloWithTile021(copy)) {
return Status::OK();
}
@@ -1030,7 +732,7 @@ Status IrEmitterUnnested::HandleCopy(HloInstruction* copy) {
}
Status IrEmitterUnnested::EmitExtraOutputsForReduce(
- const HloInstruction* reduce, const llvm_ir::IrArray::Index& index,
+ const HloInstruction* reduce, const IrArray::Index& index,
tensorflow::gtl::ArraySlice<
std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
extra_output_gens) {
@@ -1073,11 +775,9 @@ Status IrEmitterUnnested::EmitReductionToScalar(
tiled_input_shape, ir_emitter_context_->device_description());
llvm::Type* index_ty = GetIndexTypeForKernel(
- reduce,
- launch_dimensions.block_count() * launch_dimensions.threads_per_block(),
- &ir_builder_);
+ reduce, launch_dimensions.launch_bound(), &ir_builder_);
- auto index_typed_const = [&](uint64 c) -> llvm::Constant* {
+ auto index_typed_constant = [&](uint64 c) -> llvm::Constant* {
return llvm::ConstantInt::get(index_ty, c);
};
@@ -1119,8 +819,7 @@ Status IrEmitterUnnested::EmitReductionToScalar(
// // and threads_per_block is a multiple of warpSize.
// reduce_kernel<<<num_blocks, threads_per_block>>>();
//
- auto loop_body_emitter =
- [=](const llvm_ir::IrArray::Index& tile_index) -> Status {
+ auto loop_body_emitter = [=](const IrArray::Index& tile_index) -> Status {
const int num_reduces = reducers.size();
llvm::Type* element_ir_type =
llvm_ir::PrimitiveTypeToIrType(input_shape.element_type(), module_);
@@ -1129,9 +828,8 @@ Status IrEmitterUnnested::EmitReductionToScalar(
llvm::Value* partial_reduction_result_address = ir_builder_.CreateAlloca(
element_ir_type, /*ArraySize=*/nullptr,
"partial_reduction_result." + llvm::Twine(i));
- TF_ASSIGN_OR_RETURN(
- llvm::Value* const init_ir_value,
- init_value_gens[i](llvm_ir::IrArray::Index(index_ty)));
+ TF_ASSIGN_OR_RETURN(llvm::Value* const init_ir_value,
+ init_value_gens[i](IrArray::Index(index_ty)));
ir_builder_.CreateStore(init_ir_value, partial_reduction_result_address);
partial_reduction_result_addresses.push_back(
partial_reduction_result_address);
@@ -1143,21 +841,22 @@ Status IrEmitterUnnested::EmitReductionToScalar(
// Emit an inner for-loop that reduces the elements in the tile.
auto emit_tile_element_loop = [=](bool tile_in_bounds) -> Status {
std::unique_ptr<llvm_ir::ForLoop> tile_element_loop =
- llvm_ir::ForLoop::EmitForLoop(
- "element_id_in_tile", index_typed_const(0),
- index_typed_const(kTileSize), index_typed_const(1), &ir_builder_);
+ llvm_ir::ForLoop::EmitForLoop("element_id_in_tile",
+ index_typed_constant(0),
+ index_typed_constant(kTileSize),
+ index_typed_constant(1), &ir_builder_);
// Emit the body of the partial reduction loop.
llvm_ir::SetToFirstInsertPoint(tile_element_loop->GetBodyBasicBlock(),
&ir_builder_);
llvm::Value* x = ir_builder_.CreateNSWAdd(
- ir_builder_.CreateNSWMul(x_in_tiles, index_typed_const(kTileSize)),
+ ir_builder_.CreateNSWMul(x_in_tiles, index_typed_constant(kTileSize)),
tile_element_loop->GetIndVarValue());
// Unless we know the tile is entirely in bounds, we have to emit a
// x-in-bounds check before reading from the input.
if (!tile_in_bounds) {
llvm_ir::LlvmIfData if_data = llvm_ir::EmitIfThenElse(
- ir_builder_.CreateICmpULT(x, index_typed_const(num_elems)),
+ ir_builder_.CreateICmpULT(x, index_typed_constant(num_elems)),
"x_in_bounds", &ir_builder_);
// Emit code that reads the input element and accumulates it to
@@ -1165,7 +864,7 @@ Status IrEmitterUnnested::EmitReductionToScalar(
llvm_ir::SetToFirstInsertPoint(if_data.true_block, &ir_builder_);
}
- llvm_ir::IrArray::Index input_index(
+ IrArray::Index input_index(
/*linear=*/x, input_shape, &ir_builder_);
llvm::Value* input_address = ir_builder_.CreateAlloca(element_ir_type);
for (int i = 0; i != num_reduces; ++i) {
@@ -1183,12 +882,12 @@ Status IrEmitterUnnested::EmitReductionToScalar(
// x_end = kTileSize + x_in_tiles * kTileSize, i.e., the location that's
// immediately beyond the tile.
llvm::Value* x_end = ir_builder_.CreateNSWAdd(
- index_typed_const(kTileSize),
- ir_builder_.CreateNSWMul(x_in_tiles, index_typed_const(kTileSize)));
+ index_typed_constant(kTileSize),
+ ir_builder_.CreateNSWMul(x_in_tiles, index_typed_constant(kTileSize)));
// The tile is entirely in bounds if all_threads_in_bounds or
// x_end <= num_elems.
llvm::Value* tile_in_bounds = ir_builder_.CreateOr(
- ir_builder_.CreateICmpULE(x_end, index_typed_const(num_elems)),
+ ir_builder_.CreateICmpULE(x_end, index_typed_constant(num_elems)),
ir_builder_.getInt1(all_threads_in_bounds));
llvm_ir::LlvmIfData if_tile_in_bounds_data =
llvm_ir::EmitIfThenElse(tile_in_bounds, "tile_in_bounds", &ir_builder_);
@@ -1219,10 +918,13 @@ Status IrEmitterUnnested::EmitReductionToScalar(
ir_builder_.CreateBitCast(partial_reduction_result_addresses[i],
shuffle_ir_type->getPointerTo()),
"partial_reduction_result");
+ CHECK_EQ(launch_dimensions.threads_per_block() % kWarpSize, 0)
+ << "Requires block size a multiple of the warp size, otherwise we "
+ "will read undefined elements.";
ir_builder_.CreateStore(
- EmitShuffleDown(partial_reduction_result,
- ir_builder_.getInt32(shuffle_distance),
- &ir_builder_),
+ EmitFullWarpShuffleDown(partial_reduction_result,
+ ir_builder_.getInt32(shuffle_distance),
+ &ir_builder_),
ir_builder_.CreateBitCast(result_from_other_lane,
shuffle_ir_type->getPointerTo()));
TF_RETURN_IF_ERROR(EmitCallToNestedComputation(
@@ -1239,9 +941,9 @@ Status IrEmitterUnnested::EmitReductionToScalar(
// lane 0 (which holds the partially accumulated result for its warp) to the
// output element.
llvm::Value* lane_id = ir_builder_.CreateURem(
- x_in_tiles, index_typed_const(kWarpSize), "lane_id");
+ x_in_tiles, index_typed_constant(kWarpSize), "lane_id");
llvm_ir::LlvmIfData if_lane_id_is_zero_data = llvm_ir::EmitIfThenElse(
- ir_builder_.CreateICmpEQ(lane_id, index_typed_const(0)),
+ ir_builder_.CreateICmpEQ(lane_id, index_typed_constant(0)),
"lane_id_is_zero", &ir_builder_);
llvm_ir::SetToFirstInsertPoint(if_lane_id_is_zero_data.true_block,
&ir_builder_);
@@ -1250,7 +952,7 @@ Status IrEmitterUnnested::EmitReductionToScalar(
llvm::Value* output_address =
GetIrArray(*output, *output, reduce_output_shapes[i])
.EmitArrayElementAddress(
- llvm_ir::IrArray::Index(
+ IrArray::Index(
/*linear=*/ir_builder_.getInt64(0),
ShapeUtil::GetSubshape(output->shape(),
reduce_output_shapes[i]),
@@ -1309,7 +1011,7 @@ Status IrEmitterUnnested::EmitColumnReduction(
// TODO(b/110211620): Convert to use i32 index_type when it is possible.
llvm::Type* index_ty = ir_builder_.getInt64Ty();
- auto index_typed_const = [&](uint64 c) -> llvm::Constant* {
+ auto index_typed_constant = [&](uint64 c) -> llvm::Constant* {
return llvm::ConstantInt::get(index_ty, c);
};
@@ -1336,8 +1038,7 @@ Status IrEmitterUnnested::EmitColumnReduction(
// }
// AtomicReducer(&output[x], partial_result);
// }
- auto loop_body_emitter =
- [=](const llvm_ir::IrArray::Index& tile_index) -> Status {
+ auto loop_body_emitter = [=](const IrArray::Index& tile_index) -> Status {
const int num_reduces = reducers.size();
// Emit the loop body that reduces one tile.
llvm::Type* element_ir_type =
@@ -1347,9 +1048,8 @@ Status IrEmitterUnnested::EmitColumnReduction(
llvm::Value* partial_reduction_result_address = ir_builder_.CreateAlloca(
element_ir_type, /*ArraySize=*/nullptr,
"partial_reduction_result." + llvm::Twine(i));
- TF_ASSIGN_OR_RETURN(
- llvm::Value* const init_ir_value,
- init_value_gens[i](llvm_ir::IrArray::Index(index_ty)));
+ TF_ASSIGN_OR_RETURN(llvm::Value* const init_ir_value,
+ init_value_gens[i](IrArray::Index(index_ty)));
ir_builder_.CreateStore(init_ir_value, partial_reduction_result_address);
partial_reduction_result_addresses.push_back(
partial_reduction_result_address);
@@ -1365,22 +1065,23 @@ Status IrEmitterUnnested::EmitColumnReduction(
auto emit_tile_element_loop = [=](bool tile_in_bounds) -> Status {
std::unique_ptr<llvm_ir::ForLoop> tile_element_loop =
- llvm_ir::ForLoop::EmitForLoop(
- "element_id_in_tile", index_typed_const(0),
- index_typed_const(kTileSize), index_typed_const(1), &ir_builder_);
+ llvm_ir::ForLoop::EmitForLoop("element_id_in_tile",
+ index_typed_constant(0),
+ index_typed_constant(kTileSize),
+ index_typed_constant(1), &ir_builder_);
// Emit the body of the partial reduction loop.
llvm_ir::SetToFirstInsertPoint(tile_element_loop->GetBodyBasicBlock(),
&ir_builder_);
llvm::Value* y = ir_builder_.CreateNSWAdd(
- ir_builder_.CreateNSWMul(y_in_tiles, index_typed_const(kTileSize)),
+ ir_builder_.CreateNSWMul(y_in_tiles, index_typed_constant(kTileSize)),
tile_element_loop->GetIndVarValue());
// Unless we know the tile is entirely in bounds, we have to emit a
// y-in-bounds check before reading from the input.
if (!tile_in_bounds) {
llvm_ir::LlvmIfData if_data = llvm_ir::EmitIfThenElse(
- ir_builder_.CreateICmpULT(y, index_typed_const(height)),
+ ir_builder_.CreateICmpULT(y, index_typed_constant(height)),
"y_in_bounds", &ir_builder_);
// Emit code that reads the input element and accumulates it to
@@ -1404,9 +1105,9 @@ Status IrEmitterUnnested::EmitColumnReduction(
const Shape input_matrix_shape =
ShapeUtil::MakeShapeWithDescendingLayout(input_shape.element_type(),
{height, width});
- const llvm_ir::IrArray::Index input_matrix_index(
- {y, x}, input_matrix_shape, &ir_builder_);
- const llvm_ir::IrArray::Index input_index =
+ const IrArray::Index input_matrix_index({y, x}, input_matrix_shape,
+ &ir_builder_);
+ const IrArray::Index input_index =
input_matrix_index
.SourceIndexOfReshape(input_matrix_shape,
normalized_input_shape, &ir_builder_)
@@ -1430,10 +1131,10 @@ Status IrEmitterUnnested::EmitColumnReduction(
// y_end = kTileSize + y_in_tiles * kTileSize, i.e., the y location that's
// immediately beyond the tile.
llvm::Value* y_end = ir_builder_.CreateNSWAdd(
- index_typed_const(kTileSize),
- ir_builder_.CreateNSWMul(y_in_tiles, index_typed_const(kTileSize)));
+ index_typed_constant(kTileSize),
+ ir_builder_.CreateNSWMul(y_in_tiles, index_typed_constant(kTileSize)));
llvm::Value* tile_in_bounds = ir_builder_.CreateOr(
- ir_builder_.CreateICmpULE(y_end, index_typed_const(height)),
+ ir_builder_.CreateICmpULE(y_end, index_typed_constant(height)),
ir_builder_.getInt1(height % kTileSize == 0));
// The tile is entirely in bounds if "height" is a multiple of kTileSize or
// y_end <= height.
@@ -1457,11 +1158,10 @@ Status IrEmitterUnnested::EmitColumnReduction(
llvm::Value* output_address =
GetIrArray(*output, *output, reduce_output_shapes[i])
.EmitArrayElementAddress(
- llvm_ir::IrArray::Index(
- x,
- ShapeUtil::GetSubshape(output->shape(),
- reduce_output_shapes[i]),
- &ir_builder_),
+ IrArray::Index(x,
+ ShapeUtil::GetSubshape(
+ output->shape(), reduce_output_shapes[i]),
+ &ir_builder_),
&ir_builder_, "output_element_address");
TF_RETURN_IF_ERROR(EmitAtomicOperationForNestedComputation(
*reducers[i], output_address, partial_reduction_result_addresses[i]));
@@ -1529,7 +1229,7 @@ Status IrEmitterUnnested::EmitRowReduction(
// for (element_id_in_tile : range(x_tile_size)) {
// int x = x_in_tiles * x_tile_size + element_id_in_tile;
// if (x < width)
- // partial_result = reducer(partial_result, input[z][y][z]);
+ // partial_result = reducer(partial_result, input[z][y][x]);
// }
// AtomicReducer(&output[y], partial_result);
// }
@@ -1583,10 +1283,11 @@ Status IrEmitterUnnested::EmitRowReduction(
// for (int element_id_in_z_tile = 0; element_id_in_z_tile < z_tile_size;
// ++element_id_in_z_tile) {
// z = z_in_tiles * z_tile_size + element_id_in_z_tile;
+ // int tx = x;
// for (int element_id_in_x_tile = 0;
// element_id_in_x_tile < x_tile_size;
- // ++element_id_in_x_tile, x += warpSize) {
- // partial_result = Reducer(partial_result, input[z][y][x]);
+ // ++element_id_in_x_tile, tx += warpSize) {
+ // partial_result = Reducer(partial_result, input[z][y][tx]);
// }
// }
// } else {
@@ -1594,10 +1295,11 @@ Status IrEmitterUnnested::EmitRowReduction(
// for (int element_id_in_z_tile = 0; element_id_in_z_tile < z_tile_size;
// ++element_id_in_z_tile) {
// z = z_in_tiles * z_tile_size + element_id_in_z_tile;
+ // int tx = x;
// for (int element_id_in_x_tile = 0; element_id_in_x_tile <
- // x_tile_size; ++element_id_in_tile, x += warpSize) {
- // if (x < width)
- // partial_result = Reducer(partial_result, input[z][y][x]);
+ // x_tile_size; ++element_id_in_tile, tx += warpSize) {
+ // if (tx < width)
+ // partial_result = Reducer(partial_result, input[z][y][tx]);
// }
// }
// }
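
The corrected pseudocode above keeps `x` fixed and strides `tx` by warpSize: each lane of a warp visits x_tile_size row elements spaced warpSize apart. A minimal CUDA sketch of just that inner loop (hypothetical helper, sum as the reducer, one row-major input row):

// Hypothetical inner loop of the row reduction: one warp cooperates on a
// row, each lane visiting x_tile_size elements spaced kWarpSize apart,
// exactly the tx stride in the pseudocode above.
__device__ float RowPartialSum(const float* row, int width, int lane_id,
                               int warp_id, int x_tile_size) {
  const int kWarpSize = 32;
  float partial = 0.0f;
  // First element this lane touches: lane_id + warpSize*(warp_id*x_tile_size).
  int tx = lane_id + kWarpSize * warp_id * x_tile_size;
  for (int e = 0; e < x_tile_size; ++e, tx += kWarpSize) {
    if (tx < width) partial += row[tx];  // the `if (tx < width)` guard
  }
  return partial;  // later combined across lanes via shuffle-down
}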
@@ -1625,15 +1327,13 @@ Status IrEmitterUnnested::EmitRowReduction(
LaunchDimensions launch_dimensions = CalculateLaunchDimensions(
tiled_input_shape, ir_emitter_context_->device_description());
llvm::Type* index_ty = GetIndexTypeForKernel(
- reduce,
- launch_dimensions.block_count() * launch_dimensions.threads_per_block(),
- &ir_builder_);
+ reduce, launch_dimensions.launch_bound(), &ir_builder_);
- auto index_typed_const = [&](uint64 c) -> llvm::Constant* {
+ auto index_typed_constant = [&](uint64 c) -> llvm::Constant* {
return llvm::ConstantInt::get(index_ty, c);
};
- auto loop_body_emitter = [=](const llvm_ir::IrArray::Index& tile_index) {
+ auto loop_body_emitter = [=](const IrArray::Index& tile_index) {
const int num_reduces = reducers.size();
llvm::Type* element_ir_type = llvm_ir::PrimitiveTypeToIrType(
input_shape.element_type(), ir_emitter_context_->llvm_module());
@@ -1642,9 +1342,8 @@ Status IrEmitterUnnested::EmitRowReduction(
llvm::Value* partial_reduction_result_address = ir_builder_.CreateAlloca(
element_ir_type, /*ArraySize=*/nullptr,
"partial_reduction_result." + llvm::Twine(i));
- TF_ASSIGN_OR_RETURN(
- llvm::Value* const init_ir_value,
- init_value_gens[i](llvm_ir::IrArray::Index(index_ty)));
+ TF_ASSIGN_OR_RETURN(llvm::Value* const init_ir_value,
+ init_value_gens[i](IrArray::Index(index_ty)));
ir_builder_.CreateStore(init_ir_value, partial_reduction_result_address);
partial_reduction_result_addresses.push_back(
partial_reduction_result_address);
@@ -1656,20 +1355,20 @@ Status IrEmitterUnnested::EmitRowReduction(
x_tile = ir_builder_.CreateZExtOrTrunc(x_tile, index_ty);
- llvm::Value* warp_id =
- ir_builder_.CreateUDiv(x_tile, index_typed_const(kWarpSize), "warp_id");
- llvm::Value* lane_id =
- ir_builder_.CreateURem(x_tile, index_typed_const(kWarpSize), "lane_id");
+ llvm::Value* warp_id = ir_builder_.CreateUDiv(
+ x_tile, index_typed_constant(kWarpSize), "warp_id");
+ llvm::Value* lane_id = ir_builder_.CreateURem(
+ x_tile, index_typed_constant(kWarpSize), "lane_id");
// The x-location of the last element in this z-x-tile.
// last_x = lane_id + warpSize * (x_tile_size - 1 + warp_id * x_tile_size);
llvm::Value* last_x = ir_builder_.CreateNSWAdd(
lane_id, ir_builder_.CreateNSWMul(
- index_typed_const(kWarpSize),
+ index_typed_constant(kWarpSize),
ir_builder_.CreateNSWAdd(
- index_typed_const(x_tile_size - 1),
+ index_typed_constant(x_tile_size - 1),
ir_builder_.CreateNSWMul(
- warp_id, index_typed_const(x_tile_size)))));
+ warp_id, index_typed_constant(x_tile_size)))));
KernelSupportLibrary ksl(
&ir_builder_,
@@ -1682,19 +1381,19 @@ Status IrEmitterUnnested::EmitRowReduction(
int64 x_tile_loop_bound) -> Status {
auto emit_z_tile_element_loop = [&](llvm::Value* z_indvar) -> Status {
llvm::Value* z = ir_builder_.CreateNSWAdd(
- z_indvar,
- ir_builder_.CreateNSWMul(index_typed_const(z_tile_size), z_tile));
+ z_indvar, ir_builder_.CreateNSWMul(
+ index_typed_constant(z_tile_size), z_tile));
TF_RETURN_IF_ERROR(ksl.For(
"x_tile",
- /*start=*/index_typed_const(0),
- /*end=*/index_typed_const(x_tile_loop_bound),
+ /*start=*/index_typed_constant(0),
+ /*end=*/index_typed_constant(x_tile_loop_bound),
/*step=*/1, [&](llvm::Value* x_indvar) -> Status {
// x = lane_id +
// warpSize * (element_id_in_x_tile + warp_id * x_tile_size);
llvm::Value* x = ir_builder_.CreateNSWAdd(
lane_id,
ir_builder_.CreateNSWMul(
- index_typed_const(kWarpSize),
+ index_typed_constant(kWarpSize),
ir_builder_.CreateNSWAdd(
x_indvar, ir_builder_.CreateNSWMul(
warp_id, llvm::ConstantInt::get(
@@ -1704,9 +1403,9 @@ Status IrEmitterUnnested::EmitRowReduction(
// emit a x-in-bounds check before reading from the input.
if (!x_tile_in_bounds) {
llvm_ir::LlvmIfData if_x_in_bounds_data =
- llvm_ir::EmitIfThenElse(
- ir_builder_.CreateICmpULT(x, index_typed_const(width)),
- "x_in_bounds", &ir_builder_);
+ llvm_ir::EmitIfThenElse(ir_builder_.CreateICmpULT(
+ x, index_typed_constant(width)),
+ "x_in_bounds", &ir_builder_);
// Points ir_builder_ to the then-block.
llvm_ir::SetToFirstInsertPoint(if_x_in_bounds_data.true_block,
&ir_builder_);
@@ -1733,9 +1432,9 @@ Status IrEmitterUnnested::EmitRowReduction(
const Shape input_3d_tensor_shape =
ShapeUtil::MakeShapeWithDescendingLayout(
input_shape.element_type(), {depth, height, width});
- const llvm_ir::IrArray::Index input_3d_tensor_index(
+ const IrArray::Index input_3d_tensor_index(
{z, y, x}, input_3d_tensor_shape, &ir_builder_);
- const llvm_ir::IrArray::Index input_index =
+ const IrArray::Index input_index =
input_3d_tensor_index
.SourceIndexOfReshape(input_3d_tensor_shape,
normalized_input_shape,
@@ -1761,14 +1460,14 @@ Status IrEmitterUnnested::EmitRowReduction(
};
return ksl.For("z_tile",
- /*start=*/index_typed_const(0),
- /*end=*/index_typed_const(z_tile_size),
+ /*start=*/index_typed_constant(0),
+ /*end=*/index_typed_constant(z_tile_size),
/*step=*/1, emit_z_tile_element_loop);
};
llvm::Value* tile_in_bounds = ir_builder_.CreateOr(
ir_builder_.getInt1(width % (x_tile_size * kWarpSize) == 0),
- ir_builder_.CreateICmpULT(last_x, index_typed_const(width)));
+ ir_builder_.CreateICmpULT(last_x, index_typed_constant(width)));
TF_RETURN_IF_ERROR(
ksl.If(tile_in_bounds,
@@ -1802,10 +1501,13 @@ Status IrEmitterUnnested::EmitRowReduction(
ir_builder_.CreateBitCast(partial_reduction_result_addresses[i],
shuffle_ir_type->getPointerTo()),
"partial_reduction_result");
+ CHECK_EQ(launch_dimensions.threads_per_block() % kWarpSize, 0)
+ << "Requires block size a multiple of the warp size, otherwise we "
+ "will read undefined elements.";
ir_builder_.CreateStore(
- EmitShuffleDown(partial_reduction_result,
- ir_builder_.getInt32(shuffle_distance),
- &ir_builder_),
+ EmitFullWarpShuffleDown(partial_reduction_result,
+ ir_builder_.getInt32(shuffle_distance),
+ &ir_builder_),
ir_builder_.CreateBitCast(result_from_other_lane,
shuffle_ir_type->getPointerTo()));
TF_RETURN_IF_ERROR(EmitCallToNestedComputation(
@@ -1822,7 +1524,7 @@ Status IrEmitterUnnested::EmitRowReduction(
// lane 0 (which holds the partially accumulated result for its warp) to the
// output element.
llvm_ir::LlvmIfData if_lane_id_is_zero_data = llvm_ir::EmitIfThenElse(
- ir_builder_.CreateICmpEQ(lane_id, index_typed_const(0)),
+ ir_builder_.CreateICmpEQ(lane_id, index_typed_constant(0)),
"lane_id_is_zero", &ir_builder_);
llvm_ir::SetToFirstInsertPoint(if_lane_id_is_zero_data.true_block,
&ir_builder_);
@@ -1830,21 +1532,22 @@ Status IrEmitterUnnested::EmitRowReduction(
llvm::Value* output_address =
GetIrArray(*output, *output, reduce_output_shapes[i])
.EmitArrayElementAddress(
- llvm_ir::IrArray::Index(
- y,
- ShapeUtil::GetSubshape(output->shape(),
- reduce_output_shapes[i]),
- &ir_builder_),
+ IrArray::Index(y,
+ ShapeUtil::GetSubshape(
+ output->shape(), reduce_output_shapes[i]),
+ &ir_builder_),
&ir_builder_, "output_element_address");
- if (x_tile_size * z_tile_size < depth * width) {
- TF_RETURN_IF_ERROR(EmitAtomicOperationForNestedComputation(
- *reducers[i], output_address,
- partial_reduction_result_addresses[i]));
- } else {
+ // We don't need to emit atomic operations if there is only one tile of
+ // results. 'depth' is the z dimension, 'width' is the x dimension.
+ if (z_tile_size >= depth && x_tile_size >= width) {
TF_RETURN_IF_ERROR(EmitCallToNestedComputation(
*reducers[i],
{output_address, partial_reduction_result_addresses[i]},
output_address));
+ } else {
+ TF_RETURN_IF_ERROR(EmitAtomicOperationForNestedComputation(
+ *reducers[i], output_address,
+ partial_reduction_result_addresses[i]));
}
}
return Status::OK();
@@ -1970,31 +1673,31 @@ Status IrEmitterUnnested::HandleReduce(HloInstruction* reduce) {
HloComputation* reducer = reduce->to_apply();
// HandleReduce specializes reduction from a multi-dimensional array to a 1D
// array. The specialized version requires an initializer thunk that
- // ingitializes the output array to the initial value of the reduce.
- if (IsReductionToVector(*reduce) &&
- // NVPTX backend can't do atomic cmpxchg any narrower than 32 bits
- 32 <= primitive_util::BitWidth(reduce->shape().element_type())) {
+ // initializes the output array to the initial value of the reduce.
+ if (IsReductionToVector(*reduce)) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<Thunk> initializer_thunk,
BuildInitializerThunk(reduce));
std::vector<std::unique_ptr<Thunk>> thunks;
thunks.push_back(std::move(initializer_thunk));
- thunks.push_back(BuildKernelThunk(reduce));
+ thunks.push_back(
+ BuildKernelThunk(reduce, /*implements_whole_instruction=*/false));
thunk_sequence_->emplace_back(
MakeUnique<SequentialThunk>(std::move(thunks), reduce));
return EmitReductionToVector(
- reduce, input->shape(), {[&](const llvm_ir::IrArray::Index& index) {
+ reduce, input->shape(), {[&](const IrArray::Index& index) {
return GetIrArray(*input, *reduce)
.EmitReadArrayElement(index, &ir_builder_);
}},
- {[&](const llvm_ir::IrArray::Index& index) {
+ {[&](const IrArray::Index& index) {
return GetIrArray(*init_value, *reduce)
.EmitReadArrayElement(index, &ir_builder_);
}},
dimensions_to_reduce, {reducer}, {{}}, {});
}
- thunk_sequence_->emplace_back(BuildKernelThunk(reduce));
+ thunk_sequence_->emplace_back(
+ BuildKernelThunk(reduce, /*implements_whole_instruction=*/true));
return IrEmitter::HandleReduce(reduce);
}
@@ -2023,7 +1726,8 @@ Status IrEmitterUnnested::HandleTuple(HloInstruction* tuple) {
tuple_element_buffers, GetAllocationSlice(*tuple), tuple));
return Status::OK();
}
- thunk_sequence_->emplace_back(BuildKernelThunk(tuple));
+ thunk_sequence_->emplace_back(
+ BuildKernelThunk(tuple, /*implements_whole_instruction=*/true));
return IrEmitter::HandleTuple(tuple);
}
@@ -2048,7 +1752,8 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
BuildInitializerThunk(select_and_scatter));
std::vector<std::unique_ptr<Thunk>> thunks;
thunks.push_back(std::move(initializer_thunk));
- thunks.push_back(BuildKernelThunk(select_and_scatter));
+ thunks.push_back(BuildKernelThunk(select_and_scatter,
+ /*implements_whole_instruction=*/false));
thunk_sequence_->emplace_back(
MakeUnique<SequentialThunk>(std::move(thunks), select_and_scatter));
@@ -2062,7 +1767,7 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
source->shape(), ir_emitter_context_->device_description());
llvm::Type* index_type = GetIndexTypeForKernel(
select_and_scatter, launch_dimensions.launch_bound(), &ir_builder_);
- auto index_typed_const = [&](uint64 c) -> llvm::Constant* {
+ auto index_typed_constant = [&](uint64 c) -> llvm::Constant* {
return llvm::ConstantInt::get(index_type, c);
};
@@ -2085,8 +1790,7 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
// selected_index = I
// initialized_flag = true
// output(selected_index) = scatter(output(selected_index), source(S))
- auto loop_body_emitter =
- [=](const llvm_ir::IrArray::Index& source_index) -> Status {
+ auto loop_body_emitter = [=](const IrArray::Index& source_index) -> Status {
// Allocate space to keep the currently selected value, its index, and a
// boolean flag if the value is initialized. The initialized_flag is set
// false.
@@ -2096,7 +1800,7 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
"selected_value_address", &ir_builder_);
llvm::Value* selected_index_address =
llvm_ir::EmitAllocaAtFunctionEntryWithCount(
- index_type, index_typed_const(rank), "selected_index_address",
+ index_type, index_typed_constant(rank), "selected_index_address",
&ir_builder_);
llvm::Value* initialized_flag_address = llvm_ir::EmitAllocaAtFunctionEntry(
ir_builder_.getInt1Ty(), "initialized_flag_address", &ir_builder_);
@@ -2111,7 +1815,7 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
window_size.push_back(dim.size());
CHECK_GT(dim.size(), 0);
}
- const llvm_ir::IrArray::Index window_index = window_loops.AddLoopsForShape(
+ const IrArray::Index window_index = window_loops.AddLoopsForShape(
ShapeUtil::MakeShape(operand_element_type, window_size), "window");
llvm_ir::SetToFirstInsertPoint(window_loops.GetInnerLoopBodyBasicBlock(),
&ir_builder_);
@@ -2119,17 +1823,17 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
// Compute the operand index to visit and evaluate the condition whether the
// operand index is within the bounds. The unsigned comparison includes
// checking whether the operand index >= 0.
- llvm_ir::IrArray::Index operand_index(index_type, source_index.size());
+ IrArray::Index operand_index(index_type, source_index.size());
llvm::Value* in_bounds_condition = ir_builder_.getInt1(true);
for (int64 i = 0; i < rank; ++i) {
llvm::Value* strided_index = ir_builder_.CreateNSWMul(
- source_index[i], index_typed_const(window.dimensions(i).stride()));
+ source_index[i], index_typed_constant(window.dimensions(i).stride()));
operand_index[i] = ir_builder_.CreateNSWSub(
ir_builder_.CreateNSWAdd(strided_index, window_index[i]),
- index_typed_const(window.dimensions(i).padding_low()));
+ index_typed_constant(window.dimensions(i).padding_low()));
llvm::Value* index_condition = ir_builder_.CreateICmpULT(
operand_index[i],
- index_typed_const(ShapeUtil::GetDimension(operand->shape(), i)));
+ index_typed_constant(ShapeUtil::GetDimension(operand->shape(), i)));
in_bounds_condition =
ir_builder_.CreateAnd(in_bounds_condition, index_condition);
}
@@ -2147,8 +1851,7 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
// If the initialized_flag is false, initialize the selected value and index
// with the currently visiting operand.
llvm_ir::SetToFirstInsertPoint(if_initialized.false_block, &ir_builder_);
- const auto save_operand_index = [&](
- const llvm_ir::IrArray::Index& operand_index) {
+ const auto save_operand_index = [&](const IrArray::Index& operand_index) {
for (int64 i = 0; i < rank; ++i) {
llvm::Value* selected_index_address_slot =
ir_builder_.CreateInBoundsGEP(selected_index_address,
@@ -2156,7 +1859,7 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
ir_builder_.CreateStore(operand_index[i], selected_index_address_slot);
}
};
- llvm_ir::IrArray operand_array = GetIrArray(*operand, *select_and_scatter);
+ IrArray operand_array = GetIrArray(*operand, *select_and_scatter);
llvm::Value* operand_data =
operand_array.EmitReadArrayElement(operand_index, &ir_builder_);
ir_builder_.CreateStore(operand_data, selected_value_address);
@@ -2201,7 +1904,7 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
// value and the current output value.
llvm_ir::SetToFirstInsertPoint(window_loops.GetOuterLoopExitBasicBlock(),
&ir_builder_);
- llvm_ir::IrArray::Index selected_index(operand_index.GetType());
+ IrArray::Index selected_index(operand_index.GetType());
for (int64 i = 0; i < rank; ++i) {
llvm::Value* selected_index_address_slot = ir_builder_.CreateInBoundsGEP(
selected_index_address, {ir_builder_.getInt32(i)});
@@ -2256,15 +1959,23 @@ Status IrEmitterUnnested::HandleWhile(HloInstruction* xla_while) {
}
Status IrEmitterUnnested::HandleRng(HloInstruction* random) {
- thunk_sequence_->push_back(BuildKernelThunk(random));
+ thunk_sequence_->push_back(
+ BuildKernelThunk(random, /*implements_whole_instruction=*/true));
return IrEmitter::HandleRng(random);
}
Status IrEmitterUnnested::HandleSelect(HloInstruction* select) {
- thunk_sequence_->push_back(BuildKernelThunk(select));
+ thunk_sequence_->push_back(
+ BuildKernelThunk(select, /*implements_whole_instruction=*/true));
return IrEmitter::HandleSelect(select);
}
+Status IrEmitterUnnested::HandleTupleSelect(HloInstruction* tuple_select) {
+ thunk_sequence_->push_back(
+ BuildKernelThunk(tuple_select, /*implements_whole_instruction=*/true));
+ return IrEmitter::HandleTupleSelect(tuple_select);
+}
+
Status IrEmitterUnnested::HandleCrossReplicaSum(HloInstruction* crs) {
if (hlo_module_config_.replica_count() != 1) {
// TODO(b/33011107): Support nontrivial cross replica sum on GPU.
@@ -2300,18 +2011,18 @@ Status IrEmitterUnnested::HandleCrossReplicaSum(HloInstruction* crs) {
thunks.push_back(MakeUnique<DeviceToDeviceCopyThunk>(
/*source_address=*/GetAllocationSlice(*crs->operand(i)),
/*destination_buffer=*/tuple_element_buffers.back(),
- /*mem_size=*/ShapeUtil::ByteSizeOf(crs->operand(i)->shape()), crs));
+ /*mem_size=*/ShapeUtil::ByteSizeOf(crs->operand(i)->shape()), nullptr));
}
// Output a tuple of the buffers above.
thunks.push_back(MakeUnique<TupleThunk>(tuple_element_buffers,
- GetAllocationSlice(*crs), crs));
+ GetAllocationSlice(*crs), nullptr));
thunk_sequence_->push_back(
MakeUnique<SequentialThunk>(std::move(thunks), crs));
return Status::OK();
}
-Status IrEmitterUnnested::HandleGenerateToken(HloInstruction* gen_token) {
+Status IrEmitterUnnested::HandleAfterAll(HloInstruction* gen_token) {
return Status::OK();
}
@@ -2320,6 +2031,11 @@ Status IrEmitterUnnested::HandleInfeed(HloInstruction* infeed) {
return Status::OK();
}
+Status IrEmitterUnnested::HandleOutfeed(HloInstruction* outfeed) {
+ thunk_sequence_->emplace_back(BuildOutfeedThunk(outfeed));
+ return Status::OK();
+}
+
// Figures out how to access the buffers for all subshapes of hlo's operands and
// for hlo itself (i.e. all the buffers produced by HLO).
//
@@ -2439,7 +2155,8 @@ GetHloBufferSlices(const HloInstruction* hlo,
}
std::unique_ptr<KernelThunk> IrEmitterUnnested::BuildKernelThunk(
- const HloInstruction* inst, int unroll_factor) {
+ const HloInstruction* inst, bool implements_whole_instruction,
+ int unroll_factor) {
const BufferAssignment& buffer_assn =
ir_emitter_context_->buffer_assignment();
@@ -2531,7 +2248,8 @@ std::unique_ptr<KernelThunk> IrEmitterUnnested::BuildKernelThunk(
}
return MakeUnique<KernelThunk>(buffers, llvm_ir::AsString(kernel->getName()),
- inst, unroll_factor);
+ implements_whole_instruction ? inst : nullptr,
+ unroll_factor);
}
std::unique_ptr<Thunk> IrEmitterUnnested::BuildHostToDeviceCopyThunk(
@@ -2563,17 +2281,31 @@ std::unique_ptr<Thunk> IrEmitterUnnested::BuildInfeedThunk(
const HloInstruction* inst) {
CHECK_EQ(HloOpcode::kInfeed, inst->opcode());
- std::vector<BufferAllocation::Slice> tuple_element_buffers;
- for (int64 i = 0; i < inst->shape().tuple_shapes_size(); ++i) {
- BufferAllocation::Slice buffer = ir_emitter_context_->buffer_assignment()
- .GetUniqueSlice(inst, {i})
- .ConsumeValueOrDie();
- tuple_element_buffers.push_back(buffer);
- }
+ ShapeTree<BufferAllocation::Slice> slices(inst->shape());
+ slices.ForEachMutableElement(
+ [&](const ShapeIndex& index, BufferAllocation::Slice* slice) {
+ *slice = ir_emitter_context_->buffer_assignment()
+ .GetUniqueSlice(inst, index)
+ .ConsumeValueOrDie();
+ });
+ return MakeUnique<InfeedThunk>(slices, inst);
+}
- return MakeUnique<InfeedThunk>(
- tuple_element_buffers,
- /*destination_buffer=*/GetAllocationSlice(*inst), inst);
+std::unique_ptr<Thunk> IrEmitterUnnested::BuildOutfeedThunk(
+ const HloInstruction* inst) {
+ CHECK_EQ(HloOpcode::kOutfeed, inst->opcode());
+
+ ShapeTree<BufferAllocation::Slice> slices(inst->operand(0)->shape());
+ slices.ForEachMutableElement(
+ [&](const ShapeIndex& index, BufferAllocation::Slice* slice) {
+ auto status_or_slice =
+ ir_emitter_context_->buffer_assignment().GetUniqueSlice(
+ inst->operand(0), index);
+ if (status_or_slice.ok()) {
+ *slice = status_or_slice.ConsumeValueOrDie();
+ }
+ });
+ return MakeUnique<OutfeedThunk>(std::move(slices), inst);
}
namespace {
@@ -2691,6 +2423,11 @@ StatusOr<std::unique_ptr<Thunk>> IrEmitterUnnested::BuildInitializerThunk(
init_value = hlo->operand(init_value->parameter_number());
}
+ // Initializer thunks don't implement a whole instruction, and we want to
+ // profile the whole instruction instead of the individual thunks it consists
+ // of. Therefore we pass nullptr as the HloInstruction* to the thunks we
+ // generate below.
+ //
// In the common case, the initializer is a constant. In this case, emit a
// device-memset call if we can. Currently StreamExecutor only supports
// zeroing and 32-bit memsets.
@@ -2704,7 +2441,8 @@ StatusOr<std::unique_ptr<Thunk>> IrEmitterUnnested::BuildInitializerThunk(
ArraySlice<uint8> literal_bytes(
reinterpret_cast<const uint8*>(literal.untyped_data()), num_bytes);
if (c_all_of(literal_bytes, [](uint8 byte) { return byte == 0; })) {
- return {MakeUnique<MemzeroThunk>(GetAllocationSlice(*hlo, index), hlo)};
+ return {
+ MakeUnique<MemzeroThunk>(GetAllocationSlice(*hlo, index), nullptr)};
}
// If the literal is 8 or 16 bits wide, we can emit a 32-bit memset by
@@ -2718,11 +2456,11 @@ StatusOr<std::unique_ptr<Thunk>> IrEmitterUnnested::BuildInitializerThunk(
uint8 b = literal_bytes.front();
pattern16 = uint16{b} | (uint16{b} << 8);
} else {
- pattern16 = literal_bytes.front();
+ memcpy(&pattern16, literal_bytes.data(), sizeof(pattern16));
}
uint32 pattern32 = uint32{pattern16} | (uint32{pattern16} << 16);
return {MakeUnique<Memset32BitValueThunk>(
- pattern32, GetAllocationSlice(*hlo, index), hlo)};
+ pattern32, GetAllocationSlice(*hlo, index), nullptr)};
}
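
The memcpy change above fixes the 16-bit case: assigning literal_bytes.front() value-converted only the first byte, whereas memcpy copies both bytes of the pattern. A self-contained C++ sketch of the full 8/16-bit-to-32-bit broadcast (hypothetical helper name):

#include <cstdint>
#include <cstring>

// Broadcast a 1- or 2-byte literal into the 32-bit word a memset-style
// thunk writes. For one byte, replicate it into both halves; for two
// bytes, memcpy (not an integer conversion) so both bytes of the
// pattern are taken rather than just the first.
inline uint32_t MemsetPattern32(const uint8_t* bytes, size_t len) {
  uint16_t pattern16;
  if (len == 1) {
    pattern16 = uint16_t{bytes[0]} | (uint16_t{bytes[0]} << 8);
  } else {  // len == 2
    std::memcpy(&pattern16, bytes, sizeof(pattern16));
  }
  return uint32_t{pattern16} | (uint32_t{pattern16} << 16);
}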
// If the literal is an even multiple of 32 bits wide, we can emit a 32-bit
@@ -2733,12 +2471,13 @@ StatusOr<std::unique_ptr<Thunk>> IrEmitterUnnested::BuildInitializerThunk(
uint32 word;
memcpy(&word, literal_bytes.data(), sizeof(word));
return {MakeUnique<Memset32BitValueThunk>(
- word, GetAllocationSlice(*hlo, index), hlo)};
+ word, GetAllocationSlice(*hlo, index), nullptr)};
}
}
// Otherwise fall back to our slow initializer code.
- std::unique_ptr<KernelThunk> kernel_thunk = BuildKernelThunk(hlo);
+ std::unique_ptr<KernelThunk> kernel_thunk =
+ BuildKernelThunk(hlo, /*implements_whole_instruction=*/false);
LaunchDimensions launch_dimensions =
CalculateLaunchDimensions(ShapeUtil::GetSubshape(hlo->shape(), index),
ir_emitter_context_->device_description());
@@ -2750,7 +2489,7 @@ StatusOr<std::unique_ptr<Thunk>> IrEmitterUnnested::BuildInitializerThunk(
TF_RETURN_IF_ERROR(HandleConstant(const_cast<HloInstruction*>(init_value)));
}
TF_RETURN_IF_ERROR(ParallelLoopEmitter(
- [=](const llvm_ir::IrArray::Index& index) {
+ [=](const IrArray::Index& index) {
return GetIrArray(*init_value, *hlo)
.EmitReadArrayElement(index, &ir_builder_);
},
@@ -2945,8 +2684,8 @@ Status IrEmitterUnnested::EmitTargetElementLoopInThunk(
&ir_builder_));
}
- // For multiple outputs fusion, we need to emit each operand and the root.
- std::vector<llvm_ir::IrArray> output_arrays;
+ // For multioutput fusion, we need to emit each operand and the root.
+ std::vector<IrArray> output_arrays;
for (int64 i = 0; i < ShapeUtil::TupleElementCount(hlo.shape()); ++i) {
output_arrays.push_back(GetIrArray(hlo, hlo, {i}));
}
@@ -2975,5 +2714,482 @@ Status IrEmitterUnnested::EmitTargetElementLoop(
static_cast<KernelThunk*>(LastThunk()));
}
+int IrEmitterUnnested::ConstructIrArrayForOutputs(
+ const HloInstruction& hlo, std::vector<IrArray>* output_arrays) {
+ int64 num_outputs = 1;
+ if (hlo.IsMultiOutputFusion()) {
+ num_outputs = ShapeUtil::TupleElementCount(hlo.shape());
+ output_arrays->reserve(num_outputs);
+ for (int64 i = 0; i < num_outputs; ++i) {
+ output_arrays->push_back(GetIrArray(hlo, hlo, {i}));
+ }
+ } else {
+ output_arrays->push_back(GetIrArray(hlo, hlo));
+ }
+ return num_outputs;
+}
+
+int IrEmitterUnnested::ConstructIrArrayForInputs(
+ const HloInstruction& hlo, std::vector<IrArray>* param_arrays) {
+ int64 num_params = hlo.operands().size();
+ param_arrays->reserve(num_params);
+ for (const HloInstruction* param : hlo.operands()) {
+ param_arrays->push_back(GetIrArray(*param, hlo));
+ }
+ return num_params;
+}
+
+int IrEmitterUnnested::ConstructOutputReducedShapeAndCastOutputIrArrayToShape(
+ const HloInstruction& hlo, const std::vector<IrArray>& output_arrays,
+ tensorflow::gtl::ArraySlice<int64> reduced_output_dims,
+ std::vector<Shape>* output_reduced_shapes,
+ std::vector<IrArray>* output_in_reduced_shape_arrays) {
+ int64 num_outputs = 1;
+ if (hlo.IsMultiOutputFusion()) {
+ num_outputs = ShapeUtil::TupleElementCount(hlo.shape());
+ output_in_reduced_shape_arrays->reserve(num_outputs);
+ output_reduced_shapes->reserve(num_outputs);
+ for (int64 i = 0; i < num_outputs; ++i) {
+ output_reduced_shapes->push_back(ShapeUtil::MakeShapeWithDescendingLayout(
+ ShapeUtil::GetSubshape(hlo.shape(), {i}).element_type(),
+ reduced_output_dims));
+ output_in_reduced_shape_arrays->push_back(output_arrays[i].CastToShape(
+ (*output_reduced_shapes)[i], &ir_builder_));
+ }
+ } else {
+ output_reduced_shapes->push_back(ShapeUtil::MakeShapeWithDescendingLayout(
+ hlo.shape().element_type(), reduced_output_dims));
+ output_in_reduced_shape_arrays->push_back(output_arrays[0].CastToShape(
+ (*output_reduced_shapes)[0], &ir_builder_));
+ }
+ return num_outputs;
+}
+
+int IrEmitterUnnested::ConstructInputReducedShapeAndCastInputIrArrayToShape(
+ const HloInstruction& hlo, const std::vector<IrArray>& param_arrays,
+ const std::vector<llvm::Value*>& param_buffers,
+ tensorflow::gtl::ArraySlice<int64> reduced_output_dims,
+ std::vector<Shape>* param_reduced_shapes,
+ std::vector<IrArray>* param_in_reduced_shape_arrays) {
+ int64 num_params = hlo.operands().size();
+ param_in_reduced_shape_arrays->reserve(num_params);
+ param_reduced_shapes->reserve(num_params);
+ for (int64 id = 0; id < num_params; ++id) {
+ if (param_buffers[id] == nullptr) {
+ param_reduced_shapes->push_back(Shape());
+ param_in_reduced_shape_arrays->push_back(IrArray());
+ continue;
+ }
+ const HloInstruction* param = hlo.operand(id);
+ param_reduced_shapes->push_back(ShapeUtil::MakeShapeWithDescendingLayout(
+ param->shape().element_type(),
+ Permute({0, 2, 1}, reduced_output_dims)));
+ param_in_reduced_shape_arrays->push_back(param_arrays[id].CastToShape(
+ (*param_reduced_shapes)[id], &ir_builder_));
+ }
+ return num_params;
+}
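
For intuition about the Permute({0, 2, 1}, ...) call above: the permutation swaps the two minor-most components of the reduced shape (and {0, 2, 1} is its own inverse, so the direction of the mapping does not matter). A minimal sketch, with a hypothetical helper name:

    #include <cstdint>
    #include <vector>

    // Applies the {0, 2, 1} permutation to a rank-3 dims vector.
    std::vector<int64_t> Permute021(const std::vector<int64_t>& dims) {
      return {dims[0], dims[2], dims[1]};
    }

    // Permute021({8, 128, 64}) == {8, 64, 128}: when the reduced output is
    // [8, 128, 64], a tiled parameter is viewed as [8, 64, 128].
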
+
+namespace {
+
+// Reads thread_idx.x and converts it to a (y,x) coordinate within a square
+// tile of edge length tile_size. The caller launches threads_per_tile threads
+// per block, so thread_id is known to lie in [0, threads_per_tile).
+std::tuple<llvm::Value*, llvm::Value*> CalculateYXCoordinateWithinTile(
+ llvm::IRBuilder<>* builder, llvm::Value* tile_size,
+ int64 threads_per_tile) {
+ // Calculate the starting element coordinate within a tile for the current
+ // thread, (y, x) from thread_id.
+ llvm::Value* thread_id = llvm_ir::EmitCallToIntrinsic(
+ llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x, {}, {}, builder);
+ llvm_ir::AddRangeMetadata(0, threads_per_tile,
+ llvm::cast<llvm::Instruction>(thread_id));
+ thread_id = builder->CreateIntCast(thread_id, tile_size->getType(),
+ /*isSigned=*/true, "thread.id.x");
+ auto x = builder->CreateURem(thread_id, tile_size);
+ auto y = builder->CreateUDiv(thread_id, tile_size);
+ return std::make_tuple(y, x);
+}
+
+// Reads block_idx.x, casts it to type index_ty, and adds the assumption that
+// it's in the range [0, num_blocks).
+llvm::Value* GetBlockIdx(llvm::IRBuilder<>* builder, llvm::Type* index_ty,
+ int64 num_blocks) {
+ llvm::Value* block_id = llvm_ir::EmitCallToIntrinsic(
+ llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x, {}, {}, builder);
+ llvm_ir::AddRangeMetadata(0, num_blocks,
+ llvm::cast<llvm::Instruction>(block_id));
+ return builder->CreateIntCast(block_id, index_ty, /*isSigned=*/true,
+ "block.id.x");
+}
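
These two helpers are the only places the kernel reads its hardware indices. A host-side model of the (y, x) computation, with a concrete value (illustrative only):

    #include <cstdint>
    #include <tuple>

    // y selects the row within the tile, x the column.
    std::tuple<int64_t, int64_t> YXWithinTile(int64_t thread_id,
                                              int64_t tile_size) {
      return {thread_id / tile_size, thread_id % tile_size};
    }

    // With tile_size = 32 and 32 * 4 = 128 threads per block, thread 70 gets
    // (y, x) = (2, 6): it starts at row 2, column 6 of its tile.
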
+
+// Emits code to process up to (tile_size/num_rows) elements in a tile, where
+// `emit_elem_function` emits the code that processes one element, `y` and `x`
+// are the coordinates of the first element to process, and `index` is the
+// index for the origin of the tile. Emits bounds checks to ensure that each
+// processed element is within the boundary defined by `tile_width` and
+// `tile_height`.
+void EmitTiledElementalCodeWithBoundsCheck(
+ int64 tile_size, int64 num_rows, const IrArray::Index& index,
+ const string& loop_name, KernelSupportLibrary* ksl,
+ llvm::IRBuilder<>* builder, llvm::Value* y, llvm::Value* x,
+ llvm::Value* tile_width, llvm::Value* tile_height,
+ const std::function<void(const IrArray::Index&, llvm::Value*)>&
+ emit_elem_function) {
+ llvm::Type* index_ty = tile_width->getType();
+ // Emits a constant value with index type.
+ auto index_typed_constant = [&](uint64 c) -> llvm::Constant* {
+ return llvm::ConstantInt::get(index_ty, c);
+ };
+ // Adds `addend` to the given `dim` of `index`.
+ auto offset_dim = [&](IrArray::Index index, llvm::Value* addend, int64 dim) {
+ index[dim] = builder->CreateAdd(index[dim], addend);
+ return index;
+ };
+
+ auto emit_full_tile = [&] {
+ for (int64 i = 0; i < tile_size; i += num_rows) {
+ auto source_idx = offset_dim(index, index_typed_constant(i), /*dim=*/1);
+ auto y_loc = builder->CreateAdd(index_typed_constant(i), y);
+ emit_elem_function(source_idx, y_loc);
+ }
+ };
+
+ auto emit_last_row = [&] {
+ ksl->IfReturnVoid("x_in_tile", builder->CreateICmpULT(x, tile_width), [&] {
+ // tile_height_upper_bound =
+ // ceil(tile_height / num_rows) * num_rows
+ auto tile_height_upper_bound = builder->CreateMul(
+ builder->CreateUDiv(
+ builder->CreateAdd(tile_height,
+ index_typed_constant(num_rows - 1)),
+ index_typed_constant(num_rows)),
+ index_typed_constant(num_rows));
+ ksl->ForReturnVoid(
+ loop_name, /*start=*/index_typed_constant(0),
+ /*end=*/tile_height_upper_bound,
+ /*step=*/index_typed_constant(num_rows), [&](llvm::Value* y_indvar) {
+ auto y_loc = builder->CreateAdd(y_indvar, y);
+ ksl->IfReturnVoid(
+ "y_in_tile", builder->CreateICmpULT(y_loc, tile_height), [&] {
+ emit_elem_function(offset_dim(index, y_indvar, /*dim=*/1),
+ y_loc);
+ });
+ });
+ });
+ };
+ ksl->IfReturnVoid(
+ "full_tile",
+ builder->CreateAnd(
+ builder->CreateICmpEQ(index_typed_constant(tile_size), tile_width),
+ builder->CreateICmpEQ(index_typed_constant(tile_size), tile_height)),
+ emit_full_tile, emit_last_row);
+}
+} // namespace
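
A host-side model of the control flow EmitTiledElementalCodeWithBoundsCheck emits (a sketch under the same tile_size/num_rows scheme; names are hypothetical): each thread starts at (y, x) and strides down the tile in steps of num_rows, and only boundary tiles pay for the per-element checks.

    #include <cstdint>
    #include <functional>

    void VisitTileElements(int64_t tile_size, int64_t num_rows,
                           int64_t tile_width, int64_t tile_height,
                           int64_t y, int64_t x,
                           const std::function<void(int64_t, int64_t)>& visit) {
      if (tile_width == tile_size && tile_height == tile_size) {
        // Full tile: no bounds checks needed.
        for (int64_t i = 0; i < tile_size; i += num_rows) visit(y + i, x);
      } else if (x < tile_width) {
        // Round tile_height up to a multiple of num_rows, then check y.
        int64_t upper = (tile_height + num_rows - 1) / num_rows * num_rows;
        for (int64_t i = 0; i < upper; i += num_rows) {
          if (y + i < tile_height) visit(y + i, x);
        }
      }
    }
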
+
+// Emits a kernel for the given hlo instruction using a tiled 0-2-1 transpose
+// algorithm to improve the memory access patterns for the input parameters
+// which have a shape that is a 0-2-1 transpose of the output tensors.
+//
+// For the purpose of tiling, the output tensors have a logical shape of three
+// components 0-2-1 while the relevant input parameters have a logical shape of
+// three components 0-1-2 in the order major to minor. The x- and y- dimensions
+// of the tensors are tiled in square tiles of edge length `kTileSize`. Each
+// thread block of `kTileSize` x `kNumRows` threads transposes one tile: each
+// thread copies kTileSize/kNumRows elements from the input to a shared memory
+// tile, then the otherwise "regular hlo kernel" reads from the shared memory
+// instead of the original input.
+//
+// This is similar to the following CUDA algorithm in TensorFlow:
+// https://goo.gl/MStRV6.
+//
+// `kTileSize` should usually be the same as the warp size. We currently choose
+// 32 for `kTileSize` and 4 for `kNumRows`. The CUDA algorithm uses 8 for
+// `kNumRows`.
+//
+// TODO(b/33320379): Here each block transposes 1 tile. It may be more efficient
+// to launch fewer blocks so each transposes many tiles.
+LaunchDimensions IrEmitterUnnested::EmitHlo021Tile(
+ HloInstruction* hlo, tensorflow::gtl::ArraySlice<int64> reduced_output_dims,
+ tensorflow::gtl::ArraySlice<int64> tiled_param_ids) {
+ // Parameters for the tiling algorithm.
+ constexpr int64 kTileSize = 32;
+ constexpr int64 kNumRows = 4;
+ constexpr int64 kThreadsPerTile = kTileSize * kNumRows;
+
+ // Construct IrArrays for the inputs and outputs.
+ std::vector<IrArray> output_arrays;
+ int64 num_outputs = ConstructIrArrayForOutputs(*hlo, &output_arrays);
+ std::vector<IrArray> param_arrays;
+ int64 num_params = ConstructIrArrayForInputs(*hlo, &param_arrays);
+
+ // Allocate shared memory buffers to store the tiled inputs.
+ std::vector<llvm::Value*> param_shmem_buffers(num_params, nullptr);
+ for (int64 id : tiled_param_ids) {
+ const HloInstruction* param = hlo->operand(id);
+ // Add 1 to the minor dimension to reduce shared memory bank conflicts.
+ llvm::Type* tile_type = llvm::ArrayType::get(
+ llvm::ArrayType::get(llvm_ir::PrimitiveTypeToIrType(
+ param->shape().element_type(), module_),
+ kTileSize + 1),
+ kTileSize);
+ const int kNVPTXSharedMemoryAddrSpace = 3;
+ auto* tile_base_ptr = new llvm::GlobalVariable(
+ *ir_builder_.GetInsertBlock()->getParent()->getParent(), tile_type,
+ /*isConstant=*/false, llvm::GlobalValue::PrivateLinkage,
+ llvm::UndefValue::get(tile_type),
+ llvm_ir::AsStringRef(IrName(hlo, StrCat("tile", id))), nullptr,
+ llvm::GlobalValue::NotThreadLocal, kNVPTXSharedMemoryAddrSpace);
+ param_shmem_buffers[id] = tile_base_ptr;
+ VLOG(3) << "Added shmem buffer for parameter " << id << ": "
+ << llvm_ir::DumpToString(*tile_base_ptr);
+ }
+
+ // The 0-2-1 shape of the tiling scheme is the reduced shape of the HLO result
+  // for the purpose of tiling. Calculate the number of tiles in each logical
+  // output dimension from the reduced output dimensions.
+ std::vector<int64> output_dims_in_tiles = std::vector<int64>(
+ reduced_output_dims.begin(), reduced_output_dims.end());
+ CHECK_EQ(output_dims_in_tiles.size(), 3);
+ for (int i = 1; i < 3; ++i) {
+ output_dims_in_tiles[i] =
+ CeilOfRatio<int64>(output_dims_in_tiles[i], kTileSize);
+ }
+ const int64 num_tiles =
+ c_accumulate(output_dims_in_tiles, 1, std::multiplies<int64>());
+ LaunchDimensions launch_dimensions(num_tiles, kThreadsPerTile);
+
+ llvm::Type* index_ty = GetIndexTypeForKernel(
+ hlo, launch_dimensions.launch_bound(), &ir_builder_);
+ auto index_typed_constant = [&](uint64 c) -> llvm::Constant* {
+ return llvm::ConstantInt::get(index_ty, c);
+ };
+
+ // Cast each output IrArray to its corresponding reduced shape and keep the
+ // reduced shape live during IR emission.
+ std::vector<IrArray> output_in_reduced_shape_arrays;
+ std::vector<Shape> output_reduced_shapes;
+ CHECK_EQ(ConstructOutputReducedShapeAndCastOutputIrArrayToShape(
+ *hlo, output_arrays, reduced_output_dims, &output_reduced_shapes,
+ &output_in_reduced_shape_arrays),
+ num_outputs);
+
+ // For each tiled parameter, cast its input IrArray to the corresponding
+ // reduced shape and keep the reduced shape live during IR emission.
+ std::vector<IrArray> param_in_reduced_shape_arrays;
+ std::vector<Shape> param_reduced_shapes;
+ CHECK_EQ(ConstructInputReducedShapeAndCastInputIrArrayToShape(
+ *hlo, param_arrays, param_shmem_buffers, reduced_output_dims,
+ &param_reduced_shapes, &param_in_reduced_shape_arrays),
+ num_params);
+
+ // Calculate the starting element coordinate within a tile for the current
+ // thread, (y, x) from thread_id.
+ llvm::Value* x;
+ llvm::Value* y;
+ std::tie(y, x) = CalculateYXCoordinateWithinTile(
+ &ir_builder_, index_typed_constant(kTileSize), kThreadsPerTile);
+
+ // Calculate the index for the current output tile from block_id.
+ const IrArray::Index output_tile_index(
+ GetBlockIdx(&ir_builder_, index_ty, num_tiles),
+ ShapeUtil::MakeShapeWithDescendingLayout(PRED /*arbitrary*/,
+ output_dims_in_tiles),
+ &ir_builder_);
+
+ // Output tile origin is the index for the first element of the current output
+ // tile.
+ const IrArray::Index output_tile_origin = [&] {
+ IrArray::Index index = output_tile_index;
+ for (int i = 1; i < 3; ++i) {
+ index[i] = ir_builder_.CreateMul(output_tile_index[i],
+ index_typed_constant(kTileSize),
+ "tile_origin." + std::to_string(i));
+ }
+ return index;
+ }();
+
+ // Calculate the input tile origin from the output tile origin.
+ const IrArray::Index input_tile_origin(
+ Permute({0, 2, 1}, output_tile_origin.multidim()));
+
+ // Calculate the current output tile bounds in each of the logical dimensions.
+ std::vector<llvm::Value*> output_tile_bounds(3);
+ for (int i = 1; i < 3; ++i) {
+    // Only the last row or column may not have full size.
+ output_tile_bounds[i] = ir_builder_.CreateSelect(
+ ir_builder_.CreateICmpEQ(
+ output_tile_index[i],
+ index_typed_constant(output_dims_in_tiles[i] - 1)),
+ index_typed_constant(reduced_output_dims[i] -
+ (output_dims_in_tiles[i] - 1) * kTileSize),
+ index_typed_constant(kTileSize), "kTileSize");
+ }
+
+ KernelSupportLibrary ksl(&ir_builder_, llvm_ir::UnrollMode::kDefaultUnroll);
+
+ // Curry a few parameters to EmitTiledElementalCodeWithBoundsCheck.
+ auto emit_tiled_elemental_code_with_bounds_check =
+ [&](const IrArray::Index& index, const string& loop_name,
+ llvm::Value* tile_width, llvm::Value* tile_height,
+ const std::function<void(const IrArray::Index&, llvm::Value*)>&
+ emit_elem_function) {
+ EmitTiledElementalCodeWithBoundsCheck(
+ kTileSize, kNumRows, index, loop_name, &ksl, &ir_builder_, y, x,
+ tile_width, tile_height, emit_elem_function);
+ };
+
+ // Adds `addend` to the given `dim` of `index`.
+ auto offset_dim = [&](IrArray::Index index, llvm::Value* addend, int64 dim) {
+ index[dim] = ir_builder_.CreateAdd(index[dim], addend);
+ return index;
+ };
+ const IrArray::Index input_index =
+ offset_dim(offset_dim(input_tile_origin, x, /*dim=*/2), y, /*dim=*/1);
+
+ // Copy input parameter values to shared memory buffers:
+ // tile[y, x] = input[index]
+ emit_tiled_elemental_code_with_bounds_check(
+ input_index, "input", output_tile_bounds[1], output_tile_bounds[2],
+ [&](const IrArray::Index& index, llvm::Value* y_loc) {
+ for (int64 id : tiled_param_ids) {
+ IrArray& input_in_logical_shape = param_in_reduced_shape_arrays[id];
+ llvm::Value* shmem_buffer = param_shmem_buffers[id];
+          // TODO(jlebar): Add AA metadata to this store. Tile buffers are
+          // global variables, so LLVM can't infer much about them.
+ ir_builder_.CreateStore(
+ input_in_logical_shape.EmitReadArrayElement(index, &ir_builder_,
+ "input_element"),
+ ir_builder_.CreateGEP(shmem_buffer,
+ {index_typed_constant(0), y_loc, x}));
+ }
+ });
+
+  // Wait for all threads to reach this point, lest we copy a value from the
+  // tile to the output before another thread has copied it from the input to
+  // the tile. This is `__syncthreads` in CUDA.
+ llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_barrier0, {}, {},
+ &ir_builder_);
+
+ llvm_ir::TiledParameterInfo tiled_param_info(param_shmem_buffers, y, x);
+
+ const IrArray::Index output_index =
+ offset_dim(offset_dim(output_tile_origin, x, /*dim=*/2), y, /*dim=*/1);
+
+ // Write to output[index] by emitting code like normal, except that values for
+ // the tiled parameters are read from the shmem buffers.
+ if (hlo->opcode() == HloOpcode::kCopy) {
+ emit_tiled_elemental_code_with_bounds_check(
+ output_index, "output", output_tile_bounds[2], output_tile_bounds[1],
+ [&](const IrArray::Index& index, llvm::Value* y_loc) {
+ // TODO(jlebar): Add AA metadata to this load.
+ llvm::Instruction* load_from_shmem_buffer = ir_builder_.CreateLoad(
+ ir_builder_.CreateGEP(param_shmem_buffers[0],
+ {ir_builder_.getInt64(0), x, y_loc}),
+ "output_element");
+ output_in_reduced_shape_arrays[0].EmitWriteArrayElement(
+ index, load_from_shmem_buffer, &ir_builder_);
+ });
+ } else {
+ CHECK_EQ(hlo->opcode(), HloOpcode::kFusion);
+ emit_tiled_elemental_code_with_bounds_check(
+ output_index, "output", output_tile_bounds[2], output_tile_bounds[1],
+ [&](const IrArray::Index& index, llvm::Value* y_loc) {
+ GpuElementalIrEmitter elem_emitter(hlo_module_config_, module_,
+ &ir_builder_, GetNestedComputer());
+ FusedIrEmitter fused_emitter(param_arrays, &elem_emitter);
+ tiled_param_info.set_y(y_loc);
+ fused_emitter.SetTiledParameterInfo(&tiled_param_info);
+ TF_CHECK_OK(hlo->fused_expression_root()->Accept(&fused_emitter));
+ IrArray::Index untiled_index = llvm_ir::GetUnreducedOutputIndex(
+ index, output_reduced_shapes[0], output_arrays[0].GetShape(),
+ &ir_builder_);
+ const llvm_ir::ElementGenerator& output_generator =
+ fused_emitter.GetRootGenerator();
+ llvm::Value* output_value =
+ output_generator(untiled_index).ValueOrDie();
+ if (hlo->IsMultiOutputFusion()) {
+ CHECK(output_value->getType()->isStructTy());
+ CHECK_EQ(output_value->getType()->getStructNumElements(),
+ output_in_reduced_shape_arrays.size());
+ for (int64 i = 0; i < output_in_reduced_shape_arrays.size(); ++i) {
+ output_in_reduced_shape_arrays[i].EmitWriteArrayElement(
+ index, ir_builder_.CreateExtractValue(output_value, i),
+ &ir_builder_);
+ }
+ } else {
+ output_in_reduced_shape_arrays[0].EmitWriteArrayElement(
+ index, output_value, &ir_builder_);
+ }
+ });
+ }
+
+ // For multioutput fusion, emit a tuple with all the individual outputs.
+ if (hlo->IsMultiOutputFusion()) {
+ std::vector<llvm::Value*> tuple_operand_ptrs;
+ for (int64 i = 0; i < output_arrays.size(); ++i) {
+ tuple_operand_ptrs.push_back(output_arrays[i].GetBasePointer());
+ }
+ llvm_ir::EmitTuple(GetIrArray(*hlo, *hlo), tuple_operand_ptrs, &ir_builder_,
+ module_);
+ }
+
+ return launch_dimensions;
+}
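
A single-tile model of the copy case of this kernel, as plain C++ (illustrative only; the real code emits LLVM IR, handles partial boundary tiles through the bounds checks above, and uses __syncthreads between the two phases):

    #include <cstdint>

    constexpr int kTileSize = 32;

    // in is [rows x cols] row-major; out is its transpose, [cols x rows].
    // (row0, col0) is the tile origin in the input; the tile is assumed to
    // be fully in bounds here.
    void TransposeTile(const float* in, int64_t in_cols, float* out,
                       int64_t out_cols, int64_t row0, int64_t col0) {
      // The +1 column mirrors the bank-conflict padding of the shmem buffer.
      float tile[kTileSize][kTileSize + 1];
      for (int y = 0; y < kTileSize; ++y)
        for (int x = 0; x < kTileSize; ++x)
          tile[y][x] = in[(row0 + y) * in_cols + (col0 + x)];
      // __syncthreads() would separate the two phases in the real kernel.
      for (int y = 0; y < kTileSize; ++y)
        for (int x = 0; x < kTileSize; ++x)
          out[(col0 + y) * out_cols + (row0 + x)] = tile[x][y];
    }
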
+
+bool IrEmitterUnnested::CheckAndEmitHloWithTile021(HloInstruction* hlo) {
+ HloOpcode opcode = hlo->opcode();
+ CHECK(opcode == HloOpcode::kFusion || opcode == HloOpcode::kCopy);
+ CHECK(opcode != HloOpcode::kFusion ||
+ hlo->fusion_kind() == HloInstruction::FusionKind::kLoop)
+ << "Only loop fusions are supported.";
+
+ const Shape& output_shape = hlo->IsMultiOutputFusion()
+ ? ShapeUtil::GetSubshape(hlo->shape(), {0})
+ : hlo->shape();
+
+  // If output_shape reduces to a 0-2-1 shape, find all parameters of the hlo
+  // whose shapes reduce to the corresponding 0-1-2 shape.
+ std::vector<int64> params_012;
+ optional<std::vector<int64>> reduced_dims_021;
+ for (int64 operand_idx = 0; operand_idx < hlo->operand_count();
+ ++operand_idx) {
+ HloInstruction* operand = hlo->mutable_operand(operand_idx);
+ auto find_transpose_result =
+ llvm_ir::FindTranspose021(operand->shape(), output_shape);
+ if (!find_transpose_result.has_value()) {
+ continue;
+ }
+ const std::vector<int64>& curr_reduced_dims_021 = *find_transpose_result;
+ if (!reduced_dims_021.has_value()) {
+ reduced_dims_021 = curr_reduced_dims_021;
+ }
+ if (!ContainersEqual(*reduced_dims_021, curr_reduced_dims_021)) {
+ // There is more than one possible transpose. Instead of picking one
+ // transpose, we simply give up here.
+ return false;
+ }
+ params_012.push_back(operand_idx);
+ }
+
+ if (!reduced_dims_021.has_value()) {
+ return false;
+ }
+
+ if ((*reduced_dims_021)[1] < kMinDimensionToTransposeTiled ||
+ (*reduced_dims_021)[2] < kMinDimensionToTransposeTiled) {
+ return false;
+ }
+
+ VLOG(3) << "EmitHlo021Tile Emitting hlo tile 0-2-1" << hlo->ToString();
+ thunk_sequence_->emplace_back(
+ BuildKernelThunk(hlo, /*implements_whole_instruction=*/true));
+ const LaunchDimensions launch_dimensions =
+ EmitHlo021Tile(hlo, *reduced_dims_021, params_012);
+ UpdateLaunchDimensions(launch_dimensions, LastThunk(),
+ ir_emitter_context_->llvm_module());
+
+ return true;
+}
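
A worked example of the guard above (shapes illustrative; WorthTiling021 is a hypothetical name, and 16 is only an assumed value for kMinDimensionToTransposeTiled): an f32[8,64,128] operand feeding an f32[8,128,64] output is a 0-2-1 transpose, and both minor dimensions must be large enough for the tiled path to pay off.

    #include <cstdint>
    #include <vector>

    bool WorthTiling021(const std::vector<int64_t>& reduced_dims_021,
                        int64_t min_dim /* kMinDimensionToTransposeTiled */) {
      return reduced_dims_021[1] >= min_dim && reduced_dims_021[2] >= min_dim;
    }

    // WorthTiling021({8, 128, 64}, /*min_dim=*/16) -> true
    // WorthTiling021({8, 4, 4096}, /*min_dim=*/16) -> false: the transposed
    //   dimension is too narrow to fill 32x32 tiles.
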
+
} // namespace gpu
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h
index 279a5c386a..59547c16d7 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/ir_emitter.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h"
namespace xla {
namespace gpu {
@@ -73,10 +74,12 @@ class IrEmitterUnnested : public IrEmitter {
Status HandleTuple(HloInstruction* tuple) override;
Status HandleWhile(HloInstruction* xla_while) override;
Status HandleInfeed(HloInstruction* xla_infeed) override;
+ Status HandleOutfeed(HloInstruction* outfeed) override;
Status HandleRng(HloInstruction* random) override;
Status HandleSelect(HloInstruction* select) override;
+ Status HandleTupleSelect(HloInstruction* tuple_select) override;
Status HandleCrossReplicaSum(HloInstruction* crs) override;
- Status HandleGenerateToken(HloInstruction* gen_token) override;
+ Status HandleAfterAll(HloInstruction* gen_token) override;
Status EmitTargetElementLoop(
const HloInstruction& hlo,
@@ -115,7 +118,7 @@ class IrEmitterUnnested : public IrEmitter {
// Emits code that reduces a matrix of shape [height x width] to a vector of
// [width]. Other parameters have the same meaning as those of
// `EmitReductionToVector`. Note that input shape might not be
- // [height x width], but can be bitcast to [height x weight] with "height"
+ // [height x width], but can be bitcast to [height x width] with "height"
// being the major dimension.
Status EmitColumnReduction(
int64 height, int64 width, HloInstruction* reduce,
@@ -131,7 +134,7 @@ class IrEmitterUnnested : public IrEmitter {
// Emits code that reduces a 3D tensor of shape [depth x height x width] to a
// vector of shape [height]. Other parameters have the same meaning as those
// of `EmitReductionToVector`. Note that input shape might not be
- // [depth x height x width], but can be bitcast to [depth x height x weight]
+ // [depth x height x width], but can be bitcast to [depth x height x width]
// with "depth" being the most major dimension.
Status EmitRowReduction(
int64 depth, int64 height, int64 width, HloInstruction* reduce,
@@ -182,12 +185,56 @@ class IrEmitterUnnested : public IrEmitter {
std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
extra_output_gens);
+  // Checks whether the hlo instruction can be emitted with the 0-2-1 tiling
+  // algorithm; if so, emits the kernel with that algorithm and returns true.
+ bool CheckAndEmitHloWithTile021(HloInstruction* hlo);
+ // Emits a kernel for the hlo instruction using a 0-2-1 tiling algorithm and
+ // returns the launch dimensions for the kernel. This is a helper to support
+ // the implementation of CheckAndEmitHloWithTile021.
+ LaunchDimensions EmitHlo021Tile(
+ HloInstruction* hlo,
+ tensorflow::gtl::ArraySlice<int64> reduced_output_dims,
+ tensorflow::gtl::ArraySlice<int64> tiled_param_ids);
+ // Generates the IrArray for each output of hlo and returns the number of
+ // outputs.
+ int ConstructIrArrayForOutputs(const HloInstruction& hlo,
+ std::vector<llvm_ir::IrArray>* output_arrays);
+ // Generates the IrArray for each input of hlo and returns the number of
+ // inputs.
+ int ConstructIrArrayForInputs(const HloInstruction& hlo,
+ std::vector<llvm_ir::IrArray>* param_arrays);
+ // For each output of the `hlo` instruction, constructs the reduced shape for
+  // the output with the given `reduced_output_dims` and casts the original
+ // output IrArray element in `output_arrays` to the reduced shape. Returns
+ // the number of outputs.
+ int ConstructOutputReducedShapeAndCastOutputIrArrayToShape(
+ const HloInstruction& hlo,
+ const std::vector<llvm_ir::IrArray>& output_arrays,
+ tensorflow::gtl::ArraySlice<int64> reduced_output_dims,
+ std::vector<Shape>* output_reduced_shapes,
+ std::vector<llvm_ir::IrArray>* output_in_reduced_shape_arrays);
+ // For each input of the `hlo` instruction, checks its value in
+ // `param_buffers` to find out whether the input has a reduced shape. If the
+ // input has a reduced shape, constructs the reduced shape for the input and
+ // casts the original input IrArray in `param_arrays` to the reduced shape.
+  // Returns the total number of inputs.
+ int ConstructInputReducedShapeAndCastInputIrArrayToShape(
+ const HloInstruction& hlo,
+ const std::vector<llvm_ir::IrArray>& param_arrays,
+ const std::vector<llvm::Value*>& param_buffers,
+ tensorflow::gtl::ArraySlice<int64> reduced_output_dims,
+ std::vector<Shape>* param_reduced_shapes,
+ std::vector<llvm_ir::IrArray>* param_in_reduced_shape_arrays);
+
// Returns a KernelThunk that invokes the kernel emitted for `inst`. The
// caller needs to make sure `inst` outlives the lifetime of the returned
// Thunk object. The kernel implementation will be unrolled if unroll_factor
- // is greater than one.
- std::unique_ptr<KernelThunk> BuildKernelThunk(const HloInstruction* inst,
- int unroll_factor = 1);
+ // is greater than one. 'implements_whole_instruction' specifies whether this
+ // KernelThunk implements the whole 'inst' HloInstruction. In some cases
+ // 'inst' will be implemented by a sequence of Thunks.
+ std::unique_ptr<KernelThunk> BuildKernelThunk(
+ const HloInstruction* inst, bool implements_whole_instruction,
+ int unroll_factor = 1);
// Returns a FftThunk that calls cuFFT to implement `inst`.
std::unique_ptr<Thunk> BuildFftThunk(const HloInstruction* inst);
@@ -208,10 +255,14 @@ class IrEmitterUnnested : public IrEmitter {
std::unique_ptr<Thunk> BuildDeviceToDeviceCopyThunk(
const HloInstruction* inst);
- // Returns an InfeedThunk that performs device-to-device memcpy to implement
+ // Returns an InfeedThunk that performs a host-to-device memcpy to implement
// `inst`.
std::unique_ptr<Thunk> BuildInfeedThunk(const HloInstruction* inst);
+ // Returns an OutfeedThunk that performs a device-to-host memcpy to implement
+ // `inst`.
+ std::unique_ptr<Thunk> BuildOutfeedThunk(const HloInstruction* inst);
+
// Returns a WhileThunk that invokes thunk sequences for 'condition' and
// 'body' sub-computations of while instruction 'hlo'.
std::unique_ptr<Thunk> BuildWhileThunk(const HloInstruction* hlo);
diff --git a/tensorflow/compiler/xla/service/gpu/kernel_thunk.cc b/tensorflow/compiler/xla/service/gpu/kernel_thunk.cc
index f56c1ce69f..e76823ad10 100644
--- a/tensorflow/compiler/xla/service/gpu/kernel_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/kernel_thunk.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_executable.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/stringpiece.h"
@@ -75,7 +76,8 @@ void KernelThunk::SetLaunchDimensions(const LaunchDimensions& launch_dims) {
}
Status KernelThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) {
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) {
// Load the kernel.
se::StreamExecutor* executor = stream->parent();
LaunchDimensions launch_dimensions;
@@ -100,6 +102,7 @@ Status KernelThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
VLOG(3) << " Arg: alloc #" << arg->index() << ": " << buf.opaque() << " ("
<< buf.size() << "B)";
}
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
if (!stream->parent()->Launch(
stream, se::ThreadDim(launch_dimensions.threads_per_block()),
se::BlockDim(launch_dimensions.block_count()), *kernel,
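
The same profiler-threading pattern repeats for every thunk in this patch: ExecuteOnStream gains an HloExecutionProfiler* parameter, and each override brackets the work it enqueues with a scoped per-instruction timer. A minimal sketch of the contract, for a hypothetical thunk:

    Status MyThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
                                    se::Stream* stream,
                                    HloExecutionProfiler* profiler) {
      // The timer covers everything enqueued while op_profiler is alive.
      auto op_profiler =
          profiler->MakeScopedInstructionProfiler(hlo_instruction());
      // ... enqueue this thunk's work on `stream` ...
      return Status::OK();
    }
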
diff --git a/tensorflow/compiler/xla/service/gpu/kernel_thunk.h b/tensorflow/compiler/xla/service/gpu/kernel_thunk.h
index 7def27e189..d751de50ad 100644
--- a/tensorflow/compiler/xla/service/gpu/kernel_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/kernel_thunk.h
@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/partition_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -62,7 +63,8 @@ class KernelThunk : public Thunk {
// Executes the kernel for the thunk on "stream", which must be non-null.
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
// Buffers passed to the kernel as arguments.
diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD
index 7de8f9e1ee..da31c65b7e 100644
--- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD
+++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD
@@ -34,6 +34,7 @@ cc_library(
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
+ "@llvm//:amdgpu_code_gen",
"@llvm//:analysis",
"@llvm//:bit_reader",
"@llvm//:bit_writer",
diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc
index a4e4e85bf3..2b0d6924a2 100644
--- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc
+++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc
@@ -206,7 +206,7 @@ std::unique_ptr<llvm::TargetMachine> GetTargetMachine(
codegen_opt_level = CodeGenOpt::None;
}
return WrapUnique(target->createTargetMachine(
- triple.str(), llvm_ir::AsStringRef(cpu_name), "+ptx42", target_options,
+ triple.str(), llvm_ir::AsStringRef(cpu_name), "+ptx60", target_options,
Optional<Reloc::Model>(RelocModel), Optional<CodeModel::Model>(CMModel),
codegen_opt_level));
}
diff --git a/tensorflow/compiler/xla/service/gpu/memset_thunk.cc b/tensorflow/compiler/xla/service/gpu/memset_thunk.cc
index d4100a898b..9fd6cf7157 100644
--- a/tensorflow/compiler/xla/service/gpu/memset_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/memset_thunk.cc
@@ -14,21 +14,27 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/gpu/memset_thunk.h"
+
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/stream_executor/stream_executor.h"
namespace xla {
namespace gpu {
Status MemzeroThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+ const BufferAllocations& buffer_allocations, se::Stream* stream,
+ HloExecutionProfiler* profiler) {
se::DeviceMemoryBase dest_data = buffer_allocations.GetDeviceAddress(dest_);
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
stream->ThenMemZero(&dest_data, dest_data.size());
return Status::OK();
}
Status Memset32BitValueThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+ const BufferAllocations& buffer_allocations, se::Stream* stream,
+ HloExecutionProfiler* profiler) {
se::DeviceMemoryBase dest_data = buffer_allocations.GetDeviceAddress(dest_);
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
stream->ThenMemset32(&dest_data, value_, dest_data.size());
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/gpu/memset_thunk.h b/tensorflow/compiler/xla/service/gpu/memset_thunk.h
index 51c332d287..d1fec0bd76 100644
--- a/tensorflow/compiler/xla/service/gpu/memset_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/memset_thunk.h
@@ -17,6 +17,7 @@ limitations under the License.
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_MEMSET_THUNK_H_
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/status.h"
@@ -36,7 +37,8 @@ class MemzeroThunk : public Thunk {
: Thunk(Kind::kMemzero, hlo), dest_(dest) {}
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
const BufferAllocation::Slice dest_;
@@ -52,7 +54,8 @@ class Memset32BitValueThunk : public Thunk {
: Thunk(Kind::kMemset32BitValue, hlo), value_(value), dest_(dest) {}
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
uint32 value_;
diff --git a/tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc b/tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc
index d541776f00..ea661b3c2c 100644
--- a/tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc
+++ b/tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc
@@ -23,9 +23,11 @@ limitations under the License.
#include <string>
#include <utility>
+#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/platform/types.h"
namespace xla {
@@ -69,6 +71,7 @@ bool GpuMultiOutputFusion::ShapesCompatibleForFusion(HloInstruction* instr1,
// In that case, the operand of the reduce needs to have the same shape
// as the other tuple operands, and we also need to compare the output
// shapes of the reduces.
+ // TODO(tjoerg): Allow differences in fp precision.
auto* element_instr_1 = get_element_instr(instr1);
auto* element_instr_2 = get_element_instr(instr2);
if (element_instr_1->opcode() == HloOpcode::kReduce &&
@@ -82,31 +85,35 @@ bool GpuMultiOutputFusion::ShapesCompatibleForFusion(HloInstruction* instr1,
}
namespace {
-bool IsReduction(HloInstruction* instr) {
+bool IsInputFusibleReduction(HloInstruction* instr) {
if (instr->IsMultiOutputFusion()) {
for (const HloInstruction* operand :
instr->fused_expression_root()->operands()) {
if (operand->opcode() == HloOpcode::kReduce) {
+ CHECK(instr->fusion_kind() == HloInstruction::FusionKind::kInput)
+ << " Reduce multi-output fusion " << instr->ToString()
+ << " must be an input fusion.";
return true;
}
}
return false;
} else if (instr->opcode() == HloOpcode::kFusion) {
- return instr->fused_expression_root()->opcode() == HloOpcode::kReduce;
+ // The loop emitter can handle to-vector reduce fusions. Such reduce
+ // fusions have the fusion kind kLoop rather than kInput. We do not fuse
+ // to-vector reduce fusions, because the resulting fusions may no longer be
+    // supported by the loop emitter.
+ return IsReductionToVector(*instr->fused_expression_root());
} else {
- return instr->opcode() == HloOpcode::kReduce;
+ return IsReductionToVector(*instr);
}
}
} // namespace
bool GpuMultiOutputFusion::IsFusible(HloInstruction* instr) {
// We can fuse reduces and loop fusions.
- return IsReduction(instr) ||
+ return IsInputFusibleReduction(instr) ||
(instr->opcode() == HloOpcode::kFusion &&
- instr->fusion_kind() == HloInstruction::FusionKind::kLoop &&
- // TODO(b/110202584): bitcasts make nested fusions, GPU has no support
- // for nested fusions.
- instr->fused_expression_root()->opcode() != HloOpcode::kBitcast);
+ instr->fusion_kind() == HloInstruction::FusionKind::kLoop);
}
int64 GpuMultiOutputFusion::GetProfit(HloInstruction* instr1,
@@ -147,5 +154,110 @@ bool GpuMultiOutputFusion::LegalToFuse(HloInstruction* instr1,
return instr1->fusion_kind() != HloInstruction::FusionKind::kLoop;
}
+bool GpuMultiOutputFusion::DoProducerConsumerMultiOutputFusion() {
+ bool changed = false;
+ RecomputeReachability();
+
+ tensorflow::gtl::FlatSet<HloInstruction*> to_fuse;
+ // Keep a list of the instructions to fuse after making all the fusion
+ // decisions. We first aggressively add instructions to potential_fusion_list,
+  // then filter out instructions that will no longer be fusable because of
+  // reachability changes. This avoids recalculating reachability on a large
+  // set of instructions.
+ std::vector<std::pair<HloInstruction*, HloInstruction*>>
+ potential_fusion_list;
+ std::vector<std::pair<HloInstruction*, HloInstruction*>> fusion_list;
+ std::vector<HloInstruction*> instrs_to_update_reachability;
+
+  // For each reduce or reduce multi-output fusion, try to fuse it with its
+  // loop-fusion operands.
+ for (HloInstruction* consumer : computation()->MakeInstructionPostOrder()) {
+ if (consumer->user_count() == 0) {
+ continue;
+ }
+ if (!IsInputFusibleReduction(consumer)) {
+ continue;
+ }
+
+ auto consumer_operands = consumer->operands();
+ for (size_t i = 0; i < consumer_operands.size(); ++i) {
+ HloInstruction* producer = consumer_operands[i];
+ if (!producer->IsFusable()) {
+ continue;
+ }
+ const bool is_loop_fusion =
+ producer->opcode() == HloOpcode::kFusion &&
+ producer->fusion_kind() == HloInstruction::FusionKind::kLoop;
+ if (!is_loop_fusion) {
+ continue;
+ }
+ if (!ShapesCompatibleForFusion(producer, consumer)) {
+ continue;
+ }
+ // If we have already decided to fuse this producer, skip it.
+ if (ContainsKey(to_fuse, producer)) {
+ continue;
+ }
+      // Do not fuse a producer if the other operands of the fusion are
+      // reachable from the producer; this would create a cycle.
+ if (c_any_of(consumer_operands, [&](HloInstruction* operand) {
+ return producer != operand &&
+ reachability()->IsReachable(producer, operand);
+ })) {
+ break;
+ }
+ to_fuse.insert(producer);
+ potential_fusion_list.emplace_back(producer, consumer);
+ instrs_to_update_reachability.push_back(producer);
+ instrs_to_update_reachability.push_back(consumer);
+ break;
+ }
+ }
+
+  // Filter out pairs that will no longer be fusable because of reachability
+  // changes.
+ for (auto& fusion_pair : potential_fusion_list) {
+ HloInstruction* producer = fusion_pair.first;
+ HloInstruction* consumer = fusion_pair.second;
+ if (!c_any_of(consumer->operands(), [&](HloInstruction* operand) {
+ return producer != operand &&
+ reachability()->IsReachable(producer, operand);
+ })) {
+ UpdateReachability(producer, consumer, instrs_to_update_reachability);
+ fusion_list.push_back(fusion_pair);
+ }
+ }
+
+ for (auto fusions_to_create : fusion_list) {
+ HloInstruction* producer = fusions_to_create.first;
+ HloInstruction* consumer = fusions_to_create.second;
+ if (consumer->opcode() != HloOpcode::kFusion) {
+ // Fusing with a reduce (fusion) always results in an input fusion.
+ HloInstruction* input_fusion =
+ computation()->AddInstruction(HloInstruction::CreateFusion(
+ consumer->shape(), HloInstruction::FusionKind::kInput, consumer));
+ VLOG(2) << "Fuse producer " << producer->name() << " and its consumer "
+ << consumer->name() << " into " << input_fusion->name();
+ TF_CHECK_OK(computation()->ReplaceInstruction(consumer, input_fusion));
+ if (producer->opcode() == HloOpcode::kFusion) {
+ input_fusion->MergeFusionInstructionIntoMultiOutput(producer);
+ } else {
+ input_fusion->FuseInstructionIntoMultiOutput(producer);
+ }
+ } else {
+ VLOG(2) << "Fuse producer " << producer->name() << " into its consumer "
+ << consumer->name();
+
+ if (producer->opcode() == HloOpcode::kFusion) {
+ consumer->MergeFusionInstructionIntoMultiOutput(producer);
+ } else {
+ consumer->FuseInstructionIntoMultiOutput(producer);
+ }
+ }
+ changed = true;
+ }
+ return changed;
+}
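
The reachability test used in both passes above can be read as a standalone predicate (a sketch; WouldCreateCycle is a hypothetical name): fusing producer into consumer is only safe if no other operand of the consumer is reachable from the producer, since the fused instruction would otherwise both precede and follow that operand.

    bool WouldCreateCycle(const HloReachabilityMap& reachability,
                          const HloInstruction* producer,
                          const HloInstruction* consumer) {
      for (const HloInstruction* operand : consumer->operands()) {
        if (operand != producer && reachability.IsReachable(producer, operand))
          return true;
      }
      return false;
    }
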
+
} // namespace gpu
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/multi_output_fusion.h b/tensorflow/compiler/xla/service/gpu/multi_output_fusion.h
index 16db0e0f02..67ca5d49ee 100644
--- a/tensorflow/compiler/xla/service/gpu/multi_output_fusion.h
+++ b/tensorflow/compiler/xla/service/gpu/multi_output_fusion.h
@@ -45,6 +45,9 @@ class GpuMultiOutputFusion : public MultiOutputFusion {
// Test if it's legal to fuse instr1 and instr2 into one fusion instruction.
bool LegalToFuse(HloInstruction* instr1, HloInstruction* instr2) override;
+
+ // Fuse loop fusions into reduce fusions.
+ bool DoProducerConsumerMultiOutputFusion() override;
};
} // namespace gpu
diff --git a/tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc b/tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc
index 5e7ceb7976..979ea79243 100644
--- a/tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc
+++ b/tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc
@@ -255,5 +255,99 @@ TEST_F(InstructionFusionTest, MultiOutputFusionTwoLoops) {
op::Tuple(op::Multiply(), op::Divide()));
}
+TEST_F(InstructionFusionTest, ProducerConsumerFusionLoopFusionAndReduce) {
+ auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
+ fused_add {
+ p0.1 = f32[2,2,2]{2,1,0} parameter(0)
+ p1.1 = f32[2,2,2]{2,1,0} parameter(1)
+ ROOT add = f32[2,2,2]{2,1,0} add(p0.1, p1.1)
+ }
+
+ ENTRY reduce {
+ p0 = f32[2,2,2]{2,1,0} parameter(0)
+ p1 = f32[2,2,2]{2,1,0} parameter(1)
+ c0 = f32[] constant(0)
+ add = f32[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_add
+ reduce = f32[2,2]{1,0} reduce(add, c0), dimensions={2}, to_apply=scalar_add_computation
+ ROOT root = (f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(reduce, add)
+ })"))
+ .ValueOrDie();
+ ASSERT_TRUE(GpuMultiOutputFusion().Run(module.get()).ValueOrDie());
+ SCOPED_TRACE(module->ToString());
+ const HloInstruction* root = module->entry_computation()->root_instruction();
+ EXPECT_THAT(root, op::Tuple(op::GetTupleElement(), op::GetTupleElement()));
+ const HloInstruction* fusion = root->operand(0)->operand(0);
+ ASSERT_TRUE(fusion->IsMultiOutputFusion());
+ EXPECT_THAT(fusion->fused_expression_root(),
+ op::Tuple(op::Reduce(), op::Add()));
+}
+
+TEST_F(InstructionFusionTest, ProducerConsumerFusionLoopFusionAndReduceFusion) {
+ auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
+ fused_select {
+ p1.1 = f32[2,2,2]{2,1,0} parameter(1)
+ c0 = f32[] constant(0)
+ broadcast = f32[2,2,2]{2,1,0} broadcast(f32[] c0), dimensions={}
+ greater-than = pred[2,2,2]{2,1,0} greater-than(f32[2,2,2]{2,1,0} p1.1, f32[2,2,2]{2,1,0} broadcast)
+ p0.1 = f32[2,2,2]{2,1,0} parameter(0)
+ ROOT select = f32[2,2,2]{2,1,0} select(pred[2,2,2]{2,1,0} greater-than, f32[2,2,2]{2,1,0} p0.1, f32[2,2,2]{2,1,0} broadcast)
+ }
+
+ fused_reduce {
+ p0.2 = f32[2,2,2]{2,1,0} parameter(0)
+ c1 = f32[] constant(0)
+ r1 = f32[2,2]{1,0} reduce(p0.2, c1), dimensions={2}, to_apply=scalar_add_computation
+ mul = f32[2,2,2]{2,1,0} multiply(p0.2, p0.2)
+ r2 = f32[2,2]{1,0} reduce(mul, c1), dimensions={2}, to_apply=scalar_add_computation
+ ROOT tuple = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(r1, r2)
+ }
+
+ ENTRY reduce {
+ p0 = f32[2,2,2]{2,1,0} parameter(0)
+ p1 = f32[2,2,2]{2,1,0} parameter(1)
+ select = f32[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select
+ fusion = (f32[2,2]{1,0}, f32[2,2]{1,0}) fusion(select), kind=kInput, calls=fused_reduce
+ gte0 = f32[2,2]{1,0} get-tuple-element(fusion), index=0
+ gte1 = f32[2,2]{1,0} get-tuple-element(fusion), index=1
+ ROOT root = (f32[2,2]{1,0}, f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(gte1, gte1, select)
+ })"))
+ .ValueOrDie();
+ ASSERT_TRUE(GpuMultiOutputFusion().Run(module.get()).ValueOrDie());
+ SCOPED_TRACE(module->ToString());
+ const HloInstruction* root = module->entry_computation()->root_instruction();
+ EXPECT_THAT(root, op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
+ op::GetTupleElement()));
+ const HloInstruction* fusion = root->operand(0)->operand(0);
+ ASSERT_TRUE(fusion->IsMultiOutputFusion());
+ EXPECT_THAT(fusion->fused_expression_root(),
+ op::Tuple(op::Reduce(), op::Reduce(), op::Select()));
+}
+
+TEST_F(InstructionFusionTest, ProducerConsumerFusionDoNotFuseLoopReduceFusion) {
+ auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
+ fused_element_wise {
+ p0.1 = f32[2,2,2]{2,1,0} parameter(0)
+ p1.1 = f32[2,2,2]{2,1,0} parameter(1)
+ ROOT root = f32[2,2,2]{2,1,0} add(p0.1, p1.1)
+ }
+
+ fused_reduce {
+ p0.2 = f32[2,2,2]{2,1,0} parameter(0)
+ mul = f32[2,2,2]{2,1,0} multiply(f32[2,2,2]{2,1,0} p0.2, f32[2,2,2]{2,1,0} p0.2)
+ c1 = f32[] constant(0)
+ ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2]{2,1,0} mul, f32[] c1), dimensions={1}, to_apply=scalar_add_computation
+ }
+
+ ENTRY reduce {
+ p0 = f32[2,2,2]{2,1,0} parameter(0)
+ p1 = f32[2,2,2]{2,1,0} parameter(1)
+ element_wise = f32[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_element_wise
+ fusion = (f32[2,2]{1,0}, f32[2,2]{1,0}) fusion(element_wise), kind=kLoop, calls=fused_reduce
+ ROOT root = (f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(fusion, element_wise)
+ })"))
+ .ValueOrDie();
+ ASSERT_FALSE(GpuMultiOutputFusion().Run(module.get()).ValueOrDie());
+}
+
} // namespace gpu
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/outfeed_manager.cc b/tensorflow/compiler/xla/service/gpu/outfeed_manager.cc
new file mode 100644
index 0000000000..4aaf0c9e14
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/outfeed_manager.cc
@@ -0,0 +1,32 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/service/gpu/outfeed_manager.h"
+
+#include "tensorflow/compiler/xla/map_util.h"
+#include "tensorflow/compiler/xla/ptr_util.h"
+#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/core/platform/logging.h"
+
+namespace xla {
+namespace gpu {
+
+OutfeedManager* GetOrCreateOutfeedManager() {
+ static auto* manager = new OutfeedManager;
+ return manager;
+}
+
+} // namespace gpu
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/outfeed_manager.h b/tensorflow/compiler/xla/service/gpu/outfeed_manager.h
new file mode 100644
index 0000000000..a752eb7011
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/outfeed_manager.h
@@ -0,0 +1,69 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_OUTFEED_MANAGER_H_
+#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_OUTFEED_MANAGER_H_
+
+#include "tensorflow/compiler/xla/literal.h"
+#include "tensorflow/compiler/xla/service/gpu/xfeed_queue.h"
+#include "tensorflow/compiler/xla/shape_tree.h"
+#include "tensorflow/core/platform/mutex.h"
+#include "tensorflow/core/platform/notification.h"
+
+namespace xla {
+namespace gpu {
+
+// TODO(b/30467474) Once GPU outfeed implementation settles, consider
+// folding back the cpu and gpu outfeed implementations into a generic
+// one if possible.
+
+// Defines a buffer holding the destination for an outfeed in host memory and a
+// notification that triggers when the transfer is done.
+class OutfeedBuffer {
+ public:
+ OutfeedBuffer(int64 length) : length_(length) {}
+
+ // Waits for the device transfer to be finished.
+ std::unique_ptr<Literal> WaitUntilAvailable() {
+ done_.WaitForNotification();
+ return std::move(destination_);
+ }
+
+ int64 length() const { return length_; }
+ void set_destination(std::unique_ptr<Literal> destination) {
+ destination_ = std::move(destination);
+ }
+ Literal* destination() { return destination_.get(); }
+
+ // Callback to signal that this buffer is consumed.
+ void Done() { done_.Notify(); }
+
+ private:
+ std::unique_ptr<Literal> destination_;
+ const int64 length_;
+ tensorflow::Notification done_;
+};
+
+// Manages a thread-safe queue of buffers. The buffers are supposed to be
+// produced by the transfer manager and consumed by the device.
+using OutfeedManager = XfeedQueue<ShapeTree<std::unique_ptr<OutfeedBuffer>>*>;
+
+// Singleton creator-or-accessor: Returns the GPU outfeed manager.
+OutfeedManager* GetOrCreateOutfeedManager();
+
+} // namespace gpu
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_OUTFEED_MANAGER_H_
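
The intended handshake on an OutfeedBuffer is a simple notification pair (a sketch against the class above; the queueing of the surrounding ShapeTree through the OutfeedManager is elided):

    // Host side: blocks on the Notification until the device signals Done().
    std::unique_ptr<Literal> HostSideWait(OutfeedBuffer* buffer) {
      return buffer->WaitUntilAvailable();
    }

    // Device-completion side: invoked from the stream callback once the
    // device-to-host memcpy has finished; unblocks HostSideWait().
    void DeviceSideDone(OutfeedBuffer* buffer) { buffer->Done(); }
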
diff --git a/tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc b/tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc
new file mode 100644
index 0000000000..7986e63f43
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc
@@ -0,0 +1,111 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/service/gpu/outfeed_thunk.h"
+#include "tensorflow/compiler/xla/literal.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
+#include "tensorflow/compiler/xla/service/gpu/outfeed_manager.h"
+#include "tensorflow/compiler/xla/util.h"
+#include "tensorflow/core/platform/stream_executor_no_cuda.h"
+
+namespace xla {
+namespace gpu {
+
+OutfeedThunk::OutfeedThunk(ShapeTree<BufferAllocation::Slice> outfeed_slices,
+ const HloInstruction* hlo_instruction)
+ : Thunk(Kind::kOutfeed, hlo_instruction),
+ outfeed_slices_(std::move(outfeed_slices)) {}
+
+Status OutfeedThunk::ExecuteOnStream(
+ const BufferAllocations& buffer_allocations, se::Stream* stream,
+ HloExecutionProfiler* profiler) {
+ VLOG(2) << "Outfeeding from GPU: " << hlo_instruction()->ToString();
+
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
+ OutfeedManager* outfeed_manager = GetOrCreateOutfeedManager();
+ ShapeTree<std::unique_ptr<OutfeedBuffer>>* outfeed_buffers =
+ outfeed_manager->BlockingGetNextDestination();
+
+ // Nothing to be done for empty tuples.
+ if (ShapeUtil::IsEmptyTuple(hlo_instruction()->operand(0)->shape())) {
+ return Status::OK();
+ }
+ CHECK(ShapeUtil::Compatible(hlo_instruction()->operand(0)->shape(),
+ outfeed_buffers->shape()));
+
+ TF_RETURN_IF_ERROR(outfeed_buffers->ForEachMutableElementWithStatus(
+ [&](const ShapeIndex& index, std::unique_ptr<OutfeedBuffer>* buffer) {
+ if (!*buffer) { // Tuple pointers.
+ return Status::OK();
+ }
+ // Allocate storage for the literal data.
+ const Shape& shape =
+ ShapeUtil::GetSubshape(outfeed_buffers->shape(), index);
+ (*buffer)->set_destination(Literal::CreateFromShape(shape));
+
+ BufferAllocation::Slice slice = outfeed_slices_.element(index);
+ se::DeviceMemoryBase data_address;
+ if (slice.allocation()) {
+ // If we have a static allocation, read it from there. This avoids
+ // synchronizing the host and device just to read a pointer.
+ data_address = buffer_allocations.GetDeviceAddress(slice);
+ } else {
+ // Otherwise we have to read the tuple pointer first.
+ CHECK(!index.empty());
+ // Copy the parent buffer to the host.
+ BufferAllocation::Slice tuple_slice =
+ outfeed_slices_.element(ShapeIndexView(index).ConsumeFront());
+ if (!tuple_slice.allocation()) {
+ return Unimplemented(
+ "Nested dynamic tuples are not supported on GPU");
+ }
+ se::DeviceMemoryBase tuple_address =
+ buffer_allocations.GetDeviceAddress(tuple_slice);
+ CHECK(tuple_slice.size() % sizeof(void*) == 0)
+ << "Tuple size must be a multiple of pointer size";
+ std::vector<void*> tuple_element_buffer_addresses(tuple_slice.size() /
+ sizeof(void*));
+ stream->ThenMemcpy(tuple_element_buffer_addresses.data(),
+ tuple_address, tuple_slice.size());
+ TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
+        // The data address is the index.back()-th entry of the tuple's
+        // pointer buffer.
+ data_address =
+ se::DeviceMemoryBase(tuple_element_buffer_addresses[index.back()],
+ (*buffer)->length());
+ }
+
+ // TODO(b/111309141): Run this on a separate stream so it doesn't block
+ // the GPU from doing work during the transfer. This could be handled by
+ // making StreamAssignment do something intelligent with outfeed thunks.
+ stream
+ ->ThenMemcpy((*buffer)->destination()->untyped_data(), data_address,
+ (*buffer)->length())
+ .ThenDoHostCallback([buffer]() { (*buffer)->Done(); });
+ return Status::OK();
+ }));
+
+ Status block_status = stream->BlockHostUntilDone();
+ if (!block_status.ok()) {
+ return InternalError("Failed to complete data transfer on stream %p: %s",
+ stream, block_status.error_message().c_str());
+ }
+
+ VLOG(2) << "Outfeeding from GPU complete";
+ return Status::OK();
+}
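
The dynamic-tuple branch above reduces to a two-step fetch (a sketch using only the stream calls already present in this file; ResolveTupleLeaf is a hypothetical name and includes are elided): copy the parent's array of child pointers to the host, then index it to find the leaf's device address.

    StatusOr<se::DeviceMemoryBase> ResolveTupleLeaf(
        se::Stream* stream, se::DeviceMemoryBase tuple_address,
        int64 tuple_size_bytes, int64 leaf_index, int64 leaf_length) {
      std::vector<void*> pointers(tuple_size_bytes / sizeof(void*));
      stream->ThenMemcpy(pointers.data(), tuple_address, tuple_size_bytes);
      // The pointer values must reach the host before we can select one.
      TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
      return se::DeviceMemoryBase(pointers[leaf_index], leaf_length);
    }
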
+
+} // namespace gpu
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/outfeed_thunk.h b/tensorflow/compiler/xla/service/gpu/outfeed_thunk.h
new file mode 100644
index 0000000000..8ed89f05f0
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/outfeed_thunk.h
@@ -0,0 +1,52 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_OUTFEED_THUNK_H_
+#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_OUTFEED_THUNK_H_
+
+#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
+#include "tensorflow/compiler/xla/service/gpu/thunk.h"
+#include "tensorflow/compiler/xla/service/hlo_instruction.h"
+#include "tensorflow/core/platform/stream_executor_no_cuda.h"
+
+namespace xla {
+namespace gpu {
+
+// A thunk that outfeeds data. The data must already be resident on the device.
+// This thunk performs a device-to-host copy from the buffer allocated for the
+// outfeed op to the host-side destination.
+class OutfeedThunk : public Thunk {
+ public:
+  // Constructs an OutfeedThunk that copies data from the buffers in the given
+  // shape tree to the host-side outfeed queue.
+ OutfeedThunk(ShapeTree<BufferAllocation::Slice> outfeed_slices,
+ const HloInstruction* hlo_instruction);
+
+ OutfeedThunk(const OutfeedThunk&) = delete;
+ OutfeedThunk& operator=(const OutfeedThunk&) = delete;
+
+ Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
+
+ private:
+ const ShapeTree<BufferAllocation::Slice> outfeed_slices_;
+};
+
+} // namespace gpu
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_OUTFEED_THUNK_H_
diff --git a/tensorflow/compiler/xla/service/gpu/pad_insertion.cc b/tensorflow/compiler/xla/service/gpu/pad_insertion.cc
index c8f0d4185c..b22040eee1 100644
--- a/tensorflow/compiler/xla/service/gpu/pad_insertion.cc
+++ b/tensorflow/compiler/xla/service/gpu/pad_insertion.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/pad_insertion.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
#include "tensorflow/compiler/xla/service/hlo_creation_utils.h"
@@ -68,7 +69,7 @@ HloInstruction* MaybePaddedAndSlicedInput(
PrimitiveType element_type = input->shape().element_type();
HloInstruction* padding =
computation->AddInstruction(HloInstruction::CreateConstant(
- MakeUnique<Literal>(Literal::Zero(element_type))));
+ MakeUnique<Literal>(LiteralUtil::Zero(element_type))));
input = MakePadHlo(input, padding, padding_config).ValueOrDie();
}
@@ -125,7 +126,7 @@ HloInstruction* MaybePaddedKernel(const Window& conv_window,
PrimitiveType element_type = kernel->shape().element_type();
HloInstruction* padding =
computation->AddInstruction(HloInstruction::CreateConstant(
- MakeUnique<Literal>(Literal::Zero(element_type))));
+ MakeUnique<Literal>(LiteralUtil::Zero(element_type))));
return MakePadHlo(kernel, padding, padding_config).ValueOrDie();
}
} // namespace
@@ -234,9 +235,9 @@ bool PadInsertion::CanonicalizeBackwardFilterConvolution(
// Create a new backward convolution replacing the old one.
HloComputation* computation = backward_conv->parent();
HloInstruction* output = backward_conv->mutable_operand(1);
- HloInstruction* padding =
- computation->AddInstruction(HloInstruction::CreateConstant(
- MakeUnique<Literal>(Literal::Zero(input->shape().element_type()))));
+ HloInstruction* padding = computation->AddInstruction(
+ HloInstruction::CreateConstant(MakeUnique<Literal>(
+ LiteralUtil::Zero(input->shape().element_type()))));
HloInstruction* padded_input =
MakePadHlo(input, padding, input_padding_config).ValueOrDie();
diff --git a/tensorflow/compiler/xla/service/gpu/sequential_thunk.cc b/tensorflow/compiler/xla/service/gpu/sequential_thunk.cc
index 88cb10883e..84285be70a 100644
--- a/tensorflow/compiler/xla/service/gpu/sequential_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/sequential_thunk.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/sequential_thunk.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/core/lib/core/errors.h"
namespace xla {
@@ -33,9 +34,12 @@ Status SequentialThunk::Initialize(const GpuExecutable& executable,
}
Status SequentialThunk::ExecuteOnStream(
- const BufferAllocations& buffer_allocations, se::Stream* stream) {
+ const BufferAllocations& buffer_allocations, se::Stream* stream,
+ HloExecutionProfiler* profiler) {
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
for (const auto& thunk : thunks_) {
- TF_RETURN_IF_ERROR(thunk->ExecuteOnStream(buffer_allocations, stream));
+ TF_RETURN_IF_ERROR(
+ thunk->ExecuteOnStream(buffer_allocations, stream, profiler));
}
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/gpu/sequential_thunk.h b/tensorflow/compiler/xla/service/gpu/sequential_thunk.h
index 135f79e413..3c4de1d1a6 100644
--- a/tensorflow/compiler/xla/service/gpu/sequential_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/sequential_thunk.h
@@ -19,6 +19,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
@@ -41,7 +42,8 @@ class SequentialThunk : public Thunk {
Status Initialize(const GpuExecutable& executable,
se::StreamExecutor* executor) override;
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
// The list of sub-thunks.
diff --git a/tensorflow/compiler/xla/service/gpu/stream_executor_util.h b/tensorflow/compiler/xla/service/gpu/stream_executor_util.h
index 8218f4fd11..39a6a38d00 100644
--- a/tensorflow/compiler/xla/service/gpu/stream_executor_util.h
+++ b/tensorflow/compiler/xla/service/gpu/stream_executor_util.h
@@ -16,6 +16,7 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_STREAM_EXECUTOR_UTIL_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_STREAM_EXECUTOR_UTIL_H_
+#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
diff --git a/tensorflow/compiler/xla/service/gpu/thunk.h b/tensorflow/compiler/xla/service/gpu/thunk.h
index 931c0bffab..99a1a0eae9 100644
--- a/tensorflow/compiler/xla/service/gpu/thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/thunk.h
@@ -20,6 +20,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
@@ -53,6 +54,7 @@ class Thunk {
kKernel,
kMemset32BitValue,
kMemzero,
+ kOutfeed,
kSequential,
kTuple,
kWhile,
@@ -94,11 +96,12 @@ class Thunk {
// Execute the kernel for the thunk on the given stream. This method must be
// called after Initialize and can be called multiple times over Thunk's
- // lifetime. Stream argument must be non-null.
+ // lifetime. 'stream' and 'profiler' must be non-null.
//
// Precondition: Initialize(stream->parent()) has been called.
virtual Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) = 0;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) = 0;
private:
Kind kind_;
diff --git a/tensorflow/compiler/xla/service/gpu/tuple_thunk.cc b/tensorflow/compiler/xla/service/gpu/tuple_thunk.cc
index 97cb04c38f..a10e40451c 100644
--- a/tensorflow/compiler/xla/service/gpu/tuple_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/tuple_thunk.cc
@@ -15,13 +15,15 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/tuple_thunk.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/util.h"
namespace xla {
namespace gpu {
Status TupleThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) {
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) {
std::vector<void*> tuple_element_buffer_addresses;
for (BufferAllocation::Slice tuple_element_buffer : tuple_element_buffers_) {
tuple_element_buffer_addresses.push_back(
@@ -31,6 +33,7 @@ Status TupleThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
buffer_allocations.GetDeviceAddress(dest_buffer_));
auto host_size = tuple_element_buffer_addresses.size() * sizeof(void*);
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
if (!stream
->ThenMemcpy(&dest_buffer_address,
tuple_element_buffer_addresses.data(), host_size)
diff --git a/tensorflow/compiler/xla/service/gpu/tuple_thunk.h b/tensorflow/compiler/xla/service/gpu/tuple_thunk.h
index 951f809b51..2d5735d6c4 100644
--- a/tensorflow/compiler/xla/service/gpu/tuple_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/tuple_thunk.h
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_executable.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
@@ -46,7 +47,8 @@ class TupleThunk : public Thunk {
TupleThunk& operator=(const TupleThunk&) = delete;
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
const std::vector<BufferAllocation::Slice> tuple_element_buffers_;
diff --git a/tensorflow/compiler/xla/service/gpu/while_thunk.cc b/tensorflow/compiler/xla/service/gpu/while_thunk.cc
index 30b9640c4c..1315a4183a 100644
--- a/tensorflow/compiler/xla/service/gpu/while_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/while_thunk.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/while_thunk.h"
#include "tensorflow/compiler/xla/ptr_util.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
@@ -29,10 +30,14 @@ WhileThunk::WhileThunk(
const HloInstruction* hlo)
: Thunk(Kind::kWhile, hlo),
condition_result_buffer_index_(condition_result_buffer_index),
+ // Pass nullptr as the HloInstruction* to the condition_thunk_sequence_
+ // and body_thunk_sequence_ constructors because these SequentialThunks
+ // are logically "part of" this WhileThunk, and shouldn't be profiled
+ // separately from it.
condition_thunk_sequence_(MakeUnique<SequentialThunk>(
- std::move(*condition_thunk_sequence), hlo)),
- body_thunk_sequence_(
- MakeUnique<SequentialThunk>(std::move(*body_thunk_sequence), hlo)) {}
+ std::move(*condition_thunk_sequence), nullptr)),
+ body_thunk_sequence_(MakeUnique<SequentialThunk>(
+ std::move(*body_thunk_sequence), nullptr)) {}
Status WhileThunk::Initialize(const GpuExecutable& executable,
se::StreamExecutor* executor) {
@@ -43,14 +48,18 @@ Status WhileThunk::Initialize(const GpuExecutable& executable,
}
Status WhileThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) {
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) {
se::DeviceMemoryBase condition_result_data =
buffer_allocations.GetDeviceAddress(condition_result_buffer_index_);
+ auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
while (true) {
// Invoke thunk sequence for while 'condition' computation.
- TF_RETURN_IF_ERROR(
- condition_thunk_sequence_->ExecuteOnStream(buffer_allocations, stream));
+ profiler->StartHloComputation();
+ TF_RETURN_IF_ERROR(condition_thunk_sequence_->ExecuteOnStream(
+ buffer_allocations, stream, profiler));
+ profiler->FinishHloComputation(hlo_instruction()->while_condition());
// Copy the result of condition computation and break the loop if 'false'.
bool condition_result;
@@ -66,9 +75,14 @@ Status WhileThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
break;
}
- // Invoke thunk sequence for while 'body' computation.
- TF_RETURN_IF_ERROR(
- body_thunk_sequence_->ExecuteOnStream(buffer_allocations, stream));
+ // We measure the time of one execution of the while body computation. The
+ // while body may be executed more than once; the last measurement "wins".
+ profiler->StartHloComputation();
+ // Invoke thunk sequence for while 'body' computation, and pass on
+ // 'profiler' to measure the timing of the thunks in 'body_thunk_sequence_'.
+ TF_RETURN_IF_ERROR(body_thunk_sequence_->ExecuteOnStream(buffer_allocations,
+ stream, profiler));
+ profiler->FinishHloComputation(hlo_instruction()->while_body());
}
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/gpu/while_thunk.h b/tensorflow/compiler/xla/service/gpu/while_thunk.h
index 22176685a9..9270f95ee6 100644
--- a/tensorflow/compiler/xla/service/gpu/while_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/while_thunk.h
@@ -19,6 +19,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
+#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/gpu/sequential_thunk.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -48,7 +49,8 @@ class WhileThunk : public Thunk {
Status Initialize(const GpuExecutable& executable,
se::StreamExecutor* executor) override;
Status ExecuteOnStream(const BufferAllocations& buffer_allocations,
- se::Stream* stream) override;
+ se::Stream* stream,
+ HloExecutionProfiler* profiler) override;
private:
const BufferAllocation::Slice condition_result_buffer_index_;
diff --git a/tensorflow/compiler/xla/service/gpu/while_transformer.cc b/tensorflow/compiler/xla/service/gpu/while_transformer.cc
index 7749201cbc..c5321df6c4 100644
--- a/tensorflow/compiler/xla/service/gpu/while_transformer.cc
+++ b/tensorflow/compiler/xla/service/gpu/while_transformer.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include <unordered_map>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
diff --git a/tensorflow/compiler/xla/service/gpu/while_transformer_test.cc b/tensorflow/compiler/xla/service/gpu/while_transformer_test.cc
index 2f290f61bd..dbc8442ed2 100644
--- a/tensorflow/compiler/xla/service/gpu/while_transformer_test.cc
+++ b/tensorflow/compiler/xla/service/gpu/while_transformer_test.cc
@@ -42,7 +42,7 @@ class WhileTransformerTest : public HloTestBase {
const int64 tuple_index, const int64 limit) {
auto builder = HloComputation::Builder(TestName() + ".Condition");
auto limit_const = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(limit)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(limit)));
auto loop_state = builder.AddInstruction(HloInstruction::CreateParameter(
0, GetLoopStateShape(tuple_index), "loop_state"));
auto induction_variable =
@@ -65,8 +65,8 @@ class WhileTransformerTest : public HloTestBase {
auto induction_variable =
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
induction_variable_shape_, loop_state, ind_var_tuple_index));
- auto inc = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(increment)));
+ auto inc = builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR0<int32>(increment)));
auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc));
// Update data GTE(data_tuple_index).
@@ -89,10 +89,12 @@ class WhileTransformerTest : public HloTestBase {
const int64 ind_var_tuple_index,
const int64 ind_var_init) {
auto builder = HloComputation::Builder(TestName() + ".While");
- auto induction_var_init = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(ind_var_init)));
- auto data_init = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f})));
+ auto induction_var_init =
+ builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR0<int32>(ind_var_init)));
+ auto data_init = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
+ {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f})));
auto loop_state_init =
ind_var_tuple_index == 0
? builder.AddInstruction(
diff --git a/tensorflow/compiler/xla/service/gpu/xfeed_queue.h b/tensorflow/compiler/xla/service/gpu/xfeed_queue.h
new file mode 100644
index 0000000000..737c7eb025
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/xfeed_queue.h
@@ -0,0 +1,89 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_XFEED_QUEUE_H_
+#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_XFEED_QUEUE_H_
+
+#include <deque>
+#include <vector>
+
+#include "tensorflow/core/platform/mutex.h"
+#include "tensorflow/core/platform/notification.h"
+#include "tensorflow/core/platform/thread_annotations.h"
+
+namespace xla {
+namespace gpu {
+
+// TODO(b/30467474) Once GPU outfeed implementation settles, consider
+// folding back the cpu and gpu outfeed implementations into a generic
+// one if possible.
+
+// Manages a thread-safe queue of buffers.
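+//
+// Illustrative usage sketch (the names below are hypothetical, not part of
+// this change):
+//
+//   XfeedQueue<std::vector<void*>> queue;
+//   queue.RegisterOnEmptyCallback([] { /* e.g. log that the queue drained */ });
+//   queue.EnqueueDestination({buffer0, buffer1});    // producer thread
+//   auto next = queue.BlockingGetNextDestination();  // consumer thread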
+template <typename BufferType>
+class XfeedQueue {
+ public:
+ // Adds a tree of buffers to the queue. The individual buffers correspond to
+ // the elements of a tuple and may be nullptr if the buffer is a tuple index
+ // buffer.
+ void EnqueueDestination(BufferType buffers) {
+ tensorflow::mutex_lock l(mu_);
+ enqueued_buffers_.push_back(std::move(buffers));
+ cv_.notify_one();
+ }
+
+ // Blocks until the queue is non-empty, then returns the buffer at the head of
+ // the queue.
+ BufferType BlockingGetNextDestination() {
+ bool became_empty;
+ BufferType current_buffer;
+ {
+ tensorflow::mutex_lock l(mu_);
+ while (enqueued_buffers_.empty()) {
+ cv_.wait(l);
+ }
+ current_buffer = std::move(enqueued_buffers_.front());
+ enqueued_buffers_.pop_front();
+ became_empty = enqueued_buffers_.empty();
+ }
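+ // 'mu_' is released at this point, so the callbacks below may safely
+ // re-enter the queue (e.g. enqueue a new destination).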
+ if (became_empty) {
+ for (const auto& callback : on_empty_callbacks_) {
+ callback();
+ }
+ }
+ return current_buffer;
+ }
+
+ void RegisterOnEmptyCallback(std::function<void()> callback) {
+ on_empty_callbacks_.push_back(std::move(callback));
+ }
+
+ private:
+ tensorflow::mutex mu_;
+
+ // Condition variable that is signaled every time a buffer is enqueued.
+ tensorflow::condition_variable cv_;
+
+ // The queue of trees of buffers. Buffer pointers in the queue are not owned.
+ std::deque<BufferType> enqueued_buffers_ GUARDED_BY(mu_);
+
+ // List of callbacks which will be called when 'enqueued_buffers_' becomes
+ // empty.
+ std::vector<std::function<void()>> on_empty_callbacks_;
+};
+
+} // namespace gpu
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_XFEED_QUEUE_H_
diff --git a/tensorflow/compiler/xla/service/graphviz_example.cc b/tensorflow/compiler/xla/service/graphviz_example.cc
index acf6611486..aa89567ee8 100644
--- a/tensorflow/compiler/xla/service/graphviz_example.cc
+++ b/tensorflow/compiler/xla/service/graphviz_example.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include <memory>
#include <string>
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
@@ -47,7 +48,7 @@ HloComputation* AddScalarConstantComputation(int64 addend, HloModule* module) {
auto x_value = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "x_value"));
auto half = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.5)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.5)));
builder.AddInstruction(HloInstruction::CreateBinary(
half->shape(), HloOpcode::kAdd, x_value, half));
return module->AddEmbeddedComputation(builder.Build());
@@ -122,7 +123,7 @@ std::unique_ptr<HloModule> MakeBigGraph() {
auto rng = builder.AddInstruction(
HloInstruction::CreateRng(vshape, RNG_UNIFORM, {param_m, param_m}));
auto one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto add_computation = ScalarSumComputation(module.get());
builder.AddInstruction(
HloInstruction::CreateReduce(vshape, rng, one, {1}, add_computation));
diff --git a/tensorflow/compiler/xla/service/heap_simulator_test.cc b/tensorflow/compiler/xla/service/heap_simulator_test.cc
index 3849b565e3..b41dc66fe9 100644
--- a/tensorflow/compiler/xla/service/heap_simulator_test.cc
+++ b/tensorflow/compiler/xla/service/heap_simulator_test.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include <utility>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/buffer_value.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -239,7 +239,7 @@ class HeapSimulatorTest : public HloTestBase {
TEST_F(HeapSimulatorTest, ScalarConstant) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
// Constants aren't assigned. See b/32248867
HeapSimulatorTracker tracker(TestName(), builder.Build(), {const0});
@@ -674,7 +674,7 @@ class HeapAlgorithmTestBase : public ::testing::Test {
const BufferValue* DummyBufferValue() {
const BufferValue::Id id = buffers_.size();
auto const0 = builder_.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
buffers_.emplace_back(MakeUnique<HloValue>(id, const0, ShapeIndex{}));
return buffers_.back().get();
}
diff --git a/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc b/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc
index 8f18d50f6e..403d4df6b5 100644
--- a/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include <map>
#include <memory>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/flatten_call_graph.h"
#include "tensorflow/compiler/xla/service/hlo_graph_dumper.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"
@@ -116,9 +116,9 @@ TEST_F(HloAliasAnalysisTest, BinaryOperation) {
// Test the analysis on a single binary operation (Add).
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, constant1, constant2));
module_->AddEntryComputation(builder.Build());
@@ -228,9 +228,9 @@ TEST_F(HloAliasAnalysisTest, SingleCall) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, called_computation));
module_->AddEntryComputation(builder.Build());
@@ -267,9 +267,9 @@ TEST_F(HloAliasAnalysisTest, ComputationCalledTwice) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call1 = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, called_computation));
auto call2 = builder.AddInstruction(HloInstruction::CreateCall(
@@ -346,15 +346,15 @@ TEST_F(HloAliasAnalysisTest, SingleWhile) {
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while = builder.AddInstruction(
@@ -439,15 +439,15 @@ TEST_F(HloAliasAnalysisTest, SequentialWhiles) {
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while0 = builder.AddInstruction(
@@ -498,7 +498,7 @@ TEST_F(HloAliasAnalysisTest, NestedWhiles) {
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
return cond_builder.Build();
};
// Build separate condition computations so the call graph is flat. The
@@ -543,9 +543,9 @@ TEST_F(HloAliasAnalysisTest, NestedWhiles) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto entry_while = builder.AddInstruction(
@@ -608,17 +608,17 @@ TEST_F(HloAliasAnalysisTest, SwizzlingWhile) {
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto cond_constant = cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto constant3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2, constant3}));
auto xla_while = builder.AddInstruction(
@@ -654,19 +654,18 @@ TEST_F(HloAliasAnalysisTest, SwizzlingWhile) {
}
TEST_F(HloAliasAnalysisTest, TupleSelect) {
- // Test a kSelect of a tuple value. Non-top-level element flow through the
- // instruction.
+ // Test a kTupleSelect. Non-top-level elements flow through the instruction.
auto builder = HloComputation::Builder(TestName());
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto constant3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
auto constant4 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(4.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(4.0)));
auto tuple1 =
builder.AddInstruction(HloInstruction::CreateTuple({constant1}));
auto tuple2 =
@@ -677,13 +676,13 @@ TEST_F(HloAliasAnalysisTest, TupleSelect) {
builder.AddInstruction(HloInstruction::CreateTuple({constant4}));
const Shape tuple_shape = tuple1->shape();
auto select11 = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple_shape, HloOpcode::kSelect, pred, tuple1, tuple1));
+ tuple_shape, HloOpcode::kTupleSelect, pred, tuple1, tuple1));
auto select12 = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple_shape, HloOpcode::kSelect, pred, tuple1, tuple2));
+ tuple_shape, HloOpcode::kTupleSelect, pred, tuple1, tuple2));
auto select34 = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple_shape, HloOpcode::kSelect, pred, tuple3, tuple4));
+ tuple_shape, HloOpcode::kTupleSelect, pred, tuple3, tuple4));
auto select1234 = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple_shape, HloOpcode::kSelect, pred, select12, select34));
+ tuple_shape, HloOpcode::kTupleSelect, pred, select12, select34));
module_->AddEntryComputation(builder.Build());
@@ -718,7 +717,7 @@ TEST_F(HloAliasAnalysisTest, TupleSelect) {
}
TEST_F(HloAliasAnalysisTest, TupleSelectToWhile) {
- // Test a tuple-shaped kSelect feeding a kWhile instruction. HLO:
+ // Test a tuple-shaped kTupleSelect feeding a kWhile instruction. HLO:
//
// body((F32[], F32[]) %tuple_param):
// %negate = Negate(%tuple_param{0})
@@ -754,22 +753,22 @@ TEST_F(HloAliasAnalysisTest, TupleSelectToWhile) {
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple1 =
builder.AddInstruction(HloInstruction::CreateTuple({constant1}));
auto tuple2 =
builder.AddInstruction(HloInstruction::CreateTuple({constant2}));
auto select = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple_shape, HloOpcode::kSelect, pred, tuple1, tuple2));
+ tuple_shape, HloOpcode::kTupleSelect, pred, tuple1, tuple2));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, select));
@@ -806,7 +805,7 @@ TEST_F(HloAliasAnalysisTest, Bitcast) {
// Bitcasting a value should not produce a new buffer.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto bitcast = builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kBitcast, constant));
@@ -825,7 +824,7 @@ TEST_F(HloAliasAnalysisTest, BitcastInterference) {
// interference.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto bitcast = builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kBitcast, constant));
builder.AddInstruction(HloInstruction::CreateTuple({constant, bitcast}));
@@ -844,13 +843,13 @@ TEST_F(HloAliasAnalysisTest, WhileInterference) {
// the other use of the init.
auto builder = HloComputation::Builder(TestName());
auto init = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto cond_builder = HloComputation::Builder("condition");
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, init->shape(), "param"));
auto cond_root = cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
diff --git a/tensorflow/compiler/xla/service/hlo_computation.cc b/tensorflow/compiler/xla/service/hlo_computation.cc
index c057be8201..441288da1a 100644
--- a/tensorflow/compiler/xla/service/hlo_computation.cc
+++ b/tensorflow/compiler/xla/service/hlo_computation.cc
@@ -120,6 +120,30 @@ HloInstruction* HloComputation::AddParameter(
return instructions_.back().get();
}
+namespace {
+
+// Returns the new name for a fusion parameter when we change its number.
+//
+// Fusion parameters are named foo.param_1, bar.param_2, etc. We are
+// renumbering the parameters, so replace the final number in the name with
+// the updated value.
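+//
+// For example, RenameFusionParameter("foo.param_7", 2) returns "foo.param_2",
+// while a name without a numeric ".param_" suffix is returned unchanged.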
+string RenameFusionParameter(const string& original_name, int64 new_param_no) {
+ const string param_underscore = ".param_";
+ size_t index = original_name.rfind(param_underscore);
+ if (index == string::npos) {
+ return original_name;
+ }
+ string after_param = original_name.substr(index + param_underscore.size());
+ int64 numeric_suffix;
+ if (tensorflow::strings::safe_strto64(after_param, &numeric_suffix)) {
+ return StrCat(original_name.substr(0, index + param_underscore.size()),
+ new_param_no);
+ }
+ return original_name;
+}
+
+} // namespace
+
Status HloComputation::RemoveParameter(int64 param_no) {
CHECK_GE(param_no, 0);
CHECK_LT(param_no, param_instructions_.size());
@@ -132,21 +156,8 @@ Status HloComputation::RemoveParameter(int64 param_no) {
while (param_no < param_instructions_.size()) {
param_instruction = param_instructions_[param_no];
- string param_name = param_instruction->name();
- // Fusion parameters are named foo.param_1, bar.param_2, etc. We are
- // renumbering the parameters, so replace the final number in the name with
- // the updated value.
- const string param_underscore = ".param_";
- size_t index = param_name.rfind(param_underscore);
- if (index == string::npos) {
- string after_param = name().substr(index + param_underscore.size());
- int64 numeric_suffix;
- if (tensorflow::strings::safe_strto64(after_param, &numeric_suffix)) {
- param_name =
- StrCat(param_name.substr(0, index), param_underscore, param_no);
- }
- }
-
+ string param_name =
+ RenameFusionParameter(param_instruction->name(), param_no);
HloInstruction* new_instr =
AddInstructionInternal(HloInstruction::CreateParameter(
param_no, param_instruction->shape(), param_name));
@@ -159,6 +170,34 @@ Status HloComputation::RemoveParameter(int64 param_no) {
return Status::OK();
}
+Status HloComputation::RemoveUnusedParameters() {
+ CHECK(IsFusionComputation());
+ int64 removed = 0;
+ for (int64 i = 0; i < param_instructions_.size(); ++i) {
+ HloInstruction* param_instruction = param_instructions_[i];
+ if (param_instruction->user_count() == 0 &&
+ param_instruction != root_instruction()) {
+ TF_RETURN_IF_ERROR(RemoveInstruction(param_instruction));
+ ++removed;
+ continue;
+ }
+
+ if (removed > 0) {
+ const int64 param_no = i - removed;
+ string param_name =
+ RenameFusionParameter(param_instruction->name(), param_no);
+ HloInstruction* new_instr =
+ AddInstructionInternal(HloInstruction::CreateParameter(
+ param_no, param_instruction->shape(), param_name));
+ TF_RETURN_IF_ERROR(param_instruction->ReplaceAllUsesWith(new_instr));
+ param_instructions_[param_no] = new_instr;
+ TF_RETURN_IF_ERROR(RemoveInstruction(param_instruction));
+ }
+ }
+ param_instructions_.resize(param_instructions_.size() - removed);
+ return Status::OK();
+}
+
bool HloComputation::IsRemovable(const HloInstruction* instruction) {
// If the instruction has control predecessors or successors then we cannot
// remove the instruction without violating ordering constraints (added, for
@@ -245,9 +284,8 @@ void HloComputation::set_root_instruction(
if (!IsFusionComputation()) {
CHECK(ShapeUtil::Compatible(new_root_instruction->shape(),
root_instruction_->shape()))
- << new_root_instruction->shape().ShortDebugString()
- << " is incompatible with "
- << root_instruction_->shape().ShortDebugString();
+ << new_root_instruction->shape() << " is incompatible with "
+ << root_instruction_->shape();
}
bool root_found = false;
for (auto& instruction : instructions_) {
@@ -490,8 +528,10 @@ HloInstruction* HloComputation::CreateFusionInstruction(
}
StatusOr<HloInstruction*> HloComputation::DeepCopyHelper(
- HloInstruction* instruction, const ShapeTree<bool>* indices_to_copy,
- ShapeTree<HloInstruction*>* copies_added, ShapeIndex* index) {
+ HloInstruction* instruction, ShapeIndex* index,
+ const std::function<
+ HloInstruction*(HloInstruction* leaf, const ShapeIndex& leaf_index,
+ HloComputation* computation)>& copy_leaf) {
if (ShapeUtil::IsTuple(instruction->shape())) {
std::vector<HloInstruction*> elements;
for (int64 i = 0; i < ShapeUtil::TupleElementCount(instruction->shape());
@@ -502,9 +542,8 @@ StatusOr<HloInstruction*> HloComputation::DeepCopyHelper(
instruction, i));
index->push_back(i);
- TF_ASSIGN_OR_RETURN(
- HloInstruction * element,
- DeepCopyHelper(gte, indices_to_copy, copies_added, index));
+ TF_ASSIGN_OR_RETURN(HloInstruction * element,
+ DeepCopyHelper(gte, index, copy_leaf));
elements.push_back(element);
index->pop_back();
}
@@ -518,19 +557,7 @@ StatusOr<HloInstruction*> HloComputation::DeepCopyHelper(
// Array shape.
TF_RET_CHECK(ShapeUtil::IsArray(instruction->shape()));
- if (indices_to_copy == nullptr || indices_to_copy->element(*index)) {
- // Use kCopy to copy array elements
- HloInstruction* copy = AddInstruction(HloInstruction::CreateUnary(
- instruction->shape(), HloOpcode::kCopy, instruction));
- if (copies_added != nullptr) {
- *copies_added->mutable_element(*index) = copy;
- }
- return copy;
- } else {
- // Elements which are not to be copied are passed through
- // transparently.
- return instruction;
- }
+ return copy_leaf(instruction, *index, this);
}
StatusOr<HloInstruction*> HloComputation::DeepCopyInstruction(
@@ -552,7 +579,36 @@ StatusOr<HloInstruction*> HloComputation::DeepCopyInstruction(
}
ShapeIndex index;
- return DeepCopyHelper(instruction, indices_to_copy, copies_added, &index);
+ auto copy_leaf = [indices_to_copy, copies_added](
+ HloInstruction* leaf, const ShapeIndex& leaf_index,
+ HloComputation* computation) {
+ if (indices_to_copy == nullptr || indices_to_copy->element(leaf_index)) {
+ HloInstruction* copy = computation->AddInstruction(
+ HloInstruction::CreateUnary(leaf->shape(), HloOpcode::kCopy, leaf));
+ if (copies_added != nullptr) {
+ *copies_added->mutable_element(leaf_index) = copy;
+ }
+ return copy;
+ }
+ // Elements which are not to be copied are passed through
+ // transparently.
+ return leaf;
+ };
+ return DeepCopyHelper(instruction, &index, copy_leaf);
+}
+
+StatusOr<HloInstruction*> HloComputation::DeepCopyInstructionWithCustomCopier(
+ HloInstruction* instruction,
+ const std::function<
+ HloInstruction*(HloInstruction* leaf, const ShapeIndex& leaf_index,
+ HloComputation* computation)>& copy_leaf) {
+ if (instruction->parent() != this) {
+ return FailedPrecondition(
+ "Can't deep copy instruction %s: instruction is not in computation %s",
+ instruction->name().c_str(), name().c_str());
+ }
+ ShapeIndex index;
+ return DeepCopyHelper(instruction, &index, copy_leaf);
}
ProgramShape HloComputation::ComputeProgramShape() const {
@@ -625,7 +681,7 @@ std::unique_ptr<HloReachabilityMap> HloComputation::ComputeReachability()
inputs.assign(hlo->operands().begin(), hlo->operands().end());
inputs.insert(inputs.end(), hlo->control_predecessors().begin(),
hlo->control_predecessors().end());
- result->SetReachabilityToUnion(inputs, hlo);
+ result->FastSetReachabilityToUnion(inputs, hlo);
}
return result;
}
@@ -842,4 +898,13 @@ void HloComputation::UniquifyName(NameUniquer* name_uniquer) {
name_ = name_uniquer->GetUniqueName(name_);
}
+HloInstruction* HloComputation::GetInstructionWithName(
+ tensorflow::StringPiece name) {
+ auto instructions_in_computation = instructions();
+ auto it = c_find_if(instructions_in_computation, [&](HloInstruction* instr) {
+ return instr->name() == name;
+ });
+ return it == instructions_in_computation.end() ? nullptr : *it;
+}
+
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_computation.h b/tensorflow/compiler/xla/service/hlo_computation.h
index 0f111a1a76..49ed65910f 100644
--- a/tensorflow/compiler/xla/service/hlo_computation.h
+++ b/tensorflow/compiler/xla/service/hlo_computation.h
@@ -16,6 +16,7 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_HLO_COMPUTATION_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_HLO_COMPUTATION_H_
+#include <functional>
#include <list>
#include <memory>
#include <string>
@@ -113,6 +114,11 @@ class HloComputation {
// instruction.
Status RemoveParameter(int64 param_no);
+ // Remove unused parameters from the computation.
+ // Note that this is only applicable to the computation for a fusion
+ // instruction.
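+ // For example, if a fusion computation has parameters p0, p1 and p2, and
+ // p1 is unused, p1 is removed and p2 is renumbered (and renamed) to be
+ // parameter 1.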
+ Status RemoveUnusedParameters();
+
// Add new parameter instruction to the computation.
// This should be a new parameter. Instruction will be appended to parameters
// and inserted to the instruction list.
@@ -249,6 +255,14 @@ class HloComputation {
const ShapeTree<bool>* indices_to_copy = nullptr,
ShapeTree<HloInstruction*>* copies_added = nullptr);
+ // As above, but uses a custom function to copy the leaf nodes, which could
+ // create alternative HLOs other than kCopy, or even pass-throughs.
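+ //
+ // For example, a copier that passes every leaf through unmodified (a
+ // sketch only; nothing in this change uses it):
+ //
+ //   computation->DeepCopyInstructionWithCustomCopier(
+ //       instr, [](HloInstruction* leaf, const ShapeIndex& leaf_index,
+ //                 HloComputation* computation) { return leaf; });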
+ StatusOr<HloInstruction*> DeepCopyInstructionWithCustomCopier(
+ HloInstruction* instruction,
+ const std::function<
+ HloInstruction*(HloInstruction* leaf, const ShapeIndex& leaf_index,
+ HloComputation* computation)>& copy_leaf);
+
// Computes and returns the ProgramShape of this computation (shape of
// parameters and result with layout).
ProgramShape ComputeProgramShape() const;
@@ -351,6 +365,10 @@ class HloComputation {
unique_id_ = id;
}
+ // Returns the instruction in this computation that has name `name`, or
+ // nullptr if there is no such instruction.
+ HloInstruction* GetInstructionWithName(tensorflow::StringPiece name);
+
int64 unique_id() const { return unique_id_; }
private:
@@ -373,8 +391,10 @@ class HloComputation {
// Internal helper for recursive copying of an instruction. Creates and
// returns a deep copy of the given instruction.
StatusOr<HloInstruction*> DeepCopyHelper(
- HloInstruction* instruction, const ShapeTree<bool>* indices_to_copy,
- ShapeTree<HloInstruction*>* copies_added, ShapeIndex* index);
+ HloInstruction* instruction, ShapeIndex* index,
+ const std::function<
+ HloInstruction*(HloInstruction* leaf, const ShapeIndex& leaf_index,
+ HloComputation* computation)>& copy_leaf);
// Internal helper to collect unreachable roots.
std::vector<HloInstruction*> CollectUnreachableRoots() const;
diff --git a/tensorflow/compiler/xla/service/hlo_computation_test.cc b/tensorflow/compiler/xla/service/hlo_computation_test.cc
index c504fc51d2..e4c5470331 100644
--- a/tensorflow/compiler/xla/service/hlo_computation_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_computation_test.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include <set>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"
@@ -118,7 +118,7 @@ TEST_F(HloComputationTest, PostOrderSingleton) {
// Test GetInstructionPostOrder for a computation with one instruction.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto module = CreateNewModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->MakeInstructionPostOrder(), ElementsAre(constant));
@@ -129,7 +129,7 @@ TEST_F(HloComputationTest, PostOrderSimple) {
// instructions.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
auto negate2 = builder.AddInstruction(
@@ -144,7 +144,7 @@ TEST_F(HloComputationTest, PostOrderTrace) {
// Test GetInstructionPostOrder for a computation with a trace instruction.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
auto trace =
@@ -163,13 +163,13 @@ TEST_F(HloComputationTest, PostOrderDisconnectedInstructions) {
// which are not connected.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant4 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto module = CreateNewModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->MakeInstructionPostOrder(),
@@ -181,11 +181,11 @@ TEST_F(HloComputationTest, PostOrderWithMultipleRoots) {
// which are not connected.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto add2 = builder.AddInstruction(HloInstruction::CreateBinary(
@@ -205,11 +205,11 @@ TEST_F(HloComputationTest, VisitWithMultipleRoots) {
// computation has multiple roots (dead code).
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
// Add three disconnected add expressions.
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
constant1, constant2));
@@ -256,7 +256,7 @@ TEST_F(HloComputationTest, DeepCopyArray) {
// Test that DeepCopyInstruction properly copies an array.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.0, 2.0, 3.0})));
+ LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto module = CreateNewModule();
auto computation = module->AddEntryComputation(builder.Build());
auto copy = computation->DeepCopyInstruction(constant).ValueOrDie();
@@ -268,9 +268,9 @@ TEST_F(HloComputationTest, DeepCopyTuple) {
// Test that DeepCopyInstruction properly copies a tuple.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.0, 2.0, 3.0})));
+ LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
@@ -289,7 +289,7 @@ TEST_F(HloComputationTest, DeepCopyArrayAtIndices) {
// copy are specified.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.0, 2.0, 3.0})));
+ LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto computation = builder.Build();
{
@@ -314,9 +314,9 @@ TEST_F(HloComputationTest, DeepCopyTupleAtIndices) {
// specified by the given indices.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.0, 2.0, 3.0})));
+ LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto computation = builder.Build();
@@ -375,22 +375,22 @@ TEST_F(HloComputationTest, DeepCopyToken) {
// Test that DeepCopyInstruction properly handles tokens which should not be
// copied.
auto builder = HloComputation::Builder(TestName());
- auto token = builder.AddInstruction(HloInstruction::CreateGenerateToken({}));
+ auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto module = CreateNewModule();
auto computation = module->AddEntryComputation(builder.Build());
auto copy = computation->DeepCopyInstruction(token).ValueOrDie();
// No copy should be added.
- EXPECT_THAT(copy, op::GenerateToken());
+ EXPECT_THAT(copy, op::AfterAll());
}
TEST_F(HloComputationTest, DeepCopyTokenTuple) {
// Test that DeepCopyInstruction properly handles tokens which should not be
// copied.
auto builder = HloComputation::Builder(TestName());
- auto token = builder.AddInstruction(HloInstruction::CreateGenerateToken({}));
+ auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({token, constant}));
auto module = CreateNewModule();
@@ -407,7 +407,7 @@ TEST_F(HloComputationTest, CycleDetection) {
// Test whether the visitor can detect cycles in the graph.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
auto add = builder.AddInstruction(
@@ -433,7 +433,7 @@ TEST_F(HloComputationTest, RemoveInstructionWithDuplicateOperand) {
// twice.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto dead_negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
auto dead_add = builder.AddInstruction(HloInstruction::CreateBinary(
@@ -456,9 +456,9 @@ TEST_F(HloComputationTest, RemoveInstructionWithDuplicateOperand) {
TEST_F(HloComputationTest, CloneWithControlDependency) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
@@ -502,9 +502,9 @@ TEST_F(HloComputationTest, Reachability) {
// There is a control dependency from 'add' to 'exp'.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto negate = builder.AddInstruction(
@@ -607,13 +607,14 @@ TEST_F(HloComputationTest, Stringification) {
auto* computation = module->AddEntryComputation(builder.Build());
auto options = HloPrintOptions().set_print_metadata(false);
- EXPECT_EQ(computation->ToString(options),
- R"(%TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
+ const string expected_computation =
+ R"(%TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
%x = f32[5,10]{1,0} parameter(0)
%y = f32[20,10]{1,0} parameter(1)
%transpose = f32[10,20]{1,0} transpose(f32[20,10]{1,0} %y), dimensions={1,0}
ROOT %dot = f32[5,20]{1,0} dot(f32[5,10]{1,0} %x, f32[10,20]{1,0} %transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
-})");
+})";
+ EXPECT_EQ(computation->ToString(options), expected_computation);
}
TEST_F(HloComputationTest, StringificationIndent) {
@@ -639,13 +640,14 @@ TEST_F(HloComputationTest, StringificationIndent) {
auto options =
HloPrintOptions().set_print_metadata(false).set_indent_amount(2);
- EXPECT_EQ(computation->ToString(options),
- R"( %TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
+ const string expected_computation =
+ R"( %TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
%x = f32[5,10]{1,0} parameter(0)
%y = f32[20,10]{1,0} parameter(1)
%transpose = f32[10,20]{1,0} transpose(f32[20,10]{1,0} %y), dimensions={1,0}
ROOT %dot = f32[5,20]{1,0} dot(f32[5,10]{1,0} %x, f32[10,20]{1,0} %transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
- })");
+ })";
+ EXPECT_EQ(computation->ToString(options), expected_computation);
}
TEST_F(HloComputationTest, StringificationCanonical) {
@@ -670,21 +672,23 @@ TEST_F(HloComputationTest, StringificationCanonical) {
auto* computation = module->AddEntryComputation(builder.Build());
auto options = HloPrintOptions().set_print_metadata(false);
- EXPECT_EQ(computation->ToString(options),
- R"(%TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
+ const string expected_computation1 =
+ R"(%TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
%x = f32[5,10]{1,0} parameter(0)
%y = f32[20,10]{1,0} parameter(1)
%transpose = f32[10,20]{1,0} transpose(f32[20,10]{1,0} %y), dimensions={1,0}
ROOT %dot = f32[5,20]{1,0} dot(f32[5,10]{1,0} %x, f32[10,20]{1,0} %transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
-})");
+})";
+ EXPECT_EQ(computation->ToString(options), expected_computation1);
options = HloPrintOptions().Canonical();
- EXPECT_EQ(computation->ToString(options), R"(TransposeDot {
+ const string expected_computation2 = R"(TransposeDot {
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
-})");
+})";
+ EXPECT_EQ(computation->ToString(options), expected_computation2);
}
} // namespace
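
The pattern above repeats throughout this patch: the static literal factories moved from the Literal class to LiteralUtil, with the Literal type itself now coming from literal.h. A minimal sketch of the new spelling (the helper name is ours, for illustration):

    #include "tensorflow/compiler/xla/literal_util.h"
    #include "tensorflow/compiler/xla/service/hlo_computation.h"
    #include "tensorflow/compiler/xla/service/hlo_instruction.h"

    // Builds a scalar f32 constant with the relocated factory; before this
    // patch the call was Literal::CreateR0<float>(...).
    xla::HloInstruction* AddConstantOne(xla::HloComputation::Builder* builder) {
      return builder->AddInstruction(xla::HloInstruction::CreateConstant(
          xla::LiteralUtil::CreateR0<float>(1.0f)));
    }
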
diff --git a/tensorflow/compiler/xla/service/hlo_constant_folding.cc b/tensorflow/compiler/xla/service/hlo_constant_folding.cc
index 35ecd4428d..7229031c0c 100644
--- a/tensorflow/compiler/xla/service/hlo_constant_folding.cc
+++ b/tensorflow/compiler/xla/service/hlo_constant_folding.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_evaluator.h"
@@ -51,14 +51,18 @@ StatusOr<bool> HloConstantFolding::Run(HloModule* module) {
computation->root_instruction() != instruction) {
continue;
}
- // Skip Constant, Parameter, Reduce operation.
+  // Skip Constant, Parameter, Reduce, and AfterAll operations.
   // TODO(b/35975797): Enable Reduce operation once arbitrary computations
   // are supported by the evaluator.
   // TODO(b/64407269): Enable Tuple once the timeout issue is resolved.
+  // TODO(b/110532604): Enable AfterAll once it requires at least one
+  // operand, at which point constant folding will be impossible and this
+  // special case will no longer be necessary.
if (instruction->opcode() == HloOpcode::kParameter ||
instruction->opcode() == HloOpcode::kConstant ||
instruction->opcode() == HloOpcode::kTuple ||
- instruction->opcode() == HloOpcode::kReduce) {
+ instruction->opcode() == HloOpcode::kReduce ||
+ instruction->opcode() == HloOpcode::kAfterAll) {
continue;
}
// Skip instructions with non-constant operands.
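
A hedged sketch of the skip list after this hunk; the predicate name is illustrative, not part of the change. kAfterAll joins the never-folded opcodes because a zero-operand AfterAll would otherwise pass the "all operands are constant" check and be handed to the evaluator.

    #include "tensorflow/compiler/xla/service/hlo_opcode.h"

    // Opcodes HloConstantFolding leaves untouched after this hunk.
    bool IsNeverFolded(xla::HloOpcode opcode) {
      return opcode == xla::HloOpcode::kParameter ||
             opcode == xla::HloOpcode::kConstant ||
             opcode == xla::HloOpcode::kTuple ||
             opcode == xla::HloOpcode::kReduce ||
             opcode == xla::HloOpcode::kAfterAll;
    }
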
diff --git a/tensorflow/compiler/xla/service/hlo_constant_folding_test.cc b/tensorflow/compiler/xla/service/hlo_constant_folding_test.cc
index 5d05ccfc0b..64a42c1efc 100644
--- a/tensorflow/compiler/xla/service/hlo_constant_folding_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_constant_folding_test.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include <utility>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"
@@ -41,7 +41,7 @@ using HloConstantFoldingTest = HloTestBase;
TEST_F(HloConstantFoldingTest, ConvertF32ToS64) {
HloComputation::Builder builder(TestName());
HloInstruction* input = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(
HloInstruction::CreateConvert(ShapeUtil::MakeShape(S64, {}), input));
@@ -62,7 +62,7 @@ TEST_F(HloConstantFoldingTest, ConvertF32ToS64) {
TEST_F(HloConstantFoldingTest, ConvertS64ToF32) {
HloComputation::Builder builder(TestName());
HloInstruction* input = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int64>(42)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int64>(42)));
builder.AddInstruction(
HloInstruction::CreateConvert(ShapeUtil::MakeShape(F32, {}), input));
@@ -82,8 +82,8 @@ TEST_F(HloConstantFoldingTest, ConvertS64ToF32) {
TEST_F(HloConstantFoldingTest, ConvertF32ArrayToS64Array) {
HloComputation::Builder builder(TestName());
- HloInstruction* input = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({42.0f, 19.0f})));
+ HloInstruction* input = builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<float>({42.0f, 19.0f})));
builder.AddInstruction(
HloInstruction::CreateConvert(ShapeUtil::MakeShape(S64, {2}), input));
@@ -120,7 +120,7 @@ TEST_F(HloConstantFoldingTest, Concatenate) {
for (auto csize : test_config.concat_sizes) {
dimensions[test_config.concat_dimension] = csize;
concat_size += csize;
- auto literal = Literal::CreateFromDimensions(F32, dimensions);
+ auto literal = LiteralUtil::CreateFromDimensions(F32, dimensions);
HloInstruction* insn = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
operands.push_back(insn);
@@ -149,7 +149,7 @@ TEST_F(HloConstantFoldingTest, Slice) {
const int64 slice_limits[] = {10, 8, 6, 5, 9};
const int64 slice_strides[] = {1, 1, 1, 1, 1};
TF_ASSERT_OK_AND_ASSIGN(auto literal,
- Literal::CreateRandomLiteral<F32>(
+ LiteralUtil::CreateRandomLiteral<F32>(
ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));
HloInstruction* literal_instruction = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
@@ -172,7 +172,7 @@ TEST_F(HloConstantFoldingTest, TransposeConstantFold) {
HloComputation::Builder builder(TestName());
const int64 dimensions[] = {11, 8, 7, 5, 9};
TF_ASSERT_OK_AND_ASSIGN(auto literal,
- Literal::CreateRandomLiteral<F32>(
+ LiteralUtil::CreateRandomLiteral<F32>(
ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));
auto literal_clone = literal->Literal::CloneToUnique();
HloInstruction* literal_instruction = builder.AddInstruction(
diff --git a/tensorflow/compiler/xla/service/hlo_cost_analysis.cc b/tensorflow/compiler/xla/service/hlo_cost_analysis.cc
index 762e1afc71..c49cf7f5db 100644
--- a/tensorflow/compiler/xla/service/hlo_cost_analysis.cc
+++ b/tensorflow/compiler/xla/service/hlo_cost_analysis.cc
@@ -164,7 +164,11 @@ Status HloCostAnalysis::HandleGetTupleElement(const HloInstruction*) {
return Status::OK();
}
-Status HloCostAnalysis::HandleSelect(const HloInstruction*) {
+Status HloCostAnalysis::HandleSelect(const HloInstruction* hlo) {
+ return HandleElementwiseOp(hlo);
+}
+
+Status HloCostAnalysis::HandleTupleSelect(const HloInstruction*) {
return Status::OK();
}
@@ -393,7 +397,7 @@ Status HloCostAnalysis::HandleTranspose(const HloInstruction*) {
return Status::OK();
}
-Status HloCostAnalysis::HandleGenerateToken(const HloInstruction*) {
+Status HloCostAnalysis::HandleAfterAll(const HloInstruction*) {
return Status::OK();
}
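
The substance of this change: an array kSelect is now costed through HandleElementwiseOp, while the new kTupleSelect, which merely forwards one of its tuple operands, keeps the zero-cost handling. A sketch of a computation exercising the new accounting, using the free-function builder API adopted elsewhere in this patch (shapes are ours, for illustration):

    #include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // pred ? x : y over f32[8]; HloCostAnalysis now costs the select as an
    // elementwise op over the output elements instead of treating it as free.
    void BuildArraySelect(xla::XlaBuilder* b) {
      auto pred = xla::Parameter(b, 0, xla::ShapeUtil::MakeShape(xla::PRED, {8}), "pred");
      auto x = xla::Parameter(b, 1, xla::ShapeUtil::MakeShape(xla::F32, {8}), "x");
      auto y = xla::Parameter(b, 2, xla::ShapeUtil::MakeShape(xla::F32, {8}), "y");
      xla::Select(pred, x, y);
    }
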
diff --git a/tensorflow/compiler/xla/service/hlo_cost_analysis.h b/tensorflow/compiler/xla/service/hlo_cost_analysis.h
index 0d66736fe1..0181138a6d 100644
--- a/tensorflow/compiler/xla/service/hlo_cost_analysis.h
+++ b/tensorflow/compiler/xla/service/hlo_cost_analysis.h
@@ -54,7 +54,8 @@ class HloCostAnalysis : public ConstDfsHloVisitor {
Status HandleConstant(const HloInstruction* constant) override;
Status HandleGetTupleElement(
const HloInstruction* get_tuple_element) override;
- Status HandleSelect(const HloInstruction* select) override;
+ Status HandleSelect(const HloInstruction* hlo) override;
+ Status HandleTupleSelect(const HloInstruction* hlo) override;
Status HandleCompare(const HloInstruction* compare) override;
Status HandleClamp(const HloInstruction* clamp) override;
Status HandleReducePrecision(const HloInstruction* hlo) override;
@@ -97,7 +98,7 @@ class HloCostAnalysis : public ConstDfsHloVisitor {
Status HandleBroadcast(const HloInstruction* broadcast) override;
Status HandlePad(const HloInstruction* pad) override;
Status HandleReshape(const HloInstruction* reshape) override;
- Status HandleGenerateToken(const HloInstruction* token) override;
+ Status HandleAfterAll(const HloInstruction* token) override;
Status HandleTranspose(const HloInstruction* transpose) override;
Status HandleWhile(const HloInstruction* xla_while) override;
Status HandleConditional(const HloInstruction* conditional) override;
diff --git a/tensorflow/compiler/xla/service/hlo_cost_analysis_test.cc b/tensorflow/compiler/xla/service/hlo_cost_analysis_test.cc
index d22bef5673..9fd0363f57 100644
--- a/tensorflow/compiler/xla/service/hlo_cost_analysis_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_cost_analysis_test.cc
@@ -59,9 +59,9 @@ class HloCostAnalysisTest : public ::testing::Test {
// Create a computation for a unary user function: x => exp(x + 0.5)
{
XlaBuilder builder("add_and_exp");
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto half = builder.ConstantR0<float>(0.5);
- builder.Exp(builder.Add(x, half));
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto half = ConstantR0<float>(&builder, 0.5);
+ Exp(Add(x, half));
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
add_and_exp_ = computation_status.ConsumeValueOrDie();
@@ -70,9 +70,9 @@ class HloCostAnalysisTest : public ::testing::Test {
// Create a computation for a binary user function: (x, y) => x + y
{
XlaBuilder builder("add");
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(F32, {}), "y");
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
+ Add(x, y);
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
add_ = computation_status.ConsumeValueOrDie();
@@ -81,9 +81,9 @@ class HloCostAnalysisTest : public ::testing::Test {
// Create a computation for a sigmoid function: x => 1 / (1 + exp(-x))
{
XlaBuilder builder("sigmoid");
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto one = builder.ConstantR0<float>(1.0);
- builder.Div(one, builder.Add(one, builder.Exp(builder.Neg(x))));
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto one = ConstantR0<float>(&builder, 1.0);
+ Div(one, Add(one, Exp(Neg(x))));
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
sigmoid_ = computation_status.ConsumeValueOrDie();
@@ -92,9 +92,9 @@ class HloCostAnalysisTest : public ::testing::Test {
// Create a computation for a binary max function: (x, y) => max (x, y)
{
XlaBuilder builder("max");
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(F32, {}), "y");
- builder.Max(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
+ Max(x, y);
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
max_ = computation_status.ConsumeValueOrDie();
@@ -103,9 +103,9 @@ class HloCostAnalysisTest : public ::testing::Test {
// Create a computation for a binary GT function: (x, y) => x > y
{
XlaBuilder builder("gt");
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(F32, {}), "y");
- builder.Gt(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
+ Gt(x, y);
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
gt_ = computation_status.ConsumeValueOrDie();
@@ -137,9 +137,9 @@ class HloCostAnalysisTest : public ::testing::Test {
TEST_F(HloCostAnalysisTest, MatrixMultiply) {
XlaBuilder builder("matrix_multiply");
- auto lhs = builder.Parameter(0, ShapeUtil::MakeShape(F32, {10, 5}), "lhs");
- auto rhs = builder.Parameter(1, ShapeUtil::MakeShape(F32, {5, 30}), "rhs");
- auto result = builder.Dot(lhs, rhs);
+ auto lhs = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5}), "lhs");
+ auto rhs = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 30}), "rhs");
+ Dot(lhs, rhs);
// Run HLO cost analysis.
auto hlo_module = BuildHloGraph(&builder);
@@ -159,8 +159,8 @@ TEST_F(HloCostAnalysisTest, MatrixMultiply) {
TEST_F(HloCostAnalysisTest, Map) {
XlaBuilder builder("map");
- auto input = builder.Parameter(0, ShapeUtil::MakeShape(F32, {10}), "in");
- auto result = builder.Map({input}, add_and_exp_, {0});
+ auto input = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10}), "in");
+ Map(&builder, {input}, add_and_exp_, {0});
// Run HLO cost analysis.
auto hlo_module = BuildHloGraph(&builder);
@@ -176,17 +176,17 @@ TEST_F(HloCostAnalysisTest, Map) {
TEST_F(HloCostAnalysisTest, Convolution) {
XlaBuilder builder("convolution");
- auto input = builder.Parameter(
- 0,
+ auto input = Parameter(
+ &builder, 0,
ShapeUtil::MakeShape(F32, {/*p_dim=*/1, /*z_dim=*/1, /*y_dim=*/10,
/*x_dim=*/20}),
"input");
- auto kernel = builder.Parameter(
- 1,
+ auto kernel = Parameter(
+ &builder, 1,
ShapeUtil::MakeShape(F32, {/*p_dim=*/1, /*z_dim=*/1, /*y_dim=*/3,
/*x_dim=*/3}),
"kernel");
- auto result = builder.Conv(input, kernel, {1, 1}, Padding::kValid);
+ Conv(input, kernel, {1, 1}, Padding::kValid);
// Run HLO cost analysis.
auto hlo_module = BuildHloGraph(&builder);
@@ -206,9 +206,8 @@ TEST_F(HloCostAnalysisTest, Convolution) {
TEST_F(HloCostAnalysisTest, Reduce) {
XlaBuilder builder("reduce");
auto input =
- builder.Parameter(0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
- auto result =
- builder.Reduce(input, builder.ConstantR0<float>(0.0f), add_, {1});
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
+ Reduce(input, ConstantR0<float>(&builder, 0.0f), add_, {1});
// Run HLO cost analysis.
auto hlo_module = BuildHloGraph(&builder);
@@ -224,9 +223,9 @@ TEST_F(HloCostAnalysisTest, Reduce) {
TEST_F(HloCostAnalysisTest, ReduceWindow) {
XlaBuilder builder("reduce_window");
auto input =
- builder.Parameter(0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
- auto result = builder.ReduceWindow(input, builder.ConstantR0<float>(0), add_,
- {4, 5}, {4, 5}, Padding::kValid);
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
+ ReduceWindow(input, ConstantR0<float>(&builder, 0), add_, {4, 5}, {4, 5},
+ Padding::kValid);
// Run HLO cost analysis.
auto hlo_module = BuildHloGraph(&builder);
@@ -241,12 +240,11 @@ TEST_F(HloCostAnalysisTest, ReduceWindow) {
TEST_F(HloCostAnalysisTest, SelectAndScatter) {
XlaBuilder builder("select_and_scatter");
auto operand =
- builder.Parameter(0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
auto source =
- builder.Parameter(1, ShapeUtil::MakeShape(F32, {2, 4}), "source");
- auto result =
- builder.SelectAndScatter(operand, gt_, {4, 5}, {4, 5}, Padding::kValid,
- source, builder.ConstantR0<float>(0), add_);
+ Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {2, 4}), "source");
+ SelectAndScatter(operand, gt_, {4, 5}, {4, 5}, Padding::kValid, source,
+ ConstantR0<float>(&builder, 0), add_);
// Run HLO cost analysis.
auto hlo_module = BuildHloGraph(&builder);
@@ -261,7 +259,7 @@ TEST_F(HloCostAnalysisTest, SelectAndScatter) {
TEST_F(HloCostAnalysisTest, Broadcast) {
XlaBuilder b("broadcast");
- b.Broadcast(b.ConstantR0<float>(42), {10, 7});
+ Broadcast(ConstantR0<float>(&b, 42), {10, 7});
auto hlo_module = BuildHloGraph(&b);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
@@ -273,13 +271,12 @@ TEST_F(HloCostAnalysisTest, Broadcast) {
TEST_F(HloCostAnalysisTest, FullyConnectedForward) {
XlaBuilder builder("fully_connected_forward");
auto input =
- builder.Parameter(0, ShapeUtil::MakeShape(F32, {10, 5}), "input");
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5}), "input");
auto weight =
- builder.Parameter(1, ShapeUtil::MakeShape(F32, {5, 20}), "weight");
- auto bias = builder.Parameter(2, ShapeUtil::MakeShape(F32, {20}), "bias");
+ Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 20}), "weight");
+ auto bias = Parameter(&builder, 2, ShapeUtil::MakeShape(F32, {20}), "bias");
// sigmoid(input * weight + bias)
- auto result = builder.Map(
- {builder.Add(builder.Dot(input, weight), bias, {1})}, sigmoid_, {0, 1});
+ Map(&builder, {Add(Dot(input, weight), bias, {1})}, sigmoid_, {0, 1});
// Run HLO cost analysis.
auto hlo_module = BuildHloGraph(&builder);
@@ -297,11 +294,11 @@ TEST_F(HloCostAnalysisTest, MatmulAndConvolutionCanBeTheSameComputation) {
HloCostAnalysis conv_analysis(ShapeSize);
{
XlaBuilder builder("conv_looking_matmul");
- auto lhs = builder.Parameter(0, ShapeUtil::MakeShape(F32, {64, 64, 1, 1}),
- "input");
- auto rhs = builder.Parameter(1, ShapeUtil::MakeShape(F32, {64, 64, 1, 1}),
- "weights");
- builder.Conv(lhs, rhs, {1, 1}, Padding::kSame);
+ auto lhs = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {64, 64, 1, 1}),
+ "input");
+ auto rhs = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {64, 64, 1, 1}),
+ "weights");
+ Conv(lhs, rhs, {1, 1}, Padding::kSame);
auto hlo_module = BuildHloGraph(&builder);
ASSERT_IS_OK(hlo_module->entry_computation()->root_instruction()->Accept(
&conv_analysis));
@@ -311,10 +308,10 @@ TEST_F(HloCostAnalysisTest, MatmulAndConvolutionCanBeTheSameComputation) {
{
XlaBuilder builder("matmul");
auto lhs =
- builder.Parameter(0, ShapeUtil::MakeShape(F32, {64, 64}), "input");
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {64, 64}), "input");
auto rhs =
- builder.Parameter(1, ShapeUtil::MakeShape(F32, {64, 64}), "weights");
- builder.Dot(lhs, rhs);
+ Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {64, 64}), "weights");
+ Dot(lhs, rhs);
auto hlo_module = BuildHloGraph(&builder);
ASSERT_IS_OK(hlo_module->entry_computation()->root_instruction()->Accept(
&matmul_analysis));
@@ -341,13 +338,13 @@ TEST_F(FusionCostAnalysis, LoopFusion) {
// tuple = Tuple({sub, sub, mul, C1})
HloComputation::Builder builder(TestName());
auto c1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2F32Linspace(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace(
/*from=*/0.0f, /*to=*/1.0f, /*rows=*/2, /*cols=*/2)));
auto c2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2F32Linspace(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace(
/*from=*/1.0f, /*to=*/2.0f, /*rows=*/2, /*cols=*/2)));
auto c3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2F32Linspace(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace(
/*from=*/2.0f, /*to=*/3.0f, /*rows=*/2, /*cols=*/2)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kAdd, c1, c2));
@@ -394,9 +391,9 @@ TEST_F(FusionCostAnalysis, NoLayout) {
HloComputation::Builder builder(TestName());
auto c1 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR4FromArray4D(Array4D<float>(2, 3, 4, 5))));
+ LiteralUtil::CreateR4FromArray4D(Array4D<float>(2, 3, 4, 5))));
auto c2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({1, 2, 3})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1, 2, 3})));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(shape_without_layout, c2, {1}));
@@ -419,9 +416,9 @@ TEST_F(HloCostAnalysisTest, TupleCost) {
HloCostAnalysis analysis(ShapeSize);
{
XlaBuilder builder("matmul");
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {123}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(F32, {42}), "y");
- auto tuple = builder.Tuple({x, y});
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {123}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {42}), "y");
+ Tuple(&builder, {x, y});
auto hlo_module = BuildHloGraph(&builder);
ASSERT_IS_OK(
@@ -435,21 +432,21 @@ TEST_F(HloCostAnalysisTest, TupleCost) {
TEST_F(HloCostAnalysisTest, BaseDilatedConvolution) {
XlaBuilder builder("BaseDilatedConvolution");
- auto input = builder.Parameter(
- 0,
+ auto input = Parameter(
+ &builder, 0,
ShapeUtil::MakeShape(F32, {/*p_dim=*/1, /*z_dim=*/1, /*y_dim=*/10,
/*x_dim=*/20}),
"input");
- auto kernel = builder.Parameter(
- 1,
+ auto kernel = Parameter(
+ &builder, 1,
ShapeUtil::MakeShape(F32, {/*p_dim=*/1, /*z_dim=*/1, /*y_dim=*/3,
/*x_dim=*/3}),
"kernel");
- auto result = builder.ConvGeneralDilated(
- input, kernel, /*window_strides=*/{1, 1}, /*padding=*/{{1, 1}, {1, 1}},
- /*lhs_dilation=*/{3, 5}, /*rhs_dilation=*/{7, 11},
- XlaBuilder::CreateDefaultConvDimensionNumbers(2));
+ ConvGeneralDilated(input, kernel, /*window_strides=*/{1, 1},
+ /*padding=*/{{1, 1}, {1, 1}},
+ /*lhs_dilation=*/{3, 5}, /*rhs_dilation=*/{7, 11},
+ XlaBuilder::CreateDefaultConvDimensionNumbers(2));
// Run HLO cost analysis.
auto hlo_module = BuildHloGraph(&builder);
@@ -463,8 +460,8 @@ TEST_F(HloCostAnalysisTest, BaseDilatedConvolution) {
TEST_F(HloCostAnalysisTest, Slice) {
// Test the analysis on a slice.
XlaBuilder builder("slice");
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {2}), "x");
- auto slice = builder.Slice(x, {0}, {1}, {1});
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2}), "x");
+ Slice(x, {0}, {1}, {1});
auto hlo_module = BuildHloGraph(&builder);
// Run HLO cost analysis.
@@ -478,8 +475,8 @@ TEST_F(HloCostAnalysisTest, Slice) {
TEST_F(HloCostAnalysisTest, DynamicSlice) {
   // Test the analysis on a dynamic-slice.
XlaBuilder builder("dynamic-slice");
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {2}), "x");
- auto slice = builder.DynamicSlice(x, builder.ConstantR1<int32>({1}), {1});
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2}), "x");
+ DynamicSlice(x, ConstantR1<int32>(&builder, {1}), {1});
auto hlo_module = BuildHloGraph(&builder);
// Run HLO cost analysis.
@@ -493,9 +490,9 @@ TEST_F(HloCostAnalysisTest, DynamicSlice) {
TEST_F(HloCostAnalysisTest, DynamicUpdateSlice) {
   // Test the analysis on a dynamic-update-slice.
XlaBuilder builder("dynamic-update-slice");
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {2}), "x");
- auto slice = builder.DynamicUpdateSlice(x, builder.ConstantR1<float>({1.0}),
- builder.ConstantR1<int32>({1}));
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2}), "x");
+ DynamicUpdateSlice(x, ConstantR1<float>(&builder, {1.0}),
+ ConstantR1<int32>(&builder, {1}));
auto hlo_module = BuildHloGraph(&builder);
// Run HLO cost analysis.
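
Every hunk in this test applies the second mechanical migration in this patch: XlaBuilder's op methods (builder.Parameter, builder.Add, builder.Dot, ...) became free functions that take the builder explicitly or infer it from an operand. A minimal sketch of the new style:

    #include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // x + 1: Parameter and ConstantR0 need the builder passed explicitly;
    // Add infers it from its operands.
    xla::XlaComputation BuildAddOne() {
      xla::XlaBuilder builder("add_one");
      auto x = xla::Parameter(&builder, 0, xla::ShapeUtil::MakeShape(xla::F32, {}), "x");
      xla::Add(x, xla::ConstantR0<float>(&builder, 1.0f));
      return builder.Build().ConsumeValueOrDie();
    }
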
diff --git a/tensorflow/compiler/xla/service/hlo_creation_utils.cc b/tensorflow/compiler/xla/service/hlo_creation_utils.cc
index 0fb65c845a..90d2be118d 100644
--- a/tensorflow/compiler/xla/service/hlo_creation_utils.cc
+++ b/tensorflow/compiler/xla/service/hlo_creation_utils.cc
@@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/hlo_creation_utils.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/shape_inference.h"
@@ -261,9 +262,9 @@ StatusOr<HloInstruction*> PadVectorWithZeros(HloInstruction* operand,
padding_config_dim.set_edge_padding_high(zeros_to_append);
*padding_config.add_dimensions() = padding_config_dim;
- HloInstruction* zero =
- computation->AddInstruction(HloInstruction::CreateConstant(
- MakeUnique<Literal>(Literal::Zero(operand->shape().element_type()))));
+ HloInstruction* zero = computation->AddInstruction(
+ HloInstruction::CreateConstant(MakeUnique<Literal>(
+ LiteralUtil::Zero(operand->shape().element_type()))));
return MakePadHlo(operand, zero, padding_config);
}
@@ -272,7 +273,7 @@ StatusOr<HloInstruction*> BroadcastZeros(
ArraySlice<int64> broadcast_dimensions) {
HloInstruction* zero =
computation->AddInstruction(HloInstruction::CreateConstant(
- MakeUnique<Literal>(Literal::Zero(element_type))));
+ MakeUnique<Literal>(LiteralUtil::Zero(element_type))));
return MakeBroadcastHlo(zero, /*broadcast_dimensions=*/{},
/*result_shape_bounds=*/broadcast_dimensions);
}
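
Zero literals move the same way; a short sketch with a hypothetical wrapper to keep it self-contained:

    #include "tensorflow/compiler/xla/literal.h"
    #include "tensorflow/compiler/xla/literal_util.h"
    #include "tensorflow/compiler/xla/ptr_util.h"

    // LiteralUtil::Zero returns a Literal by value; the creation utils wrap
    // it in a unique_ptr exactly as above.
    std::unique_ptr<xla::Literal> MakeZeroLiteral(xla::PrimitiveType type) {
      return xla::MakeUnique<xla::Literal>(xla::LiteralUtil::Zero(type));
    }
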
diff --git a/tensorflow/compiler/xla/service/hlo_creation_utils_test.cc b/tensorflow/compiler/xla/service/hlo_creation_utils_test.cc
index 7e7c4f95fe..60d3e71757 100644
--- a/tensorflow/compiler/xla/service/hlo_creation_utils_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_creation_utils_test.cc
@@ -60,8 +60,8 @@ TEST_F(HloCreationUtilsTest, CollapseFirst1Dim) {
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> result_literal,
evaluator.Evaluate<std::unique_ptr<Literal>>(
- *module, {Literal::CreateR1<int32>({3, 4})}));
- CHECK_EQ(*result_literal, *Literal::CreateR1<int32>({3, 4}));
+ *module, {LiteralUtil::CreateR1<int32>({3, 4})}));
+ CHECK_EQ(*result_literal, *LiteralUtil::CreateR1<int32>({3, 4}));
}
TEST_F(HloCreationUtilsTest, CollapseFirst2Dims) {
@@ -82,10 +82,10 @@ TEST_F(HloCreationUtilsTest, CollapseFirst2Dims) {
std::unique_ptr<Literal> result_literal,
evaluator.Evaluate<std::unique_ptr<Literal>>(
*module,
- {Literal::CreateR3<int32>(
+ {LiteralUtil::CreateR3<int32>(
{{{1, 2}, {3, 4}, {5, 6}}, {{-1, -2}, {-3, -4}, {-5, -6}}})}));
CHECK_EQ(*result_literal,
- *Literal::CreateR2<int32>(
+ *LiteralUtil::CreateR2<int32>(
{{1, 2}, {3, 4}, {5, 6}, {-1, -2}, {-3, -4}, {-5, -6}}));
}
@@ -103,10 +103,11 @@ TEST_F(HloCreationUtilsTest, Prepend1DegenerateDim) {
entry_computation->set_root_instruction(with_1_degenerate_dim_prepended);
HloEvaluator evaluator;
- TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> result_literal,
- evaluator.Evaluate<std::unique_ptr<Literal>>(
- *module, {Literal::CreateR1<int32>({9, 10})}));
- CHECK_EQ(*result_literal, *Literal::CreateR2<int32>({{9, 10}}));
+ TF_ASSERT_OK_AND_ASSIGN(
+ std::unique_ptr<Literal> result_literal,
+ evaluator.Evaluate<std::unique_ptr<Literal>>(
+ *module, {LiteralUtil::CreateR1<int32>({9, 10})}));
+ CHECK_EQ(*result_literal, *LiteralUtil::CreateR2<int32>({{9, 10}}));
}
TEST_F(HloCreationUtilsTest, Prepend2DegenerateDims) {
@@ -123,10 +124,11 @@ TEST_F(HloCreationUtilsTest, Prepend2DegenerateDims) {
entry_computation->set_root_instruction(with_2_degenerate_dims_prepended);
HloEvaluator evaluator;
- TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> result_literal,
- evaluator.Evaluate<std::unique_ptr<Literal>>(
- *module, {Literal::CreateR1<int32>({9, 10})}));
- CHECK_EQ(*result_literal, *Literal::CreateR3<int32>({{{9, 10}}}));
+ TF_ASSERT_OK_AND_ASSIGN(
+ std::unique_ptr<Literal> result_literal,
+ evaluator.Evaluate<std::unique_ptr<Literal>>(
+ *module, {LiteralUtil::CreateR1<int32>({9, 10})}));
+ CHECK_EQ(*result_literal, *LiteralUtil::CreateR3<int32>({{{9, 10}}}));
}
TEST_F(HloCreationUtilsTest, Prepend2DegenerateDimsToScalar) {
@@ -145,8 +147,8 @@ TEST_F(HloCreationUtilsTest, Prepend2DegenerateDimsToScalar) {
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> result_literal,
evaluator.Evaluate<std::unique_ptr<Literal>>(
- *module, {Literal::CreateR0<int32>(9)}));
- CHECK_EQ(*result_literal, *Literal::CreateR2<int32>({{9}}));
+ *module, {LiteralUtil::CreateR0<int32>(9)}));
+ CHECK_EQ(*result_literal, *LiteralUtil::CreateR2<int32>({{9}}));
}
TEST_F(HloCreationUtilsTest, ExpandFirstDimInto3Dims) {
@@ -166,9 +168,9 @@ TEST_F(HloCreationUtilsTest, ExpandFirstDimInto3Dims) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<Literal> result_literal,
evaluator.Evaluate<std::unique_ptr<Literal>>(
- *module, {Literal::CreateR1<int32>({1, 2, 3, 4, 5, 6})}));
+ *module, {LiteralUtil::CreateR1<int32>({1, 2, 3, 4, 5, 6})}));
CHECK_EQ(*result_literal,
- *Literal::CreateR3<int32>({{{1, 2}}, {{3, 4}}, {{5, 6}}}));
+ *LiteralUtil::CreateR3<int32>({{{1, 2}}, {{3, 4}}, {{5, 6}}}));
}
TEST_F(HloCreationUtilsTest, PadVectorWithZeros) {
@@ -188,8 +190,8 @@ TEST_F(HloCreationUtilsTest, PadVectorWithZeros) {
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> result_literal,
evaluator.Evaluate<std::unique_ptr<Literal>>(
- *module, {Literal::CreateR1<int32>({3, 4})}));
- CHECK_EQ(*result_literal, *Literal::CreateR1<int32>({0, 0, 0, 3, 4, 0}));
+ *module, {LiteralUtil::CreateR1<int32>({3, 4})}));
+ CHECK_EQ(*result_literal, *LiteralUtil::CreateR1<int32>({0, 0, 0, 3, 4, 0}));
}
TEST_F(HloCreationUtilsTest, BroadcastZeros_S32) {
@@ -209,8 +211,8 @@ TEST_F(HloCreationUtilsTest, BroadcastZeros_S32) {
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> result_literal,
evaluator.Evaluate<std::unique_ptr<Literal>>(
- *module, {Literal::CreateR0<int32>(0)}));
- CHECK_EQ(*result_literal, *Literal::CreateR2<int32>({{0, 0}, {0, 0}}));
+ *module, {LiteralUtil::CreateR0<int32>(0)}));
+ CHECK_EQ(*result_literal, *LiteralUtil::CreateR2<int32>({{0, 0}, {0, 0}}));
}
TEST_F(HloCreationUtilsTest, BroadcastZeros_F32) {
@@ -230,9 +232,9 @@ TEST_F(HloCreationUtilsTest, BroadcastZeros_F32) {
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> result_literal,
evaluator.Evaluate<std::unique_ptr<Literal>>(
- *module, {Literal::CreateR0<float>(0.0f)}));
+ *module, {LiteralUtil::CreateR0<float>(0.0f)}));
CHECK_EQ(*result_literal,
- *Literal::CreateR2<float>({{0.0f, 0.0f}, {0.0f, 0.0f}}));
+ *LiteralUtil::CreateR2<float>({{0.0f, 0.0f}, {0.0f, 0.0f}}));
}
} // namespace
diff --git a/tensorflow/compiler/xla/service/hlo_cse.cc b/tensorflow/compiler/xla/service/hlo_cse.cc
index a0ee889623..06484f4012 100644
--- a/tensorflow/compiler/xla/service/hlo_cse.cc
+++ b/tensorflow/compiler/xla/service/hlo_cse.cc
@@ -24,7 +24,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_domain_map.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -143,10 +143,8 @@ StatusOr<bool> HloCSE::Run(HloModule* module) {
if (instruction->operand_count() == 0) {
continue;
}
- // Skip instructions which have side effects or are a domain (which must
- // not be CSE-ed).
- if (instruction->HasSideEffect() ||
- instruction->opcode() == HloOpcode::kDomain) {
+ // Skip instructions which have side effects.
+ if (instruction->HasSideEffect()) {
continue;
}
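
With the kDomain carve-out gone, the only gates left are the zero-operand skip and side effects, so duplicate kDomain instructions can now be commoned like any other pure op; the new Domain test below exercises exactly that. A sketch of the combined condition, under an illustrative name:

    #include "tensorflow/compiler/xla/service/hlo_instruction.h"

    // Instructions HloCSE::Run skips after this hunk: leaf instructions are
    // handled by the constant-combining pass above, and side-effecting ops
    // are never merged.
    bool SkippedByCse(const xla::HloInstruction* instruction) {
      return instruction->operand_count() == 0 || instruction->HasSideEffect();
    }
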
diff --git a/tensorflow/compiler/xla/service/hlo_cse_test.cc b/tensorflow/compiler/xla/service/hlo_cse_test.cc
index 16db374566..76b9c66651 100644
--- a/tensorflow/compiler/xla/service/hlo_cse_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_cse_test.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -53,9 +53,9 @@ TEST_F(HloCseTest, CombineTwoConstants) {
// Test that two identical constants are commoned.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
@@ -72,7 +72,7 @@ TEST_F(HloCseTest, CombineTwoConstants) {
EXPECT_EQ(42.0f, constant->literal().Get<float>({}));
auto result = ExecuteAndTransfer(std::move(module), {});
- auto expected = Literal::CreateR0<float>(84.0);
+ auto expected = LiteralUtil::CreateR0<float>(84.0);
EXPECT_TRUE(LiteralTestUtil::Near(*expected, *result, ErrorSpec(1e-4)));
}
@@ -81,10 +81,10 @@ TEST_F(HloCseTest, CombineTwoConstantsDifferentLayoutsAndInsensitive) {
// the pass is not layout sensitive.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2WithLayout<float>(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({0, 1}))));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2WithLayout<float>(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({1, 0}))));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
@@ -104,7 +104,7 @@ TEST_F(HloCseTest, CombineTwoConstantsDifferentLayoutsAndInsensitive) {
EXPECT_THAT(add, op::Add(first_operand, first_operand));
auto result = ExecuteAndTransfer(std::move(module), {});
- auto expected = Literal::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}});
+ auto expected = LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}});
EXPECT_TRUE(LiteralTestUtil::Near(*expected, *result, ErrorSpec(1e-4)));
}
@@ -113,10 +113,10 @@ TEST_F(HloCseTest, CombineTwoConstantsDifferentLayoutsAndSensitive) {
// if the pass is layout sensitive.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2WithLayout<float>(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({0, 1}))));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2WithLayout<float>(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({1, 0}))));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
@@ -134,7 +134,7 @@ TEST_F(HloCseTest, CombineTwoConstantsDifferentLayoutsAndSensitive) {
EXPECT_THAT(add, op::Add(constant1, constant2));
auto result = ExecuteAndTransfer(std::move(module), {});
- auto expected = Literal::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}});
+ auto expected = LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}});
EXPECT_TRUE(LiteralTestUtil::Near(*expected, *result, ErrorSpec(1e-4)));
}
@@ -144,20 +144,20 @@ TEST_F(HloCseTest, ConstantsSameValueDifferentType) {
auto builder = HloComputation::Builder(TestName());
std::vector<HloInstruction*> constants;
constants.push_back(builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<uint32>(42))));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint32>(42))));
constants.push_back(builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(42))));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(42))));
constants.push_back(builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<uint64>(42.0))));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint64>(42.0))));
constants.push_back(builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int64>(42.0))));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int64>(42.0))));
constants.push_back(builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<double>(42.0))));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<double>(42.0))));
constants.push_back(builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))));
// Duplicate the float constant to verify something happens.
constants.push_back(builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))));
const Shape shape_r0 = ShapeUtil::MakeShape(F32, {});
for (int64 i = 0; i < constants.size(); ++i) {
@@ -188,13 +188,13 @@ TEST_F(HloCseTest, NonscalarConstants) {
// Test that identical nonscalar constants are merged.
auto builder = HloComputation::Builder(TestName());
auto common_constant1 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto common_constant2 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
// Create a constant which has the same shape but a different value.
auto uncommon_constant =
builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}})));
+ LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}})));
// Tie the constants together with a tuple. This makes it easier to refer to
// the constant instructions via their use.
@@ -223,7 +223,7 @@ TEST_F(HloCseTest, IdenticalInstructions) {
// Test that three identical instructions are commoned.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary(
@@ -253,7 +253,7 @@ TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsSensitive) {
// commoned if the pass is layout sensitive.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
@@ -284,7 +284,7 @@ TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsInsensitive) {
// the pass is layout insensitive.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
@@ -362,7 +362,7 @@ TEST_F(HloCseTest, IdenticalExpressions) {
// The *1 instructions should be merged with the *2 instructions.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto negate1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kNegate, constant));
@@ -400,9 +400,9 @@ TEST_F(HloCseTest, DoNotCombineRng) {
// Test that two RNG ops are not commoned.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto rng1 = builder.AddInstruction(HloInstruction::CreateRng(
ShapeUtil::MakeShape(F32, {}), RandomDistribution::RNG_UNIFORM,
{constant1, constant2}));
@@ -442,9 +442,9 @@ TEST_F(HloCseTest, DoNotCombineCallsToImpureFunctions) {
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(TestName() + "_rng_fun");
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto rng = builder.AddInstruction(HloInstruction::CreateRng(
scalar_shape, RandomDistribution::RNG_UNIFORM, {constant1, constant2}));
auto param = builder.AddInstruction(HloInstruction::CreateParameter(
@@ -459,7 +459,7 @@ TEST_F(HloCseTest, DoNotCombineCallsToImpureFunctions) {
{
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({5.0f})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({5.0f})));
auto rng1 = builder.AddInstruction(
HloInstruction::CreateMap(constant->shape(), {constant}, rng_function));
auto rng2 = builder.AddInstruction(
@@ -521,9 +521,9 @@ TEST_F(HloCseTest, ConstantsSameValueInDifferentDomains) {
// in this case) are not collapsed.
auto builder = HloComputation::Builder(TestName());
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<uint32>(42)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint32>(42)));
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<uint32>(42)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint32>(42)));
auto module = CreateNewModule();
auto computation = module->AddEntryComputation(builder.Build());
@@ -536,5 +536,40 @@ TEST_F(HloCseTest, ConstantsSameValueInDifferentDomains) {
EXPECT_EQ(2, computation->instruction_count());
}
+TEST_F(HloCseTest, Domain) {
+ auto module = ParseHloString(R"(
+HloModule module
+ENTRY %entry {
+ %param = f32[] parameter(0), sharding={maximal device=0}
+ %domain.0 = f32[] domain(%param),
+ domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
+ %domain.1 = f32[] domain(%param),
+ domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
+ %domain.2 = f32[] domain(%param),
+ domain={kind="sharding", entry={maximal device=0}, exit={maximal device=2}}
+ %negate.0 = f32[] negate(%domain.0)
+ %negate.1 = f32[] negate(%domain.1)
+ %negate.2 = f32[] negate(%domain.2)
+ %domain.3 = f32[] domain(%negate.0),
+ domain={kind="sharding", entry={maximal device=1}, exit={maximal device=0}}
+ %domain.4 = f32[] domain(%negate.1),
+ domain={kind="sharding", entry={maximal device=1}, exit={maximal device=0}}
+ %domain.5 = f32[] domain(%negate.2),
+ domain={kind="sharding", entry={maximal device=2}, exit={maximal device=0}}
+ %add = f32[] add(%domain.3, %domain.4)
+ ROOT %sub = f32[] subtract(%add, %domain.5)
+})")
+ .ValueOrDie();
+
+ HloCSE cse(/*is_layout_sensitive=*/false);
+ EXPECT_TRUE(cse.Run(module.get()).ValueOrDie());
+ const HloInstruction* sub = module->entry_computation()->root_instruction();
+ const HloInstruction* add = sub->operand(0);
+ EXPECT_EQ(add->operand(0), add->operand(1));
+ EXPECT_NE(add->operand(0), sub->operand(1));
+ EXPECT_NE(add->operand(1), sub->operand(1));
+}
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc
index f529c0dad7..de1a32d8bd 100644
--- a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc
+++ b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc
@@ -398,18 +398,17 @@ bool HloDataflowAnalysis::UpdateSendValueSet(HloInstruction* send) {
bool HloDataflowAnalysis::UpdateRecvDoneValueSet(HloInstruction* recv_done) {
CHECK_EQ(recv_done->opcode(), HloOpcode::kRecvDone);
bool changed = false;
- // RecvDone forwards the operand value at {0} to the output.
+ // RecvDone forwards the operand value at {0} to element {0} of its output.
for (auto& pair : GetInstructionValueSet(recv_done)) {
ShapeIndex& index = pair.first;
HloValueSet& value_set = pair.second;
- ShapeIndex operand_index = {0};
- for (int64 i : index) {
- operand_index.push_back(i);
+ if (index.empty() || index[0] != 0) {
+ continue;
}
const HloValueSet& operand_value_set =
- GetValueSet(recv_done->operand(0), operand_index);
+ GetValueSet(recv_done->operand(0), index);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
@@ -466,6 +465,24 @@ bool HloDataflowAnalysis::UpdateCopyValueSet(HloInstruction* copy) {
return changed;
}
+bool HloDataflowAnalysis::UpdateDomainValueSet(HloInstruction* domain) {
+  // Domain instructions just forward their operand. Given that domains can
+  // have a tuple operand, we iterate through its indices, as for copies.
+  // Unlike copies, though, we also propagate the top-level value.
+ CHECK_EQ(domain->opcode(), HloOpcode::kDomain);
+ bool changed = false;
+ for (auto& pair : GetInstructionValueSet(domain)) {
+ const ShapeIndex& index = pair.first;
+ HloValueSet& value_set = pair.second;
+ HloValueSet& operand_value_set = GetValueSet(domain->operand(0), index);
+ if (value_set != operand_value_set) {
+ value_set = operand_value_set;
+ changed = true;
+ }
+ }
+ return changed;
+}
+
bool HloDataflowAnalysis::UpdateGetTupleElementValueSet(HloInstruction* gte) {
CHECK_EQ(gte->opcode(), HloOpcode::kGetTupleElement);
bool changed = false;
@@ -560,17 +577,17 @@ bool HloDataflowAnalysis::UpdateParameterValueSet(HloInstruction* parameter) {
}
}
-bool HloDataflowAnalysis::UpdateSelectValueSet(HloInstruction* select) {
- CHECK_EQ(select->opcode(), HloOpcode::kSelect);
- // A phi value is not defined at a kSelect instruction because kSelect does
- // not create a new value. Rather it forwards a value from its operands. This
- // contrasts with kWhile instruction (which does define a phi value) which has
- // in-place update semantics.
+bool HloDataflowAnalysis::UpdateTupleSelectValueSet(HloInstruction* select) {
+ CHECK_EQ(select->opcode(), HloOpcode::kTupleSelect);
+  // A phi value is not defined at a kTupleSelect instruction because
+  // kTupleSelect does not create a new value; rather, it forwards a value
+  // from its operands. This contrasts with the kWhile instruction, which
+  // does define a phi value and has in-place update semantics.
bool changed = false;
for (auto& pair : GetInstructionValueSet(select)) {
const ShapeIndex& index = pair.first;
if (index.empty()) {
- // kSelect copies (not forwards) the top-level value.
+ // kTupleSelect copies (not forwards) the top-level value.
continue;
}
HloValueSet& value_set = pair.second;
@@ -626,12 +643,14 @@ bool HloDataflowAnalysis::UpdateInstructionValueSet(
return UpdateBitcastValueSet(instruction);
case HloOpcode::kSlice:
return UpdateSliceValueSet(instruction);
+ case HloOpcode::kDomain:
+ return UpdateDomainValueSet(instruction);
case HloOpcode::kCopy:
return UpdateCopyValueSet(instruction);
case HloOpcode::kGetTupleElement:
return UpdateGetTupleElementValueSet(instruction);
- case HloOpcode::kSelect:
- return UpdateSelectValueSet(instruction);
+ case HloOpcode::kTupleSelect:
+ return UpdateTupleSelectValueSet(instruction);
case HloOpcode::kTuple:
return UpdateTupleValueSet(instruction);
case HloOpcode::kParameter:
@@ -804,6 +823,7 @@ Status HloDataflowAnalysis::InitializeInstructionValueSets() {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kGetTupleElement:
+ case HloOpcode::kDomain:
// These instructions define no values. The values in their output
// flow from their operands or from cross computation dataflow.
break;
@@ -829,21 +849,25 @@ Status HloDataflowAnalysis::InitializeInstructionValueSets() {
}
break;
case HloOpcode::kCopy:
- case HloOpcode::kSelect:
+ case HloOpcode::kTupleSelect:
case HloOpcode::kTuple:
// These instructions only define their top-level values. Any other
// values flow from their operands.
define_top_level_only();
break;
case HloOpcode::kRecvDone:
- // RecvDone aliases its input tuple element {0}, therefore does not
- // define any values.
+ // RecvDone produces a two-element tuple. Element zero aliases its
+ // input tuple element {0}; element one is a token.
+ define_value_at(/*index=*/{});
+ define_value_at(/*index=*/{1});
break;
case HloOpcode::kSend:
- // Send produces a tuple of {aliased operand, U32 context}, therefore
- // only defines the top-level tuple and the tuple element at {1}.
+ // Send produces a tuple of {aliased operand, U32 context, token},
+ // therefore only defines the top-level tuple and the tuple elements
+ // at {1} and {2}.
define_value_at(/*index=*/{});
define_value_at(/*index=*/{1});
+ define_value_at(/*index=*/{2});
break;
default:
define_all_values();
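
The Send/RecvDone edits track the token plumbing introduced alongside kAfterAll: both ops grew a trailing token element. A sketch of the tuple shapes implied by the comments above (the data shape is ours, and ShapeUtil::MakeTokenShape is assumed from the same release):

    #include "tensorflow/compiler/xla/shape_util.h"

    // Send:     (data, u32[] context, token[]) -- defines {}, {1}, {2}.
    // RecvDone: (data, token[])                -- defines {}, {1}; element
    //           {0} aliases the received buffer.
    void TokenTupleShapes() {
      const xla::Shape data = xla::ShapeUtil::MakeShape(xla::F32, {8});
      const xla::Shape send_shape = xla::ShapeUtil::MakeTupleShape(
          {data, xla::ShapeUtil::MakeShape(xla::U32, {}),
           xla::ShapeUtil::MakeTokenShape()});
      const xla::Shape recv_done_shape =
          xla::ShapeUtil::MakeTupleShape({data, xla::ShapeUtil::MakeTokenShape()});
      (void)send_shape;
      (void)recv_done_shape;
    }
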
diff --git a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h
index 3d2d5baa77..f4abc7a7c7 100644
--- a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h
+++ b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h
@@ -185,10 +185,11 @@ class HloDataflowAnalysis {
bool UpdateCallValueSet(HloInstruction* call);
bool UpdateConditionalValueSet(HloInstruction* conditional);
bool UpdateCopyValueSet(HloInstruction* copy);
+ bool UpdateDomainValueSet(HloInstruction* domain);
bool UpdateGetTupleElementValueSet(HloInstruction* gte);
bool UpdateParameterValueSet(HloInstruction* parameter);
bool UpdateRecvDoneValueSet(HloInstruction* recv_done);
- bool UpdateSelectValueSet(HloInstruction* select);
+ bool UpdateTupleSelectValueSet(HloInstruction* select);
bool UpdateSendValueSet(HloInstruction* send);
bool UpdateTupleValueSet(HloInstruction* tuple);
bool UpdateWhileValueSet(HloInstruction* xla_while);
diff --git a/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc b/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc
index 0ea8bdcab6..37bc2d2c9d 100644
--- a/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc
@@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_dataflow_analysis.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_graph_dumper.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"
@@ -101,9 +101,9 @@ TEST_P(HloDataflowAnalysisTest, BinaryOperation) {
// Test the dataflow for a simple binary operation (Add).
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, constant1, constant2));
module_->AddEntryComputation(builder.Build());
@@ -198,9 +198,9 @@ TEST_P(HloDataflowAnalysisTest, NestedTuple) {
// Verify the dataflow through a nested tuple.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto nested_tuple = builder.AddInstruction(
@@ -259,9 +259,9 @@ TEST_P(HloDataflowAnalysisTest, SingleCall) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, called_computation));
module_->AddEntryComputation(builder.Build());
@@ -308,9 +308,9 @@ TEST_P(HloDataflowAnalysisTest, ComputationCalledTwiceWithSameArguments) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call1 = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, called_computation));
auto call2 = builder.AddInstruction(HloInstruction::CreateCall(
@@ -362,9 +362,9 @@ TEST_P(HloDataflowAnalysisTest, ComputationCalledTwiceWithDifferentArguments) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call1 = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, called_computation));
auto call2 = builder.AddInstruction(HloInstruction::CreateCall(
@@ -426,9 +426,9 @@ TEST_P(HloDataflowAnalysisTest, NestedCalls) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, outer_computation));
module_->AddEntryComputation(builder.Build());
@@ -493,15 +493,15 @@ TEST_P(HloDataflowAnalysisTest, SingleWhile) {
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto cond_constant = cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while = builder.AddInstruction(
@@ -594,15 +594,15 @@ TEST_P(HloDataflowAnalysisTest, SequentialWhiles) {
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while0 = builder.AddInstruction(
@@ -653,7 +653,7 @@ TEST_P(HloDataflowAnalysisTest, NestedWhiles) {
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
@@ -691,9 +691,9 @@ TEST_P(HloDataflowAnalysisTest, NestedWhiles) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto entry_while = builder.AddInstruction(
@@ -780,15 +780,15 @@ TEST_P(HloDataflowAnalysisTest, SwizzlingWhile) {
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while = builder.AddInstruction(
@@ -840,11 +840,11 @@ TEST_P(HloDataflowAnalysisTest, ArraySelect) {
// Test a kSelect of an array value.
auto builder = HloComputation::Builder(TestName());
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto select = builder.AddInstruction(HloInstruction::CreateTernary(
scalar_shape_, HloOpcode::kSelect, pred, constant1, constant2));
@@ -860,19 +860,18 @@ TEST_P(HloDataflowAnalysisTest, ArraySelect) {
}
TEST_P(HloDataflowAnalysisTest, TupleSelect) {
- // Test a kSelect of a tuple value. Non-top-level element flow through the
- // instruction.
+ // Test a kTupleSelect. Non-top-level elements flow through the instruction.
auto builder = HloComputation::Builder(TestName());
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto constant3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
auto constant4 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(4.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(4.0)));
auto tuple1 =
builder.AddInstruction(HloInstruction::CreateTuple({constant1}));
auto tuple2 =
@@ -883,20 +882,20 @@ TEST_P(HloDataflowAnalysisTest, TupleSelect) {
builder.AddInstruction(HloInstruction::CreateTuple({constant4}));
const Shape tuple_shape = tuple1->shape();
auto select11 = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple_shape, HloOpcode::kSelect, pred, tuple1, tuple1));
+ tuple_shape, HloOpcode::kTupleSelect, pred, tuple1, tuple1));
auto select12 = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple_shape, HloOpcode::kSelect, pred, tuple1, tuple2));
+ tuple_shape, HloOpcode::kTupleSelect, pred, tuple1, tuple2));
auto select34 = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple_shape, HloOpcode::kSelect, pred, tuple3, tuple4));
+ tuple_shape, HloOpcode::kTupleSelect, pred, tuple3, tuple4));
auto select1234 = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple_shape, HloOpcode::kSelect, pred, select12, select34));
+ tuple_shape, HloOpcode::kTupleSelect, pred, select12, select34));
module_->AddEntryComputation(builder.Build());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
- // Top-level value is always defined by a kSelect.
+ // Top-level value is always defined by a kTupleSelect.
EXPECT_TRUE(analysis.ValueIsDefinedAt(select11));
EXPECT_TRUE(analysis.ValueIsDefinedAt(select12));
EXPECT_TRUE(analysis.ValueIsDefinedAt(select34));
@@ -937,20 +936,20 @@ TEST_P(HloDataflowAnalysisTest, TupleSelect) {
}
TEST_P(HloDataflowAnalysisTest, NestedTupleSelect) {
- // Test kSelect of a nested tuple.
+ // Test kTupleSelect of a nested tuple.
auto builder = HloComputation::Builder(TestName());
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto constant3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
auto constant4 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(4.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(4.0)));
auto constant5 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(5.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(5.0)));
auto inner_tuple1 = builder.AddInstruction(
HloInstruction::CreateTuple({constant2, constant3}));
auto tuple1 = builder.AddInstruction(
@@ -960,7 +959,7 @@ TEST_P(HloDataflowAnalysisTest, NestedTupleSelect) {
auto tuple2 = builder.AddInstruction(
HloInstruction::CreateTuple({constant4, inner_tuple2}));
auto select = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple1->shape(), HloOpcode::kSelect, pred, tuple1, tuple2));
+ tuple1->shape(), HloOpcode::kTupleSelect, pred, tuple1, tuple2));
module_->AddEntryComputation(builder.Build());
@@ -983,7 +982,7 @@ TEST_P(HloDataflowAnalysisTest, NestedTupleSelect) {
}
TEST_P(HloDataflowAnalysisTest, TupleSelectToWhile) {
- // Test a tuple-shaped kSelect feeding a kWhile instruction. HLO:
+ // Test a tuple-shaped kTupleSelect feeding a kWhile instruction. HLO:
//
// body((F32[], F32[]) %tuple_param):
// %add = Add(%tuple_param{0}, %tuple_param{1})
@@ -1026,24 +1025,24 @@ TEST_P(HloDataflowAnalysisTest, TupleSelectToWhile) {
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto constant3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
auto tuple1 =
builder.AddInstruction(HloInstruction::CreateTuple({constant1}));
auto tuple2 =
builder.AddInstruction(HloInstruction::CreateTuple({constant2}));
auto select = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple1->shape(), HloOpcode::kSelect, pred, tuple1, tuple2));
+ tuple1->shape(), HloOpcode::kTupleSelect, pred, tuple1, tuple2));
auto gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, select, 0));
auto tuple =
@@ -1089,7 +1088,7 @@ TEST_P(HloDataflowAnalysisTest, BitcastDefinesValue) {
// Test the bitcast_defines_value flag to the dataflow analysis.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto bitcast = builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kBitcast, constant));
@@ -1158,44 +1157,50 @@ TEST_P(HloDataflowAnalysisTest, SendAndSendDone) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
+ auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto send = builder.AddInstruction(
- HloInstruction::CreateSend(param, /*channel_id=*/0));
+ HloInstruction::CreateSend(param, token, /*channel_id=*/0));
auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));
module_->AddEntryComputation(builder.Build());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
- EXPECT_EQ(analysis.values().size(), 4);
+ EXPECT_EQ(analysis.values().size(), 6);
EXPECT_TRUE(analysis.ValueIsDefinedAt(param));
EXPECT_TRUE(analysis.ValueIsDefinedAt(send, /*index=*/{}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(send, /*index=*/{0}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(send, /*index=*/{1}));
+ EXPECT_TRUE(analysis.ValueIsDefinedAt(send, /*index=*/{2}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(send_done));
EXPECT_THAT(HloValuesAt(send, /*index=*/{0}),
UnorderedElementsAre(analysis.GetValueDefinedAt(param)));
}
TEST_P(HloDataflowAnalysisTest, RecvAndRecvDone) {
- // Test that a RecvDone forwards its operand tuple element at {0} to the
- // output.
+ // Test that a RecvDone forwards its operand tuple element at {0} to element
+ // {0} of the output.
auto builder = HloComputation::Builder(TestName());
+ auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto recv = builder.AddInstruction(
- HloInstruction::CreateRecv(scalar_shape_, /*channel_id=*/0));
+ HloInstruction::CreateRecv(scalar_shape_, token, /*channel_id=*/0));
auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
module_->AddEntryComputation(builder.Build());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
- EXPECT_EQ(analysis.values().size(), 3);
+ EXPECT_EQ(analysis.values().size(), 7);
EXPECT_TRUE(analysis.ValueIsDefinedAt(recv, /*index=*/{}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(recv, /*index=*/{0}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(recv, /*index=*/{1}));
- EXPECT_FALSE(analysis.ValueIsDefinedAt(recv_done));
- EXPECT_THAT(HloValuesAt(recv_done),
+ EXPECT_TRUE(analysis.ValueIsDefinedAt(recv, /*index=*/{2}));
+ EXPECT_TRUE(analysis.ValueIsDefinedAt(recv_done, /*index=*/{}));
+ EXPECT_FALSE(analysis.ValueIsDefinedAt(recv_done, /*index=*/{0}));
+ EXPECT_TRUE(analysis.ValueIsDefinedAt(recv_done, /*index=*/{1}));
+ EXPECT_THAT(HloValuesAt(recv_done, /*index=*/{0}),
UnorderedElementsAre(analysis.GetValueDefinedAt(recv, {0})));
EXPECT_TRUE(
analysis.GetValueDefinedAt(recv, /*index=*/{0}).live_out_of_module());
@@ -1304,13 +1309,13 @@ TEST_P(HloDataflowAnalysisTest, WhileParameters_Sequential) {
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "body_param"));
auto constant = body_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto exp = body_builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kExp, constant));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, exp, body_param));
auto dead_constant = body_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto dead_negate = body_builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kNegate, dead_constant));
HloComputation* body = module_->AddEmbeddedComputation(
@@ -1320,7 +1325,7 @@ TEST_P(HloDataflowAnalysisTest, WhileParameters_Sequential) {
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "cond_param"));
auto cond_constant = cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
@@ -1571,11 +1576,11 @@ TEST_P(HloDataflowAnalysisTest, ConditionalWithIdentity) {
auto builder = HloComputation::Builder(TestName());
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(56.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.0f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(12.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.0f)));
auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(
scalar_shape_, pred, constant1, true_computation, constant2,
false_computation));
@@ -1662,11 +1667,11 @@ TEST_P(HloDataflowAnalysisTest, ConditionalTakingTupleOperand) {
auto builder = HloComputation::Builder(TestName());
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(56.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.0f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(12.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.0f)));
auto tuple_operand = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(
@@ -1792,15 +1797,15 @@ TEST_P(HloDataflowAnalysisTest, NestedConditionals) {
// Build entry computation.
auto builder = HloComputation::Builder(TestName());
auto pred1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
auto pred2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.2f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.2f)));
auto constant3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(3.3f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.3f)));
auto tuple_operand = builder.AddInstruction(
HloInstruction::CreateTuple({pred2, constant1, constant2}));
auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(
@@ -1938,9 +1943,9 @@ TEST_F(DoesNotUseOperandBufferTest, FusedDynamicUpdateSlice) {
// Create a DynamicUpdateSlice instruction of tuple element 1.
auto starts = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({2})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({2})));
auto update = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({2.f, 2.f, 2.f})));
+ LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f})));
auto dynamic_update_slice =
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape, gte1, update, starts));
@@ -2043,7 +2048,7 @@ TEST_F(CanShareOperandBufferWithUserTest,
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto operand = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape, one, {1}));
@@ -2071,7 +2076,7 @@ TEST_F(CanShareOperandBufferWithUserTest,
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape, "param0"));
auto index = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int64>({0, 0})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int64>({0, 0})));
auto ds = builder.AddInstruction(
HloInstruction::CreateDynamicSlice(slice_shape, param, index, {1, 2, 2}));
@@ -2139,9 +2144,9 @@ TEST_F(CanShareOperandBufferWithUserTest, FusedDynamicUpdateSlice) {
// Create a DynamicUpdateSlice instruction of tuple element 1.
auto starts = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({2})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({2})));
auto update = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({2.f, 2.f, 2.f})));
+ LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f})));
auto dynamic_update_slice =
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape, gte1, update, starts));
@@ -2179,9 +2184,9 @@ TEST_F(CanShareOperandBufferWithUserTest,
// Create a DynamicUpdateSlice instruction of tuple element 1.
auto starts = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({2})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({2})));
auto update = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({2.f, 2.f, 2.f})));
+ LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f})));
auto dynamic_update_slice =
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape_bf16, convert1, update, starts));
@@ -2232,9 +2237,9 @@ TEST_F(CanShareOperandBufferWithUserTest, FusedDotAdd) {
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto a = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}})));
auto b = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
+ LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
@@ -2243,7 +2248,7 @@ TEST_F(CanShareOperandBufferWithUserTest, FusedDotAdd) {
HloInstruction::CreateDot(data_shape, a, b, dot_dnums));
auto one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto add_operand = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape, one, {1}));
@@ -2265,7 +2270,7 @@ TEST_F(CanShareOperandBufferWithUserTest, OutputFusionCantAliasOperandBuffer) {
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto operand = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape, one, {1}));
@@ -2273,7 +2278,7 @@ TEST_F(CanShareOperandBufferWithUserTest, OutputFusionCantAliasOperandBuffer) {
HloInstruction::CreateReverse(data_shape, operand, {0, 1}));
auto two = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
+ LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(data_shape, HloOpcode::kAdd, reverse, two));
@@ -2293,13 +2298,13 @@ TEST_F(CanShareOperandBufferWithUserTest, FusionCanShareBufferCustomized) {
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto operand = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape, one, {1}));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape, HloOpcode::kMultiply, operand, operand));
auto two = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
+ LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(data_shape, HloOpcode::kAdd, mul, two));
@@ -2365,7 +2370,7 @@ TEST_F(CanShareOperandBufferWithUserTest, CallToComputationWithFusionRoot) {
auto sub_param = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "sub_param"));
auto one = sub_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto ones = sub_builder.AddInstruction(
HloInstruction::CreateBroadcast(shape, one, {1}));
auto add = sub_builder.AddInstruction(
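The bulk of the churn in this test file is mechanical: the CreateR0/CreateR1/CreateR2 literal factories moved from Literal to LiteralUtil. A minimal before/after sketch of the renamed call, using only names that appear in the hunks above (nothing else about instruction building changes):

  // Old spelling (removed above):
  //   HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))
  // New spelling (added above):
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));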
diff --git a/tensorflow/compiler/xla/service/hlo_dce_test.cc b/tensorflow/compiler/xla/service/hlo_dce_test.cc
index 5a56607a66..26e3736e01 100644
--- a/tensorflow/compiler/xla/service/hlo_dce_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_dce_test.cc
@@ -53,9 +53,9 @@ TEST_F(HloDceTest, NoDeadCode) {
// Verify that no dead code is removed from a computation with no dead code.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(123.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f)));
builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
@@ -74,20 +74,21 @@ TEST_F(HloDceTest, InstructionsWithSideEffect) {
// Verify that side-effect instructions (Send in this test) are not removed.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
+ auto token = builder.AddInstruction(HloInstruction::CreateToken());
builder.AddInstruction(
- HloInstruction::CreateSend(constant, /*channel_id=*/0));
+ HloInstruction::CreateSend(constant, token, /*channel_id=*/0));
builder.AddInstruction(HloInstruction::CreateTuple({}));
auto module = CreateNewModule();
auto computation = module->AddEntryComputation(builder.Build());
- EXPECT_EQ(3, computation->instruction_count());
+ EXPECT_EQ(4, computation->instruction_count());
HloDCE dce;
EXPECT_FALSE(dce.Run(module.get()).ValueOrDie());
- EXPECT_EQ(3, computation->instruction_count());
+ EXPECT_EQ(4, computation->instruction_count());
}
TEST_F(HloDceTest, DeadParameters) {
@@ -126,9 +127,9 @@ TEST_F(HloDceTest, ControlDependencies) {
// Verify that instructions with control dependencies are not removed.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(123.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f)));
// Create two dead instructions: a negate and an add.
auto dead_negate = builder.AddInstruction(HloInstruction::CreateUnary(
@@ -223,7 +224,7 @@ TEST_F(HloDceTest, CalledComputationWithSideEffect) {
auto param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "cond_param"));
auto constant = cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
cond_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kLt, param, constant));
}
@@ -234,9 +235,9 @@ TEST_F(HloDceTest, CalledComputationWithSideEffect) {
{
auto param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
-
- auto infeed =
- body_builder.AddInstruction(HloInstruction::CreateInfeed(shape, ""));
+ auto token = body_builder.AddInstruction(HloInstruction::CreateToken());
+ auto infeed = body_builder.AddInstruction(
+ HloInstruction::CreateInfeed(shape, token, ""));
body_builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param, infeed));
}
@@ -278,8 +279,10 @@ TEST_F(HloDceTest, CalledComputationWithNestedSideEffect) {
{
auto param = nested_callee_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
+ auto token =
+ nested_callee_builder.AddInstruction(HloInstruction::CreateToken());
nested_callee_builder.AddInstruction(
- HloInstruction::CreateOutfeed(shape, param, ""));
+ HloInstruction::CreateOutfeed(shape, param, token, ""));
}
auto nested_called_computation =
module->AddEmbeddedComputation(nested_callee_builder.Build());
@@ -342,12 +345,12 @@ TEST_F(HloDceTest, RemoveDeadSubcomputation) {
builder.AddInstruction(HloInstruction::CreateParameter(
/*parameter_number=*/0, ShapeUtil::MakeShape(F32, {100}), "param0")),
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0))),
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))),
/*dimensions_to_reduce=*/{0}, reduce_subcomp));
// Add another instruction as the root of the computation.
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
module->AddEntryComputation(builder.Build());
EXPECT_EQ(module->MakeComputationPostOrder().size(), 2);
@@ -383,7 +386,7 @@ TEST_F(HloDceTest, KeepUsedSubcomputation) {
builder.AddInstruction(HloInstruction::CreateParameter(
/*parameter_number=*/0, ShapeUtil::MakeShape(F32, {100}), "param0")),
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0))),
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))),
/*dimensions_to_reduce=*/{0}, reduce_subcomp));
// Add another instruction as the root of the computation that also uses
@@ -393,7 +396,7 @@ TEST_F(HloDceTest, KeepUsedSubcomputation) {
builder.AddInstruction(HloInstruction::CreateParameter(
/*parameter_number=*/1, ShapeUtil::MakeShape(F32, {100}), "param1")),
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0))),
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))),
/*dimensions_to_reduce=*/{0}, reduce_subcomp));
module->AddEntryComputation(builder.Build());
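These DCE tests also pick up the token-threading change: side-effecting instructions (Send, Infeed, Outfeed) now take an explicit token operand. A minimal sketch using only the factory calls shown in the hunks above ('builder' and 'constant' stand in for the surrounding test fixtures):

  // A token is created first and threaded into the side-effecting op.
  auto token = builder.AddInstruction(HloInstruction::CreateToken());
  auto send = builder.AddInstruction(
      HloInstruction::CreateSend(constant, token, /*channel_id=*/0));
  builder.AddInstruction(HloInstruction::CreateSendDone(send));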
diff --git a/tensorflow/compiler/xla/service/hlo_domain_map.cc b/tensorflow/compiler/xla/service/hlo_domain_map.cc
index ebd5adb5d5..9e096320db 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_map.cc
+++ b/tensorflow/compiler/xla/service/hlo_domain_map.cc
@@ -41,11 +41,15 @@ namespace xla {
bool HloDomainMap::InSameDomain(HloInstruction* instruction1,
HloInstruction* instruction2) const {
- int64 domain_id1 = FindOrDefault(instruction_to_domain_, instruction1, -1);
- int64 domain_id2 = FindOrDefault(instruction_to_domain_, instruction2, -1);
+ int64 domain_id1 = GetDomainId(instruction1);
+ int64 domain_id2 = GetDomainId(instruction2);
return domain_id1 >= 0 && domain_id1 == domain_id2;
}
+int64 HloDomainMap::GetDomainId(HloInstruction* instruction) const {
+ return FindOrDefault(instruction_to_domain_, instruction, -1);
+}
+
Status HloDomainMap::TryProcessEmptyDomain(HloInstruction* instruction) {
TF_RET_CHECK(instruction->opcode() == HloOpcode::kDomain);
// We only check operands, so we are sure to not process the empty domain from
@@ -58,6 +62,11 @@ Status HloDomainMap::TryProcessEmptyDomain(HloInstruction* instruction) {
TF_RETURN_IF_ERROR(InsertDomain(std::move(domain)));
}
}
+ if (instruction == instruction->parent()->root_instruction()) {
+ auto domain = MakeUnique<DomainMetadata::Domain>();
+ domain->enter_domains.insert(instruction);
+ TF_RETURN_IF_ERROR(InsertDomain(std::move(domain)));
+ }
return Status::OK();
}
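The refactoring above pulls the domain lookup into a reusable GetDomainId, with -1 as the "not in any domain" sentinel. A sketch of the resulting semantics, assuming a populated HloDomainMap* domain_map and two HloInstruction* values:

  // Equivalent to domain_map->InSameDomain(instr1, instr2):
  int64 id1 = domain_map->GetDomainId(instr1);  // -1 if instr1 is in no domain
  int64 id2 = domain_map->GetDomainId(instr2);
  bool same_domain = id1 >= 0 && id1 == id2;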
diff --git a/tensorflow/compiler/xla/service/hlo_domain_map.h b/tensorflow/compiler/xla/service/hlo_domain_map.h
index e62ef763fb..1ca7159725 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_map.h
+++ b/tensorflow/compiler/xla/service/hlo_domain_map.h
@@ -65,6 +65,10 @@ class HloDomainMap {
// currently processing.
bool IsDomainInstruction(HloInstruction* instruction) const;
+ // Retrieves the domain identifier of the instruction, or -1 if the
+ // instruction is not found within any domain.
+ int64 GetDomainId(HloInstruction* instruction) const;
+
private:
HloDomainMap(string domain_kind) : domain_kind_(std::move(domain_kind)) {}
diff --git a/tensorflow/compiler/xla/service/hlo_domain_metadata.h b/tensorflow/compiler/xla/service/hlo_domain_metadata.h
index aa0308100a..f855f2a1fc 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_metadata.h
+++ b/tensorflow/compiler/xla/service/hlo_domain_metadata.h
@@ -71,12 +71,6 @@ class DomainMetadata {
// Returns a string representation of the metadata.
virtual string ToString() const = 0;
-
- // Given a reachable set (the set of instructions which are reachable from
- // each other via user/operand pathways, without crossing a kDomain
- // instruciton), makes sure that all of them have metadata attributes which
- // are coherent with this metadata object.
- virtual Status NormalizeInstructions(const Domain& domain) const = 0;
};
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_domain_remover.cc b/tensorflow/compiler/xla/service/hlo_domain_remover.cc
index 1d06040b0e..67fad0769f 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_remover.cc
+++ b/tensorflow/compiler/xla/service/hlo_domain_remover.cc
@@ -16,8 +16,8 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_domain_remover.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
-#include "tensorflow/compiler/xla/service/hlo_domain_isolator.h"
#include "tensorflow/compiler/xla/service/hlo_domain_map.h"
+#include "tensorflow/compiler/xla/service/hlo_domain_verifier.h"
#include "tensorflow/compiler/xla/service/hlo_graph_dumper.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
@@ -43,54 +43,16 @@ class HloDomainRemover::RunContext {
Status HloDomainRemover::RunContext::VerifyAndNormalizeDomain(
const DomainMetadata::Domain& domain) {
- // Verify that the whole kDomain frontier bounding the instruction reach set,
- // has matching metadata.
- // A kDomain instruction has two sides of metadata, a user facing and an
- // operand facing.
- // A reachable instruction set can make contact with a kDomain instruction on
- // a user facing side (the kDomain is operand of the instruction), or on a
- // operand facing side (the kDomain is user of the instruction).
- // And depending on the contact side, the proper metadata object
- // (user_side_metadata() vs. operand_side_metadata()) needs to be used for
- // consistency checks.
- const DomainMetadata* ref_metadata = nullptr;
- VLOG(4) << "Reach set:";
- for (HloInstruction* instruction : domain.instructions) {
- VLOG(4) << " " << instruction->name();
- }
- VLOG(4) << " Domains:";
- for (HloInstruction* instruction : domain.enter_domains) {
- const DomainMetadata& meta = instruction->user_side_metadata();
- VLOG(4) << " User side: " << instruction->name();
- VLOG(4) << " " << meta.ToString();
- if (ref_metadata == nullptr) {
- ref_metadata = &meta;
- } else {
- TF_RET_CHECK(meta.Matches(*ref_metadata))
- << "Metadata mismatch at instruction " << instruction->name() << " : "
- << meta.ToString() << " vs " << ref_metadata->ToString();
- }
- }
- for (HloInstruction* instruction : domain.exit_domains) {
- const DomainMetadata& meta = instruction->operand_side_metadata();
- VLOG(4) << " Operand side: " << instruction->name();
- VLOG(4) << " " << meta.ToString();
- if (ref_metadata == nullptr) {
- ref_metadata = &meta;
- } else {
- TF_RET_CHECK(meta.Matches(*ref_metadata))
- << "Metadata mismatch at instruction " << instruction->name() << " : "
- << meta.ToString() << " vs " << ref_metadata->ToString();
- }
- }
+ TF_ASSIGN_OR_RETURN(const DomainMetadata* ref_metadata,
+ HloDomainVerifier::VerifyDomain(domain));
if (ref_metadata != nullptr) {
VLOG(4) << "Applying domain normalization: " << ref_metadata->ToString();
- TF_RETURN_IF_ERROR(ref_metadata->NormalizeInstructions(domain));
+ TF_RETURN_IF_ERROR(remover_->normalizer_(domain, ref_metadata));
} else {
// No kDomain instruction was present within this domain, so call the
// generic normalization functions and have them apply their heuristic.
VLOG(2) << "Applying domain-less normalization";
- TF_RETURN_IF_ERROR(remover_->normalizer_(domain));
+ TF_RETURN_IF_ERROR(remover_->normalizer_(domain, nullptr));
}
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/hlo_domain_remover.h b/tensorflow/compiler/xla/service/hlo_domain_remover.h
index 0c71dd34fd..c859e05f02 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_remover.h
+++ b/tensorflow/compiler/xla/service/hlo_domain_remover.h
@@ -35,9 +35,10 @@ class HloDomainRemover : public HloPassInterface {
// instructions in it with the same attributes (i.e., sharding), a normalizer
// function is tasked with applying attribute normalization on the instructions
// within such a domain.
- HloDomainRemover(
- tensorflow::StringPiece kind,
- std::function<Status(const DomainMetadata::Domain&)> normalizer)
+ HloDomainRemover(tensorflow::StringPiece kind,
+ std::function<Status(const DomainMetadata::Domain&,
+ const DomainMetadata* metadata)>
+ normalizer)
: kind_(kind.ToString()), normalizer_(std::move(normalizer)) {}
tensorflow::StringPiece name() const override { return "domain_remover"; }
@@ -48,7 +49,9 @@ class HloDomainRemover : public HloPassInterface {
class RunContext;
string kind_;
- std::function<Status(const DomainMetadata::Domain&)> normalizer_;
+ std::function<Status(const DomainMetadata::Domain&,
+ const DomainMetadata* metadata)>
+ normalizer_;
};
} // namespace xla
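With the signature change above, callers now hand HloDomainRemover a two-argument normalizer; the second argument carries the domain's reference metadata and is nullptr when the domain had no kDomain boundary. A hypothetical call site (the lambda body is illustrative only; ShardingMetadata::KindName() is the kind used by the tests below):

  HloDomainRemover remover(
      ShardingMetadata::KindName(),
      [](const DomainMetadata::Domain& domain,
         const DomainMetadata* metadata) -> Status {
        // Normalize attributes across 'domain', consulting 'metadata' when a
        // kDomain boundary supplied one (it may be nullptr).
        return Status::OK();
      });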
diff --git a/tensorflow/compiler/xla/service/hlo_domain_test.cc b/tensorflow/compiler/xla/service/hlo_domain_test.cc
index 5d8081c1ef..ffc18a0f88 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_domain_test.cc
@@ -97,12 +97,6 @@ class OpNameMetadata : public DomainMetadata {
string ToString() const override { return opname_; }
- Status NormalizeInstructions(
- const DomainMetadata::Domain& domain) const override {
- // For the purposes of this test, nothing to do.
- return Status::OK();
- }
-
static tensorflow::StringPiece KindName() { return "opname"; }
private:
@@ -124,7 +118,8 @@ std::unique_ptr<HloInstruction> OpNameDomainCreator(HloInstruction* instruction,
std::move(user_side_metadata));
}
-Status OpNameDomainNormalizer(const DomainMetadata::Domain& domain) {
+Status OpNameDomainNormalizer(const DomainMetadata::Domain& domain,
+ const DomainMetadata* metadata) {
// Nothing to do for the particular use this test makes of the OpName domains.
return Status::OK();
}
@@ -159,7 +154,7 @@ ENTRY entry {
EXPECT_FALSE(HasDomainEdge(module, "e", "d"));
HloDomainRemover remover(ShardingMetadata::KindName(),
- NormalizeShardingDomain);
+ ShardingMetadata::NormalizeShardingDomain);
TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
EXPECT_TRUE(remover_changed);
@@ -201,12 +196,14 @@ HloModule Module
ENTRY entry {
p0 = (f32[4]) parameter(0)
a = f32[4] get-tuple-element(p0), index=0
- b = (f32[4], u32[]) send(a), channel_id=1, sharding={maximal device=0}
- c = () send-done(b), channel_id=1, sharding={maximal device=0}
- d = (f32[4], u32[]) recv(), channel_id=2, sharding={maximal device=0}
- e = f32[4] recv-done(d), channel_id=2, sharding={maximal device=0}
- f = f32[4] add(a, e)
- g = f32[4] subtract(a, e)
+ token = token[] after-all()
+ b = (f32[4], u32[], token[]) send(a, token), channel_id=1, sharding={maximal device=0}
+ c = token[] send-done(b), channel_id=1, sharding={maximal device=0}
+ d = (f32[4], u32[], token[]) recv(token), channel_id=2, sharding={maximal device=0}
+ e = (f32[4], token[]) recv-done(d), channel_id=2, sharding={maximal device=0}
+ e_element = f32[4] get-tuple-element(e), index=0, sharding={maximal device=0}
+ f = f32[4] add(a, e_element)
+ g = f32[4] subtract(a, e_element)
ROOT h = (f32[4], f32[4]) tuple(f, g)
}
)";
@@ -219,18 +216,18 @@ ENTRY entry {
EXPECT_TRUE(isolator_changed);
EXPECT_TRUE(HasDomainEdge(module, "b", "a"));
- EXPECT_TRUE(HasDomainEdge(module, "f", "e"));
+ EXPECT_TRUE(HasDomainEdge(module, "f", "e_element"));
EXPECT_FALSE(HasDomainEdge(module, "a", "p0"));
EXPECT_FALSE(HasDomainEdge(module, "c", "b"));
EXPECT_FALSE(HasDomainEdge(module, "e", "d"));
HloDomainRemover remover(ShardingMetadata::KindName(),
- NormalizeShardingDomain);
+ ShardingMetadata::NormalizeShardingDomain);
TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
EXPECT_TRUE(remover_changed);
EXPECT_FALSE(HasDomainEdge(module, "b", "a"));
- EXPECT_FALSE(HasDomainEdge(module, "f", "e"));
+ EXPECT_FALSE(HasDomainEdge(module, "f", "e_element"));
}
TEST_F(HloDomainTest, CheckNoDomainAddedOnPureIOComputation) {
@@ -238,11 +235,13 @@ TEST_F(HloDomainTest, CheckNoDomainAddedOnPureIOComputation) {
HloModule Module
ENTRY entry {
- a = (f32[4], u32[]) recv(), channel_id=1, sharding={maximal device=-1}
- b = f32[4] recv-done(a), channel_id=1, sharding={maximal device=-1}
- c = f32[4] add(b, b), sharding={maximal device=-1}
- d = (f32[4], u32[]) send(c), channel_id=2, sharding={maximal device=-1}
- ROOT e = () send-done(d), channel_id=2, sharding={maximal device=-1}
+ token = token[] after-all(), sharding={maximal device=-1}
+ a = (f32[4], u32[], token[]) recv(token), channel_id=1, sharding={maximal device=-1}
+ b = (f32[4], token[]) recv-done(a), channel_id=1, sharding={maximal device=-1}
+ b_element = f32[4] get-tuple-element(b), index=0, sharding={maximal device=-1}
+ c = f32[4] add(b_element, b_element), sharding={maximal device=-1}
+ d = (f32[4], u32[], token[]) send(c, token), channel_id=2, sharding={maximal device=-1}
+ ROOT e = token[] send-done(d), channel_id=2, sharding={maximal device=-1}
}
)";
@@ -259,11 +258,13 @@ TEST_F(HloDomainTest, CheckNormalizationOnPureIOComputation) {
HloModule Module
ENTRY entry {
- a = (f32[4], u32[]) recv(), channel_id=1, sharding={maximal device=0}
- b = f32[4] recv-done(a), channel_id=1, sharding={maximal device=0}
- c = f32[4] add(b, b)
- d = (f32[4], u32[]) send(c), channel_id=2, sharding={maximal device=0}
- ROOT e = () send-done(d), channel_id=2, sharding={maximal device=0}
+ token = token[] after-all(), sharding={maximal device=0}
+ a = (f32[4], u32[], token[]) recv(token), channel_id=1, sharding={maximal device=0}
+ b = (f32[4], token[]) recv-done(a), channel_id=1, sharding={maximal device=0}
+ b_element = f32[4] get-tuple-element(b), index=0, sharding={maximal device=0}
+ c = f32[4] add(b_element, b_element)
+ d = (f32[4], u32[], token[]) send(c, token), channel_id=2, sharding={maximal device=0}
+ ROOT e = token[] send-done(d), channel_id=2, sharding={maximal device=0}
}
)";
@@ -271,7 +272,7 @@ ENTRY entry {
LOG(INFO) << "Original module:\n" << module->ToString();
HloDomainRemover remover(ShardingMetadata::KindName(),
- NormalizeShardingDomain);
+ ShardingMetadata::NormalizeShardingDomain);
TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
EXPECT_FALSE(remover_changed);
@@ -318,7 +319,7 @@ ENTRY entry {
EXPECT_FALSE(HasDomainEdge(module, "e", "d"));
HloDomainRemover sharding_remover(ShardingMetadata::KindName(),
- NormalizeShardingDomain);
+ ShardingMetadata::NormalizeShardingDomain);
TF_ASSERT_OK_AND_ASSIGN(bool sharding_remover_changed,
sharding_remover.Run(module));
EXPECT_TRUE(sharding_remover_changed);
@@ -340,10 +341,12 @@ TEST_F(HloDomainTest, CheckNormalizationOnInfeedTuple) {
HloModule Module
ENTRY entry {
- infeed = (f32[4], f32[4]) infeed(),
- sharding={{maximal device=1}, {maximal device=0}}
- gte0 = f32[4] get-tuple-element(infeed), index=0
- gte1 = f32[4] get-tuple-element(infeed), index=1
+ token = token[] after-all()
+ infeed = ((f32[4], f32[4]), token[]) infeed(token),
+ sharding={{maximal device=1}, {maximal device=0}, {maximal device=0}}
+ infeed.data = (f32[4], f32[4]) get-tuple-element(infeed), index=0
+ gte0 = f32[4] get-tuple-element(infeed.data), index=0
+ gte1 = f32[4] get-tuple-element(infeed.data), index=1
copy0 = f32[4] copy(gte0)
copy1 = f32[4] copy(gte1)
ROOT add = f32[4] add(copy0, copy1)
@@ -357,8 +360,7 @@ ENTRY entry {
TF_ASSERT_OK_AND_ASSIGN(bool isolator_changed, isolator.Run(module));
EXPECT_TRUE(isolator_changed);
- EXPECT_TRUE(HasDomainEdge(module, "gte0", "infeed"));
- EXPECT_TRUE(HasDomainEdge(module, "gte1", "infeed"));
+ EXPECT_TRUE(HasDomainEdge(module, "infeed.data", "infeed"));
EXPECT_FALSE(HasDomainEdge(module, "copy0", "gte0"));
EXPECT_FALSE(HasDomainEdge(module, "copy1", "gte1"));
@@ -366,6 +368,8 @@ ENTRY entry {
// HLO passes adding unexpected instructions.
//
// infeed
+ // |
+ // infeed.data (tuple element 0 of infeed)
// / \
// GTE0 GTE1
// / \
@@ -374,30 +378,35 @@ ENTRY entry {
// \ /
// TUPLE
// |
- // DOMAIN
HloInstruction* infeed = FindInstruction(module, "infeed");
ASSERT_NE(infeed, nullptr);
- auto infeed_users = infeed->users();
- HloInstruction* new_gte0 =
+ HloInstruction* infeed_data =
infeed->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetTupleElementShape(infeed->shape(), 0), infeed, 0));
+
+ auto infeed_data_users = infeed_data->users();
+ HloInstruction* new_gte0 = infeed_data->parent()->AddInstruction(
+ HloInstruction::CreateGetTupleElement(
+ ShapeUtil::GetTupleElementShape(infeed_data->shape(), 0), infeed_data,
+ 0));
HloInstruction* new_copy0 =
- infeed->parent()->AddInstruction(HloInstruction::CreateUnary(
+ infeed_data->parent()->AddInstruction(HloInstruction::CreateUnary(
new_gte0->shape(), HloOpcode::kCopy, new_gte0));
- HloInstruction* new_gte1 =
- infeed->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
- ShapeUtil::GetTupleElementShape(infeed->shape(), 1), infeed, 1));
+ HloInstruction* new_gte1 = infeed_data->parent()->AddInstruction(
+ HloInstruction::CreateGetTupleElement(
+ ShapeUtil::GetTupleElementShape(infeed_data->shape(), 1), infeed_data,
+ 1));
HloInstruction* new_copy1 =
- infeed->parent()->AddInstruction(HloInstruction::CreateUnary(
+ infeed_data->parent()->AddInstruction(HloInstruction::CreateUnary(
new_gte1->shape(), HloOpcode::kCopy, new_gte1));
- HloInstruction* new_tuple = infeed->parent()->AddInstruction(
+ HloInstruction* new_tuple = infeed_data->parent()->AddInstruction(
HloInstruction::CreateTuple({new_copy0, new_copy1}));
- for (HloInstruction* user : infeed_users) {
- TF_EXPECT_OK(infeed->ReplaceUseWith(user, new_tuple));
+ for (HloInstruction* user : infeed_data_users) {
+ TF_EXPECT_OK(infeed_data->ReplaceUseWith(user, new_tuple));
}
HloDomainRemover remover(ShardingMetadata::KindName(),
- NormalizeShardingDomain);
+ ShardingMetadata::NormalizeShardingDomain);
TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
EXPECT_TRUE(remover_changed);
@@ -412,7 +421,7 @@ ENTRY entry {
};
for (auto& assignment : assignments) {
auto device = assignment.instruction->sharding_unique_device();
- EXPECT_TRUE(device.has_value());
+ ASSERT_TRUE(device.has_value());
EXPECT_EQ(*device, assignment.device);
}
EXPECT_TRUE(new_tuple->has_sharding());
@@ -422,5 +431,64 @@ ENTRY entry {
HloSharding::AssignDevice(0)}));
}
+TEST_F(HloDomainTest, EmptyRootDomain) {
+ const char* const hlo_string = R"(
+HloModule Module
+
+ENTRY entry {
+ %param = f32[1] parameter(0), sharding={maximal device=0}
+ %tuple = (f32[1]) tuple(%param),
+ sharding={maximal device=1}
+ ROOT %gte = f32[1] get-tuple-element(%tuple), index=0,
+ sharding={maximal device=1}
+})";
+
+ TF_ASSERT_OK_AND_ASSIGN(HloModule * module, ParseModule(hlo_string));
+
+ HloDomainIsolator isolator(CreateShardingDomain);
+ TF_ASSERT_OK_AND_ASSIGN(bool isolator_changed, isolator.Run(module));
+ EXPECT_TRUE(isolator_changed);
+
+ EXPECT_TRUE(HasDomainEdge(module, "tuple", "param"));
+ EXPECT_FALSE(HasDomainEdge(module, "gte", "tuple"));
+
+ // Remove %tuple and %gte (tuple simplification)
+ HloInstruction* gte = FindInstruction(module, "gte");
+ HloInstruction* tuple = FindInstruction(module, "tuple");
+ module->entry_computation()->set_root_instruction(tuple->mutable_operand(0));
+ TF_EXPECT_OK(module->entry_computation()->RemoveInstruction(gte));
+ TF_EXPECT_OK(module->entry_computation()->RemoveInstruction(tuple));
+
+ HloDomainRemover remover(ShardingMetadata::KindName(),
+ ShardingMetadata::NormalizeShardingDomain);
+ TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
+ EXPECT_TRUE(remover_changed);
+
+ const HloInstruction* root = module->entry_computation()->root_instruction();
+ EXPECT_TRUE(root->has_sharding());
+ EXPECT_EQ(root->sharding(), HloSharding::AssignDevice(1));
+}
+
+// Tests that text dumps of domain instructions can be parsed back, in the
+// specific case of null shardings.
+TEST_F(HloDomainTest, DumpParseNullSharding) {
+ auto builder = HloComputation::Builder(TestName());
+ Shape shape = ShapeUtil::MakeShape(F32, {});
+ auto sharding_md_0 = MakeUnique<ShardingMetadata>(nullptr);
+ auto sharding_md_1 = MakeUnique<ShardingMetadata>(nullptr);
+ HloInstruction* param =
+ builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p"));
+ HloInstruction* domain = builder.AddInstruction(HloInstruction::CreateDomain(
+ shape, param, std::move(sharding_md_0), std::move(sharding_md_1)));
+ builder.AddInstruction(
+ HloInstruction::CreateBinary(shape, HloOpcode::kAdd, domain, domain));
+
+ auto module = CreateNewModule();
+ module->AddEntryComputation(builder.Build());
+
+ auto hlo_string = module->ToString();
+ ASSERT_TRUE(ParseModule(hlo_string).status().ok());
+}
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_domain_verifier.cc b/tensorflow/compiler/xla/service/hlo_domain_verifier.cc
new file mode 100644
index 0000000000..751fc677e2
--- /dev/null
+++ b/tensorflow/compiler/xla/service/hlo_domain_verifier.cc
@@ -0,0 +1,124 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/service/hlo_domain_verifier.h"
+
+#include <set>
+
+#include "tensorflow/compiler/xla/service/hlo_computation.h"
+#include "tensorflow/compiler/xla/service/hlo_domain_map.h"
+#include "tensorflow/compiler/xla/service/hlo_graph_dumper.h"
+#include "tensorflow/compiler/xla/service/hlo_instruction.h"
+#include "tensorflow/compiler/xla/service/hlo_opcode.h"
+#include "tensorflow/compiler/xla/types.h"
+
+namespace xla {
+
+class HloDomainVerifier::RunContext {
+ public:
+ RunContext(HloModule* module, HloDomainVerifier* verifier)
+ : module_(module), verifier_(verifier) {}
+
+ Status Run();
+
+ private:
+ // If the verifier caller passed an empty vector for kinds, we collect all
+ // the available domain types.
+ Status PopulateDomainKinds();
+
+ HloModule* module_;
+ HloDomainVerifier* verifier_;
+};
+
+Status HloDomainVerifier::RunContext::PopulateDomainKinds() {
+ if (verifier_->kinds_.empty()) {
+ // The caller specified no domain kinds, collect all the ones available.
+ std::set<string> kinds;
+ for (HloComputation* computation : module_->computations()) {
+ for (HloInstruction* instruction : computation->instructions()) {
+ if (instruction->opcode() == HloOpcode::kDomain) {
+ TF_RET_CHECK(instruction->user_side_metadata().Kind() ==
+ instruction->operand_side_metadata().Kind())
+ << instruction->ToString();
+ kinds.insert(instruction->user_side_metadata().Kind().ToString());
+ }
+ }
+ }
+ verifier_->kinds_.insert(verifier_->kinds_.end(), kinds.begin(),
+ kinds.end());
+ }
+ return Status::OK();
+}
+
+Status HloDomainVerifier::RunContext::Run() {
+ VLOG(4) << "Running HLO Domain Verifier";
+ TF_RETURN_IF_ERROR(PopulateDomainKinds());
+ for (HloComputation* computation : module_->computations()) {
+ for (auto& kind : verifier_->kinds_) {
+ // First create the domain instruction sets. A domain instruction set is
+ // the set of instructions whose edges never cross a kDomain instruction.
+ TF_ASSIGN_OR_RETURN(std::unique_ptr<HloDomainMap> domain_map,
+ HloDomainMap::Create(computation, kind));
+ // Verify every domain populated within the map.
+ for (auto& domain : domain_map->GetDomains()) {
+ TF_RETURN_IF_ERROR(VerifyDomain(*domain).status());
+ }
+ }
+ }
+ return Status::OK();
+}
+
+StatusOr<bool> HloDomainVerifier::Run(HloModule* module) {
+ RunContext run_context(module, this);
+ TF_RETURN_IF_ERROR(run_context.Run());
+ return false;
+}
+
+StatusOr<const DomainMetadata*> HloDomainVerifier::VerifyDomain(
+ const DomainMetadata::Domain& domain) {
+ const DomainMetadata* ref_metadata = nullptr;
+ VLOG(4) << "Reach set:";
+ for (HloInstruction* instruction : domain.instructions) {
+ VLOG(4) << " " << instruction->name();
+ }
+ VLOG(4) << " Domains:";
+ for (HloInstruction* instruction : domain.enter_domains) {
+ const DomainMetadata& meta = instruction->user_side_metadata();
+ VLOG(4) << " User side: " << instruction->name();
+ VLOG(4) << " " << meta.ToString();
+ if (ref_metadata == nullptr) {
+ ref_metadata = &meta;
+ } else {
+ TF_RET_CHECK(meta.Matches(*ref_metadata))
+ << "Metadata mismatch at instruction " << instruction->name() << " : "
+ << meta.ToString() << " vs " << ref_metadata->ToString();
+ }
+ }
+ for (HloInstruction* instruction : domain.exit_domains) {
+ const DomainMetadata& meta = instruction->operand_side_metadata();
+ VLOG(4) << " Operand side: " << instruction->name();
+ VLOG(4) << " " << meta.ToString();
+ if (ref_metadata == nullptr) {
+ ref_metadata = &meta;
+ } else {
+ TF_RET_CHECK(meta.Matches(*ref_metadata))
+ << "Metadata mismatch at instruction " << instruction->name() << " : "
+ << meta.ToString() << " vs " << ref_metadata->ToString();
+ }
+ }
+ return ref_metadata;
+}
+
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_domain_verifier.h b/tensorflow/compiler/xla/service/hlo_domain_verifier.h
new file mode 100644
index 0000000000..8e53cf97f8
--- /dev/null
+++ b/tensorflow/compiler/xla/service/hlo_domain_verifier.h
@@ -0,0 +1,65 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_HLO_DOMAIN_VERIFIER_H_
+#define TENSORFLOW_COMPILER_XLA_SERVICE_HLO_DOMAIN_VERIFIER_H_
+
+#include <string>
+#include <vector>
+
+#include "tensorflow/compiler/xla/service/hlo_domain_map.h"
+#include "tensorflow/compiler/xla/service/hlo_domain_metadata.h"
+#include "tensorflow/compiler/xla/service/hlo_module.h"
+#include "tensorflow/compiler/xla/service/hlo_pass_interface.h"
+#include "tensorflow/core/lib/core/status.h"
+
+namespace xla {
+
+// Verifies that the domain instructions are consistent, and that each domain
+// is surrounded by the same metadata.
+class HloDomainVerifier : public HloPassInterface {
+ public:
+ HloDomainVerifier(std::vector<string> kinds) : kinds_(std::move(kinds)) {}
+
+ tensorflow::StringPiece name() const override { return "domain_verifier"; }
+
+ StatusOr<bool> Run(HloModule* module) override;
+
+ // Verifies that the whole kDomain frontier bounding the instruction reach
+ // set has matching metadata.
+ // A kDomain instruction has two sides of metadata: a user-facing side and
+ // an operand-facing side.
+ // A reachable instruction set can make contact with a kDomain instruction
+ // on the user-facing side (the kDomain is an operand of the instruction) or
+ // on the operand-facing side (the kDomain is a user of the instruction).
+ // Depending on the contact side, the proper metadata object
+ // (user_side_metadata() vs. operand_side_metadata()) needs to be used for
+ // consistency checks.
+ // Returns the DomainMetadata pointer which surrounds the domain, and
+ // represents the common metadata within such domain. If the returned
+ // DomainMetadata pointer is nullptr, the input domain had no kDomain
+ // boundary.
+ static StatusOr<const DomainMetadata*> VerifyDomain(
+ const DomainMetadata::Domain& domain);
+
+ private:
+ class RunContext;
+
+ std::vector<string> kinds_;
+};
+
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_SERVICE_HLO_DOMAIN_VERIFIER_H_
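A sketch of both entry points the new pass exposes, following the call pattern HloDomainRemover now uses ('module' and 'domain' are assumed to be in scope):

  // As a pass: an empty kinds vector makes the verifier collect and check
  // every domain kind present in the module.
  HloDomainVerifier verifier({});
  TF_RETURN_IF_ERROR(verifier.Run(module).status());

  // Directly on a single domain, as HloDomainRemover now does:
  TF_ASSIGN_OR_RETURN(const DomainMetadata* ref_metadata,
                      HloDomainVerifier::VerifyDomain(domain));
  if (ref_metadata == nullptr) {
    // No kDomain boundary was present within this domain.
  }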
diff --git a/tensorflow/compiler/xla/service/hlo_element_type_converter.cc b/tensorflow/compiler/xla/service/hlo_element_type_converter.cc
index 4ed1508d70..c804f4364f 100644
--- a/tensorflow/compiler/xla/service/hlo_element_type_converter.cc
+++ b/tensorflow/compiler/xla/service/hlo_element_type_converter.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_evaluator.h"
diff --git a/tensorflow/compiler/xla/service/hlo_element_type_converter_test.cc b/tensorflow/compiler/xla/service/hlo_element_type_converter_test.cc
index 5c5a059e0f..c170e36c73 100644
--- a/tensorflow/compiler/xla/service/hlo_element_type_converter_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_element_type_converter_test.cc
@@ -57,8 +57,10 @@ TEST_F(HloElementTypeConverterTest, InfeedsOutfeedsNotConverted) {
const string& hlo_string = R"(
HloModule InfeedOutfeed
ENTRY RoundTrip16MiBR1.v2 {
- ROOT infeed = bf16[4]{0} infeed()
- outfeed = () outfeed(infeed)
+ token = token[] after-all()
+ infeed = (bf16[4]{0}, token[]) infeed(token)
+ ROOT infeed.data = bf16[4]{0} get-tuple-element(infeed), index=0
+ outfeed = token[] outfeed(infeed.data, token)
}
)";
auto module = CreateModuleFromHloString(hlo_string);
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator.cc b/tensorflow/compiler/xla/service/hlo_evaluator.cc
index 33424019b9..dfdfeb49a2 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator.cc
+++ b/tensorflow/compiler/xla/service/hlo_evaluator.cc
@@ -25,6 +25,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/index_util.h"
#include "tensorflow/compiler/xla/layout_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/primitive_util.h"
@@ -135,7 +136,6 @@ StatusOr<std::unique_ptr<Literal>> Compare<complex64>(
} // namespace
-
HloEvaluator::HloEvaluator(int64 max_loop_iterations)
: max_loop_iterations_(max_loop_iterations) {
typed_visitors_[PRED] = MakeUnique<HloEvaluatorTypedVisitor<bool>>(this);
@@ -330,6 +330,24 @@ StatusOr<std::unique_ptr<Literal>> HloEvaluator::EvaluateElementwiseUnaryOp(
return result;
}
+StatusOr<std::unique_ptr<Literal>> HloEvaluator::EvaluateDotOp(
+ const DotDimensionNumbers& dim_numbers, const Literal& lhs,
+ const Literal& rhs) {
+ std::unique_ptr<HloInstruction> lhs_instr =
+ HloInstruction::CreateConstant(lhs.CloneToUnique());
+ std::unique_ptr<HloInstruction> rhs_instr =
+ HloInstruction::CreateConstant(rhs.CloneToUnique());
+
+ TF_ASSIGN_OR_RETURN(
+ Shape dot_shape,
+ ShapeInference::InferDotOpShape(lhs.shape(), rhs.shape(), dim_numbers));
+
+ std::unique_ptr<HloInstruction> cloned_instruction =
+ HloInstruction::CreateDot(dot_shape, lhs_instr.get(), rhs_instr.get(),
+ dim_numbers);
+ return Evaluate(cloned_instruction.get());
+}
+
Status HloEvaluator::HandleParameter(HloInstruction* parameter) {
CHECK_LT(parameter->parameter_number(), arg_literals_.size());
const Literal* input_literal = arg_literals_[parameter->parameter_number()];
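
The EvaluateDotOp helper added above wraps the two literals in temporary constant instructions, infers the result shape via ShapeInference::InferDotOpShape, and evaluates a synthetic kDot instruction. A minimal sketch of driving it, with made-up operand values and a plain matrix-matrix contraction:

    HloEvaluator evaluator;
    auto lhs = LiteralUtil::CreateR2<float>({{1, 2}, {3, 4}});
    auto rhs = LiteralUtil::CreateR2<float>({{5, 6}, {7, 8}});
    DotDimensionNumbers dnums;
    dnums.add_lhs_contracting_dimensions(1);  // contract lhs columns
    dnums.add_rhs_contracting_dimensions(0);  // against rhs rows
    StatusOr<std::unique_ptr<Literal>> result =
        evaluator.EvaluateDotOp(dnums, *lhs, *rhs);
    // On success, *result.ValueOrDie() holds {{19, 22}, {43, 50}}.
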
@@ -382,7 +400,7 @@ Status HloEvaluator::HandleConcatenate(HloInstruction* concatenate) {
ShapeUtil::GetDimension(operand_shape, concat_dim);
}
- auto result_literal = Literal::CreateFromDimensions(
+ auto result_literal = LiteralUtil::CreateFromDimensions(
reference_shape.element_type(), concat_dimensions);
DimensionVector source_indices(rank, 0);
DimensionVector dest_indices(concat_dimensions.size(), 0);
@@ -533,7 +551,7 @@ Status HloEvaluator::HandleTuple(HloInstruction* tuple) {
operand_literals.push_back(&GetEvaluatedLiteralFor(operand));
}
- evaluated_[tuple] = Literal::MakeTuple(operand_literals);
+ evaluated_[tuple] = LiteralUtil::MakeTuple(operand_literals);
return Status::OK();
}
@@ -757,6 +775,12 @@ class OutputWindowIndexToInputIndex {
return ArraySlice<int64>(input_index_);
}
+ // Returns for a given 'input_dim' the corresponding output dimension index,
+ // or -1 if 'input_dim' is an elided window dimension.
+ int64 input_dim_value_to_output_index(int64 input_dim) {
+ return input_dim_value_to_output_index_[input_dim];
+ }
+
private:
// Propagates window dimensions from the output index to input_index_ by
// mutating input_index_ in place.
@@ -774,7 +798,7 @@ class OutputWindowIndexToInputIndex {
// input_dim_value_to_index_vector_[i] tells us how to compute dimension i of
// the input index from the output index. See
- // PropagateOutputIndexToInputIndex.
+ // PropagateOutputIndexWindowDimsToInputIndex.
std::vector<int64> input_dim_value_to_output_index_;
// The result computed by this functor. operator() returns an ArraySlice into
@@ -827,6 +851,8 @@ Status HloEvaluator::HandleGather(HloInstruction* gather) {
// corresponding index in the input shape.
std::vector<int64> input_index(operand.shape().dimensions_size());
std::vector<int64> output_index(gather->shape().dimensions_size());
+ std::vector<int64> input_gather_index_clamped(
+ operand.shape().dimensions_size());
OutputGatherIndexToInputIndex output_gather_index_to_input_index(
&gather->gather_dimension_numbers(), /*input_shape=*/operand.shape(),
@@ -848,14 +874,26 @@ Status HloEvaluator::HandleGather(HloInstruction* gather) {
output_index[i] = output_gather_index[i] + output_window_index[i];
DCHECK_LT(output_index[i], shape.dimensions(i));
}
+ for (int i = 0, e = input_gather_index.size(); i < e; i++) {
+ int64 output_dim =
+ output_window_index_to_input_index.input_dim_value_to_output_index(i);
+ // If 'output_dim' is -1, it means 'i' is an elided window dim. This means
+ // we set the iteration index to 0, so for the purpose of the following
+ // calculations we can consider the output dimension size to be 1.
+ int64 output_dim_size =
+ output_dim == -1 ? 1 : shape.dimensions(output_dim);
+ // Clamp the gather index so that the gather region fits in the operand.
+ // input_gather_index_clamped[i] = clamp(input_gather_index[i], 0,
+ // operand_shape.dimensions(i) -
+ // output_dim_size);
+ input_gather_index_clamped[i] =
+ std::min(operand_shape.dimensions(i) - output_dim_size,
+ std::max(0LL, input_gather_index[i]));
+ }
for (int i = 0, e = input_index.size(); i < e; i++) {
- // TODO(b/74360564): We should implement whatever out of bounds behavior
- // we decide for dynamic-slice here as well.
- input_index[i] = (input_gather_index[i] + input_window_index[i]) %
- operand_shape.dimensions(i);
- if (input_index[i] < 0) {
- input_index[i] += operand_shape.dimensions(i);
- }
+ input_index[i] = input_gather_index_clamped[i] + input_window_index[i];
+ DCHECK_GE(input_index[i], 0);
+ DCHECK_LT(input_index[i], operand_shape.dimensions(i));
}
TF_RETURN_IF_ERROR(
result->CopyElementFrom(operand, input_index, output_index));
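
A worked instance of the clamp above may help; the sizes are chosen only for illustration. With operand_shape.dimensions(i) = 10 and output_dim_size = 4:

    // input_gather_index[i] =  9  ->  min(10 - 4, max(0,  9)) = 6
    // input_gather_index[i] = -2  ->  min(10 - 4, max(0, -2)) = 0
    // Either way the 4-element gather window stays inside the operand,
    // replacing the wrap-around (modulo) indexing removed in this hunk.
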
@@ -902,8 +940,8 @@ Status HloEvaluator::HandleBroadcast(HloInstruction* broadcast) {
return Status::OK();
}
-Status HloEvaluator::HandleGenerateToken(HloInstruction* token) {
- evaluated_[token] = Literal::CreateToken();
+Status HloEvaluator::HandleAfterAll(HloInstruction* token) {
+ evaluated_[token] = LiteralUtil::CreateToken();
return Status::OK();
}
@@ -1024,8 +1062,6 @@ Status HloEvaluator::HandleSelect(HloInstruction* select) {
const auto& on_false = GetEvaluatedLiteralFor(select->operand(2));
// If predicate is of scalar type, no element-wise selection would be needed.
- // This would also handle output array of tuple types as the DefaultAction
- // would go through the HloEvaluatorTypedVisitor which doesn't handle tuples.
if (ShapeUtil::IsScalar(pred.shape())) {
if (pred.Get<bool>({})) {
evaluated_[select] = on_true.CloneToUnique();
@@ -1038,6 +1074,19 @@ Status HloEvaluator::HandleSelect(HloInstruction* select) {
return DefaultAction(select);
}
+Status HloEvaluator::HandleTupleSelect(HloInstruction* tuple_select) {
+ const auto& pred = GetEvaluatedLiteralFor(tuple_select->operand(0));
+ const auto& on_true = GetEvaluatedLiteralFor(tuple_select->operand(1));
+ const auto& on_false = GetEvaluatedLiteralFor(tuple_select->operand(2));
+
+ if (pred.Get<bool>({})) {
+ evaluated_[tuple_select] = on_true.CloneToUnique();
+ } else {
+ evaluated_[tuple_select] = on_false.CloneToUnique();
+ }
+ return Status::OK();
+}
+
Status HloEvaluator::HandleWhile(HloInstruction* while_hlo) {
HloComputation* cond_comp = while_hlo->while_condition();
HloComputation* body_comp = while_hlo->while_body();
@@ -1068,6 +1117,107 @@ Status HloEvaluator::HandleWhile(HloInstruction* while_hlo) {
return Status::OK();
}
+// Key-value sort is a special snowflake: it's templated on two different
+// element types, one for the keys, and one for the values. Jump through some
+// hoops to make this work.
+namespace {
+template <typename KeyType, typename ValueType>
+std::unique_ptr<Literal> EvaluateSortInternal(HloInstruction* sort,
+ const Literal& keys_literal,
+ const Literal& values_literal) {
+ CHECK_EQ(sort->operand_count(), 2);
+ // We need to sort an array of keys and an array of values, where the
+ // sorted order of the values is determined by the keys. The simplest(?)
+ // way to do this is to go to an array-of-pairs representation, sort the
+ // array using the keys, and then go back to pair-of-arrays.
+ VLOG(3) << "HandleSort keys_literal: " << keys_literal.ToString();
+ VLOG(3) << "HandleSort values_literal: " << values_literal.ToString();
+ const auto& keys_data = keys_literal.data<KeyType>();
+ const auto& values_data = values_literal.data<ValueType>();
+ using kv_pair = std::pair<KeyType, ValueType>;
+ std::vector<kv_pair> key_value_vector;
+ CHECK_EQ(keys_data.size(), values_data.size());
+ key_value_vector.reserve(keys_data.size());
+ for (int i = 0; i < keys_data.size(); ++i) {
+ key_value_vector.push_back(std::make_pair(keys_data[i], values_data[i]));
+ }
+ std::sort(key_value_vector.begin(), key_value_vector.end(),
+ [](const kv_pair& a, const kv_pair& b) {
+ return SafeLess<KeyType>(a.first, b.first);
+ });
+ std::vector<KeyType> result_keys;
+ std::vector<ValueType> result_values;
+ for (const auto& key_value : key_value_vector) {
+ result_keys.push_back(key_value.first);
+ result_values.push_back(key_value.second);
+ }
+ auto result_keys_literal = MakeUnique<Literal>(sort->operand(0)->shape());
+ result_keys_literal->PopulateR1(
+ tensorflow::gtl::ArraySlice<KeyType>(result_keys));
+ auto result_values_literal = MakeUnique<Literal>(sort->operand(1)->shape());
+ result_values_literal->PopulateR1(
+ tensorflow::gtl::ArraySlice<ValueType>(result_values));
+ auto result_tuple = LiteralUtil::MakeTuple(
+ {result_keys_literal.get(), result_values_literal.get()});
+ VLOG(3) << "HandleSort result_tuple: " << result_tuple->ToString();
+ return result_tuple;
+}
+
+template <typename KeyType>
+StatusOr<std::unique_ptr<Literal>> EvaluateSortCurried(
+ HloInstruction* sort, const Literal& keys_literal,
+ const Literal& values_literal) {
+ switch (sort->operand(1)->shape().element_type()) {
+ case F32:
+ return EvaluateSortInternal<KeyType, float>(sort, keys_literal,
+ values_literal);
+ case U32:
+ return EvaluateSortInternal<KeyType, uint32>(sort, keys_literal,
+ values_literal);
+ case S32:
+ return EvaluateSortInternal<KeyType, int32>(sort, keys_literal,
+ values_literal);
+ case BF16:
+ return EvaluateSortInternal<KeyType, bfloat16>(sort, keys_literal,
+ values_literal);
+ default:
+ return InvalidArgument("Unsupported type for Sort");
+ }
+}
+
+StatusOr<std::unique_ptr<Literal>> EvaluateSort(HloInstruction* sort,
+ const Literal& keys_literal,
+ const Literal& values_literal) {
+ switch (sort->operand(0)->shape().element_type()) {
+ case F32:
+ return EvaluateSortCurried<float>(sort, keys_literal, values_literal);
+ case U32:
+ return EvaluateSortCurried<uint32>(sort, keys_literal, values_literal);
+ case S32:
+ return EvaluateSortCurried<int32>(sort, keys_literal, values_literal);
+ case BF16:
+ return EvaluateSortCurried<bfloat16>(sort, keys_literal, values_literal);
+ default:
+ return InvalidArgument("Unsupported type for Sort");
+ }
+}
+} // namespace
+
+Status HloEvaluator::HandleSort(HloInstruction* sort) {
+ if (!ShapeUtil::IsTuple(sort->shape())) {
+ return DefaultAction(sort);
+ } else {
+ auto result = EvaluateSort(sort, GetEvaluatedLiteralFor(sort->operand(0)),
+ GetEvaluatedLiteralFor(sort->operand(1)));
+ if (result.ok()) {
+ evaluated_[sort] = std::move(result.ValueOrDie());
+ return Status::OK();
+ } else {
+ return result.status();
+ }
+ }
+}
+
Status HloEvaluator::Preprocess(HloInstruction* hlo) {
VLOG(2) << "About to visit HLO: " << hlo->ToString();
return Status::OK();
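
The zip-sort-unzip trick EvaluateSortInternal uses above, reduced to a self-contained sketch; this helper is illustrative and not part of the patch:

    #include <algorithm>
    #include <utility>
    #include <vector>

    // Sort `values` by `keys`: zip into an array of pairs, sort on the
    // key component, then unzip, mirroring EvaluateSortInternal.
    void SortByKey(std::vector<float>* keys, std::vector<int>* values) {
      std::vector<std::pair<float, int>> zipped;
      zipped.reserve(keys->size());
      for (size_t i = 0; i < keys->size(); ++i) {
        zipped.emplace_back((*keys)[i], (*values)[i]);
      }
      std::sort(zipped.begin(), zipped.end(),
                [](const std::pair<float, int>& a,
                   const std::pair<float, int>& b) {
                  return a.first < b.first;
                });
      for (size_t i = 0; i < zipped.size(); ++i) {
        (*keys)[i] = zipped[i].first;
        (*values)[i] = zipped[i].second;
      }
    }

The real code compares with SafeLess<KeyType> rather than plain operator< so that floating-point keys (NaNs included) still get a valid strict weak ordering; the sketch uses < only to stay minimal.
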
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator.h b/tensorflow/compiler/xla/service/hlo_evaluator.h
index fc2fc9437b..a4c37ef328 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator.h
+++ b/tensorflow/compiler/xla/service/hlo_evaluator.h
@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
+#include "tensorflow/compiler/xla/service/shape_inference.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
@@ -115,6 +116,10 @@ class HloEvaluator : public DfsHloVisitorWithDefault {
StatusOr<std::unique_ptr<Literal>> EvaluateElementwiseUnaryOp(
HloOpcode opcode, const Literal& operand);
+ StatusOr<std::unique_ptr<Literal>> EvaluateDotOp(
+ const DotDimensionNumbers& dim_numbers, const Literal& lhs,
+ const Literal& rhs);
+
protected:
// Make HloEvaluatorTypedVisitor a friend because it is logically part of this
// class.
@@ -172,9 +177,13 @@ class HloEvaluator : public DfsHloVisitorWithDefault {
Status HandleSelect(HloInstruction* select) override;
+ Status HandleTupleSelect(HloInstruction* tuple_select) override;
+
Status HandleBroadcast(HloInstruction* broadcast) override;
- Status HandleGenerateToken(HloInstruction* token) override;
+ Status HandleAfterAll(HloInstruction* token) override;
+
+ Status HandleSort(HloInstruction* sort) override;
// Returns the already-evaluated literal result for the instruction.
// A Constant instruction is considered evaluated and its literal will be
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator_test.cc b/tensorflow/compiler/xla/service/hlo_evaluator_test.cc
index 42770d848a..5f575b24a1 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_evaluator_test.cc
@@ -22,7 +22,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/reference_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_element_type_converter.h"
@@ -112,9 +112,9 @@ class HloEvaluatorTest : public ::testing::WithParamInterface<bool>,
// Verifies that HloEvaluator evaluates a HLO instruction that performs clamp
// with 3 operands.
TEST_P(HloEvaluatorTest, DoesClamp) {
- auto low = Literal::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
- auto value = Literal::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}});
- auto high = Literal::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}});
+ auto low = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
+ auto value = LiteralUtil::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}});
+ auto high = LiteralUtil::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}});
Shape shape = low->shape();
HloComputation::Builder b(TestName());
@@ -127,15 +127,15 @@ TEST_P(HloEvaluatorTest, DoesClamp) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected = Literal::CreateR2<float>({{0, 4}, {2, 4}});
+ auto expected = LiteralUtil::CreateR2<float>({{0, 4}, {2, 4}});
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
TEST_P(HloEvaluatorTest, DISABLED_DoesClampSpecialBroadcast) {
- auto low = Literal::CreateR0<float>(0.f);
- auto value = Literal::CreateR2<float>({{-1.f, 0.f}, {1.f, 2.f}});
- auto high = Literal::CreateR0<float>(1.f);
+ auto low = LiteralUtil::CreateR0<float>(0.f);
+ auto value = LiteralUtil::CreateR2<float>({{-1.f, 0.f}, {1.f, 2.f}});
+ auto high = LiteralUtil::CreateR0<float>(1.f);
Shape shape = value->shape();
HloComputation::Builder b(TestName());
@@ -148,7 +148,7 @@ TEST_P(HloEvaluatorTest, DISABLED_DoesClampSpecialBroadcast) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected = Literal::CreateR2<float>({{0, 0}, {1, 1}});
+ auto expected = LiteralUtil::CreateR2<float>({{0, 0}, {1, 1}});
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -156,9 +156,9 @@ TEST_P(HloEvaluatorTest, DISABLED_DoesClampSpecialBroadcast) {
// Verifies that HloEvaluator evaluates a HLO instruction that performs select
// with 3 operands.
TEST_P(HloEvaluatorTest, DoesSelect) {
- auto pred = Literal::CreateR2<bool>({{true, false}, {false, true}});
- auto on_true = Literal::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}});
- auto on_false = Literal::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}});
+ auto pred = LiteralUtil::CreateR2<bool>({{true, false}, {false, true}});
+ auto on_true = LiteralUtil::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}});
+ auto on_false = LiteralUtil::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}});
Shape shape = on_true->shape();
HloComputation::Builder b(TestName());
@@ -173,7 +173,7 @@ TEST_P(HloEvaluatorTest, DoesSelect) {
std::unique_ptr<Literal> result = Evaluate({});
- auto expected = Literal::CreateR2<float>({{2, 5}, {0, 4}});
+ auto expected = LiteralUtil::CreateR2<float>({{2, 5}, {0, 4}});
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -181,46 +181,46 @@ TEST_P(HloEvaluatorTest, DoesSelect) {
// Verifies that HloEvaluator evaluates a HLO instruction that performs
// element-wise addition with 2 operands.
TEST_P(HloEvaluatorTest, DoesAdd) {
- auto lhs = Literal::CreateR2<int64>({{1, 0}, {-100, 4}});
- auto rhs = Literal::CreateR2<int64>({{2, 4}, {4, 4}});
- auto expected = Literal::CreateR2<int64>({{3, 4}, {-96, 8}});
+ auto lhs = LiteralUtil::CreateR2<int64>({{1, 0}, {-100, 4}});
+ auto rhs = LiteralUtil::CreateR2<int64>({{2, 4}, {4, 4}});
+ auto expected = LiteralUtil::CreateR2<int64>({{3, 4}, {-96, 8}});
TestBinaryOp(HloOpcode::kAdd, std::move(expected), std::move(lhs),
std::move(rhs));
}
// Verifies that HloEvaluator evaluates a HLO instruction that performs
// element-wise and with 2 operands.
TEST_P(HloEvaluatorTest, DoesAnd) {
- auto lhs = Literal::CreateR2<int64>({{1, 0}, {-100, 4}});
- auto rhs = Literal::CreateR2<int64>({{2, 4}, {4, 4}});
- auto expected = Literal::CreateR2<int64>({{0, 0}, {4, 4}});
+ auto lhs = LiteralUtil::CreateR2<int64>({{1, 0}, {-100, 4}});
+ auto rhs = LiteralUtil::CreateR2<int64>({{2, 4}, {4, 4}});
+ auto expected = LiteralUtil::CreateR2<int64>({{0, 0}, {4, 4}});
TestBinaryOp(HloOpcode::kAnd, std::move(expected), std::move(lhs),
std::move(rhs));
}
// Verifies that HloEvaluator evaluates a HLO instruction that performs
// element-wise or with 2 operands.
TEST_P(HloEvaluatorTest, DoesOr) {
- auto lhs = Literal::CreateR2<int64>({{1, 0}, {-100, 4}});
- auto rhs = Literal::CreateR2<int64>({{2, 4}, {4, 4}});
- auto expected = Literal::CreateR2<int64>({{3, 4}, {-100, 4}});
+ auto lhs = LiteralUtil::CreateR2<int64>({{1, 0}, {-100, 4}});
+ auto rhs = LiteralUtil::CreateR2<int64>({{2, 4}, {4, 4}});
+ auto expected = LiteralUtil::CreateR2<int64>({{3, 4}, {-100, 4}});
TestBinaryOp(HloOpcode::kOr, std::move(expected), std::move(lhs),
std::move(rhs));
}
// Verifies that HloEvaluator evaluates a HLO instruction that performs
// element-wise xor with 2 operands.
TEST_P(HloEvaluatorTest, DoesXor) {
- auto lhs = Literal::CreateR2<int64>({{1, 0}, {-100, 4}});
- auto rhs = Literal::CreateR2<int64>({{2, 4}, {4, 4}});
- auto expected = Literal::CreateR2<int64>({{3, 4}, {-104, 0}});
+ auto lhs = LiteralUtil::CreateR2<int64>({{1, 0}, {-100, 4}});
+ auto rhs = LiteralUtil::CreateR2<int64>({{2, 4}, {4, 4}});
+ auto expected = LiteralUtil::CreateR2<int64>({{3, 4}, {-104, 0}});
TestBinaryOp(HloOpcode::kXor, std::move(expected), std::move(lhs),
std::move(rhs));
}
// Verifies that HloEvaluator evaluates a HLO instruction that performs
// element-wise multiply with 2 operands.
TEST_P(HloEvaluatorTest, DoesMultiply) {
- auto lhs = Literal::CreateR2<int32>({{-1, 0}, {-100, 4}});
- auto rhs = Literal::CreateR2<int32>(
+ auto lhs = LiteralUtil::CreateR2<int32>({{-1, 0}, {-100, 4}});
+ auto rhs = LiteralUtil::CreateR2<int32>(
{{std::numeric_limits<int32>::min(), 4}, {4, 4}});
- auto expected = Literal::CreateR2<int32>(
+ auto expected = LiteralUtil::CreateR2<int32>(
{{std::numeric_limits<int32>::min(), 0}, {-400, 16}});
TestBinaryOp(HloOpcode::kMultiply, std::move(expected), std::move(lhs),
std::move(rhs));
@@ -228,17 +228,17 @@ TEST_P(HloEvaluatorTest, DoesMultiply) {
// Verifies that HloEvaluator evaluates a HLO instruction that performs
// element-wise divide with 2 operands.
TEST_P(HloEvaluatorTest, DoesDivideInt64) {
- auto lhs = Literal::CreateR2<int64>({{1, 0}, {-100, 4}});
- auto rhs = Literal::CreateR2<int64>({{2, 4}, {4, 4}});
- auto expected = Literal::CreateR2<int64>({{0, 0}, {-25, 1}});
+ auto lhs = LiteralUtil::CreateR2<int64>({{1, 0}, {-100, 4}});
+ auto rhs = LiteralUtil::CreateR2<int64>({{2, 4}, {4, 4}});
+ auto expected = LiteralUtil::CreateR2<int64>({{0, 0}, {-25, 1}});
TestBinaryOp(HloOpcode::kDivide, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_P(HloEvaluatorTest, DoesDivideDouble) {
- auto lhs = Literal::CreateR2<double>({{1.0, 0.0}, {-100.0, 4.0}});
- auto rhs = Literal::CreateR2<double>({{2.2, 4.0}, {4.0, 4.0}});
+ auto lhs = LiteralUtil::CreateR2<double>({{1.0, 0.0}, {-100.0, 4.0}});
+ auto rhs = LiteralUtil::CreateR2<double>({{2.2, 4.0}, {4.0, 4.0}});
auto expected =
- Literal::CreateR2<double>({{0.45454545454545453, 0}, {-25, 1}});
+ LiteralUtil::CreateR2<double>({{0.45454545454545453, 0}, {-25, 1}});
TestBinaryOp(HloOpcode::kDivide, std::move(expected), std::move(lhs),
std::move(rhs));
}
@@ -246,54 +246,54 @@ TEST_P(HloEvaluatorTest, DoesDivideDouble) {
// Verifies that HloEvaluator evaluates a HLO instruction that performs
// element-wise abs op with 1 operand.
TEST_P(HloEvaluatorTest, DoesAbsR2) {
- auto operand = Literal::CreateR2<int64>({{1, -20}, {-100, 4}});
- auto expected = Literal::CreateR2<int64>({{1, 20}, {100, 4}});
+ auto operand = LiteralUtil::CreateR2<int64>({{1, -20}, {-100, 4}});
+ auto expected = LiteralUtil::CreateR2<int64>({{1, 20}, {100, 4}});
TestUnaryOp(HloOpcode::kAbs, std::move(expected), std::move(operand));
}
TEST_P(HloEvaluatorTest, DoesAbsR0) {
- auto operand = Literal::CreateR0<float>(-1.0f);
- auto expected = Literal::CreateR0<float>(1.0f);
+ auto operand = LiteralUtil::CreateR0<float>(-1.0f);
+ auto expected = LiteralUtil::CreateR0<float>(1.0f);
TestUnaryOp(HloOpcode::kAbs, std::move(expected), std::move(operand));
}
TEST_P(HloEvaluatorTest, DoesAbsR1WithZeroSize) {
- auto operand = Literal::CreateR1<float>({});
- auto expected = Literal::CreateR1<float>({});
+ auto operand = LiteralUtil::CreateR1<float>({});
+ auto expected = LiteralUtil::CreateR1<float>({});
TestUnaryOp(HloOpcode::kAbs, std::move(expected), std::move(operand));
}
TEST_P(HloEvaluatorTest, DoesNegateR2) {
- auto operand = Literal::CreateR2<int32>(
+ auto operand = LiteralUtil::CreateR2<int32>(
{{0, std::numeric_limits<int32>::min()}, {-1, 4}});
- auto expected =
- Literal::CreateR2<int32>({{0, std::numeric_limits<int>::min()}, {1, -4}});
+ auto expected = LiteralUtil::CreateR2<int32>(
+ {{0, std::numeric_limits<int>::min()}, {1, -4}});
TestUnaryOp(HloOpcode::kNegate, std::move(expected), std::move(operand));
}
TEST_P(HloEvaluatorTest, DoesCosR2) {
- auto operand = Literal::CreateR2<float>({{0, M_PI}, {-M_PI, 2 * M_PI}});
- auto expected = Literal::CreateR2<float>({{1, -1}, {-1, 1}});
+ auto operand = LiteralUtil::CreateR2<float>({{0, M_PI}, {-M_PI, 2 * M_PI}});
+ auto expected = LiteralUtil::CreateR2<float>({{1, -1}, {-1, 1}});
TestUnaryOp(HloOpcode::kCos, std::move(expected), std::move(operand),
use_bfloat16_ ? 0.031250 : 9.5367431640625E-7);
}
TEST_P(HloEvaluatorTest, DoesSinR2) {
- auto operand = Literal::CreateR2<float>({{0, M_PI}, {-M_PI, 2 * M_PI}});
- auto expected = Literal::CreateR2<float>({{0, 0}, {0, 0}});
+ auto operand = LiteralUtil::CreateR2<float>({{0, M_PI}, {-M_PI, 2 * M_PI}});
+ auto expected = LiteralUtil::CreateR2<float>({{0, 0}, {0, 0}});
TestUnaryOp(HloOpcode::kSin, std::move(expected), std::move(operand),
use_bfloat16_ ? 0.031250 : 9.5367431640625E-7);
}
TEST_P(HloEvaluatorTest, DoesNotR2) {
auto operand =
- Literal::CreateR2<int32>({{0, std::numeric_limits<int>::min()},
- {-1, std::numeric_limits<int>::max()}});
+ LiteralUtil::CreateR2<int32>({{0, std::numeric_limits<int>::min()},
+ {-1, std::numeric_limits<int>::max()}});
auto expected =
- Literal::CreateR2<int32>({{-1, std::numeric_limits<int>::max()},
- {0, std::numeric_limits<int>::min()}});
+ LiteralUtil::CreateR2<int32>({{-1, std::numeric_limits<int>::max()},
+ {0, std::numeric_limits<int>::min()}});
TestUnaryOp(HloOpcode::kNot, std::move(expected), std::move(operand));
}
// Verifies that HloEvaluator evaluates a HLO Computation whose operands are
// neither parameters nor constants.
TEST_P(HloEvaluatorTest, DoesTraverseInstructions) {
- auto lhs = Literal::CreateR2<int64>({{1, 0}, {-100, 4}});
- auto rhs = Literal::CreateR2<int64>({{2, 4}, {4, 4}});
- auto rhs2 = Literal::CreateR2<int64>({{1, -20}, {-100, 4}});
+ auto lhs = LiteralUtil::CreateR2<int64>({{1, 0}, {-100, 4}});
+ auto rhs = LiteralUtil::CreateR2<int64>({{2, 4}, {4, 4}});
+ auto rhs2 = LiteralUtil::CreateR2<int64>({{1, -20}, {-100, 4}});
std::vector<const Literal*> args = {lhs.get(), rhs.get(), rhs2.get()};
Shape shape = ShapeUtil::MakeShape(S64, {2, 2});
@@ -314,7 +314,7 @@ TEST_P(HloEvaluatorTest, DoesTraverseInstructions) {
std::unique_ptr<Literal> result = Evaluate(args);
- auto expected = Literal::CreateR2<int64>({{4, -16}, {-196, 12}});
+ auto expected = LiteralUtil::CreateR2<int64>({{4, -16}, {-196, 12}});
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -324,7 +324,7 @@ TEST_P(HloEvaluatorTest, DoesReshape) {
HloComputation::Builder b(TestName());
const int64 dimensions[] = {11, 8, 7, 5, 9};
TF_ASSERT_OK_AND_ASSIGN(auto literal,
- Literal::CreateRandomLiteral<F32>(
+ LiteralUtil::CreateRandomLiteral<F32>(
ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));
auto literal_clone = literal->CloneToUnique();
HloInstruction* literal_instruction =
@@ -349,8 +349,8 @@ TEST_P(HloEvaluatorTest, DoesReshape) {
// Verifies Broadcast operation is correctly evaluated.
TEST_P(HloEvaluatorTest, DoesBroadcast) {
HloComputation::Builder b(TestName());
- auto input_literal = Literal::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}});
- auto output_literal = Literal::CreateR3<int32>(
+ auto input_literal = LiteralUtil::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}});
+ auto output_literal = LiteralUtil::CreateR3<int32>(
{{{1, 2}, {3, 4}, {5, 6}}, {{1, 2}, {3, 4}, {5, 6}}});
HloInstruction* literal_instruction = b.AddInstruction(
HloInstruction::CreateConstant(std::move(input_literal)));
@@ -365,8 +365,8 @@ TEST_P(HloEvaluatorTest, DoesBroadcast) {
TEST_P(HloEvaluatorTest, DoesBroadcastScalar) {
HloComputation::Builder b(TestName());
- auto input_literal = Literal::CreateR0<int32>(111);
- auto output_literal = Literal::CreateR2<int32>(
+ auto input_literal = LiteralUtil::CreateR0<int32>(111);
+ auto output_literal = LiteralUtil::CreateR2<int32>(
{{111, 111}, {111, 111}, {111, 111}, {111, 111}, {111, 111}, {111, 111}});
HloInstruction* literal_instruction = b.AddInstruction(
@@ -386,9 +386,9 @@ TEST_P(HloEvaluatorTest, DoesConcatenateSimple) {
HloComputation::Builder b(TestName());
HloInstruction* operand1 = b.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<int64>({{-1, -2}, {100, 200}})));
+ LiteralUtil::CreateR2<int64>({{-1, -2}, {100, 200}})));
HloInstruction* operand2 = b.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<int64>({{-2, -3}, {-100, -200}})));
+ LiteralUtil::CreateR2<int64>({{-2, -3}, {-100, -200}})));
std::vector<HloInstruction*> operands = {operand1, operand2};
@@ -399,8 +399,8 @@ TEST_P(HloEvaluatorTest, DoesConcatenateSimple) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected =
- Literal::CreateR2<int64>({{-1, -2}, {100, 200}, {-2, -3}, {-100, -200}});
+ auto expected = LiteralUtil::CreateR2<int64>(
+ {{-1, -2}, {100, 200}, {-2, -3}, {-100, -200}});
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -408,9 +408,9 @@ TEST_P(HloEvaluatorTest, ConcatenateHandlesShapeWithZeroElement) {
HloComputation::Builder b(TestName());
HloInstruction* operand1 = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int64>({100, 200})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int64>({100, 200})));
HloInstruction* operand2 = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int64>({})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int64>({})));
std::vector<HloInstruction*> operands = {operand1, operand2};
@@ -421,16 +421,16 @@ TEST_P(HloEvaluatorTest, ConcatenateHandlesShapeWithZeroElement) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected = Literal::CreateR1<int64>({100, 200});
+ auto expected = LiteralUtil::CreateR1<int64>({100, 200});
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
TEST_P(HloEvaluatorTest, ConvertWithSameLayout) {
HloComputation::Builder b(TestName());
- auto input_literal = Literal::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}});
+ auto input_literal = LiteralUtil::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}});
auto expected =
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
ASSERT_TRUE(LayoutUtil::LayoutsInShapesEqual(input_literal->shape(),
expected->shape()));
@@ -447,9 +447,9 @@ TEST_P(HloEvaluatorTest, ConvertWithSameLayout) {
TEST_P(HloEvaluatorTest, ConvertWithDifferentLayout) {
HloComputation::Builder b(TestName());
- auto input_literal = Literal::CreateR2WithLayout<int32>(
+ auto input_literal = LiteralUtil::CreateR2WithLayout<int32>(
{{1, 2}, {3, 4}, {5, 6}}, LayoutUtil::MakeLayout({0, 1}));
- auto expected = Literal::CreateR2WithLayout<float>(
+ auto expected = LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}, LayoutUtil::MakeLayout({1, 0}));
ASSERT_FALSE(LayoutUtil::LayoutsInShapesEqual(input_literal->shape(),
expected->shape()));
@@ -478,13 +478,13 @@ PaddingConfig CreatePaddingConfig(
}
TEST_P(HloEvaluatorTest, Pad2DIntegerArrayWithZeroDimension) {
- auto operand = Literal::CreateR2<int32>({{}, {}});
+ auto operand = LiteralUtil::CreateR2<int32>({{}, {}});
HloComputation::Builder b(TestName());
auto operand_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(operand)));
constexpr int32 kPadValue = 10;
- auto pad_value = Literal::CreateR0<int32>(kPadValue);
+ auto pad_value = LiteralUtil::CreateR0<int32>(kPadValue);
auto padding_value_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(pad_value)));
@@ -496,7 +496,7 @@ TEST_P(HloEvaluatorTest, Pad2DIntegerArrayWithZeroDimension) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected = Literal::CreateR2<int32>(
+ auto expected = LiteralUtil::CreateR2<int32>(
{{10, 10}, {10, 10}, {10, 10}, {10, 10}, {10, 10}});
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
@@ -506,11 +506,11 @@ TEST_P(HloEvaluatorTest, Pad4DFloatArrayWithInteriorPadding) {
HloComputation::Builder b(TestName());
Array4D<float> input_array(3, 2, 1, 1, {1, 2, 3, 4, 5, 6});
- auto input = Literal::CreateR4FromArray4D<float>(input_array);
+ auto input = LiteralUtil::CreateR4FromArray4D<float>(input_array);
HloInstruction* input_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
constexpr float kPadValue = 1.5;
- auto pad_value = Literal::CreateR0<float>(kPadValue);
+ auto pad_value = LiteralUtil::CreateR0<float>(kPadValue);
HloInstruction* pad_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(pad_value)));
@@ -532,7 +532,7 @@ TEST_P(HloEvaluatorTest, Pad4DFloatArrayWithInteriorPadding) {
(*expected_array)(7, 0, 0, 0) = 5.0f;
(*expected_array)(7, 2, 0, 0) = 6.0f;
- auto expected = Literal::CreateR4FromArray4D<float>(*expected_array);
+ auto expected = LiteralUtil::CreateR4FromArray4D<float>(*expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -549,12 +549,12 @@ TEST_P(HloEvaluatorTest, NegativePadding2D) {
// }
auto input_array = MakeUnique<Array2D<float>>(4, 3);
input_array->FillUnique(1.0f);
- auto input = Literal::CreateR2FromArray2D<float>(*input_array);
+ auto input = LiteralUtil::CreateR2FromArray2D<float>(*input_array);
HloInstruction* input_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
auto pad_value_instruction = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.718f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.718f)));
auto r2_padding_on_dim0_dim1 =
CreatePaddingConfig({{{-1, -2, 0}}, {{-2, 4, 0}}});
@@ -574,7 +574,7 @@ TEST_P(HloEvaluatorTest, NegativePadding2D) {
(*expected_array)(0, 2) = 2.718f;
(*expected_array)(0, 3) = 2.718f;
(*expected_array)(0, 4) = 2.718f;
- auto expected = Literal::CreateR2FromArray2D<float>(*expected_array);
+ auto expected = LiteralUtil::CreateR2FromArray2D<float>(*expected_array);
EXPECT_TRUE(LiteralTestUtil::Near(*expected, *result, ErrorSpec(0.031250)));
}
@@ -590,12 +590,12 @@ TEST_P(HloEvaluatorTest, NegativeAndInteriorPadding2D) {
// }
auto input_array = MakeUnique<Array2D<float>>(4, 3);
input_array->FillUnique(1.0f);
- auto input = Literal::CreateR2FromArray2D<float>(*input_array);
+ auto input = LiteralUtil::CreateR2FromArray2D<float>(*input_array);
HloInstruction* input_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
auto pad_value_instruction = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.718f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.718f)));
PaddingConfig padding_config = MakeNoPaddingConfig(2);
@@ -613,7 +613,7 @@ TEST_P(HloEvaluatorTest, NegativeAndInteriorPadding2D) {
std::unique_ptr<Literal> result = Evaluate();
auto expected_array = MakeUnique<Array2D<float>>(0, 9);
- auto expected = Literal::CreateR2FromArray2D<float>(*expected_array);
+ auto expected = LiteralUtil::CreateR2FromArray2D<float>(*expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -630,13 +630,13 @@ TEST_P(HloEvaluatorTest, DotRank2AndRank1) {
// }
auto lhs_array = MakeUnique<Array2D<float>>(4, 1);
lhs_array->FillUnique(1.0f);
- auto lhs_literal = Literal::CreateR2FromArray2D<float>(*lhs_array);
+ auto lhs_literal = LiteralUtil::CreateR2FromArray2D<float>(*lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
// rhs:
// f32[2] { 1, 2 },
- auto rhs_literal = Literal::CreateR2<float>({{1, 2}});
+ auto rhs_literal = LiteralUtil::CreateR2<float>({{1, 2}});
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
@@ -658,7 +658,7 @@ TEST_P(HloEvaluatorTest, DotRank2AndRank1) {
{4.f, 8.f},
});
// clang-format on
- auto expected = Literal::CreateR2FromArray2D<float>(expected_array);
+ auto expected = LiteralUtil::CreateR2FromArray2D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -669,7 +669,7 @@ TEST_P(HloEvaluatorTest, DotRank1AndRank2) {
// lhs:
// f32[3]
// { 1, 2, 3 },
- auto lhs_literal = Literal::CreateR1<float>({1, 2, 3});
+ auto lhs_literal = LiteralUtil::CreateR1<float>({1, 2, 3});
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
@@ -681,7 +681,7 @@ TEST_P(HloEvaluatorTest, DotRank1AndRank2) {
// }
auto rhs_array = MakeUnique<Array2D<float>>(3, 2);
rhs_array->FillUnique(1.0f);
- auto rhs_literal = Literal::CreateR2FromArray2D<float>(*rhs_array);
+ auto rhs_literal = LiteralUtil::CreateR2FromArray2D<float>(*rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
@@ -695,7 +695,7 @@ TEST_P(HloEvaluatorTest, DotRank1AndRank2) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected = Literal::CreateR1<float>({22.f, 28.f});
+ auto expected = LiteralUtil::CreateR1<float>({22.f, 28.f});
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -712,7 +712,7 @@ TEST_P(HloEvaluatorTest, DotRank2AndRank2) {
// }
auto lhs_array = MakeUnique<Array2D<float>>(4, 3);
lhs_array->FillUnique(1.0f);
- auto lhs_literal = Literal::CreateR2FromArray2D<float>(*lhs_array);
+ auto lhs_literal = LiteralUtil::CreateR2FromArray2D<float>(*lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
@@ -724,7 +724,7 @@ TEST_P(HloEvaluatorTest, DotRank2AndRank2) {
// }
auto rhs_array = MakeUnique<Array2D<float>>(3, 2);
rhs_array->FillUnique(1.0f);
- auto rhs_literal = Literal::CreateR2FromArray2D<float>(*rhs_array);
+ auto rhs_literal = LiteralUtil::CreateR2FromArray2D<float>(*rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
@@ -744,7 +744,7 @@ TEST_P(HloEvaluatorTest, DotRank2AndRank2) {
{94.f, 124.f},
{130.f, 172.f},
});
- auto expected = Literal::CreateR2FromArray2D<float>(expected_array);
+ auto expected = LiteralUtil::CreateR2FromArray2D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -753,12 +753,12 @@ TEST_P(HloEvaluatorTest, SimpleConv1D) {
HloComputation::Builder b(TestName());
Array3D<float> lhs_array = {{{1, 2, 3}}};
- auto lhs_literal = Literal::CreateR3FromArray3D<float>(lhs_array);
+ auto lhs_literal = LiteralUtil::CreateR3FromArray3D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
Array3D<float> rhs_array = {{{3.f, 4.f}}};
- auto rhs_literal = Literal::CreateR3FromArray3D<float>(rhs_array);
+ auto rhs_literal = LiteralUtil::CreateR3FromArray3D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
@@ -792,7 +792,7 @@ TEST_P(HloEvaluatorTest, SimpleConv1D) {
std::unique_ptr<Literal> result = Evaluate();
Array3D<float> expected_array = {{{11.f, 18.f, 9.f}}};
- auto expected = Literal::CreateR3FromArray3D<float>(expected_array);
+ auto expected = LiteralUtil::CreateR3FromArray3D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -809,7 +809,7 @@ TEST_P(HloEvaluatorTest, Simple4x4Conv2DWith2x2Kernel) {
{13, 14, 15, 16},
}));
// clang-format on
- auto lhs_literal = Literal::CreateR4FromArray4D<float>(lhs_array);
+ auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
@@ -820,7 +820,7 @@ TEST_P(HloEvaluatorTest, Simple4x4Conv2DWith2x2Kernel) {
{7, 8},
}));
// clang-format on
- auto rhs_literal = Literal::CreateR4FromArray4D<float>(rhs_array);
+ auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
@@ -854,7 +854,7 @@ TEST_P(HloEvaluatorTest, Simple4x4Conv2DWith2x2Kernel) {
{149, 160, 171, 80},
}));
// clang-format on
- auto expected = Literal::CreateR4FromArray4D<float>(expected_array);
+ auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -884,11 +884,11 @@ TEST_P(HloEvaluatorTest, Conv2DGeneralDimensionsReversed) {
}});
// clang-format on
- auto lhs_literal = Literal::CreateR4FromArray4D<float>(input);
+ auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(input);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
- auto rhs_literal = Literal::CreateR4FromArray4D<float>(weight);
+ auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(weight);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
rhs_instruction = b.AddInstruction(HloInstruction::CreateReverse(
@@ -933,7 +933,7 @@ TEST_P(HloEvaluatorTest, Conv2DGeneralDimensionsReversed) {
Array4D<float> expected_array({{{{2514, 2685}}}});
Array4D<float> expected_array_bf16({{{{2512, 2672}}}});
// clang-format on
- auto expected = Literal::CreateR4FromArray4D<float>(
+ auto expected = LiteralUtil::CreateR4FromArray4D<float>(
use_bfloat16_ ? expected_array_bf16 : expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
@@ -964,11 +964,11 @@ TEST_P(HloEvaluatorTest, Conv2DGeneralDimensions) {
}});
// clang-format on
- auto lhs_literal = Literal::CreateR4FromArray4D<float>(input);
+ auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(input);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
- auto rhs_literal = Literal::CreateR4FromArray4D<float>(weight);
+ auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(weight);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
@@ -1010,7 +1010,7 @@ TEST_P(HloEvaluatorTest, Conv2DGeneralDimensions) {
Array4D<float> expected_array({{{{2514, 2685}}}});
Array4D<float> expected_array_bf16({{{{2512, 2672}}}});
// clang-format on
- auto expected = Literal::CreateR4FromArray4D<float>(
+ auto expected = LiteralUtil::CreateR4FromArray4D<float>(
use_bfloat16_ ? expected_array_bf16 : expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
@@ -1028,7 +1028,7 @@ TEST_P(HloEvaluatorTest, DilatedBaseConv2DWithHighPadding) {
{13, 14, 15, 16},
}));
// clang-format on
- auto lhs_literal = Literal::CreateR4FromArray4D<float>(lhs_array);
+ auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
@@ -1039,7 +1039,7 @@ TEST_P(HloEvaluatorTest, DilatedBaseConv2DWithHighPadding) {
{7, 8},
}));
// clang-format on
- auto rhs_literal = Literal::CreateR4FromArray4D<float>(rhs_array);
+ auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
@@ -1074,7 +1074,7 @@ TEST_P(HloEvaluatorTest, DilatedBaseConv2DWithHighPadding) {
{91, 112, 98, 120, 105, 128, 112},
{65, 84, 70, 90, 75, 96, 80},
}));
- auto expected = Literal::CreateR4FromArray4D<float>(expected_array);
+ auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -1091,7 +1091,7 @@ TEST_P(HloEvaluatorTest, DilatedBaseConv2DWithLowAndHighPadding) {
{13, 14, 15, 16},
}));
// clang-format on
- auto lhs_literal = Literal::CreateR4FromArray4D<float>(lhs_array);
+ auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
@@ -1102,7 +1102,7 @@ TEST_P(HloEvaluatorTest, DilatedBaseConv2DWithLowAndHighPadding) {
{7, 8},
}));
// clang-format on
- auto rhs_literal = Literal::CreateR4FromArray4D<float>(rhs_array);
+ auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
@@ -1138,7 +1138,7 @@ TEST_P(HloEvaluatorTest, DilatedBaseConv2DWithLowAndHighPadding) {
{104, 91, 112, 98, 120, 105, 128, 112},
{78, 65, 84, 70, 90, 75, 96, 80},
}));
- auto expected = Literal::CreateR4FromArray4D<float>(expected_array);
+ auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -1156,7 +1156,7 @@ TEST_P(HloEvaluatorTest,
{13, 14, 15, 16},
}));
// clang-format on
- auto lhs_literal = Literal::CreateR4FromArray4D<float>(lhs_array);
+ auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
@@ -1167,7 +1167,7 @@ TEST_P(HloEvaluatorTest,
{8, 9, 10},
}));
// clang-format on
- auto rhs_literal = Literal::CreateR4FromArray4D<float>(rhs_array);
+ auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
@@ -1210,7 +1210,7 @@ TEST_P(HloEvaluatorTest,
{0, 0, 0},
{91, 98, 105},
}));
- auto expected = Literal::CreateR4FromArray4D<float>(expected_array);
+ auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -1225,9 +1225,9 @@ TEST_F(HloEvaluatorPreciseReduceTest, AddReductionPrecisionTest) {
constexpr int kNumElements = 1 << 25; // float += 1 saturates at 1<<24
std::vector<float> v(kNumElements, 1.0f);
HloInstruction* arg_instruction = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>(v)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(v)));
HloInstruction* init_value = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
@@ -1262,9 +1262,9 @@ void BM_ReducePrecisely(int num_iters) {
constexpr int kNumElements = 1 << 25; // float += 1 saturates at 1<<24
std::vector<float> v(kNumElements, 1.0f);
HloInstruction* arg_instruction = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>(v)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(v)));
auto init_value = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
@@ -1299,13 +1299,13 @@ TEST_P(HloEvaluatorTest, ReduceAdd) {
// }
auto arg_array = MakeUnique<Array2D<float>>(2, 3);
arg_array->FillUnique(1.0f);
- auto arg_literal = Literal::CreateR2FromArray2D<float>(*arg_array);
+ auto arg_literal = LiteralUtil::CreateR2FromArray2D<float>(*arg_array);
HloInstruction* arg_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
@@ -1326,7 +1326,7 @@ TEST_P(HloEvaluatorTest, ReduceAdd) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected = Literal::CreateR1<float>({6, 18});
+ auto expected = LiteralUtil::CreateR1<float>({6, 18});
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -1341,13 +1341,13 @@ TEST_P(HloEvaluatorTest, ReduceWindowMax) {
// }
auto arg_array = MakeUnique<Array2D<float>>(2, 3);
arg_array->FillUnique(1.0f);
- auto arg_literal = Literal::CreateR2FromArray2D<float>(*arg_array);
+ auto arg_literal = LiteralUtil::CreateR2FromArray2D<float>(*arg_array);
HloInstruction* arg_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder max_computation("max");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
@@ -1378,7 +1378,7 @@ TEST_P(HloEvaluatorTest, ReduceWindowMax) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected = Literal::CreateR2<float>({{6, 7}});
+ auto expected = LiteralUtil::CreateR2<float>({{6, 7}});
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -1392,13 +1392,13 @@ TEST_P(HloEvaluatorTest, ReduceWindowAdd) {
// }
auto arg_array = MakeUnique<Array2D<float>>(2, 3);
arg_array->FillUnique(1.0f);
- auto arg_literal = Literal::CreateR2FromArray2D<float>(*arg_array);
+ auto arg_literal = LiteralUtil::CreateR2FromArray2D<float>(*arg_array);
HloInstruction* arg_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
@@ -1435,7 +1435,7 @@ TEST_P(HloEvaluatorTest, ReduceWindowAdd) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected = Literal::CreateR2<float>({{1, 3, 5}, {5, 11, 13}});
+ auto expected = LiteralUtil::CreateR2<float>({{1, 3, 5}, {5, 11, 13}});
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
@@ -1445,13 +1445,13 @@ TEST_P(HloEvaluatorTest, ReduceWindowAdd6D) {
// arg: f32[4,4,4,4,4,4] full of ones. Using small dims to limit run-time.
std::vector<int64> input_dims(6, 4);
std::unique_ptr<Literal> arg_literal =
- Literal::CreateFullWithDescendingLayout<float>(input_dims, 1.0f);
+ LiteralUtil::CreateFullWithDescendingLayout<float>(input_dims, 1.0f);
HloInstruction* arg_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
@@ -1498,7 +1498,7 @@ TEST_P(HloEvaluatorTest, ReduceWindowAdd6D) {
std::vector<int64> output_dims = {4, 3, 3, 3, 4, 4};
std::unique_ptr<Literal> result_literal =
- Literal::CreateFullWithDescendingLayout<float>(output_dims, 8.0f);
+ LiteralUtil::CreateFullWithDescendingLayout<float>(output_dims, 8.0f);
EXPECT_TRUE(LiteralTestUtil::Equal(*result_literal, *result));
}
@@ -1513,7 +1513,8 @@ TEST_P(HloEvaluatorTest, StridedSlice) {
// }
auto operand_array = MakeUnique<Array2D<float>>(3, 5);
operand_array->FillUnique(1.0f);
- auto operand_literal = Literal::CreateR2FromArray2D<float>(*operand_array);
+ auto operand_literal =
+ LiteralUtil::CreateR2FromArray2D<float>(*operand_array);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
@@ -1527,7 +1528,7 @@ TEST_P(HloEvaluatorTest, StridedSlice) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected = Literal::CreateR2<float>({
+ auto expected = LiteralUtil::CreateR2<float>({
{3},
{19},
});
@@ -1545,13 +1546,14 @@ TEST_P(HloEvaluatorTest, DynamicSlice) {
// }
auto operand_array = MakeUnique<Array2D<float>>(2, 4);
operand_array->FillUnique(1.0f);
- auto operand_literal = Literal::CreateR2FromArray2D<float>(*operand_array);
+ auto operand_literal =
+ LiteralUtil::CreateR2FromArray2D<float>(*operand_array);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
auto start_indices = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({0, 1})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({0, 1})));
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
b.AddInstruction(HloInstruction::CreateDynamicSlice(shape, operand,
@@ -1560,7 +1562,7 @@ TEST_P(HloEvaluatorTest, DynamicSlice) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected = Literal::CreateR2<float>({
+ auto expected = LiteralUtil::CreateR2<float>({
{2, 3, 4},
{6, 7, 8},
});
@@ -1580,13 +1582,14 @@ TEST_P(HloEvaluatorTest, DynamicSliceModSlice) {
// }
auto operand_array = MakeUnique<Array2D<float>>(2, 4);
operand_array->FillUnique(1.0f);
- auto operand_literal = Literal::CreateR2FromArray2D<float>(*operand_array);
+ auto operand_literal =
+ LiteralUtil::CreateR2FromArray2D<float>(*operand_array);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
auto start_indices = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({2, 1})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({2, 1})));
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
b.AddInstruction(HloInstruction::CreateDynamicSlice(shape, operand,
@@ -1595,7 +1598,7 @@ TEST_P(HloEvaluatorTest, DynamicSliceModSlice) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected = Literal::CreateR2<float>({
+ auto expected = LiteralUtil::CreateR2<float>({
{2, 3, 4},
{6, 7, 8},
});
@@ -1613,16 +1616,17 @@ TEST_P(HloEvaluatorTest, DynamicSliceUpdate) {
// }
auto operand_array = MakeUnique<Array2D<double>>(2, 3);
operand_array->FillUnique(1.0);
- auto operand_literal = Literal::CreateR2FromArray2D<double>(*operand_array);
+ auto operand_literal =
+ LiteralUtil::CreateR2FromArray2D<double>(*operand_array);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
auto start_indices = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int64>({0, 1})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int64>({0, 1})));
auto update = b.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<double>({{-2.0, -3.0}, {-6.0, -7.0}})));
+ LiteralUtil::CreateR2<double>({{-2.0, -3.0}, {-6.0, -7.0}})));
Shape shape = ShapeUtil::MakeShape(F64, {2, 3});
b.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
@@ -1631,7 +1635,7 @@ TEST_P(HloEvaluatorTest, DynamicSliceUpdate) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected = Literal::CreateR2<double>({
+ auto expected = LiteralUtil::CreateR2<double>({
{1, -2, -3},
{5, -6, -7},
});
@@ -1649,12 +1653,13 @@ TEST_P(HloEvaluatorTest, SetAndGetTuples) {
// }
auto operand_array = MakeUnique<Array2D<double>>(2, 3);
operand_array->FillUnique(1.0);
- auto operand_literal2 = Literal::CreateR2FromArray2D<double>(*operand_array);
+ auto operand_literal2 =
+ LiteralUtil::CreateR2FromArray2D<double>(*operand_array);
HloInstruction* operand2 = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal2)));
HloInstruction* operand1 = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int64>({0, 1})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int64>({0, 1})));
auto tuple =
b.AddInstruction(HloInstruction::CreateTuple({operand1, operand2}));
@@ -1666,7 +1671,7 @@ TEST_P(HloEvaluatorTest, SetAndGetTuples) {
std::unique_ptr<Literal> result = Evaluate();
- auto expected = Literal::CreateR2<double>({
+ auto expected = LiteralUtil::CreateR2<double>({
{1, 2, 3},
{5, 6, 7},
});
@@ -1686,9 +1691,9 @@ TEST_P(HloEvaluatorTest, SetAndGetNestedTuples) {
operand_array->FillUnique(1.0);
HloInstruction* operand2 = b.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2FromArray2D<double>(*operand_array)));
+ LiteralUtil::CreateR2FromArray2D<double>(*operand_array)));
HloInstruction* operand1 = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int64>({0, 1})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int64>({0, 1})));
auto tuple1 =
b.AddInstruction(HloInstruction::CreateTuple({operand1, operand2}));
@@ -1706,8 +1711,8 @@ TEST_P(HloEvaluatorTest, SetAndGetNestedTuples) {
std::unique_ptr<Literal> result = Evaluate();
auto result_inner_literal =
- Literal::CreateR2FromArray2D<double>(*operand_array);
- auto expected = Literal::MakeTuple({
+ LiteralUtil::CreateR2FromArray2D<double>(*operand_array);
+ auto expected = LiteralUtil::MakeTuple({
result_inner_literal.get(),
result_inner_literal.get(),
});
@@ -1735,7 +1740,7 @@ TEST_P(HloEvaluatorTest, Reverse) {
{{23.0f}, {24.0f}}},
});
// clang-format on
- auto operand_literal = Literal::CreateR4FromArray4D<float>(input);
+ auto operand_literal = LiteralUtil::CreateR4FromArray4D<float>(input);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
@@ -1746,7 +1751,7 @@ TEST_P(HloEvaluatorTest, Reverse) {
std::unique_ptr<Literal> result = Evaluate();
// clang-format off
- auto expected = Literal::CreateR4FromArray4D<float>({
+ auto expected = LiteralUtil::CreateR4FromArray4D<float>({
{{{23.0f}, {24.0f}},
{{21.0f}, {22.0f}},
{{19.0f}, {20.0f}}},
@@ -1782,11 +1787,11 @@ TEST_P(HloEvaluatorTest, EvaluateWithSubstitutions) {
// Evaluate add with param0 = {1, 2, 3, 4}, square = {10, 20, 30, 40}.
HloEvaluator evaluator;
auto result = evaluator.EvaluateWithSubstitutions(
- add, {{param0, Literal::CreateR1<float>({1, 2, 3, 4}).get()},
- {square, Literal::CreateR1<float>({10, 20, 30, 40}).get()}});
+ add, {{param0, LiteralUtil::CreateR1<float>({1, 2, 3, 4}).get()},
+ {square, LiteralUtil::CreateR1<float>({10, 20, 30, 40}).get()}});
TF_ASSERT_OK(result.status());
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::CreateR1<float>({11, 22, 33, 44}), *result.ValueOrDie()));
+ *LiteralUtil::CreateR1<float>({11, 22, 33, 44}), *result.ValueOrDie()));
}
// Check that EvaluateWithSubstitutions works if one of the operands to the op
@@ -1799,18 +1804,18 @@ TEST_P(HloEvaluatorTest, EvaluateWithSubstitutionsWithConstantOperand) {
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* square = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, param0, param0));
- HloInstruction* constant = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({1, 2, 3, 4})));
+ HloInstruction* constant = b.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
HloInstruction* add = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, constant, square));
// Evaluate add with square = {10, 20, 30, 40}.
HloEvaluator evaluator;
auto result = evaluator.EvaluateWithSubstitutions(
- add, {{square, Literal::CreateR1<float>({10, 20, 30, 40}).get()}});
+ add, {{square, LiteralUtil::CreateR1<float>({10, 20, 30, 40}).get()}});
TF_ASSERT_OK(result.status());
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::CreateR1<float>({11, 22, 33, 44}), *result.ValueOrDie()));
+ *LiteralUtil::CreateR1<float>({11, 22, 33, 44}), *result.ValueOrDie()));
}
TEST_P(HloEvaluatorTest, EvaluateGather_TensorFlowGatherV1) {
@@ -1830,11 +1835,12 @@ ENTRY main {
)";
ParseAndVerifyModule(hlo_text);
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR1<int32>({0, 2});
- EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR2<int32>({{1, 2, 3}, {7, 8, 9}}),
- *Evaluate({operand.get(), gather_indices.get()})));
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ std::unique_ptr<Literal> gather_indices =
+ LiteralUtil::CreateR1<int32>({0, 2});
+ EXPECT_TRUE(LiteralTestUtil::Equal(
+ *LiteralUtil::CreateR2<int32>({{1, 2, 3}, {7, 8, 9}}),
+ *Evaluate({operand.get(), gather_indices.get()})));
}
TEST_P(HloEvaluatorTest, EvaluateGather_TensorFlowGatherV2) {
@@ -1854,10 +1860,11 @@ ENTRY main {
)";
ParseAndVerifyModule(hlo_text);
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR1<int32>({0, 2});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ std::unique_ptr<Literal> gather_indices =
+ LiteralUtil::CreateR1<int32>({0, 2});
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::CreateR2<int32>({{1, 3}, {4, 6}, {7, 9}}),
+ *LiteralUtil::CreateR2<int32>({{1, 3}, {4, 6}, {7, 9}}),
*Evaluate({operand.get(), gather_indices.get()})));
}
@@ -1878,11 +1885,11 @@ ENTRY main {
)";
ParseAndVerifyModule(hlo_text);
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR2<int32>({{0, 2}, {2, 1}});
+ LiteralUtil::CreateR2<int32>({{0, 2}, {2, 1}});
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::CreateR3<int32>(
+ *LiteralUtil::CreateR3<int32>(
{{{1, 3}, {4, 6}, {7, 9}}, {{3, 2}, {6, 5}, {9, 8}}}),
*Evaluate({operand.get(), gather_indices.get()})));
}
@@ -1904,13 +1911,13 @@ ENTRY main {
)";
ParseAndVerifyModule(hlo_text);
std::unique_ptr<Literal> operand =
- Literal::CreateR3<int32>({{{-1, 1}, {-2, 2}, {-3, 3}}, //
- {{-4, 4}, {-5, 5}, {-6, 6}}, //
- {{-7, 7}, {-8, 8}, {-9, 9}}});
+ LiteralUtil::CreateR3<int32>({{{-1, 1}, {-2, 2}, {-3, 3}}, //
+ {{-4, 4}, {-5, 5}, {-6, 6}}, //
+ {{-7, 7}, {-8, 8}, {-9, 9}}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR2<int32>({{0, 0}, {1, 0}});
+ LiteralUtil::CreateR2<int32>({{0, 0}, {1, 0}});
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR2<int32>({{-1, 1}, {-4, 4}}),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR2<int32>({{-1, 1}, {-4, 4}}),
*Evaluate({operand.get(), gather_indices.get()})));
}
@@ -1932,13 +1939,13 @@ ENTRY main {
)";
ParseAndVerifyModule(hlo_text);
std::unique_ptr<Literal> operand =
- Literal::CreateR3<int32>({{{-1, 1}, {-2, 2}, {-3, 3}}, //
- {{-4, 4}, {-5, 5}, {-6, 6}}, //
- {{-7, 7}, {-8, 8}, {-9, 9}}});
+ LiteralUtil::CreateR3<int32>({{{-1, 1}, {-2, 2}, {-3, 3}}, //
+ {{-4, 4}, {-5, 5}, {-6, 6}}, //
+ {{-7, 7}, {-8, 8}, {-9, 9}}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR2<int32>({{0, 0}, {1, 0}});
+ LiteralUtil::CreateR2<int32>({{0, 0}, {1, 0}});
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR2<int32>({{-2, 2}, {-1, 1}}),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR2<int32>({{-2, 2}, {-1, 1}}),
*Evaluate({operand.get(), gather_indices.get()})));
}
@@ -1959,10 +1966,11 @@ ENTRY main {
)";
ParseAndVerifyModule(hlo_text);
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR1<int32>({1, 1});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ std::unique_ptr<Literal> gather_indices =
+ LiteralUtil::CreateR1<int32>({1, 1});
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR2<int32>({{5}}),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR2<int32>({{5}}),
*Evaluate({operand.get(), gather_indices.get()})));
}
@@ -1983,11 +1991,11 @@ ENTRY main {
)";
ParseAndVerifyModule(hlo_text);
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR2<int32>({{2, 1}, {1, 1}});
+ LiteralUtil::CreateR2<int32>({{2, 1}, {1, 1}});
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR3<int32>({{{8}}, {{5}}}),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR3<int32>({{{8}}, {{5}}}),
*Evaluate({operand.get(), gather_indices.get()})));
}
@@ -2007,10 +2015,11 @@ ENTRY main {
}
)";
ParseAndVerifyModule(hlo_text);
- std::unique_ptr<Literal> operand = Literal::CreateR2<int32>({{}, {}, {}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR1<int32>({0, 2});
+ std::unique_ptr<Literal> operand = LiteralUtil::CreateR2<int32>({{}, {}, {}});
+ std::unique_ptr<Literal> gather_indices =
+ LiteralUtil::CreateR1<int32>({0, 2});
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR2<int32>({{}, {}}),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR2<int32>({{}, {}}),
*Evaluate({operand.get(), gather_indices.get()})));
}
@@ -2031,11 +2040,11 @@ ENTRY main {
)";
ParseAndVerifyModule(hlo_text);
- std::unique_ptr<Literal> operand = Literal::CreateR1<int32>({0, 1, 2});
+ std::unique_ptr<Literal> operand = LiteralUtil::CreateR1<int32>({0, 1, 2});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR3<int32>({{{0}, {1}}, {{2}, {1}}});
+ LiteralUtil::CreateR3<int32>({{{0}, {1}}, {{2}, {1}}});
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR2<int32>({{0, 1}, {2, 1}}),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR2<int32>({{0, 1}, {2, 1}}),
*Evaluate({operand.get(), gather_indices.get()})));
}
@@ -2043,14 +2052,14 @@ ENTRY main {
// element-wise comparison with 2 bfloat16 operands.
TEST_P(HloEvaluatorTest, DoesCompareBF16) {
// lhs >= rhs
- auto lhs = Literal::CreateR2<bfloat16>(
+ auto lhs = LiteralUtil::CreateR2<bfloat16>(
{{bfloat16(0.25), bfloat16(0.35), bfloat16(0.125)},
{bfloat16(-0.25), bfloat16(-0.35), bfloat16(-0.125)}});
- auto rhs = Literal::CreateR2<bfloat16>(
+ auto rhs = LiteralUtil::CreateR2<bfloat16>(
{{bfloat16(0.5), bfloat16(0.125), bfloat16(0.125)},
{bfloat16(0.25), bfloat16(-0.375), bfloat16(-0.127)}});
auto expected =
- Literal::CreateR2<bool>({{false, true, true}, {false, true, true}});
+ LiteralUtil::CreateR2<bool>({{false, true, true}, {false, true, true}});
TestBinaryOp(HloOpcode::kGe, std::move(expected), std::move(lhs),
std::move(rhs));
}
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h b/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
index 8b08756c64..e1924a0f8e 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
+++ b/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
@@ -16,6 +16,7 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_HLO_EVALUATOR_TYPED_VISITOR_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_HLO_EVALUATOR_TYPED_VISITOR_H_
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_evaluator.h"
#include "tensorflow/compiler/xla/service/shape_inference.h"
#include "tensorflow/core/lib/core/casts.h"
@@ -34,6 +35,37 @@ using is_complex_t = std::is_same<T, complex64>;
template <typename T>
using is_complex64_t = std::is_same<T, complex64>;
+// It's UB to use std::sort with std::less<float>, because of NaNs. Define
+// "safe" less functions which are actually strict weak orders.
+template <
+ typename NativeT,
+ typename std::enable_if<std::is_integral<NativeT>::value>::type* = nullptr>
+bool SafeLess(const NativeT& a, const NativeT& b) {
+ return a < b;
+}
+
+template <typename NativeT,
+ typename std::enable_if<
+ std::is_floating_point<NativeT>::value ||
+ std::is_same<NativeT, bfloat16>::value>::type* = nullptr>
+bool SafeLess(const NativeT& a, const NativeT& b) {
+ if (std::isnan(b)) {
+ return !std::isnan(a);
+ } else {
+ return a < b;
+ }
+}
+
+template <typename NativeT, typename std::enable_if<std::is_same<
+ NativeT, Eigen::half>::value>::type* = nullptr>
+bool SafeLess(const NativeT& a, const NativeT& b) {
+ if (Eigen::half_impl::isnan(b)) {
+ return !Eigen::half_impl::isnan(a);
+ } else {
+ return a < b;
+ }
+}
+
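For illustration, a minimal sketch (not part of this patch) of the comparator's
contract, assuming the SafeLess overloads above are in scope: std::sort with
std::less<float> over data containing NaNs violates the strict-weak-ordering
requirement and is undefined behavior, whereas SafeLess orders every NaN after
every ordinary value.

  #include <algorithm>
  #include <cmath>
  #include <vector>

  void SortWithNans() {
    std::vector<float> v = {3.0f, NAN, 1.0f, 2.0f};
    std::sort(v.begin(), v.end(),
              [](float a, float b) { return SafeLess<float>(a, b); });
    // v is now {1.0f, 2.0f, 3.0f, NaN}: NaNs compare "greater" than numbers.
  }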
// Templated DfsHloVisitor for use by HloEvaluator.
//
// Typically ReturnT here indicates the resulting literal type of each evaluated
@@ -269,6 +301,14 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
return HandleFloor<ReturnT>(floor);
}
+ Status HandleImag(HloInstruction* imag) override {
+ TF_ASSIGN_OR_RETURN(parent_->evaluated_[imag],
+ ElementWiseUnaryOp(imag, [](ElementwiseT elem_operand) {
+ return std::imag(elem_operand);
+ }));
+ return Status::OK();
+ }
+
Status HandleLog(HloInstruction* log) override {
TF_ASSIGN_OR_RETURN(parent_->evaluated_[log],
ElementWiseUnaryOp(log, [](ElementwiseT elem_operand) {
@@ -572,6 +612,14 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
return Status::OK();
}
+ Status HandleReal(HloInstruction* real) override {
+ TF_ASSIGN_OR_RETURN(parent_->evaluated_[real],
+ ElementWiseUnaryOp(real, [](ElementwiseT elem_operand) {
+ return std::real(elem_operand);
+ }));
+ return Status::OK();
+ }
+
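For illustration, the scalar semantics these handlers rely on (a sketch, not
part of this patch): for complex64 operands std::real/std::imag project the two
components, while for real element types the C++11 overloads return the value
itself and zero, respectively.

  #include <complex>

  void RealImagScalars() {
    std::complex<float> z(1.5f, -2.5f);
    float re = std::real(z);     // 1.5f
    float im = std::imag(z);     // -2.5f
    float r2 = std::real(3.0f);  // 3.0f: a real value passes through
    float i2 = std::imag(3.0f);  // 0.0f: a real value has no imaginary part
    (void)re; (void)im; (void)r2; (void)i2;
  }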
template <
typename NativeT,
typename std::enable_if<!is_complex_t<NativeT>::value>::type* = nullptr>
@@ -1025,83 +1073,47 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
CHECK_EQ(dnums.lhs_batch_dimensions_size(),
dnums.rhs_batch_dimensions_size());
- std::vector<int64> lhs_non_contracting_dims;
+ DimensionVector lhs_index(lhs_rank);
+ DimensionVector rhs_index(rhs_rank);
+
+ // result_index_locations[i] contains one or two pointers to the locations
+ // in lhs_index or rhs_index where the i'th result index should go.
+ tensorflow::gtl::InlinedVector<std::pair<int64*, int64*>, kInlineRank>
+ result_index_locations;
+ result_index_locations.reserve(lhs_rank + rhs_rank - 2);
+
+ // The first components in the output shape are the LHS and RHS batch
+ // dimensions:
+ for (int64 i = 0; i < dnums.lhs_batch_dimensions_size(); i++) {
+ result_index_locations.push_back(
+ {&lhs_index[dnums.lhs_batch_dimensions(i)],
+ &rhs_index[dnums.rhs_batch_dimensions(i)]});
+ }
+
+ // Then we have the LHS and RHS non-contracting dimensions, if any:
for (int64 i = 0; i < lhs_rank; i++) {
- if (i != lhs_contracting_dimension) {
- lhs_non_contracting_dims.push_back(i);
+ if (i != lhs_contracting_dimension &&
+ !ArrayContains(AsInt64Slice(dnums.lhs_batch_dimensions()), i)) {
+ result_index_locations.push_back({&lhs_index[i], nullptr});
}
}
-
- std::vector<int64> rhs_non_batch_non_contracting_dims;
- tensorflow::gtl::FlatSet<int64> batch_dims_set(
- dnums.rhs_batch_dimensions().begin(),
- dnums.rhs_batch_dimensions().end());
for (int64 i = 0; i < rhs_rank; i++) {
- if (i != rhs_contracting_dimension && batch_dims_set.count(i) == 0) {
- rhs_non_batch_non_contracting_dims.push_back(i);
+ if (i != rhs_contracting_dimension &&
+ !ArrayContains(AsInt64Slice(dnums.rhs_batch_dimensions()), i)) {
+ result_index_locations.push_back({&rhs_index[i], nullptr});
}
}
- const int64 batch_dim_size = dnums.lhs_batch_dimensions_size();
- const int64 lhs_non_contracting_size = lhs_non_contracting_dims.size();
-
- DimensionVector lhs_index(lhs_rank);
- DimensionVector rhs_index(rhs_rank);
auto result = MakeUnique<Literal>(dot->shape());
TF_RETURN_IF_ERROR(result->Populate<ReturnT>(
[&](tensorflow::gtl::ArraySlice<int64> result_index) {
ElementwiseT result_val = static_cast<ElementwiseT>(0);
- // Find the corresponding non-contracting indices for lhs and rhs.
- //
- // For `result_index`, its batch dimension, if exists, will be at the
- // same dimension as the batch dimension of lhs and rhs. More
- // specifically:
- // - For lhs, the non-contracting dimensions, including the batch
- // dimension have the same index as the `result_index`.
- // - For rhs, the batch dimension is set seperately from other
- // non-contracting dimensions, since these other non-contracting
- // dimensions in rhs follow the non-contracting dimensions of lhs in
- // the resulting index.
- //
- // As an example, for a resulting index:
- // result_index [result_batch, result_x, result_y]
- // the effecting lhs and rhs indices are:
- // lhs [result_batch, lhs_non_contracting_dim, contracting_dim
- // rhs [result_batch, contracting_dim, rhs_non_contracting_dim]
- // `result_x` is only affected by the lhs_non_contracting_dim and
- // likewise `result_y` only depends on rhs_non_contracting_dim.
- //
- // so we can look up the lhs and rhs indices by:
- //
- // lhs:
- // batch index is the same as `result_batch`.
- // non-contracting dimension is the same as
- // result_index[lhs_non_contracting_dim]
- // rhs:
- // batch index: the same as `result_batch`.
- // non-contracting dimension index: *not* the same as
- // result_index[rhs_non_contractng_dim], since the
- // non-contracting dimensions of lhs are included in the
- // result_index first. Instead, the non_contracting_dim of rhs must
- // be calculated as following:
- // lhs_non_contracting_dimensions_size +
- // (rhs_non_batch_non_contracting_dim - batch_dim_size) - 1
- //
- // Note that (rhs_non_batch_contracting_dim - batch_dim_size) is
- // the index offset to the result_index that only depends on
- // the non_batch and non-contracting dimensions of rhs. -1 at the
- // end translates size to index.
- for (auto i : lhs_non_contracting_dims) {
- lhs_index[i] = result_index[i];
- }
- for (auto i : dnums.rhs_batch_dimensions()) {
- rhs_index[i] = result_index[i];
- }
- for (auto i : rhs_non_batch_non_contracting_dims) {
- const int64 rhs_non_batch_non_contracting_dim =
- lhs_non_contracting_size + (i - batch_dim_size) - 1;
- rhs_index[i] = result_index[rhs_non_batch_non_contracting_dim];
+ for (int64 i = 0; i < result_index.size(); i++) {
+ *result_index_locations[i].first = result_index[i];
+ if (result_index_locations[i].second) {
+ *result_index_locations[i].second = result_index[i];
+ }
}
// Accumulates resulting product along the contracted dimension.
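For illustration, a standalone sketch (not part of this patch) of the
pointer-based index scatter above, on hypothetical shapes lhs [B, M, K] x
rhs [B, K, N] -> out [B, M, N] with batch dimension 0 and contracting
dimension K. Each component of the output index is written through one or two
pre-resolved pointers, so no per-element dimension arithmetic is needed.

  #include <cstdint>
  #include <utility>
  #include <vector>

  void ScatterResultIndex(const std::vector<int64_t>& result_index,
                          std::vector<int64_t>* lhs_index,
                          std::vector<int64_t>* rhs_index) {
    // Pre-resolved locations: out[0] -> lhs[0] and rhs[0] (shared batch dim),
    // out[1] -> lhs[1] (the M dim), out[2] -> rhs[2] (the N dim).
    std::vector<std::pair<int64_t*, int64_t*>> locations = {
        {&(*lhs_index)[0], &(*rhs_index)[0]},
        {&(*lhs_index)[1], nullptr},
        {&(*rhs_index)[2], nullptr},
    };
    for (size_t i = 0; i < result_index.size(); ++i) {
      *locations[i].first = result_index[i];
      if (locations[i].second) *locations[i].second = result_index[i];
    }
    // Only the contracting components lhs_index[2] and rhs_index[1] are left
    // for the inner accumulation loop to fill.
  }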
@@ -1321,7 +1333,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
parent_->GetEvaluatedLiteralFor(operand);
auto curr_val = arg_literal.Get<NativeT>(multi_index);
- auto curr_val_literal = Literal::CreateR0<NativeT>(curr_val);
+ auto curr_val_literal = LiteralUtil::CreateR0<NativeT>(curr_val);
arg_literals.push_back(std::move(curr_val_literal));
}
@@ -1402,22 +1414,24 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
!is_complex_t<NativeT>::value &&
!std::is_same<NativeT, bool>::value>::type* = nullptr>
Status HandleSort(HloInstruction* sort) {
- TF_RET_CHECK(ShapeUtil::Rank(sort->shape()) == 1)
+ auto keys = sort->operand(0);
+ TF_RET_CHECK(ShapeUtil::Rank(keys->shape()) == 1)
<< "Sort is only supported for R1 shapes";
+ TF_RET_CHECK(sort->operand_count() == 1)
+ << "Typed visitor does not support key-value sort";
- auto arg = sort->operand(0);
- const Literal& arg_literal = parent_->GetEvaluatedLiteralFor(arg);
- VLOG(3) << "HandleSort arg_literal: " << arg_literal.ToString();
- const auto& arg_data = arg_literal.data<ReturnT>();
+ const Literal& keys_literal = parent_->GetEvaluatedLiteralFor(keys);
+ VLOG(3) << "HandleSort keys_literal: " << keys_literal.ToString();
+ const auto& keys_data = keys_literal.data<ReturnT>();
- std::vector<ReturnT> return_data(arg_data.begin(), arg_data.end());
- std::sort(return_data.begin(), return_data.end(),
+ std::vector<ReturnT> result_data(keys_data.begin(), keys_data.end());
+ std::sort(result_data.begin(), result_data.end(),
[](const ReturnT& a, const ReturnT& b) {
return SafeLess<ReturnT>(a, b);
});
auto result_literal = MakeUnique<Literal>(sort->shape());
result_literal->PopulateR1(
- tensorflow::gtl::ArraySlice<ReturnT>(return_data));
+ tensorflow::gtl::ArraySlice<ReturnT>(result_data));
VLOG(3) << "HandleSort result_literal: " << result_literal->ToString();
parent_->evaluated_[sort] = std::move(result_literal);
return Status::OK();
@@ -1507,8 +1521,9 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
auto curr_val = arg_literal.Get<ReturnT>(input_index);
// Evaluate computation with specified literal operands.
- auto curr_val_literal = Literal::CreateR0<ReturnT>(curr_val);
- auto result_val_literal = Literal::CreateR0<ReturnT>(result_val);
+ auto curr_val_literal = LiteralUtil::CreateR0<ReturnT>(curr_val);
+ auto result_val_literal =
+ LiteralUtil::CreateR0<ReturnT>(result_val);
std::unique_ptr<Literal> computed_result =
embedded_evaluator
@@ -1586,10 +1601,10 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
// Used in the dual IterateThroughWindow lambdas below. Hoisted to avoid
// dynamic memory allocations.
- auto curr_val_literal = Literal::CreateR0<ReturnT>(ReturnT());
- auto selected_val_literal = Literal::CreateR0<ReturnT>(ReturnT());
- auto source_literal_scatter = Literal::CreateR0<ReturnT>(ReturnT());
- auto scattered_literal = Literal::CreateR0<ReturnT>(ReturnT());
+ auto curr_val_literal = LiteralUtil::CreateR0<ReturnT>(ReturnT());
+ auto selected_val_literal = LiteralUtil::CreateR0<ReturnT>(ReturnT());
+ auto source_literal_scatter = LiteralUtil::CreateR0<ReturnT>(ReturnT());
+ auto scattered_literal = LiteralUtil::CreateR0<ReturnT>(ReturnT());
do {
// For each element in `source`, we place a window in `operand`. For each
// window placement, we iterate inside the window twice:
@@ -1710,9 +1725,9 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
// Evaluate computation with specified literal operands.
const auto curr_val_literal =
- Literal::CreateR0<ReturnT>(curr_val);
+ LiteralUtil::CreateR0<ReturnT>(curr_val);
const auto result_val_literal =
- Literal::CreateR0<ReturnT>(result_val);
+ LiteralUtil::CreateR0<ReturnT>(result_val);
std::unique_ptr<Literal> computed_result =
embedded_evaluator
.Evaluate<const Literal*>(
@@ -1757,7 +1772,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
return operand_literal.Get<ReturnT>(operand_index);
};
- auto result = Literal::CreateFromDimensions(
+ auto result = LiteralUtil::CreateFromDimensions(
shape.element_type(), AsInt64Slice(shape.dimensions()));
TF_RETURN_IF_ERROR(result->Populate<ReturnT>(func));
parent_->evaluated_[slice] = std::move(result);
@@ -2175,38 +2190,6 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
return rhs_unsigned >= lhs_size_unsigned;
}
- // It's UB to use std::sort with std::less<float>, because of NaNs. Define
- // "safe" less functions which are actually strict weak orders.
- template <typename NativeT,
- typename std::enable_if<std::is_integral<NativeT>::value>::type* =
- nullptr>
- static bool SafeLess(const NativeT& a, const NativeT& b) {
- return a < b;
- }
-
- template <typename NativeT,
- typename std::enable_if<
- std::is_floating_point<NativeT>::value ||
- std::is_same<NativeT, bfloat16>::value>::type* = nullptr>
- static bool SafeLess(const NativeT& a, const NativeT& b) {
- if (std::isnan(b)) {
- return !std::isnan(a);
- } else {
- return a < b;
- }
- }
-
- template <typename NativeT,
- typename std::enable_if<
- std::is_same<NativeT, Eigen::half>::value>::type* = nullptr>
- static bool SafeLess(const NativeT& a, const NativeT& b) {
- if (Eigen::half_impl::isnan(b)) {
- return !Eigen::half_impl::isnan(a);
- } else {
- return a < b;
- }
- }
-
HloEvaluator* parent_;
};
diff --git a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
index b349f7d46f..57cf34d7de 100644
--- a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
+++ b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
@@ -27,7 +27,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_casting_utils.h"
#include "tensorflow/compiler/xla/service/hlo_instructions.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
@@ -966,6 +966,7 @@ ColorScheme HloDotDumper::GetInstructionColor(const HloInstruction* instr) {
case HloOpcode::kRemainder:
case HloOpcode::kRng:
case HloOpcode::kRoundNearestAfz:
+ case HloOpcode::kSelect:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
@@ -984,7 +985,7 @@ ColorScheme HloDotDumper::GetInstructionColor(const HloInstruction* instr) {
case HloOpcode::kBitcast:
case HloOpcode::kGetTupleElement:
case HloOpcode::kTrace:
- case HloOpcode::kGenerateToken:
+ case HloOpcode::kAfterAll:
case HloOpcode::kTuple:
return kWhite;
case HloOpcode::kBroadcast:
@@ -1001,7 +1002,7 @@ ColorScheme HloDotDumper::GetInstructionColor(const HloInstruction* instr) {
case HloOpcode::kPad:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
- case HloOpcode::kSelect:
+ case HloOpcode::kTupleSelect:
case HloOpcode::kTranspose:
// De-emphasize scalar-shaped data movement ops and all data movement ops
// inside fusion nodes, both of which are essentially free.
diff --git a/tensorflow/compiler/xla/service/hlo_graph_dumper_test.cc b/tensorflow/compiler/xla/service/hlo_graph_dumper_test.cc
index 68f41a1cbb..1d7a062c55 100644
--- a/tensorflow/compiler/xla/service/hlo_graph_dumper_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_graph_dumper_test.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_graph_dumper.h"
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
@@ -120,7 +121,7 @@ TEST(HloGraphDumperTest, NestedFusion) {
TEST(HloGraphDumperTest, Constant) {
HloComputation::Builder b("b");
auto instruction = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(-42)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(-42)));
instruction->SetAndSanitizeName("i_am_a_constant_root_instruction");
HloModuleConfig config;
HloModule m(TestName(), config);
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc
index a07dbe6256..02139facdb 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction.cc
@@ -22,7 +22,7 @@ limitations under the License.
#include <utility>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/protobuf_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor.h"
@@ -112,10 +112,10 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
break;
}
case HloOpcode::kSend:
- TF_RET_CHECK(proto.operand_ids_size() == 1)
- << "Send instruction should have 1 operand but sees "
+ TF_RET_CHECK(proto.operand_ids_size() == 2)
+ << "Send instruction should have 2 operand but sees "
<< proto.operand_ids_size();
- instruction = CreateSend(operands(0), proto.channel_id());
+ instruction = CreateSend(operands(0), operands(1), proto.channel_id());
break;
case HloOpcode::kSendDone:
TF_RET_CHECK(proto.operand_ids_size() == 1)
@@ -124,11 +124,11 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
instruction = CreateSendDone(operands(0));
break;
case HloOpcode::kRecv:
- TF_RET_CHECK(proto.operand_ids_size() == 0)
- << "Recv instruction should have 0 operand but sees "
+ TF_RET_CHECK(proto.operand_ids_size() == 1)
+ << "Recv instruction should have 1 operand but sees "
<< proto.operand_ids_size();
- instruction =
- CreateRecv(proto.shape().tuple_shapes(0), proto.channel_id());
+ instruction = CreateRecv(proto.shape().tuple_shapes(0), operands(0),
+ proto.channel_id());
break;
case HloOpcode::kRecvDone:
TF_RET_CHECK(proto.operand_ids_size() == 1)
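For illustration, a sketch (not part of this patch) of building a matching
Send/Recv pair with the new token-threading signatures; the builder, shapes,
and channel id here are hypothetical.

  HloComputation::Builder b("send_recv");
  auto token = b.AddInstruction(HloInstruction::CreateToken());
  auto data = b.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
  auto send = b.AddInstruction(
      HloInstruction::CreateSend(data, token, /*channel_id=*/1));
  b.AddInstruction(HloInstruction::CreateSendDone(send));
  auto recv = b.AddInstruction(HloInstruction::CreateRecv(
      ShapeUtil::MakeShape(F32, {}), token, /*channel_id=*/1));
  b.AddInstruction(HloInstruction::CreateRecvDone(recv));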
@@ -163,6 +163,20 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
proto.dimensions().end()),
computations(0));
break;
+ case HloOpcode::kSort: {
+ TF_RET_CHECK(proto.operand_ids_size() == 1 ||
+ proto.operand_ids_size() == 2)
+ << "Sort instruction should have 1 or 2 operands but has "
+ << proto.operand_ids_size();
+ TF_RET_CHECK(proto.dimensions().size() == 1)
+ << "Sort instruction should have 1 dimension";
+ HloInstruction* keys = operands(0);
+ HloInstruction* values =
+ proto.operand_ids_size() == 2 ? operands(1) : nullptr;
+ instruction =
+ CreateSort(proto.shape(), proto.dimensions(0), keys, values);
+ break;
+ }
case HloOpcode::kTranspose:
TF_RET_CHECK(proto.operand_ids_size() == 1)
<< "Transpose instruction should have 1 operand but sees "
@@ -263,12 +277,30 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
CreateReducePrecision(proto.shape(), operands(0),
proto.exponent_bits(), proto.mantissa_bits());
break;
- case HloOpcode::kInfeed:
- instruction = CreateInfeed(proto.shape(), proto.infeed_config());
- break;
+ case HloOpcode::kInfeed: {
+ const Shape& data_shape =
+ ShapeUtil::GetTupleElementShape(proto.shape(), 0);
+ if (proto.operand_ids_size() == 0) {
+ // TODO(b/80000000): Remove this when all uses of infeed are
+ // converted to take tokens.
+ instruction = CreateInfeed(data_shape, proto.infeed_config());
+ } else {
+ CHECK_EQ(proto.operand_ids_size(), 1);
+ instruction =
+ CreateInfeed(data_shape, operands(0), proto.infeed_config());
+ }
+ } break;
case HloOpcode::kOutfeed:
- instruction = CreateOutfeed(proto.outfeed_shape(), operands(0),
- proto.outfeed_config());
+ if (proto.operand_ids_size() == 1) {
+ // TODO(b/80000000): Remove this when all uses of outfeed are
+ // converted to take tokens.
+ instruction = CreateOutfeed(proto.outfeed_shape(), operands(0),
+ proto.outfeed_config());
+ } else {
+ CHECK_EQ(proto.operand_ids_size(), 2);
+ instruction = CreateOutfeed(proto.outfeed_shape(), operands(0),
+ operands(1), proto.outfeed_config());
+ }
break;
case HloOpcode::kCrossReplicaSum: {
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
@@ -354,6 +386,23 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
slice_sizes);
break;
}
+ case HloOpcode::kGather: {
+ TF_RET_CHECK(proto.operand_ids_size() == 2)
+ << "Gather instruction should have 2 operands but sees "
+ << proto.operand_ids_size();
+ TF_RET_CHECK(proto.has_gather_dimension_numbers())
+ << "Gather instruction should have GatherDimensionNumbers set.";
+ std::unique_ptr<GatherDimensionNumbers> gather_dimension_numbers =
+ MakeUnique<GatherDimensionNumbers>(proto.gather_dimension_numbers());
+ std::vector<int64> gather_window_bounds;
+ for (int64 bound : proto.gather_window_bounds()) {
+ gather_window_bounds.push_back(bound);
+ }
+ instruction =
+ CreateGather(proto.shape(), operands(0), operands(1),
+ *gather_dimension_numbers, gather_window_bounds);
+ break;
+ }
default: {
instruction = WrapUnique(new HloInstruction(opcode, proto.shape()));
for (const int64 operand_id : proto.operand_ids()) {
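For illustration, a sketch (not part of this patch) of assembling the
GatherDimensionNumbers proto that this case now hands to CreateGather; the
operand, indices, and shapes are hypothetical (an R2 [3,3] operand gathered by
R1 [2] row indices into an R2 [2,3] result).

  GatherDimensionNumbers dnums;
  dnums.add_output_window_dims(1);
  dnums.add_elided_window_dims(0);
  dnums.add_gather_dims_to_operand_dims(0);
  dnums.set_index_vector_dim(1);
  auto gather = HloInstruction::CreateGather(
      ShapeUtil::MakeShape(S32, {2, 3}), operand, gather_indices, dnums,
      /*window_bounds=*/{1, 3});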
@@ -395,13 +444,6 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
instruction->set_sharding(sharding);
}
- if (proto.has_gather_dimension_numbers()) {
- instruction->gather_dimension_numbers_ =
- MakeUnique<GatherDimensionNumbers>(proto.gather_dimension_numbers());
- }
- for (int64 bound : proto.gather_window_bounds()) {
- instruction->gather_window_bounds_.push_back(bound);
- }
return std::move(instruction);
}
@@ -471,7 +513,6 @@ HloInstruction::CreateGetTupleElement(const Shape& shape,
case HloOpcode::kReal:
case HloOpcode::kSign:
case HloOpcode::kSin:
- case HloOpcode::kSort:
case HloOpcode::kTanh:
break;
default:
@@ -524,8 +565,9 @@ HloInstruction::CreateGetTupleElement(const Shape& shape,
// Only certain opcodes are supported with CreateTernary: opcodes of ternary
// instructions with no auxiliary fields.
switch (opcode) {
- case (HloOpcode::kClamp):
- case (HloOpcode::kSelect):
+ case HloOpcode::kClamp:
+ case HloOpcode::kSelect:
+ case HloOpcode::kTupleSelect:
break;
default:
LOG(FATAL) << "Invalid ternary instruction opcode "
@@ -543,10 +585,8 @@ HloInstruction::CreateGetTupleElement(const Shape& shape,
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateMap(
const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- HloComputation* map_computation,
- tensorflow::gtl::ArraySlice<HloInstruction*> static_operands) {
- return MakeUnique<HloMapInstruction>(shape, operands, map_computation,
- static_operands);
+ HloComputation* map_computation) {
+ return MakeUnique<HloMapInstruction>(shape, operands, map_computation);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateConvolve(
@@ -610,19 +650,33 @@ HloInstruction::CreateCrossReplicaSum(
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateInfeed(
- const Shape& shape, const string& config) {
- return MakeUnique<HloInfeedInstruction>(shape, config);
+ const Shape& infeed_shape, HloInstruction* token_operand,
+ const string& config) {
+ return MakeUnique<HloInfeedInstruction>(infeed_shape, token_operand, config);
+}
+
+/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateInfeed(
+ const Shape& infeed_shape, const string& config) {
+ return MakeUnique<HloInfeedInstruction>(infeed_shape, config);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateOutfeed(
- const Shape& shape, HloInstruction* operand,
+ const Shape& outfeed_shape, HloInstruction* operand,
+ HloInstruction* token_operand, tensorflow::StringPiece outfeed_config) {
+ return MakeUnique<HloOutfeedInstruction>(outfeed_shape, operand,
+ token_operand, outfeed_config);
+}
+
+/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateOutfeed(
+ const Shape& outfeed_shape, HloInstruction* operand,
tensorflow::StringPiece outfeed_config) {
- return MakeUnique<HloOutfeedInstruction>(shape, operand, outfeed_config);
+ return MakeUnique<HloOutfeedInstruction>(outfeed_shape, operand,
+ outfeed_config);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateSend(
- HloInstruction* operand, int64 channel_id) {
- return MakeUnique<HloSendInstruction>(operand, channel_id);
+ HloInstruction* operand, HloInstruction* token, int64 channel_id) {
+ return MakeUnique<HloSendInstruction>(operand, token, channel_id);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateSendDone(
@@ -634,8 +688,8 @@ HloInstruction::CreateCrossReplicaSum(
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateRecv(
- const Shape& shape, int64 channel_id) {
- return MakeUnique<HloRecvInstruction>(shape, channel_id);
+ const Shape& shape, HloInstruction* token, int64 channel_id) {
+ return MakeUnique<HloRecvInstruction>(shape, token, channel_id);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateRecvDone(
@@ -652,17 +706,22 @@ HloInstruction::CreateCrossReplicaSum(
return MakeUnique<HloReverseInstruction>(shape, operand, dimensions);
}
-/* static */ std::unique_ptr<HloInstruction>
-HloInstruction::CreateGenerateToken(
+/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateAfterAll(
tensorflow::gtl::ArraySlice<HloInstruction*> operands) {
- auto instruction = WrapUnique(new HloInstruction(
- HloOpcode::kGenerateToken, ShapeUtil::MakeTokenShape()));
+ CHECK(!operands.empty());
+ auto instruction = WrapUnique(
+ new HloInstruction(HloOpcode::kAfterAll, ShapeUtil::MakeTokenShape()));
for (auto operand : operands) {
instruction->AppendOperand(operand);
}
return instruction;
}
+/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateToken() {
+ return WrapUnique(
+ new HloInstruction(HloOpcode::kAfterAll, ShapeUtil::MakeTokenShape()));
+}
+
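For illustration, a sketch (not part of this patch): CreateToken conjures a
fresh token (an operand-less kAfterAll), while CreateAfterAll joins existing
tokens into one token that orders side effects; the builder is hypothetical.

  auto t0 = builder.AddInstruction(HloInstruction::CreateToken());
  auto t1 = builder.AddInstruction(HloInstruction::CreateToken());
  auto joined =
      builder.AddInstruction(HloInstruction::CreateAfterAll({t0, t1}));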
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateWhile(
const Shape& shape, HloComputation* condition, HloComputation* body,
HloInstruction* init) {
@@ -879,6 +938,12 @@ HloInstruction::CreateBroadcastSequence(
return MakeUnique<HloTransposeInstruction>(shape, operand, dimensions);
}
+/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateSort(
+ const Shape& shape, int64 dimension, HloInstruction* keys,
+ HloInstruction* values) {
+ return MakeUnique<HloSortInstruction>(shape, dimension, keys, values);
+}
+
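For illustration, a sketch (not part of this patch) of the two forms the new
factory supports; the keys and values instructions are hypothetical. For a
key-value sort the result shape is a tuple of the keys and values shapes.

  auto keys_only = HloInstruction::CreateSort(
      ShapeUtil::MakeShape(F32, {4}), /*dimension=*/0, keys);
  auto key_value = HloInstruction::CreateSort(
      ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4}),
                                 ShapeUtil::MakeShape(S32, {4})}),
      /*dimension=*/0, keys, values);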
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateFusion(
const Shape& shape, FusionKind fusion_kind, HloInstruction* fused_root) {
return MakeUnique<HloFusionInstruction>(shape, fusion_kind, fused_root);
@@ -923,6 +988,8 @@ bool HloInstruction::HasSideEffectNoRecurse() const {
case HloOpcode::kTrace:
case HloOpcode::kHostCompute:
return true;
+ case HloOpcode::kCrossReplicaSum:
+ return all_reduce_id().has_value();
default:
return false;
}
@@ -981,34 +1048,8 @@ bool HloInstruction::HasSideEffect() const {
const Shape& shape, HloInstruction* operand, HloInstruction* gather_indices,
const GatherDimensionNumbers& gather_dim_numbers,
tensorflow::gtl::ArraySlice<int64> window_bounds) {
- std::unique_ptr<HloInstruction> instruction =
- WrapUnique(new HloInstruction(HloOpcode::kGather, shape));
- instruction->AppendOperand(operand);
- instruction->AppendOperand(gather_indices);
- instruction->gather_dimension_numbers_ =
- MakeUnique<GatherDimensionNumbers>(gather_dim_numbers);
- c_copy(window_bounds, std::back_inserter(instruction->gather_window_bounds_));
- return instruction;
-}
-
-/* static */ GatherDimensionNumbers HloInstruction::MakeGatherDimNumbers(
- tensorflow::gtl::ArraySlice<int64> output_window_dims,
- tensorflow::gtl::ArraySlice<int64> elided_window_dims,
- tensorflow::gtl::ArraySlice<int64> gather_dims_to_operand_dims,
- int64 index_vector_dim) {
- GatherDimensionNumbers gather_dim_numbers;
- for (int64 output_window_dim : output_window_dims) {
- gather_dim_numbers.add_output_window_dims(output_window_dim);
- }
- for (int64 elided_window_dim : elided_window_dims) {
- gather_dim_numbers.add_elided_window_dims(elided_window_dim);
- }
- for (int64 gather_dim_to_input_dim : gather_dims_to_operand_dims) {
- gather_dim_numbers.add_gather_dims_to_operand_dims(gather_dim_to_input_dim);
- }
-
- gather_dim_numbers.set_index_vector_dim(index_vector_dim);
- return gather_dim_numbers;
+ return MakeUnique<HloGatherInstruction>(shape, operand, gather_indices,
+ gather_dim_numbers, window_bounds);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateDomain(
@@ -1071,6 +1112,8 @@ std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewOperands(
case HloOpcode::kHostCompute:
case HloOpcode::kPad:
case HloOpcode::kDynamicSlice:
+ case HloOpcode::kSort:
+ case HloOpcode::kGather:
clone = CloneWithNewOperandsImpl(shape, new_operands, context);
break;
// Unary ops.
@@ -1093,7 +1136,6 @@ std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewOperands(
case HloOpcode::kReal:
case HloOpcode::kSign:
case HloOpcode::kSin:
- case HloOpcode::kSort:
case HloOpcode::kTanh:
CHECK_EQ(new_operands.size(), 1);
clone = CreateUnary(shape, opcode_, new_operands[0]);
@@ -1127,6 +1169,7 @@ std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewOperands(
// Ternary ops.
case HloOpcode::kClamp:
case HloOpcode::kSelect:
+ case HloOpcode::kTupleSelect:
CHECK_EQ(new_operands.size(), 3);
clone = CreateTernary(shape, opcode_, new_operands[0], new_operands[1],
new_operands[2]);
@@ -1172,19 +1215,18 @@ std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewOperands(
true_computation(), new_operands[2],
false_computation());
break;
- case HloOpcode::kGather:
- CHECK_EQ(new_operands.size(), 2);
- clone = CreateGather(shape, new_operands[0], new_operands[1],
- *gather_dimension_numbers_, gather_window_bounds_);
- break;
case HloOpcode::kDomain:
CHECK_EQ(new_operands.size(), 1);
clone =
CreateDomain(shape, new_operands[0], operand_side_metadata_->Clone(),
user_side_metadata_->Clone());
break;
- case HloOpcode::kGenerateToken:
- clone = CreateGenerateToken(new_operands);
+ case HloOpcode::kAfterAll:
+ if (new_operands.empty()) {
+ clone = CreateToken();
+ } else {
+ clone = CreateAfterAll(new_operands);
+ }
break;
}
SetupDerivedInstruction(clone.get());
@@ -1369,6 +1411,30 @@ void HloInstruction::AppendOperand(HloInstruction* operand) {
operand->AddUser(this);
}
+void HloInstruction::RemoveOperandsAtAscendingIndices(
+ tensorflow::gtl::ArraySlice<int> ascending_indices) {
+ if (ascending_indices.empty()) {
+ return;
+ }
+ int next_index = 0;
+ int removed_count = 0;
+ for (int to_remove : ascending_indices) {
+ while (next_index < to_remove) {
+ operands_[next_index - removed_count] = operands_[next_index];
+ ++next_index;
+ }
+ CHECK_LT(to_remove, operands_.size());
+ ++removed_count;
+ ++next_index;
+ }
+ while (next_index < operands_.size()) {
+ operands_[next_index - removed_count] = operands_[next_index];
+ ++next_index;
+ }
+ CHECK_EQ(removed_count, ascending_indices.size());
+ operands_.resize(operands_.size() - removed_count);
+}
+
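For illustration, the same single-pass compaction on a plain vector (a sketch,
not part of this patch). Removing ascending indices {1, 3} from {a, b, c, d, e}
shifts each survivor left past the gaps already opened and then truncates,
yielding {a, c, e}.

  #include <vector>

  void RemoveAtAscendingIndices(std::vector<char>* v,
                                const std::vector<int>& ascending_indices) {
    int next = 0;
    int removed = 0;
    for (int to_remove : ascending_indices) {
      while (next < to_remove) {
        (*v)[next - removed] = (*v)[next];
        ++next;
      }
      ++removed;
      ++next;  // Skip over the removed element.
    }
    while (next < static_cast<int>(v->size())) {
      (*v)[next - removed] = (*v)[next];
      ++next;
    }
    v->resize(v->size() - removed);
  }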
void HloInstruction::AddUser(HloInstruction* user) {
if (!ContainsKey(user_set_, user)) {
user_set_.insert(user);
@@ -1442,12 +1508,12 @@ bool HloInstruction::IdenticalSlowPath(
case HloOpcode::kSubtract:
case HloOpcode::kTanh:
case HloOpcode::kTuple:
+ case HloOpcode::kTupleSelect:
return true;
// These opcodes have complex or special behavior so just return false.
- case HloOpcode::kDomain:
case HloOpcode::kWhile:
- case HloOpcode::kGenerateToken:
+ case HloOpcode::kAfterAll:
return false;
// Check dot dimension numbers.
@@ -1455,11 +1521,6 @@ bool HloInstruction::IdenticalSlowPath(
return protobuf_util::ProtobufEquals(dot_dimension_numbers(),
other.dot_dimension_numbers());
- case HloOpcode::kGather:
- return protobuf_util::ProtobufEquals(gather_dimension_numbers(),
- other.gather_dimension_numbers()) &&
- gather_window_bounds() == other.gather_window_bounds();
-
// Remaining instructions with special values.
case HloOpcode::kCall:
return eq_computations(to_apply(), other.to_apply());
@@ -1467,9 +1528,9 @@ bool HloInstruction::IdenticalSlowPath(
return eq_computations(true_computation(), other.true_computation()) &&
eq_computations(false_computation(), other.false_computation());
- // These opcodes are not yet supported.
- case HloOpcode::kSort:
- return false;
+ case HloOpcode::kDomain:
+ return operand_side_metadata().Matches(other.operand_side_metadata()) &&
+ user_side_metadata().Matches(other.user_side_metadata());
// Ops migrated to subclasses should never come to this line.
// TODO(b/80131774): Remove this switch when migration is complete.
@@ -1484,6 +1545,7 @@ bool HloInstruction::IdenticalSlowPath(
case HloOpcode::kReverse:
case HloOpcode::kConcatenate:
case HloOpcode::kReduce:
+ case HloOpcode::kSort:
case HloOpcode::kTranspose:
case HloOpcode::kBroadcast:
case HloOpcode::kMap:
@@ -1505,6 +1567,7 @@ bool HloInstruction::IdenticalSlowPath(
case HloOpcode::kHostCompute:
case HloOpcode::kPad:
case HloOpcode::kDynamicSlice:
+ case HloOpcode::kGather:
LOG(FATAL) << "Base class impl called for opcode with subclass: "
<< opcode();
}
@@ -1539,6 +1602,10 @@ Status HloInstruction::ReplaceUseWith(HloInstruction* user,
std::replace(user->operands_.begin(), user->operands_.end(), this,
new_producer);
new_producer->AddUser(user);
+ if (user->opcode() == HloOpcode::kFusion) {
+ TF_RETURN_IF_ERROR(
+ Cast<HloFusionInstruction>(user)->DeduplicateFusionOperands());
+ }
return Status::OK();
}
@@ -1547,10 +1614,14 @@ Status HloInstruction::ReplaceOperandWith(int64 operand_num,
TF_RET_CHECK(operand_num >= 0);
TF_RET_CHECK(operand_num < operand_count());
HloInstruction* old_operand = mutable_operand(operand_num);
+ if (old_operand == new_operand) {
+ return Status::OK();
+ }
+
TF_RET_CHECK(ShapeUtil::CompatibleIgnoringFpPrecision(old_operand->shape(),
new_operand->shape()))
- << old_operand->shape().ShortDebugString() << " is not compatible with "
- << new_operand->shape().ShortDebugString();
+ << old_operand->shape() << " is not compatible with "
+ << new_operand->shape();
operands_[operand_num] = new_operand;
VLOG(3) << "Replacing operand " << operand_num << " of " << name() << " with "
@@ -1577,6 +1648,10 @@ Status HloInstruction::ReplaceAllUsesWith(HloInstruction* new_producer) {
std::replace(user->operands_.begin(), user->operands_.end(), this,
new_producer);
new_producer->AddUser(user);
+ if (user->opcode() == HloOpcode::kFusion) {
+ TF_RETURN_IF_ERROR(
+ Cast<HloFusionInstruction>(user)->DeduplicateFusionOperands());
+ }
}
}
users_.clear();
@@ -1755,7 +1830,6 @@ bool HloInstruction::IsElementwiseImpl(
// Ternary elementwise operations.
case HloOpcode::kSelect:
- return !ShapeUtil::IsTuple(shape_);
case HloOpcode::kClamp:
return true;
@@ -1767,6 +1841,10 @@ bool HloInstruction::IsElementwiseImpl(
}
}
+bool HloInstruction::IsCrossModuleAllReduce() const {
+ return opcode() == HloOpcode::kCrossReplicaSum && all_reduce_id();
+}
+
string HloInstruction::ToStringWithCanonicalNameMap(
const HloPrintOptions& options,
CanonicalNameMap* canonical_name_map) const {
@@ -1859,11 +1937,6 @@ std::vector<string> HloInstruction::ExtraAttributesToString(
if (dot_dimension_numbers_ != nullptr) {
extra.push_back(DotDimensionNumbersToString());
}
- if (gather_dimension_numbers_ != nullptr) {
- extra.push_back(GatherDimensionNumbersToString());
- extra.push_back(
- StrCat("window_bounds={", Join(gather_window_bounds(), ","), "}"));
- }
if (options.print_subcomputation_mode() ==
HloPrintOptions::PrintSubcomputationMode::kNameOnly) {
@@ -1950,8 +2023,8 @@ std::vector<string> HloInstruction::ExtraAttributesToString(
}
if (operand_side_metadata_ != nullptr && user_side_metadata_ != nullptr) {
extra.push_back(StrCat("domain={kind=\"", operand_side_metadata_->Kind(),
- "\", entry=", operand_side_metadata_->ToString(),
- ", exit=", user_side_metadata_->ToString(), "}"));
+ "\", entry=", user_side_metadata_->ToString(),
+ ", exit=", operand_side_metadata_->ToString(), "}"));
}
return extra;
@@ -1993,14 +2066,6 @@ HloInstructionProto HloInstruction::ToProto() const {
if (dot_dimension_numbers_ != nullptr) {
*proto.mutable_dot_dimension_numbers() = *dot_dimension_numbers_;
}
- if (gather_dimension_numbers_ != nullptr) {
- *proto.mutable_gather_dimension_numbers() = *gather_dimension_numbers_;
- }
- if (opcode() == HloOpcode::kGather) {
- for (int64 bound : gather_window_bounds()) {
- proto.add_gather_window_bounds(bound);
- }
- }
if (has_sharding()) {
*proto.mutable_sharding() = sharding().ToProto();
@@ -2126,6 +2191,8 @@ Status HloInstruction::Visit(DfsHloVisitorBase<HloInstructionPtr>* visitor) {
return visitor->HandleRemainder(this);
case HloOpcode::kSelect:
return visitor->HandleSelect(this);
+ case HloOpcode::kTupleSelect:
+ return visitor->HandleTupleSelect(this);
case HloOpcode::kConvolution:
return visitor->HandleConvolution(this);
case HloOpcode::kFft:
@@ -2226,8 +2293,8 @@ Status HloInstruction::Visit(DfsHloVisitorBase<HloInstructionPtr>* visitor) {
return visitor->HandleGather(this);
case HloOpcode::kDomain:
return visitor->HandleDomain(this);
- case HloOpcode::kGenerateToken:
- return visitor->HandleGenerateToken(this);
+ case HloOpcode::kAfterAll:
+ return visitor->HandleAfterAll(this);
// These opcodes are not handled here.
case HloOpcode::kTrace:
@@ -2759,26 +2826,6 @@ std::ostream& operator<<(std::ostream& os, HloInstruction::FusionKind kind) {
return os << ToString(kind);
}
-string HloInstruction::GatherDimensionNumbersToString() const {
- CHECK_NE(gather_dimension_numbers_.get(), nullptr);
- string output_window_dims =
- StrCat("output_window_dims={",
- Join(gather_dimension_numbers_->output_window_dims(), ","), "}");
- string elided_window_dims =
- StrCat("elided_window_dims={",
- Join(gather_dimension_numbers_->elided_window_dims(), ","), "}");
- string gather_dims_to_operand_dims = StrCat(
- "gather_dims_to_operand_dims={",
- Join(gather_dimension_numbers_->gather_dims_to_operand_dims(), ","), "}");
- string index_vector_dim = StrCat(
- "index_vector_dim=", gather_dimension_numbers_->index_vector_dim());
-
- return Join<std::initializer_list<string>>(
- {output_window_dims, elided_window_dims, gather_dims_to_operand_dims,
- index_vector_dim},
- ", ");
-}
-
bool HloInstruction::CouldBeBitcast() const {
switch (opcode_) {
case HloOpcode::kTranspose:
@@ -3092,4 +3139,14 @@ int64 HloInstruction::slice_sizes(int64 dimension) const {
const std::vector<int64>& HloInstruction::dynamic_slice_sizes() const {
return Cast<HloDynamicSliceInstruction>(this)->dynamic_slice_sizes();
}
+
+const GatherDimensionNumbers& HloInstruction::gather_dimension_numbers() const {
+ return Cast<HloGatherInstruction>(this)->gather_dimension_numbers();
+}
+
+tensorflow::gtl::ArraySlice<int64> HloInstruction::gather_window_bounds()
+ const {
+ return Cast<HloGatherInstruction>(this)->gather_window_bounds();
+}
+
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.h b/tensorflow/compiler/xla/service/hlo_instruction.h
index 8f59e67123..180b2fb359 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.h
+++ b/tensorflow/compiler/xla/service/hlo_instruction.h
@@ -33,7 +33,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/iterator_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor.h"
#include "tensorflow/compiler/xla/service/hlo.pb.h"
@@ -389,11 +389,10 @@ class HloInstruction {
// Creates a map instruction, where the computation (given by the handle) is
// applied element-wise to every element in operands (across the operands,
- // at a given index) with the same `static_operands`.
+ // at a given index).
static std::unique_ptr<HloInstruction> CreateMap(
const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- HloComputation* map_computation,
- tensorflow::gtl::ArraySlice<HloInstruction*> static_operands = {});
+ HloComputation* map_computation);
// Creates a convolution op, where rhs is the convolutional filter
// and window describes how the filter is applied to lhs.
@@ -459,19 +458,36 @@ class HloInstruction {
const Shape& shape, HloInstruction* operand);
// Creates an infeed instruction, which reads data of the given shape from the
- // Infeed interface of the device.
- static std::unique_ptr<HloInstruction> CreateInfeed(const Shape& shape,
+ // Infeed interface of the device. infeed_shape is the shape of the data
+ // received from the infeed, *not* the shape of the infeed instruction, which
+ // is a tuple containing the infeed_shape and the TOKEN.
+ static std::unique_ptr<HloInstruction> CreateInfeed(
+ const Shape& infeed_shape, HloInstruction* token_operand,
+ const string& config);
+ // Overload which does not require a token.
+ // TODO(b/80000000): Remove this overload when all uses of infeed are
+ // converted to take tokens.
+ static std::unique_ptr<HloInstruction> CreateInfeed(const Shape& infeed_shape,
const string& config);
- // Creates an outfeed instruction, which outputs data.
+ // Creates an outfeed instruction, which outputs data. outfeed_shape is the
+ // shape of the data being outfed, *not* the shape of the outfeed instruction,
+ // which is a TOKEN.
static std::unique_ptr<HloInstruction> CreateOutfeed(
- const Shape& shape, HloInstruction* operand,
+ const Shape& outfeed_shape, HloInstruction* operand,
+ HloInstruction* token_operand, tensorflow::StringPiece outfeed_config);
+ // Overload which does not require a token.
+ // TODO(b/80000000): Remove this overload when all uses of outfeed are
+ // converted to take tokens.
+ static std::unique_ptr<HloInstruction> CreateOutfeed(
+ const Shape& outfeed_shape, HloInstruction* operand,
tensorflow::StringPiece outfeed_config);
// Creates an asynchronous send instruction with the given channel id, which
// initiates sending the operand data to a unique receive instruction in
// another computation that has the same channel id.
static std::unique_ptr<HloInstruction> CreateSend(HloInstruction* operand,
+ HloInstruction* token,
int64 channel_id);
// Blocks until data transfer for the Send instruction (operand) is complete.
@@ -483,6 +499,7 @@ class HloInstruction {
// which allocates resources to receive data of the given shape from a unique
// send instruction in another computation that has the same channel id.
static std::unique_ptr<HloInstruction> CreateRecv(const Shape& shape,
+ HloInstruction* token,
int64 channel_id);
// Blocks until data transfer for the Recv instruction (operand) is complete
@@ -596,6 +613,11 @@ class HloInstruction {
const Shape& shape, HloInstruction* operand,
tensorflow::gtl::ArraySlice<int64> dimensions);
+ // Creates a sort op with a keys operand and an optional values operand.
+ static std::unique_ptr<HloInstruction> CreateSort(
+ const Shape& shape, int64 dimension, HloInstruction* keys,
+ HloInstruction* values = nullptr);
+
// Creates a while instruction, given a condition computation, a body
// computation, and the initial value for the input of the computations. For
// example, shape: S32, condition: i -> i < 1000, body: i -> i * 2, init: 1
@@ -665,17 +687,18 @@ class HloInstruction {
const Shape& shape, HloInstruction* operand,
tensorflow::gtl::ArraySlice<int64> dimensions);
- // Creates a token instruction used for joining or creating token types which
- // thread through side-effecting operations.
- static std::unique_ptr<HloInstruction> CreateGenerateToken(
+ // Creates an AfterAll instruction used for joining or creating new values of
+ // token type that thread through side-effecting operations. Operands must
+ // all be tokens, and there must be at least one operand.
+ static std::unique_ptr<HloInstruction> CreateAfterAll(
tensorflow::gtl::ArraySlice<HloInstruction*> operands);
- // Creates an instance of GatherDimensionNumbers.
- static GatherDimensionNumbers MakeGatherDimNumbers(
- tensorflow::gtl::ArraySlice<int64> output_window_dims,
- tensorflow::gtl::ArraySlice<int64> elided_window_dims,
- tensorflow::gtl::ArraySlice<int64> gather_dims_to_operand_dims,
- int64 index_vector_dim);
+ // Creates an AfterAll instruction which creates a token type out of thin air
+ // (no operands). This is a separate method from CreateAfterAll to facilitate
+ // the removal of operand-less AfterAll instructions.
+ // TODO(b/110532604): Remove this capability of creating a token from nothing
+ // when we plumb a primordial token from the entry computation.
+ static std::unique_ptr<HloInstruction> CreateToken();
// Returns the opcode for this instruction.
HloOpcode opcode() const { return opcode_; }
@@ -811,9 +834,15 @@ class HloInstruction {
// Replaces the use of this instruction in "user" with "new_producer". Note
// that there might be multiple uses of this instruction in "user"; all will
// be replaced.
+ //
+ // If the user is a fusion instruction, this function will remove any
+ // duplicated operands of it that could be created due to this replacement.
Status ReplaceUseWith(HloInstruction* user, HloInstruction* new_producer);
// Replaces the specified operand with new_operand.
+ //
+ // This function does NOT remove duplicated operands even if this instruction
+ // is a fusion, so that the existing operand numbers do not change.
Status ReplaceOperandWith(int64 operand_no, HloInstruction* new_operand);
// Replaces all uses of this instruction with the new producer. If
@@ -822,6 +851,9 @@ class HloInstruction {
//
// If this instruction is the root of its computation, sets the computation's
// root to new_producer.
+ //
+ // If a user is a fusion instruction, this function will remove any
+ // duplicated operands of it that could be created due to this replacement.
Status ReplaceAllUsesWith(HloInstruction* new_producer);
// Performs a postorder DFS visit using this node as the root. If
@@ -1042,19 +1074,6 @@ class HloInstruction {
// Returns the dump string of the dot dimension numbers.
string DotDimensionNumbersToString() const;
- const GatherDimensionNumbers& gather_dimension_numbers() const {
- CHECK(gather_dimension_numbers_ != nullptr);
- return *gather_dimension_numbers_;
- }
-
- tensorflow::gtl::ArraySlice<int64> gather_window_bounds() const {
- CHECK_EQ(opcode(), HloOpcode::kGather);
- return gather_window_bounds_;
- }
-
- // Returns the dump string of the gather dimension numbers.
- string GatherDimensionNumbersToString() const;
-
// Clones the HLO instruction. The clone will have the same opcode, shape, and
// operands. After creation the clone has no uses. "this" (the instruction
// cloned from) is not changed. Suffix is the string to append to the name of
@@ -1109,6 +1128,9 @@ class HloInstruction {
// Returns true if this instruction is elementwise on all its operands.
bool IsElementwise() const;
+ // Returns true if this is a cross-module all-reduce instruction.
+ bool IsCrossModuleAllReduce() const;
+
// Returns true if this elementwise instruction implicitly broadcasts operand
// `operand_idx`.
//
@@ -1421,6 +1443,12 @@ class HloInstruction {
// Delegates to HloDynamicSliceInstruction::dynamic_slice_sizes.
const std::vector<int64>& dynamic_slice_sizes() const;
+
+ // Delegates to HloGatherInstruction::gather_dimension_numbers.
+ const GatherDimensionNumbers& gather_dimension_numbers() const;
+ // Delegates to HloGatherInstruction::gather_window_bounds.
+ tensorflow::gtl::ArraySlice<int64> gather_window_bounds() const;
+
// Old methods kept for smooth subclassing transition END.
protected:
@@ -1440,6 +1468,10 @@ class HloInstruction {
operands_.erase(operands_.begin() + index);
}
+ // Removes a list of operands with the given indices in ascending order.
+ void RemoveOperandsAtAscendingIndices(
+ tensorflow::gtl::ArraySlice<int> ascending_indices);
+
void AppendComputation(HloComputation* computation) {
called_computations_.push_back(computation);
}
@@ -1560,9 +1592,6 @@ class HloInstruction {
// Describes the dimension numbers used for a dot.
std::unique_ptr<DotDimensionNumbers> dot_dimension_numbers_;
- std::unique_ptr<GatherDimensionNumbers> gather_dimension_numbers_;
- std::vector<int64> gather_window_bounds_;
-
// Used to tag kCopy instructions that are eligible for copy elision.
bool copy_elision_allowed_ = true;
diff --git a/tensorflow/compiler/xla/service/hlo_instruction_test.cc b/tensorflow/compiler/xla/service/hlo_instruction_test.cc
index 8ee24f9d92..b75a2bd34b 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction_test.cc
@@ -20,10 +20,11 @@ limitations under the License.
#include <utility>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/protobuf_util.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
@@ -249,7 +250,7 @@ TEST_F(HloInstructionTest, MultipleUsersAndOperands) {
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32_, "param1"));
auto c0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto addleft = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param0, c0));
auto addright = builder.AddInstruction(
@@ -294,7 +295,7 @@ TEST_F(HloInstructionTest, MultipleUsersAndOperandsWithUnaryOps) {
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32_, "param1"));
auto c0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto neg1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, c0));
auto addleft = builder.AddInstruction(
@@ -334,7 +335,7 @@ TEST_F(HloInstructionTest, TrivialMap) {
auto param = embedded_builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "x"));
auto value = embedded_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param, value));
auto add_f32 = module->AddEmbeddedComputation(embedded_builder.Build());
@@ -383,9 +384,9 @@ TEST_F(HloInstructionTest, TrivialReduce) {
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32a100x10, "p"));
auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto reduce = builder.AddInstruction(
HloInstruction::CreateReduce(f32v100, param0, const0,
/*dimensions_to_reduce=*/{1}, add_f32));
@@ -626,7 +627,7 @@ TEST_F(HloInstructionTest, SingletonFusionOp) {
HloComputation::Builder builder(TestName());
// Create a fusion instruction containing a single unary operation.
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto module = CreateNewModule();
@@ -642,9 +643,9 @@ TEST_F(HloInstructionTest, BinaryFusionOp) {
HloComputation::Builder builder(TestName());
// Create a fusion instruction containing a single binary operation.
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.1f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewModule();
@@ -661,7 +662,7 @@ TEST_F(HloInstructionTest, ChainFusionOp) {
HloComputation::Builder builder(TestName());
// Create a chain of fused unary ops.
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
@@ -682,7 +683,7 @@ TEST_F(HloInstructionTest, PreserveMetadataInFusionAndClone) {
HloComputation::Builder builder(TestName());
// Create a chain of fused unary ops.
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
@@ -710,16 +711,17 @@ TEST_F(HloInstructionTest, PreserveMetadataInFusionAndClone) {
TEST_F(HloInstructionTest, PreserveOutfeedShapeThroughClone) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2<float>({
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>({
{1, 2},
{3, 4},
})));
auto shape10 = ShapeUtil::MakeShapeWithLayout(F32, {2, 2}, {1, 0});
auto shape01 = ShapeUtil::MakeShapeWithLayout(F32, {2, 2}, {0, 1});
+ auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto outfeed10 = builder.AddInstruction(
- HloInstruction::CreateOutfeed(shape10, constant, ""));
+ HloInstruction::CreateOutfeed(shape10, constant, token, ""));
auto outfeed01 = builder.AddInstruction(
- HloInstruction::CreateOutfeed(shape01, constant, ""));
+ HloInstruction::CreateOutfeed(shape01, constant, token, ""));
auto clone01 = builder.AddInstruction(outfeed01->Clone());
auto clone10 = builder.AddInstruction(outfeed10->Clone());
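
Note for reviewers: this hunk is the user-visible side of the token change, so a quick sketch of the new call pattern may help. It reuses the builder, constant, and shape10 names from the test above; the key points are the explicit CreateToken() and the extra token argument to CreateOutfeed.

  // A token sequences the outfeed against other side-effecting instructions.
  auto token = builder.AddInstruction(HloInstruction::CreateToken());
  // New signature: CreateOutfeed(outfeed_shape, data, token, outfeed_config).
  // The outfeed instruction itself now produces token[] rather than nil
  // (see the HloOutfeedInstruction constructor later in this change).
  auto outfeed = builder.AddInstruction(
      HloInstruction::CreateOutfeed(shape10, constant, token, ""));
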
@@ -731,7 +733,7 @@ TEST_F(HloInstructionTest, PreserveOutfeedShapeThroughClone) {
TEST_F(HloInstructionTest, PreserveTupleShapeThroughClone) {
HloComputation::Builder builder(TestName());
auto* constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2<float>({
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>({
{1, 2},
{3, 4},
})));
@@ -762,13 +764,13 @@ TEST_F(HloInstructionTest, FusionOpWithCalledComputations) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)));
- auto map_1_x = builder.AddInstruction(HloInstruction::CreateMap(
- scalar_shape, {constant}, computation_x, /*static_operands=*/{}));
- auto map_2_x = builder.AddInstruction(HloInstruction::CreateMap(
- scalar_shape, {map_1_x}, computation_x, /*static_operands=*/{}));
- auto map_3_y = builder.AddInstruction(HloInstruction::CreateMap(
- scalar_shape, {map_2_x}, computation_y, /*static_operands=*/{}));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
+ auto map_1_x = builder.AddInstruction(
+ HloInstruction::CreateMap(scalar_shape, {constant}, computation_x));
+ auto map_2_x = builder.AddInstruction(
+ HloInstruction::CreateMap(scalar_shape, {map_1_x}, computation_x));
+ auto map_3_y = builder.AddInstruction(
+ HloInstruction::CreateMap(scalar_shape, {map_2_x}, computation_y));
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
@@ -797,11 +799,11 @@ TEST_F(HloInstructionTest, ComplexFusionOp) {
// Notable complexities are repeated operands in the same instruction,
// different shapes, use of value in different expressions.
auto c1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto c2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.1f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.1f)));
auto c3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(9.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(9.0f)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, c1, c2));
@@ -872,11 +874,11 @@ TEST_F(HloInstructionTest, IdenticalInstructions) {
// Create a set of random constant operands to use below. Make them matrices
// so dimensions are interesting.
auto operand1 = HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}));
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}));
auto operand2 = HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{10.0, 20.0}, {30.0, 40.0}}));
- auto vector_operand =
- HloInstruction::CreateConstant(Literal::CreateR1<float>({42.0, 123.0}));
+ LiteralUtil::CreateR2<float>({{10.0, 20.0}, {30.0, 40.0}}));
+ auto vector_operand = HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<float>({42.0, 123.0}));
Shape shape = operand1->shape();
// Convenient short names for the operands.
@@ -1170,6 +1172,40 @@ TEST_F(HloInstructionTest, CloneOfFusionPreservesShape) {
EXPECT_TRUE(StructuralEqual(*fusion, *fusion2));
}
+TEST_F(HloInstructionTest, NoRedundantFusionOperandsAfterReplacingUse) {
+ // Fused expression:
+ //
+ // x y
+ // | |
+ // | transpose
+ // \ /
+ // dot
+ const Shape s = ShapeUtil::MakeShape(F32, {10, 10});
+
+ HloComputation::Builder builder("TransposeDot");
+ HloInstruction* x =
+ builder.AddInstruction(HloInstruction::CreateParameter(0, s, "x"));
+ HloInstruction* y =
+ builder.AddInstruction(HloInstruction::CreateParameter(1, s, "y"));
+ HloInstruction* reshape =
+ builder.AddInstruction(HloInstruction::CreateTranspose(s, y, {1, 0}));
+ DotDimensionNumbers dot_dnums;
+ dot_dnums.add_lhs_contracting_dimensions(1);
+ dot_dnums.add_rhs_contracting_dimensions(0);
+ HloInstruction* dot = builder.AddInstruction(
+ HloInstruction::CreateDot(s, x, reshape, dot_dnums));
+
+ auto module = CreateNewModule();
+ auto* computation = module->AddEntryComputation(builder.Build());
+ HloInstruction* fusion = computation->CreateFusionInstruction(
+ {dot, reshape}, HloInstruction::FusionKind::kLoop);
+
+ EXPECT_TRUE(x->ReplaceAllUsesWith(y).ok());
+
+ EXPECT_THAT(fusion->operands(), UnorderedElementsAre(y));
+ EXPECT_EQ(fusion->fused_instructions_computation()->num_parameters(), 1);
+}
+
TEST_F(HloInstructionTest, FusionEquality) {
auto module = CreateNewModule();
HloComputation::Builder builder(TestName());
@@ -1199,9 +1235,9 @@ TEST_F(HloInstructionTest, NestedFusionEquality) {
// Build a nested fusion computation.
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto a = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}})));
auto b = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
+ LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
auto b_t = builder.AddInstruction(
HloInstruction::CreateTranspose(data_shape, b, {1, 0}));
DotDimensionNumbers dot_dnums;
@@ -1210,7 +1246,7 @@ TEST_F(HloInstructionTest, NestedFusionEquality) {
auto dot = builder.AddInstruction(
HloInstruction::CreateDot(data_shape, a, b_t, dot_dnums));
auto one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto add_operand = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape, one, {1}));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
@@ -1307,7 +1343,7 @@ TEST_F(HloInstructionTest, Stringification) {
"condition=%TransposeDot, body=%TransposeDot");
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloInstruction* conditional =
builder.AddInstruction(HloInstruction::CreateConditional(
sout, pred, x, computation, x, computation));
@@ -1334,7 +1370,7 @@ TEST_F(HloInstructionTest, StringifyGather_0) {
HloInstruction* gather_instruction =
builder.AddInstruction(HloInstruction::CreateGather(
gather_result_shape, input, gather_indices,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1370,7 +1406,7 @@ TEST_F(HloInstructionTest, StringifyGather_1) {
HloInstruction* gather_instruction =
builder.AddInstruction(HloInstruction::CreateGather(
gather_result_shape, input, gather_indices,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1420,15 +1456,15 @@ TEST_F(HloInstructionTest, CanonnicalStringificationFusion) {
HloInstruction* fusion = computation->CreateFusionInstruction(
{dot, reshape}, HloInstruction::FusionKind::kLoop);
- EXPECT_EQ(
- fusion->ToString(options),
+ const string expected_fusion =
R"(f32[5,20]{1,0} fusion(f32[5,10]{1,0}, f32[20,10]{1,0}), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
-})");
+})";
+ EXPECT_EQ(fusion->ToString(options), expected_fusion);
}
TEST_F(HloInstructionTest, CanonnicalStringificationWhile) {
@@ -1460,8 +1496,8 @@ TEST_F(HloInstructionTest, CanonnicalStringificationWhile) {
HloInstruction::CreateWhile(sout, computation, computation, x));
auto options = HloPrintOptions().Canonical();
- EXPECT_EQ(loop->ToString(options),
- R"(f32[5,20]{1,0} while(f32[5,10]{1,0}), condition=
+ const string expected_loop =
+ R"(f32[5,20]{1,0} while(f32[5,10]{1,0}), condition=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
@@ -1483,7 +1519,8 @@ TEST_F(HloInstructionTest, CanonnicalStringificationWhile) {
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
-})");
+})";
+ EXPECT_EQ(loop->ToString(options), expected_loop);
}
TEST_F(HloInstructionTest, CanonnicalStringificationConditional) {
@@ -1515,13 +1552,12 @@ TEST_F(HloInstructionTest, CanonnicalStringificationConditional) {
HloInstruction::CreateWhile(sout, computation, computation, x));
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloInstruction* conditional =
builder.AddInstruction(HloInstruction::CreateConditional(
sout, pred, x, computation, x, computation));
auto options = HloPrintOptions().Canonical();
- EXPECT_EQ(
- conditional->ToString(options),
+ const string expected_conditional =
R"(f32[5,20]{1,0} conditional(pred[], f32[5,10]{1,0}, f32[5,10]{1,0}), true_computation=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
@@ -1544,7 +1580,8 @@ TEST_F(HloInstructionTest, CanonnicalStringificationConditional) {
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
-})");
+})";
+ EXPECT_EQ(conditional->ToString(options), expected_conditional);
}
TEST_F(HloInstructionTest, CheckDeepClone) {
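
For context on the mechanical renames in this file: they track the split of the old literal_util.h into literal.h (the Literal class) and literal_util.h (the static factories, now hosted on LiteralUtil). A minimal sketch of the new spellings used throughout these tests:

  #include "tensorflow/compiler/xla/literal.h"       // class Literal
  #include "tensorflow/compiler/xla/literal_util.h"  // LiteralUtil factories

  // The factory helpers moved from Literal:: to LiteralUtil::.
  auto scalar = LiteralUtil::CreateR0<float>(1.1f);
  auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
  auto constant = HloInstruction::CreateConstant(std::move(scalar));
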
diff --git a/tensorflow/compiler/xla/service/hlo_instructions.cc b/tensorflow/compiler/xla/service/hlo_instructions.cc
index 803fde73a5..702f808449 100644
--- a/tensorflow/compiler/xla/service/hlo_instructions.cc
+++ b/tensorflow/compiler/xla/service/hlo_instructions.cc
@@ -17,10 +17,12 @@ limitations under the License.
#include <deque>
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_casting_utils.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/window_util.h"
+#include "tensorflow/core/lib/gtl/flatmap.h"
namespace xla {
namespace {
@@ -203,25 +205,28 @@ bool HloSendRecvInstruction::IdenticalSlowPath(
// Send instruction produces a tuple of {aliased operand, U32 context, token}.
HloSendInstruction::HloSendInstruction(HloInstruction* operand,
- int64 channel_id)
+ HloInstruction* token, int64 channel_id)
: HloSendRecvInstruction(
HloOpcode::kSend,
- ShapeUtil::MakeTupleShape(
- {CHECK_NOTNULL(operand)->shape(), ShapeUtil::MakeShape(U32, {})}),
+ ShapeUtil::MakeTupleShape({CHECK_NOTNULL(operand)->shape(),
+ ShapeUtil::MakeShape(U32, {}),
+ ShapeUtil::MakeTokenShape()}),
channel_id) {
AppendOperand(operand);
+ AppendOperand(token);
}
std::unique_ptr<HloInstruction> HloSendInstruction::CloneWithNewOperandsImpl(
const Shape& shape,
tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
HloCloneContext* context) const {
- CHECK_EQ(new_operands.size(), 1);
- return MakeUnique<HloSendInstruction>(new_operands[0], channel_id());
+ CHECK_EQ(new_operands.size(), 2);
+ return MakeUnique<HloSendInstruction>(new_operands[0], new_operands[1],
+ channel_id());
}
HloSendDoneInstruction::HloSendDoneInstruction(HloSendInstruction* operand)
- : HloSendRecvInstruction(HloOpcode::kSendDone, ShapeUtil::MakeNil(),
+ : HloSendRecvInstruction(HloOpcode::kSendDone, ShapeUtil::MakeTokenShape(),
CHECK_NOTNULL(operand)->channel_id()) {
AppendOperand(operand);
}
@@ -237,25 +242,31 @@ HloSendDoneInstruction::CloneWithNewOperandsImpl(
}
// Recv instruction produces a tuple of {receive buffer, U32 context, token}.
-HloRecvInstruction::HloRecvInstruction(const Shape& shape, int64 channel_id)
+HloRecvInstruction::HloRecvInstruction(const Shape& shape,
+ HloInstruction* token, int64 channel_id)
: HloSendRecvInstruction(
HloOpcode::kRecv,
- ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeShape(U32, {})}),
- channel_id) {}
+ ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeShape(U32, {}),
+ ShapeUtil::MakeTokenShape()}),
+ channel_id) {
+ AppendOperand(token);
+}
std::unique_ptr<HloInstruction> HloRecvInstruction::CloneWithNewOperandsImpl(
const Shape& shape,
tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
HloCloneContext* context) const {
- CHECK_EQ(new_operands.size(), 0);
+ CHECK_EQ(new_operands.size(), 1);
return MakeUnique<HloRecvInstruction>(
- ShapeUtil::GetTupleElementShape(shape, 0), channel_id());
+ ShapeUtil::GetTupleElementShape(shape, 0), new_operands[0], channel_id());
}
HloRecvDoneInstruction::HloRecvDoneInstruction(HloRecvInstruction* operand)
: HloSendRecvInstruction(
HloOpcode::kRecvDone,
- ShapeUtil::GetTupleElementShape(operand->shape(), 0),
+ ShapeUtil::MakeTupleShape(
+ {ShapeUtil::GetTupleElementShape(operand->shape(), 0),
+ ShapeUtil::MakeTokenShape()}),
CHECK_NOTNULL(operand)->channel_id()) {
AppendOperand(operand);
}
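
A note on the shape contract after this hunk: Send consumes {data, token} and produces a 3-tuple ending in a token, Recv consumes a token, and the Done variants now yield token-bearing shapes instead of nil. A sketch; builder, operand, recv_shape, and channel id 42 are placeholders:

  auto token = builder.AddInstruction(HloInstruction::CreateToken());
  // Result shape: (operand->shape(), u32[] context, token[]).
  auto send = builder.AddInstruction(
      HloInstruction::CreateSend(operand, token, /*channel_id=*/42));
  // Result shape: (recv_shape, u32[] context, token[]).
  auto recv = builder.AddInstruction(
      HloInstruction::CreateRecv(recv_shape, token, /*channel_id=*/42));
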
@@ -280,8 +291,6 @@ HloAllReduceInstruction::HloAllReduceInstruction(
replica_group_ids_(replica_group_ids.begin(), replica_group_ids.end()),
cross_replica_sum_barrier_(barrier.begin(), barrier.end()),
all_reduce_id_(all_reduce_id) {
- // TODO(b/79737069): Remove the CHECK when supported.
- CHECK(!all_reduce_id_);
for (auto operand : operands) {
AppendOperand(operand);
}
@@ -458,6 +467,46 @@ std::unique_ptr<HloInstruction> HloReduceInstruction::CloneWithNewOperandsImpl(
shape, new_operands[0], new_operands[1], dimensions(), to_apply());
}
+HloSortInstruction::HloSortInstruction(const Shape& shape, int64 dimension,
+ HloInstruction* keys,
+ HloInstruction* values)
+ : HloInstruction(HloOpcode::kSort, shape), dimensions_({dimension}) {
+ AppendOperand(keys);
+ if (values) {
+ AppendOperand(values);
+ }
+}
+
+HloInstructionProto HloSortInstruction::ToProto() const {
+ HloInstructionProto proto = HloInstruction::ToProto();
+ for (int64 dimension : dimensions_) {
+ proto.add_dimensions(dimension);
+ }
+ return proto;
+}
+
+std::vector<string> HloSortInstruction::ExtraAttributesToStringImpl(
+ const HloPrintOptions& options) const {
+ return {StrCat("dimensions={", Join(dimensions(), ","), "}")};
+}
+
+bool HloSortInstruction::IdenticalSlowPath(
+ const HloInstruction& other,
+ const std::function<bool(const HloComputation*, const HloComputation*)>&
+ eq_computations) const {
+ const auto& casted_other = static_cast<const HloSortInstruction&>(other);
+ return dimensions() == casted_other.dimensions();
+}
+
+std::unique_ptr<HloInstruction> HloSortInstruction::CloneWithNewOperandsImpl(
+ const Shape& shape,
+ tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ HloCloneContext* context) const {
+ HloInstruction* keys = new_operands[0];
+ HloInstruction* values = new_operands.size() == 2 ? new_operands[1] : nullptr;
+ return MakeUnique<HloSortInstruction>(shape, dimensions(0), keys, values);
+}
+
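
Side note: the new subclass stores its single sort dimension in the same dimensions_ vector the base-class accessors expect, so printing and proto serialization reuse the shared paths. Construction goes through the usual factory; a sketch with placeholder shapes and operands:

  // Keys-only sort along dimension 0.
  auto sorted = builder.AddInstruction(
      HloInstruction::CreateSort(keys_shape, /*dimension=*/0, keys));
  // Key/value form: values ride along and are permuted by the key order.
  auto sorted_kv = builder.AddInstruction(
      HloInstruction::CreateSort(sort_shape, /*dimension=*/0, keys, values));
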
HloTransposeInstruction::HloTransposeInstruction(
const Shape& shape, HloInstruction* operand,
tensorflow::gtl::ArraySlice<int64> dimensions)
@@ -553,10 +602,8 @@ HloBroadcastInstruction::CloneWithNewOperandsImpl(
HloMapInstruction::HloMapInstruction(
const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- HloComputation* map_computation,
- tensorflow::gtl::ArraySlice<HloInstruction*> static_operands)
+ HloComputation* map_computation)
: HloInstruction(HloOpcode::kMap, shape) {
- CHECK(static_operands.empty()) << "static_operands not yet supported";
for (auto operand : operands) {
AppendOperand(operand);
}
@@ -758,7 +805,7 @@ string HloConstantInstruction::OperandsToStringWithCanonicalNameMap(
HloTraceInstruction::HloTraceInstruction(const string& tag,
HloInstruction* operand)
: HloInstruction(HloOpcode::kTrace, ShapeUtil::MakeNil()),
- literal_(Literal::CreateR1U8(tag)) {
+ literal_(LiteralUtil::CreateR1U8(tag)) {
AppendOperand(operand);
operand->set_tracing(this);
}
@@ -1044,8 +1091,6 @@ HloInstruction* HloFusionInstruction::CloneAndFuseInternal(
CHECK_NOTNULL(GetModule())->AddEmbeddedComputation(builder.Build()));
clone = fused_expression_root();
} else {
- clone = fused_instructions_computation()->AddInstruction(
- instruction_to_fuse->Clone(/*suffix=*/""));
// When add_output is false, instruction_to_fuse is necessarily an operand
// of the fusion instruction. After fusion this will no longer be the
// case. Remove the operand from the operand list and remove its
@@ -1055,6 +1100,16 @@ HloInstruction* HloFusionInstruction::CloneAndFuseInternal(
bool in_operand_list = std::find(operands().begin(), operands().end(),
instruction_to_fuse) != operands().end();
CHECK(add_output || in_operand_list);
+ if (instruction_to_fuse->opcode() == HloOpcode::kTuple) {
+ // We assume all uses of a kTuple operation are GTE ops, not another
+ // fusion node. In this case, we don't need to clone
+ // 'instruction_to_fuse'.
+ CHECK(!in_operand_list);
+ clone = instruction_to_fuse;
+ } else {
+ clone = fused_instructions_computation()->AddInstruction(
+ instruction_to_fuse->Clone(/*suffix=*/""));
+ }
const std::vector<HloInstruction*>& fused_parameters =
fused_instructions_computation()->parameter_instructions();
for (int64 operand_num = 0; operand_num < operand_count(); ++operand_num) {
@@ -1151,9 +1206,10 @@ HloInstruction* HloFusionInstruction::CloneAndFuseInternal(
}
int64 index = tuple_elements.size();
if (instruction_to_fuse->opcode() == HloOpcode::kTuple) {
- index -= instruction_to_fuse->operand_count();
+ CHECK_EQ(clone, instruction_to_fuse);
+ index -= clone->operand_count();
std::vector<HloInstruction*> to_be_removed;
- for (auto old_gte : instruction_to_fuse->users()) {
+ for (auto old_gte : clone->users()) {
CHECK_EQ(old_gte->opcode(), HloOpcode::kGetTupleElement);
int64 old_tuple_index = old_gte->tuple_index();
HloInstruction* new_gte =
@@ -1165,7 +1221,6 @@ HloInstruction* HloFusionInstruction::CloneAndFuseInternal(
for (auto old_gte : to_be_removed) {
TF_CHECK_OK(parent()->RemoveInstruction(old_gte));
}
- TF_CHECK_OK(fused_instructions_computation()->RemoveInstruction(clone));
} else {
HloInstruction* new_gte =
parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
@@ -1174,7 +1229,9 @@ HloInstruction* HloFusionInstruction::CloneAndFuseInternal(
}
}
- VLOG(2) << "New clone:\n" << clone->ToString();
+ if (clone != instruction_to_fuse) {
+ VLOG(2) << "New clone:\n" << clone->ToString();
+ }
return clone;
}
@@ -1210,6 +1267,26 @@ std::unique_ptr<HloInstruction> HloFusionInstruction::CloneWithNewOperandsImpl(
new_fused_computation);
}
+Status HloFusionInstruction::DeduplicateFusionOperands() {
+ tensorflow::gtl::FlatMap<const HloInstruction*, int> operand_indices;
+ std::vector<int> operands_to_remove;
+ for (int i = 0; i < operand_count(); ++i) {
+ auto emplace_result = operand_indices.emplace(operand(i), i);
+ if (!emplace_result.second) {
+ TF_RETURN_IF_ERROR(fused_parameter(i)->ReplaceAllUsesWith(
+ fused_parameter(emplace_result.first->second)));
+ operands_to_remove.push_back(i);
+ }
+ }
+ if (operands_to_remove.empty()) {
+ return Status::OK();
+ }
+ TF_RETURN_IF_ERROR(
+ fused_instructions_computation()->RemoveUnusedParameters());
+ RemoveOperandsAtAscendingIndices(operands_to_remove);
+ return Status::OK();
+}
+
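
How the dedup works: it walks operands left to right, keeps the first occurrence of each distinct operand, rewires the fused parameter of every later duplicate onto the survivor, and only then prunes unused parameters and the duplicate operands. In the new NoRedundantFusionOperandsAfterReplacingUse test this is triggered indirectly via ReplaceAllUsesWith; invoked directly it looks like:

  // After x->ReplaceAllUsesWith(y), the fusion reads {y, y}.
  TF_RETURN_IF_ERROR(fusion->DeduplicateFusionOperands());
  // Post-condition: fusion->operands() == {y} and the fused computation is
  // left with a single parameter.
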
HloRngInstruction::HloRngInstruction(
const Shape& shape, RandomDistribution distribution,
tensorflow::gtl::ArraySlice<HloInstruction*> parameters)
@@ -1365,9 +1442,22 @@ HloReducePrecisionInstruction::CloneWithNewOperandsImpl(
shape, new_operands[0], exponent_bits(), mantissa_bits());
}
-HloInfeedInstruction::HloInfeedInstruction(const Shape& shape,
+HloInfeedInstruction::HloInfeedInstruction(const Shape& infeed_shape,
+ HloInstruction* token_operand,
const string& config)
- : HloInstruction(HloOpcode::kInfeed, shape), infeed_config_(config) {}
+ : HloInstruction(HloOpcode::kInfeed,
+ ShapeUtil::MakeTupleShape(
+ {infeed_shape, ShapeUtil::MakeTokenShape()})),
+ infeed_config_(config) {
+ AppendOperand(token_operand);
+}
+
+HloInfeedInstruction::HloInfeedInstruction(const Shape& infeed_shape,
+ const string& config)
+ : HloInstruction(HloOpcode::kInfeed,
+ ShapeUtil::MakeTupleShape(
+ {infeed_shape, ShapeUtil::MakeTokenShape()})),
+ infeed_config_(config) {}
HloInstructionProto HloInfeedInstruction::ToProto() const {
HloInstructionProto proto = HloInstruction::ToProto();
@@ -1395,19 +1485,37 @@ std::unique_ptr<HloInstruction> HloInfeedInstruction::CloneWithNewOperandsImpl(
const Shape& shape,
tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
HloCloneContext* context) const {
- CHECK_EQ(new_operands.size(), 0);
- return MakeUnique<HloInfeedInstruction>(shape, infeed_config());
+ if (new_operands.empty()) {
+ return MakeUnique<HloInfeedInstruction>(infeed_shape(), infeed_config());
+ } else {
+ CHECK_EQ(new_operands.size(), 1);
+ return MakeUnique<HloInfeedInstruction>(infeed_shape(), new_operands[0],
+ infeed_config());
+ }
}
HloOutfeedInstruction::HloOutfeedInstruction(
- const Shape& shape, HloInstruction* operand,
+ const Shape& outfeed_shape, HloInstruction* operand,
+ HloInstruction* token_operand, tensorflow::StringPiece outfeed_config)
+ : HloInstruction(HloOpcode::kOutfeed, ShapeUtil::MakeTokenShape()),
+ outfeed_shape_(outfeed_shape),
+ outfeed_config_(outfeed_config.begin(), outfeed_config.end()) {
+ CHECK(ShapeUtil::Compatible(operand->shape(), outfeed_shape))
+ << "Outfeed shape " << outfeed_shape
+ << " must be compatible with operand shape " << operand->shape();
+ AppendOperand(operand);
+ AppendOperand(token_operand);
+}
+
+HloOutfeedInstruction::HloOutfeedInstruction(
+ const Shape& outfeed_shape, HloInstruction* operand,
tensorflow::StringPiece outfeed_config)
- : HloInstruction(HloOpcode::kOutfeed, ShapeUtil::MakeNil()),
- outfeed_shape_(shape),
+ : HloInstruction(HloOpcode::kOutfeed, ShapeUtil::MakeTokenShape()),
+ outfeed_shape_(outfeed_shape),
outfeed_config_(outfeed_config.begin(), outfeed_config.end()) {
- CHECK(ShapeUtil::Compatible(operand->shape(), shape))
- << "Outfeed shape " << shape << " must be compatible with operand shape "
- << operand->shape();
+ CHECK(ShapeUtil::Compatible(operand->shape(), outfeed_shape))
+ << "Outfeed shape " << outfeed_shape
+ << " must be compatible with operand shape " << operand->shape();
AppendOperand(operand);
}
@@ -1438,9 +1546,14 @@ std::unique_ptr<HloInstruction> HloOutfeedInstruction::CloneWithNewOperandsImpl(
const Shape& shape,
tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
HloCloneContext* context) const {
- CHECK_EQ(new_operands.size(), 1);
- return MakeUnique<HloOutfeedInstruction>(outfeed_shape(), new_operands[0],
- outfeed_config());
+ if (new_operands.size() == 1) {
+ return MakeUnique<HloOutfeedInstruction>(outfeed_shape(), new_operands[0],
+ outfeed_config());
+ } else {
+ CHECK_EQ(new_operands.size(), 2);
+ return MakeUnique<HloOutfeedInstruction>(outfeed_shape(), new_operands[0],
+ new_operands[1], outfeed_config());
+ }
}
HloConvolutionInstruction::HloConvolutionInstruction(
@@ -1799,4 +1912,93 @@ HloDynamicSliceInstruction::CloneWithNewOperandsImpl(
return MakeUnique<HloDynamicSliceInstruction>(
shape, new_operands[0], new_operands[1], dynamic_slice_sizes_);
}
+
+HloGatherInstruction::HloGatherInstruction(
+ const Shape& shape, HloInstruction* operand, HloInstruction* gather_indices,
+ const GatherDimensionNumbers& gather_dim_numbers,
+ tensorflow::gtl::ArraySlice<int64> window_bounds)
+ : HloInstruction(HloOpcode::kGather, shape) {
+ AppendOperand(operand);
+ AppendOperand(gather_indices);
+ gather_dimension_numbers_ =
+ MakeUnique<GatherDimensionNumbers>(gather_dim_numbers);
+ c_copy(window_bounds, std::back_inserter(gather_window_bounds_));
+}
+
+string HloGatherInstruction::GatherDimensionNumbersToString() const {
+ CHECK(gather_dimension_numbers_ != nullptr);
+ string output_window_dims =
+ StrCat("output_window_dims={",
+ Join(gather_dimension_numbers_->output_window_dims(), ","), "}");
+ string elided_window_dims =
+ StrCat("elided_window_dims={",
+ Join(gather_dimension_numbers_->elided_window_dims(), ","), "}");
+ string gather_dims_to_operand_dims = StrCat(
+ "gather_dims_to_operand_dims={",
+ Join(gather_dimension_numbers_->gather_dims_to_operand_dims(), ","), "}");
+ string index_vector_dim = StrCat(
+ "index_vector_dim=", gather_dimension_numbers_->index_vector_dim());
+
+ return Join<std::initializer_list<string>>(
+ {output_window_dims, elided_window_dims, gather_dims_to_operand_dims,
+ index_vector_dim},
+ ", ");
+}
+
+/* static */ GatherDimensionNumbers HloGatherInstruction::MakeGatherDimNumbers(
+ tensorflow::gtl::ArraySlice<int64> output_window_dims,
+ tensorflow::gtl::ArraySlice<int64> elided_window_dims,
+ tensorflow::gtl::ArraySlice<int64> gather_dims_to_operand_dims,
+ int64 index_vector_dim) {
+ GatherDimensionNumbers gather_dim_numbers;
+ for (int64 output_window_dim : output_window_dims) {
+ gather_dim_numbers.add_output_window_dims(output_window_dim);
+ }
+ for (int64 elided_window_dim : elided_window_dims) {
+ gather_dim_numbers.add_elided_window_dims(elided_window_dim);
+ }
+ for (int64 gather_dim_to_input_dim : gather_dims_to_operand_dims) {
+ gather_dim_numbers.add_gather_dims_to_operand_dims(gather_dim_to_input_dim);
+ }
+
+ gather_dim_numbers.set_index_vector_dim(index_vector_dim);
+ return gather_dim_numbers;
+}
+
+HloInstructionProto HloGatherInstruction::ToProto() const {
+ HloInstructionProto proto = HloInstruction::ToProto();
+ *proto.mutable_gather_dimension_numbers() = gather_dimension_numbers();
+ for (int64 bound : gather_window_bounds()) {
+ proto.add_gather_window_bounds(bound);
+ }
+ return proto;
+}
+
+std::vector<string> HloGatherInstruction::ExtraAttributesToStringImpl(
+ const HloPrintOptions& options) const {
+ return {GatherDimensionNumbersToString(),
+ StrCat("window_bounds={", Join(gather_window_bounds(), ","), "}")};
+}
+
+bool HloGatherInstruction::IdenticalSlowPath(
+ const HloInstruction& other,
+ const std::function<bool(const HloComputation*, const HloComputation*)>&
+ eq_computations) const {
+ const auto& casted_other = static_cast<const HloGatherInstruction&>(other);
+ return protobuf_util::ProtobufEquals(
+ gather_dimension_numbers(),
+ casted_other.gather_dimension_numbers()) &&
+ gather_window_bounds() == casted_other.gather_window_bounds();
+}
+
+std::unique_ptr<HloInstruction> HloGatherInstruction::CloneWithNewOperandsImpl(
+ const Shape& shape,
+ tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ HloCloneContext* context) const {
+ CHECK_EQ(new_operands.size(), 2);
+ return MakeUnique<HloGatherInstruction>(
+ shape, new_operands[0], new_operands[1], gather_dimension_numbers(),
+ gather_window_bounds());
+}
+
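
MakeGatherDimNumbers, now hosted on HloGatherInstruction, just packs the four dimension-number fields into the proto. A sketch matching the stringification test earlier in this change; input, gather_indices, the result shape, and window_bounds are placeholders:

  GatherDimensionNumbers dnums = HloGatherInstruction::MakeGatherDimNumbers(
      /*output_window_dims=*/{4, 5, 6, 7, 8},
      /*elided_window_dims=*/{},
      /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
      /*index_vector_dim=*/4);
  auto gather = builder.AddInstruction(HloInstruction::CreateGather(
      gather_result_shape, input, gather_indices, dnums, window_bounds));
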
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_instructions.h b/tensorflow/compiler/xla/service/hlo_instructions.h
index 1a2e4ae0a5..65a93cdcf1 100644
--- a/tensorflow/compiler/xla/service/hlo_instructions.h
+++ b/tensorflow/compiler/xla/service/hlo_instructions.h
@@ -161,7 +161,8 @@ class HloSendRecvInstruction : public HloInstruction {
class HloSendInstruction : public HloSendRecvInstruction {
public:
- explicit HloSendInstruction(HloInstruction* operand, int64 channel_id);
+ explicit HloSendInstruction(HloInstruction* operand, HloInstruction* token,
+ int64 channel_id);
private:
// Implementation for non-common logic of CloneWithNewOperands.
@@ -185,7 +186,8 @@ class HloSendDoneInstruction : public HloSendRecvInstruction {
class HloRecvInstruction : public HloSendRecvInstruction {
public:
- explicit HloRecvInstruction(const Shape& shape, int64 channel_id);
+ explicit HloRecvInstruction(const Shape& shape, HloInstruction* token,
+ int64 channel_id);
private:
// Implementation for non-common logic of CloneWithNewOperands.
@@ -347,6 +349,35 @@ class HloReduceInstruction : public HloInstruction {
std::vector<int64> dimensions_;
};
+class HloSortInstruction : public HloInstruction {
+ public:
+ explicit HloSortInstruction(const Shape& shape, int64 dimension,
+ HloInstruction* keys,
+ HloInstruction* values = nullptr);
+ // Returns the dimension sizes or numbers associated with this instruction.
+ const std::vector<int64>& dimensions() const override { return dimensions_; }
+ int64 dimensions(int64 index) const override { return dimensions()[index]; }
+  // Returns the sort dimension for this instruction.
+ int64 sort_dimension() { return dimensions(0); }
+ // Returns a serialized representation of this instruction.
+ HloInstructionProto ToProto() const override;
+
+ private:
+ std::vector<string> ExtraAttributesToStringImpl(
+ const HloPrintOptions& options) const override;
+ bool IdenticalSlowPath(
+ const HloInstruction& other,
+ const std::function<bool(const HloComputation*, const HloComputation*)>&
+ eq_computations) const override;
+ // Implementation for non-common logic of CloneWithNewOperands.
+ std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
+ const Shape& shape,
+ tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ HloCloneContext* context) const override;
+
+ std::vector<int64> dimensions_;
+};
+
class HloTransposeInstruction : public HloInstruction {
public:
explicit HloTransposeInstruction(
@@ -407,8 +438,7 @@ class HloMapInstruction : public HloInstruction {
public:
explicit HloMapInstruction(
const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- HloComputation* map_computation,
- tensorflow::gtl::ArraySlice<HloInstruction*> static_operands = {});
+ HloComputation* map_computation);
// Returns the dimension sizes or numbers associated with this instruction.
const std::vector<int64>& dimensions() const override { return dimensions_; }
int64 dimensions(int64 index) const override { return dimensions()[index]; }
@@ -636,6 +666,9 @@ class HloFusionInstruction : public HloInstruction {
void set_fusion_kind(FusionKind kind) { fusion_kind_ = kind; }
+ // If multiple operands are the same instruction, keeps only one of them.
+ Status DeduplicateFusionOperands();
+
private:
// Fuses the given instruction into this fusion instruction. When add_output
// is false (which is the default), instruction_to_fuse is cloned and the
@@ -785,12 +818,25 @@ class HloReducePrecisionInstruction : public HloInstruction {
class HloInfeedInstruction : public HloInstruction {
public:
- explicit HloInfeedInstruction(const Shape& shape, const string& config);
+ explicit HloInfeedInstruction(const Shape& infeed_shape,
+ HloInstruction* token_operand,
+ const string& config);
+ // TODO(b/80000000): Remove this constructor when all uses of infeed are
+ // converted to take tokens.
+ explicit HloInfeedInstruction(const Shape& infeed_shape,
+ const string& config);
// Returns the infeed configuration string. The infeed configuration includes
// any metadata needed for the backend compiler (e.g., infeed buffer address)
// and is target-dependent.
string infeed_config() const { return infeed_config_; }
void set_infeed_config(const string& config) { infeed_config_ = config; }
+  // Returns the shape of the data received by the infeed. This is not the same
+  // as the shape of the infeed instruction itself, which produces a tuple
+  // containing the infeed data shape and a TOKEN.
+ const Shape& infeed_shape() const {
+ TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(shape()));
+ return ShapeUtil::GetSubshape(shape(), {0});
+ }
// Returns a serialized representation of this instruction.
HloInstructionProto ToProto() const override;
@@ -813,11 +859,19 @@ class HloInfeedInstruction : public HloInstruction {
class HloOutfeedInstruction : public HloInstruction {
public:
- explicit HloOutfeedInstruction(const Shape& shape, HloInstruction* operand,
+ explicit HloOutfeedInstruction(const Shape& outfeed_shape,
+ HloInstruction* operand,
+ HloInstruction* token_operand,
tensorflow::StringPiece outfeed_config);
+ // TODO(b/80000000): Remove this constructor when all uses of outfeed are
+ // converted to take tokens.
+ explicit HloOutfeedInstruction(const Shape& outfeed_shape,
+ HloInstruction* operand,
+ tensorflow::StringPiece outfeed_config);
+
// Returns the shape for the Outfeed instruction.
const Shape& outfeed_shape() const {
- TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(shape()));
+ TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(outfeed_shape_));
return outfeed_shape_;
}
// Returns the config for the Outfeed instruction.
@@ -1094,6 +1148,49 @@ class HloDynamicSliceInstruction : public HloInstruction {
// ('start' is specified dynamically in the second operand of the operation).
std::vector<int64> dynamic_slice_sizes_;
};
+
+class HloGatherInstruction : public HloInstruction {
+ public:
+ explicit HloGatherInstruction(
+ const Shape& shape, HloInstruction* operand,
+ HloInstruction* gather_indices,
+ const GatherDimensionNumbers& gather_dim_numbers,
+ tensorflow::gtl::ArraySlice<int64> window_bounds);
+ const GatherDimensionNumbers& gather_dimension_numbers() const {
+ CHECK(gather_dimension_numbers_ != nullptr);
+ return *gather_dimension_numbers_;
+ }
+ tensorflow::gtl::ArraySlice<int64> gather_window_bounds() const {
+ return gather_window_bounds_;
+ }
+ // Returns the dump string of the gather dimension numbers.
+ string GatherDimensionNumbersToString() const;
+ // Returns a serialized representation of this instruction.
+ HloInstructionProto ToProto() const override;
+
+ // Creates an instance of GatherDimensionNumbers.
+ static GatherDimensionNumbers MakeGatherDimNumbers(
+ tensorflow::gtl::ArraySlice<int64> output_window_dims,
+ tensorflow::gtl::ArraySlice<int64> elided_window_dims,
+ tensorflow::gtl::ArraySlice<int64> gather_dims_to_operand_dims,
+ int64 index_vector_dim);
+
+ private:
+ std::vector<string> ExtraAttributesToStringImpl(
+ const HloPrintOptions& options) const override;
+ bool IdenticalSlowPath(
+ const HloInstruction& other,
+ const std::function<bool(const HloComputation*, const HloComputation*)>&
+ eq_computations) const override;
+ std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
+ const Shape& shape,
+ tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ HloCloneContext* context) const override;
+
+ std::unique_ptr<GatherDimensionNumbers> gather_dimension_numbers_;
+ std::vector<int64> gather_window_bounds_;
+};
+
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_HLO_INSTRUCTIONS_H_
diff --git a/tensorflow/compiler/xla/service/hlo_liveness_analysis_test.cc b/tensorflow/compiler/xla/service/hlo_liveness_analysis_test.cc
index 0275294a1a..01b625c29c 100644
--- a/tensorflow/compiler/xla/service/hlo_liveness_analysis_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_liveness_analysis_test.cc
@@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_liveness_analysis.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
diff --git a/tensorflow/compiler/xla/service/hlo_matchers.h b/tensorflow/compiler/xla/service/hlo_matchers.h
index 8a31a8e617..b57c940238 100644
--- a/tensorflow/compiler/xla/service/hlo_matchers.h
+++ b/tensorflow/compiler/xla/service/hlo_matchers.h
@@ -187,7 +187,7 @@ HLO_MATCHER(Exp);
HLO_MATCHER(Floor);
HLO_MATCHER(Fusion);
HLO_MATCHER(Ge);
-HLO_MATCHER(GenerateToken);
+HLO_MATCHER(AfterAll);
HLO_MATCHER(Gt);
HLO_MATCHER(Infeed);
HLO_MATCHER(IsFinite);
@@ -196,6 +196,7 @@ HLO_MATCHER(Log);
HLO_MATCHER(And);
HLO_MATCHER(Not);
HLO_MATCHER(Or);
+HLO_MATCHER(Xor);
HLO_MATCHER(Lt);
HLO_MATCHER(Map);
HLO_MATCHER(Maximum);
diff --git a/tensorflow/compiler/xla/service/hlo_matchers_test.cc b/tensorflow/compiler/xla/service/hlo_matchers_test.cc
index 9a3010cf1f..7de59acc1e 100644
--- a/tensorflow/compiler/xla/service/hlo_matchers_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_matchers_test.cc
@@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/hlo_matchers.h"
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -75,8 +76,10 @@ TEST(HloMatchersTest, Test) {
}
TEST(HloMatchersTest, CustomCallMatcher) {
- auto c1 = HloInstruction::CreateConstant(Literal::CreateR1<float>({1, 2, 3}));
- auto c2 = HloInstruction::CreateConstant(Literal::CreateR1<int32>({1, 2, 3}));
+ auto c1 =
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1, 2, 3}));
+ auto c2 =
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({1, 2, 3}));
auto call = HloInstruction::CreateCustomCall(
ShapeUtil::MakeShape(F32, {1}), {c1.get(), c2.get()}, "foo_target");
diff --git a/tensorflow/compiler/xla/service/hlo_module.cc b/tensorflow/compiler/xla/service/hlo_module.cc
index 39bc25ba42..55ff073d3f 100644
--- a/tensorflow/compiler/xla/service/hlo_module.cc
+++ b/tensorflow/compiler/xla/service/hlo_module.cc
@@ -537,10 +537,11 @@ uint64 HloModule::RandomNew64() const {
HloComputation* HloModule::GetComputationWithName(
tensorflow::StringPiece name) {
- auto it = c_find_if(computations(), [&](HloComputation* computation) {
+ auto computations_in_module = computations();
+ auto it = c_find_if(computations_in_module, [&](HloComputation* computation) {
return computation->name() == name;
});
- return it == computations().end() ? nullptr : *it;
+ return it == computations_in_module.end() ? nullptr : *it;
}
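
This fix matters because computations() builds a fresh range object on every call, so the iterator from the c_find_if call was being compared against the end() of a different range. A self-contained illustration of the general hazard with a by-value return; make_values is a hypothetical stand-in:

  #include <algorithm>
  #include <vector>

  std::vector<int> make_values() { return {1, 2, 3}; }

  bool contains_two_broken() {
    // Each call yields a distinct temporary, so these iterators do not
    // belong to the same container: undefined behavior.
    return std::find(make_values().begin(), make_values().end(), 2) !=
           make_values().end();
  }

  bool contains_two_fixed() {
    // Bind the range once, as GetComputationWithName now does.
    auto values = make_values();
    return std::find(values.begin(), values.end(), 2) != values.end();
  }
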
/* static */ std::atomic<int> HloModule::next_unique_module_id_(0);
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc b/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
index bf33640db1..3ffac2f413 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
+++ b/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
@@ -75,10 +75,23 @@ Status HloModuleGroupMetadata::Build() {
if (tracked == nullptr) {
return Status::OK();
}
- // Add the parent computation of this channel instruction and its peer
- // computation (both must be while computations) as companions.
+
+ std::vector<HloComputation*> peers;
if (IsChannelInstruction(hlo)) {
- HloComputation* peer_computation = PeerComputation(hlo);
+ peers.push_back(PeerComputation(hlo));
+ } else if (hlo->IsCrossModuleAllReduce()) {
+ for (HloInstruction* instr : GetAllReduceGroup(*hlo->all_reduce_id())) {
+ if (instr == hlo) {
+ continue;
+ }
+ peers.push_back(instr->parent());
+ }
+ }
+
+ // Add the parent computation of this channel (or all-reduce) instruction
+ // and its peer computation(s) (both must be while computations) as
+ // companions.
+ for (HloComputation* peer_computation : peers) {
const TrackedInstruction* peer_tracked =
GetTrackedInstruction(peer_computation);
TF_RET_CHECK(peer_tracked != nullptr)
@@ -175,7 +188,8 @@ bool HloModuleGroupMetadata::IsCompanionInstruction(HloInstruction* hlo) const {
bool HloModuleGroupMetadata::InstructionCommunicates(
HloInstruction* hlo) const {
- return IsChannelInstruction(hlo) || IsCompanionInstruction(hlo);
+ return IsChannelInstruction(hlo) || IsCompanionInstruction(hlo) ||
+ hlo->IsCrossModuleAllReduce();
}
const HloModuleGroupMetadata::Channel& HloModuleGroupMetadata::GetChannel(
@@ -200,6 +214,13 @@ HloComputation* HloModuleGroupMetadata::PeerComputation(
}
}
+const std::vector<HloInstruction*>& HloModuleGroupMetadata::GetAllReduceGroup(
+ int64 all_reduce_id) const {
+ auto it = all_reduce_map_.find(all_reduce_id);
+ CHECK(it != all_reduce_map_.end());
+ return it->second;
+}
+
std::vector<HloModuleGroupMetadata::TrackedInstruction>
HloModuleGroupMetadata::GetCompanionsPath(const HloInstruction* hlo) const {
std::vector<TrackedInstruction> path;
@@ -278,10 +299,27 @@ Status HloModuleGroupMetadata::RecordInstructions() {
tracked_instructions_[hlo->to_apply()] =
TrackedInstruction(hlo, ComputationKind::kCallFunction);
}
+
+    // Group cross-module all-reduce instructions by all_reduce_id.
+ if (hlo->IsCrossModuleAllReduce()) {
+ TF_RET_CHECK(channel_id_map_.find(*hlo->all_reduce_id()) ==
+ channel_id_map_.end())
+ << "all_reduce_id " << *hlo->all_reduce_id()
+ << " is already used by a send/recv instruction";
+ all_reduce_map_[*hlo->all_reduce_id()].push_back(hlo);
+ max_channel_id_ = std::max(max_channel_id_, *hlo->all_reduce_id());
+ return Status::OK();
+ }
+
if (!IsChannelInstruction(hlo)) {
return Status::OK();
}
+ TF_RET_CHECK(all_reduce_map_.find(hlo->channel_id()) ==
+ all_reduce_map_.end())
+ << "channel id " << hlo->channel_id()
+ << " is already used by an all-reduce instruction";
+
// Add a new channel if needed.
if (channel_id_map_.find(hlo->channel_id()) == channel_id_map_.end()) {
channels_.emplace_back();
@@ -324,6 +362,7 @@ Status HloModuleGroupMetadata::RecordInstructions() {
}
}
VLOG(2) << "Created " << channels_.size() << " channels";
+ VLOG(2) << "Created " << all_reduce_map_.size() << " all-reduce groups";
return Status::OK();
}
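
The grouping added here mirrors the existing channel bookkeeping, with a collision check in each direction because channel ids and all_reduce_ids share the max_channel_id counter. The core pattern, extracted with a placeholder instruction list:

  tensorflow::gtl::FlatMap<int64, std::vector<HloInstruction*>> all_reduce_map;
  for (HloInstruction* hlo : instructions) {
    if (hlo->IsCrossModuleAllReduce()) {
      // Peers across modules share an all_reduce_id and form one group.
      all_reduce_map[*hlo->all_reduce_id()].push_back(hlo);
    }
  }
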
@@ -382,7 +421,8 @@ Status HloModuleGroupMetadata::VerifyChannelInstructions() {
// Check if the shapes match for each channel.
for (const Channel& channel : channels_) {
const Shape& send_shape = channel.send->operand(0)->shape();
- const Shape& recv_shape = channel.recv_done->shape();
+ const Shape& recv_shape =
+ ShapeUtil::GetTupleElementShape(channel.recv_done->shape(), 0);
if (!ShapeUtil::Compatible(send_shape, recv_shape)) {
return FailedPrecondition("send/recv shapes do not match");
}
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_metadata.h b/tensorflow/compiler/xla/service/hlo_module_group_metadata.h
index ffde3a332d..9eea5c6a3d 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_metadata.h
+++ b/tensorflow/compiler/xla/service/hlo_module_group_metadata.h
@@ -118,13 +118,17 @@ class HloModuleGroupMetadata {
// comment above on companion instructions.
bool IsCompanionInstruction(HloInstruction* hlo) const;
- // Returns true if the instruction is either a channel instruction or a
- // companion instruction.
+ // Returns true if the instruction is either a channel instruction, a
+ // cross-module all-reduce instruction, or a companion instruction.
bool InstructionCommunicates(HloInstruction* hlo) const;
// Returns the Channel instance for the given channel id.
const Channel& GetChannel(int64 channel_id) const;
+ // Returns the all-reduce instructions with the same all_reduce_id.
+ const std::vector<HloInstruction*>& GetAllReduceGroup(
+ int64 all_reduce_id) const;
+
// Returns the computation that contains the peer channel instructions for
// the given instruction.
//
@@ -187,13 +191,14 @@ class HloModuleGroupMetadata {
// Returns all channels in the module group.
const std::vector<Channel>& channels() const { return channels_; }
- // Returns the maximum channel id used in the module group.
+ // Returns the maximum channel id or all_reduce_id used in the module group.
int64 max_channel_id() const { return max_channel_id_; }
private:
Status Build();
- // Record all channel instructions and While instructions.
+ // Record all channel instructions, cross-module AllReduce instructions, and
+ // While/Conditional/Call instructions.
Status RecordInstructions();
// Verifies the given HloModules are well-formed and follow the specification,
@@ -255,6 +260,9 @@ class HloModuleGroupMetadata {
// Map from channel ids to the index in channels_.
tensorflow::gtl::FlatMap<int64, int64> channel_id_map_;
+ // Map from all-reduce ids to the all reduce instructions.
+ tensorflow::gtl::FlatMap<int64, std::vector<HloInstruction*>> all_reduce_map_;
+
// The maximum channel id used in the module group.
int64 max_channel_id_ = -1;
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_util.cc b/tensorflow/compiler/xla/service/hlo_module_group_util.cc
index 21a9b7291a..9fd0ade153 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_util.cc
+++ b/tensorflow/compiler/xla/service/hlo_module_group_util.cc
@@ -56,12 +56,17 @@ std::vector<HloInstruction*> HloModuleGroupUtil::GlobalPredecessors(
};
// If the given instruction is a companion instruction, we need to find the
- // predecessors of all of its companion instructions.
+ // predecessors of all of its companion instructions. If the instruction is an
+ // all-reduce, we need to find the predecessors of all the peer all-reduce
+ // instructions.
std::vector<HloInstruction*> instruction_group;
if (metadata_.IsCompanionInstruction(instruction)) {
for (HloInstruction* companion : metadata_.Companions(instruction)) {
instruction_group.push_back(companion);
}
+ } else if (instruction->IsCrossModuleAllReduce()) {
+ instruction_group =
+ metadata_.GetAllReduceGroup(*instruction->all_reduce_id());
} else {
instruction_group.push_back(instruction);
}
@@ -112,12 +117,17 @@ std::vector<HloInstruction*> HloModuleGroupUtil::GlobalSuccessors(
};
// If the given instruction is a companion instruction, we need to find the
- // successors of all of its companion instructions.
+ // successors of all of its companion instructions. If the instruction is an
+ // all-reduce, we need to find the successors of all its peer all-reduce
+ // instructions.
std::vector<HloInstruction*> instruction_group;
if (metadata_.IsCompanionInstruction(instruction)) {
for (HloInstruction* companion : metadata_.Companions(instruction)) {
instruction_group.push_back(companion);
}
+ } else if (instruction->IsCrossModuleAllReduce()) {
+ instruction_group =
+ metadata_.GetAllReduceGroup(*instruction->all_reduce_id());
} else {
instruction_group.push_back(instruction);
}
@@ -170,15 +180,17 @@ Status HloModuleGroupUtil::VisitTopologicalOrder(
HloInstruction* hlo = stack.top();
// Find the instruction group of the currently visited instruction. The
- // instruction group represents all companion instructions of the
- // current instruction, and are considered to be a single entity for the
- // purpose of the traversal (i.e., they must always be in the same visit
- // state).
+    // instruction group represents all companion instructions of the current
+    // instruction, or all the all-reduce instructions that belong to the same
+    // group; either way, the group is treated as a single entity for the
+    // purpose of the traversal (i.e., its members must always be in the same
+    // visit state).
std::vector<HloInstruction*> instruction_group;
if (metadata_.IsCompanionInstruction(hlo)) {
for (HloInstruction* companion : metadata_.Companions(hlo)) {
instruction_group.push_back(companion);
}
+ } else if (hlo->IsCrossModuleAllReduce()) {
+ instruction_group = metadata_.GetAllReduceGroup(*hlo->all_reduce_id());
} else {
instruction_group.push_back(hlo);
}
@@ -292,7 +304,7 @@ HloModuleGroupUtil::ComputeReachability(
}
auto reachability = MakeUnique<HloReachabilityMap>(post_order);
for (HloInstruction* hlo : post_order) {
- reachability->SetReachabilityToUnion(GlobalPredecessors(hlo), hlo);
+ reachability->FastSetReachabilityToUnion(GlobalPredecessors(hlo), hlo);
}
return std::move(reachability);
}
diff --git a/tensorflow/compiler/xla/service/hlo_module_test.cc b/tensorflow/compiler/xla/service/hlo_module_test.cc
index 7f28a804bf..236f450086 100644
--- a/tensorflow/compiler/xla/service/hlo_module_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_module_test.cc
@@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_module.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -38,7 +38,7 @@ class HloModuleTest : public HloTestBase {
std::unique_ptr<HloComputation> CreateConstantComputation() {
auto builder = HloComputation::Builder("Constant");
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
return builder.Build();
}
@@ -122,7 +122,7 @@ TEST_F(HloModuleTest, CloneHasFusion) {
{
auto b = HloComputation::Builder("Entry");
auto input = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
b.AddInstruction(
HloInstruction::CreateFusion(r0f32_, HloInstruction::FusionKind::kInput,
/*operands=*/{input}, fused_computation));
@@ -173,7 +173,7 @@ TEST_F(HloModuleTest, LargeConstantToString) {
auto builder = HloComputation::Builder("Constant");
std::vector<float> values(16, 42.0);
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>(values)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(values)));
module->AddEntryComputation(builder.Build());
EXPECT_EQ(
diff --git a/tensorflow/compiler/xla/service/hlo_opcode.h b/tensorflow/compiler/xla/service/hlo_opcode.h
index 7083321276..39e12c4815 100644
--- a/tensorflow/compiler/xla/service/hlo_opcode.h
+++ b/tensorflow/compiler/xla/service/hlo_opcode.h
@@ -81,7 +81,7 @@ namespace xla {
V(kFusion, "fusion", kHloOpcodeIsVariadic) \
V(kGather, "gather") \
V(kGe, "greater-than-or-equal-to", kHloOpcodeIsComparison) \
- V(kGenerateToken, "generate-token", kHloOpcodeIsVariadic) \
+ V(kAfterAll, "after-all", kHloOpcodeIsVariadic) \
V(kGetTupleElement, "get-tuple-element") \
V(kGt, "greater-than", kHloOpcodeIsComparison) \
V(kHostCompute, "host-compute") \
@@ -133,6 +133,7 @@ namespace xla {
V(kTrace, "trace") \
V(kTranspose, "transpose") \
V(kTuple, "tuple", kHloOpcodeIsVariadic) \
+ V(kTupleSelect, "tuple-select") \
V(kWhile, "while")
enum class HloOpcode {
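
This table is an X-macro list: each V(name, string, traits...) row expands under different definitions of V to generate the opcode enum, its name table, and trait predicates in lockstep, which is why the rename and the kTupleSelect addition are each a one-line change. A toy illustration of the pattern (not the actual TF macros; the real rows also carry optional trait flags consumed by a variadic parameter):

  #define TOY_OPCODE_LIST(V)  \
    V(kAdd, "add")            \
    V(kAfterAll, "after-all") \
    V(kTupleSelect, "tuple-select")

  enum class ToyOpcode {
  #define DECLARE_ENUM(enum_name, opcode_name) enum_name,
    TOY_OPCODE_LIST(DECLARE_ENUM)
  #undef DECLARE_ENUM
  };

  // The same list expands again, keeping the name table in sync by construction.
  const char* ToyOpcodeString(ToyOpcode opcode) {
    switch (opcode) {
  #define CASE_STRING(enum_name, opcode_name) \
    case ToyOpcode::enum_name:                \
      return opcode_name;
      TOY_OPCODE_LIST(CASE_STRING)
  #undef CASE_STRING
    }
    return "unknown";
  }
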
diff --git a/tensorflow/compiler/xla/service/hlo_opcode_test.cc b/tensorflow/compiler/xla/service/hlo_opcode_test.cc
index 774345124b..6f3f83f63a 100644
--- a/tensorflow/compiler/xla/service/hlo_opcode_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_opcode_test.cc
@@ -58,7 +58,7 @@ TEST(HloOpcodeTest, OpcodeProperties) {
case HloOpcode::kConcatenate:
case HloOpcode::kFusion:
case HloOpcode::kMap:
- case HloOpcode::kGenerateToken:
+ case HloOpcode::kAfterAll:
case HloOpcode::kTuple:
EXPECT_TRUE(HloOpcodeIsVariadic(opcode));
break;
diff --git a/tensorflow/compiler/xla/service/hlo_ordering_test.cc b/tensorflow/compiler/xla/service/hlo_ordering_test.cc
index cfe5dace05..126d3a2d9c 100644
--- a/tensorflow/compiler/xla/service/hlo_ordering_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_ordering_test.cc
@@ -57,7 +57,7 @@ TEST_F(HloOrderingTest, InstructionsInDifferentComputations) {
auto builder_c = HloComputation::Builder("C");
HloInstruction* c = builder_c.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
HloComputation* computation_c =
module->AddEmbeddedComputation(builder_c.Build());
@@ -145,7 +145,7 @@ TEST_F(HloOrderingTest, InstructionsInWhileComputations) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(scalar_shape, condition, body, constant));
module->AddEntryComputation(builder.Build());
@@ -208,7 +208,7 @@ TEST_F(HloOrderingTest, ValuesInWhileComputations) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(scalar_shape, condition, body, constant));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
diff --git a/tensorflow/compiler/xla/service/hlo_parser.cc b/tensorflow/compiler/xla/service/hlo_parser.cc
index 605c6ae741..d387539350 100644
--- a/tensorflow/compiler/xla/service/hlo_parser.cc
+++ b/tensorflow/compiler/xla/service/hlo_parser.cc
@@ -15,8 +15,10 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_parser.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_domain_metadata.h"
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/service/hlo_sharding_metadata.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -509,7 +511,6 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
case HloOpcode::kReal:
case HloOpcode::kSign:
case HloOpcode::kSin:
- case HloOpcode::kSort:
case HloOpcode::kTanh: {
if (!ParseOperands(&operands, /*expected_size=*/1) ||
!ParseAttributes(attrs)) {
@@ -552,7 +553,8 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
}
// Ternary ops.
case HloOpcode::kClamp:
- case HloOpcode::kSelect: {
+ case HloOpcode::kSelect:
+ case HloOpcode::kTupleSelect: {
if (!ParseOperands(&operands, /*expected_size=*/3) ||
!ParseAttributes(attrs)) {
return false;
@@ -617,12 +619,42 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
HloInstruction::CreateReshape(shape, operands[0]));
break;
}
- case HloOpcode::kGenerateToken: {
+ case HloOpcode::kAfterAll: {
if (!ParseOperands(&operands) || !ParseAttributes(attrs)) {
return false;
}
- instruction = builder->AddInstruction(
- HloInstruction::CreateGenerateToken(operands));
+ if (operands.empty()) {
+ instruction = builder->AddInstruction(HloInstruction::CreateToken());
+ } else {
+ instruction =
+ builder->AddInstruction(HloInstruction::CreateAfterAll(operands));
+ }
+ break;
+ }
+ case HloOpcode::kSort: {
+ auto loc = lexer_.GetLoc();
+
+ optional<std::vector<tensorflow::int64>> dimensions;
+ attrs["dimensions"] = {/*required=*/true, AttrTy::kBracedInt64List,
+ &dimensions};
+ if (!ParseOperands(&operands) || !ParseAttributes(attrs) ||
+ dimensions->size() != 1) {
+ return false;
+ }
+ switch (operands.size()) {
+ case 1:
+ instruction = builder->AddInstruction(HloInstruction::CreateSort(
+ shape, dimensions->at(0), /*keys=*/operands[0]));
+ break;
+ case 2:
+ instruction = builder->AddInstruction(HloInstruction::CreateSort(
+ shape, dimensions->at(0),
+ /*keys=*/operands[0], /*values=*/operands[1]));
+ break;
+ default:
+ return Error(loc, StrCat("expects either 1 or 2 operands, but has ",
+ operands.size(), " operands"));
+ }
break;
}
case HloOpcode::kTuple: {
@@ -650,12 +682,12 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
case HloOpcode::kRecv: {
optional<tensorflow::int64> channel_id;
attrs["channel_id"] = {/*required=*/true, AttrTy::kInt64, &channel_id};
- if (!ParseOperands(&operands, /*expected_size=*/0) ||
+ if (!ParseOperands(&operands, /*expected_size=*/1) ||
!ParseAttributes(attrs)) {
return false;
}
- instruction = builder->AddInstruction(
- HloInstruction::CreateRecv(shape.tuple_shapes(0), *channel_id));
+ instruction = builder->AddInstruction(HloInstruction::CreateRecv(
+ shape.tuple_shapes(0), operands[0], *channel_id));
break;
}
case HloOpcode::kRecvDone: {
@@ -675,12 +707,12 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
case HloOpcode::kSend: {
optional<tensorflow::int64> channel_id;
attrs["channel_id"] = {/*required=*/true, AttrTy::kInt64, &channel_id};
- if (!ParseOperands(&operands, /*expected_size=*/1) ||
+ if (!ParseOperands(&operands, /*expected_size=*/2) ||
!ParseAttributes(attrs)) {
return false;
}
instruction = builder->AddInstruction(
- HloInstruction::CreateSend(operands[0], *channel_id));
+ HloInstruction::CreateSend(operands[0], operands[1], *channel_id));
break;
}
case HloOpcode::kSendDone: {
@@ -978,23 +1010,53 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
case HloOpcode::kInfeed: {
optional<string> config;
attrs["infeed_config"] = {/*required=*/false, AttrTy::kString, &config};
- if (!ParseOperands(&operands, /*expected_size=*/0) ||
- !ParseAttributes(attrs)) {
+ if (!ParseOperands(&operands) || !ParseAttributes(attrs)) {
return false;
}
- instruction = builder->AddInstruction(
- HloInstruction::CreateInfeed(shape, config ? *config : ""));
+ // We need to know the infeed data shape to construct the infeed
+ // instruction. This is the zero-th element of the tuple-shaped output of
+      // the infeed instruction. ShapeUtil::GetTupleElementShape will CHECK-fail
+      // if the shape is not a non-empty tuple, so add a guard here so that an
+      // error message can be emitted instead of a CHECK failure.
+      if (!ShapeUtil::IsTuple(shape) || ShapeUtil::IsEmptyTuple(shape)) {
+ return Error(lexer_.GetLoc(),
+ "infeed must have a non-empty tuple shape");
+ }
+
+ if (operands.empty()) {
+ // TODO(b/80000000): Remove this when all uses of infeed are
+ // converted to take tokens.
+ instruction = builder->AddInstruction(HloInstruction::CreateInfeed(
+ ShapeUtil::GetTupleElementShape(shape, 0), config ? *config : ""));
+ } else if (operands.size() == 1) {
+ instruction = builder->AddInstruction(HloInstruction::CreateInfeed(
+ ShapeUtil::GetTupleElementShape(shape, 0), operands[0],
+ config ? *config : ""));
+ } else {
+ return Error(lexer_.GetLoc(),
+ "infeed must have exactly zero or one operands");
+ }
break;
}
case HloOpcode::kOutfeed: {
optional<string> config;
attrs["outfeed_config"] = {/*required=*/false, AttrTy::kString, &config};
- if (!ParseOperands(&operands, /*expected_size=*/1) ||
- !ParseAttributes(attrs)) {
+ if (!ParseOperands(&operands) || !ParseAttributes(attrs)) {
return false;
}
- instruction = builder->AddInstruction(HloInstruction::CreateOutfeed(
- operands[0]->shape(), operands[0], config ? *config : ""));
+ if (operands.size() == 1) {
+ // TODO(b/80000000): Remove this when all uses of outfeed are
+ // converted to take tokens.
+ instruction = builder->AddInstruction(HloInstruction::CreateOutfeed(
+ operands[0]->shape(), operands[0], config ? *config : ""));
+ } else if (operands.size() == 2) {
+ instruction = builder->AddInstruction(
+ HloInstruction::CreateOutfeed(operands[0]->shape(), operands[0],
+ operands[1], config ? *config : ""));
+ } else {
+ return Error(lexer_.GetLoc(),
+ "outfeed must have exactly one or two operands");
+ }
break;
}
case HloOpcode::kRng: {
@@ -1131,11 +1193,12 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
return false;
}
- GatherDimensionNumbers dim_numbers = HloInstruction::MakeGatherDimNumbers(
- /*output_window_dims=*/*output_window_dims,
- /*elided_window_dims=*/*elided_window_dims,
- /*gather_dims_to_operand_dims=*/*gather_dims_to_operand_dims,
- /*index_vector_dim=*/*index_vector_dim);
+ GatherDimensionNumbers dim_numbers =
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/*output_window_dims,
+ /*elided_window_dims=*/*elided_window_dims,
+ /*gather_dims_to_operand_dims=*/*gather_dims_to_operand_dims,
+ /*index_vector_dim=*/*index_vector_dim);
instruction = builder->AddInstruction(HloInstruction::CreateGather(
shape, /*operand=*/operands[0], /*gather_indices=*/operands[1],
@@ -1150,8 +1213,8 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
return false;
}
instruction = builder->AddInstruction(HloInstruction::CreateDomain(
- shape, operands[0], std::move(domain.entry_metadata),
- std::move(domain.exit_metadata)));
+ shape, operands[0], std::move(domain.exit_metadata),
+ std::move(domain.entry_metadata)));
break;
}
case HloOpcode::kTrace:
@@ -1558,7 +1621,7 @@ bool HloParser::ParseTupleLiteral(std::unique_ptr<Literal>* literal,
}
}
}
- *literal = Literal::MakeTupleOwned(std::move(elements));
+ *literal = LiteralUtil::MakeTupleOwned(std::move(elements));
return ParseToken(TokKind::kRparen,
StrCat("expects ')' at the end of the tuple with ",
ShapeUtil::TupleElementCount(shape), "elements"));
@@ -1586,8 +1649,8 @@ bool HloParser::ParseDenseLiteral(std::unique_ptr<Literal>* literal,
}
// Create a literal with the given shape in default layout.
- *literal = Literal::CreateFromDimensions(shape.element_type(),
- AsInt64Slice(shape.dimensions()));
+ *literal = LiteralUtil::CreateFromDimensions(
+ shape.element_type(), AsInt64Slice(shape.dimensions()));
tensorflow::int64 nest_level = 0;
tensorflow::int64 linear_index = 0;
// elems_seen_per_dim[i] is how many elements or sub-arrays we have seen for
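
To make the new syntax concrete, here is a hedged, test-style sketch (module, test, and instruction names are illustrative, not taken from this patch) of HLO text the updated parser should now accept: a two-operand key/value sort plus a token threaded through send/send-done.

TEST_F(HloParserTest, TokenAndSortSyntaxSketch) {
  const string text = R"(HloModule parser_syntax_sketch

ENTRY sketch {
  keys = f32[8]{0} parameter(0)
  values = s32[8]{0} parameter(1)
  sorted = (f32[8]{0}, s32[8]{0}) sort(keys, values), dimensions={0}
  tok = token[] after-all()
  send = (f32[8]{0}, u32[], token[]) send(keys, tok), channel_id=1
  ROOT done = token[] send-done(send), channel_id=1
}
)";
  TF_ASSERT_OK(ParseHloString(text).status());
}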
diff --git a/tensorflow/compiler/xla/service/hlo_parser_test.cc b/tensorflow/compiler/xla/service/hlo_parser_test.cc
index d481e07f60..f06c705c42 100644
--- a/tensorflow/compiler/xla/service/hlo_parser_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_parser_test.cc
@@ -277,12 +277,13 @@ ENTRY %WhileWithScalarS32Result.v2 () -> s32[] {
"SendRecv",
R"(HloModule TwoSendRecvBothWayRecvFist_module
-ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> f32[] {
- %recv = (f32[], u32[]) recv(), channel_id=15, sharding={maximal device=1}
- ROOT %recv-done = f32[] recv-done((f32[], u32[]) %recv), channel_id=15, sharding={maximal device=1}
+ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> (f32[], token[]) {
+ %token = token[] after-all()
+ %recv = (f32[], u32[], token[]) recv(token[] %token), channel_id=15, sharding={maximal device=1}
+ ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15, sharding={maximal device=1}
%constant = f32[] constant(2.1), sharding={maximal device=0}
- %send = (f32[], u32[]) send(f32[] %constant), channel_id=16, sharding={maximal device=0}, control-predecessors={%recv}
- %send-done = () send-done((f32[], u32[]) %send), channel_id=16, sharding={maximal device=0}
+ %send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token), channel_id=16, sharding={maximal device=0}, control-predecessors={%recv}
+ %send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16, sharding={maximal device=0}
}
)"
@@ -795,10 +796,14 @@ ENTRY ReduceR3ToR2.v3 {
R"(HloModule outfeed_module
ENTRY InfeedToOutfeed {
- infeed = (u32[3]{0}, pred[]) infeed()
- outfeed = () outfeed(infeed)
- ROOT infeed.1 = (u32[3]{0}, pred[]) infeed()
- outfeed.1 = () outfeed(infeed.1)
+ token = token[] after-all()
+ infeed = ((u32[3]{0}, pred[]), token[]) infeed(token)
+ infeed.data = (u32[3]{0}, pred[]) get-tuple-element(infeed), index=0
+ outfeed = token[] outfeed(infeed.data, token)
+ ROOT infeed.1 = ((u32[3]{0}, pred[]), token[]) infeed(token)
+ infeed.1.data = (u32[3]{0}, pred[]) get-tuple-element(infeed.1), index=0
+ infeed.1.token = token[] get-tuple-element(infeed.1), index=1
+ outfeed.1 = token[] outfeed(infeed.1.data, infeed.1.token)
}
)"
@@ -828,6 +833,56 @@ ENTRY ReducePrecision {
)"
},
+// Sort (Key)
+{
+"SortKey",
+R"(HloModule sort
+
+ENTRY Sort {
+ x = f32[1024]{0} parameter(0)
+ ROOT sorted = f32[1024]{0} sort(x), dimensions={0}
+}
+
+)"
+},
+// Sort (Key, Value)
+{
+"SortKeyValue",
+R"(HloModule sort
+
+ENTRY Sort {
+ keys = f32[1024]{0} parameter(0)
+ values = s32[1024]{0} parameter(1)
+ ROOT sorted = (f32[1024]{0}, s32[1024]{0}) sort(keys, values), dimensions={0}
+}
+
+)"
+},
+// R2 Sort (Key)
+{
+"SortKeyR2",
+R"(HloModule sort
+
+ENTRY Sort {
+ x = f32[1024,16]{0,1} parameter(0)
+ ROOT sorted = f32[1024,16]{0,1} sort(x), dimensions={0}
+}
+
+)"
+},
+// R2 Sort (Key, Value)
+{
+"SortKeyValueR2",
+R"(HloModule sort
+
+ENTRY Sort {
+ keys = f32[1024,16]{0,1} parameter(0)
+ values = s32[1024,16]{0,1} parameter(1)
+ ROOT sorted = (f32[1024,16]{0,1}, s32[1024,16]{0,1}) sort(keys, values), dimensions={0}
+}
+
+)"
+},
// Conditional
{
"Conditional",
@@ -1192,11 +1247,12 @@ TEST_F(HloParserTest, UnexpectedAttribute) {
const string original = R"(HloModule unexpected_attr_module
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> f32[] {
- %recv = (f32[], u32[]) recv(), channel_id=15
- %recv-done = f32[] recv-done((f32[], u32[]) %recv), channel_id=15
+ %token = token[] after-all()
+ %recv = (f32[], u32[], token[]) recv(token[] %token), channel_id=15
+ %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
ROOT %constant = f32[] constant(2.1)
- %send = (f32[], u32[]) send(f32[] %constant), channel_id=16, calls=%recv
- %send-done = () send-done((f32[], u32[]) %send), channel_id=16
+ %send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token), channel_id=16, calls=%recv
+ %send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
@@ -1208,11 +1264,12 @@ TEST_F(HloParserTest, MissingAttribute) {
const string original = R"(HloModule missing_attr_module
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> f32[] {
- %recv = (f32[], u32[]) recv(), channel_id=15
- %recv-done = f32[] recv-done((f32[], u32[]) %recv), channel_id=15
+ %token = token[] after-all()
+ %recv = (f32[], u32[], token[]) recv(token[] %token), channel_id=15
+ %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
ROOT %constant = f32[] constant(-2.1)
- %send = (f32[], u32[]) send(f32[] %constant)
- %send-done = () send-done((f32[], u32[]) %send), channel_id=16
+ %send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token)
+ %send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
@@ -1224,11 +1281,12 @@ TEST_F(HloParserTest, PredecessorUndefined) {
const string original = R"(HloModule pre_not_found_module
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> f32[] {
- %recv = (f32[], u32[]) recv(), channel_id=15
- %recv-done = f32[] recv-done((f32[], u32[]) %recv), channel_id=15
+ %token = token[] after-all()
+ %recv = (f32[], u32[], token[]) recv(token[] %token), channel_id=15
+ %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
ROOT %constant = f32[] constant(2.1)
- %send = (f32[], u32[]) send(f32[] %constant), channel_id=16, control-predecessors={%done}
- %send-done = () send-done((f32[], u32[]) %send), channel_id=16
+ %send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token), channel_id=16, control-predecessors={%done}
+ %send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
@@ -1418,5 +1476,15 @@ TEST_F(HloParserTest, ParseConvolutionDimensionNumbers) {
EXPECT_EQ(original, ConvolutionDimensionNumbersToString(dnums));
}
+TEST_F(HloParserTest, NontupleInfeed) {
+  const string original = R"(HloModule nontuple_infeed
+ENTRY nontuple_infeed {
+ token = token[] after-all()
+ ROOT infeed = pred[] infeed(token)
+})";
+ ExpectHasSubstr(ParseHloString(original).status().error_message(),
+ "infeed must have a non-empty tuple shape");
+}
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_query.cc b/tensorflow/compiler/xla/service/hlo_query.cc
index 2418c19f3d..2a07b6fcbc 100644
--- a/tensorflow/compiler/xla/service/hlo_query.cc
+++ b/tensorflow/compiler/xla/service/hlo_query.cc
@@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_query.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/shape_util.h"
diff --git a/tensorflow/compiler/xla/service/hlo_reachability_test.cc b/tensorflow/compiler/xla/service/hlo_reachability_test.cc
index 657a9ee83d..585c95972b 100644
--- a/tensorflow/compiler/xla/service/hlo_reachability_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_reachability_test.cc
@@ -39,15 +39,15 @@ TEST_F(HloReachabilityTest, Reachability) {
*/
auto builder = HloComputation::Builder(TestName());
auto a = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto b = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto c = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto d = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto e = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
builder.Build();
HloReachabilityMap reachability({a, b, c, d, e});
diff --git a/tensorflow/compiler/xla/service/hlo_rematerialization.cc b/tensorflow/compiler/xla/service/hlo_rematerialization.cc
index 62c07d7fac..59a8800a7d 100644
--- a/tensorflow/compiler/xla/service/hlo_rematerialization.cc
+++ b/tensorflow/compiler/xla/service/hlo_rematerialization.cc
@@ -1244,7 +1244,7 @@ StatusOr<bool> HloRematerialization::Run(
// TODO(b/80249101): Instead of a separate copy elision pass, use the
// ordering from the HLO schedule directly for copy insertion.
SequentialHloOrdering ordering(module, *sequence);
- TF_RETURN_IF_ERROR(RemoveUnnecessaryCopies(ordering, {}, module));
+ TF_RETURN_IF_ERROR(RemoveUnnecessaryCopies(ordering, module));
}
// Compute peak memory usage of all computations in the module called in a
diff --git a/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc b/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc
index 7a46da6efe..cd131147e6 100644
--- a/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc
@@ -132,7 +132,7 @@ class HloRematerializationTest : public HloTestBase {
builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
return builder.Build();
}
@@ -226,7 +226,7 @@ TEST_F(HloRematerializationTest, RematerializeAroundWhile) {
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloComputation* while_cond =
module->AddEmbeddedComputation(cond_builder.Build());
@@ -263,7 +263,7 @@ TEST_F(HloRematerializationTest, RematerializeEntryAndWhileBody) {
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloComputation* while_cond =
module->AddEmbeddedComputation(cond_builder.Build());
@@ -296,7 +296,7 @@ TEST_F(HloRematerializationTest, RematerializeNestedComputations) {
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloComputation* while_cond =
module->AddEmbeddedComputation(cond_builder.Build());
diff --git a/tensorflow/compiler/xla/service/hlo_scheduling_test.cc b/tensorflow/compiler/xla/service/hlo_scheduling_test.cc
index 73f22f81f4..cf9ceed5b2 100644
--- a/tensorflow/compiler/xla/service/hlo_scheduling_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_scheduling_test.cc
@@ -168,8 +168,9 @@ TEST_F(HloSchedulingTest, ListAccountsForSubcomputations) {
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, r1f32, "cond_param"));
- HloInstruction* zero_vector = cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2<float>({{0, 0, 0, 0}})));
+ HloInstruction* zero_vector =
+ cond_builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR2<float>({{0, 0, 0, 0}})));
cond_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kNe, cond_param, zero_vector));
auto cond_computation = module->AddEmbeddedComputation(cond_builder.Build());
@@ -179,16 +180,18 @@ TEST_F(HloSchedulingTest, ListAccountsForSubcomputations) {
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, r1f32, "body_param"));
- HloInstruction* one_vector = body_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2<float>({{1, 1, 1, 1}})));
+ HloInstruction* one_vector =
+ body_builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR2<float>({{1, 1, 1, 1}})));
body_builder.AddInstruction(HloInstruction::CreateBinary(
r1f32, HloOpcode::kSubtract, body_param, one_vector));
auto body_computation = module->AddEmbeddedComputation(body_builder.Build());
// transpose(matrix) + bcast(while)
auto builder = HloComputation::Builder(TestName());
- HloInstruction* while_init = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2<float>({{1, 1, 1, 1}})));
+ HloInstruction* while_init =
+ builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR2<float>({{1, 1, 1, 1}})));
// Creates 16 bytes, ignoring subcomputations
HloInstruction* while_loop =
builder.AddInstruction(HloInstruction::CreateWhile(
@@ -199,7 +202,7 @@ TEST_F(HloSchedulingTest, ListAccountsForSubcomputations) {
HloInstruction::CreateBroadcast(r2f32, while_loop, {0}));
HloInstruction* matrix = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2<float>(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>(
{{1.0, 2.0, 3.0, 4.0}, {1.0, 2.0, 3.0, 4.0}})));
// Creates 32 bytes
HloInstruction* transpose = builder.AddInstruction(
@@ -257,7 +260,7 @@ TEST_F(HloSchedulingTest, TuplesAreAccountedCorrectly) {
// Wrap lit in abs because constants are considered free by
// IgnoreInstruction, and it skews the accounting.
auto lit = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1, 1, 1, 1, 1, 1})));
+ LiteralUtil::CreateR1<float>({1, 1, 1, 1, 1, 1})));
auto abs_const = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kAbs, lit));
@@ -300,11 +303,11 @@ TEST_F(HloSchedulingTest, MultiOutputFusionAccountedCorrectly) {
HloComputation::Builder builder(TestName());
auto c1 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1, 1, 1, 1, 1})));
+ LiteralUtil::CreateR1<float>({1, 1, 1, 1, 1})));
auto c2 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1, 2, 3, 4, 5})));
+ LiteralUtil::CreateR1<float>({1, 2, 3, 4, 5})));
auto c3 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({0, 2, 4, 6, 8})));
+ LiteralUtil::CreateR1<float>({0, 2, 4, 6, 8})));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kAdd, c1, c2));
@@ -354,8 +357,9 @@ TEST_F(HloSchedulingTest, HeapSimulatorAccountsForSubcomputations) {
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, r1f32, "cond_param"));
- HloInstruction* zero_vector = cond_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2<float>({{0, 0, 0, 0}})));
+ HloInstruction* zero_vector =
+ cond_builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR2<float>({{0, 0, 0, 0}})));
cond_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kNe, cond_param, zero_vector));
auto cond_computation = module->AddEmbeddedComputation(cond_builder.Build());
@@ -365,15 +369,17 @@ TEST_F(HloSchedulingTest, HeapSimulatorAccountsForSubcomputations) {
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, r1f32, "body_param"));
- HloInstruction* one_vector = body_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2<float>({{1, 1, 1, 1}})));
+ HloInstruction* one_vector =
+ body_builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR2<float>({{1, 1, 1, 1}})));
body_builder.AddInstruction(HloInstruction::CreateBinary(
r1f32, HloOpcode::kSubtract, body_param, one_vector));
auto body_computation = module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
- HloInstruction* while_init = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2<float>({{1, 1, 1, 1}})));
+ HloInstruction* while_init =
+ builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR2<float>({{1, 1, 1, 1}})));
// Creates 16 bytes, ignoring subcomputations
builder.AddInstruction(HloInstruction::CreateWhile(
r1f32, cond_computation, body_computation, while_init));
diff --git a/tensorflow/compiler/xla/service/hlo_sharding.cc b/tensorflow/compiler/xla/service/hlo_sharding.cc
index 268b4727bc..393944c20f 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding.cc
+++ b/tensorflow/compiler/xla/service/hlo_sharding.cc
@@ -60,6 +60,9 @@ HloSharding HloSharding::Tuple(
const Shape& tuple_shape,
tensorflow::gtl::ArraySlice<HloSharding> shardings) {
CHECK(ShapeUtil::IsTuple(tuple_shape)) << ShapeUtil::HumanString(tuple_shape);
+ for (auto& sharding : shardings) {
+ CHECK(!sharding.IsTuple()) << sharding.ToString();
+ }
std::vector<HloSharding> flattened_list(shardings.begin(), shardings.end());
CHECK_EQ(flattened_list.size(), RequiredLeaves(tuple_shape))
<< "Flat list has " << flattened_list.size() << ", required "
@@ -67,6 +70,24 @@ HloSharding HloSharding::Tuple(
return HloSharding(flattened_list);
}
+HloSharding HloSharding::SingleTuple(const Shape& tuple_shape,
+ const HloSharding& sharding) {
+ CHECK(ShapeUtil::IsTuple(tuple_shape)) << ShapeUtil::HumanString(tuple_shape);
+ CHECK(!sharding.IsTuple()) << sharding.ToString();
+ int64 leaf_count = ShapeUtil::GetLeafCount(tuple_shape);
+ std::vector<HloSharding> flattened_list;
+ flattened_list.reserve(leaf_count);
+ for (int64 i = 0; i < leaf_count; ++i) {
+ flattened_list.push_back(sharding);
+ }
+ return HloSharding(flattened_list);
+}
+
+HloSharding HloSharding::Single(const Shape& shape,
+ const HloSharding& sharding) {
+ return ShapeUtil::IsTuple(shape) ? SingleTuple(shape, sharding) : sharding;
+}
+
string HloSharding::ToString() const {
if (IsTuple()) {
std::vector<string> parts;
diff --git a/tensorflow/compiler/xla/service/hlo_sharding.h b/tensorflow/compiler/xla/service/hlo_sharding.h
index 34324d2058..6f672b0f28 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding.h
+++ b/tensorflow/compiler/xla/service/hlo_sharding.h
@@ -24,7 +24,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/array.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/protobuf_util.h"
#include "tensorflow/compiler/xla/shape_tree.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
@@ -80,6 +80,15 @@ class HloSharding {
static HloSharding Tuple(const Shape& tuple_shape,
tensorflow::gtl::ArraySlice<HloSharding> shardings);
+ // Creates a new sharding for a tuple type, with a single input sharding
+ // repeated on each leaf.
+ static HloSharding SingleTuple(const Shape& tuple_shape,
+ const HloSharding& sharding);
+
+  // If shape is an array, returns sharding; otherwise returns a tuple-shaped
+  // sharding with all the leaf nodes having the same input sharding.
+ static HloSharding Single(const Shape& shape, const HloSharding& sharding);
+
// Create a new sharding from a protobuf OpSharding.
static StatusOr<HloSharding> FromProto(const OpSharding& proto);
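
As a usage sketch for the two factory helpers added above (all shapes and values illustrative; HloSharding::Replicate() is the existing replicated-sharding factory), Single() is shape-polymorphic and delegates to SingleTuple() exactly when the shape is a tuple:

  // Replicate every leaf of a (f32[4], s32[]) tuple.
  Shape tuple_shape = ShapeUtil::MakeTupleShape(
      {ShapeUtil::MakeShape(F32, {4}), ShapeUtil::MakeShape(S32, {})});
  HloSharding tuple_sharding =
      HloSharding::Single(tuple_shape, HloSharding::Replicate());
  CHECK(tuple_sharding.IsTuple());  // Two leaves, each replicated.

  // For an array shape, Single() returns the input sharding unchanged.
  Shape array_shape = ShapeUtil::MakeShape(F32, {4});
  CHECK(!HloSharding::Single(array_shape, HloSharding::Replicate()).IsTuple());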
diff --git a/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc b/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc
index 748273a43c..94f5a3b273 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc
+++ b/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc
@@ -88,6 +88,12 @@ std::vector<PassThrough> LocatePassThroughDomainLinks(
VLOG(2) << " " << instruction->ToString();
}
}
+ if (instruction == instruction->parent()->root_instruction()) {
+ pass_through.emplace_back(nullptr, instruction);
+ VLOG(2) << "Found passthrough domain link:";
+ VLOG(2) << " <root>";
+ VLOG(2) << " " << instruction->ToString();
+ }
}
return pass_through;
}
@@ -101,8 +107,12 @@ Status FixupPassThroughDomainLinks(const DomainMetadata::Domain& domain,
HloInstruction::CreateGetTupleElement(pass_through.operand->shape(),
tuple, 0));
gte->set_sharding(sharding);
- TF_RETURN_IF_ERROR(
- pass_through.operand->ReplaceUseWith(pass_through.user, gte));
+ if (pass_through.user != nullptr) {
+ TF_RETURN_IF_ERROR(
+ pass_through.operand->ReplaceUseWith(pass_through.user, gte));
+ } else {
+ pass_through.operand->parent()->set_root_instruction(gte);
+ }
}
return Status::OK();
}
@@ -235,21 +245,6 @@ StatusOr<int64> ApplyDomainShardingPass(const DomainMetadata::Domain& domain,
Status ApplyDomainSharding(const DomainMetadata::Domain& domain,
const HloSharding& sharding) {
- // Here is the place to call external sharding normalizers, which are
- // implemented in other modules (ie, spatial partitioning).
- // The signature of the external normalizer function should be something
- // like:
- //
- // StatusOr<bool> Normalizer(const DomainMetadata::Domain&,
- // const HloSharding& sharding);
- //
- // The function should return true if it has processed the domain
- // normalization, false if domain was not one recognized by it, or an error.
- // We will call the functions in order below, and fall back to local code if
- // none of the external normalizers acted on the domain.
- // External normalizers should not handle the cases that are already handled
- // locally.
-
// None of the external normalizers handled the domain sharding, try to see
// whether this is a single sharding first.
auto single_sharding = sharding.ExtractSingleSharding();
@@ -377,28 +372,39 @@ bool ShardingMetadata::Matches(const DomainMetadata& other) const {
}
string ShardingMetadata::ToString() const {
- return sharding_ != nullptr ? sharding_->ToString() : "None";
+ return sharding_ != nullptr ? sharding_->ToString() : "{}";
}
-Status ShardingMetadata::NormalizeInstructions(
- const DomainMetadata::Domain& domain) const {
- if (sharding_ != nullptr) {
- VLOG(4) << "Normalizing sharding to " << sharding_->ToString() << ":";
- TF_RETURN_IF_ERROR(ApplyDomainSharding(domain, *sharding_));
- TF_RETURN_IF_ERROR(FixupPassThroughDomainLinks(domain, *sharding_));
+/*static*/ StatusOr<const ShardingMetadata*>
+ShardingMetadata::ToShardingMetadata(const DomainMetadata* metadata) {
+ if (metadata->Kind() != ShardingMetadata::KindName()) {
+ return Status(
+ tensorflow::error::INVALID_ARGUMENT,
+ "ShardingMetadata normalizer called with incorrect domain metadata");
}
- return Status::OK();
+ return static_cast<const ShardingMetadata*>(metadata);
}
-Status NormalizeShardingDomain(const DomainMetadata::Domain& domain) {
- TF_ASSIGN_OR_RETURN(std::unique_ptr<HloSharding> sharding,
- ExtractOriginalCommonSharding(domain.instructions));
- if (sharding != nullptr) {
- VLOG(4) << "Normalizing sharding-less domain to " << sharding->ToString()
- << ":";
- TF_RETURN_IF_ERROR(ApplyDomainSharding(domain, *sharding));
+Status ShardingMetadata::NormalizeShardingDomain(
+ const DomainMetadata::Domain& domain, const DomainMetadata* metadata) {
+ if (metadata != nullptr) {
+ TF_ASSIGN_OR_RETURN(const auto& sharding_metadata,
+ ToShardingMetadata(metadata));
+ const HloSharding* sharding = sharding_metadata->sharding();
+ if (sharding != nullptr) {
+ VLOG(4) << "Normalizing sharding to " << sharding->ToString() << ":";
+ TF_RETURN_IF_ERROR(ApplyDomainSharding(domain, *sharding));
+ TF_RETURN_IF_ERROR(FixupPassThroughDomainLinks(domain, *sharding));
+ }
} else {
- VLOG(1) << "Unable to find common sharding";
+ TF_ASSIGN_OR_RETURN(std::unique_ptr<HloSharding> sharding,
+ ExtractOriginalCommonSharding(domain.instructions));
+ if (sharding != nullptr) {
+ VLOG(4) << "Normalizing sharding-less domain to " << sharding->ToString();
+ TF_RETURN_IF_ERROR(ApplyDomainSharding(domain, *sharding));
+ } else {
+ VLOG(1) << "Unable to find common sharding";
+ }
}
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/hlo_sharding_metadata.h b/tensorflow/compiler/xla/service/hlo_sharding_metadata.h
index ec162c3490..5e01fc0e22 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding_metadata.h
+++ b/tensorflow/compiler/xla/service/hlo_sharding_metadata.h
@@ -38,23 +38,26 @@ class ShardingMetadata : public DomainMetadata {
string ToString() const override;
- Status NormalizeInstructions(
- const DomainMetadata::Domain& domain) const override;
+ const HloSharding* sharding() const { return sharding_.get(); }
static tensorflow::StringPiece KindName() { return "sharding"; }
+ static StatusOr<const ShardingMetadata*> ToShardingMetadata(
+ const DomainMetadata* metadata);
+
+ // Apply the specified domain metadata onto the specified domain. If no
+  // metadata is specified, then apply sharding heuristics and normalize the
+  // instructions whose sharding deviates from the one inferred to be the
+  // original. Policy-wise, HLO passes are allowed to create new
+ // unassigned instructions, but if they do create assigned ones, they have to
+ // conform to the ones around.
+ static Status NormalizeShardingDomain(const DomainMetadata::Domain& domain,
+ const DomainMetadata* metadata);
+
private:
std::unique_ptr<HloSharding> sharding_;
};
-// Within a set of instructions which had common sharding attributes before
-// entring the HLO passes pipeline, apply sharding heuristics and normalize the
-// instructions whose sharding deviates from the one which is inferred as to be
-// the original one.
-// Policy wise, HLO passes are allowed to create new unassigned instructions,
-// but if they do create assigned ones, they have to conform to the ones around.
-Status NormalizeShardingDomain(const DomainMetadata::Domain& domain);
-
// Given an HLO graph edge between instruction and one of its operands, creates
// a ShardingMetadata based kDomain instruction if the sharding between
// instruction and operand changes. Returns nullptr if there is no need for a
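
A minimal calling sketch for the refactored normalization entry point (the wrapper function name is hypothetical): passing null metadata takes the sharding-inference path that the removed free-standing NormalizeShardingDomain() used to implement, while non-null metadata is first validated via ToShardingMetadata() before its sharding is applied.

  Status NormalizeDomainSketch(const DomainMetadata::Domain& domain,
                               const DomainMetadata* metadata) {
    return ShardingMetadata::NormalizeShardingDomain(domain, metadata);
  }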
diff --git a/tensorflow/compiler/xla/service/hlo_sharding_test.cc b/tensorflow/compiler/xla/service/hlo_sharding_test.cc
index 54b7402b86..7baa927d0e 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_sharding_test.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include <utility>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
diff --git a/tensorflow/compiler/xla/service/hlo_subcomputation_unification_test.cc b/tensorflow/compiler/xla/service/hlo_subcomputation_unification_test.cc
index 7b601f9a95..45c684d667 100644
--- a/tensorflow/compiler/xla/service/hlo_subcomputation_unification_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_subcomputation_unification_test.cc
@@ -75,7 +75,7 @@ TEST_F(HloSubcomputationUnificationTest, UnifyIdentities) {
module->AddEmbeddedComputation(CreateR0S32IdentityComputation());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(5)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(5)));
auto x = builder.AddInstruction(
HloInstruction::CreateCall(r0s32_, {constant}, callee1));
auto y = builder.AddInstruction(
@@ -112,9 +112,9 @@ TEST_F(HloSubcomputationUnificationTest, UnifyAdditions) {
module->AddEmbeddedComputation(CreateR0S32AdditionComputation());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(5)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(5)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(3)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(3)));
auto x = builder.AddInstruction(
HloInstruction::CreateCall(r0s32_, {constant1, constant2}, callee1));
auto y = builder.AddInstruction(
diff --git a/tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc b/tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc
index 3dc733940f..48f676db85 100644
--- a/tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc
+++ b/tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc
@@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_tfgraph_builder.h"
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
diff --git a/tensorflow/compiler/xla/service/hlo_tfgraph_builder_test.cc b/tensorflow/compiler/xla/service/hlo_tfgraph_builder_test.cc
index be156d765d..1e2b31a1f2 100644
--- a/tensorflow/compiler/xla/service/hlo_tfgraph_builder_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_tfgraph_builder_test.cc
@@ -90,7 +90,7 @@ TEST_F(HloTfGraphBuilderTest, CheckConcatenateDimsAndShapes) {
TEST_F(HloTfGraphBuilderTest, CheckScalarValue) {
auto builder = HloComputation::Builder("Const");
HloInstruction *instruction = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0(123)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0(123)));
OpMetadata metadata;
metadata.set_op_name("x");
metadata.set_op_type("y");
diff --git a/tensorflow/compiler/xla/service/hlo_value.cc b/tensorflow/compiler/xla/service/hlo_value.cc
index 7b27dbfec3..4e3c9df3a0 100644
--- a/tensorflow/compiler/xla/service/hlo_value.cc
+++ b/tensorflow/compiler/xla/service/hlo_value.cc
@@ -125,7 +125,7 @@ bool MayUseOperandValue(int64 operand_number, const ShapeIndex& index,
// transparently.
CHECK_EQ(operand_number, 0);
return index.empty();
- case HloOpcode::kSelect:
+ case HloOpcode::kTupleSelect:
// Select does not use any nested elements of its selected-from operands
// (operand 1 and 2)
CHECK_GE(operand_number, 0);
diff --git a/tensorflow/compiler/xla/service/hlo_verifier.cc b/tensorflow/compiler/xla/service/hlo_verifier.cc
index 1d6cd4cb23..48eeba6afd 100644
--- a/tensorflow/compiler/xla/service/hlo_verifier.cc
+++ b/tensorflow/compiler/xla/service/hlo_verifier.cc
@@ -15,6 +15,8 @@ limitations under the License.
#include <set>
+#include "tensorflow/compiler/xla/service/hlo_casting_utils.h"
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/service/hlo_verifier.h"
#include "tensorflow/compiler/xla/status_macros.h"
@@ -39,6 +41,10 @@ Status ShapeVerifier::HandleSelect(HloInstruction* select) {
return CheckTernaryShape(select);
}
+Status ShapeVerifier::HandleTupleSelect(HloInstruction* tuple_select) {
+ return CheckTernaryShape(tuple_select);
+}
+
Status ShapeVerifier::HandleConcatenate(HloInstruction* concatenate) {
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : concatenate->operands()) {
@@ -106,22 +112,73 @@ Status ShapeVerifier::HandleReducePrecision(HloInstruction* reduce_precision) {
reduce_precision->mantissa_bits()));
}
-Status ShapeVerifier::HandleInfeed(HloInstruction*) { return Status::OK(); }
+namespace {
+
+Status CheckIsTokenOperand(const HloInstruction* instruction,
+ int64 operand_no) {
+ const HloInstruction* token = instruction->operand(operand_no);
+ if (!ShapeUtil::Equal(token->shape(), ShapeUtil::MakeTokenShape())) {
+ return InternalError(
+ "Expected operand %lld to be token-shaped, actual shape is"
+ "%s:\n%s",
+ operand_no, ShapeUtil::HumanString(token->shape()).c_str(),
+ instruction->ToString().c_str());
+ }
+ return Status::OK();
+}
+
+Status CheckOperandAndParameter(const HloInstruction* instruction,
+ int64 operand_number,
+ const HloComputation* computation,
+ int64 parameter_number) {
+ const HloInstruction* operand = instruction->operand(operand_number);
+ const HloInstruction* parameter =
+ computation->parameter_instruction(parameter_number);
+ if (!ShapeUtil::Compatible(operand->shape(), parameter->shape())) {
+ return InternalError("Operand %s shape does not match parameter's %s in %s",
+ operand->ToString().c_str(),
+ parameter->ToString().c_str(),
+ instruction->ToString().c_str());
+ }
+ return Status::OK();
+}
+
+} // namespace
+
+Status ShapeVerifier::HandleInfeed(HloInstruction* instruction) {
+ HloInfeedInstruction* infeed = Cast<HloInfeedInstruction>(instruction);
+ // Infeed has an optional single token operand.
+ // TODO(b/80000000): Update when token is not optional.
+ if (infeed->operand_count() == 1) {
+ TF_RETURN_IF_ERROR(CheckIsTokenOperand(instruction, 0));
+ }
+
+ // The output of infeed is a tuple containing the data value and a token.
+ return CheckShape(infeed,
+ ShapeUtil::MakeTupleShape(
+ {infeed->infeed_shape(), ShapeUtil::MakeTokenShape()}));
+}
+
+Status ShapeVerifier::HandleOutfeed(HloInstruction* instruction) {
+ HloOutfeedInstruction* outfeed = Cast<HloOutfeedInstruction>(instruction);
+ // Outfeed has an optional token operand (operand 1).
+ // TODO(b/80000000): Update when token is not optional.
+ if (outfeed->operand_count() == 2) {
+ TF_RETURN_IF_ERROR(CheckIsTokenOperand(instruction, 1));
+ }
-Status ShapeVerifier::HandleOutfeed(HloInstruction* outfeed) {
// Outfeed has a separate shape field for the value which is outfed to the
- // host. The shape of the instruction itself is always nil because the outfeed
- // produces no HLO value in the graph.
+ // host. The shape of the instruction itself is always a token.
if (!ShapeUtil::Compatible(outfeed->outfeed_shape(),
outfeed->operand(0)->shape())) {
return InternalError(
- "Expected outfeed to have shape compatible with operand's shape %s, "
+ "Expected outfeed shape to be compatible with operand's shape %s, "
"actual shape is %s:\n%s",
ShapeUtil::HumanString(outfeed->operand(0)->shape()).c_str(),
ShapeUtil::HumanString(outfeed->outfeed_shape()).c_str(),
outfeed->ToString().c_str());
}
- return CheckShape(outfeed, ShapeUtil::MakeNil());
+ return CheckShape(outfeed, ShapeUtil::MakeTokenShape());
}
Status ShapeVerifier::HandleHostCompute(HloInstruction*) {
@@ -137,7 +194,16 @@ Status ShapeVerifier::HandleReverse(HloInstruction* reverse) {
}
Status ShapeVerifier::HandleSort(HloInstruction* sort) {
- return CheckUnaryShape(sort);
+ if (sort->operand_count() == 2 &&
+ !ShapeUtil::SameDimensions(sort->operand(0)->shape(),
+ sort->operand(1)->shape())) {
+ return InternalError(
+ "Expected sort to have to have the same dimensions for the keys and "
+ "the values. Keys shape is: %s\n, Values shape is: %s",
+ ShapeUtil::HumanString(sort->operand(0)->shape()).c_str(),
+ ShapeUtil::HumanString(sort->operand(1)->shape()).c_str());
+ }
+ return CheckVariadicShape(sort);
}
Status ShapeVerifier::HandleConstant(HloInstruction* constant) {
@@ -203,8 +269,11 @@ Status ShapeVerifier::HandleParameter(HloInstruction* hlo) {
Status ShapeVerifier::HandleFusion(HloInstruction*) { return Status::OK(); }
Status ShapeVerifier::HandleCall(HloInstruction* call) {
+ for (int64 i = 0; i < call->to_apply()->num_parameters(); ++i) {
+ TF_RETURN_IF_ERROR(CheckOperandAndParameter(call, i, call->to_apply(), i));
+ }
// The shape of kCall should match the shape of the computation it calls.
- return CheckShape(call, call->to_apply()->ComputeProgramShape().result());
+ return CheckShape(call, call->to_apply()->root_instruction()->shape());
}
Status ShapeVerifier::HandleCustomCall(HloInstruction*) { return Status::OK(); }
@@ -273,19 +342,37 @@ Status ShapeVerifier::HandleSelectAndScatter(HloInstruction* instruction) {
}
Status ShapeVerifier::HandleWhile(HloInstruction* xla_while) {
+ TF_RETURN_IF_ERROR(
+ CheckOperandAndParameter(xla_while, 0, xla_while->while_body(), 0));
+ TF_RETURN_IF_ERROR(
+ CheckOperandAndParameter(xla_while, 0, xla_while->while_condition(), 0));
+ const Shape& conditional_shape =
+ xla_while->while_condition()->root_instruction()->shape();
+ if (!ShapeUtil::Compatible(conditional_shape,
+ ShapeUtil::MakeShape(PRED, {}))) {
+ return InternalError(
+ "Conditional computation shape does not lead to a scalar predicate "
+ "shape: %s",
+ ShapeUtil::HumanString(conditional_shape).c_str());
+ }
// The shape of kWhile should match the shape of the body computation it
// calls.
return CheckShape(xla_while,
- xla_while->while_body()->ComputeProgramShape().result());
+ xla_while->while_body()->root_instruction()->shape());
}
Status ShapeVerifier::HandleConditional(HloInstruction* conditional) {
+ TF_RETURN_IF_ERROR(CheckOperandAndParameter(
+ conditional, 1, conditional->true_computation(), 0));
+ TF_RETURN_IF_ERROR(CheckOperandAndParameter(
+ conditional, 2, conditional->false_computation(), 0));
+ TF_RETURN_IF_ERROR(
+ CheckShape(conditional,
+ conditional->true_computation()->root_instruction()->shape()));
TF_RETURN_IF_ERROR(CheckShape(
conditional,
- conditional->true_computation()->ComputeProgramShape().result()));
- return CheckShape(
- conditional,
- conditional->false_computation()->ComputeProgramShape().result());
+ conditional->false_computation()->root_instruction()->shape()));
+ return Status::OK();
}
Status ShapeVerifier::HandlePad(HloInstruction* pad) {
@@ -299,9 +386,11 @@ Status ShapeVerifier::HandleSend(HloInstruction* send) {
const HloInstruction* send_done = send->users().front();
TF_RET_CHECK(send_done->opcode() == HloOpcode::kSendDone);
TF_RETURN_IF_ERROR(CheckSameChannel(send, send_done));
- return CheckShape(
- send, ShapeUtil::MakeTupleShape(
- {send->operand(0)->shape(), ShapeUtil::MakeShape(U32, {})}));
+ TF_RETURN_IF_ERROR(CheckIsTokenOperand(send, 1));
+ return CheckShape(send,
+ ShapeUtil::MakeTupleShape({send->operand(0)->shape(),
+ ShapeUtil::MakeShape(U32, {}),
+ ShapeUtil::MakeTokenShape()}));
}
Status ShapeVerifier::HandleSendDone(HloInstruction* send_done) {
@@ -309,7 +398,8 @@ Status ShapeVerifier::HandleSendDone(HloInstruction* send_done) {
const HloInstruction* send = send_done->operand(0);
TF_RET_CHECK(send->opcode() == HloOpcode::kSend);
TF_RETURN_IF_ERROR(CheckSameChannel(send, send_done));
- return CheckShape(send_done, ShapeUtil::MakeNil());
+
+ return CheckShape(send_done, ShapeUtil::MakeTokenShape());
}
Status ShapeVerifier::HandleRecv(HloInstruction* recv) {
@@ -317,9 +407,11 @@ Status ShapeVerifier::HandleRecv(HloInstruction* recv) {
const HloInstruction* recv_done = recv->users().front();
TF_RET_CHECK(recv_done->opcode() == HloOpcode::kRecvDone);
TF_RETURN_IF_ERROR(CheckSameChannel(recv, recv_done));
- return CheckShape(recv,
- ShapeUtil::MakeTupleShape(
- {recv_done->shape(), ShapeUtil::MakeShape(U32, {})}));
+ TF_RETURN_IF_ERROR(CheckIsTokenOperand(recv, 0));
+ return CheckShape(
+ recv, ShapeUtil::MakeTupleShape(
+ {ShapeUtil::GetTupleElementShape(recv_done->shape(), 0),
+ ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()}));
}
Status ShapeVerifier::HandleRecvDone(HloInstruction* recv_done) {
@@ -327,7 +419,9 @@ Status ShapeVerifier::HandleRecvDone(HloInstruction* recv_done) {
const HloInstruction* recv = recv_done->operand(0);
TF_RET_CHECK(recv->opcode() == HloOpcode::kRecv);
TF_RETURN_IF_ERROR(CheckSameChannel(recv, recv_done));
- return CheckShape(recv_done, recv->shape().tuple_shapes(0));
+ return CheckShape(recv_done,
+ ShapeUtil::MakeTupleShape({recv->shape().tuple_shapes(0),
+ ShapeUtil::MakeTokenShape()}));
}
Status ShapeVerifier::HandleBatchNormTraining(
@@ -386,6 +480,7 @@ Status CheckMixedPrecisionOperands(const HloInstruction* instruction) {
case HloOpcode::kRecvDone:
case HloOpcode::kReducePrecision:
case HloOpcode::kSelect:
+ case HloOpcode::kTupleSelect:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kTuple:
@@ -426,13 +521,12 @@ Status ShapeVerifier::HandleGather(HloInstruction* gather) {
gather->gather_dimension_numbers(), gather->gather_window_bounds()));
}
-Status ShapeVerifier::HandleGenerateToken(HloInstruction* token) {
+Status ShapeVerifier::HandleAfterAll(HloInstruction* token) {
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : token->operands()) {
operand_shapes.push_back(&operand->shape());
}
- return CheckShape(token,
- ShapeInference::InferGenerateTokenShape(operand_shapes));
+ return CheckShape(token, ShapeInference::InferAfterAllShape(operand_shapes));
}
Status ShapeVerifier::CheckShape(const HloInstruction* instruction,
@@ -449,16 +543,10 @@ Status ShapeVerifier::CheckShape(const HloInstruction* instruction,
// We treat BF16 and F32 as compatible types if mixed precision is allowed,
// but only when the instruction defines the BF16/F32 buffer.
switch (instruction->opcode()) {
- case HloOpcode::kSelect:
- if (ShapeUtil::IsTuple(inferred_shape) || !allow_mixed_precision_) {
- // Select only defines the top-level buffer, which in this case is the
- // tuple, so we cannot allow mixed precision.
- compatible =
- ShapeUtil::Compatible(instruction->shape(), inferred_shape);
- } else {
- compatible = ShapeUtil::CompatibleIgnoringFpPrecision(
- instruction->shape(), inferred_shape);
- }
+ case HloOpcode::kTupleSelect:
+ // TupleSelect only defines the top-level buffer, which in this case is
+ // the tuple, so we cannot allow mixed precision.
+ compatible = ShapeUtil::Compatible(instruction->shape(), inferred_shape);
break;
case HloOpcode::kGetTupleElement:
case HloOpcode::kTuple:
@@ -751,33 +839,23 @@ Status HloVerifier::CheckWhileInstruction(HloInstruction* instruction) {
"While loop must have exactly one operand; had %lld : %s",
instruction->operand_count(), instruction->ToString().c_str());
}
- auto* init = instruction->operand(0);
- auto* cond_param = while_cond->parameter_instruction(0);
- if (!ShapeUtil::Compatible(init->shape(), cond_param->shape())) {
- return FailedPrecondition(
- "While condition's parameter must have the same shape as the "
- "loop's 'init'. init: %s, param: %s",
- init->ToString().c_str(), cond_param->ToString().c_str());
- }
- auto* cond_root = while_cond->root_instruction();
- if (!ShapeUtil::Compatible(cond_root->shape(),
- ShapeUtil::MakeShape(PRED, {}))) {
- return FailedPrecondition("While condition should have shape PRED: %s",
- cond_root->ToString().c_str());
- }
- auto* body_param = while_body->parameter_instruction(0);
- if (!ShapeUtil::Compatible(init->shape(), body_param->shape())) {
+ return Status::OK();
+}
+
+Status HloVerifier::CheckConditionalInstruction(HloInstruction* instruction) {
+ if (instruction->true_computation()->num_parameters() != 1) {
return FailedPrecondition(
- "While body's parameter must have the same shape as the loop's"
- " 'init'. init: %s, param: %s",
- init->ToString().c_str(), body_param->ToString().c_str());
+ "True computation %s of %s must have 1 parameter insted of %lld",
+ instruction->true_computation()->name().c_str(),
+ instruction->ToString().c_str(),
+ instruction->true_computation()->num_parameters());
}
- auto* body_root = while_body->root_instruction();
- if (!ShapeUtil::Compatible(init->shape(), body_root->shape())) {
+ if (instruction->false_computation()->num_parameters() != 1) {
return FailedPrecondition(
- "While body should have same shape as the loop's 'init'."
- "init: %s, body: %s",
- init->ToString().c_str(), body_root->ToString().c_str());
+ "False computation %s of %s must have 1 parameter insted of %lld",
+ instruction->false_computation()->name().c_str(),
+ instruction->ToString().c_str(),
+ instruction->false_computation()->num_parameters());
}
return Status::OK();
}
@@ -786,8 +864,7 @@ Status HloVerifier::CheckElementwiseInstruction(HloInstruction* instruction) {
const Shape& out_shape = instruction->shape();
for (HloInstruction* operand : instruction->operands()) {
const Shape& operand_shape = operand->shape();
- if (!ShapeUtil::IsScalar(operand_shape) &&
- !ShapeUtil::CompatibleIgnoringElementType(operand_shape, out_shape)) {
+ if (!ShapeUtil::CompatibleIgnoringElementType(operand_shape, out_shape)) {
return FailedPrecondition(
"Implicit broadcast is not allowed in HLO."
"Found non-compatible shapes for instruction %s.\n"
@@ -815,9 +892,10 @@ bool ShapeContainsToken(const Shape& shape) {
}
// Verifies that all types entering and exiting the entry computation are
-// legal. For example, TOKEN types have no Literal representation and cannot be
-// on the interface of the entry computation (parameters and root instruction).
+// legal.
Status VerifyEntryAndExitShapes(const HloModule& module) {
+ // Tokens cannot be passed as entry parameters.
+ // TODO(b/80000000): Remove this constraint.
for (int i = 0; i < module.entry_computation()->num_parameters(); ++i) {
HloInstruction* param =
module.entry_computation()->parameter_instruction(i);
@@ -827,14 +905,6 @@ Status VerifyEntryAndExitShapes(const HloModule& module) {
ShapeUtil::HumanString(param->shape()).c_str());
}
}
- if (ShapeContainsToken(
- module.entry_computation()->root_instruction()->shape())) {
- return InternalError(
- "Entry root is or contains a token shape: %s",
- ShapeUtil::HumanString(
- module.entry_computation()->root_instruction()->shape())
- .c_str());
- }
return Status::OK();
}
@@ -881,7 +951,11 @@ StatusOr<bool> HloVerifier::Run(HloModule* module) {
<< " != " << ShapeUtil::Rank(instruction->operand(0)->shape());
} else if (instruction->opcode() == HloOpcode::kWhile) {
TF_RETURN_IF_ERROR(CheckWhileInstruction(instruction));
- } else if (instruction->IsElementwise()) {
+ } else if (instruction->opcode() == HloOpcode::kConditional) {
+ TF_RETURN_IF_ERROR(CheckConditionalInstruction(instruction));
+ } else if (instruction->opcode() !=
+ HloOpcode::kRng /* Rng operands are always scalar. */
+ && instruction->IsElementwise()) {
TF_RETURN_IF_ERROR(CheckElementwiseInstruction(instruction));
}
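
A builder-level sketch of the token plumbing the verifier now enforces, using only APIs already referenced in this patch (shapes illustrative): infeed produces a (data, token[]) tuple, which is exactly the shape HandleInfeed checks.

  auto builder = HloComputation::Builder("TokenShapesSketch");
  HloInstruction* token =
      builder.AddInstruction(HloInstruction::CreateToken());
  Shape data_shape = ShapeUtil::MakeShape(F32, {4});
  HloInstruction* infeed = builder.AddInstruction(
      HloInstruction::CreateInfeed(data_shape, token, /*config=*/""));
  // The instruction's shape is the tuple of the infeed data and a token.
  CHECK(ShapeUtil::Equal(
      infeed->shape(),
      ShapeUtil::MakeTupleShape({data_shape, ShapeUtil::MakeTokenShape()})));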
diff --git a/tensorflow/compiler/xla/service/hlo_verifier.h b/tensorflow/compiler/xla/service/hlo_verifier.h
index 7283b3e7dc..9e62bdc8a9 100644
--- a/tensorflow/compiler/xla/service/hlo_verifier.h
+++ b/tensorflow/compiler/xla/service/hlo_verifier.h
@@ -35,6 +35,7 @@ class ShapeVerifier : public DfsHloVisitor {
Status HandleElementwiseBinary(HloInstruction* hlo) override;
Status HandleClamp(HloInstruction* clamp) override;
Status HandleSelect(HloInstruction* select) override;
+ Status HandleTupleSelect(HloInstruction* tuple_select) override;
Status HandleConcatenate(HloInstruction* concatenate) override;
Status HandleConvert(HloInstruction* convert) override;
Status HandleBitcastConvert(HloInstruction* convert) override;
@@ -81,7 +82,7 @@ class ShapeVerifier : public DfsHloVisitor {
HloInstruction* batch_norm_inference) override;
Status HandleBatchNormGrad(HloInstruction* batch_norm_grad) override;
Status HandleGather(HloInstruction* gather) override;
- Status HandleGenerateToken(HloInstruction* token) override;
+ Status HandleAfterAll(HloInstruction* token) override;
Status FinishVisit(HloInstruction*) override { return Status::OK(); }
@@ -145,6 +146,8 @@ class HloVerifier : public HloPassInterface {
Status CheckWhileInstruction(HloInstruction* instruction);
+ Status CheckConditionalInstruction(HloInstruction* instruction);
+
// Checks that the non-scalar operand shapes are compatible to the output
// shape, i.e., that there are no implicit broadcasts of size-one dimensions.
Status CheckElementwiseInstruction(HloInstruction* instruction);
diff --git a/tensorflow/compiler/xla/service/hlo_verifier_test.cc b/tensorflow/compiler/xla/service/hlo_verifier_test.cc
index c92db0be14..04c6ba3eeb 100644
--- a/tensorflow/compiler/xla/service/hlo_verifier_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_verifier_test.cc
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
+#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
@@ -123,5 +124,55 @@ TEST_F(HloVerifierTest, ResetsShapeVerifierState) {
EXPECT_FALSE(verifier().Run(module.get()).status().ok());
}
+TEST_F(HloVerifierTest, CheckCallOperandParameterShapesMismatch) {
+ const char* const hlo_string = R"(
+HloModule Module
+
+callme {
+ ROOT param = (s32[], f32[4]) parameter(0)
+}
+
+ENTRY entry {
+ p0 = (f32[4], s32[]) parameter(0)
+ ROOT mycall = (s32[], f32[4]) call(p0), to_apply=callme
+}
+)";
+ TF_ASSERT_OK_AND_ASSIGN(auto module, ParseHloString(hlo_string));
+
+ auto status = verifier().Run(module.get()).status();
+ ASSERT_FALSE(status.ok());
+ EXPECT_THAT(status.error_message(),
+ HasSubstr("shape does not match parameter"));
+}
+
+TEST_F(HloVerifierTest, CheckConditionalOperandParameterShapesMismatch) {
+ const char* const hlo_string = R"(
+HloModule Module
+
+true_branch {
+ tparam = (s32[], f32[4]) parameter(0)
+ ROOT tgte1 = f32[4] get-tuple-element(tparam), index=1
+}
+
+false_branch {
+ fparam = (s32[], f32[4]) parameter(0)
+ ROOT fgte1 = f32[4] get-tuple-element(fparam), index=1
+}
+
+ENTRY entry {
+ p0 = (f32[4], s32[]) parameter(0)
+ constant = pred[] constant(true)
+ ROOT conditional = f32[4] conditional(constant, p0, p0),
+ true_computation=true_branch, false_computation=false_branch
+}
+)";
+ TF_ASSERT_OK_AND_ASSIGN(auto module, ParseHloString(hlo_string));
+
+ auto status = verifier().Run(module.get()).status();
+ ASSERT_FALSE(status.ok());
+ EXPECT_THAT(status.error_message(),
+ HasSubstr("shape does not match parameter"));
+}
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/implicit_broadcast_remover_test.cc b/tensorflow/compiler/xla/service/implicit_broadcast_remover_test.cc
index 8c7b38dd1b..f85d31d522 100644
--- a/tensorflow/compiler/xla/service/implicit_broadcast_remover_test.cc
+++ b/tensorflow/compiler/xla/service/implicit_broadcast_remover_test.cc
@@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/implicit_broadcast_remover.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/tests/hlo_verified_test_base.h"
diff --git a/tensorflow/compiler/xla/service/indexed_array_analysis.cc b/tensorflow/compiler/xla/service/indexed_array_analysis.cc
index 1985d20578..8b2df32567 100644
--- a/tensorflow/compiler/xla/service/indexed_array_analysis.cc
+++ b/tensorflow/compiler/xla/service/indexed_array_analysis.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
+#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace xla {
@@ -160,6 +161,12 @@ StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayFor(
computed_array,
ComputeArrayForReshape(instr->shape(),
FindOrDie(cache_, instr->operand(0))));
+ } else if (instr->opcode() == HloOpcode::kDot) {
+ TF_ASSIGN_OR_RETURN(
+ computed_array,
+ ComputeArrayForDot(instr->shape(), instr->dot_dimension_numbers(),
+ FindOrDie(cache_, instr->operand(0)),
+ FindOrDie(cache_, instr->operand(1))));
} else {
computed_array = nullptr;
}
@@ -290,8 +297,7 @@ StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForGather(
}
if (auto* indexed = dynamic_cast<ScalarIndexedArray*>(source)) {
- auto it = c_find(indexed->output_dims(), source_dim);
- if (it != indexed->output_dims().end()) {
+ if (c_linear_search(indexed->output_dims(), source_dim)) {
return FoldGatherOfGather(indexed, indices, source_dim, output_dims,
shape);
}
@@ -956,11 +962,177 @@ IndexedArrayAnalysis::ComputeArrayForElementwiseUnaryOp(HloOpcode opcode,
return Construct<ScalarIndexedConstantArray>(
new_source, scalar_indexed_const->indices(),
scalar_indexed_const->source_dim(),
- std::vector<int64>(scalar_indexed_const->output_dims().begin(),
- scalar_indexed_const->output_dims().end()),
+ ArraySliceToVector(scalar_indexed_const->output_dims()),
scalar_indexed_const->shape());
}
+namespace {
+
+// Returns the non-contracting non-batch dimension (as per `contracting_dims`
+// and `batch_dims`) if there is exactly one, otherwise returns nullopt.
+gtl::optional<int64> GetOnlyNonContractingNonBatchDim(
+ int64 rank, ArraySlice<int64> contracting_dims,
+ ArraySlice<int64> batch_dims) {
+ gtl::optional<int64> result;
+ for (int64 dim = 0; dim < rank; dim++) {
+ if (!ArrayContains(contracting_dims, dim) &&
+ !ArrayContains(batch_dims, dim)) {
+ if (result.has_value()) {
+ return gtl::nullopt;
+ }
+ result = dim;
+ }
+ }
+ return result;
+}
+
+// Returns true if `indexed_array`, which is either the LHS or the RHS of a Dot
+// HLO, can be folded into the dot operation. For now these conditions are both
+// necessary and sufficient.
+//
+// `tag` describes the caller. Used only for logging.
+//
+// `contracting_dims` and `batch_dims` are the contracting and batch dimensions
+// of whatever operand `indexed_array` is to the dot (LHS or RHS).
+bool CanFoldDotIntoIndexedArray(
+ tensorflow::StringPiece tag,
+ Analysis::ScalarIndexedConstantArray* indexed_array,
+ ArraySlice<int64> contracting_dims, ArraySlice<int64> batch_dims) {
+ gtl::optional<int64> non_contracting_non_batch_dim =
+ GetOnlyNonContractingNonBatchDim(ShapeUtil::Rank(indexed_array->shape()),
+ contracting_dims, batch_dims);
+ if (!non_contracting_non_batch_dim.has_value()) {
+ VLOG(3) << tag << ": multiple or no non-contracting non-batch dimensions";
+ return false;
+ }
+
+ if (indexed_array->output_dims().size() != 1 ||
+ indexed_array->output_dims()[0] != *non_contracting_non_batch_dim) {
+ VLOG(3) << tag << ": output dims != the non-contracting non-batch dim";
+ return false;
+ }
+
+ int64 indexed_array_rank = ShapeUtil::Rank(indexed_array->shape());
+ if (indexed_array->source_dim() < (indexed_array_rank - 2)) {
+ // This restriction can be lifted by inserting reshape nodes.
+ VLOG(3) << tag
+ << ": source dim is not in the low two dims, won't be able to form "
+ "a matmul";
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace
+
+StatusOr<Analysis::Array*>
+IndexedArrayAnalysis::ComputeArrayForDotWithIndexedLhs(
+ const Shape& shape, const DotDimensionNumbers& dim_numbers,
+ ScalarIndexedConstantArray* lhs, ConstantArray* rhs) {
+ VLOG(3) << "ComputeArrayForDotWithIndexedLhs(" << ToString(lhs) << " "
+ << ToString(rhs) << ")";
+ if (!CanFoldDotIntoIndexedArray(
+ "ComputeArrayForDotWithIndexedLhs", lhs, /*contracting_dims=*/
+ AsInt64Slice(dim_numbers.lhs_contracting_dimensions()),
+ /*batch_dims=*/AsInt64Slice(dim_numbers.lhs_batch_dimensions()))) {
+ return nullptr;
+ }
+
+ int64 lhs_rank = ShapeUtil::Rank(lhs->shape());
+ DotDimensionNumbers new_dim_numbers = dim_numbers;
+ new_dim_numbers.set_lhs_contracting_dimensions(
+ 0, lhs->source_dim() == (lhs_rank - 1) ? (lhs_rank - 2) : (lhs_rank - 1));
+
+ TF_ASSIGN_OR_RETURN(Literal * literal_for_new_source,
+ TakeOwnership(HloEvaluator{}.EvaluateDotOp(
+ new_dim_numbers, lhs->literal(), *rhs->literal())));
+
+ // The new source dimension is wherever the non-batch non-contracting LHS
+ // dimension "went".
+ int64 new_source_dim = dim_numbers.lhs_batch_dimensions_size() +
+ dim_numbers.rhs_batch_dimensions_size();
+
+ ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
+ return Construct<ScalarIndexedConstantArray>(
+ new_source, lhs->indices(), new_source_dim,
+ ArraySliceToVector(lhs->output_dims()), shape);
+}
+
+StatusOr<Analysis::Array*>
+IndexedArrayAnalysis::ComputeArrayForDotWithIndexedRhs(
+ const Shape& shape, const DotDimensionNumbers& dim_numbers,
+ ConstantArray* lhs, ScalarIndexedConstantArray* rhs) {
+ VLOG(3) << "ComputeArrayForDotWithIndexedRhs(" << ToString(lhs) << " "
+ << ToString(rhs) << ")";
+ if (!CanFoldDotIntoIndexedArray(
+ "ComputeArrayForDotWithIndexedRhs", rhs, /*contracting_dims=*/
+ AsInt64Slice(dim_numbers.rhs_contracting_dimensions()),
+ /*batch_dims=*/AsInt64Slice(dim_numbers.rhs_batch_dimensions()))) {
+ return nullptr;
+ }
+
+ int64 rhs_rank = ShapeUtil::Rank(rhs->shape());
+
+ DotDimensionNumbers new_dim_numbers = dim_numbers;
+ new_dim_numbers.set_rhs_contracting_dimensions(
+ 0, rhs->source_dim() == (rhs_rank - 1) ? (rhs_rank - 2) : (rhs_rank - 1));
+
+ TF_ASSIGN_OR_RETURN(Literal * literal_for_new_source,
+ TakeOwnership(HloEvaluator{}.EvaluateDotOp(
+ new_dim_numbers, *lhs->literal(), rhs->literal())));
+
+ // The new source dimension is wherever the non-batch non-contracting RHS
+ // dimension "went".
+ int64 new_source_dim = dim_numbers.lhs_batch_dimensions_size() +
+ dim_numbers.rhs_batch_dimensions_size() + 1;
+
+ ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
+ return Construct<ScalarIndexedConstantArray>(
+ new_source, rhs->indices(), new_source_dim,
+ ArraySliceToVector(rhs->output_dims()), shape);
+}
+
+StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForDot(
+ const Shape& shape, const DotDimensionNumbers& dim_numbers, Array* lhs,
+ Array* rhs) {
+ // Intuitively, if
+ //
+ // - The LHS of a dot product is a gathered sequence of rows from a constant
+ // array (i.e. LHS[I,J] = Const[Indices[I],J]) and the RHS is a constant
+ //
+ // OR
+ //
+ // - The RHS of a dot product is a gathered sequence of columns from a
+ // constant array (i.e. RHS[I,J] = Const[I, Indices[J]]) and the LHS is a
+ // constant
+ //
+ // then the result of the dot product itself is a gather from a constant
+ // array. E.g. Dot(LHS, ConstRhs) where LHS[I,J] = Const[Indices[I],J] can be
+ // rewritten as Result where Result[I,J] = Dot(Const, ConstRhs)[Indices[I],
+ // J].
+ //
+ // We do a general version of this rewrite here.
+ VLOG(3) << "ComputeArrayForDot(" << ToString(lhs) << " " << ToString(rhs) << ")";
+ if (auto* lhs_indexed_array =
+ dynamic_cast<ScalarIndexedConstantArray*>(lhs)) {
+ if (auto* rhs_constant = dynamic_cast<ConstantArray*>(rhs)) {
+ return ComputeArrayForDotWithIndexedLhs(shape, dim_numbers,
+ lhs_indexed_array, rhs_constant);
+ }
+ }
+
+ if (auto* rhs_indexed_array =
+ dynamic_cast<ScalarIndexedConstantArray*>(rhs)) {
+ if (auto* lhs_constant = dynamic_cast<ConstantArray*>(lhs)) {
+ return ComputeArrayForDotWithIndexedRhs(shape, dim_numbers, lhs_constant,
+ rhs_indexed_array);
+ }
+ }
+
+ return nullptr;
+}
+
tensorflow::StringPiece IndexedArrayAnalysisPrinterPass::name() const {
return "indexed-array-analysis-printer-pass";
}
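
The dimension bookkeeping above is easy to check in isolation. A self-contained sketch of GetOnlyNonContractingNonBatchDim's contract, using std::optional in place of the gtl types from the patch:

#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Mirrors GetOnlyNonContractingNonBatchDim: returns the single dimension that
// is neither contracting nor batch, or nullopt if there are zero or several.
std::optional<int64_t> OnlyFreeDim(int64_t rank,
                                   const std::vector<int64_t>& contracting,
                                   const std::vector<int64_t>& batch) {
  auto contains = [](const std::vector<int64_t>& v, int64_t x) {
    for (int64_t e : v) if (e == x) return true;
    return false;
  };
  std::optional<int64_t> result;
  for (int64_t dim = 0; dim < rank; ++dim) {
    if (!contains(contracting, dim) && !contains(batch, dim)) {
      if (result.has_value()) return std::nullopt;  // more than one free dim
      result = dim;
    }
  }
  return result;
}

int main() {
  // An s32[2,3,4] dot operand with batch dim {0} and contracting dim {1}:
  // the only free dimension is 2, so the fold is a candidate.
  std::cout << OnlyFreeDim(3, {1}, {0}).value() << "\n";  // prints 2
  // No contracting or batch dims: every dim is free, so the fold is rejected.
  std::cout << OnlyFreeDim(2, {}, {}).has_value() << "\n";  // prints 0
}
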
diff --git a/tensorflow/compiler/xla/service/indexed_array_analysis.h b/tensorflow/compiler/xla/service/indexed_array_analysis.h
index 8684430231..e923dc39f7 100644
--- a/tensorflow/compiler/xla/service/indexed_array_analysis.h
+++ b/tensorflow/compiler/xla/service/indexed_array_analysis.h
@@ -268,6 +268,18 @@ class IndexedArrayAnalysis {
tensorflow::gtl::ArraySlice<int64> window_bounds, Array* source,
Array* indices);
+ StatusOr<Array*> ComputeArrayForDotWithIndexedLhs(
+ const Shape& shape, const DotDimensionNumbers& dim_numbers,
+ ScalarIndexedConstantArray* lhs, ConstantArray* rhs);
+
+ StatusOr<Array*> ComputeArrayForDotWithIndexedRhs(
+ const Shape& shape, const DotDimensionNumbers& dim_numbers,
+ ConstantArray* lhs, ScalarIndexedConstantArray* rhs);
+
+ StatusOr<Array*> ComputeArrayForDot(const Shape& shape,
+ const DotDimensionNumbers& dim_numbers,
+ Array* lhs, Array* rhs);
+
// This tries to fold a ScalarIndexedArray which has another
// ScalarIndexedArray as a source into a ScalarIndexedArray that instead has a
// ScalarIndexedArray as indices. If `source` happened to be a
diff --git a/tensorflow/compiler/xla/service/indexed_array_analysis_test.cc b/tensorflow/compiler/xla/service/indexed_array_analysis_test.cc
index fc2befe05b..5f4b42799b 100644
--- a/tensorflow/compiler/xla/service/indexed_array_analysis_test.cc
+++ b/tensorflow/compiler/xla/service/indexed_array_analysis_test.cc
@@ -799,5 +799,170 @@ ENTRY main {
AssertArrayForRootExpressionIs(hlo_text, "%add");
}
+TEST_F(IndexedArrayAnalysisTest, DotOpBasic_0) {
+ string hlo_text = R"(
+HloModule DotOp
+
+ENTRY main {
+ gather_operand = s32[3,4] constant(s32[3,4]{{1,2,3,4},{5,6,7,8},{9,10,11,12}})
+ dot_rhs_constant = s32[4,3] constant(s32[4,3]{{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
+ indices = s32[5] parameter(0)
+ dot_lhs = s32[5,4] gather(gather_operand, indices),
+ output_window_dims={1},
+ elided_window_dims={0},
+ gather_dims_to_operand_dims={0},
+ index_vector_dim=1,
+ window_bounds={1,4}
+ ROOT dot = s32[5,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={1}, rhs_contracting_dims={0}
+}
+)";
+
+ AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
+(scalar-indexed-const
+ (constant s32[3,3] s32[3,3] {
+ { 70, 80, 90 },
+ { 158, 184, 210 },
+ { 246, 288, 330 } })
+ %indices 0->[0]))");
+}
+
+TEST_F(IndexedArrayAnalysisTest, DotOpBasic_1) {
+ string hlo_text = R"(
+HloModule DotOp
+
+ENTRY main {
+ gather_operand = s32[3,4] constant(s32[3,4]{{1,2,3,4},{5,6,7,8},{9,10,11,12}})
+ dot_rhs_constant = s32[3,3] constant(s32[3,3]{{1,2,3},{4,5,6},{7,8,9}})
+ indices = s32[5] parameter(0)
+ dot_lhs = s32[3,5] gather(gather_operand, indices),
+ output_window_dims={0},
+ elided_window_dims={1},
+ gather_dims_to_operand_dims={1},
+ index_vector_dim=1,
+ window_bounds={3,1}
+ ROOT dot = s32[5,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={0}, rhs_contracting_dims={0}
+}
+)";
+
+ AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
+(scalar-indexed-const
+ (constant s32[4,3] s32[4,3] {
+ { 84, 99, 114 },
+ { 96, 114, 132 },
+ { 108, 129, 150 },
+ { 120, 144, 168 } })
+ %indices 0->[1]))");
+}
+
+TEST_F(IndexedArrayAnalysisTest, DotOpBasic_2) {
+ string hlo_text = R"(
+HloModule DotOp
+
+ENTRY main {
+ gather_operand = s32[3,4] constant(s32[3,4]{{1,2,3,4},{5,6,7,8},{9,10,11,12}})
+ dot_lhs_constant = s32[4,3] constant(s32[4,3]{{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
+ indices = s32[5] parameter(0)
+ dot_rhs = s32[3,5] gather(gather_operand, indices),
+ output_window_dims={0},
+ elided_window_dims={1},
+ gather_dims_to_operand_dims={1},
+ index_vector_dim=1,
+ window_bounds={3,1}
+ ROOT dot = s32[4,5] dot(dot_lhs_constant, dot_rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
+}
+)";
+
+ AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
+(scalar-indexed-const
+ (constant s32[4,4] s32[4,4] {
+ { 38, 44, 50, 56 },
+ { 83, 98, 113, 128 },
+ { 128, 152, 176, 200 },
+ { 173, 206, 239, 272 } })
+ %indices 1->[1])
+)");
+}
+
+TEST_F(IndexedArrayAnalysisTest, DotOpBasic_3) {
+ string hlo_text = R"(
+HloModule DotOp
+
+ENTRY main {
+ gather_operand = s32[4,3] constant(s32[4,3]{{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
+ dot_lhs_constant = s32[4,3] constant(s32[4,3]{{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
+ indices = s32[5] parameter(0)
+ dot_rhs = s32[5,3] gather(gather_operand, indices),
+ output_window_dims={1},
+ elided_window_dims={0},
+ gather_dims_to_operand_dims={0},
+ index_vector_dim=1,
+ window_bounds={1,3}
+ ROOT dot = s32[4,5] dot(dot_lhs_constant, dot_rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1}
+}
+)";
+
+ AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
+(scalar-indexed-const
+ (constant s32[4,4] s32[4,4] {
+ { 14, 32, 50, 68 },
+ { 32, 77, 122, 167 },
+ { 50, 122, 194, 266 },
+ { 68, 167, 266, 365 } })
+ %indices 1->[0])
+)");
+}
+
+TEST_F(IndexedArrayAnalysisTest, DotOpWithBatch) {
+ string hlo_text = R"(
+HloModule DotOp
+
+ENTRY main {
+ gather_operand = s32[2,3,2] constant(s32[2,3,2]{{{1,2},{3,4},{5,6}},{{7,8},{9,10},{11,12}}})
+ dot_lhs_constant = s32[2,2,3] constant(s32[2,2,3]{{{1,2,3},{4,5,6}},{{7,8,9},{10,11,12}}})
+ indices = s32[4] parameter(0)
+ dot_rhs = s32[2,3,4] gather(gather_operand, indices),
+ output_window_dims={0,1},
+ elided_window_dims={2},
+ gather_dims_to_operand_dims={2},
+ index_vector_dim=1,
+ window_bounds={2,3,1}
+ ROOT dot = s32[2,2,4] dot(dot_lhs_constant, dot_rhs),
+ lhs_contracting_dims={2}, rhs_contracting_dims={1},
+ lhs_batch_dims={0}, rhs_batch_dims={0}
+}
+)";
+
+ AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
+(scalar-indexed-const
+ (constant s32[2,2,2] s32[2,2,2] {
+ { { 22, 28 },
+ { 49, 64 } },
+ { { 220, 244 },
+ { 301, 334 } } })
+ %indices 3->[2])
+)");
+}
+
+TEST_F(IndexedArrayAnalysisTest, DotOpNegative) {
+ string hlo_text = R"(
+HloModule DotOp
+
+ENTRY main {
+ gather_operand = s32[3,4] constant(s32[3,4]{{1,2,3,4},{5,6,7,8},{9,10,11,12}})
+ dot_rhs_constant = s32[2,3] constant(s32[2,3]{{1,2,3},{4,5,6}})
+ indices = s32[2] parameter(0)
+ dot_lhs = s32[3,2] gather(gather_operand, indices),
+ output_window_dims={0},
+ elided_window_dims={1},
+ gather_dims_to_operand_dims={1},
+ index_vector_dim=1,
+ window_bounds={3,1}
+ ROOT dot = s32[3,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={1}, rhs_contracting_dims={0}
+}
+)";
+
+ AssertArrayWithConstantsForRootExpressionIs(hlo_text, "%dot");
+}
+
} // namespace
} // namespace xla
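
The folded constants in these tests are just the dot of the two constant operands; gathering rows (or columns) of that product matches dotting the gathered operand. A quick plain-C++ check of DotOpBasic_0's expected source:

#include <iostream>

int main() {
  // gather_operand and dot_rhs_constant from DotOpBasic_0.
  int a[3][4] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}};
  int b[4][3] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}, {10, 11, 12}};
  // Dot(a, b) reproduces the scalar-indexed-const source:
  // {{70,80,90},{158,184,210},{246,288,330}}. Gathering rows of this product
  // with `indices` equals dotting the gathered rows of `a`.
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 3; ++j) {
      int acc = 0;
      for (int k = 0; k < 4; ++k) acc += a[i][k] * b[k][j];
      std::cout << acc << (j == 2 ? '\n' : ' ');
    }
  }
}
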
diff --git a/tensorflow/compiler/xla/service/inliner_test.cc b/tensorflow/compiler/xla/service/inliner_test.cc
index d2af261008..32937b33b3 100644
--- a/tensorflow/compiler/xla/service/inliner_test.cc
+++ b/tensorflow/compiler/xla/service/inliner_test.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include <memory>
#include <utility>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -51,10 +51,10 @@ TEST_F(InlinerTest, MapMax) {
auto max_f32 = max_builder.Build();
auto builder = HloComputation::Builder("MapMaxFunction");
- auto lhs = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({1, 2, 3, 4})));
- auto rhs = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({4, 3, 2, 1})));
+ auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
+ auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<float>({4, 3, 2, 1})));
builder.AddInstruction(
HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));
@@ -70,7 +70,7 @@ TEST_F(InlinerTest, MapMax) {
// Verify execution on CPU.
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
- auto expected = Literal::CreateR1<float>({4, 3, 3, 4});
+ auto expected = LiteralUtil::CreateR1<float>({4, 3, 3, 4});
EXPECT_TRUE(LiteralTestUtil::Equal(*result, *expected));
}
@@ -83,12 +83,12 @@ TEST_F(InlinerTest, MapConstant) {
HloInstruction::CreateParameter(0, r0f32, "x"));
(void)param1;
const2_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto const2_f32 = const2_builder.Build();
auto builder = HloComputation::Builder("MapConstFunction");
auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1, 2, 3, 4}, {5, 6, 7, 8}})));
+ LiteralUtil::CreateR2<float>({{1, 2, 3, 4}, {5, 6, 7, 8}})));
builder.AddInstruction(
HloInstruction::CreateMap(lhs->shape(), {lhs}, const2_f32.get()));
@@ -104,7 +104,7 @@ TEST_F(InlinerTest, MapConstant) {
// Verify execution on CPU.
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
- auto expected = Literal::CreateR2<float>({{2, 2, 2, 2}, {2, 2, 2, 2}});
+ auto expected = LiteralUtil::CreateR2<float>({{2, 2, 2, 2}, {2, 2, 2, 2}});
EXPECT_TRUE(LiteralTestUtil::Equal(*result, *expected));
}
@@ -123,10 +123,10 @@ TEST_F(InlinerTest, MapSubtractOppositeOrder) {
auto max_f32 = max_builder.Build();
auto builder = HloComputation::Builder("MapSubFunction");
- auto lhs = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({1, 2, 3, 4})));
- auto rhs = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({4, 3, 2, 1})));
+ auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
+ auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<float>({4, 3, 2, 1})));
builder.AddInstruction(
HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));
@@ -142,7 +142,7 @@ TEST_F(InlinerTest, MapSubtractOppositeOrder) {
// Verify execution on CPU.
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
- auto expected = Literal::CreateR1<float>({3, 1, -1, -3});
+ auto expected = LiteralUtil::CreateR1<float>({3, 1, -1, -3});
EXPECT_TRUE(LiteralTestUtil::Equal(*result, *expected));
}
diff --git a/tensorflow/compiler/xla/service/instruction_fusion.cc b/tensorflow/compiler/xla/service/instruction_fusion.cc
index 9ac8635767..da91262130 100644
--- a/tensorflow/compiler/xla/service/instruction_fusion.cc
+++ b/tensorflow/compiler/xla/service/instruction_fusion.cc
@@ -97,9 +97,10 @@ bool IsAlwaysDuplicable(const HloInstruction& instruction) {
case HloOpcode::kShiftRightLogical:
case HloOpcode::kSlice:
case HloOpcode::kSubtract:
- case HloOpcode::kGenerateToken:
+ case HloOpcode::kAfterAll:
case HloOpcode::kTranspose:
case HloOpcode::kTuple:
+ case HloOpcode::kTupleSelect:
return false;
// Cheap instructions for reals, but expensive for complex.
diff --git a/tensorflow/compiler/xla/service/instruction_fusion_test.cc b/tensorflow/compiler/xla/service/instruction_fusion_test.cc
index 21db233899..9e7a15f033 100644
--- a/tensorflow/compiler/xla/service/instruction_fusion_test.cc
+++ b/tensorflow/compiler/xla/service/instruction_fusion_test.cc
@@ -167,7 +167,8 @@ TEST_F(InstructionFusionTest, AvoidDuplicationIfNotAllFusable) {
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "1"));
HloInstruction* binary1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
- builder.AddInstruction(HloInstruction::CreateSend(binary1, 0));
+ auto token = builder.AddInstruction(HloInstruction::CreateToken());
+ builder.AddInstruction(HloInstruction::CreateSend(binary1, token, 0));
HloInstruction* unary = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kAbs, binary1));
@@ -258,7 +259,8 @@ TEST_F(InstructionFusionTest, AvoidDuplicationIfNotAllFusableRecursively) {
add = f32[4,3]{1,0} add(p0, p0)
abs1 = f32[4,3]{1,0} abs(add)
log = f32[4,3]{1,0} log(abs1)
- send = f32[4,3]{1,0} send(log), channel_id=0
+ token = token[] after-all()
+ send = f32[4,3]{1,0} send(log, token), channel_id=0
abs2 = f32[4,3]{1,0} abs(log)
ROOT root = f32[4,3]{1,0} subtract(abs2, add)
})")
@@ -288,7 +290,8 @@ TEST_F(InstructionFusionTest, AvoidDuplicationIfNotAllFusableRecursively) {
p0 = f32[4,3]{1,0} parameter(0)
add1 = f32[4,3]{1,0} add(p0, p0)
log = f32[4,3]{1,0} log(p0)
- send = f32[4,3]{1,0} send(log), channel_id=0
+ token = token[] after-all()
+ send = f32[4,3]{1,0} send(log, token), channel_id=0
add2 = f32[4,3]{1,0} add(log, add1)
ROOT root = f32[4,3]{1,0} subtract(add1, add2)
})")
@@ -321,7 +324,8 @@ TEST_F(InstructionFusionTest, AvoidDuplicationIfNotAllFusableRecursively) {
add1 = f32[4,3]{1,0} add(p0, p0)
add2 = f32[4,3]{1,0} add(add1, add1)
log = f32[4,3]{1,0} log(add2)
- send = f32[4,3]{1,0} send(log), channel_id=0
+ token = token[] after-all()
+ send = f32[4,3]{1,0} send(log, token), channel_id=0
sub1 = f32[4,3]{1,0} subtract(log, add2)
sub2 = f32[4,3]{1,0} subtract(add2, add1)
ROOT root = (f32[4,3]{1,0}, f32[4,3]{1,0}) tuple(sub1, sub2)
@@ -352,7 +356,8 @@ TEST_F(InstructionFusionTest, AllowUnaryDuplication) {
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "0"));
HloInstruction* unary1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kFloor, param0));
- builder.AddInstruction(HloInstruction::CreateSend(unary1, 0));
+ auto token = builder.AddInstruction(HloInstruction::CreateToken());
+ builder.AddInstruction(HloInstruction::CreateSend(unary1, token, 0));
HloInstruction* unary2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kAbs, unary1));
@@ -375,7 +380,8 @@ TEST_F(InstructionFusionTest, AllowEffectiveUnaryDuplication) {
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "1"));
HloInstruction* binary1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
- builder.AddInstruction(HloInstruction::CreateSend(binary1, 0));
+ auto token = builder.AddInstruction(HloInstruction::CreateToken());
+ builder.AddInstruction(HloInstruction::CreateSend(binary1, token, 0));
HloInstruction* unary = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kAbs, binary1));
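
Every test fix in this file follows the same recipe imposed by the new Send signature: build an explicit token and pass it as the second operand. Schematically, with `operand` standing in for whichever instruction is being sent:

// Token-threading pattern used throughout these tests: Send now takes an
// explicit token operand instead of creating one implicitly.
auto token = builder.AddInstruction(HloInstruction::CreateToken());
builder.AddInstruction(
    HloInstruction::CreateSend(operand, token, /*channel_id=*/0));
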
diff --git a/tensorflow/compiler/xla/service/interpreter/BUILD b/tensorflow/compiler/xla/service/interpreter/BUILD
index 524d3234eb..8652599dc6 100644
--- a/tensorflow/compiler/xla/service/interpreter/BUILD
+++ b/tensorflow/compiler/xla/service/interpreter/BUILD
@@ -74,7 +74,7 @@ cc_library(
hdrs = ["executable.h"],
deps = [
":executor",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
diff --git a/tensorflow/compiler/xla/service/interpreter/executable.cc b/tensorflow/compiler/xla/service/interpreter/executable.cc
index 9816acf650..8d40c08d55 100644
--- a/tensorflow/compiler/xla/service/interpreter/executable.cc
+++ b/tensorflow/compiler/xla/service/interpreter/executable.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include <utility>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
diff --git a/tensorflow/compiler/xla/service/layout_assignment.cc b/tensorflow/compiler/xla/service/layout_assignment.cc
index 36fdfa868d..46a6d57353 100644
--- a/tensorflow/compiler/xla/service/layout_assignment.cc
+++ b/tensorflow/compiler/xla/service/layout_assignment.cc
@@ -59,7 +59,6 @@ namespace xla {
// anonymous namespace, instead of three or four spread all over this file.
namespace {
-
} // namespace
std::ostream& operator<<(std::ostream& out,
@@ -113,14 +112,18 @@ LayoutConstraints::LayoutConstraints(
HloComputation* computation)
: points_to_analysis_(points_to_analysis), computation_(computation) {
// Gather all array-shaped logical buffers into unconstrained_buffer_ids.
- for (LogicalBuffer::Id id = 0; id < points_to_analysis_.num_logical_buffers();
- id++) {
- auto& buffer = points_to_analysis_.logical_buffer(id);
- // The points to analysis is computed per module, restrict constraints to
- // array buffers in this computation.
- if (buffer.IsArray() && buffer.instruction()->parent() == computation) {
- unconstrained_buffer_ids_.insert(buffer.id());
- }
+ for (HloInstruction* inst : computation_->instructions()) {
+ points_to_analysis_.GetPointsToSet(inst).ForEachElement(
+ [&](const ShapeIndex&, const PointsToSet::BufferList& buffers) {
+ for (const LogicalBuffer* buffer : buffers) {
+ // The points-to analysis is computed per module, so restrict
+ // constraints to array buffers in this computation.
+ if (buffer->IsArray() &&
+ buffer->instruction()->parent() == computation) {
+ unconstrained_buffer_ids_.insert(buffer->id());
+ }
+ }
+ });
}
}
@@ -1630,7 +1633,8 @@ Status LayoutAssignment::ConstrainChannelLayouts(
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kRecvDone) {
const Layout* layout = channel_constraints->ConstrainChannel(
- instruction->channel_id(), instruction->shape().layout());
+ instruction->channel_id(),
+ ShapeUtil::GetSubshape(instruction->shape(), {0}).layout());
TF_RET_CHECK(layout == nullptr)
<< instruction->ToString()
<< " cannot constrain layout as it was set to "
@@ -1647,7 +1651,7 @@ Status LayoutAssignment::ConstrainChannelLayouts(
instruction->channel_id(), operand->shape().layout());
if (layout != nullptr) {
// We found an already constrained layout which does not match the one
- // the kSend wants to impose. Eitehr add a new kCopy, or use the
+ // the kSend wants to impose. Either add a new kCopy, or use the
// existing one to marshal the correct shape.
Shape shape = operand->shape();
*shape.mutable_layout() = *layout;
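
Since recv-done now produces a (data, token[]) tuple, the channel constraint has to read the layout of tuple element {0} rather than of the instruction shape itself. In isolation, assuming the usual ShapeUtil helpers:

// recv-done now yields a (data, token[]) tuple, so the channel constraint
// must look at the layout of tuple element {0}, not of the tuple itself.
Shape data = ShapeUtil::MakeShapeWithLayout(F32, {2, 2}, {1, 0});
Shape result = ShapeUtil::MakeTupleShape({data, ShapeUtil::MakeTokenShape()});
const Layout& layout =
    ShapeUtil::GetSubshape(result, {0}).layout();  // {1,0}, taken from `data`
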
diff --git a/tensorflow/compiler/xla/service/layout_assignment_test.cc b/tensorflow/compiler/xla/service/layout_assignment_test.cc
index 62599b376a..a16fa75e30 100644
--- a/tensorflow/compiler/xla/service/layout_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/layout_assignment_test.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/algebraic_simplifier.h"
#include "tensorflow/compiler/xla/service/computation_layout.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
@@ -141,9 +141,9 @@ TEST_F(LayoutAssignmentTest, FusionInstruction) {
std::vector<std::initializer_list<int64>> minor_to_majors = {{0, 1}, {1, 0}};
for (auto& minor_to_major : minor_to_majors) {
auto builder = HloComputation::Builder(TestName());
- auto constant_literal1 = Literal::CreateR2WithLayout<float>(
+ auto constant_literal1 = LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout(minor_to_major));
- auto constant_literal2 = Literal::CreateR2WithLayout<float>(
+ auto constant_literal2 = LiteralUtil::CreateR2WithLayout<float>(
{{5.0, 6.0}, {7.0, 8.0}}, LayoutUtil::MakeLayout(minor_to_major));
Shape ashape = constant_literal1->shape();
@@ -192,10 +192,10 @@ TEST_F(LayoutAssignmentTest, TupleLayout) {
// match their source).
auto builder = HloComputation::Builder(TestName());
auto constant0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2WithLayout<float>(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({0, 1}))));
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2WithLayout<float>(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({1, 0}))));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant0, constant1}));
@@ -229,10 +229,10 @@ TEST_F(LayoutAssignmentTest, TupleSelect) {
// Verify layouts of a select with tuple operands is assigned properly.
auto builder = HloComputation::Builder(TestName());
auto constant0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2WithLayout<float>(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({0, 1}))));
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2WithLayout<float>(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({1, 0}))));
auto tuple0 = builder.AddInstruction(
HloInstruction::CreateTuple({constant0, constant1}));
@@ -240,7 +240,7 @@ TEST_F(LayoutAssignmentTest, TupleSelect) {
HloInstruction::CreateTuple({constant0, constant1}));
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
auto select = builder.AddInstruction(HloInstruction::CreateTernary(
tuple0->shape(), HloOpcode::kSelect, pred, tuple0, tuple1));
@@ -274,7 +274,7 @@ TEST_F(LayoutAssignmentTest, ConflictingLayoutTuple) {
// tuple and assigning the layouts of the copied arrays as needed.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto inner_tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant}));
auto nested_tuple = builder.AddInstruction(
@@ -584,7 +584,7 @@ TEST_F(LayoutAssignmentTest, TransposeToBitcastToUser) {
auto builder = HloComputation::Builder(TestName());
Shape input_shape = ShapeUtil::MakeShape(F32, {3, 5, 6, 7});
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(input_shape, constant, {}));
auto transpose = builder.AddInstruction(HloInstruction::CreateTranspose(
@@ -770,9 +770,12 @@ TEST_F(LayoutAssignmentTest, ConditionalAsymmetricLayout) {
false_builder.AddInstruction(
HloInstruction::CreateParameter(0, tshape, "param"));
// Use infeed, since layout assignment does not mess with it.
- auto infeed =
- false_builder.AddInstruction(HloInstruction::CreateInfeed(xshape, ""));
- false_builder.AddInstruction(HloInstruction::CreateTuple({infeed}));
+ auto token = false_builder.AddInstruction(HloInstruction::CreateToken());
+ auto infeed = false_builder.AddInstruction(
+ HloInstruction::CreateInfeed(xshape, token, ""));
+ auto infeed_data = false_builder.AddInstruction(
+ HloInstruction::CreateGetTupleElement(xshape, infeed, 0));
+ false_builder.AddInstruction(HloInstruction::CreateTuple({infeed_data}));
}
HloComputation* false_computation =
module->AddEmbeddedComputation(false_builder.Build());
@@ -799,7 +802,7 @@ TEST_F(LayoutAssignmentTest, ConditionalAsymmetricLayout) {
TEST_F(LayoutAssignmentTest, InternalErrorOnBitcast) {
auto builder = HloComputation::Builder(TestName());
auto constant0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2WithLayout<float>(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({0, 1}))));
builder.AddInstruction(HloInstruction::CreateUnary(
constant0->shape(), HloOpcode::kBitcast, constant0));
@@ -825,12 +828,14 @@ TEST_F(LayoutAssignmentTest, ChannelLayoutMismatch) {
ENTRY entry_computation {
param = (f32[2,2]) parameter(0)
gte = f32[2,2] get-tuple-element(param), index=0
- recv = (f32[2,2], u32[]) recv(), channel_id=1, sharding={maximal device=1}
- ROOT recv-done = f32[2,2] recv-done(recv), channel_id=1,
+ token = token[] after-all()
+ recv = (f32[2,2], u32[], token[]) recv(token), channel_id=1, sharding={maximal device=1}
+ recv-done = (f32[2,2], token[]) recv-done(recv), channel_id=1,
sharding={maximal device=1}
- send = (f32[2,2], u32[]) send(gte), channel_id=1,
+ ROOT root = f32[2,2] get-tuple-element(recv-done), index=0
+ send = (f32[2,2], u32[], token[]) send(gte, token), channel_id=1,
sharding={maximal device=0}
- send-done = () send-done(send), channel_id=1, sharding={maximal device=0}
+ send-done = token[] send-done(send), channel_id=1, sharding={maximal device=0}
}
)";
@@ -849,7 +854,7 @@ TEST_F(LayoutAssignmentTest, ChannelLayoutMismatch) {
AssignLayouts(module.get(), &computation_layout, &channel_constraints);
EXPECT_THAT(LayoutOf(module.get(), "gte"), ElementsAre(0, 1));
- EXPECT_THAT(LayoutOf(module.get(), "recv-done"), ElementsAre(1, 0));
+ EXPECT_THAT(LayoutOf(module.get(), "root"), ElementsAre(1, 0));
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::GetSubshape(
FindInstruction(module.get(), "send")->shape(), {0}),
diff --git a/tensorflow/compiler/xla/service/llvm_ir/BUILD b/tensorflow/compiler/xla/service/llvm_ir/BUILD
index f1e7fc2953..6f1e04a1c6 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/BUILD
+++ b/tensorflow/compiler/xla/service/llvm_ir/BUILD
@@ -21,6 +21,11 @@ filegroup(
]),
)
+load(
+ "//tensorflow:tensorflow.bzl",
+ "tf_cc_test",
+)
+
cc_library(
name = "alias_analysis",
srcs = ["alias_analysis.cc"],
@@ -37,12 +42,25 @@ cc_library(
],
)
+tf_cc_test(
+ name = "alias_analysis_test",
+ srcs = ["alias_analysis_test.cc"],
+ deps = [
+ ":alias_analysis",
+ "//tensorflow/compiler/xla/service:hlo_parser",
+ "//tensorflow/compiler/xla/service/cpu:custom_call_target_registry",
+ "//tensorflow/compiler/xla/service/cpu/tests:cpu_codegen_test",
+ "//tensorflow/compiler/xla/tests:filecheck",
+ "//tensorflow/core:test",
+ ],
+)
+
cc_library(
name = "llvm_util",
srcs = ["llvm_util.cc"],
hdrs = ["llvm_util.h"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
@@ -107,11 +125,30 @@ cc_library(
)
cc_library(
+ name = "kernel_tiling",
+ srcs = ["kernel_tiling.cc"],
+ hdrs = ["kernel_tiling.h"],
+ deps = [
+ ":ir_array",
+ ":llvm_util",
+ "//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:statusor",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/compiler/xla:util",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/service:hlo",
+ "//tensorflow/core:lib",
+ "@llvm//:core",
+ ],
+)
+
+cc_library(
name = "fused_ir_emitter",
srcs = ["fused_ir_emitter.cc"],
hdrs = ["fused_ir_emitter.h"],
deps = [
":ir_array",
+ ":kernel_tiling",
":llvm_util",
":loop_emitter",
":tuple_ops",
diff --git a/tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc b/tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc
index f200a08a3c..93a8c130e1 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc
@@ -35,9 +35,10 @@ void AliasAnalysis::AddAliasingInformationToIrArray(const HloInstruction& hlo,
llvm_ir::IrArray* array,
const ShapeIndex& index) {
BufferAllocation::Slice buffer_slice;
- if (hlo.opcode() == HloOpcode::kParameter) {
- // Parameters may alias with each other but may not alias with our temporary
- // buffers.
+ if (hlo.opcode() == HloOpcode::kParameter &&
+ hlo.parent() == hlo.parent()->parent()->entry_computation()) {
+ // Entry computation parameters may alias with each other but may not alias
+ // with our temporary buffers.
buffer_slice = BufferAllocation::Slice(kParameterAllocation, 0, 0);
} else {
const std::set<BufferAllocation::Slice> slices =
diff --git a/tensorflow/compiler/xla/service/llvm_ir/alias_analysis_test.cc b/tensorflow/compiler/xla/service/llvm_ir/alias_analysis_test.cc
new file mode 100644
index 0000000000..2552ff4a6a
--- /dev/null
+++ b/tensorflow/compiler/xla/service/llvm_ir/alias_analysis_test.cc
@@ -0,0 +1,83 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <memory>
+#include <utility>
+
+#include "tensorflow/compiler/xla/service/cpu/custom_call_target_registry.h"
+#include "tensorflow/compiler/xla/service/cpu/tests/cpu_codegen_test.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/alias_analysis.h"
+#include "tensorflow/compiler/xla/tests/filecheck.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace xla {
+namespace cpu {
+namespace {
+class AliasAnalysisTest : public CpuCodegenTest {};
+
+void FakeCustomCallTarget(float* out, float** in) {}
+
+REGISTER_CUSTOM_CALL_TARGET(FakeCustomCallTarget);
+
+TEST_F(AliasAnalysisTest, EmbeddedComputationParamsMayAliasTemps) {
+ const char* hlo_string = R"(
+HloModule while
+
+body {
+ const.0.125 = f32[] constant(0.125)
+ body.state = f32[] parameter(0)
+ ROOT add.2.2 = f32[] add(const.0.125, body.state)
+}
+
+condition {
+ const.100 = f32[] constant(100)
+ condition.state = f32[] parameter(0)
+ addend = f32[] custom-call(condition.state), custom_call_target="FakeCustomCallTarget"
+ add = f32[] add(addend, condition.state)
+ ROOT greater-than = pred[] greater-than(const.100, add)
+}
+
+ENTRY while3 {
+ const.0 = f32[] constant(0)
+ ROOT while = f32[] while(const.0), condition=condition, body=body
+}
+)";
+
+ CompileAndVerifyIr(hlo_string, R"(
+; CHECK-LABEL: @body(i8* align 4 dereferenceable(4) %retval
+; CHECK: %[[add_result:.*]] = fadd fast float %[[fadd_lhs:.*]], %[[fadd_rhs:.*]]
+; CHECK: store float %[[add_result]], float* %[[store_dest:.*]], !alias.scope ![[alias_scope_md_for_store:.*]]
+;
+; CHECK-LABEL: @condition(i8* align 1 dereferenceable(1) %fusion, i8* noalias %run_options, i8** noalias %params
+; CHECK: %[[cond_state_buf_ptr:.*]] = getelementptr inbounds i8*, i8** %params, i64 0
+; CHECK: %[[cond_state_buf_untyped:.*]] = load i8*, i8** %[[cond_state_buf_ptr]]
+; CHECK: %[[cond_state_buf_typed:.*]] = bitcast i8* %[[cond_state_buf_untyped]] to float*
+; CHECK: load float, float* %[[cond_state_buf_typed]], !alias.scope ![[alias_scope_md_for_store]], !noalias ![[noalias_md_for_load:.*]]
+;
+; CHECK-LABEL: @while3(
+
+![[alias_scope_md_for_store]] = !{![[buffer_idx_0:.*]]}
+![[buffer_idx_0]] = !{!"buffer: {index:0, offset:0, size:4}", ![[aa_md_root:.*]]}
+![[aa_md_root]] = !{!"XLA global AA domain"}
+![[buffer_idx_1:.*]] = !{!"buffer: {index:1, offset:0, size:4}", !3}
+![[buffer_idx_1_offset_16:.*]] = !{!"buffer: {index:1, offset:16, size:1}", !3}
+![[noalias_md_for_load]] = !{![[buffer_idx_1_offset_16]], ![[buffer_idx_1]]}
+}
+)");
+}
+
+} // namespace
+} // namespace cpu
+} // namespace xla
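
The !alias.scope/!noalias strings matched by the FileCheck patterns above are ordinary LLVM alias-scope metadata. A minimal sketch of how such scopes are built and attached, using the plain LLVM MDBuilder API rather than XLA's AliasAnalysis class:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MDBuilder.h"

// Builds a scope domain and one scope within it, then tags a store so that
// loads carrying this scope in their !noalias list are known not to alias it.
void TagStore(llvm::LLVMContext& ctx, llvm::StoreInst* store) {
  llvm::MDBuilder mdb(ctx);
  llvm::MDNode* domain = mdb.createAliasScopeDomain("XLA global AA domain");
  llvm::MDNode* scope =
      mdb.createAliasScope("buffer: {index:0, offset:0, size:4}", domain);
  store->setMetadata(llvm::LLVMContext::MD_alias_scope,
                     llvm::MDNode::get(ctx, {scope}));
}
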
diff --git a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc
index d909845a3a..b12ce97e28 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc
@@ -119,7 +119,24 @@ Status FusedIrEmitter::HandleGetTupleElement(
}
Status FusedIrEmitter::HandleParameter(HloInstruction* parameter) {
- generators_[parameter] = [=](const IrArray::Index& index) {
+ generators_[parameter] = [=](const IrArray::Index& index) -> llvm::Value* {
+ if (tiled_parameter_info_) {
+ if (llvm::Value* param_tile_buffer =
+ tiled_parameter_info_->GetBufferForParameter(
+ parameter->parameter_number())) {
+ // TODO(jlebar): Add AA metadata to this load. Tile buffers are global
+ // variables, so LLVM's points-to analysis doesn't help us much. And we
+ // want the AA info to be present before address spaces are inferred
+ // (which is pretty late in the pipeline), so even if we had
+ // address-space-based AA in LLVM, it wouldn't help us much here.
+ return ir_builder_->CreateLoad(
+ ir_builder_->CreateGEP(
+ param_tile_buffer,
+ {index.GetConstantWithIndexType(0), tiled_parameter_info_->x(),
+ tiled_parameter_info_->y()}),
+ "tiled_buffer");
+ }
+ }
return parameter_arrays_[parameter->parameter_number()]
.EmitReadArrayElement(index, ir_builder_);
};
diff --git a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h
index b3b6026ef1..a6ceec7b23 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h
@@ -25,6 +25,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/elemental_ir_emitter.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/llvm_ir/ir_array.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h"
#include "tensorflow/compiler/xla/service/llvm_ir/loop_emitter.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
@@ -56,6 +57,7 @@ class FusedIrEmitter : public DfsHloVisitorWithDefault {
FusedIrEmitter(tensorflow::gtl::ArraySlice<llvm_ir::IrArray> parameter_arrays,
ElementalIrEmitter* elemental_emitter)
: parameter_arrays_(parameter_arrays),
+ tiled_parameter_info_(nullptr),
elemental_emitter_(elemental_emitter),
ir_builder_(elemental_emitter->ir_builder()),
module_(elemental_emitter->module()) {}
@@ -86,9 +88,14 @@ class FusedIrEmitter : public DfsHloVisitorWithDefault {
return it->second;
}
+ void SetTiledParameterInfo(const llvm_ir::TiledParameterInfo* info) {
+ tiled_parameter_info_ = info;
+ }
+
private:
// Arrays of parameters of fusion instruction
tensorflow::gtl::ArraySlice<llvm_ir::IrArray> parameter_arrays_;
+ const llvm_ir::TiledParameterInfo* tiled_parameter_info_;
ElementalIrEmitter* elemental_emitter_;
diff --git a/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc b/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc
index ea10cef49a..dcf9838d80 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc
@@ -422,9 +422,11 @@ IrArray IrArray::CastToShape(const Shape& new_shape,
llvm::IRBuilder<>* ir_builder) const {
llvm::Module* module = ir_builder->GetInsertBlock()->getParent()->getParent();
llvm::Type* new_ir_type = llvm_ir::ShapeToIrType(new_shape, module);
- return IrArray(
+ IrArray new_irarray(
ir_builder->CreatePointerCast(base_ptr_, new_ir_type->getPointerTo()),
new_shape);
+ new_irarray.metadata_ = metadata_;
+ return new_irarray;
}
/* static */ IrArray::Index IrArray::BumpIndex(const Index& index,
diff --git a/tensorflow/compiler/xla/service/llvm_ir/ir_array.h b/tensorflow/compiler/xla/service/llvm_ir/ir_array.h
index 4648c6d7ac..0777c49923 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/ir_array.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/ir_array.h
@@ -114,19 +114,19 @@ class IrArray {
size_t size() const { return multidim().size(); }
llvm::Value* operator[](size_t i) const { return multidim()[i]; }
- llvm::Value*& operator[](size_t i) { return multidim()[i]; }
+ llvm::Value*& operator[](size_t i) { return mutable_multidim()[i]; }
- void push_back(llvm::Value* value) { multidim().push_back(value); }
+ void push_back(llvm::Value* value) { mutable_multidim().push_back(value); }
void InsertAt(int64 index, llvm::Value* value) {
CHECK_LE(index, size());
- multidim().insert(multidim().begin() + index, value);
+ mutable_multidim().insert(mutable_multidim().begin() + index, value);
}
using iterator = std::vector<llvm::Value*>::iterator;
using const_iterator = std::vector<llvm::Value*>::const_iterator;
- iterator begin() { return multidim().begin(); }
- iterator end() { return multidim().end(); }
+ iterator begin() { return mutable_multidim().begin(); }
+ iterator end() { return mutable_multidim().end(); }
const_iterator begin() const { return multidim().begin(); }
const_iterator end() const { return multidim().end(); }
@@ -185,7 +185,7 @@ class IrArray {
private:
// Changing the multi-dimensional index invalidates the linear index.
- std::vector<llvm::Value*>& multidim() {
+ std::vector<llvm::Value*>& mutable_multidim() {
linear_ = nullptr;
return multidim_;
}
diff --git a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc
index 1f6e3c829f..98d0ceb3e2 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc
@@ -56,10 +56,11 @@ Status KernelSupportLibrary::For(
}
Status KernelSupportLibrary::If(
- llvm::Value* condition, const std::function<Status()>& true_block_generator,
+ tensorflow::StringPiece name, llvm::Value* condition,
+ const std::function<Status()>& true_block_generator,
const std::function<Status()>& false_block_generator) {
llvm_ir::LlvmIfData if_data =
- llvm_ir::EmitIfThenElse(condition, "", ir_builder_);
+ llvm_ir::EmitIfThenElse(condition, name, ir_builder_);
ir_builder_->SetInsertPoint(&if_data.true_block->back());
TF_RETURN_IF_ERROR(true_block_generator());
ir_builder_->SetInsertPoint(&if_data.false_block->back());
diff --git a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h
index 6f7a9d94e3..9d770cc4c3 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h
@@ -203,16 +203,30 @@ class KernelSupportLibrary {
// `true_block_generator()`;
// else
// `false_block_generator()`;
- Status If(llvm::Value* condition,
+ Status If(tensorflow::StringPiece name, llvm::Value* condition,
const std::function<Status()>& true_block_generator,
const std::function<Status()>& false_block_generator =
[]() -> Status { return Status::OK(); });
+ Status If(llvm::Value* condition,
+ const std::function<Status()>& true_block_generator,
+ const std::function<Status()>& false_block_generator =
+ []() -> Status { return Status::OK(); }) {
+ return If("", condition, true_block_generator, false_block_generator);
+ }
+
void IfReturnVoid(llvm::Value* condition,
const std::function<void()>& true_block_generator,
const std::function<void()>& false_block_generator = []() {
}) {
- TF_CHECK_OK(If(condition,
+ IfReturnVoid("", condition, true_block_generator, false_block_generator);
+ }
+
+ void IfReturnVoid(tensorflow::StringPiece name, llvm::Value* condition,
+ const std::function<void()>& true_block_generator,
+ const std::function<void()>& false_block_generator = []() {
+ }) {
+ TF_CHECK_OK(If(name, condition,
[&]() {
true_block_generator();
return Status::OK();
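
The new name parameter makes the emitted basic blocks greppable in the IR. A sketch of intended usage, with `ksl` and the condition value as illustrative stand-ins:

// `ksl` is a KernelSupportLibrary wrapping the current IRBuilder; naming the
// branch labels the resulting true/false blocks in the emitted IR.
TF_RETURN_IF_ERROR(ksl.If(
    "slice_in_bounds", in_bounds_condition,
    /*true_block_generator=*/[&]() -> Status {
      // ... emit the in-bounds path ...
      return Status::OK();
    }));
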
diff --git a/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc b/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc
new file mode 100644
index 0000000000..533b75cdae
--- /dev/null
+++ b/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc
@@ -0,0 +1,118 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h"
+#include "tensorflow/compiler/xla/layout_util.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
+#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/compiler/xla/statusor.h"
+#include "tensorflow/compiler/xla/util.h"
+#include "tensorflow/core/platform/logging.h"
+
+namespace xla {
+namespace llvm_ir {
+
+namespace {
+// Returns the indices of the first elements of all consecutive subarrays of the
+// given array. For example:
+// ConsecutiveSegments({m, m+1, m+2, n, k, k+1}) = {0, 3, 4}
+std::vector<size_t> ConsecutiveSegments(tensorflow::gtl::ArraySlice<int64> xs) {
+ std::vector<size_t> is = {0};
+ for (size_t i = 1; i < xs.size(); ++i) {
+ if (1 != xs[i] - xs[i - 1]) {
+ is.push_back(i);
+ }
+ }
+ return is;
+}
+
+// Merges the sequences of dimensions of the given shape which start at the
+// given indices `segs`.
+Shape MergeDimensions(tensorflow::gtl::ArraySlice<size_t> segs,
+ const Shape& shape) {
+ std::vector<int64> dimensions;
+ for (size_t i = 1; i <= segs.size(); ++i) {
+ dimensions.push_back(std::accumulate(
+ shape.dimensions().begin() + segs[i - 1],
+ shape.dimensions().begin() +
+ (segs.size() == i ? shape.dimensions().size() : segs[i]),
+ 1, std::multiplies<int64>()));
+ }
+ return ShapeUtil::MakeShapeWithDescendingLayout(shape.element_type(),
+ dimensions);
+}
+} // namespace
+
+tensorflow::gtl::optional<std::vector<int64> > FindTranspose021(
+ const Shape& a, const Shape& b) {
+ if (!ShapeUtil::CompatibleIgnoringElementType(a, b)) {
+ return tensorflow::gtl::nullopt;
+ }
+
+ std::vector<int64> perm(a.dimensions().size());
+ {
+ auto layout_a_orig = LayoutUtil::MinorToMajor(a);
+ std::vector<int64> layout_a(layout_a_orig.rbegin(), layout_a_orig.rend());
+ auto layout_b_orig = LayoutUtil::MinorToMajor(b);
+ std::vector<int64> layout_b(layout_b_orig.rbegin(), layout_b_orig.rend());
+ for (size_t i = 0; i < perm.size(); ++i) {
+ perm[i] = PositionInContainer(layout_b, layout_a[i]);
+ }
+ }
+ auto segs = ConsecutiveSegments(perm);
+ if ((3 == segs.size() && 0 == perm[0]) || 2 == segs.size()) {
+ Shape norm_a =
+ ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(a);
+ Shape reduced_a = MergeDimensions(segs, norm_a);
+ auto reduced_a_dims = reduced_a.dimensions();
+ std::vector<int64> dims_021;
+ if (2 == segs.size()) {
+ // The logical component-0 is of size one.
+ dims_021 = {1, reduced_a_dims[1], reduced_a_dims[0]};
+ } else {
+ dims_021 = {reduced_a_dims[0], reduced_a_dims[2], reduced_a_dims[1]};
+ }
+
+ return dims_021;
+ }
+
+ return tensorflow::gtl::nullopt;
+}
+
+IrArray::Index GetUnreducedOutputIndex(
+ const IrArray::Index& reduced_output_index,
+ const Shape& reduced_output_shape, const Shape& unreduced_output_shape,
+ llvm::IRBuilder<>* ir_builder) {
+ auto bounds = reduced_output_shape.dimensions();
+ auto minor_to_major = reduced_output_shape.layout().minor_to_major();
+ llvm::Value* linear_index = reduced_output_index.GetConstantWithIndexType(0);
+ int64 multiplier = 1;
+ for (int i = 0; i < reduced_output_index.size(); ++i) {
+ int64 dim = minor_to_major[i];
+ llvm::Value* addend = ir_builder->CreateMul(
+ reduced_output_index[dim],
+ reduced_output_index.GetConstantWithIndexType(multiplier),
+ "linearizing",
+ /*HasNUW=*/true, /*HasNSW=*/true);
+ linear_index = ir_builder->CreateAdd(linear_index, addend, "",
+ /*HasNUW=*/true, /*HasNSW=*/true);
+ multiplier *= bounds[dim];
+ }
+
+ return IrArray::Index(linear_index, unreduced_output_shape, ir_builder);
+}
+
+} // namespace llvm_ir
+} // namespace xla
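
ConsecutiveSegments and the 0-2-1 detection are straightforward to exercise standalone. A plain-C++ sketch mirroring the helper above:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Mirrors ConsecutiveSegments from the patch: indices at which a new run of
// consecutive integers starts.
std::vector<size_t> Segments(const std::vector<int64_t>& xs) {
  std::vector<size_t> is = {0};
  for (size_t i = 1; i < xs.size(); ++i)
    if (xs[i] - xs[i - 1] != 1) is.push_back(i);
  return is;
}

int main() {
  // The comment's example {m, m+1, m+2, n, k, k+1} with m=7, n=3, k=5.
  for (size_t i : Segments({7, 8, 9, 3, 5, 6})) std::cout << i << ' ';
  std::cout << '\n';  // prints: 0 3 4
  // A 0-2-1 transpose of a rank-4 shape: permutation {0, 1, 3, 2} has
  // segments {0, 2, 3}, i.e. three logical components -> fold candidate.
  for (size_t i : Segments({0, 1, 3, 2})) std::cout << i << ' ';
  std::cout << '\n';  // prints: 0 2 3
}
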
diff --git a/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h b/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h
new file mode 100644
index 0000000000..6f1268fffb
--- /dev/null
+++ b/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h
@@ -0,0 +1,80 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_KERNEL_TILING_H_
+#define TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_KERNEL_TILING_H_
+
+#include "llvm/IR/Value.h"
+#include "tensorflow/compiler/xla/service/hlo_instruction.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/ir_array.h"
+
+namespace xla {
+namespace llvm_ir {
+
+// About 0-2-1 transpose:
+//
+// If a shape can be viewed as three logical components 0-1-2 in the order of
+// major to minor, a 0-2-1-transpose changes the order of such logical
+// components to 0-2-1. We call the shape being transposed the input shape and
+// the transposed shape the output shape. The logical view of the input and
+// output shapes for the transpose are called the 0-1-2 shape or reduced input
+// shape and the 0-2-1 shape or the reduced output shape respectively. The
+// original input and output shapes are called the unreduced input and output
+// shapes.
+
+// If `b` is a 0-2-1 transpose of `a` in 0-1-2, return the dimensions for the
+// reduced shape of `b` or the 0-2-1 shape.
+tensorflow::gtl::optional<std::vector<int64> > FindTranspose021(const Shape& a,
+ const Shape& b);
+
+// Return the unreduced output index corresponding to the given reduced output
+// index.
+IrArray::Index GetUnreducedOutputIndex(
+ const IrArray::Index& reduced_output_index,
+ const Shape& reduced_output_shape, const Shape& unreduced_output_shape,
+ llvm::IRBuilder<>* ir_builder);
+
+// A class to represent information for tiled parameters to support IR emission
+// for 021 transpose.
+class TiledParameterInfo {
+ public:
+ TiledParameterInfo(tensorflow::gtl::ArraySlice<llvm::Value*> param_buffers,
+ llvm::Value* y, llvm::Value* x)
+ : param_buffers_(param_buffers), y_(y), x_(x) {}
+
+ llvm::Value* x() const { return x_; }
+ llvm::Value* y() const { return y_; }
+
+ void set_x(llvm::Value* x) { x_ = x; }
+ void set_y(llvm::Value* y) { y_ = y; }
+
+ llvm::Value* GetBufferForParameter(int64 index) const {
+ return param_buffers_[index];
+ }
+
+ private:
+ // param_buffers_[i] stores the tile buffer for the ith parameter, or nullptr
+ // if the parameter is not tiled.
+ tensorflow::gtl::ArraySlice<llvm::Value*> param_buffers_;
+ // The y coordinate within a tile.
+ llvm::Value* y_;
+ // The x coordinate within a tile.
+ llvm::Value* x_;
+};
+
+} // namespace llvm_ir
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_KERNEL_TILING_H_
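
A sketch of how TiledParameterInfo is meant to plug into FusedIrEmitter; `tile_buffers`, `y`, and `x` stand for values produced by the GPU tiling code, which is not part of this patch:

// tile_buffers[i] is the shared-memory tile for parameter i, or nullptr if
// parameter i is not tiled; y/x are the thread's coordinates within a tile.
llvm_ir::TiledParameterInfo tiled_info(tile_buffers, y, x);
fused_emitter.SetTiledParameterInfo(&tiled_info);
// From here on, FusedIrEmitter::HandleParameter serves tiled parameters from
// their tile buffers and falls back to EmitReadArrayElement for the rest.
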
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
index e61a2fd12d..6c55361b44 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
@@ -26,7 +26,7 @@ limitations under the License.
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/name_uniquer.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/types.h"
@@ -36,6 +36,7 @@ limitations under the License.
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
+#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
@@ -251,14 +252,12 @@ StatusOr<Shape> DecodeSelfDescribingShapeConstant(const void* shape_ptr,
llvm::Constant* ConvertLiteralToIrConstant(const Literal& literal,
llvm::Module* module) {
- const Shape& shape = literal.shape();
- llvm::Type* type = shape.element_type() == C64
- ? llvm::Type::getFloatTy(module->getContext())
- : PrimitiveTypeToIrType(shape.element_type(), module);
const char* data = static_cast<const char*>(literal.untyped_data());
- uint64 num_elements = literal.size_bytes() * 8 / GetSizeInBits(type);
- return llvm::ConstantDataArray::getRaw(
- llvm::StringRef(data, literal.size_bytes()), num_elements, type);
+ CHECK_EQ(module->getDataLayout().isLittleEndian(),
+ tensorflow::port::kLittleEndian);
+ return llvm::ConstantDataArray::getString(
+ module->getContext(), llvm::StringRef(data, literal.size_bytes()),
+ /*AddNull=*/false);
}
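The rewritten body re-emits the literal's raw bytes verbatim, which is why the endianness CHECK is needed; a hedged byte-level illustration (values not from the patch):

// A little-endian s32[2] literal {1, 2} has the byte string
//   "\x01\x00\x00\x00\x02\x00\x00\x00",
// and getString(..., /*AddNull=*/false) emits exactly those 8 bytes as an
// i8 array constant.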
llvm::AllocaInst* EmitAllocaAtFunctionEntry(llvm::Type* type,
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.h b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.h
index 4a10ec466d..9c51861eac 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.h
@@ -27,7 +27,7 @@ limitations under the License.
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/raw_ostream.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module_config.h"
#include "tensorflow/compiler/xla/types.h"
diff --git a/tensorflow/compiler/xla/service/logical_buffer_analysis.cc b/tensorflow/compiler/xla/service/logical_buffer_analysis.cc
index f410921b4b..d631fb5ee4 100644
--- a/tensorflow/compiler/xla/service/logical_buffer_analysis.cc
+++ b/tensorflow/compiler/xla/service/logical_buffer_analysis.cc
@@ -131,18 +131,23 @@ Status LogicalBufferAnalysis::HandleDomain(HloInstruction*) {
return Status::OK();
}
-Status LogicalBufferAnalysis::HandleRecvDone(HloInstruction*) {
- // RecvDone doesn't create a new buffer but rather aliases its input (Recv)
- // tuple element at {0} to its output.
+Status LogicalBufferAnalysis::HandleRecvDone(HloInstruction* recv_done) {
+ // RecvDone produces a two-element tuple containing the data value (which
+ // aliases part of its operand) and a token. Only the tuple index table and
+ // the token are defined by the RecvDone.
+ NewLogicalBuffer(recv_done, /*index=*/{});
+ NewLogicalBuffer(recv_done, /*index=*/{1});
return Status::OK();
}
Status LogicalBufferAnalysis::HandleSend(HloInstruction* send) {
- // Send creates new buffers for the top-level tuple and the context (tuple
- // element at {1}). Tuple element at {0} is an alias of the Send operand, so
- // we don't need to create a new Logical Buffer for that.
+ // Send creates new buffers for the top-level tuple, the context (tuple
+ // element at {1}), and the token (tuple element at {2}). Tuple element at {0}
+ // is an alias of the Send operand, so we don't need to create a new Logical
+ // Buffer for that.
NewLogicalBuffer(send, /*index=*/{});
NewLogicalBuffer(send, /*index=*/{1});
+ NewLogicalBuffer(send, /*index=*/{2});
return Status::OK();
}
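A summary of the buffer layout this hands back for Send, derived from the comments above:

// Send(operand, token) yields the tuple (operand_shape, u32[], token[]):
//   index {}  -> new buffer (the tuple index table)
//   index {0} -> alias of the operand; no new buffer
//   index {1} -> new buffer (U32 context)
//   index {2} -> new buffer (token)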
@@ -152,10 +157,10 @@ Status LogicalBufferAnalysis::HandleTuple(HloInstruction* tuple) {
return Status::OK();
}
-Status LogicalBufferAnalysis::HandleSelect(HloInstruction* select) {
+Status LogicalBufferAnalysis::HandleTupleSelect(HloInstruction* tuple_select) {
// Select allocates a new buffer and then shallow copies the on_true or
// on_false buffer into this new buffer.
- NewLogicalBuffer(select, /*index=*/{});
+ NewLogicalBuffer(tuple_select, /*index=*/{});
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/logical_buffer_analysis.h b/tensorflow/compiler/xla/service/logical_buffer_analysis.h
index b5ef396787..81f524d84a 100644
--- a/tensorflow/compiler/xla/service/logical_buffer_analysis.h
+++ b/tensorflow/compiler/xla/service/logical_buffer_analysis.h
@@ -63,7 +63,7 @@ class LogicalBufferAnalysis : public DfsHloVisitorWithDefault {
Status HandleCopy(HloInstruction* copy) override;
Status HandleRecvDone(HloInstruction* recv_done) override;
Status HandleSend(HloInstruction* send) override;
- Status HandleSelect(HloInstruction* select) override;
+ Status HandleTupleSelect(HloInstruction* tuple_select) override;
// A map from the buffer ID to the logical buffer
std::vector<std::unique_ptr<LogicalBuffer>> logical_buffers_;
diff --git a/tensorflow/compiler/xla/service/multi_output_fusion.cc b/tensorflow/compiler/xla/service/multi_output_fusion.cc
index 79b5a442aa..4166ef5baf 100644
--- a/tensorflow/compiler/xla/service/multi_output_fusion.cc
+++ b/tensorflow/compiler/xla/service/multi_output_fusion.cc
@@ -115,39 +115,18 @@ HloInstruction* MultiOutputFusion::Fuse(HloInstruction* instr1,
HloInstruction* fused = instr2;
// Make sure that if only one of the instructions is a fusion, or if only one
// of the instructions is a multi-output fusion, it's what will be fused into.
- //
- // An invariant is that no bitcast nodes will show up in the middle of a
- // fusion node. This invariant must hold in order for us to lower it. Given
- // that, we require that during multi-output fusion, a fusion node ending with
- // bitcast to preserve its structure as a nested fusion instead being
- // merged and flattened.
- if (fused->opcode() == HloOpcode::kFusion &&
- fused->fused_expression_root()->opcode() != HloOpcode::kBitcast) {
+ if (fused->opcode() == HloOpcode::kFusion) {
std::swap(remaining, fused);
}
if (fused->IsMultiOutputFusion()) {
std::swap(remaining, fused);
}
- if (fused->opcode() == HloOpcode::kFusion &&
- fused->fused_expression_root()->opcode() != HloOpcode::kBitcast) {
+ if (fused->opcode() == HloOpcode::kFusion) {
remaining->MergeFusionInstructionIntoMultiOutput(fused);
} else {
- if (remaining->opcode() == HloOpcode::kFusion &&
- remaining->fused_expression_root()->opcode() == HloOpcode::kBitcast) {
- auto parent_computation = remaining->parent();
- // Create a nested fusion node.
- auto remaining_nested_fused =
- parent_computation->AddInstruction(HloInstruction::CreateFusion(
- remaining->shape(), HloInstruction::FusionKind::kLoop,
- remaining));
- TF_CHECK_OK(parent_computation->ReplaceInstruction(
- remaining, remaining_nested_fused));
- remaining = remaining_nested_fused;
- }
remaining->FuseInstructionIntoMultiOutput(fused);
}
-
return remaining;
}
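To make the swap logic above concrete, a hedged trace with hypothetical instructions `add`, `loop_fusion`, and a multi-output fusion `mof`:

// Fuse(add, loop_fusion): the kFusion check swaps them, so
//   loop_fusion->FuseInstructionIntoMultiOutput(add) runs.
// Fuse(loop_fusion, mof): the swaps leave the multi-output fusion as
//   `remaining`, so mof->MergeFusionInstructionIntoMultiOutput(loop_fusion)
//   runs.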
diff --git a/tensorflow/compiler/xla/service/multi_output_fusion.h b/tensorflow/compiler/xla/service/multi_output_fusion.h
index d23822e33e..0019cd7254 100644
--- a/tensorflow/compiler/xla/service/multi_output_fusion.h
+++ b/tensorflow/compiler/xla/service/multi_output_fusion.h
@@ -78,6 +78,10 @@ class MultiOutputFusion : public HloPassInterface {
// Test if it's legal to fuse instr1 and instr2 into one fusion instruction.
virtual bool LegalToFuse(HloInstruction* instr1, HloInstruction* instr2);
+ // Fuse HloInstruction instr1 and instr2 and return the fused instruction.
+ // The other instruction is removed from its parent computation.
+ virtual HloInstruction* Fuse(HloInstruction* instr1, HloInstruction* instr2);
+
// Recompute reachability for the current computation.
void RecomputeReachability();
@@ -101,10 +105,6 @@ class MultiOutputFusion : public HloPassInterface {
virtual bool DoProducerConsumerMultiOutputFusion();
private:
- // Fuse HloInstrctuion instr1 and instr2 and return the fused instruction.
- // The other instruction is removed from its parent computation.
- HloInstruction* Fuse(HloInstruction* instr1, HloInstruction* instr2);
-
// Update the internal data structures after instr1 and instr2 are fused into
// one fusion instruction.
void Update(HloInstruction* instr1, HloInstruction* instr2);
diff --git a/tensorflow/compiler/xla/service/name_uniquer.cc b/tensorflow/compiler/xla/service/name_uniquer.cc
index 3a6a7c25f4..f6e7578a89 100644
--- a/tensorflow/compiler/xla/service/name_uniquer.cc
+++ b/tensorflow/compiler/xla/service/name_uniquer.cc
@@ -67,22 +67,17 @@ string NameUniquer::GetUniqueName(tensorflow::StringPiece prefix) {
has_numeric_suffix = true;
// Remove numeric suffix from root.
root = root.substr(0, separator_index);
- // Update count to at least the numeric suffix value to avoid future
- // colisions with this name.
- generated_names_[root] = std::max(generated_names_[root], numeric_suffix);
}
}
- int64* count = &(generated_names_[root]);
- if (*count == 0) {
- *count = 1;
+
+ SequentialIdGenerator& id_generator = generated_names_[root];
+ numeric_suffix = id_generator.RegisterId(numeric_suffix);
+ if (numeric_suffix == 0) {
return has_numeric_suffix ? tensorflow::strings::StrCat(root, separator_, 0)
: root;
- } else {
- tensorflow::strings::StrAppend(&root, separator_, *count);
- // Increment lookup under old 'root' name.
- (*count)++;
- return root;
}
+ tensorflow::strings::StrAppend(&root, separator_, numeric_suffix);
+ return root;
}
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/name_uniquer.h b/tensorflow/compiler/xla/service/name_uniquer.h
index 4139c2700b..4423d61069 100644
--- a/tensorflow/compiler/xla/service/name_uniquer.h
+++ b/tensorflow/compiler/xla/service/name_uniquer.h
@@ -17,10 +17,11 @@ limitations under the License.
#define TENSORFLOW_COMPILER_XLA_SERVICE_NAME_UNIQUER_H_
#include <string>
-#include <unordered_map>
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/core/stringpiece.h"
+#include "tensorflow/core/lib/gtl/flatmap.h"
+#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/platform/macros.h"
namespace xla {
@@ -44,13 +45,40 @@ class NameUniquer {
static string GetSanitizedName(const string& name);
private:
+ // Used to track and generate new identifiers for the same instruction name
+ // root.
+ class SequentialIdGenerator {
+ public:
+ SequentialIdGenerator() = default;
+
+ // Tries to register `id` as a used identifier. If `id` is not already in
+ // use, it is returned as-is; otherwise a fresh identifier is generated and
+ // returned.
+ int64 RegisterId(int64 id) {
+ if (used_.insert(id).second) {
+ return id;
+ }
+ while (!used_.insert(next_).second) {
+ ++next_;
+ }
+ return next_++;
+ }
+
+ private:
+ // The next identifier to be tried.
+ int64 next_ = 0;
+
+ // Set of all identifiers that have been used.
+ tensorflow::gtl::FlatSet<int64> used_;
+ };
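A short worked trace of RegisterId under these rules (call sequence hypothetical):

// SequentialIdGenerator g;  // next_ = 0, used_ = {}
// g.RegisterId(11);         // 11 is free: returns 11
// g.RegisterId(11);         // 11 taken: returns 0, next_ becomes 1
// g.RegisterId(0);          // 0 taken: returns 1, next_ becomes 2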
+
// The string to use to separate the prefix of the name from the uniquing
// integer value.
string separator_;
- // Map from name prefix to the number of names generated using that prefix
- // so far.
- std::unordered_map<string, int64> generated_names_;
+ // Map from name prefix to the generator data structure which tracks used
+ // identifiers and generates new ones.
+ tensorflow::gtl::FlatMap<string, SequentialIdGenerator> generated_names_;
TF_DISALLOW_COPY_AND_ASSIGN(NameUniquer);
};
diff --git a/tensorflow/compiler/xla/service/name_uniquer_test.cc b/tensorflow/compiler/xla/service/name_uniquer_test.cc
index 2ec255558c..3e2592c6ac 100644
--- a/tensorflow/compiler/xla/service/name_uniquer_test.cc
+++ b/tensorflow/compiler/xla/service/name_uniquer_test.cc
@@ -54,12 +54,13 @@ TEST_F(NameUniquerTest, NumericSuffixes) {
EXPECT_EQ("foo", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.54", uniquer.GetUniqueName("foo.54"));
- EXPECT_EQ("foo.55", uniquer.GetUniqueName("foo"));
+ EXPECT_EQ("foo.1", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.55.1", uniquer.GetUniqueName("foo.55.1"));
- EXPECT_EQ("foo.55.2", uniquer.GetUniqueName("foo.55.1"));
- EXPECT_EQ("bar.0", uniquer.GetUniqueName("bar.-1000"));
- EXPECT_EQ("bar.1", uniquer.GetUniqueName("bar.-2000"));
- EXPECT_EQ("bar.2", uniquer.GetUniqueName("bar.1"));
+ EXPECT_EQ("foo.55.0", uniquer.GetUniqueName("foo.55.1"));
+ EXPECT_EQ("bar.1000", uniquer.GetUniqueName("bar.1000"));
+ EXPECT_EQ("bar.2000", uniquer.GetUniqueName("bar.2000"));
+ EXPECT_EQ("bar.-2000", uniquer.GetUniqueName("bar.-2000"));
+ EXPECT_EQ("bar.1", uniquer.GetUniqueName("bar.1"));
}
TEST_F(NameUniquerTest, PrefixHasSuffix) {
@@ -77,12 +78,12 @@ TEST_F(NameUniquerTest, Sanitize) {
EXPECT_EQ("foo.54", uniquer.GetUniqueName("foo.54"));
EXPECT_EQ("foo_54", uniquer.GetUniqueName("foo_54"));
EXPECT_EQ("foo_54.1", uniquer.GetUniqueName("foo_54.1"));
- EXPECT_EQ("foo_55", uniquer.GetUniqueName("foo"));
+ EXPECT_EQ("foo_2", uniquer.GetUniqueName("foo"));
// Invalid characters will be replaced with '_'.
- EXPECT_EQ("bar_0", uniquer.GetUniqueName("bar<-1000"));
- EXPECT_EQ("bar_1", uniquer.GetUniqueName("bar<-2000"));
- EXPECT_EQ("bar_2", uniquer.GetUniqueName("bar_1"));
+ EXPECT_EQ("bar_1000", uniquer.GetUniqueName("bar<1000"));
+ EXPECT_EQ("bar_2000", uniquer.GetUniqueName("bar<2000"));
+ EXPECT_EQ("bar_1", uniquer.GetUniqueName("bar_1"));
// Separator is only recognized in the middle of the prefix.
EXPECT_EQ("_10", uniquer.GetUniqueName(
@@ -93,5 +94,15 @@ TEST_F(NameUniquerTest, Sanitize) {
EXPECT_EQ("foobar__1", uniquer.GetUniqueName("foobar_"));
}
+TEST_F(NameUniquerTest, KeepNamesInRandomOrder) {
+ NameUniquer uniquer(".");
+
+ EXPECT_EQ("foo.11", uniquer.GetUniqueName("foo.11"));
+ EXPECT_EQ("foo.10", uniquer.GetUniqueName("foo.10"));
+ EXPECT_EQ("foo.1", uniquer.GetUniqueName("foo.1"));
+ EXPECT_EQ("foo.12", uniquer.GetUniqueName("foo.12"));
+ EXPECT_EQ("foo.3", uniquer.GetUniqueName("foo.3"));
+}
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/pattern_matcher.h b/tensorflow/compiler/xla/service/pattern_matcher.h
index 2515222cf2..ac6ea4c72f 100644
--- a/tensorflow/compiler/xla/service/pattern_matcher.h
+++ b/tensorflow/compiler/xla/service/pattern_matcher.h
@@ -86,8 +86,8 @@ namespace xla {
// are provided below.
//
// Example nullary instruction:
-// Recv() == Op().WithOpcode(HloOpcode::kRecv)
-// Recv(&a) == Op(&a).WithOpcode(HloOpcode::kRecv)
+// Param() == Op().WithOpcode(HloOpcode::kParam)
+// Param(&a) == Op(&a).WithOpcode(HloOpcode::kParam)
//
// Example unary instruction:
// Abs() == Op().WithOpcode(HloOpcode::kAbs)
@@ -726,6 +726,32 @@ class HloInstructionPatternFusionKindImpl {
::xla::HloInstruction::FusionKind kind_;
};
+// An HloInstructionPattern implementation that matches only if the instruction
+// is a kGetTupleElement with a particular tuple index.
+template <typename Previous>
+class HloInstructionPatternTupleIndexImpl {
+ public:
+ explicit constexpr HloInstructionPatternTupleIndexImpl(
+ const Previous& previous, int64 tuple_index)
+ : previous_(previous), tuple_index_(tuple_index) {}
+
+ bool Match(const ::xla::HloInstruction* inst) const {
+ return previous_.Match(inst) &&
+ inst->opcode() == HloOpcode::kGetTupleElement &&
+ inst->tuple_index() == tuple_index_;
+ }
+
+ bool Match(::xla::HloInstruction* inst) const {
+ return previous_.Match(inst) &&
+ inst->opcode() == HloOpcode::kGetTupleElement &&
+ inst->tuple_index() == tuple_index_;
+ }
+
+ private:
+ Previous previous_;
+ int64 tuple_index_;
+};
+
// A pattern that matches HloInstructions.
template <typename HloInstructionType, typename Impl>
class HloInstructionPattern {
@@ -841,6 +867,17 @@ class HloInstructionPattern {
HloInstructionPatternFusionKindImpl<Impl>(impl_, kind), matched_inst_);
}
+ // Modifies the pattern to match only if the instruction is a
+ // get-tuple-element with the given tuple index.
+ constexpr HloInstructionPattern<HloInstructionType,
+ HloInstructionPatternTupleIndexImpl<Impl>>
+ WithTupleIndex(int64 tuple_index) const {
+ return HloInstructionPattern<HloInstructionType,
+ HloInstructionPatternTupleIndexImpl<Impl>>(
+ HloInstructionPatternTupleIndexImpl<Impl>(impl_, tuple_index),
+ matched_inst_);
+ }
+
private:
Impl impl_;
HloInstructionType** matched_inst_;
@@ -880,9 +917,7 @@ Op(::xla::HloInstruction** matched_inst) {
return Op(matched_inst).WithOpcode(HloOpcode::k##NAME); \
}
XLA_NULLOP_PATTERN(Constant)
-XLA_NULLOP_PATTERN(Infeed)
XLA_NULLOP_PATTERN(Parameter)
-XLA_NULLOP_PATTERN(Recv)
#undef XLA_NULLOP_PATTERN
// Helpers for unary instructions.
@@ -919,18 +954,21 @@ XLA_UNOP_PATTERN(Cos)
XLA_UNOP_PATTERN(Exp)
XLA_UNOP_PATTERN(Fft)
XLA_UNOP_PATTERN(Floor)
+XLA_UNOP_PATTERN(GetTupleElement)
XLA_UNOP_PATTERN(Imag)
+XLA_UNOP_PATTERN(Infeed)
XLA_UNOP_PATTERN(IsFinite)
XLA_UNOP_PATTERN(Log)
XLA_UNOP_PATTERN(Not)
XLA_UNOP_PATTERN(Negate)
-XLA_UNOP_PATTERN(Outfeed)
XLA_UNOP_PATTERN(Real)
+XLA_UNOP_PATTERN(Recv)
+XLA_UNOP_PATTERN(RecvDone)
XLA_UNOP_PATTERN(Reduce)
XLA_UNOP_PATTERN(ReducePrecision)
XLA_UNOP_PATTERN(Reshape)
XLA_UNOP_PATTERN(Reverse)
-XLA_UNOP_PATTERN(Send)
+XLA_UNOP_PATTERN(SendDone)
XLA_UNOP_PATTERN(Sign)
XLA_UNOP_PATTERN(Sin)
XLA_UNOP_PATTERN(Sort)
@@ -981,8 +1019,10 @@ XLA_BINOP_PATTERN(Maximum)
XLA_BINOP_PATTERN(Minimum)
XLA_BINOP_PATTERN(Multiply)
XLA_BINOP_PATTERN(Ne)
+XLA_BINOP_PATTERN(Outfeed)
XLA_BINOP_PATTERN(Power)
XLA_BINOP_PATTERN(Remainder)
+XLA_BINOP_PATTERN(Send)
XLA_BINOP_PATTERN(Subtract)
XLA_BINOP_PATTERN(And)
XLA_BINOP_PATTERN(Or)
@@ -1040,6 +1080,32 @@ inline auto NonConstant(HloInstructionType** matched_inst)
return Op(matched_inst).IsNonConstant();
}
+// Add overloads for GetTupleElement which take an int64 specifying which tuple
+// element is selected.
+template <typename Arg>
+inline auto GetTupleElement(Arg&& arg, int64 tuple_index)
+ -> decltype(Op().WithOpcode(HloOpcode::kGetTupleElement)
+ .WithOperand(0, std::forward<Arg>(arg))
+ .WithTupleIndex(tuple_index)) {
+ return Op()
+ .WithOpcode(HloOpcode::kGetTupleElement)
+ .WithOperand(0, std::forward<Arg>(arg))
+ .WithTupleIndex(tuple_index);
+}
+
+template <typename HloInstructionType, typename Arg>
+inline auto GetTupleElement(HloInstructionType** matched_inst, Arg&& arg,
+ int64 tuple_index)
+ -> decltype(Op(matched_inst)
+ .WithOpcode(HloOpcode::kGetTupleElement)
+ .WithOperand(0, std::forward<Arg>(arg))
+ .WithTupleIndex(tuple_index)) {
+ return Op(matched_inst)
+ .WithOpcode(HloOpcode::kGetTupleElement)
+ .WithOperand(0, std::forward<Arg>(arg))
+ .WithTupleIndex(tuple_index);
+}
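For instance (mirroring the new pattern_matcher_test below), a caller might capture the matched get-tuple-element like this; `root` is hypothetical:

// Matches `gte = get-tuple-element(op), index=1` and binds it to `gte_inst`.
HloInstruction* gte_inst;
bool matched = Match(root, match::GetTupleElement(&gte_inst, match::Op(), 1));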
+
} // namespace match
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/pattern_matcher_test.cc b/tensorflow/compiler/xla/service/pattern_matcher_test.cc
index fef3c132b0..a530581c34 100644
--- a/tensorflow/compiler/xla/service/pattern_matcher_test.cc
+++ b/tensorflow/compiler/xla/service/pattern_matcher_test.cc
@@ -193,5 +193,23 @@ TEST(PatternMatcherTest, FusionKind) {
HloInstruction::FusionKind::kLoop)));
}
+TEST(PatternMatcherTest, GetTupleElement) {
+ constexpr char kModuleStr[] = R"(
+ HloModule test_module
+
+ ENTRY while.v11 {
+ p0 = (f32[], f32[], f32[]) parameter(0)
+ ROOT gte = f32[] get-tuple-element(p0), index=1
+ })";
+ TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloString(kModuleStr));
+
+ auto* root = hlo_module->entry_computation()->root_instruction();
+ EXPECT_FALSE(Match(root, match::Op().WithTupleIndex(0)));
+ EXPECT_TRUE(Match(root, match::Op().WithTupleIndex(1)));
+ EXPECT_FALSE(Match(root, match::Op().WithTupleIndex(2)));
+ EXPECT_FALSE(Match(root, match::GetTupleElement(match::Op(), 0)));
+ EXPECT_TRUE(Match(root, match::GetTupleElement(match::Op(), 1)));
+}
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/platform_util.cc b/tensorflow/compiler/xla/service/platform_util.cc
index 7c63c0acc7..39fe3c7835 100644
--- a/tensorflow/compiler/xla/service/platform_util.cc
+++ b/tensorflow/compiler/xla/service/platform_util.cc
@@ -75,19 +75,6 @@ PlatformUtil::GetSupportedPlatforms() {
auto* platform = platform_pair.second;
auto compiler_status = Compiler::GetForPlatform(platform);
if (compiler_status.ok()) {
- if (platform->VisibleDeviceCount() > 0) {
- LOG(INFO) << "platform " << platform->Name() << " present with "
- << platform->VisibleDeviceCount() << " visible devices";
- } else {
- LOG(WARNING) << "platform " << platform->Name() << " present but no "
- << "visible devices found";
- }
- // Note: currently we call zero device platforms "supported" on the basis
- // that, if the platform support was linked in, it was probably intended
- // to be used for execution, and this way we can flag an error.
- //
- // TODO(b/33730287) If we want an alternative version of this behavior we
- // could add an --xla_fallback_to_host flag.
platforms.push_back(platform);
} else {
LOG(INFO) << "platform " << platform->Name() << " present but no "
diff --git a/tensorflow/compiler/xla/service/reshape_mover.cc b/tensorflow/compiler/xla/service/reshape_mover.cc
index 49ec38eb62..ca86c5d13e 100644
--- a/tensorflow/compiler/xla/service/reshape_mover.cc
+++ b/tensorflow/compiler/xla/service/reshape_mover.cc
@@ -38,7 +38,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/reshape_mover.h"
#include <algorithm>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/util.h"
diff --git a/tensorflow/compiler/xla/service/reshape_mover_test.cc b/tensorflow/compiler/xla/service/reshape_mover_test.cc
index 13e2d3258e..ad3b662c20 100644
--- a/tensorflow/compiler/xla/service/reshape_mover_test.cc
+++ b/tensorflow/compiler/xla/service/reshape_mover_test.cc
@@ -16,7 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/reshape_mover.h"
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -175,8 +175,9 @@ TEST_F(ReshapeMoverTest, EquivalentReshapesMoved) {
TEST_F(ReshapeMoverTest, 1ConstantAnd2ReshapesMoved) {
HloComputation::Builder builder(TestName());
auto root_shape = ShapeUtil::MakeShape(F32, {2, 3});
- auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<bool>({{true, true, false}, {false, false, true}})));
+ auto const0 = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2<bool>(
+ {{true, true, false}, {false, false, true}})));
auto param1 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 3, 1, 2}), "param1"));
@@ -255,12 +256,12 @@ TEST_F(ReshapeMoverTest, 2TrivialConstantReshapeNotMoved) {
HloComputation::Builder builder(TestName());
auto root_shape = ShapeUtil::MakeShape(F32, {3, 2});
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1, 2, 3}, {4, 5, 6}})));
+ LiteralUtil::CreateR2<float>({{1, 2, 3}, {4, 5, 6}})));
auto reshape0 =
builder.AddInstruction(HloInstruction::CreateReshape(root_shape, const0));
auto const1 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1, 2, 3}, {4, 5, 6}})));
+ LiteralUtil::CreateR2<float>({{1, 2, 3}, {4, 5, 6}})));
auto reshape1 =
builder.AddInstruction(HloInstruction::CreateReshape(root_shape, const1));
@@ -309,7 +310,7 @@ TEST_F(ReshapeMoverTest, 1NonTrivialReshapeMoved) {
auto param0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 3, 1, 2}), "param0"));
auto const1 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1, 2, 3}, {4, 5, 6}})));
+ LiteralUtil::CreateR2<float>({{1, 2, 3}, {4, 5, 6}})));
auto reshape0 =
builder.AddInstruction(HloInstruction::CreateReshape(root_shape, param0));
builder.AddInstruction(HloInstruction::CreateBinary(
@@ -348,7 +349,7 @@ TEST_F(ReshapeMoverTest, 1NonTrivialReshapeWith1ReshapedConstNotMoved) {
auto param0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 3}), "param0"));
auto const1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({9, 8, 7})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({9, 8, 7})));
auto reshape0 =
builder.AddInstruction(HloInstruction::CreateReshape(root_shape, param0));
auto reshape1 =
diff --git a/tensorflow/compiler/xla/service/shape_inference.cc b/tensorflow/compiler/xla/service/shape_inference.cc
index bbc95f8630..214146cf68 100644
--- a/tensorflow/compiler/xla/service/shape_inference.cc
+++ b/tensorflow/compiler/xla/service/shape_inference.cc
@@ -69,11 +69,11 @@ Status VerifyReducerShape(const ProgramShape& reducer_shape,
}
const Shape& accumulator_shape = reducer_shape.result();
- if (ShapeUtil::Rank(accumulator_shape) != 0) {
+ if (!ShapeUtil::IsArray(accumulator_shape) ||
+ ShapeUtil::Rank(accumulator_shape) != 0) {
return InvalidArgument(
- "Reduction function must have rank 0 (rank %lld reduction function "
- "given).",
- ShapeUtil::Rank(accumulator_shape));
+ "Reduction function must produce a scalar but has shape: %s",
+ ShapeUtil::HumanString(accumulator_shape).c_str());
}
// Check that the accumulator can be passed in as the first argument.
@@ -222,13 +222,16 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return shape;
case HloOpcode::kReal:
case HloOpcode::kImag:
- if (!ShapeUtil::ElementIsComplex(shape)) {
+ if (ShapeUtil::ElementIsComplex(shape)) {
+ return ShapeUtil::ComplexComponentShape(shape);
+ } else if (ShapeUtil::ElementIsFloating(shape)) {
+ return shape;
+ } else {
return InvalidArgument(
- "Expected element type in shape to be complex for real/imag "
- "operation; got %s.",
+ "Expected element type in shape to be floating or complex for "
+ "real/imag operation; got %s.",
PrimitiveType_Name(shape.element_type()).c_str());
}
- return ShapeUtil::ChangeElementType(shape, F32);
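The new branch structure, illustrated with hedged element types (not from the patch):

// Real(c64[4]) -> f32[4]  (complex component shape)
// Real(f32[4]) -> f32[4]  (floating input now passes through)
// Real(s32[4]) -> InvalidArgument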
case HloOpcode::kAbs:
if (ShapeUtil::ElementIsComplex(shape)) {
return ShapeUtil::ChangeElementType(
@@ -239,7 +242,6 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
case HloOpcode::kNegate:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kSign:
- case HloOpcode::kSort:
return shape;
case HloOpcode::kNot:
@@ -329,7 +331,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return ShapeUtil::MakeShape(element_type, new_dimensions);
}
-/* static */ StatusOr<Shape> ShapeInference::InferGenerateTokenShape(
+/* static */ StatusOr<Shape> ShapeInference::InferAfterAllShape(
tensorflow::gtl::ArraySlice<const Shape*> arg_shapes) {
for (const Shape* arg_shape : arg_shapes) {
if (arg_shape->element_type() != TOKEN) {
@@ -930,6 +932,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InferClampShape(lhs, rhs, ehs);
case HloOpcode::kSelect:
return InferSelectShape(lhs, rhs, ehs);
+ case HloOpcode::kTupleSelect:
+ return InferTupleSelectShape(lhs, rhs, ehs);
default:
return InvalidArgument("Unknown operation %s.",
HloOpcodeString(opcode).c_str());
@@ -962,6 +966,15 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
return result;
}
+ case HloOpcode::kSort: {
+ if (operand_shapes.size() == 1) {
+ return *operand_shapes[0];
+ } else if (operand_shapes.size() == 2) {
+ return ShapeUtil::MakeTupleShape(
+ {*operand_shapes[0], *operand_shapes[1]});
+ }
+ return InvalidArgument("Unexpected number of operands for sort.");
+ }
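Illustrative sort shapes under this rule (operands hypothetical):

// Sort(f32[10])          -> f32[10]
// Sort(f32[10], s32[10]) -> (f32[10], s32[10])
// Sort(a, b, c)          -> InvalidArgument (three operands)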
default:
return InvalidArgument("Unknown operation %s.",
HloOpcodeString(opcode).c_str());
@@ -2259,15 +2272,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
// broadcast from all operands, not just the predicate.
/* static */ StatusOr<Shape> ShapeInference::InferSelectShape(
const Shape& pred, const Shape& on_true, const Shape& on_false) {
- bool compatible;
- if (ShapeUtil::IsTuple(on_true)) {
- // Select only defines the top-level buffer, so if it's a tuple, the two
- // input must match exactly.
- compatible = ShapeUtil::Compatible(on_true, on_false);
- } else {
- compatible = ShapeUtil::CompatibleIgnoringFpPrecision(on_true, on_false);
- }
- if (!compatible) {
+ if (!ShapeUtil::CompatibleIgnoringFpPrecision(on_true, on_false)) {
return InvalidArgument(
"Operands to select must be the same shape; got %s and %s.",
ShapeUtil::HumanString(on_true).c_str(),
@@ -2279,7 +2284,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
ShapeUtil::HumanString(pred).c_str());
}
if (ShapeUtil::CompatibleIgnoringElementType(pred, on_true) ||
- ShapeUtil::Rank(pred) == 0) {
+ ShapeUtil::IsScalar(pred)) {
// By this stage we know that pred's element type is PRED. Therefore, this
// check restricts pred to be a PRED scalar, or a PRED array with the same
// dimensions as on_true and on_false.
@@ -2293,6 +2298,29 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
}
+/* static */ StatusOr<Shape> ShapeInference::InferTupleSelectShape(
+ const Shape& pred, const Shape& on_true, const Shape& on_false) {
+ // Select only defines the top-level buffer, so if it's a tuple, the two
+ // inputs must match exactly.
+ if (!ShapeUtil::Compatible(on_true, on_false)) {
+ return InvalidArgument(
+ "Operands to tuple-select must be the same shape; got %s and %s.",
+ ShapeUtil::HumanString(on_true).c_str(),
+ ShapeUtil::HumanString(on_false).c_str());
+ }
+ if (pred.element_type() != PRED) {
+ return InvalidArgument(
+ "TupleSelect's pred operand must have PRED element type; got %s.",
+ ShapeUtil::HumanString(pred).c_str());
+ }
+ if (!ShapeUtil::IsScalar(pred)) {
+ return InvalidArgument(
+ "TupleSelect operation with non-scalar predicate: %s.",
+ ShapeUtil::HumanString(pred).c_str());
+ }
+ return on_true;
+}
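A hedged acceptance/rejection summary for this helper (shapes hypothetical):

// pred[]  with (f32[2], s32[]) vs (f32[2], s32[]) -> (f32[2], s32[])
// pred[2] with matching tuple operands            -> non-scalar pred error
// pred[]  with (f32[2]) vs (f32[3])               -> shape-mismatch error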
+
/* static */ StatusOr<Shape> ShapeInference::InferCallShape(
tensorflow::gtl::ArraySlice<const Shape*> arg_shapes,
const ProgramShape& to_apply) {
diff --git a/tensorflow/compiler/xla/service/shape_inference.h b/tensorflow/compiler/xla/service/shape_inference.h
index eef6e62fc8..1a5684e3c3 100644
--- a/tensorflow/compiler/xla/service/shape_inference.h
+++ b/tensorflow/compiler/xla/service/shape_inference.h
@@ -216,11 +216,11 @@ class ShapeInference {
static StatusOr<Shape> InferConcatOpShape(
tensorflow::gtl::ArraySlice<const Shape*> arg_shapes, int64 dimension);
- // Infers the shape produced by a kGenerateToken operation. Trivially this
- // shape is always a TOKEN shape. However, ShapeInference serves two purposes:
- // inferring shapes and checking operand shapes. This method verifies that the
- // operand shapes are all TOKENs.
- static StatusOr<Shape> InferGenerateTokenShape(
+ // Infers the shape produced by a kAfterAll. Trivially this shape is always a
+ // TOKEN shape. However, ShapeInference serves two purposes: inferring shapes
+ // and checking operand shapes. This method verifies that the operand shapes
+ // are all TOKENs.
+ static StatusOr<Shape> InferAfterAllShape(
tensorflow::gtl::ArraySlice<const Shape*> arg_shapes);
// Helper that validates the given operand shape can be converted to the
@@ -286,6 +286,10 @@ class ShapeInference {
static StatusOr<Shape> InferSelectShape(const Shape& pred,
const Shape& on_true,
const Shape& on_false);
+ // Helper for inferring the shape of TupleSelect ops.
+ static StatusOr<Shape> InferTupleSelectShape(const Shape& pred,
+ const Shape& on_true,
+ const Shape& on_false);
// Helper for inferring shapes of binary operations which use degenerate
// dimension broadcasting (a dimension of size 1 in one operand is broadcast
diff --git a/tensorflow/compiler/xla/service/shape_inference_test.cc b/tensorflow/compiler/xla/service/shape_inference_test.cc
index bafe14d6f4..9b1ce143c6 100644
--- a/tensorflow/compiler/xla/service/shape_inference_test.cc
+++ b/tensorflow/compiler/xla/service/shape_inference_test.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include <string>
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/test_helpers.h"
@@ -1543,45 +1544,45 @@ class GatherShapeInferenceTest : public ShapeInferenceTest {
};
TEST_F(GatherShapeInferenceTest, TensorFlowGather) {
- TF_ASSERT_OK_AND_ASSIGN(
- Shape gather_shape,
- ShapeInference::InferGatherShape(matrix_64_48_, s64_vector_32_,
- HloInstruction::MakeGatherDimNumbers(
- /*output_window_dims=*/{0},
- /*elided_window_dims=*/{1},
- /*gather_dims_to_operand_dims=*/{1},
- /*index_vector_dim=*/1),
- /*window_bounds=*/{64, 1}));
+ TF_ASSERT_OK_AND_ASSIGN(Shape gather_shape,
+ ShapeInference::InferGatherShape(
+ matrix_64_48_, s64_vector_32_,
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/{0},
+ /*elided_window_dims=*/{1},
+ /*gather_dims_to_operand_dims=*/{1},
+ /*index_vector_dim=*/1),
+ /*window_bounds=*/{64, 1}));
EXPECT_TRUE(
ShapeUtil::Equal(gather_shape, ShapeUtil::MakeShape(F32, {64, 32})))
<< ShapeUtil::HumanString(gather_shape);
}
TEST_F(GatherShapeInferenceTest, TensorFlowGatherV2) {
- TF_ASSERT_OK_AND_ASSIGN(
- Shape gather_shape,
- ShapeInference::InferGatherShape(matrix_64_48_, s64_vector_32_,
- HloInstruction::MakeGatherDimNumbers(
- /*output_window_dims=*/{1},
- /*elided_window_dims=*/{0},
- /*gather_dims_to_operand_dims=*/{0},
- /*index_vector_dim=*/1),
- /*window_bounds=*/{1, 48}));
+ TF_ASSERT_OK_AND_ASSIGN(Shape gather_shape,
+ ShapeInference::InferGatherShape(
+ matrix_64_48_, s64_vector_32_,
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/{1},
+ /*elided_window_dims=*/{0},
+ /*gather_dims_to_operand_dims=*/{0},
+ /*index_vector_dim=*/1),
+ /*window_bounds=*/{1, 48}));
EXPECT_TRUE(
ShapeUtil::Equal(gather_shape, ShapeUtil::MakeShape(F32, {32, 48})))
<< ShapeUtil::HumanString(gather_shape);
}
TEST_F(GatherShapeInferenceTest, TensorFlowGatherNd) {
- TF_ASSERT_OK_AND_ASSIGN(
- Shape gather_shape,
- ShapeInference::InferGatherShape(matrix_64_48_, s64_4d_tensor_10_9_8_7_1_,
- HloInstruction::MakeGatherDimNumbers(
- /*output_window_dims=*/{4},
- /*elided_window_dims=*/{0},
- /*gather_dims_to_operand_dims=*/{0},
- /*index_vector_dim=*/4),
- /*window_bounds=*/{1, 48}));
+ TF_ASSERT_OK_AND_ASSIGN(Shape gather_shape,
+ ShapeInference::InferGatherShape(
+ matrix_64_48_, s64_4d_tensor_10_9_8_7_1_,
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/{4},
+ /*elided_window_dims=*/{0},
+ /*gather_dims_to_operand_dims=*/{0},
+ /*index_vector_dim=*/4),
+ /*window_bounds=*/{1, 48}));
EXPECT_TRUE(ShapeUtil::Equal(gather_shape,
ShapeUtil::MakeShape(F32, {10, 9, 8, 7, 48})))
<< ShapeUtil::HumanString(gather_shape);
@@ -1592,7 +1593,7 @@ TEST_F(GatherShapeInferenceTest, TensorFlowBatchDynamicSlice) {
Shape gather_shape,
ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1609,7 +1610,7 @@ TEST_F(GatherShapeInferenceTest, NonDefaultGatherIndicesLeafDim_A) {
Shape gather_shape,
ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_5_7_6_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1627,7 +1628,7 @@ TEST_F(GatherShapeInferenceTest, NonDefaultGatherIndicesLeafDim_B) {
Shape gather_shape,
ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_5_10_9_7_6_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1646,7 +1647,7 @@ TEST_F(GatherShapeInferenceTest, NoOutputGatherDims) {
Shape gather_shape,
ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_vector_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{0, 1, 2, 3, 4},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1664,7 +1665,7 @@ TEST_F(GatherShapeInferenceTest, ScalarGatherIndices) {
TF_ASSERT_OK_AND_ASSIGN(Shape gather_shape,
ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_scalar_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{0, 1, 2, 3},
/*elided_window_dims=*/{0},
/*gather_dims_to_operand_dims=*/{0},
@@ -1679,10 +1680,11 @@ TEST_F(GatherShapeInferenceTest, ScalarGatherIndices) {
TEST_F(GatherShapeInferenceTest, TupleShapedTensorInput) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
tuple_shape_, s64_vector_32_,
- HloInstruction::MakeGatherDimNumbers(/*output_window_dims=*/{0},
- /*elided_window_dims=*/{1},
- /*gather_dims_to_operand_dims=*/{1},
- /*index_vector_dim=*/1),
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/{0},
+ /*elided_window_dims=*/{1},
+ /*gather_dims_to_operand_dims=*/{1},
+ /*index_vector_dim=*/1),
/*window_bounds=*/{64, 1});
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().error_message(),
@@ -1693,10 +1695,11 @@ TEST_F(GatherShapeInferenceTest, TupleShapedTensorInput) {
TEST_F(GatherShapeInferenceTest, TupleShapedGatherIndicesInput) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
s64_vector_32_, tuple_shape_,
- HloInstruction::MakeGatherDimNumbers(/*output_window_dims=*/{0},
- /*elided_window_dims=*/{1},
- /*gather_dims_to_operand_dims=*/{1},
- /*index_vector_dim=*/0),
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/{0},
+ /*elided_window_dims=*/{1},
+ /*gather_dims_to_operand_dims=*/{1},
+ /*index_vector_dim=*/0),
/*window_bounds=*/{64, 1});
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().error_message(),
@@ -1707,10 +1710,11 @@ TEST_F(GatherShapeInferenceTest, TupleShapedGatherIndicesInput) {
TEST_F(GatherShapeInferenceTest, FloatingPointGatherIndicesInput) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
s64_vector_32_, vector_32_,
- HloInstruction::MakeGatherDimNumbers(/*output_window_dims=*/{0},
- /*elided_window_dims=*/{1},
- /*gather_dims_to_operand_dims=*/{1},
- /*index_vector_dim=*/0),
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/{0},
+ /*elided_window_dims=*/{1},
+ /*gather_dims_to_operand_dims=*/{1},
+ /*index_vector_dim=*/0),
/*window_bounds=*/{64, 1});
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().error_message(),
@@ -1722,7 +1726,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_NonAscendingWindowIndices) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 8, 7},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1739,7 +1743,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_RepeatedWindowIndices) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 7},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1756,7 +1760,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_WindowIndexOutOfBounds) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 99, 100, 101},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1772,7 +1776,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_WindowIndexBarelyOutOfBounds) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 9},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1788,7 +1792,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_MismatchingElidedWindowDims) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{4},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1806,7 +1810,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_OutOfBoundsWindowToInputMapping) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{0, 1, 2, 3, 19},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1823,7 +1827,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_RepeatedWindowToInputMapping) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{0, 1, 2, 3, 3},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1841,7 +1845,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_MismatchingGatherToInputMapping) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3},
@@ -1860,7 +1864,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_OutOfBoundsGatherToInputMapping) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 7},
@@ -1878,7 +1882,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_RepeatedGatherToInputMapping) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 3},
@@ -1896,7 +1900,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_NonAscendingElidedWindowDims) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{2, 1},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1911,7 +1915,7 @@ TEST_F(GatherShapeInferenceTest,
TEST_F(GatherShapeInferenceTest, InvalidGatherDimNumbers_WindowBoundsTooLarge) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7},
/*elided_window_dims=*/{2},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1928,7 +1932,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_MismatchingNumberOfWindowBounds) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1946,7 +1950,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_WindowBoundsNot1ForElidedDim) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7},
/*elided_window_dims=*/{1},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1962,7 +1966,7 @@ TEST_F(GatherShapeInferenceTest,
TEST_F(GatherShapeInferenceTest, OutOfBoundsGatherIndicesLeafDim) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_5_7_6_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
diff --git a/tensorflow/compiler/xla/service/transfer_manager.cc b/tensorflow/compiler/xla/service/transfer_manager.cc
index 4c5038a009..7232c658b3 100644
--- a/tensorflow/compiler/xla/service/transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/transfer_manager.cc
@@ -44,6 +44,7 @@ StatusOr<std::unique_ptr<Literal>> TransferManager::TransferLiteralFromDevice(
se::Stream* stream, const ShapedBuffer& device_buffer) {
StatusOr<std::unique_ptr<Literal>> ret;
se::Stream* substream = stream->GetOrCreateSubStream();
+ substream->ThenWaitFor(stream);
auto cleanup = tensorflow::gtl::MakeCleanup(
[&]() { stream->ReturnSubStream(substream); });
@@ -64,6 +65,7 @@ Status TransferManager::TransferLiteralToDevice(
// Use a substream so that if we are called from a HostCallback we don't
// deadlock.
se::Stream* substream = stream->GetOrCreateSubStream();
+ substream->ThenWaitFor(stream);
auto cleanup = tensorflow::gtl::MakeCleanup(
[&]() { stream->ReturnSubStream(substream); });
TF_RETURN_IF_ERROR(
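Both call sites now follow the same substream discipline; a sketch of the pattern (all calls shown appear in the hunks above):

// Order the substream behind work already enqueued on `stream`, do the
// transfer, then hand the substream back.
se::Stream* substream = stream->GetOrCreateSubStream();
substream->ThenWaitFor(stream);
// ... enqueue the transfer on `substream` ...
stream->ReturnSubStream(substream);  // done via MakeCleanup in the real code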
diff --git a/tensorflow/compiler/xla/service/transfer_manager.h b/tensorflow/compiler/xla/service/transfer_manager.h
index e384359642..82c599e482 100644
--- a/tensorflow/compiler/xla/service/transfer_manager.h
+++ b/tensorflow/compiler/xla/service/transfer_manager.h
@@ -20,7 +20,7 @@ limitations under the License.
#include <set>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/shaped_buffer.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
@@ -167,16 +167,6 @@ class TransferManager {
const se::Platform* platform);
protected:
- // Transfer a memory block of the given size from 'source' buffer to the
- // Infeed interface of the device using the given executor.
- //
- // size is the size to transfer from source in bytes.
- //
- // source is the source data that must be in the target-dependent layout that
- // the Infeed HLO used in the computation expects.
- virtual Status TransferBufferToInfeed(se::StreamExecutor* executor,
- int64 size, const void* source) = 0;
-
// Transfer a memory block of the given size from the device source into the
// 'destination' buffer.
//
diff --git a/tensorflow/compiler/xla/service/transpose_folding_test.cc b/tensorflow/compiler/xla/service/transpose_folding_test.cc
index cccb8f2fbb..7051a4cf51 100644
--- a/tensorflow/compiler/xla/service/transpose_folding_test.cc
+++ b/tensorflow/compiler/xla/service/transpose_folding_test.cc
@@ -20,7 +20,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -160,11 +160,11 @@ TEST_F(TransposeFoldingTest, FuseDotWithConstantOperands) {
auto builder = HloComputation::Builder("entry");
// (1.0 + 2.0) * (2.0 - 3.0)
HloInstruction* const1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HloInstruction* const2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
HloInstruction* const3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
const1->shape(), HloOpcode::kAdd, const1, const2));
HloInstruction* sub = builder.AddInstruction(HloInstruction::CreateBinary(
diff --git a/tensorflow/compiler/xla/service/tuple_points_to_analysis.cc b/tensorflow/compiler/xla/service/tuple_points_to_analysis.cc
index d1e1744647..990dfc410c 100644
--- a/tensorflow/compiler/xla/service/tuple_points_to_analysis.cc
+++ b/tensorflow/compiler/xla/service/tuple_points_to_analysis.cc
@@ -292,22 +292,29 @@ Status TuplePointsToAnalysis::HandleSlice(HloInstruction* slice) {
}
Status TuplePointsToAnalysis::HandleRecvDone(HloInstruction* recv_done) {
- // RecvDone aliases its input (Recv) tuple element {0} to its output.
+ // RecvDone aliases its input (Recv) tuple element {0} to element {0} of its
+ // output. The other indices ({} and {1}) define their own buffers.
PointsToSet& points_to_set = CreateEmptyPointsToSet(recv_done);
+ points_to_set.AddPointedToBuffer(
+ logical_buffer_analysis_->GetBuffer(recv_done, /*index=*/{}),
+ /*index=*/{});
+ points_to_set.AddPointedToBuffer(
+ logical_buffer_analysis_->GetBuffer(recv_done, /*index=*/{1}),
+ /*index=*/{1});
+
const PointsToSet& operand_points_to_set =
GetPointsToSet(recv_done->operand(0));
- // Recursively copy the points to set of the operand tuple {0}.
+ // Recursively copy the points-to set of the operand tuple {0} to the output
+ // element {0}.
points_to_set.ForEachMutableElement(
[this, &points_to_set, &operand_points_to_set](
const ShapeIndex& index, PointsToSet::BufferList* buffers) {
- ShapeIndex src_index({0});
- for (auto element : index) {
- src_index.push_back(element);
+ if (index.empty() || index[0] != 0) {
+ return;
}
- *buffers = operand_points_to_set.element(src_index);
- for (auto& tuple_source :
- operand_points_to_set.tuple_sources(src_index)) {
+ *buffers = operand_points_to_set.element(index);
+ for (auto& tuple_source : operand_points_to_set.tuple_sources(index)) {
points_to_set.add_tuple_source(index, tuple_source);
}
});
@@ -315,7 +322,7 @@ Status TuplePointsToAnalysis::HandleRecvDone(HloInstruction* recv_done) {
}
Status TuplePointsToAnalysis::HandleSend(HloInstruction* send) {
- // Send creates a tuple of {aliased operand, U32 context}.
+ // Send creates a tuple of {aliased operand, U32 context, token}.
PointsToSet& points_to_set = CreateEmptyPointsToSet(send);
// Creates the points to set for the tuple and its element at {1}.
@@ -328,6 +335,10 @@ Status TuplePointsToAnalysis::HandleSend(HloInstruction* send) {
context_buffer->push_back(
&logical_buffer_analysis_->GetBuffer(send, ShapeIndex({1})));
+ auto token_buffer = points_to_set.mutable_element(ShapeIndex({2}));
+ token_buffer->push_back(
+ &logical_buffer_analysis_->GetBuffer(send, ShapeIndex({2})));
+
// Recursively copy the points to set of the operand to output tuple {0}.
const PointsToSet& operand_points_to_set = GetPointsToSet(send->operand(0));
operand_points_to_set.ForEachElement(
@@ -388,7 +399,7 @@ Status TuplePointsToAnalysis::HandleTuple(HloInstruction* tuple) {
return Status::OK();
}
-Status TuplePointsToAnalysis::HandleSelect(HloInstruction* select) {
+Status TuplePointsToAnalysis::HandleTupleSelect(HloInstruction* tuple_select) {
// Select allocates a new buffer and then shallow copies the on_true or
// on_false buffer into this new buffer. Which side is chosen cannot be
// determined statically so conservatively set the points-to set to the union
@@ -396,9 +407,9 @@ Status TuplePointsToAnalysis::HandleSelect(HloInstruction* select) {
//
// First create a copy of the on_true points-to set (and tuple sources), then
// add in elements of the on_false points-to set (tuple sources).
- auto on_true = select->operand(1);
- auto on_false = select->operand(2);
- PointsToSet& points_to_set = CreateCopiedPointsToSet(select, on_true);
+ auto on_true = tuple_select->operand(1);
+ auto on_false = tuple_select->operand(2);
+ PointsToSet& points_to_set = CreateCopiedPointsToSet(tuple_select, on_true);
const PointsToSet& false_points_to_set = *PerInst(on_false)->points_to_set;
points_to_set.ForEachMutableElement(
[&](const ShapeIndex& index, PointsToSet::BufferList* buffers) {
@@ -416,7 +427,7 @@ Status TuplePointsToAnalysis::HandleSelect(HloInstruction* select) {
// respective element in the points-to set should contain only itself.
points_to_set.mutable_element({})->clear();
points_to_set.AddPointedToBuffer(
- logical_buffer_analysis_->GetBuffer(select, /*index=*/{}),
+ logical_buffer_analysis_->GetBuffer(tuple_select, /*index=*/{}),
/*index=*/{});
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/tuple_points_to_analysis.h b/tensorflow/compiler/xla/service/tuple_points_to_analysis.h
index c0d8241480..686bb05328 100644
--- a/tensorflow/compiler/xla/service/tuple_points_to_analysis.h
+++ b/tensorflow/compiler/xla/service/tuple_points_to_analysis.h
@@ -253,7 +253,7 @@ class TuplePointsToAnalysis : public DfsHloVisitorWithDefault {
Status HandleCopy(HloInstruction* copy) override;
Status HandleRecvDone(HloInstruction* recv_done) override;
Status HandleSend(HloInstruction* send) override;
- Status HandleSelect(HloInstruction* select) override;
+ Status HandleTupleSelect(HloInstruction* tuple_select) override;
string ToString() const;
diff --git a/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc b/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc
index 5734f28407..0ac8df4271 100644
--- a/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc
+++ b/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc
@@ -124,9 +124,9 @@ class TuplePointsToAnalysisTest : public HloTestBase {
TEST_F(TuplePointsToAnalysisTest, SimpleTuple) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
@@ -177,14 +177,14 @@ TEST_F(TuplePointsToAnalysisTest, NestedTuple) {
// tuple.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto inner_tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto constant3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({inner_tuple, constant3}));
@@ -238,14 +238,14 @@ TEST_F(TuplePointsToAnalysisTest, GetTupleElement) {
// tuple.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto inner_tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto constant3 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({inner_tuple, constant3}));
@@ -270,7 +270,7 @@ TEST_F(TuplePointsToAnalysisTest, DuplicatedElement) {
// Create a tuple which contains duplicate elements.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant, constant, constant}));
@@ -291,9 +291,9 @@ TEST_F(TuplePointsToAnalysisTest, TupleCopy) {
// the same.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto copy = builder.AddInstruction(
@@ -317,9 +317,10 @@ TEST_F(TuplePointsToAnalysisTest, SendAndSendDone) {
// Send forwards its operand to the output tuple at {0}.
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
+ auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto send = builder.AddInstruction(
- HloInstruction::CreateSend(constant, /*channel_id=*/0));
+ HloInstruction::CreateSend(constant, token, /*channel_id=*/0));
auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));
BuildModuleAndRunAnalysis(builder.Build());
@@ -342,8 +343,9 @@ TEST_F(TuplePointsToAnalysisTest, SendAndSendDone) {
TEST_F(TuplePointsToAnalysisTest, RecvAndRecvDone) {
// RecvDone forwards its operand tuple element at {0} to the output.
auto builder = HloComputation::Builder(TestName());
+ auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto recv = builder.AddInstruction(HloInstruction::CreateRecv(
- ShapeUtil::MakeShape(F32, {1, 2, 3}), /*channel_id=*/0));
+ ShapeUtil::MakeShape(F32, {1, 2, 3}), token, /*channel_id=*/0));
auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
BuildModuleAndRunAnalysis(builder.Build());
@@ -355,7 +357,7 @@ TEST_F(TuplePointsToAnalysisTest, RecvAndRecvDone) {
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(recv).element({}), {recv});
- ExpectHasBufferAliases(recv, {0}, {{recv, {0}}, {recv_done, {}}});
+ ExpectHasBufferAliases(recv, {0}, {{recv, {0}}, {recv_done, {0}}});
}
TEST_F(TuplePointsToAnalysisTest, TupleSelect) {
@@ -363,18 +365,18 @@ TEST_F(TuplePointsToAnalysisTest, TupleSelect) {
// set containing the union of both sides.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple1 = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto tuple2 = builder.AddInstruction(
HloInstruction::CreateTuple({constant2, constant2}));
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto select = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple1->shape(), HloOpcode::kSelect, pred, tuple1, tuple2));
+ tuple1->shape(), HloOpcode::kTupleSelect, pred, tuple1, tuple2));
BuildModuleAndRunAnalysis(builder.Build());
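(The kSelect-to-kTupleSelect change above tracks the split between the elementwise array select and the whole-tuple select. A minimal sketch of the tuple form, assuming the HLO builder API exactly as it appears in these hunks; the helper name is illustrative:

#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"

namespace xla {
HloInstruction* AddTupleSelect(HloComputation::Builder* b) {
  auto c1 = b->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto c2 = b->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto t1 = b->AddInstruction(HloInstruction::CreateTuple({c1, c2}));
  auto t2 = b->AddInstruction(HloInstruction::CreateTuple({c2, c1}));
  auto pred = b->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  // kTupleSelect forwards one of its two tuple operands wholesale, which is
  // why points-to analysis models its output as the union of both sides.
  return b->AddInstruction(HloInstruction::CreateTernary(
      t1->shape(), HloOpcode::kTupleSelect, pred, t1, t2));
}
}  // namespace xla
)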
@@ -401,9 +403,9 @@ TEST_F(TuplePointsToAnalysisTest, SelectTupleParameters) {
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, tuple_shape, "param1"));
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto select = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple_shape, HloOpcode::kSelect, pred, param0, param1));
+ tuple_shape, HloOpcode::kTupleSelect, pred, param0, param1));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(tuple_shape, HloOpcode::kCopy, select));
@@ -441,18 +443,18 @@ TEST_F(TuplePointsToAnalysisTest, UnambiguousTupleSelect) {
// Select from two identical tuples. The result should not be ambiguous.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple1 = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto tuple2 = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto select = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple1->shape(), HloOpcode::kSelect, pred, tuple1, tuple2));
+ tuple1->shape(), HloOpcode::kTupleSelect, pred, tuple1, tuple2));
BuildModuleAndRunAnalysis(builder.Build());
@@ -472,9 +474,9 @@ TEST_F(TuplePointsToAnalysisTest, NestedTupleSelect) {
// the right values.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto inner_tuple1 = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto inner_tuple2 = builder.AddInstruction(
@@ -486,9 +488,9 @@ TEST_F(TuplePointsToAnalysisTest, NestedTupleSelect) {
builder.AddInstruction(HloInstruction::CreateTuple({inner_tuple2}));
auto pred = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto select = builder.AddInstruction(HloInstruction::CreateTernary(
- tuple1->shape(), HloOpcode::kSelect, pred, tuple1, tuple2));
+ tuple1->shape(), HloOpcode::kTupleSelect, pred, tuple1, tuple2));
BuildModuleAndRunAnalysis(builder.Build());
@@ -519,9 +521,9 @@ TEST_F(TuplePointsToAnalysisTest, TupleWithBitcast) {
// have the operand of the bitcast in its points-to set.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto bitcast = builder.AddInstruction(HloInstruction::CreateUnary(
constant2->shape(), HloOpcode::kBitcast, constant2));
auto tuple =
@@ -555,9 +557,10 @@ TEST_F(TuplePointsToAnalysisTest, PointsToTupleConstantElements) {
// Construct a tuple constant and kCopy it. Verify the points-to set of the
// copy correctly points into the nested elements of the constant.
auto builder = HloComputation::Builder(TestName());
- auto tuple_constant = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::MakeTuple({Literal::CreateR2<float>({{1.0}, {2.0}}).get(),
- Literal::CreateR1<float>({2.0, 42}).get()})));
+ auto tuple_constant = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR2<float>({{1.0}, {2.0}}).get(),
+ LiteralUtil::CreateR1<float>({2.0, 42}).get()})));
auto copy = builder.AddInstruction(HloInstruction::CreateUnary(
tuple_constant->shape(), HloOpcode::kCopy, tuple_constant));
@@ -577,9 +580,9 @@ TEST_F(TuplePointsToAnalysisTest, BufferAliases) {
// times. Verify buffer alias sets.
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto inner_tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto tuple = builder.AddInstruction(
@@ -618,7 +621,7 @@ class FusionPointsToAnalysisTest : public TuplePointsToAnalysisTest {
auto tuple_element1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(update_shape, tuple_param0, 1));
auto ones = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f})));
+ LiteralUtil::CreateR1<float>({1.f, 1.f, 1.f, 1.f})));
// Create 'update' = Add(GetTupleElement(tuple_param0, 1), ones)
auto update = builder.AddInstruction(HloInstruction::CreateBinary(
update_shape, HloOpcode::kAdd, tuple_element1, ones));
@@ -866,9 +869,9 @@ TEST_F(DoesNotUseOperandBufferTest, FusedDynamicUpdateSlice) {
// Create a DynamicUpdateSlice instruction of tuple element 1.
auto starts = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({2})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({2})));
auto update = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({2.f, 2.f, 2.f})));
+ LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f})));
auto dynamic_update_slice =
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape, gte1, update, starts));
@@ -960,9 +963,9 @@ TEST_F(CanShareOperandBufferWithUserTest, FusedDynamicUpdateSlice) {
// Create a DynamicUpdateSlice instruction of tuple element 1.
auto starts = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({2})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({2})));
auto update = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({2.f, 2.f, 2.f})));
+ LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f})));
auto dynamic_update_slice =
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape, gte1, update, starts));
@@ -1014,9 +1017,9 @@ TEST_F(CanShareOperandBufferWithUserTest, FusedDotAdd) {
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto a = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}})));
auto b = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
+ LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
@@ -1025,7 +1028,7 @@ TEST_F(CanShareOperandBufferWithUserTest, FusedDotAdd) {
HloInstruction::CreateDot(data_shape, a, b, dot_dnums));
auto one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto add_operand = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape, one, {1}));
@@ -1047,7 +1050,7 @@ TEST_F(CanShareOperandBufferWithUserTest, OutputFusionCantAliasOperandBuffer) {
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto one = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto operand = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape, one, {1}));
@@ -1055,7 +1058,7 @@ TEST_F(CanShareOperandBufferWithUserTest, OutputFusionCantAliasOperandBuffer) {
HloInstruction::CreateReverse(data_shape, operand, {0, 1}));
auto two = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
+ LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(data_shape, HloOpcode::kAdd, reverse, two));
@@ -1120,7 +1123,7 @@ TEST_F(CanShareOperandBufferWithUserTest, CallToComputationWithFusionRoot) {
auto sub_param = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "sub_param"));
auto one = sub_builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto ones = sub_builder.AddInstruction(
HloInstruction::CreateBroadcast(shape, one, {1}));
auto add = sub_builder.AddInstruction(
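(The other recurring change in this file threads an explicit token through the side-effecting Send/Recv pair: CreateSend and CreateRecv each gain a token operand, and the updated alias expectation above suggests RecvDone now produces a (data, token) tuple, with the payload at index {0}, rather than the bare data. A minimal sketch, assuming the builder API as used in these hunks:

#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/shape_util.h"

namespace xla {
void AddTokenizedSendRecv(HloComputation::Builder* b, HloInstruction* operand) {
  // Side-effecting ops now consume and produce tokens, so their ordering is
  // explicit in the data-flow graph instead of implied.
  auto send_token = b->AddInstruction(HloInstruction::CreateToken());
  auto send = b->AddInstruction(
      HloInstruction::CreateSend(operand, send_token, /*channel_id=*/0));
  b->AddInstruction(HloInstruction::CreateSendDone(send));

  auto recv_token = b->AddInstruction(HloInstruction::CreateToken());
  auto recv = b->AddInstruction(HloInstruction::CreateRecv(
      ShapeUtil::MakeShape(F32, {1, 2, 3}), recv_token, /*channel_id=*/0));
  b->AddInstruction(HloInstruction::CreateRecvDone(recv));
}
}  // namespace xla
)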
diff --git a/tensorflow/compiler/xla/service/tuple_simplifier_test.cc b/tensorflow/compiler/xla/service/tuple_simplifier_test.cc
index d3635eae81..39b693872d 100644
--- a/tensorflow/compiler/xla/service/tuple_simplifier_test.cc
+++ b/tensorflow/compiler/xla/service/tuple_simplifier_test.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include <memory>
#include <utility>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"
diff --git a/tensorflow/compiler/xla/service/while_loop_invariant_code_motion_test.cc b/tensorflow/compiler/xla/service/while_loop_invariant_code_motion_test.cc
index 8831c513ee..32e69c335b 100644
--- a/tensorflow/compiler/xla/service/while_loop_invariant_code_motion_test.cc
+++ b/tensorflow/compiler/xla/service/while_loop_invariant_code_motion_test.cc
@@ -53,7 +53,7 @@ HloComputation* WhileLoopInvariantCodeMotionTest::MakeAlwaysTrueComputation(
builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
return module->AddEmbeddedComputation(builder.Build());
}
@@ -125,7 +125,7 @@ TEST_F(WhileLoopInvariantCodeMotionTest, HoistInvariantOperationTree) {
builder.AddInstruction(HloInstruction::CreateUnary(
scalar_s32, HloOpcode::kNegate, mul_result));
HloInstruction* constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(4)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(4)));
HloInstruction* sub_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kSubtract, negate_result, constant));
@@ -248,7 +248,9 @@ TEST_F(WhileLoopInvariantCodeMotionTest,
TEST_F(WhileLoopInvariantCodeMotionTest, DontHoistInstructionWithSideEffects) {
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
- Shape while_shape = ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32});
+ auto token_shape = ShapeUtil::MakeTokenShape();
+ Shape while_shape =
+ ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, token_shape});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
@@ -258,25 +260,32 @@ TEST_F(WhileLoopInvariantCodeMotionTest, DontHoistInstructionWithSideEffects) {
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
+ HloInstruction* in_token = builder.AddInstruction(
+ HloInstruction::CreateGetTupleElement(token_shape, param, 2));
+ HloInstruction* out_token = builder.AddInstruction(
+ HloInstruction::CreateOutfeed(scalar_s32, gte_0, in_token, ""));
builder.AddInstruction(
- HloInstruction::CreateOutfeed(scalar_s32, gte_0, ""));
- builder.AddInstruction(HloInstruction::CreateTuple({gte_0, gte_1}));
+ HloInstruction::CreateTuple({gte_0, gte_1, out_token}));
return module().AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
+ auto* scalar_param = builder.AddInstruction(
+ HloInstruction::CreateParameter(0, scalar_s32, "param"));
+ auto* token = builder.AddInstruction(HloInstruction::CreateToken());
auto* init_value = builder.AddInstruction(
- HloInstruction::CreateParameter(0, while_shape, "init_value"));
+ HloInstruction::CreateTuple({scalar_param, scalar_param, token}));
auto* while_inst = builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, &module()),
while_body, init_value));
-
+ builder.AddInstruction(
+ HloInstruction::CreateGetTupleElement(scalar_s32, while_inst, 0));
module().AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(&module()));
- EXPECT_FALSE(simplified_loop);
+ ASSERT_FALSE(simplified_loop);
EXPECT_THAT(while_inst->while_body()->instructions(),
Contains(op::Outfeed()));
@@ -287,7 +296,9 @@ TEST_F(WhileLoopInvariantCodeMotionTest, DontHoistBitcastAlone) {
// bitcast either.
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
auto scalar_f32 = ShapeUtil::MakeShape(F32, {});
- Shape while_shape = ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32});
+ auto token_shape = ShapeUtil::MakeTokenShape();
+ Shape while_shape =
+ ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, token_shape});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
@@ -297,21 +308,29 @@ TEST_F(WhileLoopInvariantCodeMotionTest, DontHoistBitcastAlone) {
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
+ HloInstruction* in_token = builder.AddInstruction(
+ HloInstruction::CreateGetTupleElement(token_shape, param, 2));
HloInstruction* bitcast_inst = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_f32, HloOpcode::kBitcast, gte_0));
+ HloInstruction* out_token = builder.AddInstruction(
+ HloInstruction::CreateOutfeed(scalar_f32, bitcast_inst, in_token, ""));
builder.AddInstruction(
- HloInstruction::CreateOutfeed(scalar_f32, bitcast_inst, ""));
- builder.AddInstruction(HloInstruction::CreateTuple({gte_0, gte_1}));
+ HloInstruction::CreateTuple({gte_0, gte_1, out_token}));
return module().AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
+ auto* scalar_param = builder.AddInstruction(
+ HloInstruction::CreateParameter(0, scalar_s32, "param"));
+ auto* token = builder.AddInstruction(HloInstruction::CreateToken());
auto* init_value = builder.AddInstruction(
- HloInstruction::CreateParameter(0, while_shape, "init_value"));
+ HloInstruction::CreateTuple({scalar_param, scalar_param, token}));
auto* while_inst = builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, &module()),
while_body, init_value));
+ builder.AddInstruction(
+ HloInstruction::CreateGetTupleElement(scalar_s32, while_inst, 0));
module().AddEntryComputation(builder.Build());
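(Both tests in this file are reshaped by the same requirement: Outfeed now consumes and produces a token, so the while state is widened to (s32[], s32[], token[]) and the body threads the token through the outfeed. Pulled out as a stand-alone sketch, assuming the builder API as used above; the helper name is illustrative:

#include <memory>

#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/shape_util.h"

namespace xla {
std::unique_ptr<HloComputation> MakeTokenThreadingBody(
    const Shape& while_shape) {
  auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
  auto token_shape = ShapeUtil::MakeTokenShape();
  HloComputation::Builder builder("while_body");
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, while_shape, "param"));
  auto gte_0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
  auto gte_1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
  // The token rides along in the loop state; the outfeed consumes it and
  // yields a fresh token that is tupled back into the next iteration's state.
  auto in_token = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(token_shape, param, 2));
  auto out_token = builder.AddInstruction(
      HloInstruction::CreateOutfeed(scalar_s32, gte_0, in_token, ""));
  builder.AddInstruction(
      HloInstruction::CreateTuple({gte_0, gte_1, out_token}));
  return builder.Build();
}
}  // namespace xla
)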
diff --git a/tensorflow/compiler/xla/service/while_loop_simplifier_test.cc b/tensorflow/compiler/xla/service/while_loop_simplifier_test.cc
index 619e87caa5..2e1571943e 100644
--- a/tensorflow/compiler/xla/service/while_loop_simplifier_test.cc
+++ b/tensorflow/compiler/xla/service/while_loop_simplifier_test.cc
@@ -157,7 +157,7 @@ TEST_F(WhileLoopSimplifierTest,
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* true_op = while_op->while_body()->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
TF_ASSERT_OK(true_op->AddControlDependencyTo(
while_op->while_body()->root_instruction()));
ASSERT_TRUE(WhileLoopSimplifier().Run(the_module).ValueOrDie());
@@ -175,9 +175,11 @@ TEST_F(WhileLoopSimplifierTest, LoopWithSendNotSimplified) {
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* while_body = while_op->while_body();
+ auto* token = while_body->AddInstruction(HloInstruction::CreateToken());
auto* send = while_body->AddInstruction(HloInstruction::CreateSend(
while_body->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(true))),
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))),
+ token,
/*channel_id=*/0));
while_body->AddInstruction(HloInstruction::CreateSendDone(send));
EXPECT_FALSE(WhileLoopSimplifier().Run(the_module).ValueOrDie());
@@ -190,8 +192,9 @@ TEST_F(WhileLoopSimplifierTest, LoopWithRecvNotSimplified) {
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* while_body = while_op->while_body();
+ auto* token = while_body->AddInstruction(HloInstruction::CreateToken());
auto* recv = while_body->AddInstruction(
- HloInstruction::CreateRecv(ShapeUtil::MakeShape(F32, {1}),
+ HloInstruction::CreateRecv(ShapeUtil::MakeShape(F32, {1}), token,
/*channel_id=*/0));
while_body->AddInstruction(HloInstruction::CreateRecvDone(recv));
EXPECT_FALSE(WhileLoopSimplifier().Run(the_module).ValueOrDie());
@@ -208,8 +211,9 @@ TEST_F(WhileLoopSimplifierTest, LoopWithInfeedNotSimplified) {
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* while_body = while_op->while_body();
- while_body->AddInstruction(
- HloInstruction::CreateInfeed(ShapeUtil::MakeShape(F32, {1}), "config"));
+ auto token = while_body->AddInstruction(HloInstruction::CreateToken());
+ while_body->AddInstruction(HloInstruction::CreateInfeed(
+ ShapeUtil::MakeShape(F32, {1}), token, "config"));
EXPECT_FALSE(WhileLoopSimplifier().Run(the_module).ValueOrDie());
}
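(Infeed gets the same treatment: it now takes a token operand and, per the HLO text later in this patch where "infeed = (pred[], token[]) infeed(token)", returns a (data, token) tuple from which callers extract the payload with get-tuple-element. A minimal sketch of the new construction:

#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/shape_util.h"

namespace xla {
void AddTokenizedInfeed(HloComputation* while_body) {
  auto token = while_body->AddInstruction(HloInstruction::CreateToken());
  // The infeed consumes the token; its result carries both data and a token.
  while_body->AddInstruction(HloInstruction::CreateInfeed(
      ShapeUtil::MakeShape(F32, {1}), token, "config"));
}
}  // namespace xla
)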
diff --git a/tensorflow/compiler/xla/service/while_util.cc b/tensorflow/compiler/xla/service/while_util.cc
index 473eab2ea8..1ef17b9d7d 100644
--- a/tensorflow/compiler/xla/service/while_util.cc
+++ b/tensorflow/compiler/xla/service/while_util.cc
@@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/while_util.h"
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_creation_utils.h"
#include "tensorflow/compiler/xla/service/tuple_util.h"
@@ -38,7 +39,7 @@ static StatusOr<HloComputation*> WidenWhileCondition(
// the root instruction later. We later change the root instruction to
// something more appropriate.
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<bool>(false)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
return narrow_condition->parent()->AddEmbeddedComputation(builder.Build());
}();
@@ -154,7 +155,7 @@ MakeCountedLoopConditionComputation(const Shape& loop_state_shape,
{&loop_state_shape}, scalar_pred, "while_cond"));
HloInstruction* trip_count_constant = cond_computation->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(trip_count)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(trip_count)));
HloInstruction* param = cond_computation->parameter_instruction(0);
TF_ASSIGN_OR_RETURN(HloInstruction * indvar,
@@ -175,7 +176,7 @@ static StatusOr<std::unique_ptr<HloComputation>> MakeCountedLoopBodyComputation(
CreateComputationWithSignature(
{&loop_state_shape}, loop_state_shape, "while_body"));
HloInstruction* one = body_computation->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1)));
HloInstruction* param = body_computation->parameter_instruction(0);
TF_ASSIGN_OR_RETURN(HloInstruction * indvar,
MakeGetTupleElementHlo(param, 0));
@@ -203,7 +204,7 @@ static StatusOr<HloInstruction*> MakeInitTupleFromInitValues(
std::vector<HloInstruction*> init_values_with_indvar;
init_values_with_indvar.reserve(init_values.size() + 1);
HloInstruction* zero = computation->AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0)));
init_values_with_indvar.push_back(zero);
c_copy(init_values, std::back_inserter(init_values_with_indvar));
return computation->AddInstruction(
diff --git a/tensorflow/compiler/xla/service/while_util_test.cc b/tensorflow/compiler/xla/service/while_util_test.cc
index d79d329721..2ccb919acf 100644
--- a/tensorflow/compiler/xla/service/while_util_test.cc
+++ b/tensorflow/compiler/xla/service/while_util_test.cc
@@ -179,7 +179,9 @@ body {
cond {
param.c = (s32[], s32[]) parameter(0)
- ROOT condition = pred[] infeed()
+ token = token[] after-all()
+ infeed = (pred[], token[]) infeed(token)
+ ROOT condition = pred[] get-tuple-element(infeed), index=0
}
ENTRY main {
diff --git a/tensorflow/compiler/xla/service/zero_sized_hlo_elimination.cc b/tensorflow/compiler/xla/service/zero_sized_hlo_elimination.cc
index 44b0ec5cd4..83d696fe09 100644
--- a/tensorflow/compiler/xla/service/zero_sized_hlo_elimination.cc
+++ b/tensorflow/compiler/xla/service/zero_sized_hlo_elimination.cc
@@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/zero_sized_hlo_elimination.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -32,7 +32,8 @@ StatusOr<bool> ZeroSizedHloElimination::Run(HloModule* module) {
for (HloComputation* comp : module->MakeNonfusionComputations()) {
for (HloInstruction* instruction : comp->MakeInstructionPostOrder()) {
if (instruction->HasSideEffect() ||
- !ShapeUtil::IsArray(instruction->shape())) {
+ !ShapeUtil::IsArray(instruction->shape()) ||
+ instruction->opcode() == HloOpcode::kConstant) {
continue;
}
if (comp->IsRemovable(instruction) &&
diff --git a/tensorflow/compiler/xla/service/zero_sized_hlo_elimination_test.cc b/tensorflow/compiler/xla/service/zero_sized_hlo_elimination_test.cc
index f5331280ee..b9ef18892d 100644
--- a/tensorflow/compiler/xla/service/zero_sized_hlo_elimination_test.cc
+++ b/tensorflow/compiler/xla/service/zero_sized_hlo_elimination_test.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include <unordered_set>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
@@ -67,7 +67,16 @@ TEST_F(ZeroSizedHloEliminationTest, DoesNotEliminateParameter) {
}
TEST_F(ZeroSizedHloEliminationTest, DoesNotEliminateSideEffects) {
- builder_.AddInstruction(HloInstruction::CreateSend(zero_sized_param_, 0));
+ auto token = builder_.AddInstruction(HloInstruction::CreateToken());
+ builder_.AddInstruction(
+ HloInstruction::CreateSend(zero_sized_param_, token, 0));
+ TF_ASSERT_OK_AND_ASSIGN(bool changed, RunZeroSizedElimination());
+ EXPECT_FALSE(changed);
+}
+
+TEST_F(ZeroSizedHloEliminationTest, DoesNotEliminateConstant) {
+ builder_.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1({})));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunZeroSizedElimination());
EXPECT_FALSE(changed);
}
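(The new kConstant exclusion in the pass pairs with the DoesNotEliminateConstant test above. A plausible motivation, given that test: if the pass's replacement for a zero-sized instruction is itself a constant, rewriting constants would swap one constant for an equivalent one while still reporting the module as changed. The new skip predicate, pulled out as a stand-alone sketch:

#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/shape_util.h"

namespace xla {
bool SkippedByZeroSizedElimination(const HloInstruction* instruction) {
  // Side-effecting ops, non-array shapes (tuples, tokens), and constants are
  // all left untouched by the pass.
  return instruction->HasSideEffect() ||
         !ShapeUtil::IsArray(instruction->shape()) ||
         instruction->opcode() == HloOpcode::kConstant;
}
}  // namespace xla
)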
diff --git a/tensorflow/compiler/xla/shape_layout.cc b/tensorflow/compiler/xla/shape_layout.cc
index 7ee366b27a..caad31d6ce 100644
--- a/tensorflow/compiler/xla/shape_layout.cc
+++ b/tensorflow/compiler/xla/shape_layout.cc
@@ -67,6 +67,14 @@ void ShapeLayout::ResetLayout(const Layout& layout) {
TF_CHECK_OK(ShapeUtil::ValidateShape(shape_));
}
+void ShapeLayout::ResetLayout(const Layout& layout,
+ ShapeIndexView shape_index) {
+ CHECK(ShapeUtil::IsTuple(shape_));
+ *ShapeUtil::GetMutableSubshape(&shape_, shape_index)->mutable_layout() =
+ layout;
+ TF_CHECK_OK(ShapeUtil::ValidateShape(shape_));
+}
+
bool ShapeLayout::operator==(const ShapeLayout& other) const {
return ShapeUtil::Equal(shape_, other.shape_);
}
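(A short usage sketch of the new overload; names are illustrative, not from the patch. Note the CHECK above requires the held shape to be a tuple:

#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/shape_layout.h"

namespace xla {
// Reset only the layout of tuple element {0}, leaving sibling layouts intact.
void ResetFirstTupleElementLayout(ShapeLayout* shape_layout) {
  shape_layout->ResetLayout(LayoutUtil::MakeLayout({1, 0}),
                            /*shape_index=*/{0});
}
}  // namespace xla
)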
diff --git a/tensorflow/compiler/xla/shape_layout.h b/tensorflow/compiler/xla/shape_layout.h
index 36806da599..214cf98854 100644
--- a/tensorflow/compiler/xla/shape_layout.h
+++ b/tensorflow/compiler/xla/shape_layout.h
@@ -72,6 +72,10 @@ class ShapeLayout {
// tuple.
void ResetLayout(const Layout& layout);
+ // Resets the layout on the shape at the provided ShapeIndex to the provided
+ // layout. Shape must be a tuple.
+ void ResetLayout(const Layout& layout, ShapeIndexView shape_index);
+
// Returns a string representation of this object.
string ToString() const { return ShapeUtil::HumanStringWithLayout(shape_); }
diff --git a/tensorflow/compiler/xla/shape_util.cc b/tensorflow/compiler/xla/shape_util.cc
index 98c3095499..f4668c0f55 100644
--- a/tensorflow/compiler/xla/shape_util.cc
+++ b/tensorflow/compiler/xla/shape_util.cc
@@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/index_util.h"
#include "tensorflow/compiler/xla/layout_util.h"
+#include "tensorflow/compiler/xla/overflow_util.h"
#include "tensorflow/compiler/xla/primitive_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
@@ -45,28 +46,14 @@ namespace xla {
using ::tensorflow::strings::StrAppend;
using ::tensorflow::strings::StrCat;
-string ShapeIndex::ToString() const {
- return StrCat("{", tensorflow::str_util::Join(indices_, ","), "}");
-}
+string ShapeIndex::ToString() const { return ShapeIndexView(*this).ToString(); }
string ShapeIndexView::ToString() const {
- return StrCat("{",
- tensorflow::str_util::Join(
- tensorflow::gtl::make_range(begin_, end_), ","),
- "}");
+ return StrCat("{", tensorflow::str_util::Join(indices_, ","), "}");
}
bool ShapeIndexView::operator==(const ShapeIndexView& other) const {
- if (size() != other.size()) {
- return false;
- }
- for (auto it = begin(), other_it = other.begin(); it != end();
- ++it, ++other_it) {
- if (*it != *other_it) {
- return false;
- }
- }
- return true;
+ return indices_ == other.indices_;
}
bool ShapeIndexView::operator!=(const ShapeIndexView& other) const {
@@ -94,8 +81,11 @@ bool IsArrayPrimitiveType(PrimitiveType primitive_type) {
// Recursive helper for comparing the equality of two shapes. Returns true if
// the shapes are the same. If compare_layouts is true, then layouts must also
// match.
-bool CompareShapes(const Shape& lhs, const Shape& rhs, bool compare_layouts) {
- if (!ShapeUtil::SameElementType(lhs, rhs)) {
+bool CompareShapes(const Shape& lhs, const Shape& rhs, bool compare_layouts,
+ bool ignore_fp_precision) {
+ if ((ignore_fp_precision &&
+ !ShapeUtil::SameElementTypeIgnoringFpPrecision(lhs, rhs)) ||
+ (!ignore_fp_precision && !ShapeUtil::SameElementType(lhs, rhs))) {
VLOG(3) << "CompareShapes: lhs element type != rhs element type";
return false;
}
@@ -103,7 +93,8 @@ bool CompareShapes(const Shape& lhs, const Shape& rhs, bool compare_layouts) {
if (ShapeUtil::IsTuple(lhs)) {
return ContainersEqual(lhs.tuple_shapes(), rhs.tuple_shapes(),
[=](const Shape& l, const Shape& r) {
- return CompareShapes(l, r, compare_layouts);
+ return CompareShapes(l, r, compare_layouts,
+ ignore_fp_precision);
});
} else if (!ShapeUtil::IsArray(lhs)) {
// Non-tuple, non-array types such as opaque and token types are trivially
@@ -170,7 +161,8 @@ StatusOr<Shape> MakeShapeWithLayoutInternal(
} // namespace
/* static */ bool ShapeUtil::Equal(const Shape& lhs, const Shape& rhs) {
- bool equal = CompareShapes(lhs, rhs, /*compare_layouts=*/true);
+ bool equal = CompareShapes(lhs, rhs, /*compare_layouts=*/true,
+ /*ignore_fp_precision=*/false);
if (!equal && VLOG_IS_ON(3)) {
VLOG(3) << "ShapeUtil::Equal differ: lhs = " << lhs.ShortDebugString()
<< ", rhs = " << rhs.ShortDebugString();
@@ -179,6 +171,18 @@ StatusOr<Shape> MakeShapeWithLayoutInternal(
return equal;
}
+/* static */ bool ShapeUtil::EqualIgnoringFpPrecision(const Shape& lhs,
+ const Shape& rhs) {
+ bool equal = CompareShapes(lhs, rhs, /*compare_layouts=*/true,
+ /*ignore_fp_precision=*/true);
+ if (!equal && VLOG_IS_ON(3)) {
+ VLOG(3) << "ShapeUtil::EqualIgnoringFpPrecision differ: lhs = "
+ << lhs.ShortDebugString() << ", rhs = " << rhs.ShortDebugString();
+ }
+
+ return equal;
+}
+
/* static */ int64 ShapeUtil::Rank(const Shape& shape) {
CHECK(ShapeUtil::IsArray(shape))
<< "Non-arrays do not have a rank, shape: " << shape;
@@ -574,12 +578,11 @@ StatusOr<Shape> ParseShapeStringInternal(tensorflow::StringPiece* s) {
// tensorflow::StringPiece is not compatible with internal RE2 StringPiece, so
// we convert it to the RE2-consumable type and then consume the corresponding
// amount from our StringPiece type.
+ static LazyRE2 shape_pattern = {
+ "^(\\w*\\d*)\\[([\\d,]*)\\](?:\\s*(dense|sparse)?\\s*{([\\d,]+)})?"};
tensorflow::RegexpStringPiece s_consumable(s->data(), s->size());
- if (RE2::Consume(
- &s_consumable,
- "^(\\w*\\d*)\\[([\\d,]*)\\](?:\\s*(dense|sparse)?\\s*{([\\d,]+)})?",
- &element_type_string, &dimensions_string, &format_string,
- &layout_string)) {
+ if (RE2::Consume(&s_consumable, *shape_pattern, &element_type_string,
+ &dimensions_string, &format_string, &layout_string)) {
size_t consumed = s->size() - s_consumable.size();
s->remove_prefix(consumed);
auto string_to_int64 = [&s](const string& input) -> StatusOr<int64> {
@@ -665,7 +668,8 @@ StatusOr<Shape> ParseShapeStringInternal(tensorflow::StringPiece* s) {
}
/* static */ bool ShapeUtil::Compatible(const Shape& lhs, const Shape& rhs) {
- return CompareShapes(lhs, rhs, /*compare_layouts=*/false);
+ return CompareShapes(lhs, rhs, /*compare_layouts=*/false,
+ /*ignore_fp_precision=*/false);
}
/* static */ bool ShapeUtil::CompatibleIgnoringElementType(const Shape& lhs,
@@ -867,6 +871,60 @@ StatusOr<Shape> ParseShapeStringInternal(tensorflow::StringPiece* s) {
}
}
+ TF_RETURN_IF_ERROR(ValidateShapeSize(shape));
+ return Status::OK();
+}
+
+/* static */ Status ShapeUtil::ValidateShapeSize(const Shape& shape) {
+ VLOG(3) << "Validating shape size: " << ShapeUtil::HumanString(shape);
+
+ if (!IsArray(shape)) {
+ return Status::OK();
+ }
+
+ int64 shape_size = [&shape]() {
+ int64 shape_size;
+ if (LayoutUtil::IsSparseArray(shape)) {
+ shape_size = LayoutUtil::MaxSparseElements(shape.layout());
+ if (shape_size < 0) {
+ return shape_size;
+ }
+ shape_size = MultiplyWithoutOverflow(shape_size, ShapeUtil::Rank(shape));
+ if (shape_size < 0) {
+ return shape_size;
+ }
+ shape_size = MultiplyWithoutOverflow(shape_size, sizeof(int64));
+ if (shape_size < 0) {
+ return shape_size;
+ }
+ }
+
+ shape_size = 1;
+
+ // This is intentionally unconditional: even if the shape is sparse, we want
+ // to verify the densified version has a reasonable size.
+ if (shape.dimensions().empty()) {
+ return shape_size;
+ }
+
+ for (int64 dim : shape.dimensions()) {
+ shape_size = MultiplyWithoutOverflow(shape_size, dim);
+ if (shape_size < 0) {
+ return shape_size;
+ }
+ }
+ shape_size = MultiplyWithoutOverflow(
+ shape_size, ByteSizeOfPrimitiveType(shape.element_type()));
+
+ return shape_size;
+ }();
+
+ if (shape_size < 0) {
+ return InvalidArgument("Shape %s size may overflow int64.",
+ ShapeUtil::HumanString(shape).c_str());
+ }
+
+ VLOG(3) << "Shape size is valid: " << shape_size;
return Status::OK();
}
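(The lambda above folds the element count, and for sparse arrays the index bookkeeping, one multiplication at a time, bailing out as soon as the running product turns negative, which is how MultiplyWithoutOverflow signals overflow as used here. The core pattern as a stand-alone sketch:

#include <vector>

#include "tensorflow/compiler/xla/overflow_util.h"
#include "tensorflow/compiler/xla/types.h"

namespace xla {
int64 CheckedElementCount(const std::vector<int64>& dims) {
  int64 count = 1;
  for (int64 dim : dims) {
    count = MultiplyWithoutOverflow(count, dim);
    if (count < 0) {
      return -1;  // The true product does not fit in int64.
    }
  }
  return count;
}
}  // namespace xla
)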
@@ -1054,12 +1112,41 @@ Status ForEachMutableSubshapeHelper(
for (auto dim : Permute(permutation, shape.dimensions())) {
new_shape.add_dimensions(dim);
}
+
+ // If `shape` has a layout, by contract we choose a new layout such that the
+ // transpose defined by this permutation is a bitcast.
+ //
+ // Some formalism helps to understand the correct way to do this. We're going
+ // to do algebra in the group of permutations of the dimensions of `shape`.
+ //
+ // Since the order of `shape`'s dimensions is not permuted relative to itself,
+ // `shape`'s list of dimensions is isomorphic to the identity I.
+ //
+ // Let `shape`'s layout be L. A layout is a permutation which maps a
+ // minor-to-major physical layout to the order of a shape's logical dims.
+ // Therefore inverse of a layout maps from logical to physical dims, and so
+ // the physical layout of I is simply L'.I = L', where L' is the inverse of L.
+ //
+ // Let the argument `permutation` be P. This is a permutation over `shape`'s
+ // dimensions, so our return value will be a shape with dims P.I = P. Our
+ // goal is to construct a layout permutation L* that we can apply to P such
+ // that the physical dimension ordering of the returned shape is the same
+ // as that of the original shape, namely L'.
+ //
+ // Our returned shape has dims P and layout L*, so its in-memory layout is
+ // L*'.P. Setting this equal to L' and solving for L*, we get:
+ //
+ // L*'.P = L' =>
+ // L*' = L'P' =>
+ // L* = P.L
+ //
if (shape.has_layout()) {
CHECK(LayoutUtil::IsDenseArray(shape));
Layout* new_layout = new_shape.mutable_layout();
new_layout->set_format(DENSE);
new_layout->clear_minor_to_major();
- for (auto index : Permute(permutation, shape.layout().minor_to_major())) {
+ for (auto index : ComposePermutations(
+ permutation, AsInt64Slice(shape.layout().minor_to_major()))) {
new_layout->add_minor_to_major(index);
}
if (shape.layout().padded_dimensions_size() > 0) {
@@ -1069,6 +1156,13 @@ Status ForEachMutableSubshapeHelper(
new_layout->add_padded_dimensions(dim);
}
}
+ // The permutation accepted by TransposeIsBitcast is the inverse of the
+ // permutation here.
+ CHECK(TransposeIsBitcast(shape, new_shape, InversePermutation(permutation)))
+ << "shape=" << HumanStringWithLayout(shape)
+ << ", new_shape=" << HumanStringWithLayout(new_shape)
+ << ", permutation={" << tensorflow::str_util::Join(permutation, ",")
+ << "}";
}
return new_shape;
}
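(A worked instance of the L* = P.L derivation above: take dims {10, 100, 1000} with layout L = {0, 1, 2} (dimension 0 most minor) and P = {1, 2, 0}. The permuted shape has dims {1000, 10, 100} and layout L* = ComposePermutations(P, L) = {1, 2, 0}, so its most-minor dimension is dimension 1 (size 10): the physical ordering 10, 100, 1000 is unchanged and the transpose is a bitcast, which is exactly what the new CHECK enforces. As a sketch:

#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/platform/logging.h"

namespace xla {
void PermuteDimensionsWorkedExample() {
  Shape s = ShapeUtil::MakeShapeWithLayout(F32, {10, 100, 1000}, {0, 1, 2});
  Shape t = ShapeUtil::PermuteDimensions({1, 2, 0}, s);
  // TransposeIsBitcast takes the inverse of PermuteDimensions' permutation.
  CHECK(ShapeUtil::TransposeIsBitcast(s, t, InversePermutation({1, 2, 0})));
}
}  // namespace xla
)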
diff --git a/tensorflow/compiler/xla/shape_util.h b/tensorflow/compiler/xla/shape_util.h
index 02e4f41505..17c1d7b10a 100644
--- a/tensorflow/compiler/xla/shape_util.h
+++ b/tensorflow/compiler/xla/shape_util.h
@@ -110,31 +110,33 @@ class ShapeIndex {
class ShapeIndexView {
public:
ShapeIndexView(const ShapeIndex& shape_index, int64 offset = 0)
- : ShapeIndexView(shape_index.data() + offset,
- shape_index.data() + shape_index.size()) {
+ : indices_(shape_index.data() + offset, shape_index.size() - offset) {
CHECK_LE(offset, shape_index.size());
}
- ShapeIndexView(std::initializer_list<int64> indices)
- : ShapeIndexView(indices.begin(), indices.end()) {}
+ ShapeIndexView(std::initializer_list<int64> indices) : indices_(indices) {}
ShapeIndexView(const ShapeIndexView& other) = default;
using iterator = const int64*;
- iterator begin() const { return begin_; }
- iterator end() const { return end_; }
- int64 size() const { return std::distance(begin_, end_); }
- bool empty() const { return begin_ == end_; }
+ iterator begin() const { return indices_.begin(); }
+ iterator end() const { return indices_.end(); }
+ int64 size() const { return indices_.size(); }
+ bool empty() const { return indices_.empty(); }
int64 front() const {
CHECK(!empty());
- return *begin_;
+ return indices_.front();
}
ShapeIndexView ConsumeFront() const {
- CHECK(!empty());
- auto new_begin = begin_;
- ++new_begin;
- return ShapeIndexView(new_begin, end_);
+ ShapeIndexView result = *this;
+ result.indices_.pop_front();
+ return result;
}
- ShapeIndex ToShapeIndex() const { return ShapeIndex(begin_, end_); }
+ ShapeIndexView ConsumeBack() const {
+ ShapeIndexView result = *this;
+ result.indices_.pop_back();
+ return result;
+ }
+ ShapeIndex ToShapeIndex() const { return ShapeIndex(begin(), end()); }
bool operator==(const ShapeIndexView& other) const;
bool operator!=(const ShapeIndexView& other) const;
@@ -142,10 +144,7 @@ class ShapeIndexView {
string ToString() const;
private:
- ShapeIndexView(iterator begin, iterator end) : begin_(begin), end_(end) {}
-
- iterator begin_;
- iterator end_;
+ tensorflow::gtl::ArraySlice<int64> indices_;
};
std::ostream& operator<<(std::ostream& out, const ShapeIndex& shape_index);
@@ -280,6 +279,9 @@ class ShapeUtil {
// Returns whether the lhs and rhs shapes are identical protobufs.
static bool Equal(const Shape& lhs, const Shape& rhs);
+ // As Equal, but allow one of lhs and rhs to be F16 while the other is F32.
+ static bool EqualIgnoringFpPrecision(const Shape& lhs, const Shape& rhs);
+
// Returns the rank (number of dimensions) of the given shape.
// Precondition: !IsTuple(shape)
static int64 Rank(const Shape& shape);
@@ -527,7 +529,13 @@ class ShapeUtil {
static bool HasDegenerateDimensions(const Shape& shape);
// Permutes the dimensions by the given permutation, so
- // return_value.dimensions[permutation[i]] = argument.dimensions[i]
+ // return_value.dimensions[permutation[i]] = argument.dimensions[i].
+ //
+ // Postcondition: For any valid permutation,
+ //
+ // !HasLayout(shape) ||
+ // TransposeIsBitcast(shape, PermuteDimensions(permutation, shape),
+ // InversePermutation(permutation)).
static Shape PermuteDimensions(tensorflow::gtl::ArraySlice<int64> permutation,
const Shape& shape);
@@ -699,6 +707,10 @@ class ShapeUtil {
static size_t Hash(const Shape& shape);
private:
+ // Validates that the shape size is sane. This makes sure it's safe to do
+ // calculations in int64 without overflowing.
+ static Status ValidateShapeSize(const Shape& shape);
+
// Validates all of the non-layout properties of the shape -- this is a helper
// used by both the layout-optional and layout-required public methods.
static Status ValidateShapeWithOptionalLayoutInternal(const Shape& shape);
diff --git a/tensorflow/compiler/xla/shape_util_test.cc b/tensorflow/compiler/xla/shape_util_test.cc
index 606f7492ce..ed2d16c0e9 100644
--- a/tensorflow/compiler/xla/shape_util_test.cc
+++ b/tensorflow/compiler/xla/shape_util_test.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/shape_util.h"
+#include <numeric>
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/test.h"
@@ -22,12 +23,23 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/lib/strings/strcat.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
+TEST(ShapeUtilTest, ShapeIndexViewTest) {
+ ShapeIndex index = {1, 2, 3, 4};
+ ShapeIndexView index_view(index, 1);
+ EXPECT_EQ(3, index_view.size());
+ EXPECT_EQ(ShapeIndexView({2, 3, 4}), index_view);
+ EXPECT_EQ(ShapeIndexView({3, 4}), index_view.ConsumeFront());
+ EXPECT_EQ(ShapeIndexView({2, 3}), index_view.ConsumeBack());
+}
+
TEST(ShapeUtilTest, GetDimensionHelperCanNegativeIndex) {
Shape matrix = ShapeUtil::MakeShape(F32, {2, 3});
EXPECT_EQ(3, ShapeUtil::GetDimension(matrix, -1));
@@ -242,6 +254,24 @@ TEST(ShapeUtilTest, IncompatibleDifferentElementShapes) {
EXPECT_FALSE(ShapeUtil::Compatible(shape_1, shape_2));
}
+TEST(ShapeUtilTest, EqualIgnoringFpPrecision) {
+ EXPECT_TRUE(ShapeUtil::EqualIgnoringFpPrecision(
+ ShapeUtil::MakeShapeWithLayout(F32, {4, 3}, {0, 1}),
+ ShapeUtil::MakeShapeWithLayout(F16, {4, 3}, {0, 1})));
+}
+
+TEST(ShapeUtilTest, UnequalIgnoringFpPrecision) {
+ EXPECT_FALSE(ShapeUtil::EqualIgnoringFpPrecision(
+ ShapeUtil::MakeShapeWithLayout(F32, {4, 3}, {0, 1}),
+ ShapeUtil::MakeShapeWithLayout(F16, {3, 4}, {0, 1})));
+ EXPECT_FALSE(ShapeUtil::EqualIgnoringFpPrecision(
+ ShapeUtil::MakeShapeWithLayout(F32, {3, 4}, {0, 1}),
+ ShapeUtil::MakeShapeWithLayout(F16, {3, 4}, {1, 0})));
+ EXPECT_FALSE(ShapeUtil::EqualIgnoringFpPrecision(
+ ShapeUtil::MakeShapeWithLayout(F32, {4, 3}, {0, 1}),
+ ShapeUtil::MakeShapeWithLayout(PRED, {4, 3}, {0, 1})));
+}
+
TEST(ShapeUtilTest, CompatibleTuples) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(PRED, {4, 5})});
@@ -803,6 +833,28 @@ TEST(ShapeUtilTest, HasDegenerateDimensions) {
ShapeUtil::HasDegenerateDimensions(ShapeUtil::MakeShape(F32, {3, 0, 5})));
}
+TEST(ShapeUtilTest, PermuteDimensionsLayout) {
+ std::vector<int64> layout(3);
+ std::iota(layout.begin(), layout.end(), 0);
+ do {
+ Shape s = ShapeUtil::MakeShapeWithLayout(F32, {10, 100, 1000}, layout);
+ SCOPED_TRACE(tensorflow::strings::StrCat("s=", ShapeUtil::HumanString(s)));
+
+ std::vector<int64> permutation(3);
+ std::iota(permutation.begin(), permutation.end(), 0);
+ do {
+ SCOPED_TRACE(tensorflow::strings::StrCat(
+ "permutation=", tensorflow::str_util::Join(permutation, ",")));
+
+ // TransposeIsBitcast takes the inverse of the permutation that
+ // PermuteDimensions takes.
+ EXPECT_TRUE(ShapeUtil::TransposeIsBitcast(
+ s, ShapeUtil::PermuteDimensions(permutation, s),
+ InversePermutation(permutation)));
+ } while (std::next_permutation(permutation.begin(), permutation.end()));
+ } while (std::next_permutation(layout.begin(), layout.end()));
+}
+
TEST(AlgebraicSimplifierTest, ReshapeIsBitcast_3x2x2_6x2_Dim0IsMostMinor) {
EXPECT_FALSE(ShapeUtil::ReshapeIsBitcast(
ShapeUtil::MakeShapeWithLayout(F32, {3, 2, 2}, {0, 1, 2}),
diff --git a/tensorflow/compiler/xla/statusor.h b/tensorflow/compiler/xla/statusor.h
index 0e1387c939..a32e2ad985 100644
--- a/tensorflow/compiler/xla/statusor.h
+++ b/tensorflow/compiler/xla/statusor.h
@@ -12,297 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-
-// StatusOr<T> is the union of a Status object and a T object. StatusOr models
-// the concept of an object that is either a value, or an error Status
-// explaining why such a value is not present. To this end, StatusOr<T> does not
-// allow its Status value to be Status::OK.
-//
-// The primary use-case for StatusOr<T> is as the return value of a
-// function which may fail.
-//
-// Example client usage for a StatusOr<T>, where T is not a pointer:
-//
-// StatusOr<float> result = DoBigCalculationThatCouldFail();
-// if (result.ok()) {
-// float answer = result.ValueOrDie();
-// printf("Big calculation yielded: %f", answer);
-// } else {
-// LOG(ERROR) << result.status();
-// }
-//
-// Example client usage for a StatusOr<T*>:
-//
-// StatusOr<Foo*> result = FooFactory::MakeNewFoo(arg);
-// if (result.ok()) {
-// std::unique_ptr<Foo> foo(result.ValueOrDie());
-// foo->DoSomethingCool();
-// } else {
-// LOG(ERROR) << result.status();
-// }
-//
-// Example client usage for a StatusOr<std::unique_ptr<T>>:
-//
-// StatusOr<std::unique_ptr<Foo>> result = FooFactory::MakeNewFoo(arg);
-// if (result.ok()) {
-// std::unique_ptr<Foo> foo = std::move(result.ValueOrDie());
-// foo->DoSomethingCool();
-// } else {
-// LOG(ERROR) << result.status();
-// }
-//
-// Example factory implementation returning StatusOr<T*>:
-//
-// StatusOr<Foo*> FooFactory::MakeNewFoo(int arg) {
-// if (arg <= 0) {
-// return tensorflow::InvalidArgument("Arg must be positive");
-// } else {
-// return new Foo(arg);
-// }
-// }
-//
-// Note that the assignment operators require that destroying the currently
-// stored value cannot invalidate the argument; in other words, the argument
-// cannot be an alias for the current value, or anything owned by the current
-// value.
#ifndef TENSORFLOW_COMPILER_XLA_STATUSOR_H_
#define TENSORFLOW_COMPILER_XLA_STATUSOR_H_
#include "tensorflow/compiler/xla/status.h"
-#include "tensorflow/compiler/xla/statusor_internals.h"
-#include "tensorflow/core/platform/macros.h"
+#include "tensorflow/stream_executor/lib/statusor.h"
namespace xla {
-#if defined(__clang__)
-// Only clang supports warn_unused_result as a type annotation.
-template <typename T>
-class TF_MUST_USE_RESULT StatusOr;
-#endif
-
-template <typename T>
-class StatusOr : private internal_statusor::StatusOrData<T>,
- private internal_statusor::TraitsBase<
- std::is_copy_constructible<T>::value,
- std::is_move_constructible<T>::value> {
- template <typename U>
- friend class StatusOr;
-
- typedef internal_statusor::StatusOrData<T> Base;
-
- public:
- typedef T element_type;
-
- // Constructs a new StatusOr with Status::UNKNOWN status. This is marked
- // 'explicit' to try to catch cases like 'return {};', where people think
- // StatusOr<std::vector<int>> will be initialized with an empty vector,
- // instead of a Status::UNKNOWN status.
- explicit StatusOr();
-
- // StatusOr<T> will be copy constructible/assignable if T is copy
- // constructible.
- StatusOr(const StatusOr&) = default;
- StatusOr& operator=(const StatusOr&) = default;
-
- // StatusOr<T> will be move constructible/assignable if T is move
- // constructible.
- StatusOr(StatusOr&&) = default;
- StatusOr& operator=(StatusOr&&) = default;
-
- // Conversion copy/move constructor, T must be convertible from U.
- template <typename U, typename std::enable_if<
- std::is_convertible<U, T>::value>::type* = nullptr>
- StatusOr(const StatusOr<U>& other);
- template <typename U, typename std::enable_if<
- std::is_convertible<U, T>::value>::type* = nullptr>
- StatusOr(StatusOr<U>&& other);
-
- // Conversion copy/move assignment operator, T must be convertible from U.
- template <typename U, typename std::enable_if<
- std::is_convertible<U, T>::value>::type* = nullptr>
- StatusOr& operator=(const StatusOr<U>& other);
- template <typename U, typename std::enable_if<
- std::is_convertible<U, T>::value>::type* = nullptr>
- StatusOr& operator=(StatusOr<U>&& other);
-
- // Constructs a new StatusOr with the given value. After calling this
- // constructor, calls to ValueOrDie() will succeed, and calls to status() will
- // return OK.
- //
- // NOTE: Not explicit - we want to use StatusOr<T> as a return type
- // so it is convenient and sensible to be able to do 'return T()'
- // when the return type is StatusOr<T>.
- //
- // REQUIRES: T is copy constructible.
- StatusOr(const T& value);
-
- // Constructs a new StatusOr with the given non-ok status. After calling
- // this constructor, calls to ValueOrDie() will CHECK-fail.
- //
- // NOTE: Not explicit - we want to use StatusOr<T> as a return
- // value, so it is convenient and sensible to be able to do 'return
- // Status()' when the return type is StatusOr<T>.
- //
- // REQUIRES: !status.ok(). This requirement is DCHECKed.
- // In optimized builds, passing Status::OK() here will have the effect
- // of passing tensorflow::error::INTERNAL as a fallback.
- StatusOr(const Status& status);
- StatusOr& operator=(const Status& status);
-
- // TODO(b/62186997): Add operator=(T) overloads.
-
- // Similar to the `const T&` overload.
- //
- // REQUIRES: T is move constructible.
- StatusOr(T&& value);
-
- // RValue versions of the operations declared above.
- StatusOr(Status&& status);
- StatusOr& operator=(Status&& status);
-
- // Returns this->status().ok()
- bool ok() const { return this->status_.ok(); }
-
- // Returns a reference to our status. If this contains a T, then
- // returns Status::OK().
- const Status& status() const &;
- Status status() &&;
-
- // Returns a reference to our current value, or CHECK-fails if !this->ok().
- //
- // Note: for value types that are cheap to copy, prefer simple code:
- //
- // T value = statusor.ValueOrDie();
- //
- // Otherwise, if the value type is expensive to copy, but can be left
- // in the StatusOr, simply assign to a reference:
- //
- // T& value = statusor.ValueOrDie(); // or `const T&`
- //
- // Otherwise, if the value type supports an efficient move, it can be
- // used as follows:
- //
- // T value = std::move(statusor).ValueOrDie();
- //
- // The std::move on statusor instead of on the whole expression enables
- // warnings about possible uses of the statusor object after the move.
- // C++ style guide waiver for ref-qualified overloads granted in cl/143176389
- // See go/ref-qualifiers for more details on such overloads.
- const T& ValueOrDie() const &;
- T& ValueOrDie() &;
- const T&& ValueOrDie() const &&;
- T&& ValueOrDie() &&;
-
- T ConsumeValueOrDie() { return std::move(ValueOrDie()); }
-
- // Ignores any errors. This method does nothing except potentially suppress
- // complaints from any tools that are checking that errors are not dropped on
- // the floor.
- void IgnoreError() const;
-};
-
-////////////////////////////////////////////////////////////////////////////////
-// Implementation details for StatusOr<T>
-
-template <typename T>
-StatusOr<T>::StatusOr() : Base(Status(tensorflow::error::UNKNOWN, "")) {}
-
-template <typename T>
-StatusOr<T>::StatusOr(const T& value) : Base(value) {}
-
-template <typename T>
-StatusOr<T>::StatusOr(const Status& status) : Base(status) {}
-
-template <typename T>
-StatusOr<T>& StatusOr<T>::operator=(const Status& status) {
- this->Assign(status);
- return *this;
-}
-
-template <typename T>
-StatusOr<T>::StatusOr(T&& value) : Base(std::move(value)) {}
-
-template <typename T>
-StatusOr<T>::StatusOr(Status&& status) : Base(std::move(status)) {}
-
-template <typename T>
-StatusOr<T>& StatusOr<T>::operator=(Status&& status) {
- this->Assign(std::move(status));
- return *this;
-}
-
-template <typename T>
-template <typename U,
- typename std::enable_if<std::is_convertible<U, T>::value>::type*>
-inline StatusOr<T>::StatusOr(const StatusOr<U>& other)
- : Base(static_cast<const typename StatusOr<U>::Base&>(other)) {}
-
-template <typename T>
-template <typename U,
- typename std::enable_if<std::is_convertible<U, T>::value>::type*>
-inline StatusOr<T>& StatusOr<T>::operator=(const StatusOr<U>& other) {
- if (other.ok())
- this->Assign(other.ValueOrDie());
- else
- this->Assign(other.status());
- return *this;
-}
-
-template <typename T>
-template <typename U,
- typename std::enable_if<std::is_convertible<U, T>::value>::type*>
-inline StatusOr<T>::StatusOr(StatusOr<U>&& other)
- : Base(static_cast<typename StatusOr<U>::Base&&>(other)) {}
-
-template <typename T>
-template <typename U,
- typename std::enable_if<std::is_convertible<U, T>::value>::type*>
-inline StatusOr<T>& StatusOr<T>::operator=(StatusOr<U>&& other) {
- if (other.ok()) {
- this->Assign(std::move(other).ValueOrDie());
- } else {
- this->Assign(std::move(other).status());
- }
- return *this;
-}
-
-template <typename T>
-const Status& StatusOr<T>::status() const & {
- return this->status_;
-}
-template <typename T>
-Status StatusOr<T>::status() && {
- return ok() ? Status::OK() : std::move(this->status_);
-}
-
-template <typename T>
-const T& StatusOr<T>::ValueOrDie() const & {
- this->EnsureOk();
- return this->data_;
-}
-
-template <typename T>
-T& StatusOr<T>::ValueOrDie() & {
- this->EnsureOk();
- return this->data_;
-}
-
-template <typename T>
-const T&& StatusOr<T>::ValueOrDie() const && {
- this->EnsureOk();
- return std::move(this->data_);
-}
-
-template <typename T>
-T&& StatusOr<T>::ValueOrDie() && {
- this->EnsureOk();
- return std::move(this->data_);
-}
-
+// Use stream_executor's StatusOr so we don't duplicate code.
template <typename T>
-void StatusOr<T>::IgnoreError() const {
- // no-op
-}
+using StatusOr = ::stream_executor::port::StatusOr<T>;
} // namespace xla
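(Since the alias preserves the interface documented in the comments this patch deletes, existing call sites keep working unchanged. A minimal usage sketch; function names are illustrative:

#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/platform/logging.h"

namespace xla {
StatusOr<float> DoBigCalculationThatCouldFail() {
  return InvalidArgument("not implemented in this sketch");
}

void Caller() {
  StatusOr<float> result = DoBigCalculationThatCouldFail();
  if (result.ok()) {
    float answer = result.ValueOrDie();
    LOG(INFO) << "Big calculation yielded: " << answer;
  } else {
    LOG(ERROR) << result.status();
  }
}
}  // namespace xla
)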
diff --git a/tensorflow/compiler/xla/tests/BUILD b/tensorflow/compiler/xla/tests/BUILD
index b76830f666..6a75aa6794 100644
--- a/tensorflow/compiler/xla/tests/BUILD
+++ b/tensorflow/compiler/xla/tests/BUILD
@@ -65,6 +65,7 @@ cc_library(
srcs = ["test_utils.cc"],
hdrs = ["test_utils.h"],
deps = [
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:util",
@@ -88,6 +89,7 @@ cc_library(
"//tensorflow/compiler/xla:array3d",
"//tensorflow/compiler/xla:array4d",
"//tensorflow/compiler/xla:error_spec",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_comparison",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:test",
@@ -179,6 +181,7 @@ cc_library(
"//tensorflow/compiler/xla:array3d",
"//tensorflow/compiler/xla:array4d",
"//tensorflow/compiler/xla:execution_options_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
@@ -209,6 +212,7 @@ cc_library(
deps = [
":codegen_test_base",
":filecheck",
+ "//tensorflow/compiler/xla/service:hlo_parser",
"//tensorflow/compiler/xla/service:llvm_compiler",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
"//tensorflow/core:test",
@@ -302,7 +306,7 @@ xla_test(
"enable_for_xla_interpreter",
],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:test",
@@ -345,7 +349,7 @@ xla_test(
"enable_for_xla_interpreter",
],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
@@ -406,7 +410,7 @@ xla_test(
tags = ["enable_for_xla_interpreter"],
deps = [
"//tensorflow/compiler/xla:array2d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:test",
@@ -435,7 +439,7 @@ xla_test(
tags = ["optonly"],
deps = [
"//tensorflow/compiler/xla:array2d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:xla_data_proto",
@@ -531,6 +535,7 @@ xla_test(
srcs = ["scalar_computations_test.cc"],
shard_count = 32,
deps = [
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
@@ -573,7 +578,7 @@ xla_test(
"enable_for_xla_interpreter",
],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:test",
@@ -599,7 +604,7 @@ xla_test(
"//tensorflow/compiler/xla:array2d",
"//tensorflow/compiler/xla:array3d",
"//tensorflow/compiler/xla:array4d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:test",
@@ -645,7 +650,7 @@ xla_test(
tags = ["enable_for_xla_interpreter"],
deps = [
"//tensorflow/compiler/xla:array2d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:test",
@@ -697,6 +702,7 @@ xla_test(
"//tensorflow/compiler/xla:execution_options_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:test",
+ "//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/compiler/xla/service:hlo_parser",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
],
@@ -763,6 +769,7 @@ xla_test(
"//tensorflow/compiler/xla:array2d",
"//tensorflow/compiler/xla:array3d",
"//tensorflow/compiler/xla:array4d",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/client:local_client",
@@ -779,7 +786,7 @@ xla_test(
CONVOLUTION_TEST_DEPS = [
"//tensorflow/compiler/xla:array2d",
"//tensorflow/compiler/xla:array4d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:reference_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
@@ -826,7 +833,7 @@ xla_test(
deps = [
"//tensorflow/compiler/xla:array3d",
"//tensorflow/compiler/xla:array4d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:reference_util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/client:local_client",
@@ -873,7 +880,7 @@ xla_test(
":test_utils",
"//tensorflow/compiler/xla:array2d",
"//tensorflow/compiler/xla:array4d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:reference_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
@@ -885,6 +892,7 @@ xla_test(
"//tensorflow/compiler/xla/client:global_data",
"//tensorflow/compiler/xla/client:local_client",
"//tensorflow/compiler/xla/client/lib:arithmetic",
+ "//tensorflow/compiler/xla/client/lib:math",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/compiler/xla/client/xla_client:xla_computation",
"//tensorflow/compiler/xla/service:hlo",
@@ -905,7 +913,7 @@ xla_test(
":test_utils",
"//tensorflow/compiler/xla:array2d",
"//tensorflow/compiler/xla:array4d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:reference_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
@@ -938,7 +946,7 @@ xla_test(
],
deps = [
":test_utils",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:test_helpers",
@@ -1029,6 +1037,7 @@ xla_test(
],
deps = [
"//tensorflow/compiler/xla:array2d",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
@@ -1077,6 +1086,7 @@ xla_test(
deps = [
"//tensorflow/compiler/xla:array2d",
"//tensorflow/compiler/xla:array4d",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:reference_util",
"//tensorflow/compiler/xla:shape_util",
@@ -1147,7 +1157,7 @@ xla_test(
],
deps = [
"//tensorflow/compiler/xla:array2d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:reference_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
@@ -1174,7 +1184,7 @@ xla_test(
deps = [
":client_library_test_base",
"//tensorflow/compiler/xla:array2d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
@@ -1226,6 +1236,7 @@ xla_test(
"enable_for_xla_interpreter",
],
deps = [
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test_helpers",
@@ -1244,10 +1255,12 @@ xla_test(
name = "custom_call_test",
srcs = ["custom_call_test.cc"],
deps = [
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service/cpu:custom_call_target_registry",
"//tensorflow/compiler/xla/tests:client_library_test_base",
@@ -1288,6 +1301,7 @@ xla_test(
deps = [
"//tensorflow/compiler/xla:array2d",
"//tensorflow/compiler/xla:array4d",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:test",
@@ -1365,7 +1379,7 @@ xla_test(
],
deps = [
"//tensorflow/compiler/xla:array2d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:reference_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
@@ -1388,7 +1402,7 @@ xla_test(
name = "prng_test",
srcs = ["prng_test.cc"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:util",
@@ -1413,6 +1427,7 @@ xla_test(
deps = [
"//tensorflow/compiler/xla:array2d",
"//tensorflow/compiler/xla:array4d",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:reference_util",
"//tensorflow/compiler/xla:shape_util",
@@ -1527,7 +1542,7 @@ xla_test(
name = "cross_replica_sum_test",
srcs = ["cross_replica_sum_test.cc"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla:test_helpers",
@@ -1571,7 +1586,7 @@ xla_test(
name = "compilation_cache_test",
srcs = ["compilation_cache_test.cc"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:xla_data_proto",
@@ -1611,7 +1626,7 @@ xla_test(
name = "compute_constant_test",
srcs = ["compute_constant_test.cc"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
@@ -1686,7 +1701,7 @@ xla_test(
"enable_for_xla_interpreter",
],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:protobuf_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
@@ -1711,7 +1726,7 @@ xla_test(
"enable_for_xla_interpreter",
],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
@@ -1728,6 +1743,7 @@ tf_cc_test(
srcs = ["llvm_compiler_test.cc"],
tags = ["requires-gpu-sm35"],
deps = [
+ "//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:test_helpers",
"//tensorflow/compiler/xla/service:backend",
"//tensorflow/compiler/xla/service:cpu_plugin",
@@ -1748,7 +1764,7 @@ xla_test(
name = "round_trip_packed_literal_test",
srcs = ["round_trip_packed_literal_test.cc"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:packed_literal_reader",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
@@ -1771,7 +1787,7 @@ xla_test(
],
deps = [
"//tensorflow/compiler/xla:array2d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
@@ -1780,6 +1796,7 @@ xla_test(
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/compiler/xla/client/xla_client:xla_computation",
"//tensorflow/compiler/xla/service:hlo",
+ "//tensorflow/compiler/xla/service:hlo_parser",
"//tensorflow/compiler/xla/service:hlo_runner",
"//tensorflow/compiler/xla/service:platform_util",
"//tensorflow/compiler/xla/tests:client_library_test_base",
@@ -1798,7 +1815,7 @@ xla_test(
srcs = ["multioutput_fusion_test.cc"],
deps = [
"//tensorflow/compiler/xla:array2d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
@@ -1838,7 +1855,7 @@ xla_test(
name = "local_client_allocation_test",
srcs = ["local_client_allocation_test.cc"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla/client:local_client",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
@@ -1861,7 +1878,7 @@ xla_test(
shard_count = 30,
tags = ["optonly"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:test",
@@ -1907,7 +1924,7 @@ xla_test(
srcs = ["round_trip_transfer_test.cc"],
deps = [
"//tensorflow/compiler/xla:array4d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:xla_data_proto",
@@ -1928,7 +1945,7 @@ xla_test(
deps = [
"//tensorflow/compiler/xla:array2d",
"//tensorflow/compiler/xla:array4d",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:reference_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
@@ -1976,7 +1993,7 @@ xla_test(
":literal_test_util",
":local_client_test_base",
":xla_internal_test_main",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:types",
@@ -2038,6 +2055,7 @@ xla_test(
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/compiler/xla/client/xla_client:xla_computation",
+ "//tensorflow/compiler/xla/service:hlo_parser",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:test",
],
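
Most of the BUILD edits above are one mechanical substitution: the old monolithic :literal_util target has been split, with the Literal class now provided by :literal and the static LiteralUtil factory helpers staying in :literal_util, so each test depends only on what it actually uses. A sketch of the matching source-level change, assuming the post-split headers (the factory call moves from Literal::CreateR1 to LiteralUtil::CreateR1, as in the test diff below):

#include <memory>

#include "tensorflow/compiler/xla/literal.h"       // class Literal   (:literal)
#include "tensorflow/compiler/xla/literal_util.h"  // LiteralUtil     (:literal_util)

namespace xla {

std::unique_ptr<Literal> MakeExampleLiteral() {
  // The factory methods were moved off the Literal class onto LiteralUtil.
  return LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f});
}

}  // namespace xla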
diff --git a/tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc b/tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc
index 8ac771ae5a..3ae96fa1bc 100644
--- a/tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc
+++ b/tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc
@@ -26,7 +26,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
@@ -51,16 +51,16 @@ class ArrayElementwiseOpTestParamCount
XLA_TEST_F(ArrayElementwiseOpTest, NegConstantZeroElementF32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({});
- builder.Neg(a);
+ auto a = ConstantR1<float>(&builder, {});
+ Neg(a);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
XLA_TEST_F(ArrayElementwiseOpTest, NegConstantF32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({-2.5f, 3.14f, 2.25f, -10.0f, 6.0f});
- builder.Neg(a);
+ auto a = ConstantR1<float>(&builder, {-2.5f, 3.14f, 2.25f, -10.0f, 6.0f});
+ Neg(a);
ComputeAndCompareR1<float>(&builder, {2.5f, -3.14f, -2.25f, 10.0f, -6.0f}, {},
error_spec_);
@@ -68,10 +68,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, NegConstantF32) {
XLA_TEST_F(ArrayElementwiseOpTest, NegConstantS32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({-1, 0, 1, 324,
- std::numeric_limits<int32>::min(),
- std::numeric_limits<int32>::max()});
- builder.Neg(a);
+ auto a = ConstantR1<int32>(&builder,
+ {-1, 0, 1, 324, std::numeric_limits<int32>::min(),
+ std::numeric_limits<int32>::max()});
+ Neg(a);
// -min == min for int32 due to an overflow. In C++ it is undefined behavior
// to do this calculation. For XLA we have not specified that, so it
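
The comment above (cut off at the hunk boundary) refers to two's-complement wraparound: -INT32_MIN is not representable in int32, and hardware that wraps returns INT32_MIN itself. A standalone illustration, computed through uint32_t so the arithmetic stays defined in C++:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t min_bits = 0x80000000u;    // bit pattern of INT32_MIN
  const uint32_t negated = ~min_bits + 1u;  // two's-complement negation
  std::printf("%08x\n", negated);           // prints 80000000: -min wraps to min
  return 0;
}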
@@ -84,17 +84,17 @@ XLA_TEST_F(ArrayElementwiseOpTest, NegConstantS32) {
XLA_TEST_F(ArrayElementwiseOpTest, NegConstantZeroElementC64) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<complex64>({});
- builder.Neg(a);
+ auto a = ConstantR1<complex64>(&builder, {});
+ Neg(a);
ComputeAndCompareR1<complex64>(&builder, {}, {}, error_spec_);
}
XLA_TEST_F(ArrayElementwiseOpTest, NegConstantC64) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<complex64>(
- {{-2.5f, 1.0f}, {0.0f, 3.14f}, {2.25f, -1.0f}, {-10.0f, 0.0f}});
- builder.Neg(a);
+ auto a = ConstantR1<complex64>(
+ &builder, {{-2.5f, 1.0f}, {0.0f, 3.14f}, {2.25f, -1.0f}, {-10.0f, 0.0f}});
+ Neg(a);
ComputeAndCompareR1<complex64>(
&builder, {{2.5f, -1.0f}, {0.0f, -3.14f}, {-2.25f, 1.0f}, {10.0f, 0.0f}},
@@ -103,16 +103,17 @@ XLA_TEST_F(ArrayElementwiseOpTest, NegConstantC64) {
XLA_TEST_F(ArrayElementwiseOpTest, NegConstantS64) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int64>({
- -1,
- 1,
- 0,
- 0x12345678,
- static_cast<int64>(0xffffffff12345678l),
- static_cast<int64>(0x8000000000000000LL),
- static_cast<int64>(0x8000000000000001LL),
- });
- builder.Neg(a);
+ auto a =
+ ConstantR1<int64>(&builder, {
+ -1,
+ 1,
+ 0,
+ 0x12345678,
+ static_cast<int64>(0xffffffff12345678l),
+ static_cast<int64>(0x8000000000000000LL),
+ static_cast<int64>(0x8000000000000001LL),
+ });
+ Neg(a);
LOG(INFO) << -static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
ComputeAndCompareR1<int64>(&builder,
@@ -130,8 +131,8 @@ XLA_TEST_F(ArrayElementwiseOpTest, NegConstantS64) {
XLA_TEST_F(ArrayElementwiseOpTest, IsFiniteZeroElementF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({});
- builder.IsFinite(a);
+ auto a = ConstantR1<float>(&builder, {});
+ IsFinite(a);
ComputeAndCompareR1<bool>(&builder, {}, {});
}
@@ -141,21 +142,21 @@ static const float kNonCanonicalNaN = tensorflow::bit_cast<float>(0x7FD01234);
XLA_TEST_F(ArrayElementwiseOpTest, IsFiniteScalarF32) {
XlaBuilder builder(TestName());
- builder.IsFinite(builder.ConstantR0<float>(NAN));
+ IsFinite(ConstantR0<float>(&builder, NAN));
ComputeAndCompareR0<bool>(&builder, false, {});
EXPECT_TRUE(std::isnan(kNonCanonicalNaN));
- builder.IsFinite(builder.ConstantR0<float>(kNonCanonicalNaN));
+ IsFinite(ConstantR0<float>(&builder, kNonCanonicalNaN));
ComputeAndCompareR0<bool>(&builder, false, {});
const float inf = std::numeric_limits<float>::infinity();
- builder.IsFinite(builder.ConstantR0<float>(inf));
+ IsFinite(ConstantR0<float>(&builder, inf));
ComputeAndCompareR0<bool>(&builder, false, {});
- builder.IsFinite(builder.ConstantR0<float>(-inf));
+ IsFinite(ConstantR0<float>(&builder, -inf));
ComputeAndCompareR0<bool>(&builder, false, {});
- builder.IsFinite(builder.ConstantR0<float>(0.0f));
+ IsFinite(ConstantR0<float>(&builder, 0.0f));
ComputeAndCompareR0<bool>(&builder, true, {});
}
@@ -163,9 +164,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, IsFiniteR1F32s) {
XlaBuilder builder(TestName());
const float inf = std::numeric_limits<float>::infinity();
EXPECT_TRUE(std::isnan(kNonCanonicalNaN));
- auto a = builder.ConstantR1<float>(
- {{NAN, 7.0f, kNonCanonicalNaN, -1.0f, inf, -inf}});
- builder.IsFinite(a);
+ auto a = ConstantR1<float>(&builder,
+ {{NAN, 7.0f, kNonCanonicalNaN, -1.0f, inf, -inf}});
+ IsFinite(a);
ComputeAndCompareR1<bool>(&builder, {false, true, false, true, false, false},
{});
@@ -173,9 +174,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, IsFiniteR1F32s) {
XLA_TEST_F(ArrayElementwiseOpTest, AddTwoConstantF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({-2.5f, 3.14f, 2.25f, -10.0f, 6.0f});
- auto b = builder.ConstantR1<float>({100.0f, 3.13f, 2.75f, 10.5f, -999.0f});
- builder.Add(a, b);
+ auto a = ConstantR1<float>(&builder, {-2.5f, 3.14f, 2.25f, -10.0f, 6.0f});
+ auto b = ConstantR1<float>(&builder, {100.0f, 3.13f, 2.75f, 10.5f, -999.0f});
+ Add(a, b);
ComputeAndCompareR1<float>(&builder, {97.5f, 6.27f, 5.0f, 0.5f, -993.0f}, {},
error_spec_);
@@ -183,20 +184,20 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddTwoConstantF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, AddTwoConstantZeroElementF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({});
- auto b = builder.ConstantR1<float>({});
- builder.Add(a, b);
+ auto a = ConstantR1<float>(&builder, {});
+ auto b = ConstantR1<float>(&builder, {});
+ Add(a, b);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
XLA_TEST_F(ArrayElementwiseOpTest, AddTwoConstantC64s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<complex64>(
- {{-2.5f, 0.0f}, {0.0f, 3.14f}, {2.25f, 0.0f}, {1.0f, -10.0f}});
- auto b = builder.ConstantR1<complex64>(
- {{100.0f, 0.0f}, {3.13f, 0.0f}, {2.75f, 1.0f}, {-2.0f, 10.5f}});
- builder.Add(a, b);
+ auto a = ConstantR1<complex64>(
+ &builder, {{-2.5f, 0.0f}, {0.0f, 3.14f}, {2.25f, 0.0f}, {1.0f, -10.0f}});
+ auto b = ConstantR1<complex64>(
+ &builder, {{100.0f, 0.0f}, {3.13f, 0.0f}, {2.75f, 1.0f}, {-2.0f, 10.5f}});
+ Add(a, b);
ComputeAndCompareR1<complex64>(
&builder, {97.5f, {3.13f, 3.14f}, {5.0f, 1.0f}, {-1.0f, 0.5f}}, {},
@@ -205,9 +206,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddTwoConstantC64s) {
XLA_TEST_F(ArrayElementwiseOpTest, AddTwoConstantZeroElementC64s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<complex64>({});
- auto b = builder.ConstantR1<complex64>({});
- builder.Add(a, b);
+ auto a = ConstantR1<complex64>(&builder, {});
+ auto b = ConstantR1<complex64>(&builder, {});
+ Add(a, b);
ComputeAndCompareR1<complex64>(&builder, {}, {}, error_spec_);
}
@@ -224,8 +225,8 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddTwoConstantU64s) {
0x8000000000000000LL,
0x8000000000000000LL,
1};
- std::unique_ptr<Literal> lhs_literal = Literal::CreateR1<uint64>({lhs});
- auto lhs_param = b.Parameter(0, lhs_literal->shape(), "lhs_param");
+ std::unique_ptr<Literal> lhs_literal = LiteralUtil::CreateR1<uint64>({lhs});
+ auto lhs_param = Parameter(&b, 0, lhs_literal->shape(), "lhs_param");
std::unique_ptr<GlobalData> lhs_data =
client_->TransferToServer(*lhs_literal).ConsumeValueOrDie();
@@ -238,12 +239,12 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddTwoConstantU64s) {
0,
1,
0x8000000000000000LL};
- std::unique_ptr<Literal> rhs_literal = Literal::CreateR1<uint64>({rhs});
- auto rhs_param = b.Parameter(1, rhs_literal->shape(), "rhs_param");
+ std::unique_ptr<Literal> rhs_literal = LiteralUtil::CreateR1<uint64>({rhs});
+ auto rhs_param = Parameter(&b, 1, rhs_literal->shape(), "rhs_param");
std::unique_ptr<GlobalData> rhs_data =
client_->TransferToServer(*rhs_literal).ConsumeValueOrDie();
- b.Add(lhs_param, rhs_param);
+ Add(lhs_param, rhs_param);
std::vector<uint64> expected(lhs.size());
for (int64 i = 0; i < lhs.size(); ++i) {
@@ -264,8 +265,8 @@ XLA_TEST_F(ArrayElementwiseOpTest, SubTwoConstantS64s) {
1,
0,
-1};
- std::unique_ptr<Literal> lhs_literal = Literal::CreateR1<int64>({lhs});
- auto lhs_param = b.Parameter(0, lhs_literal->shape(), "lhs_param");
+ std::unique_ptr<Literal> lhs_literal = LiteralUtil::CreateR1<int64>({lhs});
+ auto lhs_param = Parameter(&b, 0, lhs_literal->shape(), "lhs_param");
std::unique_ptr<GlobalData> lhs_data =
client_->TransferToServer(*lhs_literal).ConsumeValueOrDie();
@@ -277,12 +278,12 @@ XLA_TEST_F(ArrayElementwiseOpTest, SubTwoConstantS64s) {
0x7FFFFFFFFFFFFFFLL,
0x7FFFFFFFFFFFFFFFLL,
0x7FFFFFFFFFFFFFFFLL};
- std::unique_ptr<Literal> rhs_literal = Literal::CreateR1<int64>({rhs});
- auto rhs_param = b.Parameter(1, rhs_literal->shape(), "rhs_param");
+ std::unique_ptr<Literal> rhs_literal = LiteralUtil::CreateR1<int64>({rhs});
+ auto rhs_param = Parameter(&b, 1, rhs_literal->shape(), "rhs_param");
std::unique_ptr<GlobalData> rhs_data =
client_->TransferToServer(*rhs_literal).ConsumeValueOrDie();
- auto sub = b.Sub(lhs_param, rhs_param);
+ Sub(lhs_param, rhs_param);
std::vector<int64> expected(lhs.size());
for (int64 i = 0; i < lhs.size(); ++i) {
@@ -302,26 +303,26 @@ TEST_P(ArrayElementwiseOpTestParamCount, AddManyValues) {
b_values.push_back(2 * i / static_cast<float>(count + 2));
}
- std::unique_ptr<Literal> a_literal = Literal::CreateR1<float>({a_values});
+ std::unique_ptr<Literal> a_literal = LiteralUtil::CreateR1<float>({a_values});
std::unique_ptr<GlobalData> a_data =
client_->TransferToServer(*a_literal).ConsumeValueOrDie();
- auto a_constant = builder.ConstantR1<float>(a_values);
- auto a_param = builder.Parameter(0, a_literal->shape(), "a_param");
+ auto a_constant = ConstantR1<float>(&builder, a_values);
+ auto a_param = Parameter(&builder, 0, a_literal->shape(), "a_param");
- std::unique_ptr<Literal> b_literal = Literal::CreateR1<float>({b_values});
+ std::unique_ptr<Literal> b_literal = LiteralUtil::CreateR1<float>({b_values});
std::unique_ptr<GlobalData> b_data =
client_->TransferToServer(*b_literal).ConsumeValueOrDie();
- auto b_constant = builder.Parameter(1, a_literal->shape(), "b_param");
- auto b_param = builder.ConstantR1<float>(b_values);
+ auto b_constant = Parameter(&builder, 1, a_literal->shape(), "b_param");
+ auto b_param = ConstantR1<float>(&builder, b_values);
- auto sum1 = builder.Add(a_constant, b_constant);
- auto sum2 = builder.Add(a_constant, b_param);
- auto sum3 = builder.Add(a_param, b_constant);
- auto sum4 = builder.Add(a_param, b_param);
+ auto sum1 = Add(a_constant, b_constant);
+ auto sum2 = Add(a_constant, b_param);
+ auto sum3 = Add(a_param, b_constant);
+ auto sum4 = Add(a_param, b_param);
- auto sum = builder.Add(sum1, sum2);
- sum = builder.Add(sum, sum3);
- sum = builder.Add(sum, sum4);
+ auto sum = Add(sum1, sum2);
+ sum = Add(sum, sum3);
+ sum = Add(sum, sum4);
std::vector<float> expected;
for (int64 i = 0; i < count; ++i) {
@@ -334,9 +335,9 @@ TEST_P(ArrayElementwiseOpTestParamCount, AddManyValues) {
XLA_TEST_F(ArrayElementwiseOpTest, SubTwoConstantF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({-2.5f, 3.14f, 2.25f, -10.0f, 6.0f});
- auto b = builder.ConstantR1<float>({100.0f, 3.13f, 2.75f, 10.5f, -999.0f});
- builder.Sub(a, b);
+ auto a = ConstantR1<float>(&builder, {-2.5f, 3.14f, 2.25f, -10.0f, 6.0f});
+ auto b = ConstantR1<float>(&builder, {100.0f, 3.13f, 2.75f, 10.5f, -999.0f});
+ Sub(a, b);
ComputeAndCompareR1<float>(&builder, {-102.5f, 0.01f, -0.5f, -20.5f, 1005.0f},
{}, error_spec_);
@@ -344,38 +345,38 @@ XLA_TEST_F(ArrayElementwiseOpTest, SubTwoConstantF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, SubTwoConstantZeroElementF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({});
- auto b = builder.ConstantR1<float>({});
- builder.Sub(a, b);
+ auto a = ConstantR1<float>(&builder, {});
+ auto b = ConstantR1<float>(&builder, {});
+ Sub(a, b);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
XLA_TEST_F(ArrayElementwiseOpTest, SubTwoConstantS32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({-1, 0, 2, 1000000000});
- auto b = builder.ConstantR1<int32>({-1, 2, 1, -1});
- builder.Sub(a, b);
+ auto a = ConstantR1<int32>(&builder, {-1, 0, 2, 1000000000});
+ auto b = ConstantR1<int32>(&builder, {-1, 2, 1, -1});
+ Sub(a, b);
ComputeAndCompareR1<int32>(&builder, {0, -2, 1, 1000000001}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, SubTwoConstantZeroElementS32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({});
- auto b = builder.ConstantR1<int32>({});
- builder.Sub(a, b);
+ auto a = ConstantR1<int32>(&builder, {});
+ auto b = ConstantR1<int32>(&builder, {});
+ Sub(a, b);
ComputeAndCompareR1<int32>(&builder, {}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, SubTwoConstantC64s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<complex64>(
- {{-2.5f, 0.0f}, {0.0f, 3.14f}, {3.0f, 2.25f}});
- auto b = builder.ConstantR1<complex64>(
- {{0.0f, 10.0f}, {3.13f, 0.0f}, {2.75f, -0.25f}});
- builder.Sub(a, b);
+ auto a = ConstantR1<complex64>(&builder,
+ {{-2.5f, 0.0f}, {0.0f, 3.14f}, {3.0f, 2.25f}});
+ auto b = ConstantR1<complex64>(
+ &builder, {{0.0f, 10.0f}, {3.13f, 0.0f}, {2.75f, -0.25f}});
+ Sub(a, b);
ComputeAndCompareR1<complex64>(
&builder, {{-2.5f, -10.0f}, {-3.13f, 3.14f}, {0.25f, 2.5f}}, {},
@@ -384,18 +385,18 @@ XLA_TEST_F(ArrayElementwiseOpTest, SubTwoConstantC64s) {
XLA_TEST_F(ArrayElementwiseOpTest, SubTwoConstantZeroElementC64s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<complex64>({});
- auto b = builder.ConstantR1<complex64>({});
- builder.Sub(a, b);
+ auto a = ConstantR1<complex64>(&builder, {});
+ auto b = ConstantR1<complex64>(&builder, {});
+ Sub(a, b);
ComputeAndCompareR1<complex64>(&builder, {}, {}, error_spec_);
}
XLA_TEST_F(ArrayElementwiseOpTest, DivTwoConstantF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({-2.5f, 25.5f, 2.25f, -10.0f, 6.0f});
- auto b = builder.ConstantR1<float>({10.0f, 5.1f, 1.0f, 10.0f, -6.0f});
- builder.Div(a, b);
+ auto a = ConstantR1<float>(&builder, {-2.5f, 25.5f, 2.25f, -10.0f, 6.0f});
+ auto b = ConstantR1<float>(&builder, {10.0f, 5.1f, 1.0f, 10.0f, -6.0f});
+ Div(a, b);
ComputeAndCompareR1<float>(&builder, {-0.25f, 5.0f, 2.25f, -1.0f, -1.0f}, {},
error_spec_);
@@ -403,9 +404,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivTwoConstantF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, DivTwoConstantZeroElementF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({});
- auto b = builder.ConstantR1<float>({});
- builder.Div(a, b);
+ auto a = ConstantR1<float>(&builder, {});
+ auto b = ConstantR1<float>(&builder, {});
+ Div(a, b);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
@@ -442,7 +443,7 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivS32s) {
CreateR1Parameter<int32>(dividends, 0, "dividend", &builder, &dividend);
auto divisor_data =
CreateR1Parameter<int32>(divisors, 1, "divisor", &builder, &divisor);
- builder.Div(dividend, divisor);
+ Div(dividend, divisor);
ComputeAndCompareR1<int32>(&builder, quotients,
{dividend_data.get(), divisor_data.get()});
@@ -454,7 +455,7 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivS32s) {
XlaOp dividend;
auto dividend_data =
CreateR1Parameter<int32>(dividends, 0, "dividend", &builder, &dividend);
- builder.Div(dividend, builder.ConstantR1<int32>(divisors));
+ Div(dividend, ConstantR1<int32>(&builder, divisors));
ComputeAndCompareR1<int32>(&builder, quotients, {dividend_data.get()});
}
@@ -467,7 +468,7 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivS32s) {
CreateR1Parameter<int32>(dividends, 0, "dividend", &builder, &dividend);
auto divisor_data =
CreateR1Parameter<int32>(divisors, 1, "divisor", &builder, &divisor);
- builder.Rem(dividend, divisor);
+ Rem(dividend, divisor);
ComputeAndCompareR1<int32>(&builder, remainders,
{dividend_data.get(), divisor_data.get()});
@@ -479,7 +480,7 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivS32s) {
XlaOp dividend;
auto dividend_data =
CreateR1Parameter<int32>(dividends, 0, "dividend", &builder, &dividend);
- builder.Rem(dividend, builder.ConstantR1<int32>(divisors));
+ Rem(dividend, ConstantR1<int32>(&builder, divisors));
ComputeAndCompareR1<int32>(&builder, remainders, {dividend_data.get()});
}
@@ -513,7 +514,7 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivU32s) {
&builder, &dividend);
auto divisor_data =
CreateR1Parameter<uint32>(divisors, 1, "divisor", &builder, &divisor);
- builder.Div(dividend, divisor);
+ Div(dividend, divisor);
ComputeAndCompareR1<uint32>(&builder, quotients,
{dividend_data.get(), divisor_data.get()});
@@ -524,7 +525,7 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivU32s) {
XlaOp dividend;
auto dividend_data = CreateR1Parameter<uint32>(dividends, 0, "dividend",
&builder, &dividend);
- builder.Div(dividend, builder.ConstantR1<uint32>(divisors));
+ Div(dividend, ConstantR1<uint32>(&builder, divisors));
ComputeAndCompareR1<uint32>(&builder, quotients, {dividend_data.get()});
}
@@ -537,7 +538,7 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivU32s) {
&builder, &dividend);
auto divisor_data =
CreateR1Parameter<uint32>(divisors, 1, "divisor", &builder, &divisor);
- builder.Rem(dividend, divisor);
+ Rem(dividend, divisor);
ComputeAndCompareR1<uint32>(&builder, remainders,
{dividend_data.get(), divisor_data.get()});
@@ -548,7 +549,7 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivU32s) {
XlaOp dividend;
auto dividend_data = CreateR1Parameter<uint32>(dividends, 0, "dividend",
&builder, &dividend);
- builder.Rem(dividend, builder.ConstantR1<uint32>(divisors));
+ Rem(dividend, ConstantR1<uint32>(&builder, divisors));
ComputeAndCompareR1<uint32>(&builder, remainders, {dividend_data.get()});
}
@@ -556,11 +557,11 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivU32s) {
XLA_TEST_F(ArrayElementwiseOpTest, DivTwoConstantC64s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<complex64>(
- {{-2.5f, 1.0f}, {-25.5f, 0.0f}, {2.0f, -1.0f}});
- auto b = builder.ConstantR1<complex64>(
- {{10.0f, 0.0f}, {0.0f, 1.0f}, {2.0f, -1.0f}});
- builder.Div(a, b);
+ auto a = ConstantR1<complex64>(
+ &builder, {{-2.5f, 1.0f}, {-25.5f, 0.0f}, {2.0f, -1.0f}});
+ auto b = ConstantR1<complex64>(&builder,
+ {{10.0f, 0.0f}, {0.0f, 1.0f}, {2.0f, -1.0f}});
+ Div(a, b);
ComputeAndCompareR1<complex64>(
&builder, {{-0.25f, 0.1f}, {0.0f, 25.5f}, {1.0f, 0.0f}}, {}, error_spec_);
@@ -568,20 +569,20 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivTwoConstantC64s) {
XLA_TEST_F(ArrayElementwiseOpTest, DivTwoConstantZeroElementC64s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<complex64>({});
- auto b = builder.ConstantR1<complex64>({});
- builder.Div(a, b);
+ auto a = ConstantR1<complex64>(&builder, {});
+ auto b = ConstantR1<complex64>(&builder, {});
+ Div(a, b);
ComputeAndCompareR1<complex64>(&builder, {}, {}, error_spec_);
}
XLA_TEST_F(ArrayElementwiseOpTest, RemF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>(
- {-2.5f, 25.5f, 2.25f, -10.0f, 6.0f, 3.0f, 3.0f, -1.0f, -8.0f});
- auto b = builder.ConstantR1<float>(
- {10.0f, 5.1f, 1.0f, 10.0f, -6.0f, 2.0f, -2.0f, 7.0f, -4.0f});
- builder.Rem(a, b);
+ auto a = ConstantR1<float>(
+ &builder, {-2.5f, 25.5f, 2.25f, -10.0f, 6.0f, 3.0f, 3.0f, -1.0f, -8.0f});
+ auto b = ConstantR1<float>(
+ &builder, {10.0f, 5.1f, 1.0f, 10.0f, -6.0f, 2.0f, -2.0f, 7.0f, -4.0f});
+ Rem(a, b);
ComputeAndCompareR1<float>(
&builder, {-2.5f, 0.0f, 0.25f, 0.0f, -0.0f, 1.0f, 1.0f, -1.0f, -0.0f}, {},
@@ -590,20 +591,20 @@ XLA_TEST_F(ArrayElementwiseOpTest, RemF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, RemZeroElementF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({});
- auto b = builder.ConstantR1<float>({});
- builder.Rem(a, b);
+ auto a = ConstantR1<float>(&builder, {});
+ auto b = ConstantR1<float>(&builder, {});
+ Rem(a, b);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
XLA_TEST_F(ArrayElementwiseOpTest, RemF64s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<double>(
- {-2.5, 25.5, 2.25, -10.0, 6.0, 3.0, 3.0, -1.0, -8.0});
- auto b = builder.ConstantR1<double>(
- {10.0, 5.1, 1.0, 10.0, -6.0, 2.0, -2.0, 7.0, -4.0});
- builder.Rem(a, b);
+ auto a = ConstantR1<double>(
+ &builder, {-2.5, 25.5, 2.25, -10.0, 6.0, 3.0, 3.0, -1.0, -8.0});
+ auto b = ConstantR1<double>(
+ &builder, {10.0, 5.1, 1.0, 10.0, -6.0, 2.0, -2.0, 7.0, -4.0});
+ Rem(a, b);
ComputeAndCompareR1<double>(
&builder, {-2.5, 0.0, 0.25, 0.0, -0.0, 1.0, 1.0, -1.0, -0.0}, {},
@@ -612,9 +613,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, RemF64s) {
XLA_TEST_F(ArrayElementwiseOpTest, MulTwoConstantF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({-2.5f, 25.5f, 2.25f, -10.0f, 6.0f});
- auto b = builder.ConstantR1<float>({10.0f, 5.0f, 1.0f, 10.0f, -6.0f});
- builder.Mul(a, b);
+ auto a = ConstantR1<float>(&builder, {-2.5f, 25.5f, 2.25f, -10.0f, 6.0f});
+ auto b = ConstantR1<float>(&builder, {10.0f, 5.0f, 1.0f, 10.0f, -6.0f});
+ Mul(a, b);
ComputeAndCompareR1<float>(&builder, {-25.0f, 127.5f, 2.25f, -100.0f, -36.0f},
{}, error_spec_);
@@ -622,9 +623,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, MulTwoConstantF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, MulTwoConstantZeroElementF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({});
- auto b = builder.ConstantR1<float>({});
- builder.Mul(a, b);
+ auto a = ConstantR1<float>(&builder, {});
+ auto b = ConstantR1<float>(&builder, {});
+ Mul(a, b);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
@@ -648,18 +649,18 @@ XLA_TEST_F(ArrayElementwiseOpTest, MulTwoConstantS32s) {
}
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>(a_data);
- auto b = builder.ConstantR1<int32>(b_data);
- builder.Mul(a, b);
+ auto a = ConstantR1<int32>(&builder, a_data);
+ auto b = ConstantR1<int32>(&builder, b_data);
+ Mul(a, b);
ComputeAndCompareR1<int32>(&builder, expected, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, MulTwoConstantZeroElementS32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({});
- auto b = builder.ConstantR1<int32>({});
- builder.Mul(a, b);
+ auto a = ConstantR1<int32>(&builder, {});
+ auto b = ConstantR1<int32>(&builder, {});
+ Mul(a, b);
ComputeAndCompareR1<int32>(&builder, {}, {});
}
@@ -679,20 +680,20 @@ XLA_TEST_F(ArrayElementwiseOpTest, MulTwoConstantU32s) {
}
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint32>(a_data);
- auto b = builder.ConstantR1<uint32>(b_data);
- builder.Mul(a, b);
+ auto a = ConstantR1<uint32>(&builder, a_data);
+ auto b = ConstantR1<uint32>(&builder, b_data);
+ Mul(a, b);
ComputeAndCompareR1<uint32>(&builder, expected, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, MulTwoConstantC64s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<complex64>(
- {{-2.5f, 0.0f}, {0.0f, 25.5f}, {2.0f, -10.0f}});
- auto b = builder.ConstantR1<complex64>(
- {{0.0f, 10.0f}, {5.0f, 1.0f}, {10.0f, -6.0f}});
- builder.Mul(a, b);
+ auto a = ConstantR1<complex64>(
+ &builder, {{-2.5f, 0.0f}, {0.0f, 25.5f}, {2.0f, -10.0f}});
+ auto b = ConstantR1<complex64>(&builder,
+ {{0.0f, 10.0f}, {5.0f, 1.0f}, {10.0f, -6.0f}});
+ Mul(a, b);
ComputeAndCompareR1<complex64>(
&builder, {{0.0f, -25.0f}, {-25.5f, 127.5f}, {-40.0f, -112.0}}, {},
@@ -701,27 +702,27 @@ XLA_TEST_F(ArrayElementwiseOpTest, MulTwoConstantC64s) {
XLA_TEST_F(ArrayElementwiseOpTest, MulTwoConstantZeroElementC64s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<complex64>({});
- auto b = builder.ConstantR1<complex64>({});
- builder.Mul(a, b);
+ auto a = ConstantR1<complex64>(&builder, {});
+ auto b = ConstantR1<complex64>(&builder, {});
+ Mul(a, b);
ComputeAndCompareR1<complex64>(&builder, {}, {}, error_spec_);
}
XLA_TEST_F(ArrayElementwiseOpTest, AndPredR1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({false, false, true, true});
- auto b = builder.ConstantR1<bool>({false, true, false, true});
- builder.And(a, b);
+ auto a = ConstantR1<bool>(&builder, {false, false, true, true});
+ auto b = ConstantR1<bool>(&builder, {false, true, false, true});
+ And(a, b);
ComputeAndCompareR1<bool>(&builder, {false, false, false, true}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, AndPredR2) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<bool>({{false, false}, {true, true}});
- auto b = builder.ConstantR2<bool>({{false, true}, {false, true}});
- builder.And(a, b);
+ auto a = ConstantR2<bool>(&builder, {{false, false}, {true, true}});
+ auto b = ConstantR2<bool>(&builder, {{false, true}, {false, true}});
+ And(a, b);
Array2D<bool> expected_array({{false, false}, {false, true}});
ComputeAndCompareR2<bool>(&builder, expected_array, {});
@@ -729,27 +730,27 @@ XLA_TEST_F(ArrayElementwiseOpTest, AndPredR2) {
XLA_TEST_F(ArrayElementwiseOpTest, AndZeroElementPredR1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({});
- auto b = builder.ConstantR1<bool>({});
- builder.And(a, b);
+ auto a = ConstantR1<bool>(&builder, {});
+ auto b = ConstantR1<bool>(&builder, {});
+ And(a, b);
ComputeAndCompareR1<bool>(&builder, {}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, AndS32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({0, -1, -8});
- auto b = builder.ConstantR1<int32>({5, -7, 12});
- builder.And(a, b);
+ auto a = ConstantR1<int32>(&builder, {0, -1, -8});
+ auto b = ConstantR1<int32>(&builder, {5, -7, 12});
+ And(a, b);
ComputeAndCompareR1<int32>(&builder, {0, -7, 8}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, AndS32R2) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<int32>({{0, -5}, {-1, 5}});
- auto b = builder.ConstantR2<int32>({{1, -6}, {4, 5}});
- builder.And(a, b);
+ auto a = ConstantR2<int32>(&builder, {{0, -5}, {-1, 5}});
+ auto b = ConstantR2<int32>(&builder, {{1, -6}, {4, 5}});
+ And(a, b);
Array2D<int32> expected_array({{0, -6}, {4, 5}});
ComputeAndCompareR2<int32>(&builder, expected_array, {});
@@ -757,27 +758,27 @@ XLA_TEST_F(ArrayElementwiseOpTest, AndS32R2) {
XLA_TEST_F(ArrayElementwiseOpTest, AndZeroElementS32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({});
- auto b = builder.ConstantR1<int32>({});
- builder.And(a, b);
+ auto a = ConstantR1<int32>(&builder, {});
+ auto b = ConstantR1<int32>(&builder, {});
+ And(a, b);
ComputeAndCompareR1<int32>(&builder, {}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, AndU32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({0, 1, 8});
- auto b = builder.ConstantR1<int32>({5, 7, 12});
- builder.And(a, b);
+ auto a = ConstantR1<int32>(&builder, {0, 1, 8});
+ auto b = ConstantR1<int32>(&builder, {5, 7, 12});
+ And(a, b);
ComputeAndCompareR1<int32>(&builder, {0, 1, 8}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, AndU32R2) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<uint32>({{0, 1}, {3, 8}});
- auto b = builder.ConstantR2<uint32>({{1, 0}, {7, 6}});
- builder.And(a, b);
+ auto a = ConstantR2<uint32>(&builder, {{0, 1}, {3, 8}});
+ auto b = ConstantR2<uint32>(&builder, {{1, 0}, {7, 6}});
+ And(a, b);
Array2D<uint32> expected_array({{0, 0}, {3, 0}});
ComputeAndCompareR2<uint32>(&builder, expected_array, {});
@@ -785,27 +786,27 @@ XLA_TEST_F(ArrayElementwiseOpTest, AndU32R2) {
XLA_TEST_F(ArrayElementwiseOpTest, AndZeroElementU32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint32>({});
- auto b = builder.ConstantR1<uint32>({});
- builder.And(a, b);
+ auto a = ConstantR1<uint32>(&builder, {});
+ auto b = ConstantR1<uint32>(&builder, {});
+ And(a, b);
ComputeAndCompareR1<uint32>(&builder, {}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, OrPredR1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({false, false, true, true});
- auto b = builder.ConstantR1<bool>({false, true, false, true});
- builder.Or(a, b);
+ auto a = ConstantR1<bool>(&builder, {false, false, true, true});
+ auto b = ConstantR1<bool>(&builder, {false, true, false, true});
+ Or(a, b);
ComputeAndCompareR1<bool>(&builder, {false, true, true, true}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, OrPredR2) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<bool>({{false, false}, {true, true}});
- auto b = builder.ConstantR2<bool>({{false, true}, {false, true}});
- builder.Or(a, b);
+ auto a = ConstantR2<bool>(&builder, {{false, false}, {true, true}});
+ auto b = ConstantR2<bool>(&builder, {{false, true}, {false, true}});
+ Or(a, b);
Array2D<bool> expected_array({{false, true}, {true, true}});
ComputeAndCompareR2<bool>(&builder, expected_array, {});
@@ -813,27 +814,27 @@ XLA_TEST_F(ArrayElementwiseOpTest, OrPredR2) {
XLA_TEST_F(ArrayElementwiseOpTest, OrZeroElementPredR1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({});
- auto b = builder.ConstantR1<bool>({});
- builder.Or(a, b);
+ auto a = ConstantR1<bool>(&builder, {});
+ auto b = ConstantR1<bool>(&builder, {});
+ Or(a, b);
ComputeAndCompareR1<bool>(&builder, {}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, OrS32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({0, -1, 8});
- auto b = builder.ConstantR1<int32>({5, -7, 4});
- builder.Or(a, b);
+ auto a = ConstantR1<int32>(&builder, {0, -1, 8});
+ auto b = ConstantR1<int32>(&builder, {5, -7, 4});
+ Or(a, b);
ComputeAndCompareR1<int32>(&builder, {5, -1, 12}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, OrS32R2) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<int32>({{0, -1}, {8, 8}});
- auto b = builder.ConstantR2<int32>({{5, -7}, {4, 1}});
- builder.Or(a, b);
+ auto a = ConstantR2<int32>(&builder, {{0, -1}, {8, 8}});
+ auto b = ConstantR2<int32>(&builder, {{5, -7}, {4, 1}});
+ Or(a, b);
Array2D<int32> expected_array({{5, -1}, {12, 9}});
ComputeAndCompareR2<int32>(&builder, expected_array, {});
@@ -841,27 +842,27 @@ XLA_TEST_F(ArrayElementwiseOpTest, OrS32R2) {
XLA_TEST_F(ArrayElementwiseOpTest, OrZeroElementS32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({});
- auto b = builder.ConstantR1<int32>({});
- builder.Or(a, b);
+ auto a = ConstantR1<int32>(&builder, {});
+ auto b = ConstantR1<int32>(&builder, {});
+ Or(a, b);
ComputeAndCompareR1<int32>(&builder, {}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, OrU32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint32>({0, 1, 8});
- auto b = builder.ConstantR1<uint32>({5, 7, 4});
- builder.Or(a, b);
+ auto a = ConstantR1<uint32>(&builder, {0, 1, 8});
+ auto b = ConstantR1<uint32>(&builder, {5, 7, 4});
+ Or(a, b);
ComputeAndCompareR1<uint32>(&builder, {5, 7, 12}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, OrU32R2) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<uint32>({{0, 1}, {8, 8}});
- auto b = builder.ConstantR2<uint32>({{5, 7}, {4, 1}});
- builder.Or(a, b);
+ auto a = ConstantR2<uint32>(&builder, {{0, 1}, {8, 8}});
+ auto b = ConstantR2<uint32>(&builder, {{5, 7}, {4, 1}});
+ Or(a, b);
Array2D<uint32> expected_array({{5, 7}, {12, 9}});
ComputeAndCompareR2<uint32>(&builder, expected_array, {});
@@ -869,27 +870,27 @@ XLA_TEST_F(ArrayElementwiseOpTest, OrU32R2) {
XLA_TEST_F(ArrayElementwiseOpTest, OrZeroElementU32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint32>({});
- auto b = builder.ConstantR1<uint32>({});
- builder.Or(a, b);
+ auto a = ConstantR1<uint32>(&builder, {});
+ auto b = ConstantR1<uint32>(&builder, {});
+ Or(a, b);
ComputeAndCompareR1<uint32>(&builder, {}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, XorPredR1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({false, false, true, true});
- auto b = builder.ConstantR1<bool>({false, true, false, true});
- builder.Xor(a, b);
+ auto a = ConstantR1<bool>(&builder, {false, false, true, true});
+ auto b = ConstantR1<bool>(&builder, {false, true, false, true});
+ Xor(a, b);
ComputeAndCompareR1<bool>(&builder, {false, true, true, false}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, XorPredR2) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<bool>({{false, false}, {true, true}});
- auto b = builder.ConstantR2<bool>({{false, true}, {false, true}});
- builder.Xor(a, b);
+ auto a = ConstantR2<bool>(&builder, {{false, false}, {true, true}});
+ auto b = ConstantR2<bool>(&builder, {{false, true}, {false, true}});
+ Xor(a, b);
Array2D<bool> expected_array({{false, true}, {true, false}});
ComputeAndCompareR2<bool>(&builder, expected_array, {});
@@ -897,27 +898,27 @@ XLA_TEST_F(ArrayElementwiseOpTest, XorPredR2) {
XLA_TEST_F(ArrayElementwiseOpTest, XorZeroElementPredR1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({});
- auto b = builder.ConstantR1<bool>({});
- builder.Xor(a, b);
+ auto a = ConstantR1<bool>(&builder, {});
+ auto b = ConstantR1<bool>(&builder, {});
+ Xor(a, b);
ComputeAndCompareR1<bool>(&builder, {}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, XorS32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({0, -1, 8});
- auto b = builder.ConstantR1<int32>({5, -7, 4});
- builder.Xor(a, b);
+ auto a = ConstantR1<int32>(&builder, {0, -1, 8});
+ auto b = ConstantR1<int32>(&builder, {5, -7, 4});
+ Xor(a, b);
ComputeAndCompareR1<int32>(&builder, {5, 6, 12}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, XorS32R2) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<int32>({{0, -1}, {8, 8}});
- auto b = builder.ConstantR2<int32>({{5, -7}, {4, 1}});
- builder.Xor(a, b);
+ auto a = ConstantR2<int32>(&builder, {{0, -1}, {8, 8}});
+ auto b = ConstantR2<int32>(&builder, {{5, -7}, {4, 1}});
+ Xor(a, b);
Array2D<int32> expected_array({{5, 6}, {12, 9}});
ComputeAndCompareR2<int32>(&builder, expected_array, {});
@@ -925,27 +926,27 @@ XLA_TEST_F(ArrayElementwiseOpTest, XorS32R2) {
XLA_TEST_F(ArrayElementwiseOpTest, XorZeroElementS32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({});
- auto b = builder.ConstantR1<int32>({});
- builder.Xor(a, b);
+ auto a = ConstantR1<int32>(&builder, {});
+ auto b = ConstantR1<int32>(&builder, {});
+ Xor(a, b);
ComputeAndCompareR1<int32>(&builder, {}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, XorU32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint32>({0, 1, 8});
- auto b = builder.ConstantR1<uint32>({5, 7, 4});
- builder.Xor(a, b);
+ auto a = ConstantR1<uint32>(&builder, {0, 1, 8});
+ auto b = ConstantR1<uint32>(&builder, {5, 7, 4});
+ Xor(a, b);
ComputeAndCompareR1<uint32>(&builder, {5, 6, 12}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, XorU32R2) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<uint32>({{0, 1}, {8, 8}});
- auto b = builder.ConstantR2<uint32>({{5, 7}, {4, 1}});
- builder.Xor(a, b);
+ auto a = ConstantR2<uint32>(&builder, {{0, 1}, {8, 8}});
+ auto b = ConstantR2<uint32>(&builder, {{5, 7}, {4, 1}});
+ Xor(a, b);
Array2D<uint32> expected_array({{5, 6}, {12, 9}});
ComputeAndCompareR2<uint32>(&builder, expected_array, {});
@@ -953,24 +954,24 @@ XLA_TEST_F(ArrayElementwiseOpTest, XorU32R2) {
XLA_TEST_F(ArrayElementwiseOpTest, XorZeroElementU32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint32>({});
- auto b = builder.ConstantR1<uint32>({});
- builder.Xor(a, b);
+ auto a = ConstantR1<uint32>(&builder, {});
+ auto b = ConstantR1<uint32>(&builder, {});
+ Xor(a, b);
ComputeAndCompareR1<uint32>(&builder, {}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, NotPredR1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({false, true, true, false});
- builder.Not(a);
+ auto a = ConstantR1<bool>(&builder, {false, true, true, false});
+ Not(a);
ComputeAndCompareR1<bool>(&builder, {true, false, false, true}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, NotPredR2) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<bool>({{false, true}, {true, false}});
- builder.Not(a);
+ auto a = ConstantR2<bool>(&builder, {{false, true}, {true, false}});
+ Not(a);
Array2D<bool> expected_array({{true, false}, {false, true}});
ComputeAndCompareR2<bool>(&builder, expected_array, {});
@@ -978,24 +979,24 @@ XLA_TEST_F(ArrayElementwiseOpTest, NotPredR2) {
XLA_TEST_F(ArrayElementwiseOpTest, NotZeroElementPredR1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({});
- builder.Not(a);
+ auto a = ConstantR1<bool>(&builder, {});
+ Not(a);
ComputeAndCompareR1<bool>(&builder, {}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, NotS32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({-1, 0, 1});
- builder.Not(a);
+ auto a = ConstantR1<int32>(&builder, {-1, 0, 1});
+ Not(a);
ComputeAndCompareR1<int32>(&builder, {0, -1, -2}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, NotS32R2) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<int32>({{-1, 0}, {1, 8}});
- builder.Not(a);
+ auto a = ConstantR2<int32>(&builder, {{-1, 0}, {1, 8}});
+ Not(a);
Array2D<int32> expected_array({{0, -1}, {-2, -9}});
ComputeAndCompareR2<int32>(&builder, expected_array, {});
@@ -1003,24 +1004,24 @@ XLA_TEST_F(ArrayElementwiseOpTest, NotS32R2) {
XLA_TEST_F(ArrayElementwiseOpTest, NotZeroElementS32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({});
- builder.Not(a);
+ auto a = ConstantR1<int32>(&builder, {});
+ Not(a);
ComputeAndCompareR1<int32>(&builder, {}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, NotU32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint32>({0, 4294967295});
- builder.Not(a);
+ auto a = ConstantR1<uint32>(&builder, {0, 4294967295});
+ Not(a);
ComputeAndCompareR1<uint32>(&builder, {4294967295, 0}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, NotU32R2) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<uint32>({{0, 4294967295}, {1, 4294967294}});
- builder.Not(a);
+ auto a = ConstantR2<uint32>(&builder, {{0, 4294967295}, {1, 4294967294}});
+ Not(a);
Array2D<uint32> expected_array({{4294967295, 0}, {4294967294, 1}});
ComputeAndCompareR2<uint32>(&builder, expected_array, {});
@@ -1028,19 +1029,19 @@ XLA_TEST_F(ArrayElementwiseOpTest, NotU32R2) {
XLA_TEST_F(ArrayElementwiseOpTest, NotZeroElementU32R1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint32>({});
- builder.Not(a);
+ auto a = ConstantR1<uint32>(&builder, {});
+ Not(a);
ComputeAndCompareR1<uint32>(&builder, {}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, ShiftLeftS32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({static_cast<int32>(0x12345678),
- static_cast<int32>(0xF0001000), 1, 3, 77,
- 1, -3, 77});
- auto b = builder.ConstantR1<int32>({4, 8, 2, 7, 15, 32, 100, -1});
- builder.ShiftLeft(a, b);
+ auto a = ConstantR1<int32>(
+ &builder, {static_cast<int32>(0x12345678), static_cast<int32>(0xF0001000),
+ 1, 3, 77, 1, -3, 77});
+ auto b = ConstantR1<int32>(&builder, {4, 8, 2, 7, 15, 32, 100, -1});
+ ShiftLeft(a, b);
ComputeAndCompareR1<int32>(&builder,
{static_cast<int32>(0x23456780), 0x00100000, 0x4,
@@ -1050,11 +1051,11 @@ XLA_TEST_F(ArrayElementwiseOpTest, ShiftLeftS32) {
XLA_TEST_F(ArrayElementwiseOpTest, ShiftRightArithmeticS32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({static_cast<int32>(0x92345678),
- static_cast<int32>(0x10001000), 1, 3, 77,
- 1, -3, 77});
- auto b = builder.ConstantR1<int32>({4, 8, 2, 7, 2, 32, 100, -1});
- builder.ShiftRightArithmetic(a, b);
+ auto a = ConstantR1<int32>(
+ &builder, {static_cast<int32>(0x92345678), static_cast<int32>(0x10001000),
+ 1, 3, 77, 1, -3, 77});
+ auto b = ConstantR1<int32>(&builder, {4, 8, 2, 7, 2, 32, 100, -1});
+ ShiftRightArithmetic(a, b);
ComputeAndCompareR1<int32>(
&builder,
@@ -1065,11 +1066,11 @@ XLA_TEST_F(ArrayElementwiseOpTest, ShiftRightArithmeticS32) {
XLA_TEST_F(ArrayElementwiseOpTest, ShiftRightLogicalS32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({static_cast<int32>(0x92345678),
- static_cast<int32>(0x10001000), 1, 3, 77,
- 1, -3, 77});
- auto b = builder.ConstantR1<int32>({4, 8, 2, 7, 5, 32, 100, -1});
- builder.ShiftRightLogical(a, b);
+ auto a = ConstantR1<int32>(
+ &builder, {static_cast<int32>(0x92345678), static_cast<int32>(0x10001000),
+ 1, 3, 77, 1, -3, 77});
+ auto b = ConstantR1<int32>(&builder, {4, 8, 2, 7, 5, 32, 100, -1});
+ ShiftRightLogical(a, b);
ComputeAndCompareR1<int32>(&builder,
{0x09234567, 0x00100010, 0, 0, 2, 0, 0, 0}, {});
@@ -1077,10 +1078,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, ShiftRightLogicalS32) {
XLA_TEST_F(ArrayElementwiseOpTest, ShiftLeftU32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint32>(
- {0x12345678, 0xF0001000, 1, 3, 77, 1, ~3u, 77});
- auto b = builder.ConstantR1<uint32>({4, 8, 2, 7, 15, 32, 100, ~0u});
- builder.ShiftLeft(a, b);
+ auto a = ConstantR1<uint32>(&builder,
+ {0x12345678, 0xF0001000, 1, 3, 77, 1, ~3u, 77});
+ auto b = ConstantR1<uint32>(&builder, {4, 8, 2, 7, 15, 32, 100, ~0u});
+ ShiftLeft(a, b);
ComputeAndCompareR1<uint32>(
&builder, {0x23456780, 0x00100000, 0x4, 0x180, 2523136, 0, 0, 0}, {});
@@ -1088,10 +1089,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, ShiftLeftU32) {
XLA_TEST_F(ArrayElementwiseOpTest, ShiftRightArithmeticU32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint32>(
- {0x92345678, 0x10001000, 1, 3, 77, 1, ~3u, 77});
- auto b = builder.ConstantR1<uint32>({4, 8, 2, 7, 2, 32, 100, ~0u});
- builder.ShiftRightArithmetic(a, b);
+ auto a = ConstantR1<uint32>(&builder,
+ {0x92345678, 0x10001000, 1, 3, 77, 1, ~3u, 77});
+ auto b = ConstantR1<uint32>(&builder, {4, 8, 2, 7, 2, 32, 100, ~0u});
+ ShiftRightArithmetic(a, b);
ComputeAndCompareR1<uint32>(
&builder, {0xF9234567, 0x00100010, 0, 0, 19, 0, ~0u, 0}, {});
@@ -1099,10 +1100,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, ShiftRightArithmeticU32) {
XLA_TEST_F(ArrayElementwiseOpTest, ShiftRightLogicalU32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint32>(
- {0x92345678, 0x10001000, 1, 3, 77, 1, ~3u, 77});
- auto b = builder.ConstantR1<uint32>({4, 8, 2, 7, 5, 32, 100, ~0u});
- builder.ShiftRightLogical(a, b);
+ auto a = ConstantR1<uint32>(&builder,
+ {0x92345678, 0x10001000, 1, 3, 77, 1, ~3u, 77});
+ auto b = ConstantR1<uint32>(&builder, {4, 8, 2, 7, 5, 32, 100, ~0u});
+ ShiftRightLogical(a, b);
ComputeAndCompareR1<uint32>(&builder,
{0x09234567, 0x00100010, 0, 0, 2, 0, 0, 0}, {});
@@ -1111,18 +1112,18 @@ XLA_TEST_F(ArrayElementwiseOpTest, ShiftRightLogicalU32) {
XLA_TEST_F(ArrayElementwiseOpTest, CompareEqF32s) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<float>({-2.5f, 25.5f, 2.25f, NAN, 6.0f});
- auto rhs = builder.ConstantR1<float>({10.0f, 5.0f, 2.25f, 10.0f, NAN});
- builder.Eq(lhs, rhs);
+ auto lhs = ConstantR1<float>(&builder, {-2.5f, 25.5f, 2.25f, NAN, 6.0f});
+ auto rhs = ConstantR1<float>(&builder, {10.0f, 5.0f, 2.25f, 10.0f, NAN});
+ Eq(lhs, rhs);
ComputeAndCompareR1<bool>(&builder, {false, false, true, false, false}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, CompareEqZeroElementF32s) {
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<float>({});
- auto rhs = builder.ConstantR1<float>({});
- builder.Eq(lhs, rhs);
+ auto lhs = ConstantR1<float>(&builder, {});
+ auto rhs = ConstantR1<float>(&builder, {});
+ Eq(lhs, rhs);
ComputeAndCompareR1<bool>(&builder, {}, {});
}
@@ -1130,9 +1131,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareEqZeroElementF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, CompareGeF32s) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<float>({-2.5f, 25.5f, 2.25f, NAN, 6.0f});
- auto rhs = builder.ConstantR1<float>({10.0f, 5.0f, 1.0f, 10.0f, NAN});
- builder.Ge(lhs, rhs);
+ auto lhs = ConstantR1<float>(&builder, {-2.5f, 25.5f, 2.25f, NAN, 6.0f});
+ auto rhs = ConstantR1<float>(&builder, {10.0f, 5.0f, 1.0f, 10.0f, NAN});
+ Ge(lhs, rhs);
ComputeAndCompareR1<bool>(&builder, {false, true, true, false, false}, {});
}
@@ -1140,9 +1141,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareGeF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, CompareGtF32s) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<float>({-2.5f, 25.5f, 2.25f, NAN, 6.0f});
- auto rhs = builder.ConstantR1<float>({10.0f, 5.0f, 1.0f, 10.0f, NAN});
- builder.Gt(lhs, rhs);
+ auto lhs = ConstantR1<float>(&builder, {-2.5f, 25.5f, 2.25f, NAN, 6.0f});
+ auto rhs = ConstantR1<float>(&builder, {10.0f, 5.0f, 1.0f, 10.0f, NAN});
+ Gt(lhs, rhs);
ComputeAndCompareR1<bool>(&builder, {false, true, true, false, false}, {});
}
@@ -1150,9 +1151,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareGtF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, CompareLeF32s) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<float>({-2.5f, 5.0f, 2.25f, NAN, 6.0f});
- auto rhs = builder.ConstantR1<float>({10.0f, 5.0f, 1.0f, 10.0f, NAN});
- builder.Le(lhs, rhs);
+ auto lhs = ConstantR1<float>(&builder, {-2.5f, 5.0f, 2.25f, NAN, 6.0f});
+ auto rhs = ConstantR1<float>(&builder, {10.0f, 5.0f, 1.0f, 10.0f, NAN});
+ Le(lhs, rhs);
ComputeAndCompareR1<bool>(&builder, {true, true, false, false, false}, {});
}
@@ -1160,9 +1161,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareLeF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, CompareLtF32s) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<float>({-2.5f, 25.5f, 2.25f, NAN, 6.0f});
- auto rhs = builder.ConstantR1<float>({10.0f, 5.0f, 1.0f, 10.0f, NAN});
- builder.Lt(lhs, rhs);
+ auto lhs = ConstantR1<float>(&builder, {-2.5f, 25.5f, 2.25f, NAN, 6.0f});
+ auto rhs = ConstantR1<float>(&builder, {10.0f, 5.0f, 1.0f, 10.0f, NAN});
+ Lt(lhs, rhs);
ComputeAndCompareR1<bool>(&builder, {true, false, false, false, false}, {});
}
@@ -1171,9 +1172,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareEqS32s) {
const int32 min = std::numeric_limits<int32>::min();
const int32 max = std::numeric_limits<int32>::max();
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<int32>({min, min, min, 0, 0, 0, max, max, max});
- auto rhs = builder.ConstantR1<int32>({min, 0, max, -1, 0, 1, min, 0, max});
- builder.Eq(lhs, rhs);
+ auto lhs =
+ ConstantR1<int32>(&builder, {min, min, min, 0, 0, 0, max, max, max});
+ auto rhs = ConstantR1<int32>(&builder, {min, 0, max, -1, 0, 1, min, 0, max});
+ Eq(lhs, rhs);
ComputeAndCompareR1<bool>(
&builder, {true, false, false, false, true, false, false, false, true},
@@ -1182,9 +1184,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareEqS32s) {
XLA_TEST_F(ArrayElementwiseOpTest, CompareEqZeroElementS32s) {
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<int32>({});
- auto rhs = builder.ConstantR1<int32>({});
- builder.Eq(lhs, rhs);
+ auto lhs = ConstantR1<int32>(&builder, {});
+ auto rhs = ConstantR1<int32>(&builder, {});
+ Eq(lhs, rhs);
ComputeAndCompareR1<bool>(&builder, {}, {});
}
@@ -1192,26 +1194,26 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareEqZeroElementS32s) {
XLA_TEST_F(ArrayElementwiseOpTest, CompareEqC64s) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<complex64>({{-2.5f, 10.0f},
- {1.0f, 25.5f},
- {2.25f, -3.0f},
- {NAN, 0.0f},
- {1.0f, 6.0f}});
- auto rhs = builder.ConstantR1<complex64>({{0.0f, 10.0f},
- {1.0f, 5.0f},
- {2.25f, -3.0f},
- {10.0f, 0.0f},
- {1.0f, NAN}});
- builder.Eq(lhs, rhs);
+ auto lhs = ConstantR1<complex64>(&builder, {{-2.5f, 10.0f},
+ {1.0f, 25.5f},
+ {2.25f, -3.0f},
+ {NAN, 0.0f},
+ {1.0f, 6.0f}});
+ auto rhs = ConstantR1<complex64>(&builder, {{0.0f, 10.0f},
+ {1.0f, 5.0f},
+ {2.25f, -3.0f},
+ {10.0f, 0.0f},
+ {1.0f, NAN}});
+ Eq(lhs, rhs);
ComputeAndCompareR1<bool>(&builder, {false, false, true, false, false}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, CompareEqZeroElementC64s) {
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<complex64>({});
- auto rhs = builder.ConstantR1<complex64>({});
- builder.Eq(lhs, rhs);
+ auto lhs = ConstantR1<complex64>(&builder, {});
+ auto rhs = ConstantR1<complex64>(&builder, {});
+ Eq(lhs, rhs);
ComputeAndCompareR1<bool>(&builder, {}, {});
}
@@ -1221,17 +1223,17 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareNeC64s) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<complex64>({{-2.5f, 10.0f},
- {1.0f, 25.5f},
- {2.25f, -3.0f},
- {NAN, 0.0f},
- {1.0f, 6.0f}});
- auto rhs = builder.ConstantR1<complex64>({{0.0f, 10.0f},
- {1.0f, 5.0f},
- {2.25f, -3.0f},
- {10.0f, 0.0f},
- {1.0f, NAN}});
- builder.Ne(lhs, rhs);
+ auto lhs = ConstantR1<complex64>(&builder, {{-2.5f, 10.0f},
+ {1.0f, 25.5f},
+ {2.25f, -3.0f},
+ {NAN, 0.0f},
+ {1.0f, 6.0f}});
+ auto rhs = ConstantR1<complex64>(&builder, {{0.0f, 10.0f},
+ {1.0f, 5.0f},
+ {2.25f, -3.0f},
+ {10.0f, 0.0f},
+ {1.0f, NAN}});
+ Ne(lhs, rhs);
ComputeAndCompareR1<bool>(&builder, {true, true, false, true, true}, {});
}
@@ -1241,9 +1243,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareNeF32s) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<float>({-2.5f, 25.5f, 2.25f, NAN, 6.0f});
- auto rhs = builder.ConstantR1<float>({10.0f, 25.5f, 1.0f, 10.0f, NAN});
- builder.Ne(lhs, rhs);
+ auto lhs = ConstantR1<float>(&builder, {-2.5f, 25.5f, 2.25f, NAN, 6.0f});
+ auto rhs = ConstantR1<float>(&builder, {10.0f, 25.5f, 1.0f, 10.0f, NAN});
+ Ne(lhs, rhs);
ComputeAndCompareR1<bool>(&builder, {true, false, true, true, true}, {});
}
@@ -1252,9 +1254,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareNeS32s) {
const int32 min = std::numeric_limits<int32>::min();
const int32 max = std::numeric_limits<int32>::max();
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<int32>({min, min, min, 0, 0, 0, max, max, max});
- auto rhs = builder.ConstantR1<int32>({min, 0, max, -1, 0, 1, min, 0, max});
- builder.Ne(lhs, rhs);
+ auto lhs =
+ ConstantR1<int32>(&builder, {min, min, min, 0, 0, 0, max, max, max});
+ auto rhs = ConstantR1<int32>(&builder, {min, 0, max, -1, 0, 1, min, 0, max});
+ Ne(lhs, rhs);
ComputeAndCompareR1<bool>(
&builder, {false, true, true, true, false, true, true, true, false}, {});
@@ -1264,9 +1267,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareGeS32s) {
const int32 min = std::numeric_limits<int32>::min();
const int32 max = std::numeric_limits<int32>::max();
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<int32>({min, min, min, 0, 0, 0, max, max, max});
- auto rhs = builder.ConstantR1<int32>({min, 0, max, -1, 0, 1, min, 0, max});
- builder.Ge(lhs, rhs);
+ auto lhs =
+ ConstantR1<int32>(&builder, {min, min, min, 0, 0, 0, max, max, max});
+ auto rhs = ConstantR1<int32>(&builder, {min, 0, max, -1, 0, 1, min, 0, max});
+ Ge(lhs, rhs);
ComputeAndCompareR1<bool>(
&builder, {true, false, false, true, true, false, true, true, true}, {});
@@ -1276,9 +1280,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareGtS32s) {
const int32 min = std::numeric_limits<int32>::min();
const int32 max = std::numeric_limits<int32>::max();
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<int32>({min, min, min, 0, 0, 0, max, max, max});
- auto rhs = builder.ConstantR1<int32>({min, 0, max, -1, 0, 1, min, 0, max});
- builder.Gt(lhs, rhs);
+ auto lhs =
+ ConstantR1<int32>(&builder, {min, min, min, 0, 0, 0, max, max, max});
+ auto rhs = ConstantR1<int32>(&builder, {min, 0, max, -1, 0, 1, min, 0, max});
+ Gt(lhs, rhs);
ComputeAndCompareR1<bool>(
&builder, {false, false, false, true, false, false, true, true, false},
@@ -1289,9 +1294,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareLeS32s) {
const int32 min = std::numeric_limits<int32>::min();
const int32 max = std::numeric_limits<int32>::max();
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<int32>({min, min, min, 0, 0, 0, max, max, max});
- auto rhs = builder.ConstantR1<int32>({min, 0, max, -1, 0, 1, min, 0, max});
- builder.Le(lhs, rhs);
+ auto lhs =
+ ConstantR1<int32>(&builder, {min, min, min, 0, 0, 0, max, max, max});
+ auto rhs = ConstantR1<int32>(&builder, {min, 0, max, -1, 0, 1, min, 0, max});
+ Le(lhs, rhs);
ComputeAndCompareR1<bool>(
&builder, {true, true, true, false, true, true, false, false, true}, {});
@@ -1301,9 +1307,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareLtS32s) {
const int32 min = std::numeric_limits<int32>::min();
const int32 max = std::numeric_limits<int32>::max();
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<int32>({min, min, min, 0, 0, 0, max, max, max});
- auto rhs = builder.ConstantR1<int32>({min, 0, max, -1, 0, 1, min, 0, max});
- builder.Lt(lhs, rhs);
+ auto lhs =
+ ConstantR1<int32>(&builder, {min, min, min, 0, 0, 0, max, max, max});
+ auto rhs = ConstantR1<int32>(&builder, {min, 0, max, -1, 0, 1, min, 0, max});
+ Lt(lhs, rhs);
ComputeAndCompareR1<bool>(
&builder, {false, true, true, false, false, true, false, false, false},
@@ -1313,9 +1320,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareLtS32s) {
XLA_TEST_F(ArrayElementwiseOpTest, CompareEqU32s) {
const uint32 max = std::numeric_limits<uint32>::max();
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<uint32>({0, 0, 0, 5, 5, 5, max, max, max});
- auto rhs = builder.ConstantR1<uint32>({0, 1, max, 4, 5, 6, 0, 1, max});
- builder.Eq(lhs, rhs);
+ auto lhs = ConstantR1<uint32>(&builder, {0, 0, 0, 5, 5, 5, max, max, max});
+ auto rhs = ConstantR1<uint32>(&builder, {0, 1, max, 4, 5, 6, 0, 1, max});
+ Eq(lhs, rhs);
ComputeAndCompareR1<bool>(
&builder, {true, false, false, false, true, false, false, false, true},
@@ -1325,9 +1332,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareEqU32s) {
XLA_TEST_F(ArrayElementwiseOpTest, CompareNeU32s) {
const uint32 max = std::numeric_limits<uint32>::max();
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<uint32>({0, 0, 0, 5, 5, 5, max, max, max});
- auto rhs = builder.ConstantR1<uint32>({0, 1, max, 4, 5, 6, 0, 1, max});
- builder.Ne(lhs, rhs);
+ auto lhs = ConstantR1<uint32>(&builder, {0, 0, 0, 5, 5, 5, max, max, max});
+ auto rhs = ConstantR1<uint32>(&builder, {0, 1, max, 4, 5, 6, 0, 1, max});
+ Ne(lhs, rhs);
ComputeAndCompareR1<bool>(
&builder, {false, true, true, true, false, true, true, true, false}, {});
@@ -1336,9 +1343,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareNeU32s) {
XLA_TEST_F(ArrayElementwiseOpTest, CompareGeU32s) {
const uint32 max = std::numeric_limits<uint32>::max();
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<uint32>({0, 0, 0, 5, 5, 5, max, max, max});
- auto rhs = builder.ConstantR1<uint32>({0, 1, max, 4, 5, 6, 0, 1, max});
- builder.Ge(lhs, rhs);
+ auto lhs = ConstantR1<uint32>(&builder, {0, 0, 0, 5, 5, 5, max, max, max});
+ auto rhs = ConstantR1<uint32>(&builder, {0, 1, max, 4, 5, 6, 0, 1, max});
+ Ge(lhs, rhs);
ComputeAndCompareR1<bool>(
&builder, {true, false, false, true, true, false, true, true, true}, {});
@@ -1347,9 +1354,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareGeU32s) {
XLA_TEST_F(ArrayElementwiseOpTest, CompareGtU32s) {
const uint32 max = std::numeric_limits<uint32>::max();
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<uint32>({0, 0, 0, 5, 5, 5, max, max, max});
- auto rhs = builder.ConstantR1<uint32>({0, 1, max, 4, 5, 6, 0, 1, max});
- builder.Gt(lhs, rhs);
+ auto lhs = ConstantR1<uint32>(&builder, {0, 0, 0, 5, 5, 5, max, max, max});
+ auto rhs = ConstantR1<uint32>(&builder, {0, 1, max, 4, 5, 6, 0, 1, max});
+ Gt(lhs, rhs);
ComputeAndCompareR1<bool>(
&builder, {false, false, false, true, false, false, true, true, false},
@@ -1359,9 +1366,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareGtU32s) {
XLA_TEST_F(ArrayElementwiseOpTest, CompareLeU32s) {
const uint32 max = std::numeric_limits<uint32>::max();
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<uint32>({0, 0, 0, 5, 5, 5, max, max, max});
- auto rhs = builder.ConstantR1<uint32>({0, 1, max, 4, 5, 6, 0, 1, max});
- builder.Le(lhs, rhs);
+ auto lhs = ConstantR1<uint32>(&builder, {0, 0, 0, 5, 5, 5, max, max, max});
+ auto rhs = ConstantR1<uint32>(&builder, {0, 1, max, 4, 5, 6, 0, 1, max});
+ Le(lhs, rhs);
ComputeAndCompareR1<bool>(
&builder, {true, true, true, false, true, true, false, false, true}, {});
@@ -1370,9 +1377,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareLeU32s) {
XLA_TEST_F(ArrayElementwiseOpTest, CompareLtU32s) {
const uint32 max = std::numeric_limits<uint32>::max();
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<uint32>({0, 0, 0, 5, 5, 5, max, max, max});
- auto rhs = builder.ConstantR1<uint32>({0, 1, max, 4, 5, 6, 0, 1, max});
- builder.Lt(lhs, rhs);
+ auto lhs = ConstantR1<uint32>(&builder, {0, 0, 0, 5, 5, 5, max, max, max});
+ auto rhs = ConstantR1<uint32>(&builder, {0, 1, max, 4, 5, 6, 0, 1, max});
+ Lt(lhs, rhs);
ComputeAndCompareR1<bool>(
&builder, {false, true, true, false, false, true, false, false, false},
@@ -1383,10 +1390,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, PowF32s) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
auto lhs =
- builder.ConstantR1<float>({4.0f, 2.0f, 2.0f, NAN, 6.0f, -2.0f, -2.0f});
+ ConstantR1<float>(&builder, {4.0f, 2.0f, 2.0f, NAN, 6.0f, -2.0f, -2.0f});
auto rhs =
- builder.ConstantR1<float>({2.0f, -2.0f, 3.0f, 10.0f, NAN, 3.0f, 4.0f});
- builder.Pow(lhs, rhs);
+ ConstantR1<float>(&builder, {2.0f, -2.0f, 3.0f, 10.0f, NAN, 3.0f, 4.0f});
+ Pow(lhs, rhs);
ComputeAndCompareR1<float>(
&builder, {16.0f, 0.25f, 8.0f, NAN, NAN, -8.0f, 16.0f}, {}, error_spec_);
@@ -1395,9 +1402,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, PowF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, PowNonIntegerF32s) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<float>({-2.0f, -0.6f, -0.6f, 0.0f});
- auto rhs = builder.ConstantR1<float>({0.5f, 0.6f, -0.6f, -0.6f});
- builder.Pow(lhs, rhs);
+ auto lhs = ConstantR1<float>(&builder, {-2.0f, -0.6f, -0.6f, 0.0f});
+ auto rhs = ConstantR1<float>(&builder, {0.5f, 0.6f, -0.6f, -0.6f});
+ Pow(lhs, rhs);
ComputeAndCompareR1<float>(&builder, {NAN, NAN, NAN, INFINITY}, {},
error_spec_);
@@ -1405,9 +1412,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, PowNonIntegerF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, PowZeroElementF32s) {
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<float>({});
- auto rhs = builder.ConstantR1<float>({});
- builder.Pow(lhs, rhs);
+ auto lhs = ConstantR1<float>(&builder, {});
+ auto rhs = ConstantR1<float>(&builder, {});
+ Pow(lhs, rhs);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
@@ -1419,14 +1426,14 @@ XLA_TEST_F(ArrayElementwiseOpTest, PowSpecialF32) {
std::vector<float> values = {1.0f, 2.0f, 3.2f, -4.0f};
std::vector<float> exponents = {0.0f, 1.0f, 2.0f, 0.5f, -1.0f, -0.5f};
- std::unique_ptr<Literal> param_literal = Literal::CreateR1<float>(values);
+ std::unique_ptr<Literal> param_literal = LiteralUtil::CreateR1<float>(values);
std::unique_ptr<GlobalData> param_data =
client_->TransferToServer(*param_literal).ConsumeValueOrDie();
- auto sum = b.ConstantR0<float>(0.0f);
- auto param = b.Parameter(0, param_literal->shape(), "param");
+ auto sum = ConstantR0<float>(&b, 0.0f);
+ auto param = Parameter(&b, 0, param_literal->shape(), "param");
for (float exponent : exponents) {
- sum = b.Add(sum, b.Pow(param, b.ConstantR0<float>(exponent)));
+ sum = Add(sum, Pow(param, ConstantR0<float>(&b, exponent)));
}
std::vector<float> expected;
@@ -1447,15 +1454,15 @@ XLA_TEST_F(ArrayElementwiseOpTest, PowOfExpF32) {
std::vector<float> values0 = {1.0f, 2.0f, 3.2f, -4.0f, 0.0f, 5.7f};
std::vector<float> values1 = {0.0f, 1.0f, 2.0f, 0.5f, -1.0f, -0.5f};
- std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>(values0);
+ std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>(values0);
std::unique_ptr<GlobalData> data0 =
client_->TransferToServer(*literal0).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>(values1);
+ std::unique_ptr<Literal> literal1 = LiteralUtil::CreateR1<float>(values1);
std::unique_ptr<GlobalData> data1 =
client_->TransferToServer(*literal1).ConsumeValueOrDie();
- auto param0 = b.Parameter(0, literal0->shape(), "param0");
- auto param1 = b.Parameter(1, literal1->shape(), "param1");
- b.Pow(b.Exp(param0), param1);
+ auto param0 = Parameter(&b, 0, literal0->shape(), "param0");
+ auto param1 = Parameter(&b, 1, literal1->shape(), "param1");
+ Pow(Exp(param0), param1);
std::vector<float> expected(values0.size());
for (int64 i = 0; i < values0.size(); ++i) {
@@ -1472,15 +1479,15 @@ XLA_TEST_F(ArrayElementwiseOpTest, LogOfPowerF32) {
std::vector<float> values0 = {1.0f, 2.0f, 3.2f, 4.0f, 0.5f, 5.7f};
std::vector<float> values1 = {0.0f, 1.0f, 2.0f, 0.5f, -1.0f, -0.5f};
- std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>(values0);
+ std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>(values0);
std::unique_ptr<GlobalData> data0 =
client_->TransferToServer(*literal0).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>(values1);
+ std::unique_ptr<Literal> literal1 = LiteralUtil::CreateR1<float>(values1);
std::unique_ptr<GlobalData> data1 =
client_->TransferToServer(*literal1).ConsumeValueOrDie();
- auto param0 = b.Parameter(0, literal0->shape(), "param0");
- auto param1 = b.Parameter(1, literal1->shape(), "param1");
- b.Log(b.Pow(param0, param1));
+ auto param0 = Parameter(&b, 0, literal0->shape(), "param0");
+ auto param1 = Parameter(&b, 1, literal1->shape(), "param1");
+ Log(Pow(param0, param1));
std::vector<float> expected(values0.size());
for (int64 i = 0; i < values0.size(); ++i) {
@@ -1497,15 +1504,15 @@ XLA_TEST_F(ArrayElementwiseOpTest, MulOfExpF32) {
std::vector<float> values0 = {1.0f, 2.0f, 3.2f, -4.0f, 0.0f, 5.7f};
std::vector<float> values1 = {0.0f, 1.0f, 2.0f, 0.5f, -1.0f, -0.5f};
- std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>(values0);
+ std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>(values0);
std::unique_ptr<GlobalData> data0 =
client_->TransferToServer(*literal0).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>(values1);
+ std::unique_ptr<Literal> literal1 = LiteralUtil::CreateR1<float>(values1);
std::unique_ptr<GlobalData> data1 =
client_->TransferToServer(*literal1).ConsumeValueOrDie();
- auto param0 = b.Parameter(0, literal0->shape(), "param0");
- auto param1 = b.Parameter(1, literal1->shape(), "param1");
- b.Mul(b.Exp(param0), b.Exp(param1));
+ auto param0 = Parameter(&b, 0, literal0->shape(), "param0");
+ auto param1 = Parameter(&b, 1, literal1->shape(), "param1");
+ Mul(Exp(param0), Exp(param1));
std::vector<float> expected(values0.size());
for (int64 i = 0; i < values0.size(); ++i) {
@@ -1522,15 +1529,15 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivOfExpF32) {
std::vector<float> values0 = {1.0f, 2.0f, 3.2f, -4.0f, 0.0f, 5.7f};
std::vector<float> values1 = {0.0f, 1.0f, 2.0f, 0.5f, -1.0f, -0.5f};
- std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>(values0);
+ std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>(values0);
std::unique_ptr<GlobalData> data0 =
client_->TransferToServer(*literal0).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>(values1);
+ std::unique_ptr<Literal> literal1 = LiteralUtil::CreateR1<float>(values1);
std::unique_ptr<GlobalData> data1 =
client_->TransferToServer(*literal1).ConsumeValueOrDie();
- auto param0 = b.Parameter(0, literal0->shape(), "param0");
- auto param1 = b.Parameter(1, literal1->shape(), "param1");
- b.Div(param0, b.Exp(param1));
+ auto param0 = Parameter(&b, 0, literal0->shape(), "param0");
+ auto param1 = Parameter(&b, 1, literal1->shape(), "param1");
+ Div(param0, Exp(param1));
std::vector<float> expected(values0.size());
for (int64 i = 0; i < values0.size(); ++i) {
@@ -1548,21 +1555,21 @@ XLA_TEST_F(ArrayElementwiseOpTest, Div3_lhs_F32) {
std::vector<float> values1 = {0.1f, 1.0f, 2.0f, 0.5f, -1.0f, -0.5f};
std::vector<float> values2 = {0.1f, 1.1f, 6.9f, 12.5f, -15.0f, -0.5f};
- std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>(values0);
+ std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>(values0);
std::unique_ptr<GlobalData> data0 =
client_->TransferToServer(*literal0).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>(values1);
+ std::unique_ptr<Literal> literal1 = LiteralUtil::CreateR1<float>(values1);
std::unique_ptr<GlobalData> data1 =
client_->TransferToServer(*literal1).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal2 = Literal::CreateR1<float>(values2);
+ std::unique_ptr<Literal> literal2 = LiteralUtil::CreateR1<float>(values2);
std::unique_ptr<GlobalData> data2 =
client_->TransferToServer(*literal2).ConsumeValueOrDie();
- auto param0 = b.Parameter(0, literal0->shape(), "param0");
- auto param1 = b.Parameter(1, literal1->shape(), "param1");
- auto param2 = b.Parameter(2, literal2->shape(), "param2");
- b.Div(b.Div(param0, param1), param2);
+ auto param0 = Parameter(&b, 0, literal0->shape(), "param0");
+ auto param1 = Parameter(&b, 1, literal1->shape(), "param1");
+ auto param2 = Parameter(&b, 2, literal2->shape(), "param2");
+ Div(Div(param0, param1), param2);
std::vector<float> expected(values0.size());
for (int64 i = 0; i < values0.size(); ++i) {
@@ -1580,22 +1587,22 @@ XLA_TEST_F(ArrayElementwiseOpTest, Div3_rhs_F32) {
std::vector<float> values1 = {0.1f, 1.0f, 2.0f, 0.5f, -1.0f, -0.5f};
std::vector<float> values2 = {0.1f, 1.1f, 6.9f, 12.5f, -15.0f, -0.5f};
- std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>(values0);
+ std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>(values0);
std::unique_ptr<GlobalData> data0 =
client_->TransferToServer(*literal0).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>(values1);
+ std::unique_ptr<Literal> literal1 = LiteralUtil::CreateR1<float>(values1);
std::unique_ptr<GlobalData> data1 =
client_->TransferToServer(*literal1).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal2 = Literal::CreateR1<float>(values2);
+ std::unique_ptr<Literal> literal2 = LiteralUtil::CreateR1<float>(values2);
std::unique_ptr<GlobalData> data2 =
client_->TransferToServer(*literal2).ConsumeValueOrDie();
- auto param0 = b.Parameter(0, literal0->shape(), "param0");
- auto param1 = b.Parameter(1, literal1->shape(), "param1");
- auto param2 = b.Parameter(2, literal2->shape(), "param2");
- b.Div(param0, b.Div(param1, param2));
+ auto param0 = Parameter(&b, 0, literal0->shape(), "param0");
+ auto param1 = Parameter(&b, 1, literal1->shape(), "param1");
+ auto param2 = Parameter(&b, 2, literal2->shape(), "param2");
+ Div(param0, Div(param1, param2));
std::vector<float> expected(values0.size());
for (int64 i = 0; i < values0.size(); ++i) {
@@ -1613,22 +1620,22 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivOfPowerF32) {
std::vector<float> values1 = {0.1f, 1.0f, 2.0f, 0.5f, 1.0f, 0.5f};
std::vector<float> values2 = {0.1f, 1.1f, 6.9f, 9.5f, -11.0f, -0.5f};
- std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>(values0);
+ std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>(values0);
std::unique_ptr<GlobalData> data0 =
client_->TransferToServer(*literal0).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>(values1);
+ std::unique_ptr<Literal> literal1 = LiteralUtil::CreateR1<float>(values1);
std::unique_ptr<GlobalData> data1 =
client_->TransferToServer(*literal1).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal2 = Literal::CreateR1<float>(values2);
+ std::unique_ptr<Literal> literal2 = LiteralUtil::CreateR1<float>(values2);
std::unique_ptr<GlobalData> data2 =
client_->TransferToServer(*literal2).ConsumeValueOrDie();
- auto param0 = b.Parameter(0, literal0->shape(), "param0");
- auto param1 = b.Parameter(1, literal1->shape(), "param1");
- auto param2 = b.Parameter(2, literal2->shape(), "param2");
- b.Div(param0, b.Pow(param1, param2));
+ auto param0 = Parameter(&b, 0, literal0->shape(), "param0");
+ auto param1 = Parameter(&b, 1, literal1->shape(), "param1");
+ auto param2 = Parameter(&b, 2, literal2->shape(), "param2");
+ Div(param0, Pow(param1, param2));
std::vector<float> expected(values0.size());
for (int64 i = 0; i < values0.size(); ++i) {
@@ -1647,27 +1654,27 @@ XLA_TEST_F(ArrayElementwiseOpTest, Div4F32) {
std::vector<float> values2 = {0.1f, 1.1f, 6.9f, 12.5f, -15.0f, -0.5f};
std::vector<float> values3 = {2.1f, 3.1f, 9.9f, -4.5f, -11.0f, -21.5f};
- std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>(values0);
+ std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>(values0);
std::unique_ptr<GlobalData> data0 =
client_->TransferToServer(*literal0).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>(values1);
+ std::unique_ptr<Literal> literal1 = LiteralUtil::CreateR1<float>(values1);
std::unique_ptr<GlobalData> data1 =
client_->TransferToServer(*literal1).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal2 = Literal::CreateR1<float>(values2);
+ std::unique_ptr<Literal> literal2 = LiteralUtil::CreateR1<float>(values2);
std::unique_ptr<GlobalData> data2 =
client_->TransferToServer(*literal2).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal3 = Literal::CreateR1<float>(values3);
+ std::unique_ptr<Literal> literal3 = LiteralUtil::CreateR1<float>(values3);
std::unique_ptr<GlobalData> data3 =
client_->TransferToServer(*literal3).ConsumeValueOrDie();
- auto param0 = b.Parameter(0, literal0->shape(), "param0");
- auto param1 = b.Parameter(1, literal1->shape(), "param1");
- auto param2 = b.Parameter(2, literal2->shape(), "param2");
- auto param3 = b.Parameter(3, literal3->shape(), "param2");
- b.Div(b.Div(param0, param1), b.Div(param2, param3));
+ auto param0 = Parameter(&b, 0, literal0->shape(), "param0");
+ auto param1 = Parameter(&b, 1, literal1->shape(), "param1");
+ auto param2 = Parameter(&b, 2, literal2->shape(), "param2");
+  auto param3 = Parameter(&b, 3, literal3->shape(), "param3");
+ Div(Div(param0, param1), Div(param2, param3));
std::vector<float> expected(values0.size());
for (int64 i = 0; i < values0.size(); ++i) {
@@ -1687,8 +1694,8 @@ TEST_P(ArrayElementwiseOpTestParamCount, SquareManyValues) {
for (int i = 0; i < count; ++i) {
values.push_back(i / static_cast<float>(count));
}
- auto x = builder.ConstantR1<float>(values);
- builder.Pow(x, builder.ConstantR0<float>(2.0f));
+ auto x = ConstantR1<float>(&builder, values);
+ Pow(x, ConstantR0<float>(&builder, 2.0f));
std::vector<float> expected;
expected.reserve(values.size());
@@ -1713,8 +1720,8 @@ XLA_TEST_F(ArrayElementwiseOpTest, SquareIn4D) {
Array4D<float> expected(2, 2, 2, 2, expected_vector);
- auto x = builder.ConstantR4FromArray4D<float>(values);
- builder.Pow(x, builder.ConstantR0<float>(2.0f));
+ auto x = ConstantR4FromArray4D<float>(&builder, values);
+ Pow(x, ConstantR0<float>(&builder, 2.0f));
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
}
@@ -1724,8 +1731,8 @@ XLA_TEST_F(ArrayElementwiseOpTest, SquareIn4DZeroElements) {
Array4D<float> values(2, 2, 0, 2);
Array4D<float> expected(2, 2, 0, 2);
- auto x = builder.ConstantR4FromArray4D<float>(values);
- builder.Pow(x, builder.ConstantR0<float>(2.0f));
+ auto x = ConstantR4FromArray4D<float>(&builder, values);
+ Pow(x, ConstantR0<float>(&builder, 2.0f));
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
}
@@ -1733,9 +1740,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, SquareIn4DZeroElements) {
XLA_TEST_F(ArrayElementwiseOpTest, MinF32s) {
XlaBuilder builder(TestName());
SetFastMathDisabled(true);
- auto lhs = builder.ConstantR1<float>({1.0f, 1.0f, 2.25f, NAN, 6.0f});
- auto rhs = builder.ConstantR1<float>({2.0f, -5.0f, 1.0f, 10.0f, NAN});
- builder.Min(lhs, rhs);
+ auto lhs = ConstantR1<float>(&builder, {1.0f, 1.0f, 2.25f, NAN, 6.0f});
+ auto rhs = ConstantR1<float>(&builder, {2.0f, -5.0f, 1.0f, 10.0f, NAN});
+ Min(lhs, rhs);
ComputeAndCompareR1<float>(&builder, {1.0f, -5.0f, 1.0f, NAN, NAN}, {},
error_spec_);
@@ -1743,18 +1750,18 @@ XLA_TEST_F(ArrayElementwiseOpTest, MinF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, MinZeroElementF32s) {
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<float>({});
- auto rhs = builder.ConstantR1<float>({});
- builder.Min(lhs, rhs);
+ auto lhs = ConstantR1<float>(&builder, {});
+ auto rhs = ConstantR1<float>(&builder, {});
+ Min(lhs, rhs);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
XLA_TEST_F(ArrayElementwiseOpTest, MinF64s) {
XlaBuilder builder(TestName());
SetFastMathDisabled(true);
- auto lhs = builder.ConstantR1<double>({1.0, 1.0, 2.25, NAN, 6.0});
- auto rhs = builder.ConstantR1<double>({2.0, -5.0, 1.0, 10.0, NAN});
- builder.Min(lhs, rhs);
+ auto lhs = ConstantR1<double>(&builder, {1.0, 1.0, 2.25, NAN, 6.0});
+ auto rhs = ConstantR1<double>(&builder, {2.0, -5.0, 1.0, 10.0, NAN});
+ Min(lhs, rhs);
ComputeAndCompareR1<double>(&builder, {1.0, -5.0, 1.0, NAN, NAN}, {},
error_spec_);
@@ -1763,9 +1770,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, MinF64s) {
XLA_TEST_F(ArrayElementwiseOpTest, MaxF32s) {
XlaBuilder builder(TestName());
SetFastMathDisabled(true);
- auto lhs = builder.ConstantR1<float>({1.0f, 1.0f, 2.25f, NAN, 6.0f});
- auto rhs = builder.ConstantR1<float>({2.0f, -5.0f, 1.0f, 10.0f, NAN});
- builder.Max(lhs, rhs);
+ auto lhs = ConstantR1<float>(&builder, {1.0f, 1.0f, 2.25f, NAN, 6.0f});
+ auto rhs = ConstantR1<float>(&builder, {2.0f, -5.0f, 1.0f, 10.0f, NAN});
+ Max(lhs, rhs);
ComputeAndCompareR1<float>(&builder, {2.0f, 1.0f, 2.25f, NAN, NAN}, {},
error_spec_);
@@ -1773,18 +1780,18 @@ XLA_TEST_F(ArrayElementwiseOpTest, MaxF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, MaxZeroElementF32s) {
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<float>({});
- auto rhs = builder.ConstantR1<float>({});
- builder.Max(lhs, rhs);
+ auto lhs = ConstantR1<float>(&builder, {});
+ auto rhs = ConstantR1<float>(&builder, {});
+ Max(lhs, rhs);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
XLA_TEST_F(ArrayElementwiseOpTest, MaxF64s) {
XlaBuilder builder(TestName());
SetFastMathDisabled(true);
- auto lhs = builder.ConstantR1<double>({1.0, 1.0, 2.25, NAN, 6.0});
- auto rhs = builder.ConstantR1<double>({2.0, -5.0, 1.0, 10.0, NAN});
- builder.Max(lhs, rhs);
+ auto lhs = ConstantR1<double>(&builder, {1.0, 1.0, 2.25, NAN, 6.0});
+ auto rhs = ConstantR1<double>(&builder, {2.0, -5.0, 1.0, 10.0, NAN});
+ Max(lhs, rhs);
ComputeAndCompareR1<double>(&builder, {2.0, 1.0, 2.25, NAN, NAN}, {},
error_spec_);
@@ -1794,11 +1801,11 @@ XLA_TEST_F(ArrayElementwiseOpTest, MaxS32s) {
const int32 min = std::numeric_limits<int32>::min();
const int32 max = std::numeric_limits<int32>::max();
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<int32>(
- {min, min, min, -1, -1, 0, 0, 0, 1, 1, max, max, max});
- auto y = builder.ConstantR1<int32>(
- {min, max, 0, -10, 0, -1, 0, 1, 0, 10, 0, max, min});
- builder.Max(x, y);
+ auto x = ConstantR1<int32>(
+ &builder, {min, min, min, -1, -1, 0, 0, 0, 1, 1, max, max, max});
+ auto y = ConstantR1<int32>(
+ &builder, {min, max, 0, -10, 0, -1, 0, 1, 0, 10, 0, max, min});
+ Max(x, y);
std::vector<int32> expected = {min, max, 0, -1, 0, 0, 0,
1, 1, 10, max, max, max};
@@ -1809,11 +1816,11 @@ XLA_TEST_F(ArrayElementwiseOpTest, MinS32s) {
const int32 min = std::numeric_limits<int32>::min();
const int32 max = std::numeric_limits<int32>::max();
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<int32>(
- {min, min, min, -1, -1, 0, 0, 0, 1, 1, max, max, max});
- auto y = builder.ConstantR1<int32>(
- {min, max, 0, -10, 0, -1, 0, 1, 0, 10, 0, max, min});
- builder.Min(x, y);
+ auto x = ConstantR1<int32>(
+ &builder, {min, min, min, -1, -1, 0, 0, 0, 1, 1, max, max, max});
+ auto y = ConstantR1<int32>(
+ &builder, {min, max, 0, -10, 0, -1, 0, 1, 0, 10, 0, max, min});
+ Min(x, y);
std::vector<int32> expected = {min, min, min, -10, -1, -1, 0,
0, 0, 1, 0, max, min};
@@ -1823,9 +1830,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, MinS32s) {
XLA_TEST_F(ArrayElementwiseOpTest, MaxU32s) {
const uint32 max = std::numeric_limits<uint32>::max();
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<uint32>({0, 0, 1, 1, 1, max, max, max});
- auto y = builder.ConstantR1<uint32>({0, 1, 0, 1, 10, 0, 234234, max});
- builder.Max(x, y);
+ auto x = ConstantR1<uint32>(&builder, {0, 0, 1, 1, 1, max, max, max});
+ auto y = ConstantR1<uint32>(&builder, {0, 1, 0, 1, 10, 0, 234234, max});
+ Max(x, y);
std::vector<uint32> expected = {0, 1, 1, 1, 10, max, max, max};
ComputeAndCompareR1<uint32>(&builder, expected, {});
@@ -1834,9 +1841,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, MaxU32s) {
XLA_TEST_F(ArrayElementwiseOpTest, MinU32s) {
const uint32 max = std::numeric_limits<uint32>::max();
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<uint32>({0, 0, 1, 1, 1, max, max, max});
- auto y = builder.ConstantR1<uint32>({0, 1, 0, 1, 10, 0, 234234, max});
- builder.Min(x, y);
+ auto x = ConstantR1<uint32>(&builder, {0, 0, 1, 1, 1, max, max, max});
+ auto y = ConstantR1<uint32>(&builder, {0, 1, 0, 1, 10, 0, 234234, max});
+ Min(x, y);
std::vector<uint32> expected = {0, 0, 0, 1, 1, 0, 234234, max};
ComputeAndCompareR1<uint32>(&builder, expected, {});
@@ -1844,11 +1851,11 @@ XLA_TEST_F(ArrayElementwiseOpTest, MinU32s) {
XLA_TEST_F(ArrayElementwiseOpTest, MaxTenF32s) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>(
- {-0.0, 1.0, 2.0, -3.0, -4.0, 5.0, 6.0, -7.0, -8.0, 9.0});
- auto y = builder.ConstantR1<float>(
- {-0.0, -1.0, -2.0, 3.0, 4.0, -5.0, -6.0, 7.0, 8.0, -9.0});
- builder.Max(x, y);
+ auto x = ConstantR1<float>(
+ &builder, {-0.0, 1.0, 2.0, -3.0, -4.0, 5.0, 6.0, -7.0, -8.0, 9.0});
+ auto y = ConstantR1<float>(
+ &builder, {-0.0, -1.0, -2.0, 3.0, 4.0, -5.0, -6.0, 7.0, 8.0, -9.0});
+ Max(x, y);
std::vector<float> expected = {-0.0, 1.0, 2.0, 3.0, 4.0,
5.0, 6.0, 7.0, 8.0, 9.0};
@@ -1857,9 +1864,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, MaxTenF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, MaxR1S1AndR1S0F32s) {
XlaBuilder builder(TestName());
- auto u = builder.ConstantR1<float>({3.5});
- auto v = builder.ConstantR1<float>({});
- builder.Max(u, v);
+ auto u = ConstantR1<float>(&builder, {3.5});
+ auto v = ConstantR1<float>(&builder, {});
+ Max(u, v);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
@@ -1867,9 +1874,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, MaxR1S1AndR1S0F32s) {
XLA_TEST_F(ArrayElementwiseOpTest, MaxR1S0AndR2S0x2F32s) {
for (int broadcast_dim : {0, 1}) {
XlaBuilder builder(TestName());
- auto u = builder.ConstantR1<float>({3.5});
- auto v = builder.ConstantR2FromArray2D<float>(Array2D<float>(0, 2));
- builder.Max(u, v, /*broadcast_dimensions=*/{broadcast_dim});
+ auto u = ConstantR1<float>(&builder, {3.5});
+ auto v = ConstantR2FromArray2D<float>(&builder, Array2D<float>(0, 2));
+ Max(u, v, /*broadcast_dimensions=*/{broadcast_dim});
ComputeAndCompareR2<float>(&builder, Array2D<float>(0, 2), {}, error_spec_);
}
@@ -1877,10 +1884,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, MaxR1S0AndR2S0x2F32s) {
XLA_TEST_F(ArrayElementwiseOpTest, Max1DAnd2DF32s) {
XlaBuilder builder(TestName());
- auto v = builder.ConstantR1<float>({2.0f, 3.0f, 4.0f});
- auto m =
- builder.ConstantR2<float>({{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
- builder.Max(v, m, /*broadcast_dimensions=*/{1});
+ auto v = ConstantR1<float>(&builder, {2.0f, 3.0f, 4.0f});
+ auto m = ConstantR2<float>(&builder,
+ {{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
+ Max(v, m, /*broadcast_dimensions=*/{1});
Array2D<float> expected({{2.0f, 3.14f, 4.0f}, {2.25f, 3.0f, 4.0f}});
ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
@@ -1888,9 +1895,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, Max1DAnd2DF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, Max1DAnd2DZeroElementF32s) {
XlaBuilder builder(TestName());
- auto v = builder.ConstantR1<float>({});
- auto m = builder.ConstantR2<float>({{}, {}});
- builder.Max(v, m, /*broadcast_dimensions=*/{1});
+ auto v = ConstantR1<float>(&builder, {});
+ auto m = ConstantR2<float>(&builder, {{}, {}});
+ Max(v, m, /*broadcast_dimensions=*/{1});
Array2D<float> expected({{}, {}});
ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
@@ -1898,10 +1905,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, Max1DAnd2DZeroElementF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, Max3DAndScalarS32s) {
XlaBuilder builder(TestName());
- auto scalar = builder.ConstantR0<int32>(2);
+ auto scalar = ConstantR0<int32>(&builder, 2);
Array3D<int32> a_3d({{{3, 9, -1}, {2, -10, 3}}, {{-2, 2, 8}, {12, 10, 4}}});
- auto array = builder.ConstantR3FromArray3D<int32>(a_3d);
- builder.Max(array, scalar, /*broadcast_dimensions=*/{});
+ auto array = ConstantR3FromArray3D<int32>(&builder, a_3d);
+ Max(array, scalar, /*broadcast_dimensions=*/{});
Array3D<int32> expected({{{3, 9, 2}, {2, 2, 3}}, {{2, 2, 8}, {12, 10, 4}}});
ComputeAndCompareR3<int32>(&builder, expected, {});
@@ -1909,10 +1916,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, Max3DAndScalarS32s) {
XLA_TEST_F(ArrayElementwiseOpTest, Max3DAndScalarZeroElementS32s) {
XlaBuilder builder(TestName());
- auto scalar = builder.ConstantR0<int32>(2);
+ auto scalar = ConstantR0<int32>(&builder, 2);
Array3D<int32> a_3d(2, 0, 3);
- auto array = builder.ConstantR3FromArray3D<int32>(a_3d);
- builder.Max(array, scalar, /*broadcast_dimensions=*/{});
+ auto array = ConstantR3FromArray3D<int32>(&builder, a_3d);
+ Max(array, scalar, /*broadcast_dimensions=*/{});
Array3D<int32> expected(2, 0, 3);
ComputeAndCompareR3<int32>(&builder, expected, {});
@@ -1920,10 +1927,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, Max3DAndScalarZeroElementS32s) {
XLA_TEST_F(ArrayElementwiseOpTest, Min2DTo1DF32s) {
XlaBuilder builder(TestName());
- auto m =
- builder.ConstantR2<float>({{-10.4f, 64.0f, 6.0f}, {0.1f, 32.0f, 16.1f}});
- auto v = builder.ConstantR1<float>({-10.2f, 16.4f});
- builder.Min(m, v, /*broadcast_dimensions=*/{0});
+ auto m = ConstantR2<float>(&builder,
+ {{-10.4f, 64.0f, 6.0f}, {0.1f, 32.0f, 16.1f}});
+ auto v = ConstantR1<float>(&builder, {-10.2f, 16.4f});
+ Min(m, v, /*broadcast_dimensions=*/{0});
Array2D<float> expected({{-10.4f, -10.2f, -10.2f}, {0.1f, 16.4f, 16.1f}});
ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
@@ -1931,9 +1938,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, Min2DTo1DF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, Min2DTo1DZeroElementF32s) {
XlaBuilder builder(TestName());
- auto m = builder.ConstantR2<float>({{}, {}});
- auto v = builder.ConstantR1<float>({-10.2f, 16.4f});
- builder.Min(m, v, /*broadcast_dimensions=*/{0});
+ auto m = ConstantR2<float>(&builder, {{}, {}});
+ auto v = ConstantR1<float>(&builder, {-10.2f, 16.4f});
+ Min(m, v, /*broadcast_dimensions=*/{0});
Array2D<float> expected({{}, {}});
ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
@@ -1942,11 +1949,11 @@ XLA_TEST_F(ArrayElementwiseOpTest, Min2DTo1DZeroElementF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, Min2DTo4DF32s) {
XlaBuilder builder(TestName());
auto array2d =
- builder.ConstantR2<float>({{-12.2f, 64.3f, 6.1f}, {0.0f, 32.2f, 2.5f}});
- auto array4d = builder.ConstantR4FromArray4D<float>(
- {{{{-12.1f, 32.3f, 6.2f}}, {{0.0f, 32.5f, 3.0f}}},
- {{{-2.5f, 64.29f, 6.5f}}, {{-0.01f, 32.25f, 2.6f}}}});
- builder.Min(array2d, array4d, /*broadcast_dimensions=*/{1, 3});
+ ConstantR2<float>(&builder, {{-12.2f, 64.3f, 6.1f}, {0.0f, 32.2f, 2.5f}});
+ auto array4d = ConstantR4FromArray4D<float>(
+ &builder, {{{{-12.1f, 32.3f, 6.2f}}, {{0.0f, 32.5f, 3.0f}}},
+ {{{-2.5f, 64.29f, 6.5f}}, {{-0.01f, 32.25f, 2.6f}}}});
+ Min(array2d, array4d, /*broadcast_dimensions=*/{1, 3});
Array4D<float> expected(
{{{{-12.2f, 32.3f, 6.1f}}, {{0.0f, 32.2f, 2.5f}}},
@@ -1957,10 +1964,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, Min2DTo4DF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, Min2DTo4DZeroElementF32s) {
XlaBuilder builder(TestName());
auto array2d =
- builder.ConstantR2<float>({{-12.2f, 64.3f, 6.1f}, {0.0f, 32.2f, 2.5f}});
+ ConstantR2<float>(&builder, {{-12.2f, 64.3f, 6.1f}, {0.0f, 32.2f, 2.5f}});
Array4D<float> arg(2, 2, 0, 3);
- auto array4d = builder.ConstantR4FromArray4D<float>(arg);
- builder.Min(array2d, array4d, /*broadcast_dimensions=*/{1, 3});
+ auto array4d = ConstantR4FromArray4D<float>(&builder, arg);
+ Min(array2d, array4d, /*broadcast_dimensions=*/{1, 3});
Array4D<float> expected(2, 2, 0, 3);
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -1968,9 +1975,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, Min2DTo4DZeroElementF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, MinTenS32s) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<int32>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
- auto y = builder.ConstantR1<int32>({9, 8, 7, 6, 5, 4, 3, 2, 1, 0});
- builder.Min(x, y);
+ auto x = ConstantR1<int32>(&builder, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
+ auto y = ConstantR1<int32>(&builder, {9, 8, 7, 6, 5, 4, 3, 2, 1, 0});
+ Min(x, y);
std::vector<int32> expected = {0, 1, 2, 3, 4, 4, 3, 2, 1, 0};
ComputeAndCompareR1<int32>(&builder, expected, {});
@@ -1978,9 +1985,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, MinTenS32s) {
XLA_TEST_F(ArrayElementwiseOpTest, MaxTenS32s) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<int32>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
- auto y = builder.ConstantR1<int32>({9, 8, 7, 6, 5, 4, 3, 2, 1, 0});
- builder.Max(x, y);
+ auto x = ConstantR1<int32>(&builder, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
+ auto y = ConstantR1<int32>(&builder, {9, 8, 7, 6, 5, 4, 3, 2, 1, 0});
+ Max(x, y);
std::vector<int32> expected = {9, 8, 7, 6, 5, 5, 6, 7, 8, 9};
ComputeAndCompareR1<int32>(&builder, expected, {});
@@ -1988,19 +1995,20 @@ XLA_TEST_F(ArrayElementwiseOpTest, MaxTenS32s) {
XLA_TEST_F(ArrayElementwiseOpTest, RemTwoConstantS32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({-3, 26, 2, -1, 1});
- auto b = builder.ConstantR1<int32>({10, 5, 1, 10, -10});
- builder.Rem(a, b);
+ auto a = ConstantR1<int32>(&builder, {-3, 26, 2, -1, 1});
+ auto b = ConstantR1<int32>(&builder, {10, 5, 1, 10, -10});
+ Rem(a, b);
ComputeAndCompareR1<int32>(&builder, {-3, 1, 0, -1, 1}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, NonNanClampF32) {
XlaBuilder builder(TestName());
- auto minimum = builder.ConstantR1<float>({1.0f, -6.5f, 1.0f, 2.25f, 0.0f});
- auto argument = builder.ConstantR1<float>({2.0f, 10.0f, -5.0f, 1.0f, 10.0f});
- auto maximum = builder.ConstantR1<float>({3.0f, 0.5f, 25.5f, 5.0f, 123.0});
- builder.Clamp(minimum, argument, maximum);
+ auto minimum = ConstantR1<float>(&builder, {1.0f, -6.5f, 1.0f, 2.25f, 0.0f});
+ auto argument =
+ ConstantR1<float>(&builder, {2.0f, 10.0f, -5.0f, 1.0f, 10.0f});
+  auto maximum = ConstantR1<float>(&builder, {3.0f, 0.5f, 25.5f, 5.0f, 123.0f});
+ Clamp(minimum, argument, maximum);
ComputeAndCompareR1<float>(&builder, {2.0f, 0.5f, 1.0f, 2.25f, 10.0f}, {},
error_spec_);
@@ -2008,10 +2016,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, NonNanClampF32) {
XLA_TEST_F(ArrayElementwiseOpTest, ClampF32Scalar) {
XlaBuilder builder(TestName());
- auto minimum = builder.ConstantR0<float>(0.0f);
- auto argument = builder.ConstantR1<float>({2.0f, 10.0f, -5.0f, 1.0f, 4.0f});
- auto maximum = builder.ConstantR0<float>(5.0f);
- builder.Clamp(minimum, argument, maximum);
+ auto minimum = ConstantR0<float>(&builder, 0.0f);
+ auto argument = ConstantR1<float>(&builder, {2.0f, 10.0f, -5.0f, 1.0f, 4.0f});
+ auto maximum = ConstantR0<float>(&builder, 5.0f);
+ Clamp(minimum, argument, maximum);
ComputeAndCompareR1<float>(&builder, {2.0f, 5.0f, 0.0f, 1.0f, 4.0f}, {},
error_spec_);
@@ -2019,16 +2027,19 @@ XLA_TEST_F(ArrayElementwiseOpTest, ClampF32Scalar) {
XLA_TEST_F(ArrayElementwiseOpTest, ClampF32ScalarVector) {
XlaBuilder builder(TestName());
- auto min_scalar = builder.ConstantR0<float>(0.0f);
- auto min_vector = builder.ConstantR1<float>({1.0f, -6.5f, 1.0f, 2.25f, 0.0f});
- auto arg_vector = builder.ConstantR1<float>({2.0f, 10.0f, -5.0f, 1.0f, 4.0f});
- auto max_scalar = builder.ConstantR0<float>(3.0f);
- auto max_vector = builder.ConstantR1<float>({3.0f, 0.5f, 25.5f, 5.0f, 123.0});
+ auto min_scalar = ConstantR0<float>(&builder, 0.0f);
+ auto min_vector =
+ ConstantR1<float>(&builder, {1.0f, -6.5f, 1.0f, 2.25f, 0.0f});
+ auto arg_vector =
+ ConstantR1<float>(&builder, {2.0f, 10.0f, -5.0f, 1.0f, 4.0f});
+ auto max_scalar = ConstantR0<float>(&builder, 3.0f);
+ auto max_vector =
+      ConstantR1<float>(&builder, {3.0f, 0.5f, 25.5f, 5.0f, 123.0f});
// Perform clamp with broadcasted scalar and vector.
- builder.Add(builder.Add(builder.Clamp(min_vector, arg_vector, max_scalar),
- builder.Clamp(min_scalar, arg_vector, max_vector)),
- builder.Add(builder.Clamp(min_vector, arg_vector, max_vector),
- builder.Clamp(min_scalar, arg_vector, max_scalar)));
+ Add(Add(Clamp(min_vector, arg_vector, max_scalar),
+ Clamp(min_scalar, arg_vector, max_vector)),
+ Add(Clamp(min_vector, arg_vector, max_vector),
+ Clamp(min_scalar, arg_vector, max_scalar)));
ComputeAndCompareR1<float>(&builder, {8.0f, 7.0f, 2.0f, 6.5f, 14.0f}, {},
error_spec_);
@@ -2036,52 +2047,52 @@ XLA_TEST_F(ArrayElementwiseOpTest, ClampF32ScalarVector) {
XLA_TEST_F(ArrayElementwiseOpTest, ClampS32Vector) {
XlaBuilder builder(TestName());
- auto min_vector = builder.ConstantR1<int32>({1, -6, 1, 2, 0, -5});
- auto arg_vector = builder.ConstantR1<int32>({2, 10, -5, 1, 4, 10});
- auto max_vector = builder.ConstantR1<int32>({3, 0, 25, 5, 123, -1});
- builder.Clamp(min_vector, arg_vector, max_vector);
+ auto min_vector = ConstantR1<int32>(&builder, {1, -6, 1, 2, 0, -5});
+ auto arg_vector = ConstantR1<int32>(&builder, {2, 10, -5, 1, 4, 10});
+ auto max_vector = ConstantR1<int32>(&builder, {3, 0, 25, 5, 123, -1});
+ Clamp(min_vector, arg_vector, max_vector);
ComputeAndCompareR1<int32>(&builder, {2, 0, 1, 2, 4, -1}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, ClampS32ScalarVector) {
XlaBuilder builder(TestName());
- auto min_scalar = builder.ConstantR0<int32>(0);
- auto min_vector = builder.ConstantR1<int32>({1, -6, 1, 2, 0});
- auto arg_vector = builder.ConstantR1<int32>({2, 10, -5, 1, 4});
- auto max_scalar = builder.ConstantR0<int32>(3);
- auto max_vector = builder.ConstantR1<int32>({3, 1, 25, 5, 123});
+ auto min_scalar = ConstantR0<int32>(&builder, 0);
+ auto min_vector = ConstantR1<int32>(&builder, {1, -6, 1, 2, 0});
+ auto arg_vector = ConstantR1<int32>(&builder, {2, 10, -5, 1, 4});
+ auto max_scalar = ConstantR0<int32>(&builder, 3);
+ auto max_vector = ConstantR1<int32>(&builder, {3, 1, 25, 5, 123});
// Perform clamp with broadcasted scalar and vector.
- builder.Add(builder.Add(builder.Clamp(min_vector, arg_vector, max_scalar),
- builder.Clamp(min_scalar, arg_vector, max_vector)),
- builder.Add(builder.Clamp(min_vector, arg_vector, max_vector),
- builder.Clamp(min_scalar, arg_vector, max_scalar)));
+ Add(Add(Clamp(min_vector, arg_vector, max_scalar),
+ Clamp(min_scalar, arg_vector, max_vector)),
+ Add(Clamp(min_vector, arg_vector, max_vector),
+ Clamp(min_scalar, arg_vector, max_scalar)));
ComputeAndCompareR1<int32>(&builder, {8, 8, 2, 6, 14}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, ClampU32Vector) {
XlaBuilder builder(TestName());
- auto min_vector = builder.ConstantR1<uint32>({1, 2, 1, 2, 0, ~0u - 4});
- auto arg_vector = builder.ConstantR1<uint32>({2, 10, 5, 1, 4, 10});
- auto max_vector = builder.ConstantR1<uint32>({3, 5, 25, 5, 123, ~0u});
- builder.Clamp(min_vector, arg_vector, max_vector);
+ auto min_vector = ConstantR1<uint32>(&builder, {1, 2, 1, 2, 0, ~0u - 4});
+ auto arg_vector = ConstantR1<uint32>(&builder, {2, 10, 5, 1, 4, 10});
+ auto max_vector = ConstantR1<uint32>(&builder, {3, 5, 25, 5, 123, ~0u});
+ Clamp(min_vector, arg_vector, max_vector);
ComputeAndCompareR1<uint32>(&builder, {2, 5, 5, 2, 4, ~0u - 4}, {});
}
XLA_TEST_F(ArrayElementwiseOpTest, ClampU32ScalarVector) {
XlaBuilder builder(TestName());
- auto min_scalar = builder.ConstantR0<uint32>(0);
- auto min_vector = builder.ConstantR1<uint32>({1, 0, 1, 2, 0});
- auto arg_vector = builder.ConstantR1<uint32>({2, 10, 0, 1, 4});
- auto max_scalar = builder.ConstantR0<uint32>(3);
- auto max_vector = builder.ConstantR1<uint32>({3, 1, 25, 5, 123});
+ auto min_scalar = ConstantR0<uint32>(&builder, 0);
+ auto min_vector = ConstantR1<uint32>(&builder, {1, 0, 1, 2, 0});
+ auto arg_vector = ConstantR1<uint32>(&builder, {2, 10, 0, 1, 4});
+ auto max_scalar = ConstantR0<uint32>(&builder, 3);
+ auto max_vector = ConstantR1<uint32>(&builder, {3, 1, 25, 5, 123});
// Perform clamp with broadcasted scalar and vector.
- builder.Add(builder.Add(builder.Clamp(min_vector, arg_vector, max_scalar),
- builder.Clamp(min_scalar, arg_vector, max_vector)),
- builder.Add(builder.Clamp(min_vector, arg_vector, max_vector),
- builder.Clamp(min_scalar, arg_vector, max_scalar)));
+ Add(Add(Clamp(min_vector, arg_vector, max_scalar),
+ Clamp(min_scalar, arg_vector, max_vector)),
+ Add(Clamp(min_vector, arg_vector, max_vector),
+ Clamp(min_scalar, arg_vector, max_scalar)));
ComputeAndCompareR1<uint32>(&builder, {8, 8, 2, 6, 14}, {});
}
@@ -2090,18 +2101,18 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddTwoParametersF32s) {
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 5.5f});
+ LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 5.5f});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
std::unique_ptr<Literal> param1_literal =
- Literal::CreateR1<float>({7.2f, 2.3f, 3.4f, 5.6f});
+ LiteralUtil::CreateR1<float>({7.2f, 2.3f, 3.4f, 5.6f});
std::unique_ptr<GlobalData> param1_data =
client_->TransferToServer(*param1_literal).ConsumeValueOrDie();
- auto p0 = builder.Parameter(0, param0_literal->shape(), "param0");
- auto p1 = builder.Parameter(1, param1_literal->shape(), "param1");
- builder.Add(p0, p1);
+ auto p0 = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ auto p1 = Parameter(&builder, 1, param1_literal->shape(), "param1");
+ Add(p0, p1);
ComputeAndCompareR1<float>(&builder, {8.3f, 4.5f, 6.7f, 11.1f},
{param0_data.get(), param1_data.get()},
@@ -2112,18 +2123,18 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddTwoParametersZeroElementF32s) {
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR3FromArray3D<float>(Array3D<float>(0, 7, 0));
+ LiteralUtil::CreateR3FromArray3D<float>(Array3D<float>(0, 7, 0));
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
std::unique_ptr<Literal> param1_literal =
- Literal::CreateR3FromArray3D<float>(Array3D<float>(0, 7, 0));
+ LiteralUtil::CreateR3FromArray3D<float>(Array3D<float>(0, 7, 0));
std::unique_ptr<GlobalData> param1_data =
client_->TransferToServer(*param1_literal).ConsumeValueOrDie();
- auto p0 = builder.Parameter(0, param0_literal->shape(), "param0");
- auto p1 = builder.Parameter(1, param1_literal->shape(), "param1");
- builder.Add(p0, p1);
+ auto p0 = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ auto p1 = Parameter(&builder, 1, param1_literal->shape(), "param1");
+ Add(p0, p1);
Array3D<float> expected(0, 7, 0);
ComputeAndCompareR3<float>(
@@ -2134,13 +2145,13 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddParameterToConstantF32s) {
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 5.5f});
+ LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 5.5f});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto a = builder.ConstantR1<float>({1.1f, 2.2f, 3.3f, 4.4f});
- auto p = builder.Parameter(0, param0_literal->shape(), "param0");
- builder.Add(a, p);
+ auto a = ConstantR1<float>(&builder, {1.1f, 2.2f, 3.3f, 4.4f});
+ auto p = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ Add(a, p);
ComputeAndCompareR1<float>(&builder, {2.2f, 4.4f, 6.6f, 9.9f},
{param0_data.get()}, error_spec_);
@@ -2148,8 +2159,8 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddParameterToConstantF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, CosF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({3.14159f, 0.0f, 1.570796f, -0.78539f});
- builder.Cos(a);
+ auto a = ConstantR1<float>(&builder, {3.14159f, 0.0f, 1.570796f, -0.78539f});
+ Cos(a);
ComputeAndCompareR1<float>(&builder, {-1.0f, 1.0f, 0.0f, 0.707107f}, {},
error_spec_);
@@ -2157,8 +2168,8 @@ XLA_TEST_F(ArrayElementwiseOpTest, CosF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, SinF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({3.14159f, 0.0f, 1.570796f, -0.78539f});
- builder.Sin(a);
+ auto a = ConstantR1<float>(&builder, {3.14159f, 0.0f, 1.570796f, -0.78539f});
+ Sin(a);
ComputeAndCompareR1<float>(&builder, {0.0f, 0.0f, 1.0f, -0.707107f}, {},
error_spec_);
@@ -2166,9 +2177,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, SinF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, Atan2F32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({0.0f, 5.0f, 0.0f, -3.0f, 2.0f, -8.0f});
- auto b = builder.ConstantR1<float>({6.0f, 0.0f, -4.0f, 0.0f, 2.0f, 8.0f});
- builder.Atan2(a, b);
+ auto a = ConstantR1<float>(&builder, {0.0f, 5.0f, 0.0f, -3.0f, 2.0f, -8.0f});
+ auto b = ConstantR1<float>(&builder, {6.0f, 0.0f, -4.0f, 0.0f, 2.0f, 8.0f});
+ Atan2(a, b);
ComputeAndCompareR1<float>(
&builder,
@@ -2178,8 +2189,8 @@ XLA_TEST_F(ArrayElementwiseOpTest, Atan2F32s) {
XLA_TEST_F(ArrayElementwiseOpTest, TanhF32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({-2.5f, 3.14f, 2.25f});
- builder.Tanh(a);
+ auto a = ConstantR1<float>(&builder, {-2.5f, 3.14f, 2.25f});
+ Tanh(a);
ComputeAndCompareR1<float>(&builder, {-0.986614f, 0.996260f, 0.978026}, {},
error_spec_);
@@ -2190,7 +2201,7 @@ XLA_TEST_F(ArrayElementwiseOpTest, TanhF32sVector) {
// the input tensor is large enough to exercise the vectorized tanh
// implementation on XLA CPU.
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateR1<float>(
+ auto input_literal = LiteralUtil::CreateR1<float>(
{1.02, -0.32, 0.85, 0.90, 1.23, -0.91, -0.49, 0.80, -0.67, 0.16,
-0.07, 0.39, -0.41, 0.04, 1.36, 1.25, 0.41, 0.65, -1.08, 0.32,
-1.45, -0.77, -1.09, 0.91, -1.03, -0.30, -1.11, -1.17, 1.50, -0.85,
@@ -2201,8 +2212,8 @@ XLA_TEST_F(ArrayElementwiseOpTest, TanhF32sVector) {
TF_ASSERT_OK_AND_ASSIGN(auto input_data,
client_->TransferToServer(*input_literal));
- auto input = builder.Parameter(0, input_literal->shape(), "input");
- builder.Tanh(input);
+ auto input = Parameter(&builder, 0, input_literal->shape(), "input");
+ Tanh(input);
ComputeAndCompareR1<float>(
&builder,
@@ -2232,7 +2243,7 @@ XLA_TEST_F(ArrayElementwiseOpTest, ExpF32sVector) {
// Just to help make sense of the scales here -- exp(89) saturates float32 and
// exp(-10) is smaller than our error spec.
- std::unique_ptr<Literal> input_literal = Literal::CreateR1<float>(
+ std::unique_ptr<Literal> input_literal = LiteralUtil::CreateR1<float>(
{1.02, -0.32, 0.85, 0.9, 1.23, -0.91, -0.49, 0.8, -1.31,
-1.44, -0.13, -1.31, -0.79, 1.41, 1.21, 1.05, -195.6, -194.5,
-193.4, -192.3, -191.2, -190.1, -189.0, -187.9, -19.6, -18.5, -17.4,
@@ -2247,8 +2258,8 @@ XLA_TEST_F(ArrayElementwiseOpTest, ExpF32sVector) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> input_data,
client_->TransferToServer(*input_literal));
- auto input = builder.Parameter(0, input_literal->shape(), "input");
- builder.Exp(input);
+ auto input = Parameter(&builder, 0, input_literal->shape(), "input");
+ Exp(input);
std::vector<float> expected_result;
int64 input_size = input_literal->shape().dimensions(0);
@@ -2266,7 +2277,7 @@ XLA_TEST_F(ArrayElementwiseOpTest, LogF32sVector) {
// implementation on XLA CPU.
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> input_literal = Literal::CreateR1<float>(
+ std::unique_ptr<Literal> input_literal = LiteralUtil::CreateR1<float>(
{-1.29, -1.41, -1.25, -13.5, -11.7, -17.9, -198,
-167, 1.29, 1.41, 1.25, 13.5, 11.7, 17.9,
198, 167, 1.27e+03, 1.33e+03, 1.74e+03, 1.6e+04, 1.84e+04,
@@ -2285,8 +2296,8 @@ XLA_TEST_F(ArrayElementwiseOpTest, LogF32sVector) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> input_data,
client_->TransferToServer(*input_literal));
- auto input = builder.Parameter(0, input_literal->shape(), "input");
- builder.Log(input);
+ auto input = Parameter(&builder, 0, input_literal->shape(), "input");
+ Log(input);
std::vector<float> expected_result;
int64 input_size = input_literal->shape().dimensions(0);
@@ -2301,9 +2312,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, LogF32sVector) {
XLA_TEST_F(ArrayElementwiseOpTest, ClzU32s) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint32>(
- {0, 1, 0x10, 0x10000, 0x700000, 0x12345678, 0xF2345678});
- builder.Clz(a);
+ auto a = ConstantR1<uint32>(
+ &builder, {0, 1, 0x10, 0x10000, 0x700000, 0x12345678, 0xF2345678});
+ Clz(a);
ComputeAndCompareR1<uint32>(&builder, {32, 31, 27, 15, 9, 3, 0}, {});
}
@@ -2311,8 +2322,8 @@ XLA_TEST_F(ArrayElementwiseOpTest, ClzU32s) {
XLA_TEST_F(ArrayElementwiseOpTest, ClzS64s) {
XlaBuilder builder(TestName());
auto a =
- builder.ConstantR1<int64>({0, 1, 0x80000000, 0x7FFFFFFFF2345678ul, -1});
- builder.Clz(a);
+ ConstantR1<int64>(&builder, {0, 1, 0x80000000, 0x7FFFFFFFF2345678ul, -1});
+ Clz(a);
ComputeAndCompareR1<int64>(&builder, {64, 63, 32, 1, 0}, {});
}
@@ -2324,12 +2335,12 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddChainFoldLeft) {
// c---------------------/
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({1.1f, 2.2f, 3.3f, 4.4f});
- auto b = builder.ConstantR1<float>({2.1f, 3.2f, 4.3f, 5.4f});
- auto c = builder.ConstantR1<float>({-3.3f, -15.5f, -7.7f, -29.9f});
+ auto a = ConstantR1<float>(&builder, {1.1f, 2.2f, 3.3f, 4.4f});
+ auto b = ConstantR1<float>(&builder, {2.1f, 3.2f, 4.3f, 5.4f});
+ auto c = ConstantR1<float>(&builder, {-3.3f, -15.5f, -7.7f, -29.9f});
- auto add = builder.Add(a, b);
- builder.Add(add, c);
+ auto add = Add(a, b);
+ Add(add, c);
ComputeAndCompareR1<float>(&builder, {-0.1f, -10.1f, -0.1f, -20.1f}, {},
error_spec_);
@@ -2342,12 +2353,12 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddChainFoldRight) {
// a---------------------/
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({91.1f, 2.2f, 3.3f, 4.4f});
- auto b = builder.ConstantR1<float>({2.1f, 3.2f, 4.3f, 5.4f});
- auto c = builder.ConstantR1<float>({-3.3f, -15.5f, -7.7f, -29.9f});
+ auto a = ConstantR1<float>(&builder, {91.1f, 2.2f, 3.3f, 4.4f});
+ auto b = ConstantR1<float>(&builder, {2.1f, 3.2f, 4.3f, 5.4f});
+ auto c = ConstantR1<float>(&builder, {-3.3f, -15.5f, -7.7f, -29.9f});
- auto add = builder.Add(b, c);
- builder.Add(a, add);
+ auto add = Add(b, c);
+ Add(a, add);
ComputeAndCompareR1<float>(&builder, {89.9f, -10.1f, -0.1f, -20.1f}, {},
error_spec_);
@@ -2359,12 +2370,12 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddWithNeg) {
// b ----- (neg) ----/
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({91.1f, 2.2f, 3.3f, 4.4f});
- auto b = builder.ConstantR1<float>({2.1f, 3.2f, 4.3f, 5.4f});
+ auto a = ConstantR1<float>(&builder, {91.1f, 2.2f, 3.3f, 4.4f});
+ auto b = ConstantR1<float>(&builder, {2.1f, 3.2f, 4.3f, 5.4f});
- auto neg_a = builder.Neg(a);
- auto neg_b = builder.Neg(b);
- builder.Add(neg_a, neg_b);
+ auto neg_a = Neg(a);
+ auto neg_b = Neg(b);
+ Add(neg_a, neg_b);
ComputeAndCompareR1<float>(&builder, {-93.2f, -5.4f, -7.6f, -9.8f}, {},
error_spec_);
@@ -2380,14 +2391,14 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddChainTwoSide) {
// d -----/
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({91.1f, 2.2f, 3.3f, 4.4f});
- auto b = builder.ConstantR1<float>({2.1f, 3.2f, 4.3f, 5.4f});
- auto c = builder.ConstantR1<float>({-3.3f, -15.5f, -7.7f, -29.9f});
- auto d = builder.ConstantR1<float>({-19.0f, 10.0f, -40.0f, 20.2f});
+ auto a = ConstantR1<float>(&builder, {91.1f, 2.2f, 3.3f, 4.4f});
+ auto b = ConstantR1<float>(&builder, {2.1f, 3.2f, 4.3f, 5.4f});
+ auto c = ConstantR1<float>(&builder, {-3.3f, -15.5f, -7.7f, -29.9f});
+ auto d = ConstantR1<float>(&builder, {-19.0f, 10.0f, -40.0f, 20.2f});
- auto add_ab = builder.Add(a, b);
- auto add_cd = builder.Add(c, d);
- builder.Add(add_ab, add_cd);
+ auto add_ab = Add(a, b);
+ auto add_cd = Add(c, d);
+ Add(add_ab, add_cd);
ComputeAndCompareR1<float>(&builder, {70.9f, -0.1f, -40.1f, 0.1f}, {},
error_spec_);
@@ -2395,11 +2406,11 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddChainTwoSide) {
XLA_TEST_F(ArrayElementwiseOpTest, 2DBinaryOpF32s) {
XlaBuilder builder(TestName());
- auto a =
- builder.ConstantR2<float>({{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
- auto b =
- builder.ConstantR2<float>({{-1.5f, 8.14f, 42.0}, {-1.0f, -4.0f, 5.55f}});
- builder.Add(a, b);
+ auto a = ConstantR2<float>(&builder,
+ {{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
+ auto b = ConstantR2<float>(&builder,
+ {{-1.5f, 8.14f, 42.0}, {-1.0f, -4.0f, 5.55f}});
+ Add(a, b);
Array2D<float> expected_array(
{{-4.0f, 11.28f, 43.0f}, {1.25f, -14.0f, 8.88f}});
@@ -2409,10 +2420,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, 2DBinaryOpF32s) {
XLA_TEST_F(ArrayElementwiseOpTest, ScalarPlus2DF32) {
// Add a scalar + matrix.
XlaBuilder builder(TestName());
- auto a =
- builder.ConstantR2<float>({{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
- auto scalar = builder.ConstantR0<float>(3.0f);
- builder.Add(scalar, a);
+ auto a = ConstantR2<float>(&builder,
+ {{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
+ auto scalar = ConstantR0<float>(&builder, 3.0f);
+ Add(scalar, a);
Array2D<float> expected_array({{0.5f, 6.14f, 4.0f}, {5.25f, -7.0f, 6.33f}});
ComputeAndCompareR2<float>(&builder, expected_array, {}, error_spec_);
@@ -2421,10 +2432,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, ScalarPlus2DF32) {
XLA_TEST_F(ArrayElementwiseOpTest, 2DPlusScalarF32) {
// Add a matrix + scalar.
XlaBuilder builder(TestName());
- auto a =
- builder.ConstantR2<float>({{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
- auto scalar = builder.ConstantR0<float>(3.0f);
- builder.Add(a, scalar);
+ auto a = ConstantR2<float>(&builder,
+ {{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
+ auto scalar = ConstantR0<float>(&builder, 3.0f);
+ Add(a, scalar);
Array2D<float> expected_array({{0.5f, 6.14f, 4.0f}, {5.25f, -7.0f, 6.33f}});
ComputeAndCompareR2<float>(&builder, expected_array, {}, error_spec_);
@@ -2434,13 +2445,13 @@ XLA_TEST_F(ArrayElementwiseOpTest, Add1DTo2DF32) {
// Test simple broadcasting of a R1F32 over R2F32. The vector's size matches
// only dim 0 of the matrix.
XlaBuilder builder(TestName());
- auto v = builder.ConstantR1<float>({20.0f, 40.0f, 60.0f});
+ auto v = ConstantR1<float>(&builder, {20.0f, 40.0f, 60.0f});
// clang-format off
- auto m = builder.ConstantR2<float>({
+ auto m = ConstantR2<float>(&builder, {
{-2.5f, 3.14f, 1.0f},
{2.25f, -10.0f, 3.33f}});
// clang-format on
- builder.Add(v, m, /*broadcast_dimensions=*/{1});
+ Add(v, m, /*broadcast_dimensions=*/{1});
Array2D<float> expected_array(
{{17.5f, 43.14f, 61.0f}, {22.25f, 30.0f, 63.33f}});
ComputeAndCompareR2<float>(&builder, expected_array, {}, error_spec_);
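// broadcast_dimensions = {1} maps v's only dimension onto dimension 1 of m,
// so result(i, j) = m(i, j) + v(j). Working the values:
//   row 0: {-2.5 + 20, 3.14 + 40, 1.0 + 60} = {17.5, 43.14, 61.0}
//   row 1: {2.25 + 20, -10.0 + 40, 3.33 + 60} = {22.25, 30.0, 63.33}
// which is exactly expected_array.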
@@ -2449,27 +2460,27 @@ XLA_TEST_F(ArrayElementwiseOpTest, Add1DTo2DF32) {
XLA_TEST_F(ArrayElementwiseOpTest, Compare1DTo2DS32Eq) {
// Test broadcasting in Eq comparison.
XlaBuilder builder(TestName());
- auto v = builder.ConstantR1<int32>({42, 73});
- auto m = builder.ConstantR2<int32>({{42, 73}, {42, 52}});
+ auto v = ConstantR1<int32>(&builder, {42, 73});
+ auto m = ConstantR2<int32>(&builder, {{42, 73}, {42, 52}});
// This test exercises both possible broadcast dimensions for a vector/matrix
// comparison.
- auto cmp_dim_0 = builder.Eq(v, m, /*broadcast_dimensions=*/{1});
- auto cmp_dim_1 = builder.Eq(v, m, /*broadcast_dimensions=*/{0});
- auto result = builder.Tuple({cmp_dim_0, cmp_dim_1});
+ auto cmp_dim_0 = Eq(v, m, /*broadcast_dimensions=*/{1});
+ auto cmp_dim_1 = Eq(v, m, /*broadcast_dimensions=*/{0});
+ Tuple(&builder, {cmp_dim_0, cmp_dim_1});
- auto expected = Literal::MakeTuple(
- {Literal::CreateR2<bool>({{true, true}, {true, false}}).get(),
- Literal::CreateR2<bool>({{true, false}, {false, false}}).get()});
+ auto expected = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR2<bool>({{true, true}, {true, false}}).get(),
+ LiteralUtil::CreateR2<bool>({{true, false}, {false, false}}).get()});
ComputeAndCompareTuple(&builder, *expected, {}, error_spec_);
}
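// The two broadcasts give different truth tables. With {1}, element (i, j)
// compares v(j) against m(i, j):
//   {{42 == 42, 73 == 73}, {42 == 42, 73 == 52}} = {{T, T}, {T, F}}
// With {0}, element (i, j) compares v(i) against m(i, j):
//   {{42 == 42, 42 == 73}, {73 == 42, 73 == 52}} = {{T, F}, {F, F}}
// matching the two literals packed into `expected`.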
XLA_TEST_F(ArrayElementwiseOpTest, Compare1DTo2DS32Ne) {
// Test broadcasting in Ne comparison.
XlaBuilder builder(TestName());
- auto v = builder.ConstantR1<int32>({42, 73});
- auto m = builder.ConstantR2<int32>({{42, 73}, {42, 52}});
- builder.Ne(v, m, /*broadcast_dimensions=*/{1});
+ auto v = ConstantR1<int32>(&builder, {42, 73});
+ auto m = ConstantR2<int32>(&builder, {{42, 73}, {42, 52}});
+ Ne(v, m, /*broadcast_dimensions=*/{1});
const string expected = R"(pred[2,2] {
{ 00 },
@@ -2481,9 +2492,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, Compare1DTo2DS32Ne) {
XLA_TEST_F(ArrayElementwiseOpTest, Compare1DTo2DS32Ge) {
// Test broadcasting in Ge comparison.
XlaBuilder builder(TestName());
- auto v = builder.ConstantR1<int32>({1, 2, 3, 4});
- auto m = builder.ConstantR2<int32>({{1, 0, 5, 6}, {42, 52, 10, 4}});
- builder.Ge(v, m, /*broadcast_dimensions=*/{1});
+ auto v = ConstantR1<int32>(&builder, {1, 2, 3, 4});
+ auto m = ConstantR2<int32>(&builder, {{1, 0, 5, 6}, {42, 52, 10, 4}});
+ Ge(v, m, /*broadcast_dimensions=*/{1});
const string expected = R"(pred[2,4] {
{ 1100 },
@@ -2495,9 +2506,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, Compare1DTo2DS32Ge) {
XLA_TEST_F(ArrayElementwiseOpTest, Compare1DTo2DS32Gt) {
// Test broadcasting in Gt comparison.
XlaBuilder builder(TestName());
- auto v = builder.ConstantR1<int32>({1, 2, 3, 4});
- auto m = builder.ConstantR2<int32>({{1, 0, 5, 6}, {42, 52, 10, 4}});
- builder.Gt(v, m, /*broadcast_dimensions=*/{1});
+ auto v = ConstantR1<int32>(&builder, {1, 2, 3, 4});
+ auto m = ConstantR2<int32>(&builder, {{1, 0, 5, 6}, {42, 52, 10, 4}});
+ Gt(v, m, /*broadcast_dimensions=*/{1});
const string expected = R"(pred[2,4] {
{ 0100 },
@@ -2509,9 +2520,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, Compare1DTo2DS32Gt) {
XLA_TEST_F(ArrayElementwiseOpTest, Compare1DTo2DS32Le) {
// Test broadcasting in Le comparison.
XlaBuilder builder(TestName());
- auto v = builder.ConstantR1<int32>({1, 2, 3, 4});
- auto m = builder.ConstantR2<int32>({{1, 0, 5, 6}, {42, 52, 10, 4}});
- builder.Le(v, m, /*broadcast_dimensions=*/{1});
+ auto v = ConstantR1<int32>(&builder, {1, 2, 3, 4});
+ auto m = ConstantR2<int32>(&builder, {{1, 0, 5, 6}, {42, 52, 10, 4}});
+ Le(v, m, /*broadcast_dimensions=*/{1});
const string expected = R"(pred[2,4] {
{ 1011 },
@@ -2523,9 +2534,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, Compare1DTo2DS32Le) {
XLA_TEST_F(ArrayElementwiseOpTest, Compare1DTo2DS32Lt) {
// Test broadcasting in Lt comparison.
XlaBuilder builder(TestName());
- auto v = builder.ConstantR1<int32>({1, 2, 3, 4});
- auto m = builder.ConstantR2<int32>({{1, 0, 5, 6}, {42, 52, 10, 4}});
- builder.Lt(v, m, /*broadcast_dimensions=*/{1});
+ auto v = ConstantR1<int32>(&builder, {1, 2, 3, 4});
+ auto m = ConstantR2<int32>(&builder, {{1, 0, 5, 6}, {42, 52, 10, 4}});
+ Lt(v, m, /*broadcast_dimensions=*/{1});
const string expected = R"(pred[2,4] {
{ 0011 },
@@ -2538,9 +2549,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, Mul2Dby1DF32) {
// Test simple broadcasting of a R1F32 over R2F32 when the order of binary op
// arguments is reversed.
XlaBuilder builder(TestName());
- auto m = builder.ConstantR2<float>({{1.5f, 2.5f, 3.5f}, {4.5f, 5.5f, 6.5f}});
- auto v = builder.ConstantR1<float>({2.0f, 4.0f, 6.0f});
- builder.Mul(m, v, /*broadcast_dimensions=*/{1});
+ auto m =
+ ConstantR2<float>(&builder, {{1.5f, 2.5f, 3.5f}, {4.5f, 5.5f, 6.5f}});
+ auto v = ConstantR1<float>(&builder, {2.0f, 4.0f, 6.0f});
+ Mul(m, v, /*broadcast_dimensions=*/{1});
Array2D<float> expected_array({{3.0f, 10.0f, 21.0f}, {9.0f, 22.0f, 39.0f}});
ComputeAndCompareR2<float>(&builder, expected_array, {}, error_spec_);
}
@@ -2551,10 +2563,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, Add2DTo2DWithDegenerateDim1) {
// m's shape in XLA notation is {3, 2}
// md's shape in XLA notation is {3, 1}
// The result has shape {3, 2}, where md is broadcast over m
- auto m =
- builder.ConstantR2<float>({{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
- auto md = builder.ConstantR2<float>({{10.0f, 20.0f, 30.0f}});
- builder.Add(m, md);
+ auto m = ConstantR2<float>(&builder,
+ {{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
+ auto md = ConstantR2<float>(&builder, {{10.0f, 20.0f, 30.0f}});
+ Add(m, md);
Array2D<float> expected_array(
{{7.5f, 23.14f, 31.0f}, {12.25f, 10.0f, 33.33f}});
ComputeAndCompareR2<float>(&builder, expected_array, {}, error_spec_);
@@ -2566,10 +2578,10 @@ XLA_TEST_F(ArrayElementwiseOpTest, Add2DTo2DWithDegenerateDim0) {
// m's shape in XLA notation is {3, 2}
// md's shape in XLA notation is {1, 2}
// The result has shape {3, 2}, where md is broadcast over m
- auto m =
- builder.ConstantR2<float>({{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
- auto md = builder.ConstantR2<float>({{10.0f}, {20.0f}});
- builder.Add(m, md);
+ auto m = ConstantR2<float>(&builder,
+ {{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
+ auto md = ConstantR2<float>(&builder, {{10.0f}, {20.0f}});
+ Add(m, md);
Array2D<float> expected_array(
{{7.5f, 13.14f, 11.0f}, {22.25f, 10.0f, 23.33f}});
ComputeAndCompareR2<float>(&builder, expected_array, {}, error_spec_);
@@ -2584,9 +2596,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, Add2DsWithDegenerateDimsOuterProduct) {
// a's shape in XLA notation is {1, 4}
// b's shape in XLA notation is {3, 1}
// The result has shape {3, 4}.
- auto a = builder.ConstantR2<float>({{0.0f}, {10.0f}, {20.0f}, {30.0f}});
- auto b = builder.ConstantR2<float>({{1.0f, 2.0f, 3.0f}});
- builder.Add(a, b);
+ auto a = ConstantR2<float>(&builder, {{0.0f}, {10.0f}, {20.0f}, {30.0f}});
+ auto b = ConstantR2<float>(&builder, {{1.0f, 2.0f, 3.0f}});
+ Add(a, b);
Array2D<float> expected_array({{1.0f, 2.0f, 3.0f},
{11.0f, 12.0f, 13.0f},
{21.0f, 22.0f, 23.0f},
@@ -2598,9 +2610,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, Add1DTo2DF32TwoWaysOver1) {
// Add together a (2,2) array and a (2) array, using dimension 0 for
// broadcasting (though there are two ways to broadcast these shapes).
XlaBuilder builder(TestName());
- auto v = builder.ConstantR1<float>({20.0f, 40.0f});
- auto m = builder.ConstantR2<float>({{10.0f, 50.0f}, {77.0f, 88.0f}});
- builder.Add(v, m, /*broadcast_dimensions=*/{1});
+ auto v = ConstantR1<float>(&builder, {20.0f, 40.0f});
+ auto m = ConstantR2<float>(&builder, {{10.0f, 50.0f}, {77.0f, 88.0f}});
+ Add(v, m, /*broadcast_dimensions=*/{1});
Array2D<float> expected_array({{30.0f, 90.0f}, {97.0f, 128.0f}});
ComputeAndCompareR2<float>(&builder, expected_array, {}, error_spec_);
}
@@ -2609,9 +2621,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, Add1DTo2DF32TwoWaysOver0) {
// Add together a (2,2) array and a (2) array, using dimension 1 for
// broadcasting (though there are two ways to broadcast these shapes).
XlaBuilder builder(TestName());
- auto v = builder.ConstantR1<float>({20.0f, 40.0f});
- auto m = builder.ConstantR2<float>({{10.0f, 50.0f}, {77.0f, 88.0f}});
- builder.Add(v, m, /*broadcast_dimensions=*/{0});
+ auto v = ConstantR1<float>(&builder, {20.0f, 40.0f});
+ auto m = ConstantR2<float>(&builder, {{10.0f, 50.0f}, {77.0f, 88.0f}});
+ Add(v, m, /*broadcast_dimensions=*/{0});
Array2D<float> expected_array({{30.0f, 70.0f}, {117.0f, 128.0f}});
ComputeAndCompareR2<float>(&builder, expected_array, {}, error_spec_);
}
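// With a square matrix both broadcasts type-check, so the chosen dimension
// changes the result. Over {1}: result(i, j) = m(i, j) + v(j), i.e.
// {{10 + 20, 50 + 40}, {77 + 20, 88 + 40}} = {{30, 90}, {97, 128}}.
// Over {0}: result(i, j) = m(i, j) + v(i), i.e.
// {{10 + 20, 50 + 20}, {77 + 40, 88 + 40}} = {{30, 70}, {117, 128}},
// as the two tests expect.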
@@ -2621,12 +2633,12 @@ XLA_TEST_F(ArrayElementwiseOpTest, 3DBinaryOpF32s) {
XlaBuilder builder(TestName());
Array3D<float> a_3d({{{1.0f, 2.0f}, {3.0f, 4.0f}, {5.0f, 6.0f}},
{{7.0f, 8.0f}, {9.0f, 10.0f}, {11.0f, 12.0f}}});
- auto a = builder.ConstantR3FromArray3D<float>(a_3d);
+ auto a = ConstantR3FromArray3D<float>(&builder, a_3d);
Array3D<float> b_3d({{{2.0f, 4.0f}, {6.0f, 8.0f}, {10.0f, 12.0f}},
{{14.0f, 16.0f}, {18.0f, 20.0f}, {22.0f, 24.0f}}});
- auto b = builder.ConstantR3FromArray3D<float>(b_3d);
- builder.Add(a, b);
+ auto b = ConstantR3FromArray3D<float>(&builder, b_3d);
+ Add(a, b);
Array3D<float> expected_3d(
{{{3.0f, 6.0f}, {9.0f, 12.0f}, {15.0f, 18.0f}},
@@ -2648,9 +2660,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, Add1DTo3DTwoWaysOver2) {
{11.0f, 12.0f}},
});
// clang-format on
- auto a = builder.ConstantR3FromArray3D<float>(a_3d);
- auto v = builder.ConstantR1<float>({10.0f, 20.0f});
- builder.Add(a, v, /*broadcast_dimensions=*/{2});
+ auto a = ConstantR3FromArray3D<float>(&builder, a_3d);
+ auto v = ConstantR1<float>(&builder, {10.0f, 20.0f});
+ Add(a, v, /*broadcast_dimensions=*/{2});
Array3D<float> expected_3d(
{{{11.0f, 22.0f}, {13.0f, 24.0f}, {15.0f, 26.0f}},
@@ -2672,9 +2684,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, Add1DTo3DTwoWaysOver0) {
{11.0f, 12.0f}},
});
// clang-format on
- auto a = builder.ConstantR3FromArray3D<float>(a_3d);
- auto v = builder.ConstantR1<float>({10.0f, 20.0f});
- builder.Add(a, v, /*broadcast_dimensions=*/{0});
+ auto a = ConstantR3FromArray3D<float>(&builder, a_3d);
+ auto v = ConstantR1<float>(&builder, {10.0f, 20.0f});
+ Add(a, v, /*broadcast_dimensions=*/{0});
// clang-format off
Array3D<float> expected_3d({
@@ -2702,12 +2714,12 @@ XLA_TEST_F(ArrayElementwiseOpTest, Add2DTo3D) {
{9.0f, 10.0f},
{11.0f, 12.0f}},
});
- auto a = builder.ConstantR3FromArray3D<float>(a_3d);
- auto m = builder.ConstantR2<float>({
+ auto a = ConstantR3FromArray3D<float>(&builder, a_3d);
+ auto m = ConstantR2<float>(&builder, {
{10.0f, 20.0f, 30.0f},
{40.0f, 50.0f, 60.0f},
});
- builder.Add(a, m, /*broadcast_dimensions=*/{0, 1});
+ Add(a, m, /*broadcast_dimensions=*/{0, 1});
Array3D<float> expected_3d({
{{11.0f, 12.0f},
@@ -2727,12 +2739,12 @@ XLA_TEST_F(ArrayElementwiseOpTest, CompareGtR3F32sWithDegenerateDim2) {
XlaBuilder builder(TestName());
Array3D<float> a_3d({{{1.0f, 2.0f}, {3.0f, 4.0f}, {5.0f, 6.0f}},
{{7.0f, 8.0f}, {9.0f, 10.0f}, {11.0f, 12.0f}}});
- auto a = builder.ConstantR3FromArray3D<float>(a_3d);
+ auto a = ConstantR3FromArray3D<float>(&builder, a_3d);
Array3D<float> b_3d({{{7.0f, 1.0f}, {3.0f, 10.0f}, {15.0f, 6.0f}}});
- auto b = builder.ConstantR3FromArray3D<float>(b_3d);
+ auto b = ConstantR3FromArray3D<float>(&builder, b_3d);
- builder.Gt(a, b);
+ Gt(a, b);
Array3D<int> expected_3d(
{{{0, 1}, {0, 0}, {0, 0}}, {{0, 1}, {1, 0}, {0, 1}}});
@@ -2767,9 +2779,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, 4DBinaryOpF32s) {
}
}
- auto a = builder.ConstantR4FromArray4D<float>(*operand_a_4d);
- auto b = builder.ConstantR4FromArray4D<float>(*operand_b_4d);
- builder.Add(a, b);
+ auto a = ConstantR4FromArray4D<float>(&builder, *operand_a_4d);
+ auto b = ConstantR4FromArray4D<float>(&builder, *operand_b_4d);
+ Add(a, b);
ComputeAndCompareR4<float>(&builder, *expected_4d, {}, error_spec_);
}
@@ -2795,9 +2807,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, R4PlusR1InDim1) {
}
}
- auto a = builder.ConstantR4FromArray4D<float>(*operand_a_4d);
- auto b = builder.ConstantR1<float>(operand_b_1d);
- builder.Add(a, b, {1});
+ auto a = ConstantR4FromArray4D<float>(&builder, *operand_a_4d);
+ auto b = ConstantR1<float>(&builder, operand_b_1d);
+ Add(a, b, {1});
ComputeAndCompareR4<float>(&builder, *expected_4d, {}, error_spec_);
}
@@ -2813,11 +2825,12 @@ XLA_TEST_F(ArrayElementwiseOpTest, R4_16x16x2x2_Plus_R1_16) {
std::iota(r1.begin(), r1.end(), 1.0);
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> a_literal = Literal::CreateR4FromArray4DWithLayout(
- r4, LayoutUtil::MakeLayout({0, 1, 2, 3}));
- auto a = builder.ConstantLiteral(*a_literal);
- auto b = builder.ConstantR1<float>(r1);
- builder.Add(a, b, {1});
+ std::unique_ptr<Literal> a_literal =
+ LiteralUtil::CreateR4FromArray4DWithLayout(
+ r4, LayoutUtil::MakeLayout({0, 1, 2, 3}));
+ auto a = ConstantLiteral(&builder, *a_literal);
+ auto b = ConstantR1<float>(&builder, r1);
+ Add(a, b, {1});
for (int i0 = 0; i0 < d0; ++i0) {
for (int i1 = 0; i1 < d1; ++i1) {
@@ -2835,8 +2848,8 @@ XLA_TEST_F(ArrayElementwiseOpTest, R4_16x16x2x2_Plus_R1_16) {
XLA_TEST_F(ArrayElementwiseOpTest, CannotAddOpaques) {
XlaBuilder builder(TestName());
auto shape = ShapeUtil::MakeOpaqueShape();
- auto x = builder.Parameter(0, shape, "x");
- builder.Add(x, x);
+ auto x = Parameter(&builder, 0, shape, "x");
+ Add(x, x);
auto computation_status = builder.Build();
ASSERT_FALSE(computation_status.ok());
EXPECT_THAT(computation_status.status().ToString(),
@@ -2846,11 +2859,11 @@ XLA_TEST_F(ArrayElementwiseOpTest, CannotAddOpaques) {
XLA_TEST_F(ArrayElementwiseOpTest, IdentityBroadcastOfSameRankIsAllowed) {
XlaBuilder builder(TestName());
- auto a =
- builder.ConstantR2<float>({{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
- auto b =
- builder.ConstantR2<float>({{-1.5f, 8.14f, 42.0}, {-1.0f, -4.0f, 5.55f}});
- builder.Add(a, b, /*broadcast_dimensions=*/{0, 1});
+ auto a = ConstantR2<float>(&builder,
+ {{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
+ auto b = ConstantR2<float>(&builder,
+ {{-1.5f, 8.14f, 42.0}, {-1.0f, -4.0f, 5.55f}});
+ Add(a, b, /*broadcast_dimensions=*/{0, 1});
Array2D<float> expected_array(
{{-4.0f, 11.28f, 43.0f}, {1.25f, -14.0f, 8.88f}});
@@ -2859,11 +2872,11 @@ XLA_TEST_F(ArrayElementwiseOpTest, IdentityBroadcastOfSameRankIsAllowed) {
XLA_TEST_F(ArrayElementwiseOpTest, NonIdentityBroadcastOfSameRankIsDisallowed) {
XlaBuilder builder(TestName());
- auto a =
- builder.ConstantR2<float>({{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
- auto b =
- builder.ConstantR2<float>({{-1.5f, 8.14f, 42.0}, {-1.0f, -4.0f, 5.55f}});
- builder.Add(a, b, /*broadcast_dimensions=*/{1, 0});
+ auto a = ConstantR2<float>(&builder,
+ {{-2.5f, 3.14f, 1.0f}, {2.25f, -10.0f, 3.33f}});
+ auto b = ConstantR2<float>(&builder,
+ {{-1.5f, 8.14f, 42.0}, {-1.0f, -4.0f, 5.55f}});
+ Add(a, b, /*broadcast_dimensions=*/{1, 0});
auto computation_status = builder.Build();
ASSERT_FALSE(computation_status.ok());
@@ -2875,15 +2888,15 @@ XLA_TEST_F(ArrayElementwiseOpTest, NonIdentityBroadcastOfSameRankIsDisallowed) {
// broadcast.
XLA_TEST_F(ArrayElementwiseOpTest, ImplictBroadcastInFusedExpressions) {
XlaBuilder builder(TestName());
- auto x_literal = Literal::CreateR1<float>({1, 2, 3});
- auto y_literal = Literal::CreateR1<float>({4, 5});
+ auto x_literal = LiteralUtil::CreateR1<float>({1, 2, 3});
+ auto y_literal = LiteralUtil::CreateR1<float>({4, 5});
auto x_data = client_->TransferToServer(*x_literal).ConsumeValueOrDie();
auto y_data = client_->TransferToServer(*y_literal).ConsumeValueOrDie();
- auto x = builder.Parameter(0, x_literal->shape(), "x");
- auto y = builder.Parameter(1, y_literal->shape(), "y");
- auto slice = builder.Slice(x, {1}, {2}, {1});
- builder.Sub(slice, y);
+ auto x = Parameter(&builder, 0, x_literal->shape(), "x");
+ auto y = Parameter(&builder, 1, y_literal->shape(), "y");
+ auto slice = Slice(x, {1}, {2}, {1});
+ Sub(slice, y);
ComputeAndCompareR1<float>(&builder, {-2, -3}, {x_data.get(), y_data.get()},
error_spec_);
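// Worked check: Slice(x, {1}, {2}, {1}) takes x[1:2] = {2}, a one-element
// vector, which the fused expression then implicitly broadcasts against
// y = {4, 5}: {2 - 4, 2 - 5} = {-2, -3}, matching the comparison above.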
diff --git a/tensorflow/compiler/xla/tests/axpy_simple_test.cc b/tensorflow/compiler/xla/tests/axpy_simple_test.cc
index fcd9ff55e3..8d15b7841b 100644
--- a/tensorflow/compiler/xla/tests/axpy_simple_test.cc
+++ b/tensorflow/compiler/xla/tests/axpy_simple_test.cc
@@ -29,10 +29,10 @@ class AxpySimpleTest : public ClientLibraryTestBase {};
TEST_F(AxpySimpleTest, AxTenValues) {
XlaBuilder builder("ax_10");
- auto alpha = builder.ConstantR0<float>(3.1415926535);
- auto x = builder.ConstantR1<float>(
- {-1.0, 1.0, 2.0, -2.0, -3.0, 3.0, 4.0, -4.0, -5.0, 5.0});
- builder.Mul(alpha, x);
+ auto alpha = ConstantR0<float>(&builder, 3.1415926535);
+ auto x = ConstantR1<float>(
+ &builder, {-1.0, 1.0, 2.0, -2.0, -3.0, 3.0, 4.0, -4.0, -5.0, 5.0});
+ Mul(alpha, x);
std::vector<float> expected = {
-3.14159265, 3.14159265, 6.28318531, -6.28318531, -9.42477796,
@@ -42,11 +42,11 @@ TEST_F(AxpySimpleTest, AxTenValues) {
XLA_TEST_F(AxpySimpleTest, AxpyZeroValues) {
XlaBuilder builder("axpy_10");
- auto alpha = builder.ConstantR0<float>(3.1415926535);
- auto x = builder.ConstantR1<float>({});
- auto y = builder.ConstantR1<float>({});
- auto ax = builder.Mul(alpha, x);
- builder.Add(ax, y);
+ auto alpha = ConstantR0<float>(&builder, 3.1415926535);
+ auto x = ConstantR1<float>(&builder, {});
+ auto y = ConstantR1<float>(&builder, {});
+ auto ax = Mul(alpha, x);
+ Add(ax, y);
std::vector<float> expected = {};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -54,13 +54,13 @@ XLA_TEST_F(AxpySimpleTest, AxpyZeroValues) {
TEST_F(AxpySimpleTest, AxpyTenValues) {
XlaBuilder builder("axpy_10");
- auto alpha = builder.ConstantR0<float>(3.1415926535);
- auto x = builder.ConstantR1<float>(
- {-1.0, 1.0, 2.0, -2.0, -3.0, 3.0, 4.0, -4.0, -5.0, 5.0});
- auto y = builder.ConstantR1<float>(
- {5.0, -5.0, -4.0, 4.0, 3.0, -3.0, -2.0, 2.0, 1.0, -1.0});
- auto ax = builder.Mul(alpha, x);
- builder.Add(ax, y);
+ auto alpha = ConstantR0<float>(&builder, 3.1415926535);
+ auto x = ConstantR1<float>(
+ &builder, {-1.0, 1.0, 2.0, -2.0, -3.0, 3.0, 4.0, -4.0, -5.0, 5.0});
+ auto y = ConstantR1<float>(
+ &builder, {5.0, -5.0, -4.0, 4.0, 3.0, -3.0, -2.0, 2.0, 1.0, -1.0});
+ auto ax = Mul(alpha, x);
+ Add(ax, y);
TF_ASSERT_OK_AND_ASSIGN(ProgramShape shape, builder.GetProgramShape());
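// axpy computes alpha * x + y elementwise; for the first element above,
// 3.1415926535 * (-1.0) + 5.0 = 1.8584..., and likewise for the rest. The
// expected vector and the final comparison presumably sit below this hunk
// in the unmodified part of the test.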
diff --git a/tensorflow/compiler/xla/tests/bad_rng_shape_validation_test.cc b/tensorflow/compiler/xla/tests/bad_rng_shape_validation_test.cc
index 22c3394e6f..8c227df7f0 100644
--- a/tensorflow/compiler/xla/tests/bad_rng_shape_validation_test.cc
+++ b/tensorflow/compiler/xla/tests/bad_rng_shape_validation_test.cc
@@ -35,10 +35,10 @@ class BadRngShapeValidationTest : public ClientLibraryTestBase {};
TEST_F(BadRngShapeValidationTest, DefaultConstructedShapeCreatesError) {
XlaBuilder builder(TestName());
- auto zero = builder.ConstantR0<float>(0.0);
- auto one = builder.ConstantR0<float>(1.0);
+ auto zero = ConstantR0<float>(&builder, 0.0);
+ auto one = ConstantR0<float>(&builder, 1.0);
Shape default_constructed;
- builder.RngUniform(zero, one, default_constructed);
+ RngUniform(zero, one, default_constructed);
StatusOr<XlaComputation> computation = builder.Build();
EXPECT_FALSE(computation.ok());
@@ -49,13 +49,13 @@ TEST_F(BadRngShapeValidationTest, DefaultConstructedShapeCreatesError) {
TEST_F(BadRngShapeValidationTest, ShapeWithoutLayoutIsOk) {
XlaBuilder builder(TestName());
- auto zero = builder.ConstantR0<float>(0.0);
- auto one = builder.ConstantR0<float>(1.0);
+ auto zero = ConstantR0<float>(&builder, 0.0);
+ auto one = ConstantR0<float>(&builder, 1.0);
Shape sans_layout;
sans_layout.set_element_type(F32);
sans_layout.add_dimensions(1);
- builder.RngUniform(zero, one, sans_layout);
+ RngUniform(zero, one, sans_layout);
StatusOr<XlaComputation> computation = builder.Build();
ASSERT_TRUE(computation.ok());
diff --git a/tensorflow/compiler/xla/tests/batch_normalization_test.cc b/tensorflow/compiler/xla/tests/batch_normalization_test.cc
index f3dac75a44..6a024798f9 100644
--- a/tensorflow/compiler/xla/tests/batch_normalization_test.cc
+++ b/tensorflow/compiler/xla/tests/batch_normalization_test.cc
@@ -20,10 +20,11 @@ limitations under the License.
#include "tensorflow/compiler/xla/array2d.h"
#include "tensorflow/compiler/xla/array4d.h"
#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/lib/math.h"
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/reference_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -62,7 +63,7 @@ class BatchNormalizationTest
{5.0f, 4.4f}, // p2
});
input_array_.FillWithPZ(pz);
- input_literal_ = std::move(*Literal::CreateR4FromArray4D(input_array_));
+ input_literal_ = std::move(*LiteralUtil::CreateR4FromArray4D(input_array_));
CHECK_EQ(kSamples, input_array_.planes());
CHECK_EQ(kZ, input_array_.depth());
CHECK_EQ(kY, input_array_.height());
@@ -101,9 +102,9 @@ INSTANTIATE_TEST_CASE_P(BatchNormalizationTestInstance, BatchNormalizationTest,
XLA_TEST_P(BatchNormalizationTest, SubtractInZ) {
XlaBuilder builder("subtract_in_z_one_sample");
- auto x = builder.ConstantLiteral(input_literal_);
- auto y = builder.ConstantR1<float>({3.14, 4.25});
- builder.Sub(x, y, /*broadcast_dimensions=*/{1});
+ auto x = ConstantLiteral(&builder, input_literal_);
+ auto y = ConstantR1<float>(&builder, {3.14, 4.25});
+ Sub(x, y, /*broadcast_dimensions=*/{1});
Array4D<float> expected(kSamples, kZ, kY, kX);
Array2D<float> pz({
@@ -117,8 +118,8 @@ XLA_TEST_P(BatchNormalizationTest, SubtractInZ) {
XLA_TEST_P(BatchNormalizationTest, SquareTesseractElementwise) {
XlaBuilder builder("square_tesseract_elementwise");
- auto x = builder.ConstantLiteral(input_literal_);
- builder.SquareF32(x);
+ auto x = ConstantLiteral(&builder, input_literal_);
+ Square(x);
using tensorflow::MathUtil;
@@ -134,11 +135,10 @@ XLA_TEST_P(BatchNormalizationTest, SquareTesseractElementwise) {
XLA_TEST_P(BatchNormalizationTest, SumToZ) {
XlaBuilder builder("sum_to_z");
- auto input_activations = builder.ConstantLiteral(input_literal_);
+ auto input_activations = ConstantLiteral(&builder, input_literal_);
XlaComputation add = CreateScalarAddComputation(F32, &builder);
// Reduce all but the Z dimension.
- builder.Reduce(input_activations, builder.ConstantR0<float>(0.0f), add,
- {0, 2, 3});
+ Reduce(input_activations, ConstantR0<float>(&builder, 0.0f), add, {0, 2, 3});
std::vector<float> expected = {6, 12.6};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
@@ -146,13 +146,13 @@ XLA_TEST_P(BatchNormalizationTest, SumToZ) {
XLA_TEST_P(BatchNormalizationTest, SquareAndReduce) {
XlaBuilder builder("square_and_reduce");
- auto input_activations = builder.ConstantLiteral(input_literal_);
- auto set_means = builder.ConstantR1<float>({2.f, 4.2f});
- auto activation_deviations = builder.Sub(input_activations, set_means,
- /*broadcast_dimensions=*/{1});
+ auto input_activations = ConstantLiteral(&builder, input_literal_);
+ auto set_means = ConstantR1<float>(&builder, {2.f, 4.2f});
+ auto activation_deviations = Sub(input_activations, set_means,
+ /*broadcast_dimensions=*/{1});
XlaComputation add = CreateScalarAddComputation(F32, &builder);
- auto dev_squares = builder.SquareF32(activation_deviations);
- builder.Reduce(dev_squares, builder.ConstantR0<float>(0.0f), add, {0, 2, 3});
+ auto dev_squares = Square(activation_deviations);
+ Reduce(dev_squares, ConstantR0<float>(&builder, 0.0f), add, {0, 2, 3});
std::vector<float> expected = {18, 0.06};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
@@ -160,8 +160,8 @@ XLA_TEST_P(BatchNormalizationTest, SquareAndReduce) {
XLA_TEST_P(BatchNormalizationTest, VarianceToStddev) {
XlaBuilder builder("variance_to_stddev");
- auto variance = builder.ConstantR1<float>({6.f, .02f});
- builder.SqrtF32(variance);
+ auto variance = ConstantR1<float>(&builder, {6.f, .02f});
+ Sqrt(variance);
std::vector<float> expected = {2.44948974f, 0.14142136f};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
@@ -172,50 +172,50 @@ XLA_TEST_P(BatchNormalizationTest, VarianceToStddev) {
XLA_TEST_P(BatchNormalizationTest, SpecComparisonForward) {
XlaBuilder builder("batch_normalize_per_spec");
auto input_activations =
- CheckShape(&builder, builder.ConstantLiteral(input_literal_),
+ CheckShape(&builder, ConstantLiteral(&builder, input_literal_),
ShapeUtil::MakeShape(F32, {3, 2, 1, 1}));
- auto gamma = builder.ConstantR1<float>({1.0, 1.0});
- auto beta = builder.ConstantR1<float>({0.0, 0.0});
+ auto gamma = ConstantR1<float>(&builder, {1.0, 1.0});
+ auto beta = ConstantR1<float>(&builder, {0.0, 0.0});
XlaComputation add = CreateScalarAddComputation(F32, &builder);
// Reduce all dimensions except dimension 1.
Shape TwoElementVectorF32 = ShapeUtil::MakeShape(F32, {2});
auto sum = CheckShape(
&builder,
- builder.Reduce(input_activations, builder.ConstantR0<float>(0.0f), add,
- /*dimensions_to_reduce=*/{0, 2, 3}),
+ Reduce(input_activations, ConstantR0<float>(&builder, 0.0f), add,
+ /*dimensions_to_reduce=*/{0, 2, 3}),
TwoElementVectorF32);
auto input_shape = builder.GetShape(input_activations).ConsumeValueOrDie();
auto sum_shape = builder.GetShape(sum).ConsumeValueOrDie();
- auto count = builder.ConstantR0<float>(ShapeUtil::ElementsIn(input_shape) /
- ShapeUtil::ElementsIn(sum_shape));
- auto set_means = builder.Div(sum, count);
+ auto count =
+ ConstantR0<float>(&builder, ShapeUtil::ElementsIn(input_shape) /
+ ShapeUtil::ElementsIn(sum_shape));
+ auto set_means = Div(sum, count);
const float kEpsilon = 1e-9f;
- auto epsilon = builder.ConstantR0<float>(kEpsilon);
- auto epsilon2 = builder.ConstantR1<float>({kEpsilon, kEpsilon});
- auto activation_deviations = builder.Sub(input_activations, set_means,
- /*broadcast_dimensions=*/{1});
- auto dev_squares = builder.SquareF32(activation_deviations);
- auto sum_of_squares = CheckShape(
- &builder,
- builder.Reduce(dev_squares, builder.ConstantR0<float>(0.0f), add,
- /*dimensions_to_reduce=*/{0, 2, 3}),
- TwoElementVectorF32);
- auto variance = builder.Div(sum_of_squares, count);
- auto standard_deviation = builder.SqrtF32(variance);
+ auto epsilon = ConstantR0<float>(&builder, kEpsilon);
+ auto epsilon2 = ConstantR1<float>(&builder, {kEpsilon, kEpsilon});
+ auto activation_deviations = Sub(input_activations, set_means,
+ /*broadcast_dimensions=*/{1});
+ auto dev_squares = Square(activation_deviations);
+ auto sum_of_squares =
+ CheckShape(&builder,
+ Reduce(dev_squares, ConstantR0<float>(&builder, 0.0f), add,
+ /*dimensions_to_reduce=*/{0, 2, 3}),
+ TwoElementVectorF32);
+ auto variance = Div(sum_of_squares, count);
+ auto standard_deviation = Sqrt(variance);
auto standard_deviation_above_epsilon =
- CheckShape(&builder, builder.Gt(standard_deviation, epsilon),
+ CheckShape(&builder, Gt(standard_deviation, epsilon),
ShapeUtil::MakeShape(PRED, {2}));
- auto gt_eps = builder.Select(standard_deviation_above_epsilon,
- standard_deviation, epsilon2);
- auto normalization_factors = builder.ReciprocalF32(gt_eps);
+ auto gt_eps =
+ Select(standard_deviation_above_epsilon, standard_deviation, epsilon2);
+ auto normalization_factors = Reciprocal(gt_eps);
auto normalized_input_activations =
- builder.Mul(activation_deviations, normalization_factors,
- /*broadcast_dimensions=*/{1});
- /* auto output_activations = */ builder.Add(
- builder.Mul(normalized_input_activations, gamma,
- /*broadcast_dimensions=*/{1}),
- beta, /*broadcast_dimensions=*/{1});
+ Mul(activation_deviations, normalization_factors,
+ /*broadcast_dimensions=*/{1});
+ /* auto output_activations = */ Add(Mul(normalized_input_activations, gamma,
+ /*broadcast_dimensions=*/{1}),
+ beta, /*broadcast_dimensions=*/{1});
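// Net effect of the graph assembled above, per feature (dimension 1),
// matching the usual batch-norm definition:
//   mean = sum(x) / count
//   var  = sum((x - mean)^2) / count
//   y    = gamma * (x - mean) / max(sqrt(var), epsilon) + beta
// where the Gt/Select pair followed by Reciprocal implements the
// max-with-epsilon guard on the denominator.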
Array4D<float> expected(kSamples, kZ, kY, kX);
Array2D<float> pz({
@@ -232,46 +232,47 @@ XLA_TEST_P(BatchNormalizationTest, BasicTraining) {
const int kFeatureIndex = 3;
XlaBuilder builder(TestName());
- auto operand = builder.ConstantR4FromArray4D<float>(
- {{{{1.f, 2.f}}, {{3.f, 4.f}}}, {{{5.f, 6.f}}, {{7.f, 8.f}}}});
+ auto operand = ConstantR4FromArray4D<float>(
+ &builder, {{{{1.f, 2.f}}, {{3.f, 4.f}}}, {{{5.f, 6.f}}, {{7.f, 8.f}}}});
- auto scale = builder.ConstantR1<float>({2.0f, 3.0f});
+ auto scale = ConstantR1<float>(&builder, {2.0f, 3.0f});
- auto offset = builder.ConstantR1<float>({1.0f, 2.0f});
+ auto offset = ConstantR1<float>(&builder, {1.0f, 2.0f});
- builder.BatchNormTraining(operand, scale, offset,
- /*epsilon=*/0.001, kFeatureIndex);
+ BatchNormTraining(operand, scale, offset,
+ /*epsilon=*/0.001, kFeatureIndex);
- auto expected = Literal::MakeTuple(
- {Literal::CreateR4<float>({{{{-1.6f, -2.0f}}, {{0.1f, 0.6f}}},
- {{{1.9f, 3.3f}}, {{3.7f, 6.0f}}}})
+ auto expected = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR4<float>({{{{-1.6f, -2.0f}}, {{0.1f, 0.6f}}},
+ {{{1.9f, 3.3f}}, {{3.7f, 6.0f}}}})
.get(),
- Literal::CreateR1<float>({4, 5}).get(),
- Literal::CreateR1<float>({5, 5}).get()});
+ LiteralUtil::CreateR1<float>({4, 5}).get(),
+ LiteralUtil::CreateR1<float>({5, 5}).get()});
ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.1));
}
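// Worked check for kFeatureIndex = 3 (two features): feature 0 sees
// {1, 3, 5, 7} -> mean 4, variance (9 + 1 + 1 + 9) / 4 = 5; feature 1 sees
// {2, 4, 6, 8} -> mean 5, variance 5, hence the {4, 5} and {5, 5} tuple
// elements. Normalizing, e.g. for x = 1: 2 * (1 - 4) / sqrt(5.001) + 1
// = -1.68, within ErrorSpec(0.1) of the -1.6f above.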
-XLA_TEST_P(BatchNormalizationTest, BasicTrainingOnSublane) {
+XLA_TEST_P(BatchNormalizationTest, BasicTrainingOnDimension2) {
const int kFeatureIndex = 2;
XlaBuilder builder(TestName());
- auto operand = builder.ConstantR4FromArray4D<float>(
+ auto operand = ConstantR4FromArray4D<float>(
+ &builder,
{{{{1.f}, {2.f}}, {{3.f}, {4.f}}}, {{{5.f}, {6.f}}, {{7.f}, {8.f}}}});
- auto scale = builder.ConstantR1<float>({2.0f, 3.0f});
+ auto scale = ConstantR1<float>(&builder, {2.0f, 3.0f});
- auto offset = builder.ConstantR1<float>({1.0f, 2.0f});
+ auto offset = ConstantR1<float>(&builder, {1.0f, 2.0f});
- builder.BatchNormTraining(operand, scale, offset,
- /*epsilon=*/0.001, kFeatureIndex);
+ BatchNormTraining(operand, scale, offset,
+ /*epsilon=*/0.001, kFeatureIndex);
- auto expected = Literal::MakeTuple(
- {Literal::CreateR4<float>({{{{-1.6f}, {-2.0f}}, {{0.1f}, {0.6f}}},
- {{{1.9f}, {3.3f}}, {{3.7f}, {6.0f}}}})
+ auto expected = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR4<float>({{{{-1.6f}, {-2.0f}}, {{0.1f}, {0.6f}}},
+ {{{1.9f}, {3.3f}}, {{3.7f}, {6.0f}}}})
.get(),
- Literal::CreateR1<float>({4, 5}).get(),
- Literal::CreateR1<float>({5, 5}).get()});
+ LiteralUtil::CreateR1<float>({4, 5}).get(),
+ LiteralUtil::CreateR1<float>({5, 5}).get()});
ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.1));
}
@@ -294,14 +295,14 @@ XLA_TEST_P(BatchNormalizationTest, TrainingWithFeatureOnLowDimension) {
CreateR1Parameter<float>(std::vector<float>(260, 1.0f),
/*parameter_number=*/2, "offset", &builder, &h2);
- builder.BatchNormTraining(h0, h1, h2,
- /*epsilon=*/1, kFeatureIndex);
+ BatchNormTraining(h0, h1, h2,
+ /*epsilon=*/1, kFeatureIndex);
- auto expected = Literal::MakeTuple(
- {Literal::CreateR3FromArray3D<float>(Array3D<float>(260, 2, 2, 1.0f))
+ auto expected = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR3FromArray3D<float>(Array3D<float>(260, 2, 2, 1.0f))
.get(),
- Literal::CreateR1<float>(std::vector<float>(260, 1.0f)).get(),
- Literal::CreateR1<float>(std::vector<float>(260, 0.0f)).get()});
+ LiteralUtil::CreateR1<float>(std::vector<float>(260, 1.0f)).get(),
+ LiteralUtil::CreateR1<float>(std::vector<float>(260, 0.0f)).get()});
ComputeAndCompareTuple(&builder, *expected,
{operand.get(), scale.get(), offset.get()},
@@ -327,14 +328,15 @@ XLA_TEST_P(BatchNormalizationTest, LargeEpsilonTest) {
/*parameter_number=*/2, "offset", &builder, &h2);
// var = 125, mean = 15, epsilon = -100
- builder.BatchNormTraining(h0, h1, h2,
- /*epsilon=*/-100, kFeatureIndex);
+ BatchNormTraining(h0, h1, h2,
+ /*epsilon=*/-100, kFeatureIndex);
- auto expected = Literal::MakeTuple(
- {Literal::CreateR3FromArray3D<float>({{{-3.0f}, {-1.0f}, {1.0f}, {3.0f}}})
+ auto expected = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR3FromArray3D<float>(
+ {{{-3.0f}, {-1.0f}, {1.0f}, {3.0f}}})
.get(),
- Literal::CreateR1<float>(std::vector<float>(1, 15.0f)).get(),
- Literal::CreateR1<float>(std::vector<float>(1, 125.0f)).get()});
+ LiteralUtil::CreateR1<float>(std::vector<float>(1, 15.0f)).get(),
+ LiteralUtil::CreateR1<float>(std::vector<float>(1, 125.0f)).get()});
ComputeAndCompareTuple(&builder, *expected,
{operand.get(), scale.get(), offset.get()},
@@ -346,26 +348,27 @@ XLA_TEST_P(BatchNormalizationTest, BatchNormGradBasic) {
XlaBuilder builder(TestName());
auto operand =
- builder.ConstantR4FromArray4D<float>(Array4D<float>(2, 2, 2, 1, 0.0f));
+ ConstantR4FromArray4D<float>(&builder, Array4D<float>(2, 2, 2, 1, 0.0f));
- auto scale = builder.ConstantR1<float>({1.0f, 1.0f});
+ auto scale = ConstantR1<float>(&builder, {1.0f, 1.0f});
- auto mean = builder.ConstantR1<float>({0.0f, 0.0f});
+ auto mean = ConstantR1<float>(&builder, {0.0f, 0.0f});
- auto var = builder.ConstantR1<float>({1.0f, 1.0f});
+ auto var = ConstantR1<float>(&builder, {1.0f, 1.0f});
- auto grad_output = builder.ConstantR4FromArray4D<float>(
+ auto grad_output = ConstantR4FromArray4D<float>(
+ &builder,
{{{{1.f}, {2.f}}, {{3.f}, {4.f}}}, {{{5.f}, {6.f}}, {{7.f}, {8.f}}}});
- builder.BatchNormGrad(operand, scale, mean, var, grad_output,
- /*epsilon=*/0.0, kFeatureIndex);
+ BatchNormGrad(operand, scale, mean, var, grad_output,
+ /*epsilon=*/0.0, kFeatureIndex);
- auto expected = Literal::MakeTuple(
- {Literal::CreateR4<float>({{{{-3.f}, {-3.f}}, {{-1.f}, {-1.f}}},
- {{{1.f}, {1.f}}, {{3.f}, {3.f}}}})
+ auto expected = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR4<float>({{{{-3.f}, {-3.f}}, {{-1.f}, {-1.f}}},
+ {{{1.f}, {1.f}}, {{3.f}, {3.f}}}})
.get(),
- Literal::CreateR1<float>({0, 0}).get(),
- Literal::CreateR1<float>({16, 20}).get()});
+ LiteralUtil::CreateR1<float>({0, 0}).get(),
+ LiteralUtil::CreateR1<float>({16, 20}).get()});
ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.1));
}
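// Worked check: with a zero operand, zero mean, and unit variance, the
// normalized activations are all zero, so grad_scale = sum(grad * x_hat) =
// {0, 0}; grad_offset is the per-feature sum of grad_output over feature
// index 2: {1 + 3 + 5 + 7, 2 + 4 + 6 + 8} = {16, 20}, matching the last
// two tuple elements.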
@@ -511,22 +514,23 @@ XLA_TEST_P(BatchNormTestManySizes, RandomizedTrainingTests) {
auto normalized = *ReferenceUtil::BatchNorm4D(input_array, mean4D, var4D,
scale4D, offset4D, epsilon);
- auto expected_normalized = Literal::CreateR4FromArray4D<float>(normalized);
+ auto expected_normalized =
+ LiteralUtil::CreateR4FromArray4D<float>(normalized);
- auto offset_literal = Literal::CreateR1<float>(offset);
- auto scale_literal = Literal::CreateR1<float>(scale);
- auto input_literal = Literal::CreateR4FromArray4D<float>(input_array);
+ auto offset_literal = LiteralUtil::CreateR1<float>(offset);
+ auto scale_literal = LiteralUtil::CreateR1<float>(scale);
+ auto input_literal = LiteralUtil::CreateR4FromArray4D<float>(input_array);
auto input_activations =
- builder.Parameter(0, input_literal->shape(), "input");
+ Parameter(&builder, 0, input_literal->shape(), "input");
auto scale_activations =
- builder.Parameter(1, scale_literal->shape(), "offset");
+ Parameter(&builder, 1, scale_literal->shape(), "offset");
auto offset_activations =
- builder.Parameter(2, offset_literal->shape(), "scale");
+ Parameter(&builder, 2, offset_literal->shape(), "scale");
- auto expected = Literal::MakeTuple({expected_normalized.get(),
- Literal::CreateR1<float>(mean).get(),
- Literal::CreateR1<float>(var).get()});
+ auto expected = LiteralUtil::MakeTuple(
+ {expected_normalized.get(), LiteralUtil::CreateR1<float>(mean).get(),
+ LiteralUtil::CreateR1<float>(var).get()});
std::unique_ptr<GlobalData> input_data =
client_->TransferToServer(*input_literal).ConsumeValueOrDie();
@@ -535,8 +539,8 @@ XLA_TEST_P(BatchNormTestManySizes, RandomizedTrainingTests) {
std::unique_ptr<GlobalData> offset_data =
client_->TransferToServer(*offset_literal).ConsumeValueOrDie();
- builder.BatchNormTraining(input_activations, scale_activations,
- offset_activations, epsilon, feature_index);
+ BatchNormTraining(input_activations, scale_activations, offset_activations,
+ epsilon, feature_index);
// Run all HLO passes during this test. In particular, ClientLibraryTestBase
// disables constant folding, but we want it enabled for our zero-sized tensor
@@ -611,21 +615,21 @@ XLA_TEST_P(BatchNormTestManySizes, RandomizedInferencingTests) {
auto normalized = *ReferenceUtil::BatchNorm4D(input_array, mean4D, var4D,
scale4D, offset4D, epsilon);
- auto offset_literal = Literal::CreateR1<float>(offset);
- auto scale_literal = Literal::CreateR1<float>(scale);
- auto mean_literal = Literal::CreateR1<float>(mean);
- auto var_literal = Literal::CreateR1<float>(var);
- auto input_literal = Literal::CreateR4FromArray4D<float>(input_array);
+ auto offset_literal = LiteralUtil::CreateR1<float>(offset);
+ auto scale_literal = LiteralUtil::CreateR1<float>(scale);
+ auto mean_literal = LiteralUtil::CreateR1<float>(mean);
+ auto var_literal = LiteralUtil::CreateR1<float>(var);
+ auto input_literal = LiteralUtil::CreateR4FromArray4D<float>(input_array);
auto input_activations =
- builder.Parameter(0, input_literal->shape(), "input");
+ Parameter(&builder, 0, input_literal->shape(), "input");
auto scale_activations =
- builder.Parameter(1, scale_literal->shape(), "offset");
+ Parameter(&builder, 1, scale_literal->shape(), "offset");
auto offset_activations =
- builder.Parameter(2, offset_literal->shape(), "scale");
- auto mean_activations = builder.Parameter(3, mean_literal->shape(), "mean");
+ Parameter(&builder, 2, offset_literal->shape(), "scale");
+ auto mean_activations = Parameter(&builder, 3, mean_literal->shape(), "mean");
auto variance_activations =
- builder.Parameter(4, var_literal->shape(), "variance");
+ Parameter(&builder, 4, var_literal->shape(), "variance");
Array4D<float> expected = normalized;
@@ -640,9 +644,9 @@ XLA_TEST_P(BatchNormTestManySizes, RandomizedInferencingTests) {
std::unique_ptr<GlobalData> variance_data =
client_->TransferToServer(*var_literal).ConsumeValueOrDie();
- builder.BatchNormInference(input_activations, scale_activations,
- offset_activations, mean_activations,
- variance_activations, epsilon, feature_index);
+ BatchNormInference(input_activations, scale_activations, offset_activations,
+ mean_activations, variance_activations, epsilon,
+ feature_index);
// Run all HLO passes during this test. In particular, ClientLibraryTestBase
// disables constant folding, but we want it enabled for our zero-sized tensor
@@ -798,21 +802,23 @@ XLA_TEST_P(BatchNormTestManySizes, RandomizedGradTests) {
});
auto expected_grad_activation =
- Literal::CreateR4FromArray4D<float>(grad_activation);
+ LiteralUtil::CreateR4FromArray4D<float>(grad_activation);
- auto input_literal = Literal::CreateR4FromArray4D<float>(input_array);
- auto scale_literal = Literal::CreateR1<float>(scale);
- auto mean_literal = Literal::CreateR1<float>(mean);
- auto var_literal = Literal::CreateR1<float>(var);
+ auto input_literal = LiteralUtil::CreateR4FromArray4D<float>(input_array);
+ auto scale_literal = LiteralUtil::CreateR1<float>(scale);
+ auto mean_literal = LiteralUtil::CreateR1<float>(mean);
+ auto var_literal = LiteralUtil::CreateR1<float>(var);
auto grad_output_literal =
- Literal::CreateR4FromArray4D<float>(grad_output_array);
-
- auto input_parameter = builder.Parameter(0, input_literal->shape(), "input");
- auto scale_parameter = builder.Parameter(1, scale_literal->shape(), "scale");
- auto mean_parameter = builder.Parameter(2, mean_literal->shape(), "mean");
- auto var_parameter = builder.Parameter(3, var_literal->shape(), "variance");
+ LiteralUtil::CreateR4FromArray4D<float>(grad_output_array);
+
+ auto input_parameter =
+ Parameter(&builder, 0, input_literal->shape(), "input");
+ auto scale_parameter =
+ Parameter(&builder, 1, scale_literal->shape(), "scale");
+ auto mean_parameter = Parameter(&builder, 2, mean_literal->shape(), "mean");
+ auto var_parameter = Parameter(&builder, 3, var_literal->shape(), "variance");
auto grad_output_parameter =
- builder.Parameter(4, grad_output_literal->shape(), "grad_output");
+ Parameter(&builder, 4, grad_output_literal->shape(), "grad_output");
std::unique_ptr<GlobalData> input_data =
client_->TransferToServer(*input_literal).ConsumeValueOrDie();
@@ -825,14 +831,13 @@ XLA_TEST_P(BatchNormTestManySizes, RandomizedGradTests) {
std::unique_ptr<GlobalData> grad_output_data =
client_->TransferToServer(*grad_output_literal).ConsumeValueOrDie();
- builder.BatchNormGrad(input_parameter, scale_parameter, mean_parameter,
- var_parameter, grad_output_parameter, epsilon,
- feature_index);
+ BatchNormGrad(input_parameter, scale_parameter, mean_parameter, var_parameter,
+ grad_output_parameter, epsilon, feature_index);
auto expected =
- Literal::MakeTuple({expected_grad_activation.get(),
- Literal::CreateR1<float>(grad_scale).get(),
- Literal::CreateR1<float>(grad_offset).get()});
+ LiteralUtil::MakeTuple({expected_grad_activation.get(),
+ LiteralUtil::CreateR1<float>(grad_scale).get(),
+ LiteralUtil::CreateR1<float>(grad_offset).get()});
// Run all HLO passes during this test. In particular, ClientLibraryTestBase
// disables constant folding, but we want it enabled for our zero-sized tensor
diff --git a/tensorflow/compiler/xla/tests/bfloat16_test.cc b/tensorflow/compiler/xla/tests/bfloat16_test.cc
index ca337e7884..747c82b502 100644
--- a/tensorflow/compiler/xla/tests/bfloat16_test.cc
+++ b/tensorflow/compiler/xla/tests/bfloat16_test.cc
@@ -22,7 +22,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/reference_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -51,9 +51,9 @@ class Bfloat16Test : public ClientLibraryTestBase {
XLA_TEST_F(Bfloat16Test, ScalarOperation) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR0<bfloat16>(static_cast<bfloat16>(2.0f));
- auto y = builder.ConstantR0<bfloat16>(static_cast<bfloat16>(1.0f));
- builder.Add(x, y);
+ auto x = ConstantR0<bfloat16>(&builder, static_cast<bfloat16>(2.0f));
+ auto y = ConstantR0<bfloat16>(&builder, static_cast<bfloat16>(1.0f));
+ Add(x, y);
ComputeAndCompareR0<bfloat16>(&builder, static_cast<bfloat16>(3.0f), {},
error_spec_);
@@ -61,8 +61,8 @@ XLA_TEST_F(Bfloat16Test, ScalarOperation) {
XLA_TEST_F(Bfloat16Test, LogOperation) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR0<bfloat16>(static_cast<bfloat16>(4.0f));
- builder.Log(x);
+ auto x = ConstantR0<bfloat16>(&builder, static_cast<bfloat16>(4.0f));
+ Log(x);
ComputeAndCompareR0<bfloat16>(&builder, static_cast<bfloat16>(1.387f), {},
error_spec_);
@@ -70,7 +70,7 @@ XLA_TEST_F(Bfloat16Test, LogOperation) {
XLA_TEST_F(Bfloat16Test, NegateScalarF16) {
XlaBuilder builder(TestName());
- builder.Neg(builder.ConstantR0<bfloat16>(static_cast<bfloat16>(2.1f)));
+ Neg(ConstantR0<bfloat16>(&builder, static_cast<bfloat16>(2.1f)));
ComputeAndCompareR0<bfloat16>(&builder, static_cast<bfloat16>(-2.1f), {},
error_spec_);
@@ -80,33 +80,33 @@ XLA_TEST_F(Bfloat16Test, BatchNormTraining) {
const int kFeatureIndex = 2;
XlaBuilder builder(TestName());
- auto operand = builder.ConstantR4FromArray4D<bfloat16>(
+ auto operand = ConstantR4FromArray4D<bfloat16>(
+ &builder,
{{{{static_cast<bfloat16>(1.f)}, {static_cast<bfloat16>(2.f)}},
{{static_cast<bfloat16>(3.f)}, {static_cast<bfloat16>(4.f)}}},
{{{static_cast<bfloat16>(5.f)}, {static_cast<bfloat16>(6.f)}},
{{static_cast<bfloat16>(7.f)}, {static_cast<bfloat16>(8.f)}}}});
- auto scale = builder.ConstantR1<bfloat16>(
- {static_cast<bfloat16>(2.0f), static_cast<bfloat16>(3.0f)});
+ auto scale = ConstantR1<bfloat16>(
+ &builder, {static_cast<bfloat16>(2.0f), static_cast<bfloat16>(3.0f)});
- auto offset = builder.ConstantR1<bfloat16>(
- {static_cast<bfloat16>(1.0f), static_cast<bfloat16>(2.0f)});
+ auto offset = ConstantR1<bfloat16>(
+ &builder, {static_cast<bfloat16>(1.0f), static_cast<bfloat16>(2.0f)});
- auto tuple = builder.BatchNormTraining(operand, scale, offset,
- /*epsilon=*/0.001, kFeatureIndex);
+ BatchNormTraining(operand, scale, offset, /*epsilon=*/0.001, kFeatureIndex);
- auto expected = Literal::MakeTuple(
- {Literal::CreateR4<bfloat16>(
+ auto expected = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR4<bfloat16>(
{{{{static_cast<bfloat16>(-1.6875f)},
{static_cast<bfloat16>(-2.04f)}},
{{static_cast<bfloat16>(0.105f)}, {static_cast<bfloat16>(0.66f)}}},
{{{static_cast<bfloat16>(1.89f)}, {static_cast<bfloat16>(3.35f)}},
{{static_cast<bfloat16>(3.7f)}, {static_cast<bfloat16>(6.04f)}}}})
.get(),
- Literal::CreateR1<bfloat16>(
+ LiteralUtil::CreateR1<bfloat16>(
{static_cast<bfloat16>(4), static_cast<bfloat16>(5)})
.get(),
- Literal::CreateR1<bfloat16>(
+ LiteralUtil::CreateR1<bfloat16>(
{static_cast<bfloat16>(5), static_cast<bfloat16>(5)})
.get()});
@@ -117,38 +117,39 @@ XLA_TEST_F(Bfloat16Test, BatchNormGrad) {
const int kFeatureIndex = 2;
XlaBuilder builder(TestName());
- auto operand = builder.ConstantR4FromArray4D<bfloat16>(
- Array4D<bfloat16>(2, 2, 2, 1, static_cast<bfloat16>(0.0f)));
+ auto operand = ConstantR4FromArray4D<bfloat16>(
+ &builder, Array4D<bfloat16>(2, 2, 2, 1, static_cast<bfloat16>(0.0f)));
- auto scale = builder.ConstantR1<bfloat16>(
- {static_cast<bfloat16>(1.0f), static_cast<bfloat16>(1.0f)});
+ auto scale = ConstantR1<bfloat16>(
+ &builder, {static_cast<bfloat16>(1.0f), static_cast<bfloat16>(1.0f)});
- auto mean = builder.ConstantR1<bfloat16>(
- {static_cast<bfloat16>(0.0f), static_cast<bfloat16>(0.0f)});
+ auto mean = ConstantR1<bfloat16>(
+ &builder, {static_cast<bfloat16>(0.0f), static_cast<bfloat16>(0.0f)});
- auto var = builder.ConstantR1<bfloat16>(
- {static_cast<bfloat16>(1.0f), static_cast<bfloat16>(1.0f)});
+ auto var = ConstantR1<bfloat16>(
+ &builder, {static_cast<bfloat16>(1.0f), static_cast<bfloat16>(1.0f)});
- auto grad_output = builder.ConstantR4FromArray4D<bfloat16>(
+ auto grad_output = ConstantR4FromArray4D<bfloat16>(
+ &builder,
{{{{static_cast<bfloat16>(1.f)}, {static_cast<bfloat16>(2.f)}},
{{static_cast<bfloat16>(3.f)}, {static_cast<bfloat16>(4.f)}}},
{{{static_cast<bfloat16>(5.f)}, {static_cast<bfloat16>(6.f)}},
{{static_cast<bfloat16>(7.f)}, {static_cast<bfloat16>(8.f)}}}});
- builder.BatchNormGrad(operand, scale, mean, var, grad_output,
- /*epsilon=*/0.0, kFeatureIndex);
+ BatchNormGrad(operand, scale, mean, var, grad_output,
+ /*epsilon=*/0.0, kFeatureIndex);
- auto expected = Literal::MakeTuple(
- {Literal::CreateR4<bfloat16>(
+ auto expected = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR4<bfloat16>(
{{{{static_cast<bfloat16>(-3.f)}, {static_cast<bfloat16>(-3.f)}},
{{static_cast<bfloat16>(-1.f)}, {static_cast<bfloat16>(-1.f)}}},
{{{static_cast<bfloat16>(1.f)}, {static_cast<bfloat16>(1.f)}},
{{static_cast<bfloat16>(3.f)}, {static_cast<bfloat16>(3.f)}}}})
.get(),
- Literal::CreateR1<bfloat16>(
+ LiteralUtil::CreateR1<bfloat16>(
{static_cast<bfloat16>(0), static_cast<bfloat16>(0)})
.get(),
- Literal::CreateR1<bfloat16>(
+ LiteralUtil::CreateR1<bfloat16>(
{static_cast<bfloat16>(16), static_cast<bfloat16>(20)})
.get()});
diff --git a/tensorflow/compiler/xla/tests/binop_scaling_test.cc b/tensorflow/compiler/xla/tests/binop_scaling_test.cc
index 48203b1d40..20cb989751 100644
--- a/tensorflow/compiler/xla/tests/binop_scaling_test.cc
+++ b/tensorflow/compiler/xla/tests/binop_scaling_test.cc
@@ -33,9 +33,9 @@ TEST_F(BinopScalingTest, MatrixPlusPseudoMatrixRowVector_32x4) {
auto arhs = MakeLinspaceArray2D(0.0, 1.0, 1, 4);
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR2FromArray2D<float>(*alhs);
- auto rhs = builder.ConstantR2FromArray2D<float>(*arhs);
- builder.Add(lhs, rhs);
+ auto lhs = ConstantR2FromArray2D<float>(&builder, *alhs);
+ auto rhs = ConstantR2FromArray2D<float>(&builder, *arhs);
+ Add(lhs, rhs);
auto aexpected = ReferenceUtil::MapWithIndexArray2D(
*alhs, [&](float lhs_value, int64 row, int64 col) {
@@ -49,9 +49,9 @@ TEST_F(BinopScalingTest, MatrixPlusPseudoMatrixRowVector_129x129) {
auto arhs = MakeLinspaceArray2D(0.0, 1.0, 1, 129);
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR2FromArray2D<float>(*alhs);
- auto rhs = builder.ConstantR2FromArray2D<float>(*arhs);
- builder.Add(lhs, rhs);
+ auto lhs = ConstantR2FromArray2D<float>(&builder, *alhs);
+ auto rhs = ConstantR2FromArray2D<float>(&builder, *arhs);
+ Add(lhs, rhs);
auto aexpected = ReferenceUtil::MapWithIndexArray2D(
*alhs, [&](float lhs_value, int64 row, int64 col) {
@@ -65,9 +65,9 @@ TEST_F(BinopScalingTest, MatrixPlusPseudoMatrixColVector_9x5) {
auto arhs = MakeLinspaceArray2D(0.0, 1.0, 9, 1);
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR2FromArray2D<float>(*alhs);
- auto rhs = builder.ConstantR2FromArray2D<float>(*arhs);
- builder.Add(lhs, rhs);
+ auto lhs = ConstantR2FromArray2D<float>(&builder, *alhs);
+ auto rhs = ConstantR2FromArray2D<float>(&builder, *arhs);
+ Add(lhs, rhs);
auto aexpected = ReferenceUtil::MapWithIndexArray2D(
*alhs, [&](float lhs_value, int64 row, int64 col) {
@@ -81,9 +81,9 @@ TEST_F(BinopScalingTest, MatrixPlusPseudoMatrixColVector_129x257) {
auto arhs = MakeLinspaceArray2D(0.0, 1.0, 129, 1);
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR2FromArray2D<float>(*alhs);
- auto rhs = builder.ConstantR2FromArray2D<float>(*arhs);
- builder.Add(lhs, rhs);
+ auto lhs = ConstantR2FromArray2D<float>(&builder, *alhs);
+ auto rhs = ConstantR2FromArray2D<float>(&builder, *arhs);
+ Add(lhs, rhs);
auto aexpected = ReferenceUtil::MapWithIndexArray2D(
*alhs, [&](float lhs_value, int64 row, int64 col) {
@@ -94,11 +94,12 @@ TEST_F(BinopScalingTest, MatrixPlusPseudoMatrixColVector_129x257) {
TEST_F(BinopScalingTest, R0PlusR2F32) {
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR0<float>(42.0);
- auto rhs = builder.ConstantR2<float>({
- {1.0, 2.0}, {3.0, 4.0},
- });
- builder.Add(lhs, rhs);
+ auto lhs = ConstantR0<float>(&builder, 42.0);
+ auto rhs = ConstantR2<float>(&builder, {
+ {1.0, 2.0},
+ {3.0, 4.0},
+ });
+ Add(lhs, rhs);
Array2D<float> expected(2, 2);
expected(0, 0) = 42.0 + 1.0;
@@ -129,9 +130,9 @@ TEST_F(BinopScalingTest, R4PlusR0S32) {
});
// clang-format on
- auto lhs = builder.ConstantR4FromArray4D(lhs_array);
- auto rhs = builder.ConstantR0<int>(42);
- builder.Add(lhs, rhs);
+ auto lhs = ConstantR4FromArray4D(&builder, lhs_array);
+ auto rhs = ConstantR0<int>(&builder, 42);
+ Add(lhs, rhs);
ComputeAndCompareR4<int>(&builder, expected, {});
}
diff --git a/tensorflow/compiler/xla/tests/bitcast_convert_test.cc b/tensorflow/compiler/xla/tests/bitcast_convert_test.cc
index bff60f25ec..d531e8fa82 100644
--- a/tensorflow/compiler/xla/tests/bitcast_convert_test.cc
+++ b/tensorflow/compiler/xla/tests/bitcast_convert_test.cc
@@ -43,8 +43,8 @@ class BitcastConvertTest : public ClientLibraryTestBase {
TEST_F(BitcastConvertTest, ConvertR1S32ToR1S32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({42, 64});
- builder.BitcastConvertType(a, S32);
+ auto a = ConstantR1<int32>(&builder, {42, 64});
+ BitcastConvertType(a, S32);
std::vector<int32> expected = {42, 64};
ComputeAndCompareR1<int32>(&builder, expected, {});
@@ -52,8 +52,8 @@ TEST_F(BitcastConvertTest, ConvertR1S32ToR1S32) {
TEST_F(BitcastConvertTest, ConvertR1F32ToR1F32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({42.0f, 64.0f});
- builder.BitcastConvertType(a, F32);
+ auto a = ConstantR1<float>(&builder, {42.0f, 64.0f});
+ BitcastConvertType(a, F32);
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {});
@@ -62,10 +62,10 @@ TEST_F(BitcastConvertTest, ConvertR1F32ToR1F32) {
TEST_F(BitcastConvertTest, BitcastR1S32ToR1F32) {
XlaBuilder builder(TestName());
auto a =
- builder.ConstantR1<int32>({0, static_cast<int32>(0x80000000), 0x3F800000,
- static_cast<int32>(0xBF800000), 0x3F000000,
- static_cast<int32>(0xBF000000)});
- builder.BitcastConvertType(a, F32);
+ ConstantR1<int32>(&builder, {0, static_cast<int32>(0x80000000),
+ 0x3F800000, static_cast<int32>(0xBF800000),
+ 0x3F000000, static_cast<int32>(0xBF000000)});
+ BitcastConvertType(a, F32);
std::vector<float> expected = {0.0f, -0.0f, 1.0f, -1.0f, 0.5f, -0.5f};
ComputeAndCompareR1<float>(&builder, expected, {});
@@ -73,8 +73,8 @@ TEST_F(BitcastConvertTest, BitcastR1S32ToR1F32) {
XLA_TEST_F(BitcastConvertTest, ConvertR1S0S32ToR1S0F32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({});
- builder.BitcastConvertType(a, F32);
+ auto a = ConstantR1<int32>(&builder, {});
+ BitcastConvertType(a, F32);
std::vector<float> expected = {};
ComputeAndCompareR1<float>(&builder, expected, {});
@@ -82,8 +82,8 @@ XLA_TEST_F(BitcastConvertTest, ConvertR1S0S32ToR1S0F32) {
TEST_F(BitcastConvertTest, ConvertR1F32ToR1S32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({42.6, 64.4});
- builder.BitcastConvertType(a, S32);
+ auto a = ConstantR1<float>(&builder, {42.6, 64.4});
+ BitcastConvertType(a, S32);
std::vector<int32> expected = {0x422a6666, 0x4280cccd};
ComputeAndCompareR1<int32>(&builder, expected, {});
@@ -91,9 +91,9 @@ TEST_F(BitcastConvertTest, ConvertR1F32ToR1S32) {
TEST_F(BitcastConvertTest, ConvertS32Extremes) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>(
- {std::numeric_limits<int32>::min(), std::numeric_limits<int32>::max()});
- builder.BitcastConvertType(a, F32);
+ auto a = ConstantR1<int32>(&builder, {std::numeric_limits<int32>::min(),
+ std::numeric_limits<int32>::max()});
+ BitcastConvertType(a, F32);
std::vector<float> expected = {-0.0f, NAN};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0, 0));
@@ -102,10 +102,10 @@ TEST_F(BitcastConvertTest, ConvertS32Extremes) {
TEST_F(BitcastConvertTest, ConvertMapToS32) {
XlaBuilder builder(TestName());
auto b = builder.CreateSubBuilder("convert");
- auto param = b->Parameter(0, ShapeUtil::MakeShape(F32, {}), "in");
- b->BitcastConvertType(param, S32);
- auto a = builder.ConstantR1<float>({42.0f, 64.0f});
- builder.Map({a}, b->BuildAndNoteError(), {0});
+ auto param = Parameter(b.get(), 0, ShapeUtil::MakeShape(F32, {}), "in");
+ BitcastConvertType(param, S32);
+ auto a = ConstantR1<float>(&builder, {42.0f, 64.0f});
+ Map(&builder, {a}, b->BuildAndNoteError(), {0});
std::vector<int32> expected = {0x42280000, 0x42800000};
ComputeAndCompareR1<int32>(&builder, expected, {});
@@ -114,10 +114,10 @@ TEST_F(BitcastConvertTest, ConvertMapToS32) {
TEST_F(BitcastConvertTest, ConvertMapToF32) {
XlaBuilder builder(TestName());
auto b = builder.CreateSubBuilder("convert");
- auto param = b->Parameter(0, ShapeUtil::MakeShape(S32, {}), "in");
- b->BitcastConvertType(param, F32);
- auto a = builder.ConstantR1<int32>({0x42280000, 0x42800000});
- builder.Map({a}, b->BuildAndNoteError(), {0});
+ auto param = Parameter(b.get(), 0, ShapeUtil::MakeShape(S32, {}), "in");
+ BitcastConvertType(param, F32);
+ auto a = ConstantR1<int32>(&builder, {0x42280000, 0x42800000});
+ Map(&builder, {a}, b->BuildAndNoteError(), {0});
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {});
@@ -130,9 +130,9 @@ TEST_F(BitcastConvertTest, ConvertMapToF32) {
// the new convert should have the same element type as the old convert.
TEST_F(BitcastConvertTest, ConvertReshape) {
XlaBuilder builder(TestName());
- auto input = builder.ConstantR1<int32>({0x42280000});
- auto reshape = builder.Reshape(input, /*dimensions=*/{0}, /*new_sizes=*/{});
- builder.BitcastConvertType(reshape, F32);
+ auto input = ConstantR1<int32>(&builder, {0x42280000});
+ auto reshape = Reshape(input, /*dimensions=*/{0}, /*new_sizes=*/{});
+ BitcastConvertType(reshape, F32);
ComputeAndCompareR0<float>(&builder, 42.0f, {});
}
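The Map tests show how the migration handles sub-computations: CreateSubBuilder still returns a std::unique_ptr<XlaBuilder>, so ops built inside it receive b.get(), while Map itself now names the enclosing builder as its first argument. A condensed sketch of that shape, using only calls that appear in the hunks above:

    xla::XlaBuilder builder("map_bitcast");
    std::unique_ptr<xla::XlaBuilder> b = builder.CreateSubBuilder("convert");
    // Sub-computation: bitcast a scalar F32 parameter to S32.
    xla::XlaOp param =
        xla::Parameter(b.get(), 0, xla::ShapeUtil::MakeShape(xla::F32, {}), "in");
    xla::BitcastConvertType(param, xla::S32);
    // Enclosing computation: map the sub-computation over an R1 constant.
    xla::XlaOp a = xla::ConstantR1<float>(&builder, {42.0f, 64.0f});
    xla::Map(&builder, {a}, b->BuildAndNoteError(), {0});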
diff --git a/tensorflow/compiler/xla/tests/broadcast_simple_test.cc b/tensorflow/compiler/xla/tests/broadcast_simple_test.cc
index 3a0f51fc66..50dd574624 100644
--- a/tensorflow/compiler/xla/tests/broadcast_simple_test.cc
+++ b/tensorflow/compiler/xla/tests/broadcast_simple_test.cc
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/array4d.h"
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/test.h"
@@ -37,17 +38,17 @@ class BroadcastSimpleTest : public ClientLibraryTestBase {
XlaBuilder* builder) {
switch (op) {
case HloOpcode::kMinimum: {
- return builder->Min(lhs, rhs);
+ return Min(lhs, rhs);
}
case HloOpcode::kMaximum: {
- return builder->Max(lhs, rhs);
+ return Max(lhs, rhs);
}
case HloOpcode::kMultiply: {
- return builder->Mul(lhs, rhs);
+ return Mul(lhs, rhs);
}
default: {
// Default to Add
- return builder->Add(lhs, rhs);
+ return Add(lhs, rhs);
}
}
}
@@ -58,7 +59,7 @@ class BroadcastSimpleTest : public ClientLibraryTestBase {
Array3D<float>* r3_array, float start, float end, int seed) {
*r3_shape = ShapeUtil::MakeShapeWithLayout(F32, bounds, minor_to_major);
r3_array->FillRandom(start, end, seed);
- auto r3_data = Literal::CreateR3FromArray3D(*r3_array)->Relayout(
+ auto r3_data = LiteralUtil::CreateR3FromArray3D(*r3_array)->Relayout(
LayoutUtil::MakeLayout(minor_to_major));
std::unique_ptr<GlobalData> r3_global_data =
client_->TransferToServer(*r3_data).ConsumeValueOrDie();
@@ -71,7 +72,7 @@ class BroadcastSimpleTest : public ClientLibraryTestBase {
Array2D<float>* r2_array, float start, float end, int seed) {
*r2_shape = ShapeUtil::MakeShapeWithLayout(F32, bounds, minor_to_major);
r2_array->FillRandom(start, end, seed);
- auto r2_data = Literal::CreateR2FromArray2D(*r2_array)->Relayout(
+ auto r2_data = LiteralUtil::CreateR2FromArray2D(*r2_array)->Relayout(
LayoutUtil::MakeLayout(minor_to_major));
std::unique_ptr<GlobalData> r2_global_data =
client_->TransferToServer(*r2_data).ConsumeValueOrDie();
@@ -104,13 +105,13 @@ using ::testing::HasSubstr;
XLA_TEST_F(BroadcastSimpleTest, ScalarNoOpBroadcast) {
XlaBuilder b(TestName());
- b.Broadcast(b.ConstantR0<float>(1.5), {});
+ Broadcast(ConstantR0<float>(&b, 1.5), {});
ComputeAndCompareR0<float>(&b, 1.5, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(BroadcastSimpleTest, ScalarTo2D_2x3) {
XlaBuilder b(TestName());
- b.Broadcast(b.ConstantR0<float>(2.25), {2, 3});
+ Broadcast(ConstantR0<float>(&b, 2.25), {2, 3});
Array2D<float> expected(2, 3, 2.25);
ComputeAndCompareR2<float>(&b, expected, {}, ErrorSpec(0.0001));
}
@@ -122,7 +123,7 @@ XLA_TEST_F(BroadcastSimpleTest, ScalarParamTo2D_2x3) {
CreateR0Parameter<float>(2.25f, /*parameter_number=*/0, /*name=*/"src",
/*builder=*/&b, /*data_handle=*/&src);
- b.Broadcast(src, {2, 3});
+ Broadcast(src, {2, 3});
Array2D<float> expected(2, 3, 2.25);
ComputeAndCompareR2<float>(&b, expected, {param_data.get()},
ErrorSpec(0.0001));
@@ -130,21 +131,21 @@ XLA_TEST_F(BroadcastSimpleTest, ScalarParamTo2D_2x3) {
XLA_TEST_F(BroadcastSimpleTest, ScalarTo2D_2x0) {
XlaBuilder b(TestName());
- b.Broadcast(b.ConstantR0<float>(2.25), {2, 0});
+ Broadcast(ConstantR0<float>(&b, 2.25), {2, 0});
Array2D<float> expected(2, 0);
ComputeAndCompareR2<float>(&b, expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(BroadcastSimpleTest, ScalarTo2D_0x2) {
XlaBuilder b(TestName());
- b.Broadcast(b.ConstantR0<float>(2.25), {0, 2});
+ Broadcast(ConstantR0<float>(&b, 2.25), {0, 2});
Array2D<float> expected(0, 2);
ComputeAndCompareR2<float>(&b, expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(BroadcastSimpleTest, 1DTo2D) {
XlaBuilder b(TestName());
- b.Broadcast(b.ConstantR1<float>({1, 2, 3}), {2});
+ Broadcast(ConstantR1<float>(&b, {1, 2, 3}), {2});
Array2D<float> expected(2, 3);
expected(0, 0) = 1;
@@ -156,6 +157,86 @@ XLA_TEST_F(BroadcastSimpleTest, 1DTo2D) {
ComputeAndCompareR2<float>(&b, expected, {}, ErrorSpec(0.0001));
}
+XLA_TEST_F(BroadcastSimpleTest, 1DTo2D_WithDimsUsual) {
+ XlaBuilder b(TestName());
+ BroadcastInDim(ConstantR1<float>(&b, {1, 2}),
+ ShapeUtil::MakeShape(F32, {2, 2}), {1});
+
+ Array2D<float> expected(2, 2);
+ expected(0, 0) = 1;
+ expected(0, 1) = 2;
+ expected(1, 0) = 1;
+ expected(1, 1) = 2;
+
+ ComputeAndCompareR2<float>(&b, expected, {}, ErrorSpec(0.0001));
+}
+
+XLA_TEST_F(BroadcastSimpleTest, 1DTo2D_WithDimsTranspose) {
+ XlaBuilder b(TestName());
+ BroadcastInDim(ConstantR1<float>(&b, {1, 2}),
+ ShapeUtil::MakeShape(F32, {2, 2}), {0});
+
+ Array2D<float> expected(2, 2);
+ expected(0, 0) = 1;
+ expected(0, 1) = 1;
+ expected(1, 0) = 2;
+ expected(1, 1) = 2;
+
+ ComputeAndCompareR2<float>(&b, expected, {}, ErrorSpec(0.0001));
+}
+
+XLA_TEST_F(BroadcastSimpleTest, 2DTo3D_WithDims) {
+ XlaBuilder b(TestName());
+ BroadcastInDim(ConstantR2<float>(&b, {{1.0, 5.0}, {2.0, 6.0}}),
+ ShapeUtil::MakeShape(F32, {2, 2, 2}), {0, 1});
+
+ Array3D<float> expected(2, 2, 2);
+ expected(0, 0, 0) = 1.0;
+ expected(1, 0, 0) = 2.0;
+ expected(0, 0, 1) = 1.0;
+ expected(1, 0, 1) = 2.0;
+ expected(0, 1, 0) = 5.0;
+ expected(1, 1, 0) = 6.0;
+ expected(1, 1, 1) = 6.0;
+ expected(0, 1, 1) = 5.0;
+
+ ComputeAndCompareR3<float>(&b, expected, {}, ErrorSpec(0.0001));
+}
+
+XLA_TEST_F(BroadcastSimpleTest, 2DTo3D_WithDimsNotPossibleWithBroadCast) {
+ XlaBuilder b(TestName());
+ BroadcastInDim(ConstantR2<float>(&b, {{1.0, 5.0}, {2.0, 6.0}}),
+ ShapeUtil::MakeShape(F32, {2, 2, 2}), {0, 2});
+
+ Array3D<float> expected(2, 2, 2);
+ expected(0, 0, 0) = 1.0;
+ expected(1, 0, 0) = 2.0;
+ expected(0, 0, 1) = 5.0;
+ expected(1, 0, 1) = 6.0;
+ expected(0, 1, 0) = 1.0;
+ expected(1, 1, 0) = 2.0;
+ expected(1, 1, 1) = 6.0;
+ expected(0, 1, 1) = 5.0;
+
+ ComputeAndCompareR3<float>(&b, expected, {}, ErrorSpec(0.0001));
+}
+
+XLA_TEST_F(BroadcastSimpleTest, 1DTo2D_WithDimsNotPossibleWithBroadCast) {
+ XlaBuilder b(TestName());
+ BroadcastInDim(ConstantR1<float>(&b, {1, 2}),
+ ShapeUtil::MakeShape(F32, {3, 2}), {1});
+
+ Array2D<float> expected(3, 2);
+ expected(0, 0) = 1;
+ expected(0, 1) = 2;
+ expected(1, 0) = 1;
+ expected(1, 1) = 2;
+ expected(2, 0) = 1;
+ expected(2, 1) = 2;
+
+ ComputeAndCompareR2<float>(&b, expected, {}, ErrorSpec(0.0001));
+}
+
// Tests implicit broadcasting of PREDs.
XLA_TEST_F(BroadcastSimpleTest, BooleanAnd2DTo3D_Pred) {
XlaBuilder b(TestName());
@@ -172,7 +253,7 @@ XLA_TEST_F(BroadcastSimpleTest, BooleanAnd2DTo3D_Pred) {
XlaOp x, y;
auto x_data = CreateR2Parameter<bool>(x_vals, 0, "x", &b, &x);
auto y_data = CreateR3Parameter<bool>(y_vals, 1, "y", &b, &y);
- b.And(x, y, /*broadcast_dimensions=*/{1, 2});
+ And(x, y, /*broadcast_dimensions=*/{1, 2});
Array3D<bool> expected(2, 2, 1);
expected(0, 0, 0) = false;
@@ -185,7 +266,7 @@ XLA_TEST_F(BroadcastSimpleTest, BooleanAnd2DTo3D_Pred) {
XLA_TEST_F(BroadcastSimpleTest, ZeroElement_1DTo2D) {
XlaBuilder b(TestName());
- b.Broadcast(b.ConstantR1<float>({}), {2});
+ Broadcast(ConstantR1<float>(&b, {}), {2});
Array2D<float> expected(2, 0);
ComputeAndCompareR2<float>(&b, expected, {}, ErrorSpec(0.0001));
@@ -193,7 +274,7 @@ XLA_TEST_F(BroadcastSimpleTest, ZeroElement_1DTo2D) {
XLA_TEST_F(BroadcastSimpleTest, 1DToZeroElement2D) {
XlaBuilder b(TestName());
- b.Broadcast(b.ConstantR1<float>({1, 2, 3}), {0});
+ Broadcast(ConstantR1<float>(&b, {1, 2, 3}), {0});
Array2D<float> expected(0, 3);
ComputeAndCompareR2<float>(&b, expected, {}, ErrorSpec(0.0001));
@@ -209,14 +290,14 @@ XLA_TEST_F(BroadcastSimpleTest, InDimensionAndDegenerateBroadcasting) {
// dimensions.
XlaBuilder b(TestName());
- b.Add(b.ConstantR2<float>({{1.0, 5.0}}),
- b.ConstantLiteral(*Literal::CreateR3<float>(
- {{{2.0}, {3.0}, {4.0}}, {{5.0}, {6.0}, {7.0}}})),
- /*broadcast_dimensions=*/{1, 2});
+ Add(ConstantR2<float>(&b, {{1.0, 5.0}}),
+ ConstantLiteral(&b, *LiteralUtil::CreateR3<float>(
+ {{{2.0}, {3.0}, {4.0}}, {{5.0}, {6.0}, {7.0}}})),
+ /*broadcast_dimensions=*/{1, 2});
auto expected =
- Literal::CreateR3<float>({{{3.0, 7.0}, {4.0, 8.0}, {5.0, 9.0}},
- {{6.0, 10.0}, {7.0, 11.0}, {8.0, 12.0}}});
+ LiteralUtil::CreateR3<float>({{{3.0, 7.0}, {4.0, 8.0}, {5.0, 9.0}},
+ {{6.0, 10.0}, {7.0, 11.0}, {8.0, 12.0}}});
ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001));
}
@@ -260,9 +341,10 @@ XLA_TEST_P(BroadcastR3ImplicitTest, Doit) {
MakeR3Data(spec.input_bounds, spec.minor2major_layout, &r3_implicit_shape,
&r3_implicit_array, 1.0, 0.2, 56789);
- auto r3_implicit_parameter = builder.Parameter(0, r3_implicit_shape, "input");
- auto r3_parameter = builder.Parameter(1, r3_shape, "input");
- XlaOp op = BuildBinOp(spec.op, r3_implicit_parameter, r3_parameter, &builder);
+ auto r3_implicit_parameter =
+ Parameter(&builder, 0, r3_implicit_shape, "input");
+ auto r3_parameter = Parameter(&builder, 1, r3_shape, "input");
+ BuildBinOp(spec.op, r3_implicit_parameter, r3_parameter, &builder);
Array3D<float> expected_array(spec.output_bounds[0], spec.output_bounds[1],
spec.output_bounds[2]);
@@ -284,7 +366,7 @@ XLA_TEST_P(BroadcastR3ImplicitTest, Doit) {
}
}
}
- auto expected = Literal::CreateR3FromArray3D(expected_array);
+ auto expected = LiteralUtil::CreateR3FromArray3D(expected_array);
ComputeAndCompareLiteral(
&builder, *expected,
{r3_implicit_global_data.get(), r3_global_data.get()},
@@ -306,10 +388,10 @@ XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_1_2) {
auto r1 = CreateR3Parameter(r1d, 1, "r1", &b, &r1h);
auto r3 = CreateR3Parameter(r3d, 0, "r3", &b, &r3h);
- b.Add(r3h, r1h);
+ Add(r3h, r1h);
auto expected =
- Literal::CreateR3<float>({{{2, 3}, {4, 5}}, {{7, 8}, {9, 10}}});
+ LiteralUtil::CreateR3<float>({{{2, 3}, {4, 5}}, {{7, 8}, {9, 10}}});
ComputeAndCompareLiteral(&b, *expected, {r3.get(), r1.get()},
ErrorSpec(0.0001));
@@ -317,79 +399,81 @@ XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_1_2) {
XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_0_1) {
XlaBuilder b(TestName());
- auto r1 = b.ConstantLiteral(*Literal::CreateR3<float>({{{1, 2}}}));
- auto r3 = b.ConstantLiteral(
- *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
- b.Add(r3, r1);
+ auto r1 = ConstantLiteral(&b, *LiteralUtil::CreateR3<float>({{{1, 2}}}));
+ auto r3 = ConstantLiteral(
+ &b, *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
+ Add(r3, r1);
auto expected =
- Literal::CreateR3<float>({{{2, 4}, {4, 6}}, {{6, 8}, {8, 10}}});
+ LiteralUtil::CreateR3<float>({{{2, 4}, {4, 6}}, {{6, 8}, {8, 10}}});
ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_0_2) {
XlaBuilder b(TestName());
- auto r1 = b.ConstantLiteral(*Literal::CreateR3<float>({{{1}, {2}}}));
- auto r3 = b.ConstantLiteral(
- *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
- b.Add(r3, r1);
+ auto r1 = ConstantLiteral(&b, *LiteralUtil::CreateR3<float>({{{1}, {2}}}));
+ auto r3 = ConstantLiteral(
+ &b, *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
+ Add(r3, r1);
auto expected =
- Literal::CreateR3<float>({{{2, 3}, {5, 6}}, {{6, 7}, {9, 10}}});
+ LiteralUtil::CreateR3<float>({{{2, 3}, {5, 6}}, {{6, 7}, {9, 10}}});
ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_0) {
XlaBuilder b(TestName());
- auto r1 = b.ConstantLiteral(*Literal::CreateR3<float>({{{1, 2}, {3, 4}}}));
- auto r3 = b.ConstantLiteral(
- *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
- b.Add(r3, r1);
+ auto r1 =
+ ConstantLiteral(&b, *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}}));
+ auto r3 = ConstantLiteral(
+ &b, *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
+ Add(r3, r1);
auto expected =
- Literal::CreateR3<float>({{{2, 4}, {6, 8}}, {{6, 8}, {10, 12}}});
+ LiteralUtil::CreateR3<float>({{{2, 4}, {6, 8}}, {{6, 8}, {10, 12}}});
ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_1) {
XlaBuilder b(TestName());
- auto r1 = b.ConstantLiteral(*Literal::CreateR3<float>({{{1, 2}}, {{3, 4}}}));
- auto r3 = b.ConstantLiteral(
- *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
- b.Add(r3, r1);
+ auto r1 =
+ ConstantLiteral(&b, *LiteralUtil::CreateR3<float>({{{1, 2}}, {{3, 4}}}));
+ auto r3 = ConstantLiteral(
+ &b, *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
+ Add(r3, r1);
auto expected =
- Literal::CreateR3<float>({{{2, 4}, {4, 6}}, {{8, 10}, {10, 12}}});
+ LiteralUtil::CreateR3<float>({{{2, 4}, {4, 6}}, {{8, 10}, {10, 12}}});
ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_2) {
XlaBuilder b(TestName());
- auto r1 =
- b.ConstantLiteral(*Literal::CreateR3<float>({{{1}, {2}}, {{3}, {4}}}));
- auto r3 = b.ConstantLiteral(
- *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
- b.Add(r3, r1);
+ auto r1 = ConstantLiteral(
+ &b, *LiteralUtil::CreateR3<float>({{{1}, {2}}, {{3}, {4}}}));
+ auto r3 = ConstantLiteral(
+ &b, *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
+ Add(r3, r1);
auto expected =
- Literal::CreateR3<float>({{{2, 3}, {5, 6}}, {{8, 9}, {11, 12}}});
+ LiteralUtil::CreateR3<float>({{{2, 3}, {5, 6}}, {{8, 9}, {11, 12}}});
ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_0_1_2) {
XlaBuilder b(TestName());
- auto r1 = b.ConstantLiteral(*Literal::CreateR3<float>({{{1}}}));
- auto r3 = b.ConstantLiteral(
- *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
- b.Add(r3, r1);
+ auto r1 = ConstantLiteral(&b, *LiteralUtil::CreateR3<float>({{{1}}}));
+ auto r3 = ConstantLiteral(
+ &b, *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
+ Add(r3, r1);
auto expected =
- Literal::CreateR3<float>({{{2, 3}, {4, 5}}, {{6, 7}, {8, 9}}});
+ LiteralUtil::CreateR3<float>({{{2, 3}, {4, 5}}, {{6, 7}, {8, 9}}});
ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001));
}
@@ -509,14 +593,14 @@ XLA_TEST_P(BroadcastR2ImplicitTest, Doit) {
&r2_implicit_shape2, &r2_implicit_array2, 0.8, 0.4, 56789);
auto r2_implicit_parameter1 =
- builder.Parameter(0, r2_implicit_shape1, "input0");
- auto r2_parameter = builder.Parameter(1, r2_shape, "input1");
+ Parameter(&builder, 0, r2_implicit_shape1, "input0");
+ auto r2_parameter = Parameter(&builder, 1, r2_shape, "input1");
auto r2_implicit_parameter2 =
- builder.Parameter(2, r2_implicit_shape2, "input2");
+ Parameter(&builder, 2, r2_implicit_shape2, "input2");
XlaOp op1 =
BuildBinOp(spec.op1, r2_implicit_parameter1, r2_parameter, &builder);
- XlaOp op2 = BuildBinOp(spec.op2, op1, r2_implicit_parameter2, &builder);
+ BuildBinOp(spec.op2, op1, r2_implicit_parameter2, &builder);
Array2D<float> expected_array(spec.output_bounds[0], spec.output_bounds[1]);
@@ -530,7 +614,7 @@ XLA_TEST_P(BroadcastR2ImplicitTest, Doit) {
*v = ApplyOpToFloats(spec.op2, tmp, v3);
});
- auto expected = Literal::CreateR2FromArray2D(expected_array);
+ auto expected = LiteralUtil::CreateR2FromArray2D(expected_array);
ComputeAndCompareLiteral(
&builder, *expected,
{r2_implicit_global_data1.get(), r2_global_data.get(),
@@ -544,80 +628,82 @@ INSTANTIATE_TEST_CASE_P(BroadcastR2ImplicitTestInstances,
XLA_TEST_F(BroadcastSimpleTest, Add2DTo2DDegenerate_0) {
XlaBuilder b(TestName());
- auto r1 = b.ConstantLiteral(*Literal::CreateR2<float>({{1, 2}}));
- auto r2 = b.ConstantLiteral(*Literal::CreateR2<float>({{1, 2}, {3, 4}}));
- b.Add(r2, r1);
+ auto r1 = ConstantLiteral(&b, *LiteralUtil::CreateR2<float>({{1, 2}}));
+ auto r2 =
+ ConstantLiteral(&b, *LiteralUtil::CreateR2<float>({{1, 2}, {3, 4}}));
+ Add(r2, r1);
- auto expected = Literal::CreateR2<float>({{2, 4}, {4, 6}});
+ auto expected = LiteralUtil::CreateR2<float>({{2, 4}, {4, 6}});
ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(BroadcastSimpleTest, Add2DTo2DDegenerate_1) {
XlaBuilder b(TestName());
- auto r1 = b.ConstantLiteral(*Literal::CreateR2<float>({{1}, {2}}));
- auto r2 = b.ConstantLiteral(*Literal::CreateR2<float>({{1, 2}, {3, 4}}));
- b.Add(r2, r1);
+ auto r1 = ConstantLiteral(&b, *LiteralUtil::CreateR2<float>({{1}, {2}}));
+ auto r2 =
+ ConstantLiteral(&b, *LiteralUtil::CreateR2<float>({{1, 2}, {3, 4}}));
+ Add(r2, r1);
- auto expected = Literal::CreateR2<float>({{2, 3}, {5, 6}});
+ auto expected = LiteralUtil::CreateR2<float>({{2, 3}, {5, 6}});
ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(BroadcastSimpleTest, Add1DTo3DInDim0) {
XlaBuilder b(TestName());
- auto r1 = b.ConstantR1<float>({10, 20});
- auto r3 = b.ConstantLiteral(
- *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
- b.Add(r3, r1, {0});
+ auto r1 = ConstantR1<float>(&b, {10, 20});
+ auto r3 = ConstantLiteral(
+ &b, *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
+ Add(r3, r1, {0});
- auto expected =
- Literal::CreateR3<float>({{{11, 12}, {13, 14}}, {{25, 26}, {27, 28}}});
+ auto expected = LiteralUtil::CreateR3<float>(
+ {{{11, 12}, {13, 14}}, {{25, 26}, {27, 28}}});
ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(BroadcastSimpleTest, Add1DTo3DInDim1) {
XlaBuilder b(TestName());
- auto r1 = b.ConstantR1<float>({10, 20});
- auto r3 = b.ConstantLiteral(
- *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
- b.Add(r1, r3, {1});
+ auto r1 = ConstantR1<float>(&b, {10, 20});
+ auto r3 = ConstantLiteral(
+ &b, *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
+ Add(r1, r3, {1});
- auto expected =
- Literal::CreateR3<float>({{{11, 12}, {23, 24}}, {{15, 16}, {27, 28}}});
+ auto expected = LiteralUtil::CreateR3<float>(
+ {{{11, 12}, {23, 24}}, {{15, 16}, {27, 28}}});
ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(BroadcastSimpleTest, Add1DTo3DInDim2) {
XlaBuilder b(TestName());
- auto r1 = b.ConstantR1<float>({10, 20});
- auto r3 = b.ConstantLiteral(
- *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
- b.Add(r1, r3, {2});
+ auto r1 = ConstantR1<float>(&b, {10, 20});
+ auto r3 = ConstantLiteral(
+ &b, *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
+ Add(r1, r3, {2});
- auto expected =
- Literal::CreateR3<float>({{{11, 22}, {13, 24}}, {{15, 26}, {17, 28}}});
+ auto expected = LiteralUtil::CreateR3<float>(
+ {{{11, 22}, {13, 24}}, {{15, 26}, {17, 28}}});
ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(BroadcastSimpleTest, Add1DTo3DInDimAll) {
XlaBuilder b(TestName());
- auto r1_0 = b.ConstantR1<float>({1000, 2000});
- auto r1_1 = b.ConstantR1<float>({100, 200});
- auto r1_2 = b.ConstantR1<float>({10, 20});
- auto r3 = b.ConstantLiteral(
- *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
+ auto r1_0 = ConstantR1<float>(&b, {1000, 2000});
+ auto r1_1 = ConstantR1<float>(&b, {100, 200});
+ auto r1_2 = ConstantR1<float>(&b, {10, 20});
+ auto r3 = ConstantLiteral(
+ &b, *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
for (int i = 0; i < 3; ++i) {
- r3 = b.Add(r1_0, r3, {0});
- r3 = b.Add(r3, r1_1, {1});
- r3 = b.Add(r1_2, r3, {2});
+ r3 = Add(r1_0, r3, {0});
+ r3 = Add(r3, r1_1, {1});
+ r3 = Add(r1_2, r3, {2});
}
- r3 = b.Mul(r3, b.ConstantR0<float>(-2));
+ r3 = Mul(r3, ConstantR0<float>(&b, -2));
- auto expected = Literal::CreateR3<float>(
+ auto expected = LiteralUtil::CreateR3<float>(
{{{-6 * 1110 - 2, -6 * 1120 - 4}, {-6 * 1210 - 6, -6 * 1220 - 8}},
{{-6 * 2110 - 10, -6 * 2120 - 12}, {-6 * 2210 - 14, -6 * 2220 - 16}}});
@@ -626,19 +712,19 @@ XLA_TEST_F(BroadcastSimpleTest, Add1DTo3DInDimAll) {
XLA_TEST_F(BroadcastSimpleTest, Add1DTo3DInDimAllWithScalarBroadcast) {
XlaBuilder b(TestName());
- auto r1_0 = b.ConstantR1<float>({1000, 2000});
- auto r1_1 = b.ConstantR1<float>({100, 200});
- auto r1_2 = b.ConstantR1<float>({10, 20});
- auto r0 = b.ConstantR0<float>(3);
- auto r3 = b.Broadcast(r0, {2, 2, 2});
+ auto r1_0 = ConstantR1<float>(&b, {1000, 2000});
+ auto r1_1 = ConstantR1<float>(&b, {100, 200});
+ auto r1_2 = ConstantR1<float>(&b, {10, 20});
+ auto r0 = ConstantR0<float>(&b, 3);
+ auto r3 = Broadcast(r0, {2, 2, 2});
for (int i = 0; i < 3; ++i) {
- r3 = b.Add(r1_0, r3, {0});
- r3 = b.Add(r3, r1_1, {1});
- r3 = b.Add(r1_2, r3, {2});
+ r3 = Add(r1_0, r3, {0});
+ r3 = Add(r3, r1_1, {1});
+ r3 = Add(r1_2, r3, {2});
}
- r3 = b.Mul(r3, b.ConstantR0<float>(-1));
+ r3 = Mul(r3, ConstantR0<float>(&b, -1));
- auto expected = Literal::CreateR3<float>(
+ auto expected = LiteralUtil::CreateR3<float>(
{{{-3 * 1110 - 3, -3 * 1120 - 3}, {-3 * 1210 - 3, -3 * 1220 - 3}},
{{-3 * 2110 - 3, -3 * 2120 - 3}, {-3 * 2210 - 3, -3 * 2220 - 3}}});
@@ -650,10 +736,10 @@ XLA_TEST_F(BroadcastSimpleTest, InvalidBinaryAndDegenerateBroadcasting) {
// results in a shape incompatible with the lhs [2, 3, 1].
XlaBuilder b(TestName());
- b.Add(b.ConstantR2<float>({{1.0, 5.0}, {1.0, 5.0}}),
- b.ConstantLiteral(*Literal::CreateR3<float>(
- {{{2.0}, {3.0}, {4.0}}, {{5.0}, {6.0}, {7.0}}})),
- /*broadcast_dimensions=*/{1, 2});
+ Add(ConstantR2<float>(&b, {{1.0, 5.0}, {1.0, 5.0}}),
+ ConstantLiteral(&b, *LiteralUtil::CreateR3<float>(
+ {{{2.0}, {3.0}, {4.0}}, {{5.0}, {6.0}, {7.0}}})),
+ /*broadcast_dimensions=*/{1, 2});
auto result_status = Execute(&b, {});
EXPECT_FALSE(result_status.ok());
@@ -665,8 +751,8 @@ XLA_TEST_F(BroadcastSimpleTest, InvalidInDimensionBroadcasting) {
// Test invalid broadcasting with [1, 2] and [2, 3] inputs.
XlaBuilder b(TestName());
- b.Add(b.ConstantR2<float>({{1.0, 2.0}}),
- b.ConstantR2<float>({{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}}));
+ Add(ConstantR2<float>(&b, {{1.0, 2.0}}),
+ ConstantR2<float>(&b, {{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}}));
auto result_status = Execute(&b, {});
EXPECT_FALSE(result_status.ok());
@@ -678,8 +764,8 @@ XLA_TEST_F(BroadcastSimpleTest, InvalidDegenerateBroadcasting) {
// Test invalid broadcasting with [1, 2] and [2, 3] inputs.
XlaBuilder b(TestName());
- b.Add(b.ConstantR2<float>({{1.0, 2.0}}),
- b.ConstantR2<float>({{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}}));
+ Add(ConstantR2<float>(&b, {{1.0, 2.0}}),
+ ConstantR2<float>(&b, {{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}}));
auto result_status = Execute(&b, {});
EXPECT_FALSE(result_status.ok());
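Besides the builder migration (and the parallel Literal::CreateX to LiteralUtil::CreateX rename for literal factories), this file gains tests for BroadcastInDim. Unlike Broadcast, which can only prepend new major dimensions, BroadcastInDim maps operand dimension i to output dimension broadcast_dimensions[i], so the same R1 operand can be laid out along either axis of the output; that is what the *NotPossibleWithBroadCast test names refer to. A short sketch of the two R1 cases, with shapes taken from the new tests:

    xla::XlaBuilder b("broadcast_in_dim");
    // {1, 2} -> {{1, 2}, {1, 2}}: operand dim 0 becomes output dim 1,
    // so the values vary along the columns.
    xla::BroadcastInDim(xla::ConstantR1<float>(&b, {1, 2}),
                        xla::ShapeUtil::MakeShape(xla::F32, {2, 2}), {1});
    // With {0} instead, operand dim 0 becomes output dim 0 and the result
    // is the transpose: {{1, 1}, {2, 2}}.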
diff --git a/tensorflow/compiler/xla/tests/broadcast_test.cc b/tensorflow/compiler/xla/tests/broadcast_test.cc
index 51b9f0d3e3..c7b94b5bba 100644
--- a/tensorflow/compiler/xla/tests/broadcast_test.cc
+++ b/tensorflow/compiler/xla/tests/broadcast_test.cc
@@ -16,7 +16,7 @@ limitations under the License.
#include <memory>
#include <utility>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -37,7 +37,7 @@ XLA_TEST_F(BroadcastTest, BroadcastScalarToScalar) {
// Test degenerate case of broadcasting a scalar into a scalar.
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {}), input, {}));
@@ -46,14 +46,14 @@ XLA_TEST_F(BroadcastTest, BroadcastScalarToScalar) {
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
- EXPECT_TRUE(LiteralTestUtil::Near(*Literal::CreateR0<float>(42.0), *result,
- error_spec_));
+ EXPECT_TRUE(LiteralTestUtil::Near(*LiteralUtil::CreateR0<float>(42.0),
+ *result, error_spec_));
}
XLA_TEST_F(BroadcastTest, BroadcastScalarTo2D) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 2}), input, {}));
@@ -63,14 +63,14 @@ XLA_TEST_F(BroadcastTest, BroadcastScalarTo2D) {
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR2<float>({{42.0, 42.0}, {42.0, 42.0}}), *result,
+ *LiteralUtil::CreateR2<float>({{42.0, 42.0}, {42.0, 42.0}}), *result,
error_spec_));
}
XLA_TEST_F(BroadcastTest, BroadcastVectorTo2D) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.0, 2.0, 3.0})));
+ LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
// Broadcast vector in both dimension 0 and dimension 1. Join them in a tuple
// to enable testing of the results.
@@ -86,18 +86,18 @@ XLA_TEST_F(BroadcastTest, BroadcastVectorTo2D) {
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR2<float>({{1.0, 1.0}, {2.0, 2.0}, {3.0, 3.0}}),
+ *LiteralUtil::CreateR2<float>({{1.0, 1.0}, {2.0, 2.0}, {3.0, 3.0}}),
LiteralSlice(*result, {0}), error_spec_));
EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR2<float>({{1.0, 2.0, 3.0}, {1.0, 2.0, 3.0}}),
+ *LiteralUtil::CreateR2<float>({{1.0, 2.0, 3.0}, {1.0, 2.0, 3.0}}),
LiteralSlice(*result, {1}), error_spec_));
}
XLA_TEST_F(BroadcastTest, Broadcast2DTo2D) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 2}), input, {0, 1}));
@@ -106,9 +106,9 @@ XLA_TEST_F(BroadcastTest, Broadcast2DTo2D) {
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
- EXPECT_TRUE(
- LiteralTestUtil::Near(*Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}),
- *result, error_spec_));
+ EXPECT_TRUE(LiteralTestUtil::Near(
+ *LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}), *result,
+ error_spec_));
}
XLA_TEST_F(BroadcastTest, Broadcast2DTo2DTranspose) {
@@ -116,7 +116,7 @@ XLA_TEST_F(BroadcastTest, Broadcast2DTo2DTranspose) {
// the dimensions, ie transpose.
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 2}), input, {1, 0}));
@@ -125,15 +125,15 @@ XLA_TEST_F(BroadcastTest, Broadcast2DTo2DTranspose) {
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
- EXPECT_TRUE(
- LiteralTestUtil::Near(*Literal::CreateR2<float>({{1.0, 3.0}, {2.0, 4.0}}),
- *result, error_spec_));
+ EXPECT_TRUE(LiteralTestUtil::Near(
+ *LiteralUtil::CreateR2<float>({{1.0, 3.0}, {2.0, 4.0}}), *result,
+ error_spec_));
}
XLA_TEST_F(BroadcastTest, Broadcast2DTo3D) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 3, 2}), input, {0, 2}));
@@ -143,15 +143,15 @@ XLA_TEST_F(BroadcastTest, Broadcast2DTo3D) {
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR3<float>({{{1.0, 2.0}, {1.0, 2.0}, {1.0, 2.0}},
- {{3.0, 4.0}, {3.0, 4.0}, {3.0, 4.0}}}),
+ *LiteralUtil::CreateR3<float>({{{1.0, 2.0}, {1.0, 2.0}, {1.0, 2.0}},
+ {{3.0, 4.0}, {3.0, 4.0}, {3.0, 4.0}}}),
*result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R1_2_To_R4_2x2x3x3) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>({1.0, 2.0})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1.0, 2.0})));
// Broadcast vector in dimension 1.
builder.AddInstruction(HloInstruction::CreateBroadcast(
@@ -166,8 +166,9 @@ TEST_F(BroadcastTest, Broadcast_R1_2_To_R4_2x2x3x3) {
Array2D<float> pz({{1, 2}, {1, 2}});
expected.FillWithPZ(pz);
- EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR4FromArray4D<float>(expected), *result, error_spec_));
+ EXPECT_TRUE(
+ LiteralTestUtil::Near(*LiteralUtil::CreateR4FromArray4D<float>(expected),
+ *result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R1_1025_To_R4_3x3x3x1025) {
@@ -176,7 +177,7 @@ TEST_F(BroadcastTest, Broadcast_R1_1025_To_R4_3x3x3x1025) {
int64 r1_size = input_data.size();
std::iota(input_data.begin(), input_data.end(), 0.0f);
auto input = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>(input_data)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(input_data)));
// Broadcast vector in dimension 3.
builder.AddInstruction(HloInstruction::CreateBroadcast(
@@ -196,8 +197,9 @@ TEST_F(BroadcastTest, Broadcast_R1_1025_To_R4_3x3x3x1025) {
}
expected.FillWithYX(yx);
- EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR4FromArray4D<float>(expected), *result, error_spec_));
+ EXPECT_TRUE(
+ LiteralTestUtil::Near(*LiteralUtil::CreateR4FromArray4D<float>(expected),
+ *result, error_spec_));
}
XLA_TEST_F(BroadcastTest, Broadcast_R1_64_To_R4_32x64x7x7) {
@@ -207,7 +209,7 @@ XLA_TEST_F(BroadcastTest, Broadcast_R1_64_To_R4_32x64x7x7) {
std::vector<float> r1_array(64, 42.0);
auto input = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<float>(r1_array)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(r1_array)));
// Broadcast vector in dimension 1.
builder.AddInstruction(HloInstruction::CreateBroadcast(
@@ -218,14 +220,14 @@ XLA_TEST_F(BroadcastTest, Broadcast_R1_64_To_R4_32x64x7x7) {
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
- EXPECT_TRUE(LiteralTestUtil::Near(*Literal::CreateR4FromArray4D(r4_array),
+ EXPECT_TRUE(LiteralTestUtil::Near(*LiteralUtil::CreateR4FromArray4D(r4_array),
*result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R0_to_R4_64x64x3x3) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {64, 64, 3, 3}), input, {}));
@@ -238,15 +240,16 @@ TEST_F(BroadcastTest, Broadcast_R0_to_R4_64x64x3x3) {
Array4D<float> expected(64, 64, 3, 3);
expected.Fill(1.0f);
- EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR4FromArray4D<float>(expected), *result, error_spec_));
+ EXPECT_TRUE(
+ LiteralTestUtil::Near(*LiteralUtil::CreateR4FromArray4D<float>(expected),
+ *result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R2_2x2_To_R4_3x3x2x2) {
auto builder = HloComputation::Builder(TestName());
Array2D<float> to_broadcast({{1.0f, 2.0f}, {3.0f, 4.0f}});
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2FromArray2D<float>(to_broadcast)));
+ LiteralUtil::CreateR2FromArray2D<float>(to_broadcast)));
// Broadcast vector in dimensions 2 and 3.
builder.AddInstruction(HloInstruction::CreateBroadcast(
@@ -260,8 +263,9 @@ TEST_F(BroadcastTest, Broadcast_R2_2x2_To_R4_3x3x2x2) {
Array4D<float> expected(3, 3, 2, 2);
expected.FillWithYX(to_broadcast);
- EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR4FromArray4D<float>(expected), *result, error_spec_));
+ EXPECT_TRUE(
+ LiteralTestUtil::Near(*LiteralUtil::CreateR4FromArray4D<float>(expected),
+ *result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R3_2x3x4_to_R4_2x3x4x5) {
@@ -280,7 +284,7 @@ TEST_F(BroadcastTest, Broadcast_R3_2x3x4_to_R4_2x3x4x5) {
}
}
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR3FromArray3D<float>(input_vals)));
+ LiteralUtil::CreateR3FromArray3D<float>(input_vals)));
// Broadcast vector in dimensions 2 and 3.
builder.AddInstruction(HloInstruction::CreateBroadcast(
@@ -291,8 +295,9 @@ TEST_F(BroadcastTest, Broadcast_R3_2x3x4_to_R4_2x3x4x5) {
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
- EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR4FromArray4D<float>(expected), *result, error_spec_));
+ EXPECT_TRUE(
+ LiteralTestUtil::Near(*LiteralUtil::CreateR4FromArray4D<float>(expected),
+ *result, error_spec_));
}
} // namespace
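This file exercises broadcast at the HLO level rather than through the client builder, so the only change is the literal factory rename: the Literal class now comes from literal.h, and the static Create* constructors live on LiteralUtil in literal_util.h, still returning std::unique_ptr<Literal>. For example:

    #include "tensorflow/compiler/xla/literal.h"       // class Literal
    #include "tensorflow/compiler/xla/literal_util.h"  // LiteralUtil factories

    std::unique_ptr<xla::Literal> scalar = xla::LiteralUtil::CreateR0<float>(42.0);
    std::unique_ptr<xla::Literal> matrix =
        xla::LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});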
diff --git a/tensorflow/compiler/xla/tests/call_test.cc b/tensorflow/compiler/xla/tests/call_test.cc
index 5fd33b50c9..2086e38b91 100644
--- a/tensorflow/compiler/xla/tests/call_test.cc
+++ b/tensorflow/compiler/xla/tests/call_test.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test_helpers.h"
@@ -34,7 +35,7 @@ class CallOpTest : public ClientLibraryTestBase {
protected:
XlaComputation CreateR0F32IdentityComputation() {
XlaBuilder builder("Identity");
- builder.Parameter(0, r0f32_, "x");
+ Parameter(&builder, 0, r0f32_, "x");
auto build_status = builder.Build();
EXPECT_IS_OK(build_status.status());
return build_status.ConsumeValueOrDie();
@@ -42,9 +43,9 @@ class CallOpTest : public ClientLibraryTestBase {
XlaComputation CreateR1S0F32AdditionComputation() {
XlaBuilder builder("Addition");
- auto x = builder.Parameter(0, r1s0f32_, "x");
- auto y = builder.Parameter(1, r1s0f32_, "y");
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, r1s0f32_, "x");
+ auto y = Parameter(&builder, 1, r1s0f32_, "y");
+ Add(x, y);
auto build_status = builder.Build();
EXPECT_IS_OK(build_status.status());
return build_status.ConsumeValueOrDie();
@@ -52,9 +53,9 @@ class CallOpTest : public ClientLibraryTestBase {
XlaComputation CreateR1S2F32AdditionComputation() {
XlaBuilder builder("Addition");
- auto x = builder.Parameter(0, r1s2f32_, "x");
- auto y = builder.Parameter(1, r1s2f32_, "y");
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, r1s2f32_, "x");
+ auto y = Parameter(&builder, 1, r1s2f32_, "y");
+ Add(x, y);
auto build_status = builder.Build();
EXPECT_IS_OK(build_status.status());
return build_status.ConsumeValueOrDie();
@@ -62,7 +63,7 @@ class CallOpTest : public ClientLibraryTestBase {
XlaComputation CreateR0F32TupleComputation() {
XlaBuilder builder("Tuple");
- builder.Tuple({builder.Parameter(0, r0f32_, "x")});
+ Tuple(&builder, {Parameter(&builder, 0, r0f32_, "x")});
auto build_status = builder.Build();
EXPECT_IS_OK(build_status.status());
return build_status.ConsumeValueOrDie();
@@ -76,8 +77,9 @@ class CallOpTest : public ClientLibraryTestBase {
XLA_TEST_F(CallOpTest, CallR0F32IdentityScalar) {
XlaBuilder builder(TestName());
XlaComputation callee = CreateR0F32IdentityComputation();
- auto constant = builder.ConstantLiteral(*Literal::CreateR0<float>(42.0));
- builder.Call(callee, {constant});
+ auto constant =
+ ConstantLiteral(&builder, *LiteralUtil::CreateR0<float>(42.0));
+ Call(&builder, callee, {constant});
ComputeAndCompareR0<float>(&builder, 42.0, {}, ErrorSpec(0.01f));
}
@@ -85,9 +87,9 @@ XLA_TEST_F(CallOpTest, CallR0F32IdentityScalar) {
XLA_TEST_F(CallOpTest, CallR1S0F32AddArray) {
XlaBuilder builder(TestName());
XlaComputation callee = CreateR1S0F32AdditionComputation();
- auto x = builder.ConstantLiteral(*Literal::CreateR1<float>({}));
- auto y = builder.ConstantLiteral(*Literal::CreateR1<float>({}));
- builder.Call(callee, {x, y});
+ auto x = ConstantLiteral(&builder, *LiteralUtil::CreateR1<float>({}));
+ auto y = ConstantLiteral(&builder, *LiteralUtil::CreateR1<float>({}));
+ Call(&builder, callee, {x, y});
ComputeAndCompareR1<float>(&builder, {}, {}, ErrorSpec(0.01f));
}
@@ -95,9 +97,11 @@ XLA_TEST_F(CallOpTest, CallR1S0F32AddArray) {
XLA_TEST_F(CallOpTest, CallR1S2F32AddArray) {
XlaBuilder builder(TestName());
XlaComputation callee = CreateR1S2F32AdditionComputation();
- auto x = builder.ConstantLiteral(*Literal::CreateR1<float>({1.0f, 2.0f}));
- auto y = builder.ConstantLiteral(*Literal::CreateR1<float>({2.0f, 3.0f}));
- builder.Call(callee, {x, y});
+ auto x =
+ ConstantLiteral(&builder, *LiteralUtil::CreateR1<float>({1.0f, 2.0f}));
+ auto y =
+ ConstantLiteral(&builder, *LiteralUtil::CreateR1<float>({2.0f, 3.0f}));
+ Call(&builder, callee, {x, y});
ComputeAndCompareR1<float>(&builder, {3.0f, 5.0f}, {}, ErrorSpec(0.01f));
}
@@ -105,40 +109,40 @@ XLA_TEST_F(CallOpTest, CallR1S2F32AddArray) {
XLA_TEST_F(CallOpTest, CallTreeTwoDeepBranchFactorThree) {
XlaBuilder builder("inner");
{
- auto x = builder.Parameter(0, r0f32_, "x");
- builder.Add(x, builder.ConstantR0<float>(1.0));
+ auto x = Parameter(&builder, 0, r0f32_, "x");
+ Add(x, ConstantR0<float>(&builder, 1.0));
}
TF_ASSERT_OK_AND_ASSIGN(XlaComputation inner, builder.Build());
XlaBuilder builder2("outer");
{
- auto x = builder2.Parameter(0, r0f32_, "x");
- x = builder2.Call(inner, {x});
- x = builder2.Call(inner, {x});
- x = builder2.Call(inner, {x});
+ auto x = Parameter(&builder2, 0, r0f32_, "x");
+ x = Call(&builder2, inner, {x});
+ x = Call(&builder2, inner, {x});
+ x = Call(&builder2, inner, {x});
}
TF_ASSERT_OK_AND_ASSIGN(XlaComputation outer, builder2.Build());
XlaBuilder builder3("outermost");
{
- auto x = builder3.Parameter(0, r0f32_, "x");
- x = builder3.Call(outer, {x});
- x = builder3.Call(outer, {x});
- x = builder3.Call(outer, {x});
+ auto x = Parameter(&builder3, 0, r0f32_, "x");
+ x = Call(&builder3, outer, {x});
+ x = Call(&builder3, outer, {x});
+ x = Call(&builder3, outer, {x});
}
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> start,
- client_->TransferToServer(*Literal::CreateR0<float>(1.0f)));
+ client_->TransferToServer(*LiteralUtil::CreateR0<float>(1.0f)));
ComputeAndCompareR0<float>(&builder3, 10.0f, {start.get()}, ErrorSpec(0.0f));
}
XLA_TEST_F(CallOpTest, CallR0F32Tuple) {
XlaBuilder builder(TestName());
XlaComputation callee = CreateR0F32TupleComputation();
- auto elem = Literal::CreateR0<float>(42.0);
- auto tuple = Literal::MakeTuple({elem.get()});
- builder.Call(callee, {builder.ConstantLiteral(*elem)});
+ auto elem = LiteralUtil::CreateR0<float>(42.0);
+ auto tuple = LiteralUtil::MakeTuple({elem.get()});
+ Call(&builder, callee, {ConstantLiteral(&builder, *elem)});
ComputeAndCompareTuple(&builder, *tuple, {}, ErrorSpec(0.01f));
}
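Call follows the same convention as Map: the enclosing builder comes first and the callee computation is passed explicitly. Since Call returns an XlaOp, chained calls read naturally in the new style. A sketch of the nested pattern from CallTreeTwoDeepBranchFactorThree, with the fixture member r0f32_ replaced by an explicit scalar F32 shape:

    xla::XlaBuilder inner_builder("inner");
    {
      auto x = xla::Parameter(&inner_builder, 0,
                              xla::ShapeUtil::MakeShape(xla::F32, {}), "x");
      xla::Add(x, xla::ConstantR0<float>(&inner_builder, 1.0));  // x + 1
    }
    xla::XlaComputation inner = inner_builder.Build().ConsumeValueOrDie();

    xla::XlaBuilder outer_builder("outer");
    {
      auto x = xla::Parameter(&outer_builder, 0,
                              xla::ShapeUtil::MakeShape(xla::F32, {}), "x");
      x = xla::Call(&outer_builder, inner, {x});  // applies inner once: x + 1
    }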
diff --git a/tensorflow/compiler/xla/tests/check_execution_arity_test.cc b/tensorflow/compiler/xla/tests/check_execution_arity_test.cc
index 660ff0cad5..0bc8facfe2 100644
--- a/tensorflow/compiler/xla/tests/check_execution_arity_test.cc
+++ b/tensorflow/compiler/xla/tests/check_execution_arity_test.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/global_data.h"
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/test.h"
@@ -36,11 +36,11 @@ class CheckExecutionArityTest : public ClientLibraryTestBase {};
TEST_F(CheckExecutionArityTest, TwoParamComputationNumArguments) {
XlaBuilder builder("add_two_params");
- auto param_literal = Literal::CreateR1<float>({1.1f, 2.2f});
+ auto param_literal = LiteralUtil::CreateR1<float>({1.1f, 2.2f});
- auto p0 = builder.Parameter(0, param_literal->shape(), "param0");
- auto p1 = builder.Parameter(1, param_literal->shape(), "param1");
- auto add = builder.Add(p0, p1);
+ auto p0 = Parameter(&builder, 0, param_literal->shape(), "param0");
+ auto p1 = Parameter(&builder, 1, param_literal->shape(), "param1");
+ Add(p0, p1);
auto param0_data =
client_->TransferToServer(*param_literal).ConsumeValueOrDie();
@@ -77,20 +77,20 @@ TEST_F(CheckExecutionArityTest, TwoParamComputationNumArguments) {
XLA_TEST_F(CheckExecutionArityTest, CheckArgumentShapes) {
XlaBuilder builder("add_two_params");
- auto p0 = builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "param0");
- auto p1 = builder.Parameter(1, ShapeUtil::MakeShape(F32, {4}), "param1");
- auto add = builder.Mul(p0, p1);
+ auto p0 = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "param0");
+ auto p1 = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {4}), "param1");
+ Mul(p0, p1);
auto computation_status = builder.Build();
ASSERT_IS_OK(computation_status.status());
auto computation = computation_status.ConsumeValueOrDie();
- auto f32_literal = Literal::CreateR0<float>(1.1f);
+ auto f32_literal = LiteralUtil::CreateR0<float>(1.1f);
auto f32_data = client_->TransferToServer(*f32_literal).ConsumeValueOrDie();
- auto f32_4_literal = Literal::CreateR1<float>({1.0f, 2.0f, 3.0f, 4.0f});
+ auto f32_4_literal = LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f, 4.0f});
auto f32_4_data =
client_->TransferToServer(*f32_4_literal).ConsumeValueOrDie();
- auto u8_4_literal = Literal::CreateR1U8("hola");
+ auto u8_4_literal = LiteralUtil::CreateR1U8("hola");
auto u8_4_data = client_->TransferToServer(*u8_4_literal).ConsumeValueOrDie();
// Match
diff --git a/tensorflow/compiler/xla/tests/client_library_test_base.cc b/tensorflow/compiler/xla/tests/client_library_test_base.cc
index bf8ed4d9fb..ef784da457 100644
--- a/tensorflow/compiler/xla/tests/client_library_test_base.cc
+++ b/tensorflow/compiler/xla/tests/client_library_test_base.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/client_library.h"
#include "tensorflow/compiler/xla/client/local_client.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/execution_options_util.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
@@ -156,7 +157,7 @@ string ClientLibraryTestBase::ExecuteToString(
void ClientLibraryTestBase::ComputeAndCompareR1(
XlaBuilder* builder, const tensorflow::core::Bitmap& expected,
tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
- std::unique_ptr<Literal> expected_literal = Literal::CreateR1(expected);
+ std::unique_ptr<Literal> expected_literal = LiteralUtil::CreateR1(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
arguments);
}
@@ -294,7 +295,7 @@ Status ClientLibraryTestBase::ComputeAndCompareLiteralWithStatus(
std::unique_ptr<Literal> converted_expected;
Shape layout_shape;
if (use_bfloat16_) {
- converted_expected = Literal::ConvertF32ToBF16(expected);
+ converted_expected = LiteralUtil::ConvertF32ToBF16(expected);
expected_ptr = converted_expected.get();
if (shape_with_layout != nullptr) {
layout_shape = *shape_with_layout;
@@ -346,7 +347,7 @@ Status ClientLibraryTestBase::ComputeAndCompareLiteralWithStatus(
std::unique_ptr<Literal> converted_expected;
Shape layout_shape;
if (use_bfloat16_) {
- converted_expected = Literal::ConvertF32ToBF16(expected);
+ converted_expected = LiteralUtil::ConvertF32ToBF16(expected);
expected_ptr = converted_expected.get();
if (shape_with_layout != nullptr) {
layout_shape = *shape_with_layout;
@@ -388,7 +389,7 @@ void ClientLibraryTestBase::ComputeAndCompareR1U8(
auto actual = actual_status.ConsumeValueOrDie();
// Turn the expected value into a literal.
- std::unique_ptr<Literal> expected_literal = Literal::CreateR1U8(expected);
+ std::unique_ptr<Literal> expected_literal = LiteralUtil::CreateR1U8(expected);
VLOG(1) << "expected: " << expected_literal->ToString();
VLOG(1) << "actual: " << actual->ToString();
@@ -486,11 +487,11 @@ ClientLibraryTestBase::ComputeValueAndReference(
XlaComputation ClientLibraryTestBase::CreateScalarRelu() {
XlaBuilder builder("relu");
auto shape = ShapeUtil::MakeShape(use_bfloat16_ ? BF16 : F32, {});
- auto z_value = builder.Parameter(0, shape, "z_value");
+ auto z_value = Parameter(&builder, 0, shape, "z_value");
auto zero = use_bfloat16_
- ? builder.ConstantR0<bfloat16>(static_cast<bfloat16>(0.0f))
- : builder.ConstantR0<float>(0.0f);
- builder.Max(z_value, zero);
+ ? ConstantR0<bfloat16>(&builder, static_cast<bfloat16>(0.0f))
+ : ConstantR0<float>(&builder, 0.0f);
+ Max(z_value, zero);
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
return computation_status.ConsumeValueOrDie();
@@ -499,9 +500,9 @@ XlaComputation ClientLibraryTestBase::CreateScalarRelu() {
XlaComputation ClientLibraryTestBase::CreateScalarMax() {
XlaBuilder builder("max");
auto shape = ShapeUtil::MakeShape(use_bfloat16_ ? BF16 : F32, {});
- auto x = builder.Parameter(0, shape, "x");
- auto y = builder.Parameter(1, shape, "y");
- builder.Max(x, y);
+ auto x = Parameter(&builder, 0, shape, "x");
+ auto y = Parameter(&builder, 1, shape, "y");
+ Max(x, y);
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
return computation_status.ConsumeValueOrDie();
@@ -510,13 +511,13 @@ XlaComputation ClientLibraryTestBase::CreateScalarMax() {
XlaComputation ClientLibraryTestBase::CreateScalarReluSensitivity() {
XlaBuilder builder("relu_sensitivity");
auto shape = ShapeUtil::MakeShape(use_bfloat16_ ? BF16 : F32, {});
- auto activation = builder.Parameter(0, shape, "activation");
- auto backprop = builder.Parameter(1, shape, "backprop");
+ auto activation = Parameter(&builder, 0, shape, "activation");
+ auto backprop = Parameter(&builder, 1, shape, "backprop");
auto zero = use_bfloat16_
- ? builder.ConstantR0<bfloat16>(static_cast<bfloat16>(0.0f))
- : builder.ConstantR0<float>(0.0f);
- auto activation_gtz = builder.Gt(activation, zero);
- builder.Select(activation_gtz, /*on_true=*/backprop, /*on_false=*/zero);
+ ? ConstantR0<bfloat16>(&builder, static_cast<bfloat16>(0.0f))
+ : ConstantR0<float>(&builder, 0.0f);
+ auto activation_gtz = Gt(activation, zero);
+ Select(activation_gtz, /*on_true=*/backprop, /*on_false=*/zero);
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
@@ -559,8 +560,9 @@ XlaOp ClientLibraryTestBase::AddParam(const Literal& argument,
XlaOp ClientLibraryTestBase::CreateConstantFromLiteral(const Literal& literal,
XlaBuilder* builder) {
- return builder->ConstantLiteral(
- use_bfloat16_ ? *Literal::ConvertF32ToBF16(literal) : literal);
+ return ConstantLiteral(builder, use_bfloat16_
+ ? *LiteralUtil::ConvertF32ToBF16(literal)
+ : literal);
}
std::unique_ptr<GlobalData>
@@ -581,14 +583,14 @@ ClientLibraryTestBase::CreateParameterAndTransferLiteral(
const Literal* param_literal = &literal;
std::unique_ptr<Literal> converted_literal;
if (use_bfloat16_) {
- converted_literal = Literal::ConvertF32ToBF16(literal);
+ converted_literal = LiteralUtil::ConvertF32ToBF16(literal);
param_literal = converted_literal.get();
}
std::unique_ptr<GlobalData> data =
client_->TransferToServer(*param_literal, device_handle)
.ConsumeValueOrDie();
*data_handle =
- builder->Parameter(parameter_number, param_literal->shape(), name);
+ Parameter(builder, parameter_number, param_literal->shape(), name);
return data;
}
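The harness changes keep bfloat16 coverage centralized: every constant a test creates goes through CreateConstantFromLiteral, which converts F32 literals to BF16 when the test runs in bfloat16 mode, so individual tests never branch on the float type themselves. The shim amounts to:

    // Paraphrase of CreateConstantFromLiteral above; use_bfloat16 stands in
    // for the use_bfloat16_ member of the test fixture.
    xla::XlaOp MakeConstant(const xla::Literal& literal, bool use_bfloat16,
                            xla::XlaBuilder* builder) {
      return xla::ConstantLiteral(
          builder, use_bfloat16 ? *xla::LiteralUtil::ConvertF32ToBF16(literal)
                                : literal);
    }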
diff --git a/tensorflow/compiler/xla/tests/client_library_test_base.h b/tensorflow/compiler/xla/tests/client_library_test_base.h
index 0499fec589..fcc9347db5 100644
--- a/tensorflow/compiler/xla/tests/client_library_test_base.h
+++ b/tensorflow/compiler/xla/tests/client_library_test_base.h
@@ -28,6 +28,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/global_data.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/statusor.h"
@@ -284,7 +285,7 @@ class ClientLibraryTestBase : public ::testing::Test {
template <class T>
XlaOp AddParam(const Array<T>& argument, XlaBuilder* builder) {
- return AddParam(*Literal::CreateFromArray(argument), builder);
+ return AddParam(*LiteralUtil::CreateFromArray(argument), builder);
}
// Creates a constant instruction with the given literal. When the
@@ -299,13 +300,14 @@ class ClientLibraryTestBase : public ::testing::Test {
template <typename NativeT>
XlaOp CreateConstantFromArray(const Array<NativeT>& array,
XlaBuilder* builder) {
- return CreateConstantFromLiteral(*Literal::CreateFromArray(array), builder);
+ return CreateConstantFromLiteral(*LiteralUtil::CreateFromArray(array),
+ builder);
}
// Same as CreateConstantFromArray, but for scalars.
template <typename NativeT>
XlaOp CreateConstantFromScalar(NativeT value, XlaBuilder* builder) {
- return CreateConstantFromLiteral(*Literal::CreateR0<NativeT>(value),
+ return CreateConstantFromLiteral(*LiteralUtil::CreateR0<NativeT>(value),
builder);
}
@@ -373,6 +375,13 @@ class ClientLibraryTestBase : public ::testing::Test {
// The float type used in this test, BF16 or F32 according to use_bfloat16.
PrimitiveType FloatType() const { return use_bfloat16_ ? BF16 : F32; }
+ // Executes the computation and calculates the expected reference value using
+ // the reference client. Returns two literals in the order of (expected,
+ // actual).
+ StatusOr<std::pair<std::unique_ptr<Literal>, std::unique_ptr<Literal>>>
+ ComputeValueAndReference(XlaBuilder* builder,
+ tensorflow::gtl::ArraySlice<Literal> arguments);
+
Client* client_;
Client* ref_client_; // To compute reference result.
ExecutionOptions execution_options_;
@@ -390,13 +399,6 @@ class ClientLibraryTestBase : public ::testing::Test {
const string& error_message)>& verify_output,
const Shape* output_with_layout = nullptr);
- // Executes the computation and calculates the expected reference value using
- // the reference client. Returns two literals in the order of (expected,
- // actual).
- StatusOr<std::pair<std::unique_ptr<Literal>, std::unique_ptr<Literal>>>
- ComputeValueAndReference(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<Literal> arguments);
-
// Whether to run tests with all float-type input/output converted to
// bfloat16.
bool use_bfloat16_ = false;
@@ -410,7 +412,7 @@ void ClientLibraryTestBase::ComputeAndCompareR0(
XlaBuilder* builder, NativeT expected,
tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR0<NativeT>(expected);
+ LiteralUtil::CreateR0<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
arguments);
}
@@ -426,7 +428,7 @@ void ClientLibraryTestBase::ComputeAndCompareR0(
std::is_same<NativeT, complex64>::value,
"Float or complex type required when specifying an ErrorSpec");
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR0<NativeT>(expected);
+ LiteralUtil::CreateR0<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
arguments, error);
}
@@ -436,7 +438,7 @@ void ClientLibraryTestBase::ComputeAndCompareR1(
XlaBuilder* builder, tensorflow::gtl::ArraySlice<NativeT> expected,
tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR1<NativeT>(expected);
+ LiteralUtil::CreateR1<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
arguments);
}
@@ -452,7 +454,7 @@ void ClientLibraryTestBase::ComputeAndCompareR1(
std::is_same<NativeT, complex64>::value,
"Float or complex type required when specifying an ErrorSpec");
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR1<NativeT>(expected);
+ LiteralUtil::CreateR1<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
arguments, error);
}
@@ -462,7 +464,7 @@ void ClientLibraryTestBase::ComputeAndCompareR2(
XlaBuilder* builder, const Array2D<NativeT>& expected,
tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR2FromArray2D<NativeT>(expected);
+ LiteralUtil::CreateR2FromArray2D<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
arguments);
}
@@ -478,7 +480,7 @@ void ClientLibraryTestBase::ComputeAndCompareR2(
std::is_same<NativeT, complex64>::value,
"Float or complex type required when specifying an ErrorSpec");
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR2FromArray2D<NativeT>(expected);
+ LiteralUtil::CreateR2FromArray2D<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
arguments, error);
}
@@ -488,7 +490,7 @@ void ClientLibraryTestBase::ComputeAndCompareR3(
XlaBuilder* builder, const Array3D<NativeT>& expected,
tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR3FromArray3D<NativeT>(expected);
+ LiteralUtil::CreateR3FromArray3D<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
arguments);
}
@@ -504,7 +506,7 @@ void ClientLibraryTestBase::ComputeAndCompareR3(
std::is_same<NativeT, complex64>::value,
"Float or complex type required when specifying an ErrorSpec");
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR3FromArray3D<NativeT>(expected);
+ LiteralUtil::CreateR3FromArray3D<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
arguments, error);
}
@@ -514,7 +516,7 @@ void ClientLibraryTestBase::ComputeAndCompareR4(
XlaBuilder* builder, const Array4D<NativeT>& expected,
tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR4FromArray4D<NativeT>(expected);
+ LiteralUtil::CreateR4FromArray4D<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
arguments);
}
@@ -530,7 +532,7 @@ void ClientLibraryTestBase::ComputeAndCompareR4(
std::is_same<NativeT, complex64>::value,
"Float or complex type required when specifying an ErrorSpec");
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR4FromArray4D<NativeT>(expected);
+ LiteralUtil::CreateR4FromArray4D<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
arguments, error);
}
@@ -539,13 +541,13 @@ template <typename NativeT>
std::unique_ptr<GlobalData> ClientLibraryTestBase::CreateR0Parameter(
NativeT value, int64 parameter_number, const string& name,
XlaBuilder* builder, XlaOp* data_handle) {
- std::unique_ptr<Literal> literal = Literal::CreateR0(value);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR0(value);
if (use_bfloat16_ && literal->shape().element_type() == F32) {
- literal = Literal::ConvertF32ToBF16(*literal);
+ literal = LiteralUtil::ConvertF32ToBF16(*literal);
}
std::unique_ptr<GlobalData> data =
client_->TransferToServer(*literal).ConsumeValueOrDie();
- *data_handle = builder->Parameter(parameter_number, literal->shape(), name);
+ *data_handle = Parameter(builder, parameter_number, literal->shape(), name);
return data;
}
@@ -553,13 +555,13 @@ template <typename NativeT>
std::unique_ptr<GlobalData> ClientLibraryTestBase::CreateR1Parameter(
tensorflow::gtl::ArraySlice<NativeT> values, int64 parameter_number,
const string& name, XlaBuilder* builder, XlaOp* data_handle) {
- std::unique_ptr<Literal> literal = Literal::CreateR1(values);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR1(values);
if (use_bfloat16_ && literal->shape().element_type() == F32) {
- literal = Literal::ConvertF32ToBF16(*literal);
+ literal = LiteralUtil::ConvertF32ToBF16(*literal);
}
std::unique_ptr<GlobalData> data =
client_->TransferToServer(*literal).ConsumeValueOrDie();
- *data_handle = builder->Parameter(parameter_number, literal->shape(), name);
+ *data_handle = Parameter(builder, parameter_number, literal->shape(), name);
return data;
}
@@ -567,13 +569,13 @@ template <typename NativeT>
std::unique_ptr<GlobalData> ClientLibraryTestBase::CreateR2Parameter(
const Array2D<NativeT>& array_2d, int64 parameter_number,
const string& name, XlaBuilder* builder, XlaOp* data_handle) {
- std::unique_ptr<Literal> literal = Literal::CreateR2FromArray2D(array_2d);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR2FromArray2D(array_2d);
if (use_bfloat16_ && literal->shape().element_type() == F32) {
- literal = Literal::ConvertF32ToBF16(*literal);
+ literal = LiteralUtil::ConvertF32ToBF16(*literal);
}
std::unique_ptr<GlobalData> data =
client_->TransferToServer(*literal).ConsumeValueOrDie();
- *data_handle = builder->Parameter(parameter_number, literal->shape(), name);
+ *data_handle = Parameter(builder, parameter_number, literal->shape(), name);
return data;
}
@@ -581,13 +583,13 @@ template <typename NativeT>
std::unique_ptr<GlobalData> ClientLibraryTestBase::CreateR3Parameter(
const Array3D<NativeT>& array_3d, int64 parameter_number,
const string& name, XlaBuilder* builder, XlaOp* data_handle) {
- std::unique_ptr<Literal> literal = Literal::CreateR3FromArray3D(array_3d);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR3FromArray3D(array_3d);
if (use_bfloat16_ && literal->shape().element_type() == F32) {
- literal = Literal::ConvertF32ToBF16(*literal);
+ literal = LiteralUtil::ConvertF32ToBF16(*literal);
}
std::unique_ptr<GlobalData> data =
client_->TransferToServer(*literal).ConsumeValueOrDie();
- *data_handle = builder->Parameter(parameter_number, literal->shape(), name);
+ *data_handle = Parameter(builder, parameter_number, literal->shape(), name);
return data;
}
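
The CreateRNParameter helpers above also show the second half of the refactor: op construction moves from XlaBuilder methods to free functions that take the builder as their first argument. A hedged sketch of the two styles (the wrapper name is illustrative only):

#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"

namespace xla {
XlaOp DeclareParam(XlaBuilder* builder, const Shape& shape) {
  // Old style: builder->Parameter(0, shape, "p0");
  // New style: free function, with the builder passed explicitly.
  return Parameter(builder, /*parameter_number=*/0, shape, "p0");
}
}  // namespace xla
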
diff --git a/tensorflow/compiler/xla/tests/client_test.cc b/tensorflow/compiler/xla/tests/client_test.cc
index 08671cf624..6ce2f844a3 100644
--- a/tensorflow/compiler/xla/tests/client_test.cc
+++ b/tensorflow/compiler/xla/tests/client_test.cc
@@ -43,8 +43,8 @@ XLA_TEST_F(ClientTest, ExecuteWithLayout) {
std::vector<std::vector<int64>> layouts = {{0, 1}, {1, 0}};
for (const std::vector<int64>& execute_layout : layouts) {
for (const std::vector<int64>& transfer_layout : layouts) {
- b.Add(b.ConstantR2<int32>({{1, 2}, {3, 4}}),
- b.ConstantR2<int32>({{10, 20}, {30, 40}}));
+ Add(ConstantR2<int32>(&b, {{1, 2}, {3, 4}}),
+ ConstantR2<int32>(&b, {{10, 20}, {30, 40}}));
TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build());
ExecutionOptions execution_options = execution_options_;
@@ -56,7 +56,7 @@ XLA_TEST_F(ClientTest, ExecuteWithLayout) {
client_->Execute(computation, {}, &execution_options));
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR2WithLayout<int32>(
+ LiteralUtil::CreateR2WithLayout<int32>(
{{11, 22}, {33, 44}}, LayoutUtil::MakeLayout(transfer_layout));
TF_ASSERT_OK_AND_ASSIGN(
@@ -72,8 +72,8 @@ XLA_TEST_F(ClientTest, ExecuteWithLayout) {
XLA_TEST_F(ClientTest, ExecuteWithTupleLayout) {
XlaBuilder b(TestName());
- b.Tuple({b.ConstantR2<int32>({{1, 2}, {3, 4}}),
- b.ConstantR2<int32>({{10, 20}, {30, 40}})});
+ Tuple(&b, {ConstantR2<int32>(&b, {{1, 2}, {3, 4}}),
+ ConstantR2<int32>(&b, {{10, 20}, {30, 40}})});
TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build());
@@ -112,13 +112,13 @@ XLA_TEST_F(ClientTest, DISABLED_ON_GPU(ExecuteParallel)) {
XlaComputation add_with_one_arg, mul_with_two_args, dot_with_one_arg;
Shape shape = ShapeUtil::MakeShape(S32, {2, 2});
- TF_ASSERT_OK_AND_ASSIGN(
- std::unique_ptr<GlobalData> const_arg,
- client_->TransferToServer(*Literal::CreateR2<int32>({{5, 6}, {7, 8}})));
+ TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> const_arg,
+ client_->TransferToServer(
+ *LiteralUtil::CreateR2<int32>({{5, 6}, {7, 8}})));
XlaBuilder b(TestName() + ".add");
- b.Add(b.Parameter(0, shape, "param_0"),
- b.ConstantR2<int32>({{1, 2}, {3, 4}}));
+ Add(Parameter(&b, 0, shape, "param_0"),
+ ConstantR2<int32>(&b, {{1, 2}, {3, 4}}));
TF_ASSERT_OK_AND_ASSIGN(add_with_one_arg, b.Build());
// We can't really test parallel execution on CPU since all of the cores in a
@@ -136,7 +136,7 @@ XLA_TEST_F(ClientTest, DISABLED_ON_GPU(ExecuteParallel)) {
TF_ASSERT_OK_AND_ASSIGN(auto results,
client_->ExecuteParallel(computation_instances));
- auto expected_result = Literal::CreateR2<int32>({{6, 8}, {10, 12}});
+ auto expected_result = LiteralUtil::CreateR2<int32>({{6, 8}, {10, 12}});
TF_ASSERT_OK_AND_ASSIGN(
auto result_literal,
diff --git a/tensorflow/compiler/xla/tests/compilation_cache_test.cc b/tensorflow/compiler/xla/tests/compilation_cache_test.cc
index 50a0069648..ff38246286 100644
--- a/tensorflow/compiler/xla/tests/compilation_cache_test.cc
+++ b/tensorflow/compiler/xla/tests/compilation_cache_test.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
@@ -50,7 +50,7 @@ class CompilationCacheTest : public ClientLibraryTestBase {
&execution_profile)
.ConsumeValueOrDie();
EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR0<float>(expected_result), *result, error_spec_));
+ *LiteralUtil::CreateR0<float>(expected_result), *result, error_spec_));
EXPECT_EQ(expect_cache_hit, execution_profile.compilation_cache_hit());
}
@@ -67,7 +67,7 @@ class CompilationCacheTest : public ClientLibraryTestBase {
std::unique_ptr<Literal> result =
client_->Transfer(*data_handle).ConsumeValueOrDie();
EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR2<float>(expected_result), *result, error_spec_));
+ *LiteralUtil::CreateR2<float>(expected_result), *result, error_spec_));
EXPECT_EQ(expect_cache_hit, execution_profile.compilation_cache_hit());
}
@@ -77,7 +77,7 @@ class CompilationCacheTest : public ClientLibraryTestBase {
// TODO(b/74197823): Disabled because there is no cache in the new design.
XLA_TEST_F(CompilationCacheTest, DISABLED_ComputationCalledMultipleTimes) {
XlaBuilder builder(TestName());
- builder.Neg(builder.ConstantR0<float>(42.0));
+ Neg(ConstantR0<float>(&builder, 42.0));
XlaComputation computation = builder.Build().ConsumeValueOrDie();
ExecuteComputationR0F32(computation, {}, -42.0, /*expect_cache_hit=*/false);
@@ -89,17 +89,17 @@ XLA_TEST_F(CompilationCacheTest, DISABLED_ComputationCalledMultipleTimes) {
XLA_TEST_F(CompilationCacheTest,
DISABLED_ComputationCalledWithDifferentParameters) {
std::unique_ptr<GlobalData> data_42 =
- client_->TransferToServer(*Literal::CreateR0<float>(42.0f))
+ client_->TransferToServer(*LiteralUtil::CreateR0<float>(42.0f))
.ConsumeValueOrDie();
std::unique_ptr<GlobalData> data_123 =
- client_->TransferToServer(*Literal::CreateR0<float>(123.0f))
+ client_->TransferToServer(*LiteralUtil::CreateR0<float>(123.0f))
.ConsumeValueOrDie();
std::unique_ptr<GlobalData> data_456 =
- client_->TransferToServer(*Literal::CreateR0<float>(456.0f))
+ client_->TransferToServer(*LiteralUtil::CreateR0<float>(456.0f))
.ConsumeValueOrDie();
XlaBuilder builder(TestName());
- builder.Neg(builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "param"));
+ Neg(Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "param"));
XlaComputation computation = builder.Build().ConsumeValueOrDie();
ExecuteComputationR0F32(computation, {data_42.get()}, -42.0,
@@ -115,16 +115,16 @@ XLA_TEST_F(CompilationCacheTest,
// TODO(b/74197823): Disabled because there is no cache in the new design.
XLA_TEST_F(CompilationCacheTest, DISABLED_MultipleComputations) {
XlaBuilder builder_neg(TestName() + "_neg");
- builder_neg.Neg(builder_neg.ConstantR0<float>(42.0));
+ Neg(ConstantR0<float>(&builder_neg, 42.0));
XlaComputation computation_neg = builder_neg.Build().ConsumeValueOrDie();
XlaBuilder builder_exp(TestName() + "_exp");
- builder_exp.Exp(builder_exp.ConstantR0<float>(1.0));
+ Exp(ConstantR0<float>(&builder_exp, 1.0));
XlaComputation computation_exp = builder_exp.Build().ConsumeValueOrDie();
XlaBuilder builder_add(TestName() + "_add");
- builder_add.Add(builder_add.ConstantR0<float>(2.0),
- builder_add.ConstantR0<float>(3.0));
+ Add(ConstantR0<float>(&builder_add, 2.0),
+ ConstantR0<float>(&builder_add, 3.0));
XlaComputation computation_add = builder_add.Build().ConsumeValueOrDie();
ExecuteComputationR0F32(computation_neg, {}, -42.0,
@@ -143,18 +143,18 @@ XLA_TEST_F(CompilationCacheTest, DISABLED_DifferentParameterLayouts) {
// layouts. Use these arrays as parameters to a simple computation. If the
// layout of the array changes, then the computation should be recompiled (cache
// miss).
- auto rowmaj_array = Literal::CreateR2WithLayout(
+ auto rowmaj_array = LiteralUtil::CreateR2WithLayout(
{{1.0f, 2.0f}, {3.0f, 4.0f}}, LayoutUtil::MakeLayout({1, 0}));
auto rowmaj_handle =
client_->TransferToServer(*rowmaj_array).ConsumeValueOrDie();
- auto colmaj_array = Literal::CreateR2WithLayout(
+ auto colmaj_array = LiteralUtil::CreateR2WithLayout(
{{1.0f, 2.0f}, {3.0f, 4.0f}}, LayoutUtil::MakeLayout({0, 1}));
auto colmaj_handle =
client_->TransferToServer(*colmaj_array).ConsumeValueOrDie();
XlaBuilder builder(TestName());
- builder.Parameter(0, ShapeUtil::MakeShape(F32, {2, 2}), "param0");
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2, 2}), "param0");
XlaComputation computation = builder.Build().ConsumeValueOrDie();
ExecuteComputationR2F32(computation, {colmaj_handle.get()},
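
DifferentParameterLayouts above builds one logical 2x2 array in two physical layouts to force a recompile. A sketch of building such layout-pinned literals with the renamed factories, under the same assumptions as the diff:

#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal_util.h"

namespace xla {
void MakeBothLayouts() {
  // Same values, different physical layouts; a layout-sensitive
  // compilation cache should treat these as distinct parameter shapes.
  auto rowmaj = LiteralUtil::CreateR2WithLayout(
      {{1.0f, 2.0f}, {3.0f, 4.0f}}, LayoutUtil::MakeLayout({1, 0}));
  auto colmaj = LiteralUtil::CreateR2WithLayout(
      {{1.0f, 2.0f}, {3.0f, 4.0f}}, LayoutUtil::MakeLayout({0, 1}));
  (void)rowmaj;
  (void)colmaj;
}
}  // namespace xla
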
diff --git a/tensorflow/compiler/xla/tests/compute_constant_test.cc b/tensorflow/compiler/xla/tests/compute_constant_test.cc
index ba22530f1c..64bf8b3b38 100644
--- a/tensorflow/compiler/xla/tests/compute_constant_test.cc
+++ b/tensorflow/compiler/xla/tests/compute_constant_test.cc
@@ -22,7 +22,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/statusor.h"
@@ -99,7 +99,7 @@ TEST_F(ComputeConstantTest, ScalarInt32Literal) {
for (ClientType client_type : client_types) {
Client* client = ClientOrDie(platform_, client_type);
XlaBuilder b(TestName());
- auto computation = b.ConstantR0<int32>(42);
+ auto computation = ConstantR0<int32>(&b, 42);
EXPECT_TRUE(IsConstant(computation, &b));
auto value = ComputeConstantScalar<int32>(client, computation, &b);
@@ -113,7 +113,7 @@ TEST_F(ComputeConstantTest, ScalarFloatAdd) {
Client* client = ClientOrDie(platform_, client_type);
XlaBuilder b(TestName());
auto computation =
- b.Add(b.ConstantR0<float>(42.5f), b.ConstantR0<float>(1.5f));
+ Add(ConstantR0<float>(&b, 42.5f), ConstantR0<float>(&b, 1.5f));
EXPECT_TRUE(IsConstant(computation, &b));
auto value = ComputeConstantScalar<float>(client, computation, &b);
@@ -127,8 +127,8 @@ TEST_F(ComputeConstantTest, ScalarRng) {
Client* client = ClientOrDie(platform_, client_type);
XlaBuilder b(TestName());
auto computation =
- b.RngUniform(b.ConstantR0<float>(1.1f), b.ConstantR0<float>(2.1f),
- ShapeUtil::MakeShape(F32, {}));
+ RngUniform(ConstantR0<float>(&b, 1.1f), ConstantR0<float>(&b, 2.1f),
+ ShapeUtil::MakeShape(F32, {}));
EXPECT_FALSE(IsConstant(computation, &b));
auto value = ComputeConstantScalar<float>(client, computation, &b);
@@ -141,7 +141,7 @@ TEST_F(ComputeConstantTest, DirectParamMissing) {
for (ClientType client_type : client_types) {
Client* client = ClientOrDie(platform_, client_type);
XlaBuilder b(TestName());
- auto computation = b.Parameter(0, ShapeUtil::MakeShape(F32, {}), "param");
+ auto computation = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {}), "param");
EXPECT_FALSE(IsConstant(computation, &b));
auto value = ComputeConstantScalar<float>(client, computation, &b);
@@ -156,8 +156,8 @@ TEST_F(ComputeConstantTest, IndirectParamMissing) {
Client* client = ClientOrDie(platform_, client_type);
XlaBuilder b(TestName());
auto computation =
- b.Add(b.ConstantR0<float>(1.0f),
- b.Parameter(0, ShapeUtil::MakeShape(F32, {}), "param"));
+ Add(ConstantR0<float>(&b, 1.0f),
+ Parameter(&b, 0, ShapeUtil::MakeShape(F32, {}), "param"));
EXPECT_FALSE(IsConstant(computation, &b));
auto value = ComputeConstantScalar<float>(client, computation, &b);
@@ -174,18 +174,18 @@ TEST_F(ComputeConstantTest, UnrelatedParam) {
Client* client = ClientOrDie(platform_, client_type);
XlaBuilder b(TestName());
- auto param_a = b.Parameter(10, ShapeUtil::MakeShape(F32, {}), "param0");
+ auto param_a = Parameter(&b, 10, ShapeUtil::MakeShape(F32, {}), "param0");
auto constant_4 =
- b.Add(b.ConstantR0<float>(2.5f), b.ConstantR0<float>(1.5f));
- auto not_constant_a = b.Add(constant_4, param_a);
+ Add(ConstantR0<float>(&b, 2.5f), ConstantR0<float>(&b, 1.5f));
+ auto not_constant_a = Add(constant_4, param_a);
- auto param_b = b.Parameter(1, ShapeUtil::MakeShape(F32, {}), "param1");
+ auto param_b = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {}), "param1");
auto constant_9 =
- b.Mul(b.ConstantR0<float>(2.0f), b.ConstantR0<float>(4.5f));
- auto not_constant_b = b.Add(param_b, constant_9);
+ Mul(ConstantR0<float>(&b, 2.0f), ConstantR0<float>(&b, 4.5f));
+ auto not_constant_b = Add(param_b, constant_9);
- auto constant_13 = b.Add(constant_4, constant_9);
- b.Add(not_constant_b, b.Add(constant_13, not_constant_a));
+ auto constant_13 = Add(constant_4, constant_9);
+ Add(not_constant_b, Add(constant_13, not_constant_a));
EXPECT_TRUE(IsConstant(constant_13, &b));
@@ -201,13 +201,13 @@ TEST_F(ComputeConstantTest, NonScalarAdd) {
XlaBuilder b(TestName());
auto computation =
- b.Add(b.ConstantR1<int32>({1, 2}), b.ConstantR1<int32>({3, 4}));
+ Add(ConstantR1<int32>(&b, {1, 2}), ConstantR1<int32>(&b, {3, 4}));
EXPECT_TRUE(IsConstant(computation, &b));
TF_ASSERT_OK_AND_ASSIGN(auto computed,
ComputeConstantLiteral(client, computation, &b));
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR1<int32>({4, 6});
+ LiteralUtil::CreateR1<int32>({4, 6});
EXPECT_TRUE(LiteralTestUtil::Equal(*expected_literal, *computed));
}
}
@@ -216,12 +216,12 @@ TEST_F(ComputeConstantTest, IntegerDivide) {
for (ClientType client_type : client_types) {
Client* client = ClientOrDie(platform_, client_type);
XlaBuilder b(TestName());
- auto computation = b.Div(b.ConstantR0<int32>(15), b.ConstantR0<int32>(3));
+ auto computation = Div(ConstantR0<int32>(&b, 15), ConstantR0<int32>(&b, 3));
EXPECT_TRUE(IsConstant(computation, &b));
TF_ASSERT_OK_AND_ASSIGN(auto computed,
ComputeConstantLiteral(client, computation, &b));
- std::unique_ptr<Literal> expected_literal = Literal::CreateR0<int32>(5);
+ std::unique_ptr<Literal> expected_literal = LiteralUtil::CreateR0<int32>(5);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected_literal, *computed));
}
}
@@ -237,13 +237,13 @@ XLA_TEST_F(ComputeConstantTest, Layout) {
TF_ASSERT_OK_AND_ASSIGN(
auto computed, ComputeConstantLiteral(
client,
- b.Add(b.ConstantR2<int32>({{1, 2}, {3, 4}}),
- b.ConstantR2<int32>({{10, 20}, {30, 40}})),
+ Add(ConstantR2<int32>(&b, {{1, 2}, {3, 4}}),
+ ConstantR2<int32>(&b, {{10, 20}, {30, 40}})),
&b, &layout_proto));
std::unique_ptr<Literal> expected_literal =
- Literal::CreateR2WithLayout<int32>({{11, 22}, {33, 44}},
- LayoutUtil::MakeLayout(layout));
+ LiteralUtil::CreateR2WithLayout<int32>(
+ {{11, 22}, {33, 44}}, LayoutUtil::MakeLayout(layout));
ASSERT_TRUE(LiteralTestUtil::EqualShapesAndLayouts(
expected_literal->shape(), computed->shape()));
EXPECT_TRUE(LiteralTestUtil::Equal(*expected_literal, *computed));
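
The compute-constant tests probe the builder's constant analysis: graphs built purely from constants should be foldable, while anything reachable from a Parameter (or an Rng op) should not. A minimal sketch in the new free-function style; IsConstant and ComputeConstantLiteral are the test fixture's helpers and are referenced only in comments here:

#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/shape_util.h"

namespace xla {
void BuildConstnessDemo() {
  XlaBuilder b("constness_demo");
  // Constant subgraph: expected to report IsConstant == true.
  auto c = Add(ConstantR0<int32>(&b, 2), ConstantR0<int32>(&b, 3));
  // Parameter-dependent value: expected IsConstant == false.
  auto p = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {}), "param");
  (void)c;
  (void)p;
}
}  // namespace xla
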
diff --git a/tensorflow/compiler/xla/tests/concat_test.cc b/tensorflow/compiler/xla/tests/concat_test.cc
index 352864502a..9f288634c0 100644
--- a/tensorflow/compiler/xla/tests/concat_test.cc
+++ b/tensorflow/compiler/xla/tests/concat_test.cc
@@ -39,7 +39,7 @@ using ::testing::HasSubstr;
// Concatenate expects at least one argument.
XLA_TEST_F(ConcatTest, Concat_Nothing) {
XlaBuilder builder(TestName());
- builder.ConcatInDim({}, 0);
+ ConcatInDim(&builder, {}, 0);
StatusOr<XlaComputation> computation_status = builder.Build();
ASSERT_FALSE(computation_status.ok());
EXPECT_THAT(computation_status.status().ToString(),
@@ -49,8 +49,8 @@ XLA_TEST_F(ConcatTest, Concat_Nothing) {
// Concatenate with one argument works.
XLA_TEST_F(ConcatTest, Concat_R1_With_Nothing) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({42.0, 64.0});
- builder.ConcatInDim({a}, 0);
+ auto a = ConstantR1<float>(&builder, {42.0, 64.0});
+ ConcatInDim(&builder, {a}, 0);
std::vector<float> expected = {42, 64};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -58,8 +58,8 @@ XLA_TEST_F(ConcatTest, Concat_R1_With_Nothing) {
XLA_TEST_F(ConcatTest, Concat_R1_L0_With_Nothing) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({});
- builder.ConcatInDim({a}, 0);
+ auto a = ConstantR1<float>(&builder, {});
+ ConcatInDim(&builder, {a}, 0);
std::vector<float> expected = {};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -69,9 +69,9 @@ XLA_TEST_F(ConcatTest, Concat_R1_L0_With_Nothing) {
// to concatenate on.
XLA_TEST_F(ConcatTest, CannotConcatR0WithR0) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR0<float>(42.0);
- auto b = builder.ConstantR0<float>(64.0);
- builder.ConcatInDim({a, b}, 0);
+ auto a = ConstantR0<float>(&builder, 42.0);
+ auto b = ConstantR0<float>(&builder, 64.0);
+ ConcatInDim(&builder, {a, b}, 0);
StatusOr<XlaComputation> computation_status = builder.Build();
ASSERT_FALSE(computation_status.ok());
EXPECT_THAT(computation_status.status().ToString(),
@@ -80,9 +80,9 @@ XLA_TEST_F(ConcatTest, CannotConcatR0WithR0) {
XLA_TEST_F(ConcatTest, Concat_R1_L0_With_R1_L0) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({});
- auto b = builder.ConstantR1<float>({});
- builder.ConcatInDim({a, b}, 0);
+ auto a = ConstantR1<float>(&builder, {});
+ auto b = ConstantR1<float>(&builder, {});
+ ConcatInDim(&builder, {a, b}, 0);
std::vector<float> expected = {};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -90,9 +90,9 @@ XLA_TEST_F(ConcatTest, Concat_R1_L0_With_R1_L0) {
XLA_TEST_F(ConcatTest, Concat_R1_L0_With_R1_L1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({});
- auto b = builder.ConstantR1<float>({256.0});
- builder.ConcatInDim({a, b}, 0);
+ auto a = ConstantR1<float>(&builder, {});
+ auto b = ConstantR1<float>(&builder, {256.0});
+ ConcatInDim(&builder, {a, b}, 0);
std::vector<float> expected = {256};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -100,9 +100,9 @@ XLA_TEST_F(ConcatTest, Concat_R1_L0_With_R1_L1) {
XLA_TEST_F(ConcatTest, Concat_R1_L2_With_R1_L0) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({42.0, 64.0});
- auto b = builder.ConstantR1<float>({});
- builder.ConcatInDim({a, b}, 0);
+ auto a = ConstantR1<float>(&builder, {42.0, 64.0});
+ auto b = ConstantR1<float>(&builder, {});
+ ConcatInDim(&builder, {a, b}, 0);
std::vector<float> expected = {42, 64};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -110,9 +110,9 @@ XLA_TEST_F(ConcatTest, Concat_R1_L2_With_R1_L0) {
XLA_TEST_F(ConcatTest, Concat_R1_L2_With_R1_L1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({42.0, 64.0});
- auto b = builder.ConstantR1<float>({256.0});
- builder.ConcatInDim({a, b}, 0);
+ auto a = ConstantR1<float>(&builder, {42.0, 64.0});
+ auto b = ConstantR1<float>(&builder, {256.0});
+ ConcatInDim(&builder, {a, b}, 0);
std::vector<float> expected = {42, 64, 256};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -130,9 +130,9 @@ XLA_TEST_F(ConcatTest, Concat_R1_L253_With_R1_L7) {
}
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>(lhs);
- auto b = builder.ConstantR1<float>(rhs);
- builder.ConcatInDim({a, b}, 0);
+ auto a = ConstantR1<float>(&builder, lhs);
+ auto b = ConstantR1<float>(&builder, rhs);
+ ConcatInDim(&builder, {a, b}, 0);
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
@@ -140,9 +140,9 @@ XLA_TEST_F(ConcatTest, Concat_R1_L253_With_R1_L7) {
XLA_TEST_F(ConcatTest, Concat_0x0_With_0x0) {
for (int dim : {0, 1}) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2FromArray2D(Array2D<float>(0, 0));
- auto b = builder.ConstantR2FromArray2D(Array2D<float>(0, 0));
- builder.ConcatInDim({a, b}, dim);
+ auto a = ConstantR2FromArray2D(&builder, Array2D<float>(0, 0));
+ auto b = ConstantR2FromArray2D(&builder, Array2D<float>(0, 0));
+ ConcatInDim(&builder, {a, b}, dim);
ComputeAndCompareR2<float>(&builder, Array2D<float>(0, 0), {},
ErrorSpec(0.0001));
@@ -153,9 +153,9 @@ XLA_TEST_F(ConcatTest, Concat_1x1_With_1x1_InDim0) {
XlaBuilder builder(TestName());
auto a_array = CreatePatternedMatrix(1, 1);
auto b_array = CreatePatternedMatrix(1, 1, /*offset=*/64.0);
- auto a = builder.ConstantR2FromArray2D(*a_array);
- auto b = builder.ConstantR2FromArray2D(*b_array);
- builder.ConcatInDim({a, b}, 0);
+ auto a = ConstantR2FromArray2D(&builder, *a_array);
+ auto b = ConstantR2FromArray2D(&builder, *b_array);
+ ConcatInDim(&builder, {a, b}, 0);
Array2D<float> expected({
{0},
@@ -168,9 +168,9 @@ XLA_TEST_F(ConcatTest, Concat_1x1_With_1x1_InDim1) {
XlaBuilder builder(TestName());
auto a_array = CreatePatternedMatrix(1, 1);
auto b_array = CreatePatternedMatrix(1, 1, /*offset=*/64.0);
- auto a = builder.ConstantR2FromArray2D(*a_array);
- auto b = builder.ConstantR2FromArray2D(*b_array);
- builder.ConcatInDim({a, b}, 1);
+ auto a = ConstantR2FromArray2D(&builder, *a_array);
+ auto b = ConstantR2FromArray2D(&builder, *b_array);
+ ConcatInDim(&builder, {a, b}, 1);
Array2D<float> expected({
{0, 64},
@@ -181,9 +181,9 @@ XLA_TEST_F(ConcatTest, Concat_1x1_With_1x1_InDim1) {
XLA_TEST_F(ConcatTest, Concat2x0With2x5) {
XlaBuilder builder(TestName());
auto b_array = CreatePatternedMatrix(2, 5, /*offset=*/64.0);
- auto a = builder.ConstantR2FromArray2D(Array2D<float>(2, 0));
- auto b = builder.ConstantR2FromArray2D(*b_array);
- builder.ConcatInDim({a, b}, 1);
+ auto a = ConstantR2FromArray2D(&builder, Array2D<float>(2, 0));
+ auto b = ConstantR2FromArray2D(&builder, *b_array);
+ ConcatInDim(&builder, {a, b}, 1);
ComputeAndCompareR2<float>(&builder, *b_array, {}, ErrorSpec(0.0001));
}
@@ -192,9 +192,9 @@ XLA_TEST_F(ConcatTest, Concat2x3With2x5) {
XlaBuilder builder(TestName());
auto a_array = CreatePatternedMatrix(2, 3);
auto b_array = CreatePatternedMatrix(2, 5, /*offset=*/64.0);
- auto a = builder.ConstantR2FromArray2D(*a_array);
- auto b = builder.ConstantR2FromArray2D(*b_array);
- builder.ConcatInDim({a, b}, 1);
+ auto a = ConstantR2FromArray2D(&builder, *a_array);
+ auto b = ConstantR2FromArray2D(&builder, *b_array);
+ ConcatInDim(&builder, {a, b}, 1);
Array2D<float> expected({
{0, 1, 2, 64, 65, 66, 67, 68},
@@ -206,9 +206,9 @@ XLA_TEST_F(ConcatTest, Concat2x3With2x5) {
XLA_TEST_F(ConcatTest, Concat3x2With0x2) {
XlaBuilder builder(TestName());
auto a_array = CreatePatternedMatrix(3, 2);
- auto a = builder.ConstantR2FromArray2D(*a_array);
- auto b = builder.ConstantR2FromArray2D(Array2D<float>(0, 2));
- builder.ConcatInDim({a, b}, 0);
+ auto a = ConstantR2FromArray2D(&builder, *a_array);
+ auto b = ConstantR2FromArray2D(&builder, Array2D<float>(0, 2));
+ ConcatInDim(&builder, {a, b}, 0);
ComputeAndCompareR2<float>(&builder, *a_array, {}, ErrorSpec(0.0001));
}
@@ -217,9 +217,9 @@ XLA_TEST_F(ConcatTest, Concat3x2With5x2) {
XlaBuilder builder(TestName());
auto a_array = CreatePatternedMatrix(3, 2);
auto b_array = CreatePatternedMatrix(5, 2, /*offset=*/64.0);
- auto a = builder.ConstantR2FromArray2D(*a_array);
- auto b = builder.ConstantR2FromArray2D(*b_array);
- builder.ConcatInDim({a, b}, 0);
+ auto a = ConstantR2FromArray2D(&builder, *a_array);
+ auto b = ConstantR2FromArray2D(&builder, *b_array);
+ ConcatInDim(&builder, {a, b}, 0);
Array2D<float> expected({
{0, 1},
@@ -236,9 +236,9 @@ XLA_TEST_F(ConcatTest, Concat3x2With5x2) {
XLA_TEST_F(ConcatTest, Concat_R3_3x0x2_3x0x1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR3FromArray3D(Array3D<float>(3, 0, 2));
- auto b = builder.ConstantR3FromArray3D(Array3D<float>(3, 0, 1));
- builder.ConcatInDim({a, b}, 2);
+ auto a = ConstantR3FromArray3D(&builder, Array3D<float>(3, 0, 2));
+ auto b = ConstantR3FromArray3D(&builder, Array3D<float>(3, 0, 1));
+ ConcatInDim(&builder, {a, b}, 2);
ComputeAndCompareR3<float>(&builder, Array3D<float>(3, 0, 3), {},
ErrorSpec(0.0001));
}
@@ -257,9 +257,9 @@ XLA_TEST_F(ConcatTest, Concat_R3_3x1x2_3x1x1) {
{{7}},
{{8}},
});
- auto a = builder.ConstantR3FromArray3D(a_array);
- auto b = builder.ConstantR3FromArray3D(b_array);
- builder.ConcatInDim({a, b}, 2);
+ auto a = ConstantR3FromArray3D(&builder, a_array);
+ auto b = ConstantR3FromArray3D(&builder, b_array);
+ ConcatInDim(&builder, {a, b}, 2);
Array3D<float> expected({
{{0, 1, 6}},
@@ -271,10 +271,10 @@ XLA_TEST_F(ConcatTest, Concat_R3_3x1x2_3x1x1) {
XLA_TEST_F(ConcatTest, Concat_R1_1x1_1x1_1x1) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({42.0});
- auto b = builder.ConstantR1<float>({64.0});
- auto c = builder.ConstantR1<float>({256.0});
- builder.ConcatInDim({a, b, c}, 0);
+ auto a = ConstantR1<float>(&builder, {42.0});
+ auto b = ConstantR1<float>(&builder, {64.0});
+ auto c = ConstantR1<float>(&builder, {256.0});
+ ConcatInDim(&builder, {a, b, c}, 0);
std::vector<float> expected = {42, 64, 256};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -300,10 +300,10 @@ XLA_TEST_F(ConcatTest, Concat_R3_3x1x2_3x1x1_3x1x1) {
{{7}},
{{11}},
});
- auto a = builder.ConstantR3FromArray3D(a_array);
- auto b = builder.ConstantR3FromArray3D(b_array);
- auto c = builder.ConstantR3FromArray3D(c_array);
- builder.ConcatInDim({a, b, c}, 2);
+ auto a = ConstantR3FromArray3D(&builder, a_array);
+ auto b = ConstantR3FromArray3D(&builder, b_array);
+ auto c = ConstantR3FromArray3D(&builder, c_array);
+ ConcatInDim(&builder, {a, b, c}, 2);
Array3D<float> expected({
{{0, 1, 2, 3}},
@@ -315,11 +315,11 @@ XLA_TEST_F(ConcatTest, Concat_R3_3x1x2_3x1x1_3x1x1) {
XLA_TEST_F(ConcatTest, DoubleConcatLeftAssociative) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({42.0});
- auto b = builder.ConstantR1<float>({64.0});
- auto c = builder.ConstantR1<float>({256.0});
+ auto a = ConstantR1<float>(&builder, {42.0});
+ auto b = ConstantR1<float>(&builder, {64.0});
+ auto c = ConstantR1<float>(&builder, {256.0});
// concatenated = (a concat b) concat c
- builder.ConcatInDim({builder.ConcatInDim({a, b}, 0), c}, 0);
+ ConcatInDim(&builder, {ConcatInDim(&builder, {a, b}, 0), c}, 0);
std::vector<float> expected = {42, 64, 256};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -327,11 +327,11 @@ XLA_TEST_F(ConcatTest, DoubleConcatLeftAssociative) {
XLA_TEST_F(ConcatTest, DoubleConcatRightAssociative) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({42.0});
- auto b = builder.ConstantR1<float>({64.0});
- auto c = builder.ConstantR1<float>({256.0});
+ auto a = ConstantR1<float>(&builder, {42.0});
+ auto b = ConstantR1<float>(&builder, {64.0});
+ auto c = ConstantR1<float>(&builder, {256.0});
// concatenated = a concat (b concat c)
- builder.ConcatInDim({a, builder.ConcatInDim({b, c}, 0)}, 0);
+ ConcatInDim(&builder, {a, ConcatInDim(&builder, {b, c}, 0)}, 0);
std::vector<float> expected = {42, 64, 256};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
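
The two associativity tests nest ConcatInDim calls directly; because the op builders are now free functions returning XlaOp, nesting composes exactly as the old builder methods did. A small sketch under the same assumptions:

#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"

namespace xla {
void NestedConcat() {
  XlaBuilder b("concat_demo");
  auto a = ConstantR1<float>(&b, {42.0f});
  auto c = ConstantR1<float>(&b, {64.0f, 256.0f});
  // ConcatInDim returns an XlaOp, so the inner concat feeds the outer
  // one; the result along dimension 0 is {42, 64, 256}.
  ConcatInDim(&b, {ConcatInDim(&b, {a}, 0), c}, 0);
}
}  // namespace xla
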
@@ -346,9 +346,9 @@ XLA_TEST_F(ConcatTest, Concat_1x1024_With_1x1024_InDim0) {
}
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2FromArray2D<float>(lhs);
- auto b = builder.ConstantR2FromArray2D<float>(rhs);
- builder.ConcatInDim({a, b}, 0);
+ auto a = ConstantR2FromArray2D<float>(&builder, lhs);
+ auto b = ConstantR2FromArray2D<float>(&builder, rhs);
+ ConcatInDim(&builder, {a, b}, 0);
Array2D<float> expected(2, 1024);
for (int i = 0; i < 1024; ++i) {
@@ -367,9 +367,9 @@ XLA_TEST_F(ConcatTest, Concat_1x1024_With_1x1024_InDim1) {
}
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2FromArray2D<float>(lhs);
- auto b = builder.ConstantR2FromArray2D<float>(rhs);
- builder.ConcatInDim({a, b}, 1);
+ auto a = ConstantR2FromArray2D<float>(&builder, lhs);
+ auto b = ConstantR2FromArray2D<float>(&builder, rhs);
+ ConcatInDim(&builder, {a, b}, 1);
Array2D<float> expected(1, 2048);
for (int i = 0; i < 1024; ++i) {
@@ -392,9 +392,9 @@ XLA_TEST_F(ConcatTest, Concat_64x64_With_64x2) {
}
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2FromArray2D<float>(lhs);
- auto b = builder.ConstantR2FromArray2D<float>(rhs);
- builder.ConcatInDim({a, b}, 1);
+ auto a = ConstantR2FromArray2D<float>(&builder, lhs);
+ auto b = ConstantR2FromArray2D<float>(&builder, rhs);
+ ConcatInDim(&builder, {a, b}, 1);
Array2D<float> expected(64, 66);
for (int i0 = 0; i0 < 64; ++i0) {
@@ -410,9 +410,9 @@ XLA_TEST_F(ConcatTest, CannotConcatOpaques) {
XlaBuilder builder(TestName());
auto opaque_shape = ShapeUtil::MakeOpaqueShape();
auto r1f32 = xla::ShapeUtil::MakeShape(xla::F32, {1});
- auto x = builder.Parameter(0, r1f32, "x");
- auto y = builder.Parameter(1, opaque_shape, "y");
- builder.ConcatInDim({x, y}, 0);
+ auto x = Parameter(&builder, 0, r1f32, "x");
+ auto y = Parameter(&builder, 1, opaque_shape, "y");
+ ConcatInDim(&builder, {x, y}, 0);
StatusOr<XlaComputation> computation_status = builder.Build();
ASSERT_FALSE(computation_status.ok());
EXPECT_THAT(
@@ -425,9 +425,9 @@ XLA_TEST_F(ConcatTest, CannotConcatTokens) {
XlaBuilder builder(TestName());
auto token_shape = ShapeUtil::MakeTokenShape();
auto r1f32 = xla::ShapeUtil::MakeShape(xla::F32, {1});
- auto x = builder.Parameter(0, r1f32, "x");
- auto y = builder.Parameter(1, token_shape, "y");
- builder.ConcatInDim({x, y}, 0);
+ auto x = Parameter(&builder, 0, r1f32, "x");
+ auto y = Parameter(&builder, 1, token_shape, "y");
+ ConcatInDim(&builder, {x, y}, 0);
StatusOr<XlaComputation> computation_status = builder.Build();
ASSERT_FALSE(computation_status.ok());
EXPECT_THAT(
@@ -437,10 +437,10 @@ XLA_TEST_F(ConcatTest, CannotConcatTokens) {
XLA_TEST_F(ConcatTest, ConcatSeveralBoxedPredicates) {
XlaBuilder builder(TestName());
- auto p0 = builder.ConstantR1<bool>({true});
- auto p1 = builder.ConstantR1<bool>({false});
- auto p2 = builder.ConstantR1<bool>({true});
- builder.ConcatInDim({p0, p1, p2}, 0);
+ auto p0 = ConstantR1<bool>(&builder, {true});
+ auto p1 = ConstantR1<bool>(&builder, {false});
+ auto p2 = ConstantR1<bool>(&builder, {true});
+ ConcatInDim(&builder, {p0, p1, p2}, 0);
bool expected[] = {true, false, true};
ComputeAndCompareR1<bool>(&builder, expected, {});
@@ -448,11 +448,11 @@ XLA_TEST_F(ConcatTest, ConcatSeveralBoxedPredicates) {
XLA_TEST_F(ConcatTest, ConcatSeveralR1S32s) {
XlaBuilder builder(TestName());
- auto a0 = builder.ConstantR1<int32>({1});
- auto a1 = builder.ConstantR1<int32>({2, 3});
- auto a2 = builder.ConstantR1<int32>({4, 5, 6});
- auto a3 = builder.ConstantR1<int32>({7, 8, 9, 10});
- builder.ConcatInDim({a0, a1, a2, a3}, 0);
+ auto a0 = ConstantR1<int32>(&builder, {1});
+ auto a1 = ConstantR1<int32>(&builder, {2, 3});
+ auto a2 = ConstantR1<int32>(&builder, {4, 5, 6});
+ auto a3 = ConstantR1<int32>(&builder, {7, 8, 9, 10});
+ ConcatInDim(&builder, {a0, a1, a2, a3}, 0);
std::vector<int32> expected(10);
std::iota(expected.begin(), expected.end(), 1);
@@ -487,7 +487,7 @@ XLA_TEST_F(ConcatTest, ConcatR3WeirdDims) {
auto p1 = CreateR3Parameter<float>(arr1, /*parameter_number=*/1, "p1",
&builder, &h1);
- builder.ConcatInDim({h0, h1}, 2);
+ ConcatInDim(&builder, {h0, h1}, 2);
ComputeAndCompareR3<float>(&builder, expected, {p0.get(), p1.get()});
}
@@ -514,9 +514,9 @@ TEST_P(ConcatR2BinaryTest, DoIt) {
rhs.FillUnique(1000);
XlaBuilder builder(TestName());
- auto a0 = builder.ConstantR2FromArray2D<int32>(lhs);
- auto a1 = builder.ConstantR2FromArray2D<int32>(rhs);
- builder.ConcatInDim({a0, a1}, spec.concat_dimension);
+ auto a0 = ConstantR2FromArray2D<int32>(&builder, lhs);
+ auto a1 = ConstantR2FromArray2D<int32>(&builder, rhs);
+ ConcatInDim(&builder, {a0, a1}, spec.concat_dimension);
std::unique_ptr<Array2D<int32>> expected =
ReferenceUtil::Concat2D(lhs, rhs, spec.concat_dimension);
@@ -534,19 +534,19 @@ TEST_P(ConcatR2BinaryTest, DoIt) {
// concat
XLA_TEST_F(ConcatTest, ConcatOperandsOfSameOperand) {
auto f32_scalar = ShapeUtil::MakeShape(xla::F32, {});
- auto x_literal = Literal::CreateR0<float>(2.f);
- auto y_literal = Literal::CreateR0<float>(3.f);
+ auto x_literal = LiteralUtil::CreateR0<float>(2.f);
+ auto y_literal = LiteralUtil::CreateR0<float>(3.f);
auto x_data = client_->TransferToServer(*x_literal).ConsumeValueOrDie();
auto y_data = client_->TransferToServer(*y_literal).ConsumeValueOrDie();
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, f32_scalar, "x");
- auto y = builder.Parameter(1, f32_scalar, "y");
- auto mul = builder.Mul(x, y);
- auto add1 = builder.Add(mul, builder.ConstantR1<float>({1.f, 2.f}));
- auto add2 = builder.Add(mul, builder.ConstantR1<float>({3.f, 4.f}));
- auto add3 = builder.Add(mul, builder.ConstantR1<float>({5.f, 6.f}));
- builder.ConcatInDim({add1, add2, add3}, /*dimension=*/0);
+ auto x = Parameter(&builder, 0, f32_scalar, "x");
+ auto y = Parameter(&builder, 1, f32_scalar, "y");
+ auto mul = Mul(x, y);
+ auto add1 = Add(mul, ConstantR1<float>(&builder, {1.f, 2.f}));
+ auto add2 = Add(mul, ConstantR1<float>(&builder, {3.f, 4.f}));
+ auto add3 = Add(mul, ConstantR1<float>(&builder, {5.f, 6.f}));
+ ConcatInDim(&builder, {add1, add2, add3}, /*dimension=*/0);
ComputeAndCompareR1<float>(&builder, {7., 8., 9., 10., 11., 12.},
{x_data.get(), y_data.get()}, ErrorSpec(1e-4));
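
ConcatOperandsOfSameOperand feeds runtime values through parameters rather than constants. The underlying pattern, restated with the renamed factories: build a literal host-side, transfer it to the service, and pass the returned GlobalData handle in the execution's argument list (client_ is the fixture's Client*):

auto x_literal = LiteralUtil::CreateR0<float>(2.f);
// Upload to the service; the GlobalData handle is later passed via
// {x_data.get(), ...} as an execution argument.
auto x_data = client_->TransferToServer(*x_literal).ConsumeValueOrDie();
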
@@ -556,21 +556,21 @@ XLA_TEST_F(ConcatTest, ConcatOperandsOfSameOperand) {
// produces the correct result in rank 1.
XLA_TEST_F(ConcatTest, ConcatBroadcastArgument) {
auto f32_scalar = ShapeUtil::MakeShape(xla::F32, {});
- auto x_literal = Literal::CreateR1<float>({2.0f, 3.0f, 5.0f, 6.0f});
- auto y_literal = Literal::CreateR0<float>(1.5f);
- auto z_literal = Literal::CreateR0<float>(5.5f);
+ auto x_literal = LiteralUtil::CreateR1<float>({2.0f, 3.0f, 5.0f, 6.0f});
+ auto y_literal = LiteralUtil::CreateR0<float>(1.5f);
+ auto z_literal = LiteralUtil::CreateR0<float>(5.5f);
auto x_data = client_->TransferToServer(*x_literal).ConsumeValueOrDie();
auto y_data = client_->TransferToServer(*y_literal).ConsumeValueOrDie();
auto z_data = client_->TransferToServer(*z_literal).ConsumeValueOrDie();
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, x_literal->shape(), "x");
- auto y = builder.Parameter(1, f32_scalar, "y");
- auto z = builder.Parameter(2, f32_scalar, "z");
- auto bcast = builder.Broadcast(y, {5});
- auto bcast2 = builder.Broadcast(z, {3});
- auto concat = builder.ConcatInDim({bcast, x}, /*dimension=*/0);
- builder.ConcatInDim({concat, bcast2}, /*dimension=*/0);
+ auto x = Parameter(&builder, 0, x_literal->shape(), "x");
+ auto y = Parameter(&builder, 1, f32_scalar, "y");
+ auto z = Parameter(&builder, 2, f32_scalar, "z");
+ auto bcast = Broadcast(y, {5});
+ auto bcast2 = Broadcast(z, {3});
+ auto concat = ConcatInDim(&builder, {bcast, x}, /*dimension=*/0);
+ ConcatInDim(&builder, {concat, bcast2}, /*dimension=*/0);
ComputeAndCompareR1<float>(
&builder,
@@ -584,21 +584,21 @@ XLA_TEST_F(ConcatTest, ConcatBroadcastArgument) {
XLA_TEST_F(ConcatTest, ConcatBroadcastArgumentR3) {
auto f32_scalar = ShapeUtil::MakeShape(xla::F32, {});
Array3D<float> x3d(3, 5, 7, 3.14f);
- auto x_literal = Literal::CreateR3FromArray3D<float>(x3d);
- auto y_literal = Literal::CreateR0<float>(1.5f);
- auto z_literal = Literal::CreateR0<float>(5.5f);
+ auto x_literal = LiteralUtil::CreateR3FromArray3D<float>(x3d);
+ auto y_literal = LiteralUtil::CreateR0<float>(1.5f);
+ auto z_literal = LiteralUtil::CreateR0<float>(5.5f);
auto x_data = client_->TransferToServer(*x_literal).ConsumeValueOrDie();
auto y_data = client_->TransferToServer(*y_literal).ConsumeValueOrDie();
auto z_data = client_->TransferToServer(*z_literal).ConsumeValueOrDie();
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, x_literal->shape(), "x");
- auto y = builder.Parameter(1, f32_scalar, "y");
- auto z = builder.Parameter(2, f32_scalar, "y");
- auto y_bcast = builder.Broadcast(y, {1, 5, 7});
- auto z_bcast = builder.Broadcast(z, {4, 1, 7});
- auto concat = builder.ConcatInDim({y_bcast, x}, /*dimension=*/0);
- builder.ConcatInDim({concat, z_bcast}, /*dimension=*/1);
+ auto x = Parameter(&builder, 0, x_literal->shape(), "x");
+ auto y = Parameter(&builder, 1, f32_scalar, "y");
+ auto z = Parameter(&builder, 2, f32_scalar, "y");
+ auto y_bcast = Broadcast(y, {1, 5, 7});
+ auto z_bcast = Broadcast(z, {4, 1, 7});
+ auto concat = ConcatInDim(&builder, {y_bcast, x}, /*dimension=*/0);
+ ConcatInDim(&builder, {concat, z_bcast}, /*dimension=*/1);
Array3D<float> y_bcast3d(1, 5, 7, 1.5f);
Array3D<float> z_bcast3d(4, 1, 7, 5.5f);
auto concat0 = ReferenceUtil::Concat3D(y_bcast3d, x3d, 0);
diff --git a/tensorflow/compiler/xla/tests/conditional_test.cc b/tensorflow/compiler/xla/tests/conditional_test.cc
index 7ff6706935..369663de15 100644
--- a/tensorflow/compiler/xla/tests/conditional_test.cc
+++ b/tensorflow/compiler/xla/tests/conditional_test.cc
@@ -26,8 +26,8 @@ class ConditionalOpTest : public ClientLibraryTestBase {
protected:
XlaComputation CreateR0ConstantComputation(float value) {
XlaBuilder builder("Constant");
- builder.Parameter(0, empty_tuple_, "tuple");
- builder.ConstantR0<float>(value);
+ Parameter(&builder, 0, empty_tuple_, "tuple");
+ ConstantR0<float>(&builder, value);
auto build_status = builder.Build();
EXPECT_IS_OK(build_status.status());
return build_status.ConsumeValueOrDie();
@@ -35,7 +35,7 @@ class ConditionalOpTest : public ClientLibraryTestBase {
XlaComputation CreateR0IdentityComputation() {
XlaBuilder builder("Identity");
- builder.Parameter(0, r0f32_, "x");
+ Parameter(&builder, 0, r0f32_, "x");
auto build_status = builder.Build();
EXPECT_IS_OK(build_status.status());
return build_status.ConsumeValueOrDie();
@@ -43,8 +43,8 @@ class ConditionalOpTest : public ClientLibraryTestBase {
XlaComputation CreateCeilComputation(const Shape& shape) {
XlaBuilder builder("Ceil");
- auto param = builder.Parameter(0, shape, "param");
- builder.Ceil(param);
+ auto param = Parameter(&builder, 0, shape, "param");
+ Ceil(param);
auto build_status = builder.Build();
EXPECT_IS_OK(build_status.status());
return build_status.ConsumeValueOrDie();
@@ -60,8 +60,8 @@ class ConditionalOpTest : public ClientLibraryTestBase {
XlaComputation CreateFloorComputation(const Shape& shape) {
XlaBuilder builder("Floor");
- auto param = builder.Parameter(0, shape, "param");
- builder.Floor(param);
+ auto param = Parameter(&builder, 0, shape, "param");
+ Floor(param);
auto build_status = builder.Build();
EXPECT_IS_OK(build_status.status());
return build_status.ConsumeValueOrDie();
@@ -78,12 +78,12 @@ class ConditionalOpTest : public ClientLibraryTestBase {
XlaComputation CreateTupleCeilComputation(const string& computation_name,
const Shape& tuple_shape) {
XlaBuilder builder(computation_name);
- auto tuple = builder.Parameter(0, tuple_shape, "tuple");
- auto x = builder.GetTupleElement(tuple, 0);
- auto y = builder.GetTupleElement(tuple, 1);
- auto x_ceil = builder.Ceil(x);
- auto y_ceil = builder.Ceil(y);
- builder.Tuple({x_ceil, y_ceil});
+ auto tuple = Parameter(&builder, 0, tuple_shape, "tuple");
+ auto x = GetTupleElement(tuple, 0);
+ auto y = GetTupleElement(tuple, 1);
+ auto x_ceil = Ceil(x);
+ auto y_ceil = Ceil(y);
+ Tuple(&builder, {x_ceil, y_ceil});
auto build_status = builder.Build();
EXPECT_IS_OK(build_status.status());
return build_status.ConsumeValueOrDie();
@@ -100,12 +100,12 @@ class ConditionalOpTest : public ClientLibraryTestBase {
XlaComputation CreateTupleFloorComputation(const string& computation_name,
const Shape& tuple_shape) {
XlaBuilder builder(computation_name);
- auto tuple = builder.Parameter(0, tuple_shape, "tuple");
- auto x = builder.GetTupleElement(tuple, 0);
- auto y = builder.GetTupleElement(tuple, 1);
- auto x_floor = builder.Floor(x);
- auto y_floor = builder.Floor(y);
- builder.Tuple({x_floor, y_floor});
+ auto tuple = Parameter(&builder, 0, tuple_shape, "tuple");
+ auto x = GetTupleElement(tuple, 0);
+ auto y = GetTupleElement(tuple, 1);
+ auto x_floor = Floor(x);
+ auto y_floor = Floor(y);
+ Tuple(&builder, {x_floor, y_floor});
auto build_status = builder.Build();
EXPECT_IS_OK(build_status.status());
return build_status.ConsumeValueOrDie();
@@ -122,10 +122,10 @@ class ConditionalOpTest : public ClientLibraryTestBase {
XlaComputation CreateTupleAddComputation(const string& computation_name,
const Shape& tuple_shape) {
XlaBuilder builder(computation_name);
- auto tuple = builder.Parameter(0, tuple_shape, "tuple");
- auto x = builder.GetTupleElement(tuple, 0);
- auto y = builder.GetTupleElement(tuple, 1);
- builder.Add(x, y);
+ auto tuple = Parameter(&builder, 0, tuple_shape, "tuple");
+ auto x = GetTupleElement(tuple, 0);
+ auto y = GetTupleElement(tuple, 1);
+ Add(x, y);
auto build_status = builder.Build();
EXPECT_IS_OK(build_status.status());
return build_status.ConsumeValueOrDie();
@@ -142,10 +142,10 @@ class ConditionalOpTest : public ClientLibraryTestBase {
XlaComputation CreateTupleSubComputation(const string& computation_name,
const Shape& tuple_shape) {
XlaBuilder builder(computation_name);
- auto tuple = builder.Parameter(0, tuple_shape, "tuple");
- auto x = builder.GetTupleElement(tuple, 0);
- auto y = builder.GetTupleElement(tuple, 1);
- builder.Sub(x, y);
+ auto tuple = Parameter(&builder, 0, tuple_shape, "tuple");
+ auto x = GetTupleElement(tuple, 0);
+ auto y = GetTupleElement(tuple, 1);
+ Sub(x, y);
auto build_status = builder.Build();
EXPECT_IS_OK(build_status.status());
return build_status.ConsumeValueOrDie();
@@ -172,198 +172,215 @@ class ConditionalOpTest : public ClientLibraryTestBase {
// Test true and false computations that do not take any parameters.
XLA_TEST_F(ConditionalOpTest, Parameters0) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(true);
- auto operands = builder.Tuple({});
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
+ auto operands = Tuple(&builder, {});
auto true_computation = CreateR0ConstantComputation(56.0f);
auto false_computation = CreateR0ConstantComputation(12.0f);
- builder.Conditional(pred, operands, true_computation, operands,
- false_computation);
+ Conditional(pred, operands, true_computation, operands, false_computation);
- ComputeAndCompareR0<float>(&builder, 56.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 56.0f, {pred_arg.get()}, error_spec_);
}
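
Beyond the free-function migration, the conditional tests stop hard-coding the predicate as a constant and feed it through parameter 0 instead (via the fixture's CreateR0Parameter), presumably so the Conditional survives constant folding and is exercised at run time. The recurring shape of each rewritten test, as in Parameters0 above:

XlaOp pred;
// Declares parameter 0, transfers the bool value to the server, and
// returns the GlobalData to pass alongside the expected result.
auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
Conditional(pred, operands, true_computation, operands, false_computation);
ComputeAndCompareR0<float>(&builder, 56.0f, {pred_arg.get()}, error_spec_);
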
// Test true and false computations that take in 1 parameter.
XLA_TEST_F(ConditionalOpTest, Parameters1) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(false);
- auto operand1 = builder.ConstantR0<float>(56.0f);
- auto operand2 = builder.ConstantR0<float>(12.0f);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
+ auto operand1 = ConstantR0<float>(&builder, 56.0f);
+ auto operand2 = ConstantR0<float>(&builder, 12.0f);
auto identity = CreateR0IdentityComputation();
- builder.Conditional(pred, operand1, identity, operand2, identity);
+ Conditional(pred, operand1, identity, operand2, identity);
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test conditional with two different computations in the true and false cases
// that take in different arguments.
XLA_TEST_F(ConditionalOpTest, DiffComputationsDiffArgs) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(false);
- auto operand1 = builder.ConstantR0<float>(56.4f);
- auto operand2 = builder.ConstantR0<float>(12.6f);
- builder.Conditional(pred, operand1, CreateR0CeilComputation(), operand2,
- CreateR0FloorComputation());
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
+ auto operand1 = ConstantR0<float>(&builder, 56.4f);
+ auto operand2 = ConstantR0<float>(&builder, 12.6f);
+ Conditional(pred, operand1, CreateR0CeilComputation(), operand2,
+ CreateR0FloorComputation());
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test conditional with two different computations in the true and false cases
// that take in the same arguments.
XLA_TEST_F(ConditionalOpTest, DiffComputationsSameArg) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(false);
- auto operand = builder.ConstantR0<float>(12.6f);
- builder.Conditional(pred, operand, CreateR0CeilComputation(), operand,
- CreateR0FloorComputation());
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
+ auto operand = ConstantR0<float>(&builder, 12.6f);
+ Conditional(pred, operand, CreateR0CeilComputation(), operand,
+ CreateR0FloorComputation());
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test conditional with the same computation in the true and false cases but
// that take in different arguments.
XLA_TEST_F(ConditionalOpTest, SameComputationDiffArgs) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(false);
- auto operand1 = builder.ConstantR0<float>(56.4f);
- auto operand2 = builder.ConstantR0<float>(12.6f);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
+ auto operand1 = ConstantR0<float>(&builder, 56.4f);
+ auto operand2 = ConstantR0<float>(&builder, 12.6f);
auto floor = CreateR0FloorComputation();
- builder.Conditional(pred, operand1, floor, operand2, floor);
+ Conditional(pred, operand1, floor, operand2, floor);
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test conditional with the same computation in the true and false cases that
// take in the same arguments.
XLA_TEST_F(ConditionalOpTest, SameComputationSameArg) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(false);
- auto operand = builder.ConstantR0<float>(12.6f);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
+ auto operand = ConstantR0<float>(&builder, 12.6f);
auto floor = CreateR0FloorComputation();
- builder.Conditional(pred, operand, floor, operand, floor);
+ Conditional(pred, operand, floor, operand, floor);
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test conditional with different instances of the same computation in the true
// and false cases.
XLA_TEST_F(ConditionalOpTest, SameComputationDiffInstances) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(false);
- auto operand1 = builder.ConstantR0<float>(56.4f);
- auto operand2 = builder.ConstantR0<float>(12.6f);
- builder.Conditional(pred, operand1, CreateR0FloorComputation(), operand2,
- CreateR0FloorComputation());
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
+ auto operand1 = ConstantR0<float>(&builder, 56.4f);
+ auto operand2 = ConstantR0<float>(&builder, 12.6f);
+ Conditional(pred, operand1, CreateR0FloorComputation(), operand2,
+ CreateR0FloorComputation());
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test the case when a call invokes a computation that contains a conditional.
XLA_TEST_F(ConditionalOpTest, ConditionalWithCall) {
Shape r0bool = ShapeUtil::MakeShape(PRED, {});
XlaBuilder inner_builder(TestName() + ".inner_conditional");
- auto pred_cond = inner_builder.Parameter(0, r0bool, "param0");
- auto true_operand = inner_builder.Parameter(1, r0f32_, "param1");
- auto false_operand = inner_builder.Parameter(2, r0f32_, "param2");
- inner_builder.Conditional(pred_cond, true_operand, CreateR0CeilComputation(),
- false_operand, CreateR0FloorComputation());
+ auto pred_cond = Parameter(&inner_builder, 0, r0bool, "param0");
+ auto true_operand = Parameter(&inner_builder, 1, r0f32_, "param1");
+ auto false_operand = Parameter(&inner_builder, 2, r0f32_, "param2");
+ Conditional(pred_cond, true_operand, CreateR0CeilComputation(), false_operand,
+ CreateR0FloorComputation());
auto inner_builder_result = inner_builder.Build();
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(false);
- auto operand1 = builder.ConstantR0<float>(56.4f);
- auto operand2 = builder.ConstantR0<float>(12.6f);
- builder.Call(inner_builder_result.ConsumeValueOrDie(),
- {pred, operand1, operand2});
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
+ auto operand1 = ConstantR0<float>(&builder, 56.4f);
+ auto operand2 = ConstantR0<float>(&builder, 12.6f);
+ Call(&builder, inner_builder_result.ConsumeValueOrDie(),
+ {pred, operand1, operand2});
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test true and false computations that take in 2 parameters and predicate is
// true.
XLA_TEST_F(ConditionalOpTest, Parameters2TrueBranch) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(true);
- auto operand1 = builder.ConstantR0<float>(56.0f);
- auto operand2 = builder.ConstantR0<float>(12.0f);
- auto operands = builder.Tuple({operand1, operand2});
- builder.Conditional(pred, operands, CreateR0TupleAddComputation(), operands,
- CreateR0TupleSubComputation());
-
- ComputeAndCompareR0<float>(&builder, 68.0f, {}, error_spec_);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
+ auto operand1 = ConstantR0<float>(&builder, 56.0f);
+ auto operand2 = ConstantR0<float>(&builder, 12.0f);
+ auto operands = Tuple(&builder, {operand1, operand2});
+ Conditional(pred, operands, CreateR0TupleAddComputation(), operands,
+ CreateR0TupleSubComputation());
+
+ ComputeAndCompareR0<float>(&builder, 68.0f, {pred_arg.get()}, error_spec_);
}
// Test true and false computations that take in 2 parameters and predicate is
// false.
XLA_TEST_F(ConditionalOpTest, Parameters2FalseBranch) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(false);
- auto operand1 = builder.ConstantR0<float>(56.0f);
- auto operand2 = builder.ConstantR0<float>(12.0f);
- auto operands = builder.Tuple({operand1, operand2});
- builder.Conditional(pred, operands, CreateR0TupleAddComputation(), operands,
- CreateR0TupleSubComputation());
-
- ComputeAndCompareR0<float>(&builder, 44.0f, {}, error_spec_);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
+ auto operand1 = ConstantR0<float>(&builder, 56.0f);
+ auto operand2 = ConstantR0<float>(&builder, 12.0f);
+ auto operands = Tuple(&builder, {operand1, operand2});
+ Conditional(pred, operands, CreateR0TupleAddComputation(), operands,
+ CreateR0TupleSubComputation());
+
+ ComputeAndCompareR0<float>(&builder, 44.0f, {pred_arg.get()}, error_spec_);
}
// Test true and false computations that take in 2 array parameters and
// predicate is true.
XLA_TEST_F(ConditionalOpTest, Parameters2ArrayTrueBranch) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(true);
- auto operand1 = builder.ConstantR1<float>({24.0f, 56.0f});
- auto operand2 = builder.ConstantR1<float>({10.0f, 11.0f});
- auto operands = builder.Tuple({operand1, operand2});
- builder.Conditional(pred, operands, CreateR1TupleAddComputation(), operands,
- CreateR1TupleSubComputation());
-
- ComputeAndCompareR1<float>(&builder, {34.0f, 67.0f}, {}, error_spec_);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
+ auto operand1 = ConstantR1<float>(&builder, {24.0f, 56.0f});
+ auto operand2 = ConstantR1<float>(&builder, {10.0f, 11.0f});
+ auto operands = Tuple(&builder, {operand1, operand2});
+ Conditional(pred, operands, CreateR1TupleAddComputation(), operands,
+ CreateR1TupleSubComputation());
+
+ ComputeAndCompareR1<float>(&builder, {34.0f, 67.0f}, {pred_arg.get()},
+ error_spec_);
}
// Test true and false computations that take in 2 array parameters and
// predicate is false.
XLA_TEST_F(ConditionalOpTest, Parameters2ArrayFalseBranch) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(false);
- auto operand1 = builder.ConstantR1<float>({24.0f, 56.0f});
- auto operand2 = builder.ConstantR1<float>({10.0f, 11.0f});
- auto operands = builder.Tuple({operand1, operand2});
- builder.Conditional(pred, operands, CreateR1TupleAddComputation(), operands,
- CreateR1TupleSubComputation());
-
- ComputeAndCompareR1<float>(&builder, {14.0f, 45.0f}, {}, error_spec_);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
+ auto operand1 = ConstantR1<float>(&builder, {24.0f, 56.0f});
+ auto operand2 = ConstantR1<float>(&builder, {10.0f, 11.0f});
+ auto operands = Tuple(&builder, {operand1, operand2});
+ Conditional(pred, operands, CreateR1TupleAddComputation(), operands,
+ CreateR1TupleSubComputation());
+
+ ComputeAndCompareR1<float>(&builder, {14.0f, 45.0f}, {pred_arg.get()},
+ error_spec_);
}
// Test true and false computations that return a tuple of scalars.
XLA_TEST_F(ConditionalOpTest, ReturnTupleOfScalars) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(false);
- auto operands = builder.Tuple(
- {builder.ConstantR0<float>(12.2f), builder.ConstantR0<float>(25.6f)});
- builder.Conditional(pred, operands, CreateR0TupleCeilComputation(), operands,
- CreateR0TupleFloorComputation());
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
+ auto operands = Tuple(&builder, {ConstantR0<float>(&builder, 12.2f),
+ ConstantR0<float>(&builder, 25.6f)});
+ Conditional(pred, operands, CreateR0TupleCeilComputation(), operands,
+ CreateR0TupleFloorComputation());
ComputeAndCompareTuple(
&builder,
- *Literal::MakeTuple({Literal::CreateR0<float>(12.0f).get(),
- Literal::CreateR0<float>(25.0f).get()}),
- {}, error_spec_);
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR0<float>(12.0f).get(),
+ LiteralUtil::CreateR0<float>(25.0f).get()}),
+ {pred_arg.get()}, error_spec_);
}
// Test true and false computations that return a tuple of arrays.
XLA_TEST_F(ConditionalOpTest, ReturnTupleOfArrays) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(true);
- auto operands = builder.Tuple({builder.ConstantR1<float>({12.2f, 15.8f}),
- builder.ConstantR1<float>({25.6f, 29.2f})});
- builder.Conditional(pred, operands, CreateR1TupleCeilComputation(), operands,
- CreateR1TupleFloorComputation());
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
+ auto operands =
+ Tuple(&builder, {ConstantR1<float>(&builder, {12.2f, 15.8f}),
+ ConstantR1<float>(&builder, {25.6f, 29.2f})});
+ Conditional(pred, operands, CreateR1TupleCeilComputation(), operands,
+ CreateR1TupleFloorComputation());
ComputeAndCompareTuple(
&builder,
- *Literal::MakeTuple({Literal::CreateR1<float>({13.0f, 16.0f}).get(),
- Literal::CreateR1<float>({26.0f, 30.0f}).get()}),
- {}, error_spec_);
+ *LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR1<float>({13.0f, 16.0f}).get(),
+ LiteralUtil::CreateR1<float>({26.0f, 30.0f}).get()}),
+ {pred_arg.get()}, error_spec_);
}
// Test true and false computations that return a tuple of a predicate, a
// scalar and an array.
@@ -371,85 +388,91 @@ XLA_TEST_F(ConditionalOpTest, ReturnTupleOfArrays) {
XLA_TEST_F(ConditionalOpTest, ReturnTupleofPredicateScalarArray) {
XlaBuilder true_builder(TestName() + ".true");
{
- true_builder.Parameter(0, empty_tuple_, "tuple");
- auto true_pred = true_builder.ConstantR0<bool>(true);
- auto true_scalar = true_builder.ConstantR0<float>(12.2f);
- auto true_array = true_builder.ConstantR1<float>({12.8f, 14.6f});
- true_builder.Tuple({true_pred, true_scalar, true_array});
+ Parameter(&true_builder, 0, empty_tuple_, "tuple");
+ auto true_pred = ConstantR0<bool>(&true_builder, true);
+ auto true_scalar = ConstantR0<float>(&true_builder, 12.2f);
+ auto true_array = ConstantR1<float>(&true_builder, {12.8f, 14.6f});
+ Tuple(&true_builder, {true_pred, true_scalar, true_array});
}
auto true_builder_result = true_builder.Build();
EXPECT_IS_OK(true_builder_result.status());
XlaBuilder false_builder(TestName() + ".false");
{
- false_builder.Parameter(0, empty_tuple_, "tuple");
- auto false_pred = false_builder.ConstantR0<bool>(false);
- auto false_scalar = false_builder.ConstantR0<float>(25.6f);
- auto false_array = false_builder.ConstantR1<float>({26.4f, 32.6f});
- false_builder.Tuple({false_pred, false_scalar, false_array});
+ Parameter(&false_builder, 0, empty_tuple_, "tuple");
+ auto false_pred = ConstantR0<bool>(&false_builder, false);
+ auto false_scalar = ConstantR0<float>(&false_builder, 25.6f);
+ auto false_array = ConstantR1<float>(&false_builder, {26.4f, 32.6f});
+ Tuple(&false_builder, {false_pred, false_scalar, false_array});
}
auto false_builder_result = false_builder.Build();
EXPECT_IS_OK(false_builder_result.status());
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(true);
- auto operands = builder.Tuple({});
- builder.Conditional(pred, operands, true_builder_result.ConsumeValueOrDie(),
- operands, false_builder_result.ConsumeValueOrDie());
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
+ auto operands = Tuple(&builder, {});
+ Conditional(pred, operands, true_builder_result.ConsumeValueOrDie(), operands,
+ false_builder_result.ConsumeValueOrDie());
ComputeAndCompareTuple(
&builder,
- *Literal::MakeTuple({Literal::CreateR0<bool>(true).get(),
- Literal::CreateR0<float>(12.2f).get(),
- Literal::CreateR1<float>({12.8f, 14.6f}).get()}),
- {}, error_spec_);
+ *LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<bool>(true).get(),
+ LiteralUtil::CreateR0<float>(12.2f).get(),
+ LiteralUtil::CreateR1<float>({12.8f, 14.6f}).get()}),
+ {pred_arg.get()}, error_spec_);
}
// Test true and false computations that return a nested tuple.
XLA_TEST_F(ConditionalOpTest, ReturnNestedTuple) {
XlaBuilder true_builder(TestName() + ".true");
{
- true_builder.Parameter(0, empty_tuple_, "tuple");
- auto true_constant1 = true_builder.ConstantR0<float>(12.2f);
- auto true_constant2 = true_builder.ConstantR1<float>({12.8f, 14.6f});
- auto true_constant3 = true_builder.ConstantR1<float>({25.4f, 29.8f});
- auto true_constant4 = true_builder.ConstantR0<float>(35.6f);
- true_builder.Tuple({true_builder.Tuple({true_constant1, true_constant2}),
- true_builder.Tuple({true_constant3, true_constant4})});
+ Parameter(&true_builder, 0, empty_tuple_, "tuple");
+ auto true_constant1 = ConstantR0<float>(&true_builder, 12.2f);
+ auto true_constant2 = ConstantR1<float>(&true_builder, {12.8f, 14.6f});
+ auto true_constant3 = ConstantR1<float>(&true_builder, {25.4f, 29.8f});
+ auto true_constant4 = ConstantR0<float>(&true_builder, 35.6f);
+ Tuple(&true_builder,
+ {Tuple(&true_builder, {true_constant1, true_constant2}),
+ Tuple(&true_builder, {true_constant3, true_constant4})});
}
auto true_builder_result = true_builder.Build();
EXPECT_IS_OK(true_builder_result.status());
XlaBuilder false_builder(TestName() + ".false");
{
- false_builder.Parameter(0, empty_tuple_, "tuple");
- auto false_constant1 = false_builder.ConstantR0<float>(46.6f);
- auto false_constant2 = false_builder.ConstantR1<float>({54.4f, 58.4f});
- auto false_constant3 = false_builder.ConstantR1<float>({62.1f, 67.4f});
- auto false_constant4 = false_builder.ConstantR0<float>(9.3f);
- false_builder.Tuple(
- {false_builder.Tuple({false_constant1, false_constant2}),
- false_builder.Tuple({false_constant3, false_constant4})});
+ Parameter(&false_builder, 0, empty_tuple_, "tuple");
+ auto false_constant1 = ConstantR0<float>(&false_builder, 46.6f);
+ auto false_constant2 = ConstantR1<float>(&false_builder, {54.4f, 58.4f});
+ auto false_constant3 = ConstantR1<float>(&false_builder, {62.1f, 67.4f});
+ auto false_constant4 = ConstantR0<float>(&false_builder, 9.3f);
+ Tuple(&false_builder,
+ {Tuple(&false_builder, {false_constant1, false_constant2}),
+ Tuple(&false_builder, {false_constant3, false_constant4})});
}
auto false_builder_result = false_builder.Build();
EXPECT_IS_OK(false_builder_result.status());
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(false);
- auto operands = builder.Tuple({});
- builder.Conditional(pred, operands, true_builder_result.ConsumeValueOrDie(),
- operands, false_builder_result.ConsumeValueOrDie());
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
+ auto operands = Tuple(&builder, {});
+ Conditional(pred, operands, true_builder_result.ConsumeValueOrDie(), operands,
+ false_builder_result.ConsumeValueOrDie());
ComputeAndCompareTuple(
&builder,
- *Literal::MakeTuple(
- {Literal::MakeTuple({Literal::CreateR0<float>(46.6f).get(),
- Literal::CreateR1<float>({54.4f, 58.4f}).get()})
+ *LiteralUtil::MakeTuple(
+ {LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<float>(46.6f).get(),
+ LiteralUtil::CreateR1<float>({54.4f, 58.4f}).get()})
.get(),
- Literal::MakeTuple({Literal::CreateR1<float>({62.1f, 67.4f}).get(),
- Literal::CreateR0<float>(9.3f).get()})
+ LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR1<float>({62.1f, 67.4f}).get(),
+ LiteralUtil::CreateR0<float>(9.3f).get()})
.get()}),
- {}, error_spec_);
+ {pred_arg.get()}, error_spec_);
}
// Test conditional that takes in scalar operands in the form of external
// params.
@@ -464,8 +487,8 @@ XLA_TEST_F(ConditionalOpTest, ScalarOperandsFromExternalParams) {
CreateR0Parameter<float>(56.3f, 1, "operand1", &builder, &operand1);
auto operand2_param =
CreateR0Parameter<float>(12.7f, 2, "operand2", &builder, &operand2);
- builder.Conditional(pred, operand1, CreateR0CeilComputation(), operand2,
- CreateR0FloorComputation());
+ Conditional(pred, operand1, CreateR0CeilComputation(), operand2,
+ CreateR0FloorComputation());
ComputeAndCompareR0<float>(
&builder, 57.0f,
@@ -484,8 +507,8 @@ XLA_TEST_F(ConditionalOpTest, ArrayOperandsFromExternalParams) {
&builder, &operand1);
auto operand2_param = CreateR1Parameter<float>({10.2f, 11.6f}, 2, "operand2",
&builder, &operand2);
- builder.Conditional(pred, operand1, CreateR1CeilComputation(), operand2,
- CreateR1FloorComputation());
+ Conditional(pred, operand1, CreateR1CeilComputation(), operand2,
+ CreateR1FloorComputation());
ComputeAndCompareR1<float>(
&builder, {10.0f, 11.0f},
@@ -499,29 +522,29 @@ XLA_TEST_F(ConditionalOpTest, NestedConditionals) {
{
Shape r0bool = ShapeUtil::MakeShape(PRED, {});
Shape tuple_shape = ShapeUtil::MakeTupleShape({r0bool, r0f32_, r0f32_});
- auto param0 = inner_builder.Parameter(0, tuple_shape, "param0");
- auto pred_cond = inner_builder.GetTupleElement(param0, 0);
- auto true_operand = inner_builder.GetTupleElement(param0, 1);
- auto false_operand = inner_builder.GetTupleElement(param0, 2);
- inner_builder.Conditional(pred_cond, true_operand,
- CreateR0CeilComputation(), false_operand,
- CreateR0FloorComputation());
+ auto param0 = Parameter(&inner_builder, 0, tuple_shape, "param0");
+ auto pred_cond = GetTupleElement(param0, 0);
+ auto true_operand = GetTupleElement(param0, 1);
+ auto false_operand = GetTupleElement(param0, 2);
+ Conditional(pred_cond, true_operand, CreateR0CeilComputation(),
+ false_operand, CreateR0FloorComputation());
}
auto inner_builder_result = inner_builder.Build();
EXPECT_IS_OK(inner_builder_result.status());
XlaBuilder builder(TestName());
- auto pred1 = builder.ConstantR0<bool>(true);
- auto pred2 = builder.ConstantR0<bool>(false);
- auto operand1 = builder.ConstantR0<float>(1.1f);
- auto operand2 = builder.ConstantR0<float>(12.2f);
- auto operand3 = builder.ConstantR0<float>(43.3f);
- auto tuple_operand = builder.Tuple({pred2, operand1, operand2});
- builder.Conditional(pred1, tuple_operand,
- inner_builder_result.ConsumeValueOrDie(), operand3,
- CreateR0IdentityComputation());
-
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ XlaOp pred1, pred2;
+ auto pred1_arg = CreateR0Parameter<bool>(true, 0, "pred1", &builder, &pred1);
+ auto pred2_arg = CreateR0Parameter<bool>(false, 1, "pred2", &builder, &pred2);
+ auto operand1 = ConstantR0<float>(&builder, 1.1f);
+ auto operand2 = ConstantR0<float>(&builder, 12.2f);
+ auto operand3 = ConstantR0<float>(&builder, 43.3f);
+ auto tuple_operand = Tuple(&builder, {pred2, operand1, operand2});
+ Conditional(pred1, tuple_operand, inner_builder_result.ConsumeValueOrDie(),
+ operand3, CreateR0IdentityComputation());
+
+ ComputeAndCompareR0<float>(&builder, 12.0f,
+ {pred1_arg.get(), pred2_arg.get()}, error_spec_);
}
XLA_TEST_F(ConditionalOpTest, ConditionalInNestedComputation) {
@@ -529,36 +552,36 @@ XLA_TEST_F(ConditionalOpTest, ConditionalInNestedComputation) {
{
Shape r0bool = ShapeUtil::MakeShape(PRED, {});
Shape tuple_shape = ShapeUtil::MakeTupleShape({r0bool, r0f32_, r0f32_});
- auto param0 = inner_builder.Parameter(0, tuple_shape, "param0");
- auto pred_cond = inner_builder.GetTupleElement(param0, 0);
- auto true_operand = inner_builder.GetTupleElement(param0, 1);
- auto false_operand = inner_builder.GetTupleElement(param0, 2);
- inner_builder.Conditional(pred_cond, true_operand,
- CreateR0CeilComputation(), false_operand,
- CreateR0FloorComputation());
+ auto param0 = Parameter(&inner_builder, 0, tuple_shape, "param0");
+ auto pred_cond = GetTupleElement(param0, 0);
+ auto true_operand = GetTupleElement(param0, 1);
+ auto false_operand = GetTupleElement(param0, 2);
+ Conditional(pred_cond, true_operand, CreateR0CeilComputation(),
+ false_operand, CreateR0FloorComputation());
}
auto inner_builder_result = inner_builder.Build();
EXPECT_IS_OK(inner_builder_result.status());
XlaBuilder builder(TestName());
- auto pred2 = builder.ConstantR0<bool>(false);
- auto operand1 = builder.ConstantR0<float>(1.1f);
- auto operand2 = builder.ConstantR0<float>(12.2f);
- auto tuple_operand = builder.Tuple({pred2, operand1, operand2});
- builder.Call(inner_builder_result.ConsumeValueOrDie(), {tuple_operand});
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
+ auto operand1 = ConstantR0<float>(&builder, 1.1f);
+ auto operand2 = ConstantR0<float>(&builder, 12.2f);
+ auto tuple_operand = Tuple(&builder, {pred, operand1, operand2});
+ Call(&builder, inner_builder_result.ConsumeValueOrDie(), {tuple_operand});
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test a mismatch in the shape of the true operand and true computation.
XLA_TEST_F(ConditionalOpTest, ShapeMismatch) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(true);
- auto operand1 = builder.ConstantR0<float>(56.0f);
- auto operand2 = builder.ConstantR0<float>(12.0f);
- auto operands = builder.Tuple({operand1, operand2});
- builder.Conditional(pred, operands, CreateR1TupleAddComputation(), operands,
- CreateR0TupleSubComputation());
+ auto pred = ConstantR0<bool>(&builder, true);
+ auto operand1 = ConstantR0<float>(&builder, 56.0f);
+ auto operand2 = ConstantR0<float>(&builder, 12.0f);
+ auto operands = Tuple(&builder, {operand1, operand2});
+ Conditional(pred, operands, CreateR1TupleAddComputation(), operands,
+ CreateR0TupleSubComputation());
auto result = builder.Build();
EXPECT_FALSE(result.ok());
@@ -572,46 +595,47 @@ XLA_TEST_F(ConditionalOpTest, SwappedInputsInSequentialConditionals) {
XlaComputation swapper;
{
XlaBuilder builder(TestName() + ".swapper");
- auto param0 = builder.Parameter(0, tuple_shape, "sp0");
- auto x = builder.GetTupleElement(param0, 0);
- auto y = builder.GetTupleElement(param0, 1);
- builder.Tuple({y, x});
+ auto param0 = Parameter(&builder, 0, tuple_shape, "sp0");
+ auto x = GetTupleElement(param0, 0);
+ auto y = GetTupleElement(param0, 1);
+ Tuple(&builder, {y, x});
swapper = builder.Build().ConsumeValueOrDie();
}
XlaComputation forwarder;
{
XlaBuilder builder(TestName() + ".forwarder");
- auto param0 = builder.Parameter(0, tuple_shape, "fp0");
- auto x = builder.GetTupleElement(param0, 0);
- auto y = builder.GetTupleElement(param0, 1);
- builder.Tuple({x, y});
+ auto param0 = Parameter(&builder, 0, tuple_shape, "fp0");
+ auto x = GetTupleElement(param0, 0);
+ auto y = GetTupleElement(param0, 1);
+ Tuple(&builder, {x, y});
forwarder = builder.Build().ConsumeValueOrDie();
}
XlaComputation main;
{
XlaBuilder builder(TestName() + ".main");
- auto param0 = builder.Parameter(0, tuple_shape, "mp0");
- auto x = builder.GetTupleElement(param0, 0);
- auto y = builder.GetTupleElement(param0, 1);
- auto lt_pred = builder.Lt(x, y);
- auto res = builder.Conditional(lt_pred, param0, forwarder, param0, swapper);
- auto ge_pred = builder.Ge(x, y);
- builder.Conditional(ge_pred, res, swapper, res, forwarder);
+ auto param0 = Parameter(&builder, 0, tuple_shape, "mp0");
+ auto x = GetTupleElement(param0, 0);
+ auto y = GetTupleElement(param0, 1);
+ auto lt_pred = Lt(x, y);
+ auto res = Conditional(lt_pred, param0, forwarder, param0, swapper);
+ auto ge_pred = Ge(x, y);
+ Conditional(ge_pred, res, swapper, res, forwarder);
main = builder.Build().ConsumeValueOrDie();
}
auto test_swap = [&](float a, float b) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR0<float>(a);
- auto y = builder.ConstantR0<float>(b);
- auto tuple_operand = builder.Tuple({x, y});
- builder.Call(main, {tuple_operand});
+ XlaOp x, y;
+ auto x_arg = CreateR0Parameter<float>(a, 0, "x", &builder, &x);
+ auto y_arg = CreateR0Parameter<float>(b, 1, "y", &builder, &y);
+ auto tuple_operand = Tuple(&builder, {x, y});
+ Call(&builder, main, {tuple_operand});
ComputeAndCompareTuple(
&builder,
- *Literal::MakeTuple({Literal::CreateR0<float>(a).get(),
- Literal::CreateR0<float>(b).get()}),
- {}, error_spec_);
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR0<float>(a).get(),
+ LiteralUtil::CreateR0<float>(b).get()}),
+ {x_arg.get(), y_arg.get()}, error_spec_);
};
test_swap(3.11f, 9.4f);
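
The swapper/forwarder hunks above also show how subcomputations are built under the free-function API: each branch body gets its own XlaBuilder, the last op built becomes the root, and Build() yields an XlaComputation that a parent builder invokes via Call. A condensed sketch under those assumptions (shape names are illustrative):

  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  Shape tuple_shape = ShapeUtil::MakeTupleShape({r0f32, r0f32});

  XlaComputation swapper;
  {
    XlaBuilder sub("swapper");
    XlaOp p = Parameter(&sub, 0, tuple_shape, "p0");
    // The last op built is the computation's root: (x, y) -> (y, x).
    Tuple(&sub, {GetTupleElement(p, 1), GetTupleElement(p, 0)});
    swapper = sub.Build().ConsumeValueOrDie();
  }

  XlaBuilder builder("caller");
  XlaOp args = Tuple(&builder, {ConstantR0<float>(&builder, 1.0f),
                                ConstantR0<float>(&builder, 2.0f)});
  Call(&builder, swapper, {args});  // Evaluates to the tuple (2.0f, 1.0f).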
diff --git a/tensorflow/compiler/xla/tests/constants_test.cc b/tensorflow/compiler/xla/tests/constants_test.cc
index 916ffadbc7..71d72a9828 100644
--- a/tensorflow/compiler/xla/tests/constants_test.cc
+++ b/tensorflow/compiler/xla/tests/constants_test.cc
@@ -26,6 +26,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
#include "tensorflow/compiler/xla/tests/literal_test_util.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
@@ -39,7 +40,7 @@ class ConstantsTest : public ClientLibraryTestBase {
TEST_F(ConstantsTest, ZeroCellF32) {
XlaBuilder builder(TestName());
- builder.ConstantR1<float>({});
+ ConstantR1<float>(&builder, {});
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
@@ -48,7 +49,7 @@ TEST_F(ConstantsTest, OneCellF32) {
std::vector<float> constant = {2.0};
XlaBuilder builder(TestName());
- builder.ConstantR1<float>(constant);
+ ConstantR1<float>(&builder, constant);
ComputeAndCompareR1<float>(&builder, constant, {}, error_spec_);
}
@@ -57,7 +58,7 @@ TEST_F(ConstantsTest, OneCellS32) {
std::vector<int32> constant = {2};
XlaBuilder builder(TestName());
- builder.ConstantR1<int32>(constant);
+ ConstantR1<int32>(&builder, constant);
ComputeAndCompareR1<int32>(&builder, constant, {});
}
@@ -66,7 +67,7 @@ TEST_F(ConstantsTest, OneCellU32) {
std::vector<uint32> constant = {2};
XlaBuilder builder(TestName());
- builder.ConstantR1<uint32>(constant);
+ ConstantR1<uint32>(&builder, constant);
ComputeAndCompareR1<uint32>(&builder, constant, {});
}
@@ -75,7 +76,7 @@ TEST_F(ConstantsTest, EightCells) {
std::vector<float> constant = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0};
XlaBuilder builder(TestName());
- builder.ConstantR1<float>(constant);
+ ConstantR1<float>(&builder, constant);
ComputeAndCompareR1<float>(&builder, constant, {}, error_spec_);
}
@@ -85,14 +86,14 @@ TEST_F(ConstantsTest, SixteenCells) {
8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0};
XlaBuilder builder(TestName());
- builder.ConstantR1<float>(constant);
+ ConstantR1<float>(&builder, constant);
ComputeAndCompareR1<float>(&builder, constant, {}, error_spec_);
}
TEST_F(ConstantsTest, Empty_0x2) {
XlaBuilder builder(TestName());
- builder.ConstantR2FromArray2D<float>(Array2D<float>(0, 2));
+ ConstantR2FromArray2D<float>(&builder, Array2D<float>(0, 2));
ComputeAndCompareR2<float>(&builder, Array2D<float>(0, 2), {}, error_spec_);
}
@@ -102,15 +103,15 @@ TEST_F(ConstantsTest, Small_2x2) {
MakeLinspaceArray2D(100.0, 200.0, 2, 2);
XlaBuilder builder(TestName());
- builder.ConstantR2FromArray2D<float>(*constant);
+ ConstantR2FromArray2D<float>(&builder, *constant);
ComputeAndCompareR2<float>(&builder, *constant, {}, error_spec_);
}
TEST_F(ConstantsTest, Empty_3x0x2) {
XlaBuilder builder(TestName());
- auto constant = builder.ConstantLiteral(
- *Literal::CreateR3FromArray3D<float>(Array3D<float>(3, 0, 2)));
+ ConstantLiteral(&builder, *LiteralUtil::CreateR3FromArray3D<float>(
+ Array3D<float>(3, 0, 2)));
ComputeAndCompareR3<float>(&builder, Array3D<float>(3, 0, 2), {});
}
@@ -125,8 +126,7 @@ TEST_F(ConstantsTest, Small_2x2x2) {
{{5.f, 6.f}, // y0
{7.f, 8.f}}, // y1
});
- auto constant =
- builder.ConstantLiteral(*Literal::CreateR3FromArray3D<float>(array3d));
+ ConstantLiteral(&builder, *LiteralUtil::CreateR3FromArray3D<float>(array3d));
ComputeAndCompareR3<float>(&builder, array3d, {});
}
@@ -141,17 +141,17 @@ TEST_F(ConstantsTest, Small_3x2x1x1) {
});
input_array.FillWithPZ(pz);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4D(input_array);
+ LiteralUtil::CreateR4FromArray4D(input_array);
{
XlaBuilder builder(TestName());
- builder.ConstantLiteral(*input_literal);
+ ConstantLiteral(&builder, *input_literal);
ComputeAndCompareR4<float>(&builder, input_array, {}, error_spec_);
}
{
XlaBuilder builder(TestName());
- builder.ConstantR4FromArray4D<float>(input_array);
+ ConstantR4FromArray4D<float>(&builder, input_array);
ComputeAndCompareR4<float>(&builder, input_array, {}, error_spec_);
}
}
@@ -159,17 +159,26 @@ TEST_F(ConstantsTest, Small_3x2x1x1) {
// TODO(b/29263943): Support tuple constants.
TEST_F(ConstantsTest, DISABLED_TupleConstant) {
XlaBuilder builder(TestName());
- builder.ConstantLiteral(
- *Literal::MakeTuple({Literal::CreateR2<float>({{1.0}, {2.0}}).get(),
- Literal::CreateR1<float>({2.0, 42}).get()}));
+ ConstantLiteral(&builder,
+ *LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR2<float>({{1.0}, {2.0}}).get(),
+ LiteralUtil::CreateR1<float>({2.0, 42}).get()}));
std::unique_ptr<Literal> result =
ExecuteAndTransfer(&builder, {}).ConsumeValueOrDie();
- LiteralTestUtil::ExpectR2Near<float>(
- {{1.0}, {2.0}}, LiteralSlice(*result, {0}), error_spec_);
- LiteralTestUtil::ExpectR1Near<float>(
- {2.0, 42.0}, LiteralSlice(*result, {1}), error_spec_);
+ LiteralTestUtil::ExpectR2Near<float>({{1.0}, {2.0}},
+ LiteralSlice(*result, {0}), error_spec_);
+ LiteralTestUtil::ExpectR1Near<float>({2.0, 42.0}, LiteralSlice(*result, {1}),
+ error_spec_);
+}
+
+TEST_F(ConstantsTest, Token) {
+ XlaBuilder builder(TestName());
+ ConstantLiteral(&builder, *LiteralUtil::CreateToken());
+ // TODO(b/80000000): tokens cannot be returned from computations.
+ Tuple(&builder, {});
+ TF_ASSERT_OK(Execute(&builder, {}).status());
}
} // namespace
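
Alongside the builder change, constants_test.cc shows the second mechanical rename in this commit: the literal factory functions moved from Literal to LiteralUtil (with the header split into literal.h and literal_util.h, as the include hunk in convolution_test.cc below makes explicit). A minimal sketch, assuming an XlaBuilder named builder is in scope:

  // Before: auto tup = Literal::MakeTuple({...});
  std::unique_ptr<Literal> row = LiteralUtil::CreateR2<float>({{1.0}, {2.0}});
  std::unique_ptr<Literal> tup = LiteralUtil::MakeTuple(
      {row.get(), LiteralUtil::CreateR1<float>({2.0, 42.0}).get()});
  // The free-function form takes the builder explicitly.
  ConstantLiteral(&builder, *tup);

Only the factory's home class changed; the returned std::unique_ptr<Literal> and its ownership semantics are the same.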
diff --git a/tensorflow/compiler/xla/tests/convert_test.cc b/tensorflow/compiler/xla/tests/convert_test.cc
index 3a885b4389..dca57fd1c7 100644
--- a/tensorflow/compiler/xla/tests/convert_test.cc
+++ b/tensorflow/compiler/xla/tests/convert_test.cc
@@ -45,8 +45,8 @@ class ConvertTest : public ClientLibraryTestBase {
TEST_F(ConvertTest, ConvertR1S32ToR1S32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({42, 64});
- builder.ConvertElementType(a, S32);
+ auto a = ConstantR1<int32>(&builder, {42, 64});
+ ConvertElementType(a, S32);
std::vector<int32> expected = {42, 64};
ComputeAndCompareR1<int32>(&builder, expected, {});
@@ -54,8 +54,8 @@ TEST_F(ConvertTest, ConvertR1S32ToR1S32) {
TEST_F(ConvertTest, ConvertR1F32ToR1F32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({42.0f, 64.0f});
- builder.ConvertElementType(a, F32);
+ auto a = ConstantR1<float>(&builder, {42.0f, 64.0f});
+ ConvertElementType(a, F32);
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -63,8 +63,8 @@ TEST_F(ConvertTest, ConvertR1F32ToR1F32) {
TEST_F(ConvertTest, ConvertR1S32ToR1F32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({42, 64});
- builder.ConvertElementType(a, F32);
+ auto a = ConstantR1<int32>(&builder, {42, 64});
+ ConvertElementType(a, F32);
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -72,8 +72,8 @@ TEST_F(ConvertTest, ConvertR1S32ToR1F32) {
TEST_F(ConvertTest, ConvertR1PREDToR1S32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({true, false, true});
- builder.ConvertElementType(a, S32);
+ auto a = ConstantR1<bool>(&builder, {true, false, true});
+ ConvertElementType(a, S32);
std::vector<int32> expected = {1, 0, 1};
ComputeAndCompareR1<int32>(&builder, expected, {});
@@ -81,8 +81,8 @@ TEST_F(ConvertTest, ConvertR1PREDToR1S32) {
TEST_F(ConvertTest, ConvertR1PREDToR1F32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({true, false, true});
- builder.ConvertElementType(a, F32);
+ auto a = ConstantR1<bool>(&builder, {true, false, true});
+ ConvertElementType(a, F32);
std::vector<float> expected = {1., 0., 1.};
ComputeAndCompareR1<float>(&builder, expected, {});
@@ -90,8 +90,8 @@ TEST_F(ConvertTest, ConvertR1PREDToR1F32) {
XLA_TEST_F(ConvertTest, ConvertR1S0S32ToR1S0F32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>({});
- builder.ConvertElementType(a, F32);
+ auto a = ConstantR1<int32>(&builder, {});
+ ConvertElementType(a, F32);
std::vector<float> expected = {};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -99,8 +99,8 @@ XLA_TEST_F(ConvertTest, ConvertR1S0S32ToR1S0F32) {
TEST_F(ConvertTest, ConvertR1F32ToR1S32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({42.6, 64.4});
- builder.ConvertElementType(a, S32);
+ auto a = ConstantR1<float>(&builder, {42.6, 64.4});
+ ConvertElementType(a, S32);
std::vector<int32> expected = {42, 64};
ComputeAndCompareR1<int32>(&builder, expected, {});
@@ -145,12 +145,12 @@ XLA_TEST_F(ConvertTest, ConvertR1S64ToR1F32) {
static_cast<int64>(0x8000008000000000LL),
static_cast<int64>(0x8000010000000000LL),
};
- std::unique_ptr<Literal> arg_literal = Literal::CreateR1<int64>({arg});
- auto arg_param = builder.Parameter(0, arg_literal->shape(), "arg_param");
+ std::unique_ptr<Literal> arg_literal = LiteralUtil::CreateR1<int64>({arg});
+ auto arg_param = Parameter(&builder, 0, arg_literal->shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(*arg_literal).ConsumeValueOrDie();
- builder.ConvertElementType(arg_param, F32);
+ ConvertElementType(arg_param, F32);
std::vector<float> expected(arg.size());
for (int64 i = 0; i < arg.size(); ++i) {
@@ -164,12 +164,12 @@ XLA_TEST_F(ConvertTest, ConvertR1U32ToR1F32) {
std::vector<uint32> arg{0, 1, 0x1000, 0x7fffffff,
0x80000000, 0x80000001, 0x80000002, 0x80000003,
0x80000080, 0x80000081, 0x80000082, 0xFFFFFFFF};
- std::unique_ptr<Literal> arg_literal = Literal::CreateR1<uint32>({arg});
- auto arg_param = builder.Parameter(0, arg_literal->shape(), "arg_param");
+ std::unique_ptr<Literal> arg_literal = LiteralUtil::CreateR1<uint32>({arg});
+ auto arg_param = Parameter(&builder, 0, arg_literal->shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(*arg_literal).ConsumeValueOrDie();
- builder.ConvertElementType(arg_param, F32);
+ ConvertElementType(arg_param, F32);
std::vector<float> expected(arg.size());
for (int64 i = 0; i < arg.size(); ++i) {
@@ -182,12 +182,12 @@ XLA_TEST_F(ConvertTest, ConvertR1F32ToR1U32) {
XlaBuilder builder(TestName());
std::vector<float> arg{0.0f, 1.0f, 16777216.0f,
16777218.0f, 2147483647.0f, 4294967040.0f};
- std::unique_ptr<Literal> arg_literal = Literal::CreateR1<float>({arg});
- auto arg_param = builder.Parameter(0, arg_literal->shape(), "arg_param");
+ std::unique_ptr<Literal> arg_literal = LiteralUtil::CreateR1<float>({arg});
+ auto arg_param = Parameter(&builder, 0, arg_literal->shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(*arg_literal).ConsumeValueOrDie();
- builder.ConvertElementType(arg_param, U32);
+ ConvertElementType(arg_param, U32);
std::vector<uint32> expected(arg.size());
for (int64 i = 0; i < arg.size(); ++i) {
@@ -199,12 +199,12 @@ XLA_TEST_F(ConvertTest, ConvertR1F32ToR1U32) {
XLA_TEST_F(ConvertTest, ConvertR1U32ToR1S64) {
XlaBuilder builder(TestName());
std::vector<uint32> arg{0, 1, 0x1000, 0x7fffffff, 0x80000082, 0xFFFFFFFF};
- std::unique_ptr<Literal> arg_literal = Literal::CreateR1<uint32>({arg});
- auto arg_param = builder.Parameter(0, arg_literal->shape(), "arg_param");
+ std::unique_ptr<Literal> arg_literal = LiteralUtil::CreateR1<uint32>({arg});
+ auto arg_param = Parameter(&builder, 0, arg_literal->shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(*arg_literal).ConsumeValueOrDie();
- builder.ConvertElementType(arg_param, S64);
+ ConvertElementType(arg_param, S64);
std::vector<int64> expected(arg.size());
for (int64 i = 0; i < arg.size(); ++i) {
@@ -216,12 +216,12 @@ XLA_TEST_F(ConvertTest, ConvertR1U32ToR1S64) {
XLA_TEST_F(ConvertTest, ConvertR1S32ToR1S64) {
XlaBuilder builder(TestName());
std::vector<int32> arg{0, 1, 0x1000, -1, -0x1000};
- std::unique_ptr<Literal> arg_literal = Literal::CreateR1<int32>({arg});
- auto arg_param = builder.Parameter(0, arg_literal->shape(), "arg_param");
+ std::unique_ptr<Literal> arg_literal = LiteralUtil::CreateR1<int32>({arg});
+ auto arg_param = Parameter(&builder, 0, arg_literal->shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(*arg_literal).ConsumeValueOrDie();
- builder.ConvertElementType(arg_param, S64);
+ ConvertElementType(arg_param, S64);
std::vector<int64> expected(arg.size());
for (int64 i = 0; i < arg.size(); ++i) {
@@ -253,12 +253,12 @@ XLA_TEST_F(ConvertTest, ConvertR1F32ToR1S64) {
9223370937343148032.f,
-9223371487098961920.f,
-9223370937343148032.f};
- std::unique_ptr<Literal> arg_literal = Literal::CreateR1<float>({arg});
- auto arg_param = builder.Parameter(0, arg_literal->shape(), "arg_param");
+ std::unique_ptr<Literal> arg_literal = LiteralUtil::CreateR1<float>({arg});
+ auto arg_param = Parameter(&builder, 0, arg_literal->shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(*arg_literal).ConsumeValueOrDie();
- builder.ConvertElementType(arg_param, S64);
+ ConvertElementType(arg_param, S64);
std::vector<int64> expected(arg.size());
for (int64 i = 0; i < arg.size(); ++i) {
@@ -269,8 +269,8 @@ XLA_TEST_F(ConvertTest, ConvertR1F32ToR1S64) {
XLA_TEST_F(ConvertTest, ConvertR1U8ToR1F32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint8_t>({32, 64});
- builder.ConvertElementType(a, F32);
+ auto a = ConstantR1<uint8_t>(&builder, {32, 64});
+ ConvertElementType(a, F32);
std::vector<float> expected = {32.0, 64.0};
ComputeAndCompareR1<float>(&builder, expected, {});
@@ -278,8 +278,8 @@ XLA_TEST_F(ConvertTest, ConvertR1U8ToR1F32) {
XLA_TEST_F(ConvertTest, ConvertR1U8ToR1S32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint8_t>({32, 64});
- builder.ConvertElementType(a, S32);
+ auto a = ConstantR1<uint8_t>(&builder, {32, 64});
+ ConvertElementType(a, S32);
std::vector<int32_t> expected = {32, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
@@ -287,8 +287,8 @@ XLA_TEST_F(ConvertTest, ConvertR1U8ToR1S32) {
XLA_TEST_F(ConvertTest, ConvertR1U8ToR1U32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<uint8_t>({32, 64});
- builder.ConvertElementType(a, U32);
+ auto a = ConstantR1<uint8_t>(&builder, {32, 64});
+ ConvertElementType(a, U32);
std::vector<uint32_t> expected = {32, 64};
ComputeAndCompareR1<uint32_t>(&builder, expected, {});
@@ -296,8 +296,8 @@ XLA_TEST_F(ConvertTest, ConvertR1U8ToR1U32) {
XLA_TEST_F(ConvertTest, ConvertR1F32ToR1F64) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<float>({32.0f, 64.0f});
- builder.ConvertElementType(a, F64);
+ auto a = ConstantR1<float>(&builder, {32.0f, 64.0f});
+ ConvertElementType(a, F64);
std::vector<double> expected = {32.0, 64.0};
ComputeAndCompareR1<double>(&builder, expected, {});
@@ -305,8 +305,8 @@ XLA_TEST_F(ConvertTest, ConvertR1F32ToR1F64) {
XLA_TEST_F(ConvertTest, ConvertR1F64ToR1F32) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<double>({32.0, 64.0});
- builder.ConvertElementType(a, F32);
+ auto a = ConstantR1<double>(&builder, {32.0, 64.0});
+ ConvertElementType(a, F32);
std::vector<float> expected = {32.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {});
@@ -314,9 +314,9 @@ XLA_TEST_F(ConvertTest, ConvertR1F64ToR1F32) {
TEST_F(ConvertTest, ConvertS32Extremes) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<int32>(
- {std::numeric_limits<int32>::min(), std::numeric_limits<int32>::max()});
- builder.ConvertElementType(a, F32);
+ auto a = ConstantR1<int32>(&builder, {std::numeric_limits<int32>::min(),
+ std::numeric_limits<int32>::max()});
+ ConvertElementType(a, F32);
std::vector<float> expected = {
static_cast<float>(std::numeric_limits<int32>::min()),
@@ -327,10 +327,10 @@ TEST_F(ConvertTest, ConvertS32Extremes) {
TEST_F(ConvertTest, ConvertMapToS32) {
XlaBuilder builder(TestName());
auto b = builder.CreateSubBuilder("convert");
- auto param = b->Parameter(0, ShapeUtil::MakeShape(F32, {}), "in");
- b->ConvertElementType(param, S32);
- auto a = builder.ConstantR1<float>({42.0f, 64.0f});
- builder.Map({a}, b->BuildAndNoteError(), {0});
+ auto param = Parameter(b.get(), 0, ShapeUtil::MakeShape(F32, {}), "in");
+ ConvertElementType(param, S32);
+ auto a = ConstantR1<float>(&builder, {42.0f, 64.0f});
+ Map(&builder, {a}, b->BuildAndNoteError(), {0});
std::vector<int32> expected = {42, 64};
ComputeAndCompareR1<int32>(&builder, expected, {});
@@ -339,10 +339,10 @@ TEST_F(ConvertTest, ConvertMapToS32) {
TEST_F(ConvertTest, ConvertMapToF32) {
XlaBuilder builder(TestName());
auto b = builder.CreateSubBuilder("convert");
- auto param = b->Parameter(0, ShapeUtil::MakeShape(S32, {}), "in");
- b->ConvertElementType(param, F32);
- auto a = builder.ConstantR1<int32>({42, 64});
- builder.Map({a}, b->BuildAndNoteError(), {0});
+ auto param = Parameter(b.get(), 0, ShapeUtil::MakeShape(S32, {}), "in");
+ ConvertElementType(param, F32);
+ auto a = ConstantR1<int32>(&builder, {42, 64});
+ Map(&builder, {a}, b->BuildAndNoteError(), {0});
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -355,9 +355,9 @@ TEST_F(ConvertTest, ConvertMapToF32) {
// the new convert should have the same element type as the old convert.
TEST_F(ConvertTest, ConvertReshape) {
XlaBuilder builder(TestName());
- auto input = builder.ConstantR1<int32>({42});
- auto reshape = builder.Reshape(input, /*dimensions=*/{0}, /*new_sizes=*/{});
- builder.ConvertElementType(reshape, F32);
+ auto input = ConstantR1<int32>(&builder, {42});
+ auto reshape = Reshape(input, /*dimensions=*/{0}, /*new_sizes=*/{});
+ ConvertElementType(reshape, F32);
ComputeAndCompareR0<float>(&builder, 42.0f, {}, ErrorSpec(0.0001));
}
@@ -391,13 +391,13 @@ XLA_TEST_F(ConvertTest, ConvertR1F16ToR1F32) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> dot_lhs_handle,
- client_->TransferToServer(*Literal::CreateR1<half>(input)));
+ client_->TransferToServer(*LiteralUtil::CreateR1<half>(input)));
XlaBuilder builder(TestName());
- builder.ConvertElementType(
- builder.Parameter(
- 0, ShapeUtil::MakeShape(F16, {static_cast<int64>(input.size())}),
- "param"),
+ ConvertElementType(
+ Parameter(&builder, 0,
+ ShapeUtil::MakeShape(F16, {static_cast<int64>(input.size())}),
+ "param"),
F32);
ComputeAndCompareR1<float>(&builder, expected_output, {dot_lhs_handle.get()});
@@ -411,13 +411,13 @@ XLA_TEST_F(ConvertTest, ConvertR1F32ToR1F16) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> dot_lhs_handle,
- client_->TransferToServer(*Literal::CreateR1<float>(input)));
+ client_->TransferToServer(*LiteralUtil::CreateR1<float>(input)));
XlaBuilder builder(TestName());
- builder.ConvertElementType(
- builder.Parameter(
- 0, ShapeUtil::MakeShape(F32, {static_cast<int64>(input.size())}),
- "param"),
+ ConvertElementType(
+ Parameter(&builder, 0,
+ ShapeUtil::MakeShape(F32, {static_cast<int64>(input.size())}),
+ "param"),
F16);
ComputeAndCompareR1<half>(&builder, expected_output, {dot_lhs_handle.get()});
@@ -426,28 +426,28 @@ XLA_TEST_F(ConvertTest, ConvertR1F32ToR1F16) {
XLA_TEST_F(ConvertTest, ConvertC64ToC64) {
XlaBuilder builder(TestName());
std::vector<complex64> x = {{42.0f, 64.0f}};
- builder.ConvertElementType(builder.ConstantR1<complex64>(x), C64);
+ ConvertElementType(ConstantR1<complex64>(&builder, x), C64);
ComputeAndCompareR1<complex64>(&builder, x, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ConvertTest, ConvertS64S64) {
XlaBuilder builder(TestName());
std::vector<int64> x = {{-42, 64}};
- builder.ConvertElementType(builder.ConstantR1<int64>(x), S64);
+ ConvertElementType(ConstantR1<int64>(&builder, x), S64);
ComputeAndCompareR1<int64>(&builder, x, {});
}
XLA_TEST_F(ConvertTest, ConvertU64U64) {
XlaBuilder builder(TestName());
std::vector<uint64> x = {{42, 64}};
- builder.ConvertElementType(builder.ConstantR1<uint64>(x), U64);
+ ConvertElementType(ConstantR1<uint64>(&builder, x), U64);
ComputeAndCompareR1<uint64>(&builder, x, {});
}
XLA_TEST_F(ConvertTest, ConvertU64S64) {
XlaBuilder builder(TestName());
std::vector<uint64> unsigned_x = {{42, UINT64_MAX}};
- builder.ConvertElementType(builder.ConstantR1<uint64>(unsigned_x), S64);
+ ConvertElementType(ConstantR1<uint64>(&builder, unsigned_x), S64);
std::vector<int64> signed_x = {{42, -1}};
ComputeAndCompareR1<int64>(&builder, signed_x, {});
}
@@ -455,7 +455,7 @@ XLA_TEST_F(ConvertTest, ConvertU64S64) {
XLA_TEST_F(ConvertTest, ConvertS64U64) {
XlaBuilder builder(TestName());
std::vector<int64> signed_x = {{42, -1, INT64_MIN}};
- builder.ConvertElementType(builder.ConstantR1<int64>(signed_x), U64);
+ ConvertElementType(ConstantR1<int64>(&builder, signed_x), U64);
std::vector<uint64> unsigned_x = {
{42, UINT64_MAX, tensorflow::MathUtil::IPow<uint64>(2, 63)}};
ComputeAndCompareR1<uint64>(&builder, unsigned_x, {});
@@ -475,10 +475,9 @@ XLA_TEST_F(ConvertTest, ConvertBF16F32) {
}
// Exhaustively test all bf16 to f32 conversions.
- xla::XlaOp all_bfloats_bf16 = builder.ConstantR1<bfloat16>(all_bfloats);
- xla::XlaOp all_bfloats_f32 =
- builder.ConvertElementType(all_bfloats_bf16, F32);
- xla::XlaOp all_bfloats_u32 = builder.BitcastConvertType(all_bfloats_f32, U32);
+ xla::XlaOp all_bfloats_bf16 = ConstantR1<bfloat16>(&builder, all_bfloats);
+ xla::XlaOp all_bfloats_f32 = ConvertElementType(all_bfloats_bf16, F32);
+ BitcastConvertType(all_bfloats_f32, U32);
ComputeAndCompareR1<uint32>(&builder, expected, {});
}
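
A detail worth noting in convert_test.cc: ops that already have an operand, such as ConvertElementType and BitcastConvertType, take no XlaBuilder* argument in the new API, because an XlaOp carries a reference back to the builder that created it. A minimal sketch:

  XlaBuilder builder("convert_example");
  XlaOp a = ConstantR1<float>(&builder, {42.6f, 64.4f});
  XlaOp b = ConvertElementType(a, S32);  // value conversion: {42, 64}
  BitcastConvertType(b, U32);            // reinterprets the bits, not the value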
diff --git a/tensorflow/compiler/xla/tests/convolution_dimension_numbers_test.cc b/tensorflow/compiler/xla/tests/convolution_dimension_numbers_test.cc
index b5a42e3059..944366410b 100644
--- a/tensorflow/compiler/xla/tests/convolution_dimension_numbers_test.cc
+++ b/tensorflow/compiler/xla/tests/convolution_dimension_numbers_test.cc
@@ -93,14 +93,15 @@ XLA_TEST_F(ConvolutionDimensionNumbersTest,
auto weight_array = MakeUnique<Array4D<float>>(4, 3, 1, 1);
weight_array->FillWithMultiples(0.2);
auto weight_data =
- client_->TransferToServer(*Literal::CreateR4FromArray4D(*weight_array))
+ client_
+ ->TransferToServer(*LiteralUtil::CreateR4FromArray4D(*weight_array))
.ConsumeValueOrDie();
XlaBuilder builder(TestName());
- auto input = builder.ConstantR4FromArray4D<float>(*input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, *input_array);
auto weight =
- builder.Parameter(0, ShapeUtil::MakeShape(F32, {4, 3, 1, 1}), "weight");
- auto conv1 = builder.Conv(input, weight, {1, 1}, Padding::kValid);
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {4, 3, 1, 1}), "weight");
+ auto conv1 = Conv(input, weight, {1, 1}, Padding::kValid);
ConvolutionDimensionNumbers dim_nums =
XlaBuilder::CreateDefaultConvDimensionNumbers();
@@ -117,8 +118,7 @@ XLA_TEST_F(ConvolutionDimensionNumbersTest,
dim_nums.set_kernel_input_feature_dimension(
dim_nums.kernel_output_feature_dimension());
dim_nums.set_kernel_output_feature_dimension(old_kernel_input_feature_dim);
- builder.ConvWithGeneralDimensions(input, conv1, {1, 1}, Padding::kValid,
- dim_nums);
+ ConvWithGeneralDimensions(input, conv1, {1, 1}, Padding::kValid, dim_nums);
auto expected_conv1 = ReferenceUtil::ConvArray4D(*input_array, *weight_array,
{1, 1}, Padding::kValid);
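
The convolution hunks that follow repeat the same transformation for Conv and its general-dimension variants. A minimal sketch of the free-function form, assuming the default (batch, feature, spatial...) dimension numbers:

  XlaBuilder builder("conv_example");
  XlaOp input = Parameter(&builder, 0,
                          ShapeUtil::MakeShape(F32, {1, 1, 4, 4}), "input");
  XlaOp filter = Parameter(&builder, 1,
                           ShapeUtil::MakeShape(F32, {1, 1, 2, 2}), "filter");
  Conv(input, filter, /*window_strides=*/{1, 1}, Padding::kValid);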
diff --git a/tensorflow/compiler/xla/tests/convolution_test.cc b/tensorflow/compiler/xla/tests/convolution_test.cc
index 346bb3a399..a8b8f74ca9 100644
--- a/tensorflow/compiler/xla/tests/convolution_test.cc
+++ b/tensorflow/compiler/xla/tests/convolution_test.cc
@@ -25,7 +25,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/padding.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/reference_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -89,9 +89,9 @@ class ForwardPassConvolution_3x3x256_256_OutputZ_Iota : public ConvolutionTest {
ASSERT_EQ(2, arhs->height());
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR4FromArray4D<T>(*alhs);
- auto rhs = builder.ConstantR4FromArray4D<T>(*arhs);
- builder.Conv(lhs, rhs, {1, 1}, Padding::kValid);
+ auto lhs = ConstantR4FromArray4D<T>(&builder, *alhs);
+ auto rhs = ConstantR4FromArray4D<T>(&builder, *arhs);
+ Conv(lhs, rhs, {1, 1}, Padding::kValid);
ComputeAndCompare(&builder, {}, error_spec_);
}
@@ -109,9 +109,9 @@ class Convolve_1x1x1x2_1x1x1x2_Valid : public ConvolutionTest {
XlaBuilder builder(TestName());
Shape input_shape = ShapeUtil::MakeShapeWithType<T>({1, 1, 1, 2});
Shape filter_shape = ShapeUtil::MakeShapeWithType<T>({1, 1, 1, 2});
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<T> input_data(1, 1, 1, 2);
input_data.FillWithYX(Array2D<T>({
@@ -123,8 +123,8 @@ class Convolve_1x1x1x2_1x1x1x2_Valid : public ConvolutionTest {
}));
ComputeAndCompare(&builder,
- {std::move(*Literal::CreateFromArray(input_data)),
- std::move(*Literal::CreateFromArray(filter_data))},
+ {std::move(*LiteralUtil::CreateFromArray(input_data)),
+ std::move(*LiteralUtil::CreateFromArray(filter_data))},
error_spec_);
}
};
@@ -140,9 +140,9 @@ class Convolve_1x1x4x4_1x1x2x2_Valid : public ConvolutionTest {
XlaBuilder builder(TestName());
Shape input_shape = ShapeUtil::MakeShapeWithType<T>({1, 1, 4, 4});
Shape filter_shape = ShapeUtil::MakeShapeWithType<T>({1, 1, 2, 2});
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<T> input_data(1, 1, 4, 4);
input_data.FillWithYX(Array2D<T>({
@@ -157,8 +157,8 @@ class Convolve_1x1x4x4_1x1x2x2_Valid : public ConvolutionTest {
{7.0f, 8.0f},
}));
ComputeAndCompare(&builder,
- {std::move(*Literal::CreateFromArray(input_data)),
- std::move(*Literal::CreateFromArray(filter_data))},
+ {std::move(*LiteralUtil::CreateFromArray(input_data)),
+ std::move(*LiteralUtil::CreateFromArray(filter_data))},
error_spec_);
}
};
@@ -174,9 +174,9 @@ class Convolve_1x1x4x4_1x1x2x2_Same : public ConvolutionTest {
XlaBuilder builder(TestName());
Shape input_shape = ShapeUtil::MakeShapeWithType<T>({1, 1, 4, 4});
Shape filter_shape = ShapeUtil::MakeShapeWithType<T>({1, 1, 2, 2});
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
- builder.Conv(input, filter, {1, 1}, Padding::kSame);
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
+ Conv(input, filter, {1, 1}, Padding::kSame);
Array4D<T> input_data(1, 1, 4, 4);
input_data.FillWithYX(Array2D<T>({
@@ -192,8 +192,8 @@ class Convolve_1x1x4x4_1x1x2x2_Same : public ConvolutionTest {
}));
ComputeAndCompare(&builder,
- {std::move(*Literal::CreateFromArray(input_data)),
- std::move(*Literal::CreateFromArray(filter_data))},
+ {std::move(*LiteralUtil::CreateFromArray(input_data)),
+ std::move(*LiteralUtil::CreateFromArray(filter_data))},
error_spec_);
}
};
@@ -210,9 +210,9 @@ class Convolve_1x1x4x4_1x1x3x3_Same : public ConvolutionTest {
XlaBuilder builder(TestName());
Shape input_shape = ShapeUtil::MakeShapeWithType<T>({1, 1, 4, 4});
Shape filter_shape = ShapeUtil::MakeShapeWithType<T>({1, 1, 3, 3});
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
- builder.Conv(input, filter, {1, 1}, Padding::kSame);
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
+ Conv(input, filter, {1, 1}, Padding::kSame);
Array4D<T> input_data(1, 1, 4, 4);
input_data.FillWithYX(Array2D<T>({{1.0f, 2.0f, 3.0f, 4.0f},
@@ -224,8 +224,8 @@ class Convolve_1x1x4x4_1x1x3x3_Same : public ConvolutionTest {
{{5.0f, 6.0f, 7.0f}, {8.0f, 9.0f, 10.0f}, {11.0f, 12.0f, 13.0f}}));
// clang-format on
ComputeAndCompare(&builder,
- {std::move(*Literal::CreateFromArray(input_data)),
- std::move(*Literal::CreateFromArray(filter_data))},
+ {std::move(*LiteralUtil::CreateFromArray(input_data)),
+ std::move(*LiteralUtil::CreateFromArray(filter_data))},
error_spec_);
}
};
@@ -238,9 +238,9 @@ XLA_TEST_F(ConvolutionTest, Convolve1D_1x2x5_1x2x2_Valid) {
{
Shape input_shape = ShapeUtil::MakeShape(F32, {1, 2, 5});
Shape filter_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
- builder.Conv(input, filter, {1}, Padding::kValid);
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
+ Conv(input, filter, {1}, Padding::kValid);
}
Array3D<float> input({{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}}});
@@ -249,10 +249,10 @@ XLA_TEST_F(ConvolutionTest, Convolve1D_1x2x5_1x2x2_Valid) {
Array3D<float> expected({{{510, 610, 710, 810}}});
auto input_literal =
- client_->TransferToServer(*Literal::CreateR3FromArray3D(input))
+ client_->TransferToServer(*LiteralUtil::CreateR3FromArray3D(input))
.ConsumeValueOrDie();
auto filter_literal =
- client_->TransferToServer(*Literal::CreateR3FromArray3D(filter))
+ client_->TransferToServer(*LiteralUtil::CreateR3FromArray3D(filter))
.ConsumeValueOrDie();
ComputeAndCompareR3<float>(&builder, expected,
@@ -268,10 +268,10 @@ class Convolve1D_1x2x5_1x2x2_WithRHSDilation : public ConvolutionTest {
{
Shape input_shape = ShapeUtil::MakeShapeWithType<T>({1, 2, 5});
Shape filter_shape = ShapeUtil::MakeShapeWithType<T>({1, 2, 2});
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
// Convolution dimensions are bf0_oi0->bo0.
- builder.ConvGeneralDilated(
+ ConvGeneralDilated(
input, filter, /*window_strides=*/{1}, /*padding=*/{{0, 0}},
/*lhs_dilation=*/{1}, /*rhs_dilation=*/{2},
/*dimension_numbers=*/builder.CreateDefaultConvDimensionNumbers(1));
@@ -284,10 +284,10 @@ class Convolve1D_1x2x5_1x2x2_WithRHSDilation : public ConvolutionTest {
Array3D<T> expected({{{570.0f, 670.0f, 770.0f}}});
auto input_literal =
- client_->TransferToServer(*Literal::CreateR3FromArray3D(input))
+ client_->TransferToServer(*LiteralUtil::CreateR3FromArray3D(input))
.ConsumeValueOrDie();
auto filter_literal =
- client_->TransferToServer(*Literal::CreateR3FromArray3D(filter))
+ client_->TransferToServer(*LiteralUtil::CreateR3FromArray3D(filter))
.ConsumeValueOrDie();
ComputeAndCompareR3<T>(&builder, expected,
@@ -304,10 +304,10 @@ XLA_TEST_F(ConvolutionTest, Convolve1D_1x2x5_1x2x2_WithLHSDilation) {
{
Shape input_shape = ShapeUtil::MakeShape(F32, {1, 2, 5});
Shape filter_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
// Convolution dimensions are bf0_oi0->bo0.
- builder.ConvGeneralDilated(
+ ConvGeneralDilated(
input, filter, /*window_strides=*/{1}, /*padding=*/{{0, 0}},
/*lhs_dilation=*/{2}, /*rhs_dilation=*/{1},
/*dimension_numbers=*/builder.CreateDefaultConvDimensionNumbers(1));
@@ -319,10 +319,10 @@ XLA_TEST_F(ConvolutionTest, Convolve1D_1x2x5_1x2x2_WithLHSDilation) {
Array3D<float> expected({{{190, 320, 230, 380, 270, 440, 310, 500}}});
auto input_literal =
- client_->TransferToServer(*Literal::CreateR3FromArray3D(input))
+ client_->TransferToServer(*LiteralUtil::CreateR3FromArray3D(input))
.ConsumeValueOrDie();
auto filter_literal =
- client_->TransferToServer(*Literal::CreateR3FromArray3D(filter))
+ client_->TransferToServer(*LiteralUtil::CreateR3FromArray3D(filter))
.ConsumeValueOrDie();
ComputeAndCompareR3<float>(&builder, expected,
@@ -335,10 +335,10 @@ XLA_TEST_F(ConvolutionTest, Convolve1D_1x2x5_1x2x2_WithLHSAndRHSDilation) {
{
Shape input_shape = ShapeUtil::MakeShape(F32, {1, 2, 5});
Shape filter_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
// Convolution dimensions are bf0_oi0->bo0.
- builder.ConvGeneralDilated(
+ ConvGeneralDilated(
input, filter, /*window_strides=*/{1}, /*padding=*/{{0, 0}},
/*lhs_dilation=*/{2}, /*rhs_dilation=*/{2},
/*dimension_numbers=*/builder.CreateDefaultConvDimensionNumbers(1));
@@ -350,10 +350,10 @@ XLA_TEST_F(ConvolutionTest, Convolve1D_1x2x5_1x2x2_WithLHSAndRHSDilation) {
Array3D<float> expected({{{510, 0, 610, 0, 710, 0, 810}}});
auto input_literal =
- client_->TransferToServer(*Literal::CreateR3FromArray3D(input))
+ client_->TransferToServer(*LiteralUtil::CreateR3FromArray3D(input))
.ConsumeValueOrDie();
auto filter_literal =
- client_->TransferToServer(*Literal::CreateR3FromArray3D(filter))
+ client_->TransferToServer(*LiteralUtil::CreateR3FromArray3D(filter))
.ConsumeValueOrDie();
ComputeAndCompareR3<float>(&builder, expected,
@@ -369,10 +369,10 @@ class Convolve1D_1x2x5_1x2x2_WithPadding : public ConvolutionTest {
{
Shape input_shape = ShapeUtil::MakeShapeWithType<T>({1, 2, 5});
Shape filter_shape = ShapeUtil::MakeShapeWithType<T>({1, 2, 2});
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
// Convolution dimensions are bf0_oi0->bo0.
- builder.ConvGeneralDilated(
+ ConvGeneralDilated(
input, filter, /*window_strides=*/{1}, /*padding=*/{{2, 2}},
/*lhs_dilation=*/{1}, /*rhs_dilation=*/{1},
/*dimension_numbers=*/builder.CreateDefaultConvDimensionNumbers(1));
@@ -386,10 +386,10 @@ class Convolve1D_1x2x5_1x2x2_WithPadding : public ConvolutionTest {
{{{0.0f, 260.0f, 510.0f, 610.0f, 710.0f, 810.0f, 350.0f, 0.0f}}});
auto input_literal =
- client_->TransferToServer(*Literal::CreateR3FromArray3D(input))
+ client_->TransferToServer(*LiteralUtil::CreateR3FromArray3D(input))
.ConsumeValueOrDie();
auto filter_literal =
- client_->TransferToServer(*Literal::CreateR3FromArray3D(filter))
+ client_->TransferToServer(*LiteralUtil::CreateR3FromArray3D(filter))
.ConsumeValueOrDie();
ComputeAndCompareR3<T>(&builder, expected,
@@ -408,8 +408,8 @@ XLA_TEST_F(ConvolutionTest, Convolve3D_1x4x2x3x3_2x2x2x3x3_Valid) {
Shape input_shape = ShapeUtil::MakeShape(F32, input_dims);
Shape filter_shape = ShapeUtil::MakeShape(F32, filter_dims);
{
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
// Tensorflow dimension numbers for 3D convolution.
ConvolutionDimensionNumbers dnums;
@@ -429,21 +429,20 @@ XLA_TEST_F(ConvolutionTest, Convolve3D_1x4x2x3x3_2x2x2x3x3_Valid) {
dnums.set_kernel_input_feature_dimension(3);
dnums.set_kernel_output_feature_dimension(4);
- builder.ConvWithGeneralDimensions(input, filter, {1, 1, 1}, Padding::kValid,
- dnums);
+ ConvWithGeneralDimensions(input, filter, {1, 1, 1}, Padding::kValid, dnums);
}
std::vector<float> input_elems(ShapeUtil::ElementsIn(input_shape));
iota(input_elems.begin(), input_elems.end(), 1.0f);
- auto input_r1 = Literal::CreateR1<float>(input_elems);
+ auto input_r1 = LiteralUtil::CreateR1<float>(input_elems);
auto input_r5 = input_r1->Reshape(input_dims).ConsumeValueOrDie();
std::vector<float> filter_elems(ShapeUtil::ElementsIn(filter_shape));
iota(filter_elems.begin(), filter_elems.end(), 1.0f);
- auto filter_r1 = Literal::CreateR1<float>(filter_elems);
+ auto filter_r1 = LiteralUtil::CreateR1<float>(filter_elems);
auto filter_r5 = filter_r1->Reshape(filter_dims).ConsumeValueOrDie();
- auto expected_r1 = Literal::CreateR1<float>(
+ auto expected_r1 = LiteralUtil::CreateR1<float>(
{19554, 19962, 20370, 22110, 22590, 23070, 34890, 35730, 36570, 37446,
38358, 39270, 50226, 51498, 52770, 52782, 54126, 55470});
auto expected_r5 = expected_r1->Reshape({1, 3, 1, 2, 3}).ConsumeValueOrDie();
@@ -475,8 +474,8 @@ class Convolve2D_1x3x3x5_3x3x5x5_Valid : public ConvolutionTest {
Shape input_shape = ShapeUtil::MakeShapeWithType<T>(input_dims);
Shape filter_shape = ShapeUtil::MakeShapeWithType<T>(filter_dims);
{
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
// Tensorflow dimension numbers for 2D convolution.
ConvolutionDimensionNumbers dnums;
@@ -493,21 +492,20 @@ class Convolve2D_1x3x3x5_3x3x5x5_Valid : public ConvolutionTest {
dnums.set_kernel_input_feature_dimension(2);
dnums.set_kernel_output_feature_dimension(3);
- builder.ConvWithGeneralDimensions(input, filter, {1, 1}, Padding::kValid,
- dnums);
+ ConvWithGeneralDimensions(input, filter, {1, 1}, Padding::kValid, dnums);
}
std::vector<T> input_elems(ShapeUtil::ElementsIn(input_shape));
iota_int_init_value(input_elems, 1);
- auto input_r1 = Literal::CreateR1<T>(input_elems);
+ auto input_r1 = LiteralUtil::CreateR1<T>(input_elems);
auto input_r4 = input_r1->Reshape(input_dims).ConsumeValueOrDie();
std::vector<T> filter_elems(ShapeUtil::ElementsIn(filter_shape));
iota_int_init_value(filter_elems, 1);
- auto filter_r1 = Literal::CreateR1<T>(filter_elems);
+ auto filter_r1 = LiteralUtil::CreateR1<T>(filter_elems);
auto filter_r4 = filter_r1->Reshape(filter_dims).ConsumeValueOrDie();
- auto expected_r1 = Literal::CreateR1<T>(
+ auto expected_r1 = LiteralUtil::CreateR1<T>(
{static_cast<T>(92115), static_cast<T>(93150), static_cast<T>(94185)});
auto expected_r4 = expected_r1->Reshape({1, 1, 1, 3}).ConsumeValueOrDie();
@@ -541,8 +539,8 @@ XLA_TEST_P(ConvolveWithAndWithoutCanonicalization,
Shape input_shape = ShapeUtil::MakeShape(F32, {4, 29});
Shape filter_shape = ShapeUtil::MakeShape(F32, {4, 10});
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
ConvolutionDimensionNumbers dnums;
dnums.set_input_feature_dimension(0);
@@ -551,7 +549,7 @@ XLA_TEST_P(ConvolveWithAndWithoutCanonicalization,
dnums.set_kernel_output_feature_dimension(1);
dnums.set_output_batch_dimension(0);
dnums.set_output_feature_dimension(1);
- builder.ConvWithGeneralDimensions(input, filter, {}, Padding::kValid, dnums);
+ ConvWithGeneralDimensions(input, filter, {}, Padding::kValid, dnums);
Array2D<float> param0(4, 29);
param0.FillUnique();
@@ -563,8 +561,8 @@ XLA_TEST_P(ConvolveWithAndWithoutCanonicalization,
expected_result.Fill(0);
ComputeAndCompare(&builder,
- {std::move(*Literal::CreateFromArray(param0)),
- std::move(*Literal::CreateFromArray(param1))},
+ {std::move(*LiteralUtil::CreateFromArray(param0)),
+ std::move(*LiteralUtil::CreateFromArray(param1))},
error_spec_);
}
@@ -599,8 +597,8 @@ class Convolve1D1WindowTestBase
Shape input_shape = ShapeUtil::MakeShapeWithType<T>(input_dims);
Shape filter_shape = ShapeUtil::MakeShapeWithType<T>(filter_dims);
{
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
// Tensorflow dimension numbers for 1D convolution.
ConvolutionDimensionNumbers dnums;
@@ -614,24 +612,23 @@ class Convolve1D1WindowTestBase
dnums.set_kernel_input_feature_dimension(1);
dnums.set_kernel_output_feature_dimension(2);
- builder.ConvWithGeneralDimensions(input, filter, {1}, Padding::kValid,
- dnums);
+ ConvWithGeneralDimensions(input, filter, {1}, Padding::kValid, dnums);
}
std::vector<T> input_elems(ShapeUtil::ElementsIn(input_shape),
static_cast<T>(1.0f));
- auto input_r1 = Literal::CreateR1<T>(input_elems);
+ auto input_r1 = LiteralUtil::CreateR1<T>(input_elems);
auto input_r3 = input_r1->Reshape(input_dims).ConsumeValueOrDie();
std::vector<T> filter_elems(ShapeUtil::ElementsIn(filter_shape),
static_cast<T>(1.0f));
- auto filter_r1 = Literal::CreateR1<T>(filter_elems);
+ auto filter_r1 = LiteralUtil::CreateR1<T>(filter_elems);
auto filter_r3 = filter_r1->Reshape(filter_dims).ConsumeValueOrDie();
std::vector<T> expect_elems(batch * output_feature * num_windows,
static_cast<T>(window_size * input_feature));
- auto expected_r1 = Literal::CreateR1<T>(expect_elems);
+ auto expected_r1 = LiteralUtil::CreateR1<T>(expect_elems);
auto expected_r3 =
expected_r1->Reshape({batch, num_windows, output_feature})
.ConsumeValueOrDie();
@@ -726,9 +723,9 @@ XLA_TEST_F(ConvolutionTest, Convolve_bf16_1x1x1x2_1x1x1x2_Valid) {
XlaBuilder builder(TestName());
Shape input_shape = ShapeUtil::MakeShape(BF16, {1, 1, 1, 2});
Shape filter_shape = ShapeUtil::MakeShape(BF16, {1, 1, 1, 2});
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<bfloat16> input_data(1, 1, 1, 2);
input_data.FillWithYX(Array2D<bfloat16>({
@@ -740,8 +737,8 @@ XLA_TEST_F(ConvolutionTest, Convolve_bf16_1x1x1x2_1x1x1x2_Valid) {
}));
ComputeAndCompare(&builder,
- {std::move(*Literal::CreateFromArray(input_data)),
- std::move(*Literal::CreateFromArray(filter_data))},
+ {std::move(*LiteralUtil::CreateFromArray(input_data)),
+ std::move(*LiteralUtil::CreateFromArray(filter_data))},
error_spec_);
}
@@ -754,9 +751,9 @@ XLA_TEST_F(ConvolutionTest, NoCudnnAlgorithmPicker) {
XlaBuilder builder(TestName());
Shape input_shape = ShapeUtil::MakeShape(F32, {1, 1, 1, 2});
Shape filter_shape = ShapeUtil::MakeShape(F32, {1, 1, 1, 2});
- auto input = builder.Parameter(0, input_shape, "input");
- auto filter = builder.Parameter(1, filter_shape, "filter");
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto filter = Parameter(&builder, 1, filter_shape, "filter");
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> input_data(1, 1, 1, 2);
input_data.FillIota(0);
@@ -764,8 +761,8 @@ XLA_TEST_F(ConvolutionTest, NoCudnnAlgorithmPicker) {
filter_data.FillIota(10);
ComputeAndCompare(&builder,
- {std::move(*Literal::CreateFromArray(input_data)),
- std::move(*Literal::CreateFromArray(filter_data))});
+ {std::move(*LiteralUtil::CreateFromArray(input_data)),
+ std::move(*LiteralUtil::CreateFromArray(filter_data))});
}
} // namespace
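
The hunks above apply one mechanical change throughout convolution_test.cc: XLA op builders move from XlaBuilder member functions to free functions that take the builder explicitly or recover it from their XlaOp operands. A minimal sketch of the pattern, not taken verbatim from the diff ("example" and the variable names are placeholders):

  XlaBuilder builder("example");
  Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 1, 2});
  // Previously: auto x = builder.Parameter(0, shape, "x");
  auto x = Parameter(&builder, 0, shape, "x");
  auto w = Parameter(&builder, 1, shape, "w");
  // Previously: builder.Conv(x, w, {1, 1}, Padding::kValid);
  // The free function needs no builder argument; it is inferred from x and w.
  Conv(x, w, /*window_strides=*/{1, 1}, Padding::kValid);

This is also why several multi-line ConvWithGeneralDimensions calls collapse to a single line in the hunks above: dropping the "builder." receiver shortens the call enough to fit.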
diff --git a/tensorflow/compiler/xla/tests/convolution_variants_test.cc b/tensorflow/compiler/xla/tests/convolution_variants_test.cc
index fea850dc13..8792e7781b 100644
--- a/tensorflow/compiler/xla/tests/convolution_variants_test.cc
+++ b/tensorflow/compiler/xla/tests/convolution_variants_test.cc
@@ -28,7 +28,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/padding.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/reference_util.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
#include "tensorflow/compiler/xla/tests/literal_test_util.h"
@@ -55,12 +55,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Minimal) {
XlaBuilder builder(TestName());
const Array4D<float> input_array(1, 1, 1, 1, {2});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 1, {3});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
const Array4D<float> expected(1, 1, 1, 1, {6});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -70,12 +70,12 @@ XLA_TEST_F(ConvolutionVariantsTest, MinimalWithBatch) {
XlaBuilder builder(TestName());
const Array4D<float> input_array(5, 1, 1, 1, {1, 2, 3, 4, 5});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 1, {2});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
const Array4D<float> expected(5, 1, 1, 1, {2, 4, 6, 8, 10});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -86,12 +86,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Flat1x1) {
Array4D<float> input_array(2, 1, 3, 4);
input_array.FillWithMultiples(1);
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 1, {2.3});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(2, 1, 3, 4);
expected.FillWithMultiples(2.3);
@@ -102,12 +102,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Deep1x1) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 2, 1, 1, {10, 1});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(3, 2, 1, 1, {1, 2, 3, 4, 5, 6});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(1, 3, 1, 1, {12, 34, 56});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -117,12 +117,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x2in1x2) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 1, 2, {1, 2});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 2, {10, 1});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(1, 1, 1, 1, {12});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -132,12 +132,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x2in1x3) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 1, 3, {1, 2, 3});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 2, {10, 1});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(1, 1, 1, 2, {12, 23});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -147,12 +147,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x2in2x2) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 2, 2, {1, 2, 3, 4});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 2, {10, 1});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(1, 1, 2, 1, {12, 34});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -162,12 +162,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter2x1in2x2) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 2, 2, {1, 2, 3, 4});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 2, 1, {10, 1});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(1, 1, 1, 2, {13, 24});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -177,12 +177,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter2x2in2x2) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 2, 2, {1, 2, 3, 4});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 2, 2, {1000, 100, 10, 1});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(1, 1, 1, 1, {1234});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -194,13 +194,13 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x2in2x3WithDepthAndBatch) {
Array4D<float> input_array(
2, 2, 2, 3, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, // plane 0
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 0, 0}); // plane 1
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(
2, 2, 1, 2, {1000, 100, 10, 1, 0.1, 0.01, 0.001, 0.0001});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(
2, 2, 2, 2,
@@ -213,12 +213,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1stride1x2in1x4) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 1, 4, {1, 2, 3, 4});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 1, {10});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 2}, Padding::kValid);
+ Conv(input, filter, {1, 2}, Padding::kValid);
Array4D<float> expected(1, 1, 1, 2, {10, 30});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -228,12 +228,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1stride1x2in1x5) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 1, 5, {1, 2, 3, 4, 5});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 1, {10});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 2}, Padding::kValid);
+ Conv(input, filter, {1, 2}, Padding::kValid);
Array4D<float> expected(1, 1, 1, 3, {10, 30, 50});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -243,12 +243,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x3stride1x2in1x4) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 1, 4, {1, 2, 3, 4});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 3, {100, 10, 1});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 2}, Padding::kValid);
+ Conv(input, filter, {1, 2}, Padding::kValid);
Array4D<float> expected(1, 1, 1, 1, {123});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -258,12 +258,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x3stride1x2in1x5) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 1, 5, {1, 2, 3, 4, 5});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 3, {100, 10, 1});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 2}, Padding::kValid);
+ Conv(input, filter, {1, 2}, Padding::kValid);
Array4D<float> expected(1, 1, 1, 2, {123, 345});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -273,12 +273,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1stride2x2in3x3) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 3, 3, {1, 2, 3, 4, 5, 6, 7, 8, 9});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 1, {10});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {2, 2}, Padding::kValid);
+ Conv(input, filter, {2, 2}, Padding::kValid);
Array4D<float> expected(1, 1, 2, 2, {10, 30, 70, 90});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -288,12 +288,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter3x1in1x1Padded) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 1, 1, {1});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 3, {10, 20, 30});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kSame);
+ Conv(input, filter, {1, 1}, Padding::kSame);
Array4D<float> expected(1, 1, 1, 1, {20});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -303,12 +303,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter5x1in3x1Padded) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 1, 3, {1, 2, 3});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 5, {10000, 1000, 100, 10, 1});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kSame);
+ Conv(input, filter, {1, 1}, Padding::kSame);
Array4D<float> expected(1, 1, 1, 3, {123, 1230, 12300});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -318,15 +318,15 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter3x3in2x2Padded) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 2, 2, {1, 2, 3, 4});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 3, 3,
{10000, 0, 1000, // row 0
0, 100, 0, // row 1
10, 0, 1}); // row 2
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kSame);
+ Conv(input, filter, {1, 1}, Padding::kSame);
Array4D<float> expected(1, 1, 2, 2, {104, 230, 2300, 10400});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -336,12 +336,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1in2x1WithPaddingAndDepth) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 2, 1, 2, {1, 2, 3, 4});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 2, 1, 1, {10, 1});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kSame);
+ Conv(input, filter, {1, 1}, Padding::kSame);
Array4D<float> expected(1, 1, 1, 2, {13, 24});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -351,12 +351,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter2x2Stride1x1Input3x3) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 3, 3, {1, 2, 3, 4, 5, 6, 7, 8, 9});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 2, 2, {7, 13, 17, 23});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(1, 1, 2, 2, {216, 276, 396, 456});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -366,12 +366,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x2Stride1x1Input1x3) {
XlaBuilder builder(TestName());
Array4D<float> input_array(1, 1, 1, 3, {1, 2, 3});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
const Array4D<float> filter_array(1, 1, 1, 2, {7, 13});
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(1, 1, 1, 2, {33, 53});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -383,15 +383,15 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter2x1x8x8Input1x1x8x8) {
std::vector<float> input_data(64);
std::iota(input_data.begin(), input_data.end(), 0.0);
Array4D<float> input_array(1, 1, 8, 8, input_data);
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
std::vector<float> filter_data(128);
std::fill(filter_data.begin(), filter_data.begin() + 64, 1.0);
std::fill(filter_data.begin() + 64, filter_data.begin() + 128, 2.0);
const Array4D<float> filter_array(2, 1, 8, 8, filter_data);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(1, 2, 1, 1, {2016, 4032});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -403,14 +403,14 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1x1x1Input16x1x1x1) {
std::vector<float> input_data(16 * 1 * 1 * 1);
std::iota(input_data.begin(), input_data.end(), 1.0);
Array4D<float> input_array(16, 1, 1, 1, input_data);
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
std::vector<float> filter_data(1 * 1 * 1 * 1);
std::iota(filter_data.begin(), filter_data.end(), 1.0);
const Array4D<float> filter_array(1, 1, 1, 1, filter_data);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
std::vector<float> expected_data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16};
@@ -432,14 +432,14 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1x2x2Input16x1x2x2) {
}
}
}
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
std::vector<float> filter_data(1 * 1 * ky * kx);
std::iota(filter_data.begin(), filter_data.end(), 1.0);
const Array4D<float> filter_array(1, 1, ky, kx, filter_data);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
std::vector<float> expected_data(bs);
for (int i = 0; i < bs; ++i) {
@@ -463,14 +463,14 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1x2x2Input3x1x2x2) {
}
}
}
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
std::vector<float> filter_data(1 * 1 * ky * kx);
std::iota(filter_data.begin(), filter_data.end(), 1.0);
const Array4D<float> filter_array(1, 1, ky, kx, filter_data);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
std::vector<float> expected_data = {
23,
@@ -492,14 +492,14 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1x8x8Input16x1x8x8) {
}
}
}
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
std::vector<float> filter_data(1 * 1 * 8 * 8);
std::iota(filter_data.begin(), filter_data.end(), 1.0);
const Array4D<float> filter_array(1, 1, 8, 8, filter_data);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
std::vector<float> expected_data = {
19664, 21744, 23824, 25904, 27984, 30064, 32144, 34224,
@@ -515,7 +515,7 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter2x2x8x8Input1x2x8x8) {
std::vector<float> input_data(2 * 8 * 8);
std::iota(input_data.begin(), input_data.end(), 0.0);
Array4D<float> input_array(1, 2, 8, 8, input_data);
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
std::vector<float> filter_data(2 * 2 * 8 * 8);
std::fill(filter_data.begin(), filter_data.begin() + filter_data.size() / 4,
@@ -527,9 +527,9 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter2x2x8x8Input1x2x8x8) {
std::fill(filter_data.begin() + 3 * filter_data.size() / 4, filter_data.end(),
4.0);
const Array4D<float> filter_array(2, 2, 8, 8, filter_data);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(1, 2, 1, 1, {14240, 30496});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -541,7 +541,7 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter2x2x8x8Input2x2x8x8) {
std::vector<float> input_data(2 * 2 * 8 * 8);
std::iota(input_data.begin(), input_data.end(), 0.0);
Array4D<float> input_array(2, 2, 8, 8, input_data);
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
std::vector<float> filter_data(2 * 2 * 8 * 8);
std::fill(filter_data.begin(), filter_data.begin() + filter_data.size() / 4,
@@ -553,9 +553,9 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter2x2x8x8Input2x2x8x8) {
std::fill(filter_data.begin() + 3 * filter_data.size() / 4, filter_data.end(),
4.0);
const Array4D<float> filter_array(2, 2, 8, 8, filter_data);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(2, 2, 1, 1, {14240, 30496, 38816, 87840});
ComputeAndCompareR4<float>(&builder, expected, {}, error_spec_);
@@ -567,7 +567,7 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter2x2x8x8Input32x2x8x8) {
std::vector<float> input_data(32 * 2 * 8 * 8);
std::iota(input_data.begin(), input_data.end(), 0.0);
Array4D<float> input_array(32, 2, 8, 8, input_data);
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
std::vector<float> filter_data(2 * 2 * 8 * 8);
std::fill(filter_data.begin(), filter_data.begin() + filter_data.size() / 4,
@@ -579,9 +579,9 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter2x2x8x8Input32x2x8x8) {
std::fill(filter_data.begin() + 3 * filter_data.size() / 4, filter_data.end(),
4.0);
const Array4D<float> filter_array(2, 2, 8, 8, filter_data);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ Conv(input, filter, {1, 1}, Padding::kValid);
std::vector<float> expected_data = {
14240, 30496, 38816, 87840, 63392, 145184, 87968,
@@ -613,9 +613,9 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter16x16x1x1Input16x16x1x1) {
}
}
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ Conv(input, filter, {1, 1}, Padding::kValid);
Array4D<float> expected(16, 16, 1, 1);
for (int i0 = 0; i0 < 16; ++i0) {
@@ -635,9 +635,9 @@ XLA_TEST_F(ConvolutionVariantsTest, FlatRhsDilation) {
Array4D<float> input_array(1, 1, 4, 6, input_data);
Array4D<float> filter_array(1, 1, 2, 3, {1, 10, 100, 2, 20, 200});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.ConvGeneralDilated(
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ ConvGeneralDilated(
/*lhs=*/input, /*rhs=*/filter, /*window_strides=*/{}, /*padding=*/{},
/*lhs_dilation=*/{}, /*rhs_dilation=*/{2, 2},
XlaBuilder::CreateDefaultConvDimensionNumbers());
@@ -654,9 +654,9 @@ XLA_TEST_F(ConvolutionVariantsTest, FlatLhsDilation1D) {
Array4D<float> input_array(1, 1, 1, 5, input_data);
Array4D<float> filter_array(1, 1, 1, 2, {10, 1});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.ConvGeneralDilated(
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ ConvGeneralDilated(
/*lhs=*/input, /*rhs=*/filter, /*window_strides=*/{}, /*padding=*/{},
/*lhs_dilation=*/{1, 2}, /*rhs_dilation=*/{},
XlaBuilder::CreateDefaultConvDimensionNumbers());
@@ -677,9 +677,9 @@ XLA_TEST_F(ConvolutionVariantsTest, FlatLhsDilation) {
200, 20, 2, //
300, 30, 3, //
400, 40, 4});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.ConvGeneralDilated(
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ ConvGeneralDilated(
/*lhs=*/input, /*rhs=*/filter, /*window_strides=*/{2, 1},
/*padding=*/{{1, 0}, {0, 0}}, /*lhs_dilation=*/{3, 2},
/*rhs_dilation=*/{}, XlaBuilder::CreateDefaultConvDimensionNumbers());
@@ -699,9 +699,9 @@ XLA_TEST_F(ConvolutionVariantsTest, NegativePaddingOnBothEnds) {
Array4D<float> input_array(1, 1, 1, 5, input_data);
Array4D<float> filter_array(1, 1, 1, 2, {10, 1});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.ConvGeneral(
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ ConvGeneral(
/*lhs=*/input, /*rhs=*/filter, /*window_strides=*/{},
/*padding=*/{{0, 0}, {-1, -1}},
XlaBuilder::CreateDefaultConvDimensionNumbers());
@@ -718,9 +718,9 @@ XLA_TEST_F(ConvolutionVariantsTest, NegativePaddingLowAndPositivePaddingHigh) {
Array4D<float> input_array(1, 1, 1, 5, input_data);
Array4D<float> filter_array(1, 1, 1, 2, {10, 1});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.ConvGeneral(
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ ConvGeneral(
/*lhs=*/input, /*rhs=*/filter, /*window_strides=*/{},
/*padding=*/{{0, 0}, {-1, 2}},
XlaBuilder::CreateDefaultConvDimensionNumbers());
@@ -737,9 +737,9 @@ XLA_TEST_F(ConvolutionVariantsTest, PositivePaddingLowAndNegativePaddingHigh) {
Array4D<float> input_array(1, 1, 1, 5, input_data);
Array4D<float> filter_array(1, 1, 1, 2, {10, 1});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.ConvGeneral(
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ ConvGeneral(
/*lhs=*/input, /*rhs=*/filter, /*window_strides=*/{},
/*padding=*/{{0, 0}, {2, -1}},
XlaBuilder::CreateDefaultConvDimensionNumbers());
@@ -756,9 +756,9 @@ XLA_TEST_F(ConvolutionVariantsTest, PositivePaddingAndDilation) {
Array4D<float> input_array(1, 1, 1, 5, input_data);
Array4D<float> filter_array(1, 1, 1, 2, {10, 1});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.ConvGeneralDilated(
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ ConvGeneralDilated(
/*lhs=*/input, /*rhs=*/filter, /*window_strides=*/{},
/*padding=*/{{0, 0}, {3, 2}},
/*lhs_dilation=*/{1, 2}, /*rhs_dilation=*/{1, 2},
@@ -781,9 +781,9 @@ XLA_TEST_F(ConvolutionVariantsTest, NegativePaddingAndDilation) {
Array4D<float> input_array(1, 1, 1, 5, input_data);
Array4D<float> filter_array(1, 1, 1, 2, {10, 1});
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.ConvGeneralDilated(
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ ConvGeneralDilated(
/*lhs=*/input, /*rhs=*/filter, /*window_strides=*/{},
/*padding=*/{{0, 0}, {-3, -2}},
/*lhs_dilation=*/{1, 2}, /*rhs_dilation=*/{1, 2},
@@ -821,9 +821,9 @@ XLA_TEST_F(ConvolutionVariantsTest, RandomData_Input1x1x2x3_Filter2x1x1x2) {
Array4D<float> filter_array(oz, iz, ky, kx, kernel_data);
XlaBuilder builder(TestName());
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ Conv(input, filter, {1, 1}, Padding::kValid);
std::unique_ptr<Array4D<float>> expected = ReferenceUtil::ConvArray4D(
input_array, filter_array, {1, 1}, Padding::kValid);
@@ -854,9 +854,9 @@ XLA_TEST_F(ConvolutionVariantsTest, RandomData_Input1x16x1x1_Filter1x16x1x1) {
Array4D<float> filter_array(oz, iz, ky, kx, kernel_data);
XlaBuilder builder(TestName());
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ Conv(input, filter, {1, 1}, Padding::kValid);
std::unique_ptr<Array4D<float>> expected = ReferenceUtil::ConvArray4D(
input_array, filter_array, {1, 1}, Padding::kValid);
@@ -887,9 +887,9 @@ XLA_TEST_F(ConvolutionVariantsTest, RandomData_Input16x16x1x1_Filter1x16x1x1) {
Array4D<float> filter_array(oz, iz, ky, kx, kernel_data);
XlaBuilder builder(TestName());
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ Conv(input, filter, {1, 1}, Padding::kValid);
std::unique_ptr<Array4D<float>> expected = ReferenceUtil::ConvArray4D(
input_array, filter_array, {1, 1}, Padding::kValid);
@@ -920,9 +920,9 @@ XLA_TEST_F(ConvolutionVariantsTest, RandomData_Input16x16x1x1_Filter16x16x1x1) {
Array4D<float> filter_array(oz, iz, ky, kx, kernel_data);
XlaBuilder builder(TestName());
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ Conv(input, filter, {1, 1}, Padding::kValid);
std::unique_ptr<Array4D<float>> expected = ReferenceUtil::ConvArray4D(
input_array, filter_array, {1, 1}, Padding::kValid);
@@ -954,9 +954,9 @@ XLA_TEST_F(ConvolutionVariantsTest,
Array4D<float> filter_array(oz, iz, ky, kx, kernel_data);
XlaBuilder builder(TestName());
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
- builder.Conv(input, filter, {1, 1}, Padding::kValid);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
+ Conv(input, filter, {1, 1}, Padding::kValid);
std::unique_ptr<Array4D<float>> expected = ReferenceUtil::ConvArray4D(
input_array, filter_array, {1, 1}, Padding::kValid);
@@ -970,12 +970,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x2x1x1Input1x2x3x1GeneralPadding) {
std::vector<float> input_data(1 * 2 * 3 * 1);
std::iota(input_data.begin(), input_data.end(), 1.0);
Array4D<float> input_array(1, 2, 3, 1, input_data);
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
std::vector<float> filter_data(1 * 2 * 1 * 1);
std::iota(filter_data.begin(), filter_data.end(), 1.0);
Array4D<float> filter_array(1, 2, 1, 1, filter_data);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
ConvolutionDimensionNumbers dnums;
// NHWC input format.
@@ -995,7 +995,7 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x2x1x1Input1x2x3x1GeneralPadding) {
dnums.set_kernel_output_feature_dimension(3);
  // Tests padding sizes that don't correspond to either SAME or VALID padding.
- builder.ConvGeneral(input, filter, {1, 1}, {{2, 1}, {2, 3}}, dnums);
+ ConvGeneral(input, filter, {1, 1}, {{2, 1}, {2, 3}}, dnums);
std::vector<float> expected_data = {
0, 0, 0, 0, 0, 0, 0, //
@@ -1014,12 +1014,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1x1x1Input1x2x3x1GeneralPadding) {
std::vector<float> input_data(1 * 2 * 3 * 1);
std::iota(input_data.begin(), input_data.end(), 1.0);
Array4D<float> input_array(1, 2, 3, 1, input_data);
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
std::vector<float> filter_data(1 * 1 * 1 * 1);
std::iota(filter_data.begin(), filter_data.end(), 2.0);
Array4D<float> filter_array(1, 1, 1, 1, filter_data);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
ConvolutionDimensionNumbers dnums;
// NHWC input format.
@@ -1039,7 +1039,7 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1x1x1Input1x2x3x1GeneralPadding) {
dnums.set_kernel_output_feature_dimension(3);
  // Tests padding sizes that don't correspond to either SAME or VALID padding.
- builder.ConvGeneral(input, filter, {1, 1}, {{2, 1}, {2, 3}}, dnums);
+ ConvGeneral(input, filter, {1, 1}, {{2, 1}, {2, 3}}, dnums);
std::vector<float> expected_data = {
0, 0, 0, 0, 0, 0, 0, 0, //
@@ -1058,12 +1058,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1x1x1Input1x2x3x1NoPadding) {
std::vector<float> input_data(1 * 2 * 3 * 1);
std::iota(input_data.begin(), input_data.end(), 1.0);
Array4D<float> input_array(1, 2, 3, 1, input_data);
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
std::vector<float> filter_data(1 * 1 * 1 * 1);
std::iota(filter_data.begin(), filter_data.end(), 2.0);
Array4D<float> filter_array(1, 1, 1, 1, filter_data);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
ConvolutionDimensionNumbers dnums;
// NHWC input format.
@@ -1083,7 +1083,7 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1x1x1Input1x2x3x1NoPadding) {
dnums.set_kernel_output_feature_dimension(3);
// Tests zero padding sizes. This can use matmul for computation.
- builder.ConvGeneral(input, filter, {1, 1}, {{0, 0}, {0, 0}}, dnums);
+ ConvGeneral(input, filter, {1, 1}, {{0, 0}, {0, 0}}, dnums);
std::vector<float> expected_data = {
2, 4, 6, //
@@ -1099,12 +1099,12 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1x2x3Input1x2x3x2NoPadding) {
std::vector<float> input_data(1 * 2 * 3 * 2);
std::iota(input_data.begin(), input_data.end(), 1.0);
Array4D<float> input_array(1, 2, 3, 2, input_data);
- auto input = builder.ConstantR4FromArray4D<float>(input_array);
+ auto input = ConstantR4FromArray4D<float>(&builder, input_array);
std::vector<float> filter_data(1 * 1 * 2 * 3);
std::iota(filter_data.begin(), filter_data.end(), 2.0);
Array4D<float> filter_array(1, 1, 2, 3, filter_data);
- auto filter = builder.ConstantR4FromArray4D<float>(filter_array);
+ auto filter = ConstantR4FromArray4D<float>(&builder, filter_array);
ConvolutionDimensionNumbers dnums;
// NHWC input format.
@@ -1124,7 +1124,7 @@ XLA_TEST_F(ConvolutionVariantsTest, Filter1x1x2x3Input1x2x3x2NoPadding) {
dnums.set_kernel_output_feature_dimension(3);
// Tests zero padding sizes. This can use matmul for computation.
- builder.ConvGeneral(input, filter, {1, 1}, {{0, 0}, {0, 0}}, dnums);
+ ConvGeneral(input, filter, {1, 1}, {{0, 0}, {0, 0}}, dnums);
std::vector<float> expected_data = {
12, 15, 18, //
@@ -1148,14 +1148,14 @@ XLA_TEST_F(ConvolutionVariantsTest,
BackwardInputLowPaddingLessThanHighPadding) {
XlaBuilder builder(TestName());
- auto gradients = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 3, /*values=*/{1, 2, 3}));
- auto weights = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 2, /*values=*/{5, 6}));
- auto mirrored_weights = builder.Rev(weights, {2, 3});
- builder.ConvWithGeneralPadding(gradients, mirrored_weights,
- /*window_strides=*/{1, 1},
- /*padding=*/{{0, 0}, {1, 0}});
+ auto gradients = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 3, /*values=*/{1, 2, 3}));
+ auto weights = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 2, /*values=*/{5, 6}));
+ auto mirrored_weights = Rev(weights, {2, 3});
+ ConvWithGeneralPadding(gradients, mirrored_weights,
+ /*window_strides=*/{1, 1},
+ /*padding=*/{{0, 0}, {1, 0}});
ComputeAndCompareR4<float>(&builder, {{{{5, 16, 27}}}}, {}, error_spec_);
}
@@ -1167,16 +1167,16 @@ XLA_TEST_F(ConvolutionVariantsTest,
BackwardInputLowPaddingGreaterThanHighPadding) {
XlaBuilder builder(TestName());
- auto gradients = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 1, /*values=*/{1}));
- auto weights = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 3, /*values=*/{1, 10, 100}));
- auto mirrored_weights = builder.Rev(weights, {2, 3});
- builder.ConvGeneralDilated(gradients, mirrored_weights,
- /*window_strides=*/{1, 1},
- /*padding=*/{{0, 0}, {0, 3}},
- /*lhs_dilation=*/{1, 3}, /*rhs_dilation=*/{},
- XlaBuilder::CreateDefaultConvDimensionNumbers());
+ auto gradients = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 1, /*values=*/{1}));
+ auto weights = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 3, /*values=*/{1, 10, 100}));
+ auto mirrored_weights = Rev(weights, {2, 3});
+ ConvGeneralDilated(gradients, mirrored_weights,
+ /*window_strides=*/{1, 1},
+ /*padding=*/{{0, 0}, {0, 3}},
+ /*lhs_dilation=*/{1, 3}, /*rhs_dilation=*/{},
+ XlaBuilder::CreateDefaultConvDimensionNumbers());
ComputeAndCompareR4<float>(&builder, {{{{100, 0}}}}, {}, error_spec_);
}
@@ -1187,14 +1187,14 @@ XLA_TEST_F(ConvolutionVariantsTest,
XLA_TEST_F(ConvolutionVariantsTest, BackwardInputEvenPadding) {
XlaBuilder builder(TestName());
- auto gradients = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 1, /*values=*/{1}));
- auto weights = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 3, /*values=*/{1, 10, 100}));
- auto mirrored_weights = builder.Rev(weights, {2, 3});
- builder.ConvWithGeneralPadding(gradients, mirrored_weights,
- /*window_strides=*/{1, 1},
- /*padding=*/{{0, 0}, {1, 1}});
+ auto gradients = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 1, /*values=*/{1}));
+ auto weights = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 3, /*values=*/{1, 10, 100}));
+ auto mirrored_weights = Rev(weights, {2, 3});
+ ConvWithGeneralPadding(gradients, mirrored_weights,
+ /*window_strides=*/{1, 1},
+ /*padding=*/{{0, 0}, {1, 1}});
ComputeAndCompareR4<float>(&builder, {{{{10}}}}, {}, error_spec_);
}
@@ -1208,14 +1208,14 @@ XLA_TEST_F(ConvolutionVariantsTest, BackwardInputEvenPadding) {
XLA_TEST_F(ConvolutionVariantsTest, BackwardInputWithNegativePaddingHigh) {
XlaBuilder builder(TestName());
- auto gradients = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 3, /*values=*/{1, 2, 3}));
- auto weights = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 2, /*values=*/{1, 10}));
- auto mirrored_weights = builder.Rev(weights, {2, 3});
- builder.ConvWithGeneralPadding(gradients, mirrored_weights,
- /*window_strides=*/{1, 1},
- /*padding=*/{{0, 0}, {0, 2}});
+ auto gradients = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 3, /*values=*/{1, 2, 3}));
+ auto weights = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 2, /*values=*/{1, 10}));
+ auto mirrored_weights = Rev(weights, {2, 3});
+ ConvWithGeneralPadding(gradients, mirrored_weights,
+ /*window_strides=*/{1, 1},
+ /*padding=*/{{0, 0}, {0, 2}});
ComputeAndCompareR4<float>(&builder, {{{{12, 23, 30, 0}}}}, {}, error_spec_);
}
@@ -1229,17 +1229,17 @@ XLA_TEST_F(ConvolutionVariantsTest,
// weight gradients: 24,130,240
//
// This pattern will be fused to backward convolution with padding=(1,2).
- auto activations = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 4, /*values=*/{1, 2, 3, 4}));
- auto gradients = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 3, /*values=*/{100, 10, 1}));
- auto forward_conv = builder.ConvGeneralDilated(
- activations, gradients,
- /*window_strides=*/{1, 1},
- /*padding=*/{{0, 0}, {1, 2}},
- /*lhs_dilation=*/{}, /*rhs_dilation=*/{1, 2},
- XlaBuilder::CreateDefaultConvDimensionNumbers());
- builder.Transpose(forward_conv, {0, 1, 2, 3});
+ auto activations = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 4, /*values=*/{1, 2, 3, 4}));
+ auto gradients = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 3, /*values=*/{100, 10, 1}));
+ auto forward_conv =
+ ConvGeneralDilated(activations, gradients,
+ /*window_strides=*/{1, 1},
+ /*padding=*/{{0, 0}, {1, 2}},
+ /*lhs_dilation=*/{}, /*rhs_dilation=*/{1, 2},
+ XlaBuilder::CreateDefaultConvDimensionNumbers());
+ Transpose(forward_conv, {0, 1, 2, 3});
ComputeAndCompareR4<float>(&builder, {{{{24, 130, 240}}}}, {}, error_spec_);
}
@@ -1255,17 +1255,17 @@ XLA_TEST_F(ConvolutionVariantsTest,
// This pattern will be fused to backward convolution with padding=(2,1).
// Note: both (2,1) and (2,0) are valid padding for the backward convolution
// because the stride is 2.
- auto activations = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 4, /*values=*/{1, 2, 3, 4}));
- auto gradients = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 3, /*values=*/{100, 10, 1}));
- auto forward_conv = builder.ConvGeneralDilated(
- activations, gradients,
- /*window_strides=*/{1, 1},
- /*padding=*/{{0, 0}, {2, 0}},
- /*lhs_dilation=*/{}, /*rhs_dilation=*/{1, 2},
- XlaBuilder::CreateDefaultConvDimensionNumbers());
- builder.Transpose(forward_conv, {0, 1, 2, 3});
+ auto activations = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 4, /*values=*/{1, 2, 3, 4}));
+ auto gradients = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 3, /*values=*/{100, 10, 1}));
+ auto forward_conv =
+ ConvGeneralDilated(activations, gradients,
+ /*window_strides=*/{1, 1},
+ /*padding=*/{{0, 0}, {2, 0}},
+ /*lhs_dilation=*/{}, /*rhs_dilation=*/{1, 2},
+ XlaBuilder::CreateDefaultConvDimensionNumbers());
+ Transpose(forward_conv, {0, 1, 2, 3});
ComputeAndCompareR4<float>(&builder, {{{{13, 24}}}}, {}, error_spec_);
}
@@ -1282,17 +1282,17 @@ XLA_TEST_F(ConvolutionVariantsTest, BackwardFilterEvenPadding) {
// because the stride is 2. ConvolutionFolding prefers (2,2) because cuDNN
  // supports only even padding -- using (2,1) would require extra
  // canonicalization work.
- auto activations = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 4, /*values=*/{1, 2, 3, 4}));
- auto gradients = builder.ConstantR4FromArray4D<float>(
- Array4D<float>(1, 1, 1, 3, /*values=*/{100, 10, 1}));
- auto forward_conv = builder.ConvGeneralDilated(
- activations, gradients,
- /*window_strides=*/{1, 1},
- /*padding=*/{{0, 0}, {2, 1}},
- /*lhs_dilation=*/{}, /*rhs_dilation=*/{1, 2},
- XlaBuilder::CreateDefaultConvDimensionNumbers());
- builder.Transpose(forward_conv, {0, 1, 2, 3});
+ auto activations = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 4, /*values=*/{1, 2, 3, 4}));
+ auto gradients = ConstantR4FromArray4D<float>(
+ &builder, Array4D<float>(1, 1, 1, 3, /*values=*/{100, 10, 1}));
+ auto forward_conv =
+ ConvGeneralDilated(activations, gradients,
+ /*window_strides=*/{1, 1},
+ /*padding=*/{{0, 0}, {2, 1}},
+ /*lhs_dilation=*/{}, /*rhs_dilation=*/{1, 2},
+ XlaBuilder::CreateDefaultConvDimensionNumbers());
+ Transpose(forward_conv, {0, 1, 2, 3});
ComputeAndCompareR4<float>(&builder, {{{{13, 24, 130}}}}, {}, error_spec_);
}
@@ -1300,14 +1300,14 @@ XLA_TEST_F(ConvolutionVariantsTest, BackwardFilterEvenPadding) {
XLA_TEST_F(ConvolutionVariantsTest, BackwardInputEvenPadding1D) {
XlaBuilder builder(TestName());
- auto gradients = builder.ConstantR3FromArray3D<float>(
- Array3D<float>(1, 1, 1, /*value=*/1));
+ auto gradients = ConstantR3FromArray3D<float>(
+ &builder, Array3D<float>(1, 1, 1, /*value=*/1));
auto weights =
- builder.ConstantR3FromArray3D<float>(Array3D<float>({{{1, 10, 100}}}));
- auto mirrored_weights = builder.Rev(weights, {2});
- builder.ConvWithGeneralPadding(gradients, mirrored_weights,
- /*window_strides=*/{1},
- /*padding=*/{{1, 1}});
+ ConstantR3FromArray3D<float>(&builder, Array3D<float>({{{1, 10, 100}}}));
+ auto mirrored_weights = Rev(weights, {2});
+ ConvWithGeneralPadding(gradients, mirrored_weights,
+ /*window_strides=*/{1},
+ /*padding=*/{{1, 1}});
ComputeAndCompareR3<float>(&builder, {{{10}}}, {}, error_spec_);
}
@@ -1315,17 +1315,17 @@ XLA_TEST_F(ConvolutionVariantsTest, BackwardFilterEvenPadding1D) {
XlaBuilder builder(TestName());
auto activations =
- builder.ConstantR3FromArray3D<float>(Array3D<float>({{{1, 2, 3, 4}}}));
+ ConstantR3FromArray3D<float>(&builder, Array3D<float>({{{1, 2, 3, 4}}}));
auto gradients =
- builder.ConstantR3FromArray3D<float>(Array3D<float>({{{100, 10, 1}}}));
+ ConstantR3FromArray3D<float>(&builder, Array3D<float>({{{100, 10, 1}}}));
auto forward_conv =
- builder.ConvGeneralDilated(activations, gradients,
- /*window_strides=*/{1},
- /*padding=*/{{2, 1}},
- /*lhs_dilation=*/{}, /*rhs_dilation=*/{2},
- XlaBuilder::CreateDefaultConvDimensionNumbers(
- /*num_spatial_dims=*/1));
- builder.Transpose(forward_conv, {0, 1, 2});
+ ConvGeneralDilated(activations, gradients,
+ /*window_strides=*/{1},
+ /*padding=*/{{2, 1}},
+ /*lhs_dilation=*/{}, /*rhs_dilation=*/{2},
+ XlaBuilder::CreateDefaultConvDimensionNumbers(
+ /*num_spatial_dims=*/1));
+ Transpose(forward_conv, {0, 1, 2});
ComputeAndCompareR3<float>(&builder, {{{13, 24, 130}}}, {}, error_spec_);
}
@@ -1333,52 +1333,52 @@ XLA_TEST_F(ConvolutionVariantsTest, BackwardFilterEvenPadding1D) {
XLA_TEST_F(ConvolutionVariantsTest, BackwardInputEvenPadding3D) {
XlaBuilder builder(TestName());
- auto gradients_flat = Literal::CreateR1<float>({1});
+ auto gradients_flat = LiteralUtil::CreateR1<float>({1});
auto gradients_literal =
gradients_flat->Reshape({1, 1, 1, 1, 1}).ConsumeValueOrDie();
- auto gradients = builder.ConstantLiteral(*gradients_literal);
+ auto gradients = ConstantLiteral(&builder, *gradients_literal);
- auto weights_flat = Literal::CreateR1<float>({1, 10, 100});
+ auto weights_flat = LiteralUtil::CreateR1<float>({1, 10, 100});
auto weights_literal =
weights_flat->Reshape({1, 1, 1, 1, 3}).ConsumeValueOrDie();
- auto weights = builder.ConstantLiteral(*weights_literal);
+ auto weights = ConstantLiteral(&builder, *weights_literal);
- auto expected_flat = Literal::CreateR1<float>({10});
+ auto expected_flat = LiteralUtil::CreateR1<float>({10});
auto expected_literal =
expected_flat->Reshape({1, 1, 1, 1, 1}).ConsumeValueOrDie();
- auto mirrored_weights = builder.Rev(weights, {2, 3, 4});
- builder.ConvWithGeneralPadding(gradients, mirrored_weights,
- /*window_strides=*/{1, 1, 1},
- /*padding=*/{{0, 0}, {0, 0}, {1, 1}});
+ auto mirrored_weights = Rev(weights, {2, 3, 4});
+ ConvWithGeneralPadding(gradients, mirrored_weights,
+ /*window_strides=*/{1, 1, 1},
+ /*padding=*/{{0, 0}, {0, 0}, {1, 1}});
ComputeAndCompareLiteral(&builder, *expected_literal, {}, error_spec_);
}
XLA_TEST_F(ConvolutionVariantsTest, BackwardFilterEvenPadding3D) {
XlaBuilder builder(TestName());
- auto activations_flat = Literal::CreateR1<float>({1, 2, 3, 4});
+ auto activations_flat = LiteralUtil::CreateR1<float>({1, 2, 3, 4});
auto activations_literal =
activations_flat->Reshape({1, 1, 1, 1, 4}).ConsumeValueOrDie();
- auto activations = builder.ConstantLiteral(*activations_literal);
+ auto activations = ConstantLiteral(&builder, *activations_literal);
- auto gradients_flat = Literal::CreateR1<float>({100, 10, 1});
+ auto gradients_flat = LiteralUtil::CreateR1<float>({100, 10, 1});
auto gradients_literal =
gradients_flat->Reshape({1, 1, 1, 1, 3}).ConsumeValueOrDie();
- auto gradients = builder.ConstantLiteral(*gradients_literal);
+ auto gradients = ConstantLiteral(&builder, *gradients_literal);
- auto expected_flat = Literal::CreateR1<float>({13, 24, 130});
+ auto expected_flat = LiteralUtil::CreateR1<float>({13, 24, 130});
auto expected_literal =
expected_flat->Reshape({1, 1, 1, 1, 3}).ConsumeValueOrDie();
- auto forward_conv = builder.ConvGeneralDilated(
- activations, gradients,
- /*window_strides=*/{1, 1, 1},
- /*padding=*/{{0, 0}, {0, 0}, {2, 1}},
- /*lhs_dilation=*/{}, /*rhs_dilation=*/{1, 1, 2},
- XlaBuilder::CreateDefaultConvDimensionNumbers(
- /*num_spatial_dims=*/3));
- builder.Transpose(forward_conv, {0, 1, 2, 3, 4});
+ auto forward_conv =
+ ConvGeneralDilated(activations, gradients,
+ /*window_strides=*/{1, 1, 1},
+ /*padding=*/{{0, 0}, {0, 0}, {2, 1}},
+ /*lhs_dilation=*/{}, /*rhs_dilation=*/{1, 1, 2},
+ XlaBuilder::CreateDefaultConvDimensionNumbers(
+ /*num_spatial_dims=*/3));
+ Transpose(forward_conv, {0, 1, 2, 3, 4});
ComputeAndCompareLiteral(&builder, *expected_literal, {}, error_spec_);
}
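
Alongside the builder change, convolution_variants_test.cc picks up the commit's second rename: the literal factory functions move from the Literal class to LiteralUtil, and the include switches from literal_util.h to literal.h. A short sketch of the renamed calls as they appear in these hunks and in the files below (the values are arbitrary):

  #include "tensorflow/compiler/xla/literal.h"  // replaces literal_util.h

  auto r0 = LiteralUtil::CreateR0<float>(42.0);       // was Literal::CreateR0
  auto r1 = LiteralUtil::CreateR1<float>({1, 2, 3});  // was Literal::CreateR1
  // The same rename applies to MakeTuple and the Create*FromArray helpers.
  auto tuple = LiteralUtil::MakeTuple({r0.get(), r1.get()});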
diff --git a/tensorflow/compiler/xla/tests/copy_test.cc b/tensorflow/compiler/xla/tests/copy_test.cc
index 2b3390ca98..1dc6ff0f4f 100644
--- a/tensorflow/compiler/xla/tests/copy_test.cc
+++ b/tensorflow/compiler/xla/tests/copy_test.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/array2d.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -58,37 +58,38 @@ class CopyOpTest : public HloTestBase {
};
XLA_TEST_F(CopyOpTest, CopyR0Bool) {
- TestCopyOp(*Literal::CreateR0<bool>(true));
+ TestCopyOp(*LiteralUtil::CreateR0<bool>(true));
}
XLA_TEST_F(CopyOpTest, CopyR1S0U32) {
- TestCopyOp(*Literal::CreateR1<uint32>({}));
+ TestCopyOp(*LiteralUtil::CreateR1<uint32>({}));
}
XLA_TEST_F(CopyOpTest, CopyR1S3U32) {
- TestCopyOp(*Literal::CreateR1<uint32>({1, 2, 3}));
+ TestCopyOp(*LiteralUtil::CreateR1<uint32>({1, 2, 3}));
}
XLA_TEST_F(CopyOpTest, CopyR3F32_2x2x3) {
- TestCopyOp(*Literal::CreateR3({{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}},
- {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}}));
+ TestCopyOp(
+ *LiteralUtil::CreateR3({{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}},
+ {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}}));
}
XLA_TEST_F(CopyOpTest, CopyR4S32_2x2x3x2) {
- TestCopyOp(*Literal::CreateR4(
+ TestCopyOp(*LiteralUtil::CreateR4(
{{{{1, -2}, {-4, 5}, {6, 7}}, {{8, 9}, {10, 11}, {12, 13}}},
{{{10, 3}, {7, -2}, {3, 6}}, {{2, 5}, {-11, 5}, {-2, -5}}}}));
}
XLA_TEST_F(CopyOpTest, CopyR4S32_0x2x3x2) {
- TestCopyOp(*Literal::CreateR4FromArray4D(Array4D<int32>(0, 2, 3, 2)));
+ TestCopyOp(*LiteralUtil::CreateR4FromArray4D(Array4D<int32>(0, 2, 3, 2)));
}
XLA_TEST_F(CopyOpTest, CopyParameterScalar) {
auto builder = HloComputation::Builder(TestName());
// Copy literal to device to use as parameter.
- auto literal = Literal::CreateR0<float>(42.0);
+ auto literal = LiteralUtil::CreateR0<float>(42.0);
Shape shape = literal->shape();
auto param0 = builder.AddInstruction(
@@ -109,7 +110,7 @@ XLA_TEST_F(CopyOpTest, CopyParameterScalar) {
XLA_TEST_F(CopyOpTest, CopyConstantR2Twice) {
auto builder = HloComputation::Builder(TestName());
- auto literal = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ auto literal = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
@@ -131,7 +132,7 @@ XLA_TEST_F(CopyOpTest, CopyConstantR2DifferentLayouts) {
HloComputation::Builder builder(TestName());
std::unique_ptr<Literal> literal =
- Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
// Reverse the minor-to-major order of the literal.
Layout* literal_layout =
literal->mutable_shape_do_not_use()->mutable_layout();
@@ -168,7 +169,7 @@ void CopyOpTest::TestCopyConstantLayout021(size_t n1, size_t n2, size_t n3) {
HloComputation::Builder builder(TestName());
- std::unique_ptr<Literal> literal = Literal::CreateR3FromArray3D(a);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR3FromArray3D(a);
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
@@ -202,7 +203,7 @@ void CopyOpTest::TestCopyConstantLayoutR4(
HloComputation::Builder builder(TestName());
- std::unique_ptr<Literal> literal = Literal::CreateR4FromArray4D(a);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR4FromArray4D(a);
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
@@ -248,7 +249,7 @@ XLA_TEST_F(CopyOpClientTest, Copy0x0) {
auto empty = Literal::CreateFromShape(in_shape);
XlaBuilder builder(TestName());
- auto param0 = builder.Parameter(0, in_shape, "input");
+ Parameter(&builder, 0, in_shape, "input");
auto input_data = client_->TransferToServer(*empty).ConsumeValueOrDie();
auto actual = ExecuteAndTransfer(&builder, {input_data.get()}, &out_shape)
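
copy_test.cc also shows the commit's second mechanical rename: the static factory methods on Literal (CreateR0, CreateR1, CreateR2FromArray2D, CreateR4FromArray4D, MakeTuple, ...) become static members of LiteralUtil, and the #include of literal_util.h is swapped for the new literal.h, which now carries the Literal class itself. The split is not wholesale; the Copy0x0 hunk above still calls Literal::CreateFromShape unchanged. A sketch of the rename, assuming literal_util.h keeps declaring the LiteralUtil factories (hypothetical snippet, not from this commit):

    #include "tensorflow/compiler/xla/literal.h"  // class Literal now lives here

    // Before: auto lit = Literal::CreateR1<float>({1, 2, 3});
    // After: the factories are static members of LiteralUtil.
    std::unique_ptr<Literal> lit = LiteralUtil::CreateR1<float>({1, 2, 3});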
diff --git a/tensorflow/compiler/xla/tests/cross_replica_sum_test.cc b/tensorflow/compiler/xla/tests/cross_replica_sum_test.cc
index b151187c4b..d12a4e7fcd 100644
--- a/tensorflow/compiler/xla/tests/cross_replica_sum_test.cc
+++ b/tensorflow/compiler/xla/tests/cross_replica_sum_test.cc
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
@@ -45,7 +45,7 @@ XLA_TEST_F(TrivialCrossReplicaSumTest, OneOperand) {
})";
auto module =
ParseHloString(module_str, GetModuleConfigForTest()).ValueOrDie();
- auto literal = Literal::CreateR1<float>({1, 2, 3});
+ auto literal = LiteralUtil::CreateR1<float>({1, 2, 3});
EXPECT_EQ(*literal, *ExecuteAndTransfer(std::move(module), {literal.get()}));
}
@@ -66,10 +66,10 @@ XLA_TEST_F(TrivialCrossReplicaSumTest, MultipleOperands) {
})";
auto module =
ParseHloString(module_str, GetModuleConfigForTest()).ValueOrDie();
- auto literal0 = Literal::CreateR1<float>({1, 2, 3});
- auto literal1 = Literal::CreateR1<float>({10, 20});
+ auto literal0 = LiteralUtil::CreateR1<float>({1, 2, 3});
+ auto literal1 = LiteralUtil::CreateR1<float>({10, 20});
EXPECT_EQ(
- *Literal::MakeTuple({literal0.get(), literal1.get()}),
+ *LiteralUtil::MakeTuple({literal0.get(), literal1.get()}),
*ExecuteAndTransfer(std::move(module), {literal0.get(), literal1.get()}));
}
@@ -93,9 +93,9 @@ XLA_TEST_F(TrivialCrossReplicaSumTest, ConstantOperand) {
})";
auto module =
ParseHloString(module_str, GetModuleConfigForTest()).ValueOrDie();
- auto literal0 = Literal::CreateR1<float>({1, 2, 3});
- auto literal1 = Literal::CreateR1<float>({10, 20});
- EXPECT_EQ(*Literal::MakeTuple({literal0.get(), literal1.get()}),
+ auto literal0 = LiteralUtil::CreateR1<float>({1, 2, 3});
+ auto literal1 = LiteralUtil::CreateR1<float>({10, 20});
+ EXPECT_EQ(*LiteralUtil::MakeTuple({literal0.get(), literal1.get()}),
*ExecuteAndTransfer(std::move(module), {literal0.get()}));
}
diff --git a/tensorflow/compiler/xla/tests/custom_call_test.cc b/tensorflow/compiler/xla/tests/custom_call_test.cc
index b43d5c9ff5..90f3d1b874 100644
--- a/tensorflow/compiler/xla/tests/custom_call_test.cc
+++ b/tensorflow/compiler/xla/tests/custom_call_test.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include <memory>
#include <utility>
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/cpu/custom_call_target_registry.h"
@@ -73,7 +74,7 @@ XLA_TEST_F(CustomCallTest, DISABLED_ON_GPU(CustomCallR0F32Add2)) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(
HloInstruction::CreateCustomCall(r0f32_, {constant}, "R0F32Add2"));
@@ -94,7 +95,7 @@ XLA_TEST_F(CustomCallTest, DISABLED_ON_GPU(CustomCallR2F32Reduce)) {
array(1, 1) = 4.0f;
auto constant = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2FromArray2D(array)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2FromArray2D(array)));
builder.AddInstruction(
HloInstruction::CreateCustomCall(r0f32_, {constant}, "R2F32ReduceSum"));
@@ -110,7 +111,7 @@ XLA_TEST_F(CustomCallTest,
auto b = HloComputation::Builder(TestName());
auto input = b.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2FromArray2D(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2FromArray2D(
Array2D<float>{{1.0f, 2.0f}, {3.0f, 4.0f}})));
auto incremented = b.AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeShape(F32, {1, 2, 2}), {input}, "Add1ToValues"));
@@ -135,8 +136,8 @@ class CustomCallClientAPITest : public ClientLibraryTestBase {};
// are reserved for internal use.
XLA_TEST_F(CustomCallClientAPITest, IllegalCustomCallTarget) {
XlaBuilder builder(TestName());
- builder.CustomCall("$illegal", /*operands=*/{},
- ShapeUtil::MakeShape(F32, {1}));
+ CustomCall(&builder, "$illegal", /*operands=*/{},
+ ShapeUtil::MakeShape(F32, {1}));
StatusOr<std::unique_ptr<GlobalData>> result =
Execute(&builder, /*arguments=*/{});
diff --git a/tensorflow/compiler/xla/tests/deallocation_test.cc b/tensorflow/compiler/xla/tests/deallocation_test.cc
index bfe688e20d..d4b3aac85b 100644
--- a/tensorflow/compiler/xla/tests/deallocation_test.cc
+++ b/tensorflow/compiler/xla/tests/deallocation_test.cc
@@ -48,7 +48,7 @@ class DeallocationTest : public ClientLibraryTestBase {
TEST_F(DeallocationTest, DeallocateScalar) {
XlaBuilder builder(TestName());
- builder.ConstantR0<float>(42.0);
+ ConstantR0<float>(&builder, 42.0);
auto global_data = ExecuteAndCheckTransfer(&builder, {});
// A result can be transferred an arbitrary number of times. Add an extra
@@ -66,7 +66,7 @@ TEST_F(DeallocationTest, DeallocateScalar) {
TEST_F(DeallocationTest, DeallocateVector) {
XlaBuilder builder(TestName());
- builder.ConstantR1<float>({1.0, 2.0, 3.0, 4.0});
+ ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0});
auto global_data = ExecuteAndCheckTransfer(&builder, {});
ASSERT_IS_OK(client_->Unregister(*global_data));
@@ -79,7 +79,7 @@ TEST_F(DeallocationTest, DeallocateVector) {
TEST_F(DeallocationTest, DeallocateEmptyVector) {
XlaBuilder builder(TestName());
- builder.ConstantR1<float>({});
+ ConstantR1<float>(&builder, {});
auto global_data = ExecuteAndCheckTransfer(&builder, {});
ASSERT_IS_OK(client_->Unregister(*global_data));
@@ -92,8 +92,8 @@ TEST_F(DeallocationTest, DeallocateEmptyVector) {
XLA_TEST_F(DeallocationTest, DeallocateTuple) {
XlaBuilder builder(TestName());
- builder.Tuple({builder.ConstantR0<float>(42.0),
- builder.ConstantR1<float>({1.0, 2.0, 3.0})});
+ Tuple(&builder, {ConstantR0<float>(&builder, 42.0),
+ ConstantR1<float>(&builder, {1.0, 2.0, 3.0})});
auto global_data = ExecuteAndCheckTransfer(&builder, {});
ASSERT_IS_OK(client_->Unregister(*global_data));
@@ -106,9 +106,10 @@ XLA_TEST_F(DeallocationTest, DeallocateTuple) {
XLA_TEST_F(DeallocationTest, DeallocateTupleWithRepeatedElements) {
XlaBuilder builder(TestName());
- auto element = builder.ConstantR0<float>(42.0);
- auto inner_tuple = builder.Tuple({builder.ConstantR0<float>(42.0), element});
- builder.Tuple({element, inner_tuple, element});
+ auto element = ConstantR0<float>(&builder, 42.0);
+ auto inner_tuple =
+ Tuple(&builder, {ConstantR0<float>(&builder, 42.0), element});
+ Tuple(&builder, {element, inner_tuple, element});
auto global_data = ExecuteAndCheckTransfer(&builder, {});
ASSERT_IS_OK(client_->Unregister(*global_data));
@@ -122,9 +123,9 @@ XLA_TEST_F(DeallocationTest, DeallocateTupleWithRepeatedElements) {
XLA_TEST_F(DeallocationTest, DeallocateNestedTuple) {
XlaBuilder builder(TestName());
auto inner_tuple =
- builder.Tuple({builder.ConstantR0<float>(42.0),
- builder.ConstantR1<float>({1.0, 2.0, 3.0})});
- builder.Tuple({inner_tuple, builder.ConstantR1<float>({0.123, 0.456})});
+ Tuple(&builder, {ConstantR0<float>(&builder, 42.0),
+ ConstantR1<float>(&builder, {1.0, 2.0, 3.0})});
+ Tuple(&builder, {inner_tuple, ConstantR1<float>(&builder, {0.123, 0.456})});
auto global_data = ExecuteAndCheckTransfer(&builder, {});
ASSERT_IS_OK(client_->Unregister(*global_data));
diff --git a/tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc b/tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc
index 12789fe665..a6a233e71a 100644
--- a/tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc
+++ b/tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc
@@ -20,7 +20,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/test.h"
@@ -54,9 +54,9 @@ class DeconstructTupleTest : public ClientLibraryTestBase {
TEST_F(DeconstructTupleTest, DeconstructTuple) {
XlaBuilder builder(TestName());
- auto const1 = builder.ConstantR1<float>({1.0, 2.0, 3.0, 4.0});
- auto const2 = builder.ConstantR1<float>({2.0, 4.0, 6.0, 8.0});
- builder.Tuple({const1, const2});
+ auto const1 = ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0});
+ auto const2 = ConstantR1<float>(&builder, {2.0, 4.0, 6.0, 8.0});
+ Tuple(&builder, {const1, const2});
auto global_data = ExecuteAndCheckTransfer(&builder, {});
auto result_status = client_->DeconstructTuple(*global_data);
@@ -73,9 +73,9 @@ TEST_F(DeconstructTupleTest, DeconstructTuple) {
TEST_F(DeconstructTupleTest, DeconstructTupleTwice) {
XlaBuilder builder(TestName());
- auto const1 = builder.ConstantR1<float>({1.0, 2.0, 3.0, 4.0});
- auto const2 = builder.ConstantR1<float>({2.0, 4.0, 6.0, 8.0});
- builder.Tuple({const1, const2});
+ auto const1 = ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0});
+ auto const2 = ConstantR1<float>(&builder, {2.0, 4.0, 6.0, 8.0});
+ Tuple(&builder, {const1, const2});
auto global_data = ExecuteAndCheckTransfer(&builder, {});
auto result_status1 = client_->DeconstructTuple(*global_data);
@@ -103,9 +103,9 @@ TEST_F(DeconstructTupleTest, DeconstructTupleTwice) {
XLA_TEST_F(DeconstructTupleTest, DeconstructTupleRepeatedElement) {
XlaBuilder builder(TestName());
- auto const1 = builder.ConstantR1<float>({1.0, 2.0, 3.0, 4.0});
- auto const2 = builder.ConstantR1<float>({2.0, 4.0, 6.0, 8.0});
- builder.Tuple({const1, const2, const2, const1});
+ auto const1 = ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0});
+ auto const2 = ConstantR1<float>(&builder, {2.0, 4.0, 6.0, 8.0});
+ Tuple(&builder, {const1, const2, const2, const1});
auto global_data = ExecuteAndCheckTransfer(&builder, {});
auto result_status = client_->DeconstructTuple(*global_data);
@@ -129,9 +129,9 @@ XLA_TEST_F(DeconstructTupleTest, DeconstructTupleRepeatedElement) {
TEST_F(DeconstructTupleTest, DeconstructTupleThenDeallocate) {
XlaBuilder builder(TestName());
- auto const1 = builder.ConstantR1<float>({1.0, 2.0, 3.0, 4.0});
- auto const2 = builder.ConstantR1<float>({2.0, 4.0, 6.0, 8.0});
- builder.Tuple({const1, const2, const1});
+ auto const1 = ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0});
+ auto const2 = ConstantR1<float>(&builder, {2.0, 4.0, 6.0, 8.0});
+ Tuple(&builder, {const1, const2, const1});
auto global_data = ExecuteAndCheckTransfer(&builder, {});
auto result_status = client_->DeconstructTuple(*global_data);
@@ -159,7 +159,7 @@ TEST_F(DeconstructTupleTest, DeconstructTupleThenDeallocate) {
TEST_F(DeconstructTupleTest, DeconstructNonTuple) {
XlaBuilder builder(TestName());
- builder.ConstantR1<float>({1.0, 2.0, 3.0, 4.0});
+ ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0});
auto global_data = ExecuteAndCheckTransfer(&builder, {});
auto result_status = client_->DeconstructTuple(*global_data);
@@ -171,11 +171,11 @@ TEST_F(DeconstructTupleTest, DeconstructNonTuple) {
XLA_TEST_F(DeconstructTupleTest, DeconstructTupleFromParam) {
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR1<float>({3.14f, -100.25f});
+ LiteralUtil::CreateR1<float>({3.14f, -100.25f});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto p = builder.Parameter(0, ShapeUtil::MakeShape(F32, {2}), "param0");
- builder.Tuple({p});
+ auto p = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2}), "param0");
+ Tuple(&builder, {p});
auto global_data = ExecuteAndCheckTransfer(&builder, {param0_data.get()});
auto result_status = client_->DeconstructTuple(*global_data);
@@ -186,9 +186,9 @@ XLA_TEST_F(DeconstructTupleTest, DeconstructTupleFromParam) {
XLA_TEST_F(DeconstructTupleTest, DeconstructNestedTuple) {
XlaBuilder builder(TestName());
- auto const1 = builder.ConstantR1<float>({1.0, 2.0, 3.0, 4.0});
- auto const2 = builder.ConstantR1<float>({2.0, 4.0, 6.0, 8.0});
- builder.Tuple({builder.Tuple({const1, const2}), const1});
+ auto const1 = ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0});
+ auto const2 = ConstantR1<float>(&builder, {2.0, 4.0, 6.0, 8.0});
+ Tuple(&builder, {Tuple(&builder, {const1, const2}), const1});
auto global_data = ExecuteAndCheckTransfer(&builder, {});
auto result_status = client_->DeconstructTuple(*global_data);
diff --git a/tensorflow/compiler/xla/tests/deep_graph_test.cc b/tensorflow/compiler/xla/tests/deep_graph_test.cc
index 085a5105ac..810947ab01 100644
--- a/tensorflow/compiler/xla/tests/deep_graph_test.cc
+++ b/tensorflow/compiler/xla/tests/deep_graph_test.cc
@@ -30,7 +30,7 @@ TEST_F(ClientLibraryTestBase, DeepGraph) {
auto y_data = CreateR0Parameter<int32>(1, 1, "y", &b, &y);
XlaOp z = x;
for (int i = 0; i < kDepth; ++i) {
- z = b.Add(z, y);
+ z = Add(z, y);
}
ComputeAndCompareR0<int32>(&b, /*expected=*/kDepth + 3,
{x_data.get(), y_data.get()});
diff --git a/tensorflow/compiler/xla/tests/dot_operation_test.cc b/tensorflow/compiler/xla/tests/dot_operation_test.cc
index 0fd846cef8..d86fd7cc2d 100644
--- a/tensorflow/compiler/xla/tests/dot_operation_test.cc
+++ b/tensorflow/compiler/xla/tests/dot_operation_test.cc
@@ -67,15 +67,16 @@ XLA_TEST_F(DotOperationTest, DotOfInputTupleElem) {
XlaOp param;
auto param_data = CreateParameterAndTransferLiteral(
0,
- *Literal::MakeTuple({Literal::CreateR2<float>({{1, 2}, {3, 4}}).get(),
- Literal::CreateR2<float>({{5, 6}, {7, 8}}).get()}),
+ *LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR2<float>({{1, 2}, {3, 4}}).get(),
+ LiteralUtil::CreateR2<float>({{5, 6}, {7, 8}}).get()}),
"arg0", &builder, &param);
- auto lhs = builder.GetTupleElement(param, 0);
- auto rhs = builder.GetTupleElement(param, 1);
- builder.Dot(lhs, rhs);
+ auto lhs = GetTupleElement(param, 0);
+ auto rhs = GetTupleElement(param, 1);
+ Dot(lhs, rhs);
ComputeAndCompareLiteral(&builder,
- *Literal::CreateR2<float>({{19, 22}, {43, 50}}),
+ *LiteralUtil::CreateR2<float>({{19, 22}, {43, 50}}),
{param_data.get()});
}
@@ -87,9 +88,9 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64CF64, ZeroElementVectorDot) {
using T = TypeParam;
XlaBuilder builder(this->TestName());
- auto lhs = builder.ConstantR1<T>({});
- auto rhs = builder.ConstantR1<T>({});
- auto result = builder.Dot(lhs, rhs);
+ auto lhs = ConstantR1<T>(&builder, {});
+ auto rhs = ConstantR1<T>(&builder, {});
+ Dot(lhs, rhs);
this->template ComputeAndCompareR0<T>(&builder, static_cast<T>(0.0), {},
this->error_spec_);
@@ -102,9 +103,9 @@ TYPED_TEST_CASE(DotOperationTest_F16F32F64, TypesF16F32F64);
XLA_TYPED_TEST(DotOperationTest_F16F32F64, TrivialMatrixVectorDot) {
using T = TypeParam;
XlaBuilder builder(this->TestName());
- auto lhs = builder.ConstantR2FromArray2D<T>({{3.0f, 4.0f}});
- auto rhs = builder.ConstantFromArray<T>({3.0f, 4.0f});
- auto result = builder.Dot(lhs, rhs);
+ auto lhs = ConstantR2FromArray2D<T>(&builder, {{3.0f, 4.0f}});
+ auto rhs = ConstantFromArray<T>(&builder, {3.0f, 4.0f});
+ Dot(lhs, rhs);
this->template ComputeAndCompareR1<T>(&builder, {static_cast<T>(25.0f)}, {},
this->error_spec_);
@@ -113,9 +114,9 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64, TrivialMatrixVectorDot) {
XLA_TYPED_TEST(DotOperationTest_F16F32F64, OneElementVectorDot) {
using T = TypeParam;
XlaBuilder builder(this->TestName());
- auto lhs = builder.ConstantR1<T>({static_cast<T>(2.0f)});
- auto rhs = builder.ConstantR1<T>({static_cast<T>(3.0f)});
- auto result = builder.Dot(lhs, rhs);
+ auto lhs = ConstantR1<T>(&builder, {static_cast<T>(2.0f)});
+ auto rhs = ConstantR1<T>(&builder, {static_cast<T>(3.0f)});
+ Dot(lhs, rhs);
this->template ComputeAndCompareR0<T>(&builder, static_cast<T>(6.0f), {},
this->error_spec_);
@@ -124,9 +125,9 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64, OneElementVectorDot) {
XLA_TYPED_TEST(DotOperationTest_F16F32F64, VectorDot) {
using T = TypeParam;
XlaBuilder builder(this->TestName());
- auto lhs = builder.ConstantFromArray<T>({1.0f, 2.5f, 42.0f});
- auto rhs = builder.ConstantFromArray<T>({11.0f, -1.0f, 0.5f});
- auto result = builder.Dot(lhs, rhs);
+ auto lhs = ConstantFromArray<T>(&builder, {1.0f, 2.5f, 42.0f});
+ auto rhs = ConstantFromArray<T>(&builder, {11.0f, -1.0f, 0.5f});
+ Dot(lhs, rhs);
this->template ComputeAndCompareR0<T>(&builder, static_cast<T>(29.5f), {},
this->error_spec_);
@@ -139,9 +140,9 @@ std::vector<int64> MinorToMajorForIsRowMajor(bool row_major) {
XLA_TYPED_TEST(DotOperationTest_F16F32F64, Dot_0x2_2x0) {
using T = TypeParam;
XlaBuilder builder(this->TestName());
- auto lhs = builder.ConstantR2FromArray2D<T>(Array2D<T>(0, 2));
- auto rhs = builder.ConstantR2FromArray2D<T>(Array2D<T>(2, 0));
- auto result = builder.Dot(lhs, rhs);
+ auto lhs = ConstantR2FromArray2D<T>(&builder, Array2D<T>(0, 2));
+ auto rhs = ConstantR2FromArray2D<T>(&builder, Array2D<T>(2, 0));
+ Dot(lhs, rhs);
this->template ComputeAndCompareR2<T>(&builder, Array2D<T>(0, 0), {},
this->error_spec_);
@@ -150,10 +151,10 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64, Dot_0x2_2x0) {
XLA_TYPED_TEST(DotOperationTest_F16F32F64, Dot_0x2_2x3) {
using T = TypeParam;
XlaBuilder builder(this->TestName());
- auto lhs = builder.ConstantR2FromArray2D<T>(Array2D<T>(0, 2));
- auto rhs = builder.ConstantR2FromArray2D<T>(
- {{7.0f, 8.0f, 9.0f}, {42.0f, 77.0f, 101.0f}});
- auto result = builder.Dot(lhs, rhs);
+ auto lhs = ConstantR2FromArray2D<T>(&builder, Array2D<T>(0, 2));
+ auto rhs = ConstantR2FromArray2D<T>(
+ &builder, {{7.0f, 8.0f, 9.0f}, {42.0f, 77.0f, 101.0f}});
+ Dot(lhs, rhs);
this->template ComputeAndCompareR2<T>(&builder, Array2D<T>(0, 3), {},
this->error_spec_);
@@ -162,10 +163,10 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64, Dot_0x2_2x3) {
XLA_TYPED_TEST(DotOperationTest_F16F32F64, Dot_3x2_2x0) {
using T = TypeParam;
XlaBuilder builder(this->TestName());
- auto lhs = builder.ConstantR2FromArray2D<T>(
- {{7.0f, 8.0f}, {9.0f, 42.0f}, {77.0f, 101.0f}});
- auto rhs = builder.ConstantR2FromArray2D<T>(Array2D<T>(2, 0));
- auto result = builder.Dot(lhs, rhs);
+ auto lhs = ConstantR2FromArray2D<T>(
+ &builder, {{7.0f, 8.0f}, {9.0f, 42.0f}, {77.0f, 101.0f}});
+ auto rhs = ConstantR2FromArray2D<T>(&builder, Array2D<T>(2, 0));
+ Dot(lhs, rhs);
this->template ComputeAndCompareR2<T>(&builder, Array2D<T>(3, 0), {},
this->error_spec_);
@@ -174,9 +175,9 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64, Dot_3x2_2x0) {
XLA_TYPED_TEST(DotOperationTest_F16F32F64, Dot_2x0_0x2) {
using T = TypeParam;
XlaBuilder builder(this->TestName());
- auto lhs = builder.ConstantR2FromArray2D<T>(Array2D<T>(2, 0));
- auto rhs = builder.ConstantR2FromArray2D<T>(Array2D<T>(0, 2));
- auto result = builder.Dot(lhs, rhs);
+ auto lhs = ConstantR2FromArray2D<T>(&builder, Array2D<T>(2, 0));
+ auto rhs = ConstantR2FromArray2D<T>(&builder, Array2D<T>(0, 2));
+ Dot(lhs, rhs);
this->template ComputeAndCompareR2<T>(
&builder, Array2D<T>(2, 2, static_cast<T>(0.0f)), {}, this->error_spec_);
@@ -186,19 +187,19 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64, FusedDot) {
using T = TypeParam;
XlaBuilder builder(this->TestName());
auto param0 =
- builder.Parameter(0, ShapeUtil::MakeShapeWithType<T>({2, 4}), "arg0");
+ Parameter(&builder, 0, ShapeUtil::MakeShapeWithType<T>({2, 4}), "arg0");
auto param1 =
- builder.Parameter(1, ShapeUtil::MakeShapeWithType<T>({4, 1}), "arg1");
- auto exp0 = builder.Exp(param0);
- auto result = builder.Dot(exp0, param1);
+ Parameter(&builder, 1, ShapeUtil::MakeShapeWithType<T>({4, 1}), "arg1");
+ auto exp0 = Exp(param0);
+ Dot(exp0, param1);
auto lhs_handle =
this->client_
- ->TransferToServer(*Literal::CreateR2FromArray2D<T>(
+ ->TransferToServer(*LiteralUtil::CreateR2FromArray2D<T>(
{{1.0f, 2.0f, 3.0f, 4.0f}, {-1.0f, -2.0f, -3.0f, -4.0f}}))
.ConsumeValueOrDie();
auto rhs_handle = this->client_
- ->TransferToServer(*Literal::CreateR2FromArray2D<T>(
+ ->TransferToServer(*LiteralUtil::CreateR2FromArray2D<T>(
{{1.0f}, {2.0f}, {3.0f}, {4.0f}}))
.ConsumeValueOrDie();
@@ -217,23 +218,22 @@ class SquareMatrixDot : public DotOperationTest {
void TestImpl(bool lhs_row_major, bool rhs_row_major) {
auto lhs_handle =
client_
- ->TransferToServer(*Literal::CreateFromArrayWithLayout<T>(
+ ->TransferToServer(*LiteralUtil::CreateFromArrayWithLayout<T>(
{{1.0f, 2.0f}, {3.0f, -4.0f}},
LayoutUtil::MakeLayout(
MinorToMajorForIsRowMajor(lhs_row_major))))
.ConsumeValueOrDie();
auto rhs_handle =
client_
- ->TransferToServer(*Literal::CreateFromArrayWithLayout<T>(
+ ->TransferToServer(*LiteralUtil::CreateFromArrayWithLayout<T>(
{{1.0f, 6.0f}, {7.0f, -4.0f}},
LayoutUtil::MakeLayout(
MinorToMajorForIsRowMajor(rhs_row_major))))
.ConsumeValueOrDie();
XlaBuilder builder(TestName());
auto prim_type = primitive_util::NativeToPrimitiveType<T>();
- auto result = builder.Dot(
- builder.Parameter(0, ShapeUtil::MakeShape(prim_type, {2, 2}), "lhs"),
- builder.Parameter(1, ShapeUtil::MakeShape(prim_type, {2, 2}), "rhs"));
+ Dot(Parameter(&builder, 0, ShapeUtil::MakeShape(prim_type, {2, 2}), "lhs"),
+ Parameter(&builder, 1, ShapeUtil::MakeShape(prim_type, {2, 2}), "rhs"));
Array2D<T> expected({{15.0f, -2.0f}, {-25.0f, 34.0f}});
ComputeAndCompareR2<T>(&builder, expected,
@@ -287,9 +287,10 @@ void ParametricDotTest::TestImpl() {
std::unique_ptr<Array2D<NativeT>> dot_lhs_data =
MakeLinspaceArray2D<NativeT>(0.0, 1.0, param.m, param.k);
- std::unique_ptr<Literal> dot_lhs_lit = Literal::CreateR2FromArray2DWithLayout(
- *dot_lhs_data, LayoutUtil::MakeLayout(
- MinorToMajorForIsRowMajor(param.dot_lhs_row_major)));
+ std::unique_ptr<Literal> dot_lhs_lit =
+ LiteralUtil::CreateR2FromArray2DWithLayout(
+ *dot_lhs_data, LayoutUtil::MakeLayout(MinorToMajorForIsRowMajor(
+ param.dot_lhs_row_major)));
std::unique_ptr<GlobalData> dot_lhs_handle =
client_->TransferToServer(*dot_lhs_lit).ConsumeValueOrDie();
@@ -298,7 +299,7 @@ void ParametricDotTest::TestImpl() {
Layout rhs_layout = LayoutUtil::MakeLayout(
MinorToMajorForIsRowMajor(param.dot_rhs_row_major));
std::unique_ptr<Literal> dot_rhs_lit =
- Literal::CreateR2FromArray2DWithLayout(*dot_rhs_data, rhs_layout);
+ LiteralUtil::CreateR2FromArray2DWithLayout(*dot_rhs_data, rhs_layout);
std::unique_ptr<GlobalData> dot_rhs_handle =
client_->TransferToServer(*dot_rhs_lit).ConsumeValueOrDie();
@@ -308,7 +309,7 @@ void ParametricDotTest::TestImpl() {
if (param.has_addend) {
addend_data = MakeLinspaceArray2D<NativeT>(0.0, 1.0, param.m, param.n);
- addend_lit = Literal::CreateR2FromArray2DWithLayout(
+ addend_lit = LiteralUtil::CreateR2FromArray2DWithLayout(
*addend_data, LayoutUtil::MakeLayout(
MinorToMajorForIsRowMajor(param.addend_row_major)));
addend_handle = client_->TransferToServer(*addend_lit).ConsumeValueOrDie();
@@ -316,26 +317,26 @@ void ParametricDotTest::TestImpl() {
XlaBuilder builder(TestName());
auto prim_type = primitive_util::NativeToPrimitiveType<NativeT>();
- auto result = builder.Dot(
- builder.Parameter(0,
- ShapeUtil::MakeShapeWithLayout(
- prim_type, {param.m, param.k},
- MinorToMajorForIsRowMajor(param.dot_lhs_row_major)),
- "dot_lhs"),
- builder.Parameter(1,
- ShapeUtil::MakeShapeWithLayout(
- prim_type, {param.k, param.n},
- MinorToMajorForIsRowMajor(param.dot_rhs_row_major)),
- "dot_rhs"));
+ auto result =
+ Dot(Parameter(&builder, 0,
+ ShapeUtil::MakeShapeWithLayout(
+ prim_type, {param.m, param.k},
+ MinorToMajorForIsRowMajor(param.dot_lhs_row_major)),
+ "dot_lhs"),
+ Parameter(&builder, 1,
+ ShapeUtil::MakeShapeWithLayout(
+ prim_type, {param.k, param.n},
+ MinorToMajorForIsRowMajor(param.dot_rhs_row_major)),
+ "dot_rhs"));
if (param.has_addend) {
- result = builder.Add(
- result, builder.Parameter(
- 2,
- ShapeUtil::MakeShapeWithLayout(
- prim_type, {param.m, param.n},
- MinorToMajorForIsRowMajor(param.addend_row_major)),
- "addend"));
+ result =
+ Add(result,
+ Parameter(&builder, 2,
+ ShapeUtil::MakeShapeWithLayout(
+ prim_type, {param.m, param.n},
+ MinorToMajorForIsRowMajor(param.addend_row_major)),
+ "addend"));
}
std::unique_ptr<Array2D<NativeT>> expected;
@@ -477,14 +478,14 @@ class NonsquareMatrixDot : public DotOperationTest {
void TestImpl(bool lhs_row_major, bool rhs_row_major) {
auto lhs_handle =
client_
- ->TransferToServer(*Literal::CreateFromArrayWithLayout<T>(
+ ->TransferToServer(*LiteralUtil::CreateFromArrayWithLayout<T>(
{{1.0f, 2.0f, 3.0f}, {3.0f, -4.0f, -1.0f}},
LayoutUtil::MakeLayout(
MinorToMajorForIsRowMajor(lhs_row_major))))
.ConsumeValueOrDie();
auto rhs_handle =
client_
- ->TransferToServer(*Literal::CreateFromArrayWithLayout<T>(
+ ->TransferToServer(*LiteralUtil::CreateFromArrayWithLayout<T>(
{{1.0f, 6.0f}, {2.0f, 3.0f}, {7.0f, -4.0f}},
LayoutUtil::MakeLayout(
MinorToMajorForIsRowMajor(rhs_row_major))))
@@ -492,9 +493,8 @@ class NonsquareMatrixDot : public DotOperationTest {
XlaBuilder builder(TestName());
auto prim_type = primitive_util::NativeToPrimitiveType<T>();
- auto result = builder.Dot(
- builder.Parameter(0, ShapeUtil::MakeShape(prim_type, {2, 3}), "lhs"),
- builder.Parameter(1, ShapeUtil::MakeShape(prim_type, {3, 2}), "rhs"));
+ Dot(Parameter(&builder, 0, ShapeUtil::MakeShape(prim_type, {2, 3}), "lhs"),
+ Parameter(&builder, 1, ShapeUtil::MakeShape(prim_type, {3, 2}), "rhs"));
Array2D<T> expected({{26.0f, 0.0f}, {-12.0f, 10.0f}});
@@ -512,21 +512,20 @@ XLA_TYPED_TEST(NonsquareMatrixDot, TestTT) { this->TestImpl(true, true); }
XLA_TEST_F(DotOperationTest, MatrixVectorC64) {
auto lhs_handle =
client_
- ->TransferToServer(*Literal::CreateR2WithLayout<complex64>(
+ ->TransferToServer(*LiteralUtil::CreateR2WithLayout<complex64>(
{{1.0, 2.0, 3.0, -4.0}}, LayoutUtil::MakeLayout({1, 0})))
.ConsumeValueOrDie();
auto rhs_handle =
client_
- ->TransferToServer(*Literal::CreateR2WithLayout<complex64>(
+ ->TransferToServer(*LiteralUtil::CreateR2WithLayout<complex64>(
{{1.0, 1.0}, {2.0, 2.0}, {3.0, 3.0}, {-4.0, 4.0}},
LayoutUtil::MakeLayout({1, 0})))
.ConsumeValueOrDie();
XlaBuilder builder(TestName());
auto prim_type = primitive_util::NativeToPrimitiveType<complex64>();
- auto result = builder.Dot(
- builder.Parameter(0, ShapeUtil::MakeShape(prim_type, {1, 4}), "lhs"),
- builder.Parameter(1, ShapeUtil::MakeShape(prim_type, {4, 2}), "rhs"));
+ Dot(Parameter(&builder, 0, ShapeUtil::MakeShape(prim_type, {1, 4}), "lhs"),
+ Parameter(&builder, 1, ShapeUtil::MakeShape(prim_type, {4, 2}), "rhs"));
Array2D<complex64> expected({{30.0, -2.0}});
@@ -538,11 +537,13 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64, ConcurrentMatMult) {
using T = TypeParam;
XlaBuilder builder(this->TestName());
- auto matrix1 = builder.ConstantR2FromArray2D<T>({{1.0f, 2.0f}, {3.0f, 4.0f}});
- auto matrix2 = builder.ConstantR2FromArray2D<T>({{5.0f, 6.0f}, {7.0f, 8.0f}});
- auto matrix12 = builder.Dot(matrix1, matrix2);
- auto matrix21 = builder.Dot(matrix2, matrix1);
- builder.Add(matrix12, matrix21);
+ auto matrix1 =
+ ConstantR2FromArray2D<T>(&builder, {{1.0f, 2.0f}, {3.0f, 4.0f}});
+ auto matrix2 =
+ ConstantR2FromArray2D<T>(&builder, {{5.0f, 6.0f}, {7.0f, 8.0f}});
+ auto matrix12 = Dot(matrix1, matrix2);
+ auto matrix21 = Dot(matrix2, matrix1);
+ Add(matrix12, matrix21);
Array2D<T> expected({{42.0f, 56.0f}, {74.0f, 96.0f}});
this->template ComputeAndCompareR2<T>(&builder, expected, {},
@@ -559,32 +560,32 @@ TYPED_TEST_CASE(DotOperationTestForBatchMatMul, TypesF16F32F64);
XLA_TYPED_TEST(DotOperationTestForBatchMatMul, Types) {
using T = TypeParam;
XlaBuilder builder(this->TestName());
- auto x =
- builder.Parameter(0, ShapeUtil::MakeShapeWithType<T>({2, 2, 2, 2}), "x");
- auto y =
- builder.Parameter(1, ShapeUtil::MakeShapeWithType<T>({2, 2, 2, 2}), "y");
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShapeWithType<T>({2, 2, 2, 2}),
+ "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShapeWithType<T>({2, 2, 2, 2}),
+ "y");
- auto x_flat = builder.Reshape(x, {0, 1, 2, 3}, {4, 2, 2});
- auto y_flat = builder.Reshape(y, {0, 1, 2, 3}, {4, 2, 2});
+ auto x_flat = Reshape(x, {0, 1, 2, 3}, {4, 2, 2});
+ auto y_flat = Reshape(y, {0, 1, 2, 3}, {4, 2, 2});
// Slice batches into individual matrices and multiply them.
std::vector<XlaOp> out_slices;
for (int i = 0; i < 4; ++i) {
// Slice off individual matrices and reshape to 2D tensors.
- auto x_slice = builder.Slice(x_flat, {i, 0, 0}, {i + 1, 2, 2}, {1, 1, 1});
- x_slice = builder.Reshape(x_slice, {0, 1, 2}, {2, 2});
- auto y_slice = builder.Slice(y_flat, {i, 0, 0}, {i + 1, 2, 2}, {1, 1, 1});
- y_slice = builder.Reshape(y_slice, {0, 1, 2}, {2, 2});
+ auto x_slice = Slice(x_flat, {i, 0, 0}, {i + 1, 2, 2}, {1, 1, 1});
+ x_slice = Reshape(x_slice, {0, 1, 2}, {2, 2});
+ auto y_slice = Slice(y_flat, {i, 0, 0}, {i + 1, 2, 2}, {1, 1, 1});
+ y_slice = Reshape(y_slice, {0, 1, 2}, {2, 2});
- auto out = builder.Dot(x_slice, y_slice);
- out = builder.Reshape(out, {0, 1}, {1, 2, 2});
+ auto out = Dot(x_slice, y_slice);
+ out = Reshape(out, {0, 1}, {1, 2, 2});
out_slices.push_back(out);
}
- auto out_flat = builder.ConcatInDim(out_slices, 0);
- builder.Reshape(out_flat, {0, 1, 2}, {2, 2, 2, 2});
+ auto out_flat = ConcatInDim(&builder, out_slices, 0);
+ Reshape(out_flat, {0, 1, 2}, {2, 2, 2, 2});
auto x_data = this->client_
- ->TransferToServer(*Literal::CreateR4FromArray4D<T>(
+ ->TransferToServer(*LiteralUtil::CreateR4FromArray4D<T>(
{{{{1000.0f, 100.0f}, {10.0f, 1.0f}},
{{2000.0f, 200.0f}, {20.0f, 2.0f}}},
{{{3000.0f, 300.0f}, {30.0f, 3.0f}},
@@ -592,7 +593,7 @@ XLA_TYPED_TEST(DotOperationTestForBatchMatMul, Types) {
.ConsumeValueOrDie();
auto y_data =
this->client_
- ->TransferToServer(*Literal::CreateR4FromArray4D<T>(
+ ->TransferToServer(*LiteralUtil::CreateR4FromArray4D<T>(
{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}},
{{{11.0f, 22.0f}, {33.0f, 44.0f}},
{{55.0f, 66.0f}, {77.0f, 88.0f}}}}))
@@ -616,9 +617,9 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64, GeneralMatMul) {
XlaBuilder builder(this->TestName());
auto x =
- builder.Parameter(0, ShapeUtil::MakeShapeWithType<T>({2, 2, 2}), "x");
+ Parameter(&builder, 0, ShapeUtil::MakeShapeWithType<T>({2, 2, 2}), "x");
auto y =
- builder.Parameter(1, ShapeUtil::MakeShapeWithType<T>({2, 2, 2}), "y");
+ Parameter(&builder, 1, ShapeUtil::MakeShapeWithType<T>({2, 2, 2}), "y");
DotDimensionNumbers dnums;
dnums.add_lhs_contracting_dimensions(2);
@@ -626,17 +627,17 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64, GeneralMatMul) {
dnums.add_lhs_batch_dimensions(0);
dnums.add_rhs_batch_dimensions(0);
- auto out = builder.DotGeneral(x, y, dnums);
+ DotGeneral(x, y, dnums);
auto x_data =
this->client_
- ->TransferToServer(*Literal::CreateR3FromArray3D<T>(
+ ->TransferToServer(*LiteralUtil::CreateR3FromArray3D<T>(
{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}))
.ConsumeValueOrDie();
auto y_data =
this->client_
- ->TransferToServer(*Literal::CreateR3FromArray3D<T>(
+ ->TransferToServer(*LiteralUtil::CreateR3FromArray3D<T>(
{{{1.0f, 0.0f}, {0.0f, 1.0f}}, {{1.0f, 0.0f}, {0.0f, 1.0f}}}))
.ConsumeValueOrDie();
@@ -665,32 +666,36 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64, TransposeFolding) {
}
auto lhs_handle =
this->client_
- ->TransferToServer(*Literal::CreateR2FromArray2DWithLayout<T>(
- *lhs, LayoutUtil::MakeLayout(
- MinorToMajorForIsRowMajor(row_major))))
+ ->TransferToServer(
+ *LiteralUtil::CreateR2FromArray2DWithLayout<T>(
+ *lhs, LayoutUtil::MakeLayout(
+ MinorToMajorForIsRowMajor(row_major))))
.ConsumeValueOrDie();
auto rhs_handle =
this->client_
- ->TransferToServer(*Literal::CreateR2FromArray2DWithLayout<T>(
- *rhs, LayoutUtil::MakeLayout(
- MinorToMajorForIsRowMajor(row_major))))
+ ->TransferToServer(
+ *LiteralUtil::CreateR2FromArray2DWithLayout<T>(
+ *rhs, LayoutUtil::MakeLayout(
+ MinorToMajorForIsRowMajor(row_major))))
.ConsumeValueOrDie();
XlaBuilder builder(this->TestName());
auto prim_type = primitive_util::NativeToPrimitiveType<T>();
- auto lhs_arg = builder.Parameter(
- 0, ShapeUtil::MakeShape(prim_type, {lhs->height(), lhs->width()}),
+ auto lhs_arg = Parameter(
+ &builder, 0,
+ ShapeUtil::MakeShape(prim_type, {lhs->height(), lhs->width()}),
"lhs");
- auto rhs_arg = builder.Parameter(
- 1, ShapeUtil::MakeShape(prim_type, {rhs->height(), rhs->width()}),
+ auto rhs_arg = Parameter(
+ &builder, 1,
+ ShapeUtil::MakeShape(prim_type, {rhs->height(), rhs->width()}),
"rhs");
if (transpose_lhs) {
- lhs_arg = builder.Transpose(lhs_arg, {1, 0});
+ lhs_arg = Transpose(lhs_arg, {1, 0});
}
if (transpose_rhs) {
- rhs_arg = builder.Transpose(rhs_arg, {1, 0});
+ rhs_arg = Transpose(rhs_arg, {1, 0});
}
- auto result = builder.Dot(lhs_arg, rhs_arg);
+ Dot(lhs_arg, rhs_arg);
Array2D<T> expected({{26.0f, 0.0f}, {-12.0f, 10.0f}});
VLOG(1) << "TestTransposeFolding " << transpose_lhs << " "
@@ -713,15 +718,15 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64,
{6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}));
XlaBuilder builder(this->TestName());
- auto lhs_constant = builder.ConstantR2FromArray2D(*constant_lhs_array);
- auto rhs_arg_0 = builder.Parameter(0, ShapeUtil::MakeShape(prim_type, {2, 2}),
- "rhs_arg_0");
- auto rhs_arg_1 = builder.Parameter(1, ShapeUtil::MakeShape(prim_type, {3, 2}),
- "rhs_arg_1");
- auto rhs_arg_2 = builder.Parameter(2, ShapeUtil::MakeShape(prim_type, {1, 2}),
- "rhs_arg_2");
- auto result = builder.Dot(
- lhs_constant, builder.ConcatInDim({rhs_arg_0, rhs_arg_1, rhs_arg_2}, 0));
+ auto lhs_constant = ConstantR2FromArray2D(&builder, *constant_lhs_array);
+ auto rhs_arg_0 = Parameter(
+ &builder, 0, ShapeUtil::MakeShape(prim_type, {2, 2}), "rhs_arg_0");
+ auto rhs_arg_1 = Parameter(
+ &builder, 1, ShapeUtil::MakeShape(prim_type, {3, 2}), "rhs_arg_1");
+ auto rhs_arg_2 = Parameter(
+ &builder, 2, ShapeUtil::MakeShape(prim_type, {1, 2}), "rhs_arg_2");
+ Dot(lhs_constant,
+ ConcatInDim(&builder, {rhs_arg_0, rhs_arg_1, rhs_arg_2}, 0));
std::unique_ptr<Array2D<T>> arg_0_value_array(
new Array2D<T>({{1.0f, 2.0f}, {3.0f, 4.0f}}));
@@ -732,15 +737,15 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64,
TF_ASSERT_OK_AND_ASSIGN(
auto arg_0_value,
this->client_->TransferToServer(
- *Literal::CreateR2FromArray2D<T>(*arg_0_value_array)));
+ *LiteralUtil::CreateR2FromArray2D<T>(*arg_0_value_array)));
TF_ASSERT_OK_AND_ASSIGN(
auto arg_1_value,
this->client_->TransferToServer(
- *Literal::CreateR2FromArray2D<T>(*arg_1_value_array)));
+ *LiteralUtil::CreateR2FromArray2D<T>(*arg_1_value_array)));
TF_ASSERT_OK_AND_ASSIGN(
auto arg_2_value,
this->client_->TransferToServer(
- *Literal::CreateR2FromArray2D<T>(*arg_2_value_array)));
+ *LiteralUtil::CreateR2FromArray2D<T>(*arg_2_value_array)));
Array2D<T> expected({{53.0f, 74.0f}, {45.0f, 66.0f}});
this->template ComputeAndCompareR2<T>(
@@ -761,15 +766,15 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64,
{2.0f, 1.0f}}));
XlaBuilder builder(this->TestName());
- auto rhs_constant = builder.ConstantR2FromArray2D(*constant_rhs_array);
- auto lhs_arg_0 = builder.Parameter(0, ShapeUtil::MakeShapeWithType<T>({2, 2}),
- "lhs_arg_0");
- auto lhs_arg_1 = builder.Parameter(1, ShapeUtil::MakeShapeWithType<T>({2, 3}),
- "lhs_arg_1");
- auto lhs_arg_2 = builder.Parameter(2, ShapeUtil::MakeShapeWithType<T>({2, 1}),
- "lhs_arg_2");
- auto result = builder.Dot(
- builder.ConcatInDim({lhs_arg_0, lhs_arg_1, lhs_arg_2}, 1), rhs_constant);
+ auto rhs_constant = ConstantR2FromArray2D(&builder, *constant_rhs_array);
+ auto lhs_arg_0 = Parameter(
+ &builder, 0, ShapeUtil::MakeShapeWithType<T>({2, 2}), "lhs_arg_0");
+ auto lhs_arg_1 = Parameter(
+ &builder, 1, ShapeUtil::MakeShapeWithType<T>({2, 3}), "lhs_arg_1");
+ auto lhs_arg_2 = Parameter(
+ &builder, 2, ShapeUtil::MakeShapeWithType<T>({2, 1}), "lhs_arg_2");
+ Dot(ConcatInDim(&builder, {lhs_arg_0, lhs_arg_1, lhs_arg_2}, 1),
+ rhs_constant);
std::unique_ptr<Array2D<T>> arg_0_value_array(
new Array2D<T>({{1.0f, 2.0f}, {3.0f, 4.0f}}));
@@ -781,15 +786,15 @@ XLA_TYPED_TEST(DotOperationTest_F16F32F64,
TF_ASSERT_OK_AND_ASSIGN(
auto arg_0_value,
this->client_->TransferToServer(
- *Literal::CreateR2FromArray2D<T>(*arg_0_value_array)));
+ *LiteralUtil::CreateR2FromArray2D<T>(*arg_0_value_array)));
TF_ASSERT_OK_AND_ASSIGN(
auto arg_1_value,
this->client_->TransferToServer(
- *Literal::CreateR2FromArray2D<T>(*arg_1_value_array)));
+ *LiteralUtil::CreateR2FromArray2D<T>(*arg_1_value_array)));
TF_ASSERT_OK_AND_ASSIGN(
auto arg_2_value,
this->client_->TransferToServer(
- *Literal::CreateR2FromArray2D<T>(*arg_2_value_array)));
+ *LiteralUtil::CreateR2FromArray2D<T>(*arg_2_value_array)));
Array2D<T> expected({{38.0f, 36.0f}, {93.0f, 91.0f}});
this->template ComputeAndCompareR2<T>(
@@ -811,16 +816,15 @@ XLA_TEST_F(DotOperationTest, DotOfGatherOptimizationWithConstRHSClassicMM) {
// Dot result to slice from: {{114, 105, 96}, {96, 105, 114}}
XlaBuilder builder(TestName());
- auto lhs_constant = builder.ConstantR2FromArray2D(*constant_lhs_array);
- auto rhs_constant = builder.ConstantR2FromArray2D(*constant_rhs_array);
- auto start_constant = builder.ConstantR1<int32>({1, 0});
- auto dynamic_slice =
- builder.DynamicSlice(lhs_constant, start_constant, {1, 6});
+ auto lhs_constant = ConstantR2FromArray2D(&builder, *constant_lhs_array);
+ auto rhs_constant = ConstantR2FromArray2D(&builder, *constant_rhs_array);
+ auto start_constant = ConstantR1<int32>(&builder, {1, 0});
+ auto dynamic_slice = DynamicSlice(lhs_constant, start_constant, {1, 6});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
- auto result = builder.DotGeneral(dynamic_slice, rhs_constant, dot_dnums);
+ DotGeneral(dynamic_slice, rhs_constant, dot_dnums);
Array2D<float> expected({{96.0, 105.0, 114.0}});
ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
@@ -839,25 +843,23 @@ XLA_TEST_F(DotOperationTest, DotOfGatherOptimizationWithConstLHSClassicMM) {
// Dot result to slice from: {{114, 105, 96}, {96, 105, 114}}
XlaBuilder builder(TestName());
- auto lhs_constant = builder.ConstantR2FromArray2D(*constant_lhs_array);
- auto rhs_constant = builder.ConstantR2FromArray2D(*constant_rhs_array);
- auto start_constant = builder.ConstantR1<int32>({0, 1});
- auto dynamic_slice =
- builder.DynamicSlice(rhs_constant, start_constant, {6, 1});
+ auto lhs_constant = ConstantR2FromArray2D(&builder, *constant_lhs_array);
+ auto rhs_constant = ConstantR2FromArray2D(&builder, *constant_rhs_array);
+ auto start_constant = ConstantR1<int32>(&builder, {0, 1});
+ auto dynamic_slice = DynamicSlice(rhs_constant, start_constant, {6, 1});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
- auto result = builder.DotGeneral(lhs_constant, dynamic_slice, dot_dnums);
+ DotGeneral(lhs_constant, dynamic_slice, dot_dnums);
Array2D<float> expected({{105.0}, {105.0}});
ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
}
-// TODO (b/69062148) Enable when Dot implements general contracting dimensions.
-XLA_TEST_F(DotOperationTest,
- DISABLED_ON_CPU(DISABLED_ON_GPU(DISABLED_ON_INTERPRETER(
- DotOfGatherOptimizationWithConstRHSReverseMM)))) {
+XLA_TEST_F(DotOperationTest, DotOfGatherOptimizationWithConstRHSReverseMM) {
std::unique_ptr<Array2D<float>> constant_lhs_array(
new Array2D<float>({{1.0, 2.0, 3.0},
{4.0, 5.0, 6.0},
@@ -870,25 +872,21 @@ XLA_TEST_F(DotOperationTest,
// Dot result to slice from: {{114, 96}, {105, 105}, {96, 114}}
XlaBuilder builder(TestName());
- auto lhs_constant = builder.ConstantR2FromArray2D(*constant_lhs_array);
- auto rhs_constant = builder.ConstantR2FromArray2D(*constant_rhs_array);
- auto start_constant = builder.ConstantR1<int32>({0, 1});
- auto dynamic_slice =
- builder.DynamicSlice(lhs_constant, start_constant, {6, 1});
+ auto lhs_constant = ConstantR2FromArray2D(&builder, *constant_lhs_array);
+ auto rhs_constant = ConstantR2FromArray2D(&builder, *constant_rhs_array);
+ auto start_constant = ConstantR1<int32>(&builder, {0, 1});
+ auto dynamic_slice = DynamicSlice(lhs_constant, start_constant, {6, 1});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(0);
dot_dnums.add_rhs_contracting_dimensions(1);
- auto result = builder.DotGeneral(dynamic_slice, rhs_constant, dot_dnums);
+ DotGeneral(dynamic_slice, rhs_constant, dot_dnums);
Array2D<float> expected({{105.0, 105.0}});
ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
}
-// TODO (b/69062148) Enable when Dot implements general contracting dimensions.
-XLA_TEST_F(DotOperationTest,
- DISABLED_ON_CPU(DISABLED_ON_GPU(DISABLED_ON_INTERPRETER(
- DotOfGatherOptimizationWithConstLHSReverseMM)))) {
+XLA_TEST_F(DotOperationTest, DotOfGatherOptimizationWithConstLHSReverseMM) {
std::unique_ptr<Array2D<float>> constant_lhs_array(
new Array2D<float>({{1.0, 2.0, 3.0},
{4.0, 5.0, 6.0},
@@ -901,25 +899,21 @@ XLA_TEST_F(DotOperationTest,
// Dot result to slice from: {{114, 96}, {105, 105}, {96, 114}}
XlaBuilder builder(TestName());
- auto lhs_constant = builder.ConstantR2FromArray2D(*constant_lhs_array);
- auto rhs_constant = builder.ConstantR2FromArray2D(*constant_rhs_array);
- auto start_constant = builder.ConstantR1<int32>({1, 0});
- auto dynamic_slice =
- builder.DynamicSlice(rhs_constant, start_constant, {1, 6});
+ auto lhs_constant = ConstantR2FromArray2D(&builder, *constant_lhs_array);
+ auto rhs_constant = ConstantR2FromArray2D(&builder, *constant_rhs_array);
+ auto start_constant = ConstantR1<int32>(&builder, {1, 0});
+ auto dynamic_slice = DynamicSlice(rhs_constant, start_constant, {1, 6});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(0);
dot_dnums.add_rhs_contracting_dimensions(1);
- auto result = builder.DotGeneral(lhs_constant, dynamic_slice, dot_dnums);
+ DotGeneral(lhs_constant, dynamic_slice, dot_dnums);
Array2D<float> expected({{96.0}, {105.0}, {114.0}});
ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
}
-// TODO (b/69062148) Enable when Dot implements general contracting dimensions.
-XLA_TEST_F(DotOperationTest,
- DISABLED_ON_CPU(DISABLED_ON_GPU(
- DISABLED_ON_INTERPRETER(DotOfGatherOptimizationWithConstRHSRows)))) {
+XLA_TEST_F(DotOperationTest, DotOfGatherOptimizationWithConstRHSRows) {
std::unique_ptr<Array2D<float>> constant_lhs_array(
new Array2D<float>({{1.0, 2.0},
{3.0, 4.0},
@@ -937,25 +931,21 @@ XLA_TEST_F(DotOperationTest,
// Dot result to slice from: {{132, 129, 126}, {126, 129, 132}}
XlaBuilder builder(TestName());
- auto lhs_constant = builder.ConstantR2FromArray2D(*constant_lhs_array);
- auto rhs_constant = builder.ConstantR2FromArray2D(*constant_rhs_array);
- auto start_constant = builder.ConstantR1<int32>({0, 1});
- auto dynamic_slice =
- builder.DynamicSlice(lhs_constant, start_constant, {6, 1});
+ auto lhs_constant = ConstantR2FromArray2D(&builder, *constant_lhs_array);
+ auto rhs_constant = ConstantR2FromArray2D(&builder, *constant_rhs_array);
+ auto start_constant = ConstantR1<int32>(&builder, {0, 1});
+ auto dynamic_slice = DynamicSlice(lhs_constant, start_constant, {6, 1});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(0);
dot_dnums.add_rhs_contracting_dimensions(0);
- auto result = builder.DotGeneral(dynamic_slice, rhs_constant, dot_dnums);
+ DotGeneral(dynamic_slice, rhs_constant, dot_dnums);
Array2D<float> expected({{126.0, 129.0, 132.0}});
ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
}
-// TODO (b/69062148) Enable when Dot implements general contracting dimensions.
-XLA_TEST_F(DotOperationTest,
- DISABLED_ON_CPU(DISABLED_ON_GPU(
- DISABLED_ON_INTERPRETER(DotOfGatherOptimizationWithConstLHSRows)))) {
+XLA_TEST_F(DotOperationTest, DotOfGatherOptimizationWithConstLHSRows) {
std::unique_ptr<Array2D<float>> constant_lhs_array(
new Array2D<float>({{1.0, 2.0},
{3.0, 4.0},
@@ -973,25 +963,21 @@ XLA_TEST_F(DotOperationTest,
// Dot result to slice from: {{132, 129, 126}, {126, 129, 132}}
XlaBuilder builder(TestName());
- auto lhs_constant = builder.ConstantR2FromArray2D(*constant_lhs_array);
- auto rhs_constant = builder.ConstantR2FromArray2D(*constant_rhs_array);
- auto start_constant = builder.ConstantR1<int32>({0, 1});
- auto dynamic_slice =
- builder.DynamicSlice(rhs_constant, start_constant, {6, 1});
+ auto lhs_constant = ConstantR2FromArray2D(&builder, *constant_lhs_array);
+ auto rhs_constant = ConstantR2FromArray2D(&builder, *constant_rhs_array);
+ auto start_constant = ConstantR1<int32>(&builder, {0, 1});
+ auto dynamic_slice = DynamicSlice(rhs_constant, start_constant, {6, 1});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(0);
dot_dnums.add_rhs_contracting_dimensions(0);
- auto result = builder.DotGeneral(lhs_constant, dynamic_slice, dot_dnums);
+ DotGeneral(lhs_constant, dynamic_slice, dot_dnums);
Array2D<float> expected({{129.0}, {129.0}});
ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
}
-// TODO (b/69062148) Enable when Dot implements general contracting dimensions.
-XLA_TEST_F(DotOperationTest,
- DISABLED_ON_CPU(DISABLED_ON_GPU(
- DISABLED_ON_INTERPRETER(DotOfGatherOptimizationWithConstRHSCols)))) {
+XLA_TEST_F(DotOperationTest, DotOfGatherOptimizationWithConstRHSCols) {
std::unique_ptr<Array2D<float>> constant_lhs_array(new Array2D<float>(
{{1.0, 2.0, 3.0, 4.0, 5.0, 6.0}, {6.0, 5.0, 4.0, 3.0, 2.0, 1.0}}));
std::unique_ptr<Array2D<float>> constant_rhs_array(
@@ -1001,25 +987,21 @@ XLA_TEST_F(DotOperationTest,
// Dot result to slice from: {{91, 168, 56}, {56, 168, 91}}
XlaBuilder builder(TestName());
- auto lhs_constant = builder.ConstantR2FromArray2D(*constant_lhs_array);
- auto rhs_constant = builder.ConstantR2FromArray2D(*constant_rhs_array);
- auto start_constant = builder.ConstantR1<int32>({1, 0});
- auto dynamic_slice =
- builder.DynamicSlice(lhs_constant, start_constant, {1, 6});
+ auto lhs_constant = ConstantR2FromArray2D(&builder, *constant_lhs_array);
+ auto rhs_constant = ConstantR2FromArray2D(&builder, *constant_rhs_array);
+ auto start_constant = ConstantR1<int32>(&builder, {1, 0});
+ auto dynamic_slice = DynamicSlice(lhs_constant, start_constant, {1, 6});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(1);
- auto result = builder.DotGeneral(dynamic_slice, rhs_constant, dot_dnums);
+ DotGeneral(dynamic_slice, rhs_constant, dot_dnums);
Array2D<float> expected({{56.0, 168.0, 91.0}});
ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
}
-// TODO (b/69062148) Enable when Dot implements general contracting dimensions.
-XLA_TEST_F(DotOperationTest,
- DISABLED_ON_CPU(DISABLED_ON_GPU(
- DISABLED_ON_INTERPRETER(DotOfGatherOptimizationWithConstLHSCols)))) {
+XLA_TEST_F(DotOperationTest, DotOfGatherOptimizationWithConstLHSCols) {
std::unique_ptr<Array2D<float>> constant_lhs_array(new Array2D<float>(
{{1.0, 2.0, 3.0, 4.0, 5.0, 6.0}, {6.0, 5.0, 4.0, 3.0, 2.0, 1.0}}));
std::unique_ptr<Array2D<float>> constant_rhs_array(
@@ -1029,19 +1011,41 @@ XLA_TEST_F(DotOperationTest,
// Dot result to slice from: {{91, 168, 56}, {56, 168, 91}}
XlaBuilder builder(TestName());
- auto lhs_constant = builder.ConstantR2FromArray2D(*constant_lhs_array);
- auto rhs_constant = builder.ConstantR2FromArray2D(*constant_rhs_array);
- auto start_constant = builder.ConstantR1<int32>({1, 0});
- auto dynamic_slice =
- builder.DynamicSlice(rhs_constant, start_constant, {1, 6});
+ auto lhs_constant = ConstantR2FromArray2D(&builder, *constant_lhs_array);
+ auto rhs_constant = ConstantR2FromArray2D(&builder, *constant_rhs_array);
+ auto start_constant = ConstantR1<int32>(&builder, {1, 0});
+ auto dynamic_slice = DynamicSlice(rhs_constant, start_constant, {1, 6});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(1);
- auto result = builder.DotGeneral(lhs_constant, dynamic_slice, dot_dnums);
+ DotGeneral(lhs_constant, dynamic_slice, dot_dnums);
Array2D<float> expected({{168.0}, {168.0}});
ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
}
+
+XLA_TEST_F(DotOperationTest, DotRank2AndRank2NonDefaultContractionDims) {
+ XlaBuilder builder(TestName());
+
+ Array2D<float> lhs_array({{1.0f, 2.0f}, {3.0f, 4.0f}});
+ auto lhs_constant = ConstantR2FromArray2D(&builder, lhs_array);
+
+ Array2D<float> rhs_array({{5.0f, 6.0f}, {7.0f, 8.0f}});
+ auto rhs_constant = ConstantR2FromArray2D(&builder, rhs_array);
+
+ Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
+ DotDimensionNumbers dot_dnums;
+ dot_dnums.add_lhs_contracting_dimensions(0);
+ dot_dnums.add_rhs_contracting_dimensions(0);
+ DotGeneral(lhs_constant, rhs_constant, dot_dnums);
+
+ Array2D<float> expected({
+ {26.f, 30.f},
+ {38.f, 44.f},
+ });
+
+ ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
+}
} // namespace
} // namespace xla
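
Beyond the renames, dot_operation_test.cc re-enables six DotOfGatherOptimization tests, dropping their DISABLED_ON_CPU/DISABLED_ON_GPU/DISABLED_ON_INTERPRETER wrappers and the b/69062148 TODOs, presumably because Dot now supports general contracting dimensions. The new DotRank2AndRank2NonDefaultContractionDims test exercises exactly that: contracting dimension 0 of both rank-2 operands, which computes transpose(lhs) * rhs. The expected values check out:

    // lhs = {{1, 2}, {3, 4}},  rhs = {{5, 6}, {7, 8}}
    // contracting lhs dim 0 with rhs dim 0:
    //   result[i][j] = sum over k of lhs[k][i] * rhs[k][j]
    //   result[0][0] = 1*5 + 3*7 = 26    result[0][1] = 1*6 + 3*8 = 30
    //   result[1][0] = 2*5 + 4*7 = 38    result[1][1] = 2*6 + 4*8 = 44

matching the Array2D<float>({{26, 30}, {38, 44}}) the test compares against.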
diff --git a/tensorflow/compiler/xla/tests/dynamic_ops_test.cc b/tensorflow/compiler/xla/tests/dynamic_ops_test.cc
index a918c91f07..88ac96d6b0 100644
--- a/tensorflow/compiler/xla/tests/dynamic_ops_test.cc
+++ b/tensorflow/compiler/xla/tests/dynamic_ops_test.cc
@@ -124,11 +124,11 @@ class DynamicSliceTest : public ClientLibraryTestBase {
// vector<bool> is special so that it cannot be an ArraySlice<bool>, which
// is what the code below wants. So instead we do this.
Literal input_values =
- std::move(*Literal::CreateR1(input_values_int)
+ std::move(*LiteralUtil::CreateR1(input_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
Literal expected_values =
- std::move(*Literal::CreateR1(expected_values_int)
+ std::move(*LiteralUtil::CreateR1(expected_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
@@ -138,8 +138,8 @@ class DynamicSliceTest : public ClientLibraryTestBase {
std::unique_ptr<GlobalData> start_data = CreateR1Parameter<IndexT>(
slice_starts, 0, "slice_starts", &builder, &starts);
// Build dynamic slice computation.
- auto input = builder.ConstantLiteral(input_values);
- builder.DynamicSlice(input, starts, slice_sizes);
+ auto input = ConstantLiteral(&builder, input_values);
+ DynamicSlice(input, starts, slice_sizes);
// Run computation and compare against expected values.
ComputeAndCompareLiteral(&builder, expected_values, {start_data.get()});
}
@@ -150,11 +150,11 @@ class DynamicSliceTest : public ClientLibraryTestBase {
const std::vector<int64>& slice_sizes,
const Array2D<int>& expected_values_int) {
Literal input_values =
- std::move(*Literal::CreateR2FromArray2D(input_values_int)
+ std::move(*LiteralUtil::CreateR2FromArray2D(input_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
Literal expected_values =
- std::move(*Literal::CreateR2FromArray2D(expected_values_int)
+ std::move(*LiteralUtil::CreateR2FromArray2D(expected_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
@@ -164,8 +164,8 @@ class DynamicSliceTest : public ClientLibraryTestBase {
std::unique_ptr<GlobalData> start_data = CreateR1Parameter<IndexT>(
slice_starts, 0, "slice_starts", &builder, &starts);
// Build dynamic slice computation.
- auto input = builder.ConstantLiteral(input_values);
- builder.DynamicSlice(input, starts, slice_sizes);
+ auto input = ConstantLiteral(&builder, input_values);
+ DynamicSlice(input, starts, slice_sizes);
// Run computation and compare against expected values.
ComputeAndCompareLiteral(&builder, expected_values, {start_data.get()});
}
@@ -176,11 +176,11 @@ class DynamicSliceTest : public ClientLibraryTestBase {
const std::vector<int64>& slice_sizes,
const Array3D<int>& expected_values_int) {
Literal input_values =
- std::move(*Literal::CreateR3FromArray3D(input_values_int)
+ std::move(*LiteralUtil::CreateR3FromArray3D(input_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
Literal expected_values =
- std::move(*Literal::CreateR3FromArray3D(expected_values_int)
+ std::move(*LiteralUtil::CreateR3FromArray3D(expected_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
@@ -190,8 +190,8 @@ class DynamicSliceTest : public ClientLibraryTestBase {
std::unique_ptr<GlobalData> start_data = CreateR1Parameter<IndexT>(
slice_starts, 0, "slice_starts", &builder, &starts);
// Build dynamic slice computation.
- auto input = builder.ConstantLiteral(input_values);
- builder.DynamicSlice(input, starts, slice_sizes);
+ auto input = ConstantLiteral(&builder, input_values);
+ DynamicSlice(input, starts, slice_sizes);
// Run computation and compare against expected values.
ComputeAndCompareLiteral(&builder, expected_values, {start_data.get()});
}
@@ -202,18 +202,28 @@ XLA_TEST_F(DynamicSliceTest, Int32R1) { TestR1<int32, int32>(); }
XLA_TEST_F(DynamicSliceTest, Int32R1OOB) { TestR1OOB<int32, int32>(); }
XLA_TEST_F(DynamicSliceTest, Int64R1) { TestR1<int64, float>(); }
XLA_TEST_F(DynamicSliceTest, UInt64R1) { TestR1<uint64, float>(); }
+XLA_TEST_F(DynamicSliceTest, UInt32R1OOB) {
+ RunR1<uint32, int32>({0, 1, 2, 3, 4}, {2147483648u}, {2}, {3, 4});
+}
XLA_TEST_F(DynamicSliceTest, Int32R2BF16) { TestR2<int32, bfloat16>(); }
XLA_TEST_F(DynamicSliceTest, Int32R2) { TestR2<int32, int32>(); }
XLA_TEST_F(DynamicSliceTest, Int32R2OOB) { TestR2OOB<int32, int32>(); }
XLA_TEST_F(DynamicSliceTest, Int64R2) { TestR2<int64, float>(); }
XLA_TEST_F(DynamicSliceTest, UInt64R2) { TestR2<uint64, int32>(); }
+XLA_TEST_F(DynamicSliceTest, UInt32R2OOB) {
+ RunR2<uint32, int32>({{0, 1}, {2, 3}}, {2147483648u, 0}, {1, 1}, {{2}});
+}
XLA_TEST_F(DynamicSliceTest, Int32R3BF16) { TestR3<int32, bfloat16>(); }
XLA_TEST_F(DynamicSliceTest, Int32R3) { TestR3<int32, float>(); }
XLA_TEST_F(DynamicSliceTest, Int32R3OOB) { TestR3OOB<int32, float>(); }
XLA_TEST_F(DynamicSliceTest, Int64R3) { TestR3<int64, float>(); }
XLA_TEST_F(DynamicSliceTest, UInt64R3) { TestR3<uint64, float>(); }
+XLA_TEST_F(DynamicSliceTest, UInt32R3OOB) {
+ RunR3<uint32, int32>({{{0, 1}, {2, 3}}, {{4, 5}, {6, 7}}},
+ {2147483648u, 0, 2147483648u}, {1, 1, 1}, {{{5}}});
+}
XLA_TEST_F(DynamicSliceTest, Int32R1Pred) {
// Slice at dimension start.
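
The new UInt32R1OOB/UInt32R2OOB/UInt32R3OOB cases above pin down out-of-bounds behavior for unsigned start indices: a start of 2147483648 (2^31) lies past the end of every dimension, and the expected outputs imply each start index is clamped so the slice still fits. Worked through for the R1 case:

    // input = {0, 1, 2, 3, 4}, slice size 2, requested start = 2147483648
    // clamped start = dimension size - slice size = 5 - 2 = 3
    // result = {3, 4}, which is what the test expects

The R2 and R3 cases clamp each start index per dimension the same way ({{2}} and {{{5}}} respectively).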
@@ -349,15 +359,15 @@ class DynamicUpdateSliceTest : public ClientLibraryTestBase {
void RunR0(int input_value_int, int update_value_int,
const std::vector<IndexT> slice_starts, int expected_value_int) {
Literal input_value =
- std::move(*Literal::CreateR0(input_value_int)
+ std::move(*LiteralUtil::CreateR0(input_value_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
Literal update_value =
- std::move(*Literal::CreateR0(update_value_int)
+ std::move(*LiteralUtil::CreateR0(update_value_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
Literal expected_value =
- std::move(*Literal::CreateR0(expected_value_int)
+ std::move(*LiteralUtil::CreateR0(expected_value_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
@@ -367,9 +377,9 @@ class DynamicUpdateSliceTest : public ClientLibraryTestBase {
std::unique_ptr<GlobalData> start_data = CreateR1Parameter<IndexT>(
slice_starts, 0, "slice_starts", &builder, &starts);
// Build dynamic slice computation.
- auto input = builder.ConstantLiteral(input_value);
- auto update = builder.ConstantLiteral(update_value);
- builder.DynamicUpdateSlice(input, update, starts);
+ auto input = ConstantLiteral(&builder, input_value);
+ auto update = ConstantLiteral(&builder, update_value);
+ DynamicUpdateSlice(input, update, starts);
// Run computation and compare against expected values.
ComputeAndCompareLiteral(&builder, expected_value, {start_data.get()});
}
@@ -380,15 +390,15 @@ class DynamicUpdateSliceTest : public ClientLibraryTestBase {
const std::vector<IndexT> slice_starts,
tensorflow::gtl::ArraySlice<int> expected_values_int) {
Literal input_values =
- std::move(*Literal::CreateR1(input_values_int)
+ std::move(*LiteralUtil::CreateR1(input_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
Literal update_values =
- std::move(*Literal::CreateR1(update_values_int)
+ std::move(*LiteralUtil::CreateR1(update_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
Literal expected_values =
- std::move(*Literal::CreateR1(expected_values_int)
+ std::move(*LiteralUtil::CreateR1(expected_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
@@ -398,9 +408,9 @@ class DynamicUpdateSliceTest : public ClientLibraryTestBase {
std::unique_ptr<GlobalData> start_data = CreateR1Parameter<IndexT>(
slice_starts, 0, "slice_starts", &builder, &starts);
// Build dynamic update slice computation.
- auto input = builder.ConstantLiteral(input_values);
- auto update = builder.ConstantLiteral(update_values);
- builder.DynamicUpdateSlice(input, update, starts);
+ auto input = ConstantLiteral(&builder, input_values);
+ auto update = ConstantLiteral(&builder, update_values);
+ DynamicUpdateSlice(input, update, starts);
// Run computation and compare against expected values.
ComputeAndCompareLiteral(&builder, expected_values, {start_data.get()});
}
@@ -411,15 +421,15 @@ class DynamicUpdateSliceTest : public ClientLibraryTestBase {
const std::vector<IndexT> slice_starts,
const Array2D<int>& expected_values_int) {
Literal input_values =
- std::move(*Literal::CreateR2FromArray2D(input_values_int)
+ std::move(*LiteralUtil::CreateR2FromArray2D(input_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
Literal update_values =
- std::move(*Literal::CreateR2FromArray2D(update_values_int)
+ std::move(*LiteralUtil::CreateR2FromArray2D(update_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
Literal expected_values =
- std::move(*Literal::CreateR2FromArray2D(expected_values_int)
+ std::move(*LiteralUtil::CreateR2FromArray2D(expected_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
@@ -429,9 +439,9 @@ class DynamicUpdateSliceTest : public ClientLibraryTestBase {
std::unique_ptr<GlobalData> start_data = CreateR1Parameter<IndexT>(
slice_starts, 0, "slice_starts", &builder, &starts);
// Build dynamic update slice computation.
- auto input = builder.ConstantLiteral(input_values);
- auto update = builder.ConstantLiteral(update_values);
- builder.DynamicUpdateSlice(input, update, starts);
+ auto input = ConstantLiteral(&builder, input_values);
+ auto update = ConstantLiteral(&builder, update_values);
+ DynamicUpdateSlice(input, update, starts);
// Run computation and compare against expected values.
ComputeAndCompareLiteral(&builder, expected_values, {start_data.get()});
}
@@ -442,15 +452,15 @@ class DynamicUpdateSliceTest : public ClientLibraryTestBase {
const std::vector<IndexT> slice_starts,
const Array3D<int>& expected_values_int) {
Literal input_values =
- std::move(*Literal::CreateR3FromArray3D(input_values_int)
+ std::move(*LiteralUtil::CreateR3FromArray3D(input_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
Literal update_values =
- std::move(*Literal::CreateR3FromArray3D(update_values_int)
+ std::move(*LiteralUtil::CreateR3FromArray3D(update_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
Literal expected_values =
- std::move(*Literal::CreateR3FromArray3D(expected_values_int)
+ std::move(*LiteralUtil::CreateR3FromArray3D(expected_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
.ValueOrDie());
@@ -460,9 +470,9 @@ class DynamicUpdateSliceTest : public ClientLibraryTestBase {
std::unique_ptr<GlobalData> start_data = CreateR1Parameter<IndexT>(
slice_starts, 0, "slice_starts", &builder, &starts);
// Build dynamic update slice computation.
- auto input = builder.ConstantLiteral(input_values);
- auto update = builder.ConstantLiteral(update_values);
- builder.DynamicUpdateSlice(input, update, starts);
+ auto input = ConstantLiteral(&builder, input_values);
+ auto update = ConstantLiteral(&builder, update_values);
+ DynamicUpdateSlice(input, update, starts);
// Run computation and compare against expected values.
ComputeAndCompareLiteral(&builder, expected_values, {start_data.get()});
}
@@ -508,8 +518,8 @@ class DynamicUpdateSliceTest : public ClientLibraryTestBase {
XlaOp update;
std::unique_ptr<GlobalData> update_data = CreateR3Parameter<T>(
update_values, 1, "update_values", &builder, &update);
- auto starts = builder.ConstantR1<int32>({index, 0, 0});
- builder.DynamicUpdateSlice(input, update, starts);
+ auto starts = ConstantR1<int32>(&builder, {index, 0, 0});
+ DynamicUpdateSlice(input, update, starts);
// Run computation and compare against expected values.
ComputeAndCompareR3<T>(&builder, expected_values,
@@ -520,7 +530,7 @@ class DynamicUpdateSliceTest : public ClientLibraryTestBase {
template <typename NativeT>
void DumpArray(const string& name, const Array3D<NativeT> values) {
std::unique_ptr<Literal> literal =
- Literal::CreateR3FromArray3D<NativeT>(values);
+ LiteralUtil::CreateR3FromArray3D<NativeT>(values);
LOG(INFO) << name << ":" << literal->ToString();
}
};
@@ -530,21 +540,32 @@ XLA_TEST_F(DynamicUpdateSliceTest, Int32R0) { TestR0<int32, float>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int64R0) { TestR0<int64, float>(); }
XLA_TEST_F(DynamicUpdateSliceTest, UInt64R0) { TestR0<uint64, float>(); }
-// TODO(b/71820067): The CPU parallel backend failed for this on 2018-01-10.
XLA_TEST_F(DynamicUpdateSliceTest, Int32R1BF16) { TestR1<int32, bfloat16>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int32R1) { TestR1<int32, float>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int64R1) { TestR1<int64, float>(); }
XLA_TEST_F(DynamicUpdateSliceTest, UInt64R1) { TestR1<uint64, float>(); }
+XLA_TEST_F(DynamicUpdateSliceTest, UInt32R1OOB) {
+ RunR1<uint32, int32>({0, 1, 2, 3, 4}, {5, 6}, {2147483648u}, {0, 1, 2, 5, 6});
+}
XLA_TEST_F(DynamicUpdateSliceTest, Int32R2BF16) { TestR2<int32, bfloat16>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int32R2) { TestR2<int32, float>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int64R2) { TestR2<int64, int64>(); }
XLA_TEST_F(DynamicUpdateSliceTest, UInt64R2) { TestR2<uint64, int32>(); }
+XLA_TEST_F(DynamicUpdateSliceTest, UInt32R2OOB) {
+ RunR2<uint32, int32>({{0, 1}, {2, 3}}, {{4}}, {2147483648u, 0},
+ {{0, 1}, {4, 3}});
+}
XLA_TEST_F(DynamicUpdateSliceTest, Int32R3BF16) { TestR3<int32, bfloat16>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int32R3) { TestR3<int32, float>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int64R3) { TestR3<int64, int64>(); }
XLA_TEST_F(DynamicUpdateSliceTest, UInt64R3) { TestR3<uint64, uint64>(); }
+XLA_TEST_F(DynamicUpdateSliceTest, UInt32R3OOB) {
+ RunR3<uint32, int32>({{{0, 1}, {2, 3}}, {{4, 5}, {6, 7}}}, {{{8}}},
+ {2147483648u, 0, 2147483648u},
+ {{{0, 1}, {2, 3}}, {{4, 8}, {6, 7}}});
+}
XLA_TEST_F(DynamicUpdateSliceTest, Int32OOBBF16) { TestOOB<int32, bfloat16>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int32OOB) { TestOOB<int32, float>(); }
@@ -695,17 +716,17 @@ void BM_DynamicSlice(int num_iters) {
XlaBuilder builder("DynamicSlice");
// Create input as a constant: shape [1, 2, 3, 4]
- auto input_literal = Literal::CreateR4(
+ auto input_literal = LiteralUtil::CreateR4(
{{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
{{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}});
- auto input = builder.ConstantLiteral(*input_literal);
+ auto input = ConstantLiteral(&builder, *input_literal);
// Create dynamic slice start indices as a parameter: shape [4]
auto start_indices_shape = ShapeUtil::MakeShape(S32, {4});
auto start_indices =
- builder.Parameter(0, start_indices_shape, "start_indices");
+ Parameter(&builder, 0, start_indices_shape, "start_indices");
// Add DynamicSlice op to the computation.
- builder.DynamicSlice(input, start_indices, {1, 1, 1, 1});
+ DynamicSlice(input, start_indices, {1, 1, 1, 1});
auto computation = builder.Build().ConsumeValueOrDie();
// Initialize and transfer parameter buffer.
@@ -715,7 +736,7 @@ void BM_DynamicSlice(int num_iters) {
start_indices_shape, &allocator, /*device_ordinal=*/0)
.ConsumeValueOrDie();
- auto start_indices_literal = Literal::CreateR1<int32>({0, 1, 2, 3});
+ auto start_indices_literal = LiteralUtil::CreateR1<int32>({0, 1, 2, 3});
auto stream =
client->mutable_backend()->BorrowStream(device_ordinal).ValueOrDie();
ASSERT_IS_OK(transfer_manager->TransferLiteralToDevice(
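The new UInt32R1OOB/UInt32R2OOB/UInt32R3OOB cases above pin down one behavior: a start index that is wildly out of bounds (2147483648u) must not crash and must read as if clamped so the slice stays inside the operand. A minimal reference sketch of that clamping, assuming the clamp-to-valid-range semantics these expected values imply (an illustration, not XLA's implementation):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Clamp the start index into [0, input.size() - size], then copy the slice.
std::vector<int32_t> ReferenceDynamicSliceR1(const std::vector<int32_t>& input,
                                             uint64_t start, size_t size) {
  const uint64_t limit = input.size() - size;
  const auto offset =
      static_cast<std::ptrdiff_t>(std::min<uint64_t>(start, limit));
  return std::vector<int32_t>(input.begin() + offset,
                              input.begin() + offset + size);
}

// ReferenceDynamicSliceR1({0, 1, 2, 3, 4}, 2147483648u, 2) yields {3, 4},
// matching the UInt32R1OOB expectation above.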
diff --git a/tensorflow/compiler/xla/tests/execution_profile_test.cc b/tensorflow/compiler/xla/tests/execution_profile_test.cc
index a6ba6db5d3..ebba13c5b3 100644
--- a/tensorflow/compiler/xla/tests/execution_profile_test.cc
+++ b/tensorflow/compiler/xla/tests/execution_profile_test.cc
@@ -31,10 +31,10 @@ XLA_TEST_F(ExecutionProfileTest, ExecuteWithExecutionProfile) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> input,
client_->TransferToServer(
- *Literal::CreateR2F32Linspace(1e0, 1e5, 256, 256)));
+ *LiteralUtil::CreateR2F32Linspace(1e0, 1e5, 256, 256)));
XlaBuilder b(TestName() + ".add");
- b.Dot(b.Parameter(0, shape, "param_0"), b.Parameter(1, shape, "param_1"));
+ Dot(Parameter(&b, 0, shape, "param_0"), Parameter(&b, 1, shape, "param_1"));
TF_ASSERT_OK_AND_ASSIGN(XlaComputation dot_product, b.Build());
ExecutionProfile execution_profile;
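The Dot/Parameter rewrite here is the same mechanical migration applied throughout this diff: XlaBuilder member functions become free functions that take the builder explicitly (for parameters and constants) or infer it from their operands. A small sketch of the new call shape, assuming only the builder API visible in these hunks:

#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"

xla::XlaComputation BuildDot(const xla::Shape& shape) {
  xla::XlaBuilder b("dot");
  // Before: b.Dot(b.Parameter(0, shape, "x"), b.Parameter(1, shape, "y"));
  auto x = xla::Parameter(&b, 0, shape, "x");
  auto y = xla::Parameter(&b, 1, shape, "y");
  xla::Dot(x, y);  // The last enqueued op becomes the computation root.
  return b.Build().ConsumeValueOrDie();
}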
diff --git a/tensorflow/compiler/xla/tests/exhaustive_f32_elementwise_op_test.cc b/tensorflow/compiler/xla/tests/exhaustive_f32_elementwise_op_test.cc
index 0a37e4d423..86bfaea4ef 100644
--- a/tensorflow/compiler/xla/tests/exhaustive_f32_elementwise_op_test.cc
+++ b/tensorflow/compiler/xla/tests/exhaustive_f32_elementwise_op_test.cc
@@ -39,7 +39,7 @@ class ExhaustiveF32ElementwiseOpTest
XlaBuilder builder(TestName());
std::unique_ptr<Literal> input_literal =
- Literal::CreateFromDimensions(F32, {input_size});
+ LiteralUtil::CreateFromDimensions(F32, {input_size});
for (int64 i = begin; i < end; i++) {
if (i >= known_incorrect_range.first &&
i < known_incorrect_range.second) {
@@ -54,7 +54,7 @@ class ExhaustiveF32ElementwiseOpTest
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> input_data,
client_->TransferToServer(*input_literal));
- auto input = builder.Parameter(0, input_literal->shape(), "input");
+ auto input = Parameter(&builder, 0, input_literal->shape(), "input");
enqueue_op(&builder, input);
std::vector<float> expected_result;
@@ -79,8 +79,8 @@ XLA_TEST_P(ExhaustiveF32ElementwiseOpTest, LogF32) {
#endif
ExhaustivelyTestF32Op(
- [](XlaBuilder* builder, const XlaOp& input) { builder->Log(input); },
- std::log, known_incorrect_range);
+ [](XlaBuilder* builder, const XlaOp& input) { Log(input); }, std::log,
+ known_incorrect_range);
}
XLA_TEST_P(ExhaustiveF32ElementwiseOpTest, ExpF32) {
@@ -95,14 +95,14 @@ XLA_TEST_P(ExhaustiveF32ElementwiseOpTest, ExpF32) {
#endif
ExhaustivelyTestF32Op(
- [](XlaBuilder* builder, const XlaOp& input) { builder->Exp(input); },
- std::exp, known_incorrect_range);
+ [](XlaBuilder* builder, const XlaOp& input) { Exp(input); }, std::exp,
+ known_incorrect_range);
}
XLA_TEST_P(ExhaustiveF32ElementwiseOpTest, TanhF32) {
ExhaustivelyTestF32Op(
- [](XlaBuilder* builder, const XlaOp& input) { builder->Tanh(input); },
- std::tanh, /*known_incorrect_range=*/{0, 0});
+ [](XlaBuilder* builder, const XlaOp& input) { Tanh(input); }, std::tanh,
+ /*known_incorrect_range=*/{0, 0});
}
std::vector<std::pair<int64, int64>> CreateExhaustiveParameters() {
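One side effect visible in the lambdas above: their XlaBuilder* parameter is now unused, because the free functions recover the builder from the XlaOp operand itself. A minimal sketch of the new shape (signature taken from the hunks above):

auto log_op = [](xla::XlaBuilder* /*unused*/, const xla::XlaOp& input) {
  xla::Log(input);  // XlaOp carries its builder; no explicit builder needed.
};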
diff --git a/tensorflow/compiler/xla/tests/filecheck.cc b/tensorflow/compiler/xla/tests/filecheck.cc
index 93d1c921c4..dcb469087e 100644
--- a/tensorflow/compiler/xla/tests/filecheck.cc
+++ b/tensorflow/compiler/xla/tests/filecheck.cc
@@ -76,6 +76,11 @@ StatusOr<bool> RunFileCheck(const string& input, const string& pattern) {
XLA_LOG_LINES(tensorflow::WARNING, input);
LOG(WARNING) << "FileCheck pattern was:";
XLA_LOG_LINES(tensorflow::WARNING, pattern);
+ } else if (!standard_error.empty()) {
+ LOG(INFO) << "FileCheck stderr:";
+ XLA_LOG_LINES(tensorflow::INFO, standard_error);
+ LOG(INFO) << "FileCheck input was:";
+ XLA_LOG_LINES(tensorflow::INFO, input);
}
return succeeded;
}
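For context, a hedged usage sketch of RunFileCheck as extended here (the HLO variable and CHECK pattern are hypothetical): with this change, a successful match that still produced stderr output now surfaces FileCheck's diagnostics at INFO rather than dropping them.

TF_ASSERT_OK_AND_ASSIGN(bool matched,
                        RunFileCheck(hlo_text, "CHECK: fusion"));
EXPECT_TRUE(matched);  // Any FileCheck stderr is now logged even on success.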
diff --git a/tensorflow/compiler/xla/tests/floor_ceil_test.cc b/tensorflow/compiler/xla/tests/floor_ceil_test.cc
index 71eb914a8e..30dc639f11 100644
--- a/tensorflow/compiler/xla/tests/floor_ceil_test.cc
+++ b/tensorflow/compiler/xla/tests/floor_ceil_test.cc
@@ -42,12 +42,12 @@ class FloorCeilTest : public ClientLibraryTestBase {
LOG(INFO) << "input: {" << tensorflow::str_util::Join(expected, ", ")
<< "}";
XlaBuilder builder(TestName());
- auto c = builder.ConstantR1<float>(input);
+ auto c = ConstantR1<float>(&builder, input);
if (f == kCeil) {
- builder.Ceil(c);
+ Ceil(c);
} else {
ASSERT_EQ(kFloor, f);
- builder.Floor(c);
+ Floor(c);
}
ComputeAndCompareR1<float>(&builder, expected, /*arguments=*/{});
}
@@ -55,12 +55,12 @@ class FloorCeilTest : public ClientLibraryTestBase {
void TestR0F32(float input, float expected, Function f) {
LOG(INFO) << "input: " << expected;
XlaBuilder builder(TestName());
- auto c = builder.ConstantR0<float>(input);
+ auto c = ConstantR0<float>(&builder, input);
if (f == kCeil) {
- builder.Ceil(c);
+ Ceil(c);
} else {
ASSERT_EQ(kFloor, f);
- builder.Floor(c);
+ Floor(c);
}
ComputeAndCompareR0<float>(&builder, expected, /*arguments=*/{});
}
diff --git a/tensorflow/compiler/xla/tests/fmax_test.cc b/tensorflow/compiler/xla/tests/fmax_test.cc
index 73f029b59b..0254ae1baa 100644
--- a/tensorflow/compiler/xla/tests/fmax_test.cc
+++ b/tensorflow/compiler/xla/tests/fmax_test.cc
@@ -28,11 +28,11 @@ class FmaxSimpleTest : public ClientLibraryTestBase {};
TEST_F(FmaxSimpleTest, FmaxTenValues) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>(
- {-0.0, 1.0, 2.0, -3.0, -4.0, 5.0, 6.0, -7.0, -8.0, 9.0});
- auto y = builder.ConstantR1<float>(
- {-0.0, -1.0, -2.0, 3.0, 4.0, -5.0, -6.0, 7.0, 8.0, -9.0});
- builder.Max(x, y);
+ auto x = ConstantR1<float>(
+ &builder, {-0.0, 1.0, 2.0, -3.0, -4.0, 5.0, 6.0, -7.0, -8.0, 9.0});
+ auto y = ConstantR1<float>(
+ &builder, {-0.0, -1.0, -2.0, 3.0, 4.0, -5.0, -6.0, 7.0, 8.0, -9.0});
+ Max(x, y);
std::vector<float> expected = {-0.0, 1.0, 2.0, 3.0, 4.0,
5.0, 6.0, 7.0, 8.0, 9.0};
diff --git a/tensorflow/compiler/xla/tests/fusion_test.cc b/tensorflow/compiler/xla/tests/fusion_test.cc
index e6f79b5ac5..dc64477935 100644
--- a/tensorflow/compiler/xla/tests/fusion_test.cc
+++ b/tensorflow/compiler/xla/tests/fusion_test.cc
@@ -26,13 +26,14 @@ limitations under the License.
#include "tensorflow/compiler/xla/array2d.h"
#include "tensorflow/compiler/xla/client/client_library.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/primitive_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
+#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/service/platform_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
@@ -89,7 +90,7 @@ class FusionTest : public HloTestBase {
HloInstruction* hlos[4];
for (int i = 0; i < Arity; ++i) {
hlos[i + 1] = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2FromArray2D(operand_data[i])));
+ LiteralUtil::CreateR2FromArray2D(operand_data[i])));
}
auto answer_shape =
ShapeUtil::MakeShape(prim_type, {test_width, test_height});
@@ -115,7 +116,7 @@ class FusionTest : public HloTestBase {
ArraySlice<HloInstruction*>(hlos, 0, Arity + 1),
HloInstruction::FusionKind::kLoop);
- auto expected = Literal::CreateR2FromArray2D(answer_data);
+ auto expected = LiteralUtil::CreateR2FromArray2D(answer_data);
auto actual = ExecuteAndTransfer(std::move(hlo_module), {});
if (primitive_util::IsFloatingPointType(prim_type)) {
EXPECT_TRUE(LiteralTestUtil::Near(*expected, *actual, ErrorSpec(1e-4)));
@@ -186,27 +187,28 @@ XLA_TEST_F(FusionTest, Test) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0}, {2.0}, {3.0}})));
+ LiteralUtil::CreateR2<float>({{1.0}, {2.0}, {3.0}})));
auto const1 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{-1.0}, {-1.0}, {-1.0}})));
+ LiteralUtil::CreateR2<float>({{-1.0}, {-1.0}, {-1.0}})));
auto add2 = builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {3, 1}), HloOpcode::kAdd, const0, const1));
auto reshape3 = builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {1, 3}), add2, {1, 0}));
auto const4 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.62, 2.72, 3.14}})));
+ LiteralUtil::CreateR2<float>({{1.62, 2.72, 3.14}})));
auto concat5 = builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(F32, {2, 3}), {reshape3, const4}, 0));
auto const6 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 1.0, 1.0}, {0.0, 0.0, 0.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 1.0, 1.0}, {0.0, 0.0, 0.0}})));
auto negate7 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2, 3}), HloOpcode::kNegate, const6));
auto add8 = builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {2, 3}), HloOpcode::kAdd, concat5, negate7));
auto const9 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{0.5, 0.5, 0.5}, {0.5, 0.5, 0.5}})));
- auto const10 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<bool>({{true, false, true}, {false, true, false}})));
+ LiteralUtil::CreateR2<float>({{0.5, 0.5, 0.5}, {0.5, 0.5, 0.5}})));
+ auto const10 = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2<bool>(
+ {{true, false, true}, {false, true, false}})));
auto select11 = builder.AddInstruction(
HloInstruction::CreateTernary(ShapeUtil::MakeShape(F32, {2, 3}),
HloOpcode::kSelect, const10, add8, const9));
@@ -222,7 +224,7 @@ XLA_TEST_F(FusionTest, Test) {
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR2<float>({{0.5}, {2.72}}),
+ *LiteralUtil::CreateR2<float>({{0.5}, {2.72}}),
*ExecuteAndTransfer(std::move(hlo_module), {}), ErrorSpec(1e-4)));
}
@@ -233,11 +235,11 @@ XLA_TEST_F(FusionTest, Parameter) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{1.0, 2.0, 3.0}})));
+ LiteralUtil::CreateR2<float>({{1.0, 2.0, 3.0}})));
auto copy1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {1, 3}), HloOpcode::kCopy, const0));
auto const2 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{-2.0, -2.0, -2.0}})));
+ LiteralUtil::CreateR2<float>({{-2.0, -2.0, -2.0}})));
// add3 = copy1 + const2 = const0 + const2 = {1,2,3} + {-2,-2,-2} = {-1,0,+1}
auto add3 = builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {1, 3}), HloOpcode::kAdd, copy1, const2));
@@ -248,7 +250,7 @@ XLA_TEST_F(FusionTest, Parameter) {
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR2<float>({{-1.0, 0.0, 1.0}}),
+ *LiteralUtil::CreateR2<float>({{-1.0, 0.0, 1.0}}),
*ExecuteAndTransfer(std::move(hlo_module), {}), ErrorSpec(1e-4)));
}
@@ -269,7 +271,7 @@ XLA_TEST_F(FusionTest, RandomizedParallelPartition) {
auto hlo_module = CreateNewModule();
auto two = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto x =
builder.AddInstruction(HloInstruction::CreateBroadcast(shape, two, {}));
auto y = builder.AddInstruction(
@@ -292,9 +294,9 @@ XLA_TEST_F(FusionTest, BroadcastIntoBinaryOp) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const_vector = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR1<float>({1.0, 2.0, 3.0})));
+ LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto const_array = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<float>({{-1.0, -2.0, -4.0}, {10.0, 20.0, 30.0}})));
+ LiteralUtil::CreateR2<float>({{-1.0, -2.0, -4.0}, {10.0, 20.0, 30.0}})));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(const_array->shape(), const_vector, {1}));
// add2 = broadcast(const_vector) + const_array
@@ -308,7 +310,7 @@ XLA_TEST_F(FusionTest, BroadcastIntoBinaryOp) {
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(LiteralTestUtil::Near(
- *Literal::CreateR2<float>({{0.0, 0.0, -1.0}, {11.0, 22.0, 33.0}}),
+ *LiteralUtil::CreateR2<float>({{0.0, 0.0, -1.0}, {11.0, 22.0, 33.0}}),
*ExecuteAndTransfer(std::move(hlo_module), {}), ErrorSpec(1e-4)));
}
@@ -316,14 +318,14 @@ XLA_TEST_F(FusionTest, ReshapeToScalar) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto single_element_array = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR2<int32>({{5}})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR2<int32>({{5}})));
auto reshape = builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(S32, {}), single_element_array));
hlo_module->AddEntryComputation(builder.Build())
->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape},
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR0<int32>(5),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR0<int32>(5),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -331,14 +333,14 @@ XLA_TEST_F(FusionTest, Reshape_3by2_1by2by3) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}})));
+ LiteralUtil::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}})));
auto reshape1 = builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(S32, {1, 2, 3}), const0));
hlo_module->AddEntryComputation(builder.Build())
->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1},
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::CreateR3<int32>({{{1, 2, 3}, {4, 5, 6}}}),
+ *LiteralUtil::CreateR3<int32>({{{1, 2, 3}, {4, 5, 6}}}),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -346,14 +348,14 @@ XLA_TEST_F(FusionTest, Reshape_1by2by3_3by2) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR3<int32>({{{1, 2, 3}, {4, 5, 6}}})));
+ LiteralUtil::CreateR3<int32>({{{1, 2, 3}, {4, 5, 6}}})));
auto reshape1 = builder.AddInstruction(
HloInstruction::CreateReshape(ShapeUtil::MakeShape(S32, {3, 2}), const0));
hlo_module->AddEntryComputation(builder.Build())
->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1},
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}}),
+ *LiteralUtil::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}}),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -361,14 +363,14 @@ XLA_TEST_F(FusionTest, Reshape_1by1by1_) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR3<int32>({{{7}}})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR3<int32>({{{7}}})));
auto reshape1 = builder.AddInstruction(
HloInstruction::CreateReshape(ShapeUtil::MakeShape(S32, {}), const0));
hlo_module->AddEntryComputation(builder.Build())
->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1},
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR0<int32>(7),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR0<int32>(7),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -376,14 +378,14 @@ XLA_TEST_F(FusionTest, Reshape__1by1by1) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(7)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(7)));
auto reshape1 = builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(S32, {1, 1, 1}), const0));
hlo_module->AddEntryComputation(builder.Build())
->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1},
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR3<int32>({{{7}}}),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR3<int32>({{{7}}}),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -391,14 +393,14 @@ XLA_TEST_F(FusionTest, Reshape__) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(7)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(7)));
auto reshape1 = builder.AddInstruction(
HloInstruction::CreateReshape(ShapeUtil::MakeShape(S32, {}), const0));
hlo_module->AddEntryComputation(builder.Build())
->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1},
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR0<int32>(7),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR0<int32>(7),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -406,14 +408,14 @@ XLA_TEST_F(FusionTest, Reshape_3by3_3by3) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}})));
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}})));
auto reshape1 = builder.AddInstruction(
HloInstruction::CreateReshape(ShapeUtil::MakeShape(S32, {3, 3}), const0));
hlo_module->AddEntryComputation(builder.Build())
->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1},
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}),
+ *LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -421,14 +423,14 @@ XLA_TEST_F(FusionTest, Transpose_2by3) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}})));
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}})));
auto reshape1 = builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(S32, {3, 2}), const0, {1, 0}));
hlo_module->AddEntryComputation(builder.Build())
->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1},
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::CreateR2<int32>({{1, 4}, {2, 5}, {3, 6}}),
+ *LiteralUtil::CreateR2<int32>({{1, 4}, {2, 5}, {3, 6}}),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -436,14 +438,14 @@ XLA_TEST_F(FusionTest, Transpose_3by3) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}})));
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}})));
auto reshape1 = builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(S32, {3, 3}), const0, {1, 0}));
hlo_module->AddEntryComputation(builder.Build())
->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1},
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::CreateR2<int32>({{1, 4, 7}, {2, 5, 8}, {3, 6, 9}}),
+ *LiteralUtil::CreateR2<int32>({{1, 4, 7}, {2, 5, 8}, {3, 6, 9}}),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -451,7 +453,7 @@ XLA_TEST_F(FusionTest, Reverse) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({1, 2, 3})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({1, 2, 3})));
auto reverse1 = builder.AddInstruction(HloInstruction::CreateReverse(
ShapeUtil::MakeShape(S32, {3}), const0, {0}));
hlo_module->AddEntryComputation(builder.Build())
@@ -459,7 +461,7 @@ XLA_TEST_F(FusionTest, Reverse) {
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR1<int32>({3, 2, 1}),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR1<int32>({3, 2, 1}),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -467,7 +469,7 @@ XLA_TEST_F(FusionTest, ReverseNegate) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({1, 2, 3})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({1, 2, 3})));
auto reverse1 = builder.AddInstruction(HloInstruction::CreateReverse(
ShapeUtil::MakeShape(S32, {3}), const0, {0}));
auto negate2 = builder.AddInstruction(HloInstruction::CreateUnary(
@@ -477,7 +479,7 @@ XLA_TEST_F(FusionTest, ReverseNegate) {
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR1<int32>({-3, -2, -1}),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR1<int32>({-3, -2, -1}),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -485,7 +487,7 @@ XLA_TEST_F(FusionTest, BroadcastNegate) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1)));
auto broadcast1 = builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(S32, {2}), const0, {}));
auto negate2 = builder.AddInstruction(HloInstruction::CreateUnary(
@@ -495,15 +497,15 @@ XLA_TEST_F(FusionTest, BroadcastNegate) {
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR1<int32>({-1, -1}),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR1<int32>({-1, -1}),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
XLA_TEST_F(FusionTest, SliceNegate) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
- auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({1, 2, 3, 4})));
+ auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<int32>({1, 2, 3, 4})));
auto slice1 = builder.AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(S32, {2}), const0, {0}, {4}, {2}));
auto negate2 = builder.AddInstruction(HloInstruction::CreateUnary(
@@ -513,17 +515,17 @@ XLA_TEST_F(FusionTest, SliceNegate) {
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR1<int32>({-1, -3}),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR1<int32>({-1, -3}),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
XLA_TEST_F(FusionTest, DynamicSliceNegate) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
- auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({1, 2, 3, 4})));
+ auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<int32>({1, 2, 3, 4})));
auto const1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({1})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({1})));
auto dynamic_slice2 =
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
ShapeUtil::MakeShape(S32, {2}), const0, const1, {2}));
@@ -535,15 +537,15 @@ XLA_TEST_F(FusionTest, DynamicSliceNegate) {
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR1<int32>({-2, -3}),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR1<int32>({-2, -3}),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
XLA_TEST_F(FusionTest, ReshapeNegate) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
- auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({1, 2, 3, 4})));
+ auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<int32>({1, 2, 3, 4})));
auto reshape1 = builder.AddInstruction(
HloInstruction::CreateReshape(ShapeUtil::MakeShape(S32, {2, 2}), const0));
auto negate2 = builder.AddInstruction(HloInstruction::CreateUnary(
@@ -552,17 +554,16 @@ XLA_TEST_F(FusionTest, ReshapeNegate) {
->CreateFusionInstruction(/*instructions_to_fuse=*/{negate2, reshape1},
HloInstruction::FusionKind::kLoop);
- EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR2<int32>({{-1, -2}, {-3, -4}}),
- *ExecuteAndTransfer(std::move(hlo_module), {})));
+ EXPECT_TRUE(LiteralTestUtil::Equal(
+ *LiteralUtil::CreateR2<int32>({{-1, -2}, {-3, -4}}),
+ *ExecuteAndTransfer(std::move(hlo_module), {})));
}
-// TODO(b/64070202): Investigate failure.
-XLA_TEST_F(FusionTest, DISABLED_ON_GPU(TransposeNegate)) {
+XLA_TEST_F(FusionTest, TransposeNegate) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<int32>({{1, 2}, {3, 4}})));
+ LiteralUtil::CreateR2<int32>({{1, 2}, {3, 4}})));
auto transpose1 = builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(S32, {2, 2}), const0, {1, 0}));
auto negate2 = builder.AddInstruction(HloInstruction::CreateUnary(
@@ -571,9 +572,9 @@ XLA_TEST_F(FusionTest, DISABLED_ON_GPU(TransposeNegate)) {
->CreateFusionInstruction(/*instructions_to_fuse=*/{negate2, transpose1},
HloInstruction::FusionKind::kLoop);
- EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR2<int32>({{-1, -3}, {-2, -4}}),
- *ExecuteAndTransfer(std::move(hlo_module), {})));
+ EXPECT_TRUE(LiteralTestUtil::Equal(
+ *LiteralUtil::CreateR2<int32>({{-1, -3}, {-2, -4}}),
+ *ExecuteAndTransfer(std::move(hlo_module), {})));
}
std::unique_ptr<HloComputation> MakeReduceTestComputation() {
@@ -591,10 +592,10 @@ XLA_TEST_F(FusionTest, DISABLED_ON_CPU(Reduce)) {
auto hlo_module = CreateNewModule();
auto builder = HloComputation::Builder(TestName());
- auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({1, 2, 4, 8})));
+ auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<int32>({1, 2, 4, 8})));
auto const1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0)));
auto reduce2 = builder.AddInstruction(HloInstruction::CreateReduce(
ShapeUtil::MakeShape(S32, {}), const0, const1, {0},
hlo_module->AddEmbeddedComputation(MakeReduceTestComputation())));
@@ -603,7 +604,7 @@ XLA_TEST_F(FusionTest, DISABLED_ON_CPU(Reduce)) {
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR0<int32>(15),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR0<int32>(15),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -611,10 +612,10 @@ XLA_TEST_F(FusionTest, DISABLED_ON_CPU(ReduceImplicitBroadcast)) {
auto hlo_module = CreateNewModule();
auto builder = HloComputation::Builder(TestName());
- auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({1, 2, 4, 8})));
+ auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
+ LiteralUtil::CreateR1<int32>({1, 2, 4, 8})));
auto const1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0)));
auto reduce2 = builder.AddInstruction(HloInstruction::CreateReduce(
ShapeUtil::MakeShape(S32, {}), const0, const1, {0},
hlo_module->AddEmbeddedComputation(MakeReduceTestComputation())));
@@ -625,7 +626,7 @@ XLA_TEST_F(FusionTest, DISABLED_ON_CPU(ReduceImplicitBroadcast)) {
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR0<int32>(-15),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR0<int32>(-15),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -633,9 +634,9 @@ XLA_TEST_F(FusionTest, DISABLED_ON_CPU(ReduceWindow)) {
auto builder = HloComputation::Builder(TestName());
auto hlo_module = CreateNewModule();
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
- Literal::CreateR2<int32>({{2, 3, 5}, {7, 11, 13}, {17, 19, 23}})));
+ LiteralUtil::CreateR2<int32>({{2, 3, 5}, {7, 11, 13}, {17, 19, 23}})));
auto const1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(1)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1)));
Window window;
ASSERT_TRUE(
tensorflow::protobuf::TextFormat::ParseFromString("dimensions:{\n"
@@ -675,7 +676,7 @@ XLA_TEST_F(FusionTest, DISABLED_ON_CPU(ReduceWindow)) {
HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::CreateR2<int32>({{462, 2145}, {24871, 62491}}),
+ *LiteralUtil::CreateR2<int32>({{462, 2145}, {24871, 62491}}),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -687,9 +688,9 @@ XLA_TEST_F(FusionTest, SharedConstant) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({0})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({0})));
auto const1 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR1<int32>({2})));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({2})));
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(S32, {1}), HloOpcode::kAdd, const1, const0));
auto add2 = builder.AddInstruction(HloInstruction::CreateBinary(
@@ -711,7 +712,7 @@ XLA_TEST_F(FusionTest, SharedConstant) {
EXPECT_EQ(entry_comp->root_instruction()->fused_instruction_count(), 6);
EXPECT_TRUE(
- LiteralTestUtil::Equal(*Literal::CreateR1<int32>({8}),
+ LiteralTestUtil::Equal(*LiteralUtil::CreateR1<int32>({8}),
*ExecuteAndTransfer(std::move(hlo_module), {})));
}
@@ -765,6 +766,39 @@ XLA_TEST_F(FusionTest, Clamp2D) {
TestElementwise2D<float, 3>(HloOpcode::kClamp);
}
+// TODO(b/73903144): Enable on interpreter once interpreter supports bitcast.
+XLA_TEST_F(FusionTest, DISABLED_ON_INTERPRETER(FusionWithLayout)) {
+ const string hlo_text = R"(
+HloModule Cluster
+
+fusion_c {
+ fusion.arg = f32[2,2]{1,0} parameter(0)
+ bitcast.0 = f32[2,2,1]{2,1,0} bitcast(fusion.arg)
+ tanh.0 = f32[2,2,1]{0,2,1} tanh(bitcast.0)
+ ROOT bitcast.2 = f32[2,2,1]{1,2,0} bitcast(tanh.0)
+}
+
+ENTRY main {
+ arg = f32[2,2]{1,0} parameter(0)
+ ROOT fusion = f32[2,2,1]{1,2,0} fusion(arg), kind=kLoop, calls=fusion_c
+}
+)";
+
+ std::unique_ptr<Literal> operand =
+ LiteralUtil::CreateR2<float>({{0., 0.}, {1., 0.}});
+ HloModuleConfig config;
+ config.set_debug_options(GetDebugOptionsForTest());
+ TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
+ ParseHloString(hlo_text, config));
+ TF_ASSERT_OK_AND_ASSIGN(
+ std::unique_ptr<Literal> result,
+ test_runner_.Execute(std::move(module), {operand.get()},
+ /*run_hlo_passes=*/false));
+ EXPECT_TRUE(LiteralTestUtil::Equal(
+ *LiteralUtil::CreateR3<float>({{{0.}, {0.76159415595}}, {{0.}, {0.}}}),
+ *result));
+}
+
void BM_ParallelFusion(int num_iters) {
// Simple element-wise computation to benchmark parallel task partitioning.
tensorflow::testing::StopTiming();
@@ -793,31 +827,31 @@ void BM_ParallelFusion(int num_iters) {
// Create computation.
XlaBuilder builder("ParallelFusion");
Shape shape0 = ShapeUtil::MakeShape(F32, {param0_dim0, param0_dim1});
- auto param0 = builder.Parameter(0, shape0, "param0");
+ auto param0 = Parameter(&builder, 0, shape0, "param0");
Shape shape1 = ShapeUtil::MakeShape(F32, {param1_dim0, param1_dim1});
- auto param1 = builder.Parameter(1, shape1, "param1");
+ auto param1 = Parameter(&builder, 1, shape1, "param1");
Shape shape2 = ShapeUtil::MakeShape(F32, {param2_dim0, param2_dim1});
- auto param2 = builder.Parameter(2, shape2, "param2");
+ auto param2 = Parameter(&builder, 2, shape2, "param2");
- auto x = builder.Mul(param0, param1);
- auto y = builder.Add(x, param2);
+ auto x = Mul(param0, param1);
+ Add(x, param2);
auto computation = builder.Build().ConsumeValueOrDie();
// Transfer literals to device.
auto param0_literal =
- Literal::CreateR2F32Linspace(1.0, 2.0, param0_dim0, param0_dim1);
+ LiteralUtil::CreateR2F32Linspace(1.0, 2.0, param0_dim0, param0_dim1);
ScopedShapedBuffer buffer0 =
client->LiteralToShapedBuffer(*param0_literal, device_ordinal)
.ConsumeValueOrDie();
auto param1_literal =
- Literal::CreateR2F32Linspace(1.0, 2.0, param1_dim0, param1_dim1);
+ LiteralUtil::CreateR2F32Linspace(1.0, 2.0, param1_dim0, param1_dim1);
ScopedShapedBuffer buffer1 =
client->LiteralToShapedBuffer(*param1_literal, device_ordinal)
.ConsumeValueOrDie();
auto param2_literal =
- Literal::CreateR2F32Linspace(1.0, 2.0, param2_dim0, param2_dim1);
+ LiteralUtil::CreateR2F32Linspace(1.0, 2.0, param2_dim0, param2_dim1);
ScopedShapedBuffer buffer2 =
client->LiteralToShapedBuffer(*param2_literal, device_ordinal)
.ConsumeValueOrDie();
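A quick sanity check on the expected literal in the FusionWithLayout test above: the fusion is tanh applied through layout-changing bitcasts, so the only nonzero output element is tanh(1.0). A standalone check in plain C++, just to confirm the constant:

#include <cmath>
#include <cstdio>

int main() {
  // tanh(1.0) = 0.7615941559..., matching the 0.76159415595 literal above.
  std::printf("%.11f\n", std::tanh(1.0));
  return 0;
}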
diff --git a/tensorflow/compiler/xla/tests/gather_operation_test.cc b/tensorflow/compiler/xla/tests/gather_operation_test.cc
index 6fefae3695..c5ca64fa3f 100644
--- a/tensorflow/compiler/xla/tests/gather_operation_test.cc
+++ b/tensorflow/compiler/xla/tests/gather_operation_test.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/execution_options_util.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/status_macros.h"
@@ -21,9 +22,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
#include "tensorflow/compiler/xla/tests/test_macros.h"
-// NB! TODO(b/74360564): These tests do not test out of bounds behavior since
-// that hasn't been specced yet.
-
namespace xla {
namespace {
@@ -62,8 +60,9 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR1<int32>({0, 2});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ std::unique_ptr<Literal> gather_indices =
+ LiteralUtil::CreateR1<int32>({0, 2});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -83,8 +82,9 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR1<int32>({0, 2});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ std::unique_ptr<Literal> gather_indices =
+ LiteralUtil::CreateR1<int32>({0, 2});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -104,9 +104,9 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR2<int32>({{0, 2}, {2, 1}});
+ LiteralUtil::CreateR2<int32>({{0, 2}, {2, 1}});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -126,9 +126,9 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR3<int32>({{{0, 2}, {2, 1}}, {{1, 2}, {2, 0}}});
+ LiteralUtil::CreateR3<int32>({{{0, 2}, {2, 1}}, {{1, 2}, {2, 0}}});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -148,9 +148,9 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR3<int32>({{{0, 2}, {2, 1}}, {{1, 2}, {2, 0}}});
+ LiteralUtil::CreateR3<int32>({{{0, 2}, {2, 1}}, {{1, 2}, {2, 0}}});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -170,11 +170,11 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR3<int32>({{{-1, 1}, {-2, 2}, {-3, 3}}, //
- {{-4, 4}, {-5, 5}, {-6, 6}}, //
- {{-7, 7}, {-8, 8}, {-9, 9}}});
+ LiteralUtil::CreateR3<int32>({{{-1, 1}, {-2, 2}, {-3, 3}}, //
+ {{-4, 4}, {-5, 5}, {-6, 6}}, //
+ {{-7, 7}, {-8, 8}, {-9, 9}}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR2<int32>({{0, 0}, {1, 0}});
+ LiteralUtil::CreateR2<int32>({{0, 0}, {1, 0}});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -194,11 +194,11 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR3<int32>({{{-1, 1}, {-2, 2}, {-3, 3}}, //
- {{-4, 4}, {-5, 5}, {-6, 6}}, //
- {{-7, 7}, {-8, 8}, {-9, 9}}});
+ LiteralUtil::CreateR3<int32>({{{-1, 1}, {-2, 2}, {-3, 3}}, //
+ {{-4, 4}, {-5, 5}, {-6, 6}}, //
+ {{-7, 7}, {-8, 8}, {-9, 9}}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR2<int32>({{0, 0}, {1, 0}});
+ LiteralUtil::CreateR2<int32>({{0, 0}, {1, 0}});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -218,8 +218,9 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR1<int32>({1, 1});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ std::unique_ptr<Literal> gather_indices =
+ LiteralUtil::CreateR1<int32>({1, 1});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -239,9 +240,9 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR2<int32>({{2, 1}, {1, 1}});
+ LiteralUtil::CreateR2<int32>({{2, 1}, {1, 1}});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -260,18 +261,15 @@ ENTRY main {
window_bounds={1, 0}
}
)";
- std::unique_ptr<Literal> operand = Literal::CreateR2<int32>({{}, {}, {}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR1<int32>({0, 2});
+ std::unique_ptr<Literal> operand = LiteralUtil::CreateR2<int32>({{}, {}, {}});
+ std::unique_ptr<Literal> gather_indices =
+ LiteralUtil::CreateR1<int32>({0, 2});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
XLA_TEST_F(GatherOperationTest, OutOfBoundsIndex) {
// Out of bounds indices must not crash, and the indices in range should
// produce the same values across all backends.
- //
- // TODO(b/74360564): Once we have a well defined semantics for OOB accesses,
- // we should get rid of the mask and check that backends produce the same
- // value for OOB indices too.
const string hlo_text = R"(
HloModule BatchDynamicSlice
@@ -285,29 +283,45 @@ ENTRY main {
gather_dims_to_operand_dims={0,1},
index_vector_dim=1,
window_bounds={1,1}
- gather_reshaped = s32[6]{0} reshape(gather)
- in_bounds_mask = s32[6]{0} parameter(2)
- ROOT result = s32[6]{0} multiply(gather_reshaped, in_bounds_mask)
+ ROOT result = s32[6]{0} reshape(gather)
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR2<int32>(
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ std::unique_ptr<Literal> gather_indices = LiteralUtil::CreateR2<int32>(
{{2, 7}, {2, 1}, {1, 1}, {5, 1}, {2147483647, 1}, {1, 2}});
- std::unique_ptr<Literal> in_bounds_mask =
- Literal::CreateR1<int32>({0, 1, 1, 0, 0, 1});
+ RunTest(hlo_text, operand.get(), gather_indices.get());
+}
+
+XLA_TEST_F(GatherOperationTest, OutOfBoundsUnsignedIndex) {
+ // Out of bounds indices must not crash, and the indices in range should
+ // produce the same values across all backends.
- RunTest(hlo_text,
- {operand.get(), gather_indices.get(), in_bounds_mask.get()});
+ const string hlo_text = R"(
+HloModule BatchDynamicSlice
+
+ENTRY main {
+ operand = s32[3,3]{1,0} parameter(0)
+ indices = u32[6,2]{1,0} parameter(1)
+ gather = s32[6,1,1]{2,1,0} gather(operand, indices),
+ output_window_dims={1,2},
+ elided_window_dims={},
+ gather_dims_to_operand_dims={0,1},
+ index_vector_dim=1,
+ window_bounds={1,1}
+ ROOT result = s32[6]{0} reshape(gather)
+}
+)";
+ std::unique_ptr<Literal> operand =
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ std::unique_ptr<Literal> gather_indices = LiteralUtil::CreateR2<uint32>(
+ {{2, 7}, {2, 1}, {1, 1}, {5, 1}, {2147483648u, 1}, {1, 2}});
+ RunTest(hlo_text, operand.get(), gather_indices.get());
}
XLA_TEST_F(GatherOperationTest, NegativeIndex) {
// Negative indices must not crash, and the indices in range should produce
// the same values across all backends.
- //
- // TODO(b/74360564): Once we have a well defined semantics for negative
- // accesses, we should get rid of the mask and check that backends produce the
- // same value for negative indices too.
const string hlo_text = R"(
HloModule BatchDynamicSlice
@@ -321,20 +335,40 @@ ENTRY main {
gather_dims_to_operand_dims={0,1},
index_vector_dim=1,
window_bounds={1,1}
- gather_reshaped = s32[6]{0} reshape(gather)
- in_bounds_mask = s32[6]{0} parameter(2)
- ROOT result = s32[6]{0} multiply(gather_reshaped, in_bounds_mask)
+ ROOT result = s32[6]{0} reshape(gather)
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR2<int32>(
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ std::unique_ptr<Literal> gather_indices = LiteralUtil::CreateR2<int32>(
{{2, -1}, {2, 1}, {1, 1}, {-500, 1}, {-2147483648, 1}, {1, 2}});
- std::unique_ptr<Literal> in_bounds_mask =
- Literal::CreateR1<int32>({0, 1, 1, 0, 0, 1});
+ RunTest(hlo_text, operand.get(), gather_indices.get());
+}
+
+XLA_TEST_F(GatherOperationTest, NegativeIndexIntoUnsignedOperand) {
+ // Negative indices must not crash, and the indices in range should produce
+ // the same values across all backends.
- RunTest(hlo_text,
- {operand.get(), gather_indices.get(), in_bounds_mask.get()});
+ const string hlo_text = R"(
+HloModule BatchDynamicSlice
+
+ENTRY main {
+ operand = u32[3,3]{1,0} parameter(0)
+ indices = s32[6,2]{1,0} parameter(1)
+ gather = u32[6,1,1]{2,1,0} gather(operand, indices),
+ output_window_dims={1,2},
+ elided_window_dims={},
+ gather_dims_to_operand_dims={0,1},
+ index_vector_dim=1,
+ window_bounds={1,1}
+ ROOT result = u32[6]{0} reshape(gather)
+}
+)";
+ std::unique_ptr<Literal> operand =
+ LiteralUtil::CreateR2<uint32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ std::unique_ptr<Literal> gather_indices = LiteralUtil::CreateR2<int32>(
+ {{2, -1}, {2, 1}, {1, 1}, {-500, 1}, {-2147483648, 1}, {1, 2}});
+ RunTest(hlo_text, operand.get(), gather_indices.get());
}
XLA_TEST_F(GatherOperationTest, OneScalarIndex) {
@@ -352,9 +386,9 @@ ENTRY main {
window_bounds={1,3,2}
}
)";
- std::unique_ptr<Literal> operand = Literal::CreateR3<int32>(
+ std::unique_ptr<Literal> operand = LiteralUtil::CreateR3<int32>(
{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR0<int32>(1);
+ std::unique_ptr<Literal> gather_indices = LiteralUtil::CreateR0<int32>(1);
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -373,8 +407,8 @@ ENTRY main {
window_bounds={1}
}
)";
- std::unique_ptr<Literal> operand = Literal::CreateR1<int32>({1, 2, 3, 4});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR0<int32>(1);
+ std::unique_ptr<Literal> operand = LiteralUtil::CreateR1<int32>({1, 2, 3, 4});
+ std::unique_ptr<Literal> gather_indices = LiteralUtil::CreateR0<int32>(1);
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -394,8 +428,8 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR1<int32>({});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ std::unique_ptr<Literal> gather_indices = LiteralUtil::CreateR1<int32>({});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -418,8 +452,9 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR1<int32>({0, 2});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ std::unique_ptr<Literal> gather_indices =
+ LiteralUtil::CreateR1<int32>({0, 2});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -442,9 +477,9 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR2<int32>({{0, 2}, {2, 1}});
+ LiteralUtil::CreateR2<int32>({{0, 2}, {2, 1}});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -467,9 +502,9 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR3<int32>({{{0, 2}, {2, 1}}, {{1, 2}, {2, 0}}});
+ LiteralUtil::CreateR3<int32>({{{0, 2}, {2, 1}}, {{1, 2}, {2, 0}}});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -492,11 +527,11 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR3<int32>({{{-1, 1}, {-2, 2}, {-3, 3}}, //
- {{-4, 4}, {-5, 5}, {-6, 6}}, //
- {{-7, 7}, {-8, 8}, {-9, 9}}});
+ LiteralUtil::CreateR3<int32>({{{-1, 1}, {-2, 2}, {-3, 3}}, //
+ {{-4, 4}, {-5, 5}, {-6, 6}}, //
+ {{-7, 7}, {-8, 8}, {-9, 9}}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR2<int32>({{0, 0}, {1, 0}});
+ LiteralUtil::CreateR2<int32>({{0, 0}, {1, 0}});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -520,11 +555,11 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR3<int32>({{{-1, 1}, {-2, 2}, {-3, 3}}, //
- {{-4, 4}, {-5, 5}, {-6, 6}}, //
- {{-7, 7}, {-8, 8}, {-9, 9}}});
+ LiteralUtil::CreateR3<int32>({{{-1, 1}, {-2, 2}, {-3, 3}}, //
+ {{-4, 4}, {-5, 5}, {-6, 6}}, //
+ {{-7, 7}, {-8, 8}, {-9, 9}}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR2<int32>({{0, 0}, {1, 0}});
+ LiteralUtil::CreateR2<int32>({{0, 0}, {1, 0}});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -547,8 +582,9 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
- std::unique_ptr<Literal> gather_indices = Literal::CreateR1<int32>({1, 1});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ std::unique_ptr<Literal> gather_indices =
+ LiteralUtil::CreateR1<int32>({1, 1});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -571,9 +607,9 @@ ENTRY main {
}
)";
std::unique_ptr<Literal> operand =
- Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
+ LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
std::unique_ptr<Literal> gather_indices =
- Literal::CreateR2<int32>({{2, 1}, {1, 1}});
+ LiteralUtil::CreateR2<int32>({{2, 1}, {1, 1}});
RunTest(hlo_text, operand.get(), gather_indices.get());
}
@@ -598,22 +634,23 @@ XLA_TEST_F(GatherClientLibraryTest, DISABLED_ON_GPU(Basic)) {
Shape operand_shape = ShapeUtil::MakeShape(S32, {3, 3});
Shape indices_shape = ShapeUtil::MakeShape(S32, {2});
- auto operand = builder.Parameter(0, operand_shape, "operand");
- auto indices = builder.Parameter(1, indices_shape, "indices");
+ auto operand = Parameter(&builder, 0, operand_shape, "operand");
+ auto indices = Parameter(&builder, 1, indices_shape, "indices");
GatherDimensionNumbers dim_numbers;
dim_numbers.add_output_window_dims(1);
dim_numbers.add_elided_window_dims(0);
dim_numbers.add_gather_dims_to_operand_dims(0);
dim_numbers.set_index_vector_dim(1);
- builder.Gather(operand, indices, dim_numbers, {1, 3});
+ Gather(operand, indices, dim_numbers, {1, 3});
std::vector<int32> expected = {};
- TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> operand_arg,
- client_->TransferToServer(*Literal::CreateR2<int32>(
- {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}})));
+ TF_ASSERT_OK_AND_ASSIGN(
+ std::unique_ptr<GlobalData> operand_arg,
+ client_->TransferToServer(
+ *LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}})));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> indices_arg,
- client_->TransferToServer(*Literal::CreateR1<int32>({0, 2})));
+ client_->TransferToServer(*LiteralUtil::CreateR1<int32>({0, 2})));
TF_ASSERT_OK_AND_ASSIGN(std::vector<xla::DeviceHandle> devices,
client_->GetDeviceHandles(1));
xla::ExecutionOptions execution_options = CreateDefaultExecutionOptions();
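The hunks above capture the two mechanical migrations that recur throughout these test diffs: literal factories move from static methods on Literal to LiteralUtil, and XlaBuilder member ops (builder.Parameter, builder.Gather, ...) become free functions taking the builder, or an XlaOp, as their first argument. A minimal sketch of the post-migration style, assembled only from calls that appear in the hunks above (illustrative, not a complete test):

    XlaBuilder builder("gather_sketch");
    Shape operand_shape = ShapeUtil::MakeShape(S32, {3, 3});
    Shape indices_shape = ShapeUtil::MakeShape(S32, {2});
    auto operand = Parameter(&builder, 0, operand_shape, "operand");
    auto indices = Parameter(&builder, 1, indices_shape, "indices");
    GatherDimensionNumbers dim_numbers;
    dim_numbers.add_output_window_dims(1);
    dim_numbers.add_elided_window_dims(0);
    dim_numbers.add_gather_dims_to_operand_dims(0);
    dim_numbers.set_index_vector_dim(1);
    Gather(operand, indices, dim_numbers, {1, 3});
    std::unique_ptr<Literal> input =
        LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});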
diff --git a/tensorflow/compiler/xla/tests/half_test.cc b/tensorflow/compiler/xla/tests/half_test.cc
index 76bf47845c..73a47eda72 100644
--- a/tensorflow/compiler/xla/tests/half_test.cc
+++ b/tensorflow/compiler/xla/tests/half_test.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/test_helpers.h"
@@ -37,8 +37,7 @@ class HalfTestBase : public ClientLibraryTestBase {
static const int kNumElements = 4;
};
-using UnaryBuildFuncTy =
- std::function<void(xla::XlaBuilder*, const xla::XlaOp& src)>;
+using UnaryBuildFuncTy = std::function<void(const xla::XlaOp& src)>;
struct UnaryOpTestParam {
std::function<half(half)> compute_func;
@@ -62,7 +61,7 @@ XLA_TEST_P(UnaryOpTest, Ops) {
}
UnaryBuildFuncTy build_func = GetParam().build_func;
- build_func(&builder, x_opnd);
+ build_func(x_opnd);
ComputeAndCompareR1<half>(&builder, expected, {x_data.get()}, error_spec_);
}
@@ -79,18 +78,17 @@ half round_imp(half value) {
INSTANTIATE_TEST_CASE_P(
half, UnaryOpTest,
::testing::Values(
- UnaryOpTestParam{[](half x) { return abs(x); }, &XlaBuilder::Abs},
- UnaryOpTestParam{[](half x) { return round_imp(x); },
- &XlaBuilder::Round},
- UnaryOpTestParam{[](half x) { return ceil(x); }, &XlaBuilder::Ceil},
- UnaryOpTestParam{[](half x) { return cos(x); }, &XlaBuilder::Cos},
- UnaryOpTestParam{[](half x) { return exp(x); }, &XlaBuilder::Exp},
- UnaryOpTestParam{[](half x) { return floor(x); }, &XlaBuilder::Floor},
- UnaryOpTestParam{[](half x) { return log(x); }, &XlaBuilder::Log},
- UnaryOpTestParam{[](half x) { return -x; }, &XlaBuilder::Neg},
- UnaryOpTestParam{[](half x) { return sign_imp(x); }, &XlaBuilder::Sign},
- UnaryOpTestParam{[](half x) { return sin(x); }, &XlaBuilder::Sin},
- UnaryOpTestParam{[](half x) { return tanh(x); }, &XlaBuilder::Tanh}
+ UnaryOpTestParam{[](half x) { return abs(x); }, &Abs},
+ UnaryOpTestParam{[](half x) { return round_imp(x); }, &Round},
+ UnaryOpTestParam{[](half x) { return ceil(x); }, &Ceil},
+ UnaryOpTestParam{[](half x) { return cos(x); }, &Cos},
+ UnaryOpTestParam{[](half x) { return exp(x); }, &Exp},
+ UnaryOpTestParam{[](half x) { return floor(x); }, &Floor},
+ UnaryOpTestParam{[](half x) { return log(x); }, &Log},
+ UnaryOpTestParam{[](half x) { return -x; }, &Neg},
+ UnaryOpTestParam{[](half x) { return sign_imp(x); }, &Sign},
+ UnaryOpTestParam{[](half x) { return sin(x); }, &Sin},
+ UnaryOpTestParam{[](half x) { return tanh(x); }, &Tanh}
));
@@ -118,19 +116,18 @@ XLA_TEST_P(UnaryPredTest, Ops) {
}
UnaryBuildFuncTy build_func = GetParam().build_func;
- build_func(&builder, x_opnd);
+ build_func(x_opnd);
ComputeAndCompareR1<bool>(&builder, expected, {x_data.get()});
}
INSTANTIATE_TEST_CASE_P(half, UnaryPredTest,
::testing::Values(UnaryPredTestParam{
- [](half x) { return isfinite(x); },
- &XlaBuilder::IsFinite}));
+ [](half x) { return isfinite(x); }, &IsFinite}));
-using BinaryBuildFuncTy = std::function<void(
- xla::XlaBuilder*, const xla::XlaOp& x, const xla::XlaOp& y,
- tensorflow::gtl::ArraySlice<int64>)>;
+using BinaryBuildFuncTy =
+ std::function<void(const xla::XlaOp& x, const xla::XlaOp& y,
+ tensorflow::gtl::ArraySlice<int64>)>;
struct BinaryOpTestParam {
std::function<half(half, half)> compute_func;
@@ -159,7 +156,7 @@ XLA_TEST_P(BinaryOpTest, Ops) {
}
BinaryBuildFuncTy build_func = GetParam().build_func;
- build_func(&builder, x_opnd, y_opnd, {});
+ build_func(x_opnd, y_opnd, {});
ComputeAndCompareR1<half>(&builder, expected, {x_data.get(), y_data.get()},
error_spec_);
@@ -173,22 +170,15 @@ half atan2_imp(half x, half y) {
INSTANTIATE_TEST_CASE_P(
half, BinaryOpTest,
::testing::Values(
- BinaryOpTestParam{[](half x, half y) { return x + y; },
- &XlaBuilder::Add},
+ BinaryOpTestParam{[](half x, half y) { return x + y; }, &Add},
BinaryOpTestParam{[](half x, half y) { return atan2_imp(x, y); },
- &XlaBuilder::Atan2},
- BinaryOpTestParam{[](half x, half y) { return x / y; },
- &XlaBuilder::Div},
- BinaryOpTestParam{[](half x, half y) { return max(x, y); },
- &XlaBuilder::Max},
- BinaryOpTestParam{[](half x, half y) { return min(x, y); },
- &XlaBuilder::Min},
- BinaryOpTestParam{[](half x, half y) { return x * y; },
- &XlaBuilder::Mul},
- BinaryOpTestParam{[](half x, half y) { return pow(x, y); },
- &XlaBuilder::Pow},
- BinaryOpTestParam{[](half x, half y) { return x - y; },
- &XlaBuilder::Sub}
+ &Atan2},
+ BinaryOpTestParam{[](half x, half y) { return x / y; }, &Div},
+ BinaryOpTestParam{[](half x, half y) { return max(x, y); }, &Max},
+ BinaryOpTestParam{[](half x, half y) { return min(x, y); }, &Min},
+ BinaryOpTestParam{[](half x, half y) { return x * y; }, &Mul},
+ BinaryOpTestParam{[](half x, half y) { return pow(x, y); }, &Pow},
+ BinaryOpTestParam{[](half x, half y) { return x - y; }, &Sub}
));
@@ -221,27 +211,22 @@ XLA_TEST_P(BinaryPredTest, Ops) {
}
BinaryBuildFuncTy build_func = GetParam().build_func;
- build_func(&builder, x_opnd, y_opnd, {});
+ build_func(x_opnd, y_opnd, {});
ComputeAndCompareR1<bool>(&builder, expected, {x_data.get(), y_data.get()});
}
INSTANTIATE_TEST_CASE_P(
half, BinaryPredTest,
- ::testing::Values(BinaryPredTestParam{[](half x, half y) { return x == y; },
- &XlaBuilder::Eq},
- BinaryPredTestParam{[](half x, half y) { return x != y; },
- &XlaBuilder::Ne},
- BinaryPredTestParam{[](half x, half y) { return x >= y; },
- &XlaBuilder::Ge},
- BinaryPredTestParam{[](half x, half y) { return x > y; },
- &XlaBuilder::Gt},
- BinaryPredTestParam{[](half x, half y) { return x <= y; },
- &XlaBuilder::Le},
- BinaryPredTestParam{[](half x, half y) { return x < y; },
- &XlaBuilder::Lt}
-
- ));
+ ::testing::Values(
+ BinaryPredTestParam{[](half x, half y) { return x == y; }, &Eq},
+ BinaryPredTestParam{[](half x, half y) { return x != y; }, &Ne},
+ BinaryPredTestParam{[](half x, half y) { return x >= y; }, &Ge},
+ BinaryPredTestParam{[](half x, half y) { return x > y; }, &Gt},
+ BinaryPredTestParam{[](half x, half y) { return x <= y; }, &Le},
+ BinaryPredTestParam{[](half x, half y) { return x < y; }, &Lt}
+
+ ));
} // namespace
} // namespace xla
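A side effect worth noting in half_test.cc: once Abs, Add, and friends are free functions, the test's build_func typedefs can drop the XlaBuilder* parameter and bind the functions directly (&Abs, &Atan2, ...). std::function<void(...)> absorbs their XlaOp return value, and the builder no longer needs to be threaded through, presumably because each XlaOp already records which builder produced it. A sketch of the pattern (types copied from the diff; the comments are assumptions):

    using UnaryBuildFuncTy = std::function<void(const xla::XlaOp& src)>;

    struct UnaryOpTestParam {
      std::function<half(half)> compute_func;
      UnaryBuildFuncTy build_func;  // e.g. &Abs or &Neg; the return value is discarded
    };

    // UnaryOpTestParam{[](half x) { return abs(x); }, &Abs}
    // ... later in the test body: build_func(x_opnd);  // no builder argument needed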
diff --git a/tensorflow/compiler/xla/tests/hlo_metadata_test.cc b/tensorflow/compiler/xla/tests/hlo_metadata_test.cc
index cf971dd61b..4d82442f7e 100644
--- a/tensorflow/compiler/xla/tests/hlo_metadata_test.cc
+++ b/tensorflow/compiler/xla/tests/hlo_metadata_test.cc
@@ -30,9 +30,9 @@ class HloMetadataTest : public LocalClientTestBase {
}
void BuildAddComputation(XlaBuilder* builder) {
- auto x = builder->Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = builder->Parameter(1, ShapeUtil::MakeShape(F32, {}), "y");
- builder->Add(x, y);
+ auto x = Parameter(builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = Parameter(builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
+ Add(x, y);
}
OpMetadata metadata_;
diff --git a/tensorflow/compiler/xla/tests/hlo_test_base.cc b/tensorflow/compiler/xla/tests/hlo_test_base.cc
index 242cc5db11..b662e83716 100644
--- a/tensorflow/compiler/xla/tests/hlo_test_base.cc
+++ b/tensorflow/compiler/xla/tests/hlo_test_base.cc
@@ -276,9 +276,10 @@ StatusOr<::testing::AssertionResult> HloTestBase::RunAndCompareInternal(
HloComputation* HloTestBase::FindComputation(HloModule* module,
tensorflow::StringPiece name) {
- auto it = c_find_if(module->computations(),
+ auto computations = module->computations();
+ auto it = c_find_if(computations,
[&](HloComputation* c) { return c->name() == name; });
- if (it == module->computations().end()) {
+ if (it == computations.end()) {
return nullptr;
}
return *it;
@@ -287,9 +288,10 @@ HloComputation* HloTestBase::FindComputation(HloModule* module,
HloInstruction* HloTestBase::FindInstruction(HloModule* module,
tensorflow::StringPiece name) {
for (const HloComputation* c : module->computations()) {
- auto it = c_find_if(c->instructions(),
+ auto instructions = c->instructions();
+ auto it = c_find_if(instructions,
[&](HloInstruction* i) { return i->name() == name; });
- if (it != c->instructions().end()) {
+ if (it != instructions.end()) {
return *it;
}
}
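The two hunks above fix a subtle lifetime hazard rather than a rename: module->computations() and c->instructions() return range objects by value, so calling them twice yields two distinct temporaries, and comparing an iterator drawn from one against the end() of the other is at best fragile and at worst undefined behavior. Materializing the range once fixes it. The pattern, distilled (a sketch; pred stands in for the lambdas above):

    // Before (buggy): iterator from temporary #1 compared with end() of temporary #2.
    //   auto it = c_find_if(module->computations(), pred);
    //   if (it == module->computations().end()) return nullptr;

    // After: a single range object outlives both uses.
    auto computations = module->computations();
    auto it = c_find_if(computations, pred);
    if (it == computations.end()) return nullptr;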
diff --git a/tensorflow/compiler/xla/tests/hlo_test_base.h b/tensorflow/compiler/xla/tests/hlo_test_base.h
index 9009d67cea..66719b1460 100644
--- a/tensorflow/compiler/xla/tests/hlo_test_base.h
+++ b/tensorflow/compiler/xla/tests/hlo_test_base.h
@@ -200,6 +200,13 @@ class HloTestBase : public ::testing::Test {
->ResetLayout(layout);
}
+ void ForceResultLayout(HloModule* module, const Layout& layout,
+ ShapeIndexView shape_index) {
+ module->mutable_entry_computation_layout()
+ ->mutable_result_layout()
+ ->ResetLayout(layout, shape_index);
+ }
+
// Convenience method to clear the layout of the computation result in
// 'module'.
void ForceClearResultLayout(HloModule* module) {
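The new ForceResultLayout overload above targets a single leaf of a tuple-shaped result: the ShapeIndexView names which element of the tuple gets its layout reset. Hypothetical usage (the {0} index and the row-major layout are illustrative assumptions, not taken from the diff):

    // Force element 0 of a tuple-shaped result to a row-major layout.
    ForceResultLayout(module, LayoutUtil::MakeLayout({1, 0}), /*shape_index=*/{0});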
diff --git a/tensorflow/compiler/xla/tests/literal_test_util.h b/tensorflow/compiler/xla/tests/literal_test_util.h
index d1b8a6cf0b..31a099c15f 100644
--- a/tensorflow/compiler/xla/tests/literal_test_util.h
+++ b/tensorflow/compiler/xla/tests/literal_test_util.h
@@ -25,6 +25,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/array3d.h"
#include "tensorflow/compiler/xla/array4d.h"
#include "tensorflow/compiler/xla/error_spec.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/test_helpers.h"
@@ -154,20 +155,20 @@ class LiteralTestUtil {
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR0Equal(NativeT expected,
const LiteralSlice& actual) {
- EXPECT_TRUE(Equal(*Literal::CreateR0<NativeT>(expected), actual));
+ EXPECT_TRUE(Equal(*LiteralUtil::CreateR0<NativeT>(expected), actual));
}
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR1Equal(
tensorflow::gtl::ArraySlice<NativeT> expected, const LiteralSlice& actual) {
- EXPECT_TRUE(Equal(*Literal::CreateR1<NativeT>(expected), actual));
+ EXPECT_TRUE(Equal(*LiteralUtil::CreateR1<NativeT>(expected), actual));
}
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR2Equal(
std::initializer_list<std::initializer_list<NativeT>> expected,
const LiteralSlice& actual) {
- EXPECT_TRUE(Equal(*Literal::CreateR2<NativeT>(expected), actual));
+ EXPECT_TRUE(Equal(*LiteralUtil::CreateR2<NativeT>(expected), actual));
}
template <typename NativeT>
@@ -175,46 +176,46 @@ template <typename NativeT>
std::initializer_list<std::initializer_list<std::initializer_list<NativeT>>>
expected,
const LiteralSlice& actual) {
- EXPECT_TRUE(Equal(*Literal::CreateR3<NativeT>(expected), actual));
+ EXPECT_TRUE(Equal(*LiteralUtil::CreateR3<NativeT>(expected), actual));
}
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR2EqualArray2D(
const Array2D<NativeT>& expected, const LiteralSlice& actual) {
- EXPECT_TRUE(Equal(*Literal::CreateR2FromArray2D(expected), actual));
+ EXPECT_TRUE(Equal(*LiteralUtil::CreateR2FromArray2D(expected), actual));
}
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR3EqualArray3D(
const Array3D<NativeT>& expected, const LiteralSlice& actual) {
- EXPECT_TRUE(Equal(*Literal::CreateR3FromArray3D(expected), actual));
+ EXPECT_TRUE(Equal(*LiteralUtil::CreateR3FromArray3D(expected), actual));
}
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR4EqualArray4D(
const Array4D<NativeT>& expected, const LiteralSlice& actual) {
- EXPECT_TRUE(Equal(*Literal::CreateR4FromArray4D(expected), actual));
+ EXPECT_TRUE(Equal(*LiteralUtil::CreateR4FromArray4D(expected), actual));
}
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR0Near(NativeT expected,
const LiteralSlice& actual,
const ErrorSpec& error) {
- EXPECT_TRUE(Near(*Literal::CreateR0<NativeT>(expected), actual, error));
+ EXPECT_TRUE(Near(*LiteralUtil::CreateR0<NativeT>(expected), actual, error));
}
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR1Near(
tensorflow::gtl::ArraySlice<NativeT> expected, const LiteralSlice& actual,
const ErrorSpec& error) {
- EXPECT_TRUE(Near(*Literal::CreateR1<NativeT>(expected), actual, error));
+ EXPECT_TRUE(Near(*LiteralUtil::CreateR1<NativeT>(expected), actual, error));
}
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR2Near(
std::initializer_list<std::initializer_list<NativeT>> expected,
const LiteralSlice& actual, const ErrorSpec& error) {
- EXPECT_TRUE(Near(*Literal::CreateR2<NativeT>(expected), actual, error));
+ EXPECT_TRUE(Near(*LiteralUtil::CreateR2<NativeT>(expected), actual, error));
}
template <typename NativeT>
@@ -222,7 +223,7 @@ template <typename NativeT>
std::initializer_list<std::initializer_list<std::initializer_list<NativeT>>>
expected,
const LiteralSlice& actual, const ErrorSpec& error) {
- EXPECT_TRUE(Near(*Literal::CreateR3<NativeT>(expected), actual, error));
+ EXPECT_TRUE(Near(*LiteralUtil::CreateR3<NativeT>(expected), actual, error));
}
template <typename NativeT>
@@ -231,28 +232,28 @@ template <typename NativeT>
std::initializer_list<std::initializer_list<NativeT>>>>
expected,
const LiteralSlice& actual, const ErrorSpec& error) {
- EXPECT_TRUE(Near(*Literal::CreateR4<NativeT>(expected), actual, error));
+ EXPECT_TRUE(Near(*LiteralUtil::CreateR4<NativeT>(expected), actual, error));
}
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR2NearArray2D(
const Array2D<NativeT>& expected, const LiteralSlice& actual,
const ErrorSpec& error) {
- EXPECT_TRUE(Near(*Literal::CreateR2FromArray2D(expected), actual, error));
+ EXPECT_TRUE(Near(*LiteralUtil::CreateR2FromArray2D(expected), actual, error));
}
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR3NearArray3D(
const Array3D<NativeT>& expected, const LiteralSlice& actual,
const ErrorSpec& error) {
- EXPECT_TRUE(Near(*Literal::CreateR3FromArray3D(expected), actual, error));
+ EXPECT_TRUE(Near(*LiteralUtil::CreateR3FromArray3D(expected), actual, error));
}
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR4NearArray4D(
const Array4D<NativeT>& expected, const LiteralSlice& actual,
const ErrorSpec& error) {
- EXPECT_TRUE(Near(*Literal::CreateR4FromArray4D(expected), actual, error));
+ EXPECT_TRUE(Near(*LiteralUtil::CreateR4FromArray4D(expected), actual, error));
}
} // namespace xla
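All of the ExpectR*Equal/ExpectR*Near templates above change only their literal factory; call sites are untouched because the expected value is still passed as plain C++ data. A typical invocation, for reference (actual_slice is an assumed LiteralSlice holding the computation's output):

    LiteralTestUtil::ExpectR2Near<float>({{1.0f, 2.0f}, {3.0f, 4.0f}},
                                         actual_slice, ErrorSpec(0.0001));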
diff --git a/tensorflow/compiler/xla/tests/literal_test_util_test.cc b/tensorflow/compiler/xla/tests/literal_test_util_test.cc
index bbac7285ae..f297b2b847 100644
--- a/tensorflow/compiler/xla/tests/literal_test_util_test.cc
+++ b/tensorflow/compiler/xla/tests/literal_test_util_test.cc
@@ -31,8 +31,9 @@ namespace xla {
namespace {
TEST(LiteralTestUtilTest, ComparesEqualTuplesEqual) {
- std::unique_ptr<Literal> literal = Literal::MakeTuple({
- Literal::CreateR0<int32>(42).get(), Literal::CreateR0<int32>(64).get(),
+ std::unique_ptr<Literal> literal = LiteralUtil::MakeTuple({
+ LiteralUtil::CreateR0<int32>(42).get(),
+ LiteralUtil::CreateR0<int32>(64).get(),
});
EXPECT_TRUE(LiteralTestUtil::Equal(*literal, *literal));
}
@@ -42,11 +43,13 @@ TEST(LiteralTestUtilTest, ComparesUnequalTuplesUnequal) {
// un-fail an assertion failure. The CHECK-failure is death, so we can make a
// death assertion.
auto unequal_things_are_equal = [] {
- std::unique_ptr<Literal> lhs = Literal::MakeTuple({
- Literal::CreateR0<int32>(42).get(), Literal::CreateR0<int32>(64).get(),
+ std::unique_ptr<Literal> lhs = LiteralUtil::MakeTuple({
+ LiteralUtil::CreateR0<int32>(42).get(),
+ LiteralUtil::CreateR0<int32>(64).get(),
});
- std::unique_ptr<Literal> rhs = Literal::MakeTuple({
- Literal::CreateR0<int32>(64).get(), Literal::CreateR0<int32>(42).get(),
+ std::unique_ptr<Literal> rhs = LiteralUtil::MakeTuple({
+ LiteralUtil::CreateR0<int32>(64).get(),
+ LiteralUtil::CreateR0<int32>(42).get(),
});
CHECK(LiteralTestUtil::Equal(*lhs, *rhs)) << "LHS and RHS are unequal";
};
@@ -55,8 +58,8 @@ TEST(LiteralTestUtilTest, ComparesUnequalTuplesUnequal) {
TEST(LiteralTestUtilTest, ExpectNearFailurePlacesResultsInTemporaryDirectory) {
auto dummy_lambda = [] {
- auto two = Literal::CreateR0<float>(2);
- auto four = Literal::CreateR0<float>(4);
+ auto two = LiteralUtil::CreateR0<float>(2);
+ auto four = LiteralUtil::CreateR0<float>(4);
ErrorSpec error(0.001);
CHECK(LiteralTestUtil::Near(*two, *four, error)) << "two is not near four";
};
@@ -98,8 +101,8 @@ TEST(LiteralTestUtilTest, ExpectNearFailurePlacesResultsInTemporaryDirectory) {
}
TEST(LiteralTestUtilTest, NotEqualHasValuesInMessage) {
- auto expected = Literal::CreateR1<int32>({1, 2, 3});
- auto actual = Literal::CreateR1<int32>({4, 5, 6});
+ auto expected = LiteralUtil::CreateR1<int32>({1, 2, 3});
+ auto actual = LiteralUtil::CreateR1<int32>({4, 5, 6});
::testing::AssertionResult result =
LiteralTestUtil::Equal(*expected, *actual);
EXPECT_THAT(result.message(), ::testing::HasSubstr("expected: {1, 2, 3}"));
@@ -107,25 +110,26 @@ TEST(LiteralTestUtilTest, NotEqualHasValuesInMessage) {
}
TEST(LiteralTestUtilTest, NearComparatorR1) {
- auto a =
- Literal::CreateR1<float>({0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
- auto b =
- Literal::CreateR1<float>({0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
+ auto a = LiteralUtil::CreateR1<float>(
+ {0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
+ auto b = LiteralUtil::CreateR1<float>(
+ {0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
EXPECT_TRUE(LiteralTestUtil::Near(*a, *b, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, NearComparatorR1Nan) {
- auto a =
- Literal::CreateR1<float>({0.0, 0.1, 0.2, 0.3, NAN, 0.5, 0.6, 0.7, 0.8});
- auto b =
- Literal::CreateR1<float>({0.0, 0.1, 0.2, 0.3, NAN, 0.5, 0.6, 0.7, 0.8});
+ auto a = LiteralUtil::CreateR1<float>(
+ {0.0, 0.1, 0.2, 0.3, NAN, 0.5, 0.6, 0.7, 0.8});
+ auto b = LiteralUtil::CreateR1<float>(
+ {0.0, 0.1, 0.2, 0.3, NAN, 0.5, 0.6, 0.7, 0.8});
EXPECT_TRUE(LiteralTestUtil::Near(*a, *b, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtil, NearComparatorDifferentLengths) {
- auto a =
- Literal::CreateR1<float>({0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
- auto b = Literal::CreateR1<float>({0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7});
+ auto a = LiteralUtil::CreateR1<float>(
+ {0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
+ auto b =
+ LiteralUtil::CreateR1<float>({0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7});
EXPECT_FALSE(LiteralTestUtil::Near(*a, *b, ErrorSpec{0.0001}));
EXPECT_FALSE(LiteralTestUtil::Near(*b, *a, ErrorSpec{0.0001}));
}
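One idiom in this file is easy to misread: LiteralUtil::MakeTuple takes raw Literal pointers obtained via .get() on temporary unique_ptrs. That is safe here because the temporaries live until the end of the full expression and MakeTuple copies the pointed-to literals into the new tuple (a reading of the call pattern above, not verified against the implementation):

    // The unique_ptr temporaries outlive the call; MakeTuple copies their contents.
    std::unique_ptr<Literal> t = LiteralUtil::MakeTuple({
        LiteralUtil::CreateR0<int32>(42).get(),
        LiteralUtil::CreateR0<int32>(64).get(),
    });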
diff --git a/tensorflow/compiler/xla/tests/llvm_compiler_test.cc b/tensorflow/compiler/xla/tests/llvm_compiler_test.cc
index 082bc34136..13df83ffff 100644
--- a/tensorflow/compiler/xla/tests/llvm_compiler_test.cc
+++ b/tensorflow/compiler/xla/tests/llvm_compiler_test.cc
@@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/llvm_compiler.h"
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/backend.h"
#include "tensorflow/compiler/xla/service/cpu/cpu_compiler.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_compiler.h"
@@ -64,7 +65,7 @@ class LLVMCompilerTest : public ::testing::Test {
// Create HLO module, and run the compiler.
auto builder = HloComputation::Builder(TestName());
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto hlo_module = CreateNewModule();
hlo_module->AddEntryComputation(builder.Build());
@@ -86,7 +87,7 @@ class LLVMCompilerTest : public ::testing::Test {
void TestMultiModuleCompilation(LLVMCompiler *compiler) {
HloComputation::Builder builder(TestName());
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
std::unique_ptr<HloModule> hlo_module = CreateNewModule();
hlo_module->AddEntryComputation(builder.Build());
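For context, the minimal module these compiler tests build is a single constant instruction, sketched below from the calls in the hunk (CreateNewModule is the test fixture's helper, per the diff):

    auto builder = HloComputation::Builder("constant_module");
    builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
    std::unique_ptr<HloModule> hlo_module = CreateNewModule();
    hlo_module->AddEntryComputation(builder.Build());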
diff --git a/tensorflow/compiler/xla/tests/llvm_irgen_test_base.cc b/tensorflow/compiler/xla/tests/llvm_irgen_test_base.cc
index 2c45f19c09..6fc1115097 100644
--- a/tensorflow/compiler/xla/tests/llvm_irgen_test_base.cc
+++ b/tensorflow/compiler/xla/tests/llvm_irgen_test_base.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include <functional>
#include <utility>
+#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
#include "tensorflow/compiler/xla/tests/filecheck.h"
#include "tensorflow/core/lib/core/status_test_util.h"
@@ -25,28 +26,28 @@ limitations under the License.
namespace xla {
-void LLVMIRGenTestBase::SetIrHook(bool match_optimized_ir) {
+void LlvmIrGenTestBase::SetIrHook(bool match_optimized_ir) {
auto llvm_compiler = GetLLVMCompiler();
using std::placeholders::_1;
// Add the IR inspection hook to the LLVM compiler.
if (match_optimized_ir) {
llvm_compiler->SetPostOptimizationHook(
- std::bind(&LLVMIRGenTestBase::IrHook, this, _1));
+ std::bind(&LlvmIrGenTestBase::IrHook, this, _1));
} else {
llvm_compiler->SetPreOptimizationHook(
- std::bind(&LLVMIRGenTestBase::IrHook, this, _1));
+ std::bind(&LlvmIrGenTestBase::IrHook, this, _1));
}
}
-void LLVMIRGenTestBase::ResetIrHook() {
+void LlvmIrGenTestBase::ResetIrHook() {
auto llvm_compiler = GetLLVMCompiler();
llvm_compiler->RemovePreOptimizationHook();
llvm_compiler->RemovePostOptimizationHook();
}
-void LLVMIRGenTestBase::CompileAndVerifyIr(
+void LlvmIrGenTestBase::CompileAndVerifyIr(
std::unique_ptr<HloModule> hlo_module, const string& pattern,
bool match_optimized_ir) {
SetIrHook(match_optimized_ir);
@@ -58,7 +59,17 @@ void LLVMIRGenTestBase::CompileAndVerifyIr(
EXPECT_TRUE(filecheck_result.ValueOrDie());
}
-void LLVMIRGenTestBase::CompileAheadOfTimeAndVerifyIr(
+void LlvmIrGenTestBase::CompileAndVerifyIr(const string& hlo_text,
+ const string& expected_llvm_ir,
+ bool match_optimized_ir) {
+ HloModuleConfig config;
+ config.set_debug_options(GetDebugOptionsForTest());
+ TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
+ ParseHloString(hlo_text, config));
+ CompileAndVerifyIr(std::move(module), expected_llvm_ir, match_optimized_ir);
+}
+
+void LlvmIrGenTestBase::CompileAheadOfTimeAndVerifyIr(
std::unique_ptr<HloModule> hlo_module, const AotCompilationOptions& options,
const string& pattern, bool match_optimized_ir) {
SetIrHook(match_optimized_ir);
@@ -71,11 +82,11 @@ void LLVMIRGenTestBase::CompileAheadOfTimeAndVerifyIr(
EXPECT_TRUE(filecheck_result.ValueOrDie());
}
-LLVMCompiler* LLVMIRGenTestBase::GetLLVMCompiler() {
+LLVMCompiler* LlvmIrGenTestBase::GetLLVMCompiler() {
return static_cast<LLVMCompiler*>(backend().compiler());
}
-Status LLVMIRGenTestBase::IrHook(const llvm::Module& module) {
+Status LlvmIrGenTestBase::IrHook(const llvm::Module& module) {
ir_ = llvm_ir::DumpModuleToString(module);
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/tests/llvm_irgen_test_base.h b/tensorflow/compiler/xla/tests/llvm_irgen_test_base.h
index 74cbb5f5df..018f9546af 100644
--- a/tensorflow/compiler/xla/tests/llvm_irgen_test_base.h
+++ b/tensorflow/compiler/xla/tests/llvm_irgen_test_base.h
@@ -24,7 +24,7 @@ limitations under the License.
namespace xla {
// Tests that verify IR emitted by the CPU/GPU backend is as expected.
-class LLVMIRGenTestBase : public CodegenTestBase {
+class LlvmIrGenTestBase : public CodegenTestBase {
protected:
// Compiles the given HLO module to LLVM IR and verifies the IR matches the
// given pattern. `pattern` is in the FileCheck pattern matching syntax
@@ -38,6 +38,12 @@ class LLVMIRGenTestBase : public CodegenTestBase {
void CompileAndVerifyIr(std::unique_ptr<HloModule> hlo_module,
const string& pattern, bool match_optimized_ir);
+ // A thin wrapper around CompileAndVerifyIr that parses `hlo_text` to create
+ // an HLO module.
+ void CompileAndVerifyIr(const string& hlo_text,
+ const string& expected_llvm_ir,
+ bool match_optimized_ir = false);
+
// Compiles the given HLO module to LLVM IR and verifies the IR matches the
// given pattern. `pattern` is in the FileCheck pattern matching syntax
// (http://llvm.org/docs/CommandGuide/FileCheck.html).
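The new text-based CompileAndVerifyIr overload lets a test skip manual module construction entirely: it parses the HLO text, compiles it, and runs FileCheck over the dumped IR. Hypothetical usage (the HLO program and the CHECK pattern below are illustrative; only the overload's signature comes from the diff):

    const string hlo_text = R"(
    HloModule Add
    ENTRY main {
      a = f32[] parameter(0)
      b = f32[] parameter(1)
      ROOT add = f32[] add(a, b)
    }
    )";
    CompileAndVerifyIr(hlo_text, "CHECK: fadd", /*match_optimized_ir=*/false);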
diff --git a/tensorflow/compiler/xla/tests/local_client_allocation_test.cc b/tensorflow/compiler/xla/tests/local_client_allocation_test.cc
index f21f83992f..0df50150ae 100644
--- a/tensorflow/compiler/xla/tests/local_client_allocation_test.cc
+++ b/tensorflow/compiler/xla/tests/local_client_allocation_test.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/local_service.h"
#include "tensorflow/compiler/xla/service/shaped_buffer.h"
#include "tensorflow/compiler/xla/statusor.h"
@@ -38,14 +38,14 @@ class LocalClientAllocationTest : public LocalClientTestBase {
XLA_TEST_F(LocalClientAllocationTest, AddVectors) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>({0.0f, 1.0f, 2.0f});
- auto y = builder.ConstantR1<float>({2.0f, 3.0f, 4.0f});
- builder.Add(x, y);
+ auto x = ConstantR1<float>(&builder, {0.0f, 1.0f, 2.0f});
+ auto y = ConstantR1<float>(&builder, {2.0f, 3.0f, 4.0f});
+ Add(x, y);
TestAllocator* allocator = GetOrCreateAllocator(local_client_->platform());
auto x_array =
- LiteralToShapedBuffer(*Literal::CreateR1<float>({0.0f, 1.0f, 2.0f}));
+ LiteralToShapedBuffer(*LiteralUtil::CreateR1<float>({0.0f, 1.0f, 2.0f}));
int64 allocation_count_before = allocator_->allocation_count();
@@ -74,9 +74,9 @@ XLA_TEST_F(LocalClientAllocationTest, RunOnDevices) {
// Run a computation on every device on the system. Verify that allocation
// occurs on the proper device.
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>({0.0f, 1.0f, 2.0f});
- auto y = builder.ConstantR1<float>({2.0f, 3.0f, 4.0f});
- builder.Add(x, y);
+ auto x = ConstantR1<float>(&builder, {0.0f, 1.0f, 2.0f});
+ auto y = ConstantR1<float>(&builder, {2.0f, 3.0f, 4.0f});
+ Add(x, y);
auto computation = builder.Build().ConsumeValueOrDie();
TestAllocator* allocator = GetOrCreateAllocator(local_client_->platform());
diff --git a/tensorflow/compiler/xla/tests/local_client_aot_test_helper.cc b/tensorflow/compiler/xla/tests/local_client_aot_test_helper.cc
index a366afe826..70612e7c49 100644
--- a/tensorflow/compiler/xla/tests/local_client_aot_test_helper.cc
+++ b/tensorflow/compiler/xla/tests/local_client_aot_test_helper.cc
@@ -37,8 +37,8 @@ using xla::string;
xla::XlaComputation Doubler() {
xla::XlaBuilder builder("doubler");
auto r0f32 = xla::ShapeUtil::MakeShape(xla::F32, {});
- auto x = builder.Parameter(0, r0f32, "x");
- builder.Mul(x, builder.ConstantR0<float>(2.0));
+ auto x = xla::Parameter(&builder, 0, r0f32, "x");
+ xla::Mul(x, xla::ConstantR0<float>(&builder, 2.0));
return std::move(builder.Build().ValueOrDie());
}
@@ -51,10 +51,10 @@ int main(int argc, char** argv) {
xla::XlaBuilder builder("aot_test_helper");
auto opaque_shape = xla::ShapeUtil::MakeOpaqueShape();
- auto opaque_param = builder.Parameter(0, opaque_shape, "x");
+ auto opaque_param = Parameter(&builder, 0, opaque_shape, "x");
auto r0f32 = xla::ShapeUtil::MakeShape(xla::F32, {});
- auto sum = builder.CustomCall("SumStructElements", {opaque_param}, r0f32);
- builder.Call(Doubler(), {sum});
+ auto sum = CustomCall(&builder, "SumStructElements", {opaque_param}, r0f32);
+ Call(&builder, Doubler(), {sum});
if (argc != 2) {
LOG(FATAL) << "local_client_aot_test_helper TARGET_CPU";
diff --git a/tensorflow/compiler/xla/tests/local_client_execute_test.cc b/tensorflow/compiler/xla/tests/local_client_execute_test.cc
index 5a70c2a9ae..2f4d197ae6 100644
--- a/tensorflow/compiler/xla/tests/local_client_execute_test.cc
+++ b/tensorflow/compiler/xla/tests/local_client_execute_test.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/device_memory_allocator.h"
#include "tensorflow/compiler/xla/service/local_service.h"
#include "tensorflow/compiler/xla/service/platform_util.h"
@@ -54,7 +54,7 @@ class LocalClientExecuteTest : public LocalClientTestBase {
XLA_TEST_F(LocalClientExecuteTest, Constant) {
XlaBuilder builder(TestName());
- auto y = builder.ConstantR0<float>(123.0f);
+ ConstantR0<float>(&builder, 123.0f);
ScopedShapedBuffer result =
ExecuteLocallyOrDie(builder.Build().ValueOrDie(), {});
@@ -64,11 +64,11 @@ XLA_TEST_F(LocalClientExecuteTest, Constant) {
XLA_TEST_F(LocalClientExecuteTest, AddScalars) {
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = builder.ConstantR0<float>(123.0f);
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = ConstantR0<float>(&builder, 123.0f);
+ Add(x, y);
- auto x_value = LiteralToShapedBuffer(*Literal::CreateR0<float>(42.0f));
+ auto x_value = LiteralToShapedBuffer(*LiteralUtil::CreateR0<float>(42.0f));
ScopedShapedBuffer result =
ExecuteLocallyOrDie(builder.Build().ValueOrDie(), {&x_value});
LiteralTestUtil::ExpectR0Near<float>(165.f, *ShapedBufferToLiteral(result),
@@ -77,11 +77,11 @@ XLA_TEST_F(LocalClientExecuteTest, AddScalars) {
XLA_TEST_F(LocalClientExecuteTest, AddZeroElementVectors) {
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {0}), "x");
- auto y = builder.ConstantR1<float>({});
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {0}), "x");
+ auto y = ConstantR1<float>(&builder, {});
+ Add(x, y);
- auto x_array = LiteralToShapedBuffer(*Literal::CreateR1<float>({}));
+ auto x_array = LiteralToShapedBuffer(*LiteralUtil::CreateR1<float>({}));
ScopedShapedBuffer result =
ExecuteLocallyOrDie(builder.Build().ValueOrDie(), {&x_array});
LiteralTestUtil::ExpectR1Near<float>({}, *ShapedBufferToLiteral(result),
@@ -90,12 +90,12 @@ XLA_TEST_F(LocalClientExecuteTest, AddZeroElementVectors) {
XLA_TEST_F(LocalClientExecuteTest, AddVectors) {
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {3}), "x");
- auto y = builder.ConstantR1<float>({2.0f, 3.0f, 4.0f});
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {3}), "x");
+ auto y = ConstantR1<float>(&builder, {2.0f, 3.0f, 4.0f});
+ Add(x, y);
auto x_array =
- LiteralToShapedBuffer(*Literal::CreateR1<float>({0.0f, 1.0f, 2.0f}));
+ LiteralToShapedBuffer(*LiteralUtil::CreateR1<float>({0.0f, 1.0f, 2.0f}));
ScopedShapedBuffer result =
ExecuteLocallyOrDie(builder.Build().ValueOrDie(), {&x_array});
LiteralTestUtil::ExpectR1Near<float>(
@@ -104,12 +104,12 @@ XLA_TEST_F(LocalClientExecuteTest, AddVectors) {
XLA_TEST_F(LocalClientExecuteTest, AddVectorsWithProfile) {
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {3}), "x");
- auto y = builder.ConstantR1<float>({2.0f, 3.0f, 4.0f});
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {3}), "x");
+ auto y = ConstantR1<float>(&builder, {2.0f, 3.0f, 4.0f});
+ Add(x, y);
auto x_array =
- LiteralToShapedBuffer(*Literal::CreateR1<float>({0.0f, 1.0f, 2.0f}));
+ LiteralToShapedBuffer(*LiteralUtil::CreateR1<float>({0.0f, 1.0f, 2.0f}));
ExecutionProfile profile;
ScopedShapedBuffer result = ExecuteLocallyOrDie(
builder.Build().ValueOrDie(), {&x_array}, DefaultExecutableBuildOptions(),
@@ -122,19 +122,19 @@ XLA_TEST_F(LocalClientExecuteTest, AddVectorsWithProfile) {
XLA_TEST_F(LocalClientExecuteTest, AddArraysWithDifferentInputLayouts) {
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {2, 2}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(F32, {2, 2}), "y");
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2, 2}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {2, 2}), "y");
+ Add(x, y);
auto computation = builder.Build().ConsumeValueOrDie();
// Create x as a col-major array.
- auto x_array = LiteralToShapedBuffer(*Literal::CreateR2WithLayout(
+ auto x_array = LiteralToShapedBuffer(*LiteralUtil::CreateR2WithLayout(
{{1.0f, 2.0f}, {3.0f, 4.0f}}, LayoutUtil::MakeLayout({0, 1})));
EXPECT_TRUE(LayoutUtil::Equal(x_array.on_device_shape().layout(),
LayoutUtil::MakeLayout({0, 1})));
// Create y as a row-major array.
- auto y_array = LiteralToShapedBuffer(*Literal::CreateR2WithLayout(
+ auto y_array = LiteralToShapedBuffer(*LiteralUtil::CreateR2WithLayout(
{{10.0f, 20.0f}, {30.0f, 40.0f}}, LayoutUtil::MakeLayout({1, 0})));
EXPECT_TRUE(LayoutUtil::Equal(y_array.on_device_shape().layout(),
LayoutUtil::MakeLayout({1, 0})));
@@ -155,15 +155,15 @@ XLA_TEST_F(LocalClientExecuteTest, AddArraysWithDifferentInputLayouts) {
XLA_TEST_F(LocalClientExecuteTest, AddArraysWithDifferentOutputLayouts) {
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {2, 2}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(F32, {2, 2}), "y");
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2, 2}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {2, 2}), "y");
+ Add(x, y);
auto computation = builder.Build().ConsumeValueOrDie();
auto x_array = LiteralToShapedBuffer(
- *Literal::CreateR2<float>({{1.0f, 2.0f}, {3.0f, 4.0f}}));
+ *LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {3.0f, 4.0f}}));
auto y_array = LiteralToShapedBuffer(
- *Literal::CreateR2<float>({{10.0f, 20.0f}, {30.0f, 40.0f}}));
+ *LiteralUtil::CreateR2<float>({{10.0f, 20.0f}, {30.0f, 40.0f}}));
// Run with col-major result layout.
ScopedShapedBuffer result_colmaj = ExecuteLocallyOrDie(
@@ -192,15 +192,15 @@ XLA_TEST_F(LocalClientExecuteTest, AddArraysWithDifferentOutputLayouts) {
XLA_TEST_F(LocalClientExecuteTest, TupleResult) {
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {2, 2}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(F32, {2, 2}), "y");
- builder.Tuple({x, y, x});
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2, 2}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {2, 2}), "y");
+ Tuple(&builder, {x, y, x});
auto computation = builder.Build().ConsumeValueOrDie();
auto x_array = LiteralToShapedBuffer(
- *Literal::CreateR2<float>({{1.0f, 2.0f}, {3.0f, 4.0f}}));
+ *LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {3.0f, 4.0f}}));
auto y_array = LiteralToShapedBuffer(
- *Literal::CreateR2<float>({{10.0f, 20.0f}, {30.0f, 40.0f}}));
+ *LiteralUtil::CreateR2<float>({{10.0f, 20.0f}, {30.0f, 40.0f}}));
ScopedShapedBuffer result =
ExecuteLocallyOrDie(computation, {&x_array, &y_array});
@@ -219,16 +219,16 @@ XLA_TEST_F(LocalClientExecuteTest, TupleResult) {
XLA_TEST_F(LocalClientExecuteTest, NestedTupleResult) {
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {2, 2}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(F32, {2, 2}), "y");
- auto inner_tuple = builder.Tuple({x, y, x});
- builder.Tuple({inner_tuple, x});
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2, 2}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {2, 2}), "y");
+ auto inner_tuple = Tuple(&builder, {x, y, x});
+ Tuple(&builder, {inner_tuple, x});
auto computation = builder.Build().ConsumeValueOrDie();
auto x_array = LiteralToShapedBuffer(
- *Literal::CreateR2<float>({{1.0f, 2.0f}, {3.0f, 4.0f}}));
+ *LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {3.0f, 4.0f}}));
auto y_array = LiteralToShapedBuffer(
- *Literal::CreateR2<float>({{10.0f, 20.0f}, {30.0f, 40.0f}}));
+ *LiteralUtil::CreateR2<float>({{10.0f, 20.0f}, {30.0f, 40.0f}}));
ScopedShapedBuffer result =
ExecuteLocallyOrDie(computation, {&x_array, &y_array});
@@ -250,12 +250,12 @@ XLA_TEST_F(LocalClientExecuteTest, NestedTupleResult) {
XLA_TEST_F(LocalClientExecuteTest, TupleResultWithLayout) {
// Verify setting the result layout of a computation with a tuple output.
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {2, 2}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(F32, {2, 2}), "y");
- builder.Tuple({x, y});
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2, 2}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {2, 2}), "y");
+ Tuple(&builder, {x, y});
auto array = LiteralToShapedBuffer(
- *Literal::CreateR2<float>({{1.0f, 2.0f}, {3.0f, 4.0f}}));
+ *LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {3.0f, 4.0f}}));
ExecutableBuildOptions options = DefaultExecutableBuildOptions();
Shape shape_with_layout = ShapeUtil::MakeTupleShape(
@@ -287,23 +287,23 @@ XLA_TEST_F(LocalClientExecuteTest, TupleArguments) {
// Computation adds the respective array and vector elements from each tuple
// argument and returns the results as a tuple.
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, tuple_shape0, "x");
- auto y = builder.Parameter(1, tuple_shape1, "y");
- auto x_0 = builder.GetTupleElement(x, 0);
- auto x_1 = builder.GetTupleElement(x, 1);
- auto y_0 = builder.GetTupleElement(y, 0);
- auto y_1 = builder.GetTupleElement(y, 1);
- auto array_sum = builder.Add(x_0, y_1);
- auto vector_diff = builder.Sub(x_1, y_0);
- builder.Tuple({array_sum, vector_diff});
+ auto x = Parameter(&builder, 0, tuple_shape0, "x");
+ auto y = Parameter(&builder, 1, tuple_shape1, "y");
+ auto x_0 = GetTupleElement(x, 0);
+ auto x_1 = GetTupleElement(x, 1);
+ auto y_0 = GetTupleElement(y, 0);
+ auto y_1 = GetTupleElement(y, 1);
+ auto array_sum = Add(x_0, y_1);
+ auto vector_diff = Sub(x_1, y_0);
+ Tuple(&builder, {array_sum, vector_diff});
auto computation = builder.Build().ConsumeValueOrDie();
- auto x_literal = Literal::MakeTuple(
- {Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}).get(),
- Literal::CreateR1<float>({42.0, 75.0, 123.0}).get()});
- auto y_literal = Literal::MakeTuple(
- {Literal::CreateR1<float>({2.0, 4.0, 6.0}).get(),
- Literal::CreateR2<float>({{55.0, 44.0}, {33.0, 22.0}}).get()});
+ auto x_literal = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}).get(),
+ LiteralUtil::CreateR1<float>({42.0, 75.0, 123.0}).get()});
+ auto y_literal = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR1<float>({2.0, 4.0, 6.0}).get(),
+ LiteralUtil::CreateR2<float>({{55.0, 44.0}, {33.0, 22.0}}).get()});
auto x_buffer = LiteralToShapedBuffer(*x_literal);
auto y_buffer = LiteralToShapedBuffer(*y_literal);
@@ -333,23 +333,23 @@ XLA_TEST_F(LocalClientExecuteTest, NestedTupleArgument) {
// Computation negates the array element and sums the two vector elements in
// the nested tuple. The resulting array and vector are returned as a tuple.
XlaBuilder builder(TestName());
- auto param = builder.Parameter(0, nested_tuple_shape, "param");
- auto inner_tuple = builder.GetTupleElement(param, 0);
- auto inner_array = builder.GetTupleElement(inner_tuple, 0);
- auto inner_vector = builder.GetTupleElement(inner_tuple, 1);
- auto outer_vector = builder.GetTupleElement(param, 1);
-
- auto negate_array = builder.Neg(inner_array);
- auto vector_sum = builder.Add(inner_vector, outer_vector);
- builder.Tuple({negate_array, vector_sum});
+ auto param = Parameter(&builder, 0, nested_tuple_shape, "param");
+ auto inner_tuple = GetTupleElement(param, 0);
+ auto inner_array = GetTupleElement(inner_tuple, 0);
+ auto inner_vector = GetTupleElement(inner_tuple, 1);
+ auto outer_vector = GetTupleElement(param, 1);
+
+ auto negate_array = Neg(inner_array);
+ auto vector_sum = Add(inner_vector, outer_vector);
+ Tuple(&builder, {negate_array, vector_sum});
auto computation = builder.Build().ConsumeValueOrDie();
- auto arg_literal = Literal::MakeTuple(
- {Literal::MakeTuple(
- {Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}).get(),
- Literal::CreateR1<float>({42.0, 75.0, 123.0}).get()})
+ auto arg_literal = LiteralUtil::MakeTuple(
+ {LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}).get(),
+ LiteralUtil::CreateR1<float>({42.0, 75.0, 123.0}).get()})
.get(),
- Literal::CreateR1<float>({222.0, -2.0, 10.0}).get()});
+ LiteralUtil::CreateR1<float>({222.0, -2.0, 10.0}).get()});
auto arg_buffer = LiteralToShapedBuffer(*arg_literal);
ScopedShapedBuffer result = ExecuteLocallyOrDie(computation, {&arg_buffer});
@@ -371,15 +371,15 @@ XLA_TEST_F(LocalClientExecuteTest, PassingTupleResultBackIntoComputation) {
ShapeUtil::MakeTupleShape({array_shape, array_shape});
XlaBuilder builder(TestName());
- auto param = builder.Parameter(0, tuple_shape, "param");
- auto element_0 = builder.GetTupleElement(param, 0);
- auto element_1 = builder.GetTupleElement(param, 1);
- builder.Tuple({builder.Neg(element_0), builder.Add(element_1, element_1)});
+ auto param = Parameter(&builder, 0, tuple_shape, "param");
+ auto element_0 = GetTupleElement(param, 0);
+ auto element_1 = GetTupleElement(param, 1);
+ Tuple(&builder, {Neg(element_0), Add(element_1, element_1)});
auto computation = builder.Build().ConsumeValueOrDie();
- auto arg_literal = Literal::MakeTuple(
- {Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}).get(),
- Literal::CreateR2<float>({{11.0, 3.0}, {4.0, 5.0}}).get()});
+ auto arg_literal = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}).get(),
+ LiteralUtil::CreateR2<float>({{11.0, 3.0}, {4.0, 5.0}}).get()});
auto arg_buffer = LiteralToShapedBuffer(*arg_literal);
ScopedShapedBuffer result_0 = ExecuteLocallyOrDie(computation, {&arg_buffer});
@@ -414,26 +414,25 @@ XLA_TEST_F(LocalClientExecuteTest, LargeTuple) {
const Shape tuple_shape = ShapeUtil::MakeTupleShape(element_shapes);
XlaBuilder builder(TestName());
- auto param = builder.Parameter(0, tuple_shape, "param");
+ auto param = Parameter(&builder, 0, tuple_shape, "param");
// Add each element's tuple index value to every element.
std::vector<XlaOp> result_elements;
for (int i = 0; i < kElementCount; ++i) {
- auto element = builder.GetTupleElement(param, i);
- result_elements.push_back(
- builder.Add(element, builder.ConstantR0<float>(i)));
+ auto element = GetTupleElement(param, i);
+ result_elements.push_back(Add(element, ConstantR0<float>(&builder, i)));
}
- builder.Tuple(result_elements);
+ Tuple(&builder, result_elements);
auto computation = builder.Build().ConsumeValueOrDie();
// Feed in a tuple where each two-element vector element is {tuple_index,
// -tuple_index}.
std::vector<std::unique_ptr<Literal>> arg_elements;
for (int i = 0; i < kElementCount; ++i) {
- arg_elements.push_back(Literal::CreateR1<float>({1.0f * i, -1.0f * i}));
+ arg_elements.push_back(LiteralUtil::CreateR1<float>({1.0f * i, -1.0f * i}));
}
std::unique_ptr<Literal> arg_literal =
- Literal::MakeTupleOwned(std::move(arg_elements));
+ LiteralUtil::MakeTupleOwned(std::move(arg_elements));
auto arg_buffer = LiteralToShapedBuffer(*arg_literal);
ScopedShapedBuffer result = ExecuteLocallyOrDie(computation, {&arg_buffer});
@@ -458,22 +457,22 @@ XLA_TEST_F(LocalClientExecuteTest, LargeNestedTuple) {
const Shape tuple_shape = ShapeUtil::MakeTupleShape(inner_tuple_shapes);
XlaBuilder builder(TestName());
- auto param = builder.Parameter(0, tuple_shape, "param");
+ auto param = Parameter(&builder, 0, tuple_shape, "param");
// The computation increments each leaf value by an amount equal to the leaf's
// ordinal position in a traversal of the tuple.
std::vector<XlaOp> result_elements;
for (int i = 0; i < kFanout; ++i) {
- auto outer_element = builder.GetTupleElement(param, i);
+ auto outer_element = GetTupleElement(param, i);
std::vector<XlaOp> inner_result_elements;
for (int j = 0; j < kFanout; ++j) {
- auto inner_element = builder.GetTupleElement(outer_element, j);
- inner_result_elements.push_back(builder.Add(
- inner_element, builder.ConstantR0<float>(i * kFanout + j)));
+ auto inner_element = GetTupleElement(outer_element, j);
+ inner_result_elements.push_back(
+ Add(inner_element, ConstantR0<float>(&builder, i * kFanout + j)));
}
- result_elements.push_back(builder.Tuple(inner_result_elements));
+ result_elements.push_back(Tuple(&builder, inner_result_elements));
}
- builder.Tuple(result_elements);
+ Tuple(&builder, result_elements);
auto computation = builder.Build().ConsumeValueOrDie();
// Construct the argument to pass to the computation.
@@ -481,12 +480,13 @@ XLA_TEST_F(LocalClientExecuteTest, LargeNestedTuple) {
for (int i = 0; i < kFanout; ++i) {
std::vector<std::unique_ptr<Literal>> inner_tuple_elements;
for (int j = 0; j < kFanout; ++j) {
- inner_tuple_elements.push_back(Literal::CreateR0<float>(i + j));
+ inner_tuple_elements.push_back(LiteralUtil::CreateR0<float>(i + j));
}
outer_tuple_elements.push_back(
- Literal::MakeTupleOwned(std::move(inner_tuple_elements)));
+ LiteralUtil::MakeTupleOwned(std::move(inner_tuple_elements)));
}
- auto arg_literal = Literal::MakeTupleOwned(std::move(outer_tuple_elements));
+ auto arg_literal =
+ LiteralUtil::MakeTupleOwned(std::move(outer_tuple_elements));
auto arg_buffer = LiteralToShapedBuffer(*arg_literal);
ScopedShapedBuffer result = ExecuteLocallyOrDie(computation, {&arg_buffer});
@@ -513,23 +513,23 @@ XLA_TEST_F(LocalClientExecuteTest, DeepTuple) {
}
XlaBuilder builder(TestName());
- auto element = builder.Parameter(0, shape, "param");
+ auto element = Parameter(&builder, 0, shape, "param");
for (int i = 0; i < kTupleDepth; ++i) {
- element = builder.GetTupleElement(element, 0);
+ element = GetTupleElement(element, 0);
}
- auto output = builder.Add(element, builder.ConstantR0<float>(42.0));
+ auto output = Add(element, ConstantR0<float>(&builder, 42.0));
for (int i = 0; i < kTupleDepth; ++i) {
- output = builder.Tuple({output});
+ output = Tuple(&builder, {output});
}
auto computation = builder.Build().ConsumeValueOrDie();
// Construct the argument to pass to the computation.
- std::unique_ptr<Literal> arg_literal = Literal::CreateR0<float>(123.0);
+ std::unique_ptr<Literal> arg_literal = LiteralUtil::CreateR0<float>(123.0);
for (int i = 0; i < kTupleDepth; ++i) {
std::vector<std::unique_ptr<Literal>> arg_vector;
arg_vector.push_back(std::move(arg_literal));
- arg_literal = Literal::MakeTupleOwned(std::move(arg_vector));
+ arg_literal = LiteralUtil::MakeTupleOwned(std::move(arg_vector));
}
auto arg_buffer = LiteralToShapedBuffer(*arg_literal);
@@ -547,12 +547,12 @@ XLA_TEST_F(LocalClientExecuteTest, DeepTuple) {
XLA_TEST_F(LocalClientExecuteTest, InvalidNumberOfArguments) {
// Test passing in an invalid number of arguments.
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {3}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(F32, {3}), "y");
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {3}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {3}), "y");
+ Add(x, y);
auto x_array =
- LiteralToShapedBuffer(*Literal::CreateR1<float>({1.0f, 2.0f, 3.0f}));
+ LiteralToShapedBuffer(*LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f}));
auto execute_status =
ExecuteLocally(builder.Build().ValueOrDie(), {&x_array});
@@ -564,11 +564,11 @@ XLA_TEST_F(LocalClientExecuteTest, InvalidNumberOfArguments) {
XLA_TEST_F(LocalClientExecuteTest, IncorrectArgumentShape) {
// Test passing in an argument with the wrong shape.
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {3}), "x");
- builder.Neg(x);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {3}), "x");
+ Neg(x);
auto x_array = LiteralToShapedBuffer(
- *Literal::CreateR2<float>({{0.0f, 1.0f}, {2.0f, 3.0f}}));
+ *LiteralUtil::CreateR2<float>({{0.0f, 1.0f}, {2.0f, 3.0f}}));
auto execute_status =
ExecuteLocally(builder.Build().ValueOrDie(), {&x_array});
@@ -581,11 +581,11 @@ XLA_TEST_F(LocalClientExecuteTest, IncorrectArgumentShape) {
XLA_TEST_F(LocalClientExecuteTest, InvalidResultLayout) {
// Test passing in an invalid result layout parameter.
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {2, 2}), "x");
- builder.Neg(x);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2, 2}), "x");
+ Neg(x);
auto x_array = LiteralToShapedBuffer(
- *Literal::CreateR2<float>({{0.0f, 1.0f}, {2.0f, 3.0f}}));
+ *LiteralUtil::CreateR2<float>({{0.0f, 1.0f}, {2.0f, 3.0f}}));
auto execute_status = ExecuteLocally(
builder.Build().ValueOrDie(), {&x_array},
DefaultExecutableBuildOptions().set_result_layout(
@@ -604,7 +604,7 @@ XLA_TEST_F(LocalClientExecuteTest, RunOnAllDeviceOrdinals) {
// Try to run a trivial computation on every device on the system. If a
// specific device is not supported, check that the right error is returned.
XlaBuilder builder(TestName());
- builder.ConstantR0<float>(42.0f);
+ ConstantR0<float>(&builder, 42.0f);
auto computation = builder.Build().ConsumeValueOrDie();
for (int d = 0; d < local_client_->device_count(); ++d) {
if (!local_client_->device_ordinal_supported(d)) {
@@ -631,7 +631,7 @@ XLA_TEST_F(LocalClientExecuteTest, InvalidDeviceOrdinalValues) {
// Try running computations on devices with device ordinal values which do not
// exist.
XlaBuilder builder(TestName());
- builder.ConstantR0<float>(42.0f);
+ ConstantR0<float>(&builder, 42.0f);
auto computation = builder.Build().ConsumeValueOrDie();
auto execute_status =
@@ -648,7 +648,7 @@ XLA_TEST_F(LocalClientExecuteTest, InvalidDeviceOrdinalValues) {
XLA_TEST_F(LocalClientExecuteTest, RunOnStream) {
// Run a computation on a specific stream on each device on the system.
XlaBuilder builder(TestName());
- builder.ConstantR0<float>(42.0f);
+ ConstantR0<float>(&builder, 42.0f);
auto computation = builder.Build().ConsumeValueOrDie();
for (int d = 0; d < local_client_->device_count(); ++d) {
@@ -684,7 +684,7 @@ XLA_TEST_F(LocalClientExecuteTest,
wrong_stream.Init();
XlaBuilder builder(TestName());
- builder.ConstantR0<float>(42.0f);
+ ConstantR0<float>(&builder, 42.0f);
auto execute_status = ExecuteLocally(
builder.Build().ValueOrDie(), {}, DefaultExecutableBuildOptions(),
DefaultExecutableRunOptions().set_stream(&wrong_stream));
@@ -701,7 +701,7 @@ XLA_TEST_F(LocalClientExecuteTest,
TestAllocator allocator(wrong_platform);
XlaBuilder builder(TestName());
- auto y = builder.ConstantR0<float>(123.0f);
+ ConstantR0<float>(&builder, 123.0f);
auto execute_status = ExecuteLocally(
builder.Build().ValueOrDie(), {}, DefaultExecutableBuildOptions(),
@@ -714,7 +714,7 @@ XLA_TEST_F(LocalClientExecuteTest,
XLA_TEST_F(LocalClientExecuteTest, RunOnUninitializedStream) {
// Try to run a computation on a stream that has not been initialized.
XlaBuilder builder(TestName());
- builder.ConstantR0<float>(42.0f);
+ ConstantR0<float>(&builder, 42.0f);
LOG(INFO) << "default device = " << local_client_->default_device_ordinal();
se::StreamExecutor* executor =
@@ -737,11 +737,11 @@ XLA_TEST_F(LocalClientExecuteTest, SelectBetweenTuples) {
std::initializer_list<float> vec1 = {1.f, 2.f, 3.f};
std::initializer_list<float> vec2 = {2.f, 4.f, 6.f};
- auto tuple12 = builder.Tuple(
- {builder.ConstantR1<float>(vec1), builder.ConstantR1<float>(vec2)});
- auto tuple21 = builder.Tuple(
- {builder.ConstantR1<float>(vec2), builder.ConstantR1<float>(vec1)});
- builder.Select(builder.ConstantR0<bool>(false), tuple12, tuple21);
+ auto tuple12 = Tuple(&builder, {ConstantR1<float>(&builder, vec1),
+ ConstantR1<float>(&builder, vec2)});
+ auto tuple21 = Tuple(&builder, {ConstantR1<float>(&builder, vec2),
+ ConstantR1<float>(&builder, vec1)});
+ Select(ConstantR0<bool>(&builder, false), tuple12, tuple21);
ScopedShapedBuffer result =
ExecuteLocallyOrDie(builder.Build().ValueOrDie(), {});
@@ -754,9 +754,9 @@ XLA_TEST_F(LocalClientExecuteTest, SelectBetweenTuples) {
XLA_TEST_F(LocalClientExecuteTest, CompileExecutable) {
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {3}), "x");
- auto y = builder.ConstantR1<float>({2.0f, 3.0f, 4.0f});
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {3}), "x");
+ auto y = ConstantR1<float>(&builder, {2.0f, 3.0f, 4.0f});
+ Add(x, y);
Shape argument_layout =
ShapeUtil::MakeShapeWithLayout(F32, /*dimensions=*/{3}, {0});
@@ -768,7 +768,7 @@ XLA_TEST_F(LocalClientExecuteTest, CompileExecutable) {
executable_status.ConsumeValueOrDie();
auto x_array =
- LiteralToShapedBuffer(*Literal::CreateR1<float>({0.0f, 1.0f, 2.0f}));
+ LiteralToShapedBuffer(*LiteralUtil::CreateR1<float>({0.0f, 1.0f, 2.0f}));
ScopedShapedBuffer result =
executable->Run({&x_array}, DefaultExecutableRunOptions())
.ConsumeValueOrDie();
@@ -792,29 +792,29 @@ XLA_TEST_F(LocalClientExecuteTest, ShapeBufferToLiteralConversion) {
};
// Array shapes.
- test_to_device_and_back(*Literal::CreateR0<float>(42.0));
- test_to_device_and_back(*Literal::CreateR0<bool>(true));
- test_to_device_and_back(*Literal::CreateR1<float>({1.0, 42.0, 744.4}));
+ test_to_device_and_back(*LiteralUtil::CreateR0<float>(42.0));
+ test_to_device_and_back(*LiteralUtil::CreateR0<bool>(true));
+ test_to_device_and_back(*LiteralUtil::CreateR1<float>({1.0, 42.0, 744.4}));
test_to_device_and_back(
- *Literal::CreateR2<float>({{1.0, 2.0, 3.0}, {44.0, 0.1, -3}}));
- test_to_device_and_back(*Literal::CreateR2<int32>({{2, 1}, {4444, 56}}));
+ *LiteralUtil::CreateR2<float>({{1.0, 2.0, 3.0}, {44.0, 0.1, -3}}));
+ test_to_device_and_back(*LiteralUtil::CreateR2<int32>({{2, 1}, {4444, 56}}));
// Null shape (empty tuple).
- test_to_device_and_back(*Literal::MakeTuple({}));
+ test_to_device_and_back(*LiteralUtil::MakeTuple({}));
// Non-nested tuples.
test_to_device_and_back(
- *Literal::MakeTuple({Literal::CreateR0<float>(12223.0).get()}));
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR0<float>(12223.0).get()}));
test_to_device_and_back(
- *Literal::MakeTuple({Literal::CreateR1<float>({1.0, -42.0}).get(),
- Literal::CreateR0<float>(123456.0).get()}));
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>({1.0, -42.0}).get(),
+ LiteralUtil::CreateR0<float>(123456.0).get()}));
// Nested tuple.
- test_to_device_and_back(*Literal::MakeTuple(
- {Literal::MakeTuple({Literal::CreateR1<float>({1.0, -42.0}).get(),
- Literal::CreateR0<float>(123456.0).get()})
+ test_to_device_and_back(*LiteralUtil::MakeTuple(
+ {LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>({1.0, -42.0}).get(),
+ LiteralUtil::CreateR0<float>(123456.0).get()})
.get(),
- Literal::CreateR0<bool>(false).get()}));
+ LiteralUtil::CreateR0<bool>(false).get()}));
}
XLA_TEST_F(LocalClientExecuteTest, ShapeBufferToLiteralConversion64bit) {
@@ -832,24 +832,47 @@ XLA_TEST_F(LocalClientExecuteTest, ShapeBufferToLiteralConversion64bit) {
};
test_to_device_and_back(
- *Literal::CreateR2<double>({{1.0, 2.0, 3.0}, {44.0, 0.1, -3}}));
- test_to_device_and_back(*Literal::CreateR2<int64>({{2, 1}, {4444, 56}}));
+ *LiteralUtil::CreateR2<double>({{1.0, 2.0, 3.0}, {44.0, 0.1, -3}}));
+ test_to_device_and_back(*LiteralUtil::CreateR2<int64>({{2, 1}, {4444, 56}}));
test_to_device_and_back(
- *Literal::CreateR2<uint64>({{20000000000ULL, 1}, {4444, 56}}));
- test_to_device_and_back(
- *Literal::MakeTuple({Literal::CreateR1<double>({1.0, -42.0}).get(),
- Literal::CreateR0<int64>(123456789000LL).get()}));
+ *LiteralUtil::CreateR2<uint64>({{20000000000ULL, 1}, {4444, 56}}));
+ test_to_device_and_back(*LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR1<double>({1.0, -42.0}).get(),
+ LiteralUtil::CreateR0<int64>(123456789000LL).get()}));
+}
+
+XLA_TEST_F(LocalClientExecuteTest, InfeedTest) {
+ XlaBuilder builder(TestName());
+ const Shape shape = ShapeUtil::MakeShape(F32, {3});
+ auto in = Infeed(&builder, shape);
+ auto constant = ConstantR1<float>(&builder, {1.0f, 2.0f, 3.0f});
+ Add(in, constant);
+
+ std::unique_ptr<Literal> result;
+ std::unique_ptr<tensorflow::Thread> thread(
+ tensorflow::Env::Default()->StartThread(
+ tensorflow::ThreadOptions(), "execute_thread", [&] {
+ result = ShapedBufferToLiteral(ExecuteLocallyOrDie(
+ builder.Build().ValueOrDie(), /*arguments=*/{}));
+ }));
+
+ ASSERT_IS_OK(local_client_->TransferToInfeedLocal(
+ *LiteralUtil::CreateR1<float>({-5.0, 123.0, 42.0}),
+ local_client_->default_device_ordinal()));
+
+ // Join the thread.
+ thread.reset();
+
+ LiteralTestUtil::ExpectR1Equal<float>({-4.0, 125.0, 45.0}, *result);
}
-// TODO(b/34359662): Support infeed/outfeed on GPU and CPU parallel.
-// 2017-10-18.
-XLA_TEST_F(LocalClientExecuteTest, DISABLED_ON_GPU(InfeedOutfeedTest)) {
+XLA_TEST_F(LocalClientExecuteTest, InfeedOutfeedTest) {
XlaBuilder builder(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {3});
- auto in = builder.Infeed(shape);
- auto constant = builder.ConstantR1<float>({1.0f, 2.0f, 3.0f});
- auto sum = builder.Add(in, constant);
- builder.Outfeed(sum, shape, /*outfeed_config=*/"");
+ auto in = Infeed(&builder, shape);
+ auto constant = ConstantR1<float>(&builder, {1.0f, 2.0f, 3.0f});
+ auto sum = Add(in, constant);
+ Outfeed(sum, shape, /*outfeed_config=*/"");
std::unique_ptr<tensorflow::Thread> thread(
tensorflow::Env::Default()->StartThread(
@@ -857,7 +880,7 @@ XLA_TEST_F(LocalClientExecuteTest, DISABLED_ON_GPU(InfeedOutfeedTest)) {
[&] { ExecuteLocallyOrDie(builder.Build().ValueOrDie(), {}); }));
ASSERT_IS_OK(local_client_->TransferToInfeedLocal(
- *Literal::CreateR1<float>({-5.0, 123.0, 42.0}),
+ *LiteralUtil::CreateR1<float>({-5.0, 123.0, 42.0}),
local_client_->default_device_ordinal()));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> result,
@@ -884,15 +907,15 @@ void BM_LocalClientOverhead(int num_iters) {
// Use a tiny add operation as the computation.
XlaBuilder builder("Add");
auto shape = ShapeUtil::MakeShape(F32, {2, 3});
- auto x = builder.Parameter(0, shape, "x");
- builder.Add(x, x);
+ auto x = Parameter(&builder, 0, shape, "x");
+ Add(x, x);
auto computation = builder.Build().ConsumeValueOrDie();
auto buffer =
transfer_manager
->AllocateScopedShapedBuffer(shape, &allocator, /*device_ordinal=*/0)
.ConsumeValueOrDie();
- auto literal = Literal::CreateR2<float>({{0, 0, 0}, {0, 0, 0}});
+ auto literal = LiteralUtil::CreateR2<float>({{0, 0, 0}, {0, 0, 0}});
auto stream =
client->mutable_backend()->BorrowStream(device_ordinal).ValueOrDie();
ASSERT_IS_OK(transfer_manager->TransferLiteralToDevice(stream.get(), *literal,
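
Note on the pattern above, which repeats through the rest of this diff: XlaBuilder ops move from member functions to free functions, which take the builder explicitly when they have no XlaOp operand to recover it from, and the Literal class's static factory methods move to LiteralUtil. A minimal sketch of the new style, assuming the headers already named in this diff and eliding the test scaffolding:

#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/shape_util.h"

namespace xla {

void BuildTinyAdd() {
  XlaBuilder builder("add_example");
  // Parameter and ConstantR1 take the builder; Add infers it from x and y.
  auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {3}), "x");
  auto y = ConstantR1<float>(&builder, {2.0f, 3.0f, 4.0f});
  Add(x, y);  // was: builder.Add(x, y)
  // Literal factories now live on LiteralUtil rather than Literal.
  auto literal = LiteralUtil::CreateR1<float>({0.0f, 1.0f, 2.0f});
}

}  // namespace xla
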
diff --git a/tensorflow/compiler/xla/tests/log_test.cc b/tensorflow/compiler/xla/tests/log_test.cc
index c0c02e584c..cdf70ee418 100644
--- a/tensorflow/compiler/xla/tests/log_test.cc
+++ b/tensorflow/compiler/xla/tests/log_test.cc
@@ -30,8 +30,8 @@ class LogTest : public ClientLibraryTestBase {};
XLA_TEST_F(LogTest, LogZeroValues) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR3FromArray3D<float>(Array3D<float>(3, 0, 0));
- builder.Log(x);
+ auto x = ConstantR3FromArray3D<float>(&builder, Array3D<float>(3, 0, 0));
+ Log(x);
ComputeAndCompareR3<float>(&builder, Array3D<float>(3, 0, 0), {},
ErrorSpec(0.0001));
@@ -42,8 +42,8 @@ TEST_F(LogTest, LogTenValues) {
5.0, 6.0, -7.0, -8.0, 9.0};
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>(input);
- builder.Log(x);
+ auto x = ConstantR1<float>(&builder, input);
+ Log(x);
std::vector<float> expected;
expected.reserve(input.size());
diff --git a/tensorflow/compiler/xla/tests/map_test.cc b/tensorflow/compiler/xla/tests/map_test.cc
index 3975e91257..7ddc636931 100644
--- a/tensorflow/compiler/xla/tests/map_test.cc
+++ b/tensorflow/compiler/xla/tests/map_test.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/test.h"
@@ -52,9 +52,9 @@ class MapTest : public ClientLibraryTestBase {
// 1.0f ---------/
XlaComputation CreateAdderToOne() {
XlaBuilder mapped_builder(TestName());
- auto x = mapped_builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto one = mapped_builder.ConstantR0<float>(1.0);
- mapped_builder.Add(x, one);
+ auto x = Parameter(&mapped_builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto one = ConstantR0<float>(&mapped_builder, 1.0);
+ Add(x, one);
auto computation_status = mapped_builder.Build();
TF_CHECK_OK(computation_status.status());
return computation_status.ConsumeValueOrDie();
@@ -62,9 +62,9 @@ class MapTest : public ClientLibraryTestBase {
XlaComputation CreateMax() {
XlaBuilder b(TestName());
- auto lhs = b.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto rhs = b.Parameter(1, ShapeUtil::MakeShape(F32, {}), "y");
- b.Max(lhs, rhs);
+ auto lhs = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto rhs = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {}), "y");
+ Max(lhs, rhs);
auto computation_status = b.Build();
TF_CHECK_OK(computation_status.status());
return computation_status.ConsumeValueOrDie();
@@ -75,8 +75,8 @@ class MapTest : public ClientLibraryTestBase {
template <class T>
XlaComputation CreateScalarOne() {
XlaBuilder mapped_builder("scalar_one");
- (void)mapped_builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- mapped_builder.ConstantR0<T>(1);
+ (void)Parameter(&mapped_builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ ConstantR0<T>(&mapped_builder, 1);
auto computation_status = mapped_builder.Build();
TF_CHECK_OK(computation_status.status());
return computation_status.ConsumeValueOrDie();
@@ -89,9 +89,9 @@ class MapTest : public ClientLibraryTestBase {
// 2.0f ---------/
XlaComputation CreateMulByTwo() {
XlaBuilder mapped_builder(TestName());
- auto x = mapped_builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto two = mapped_builder.ConstantR0<float>(2.0);
- mapped_builder.Mul(x, two);
+ auto x = Parameter(&mapped_builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto two = ConstantR0<float>(&mapped_builder, 2.0);
+ Mul(x, two);
auto computation_status = mapped_builder.Build();
TF_CHECK_OK(computation_status.status());
return computation_status.ConsumeValueOrDie();
@@ -107,10 +107,10 @@ class MapTest : public ClientLibraryTestBase {
// 1.0f ---------/
XlaComputation CreateAdderToOneTimesItself() {
XlaBuilder mapped_builder(TestName());
- auto x = mapped_builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto one = mapped_builder.ConstantR0<float>(1.0);
- auto adder_to_one = mapped_builder.Add(x, one);
- mapped_builder.Mul(x, adder_to_one);
+ auto x = Parameter(&mapped_builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto one = ConstantR0<float>(&mapped_builder, 1.0);
+ auto adder_to_one = Add(x, one);
+ Mul(x, adder_to_one);
auto computation_status = mapped_builder.Build();
TF_CHECK_OK(computation_status.status());
return computation_status.ConsumeValueOrDie();
@@ -125,10 +125,10 @@ class MapTest : public ClientLibraryTestBase {
XlaComputation CreateMapPlusN(const XlaComputation& embedded_computation,
float n) {
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto map = builder.Map({x}, embedded_computation, {});
- auto constant_n = builder.ConstantR0<float>(n);
- builder.Add(map, constant_n);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto map = Map(&builder, {x}, embedded_computation, {});
+ auto constant_n = ConstantR0<float>(&builder, n);
+ Add(map, constant_n);
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
return computation_status.ConsumeValueOrDie();
@@ -138,9 +138,9 @@ class MapTest : public ClientLibraryTestBase {
// defined by (x, y) -> x > y.
XlaComputation CreateGt() {
XlaBuilder b("Gt");
- auto x = b.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = b.Parameter(1, ShapeUtil::MakeShape(F32, {}), "y");
- b.Gt(x, y);
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {}), "y");
+ Gt(x, y);
auto computation_status = b.Build();
TF_CHECK_OK(computation_status.status());
return computation_status.ConsumeValueOrDie();
@@ -155,11 +155,11 @@ class MapTest : public ClientLibraryTestBase {
// z {R0F32} ---------------/
XlaComputation CreateTernaryAdder() {
XlaBuilder mapped_builder("TernaryAdder");
- auto x = mapped_builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = mapped_builder.Parameter(1, ShapeUtil::MakeShape(F32, {}), "y");
- auto z = mapped_builder.Parameter(2, ShapeUtil::MakeShape(F32, {}), "z");
- auto xy = mapped_builder.Add(x, y);
- mapped_builder.Add(xy, z);
+ auto x = Parameter(&mapped_builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = Parameter(&mapped_builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
+ auto z = Parameter(&mapped_builder, 2, ShapeUtil::MakeShape(F32, {}), "z");
+ auto xy = Add(x, y);
+ Add(xy, z);
auto computation_status = mapped_builder.Build();
TF_CHECK_OK(computation_status.status());
return computation_status.ConsumeValueOrDie();
@@ -169,12 +169,12 @@ class MapTest : public ClientLibraryTestBase {
TEST_F(MapTest, MapEachElemPlusOneR0) {
// Applies (lambda (x) (+ x 1)) to an input scalar.
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> param0_literal = Literal::CreateR0<float>(42.0);
+ std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR0<float>(42.0);
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto param = builder.Parameter(0, param0_literal->shape(), "param0");
- builder.Map({param}, CreateAdderToOne(), {});
+ auto param = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ Map(&builder, {param}, CreateAdderToOne(), {});
ComputeAndCompareR0<float>(&builder, 43.0, {param0_data.get()},
ErrorSpec(0.01f));
@@ -183,12 +183,12 @@ TEST_F(MapTest, MapEachElemPlusOneR0) {
XLA_TEST_F(MapTest, MapEachElemPlusOneR1S0) {
// Maps (lambda (x) (+ x 1)) onto an input R1F32 vector of length 0.
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> param0_literal = Literal::CreateR1<float>({});
+ std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR1<float>({});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto param = builder.Parameter(0, param0_literal->shape(), "param0");
- builder.Map({param}, CreateAdderToOne(), {0});
+ auto param = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ Map(&builder, {param}, CreateAdderToOne(), {0});
ComputeAndCompareR1<float>(&builder, {}, {param0_data.get()},
ErrorSpec(0.01f));
@@ -198,12 +198,12 @@ TEST_F(MapTest, MapEachElemPlusOneR1S4) {
// Maps (lambda (x) (+ x 1)) onto an input R1F32 vector of length 4.
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
+ LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto param = builder.Parameter(0, param0_literal->shape(), "param0");
- builder.Map({param}, CreateAdderToOne(), {0});
+ auto param = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ Map(&builder, {param}, CreateAdderToOne(), {0});
ComputeAndCompareR1<float>(&builder, {3.2f, 4.3f, 5.4f, 6.5f},
{param0_data.get()}, ErrorSpec(0.01f));
@@ -212,12 +212,12 @@ TEST_F(MapTest, MapEachElemPlusOneR1S4) {
TEST_F(MapTest, MapEachF32ElementToS32Constant) {
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
+ LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto param = builder.Parameter(0, param0_literal->shape(), "param0");
- builder.Map({param}, CreateScalarOne<int32>(), {0});
+ auto param = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ Map(&builder, {param}, CreateScalarOne<int32>(), {0});
ComputeAndCompareR1<int32>(&builder, {1, 1, 1, 1}, {param0_data.get()});
}
@@ -225,12 +225,12 @@ TEST_F(MapTest, MapEachF32ElementToS32Constant) {
TEST_F(MapTest, MapEachF32ElementToU32Constant) {
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
+ LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto param = builder.Parameter(0, param0_literal->shape(), "param0");
- builder.Map({param}, CreateScalarOne<uint32>(), {0});
+ auto param = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ Map(&builder, {param}, CreateScalarOne<uint32>(), {0});
ComputeAndCompareR1<uint32>(&builder, {1, 1, 1, 1}, {param0_data.get()});
}
@@ -239,12 +239,12 @@ TEST_F(MapTest, MapEachElemLongerChainR1) {
// Maps (lambda (x) (* (+ x 1) x)) onto an input R1F32 vector.
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR1<float>({2.6f, -5.1f, 0.1f, 0.2f, 999.0f, 255.5f});
+ LiteralUtil::CreateR1<float>({2.6f, -5.1f, 0.1f, 0.2f, 999.0f, 255.5f});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto param = builder.Parameter(0, param0_literal->shape(), "param0");
- builder.Map({param}, CreateAdderToOneTimesItself(), {0});
+ auto param = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ Map(&builder, {param}, CreateAdderToOneTimesItself(), {0});
ComputeAndCompareR1<float>(
&builder, {9.36f, 20.91f, 0.11f, 0.24f, 999000.0f, 65535.75f},
@@ -255,13 +255,13 @@ XLA_TEST_F(MapTest, MapMultipleMapsR1S0) {
// Maps (lambda (x) (+ x 1)) onto an input R1F32 vector of length 0, and then
// maps (lambda (x) (* x 2)) on the result.
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> param0_literal = Literal::CreateR1<float>({});
+ std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR1<float>({});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto param = builder.Parameter(0, param0_literal->shape(), "param0");
- auto map1 = builder.Map({param}, CreateAdderToOne(), {0});
- builder.Map({map1}, CreateMulByTwo(), {0});
+ auto param = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ auto map1 = Map(&builder, {param}, CreateAdderToOne(), {0});
+ Map(&builder, {map1}, CreateMulByTwo(), {0});
ComputeAndCompareR1<float>(&builder, {}, {param0_data.get()},
ErrorSpec(0.01f));
@@ -272,13 +272,13 @@ TEST_F(MapTest, MapMultipleMapsR1S4) {
// maps (lambda (x) (* x 2)) on the result.
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
+ LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto param = builder.Parameter(0, param0_literal->shape(), "param0");
- auto map1 = builder.Map({param}, CreateAdderToOne(), {0});
- builder.Map({map1}, CreateMulByTwo(), {0});
+ auto param = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ auto map1 = Map(&builder, {param}, CreateAdderToOne(), {0});
+ Map(&builder, {map1}, CreateMulByTwo(), {0});
ComputeAndCompareR1<float>(&builder, {6.4f, 8.6f, 10.8f, 13.0f},
{param0_data.get()}, ErrorSpec(0.01f));
@@ -287,13 +287,13 @@ TEST_F(MapTest, MapMultipleMapsR1S4) {
TEST_F(MapTest, MapEachElemPlusOneR2) {
// Maps (lambda (x) (+ x 1)) onto an input R2F32 vector.
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> param0_literal = Literal::CreateR2<float>(
+ std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR2<float>(
{{13.25f, 14.0f}, {-7.1f, -7.2f}, {-8.8f, 8.8f}});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto param = builder.Parameter(0, param0_literal->shape(), "param0");
- builder.Map({param}, CreateAdderToOne(), {0, 1});
+ auto param = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ Map(&builder, {param}, CreateAdderToOne(), {0, 1});
Array2D<float> expected_array(
{{14.25f, 15.0f}, {-6.1f, -6.2f}, {-7.8f, 9.8f}});
@@ -319,10 +319,10 @@ XLA_TEST_F(MapTest, ComplexNestedMaps) {
auto embed3 = CreateMapPlusN(embed1, 4.0);
XlaBuilder embed4_builder("embed4");
- auto embed4_param = embed4_builder.Parameter(0, scalar_shape, "x");
- auto embed4_map_lhs = embed4_builder.Map({embed4_param}, embed2, {});
- auto embed4_map_rhs = embed4_builder.Map({embed4_param}, embed3, {});
- embed4_builder.Add(embed4_map_lhs, embed4_map_rhs);
+ auto embed4_param = Parameter(&embed4_builder, 0, scalar_shape, "x");
+ auto embed4_map_lhs = Map(&embed4_builder, {embed4_param}, embed2, {});
+ auto embed4_map_rhs = Map(&embed4_builder, {embed4_param}, embed3, {});
+ Add(embed4_map_lhs, embed4_map_rhs);
auto embed4_status = embed4_builder.Build();
ASSERT_IS_OK(embed4_status.status());
auto embed4 = embed4_status.ConsumeValueOrDie();
@@ -330,11 +330,11 @@ XLA_TEST_F(MapTest, ComplexNestedMaps) {
auto embed5 = CreateMapPlusN(embed2, 6.0);
XlaBuilder builder(TestName());
- auto constant_42 = builder.ConstantR0<float>(42.0);
- auto constant_7 = builder.ConstantR0<float>(7.0);
- auto map_42 = builder.Map({constant_42}, embed5, {});
- auto map_7 = builder.Map({constant_7}, embed4, {});
- builder.Add(map_42, map_7);
+ auto constant_42 = ConstantR0<float>(&builder, 42.0);
+ auto constant_7 = ConstantR0<float>(&builder, 7.0);
+ auto map_42 = Map(&builder, {constant_42}, embed5, {});
+ auto map_7 = Map(&builder, {constant_7}, embed4, {});
+ Add(map_42, map_7);
ComputeAndCompareR0<float>(&builder, 73.0, {}, ErrorSpec(0.01f));
}
@@ -343,17 +343,18 @@ TEST_F(MapTest, MapBinaryAdder) {
// Maps (lambda (x y) (+ x y)) onto two R1F32 vectors.
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
+ LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
std::unique_ptr<Literal> param1_literal =
- Literal::CreateR1<float>({5.1f, 4.4f, -0.1f, -5.5f});
+ LiteralUtil::CreateR1<float>({5.1f, 4.4f, -0.1f, -5.5f});
std::unique_ptr<GlobalData> param1_data =
client_->TransferToServer(*param1_literal).ConsumeValueOrDie();
- auto param0 = builder.Parameter(0, param0_literal->shape(), "param0");
- auto param1 = builder.Parameter(1, param1_literal->shape(), "param1");
- builder.Map({param0, param1}, CreateScalarAddComputation(F32, &builder), {0});
+ auto param0 = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ auto param1 = Parameter(&builder, 1, param1_literal->shape(), "param1");
+ Map(&builder, {param0, param1}, CreateScalarAddComputation(F32, &builder),
+ {0});
ComputeAndCompareR1<float>(&builder, {7.3f, 7.7, 4.3f, 0},
{param0_data.get(), param1_data.get()},
@@ -364,20 +365,20 @@ TEST_F(MapTest, MapBinaryAdder) {
// for Map that used to fail in shape inference (b/28989438).
XLA_TEST_F(MapTest, AddWithMixedLayouts) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> param0_literal = Literal::CreateR2WithLayout(
+ std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR2WithLayout(
{{1, 2}, {3, 4}}, LayoutUtil::MakeLayout({1, 0}));
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- std::unique_ptr<Literal> param1_literal = Literal::CreateR2WithLayout(
+ std::unique_ptr<Literal> param1_literal = LiteralUtil::CreateR2WithLayout(
{{10, 20}, {30, 40}}, LayoutUtil::MakeLayout({0, 1}));
std::unique_ptr<GlobalData> param1_data =
client_->TransferToServer(*param1_literal).ConsumeValueOrDie();
- auto param0 = builder.Parameter(0, param0_literal->shape(), "param0");
- auto param1 = builder.Parameter(1, param1_literal->shape(), "param1");
- builder.Map({param0, param1}, CreateScalarAddComputation(S32, &builder),
- {0, 1});
+ auto param0 = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ auto param1 = Parameter(&builder, 1, param1_literal->shape(), "param1");
+ Map(&builder, {param0, param1}, CreateScalarAddComputation(S32, &builder),
+ {0, 1});
Array2D<int32> expected(2, 2);
expected(0, 0) = 11;
@@ -391,19 +392,19 @@ XLA_TEST_F(MapTest, AddWithMixedLayouts) {
XLA_TEST_F(MapTest, AddR3_3x0x2) {
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR3FromArray3D<int32>(Array3D<int32>(3, 0, 2));
+ LiteralUtil::CreateR3FromArray3D<int32>(Array3D<int32>(3, 0, 2));
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
std::unique_ptr<Literal> param1_literal =
- Literal::CreateR3FromArray3D<int32>(Array3D<int32>(3, 0, 2));
+ LiteralUtil::CreateR3FromArray3D<int32>(Array3D<int32>(3, 0, 2));
std::unique_ptr<GlobalData> param1_data =
client_->TransferToServer(*param1_literal).ConsumeValueOrDie();
- auto param0 = builder.Parameter(0, param0_literal->shape(), "param0");
- auto param1 = builder.Parameter(1, param1_literal->shape(), "param1");
- builder.Map({param0, param1}, CreateScalarAddComputation(S32, &builder),
- {0, 1, 2});
+ auto param0 = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ auto param1 = Parameter(&builder, 1, param1_literal->shape(), "param1");
+ Map(&builder, {param0, param1}, CreateScalarAddComputation(S32, &builder),
+ {0, 1, 2});
ComputeAndCompareR3<int32>(&builder, Array3D<int32>(3, 0, 2),
{param0_data.get(), param1_data.get()});
@@ -413,22 +414,22 @@ TEST_F(MapTest, MapTernaryAdder) {
// Maps (lambda (x y z) (+ x y z)) onto three R1F32 vectors.
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
+ LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
std::unique_ptr<Literal> param1_literal =
- Literal::CreateR1<float>({5.1f, 4.4f, -0.1f, -5.5f});
+ LiteralUtil::CreateR1<float>({5.1f, 4.4f, -0.1f, -5.5f});
std::unique_ptr<GlobalData> param1_data =
client_->TransferToServer(*param1_literal).ConsumeValueOrDie();
std::unique_ptr<Literal> param2_literal =
- Literal::CreateR1<float>({-10.0f, -100.0f, -900.0f, -400.0f});
+ LiteralUtil::CreateR1<float>({-10.0f, -100.0f, -900.0f, -400.0f});
std::unique_ptr<GlobalData> param2_data =
client_->TransferToServer(*param2_literal).ConsumeValueOrDie();
- auto param0 = builder.Parameter(0, param0_literal->shape(), "param0");
- auto param1 = builder.Parameter(1, param1_literal->shape(), "param1");
- auto param2 = builder.Parameter(2, param2_literal->shape(), "param2");
- builder.Map({param0, param1, param2}, CreateTernaryAdder(), {0});
+ auto param0 = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ auto param1 = Parameter(&builder, 1, param1_literal->shape(), "param1");
+ auto param2 = Parameter(&builder, 2, param2_literal->shape(), "param2");
+ Map(&builder, {param0, param1, param2}, CreateTernaryAdder(), {0});
ComputeAndCompareR1<float>(
&builder, {-2.7f, -92.3f, -895.7f, -400.0f},
@@ -440,7 +441,8 @@ TEST_F(MapTest, MapGt) {
// Maps (x,y) -> x > y onto two R1F32 vectors.
XlaBuilder b(TestName());
auto gt = CreateGt();
- b.Map({b.ConstantR1<float>({1, 20}), b.ConstantR1<float>({10, 2})}, gt, {0});
+ Map(&b, {ConstantR1<float>(&b, {1, 20}), ConstantR1<float>(&b, {10, 2})}, gt,
+ {0});
ComputeAndCompareR1<bool>(&b, {false, true}, {});
}
@@ -449,15 +451,15 @@ TEST_F(MapTest, NestedBinaryMap) {
{
// max_with_square(x) = do max(x, x^2) via a map.
XlaBuilder b("max_with_square");
- auto x = b.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- b.Map({x, b.Mul(x, x)}, CreateMax(), {});
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ Map(&b, {x, Mul(x, x)}, CreateMax(), {});
auto computation_status = b.Build();
ASSERT_IS_OK(computation_status.status());
max_with_square = computation_status.ConsumeValueOrDie();
}
XlaBuilder b(TestName());
- auto input = b.ConstantR1<float>({0.1f, 0.5f, -0.5f, 1.0f, 2.0f});
- b.Map({input}, max_with_square, {0});
+ auto input = ConstantR1<float>(&b, {0.1f, 0.5f, -0.5f, 1.0f, 2.0f});
+ Map(&b, {input}, max_with_square, {0});
ComputeAndCompareR1<float>(&b, {0.1f, 0.5f, 0.25f, 1.0f, 4.0f}, {});
}
@@ -468,23 +470,23 @@ TEST_F(MapTest, MapOperantionWithBuildError) {
XlaBuilder builder(TestName());
auto sub_builder = builder.CreateSubBuilder("ErrorAdd");
- auto x = sub_builder->Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = sub_builder->Parameter(1, ShapeUtil::MakeShape(U16, {}), "y");
- sub_builder->Add(x, y);
+ auto x = Parameter(sub_builder.get(), 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = Parameter(sub_builder.get(), 1, ShapeUtil::MakeShape(U16, {}), "y");
+ Add(x, y);
auto error_add = sub_builder->BuildAndNoteError();
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
+ LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
std::unique_ptr<Literal> param1_literal =
- Literal::CreateR1<float>({5.1f, 4.4f, -0.1f, -5.5f});
+ LiteralUtil::CreateR1<float>({5.1f, 4.4f, -0.1f, -5.5f});
std::unique_ptr<GlobalData> param1_data =
client_->TransferToServer(*param1_literal).ConsumeValueOrDie();
- auto param0 = builder.Parameter(0, param0_literal->shape(), "param0");
- auto param1 = builder.Parameter(1, param1_literal->shape(), "param1");
- builder.Map({param0, param1}, error_add, {0});
+ auto param0 = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ auto param1 = Parameter(&builder, 1, param1_literal->shape(), "param1");
+ Map(&builder, {param0, param1}, error_add, {0});
StatusOr<XlaComputation> computation_status = builder.Build();
ASSERT_TRUE(!computation_status.ok());
@@ -506,21 +508,21 @@ TEST_F(MapTestWithFullOpt, MapScalarPower) {
XlaBuilder builder(TestName());
auto sub_builder = builder.CreateSubBuilder("power");
- auto x = sub_builder->Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = sub_builder->Parameter(1, ShapeUtil::MakeShape(F32, {}), "y");
- sub_builder->Pow(x, y);
+ auto x = Parameter(sub_builder.get(), 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = Parameter(sub_builder.get(), 1, ShapeUtil::MakeShape(F32, {}), "y");
+ Pow(x, y);
auto power = sub_builder->BuildAndNoteError();
- std::unique_ptr<Literal> param0_literal = Literal::CreateR0<float>(2.0f);
- std::unique_ptr<Literal> param1_literal = Literal::CreateR0<float>(5.0f);
+ std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR0<float>(2.0f);
+ std::unique_ptr<Literal> param1_literal = LiteralUtil::CreateR0<float>(5.0f);
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
std::unique_ptr<GlobalData> param1_data =
client_->TransferToServer(*param1_literal).ConsumeValueOrDie();
- auto param0 = builder.Parameter(0, param0_literal->shape(), "param0");
- auto param1 = builder.Parameter(1, param1_literal->shape(), "param1");
- builder.Map({param0, param1}, power, {});
+ auto param0 = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ auto param1 = Parameter(&builder, 1, param1_literal->shape(), "param1");
+ Map(&builder, {param0, param1}, power, {});
ComputeAndCompareR0<float>(&builder, 32.0f,
{param0_data.get(), param1_data.get()},
@@ -533,21 +535,21 @@ TEST_F(MapTestWithFullOpt, MapSubtractOppositeOrder) {
XlaBuilder builder(TestName());
auto sub_builder = builder.CreateSubBuilder("power");
- auto x = sub_builder->Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = sub_builder->Parameter(1, ShapeUtil::MakeShape(F32, {}), "y");
- sub_builder->Sub(y, x); // note that this is y - x, not x - y
+ auto x = Parameter(sub_builder.get(), 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = Parameter(sub_builder.get(), 1, ShapeUtil::MakeShape(F32, {}), "y");
+ Sub(y, x); // note that this is y - x, not x - y
auto sub_opposite = sub_builder->BuildAndNoteError();
- std::unique_ptr<Literal> param0_literal = Literal::CreateR0<float>(2.0f);
- std::unique_ptr<Literal> param1_literal = Literal::CreateR0<float>(5.0f);
+ std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR0<float>(2.0f);
+ std::unique_ptr<Literal> param1_literal = LiteralUtil::CreateR0<float>(5.0f);
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
std::unique_ptr<GlobalData> param1_data =
client_->TransferToServer(*param1_literal).ConsumeValueOrDie();
- auto param0 = builder.Parameter(0, param0_literal->shape(), "param0");
- auto param1 = builder.Parameter(1, param1_literal->shape(), "param1");
- builder.Map({param0, param1}, sub_opposite, {});
+ auto param0 = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ auto param1 = Parameter(&builder, 1, param1_literal->shape(), "param1");
+ Map(&builder, {param0, param1}, sub_opposite, {});
ComputeAndCompareR0<float>(
&builder, 3.0f, {param0_data.get(), param1_data.get()}, ErrorSpec(0.01f));
@@ -559,16 +561,16 @@ TEST_F(MapTestWithFullOpt, MapSquare) {
XlaBuilder builder(TestName());
auto sub_builder = builder.CreateSubBuilder("power");
- auto x = sub_builder->Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- sub_builder->Mul(x, x);
+ auto x = Parameter(sub_builder.get(), 0, ShapeUtil::MakeShape(F32, {}), "x");
+ Mul(x, x);
auto square = sub_builder->BuildAndNoteError();
- std::unique_ptr<Literal> param0_literal = Literal::CreateR0<float>(10.0f);
+ std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR0<float>(10.0f);
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto param0 = builder.Parameter(0, param0_literal->shape(), "param0");
- builder.Map({param0}, square, {});
+ auto param0 = Parameter(&builder, 0, param0_literal->shape(), "param0");
+ Map(&builder, {param0}, square, {});
ComputeAndCompareR0<float>(&builder, 100.0f, {param0_data.get()},
ErrorSpec(0.01f));
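
For the map_test.cc changes above, the subcomputation pattern after the migration looks like this: each embedded computation still gets its own XlaBuilder, but the ops inside it use the free functions, and Map now takes the enclosing builder as its first argument. A hedged sketch, assuming the same headers as the earlier sketch, with error handling elided:

namespace xla {

XlaComputation CreateAdderToOne() {
  XlaBuilder mapped_builder("adder_to_one");
  auto x = Parameter(&mapped_builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
  auto one = ConstantR0<float>(&mapped_builder, 1.0f);
  Add(x, one);  // the last op built becomes the computation root
  return mapped_builder.Build().ConsumeValueOrDie();
}

void BuildMapExample() {
  XlaBuilder builder("map_example");
  auto param = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {4}), "param0");
  // was: builder.Map({param}, CreateAdderToOne(), {0})
  Map(&builder, {param}, CreateAdderToOne(), /*dimensions=*/{0});
}

}  // namespace xla
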
diff --git a/tensorflow/compiler/xla/tests/matrix_ops_simple_test.cc b/tensorflow/compiler/xla/tests/matrix_ops_simple_test.cc
index 27fd36e06a..069b8a881f 100644
--- a/tensorflow/compiler/xla/tests/matrix_ops_simple_test.cc
+++ b/tensorflow/compiler/xla/tests/matrix_ops_simple_test.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/reference_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -56,15 +56,15 @@ TYPED_TEST_CASE(MatOpsSimpleTest_F16F32, TypesF16F32);
XLA_TYPED_TEST(MatOpsSimpleTest_F16F32, ExpTwoByTwoValues) {
using T = TypeParam;
XlaBuilder builder("exp_2x2");
- auto data = builder.ConstantR2FromArray2D<T>({
- {1.0f, 0.0f}, // row 0
- {-1.0f, 0.5f}, // row 1
- });
- builder.Exp(data);
+ auto data = ConstantR2FromArray2D<T>(&builder, {
+ {1.0f, 0.0f}, // row 0
+ {-1.0f, 0.5f}, // row 1
+ });
+ Exp(data);
std::unique_ptr<Literal> expected =
- Literal::CreateR2FromArray2D<T>({{2.71828f, 1.00000f}, // row 0
- {0.36788f, 1.64872f}}); // row 1
+ LiteralUtil::CreateR2FromArray2D<T>({{2.71828f, 1.00000f}, // row 0
+ {0.36788f, 1.64872f}}); // row 1
this->ComputeAndCompareLiteral(&builder, *expected, {}, ErrorSpec(1e-5));
}
@@ -76,43 +76,43 @@ XLA_TYPED_TEST(MatOpsSimpleTest_F16F32, MapTwoByTwo) {
// add_half(x) = x + 0.5
XlaBuilder builder("add_half");
auto x_value =
- builder.Parameter(0, ShapeUtil::MakeShapeWithType<T>({}), "x_value");
- auto half = builder.ConstantR0<T>(static_cast<T>(0.5));
- builder.Add(x_value, half);
+ Parameter(&builder, 0, ShapeUtil::MakeShapeWithType<T>({}), "x_value");
+ auto half = ConstantR0<T>(&builder, static_cast<T>(0.5));
+ Add(x_value, half);
auto computation_status = builder.Build();
ASSERT_IS_OK(computation_status.status());
add_half = computation_status.ConsumeValueOrDie();
}
XlaBuilder builder("map_2x2");
- auto data = builder.ConstantR2FromArray2D<T>({
- {1.0f, 0.0f}, // row 0
- {-1.0f, 0.5f}, // row 1
- });
- auto map = builder.Map({data}, add_half, {0, 1});
+ auto data = ConstantR2FromArray2D<T>(&builder, {
+ {1.0f, 0.0f}, // row 0
+ {-1.0f, 0.5f}, // row 1
+ });
+ Map(&builder, {data}, add_half, {0, 1});
std::unique_ptr<Literal> expected =
- Literal::CreateR2FromArray2D<T>({{1.5f, 0.5f}, // row 0
- {-0.5f, 1.0f}}); // row 1
+ LiteralUtil::CreateR2FromArray2D<T>({{1.5f, 0.5f}, // row 0
+ {-0.5f, 1.0f}}); // row 1
this->ComputeAndCompareLiteral(&builder, *expected, {}, ErrorSpec(1e-5));
}
XLA_TYPED_TEST(MatOpsSimpleTest_F16F32, MaxTwoByTwoValues) {
using T = TypeParam;
XlaBuilder builder("max_2x2");
- auto lhs = builder.ConstantR2FromArray2D<T>({
- {7.0f, 2.0f}, // row 0
- {3.0f, -4.0f}, // row 1
- });
- auto rhs = builder.ConstantR2FromArray2D<T>({
- {5.0f, 6.0f}, // row 0
- {1.0f, -8.0f}, // row 1
- });
- auto max = builder.Max(lhs, rhs);
+ auto lhs = ConstantR2FromArray2D<T>(&builder, {
+ {7.0f, 2.0f}, // row 0
+ {3.0f, -4.0f}, // row 1
+ });
+ auto rhs = ConstantR2FromArray2D<T>(&builder, {
+ {5.0f, 6.0f}, // row 0
+ {1.0f, -8.0f}, // row 1
+ });
+ Max(lhs, rhs);
std::unique_ptr<Literal> expected =
- Literal::CreateR2FromArray2D<T>({{7.0f, 6.0f}, // row 0
- {3.0f, -4.0f}}); // row 1
+ LiteralUtil::CreateR2FromArray2D<T>({{7.0f, 6.0f}, // row 0
+ {3.0f, -4.0f}}); // row 1
this->ComputeAndCompareLiteral(&builder, *expected, {}, ErrorSpec(1e-6));
}
@@ -137,9 +137,9 @@ class TestLinspaceMaxParametric
XlaBuilder builder(
tensorflow::strings::Printf("max_%lldx%lld_linspace", rows, cols));
- auto lhs = builder.ConstantR2FromArray2D<T>(*alhs);
- auto rhs = builder.ConstantR2FromArray2D<T>(*arhs);
- auto max = builder.Max(lhs, rhs);
+ auto lhs = ConstantR2FromArray2D<T>(&builder, *alhs);
+ auto rhs = ConstantR2FromArray2D<T>(&builder, *arhs);
+ Max(lhs, rhs);
Array2D<T> expected(rows, cols);
for (int row = 0; row < rows; ++row) {
@@ -200,31 +200,33 @@ class MatOpsDotAddTest
TF_ASSERT_OK_AND_ASSIGN(
auto lhs_handle,
- client_->TransferToServer(*Literal::CreateR2FromArray2DWithLayout<T>(
- lhs, LayoutUtil::MakeLayout(minor_to_major(row_major)))));
+ client_->TransferToServer(
+ *LiteralUtil::CreateR2FromArray2DWithLayout<T>(
+ lhs, LayoutUtil::MakeLayout(minor_to_major(row_major)))));
TF_ASSERT_OK_AND_ASSIGN(
auto rhs_handle,
- client_->TransferToServer(*Literal::CreateR2FromArray2DWithLayout<T>(
- rhs, LayoutUtil::MakeLayout(minor_to_major(row_major)))));
+ client_->TransferToServer(
+ *LiteralUtil::CreateR2FromArray2DWithLayout<T>(
+ rhs, LayoutUtil::MakeLayout(minor_to_major(row_major)))));
XlaBuilder builder(TestName());
- auto lhs_arg = builder.Parameter(0, lhs_shape, "lhs");
+ auto lhs_arg = Parameter(&builder, 0, lhs_shape, "lhs");
auto lhs_mat_arg = lhs_arg;
if (transpose) {
- lhs_mat_arg = builder.Transpose(lhs_mat_arg, {1, 0});
+ lhs_mat_arg = Transpose(lhs_mat_arg, {1, 0});
}
- auto rhs_arg = builder.Parameter(1, rhs_shape, "rhs");
- auto result = builder.Dot(lhs_mat_arg, rhs_arg);
+ auto rhs_arg = Parameter(&builder, 1, rhs_shape, "rhs");
+ auto result = Dot(lhs_mat_arg, rhs_arg);
Array2D<T> expected;
if (add_lhs) {
- result = builder.Add(result, lhs_arg);
+ result = Add(result, lhs_arg);
if (transpose) {
expected = Array2D<T>({{47.0f, 52.0f}, {71.0f, 78.0f}});
} else {
expected = Array2D<T>({{35.0f, 39.0f}, {81.0f, 89.0f}});
}
} else {
- result = builder.Add(result, rhs_arg);
+ result = Add(result, rhs_arg);
if (transpose) {
expected = Array2D<T>({{56.0f, 61.0f}, {80.0f, 87.0f}});
} else {
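
The matrix_ops_simple_test.cc hunks above also show the chaining consequence of the migration: only ops without an XlaOp operand (Parameter and the ConstantR2FromArray2D family) need the builder pointer, while Transpose, Dot, and Add recover it from their operands. A sketch under that assumption, reusing the earlier headers:

XlaBuilder builder("dot_add_example");
auto lhs = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2, 2}), "lhs");
auto rhs = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {2, 2}), "rhs");
auto lhs_t = Transpose(lhs, {1, 0});      // builder inferred from lhs
auto result = Add(Dot(lhs_t, rhs), lhs);  // chained without naming the builder
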
diff --git a/tensorflow/compiler/xla/tests/multidimensional_slice_test.cc b/tensorflow/compiler/xla/tests/multidimensional_slice_test.cc
index 0791a71aac..e576f000ef 100644
--- a/tensorflow/compiler/xla/tests/multidimensional_slice_test.cc
+++ b/tensorflow/compiler/xla/tests/multidimensional_slice_test.cc
@@ -33,9 +33,10 @@ class SliceTest : public ClientLibraryTestBase {};
XLA_TEST_F(SliceTest, Slice2D) {
XlaBuilder builder("slice_2d");
- auto original = builder.ConstantR2<float>(
+ auto original = ConstantR2<float>(
+ &builder,
{{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}, {10.0, 11.0, 12.0}});
- builder.Slice(original, {2, 1}, {4, 3}, {1, 1});
+ Slice(original, {2, 1}, {4, 3}, {1, 1});
Array2D<float> expected({{8.0f, 9.0f}, {11.0f, 12.0f}});
ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.000001));
@@ -45,8 +46,8 @@ XLA_TEST_F(SliceTest, Slice3D) {
XlaBuilder builder("slice_3d");
Array3D<float> array_3d(
{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}});
- auto original = builder.ConstantR3FromArray3D<float>(array_3d);
- builder.Slice(original, {0, 0, 1}, {2, 1, 2}, {1, 1, 1});
+ auto original = ConstantR3FromArray3D<float>(&builder, array_3d);
+ Slice(original, {0, 0, 1}, {2, 1, 2}, {1, 1, 1});
Array3D<float> expected_3d({{{2.0f}}, {{6.0f}}});
ComputeAndCompareR3<float>(&builder, expected_3d, {}, ErrorSpec(0.000001));
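
For the Slice calls above, the free function keeps the member version's argument order: per-dimension start indices, exclusive limit indices, and strides. Restating the 2-D case from Slice2D as a standalone sketch:

XlaBuilder builder("slice_2d");
auto original = ConstantR2<float>(
    &builder,
    {{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}, {10.0, 11.0, 12.0}});
// Rows [2, 4) and columns [1, 3) with unit strides -> {{8, 9}, {11, 12}}.
Slice(original, /*start_indices=*/{2, 1}, /*limit_indices=*/{4, 3},
      /*strides=*/{1, 1});
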
diff --git a/tensorflow/compiler/xla/tests/multioutput_fusion_test.cc b/tensorflow/compiler/xla/tests/multioutput_fusion_test.cc
index a42a19af15..eb06b115da 100644
--- a/tensorflow/compiler/xla/tests/multioutput_fusion_test.cc
+++ b/tensorflow/compiler/xla/tests/multioutput_fusion_test.cc
@@ -20,7 +20,7 @@ limitations under the License.
#include <utility>
#include "tensorflow/compiler/xla/client/local_client.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/primitive_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
@@ -60,7 +60,7 @@ class MultiOutputFusionTest : public HloTestBase {
const Shape elem_shape2 = ShapeUtil::MakeShape(F32, {size, size});
auto const0 = builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<float>(8.0f)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(8.0f)));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, elem_shape0, "0"));
@@ -105,8 +105,9 @@ class MultiOutputFusionTest : public HloTestBase {
Literal expect(ShapeUtil::MakeShape(F32, {size, size}));
expect.PopulateWithValue<float>(size * 1.5f * 3.5f);
- auto actual = ExecuteAndTransfer(
- std::move(hlo_module), {Literal::CreateR0<float>(-9.0f).get(), &arg1});
+ auto actual =
+ ExecuteAndTransfer(std::move(hlo_module),
+ {LiteralUtil::CreateR0<float>(-9.0f).get(), &arg1});
EXPECT_TRUE(LiteralTestUtil::Near(expect, *actual, error_spec_));
}
@@ -165,7 +166,8 @@ class MultiOutputFusionTest : public HloTestBase {
Literal input1(ShapeUtil::MakeShape(F64, {size}));
input1.PopulateWithValue(1.);
- Literal expect = std::move(*Literal::CreateR1<float>({size * 1.5f * 3.5f}));
+ Literal expect =
+ std::move(*LiteralUtil::CreateR1<float>({size * 1.5f * 3.5f}));
auto actual = ExecuteAndTransfer(std::move(hlo_module), {&input0, &input1});
EXPECT_TRUE(LiteralTestUtil::Near(expect, *actual, error_spec_));
}
@@ -198,16 +200,16 @@ XLA_TEST_F(MultiOutputFusionTest, FusionNodeIsRoot) {
auto module =
HloRunner::CreateModuleFromString(testcase, GetDebugOptionsForTest())
.ValueOrDie();
- auto param = Literal::MakeTupleOwned(
- Literal::MakeTupleOwned(
- Literal::MakeTupleOwned(Literal::CreateR0<int32>(42)),
- Literal::CreateR0<float>(1.0)),
- Literal::MakeTupleOwned(Literal::CreateR0<float>(3.0),
- Literal::CreateR0<int32>(4)));
+ auto param = LiteralUtil::MakeTupleOwned(
+ LiteralUtil::MakeTupleOwned(
+ LiteralUtil::MakeTupleOwned(LiteralUtil::CreateR0<int32>(42)),
+ LiteralUtil::CreateR0<float>(1.0)),
+ LiteralUtil::MakeTupleOwned(LiteralUtil::CreateR0<float>(3.0),
+ LiteralUtil::CreateR0<int32>(4)));
std::unique_ptr<Literal> result =
ExecuteNoHloPasses(std::move(module), {param.get()});
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::MakeTupleOwned(Literal::CreateR0<int32>(42)), *result));
+ *LiteralUtil::MakeTupleOwned(LiteralUtil::CreateR0<int32>(42)), *result));
}
XLA_TEST_F(MultiOutputFusionTest, MultiOutputLoopFusion) {
@@ -232,7 +234,7 @@ XLA_TEST_F(MultiOutputFusionTest, MultiOutputLoopFusion) {
auto module =
HloRunner::CreateModuleFromString(testcase, GetDebugOptionsForTest())
.ValueOrDie();
- auto param = Literal::CreateR1<float>({1.0, 2.0, 3.0, -1.0});
+ auto param = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, -1.0});
std::unique_ptr<Literal> result =
ExecuteNoHloPasses(std::move(module), {param.get()});
LiteralTestUtil::ExpectR1Equal<float>({0.0, 4.0, 9.0, 1.0}, *result);
@@ -265,7 +267,7 @@ XLA_TEST_F(MultiOutputFusionTest, MultiOutputLoopFeedingMap) {
auto module =
HloRunner::CreateModuleFromString(testcase, GetDebugOptionsForTest())
.ValueOrDie();
- auto param = Literal::CreateR1<float>({1.0, 2.0, 3.0});
+ auto param = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0});
std::unique_ptr<Literal> result =
ExecuteNoHloPasses(std::move(module), {param.get()});
LiteralTestUtil::ExpectR1Equal<float>({0.0, 4.0, 9.0}, *result);
@@ -308,12 +310,14 @@ XLA_TEST_F(MultiOutputFusionTest,
auto module =
HloRunner::CreateModuleFromString(testcase, GetDebugOptionsForTest())
.ValueOrDie();
- auto param = Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
+ auto param =
+ LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
std::unique_ptr<Literal> result =
ExecuteNoHloPasses(std::move(module), {param.get()});
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::MakeTupleOwned(Literal::CreateR2<float>({{3, 7}, {11, 15}}),
- Literal::CreateR2<float>({{5, 16}, {36, 64}})),
+ *LiteralUtil::MakeTupleOwned(
+ LiteralUtil::CreateR2<float>({{3, 7}, {11, 15}}),
+ LiteralUtil::CreateR2<float>({{5, 16}, {36, 64}})),
*result));
}
@@ -338,12 +342,14 @@ XLA_TEST_F(MultiOutputFusionTest,
auto module =
HloRunner::CreateModuleFromString(testcase, GetDebugOptionsForTest())
.ValueOrDie();
- auto param = Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
+ auto param =
+ LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
std::unique_ptr<Literal> result =
ExecuteNoHloPasses(std::move(module), {param.get()});
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::MakeTupleOwned(Literal::CreateR2<float>({{6, 8}, {10, 12}}),
- Literal::CreateR2<float>({{25, 36}, {49, 64}})),
+ *LiteralUtil::MakeTupleOwned(
+ LiteralUtil::CreateR2<float>({{6, 8}, {10, 12}}),
+ LiteralUtil::CreateR2<float>({{25, 36}, {49, 64}})),
*result));
}
@@ -369,13 +375,14 @@ XLA_TEST_F(MultiOutputFusionTest,
auto module =
HloRunner::CreateModuleFromString(testcase, GetDebugOptionsForTest())
.ValueOrDie();
- auto param = Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
+ auto param =
+ LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
std::unique_ptr<Literal> result =
ExecuteNoHloPasses(std::move(module), {param.get()});
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::MakeTupleOwned(Literal::CreateR1<float>({14, 22}),
- Literal::CreateR1<float>({36, 64}),
- Literal::CreateR1<float>({66, 138})),
+ *LiteralUtil::MakeTupleOwned(LiteralUtil::CreateR1<float>({14, 22}),
+ LiteralUtil::CreateR1<float>({36, 64}),
+ LiteralUtil::CreateR1<float>({66, 138})),
*result));
}
@@ -401,14 +408,15 @@ XLA_TEST_F(MultiOutputFusionTest,
auto module =
HloRunner::CreateModuleFromString(testcase, GetDebugOptionsForTest())
.ValueOrDie();
- auto param = Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
+ auto param =
+ LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
std::unique_ptr<Literal> result =
ExecuteNoHloPasses(std::move(module), {param.get()});
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::MakeTupleOwned(
- Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}),
- Literal::CreateR2<float>({{3, 7}, {11, 15}}),
- Literal::CreateR2<float>({{5, 16}, {36, 64}})),
+ *LiteralUtil::MakeTupleOwned(
+ LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}),
+ LiteralUtil::CreateR2<float>({{3, 7}, {11, 15}}),
+ LiteralUtil::CreateR2<float>({{5, 16}, {36, 64}})),
*result));
}
@@ -434,14 +442,16 @@ XLA_TEST_F(MultiOutputFusionTest,
auto module =
HloRunner::CreateModuleFromString(testcase, GetDebugOptionsForTest())
.ValueOrDie();
- auto param = Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
+ auto param =
+ LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
std::unique_ptr<Literal> result =
ExecuteNoHloPasses(std::move(module), {param.get()});
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::MakeTupleOwned(
- Literal::CreateR2<float>({{6, 8}, {10, 12}}),
- Literal::CreateR3<float>({{{1, 4}, {9, 16}}, {{25, 36}, {49, 64}}}),
- Literal::CreateR2<float>({{25, 36}, {49, 64}})),
+ *LiteralUtil::MakeTupleOwned(
+ LiteralUtil::CreateR2<float>({{6, 8}, {10, 12}}),
+ LiteralUtil::CreateR3<float>(
+ {{{1, 4}, {9, 16}}, {{25, 36}, {49, 64}}}),
+ LiteralUtil::CreateR2<float>({{25, 36}, {49, 64}})),
*result));
}
@@ -454,7 +464,8 @@ XLA_TEST_F(MultiOutputFusionTest,
r1 = f32[2]{0} reduce(p0, c0), dimensions={0,2}, to_apply=Add
mul = f32[2,2,2]{2,1,0} multiply(p0, p0)
c1 = f32[] constant(5)
- mul2 = f32[2,2,2]{2,1,0} multiply(p0, c1)
+ b1 = f32[2,2,2]{2,1,0} broadcast(c1), dimensions={}
+ mul2 = f32[2,2,2]{2,1,0} multiply(p0, b1)
ROOT tuple = (f32[2]{0}, f32[2,2,2]{2,1,0}, f32[2,2,2]{2,1,0})
tuple(r1, mul, mul2)
}
@@ -467,14 +478,16 @@ XLA_TEST_F(MultiOutputFusionTest,
auto module =
HloRunner::CreateModuleFromString(testcase, GetDebugOptionsForTest())
.ValueOrDie();
- auto param = Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
+ auto param =
+ LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
std::unique_ptr<Literal> result =
ExecuteNoHloPasses(std::move(module), {param.get()});
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::MakeTupleOwned(
- Literal::CreateR1<float>({14, 22}),
- Literal::CreateR3<float>({{{1, 4}, {9, 16}}, {{25, 36}, {49, 64}}}),
- Literal::CreateR3<float>(
+ *LiteralUtil::MakeTupleOwned(
+ LiteralUtil::CreateR1<float>({14, 22}),
+ LiteralUtil::CreateR3<float>(
+ {{{1, 4}, {9, 16}}, {{25, 36}, {49, 64}}}),
+ LiteralUtil::CreateR3<float>(
{{{5, 10}, {15, 20}}, {{25, 30}, {35, 40}}})),
*result));
}
@@ -501,15 +514,16 @@ XLA_TEST_F(MultiOutputFusionTest,
auto module =
HloRunner::CreateModuleFromString(testcase, GetDebugOptionsForTest())
.ValueOrDie();
- auto param = Literal::CreateR3<float>({{{0, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
- auto init1 = Literal::CreateR0<float>(5);
- auto init2 = Literal::CreateR0<float>(6);
+ auto param =
+ LiteralUtil::CreateR3<float>({{{0, 2}, {3, 4}}, {{5, 6}, {7, 8}}});
+ auto init1 = LiteralUtil::CreateR0<float>(5);
+ auto init2 = LiteralUtil::CreateR0<float>(6);
std::unique_ptr<Literal> result = ExecuteNoHloPasses(
std::move(module), {param.get(), init1.get(), init2.get()});
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::MakeTupleOwned(
- Literal::CreateR2<float>({{167, 172}, {176, 180}}),
- Literal::CreateR2<float>({{6, 6}, {6, 8}})),
+ *LiteralUtil::MakeTupleOwned(
+ LiteralUtil::CreateR2<float>({{167, 172}, {176, 180}}),
+ LiteralUtil::CreateR2<float>({{6, 6}, {6, 8}})),
*result));
}
@@ -536,19 +550,20 @@ XLA_TEST_F(MultiOutputFusionTest,
auto module =
HloRunner::CreateModuleFromString(testcase, GetDebugOptionsForTest())
.ValueOrDie();
- auto param = Literal::CreateR3<Eigen::half>(
+ auto param = LiteralUtil::CreateR3<Eigen::half>(
{{{Eigen::half(1), Eigen::half(2)}, {Eigen::half(3), Eigen::half(4)}},
{{Eigen::half(5), Eigen::half(6)}, {Eigen::half(7), Eigen::half(8)}}});
std::unique_ptr<Literal> result =
ExecuteNoHloPasses(std::move(module), {param.get()});
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::MakeTupleOwned(
- Literal::CreateR2<float>({{3, 7}, {11, 15}}),
- Literal::CreateR2<float>({{5, 16}, {36, 64}}),
- Literal::CreateR3<Eigen::half>({{{Eigen::half(1), Eigen::half(2)},
- {Eigen::half(3), Eigen::half(4)}},
- {{Eigen::half(5), Eigen::half(6)},
- {Eigen::half(7), Eigen::half(8)}}})),
+ *LiteralUtil::MakeTupleOwned(
+ LiteralUtil::CreateR2<float>({{3, 7}, {11, 15}}),
+ LiteralUtil::CreateR2<float>({{5, 16}, {36, 64}}),
+ LiteralUtil::CreateR3<Eigen::half>(
+ {{{Eigen::half(1), Eigen::half(2)},
+ {Eigen::half(3), Eigen::half(4)}},
+ {{Eigen::half(5), Eigen::half(6)},
+ {Eigen::half(7), Eigen::half(8)}}})),
*result));
}
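
One change in multioutput_fusion_test.cc above is not a pure rename: the HLO text for scaling an array by a scalar constant now spells out the broadcast instead of multiplying mixed ranks. Restated outside the diff markers for readability:

Before (implicit mixed-rank multiply):
  c1 = f32[] constant(5)
  mul2 = f32[2,2,2]{2,1,0} multiply(p0, c1)

After (the scalar is broadcast to the operand's shape first):
  c1 = f32[] constant(5)
  b1 = f32[2,2,2]{2,1,0} broadcast(c1), dimensions={}
  mul2 = f32[2,2,2]{2,1,0} multiply(p0, b1)
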
diff --git a/tensorflow/compiler/xla/tests/pad_test.cc b/tensorflow/compiler/xla/tests/pad_test.cc
index ce295b832d..e428fa9b5e 100644
--- a/tensorflow/compiler/xla/tests/pad_test.cc
+++ b/tensorflow/compiler/xla/tests/pad_test.cc
@@ -93,8 +93,8 @@ XLA_TEST_P(PadTestFloat, Pad1DS0ToS0Array) {
dimension->set_edge_padding_high(0);
dimension->set_interior_padding(0);
- b.Pad(AddParam(*Literal::CreateR1<float>({}), &b),
- AddParam(*Literal::CreateR0<float>(0.1), &b), padding_config);
+ Pad(AddParam(*LiteralUtil::CreateR1<float>({}), &b),
+ AddParam(*LiteralUtil::CreateR0<float>(0.1), &b), padding_config);
ComputeAndCompareR1<float>(&b, {}, {}, DefaultErrorSpec());
}
@@ -108,8 +108,8 @@ XLA_TEST_P(PadTestFloat, Pad1DS0ToS5Array) {
dimension->set_edge_padding_high(4);
dimension->set_interior_padding(7);
- b.Pad(AddParam(*Literal::CreateR1<float>({}), &b),
- AddParam(*Literal::CreateR0<float>(0.1), &b), padding_config);
+ Pad(AddParam(*LiteralUtil::CreateR1<float>({}), &b),
+ AddParam(*LiteralUtil::CreateR0<float>(0.1), &b), padding_config);
ComputeAndCompareR1<float>(&b, std::vector<float>(5, 0.1), {},
DefaultErrorSpec());
}
@@ -123,16 +123,17 @@ XLA_TEST_P(PadTestFloat, Pad1DS3Array) {
dimension->set_edge_padding_high(0);
dimension->set_interior_padding(1);
- b.Pad(AddParam(*Literal::CreateR1<float>({1, 2, 3}), &b),
- AddParam(*Literal::CreateR0<float>(0.1), &b), padding_config);
+ Pad(AddParam(*LiteralUtil::CreateR1<float>({1, 2, 3}), &b),
+ AddParam(*LiteralUtil::CreateR0<float>(0.1), &b), padding_config);
std::vector<float> expected({0.1, 0.1, 0.1, 1, 0.1, 2, 0.1, 3});
ComputeAndCompareR1<float>(&b, expected, {}, DefaultErrorSpec());
}
XLA_TEST_P(PadTestFloat, Pad4D_2x0x3x2_FloatArray) {
XlaBuilder b(TestName());
- b.Pad(AddParam(Array4D<float>(2, 0, 3, 2), &b),
- AddParam(*Literal::CreateR0<float>(1.5), &b), r4_padding_on_dim0_dim1_);
+ Pad(AddParam(Array4D<float>(2, 0, 3, 2), &b),
+ AddParam(*LiteralUtil::CreateR0<float>(1.5), &b),
+ r4_padding_on_dim0_dim1_);
ComputeAndCompareR4<float>(&b, Array4D<float>(5, 2, 3, 2, 1.5f), {},
DefaultErrorSpec());
}
@@ -147,8 +148,8 @@ TEST_P(PadTestFloat, Pad4DFloat_1x1x3x2_Array) {
});
input->FillWithYX(input_xy);
- b.Pad(AddParam(*input, &b), AddParam(*Literal::CreateR0<float>(1.5), &b),
- r4_padding_on_dim0_dim1_);
+ Pad(AddParam(*input, &b), AddParam(*LiteralUtil::CreateR0<float>(1.5), &b),
+ r4_padding_on_dim0_dim1_);
auto expected = MakeUnique<Array4D<float>>(2, 3, 3, 2);
expected->Fill(1.5);
@@ -166,8 +167,9 @@ TEST_P(PadTestFloat, Pad4DFloatArrayWithInteriorPadding) {
const float pad_value = 1.5f;
Array4D<float> input(3, 2, 1, 1, {1, 2, 3, 4, 5, 6});
- b.Pad(AddParam(input, &b), AddParam(*Literal::CreateR0<float>(pad_value), &b),
- r4_padding_on_dim0_dim1_);
+ Pad(AddParam(input, &b),
+ AddParam(*LiteralUtil::CreateR0<float>(pad_value), &b),
+ r4_padding_on_dim0_dim1_);
auto expected = MakeUnique<Array4D<float>>(8, 5, 1, 1);
expected->Fill(pad_value);
@@ -205,11 +207,11 @@ TEST_P(PadTestFloat, Pad4DFloatArrayMinorFirstSmall) {
const float pad_value = -5.123f;
Array4D<float> input_array(1, 1, 2, 3, {1, 2, 3, 4, 5, 6});
- auto input = Literal::CreateR4FromArray4D<float>(input_array);
+ auto input = LiteralUtil::CreateR4FromArray4D<float>(input_array);
input = input->Relayout(layout);
- b.Pad(AddParam(*input, &b),
- AddParam(*Literal::CreateR0<float>(pad_value), &b), padding_config);
+ Pad(AddParam(*input, &b),
+ AddParam(*LiteralUtil::CreateR0<float>(pad_value), &b), padding_config);
Array4D<float> expected_array(1, 1, 5, 8);
expected_array.Fill(pad_value);
@@ -251,11 +253,11 @@ XLA_TEST_P(PadTestFloat, Pad4DFloatArrayMinorFirstNonTrivialMinorDimensions) {
input_array(0, 0, 0, 0) = 1.0f;
input_array(0, 24, 6, 6) = 2.0f;
input_array(0, 17, 2, 5) = 3.0f;
- auto input = Literal::CreateR4FromArray4D<float>(input_array);
+ auto input = LiteralUtil::CreateR4FromArray4D<float>(input_array);
input = input->Relayout(layout);
- b.Pad(AddParam(*input, &b),
- AddParam(*Literal::CreateR0<float>(pad_value), &b), padding_config);
+ Pad(AddParam(*input, &b),
+ AddParam(*LiteralUtil::CreateR0<float>(pad_value), &b), padding_config);
Array4D<float> expected_array(1, 25, 17, 11);
expected_array.Fill(pad_value);
@@ -275,8 +277,8 @@ XLA_TEST_F(PadTest, Pad4DU8Array) {
});
input->FillWithYX(input_xy);
- b.Pad(AddParam(*input, &b), b.ConstantR0<uint8>(35),
- r4_padding_on_dim0_dim1_);
+ Pad(AddParam(*input, &b), ConstantR0<uint8>(&b, 35),
+ r4_padding_on_dim0_dim1_);
auto expected = MakeUnique<Array4D<uint8>>(2, 3, 3, 2);
expected->Fill(35);
@@ -294,16 +296,16 @@ XLA_TEST_F(PadTest, Pad4DPredArray) {
// Since bool is currently not well supported, use Broadcast operation to
// create the operand for Pad.
- auto input = b.Broadcast(b.ConstantR0<bool>(true), {1, 1, 3, 2});
+ auto input = Broadcast(ConstantR0<bool>(&b, true), {1, 1, 3, 2});
auto padded =
- b.Pad(input, b.ConstantR0<bool>(false), r4_padding_on_dim0_dim1_);
+ Pad(input, ConstantR0<bool>(&b, false), r4_padding_on_dim0_dim1_);
// For the same reason, use Select to convert boolean values to int32.
auto zeros = MakeUnique<Array4D<int32>>(2, 3, 3, 2);
auto ones = MakeUnique<Array4D<int32>>(2, 3, 3, 2);
zeros->Fill(0);
ones->Fill(1);
- b.Select(padded, AddParam(*ones, &b), AddParam(*zeros, &b));
+ Select(padded, AddParam(*ones, &b), AddParam(*zeros, &b));
auto expected = MakeUnique<Array4D<int32>>(2, 3, 3, 2);
expected->Fill(0);
@@ -329,7 +331,7 @@ XLA_TEST_P(PadTestFloat, Large2DPad) {
padding_config.mutable_dimensions(dim)->set_edge_padding_high(58 +
100 * dim);
}
- b.Pad(input, AddParam(*Literal::CreateR0<float>(0.0f), &b), padding_config);
+ Pad(input, AddParam(*LiteralUtil::CreateR0<float>(0.0f), &b), padding_config);
auto expected = ReferenceUtil::PadArray2D(*ones, padding_config, 0.0f);
ComputeAndCompareR2<float>(&b, *expected, {}, DefaultErrorSpec());
@@ -351,7 +353,8 @@ XLA_TEST_P(PadTestFloat, AllTypes2DPad) {
padding_config.mutable_dimensions(1)->set_edge_padding_low(6);
padding_config.mutable_dimensions(1)->set_edge_padding_high(4);
padding_config.mutable_dimensions(1)->set_interior_padding(2);
- b.Pad(input, AddParam(*Literal::CreateR0<float>(3.14f), &b), padding_config);
+ Pad(input, AddParam(*LiteralUtil::CreateR0<float>(3.14f), &b),
+ padding_config);
auto expected = ReferenceUtil::PadArray2D(*operand, padding_config, 3.14f);
ComputeAndCompareR2<float>(&b, *expected, {}, DefaultErrorSpec());
@@ -376,7 +379,8 @@ XLA_TEST_P(PadTestFloat, High2DPad) {
padding_config.mutable_dimensions(dim)->set_interior_padding(
interior_padding);
}
- b.Pad(input, AddParam(*Literal::CreateR0<float>(2.718f), &b), padding_config);
+ Pad(input, AddParam(*LiteralUtil::CreateR0<float>(2.718f), &b),
+ padding_config);
auto expected = ReferenceUtil::PadArray2D(*operand, padding_config, 2.718f);
@@ -403,7 +407,8 @@ XLA_TEST_P(PadTestFloat, NegativePadding2D) {
padding_config.mutable_dimensions(dim)->set_interior_padding(
interior_padding);
}
- b.Pad(input, AddParam(*Literal::CreateR0<float>(2.718f), &b), padding_config);
+ Pad(input, AddParam(*LiteralUtil::CreateR0<float>(2.718f), &b),
+ padding_config);
auto expected = ReferenceUtil::PadArray2D(*operand, padding_config, 2.718f);
@@ -430,7 +435,8 @@ XLA_TEST_P(PadTestFloat, NegativeAndInteriorPadding2D) {
padding_config.mutable_dimensions(dim)->set_interior_padding(
interior_padding[dim]);
}
- b.Pad(input, AddParam(*Literal::CreateR0<float>(2.718f), &b), padding_config);
+ Pad(input, AddParam(*LiteralUtil::CreateR0<float>(2.718f), &b),
+ padding_config);
auto expected = ReferenceUtil::PadArray2D(*operand, padding_config, 2.718f);
@@ -446,12 +452,13 @@ XLA_TEST_P(PadTestFloat, ReducePad) {
XlaComputation add = CreateScalarAddComputation(FloatType(), &b);
auto reduce =
- b.Reduce(input, AddParam(*Literal::CreateR0<float>(0.0), &b), add, {0});
+ Reduce(input, AddParam(*LiteralUtil::CreateR0<float>(0.0), &b), add, {0});
PaddingConfig padding_config = MakeNoPaddingConfig(3);
padding_config.mutable_dimensions(0)->set_edge_padding_low(1);
padding_config.mutable_dimensions(0)->set_edge_padding_high(1);
- b.Pad(reduce, AddParam(*Literal::CreateR0<float>(0.0f), &b), padding_config);
+ Pad(reduce, AddParam(*LiteralUtil::CreateR0<float>(0.0f), &b),
+ padding_config);
Array3D<float> expected({{{0.0, 0.0}, {0.0, 0.0}},
{{2.0, 2.0}, {2.0, 2.0}},
diff --git a/tensorflow/compiler/xla/tests/params_test.cc b/tensorflow/compiler/xla/tests/params_test.cc
index 838f1b4e2f..8ba1d11b33 100644
--- a/tensorflow/compiler/xla/tests/params_test.cc
+++ b/tensorflow/compiler/xla/tests/params_test.cc
@@ -24,7 +24,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
@@ -42,11 +42,12 @@ class ParamsTest : public ClientLibraryTestBase {};
XLA_TEST_F(ParamsTest, ConstantR0F32Param) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> param0_literal = Literal::CreateR0<float>(3.14159f);
+ std::unique_ptr<Literal> param0_literal =
+ LiteralUtil::CreateR0<float>(3.14159f);
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto p = builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "param0");
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "param0");
ComputeAndCompareR0<float>(&builder, 3.14159f, {param0_data.get()},
ErrorSpec(0.0001f));
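This first test shows the round trip every test in this file relies on: build a Literal host-side, transfer it to the service, and declare a matching Parameter on the builder. A minimal sketch of that flow under the new API, assuming the ClientLibraryTestBase fixture (client_, ComputeAndCompareR0) used throughout this diff:

  XlaBuilder builder("param_roundtrip");
  // LiteralUtil replaces the old static Literal::Create* factory functions.
  std::unique_ptr<Literal> literal = LiteralUtil::CreateR0<float>(3.14159f);
  // Copy the literal to the service; GlobalData is the device-side handle.
  std::unique_ptr<GlobalData> data =
      client_->TransferToServer(*literal).ConsumeValueOrDie();
  // Parameter is now a free function taking the builder as its first argument.
  Parameter(&builder, /*parameter_number=*/0, ShapeUtil::MakeShape(F32, {}),
            "param0");
  ComputeAndCompareR0<float>(&builder, 3.14159f, {data.get()},
                             ErrorSpec(0.0001f));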
@@ -54,11 +55,11 @@ XLA_TEST_F(ParamsTest, ConstantR0F32Param) {
XLA_TEST_F(ParamsTest, ConstantR1S0F32Param) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> param0_literal = Literal::CreateR1<float>({});
+ std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR1<float>({});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto p = builder.Parameter(0, ShapeUtil::MakeShape(F32, {0}), "param0");
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {0}), "param0");
ComputeAndCompareR1<float>(&builder, {}, {param0_data.get()},
ErrorSpec(0.01f));
@@ -67,11 +68,11 @@ XLA_TEST_F(ParamsTest, ConstantR1S0F32Param) {
XLA_TEST_F(ParamsTest, ConstantR1S2F32Param) {
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR1<float>({3.14f, -100.25f});
+ LiteralUtil::CreateR1<float>({3.14f, -100.25f});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto p = builder.Parameter(0, ShapeUtil::MakeShape(F32, {2}), "param0");
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2}), "param0");
ComputeAndCompareR1<float>(&builder, {3.14f, -100.25f}, {param0_data.get()},
ErrorSpec(0.01f));
@@ -80,12 +81,13 @@ XLA_TEST_F(ParamsTest, ConstantR1S2F32Param) {
XLA_TEST_F(ParamsTest, ConstantR1U8Param) {
XlaBuilder builder(TestName());
string str("hello world");
- std::unique_ptr<Literal> param0_literal = Literal::CreateR1U8(str);
+ std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR1U8(str);
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto p = builder.Parameter(
- 0, ShapeUtil::MakeShape(U8, {static_cast<int64>(str.size())}), "param0");
+ Parameter(&builder, 0,
+ ShapeUtil::MakeShape(U8, {static_cast<int64>(str.size())}),
+ "param0");
ComputeAndCompareR1U8(&builder, str, {param0_data.get()});
}
@@ -93,11 +95,11 @@ XLA_TEST_F(ParamsTest, ConstantR1U8Param) {
XLA_TEST_F(ParamsTest, ConstantR2_3x0_F32Param) {
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR2FromArray2D<float>(Array2D<float>(3, 0));
+ LiteralUtil::CreateR2FromArray2D<float>(Array2D<float>(3, 0));
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto p = builder.Parameter(0, ShapeUtil::MakeShape(F32, {3, 0}), "param0");
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {3, 0}), "param0");
ComputeAndCompareR2<float>(&builder, Array2D<float>(3, 0),
{param0_data.get()}, ErrorSpec(0.01f));
@@ -105,12 +107,12 @@ XLA_TEST_F(ParamsTest, ConstantR2_3x0_F32Param) {
XLA_TEST_F(ParamsTest, ConstantR2F32Param) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> param0_literal = Literal::CreateR2<float>(
+ std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR2<float>(
{{3.14f, -100.25f}, {7e8f, 7e-9f}, {30.3f, -100.0f}});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
- auto p = builder.Parameter(0, ShapeUtil::MakeShape(F32, {3, 2}), "param0");
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {3, 2}), "param0");
Array2D<float> expected_array(
{{3.14f, -100.25f}, {7e8f, 7e-9f}, {30.3f, -100.0f}});
@@ -121,28 +123,28 @@ XLA_TEST_F(ParamsTest, ConstantR2F32Param) {
XLA_TEST_F(ParamsTest, TwoParameters) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>({1, 2});
+ std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>({1, 2});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*literal0).ConsumeValueOrDie();
- auto param0 = builder.Parameter(0, literal0->shape(), "param0");
+ auto param0 = Parameter(&builder, 0, literal0->shape(), "param0");
- std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>({10, 20});
+ std::unique_ptr<Literal> literal1 = LiteralUtil::CreateR1<float>({10, 20});
std::unique_ptr<GlobalData> param1_data =
client_->TransferToServer(*literal1).ConsumeValueOrDie();
- auto param1 = builder.Parameter(1, literal1->shape(), "param1");
+ auto param1 = Parameter(&builder, 1, literal1->shape(), "param1");
// Use both parameters
//
// {1, 2} + {10, 20} = {11, 22}
- auto sum = builder.Add(param0, param1);
- sum = builder.Add(param0, param1);
+ auto sum = Add(param0, param1);
+ sum = Add(param0, param1);
// Use only the second parameter again, to show that it can be used
// twice and to make the computation asymmetric in the two
// parameters to test that the parameters are not swapped.
//
// {11, 22} * {10, 20} = {110, 440}
- auto prod = builder.Mul(sum, param1);
+ Mul(sum, param1);
ComputeAndCompareR1<float>(&builder, {110, 440},
{param0_data.get(), param1_data.get()},
@@ -152,12 +154,12 @@ XLA_TEST_F(ParamsTest, TwoParameters) {
XLA_TEST_F(ParamsTest, MissingParameter) {
// Test that an error is returned when a computation with an incomplete set of
// parameters (parameter numbers not contiguous from 0) is executed.
- std::unique_ptr<Literal> literal = Literal::CreateR0<float>(3.14159f);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR0<float>(3.14159f);
std::unique_ptr<GlobalData> data =
client_->TransferToServer(*literal).ConsumeValueOrDie();
XlaBuilder builder(TestName());
- auto p = builder.Parameter(2, ShapeUtil::MakeShape(F32, {}), "param2");
+ Parameter(&builder, 2, ShapeUtil::MakeShape(F32, {}), "param2");
auto computation_status = builder.Build();
ASSERT_NE(computation_status.status(), Status::OK());
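Note that the non-contiguous parameter numbering is only diagnosed when the computation is finalized, not when Parameter is recorded; a minimal sketch of that failure mode, mirroring the test above:

  XlaBuilder builder("missing_param");
  // Declare parameter 2 with parameters 0 and 1 absent; recording succeeds.
  Parameter(&builder, 2, ShapeUtil::MakeShape(F32, {}), "param2");
  // Build() walks the graph and reports the gap as a non-OK status.
  StatusOr<XlaComputation> computation_status = builder.Build();
  ASSERT_NE(computation_status.status(), Status::OK());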
@@ -166,15 +168,15 @@ XLA_TEST_F(ParamsTest, MissingParameter) {
XLA_TEST_F(ParamsTest, UnusedParameter) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>({1, 2});
+ std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>({1, 2});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*literal0).ConsumeValueOrDie();
- auto param0 = builder.Parameter(0, literal0->shape(), "param0");
+ Parameter(&builder, 0, literal0->shape(), "param0");
- std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>({10, 20});
+ std::unique_ptr<Literal> literal1 = LiteralUtil::CreateR1<float>({10, 20});
std::unique_ptr<GlobalData> param1_data =
client_->TransferToServer(*literal1).ConsumeValueOrDie();
- auto param1 = builder.Parameter(1, literal1->shape(), "param1");
+ Parameter(&builder, 1, literal1->shape(), "param1");
ComputeAndCompareR1<float>(&builder, {10, 20},
{param0_data.get(), param1_data.get()},
@@ -186,22 +188,23 @@ XLA_TEST_F(ParamsTest, UnusedParametersInUnusedExpression) {
// unused expression.
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>({1, 2});
+ std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>({1, 2});
std::unique_ptr<GlobalData> param0_data =
client_->TransferToServer(*literal0).ConsumeValueOrDie();
- std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>({10, 20, 30});
+ std::unique_ptr<Literal> literal1 =
+ LiteralUtil::CreateR1<float>({10, 20, 30});
std::unique_ptr<GlobalData> param1_data =
client_->TransferToServer(*literal1).ConsumeValueOrDie();
- auto param0 = builder.Parameter(0, literal0->shape(), "param0");
- auto param1 = builder.Parameter(1, literal1->shape(), "param1");
- auto param2 = builder.Parameter(2, literal1->shape(), "param2");
+ auto param0 = Parameter(&builder, 0, literal0->shape(), "param0");
+ auto param1 = Parameter(&builder, 1, literal1->shape(), "param1");
+ auto param2 = Parameter(&builder, 2, literal1->shape(), "param2");
// This add is unused.
- builder.Add(param1, param2);
+ Add(param1, param2);
- builder.Neg(param0);
+ Neg(param0);
ComputeAndCompareR1<float>(
&builder, {-1, -2},
@@ -215,7 +218,7 @@ XLA_TEST_F(ParamsTest, HundredLargeR1Parameters) {
std::vector<float> init_value = {{0, 1}};
init_value.resize(size);
- XlaOp sum_handle = builder.ConstantR1<float>(init_value);
+ XlaOp sum_handle = ConstantR1<float>(&builder, init_value);
std::vector<float> sum = {{0, 1}};
sum.resize(size);
@@ -230,11 +233,11 @@ XLA_TEST_F(ParamsTest, HundredLargeR1Parameters) {
std::vector<float> sum_value = {{entry0, entry1}};
sum_value.resize(size);
- std::unique_ptr<Literal> literal = Literal::CreateR1<float>(sum_value);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR1<float>(sum_value);
param_data_owner.push_back(
client_->TransferToServer(*literal).ConsumeValueOrDie());
- XlaOp param = builder.Parameter(i, literal->shape(), "param");
- sum_handle = builder.Add(sum_handle, param);
+ XlaOp param = Parameter(&builder, i, literal->shape(), "param");
+ sum_handle = Add(sum_handle, param);
}
std::vector<GlobalData*> param_data;
@@ -260,16 +263,16 @@ XLA_TEST_F(ParamsTest,
XlaBuilder builder(TestName());
std::vector<std::unique_ptr<GlobalData>> param_data_owner;
- XlaOp sum_handle = builder.ConstantR0<float>(0.0f);
+ XlaOp sum_handle = ConstantR0<float>(&builder, 0.0f);
float target = 0.0;
constexpr int kParamCount = 3000;
for (int i = 0; i < kParamCount; ++i) {
target += i;
- std::unique_ptr<Literal> literal = Literal::CreateR0<float>(i);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR0<float>(i);
param_data_owner.push_back(
std::move(client_->TransferToServer(*literal)).ValueOrDie());
- XlaOp param = builder.Parameter(i, literal->shape(), "param");
- sum_handle = builder.Add(sum_handle, param);
+ XlaOp param = Parameter(&builder, i, literal->shape(), "param");
+ sum_handle = Add(sum_handle, param);
}
std::vector<GlobalData*> param_data;
@@ -291,26 +294,26 @@ XLA_TEST_F(ParamsTest, DISABLED_ON_CPU(DISABLED_ON_GPU(
XlaBuilder builder(TestName());
std::vector<std::unique_ptr<GlobalData>> param_data_owner;
- XlaOp sum_handle = builder.ConstantR1<int32>({0, 0});
+ XlaOp sum_handle = ConstantR1<int32>(&builder, {0, 0});
int32 target = 0;
constexpr int kParamCount = 3000;
std::vector<XlaOp> params;
for (int i = 0; i < kParamCount; ++i) {
target += i;
- std::unique_ptr<Literal> literal = Literal::CreateR1<int32>({i, i});
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR1<int32>({i, i});
param_data_owner.push_back(
std::move(client_->TransferToServer(*literal)).ValueOrDie());
- XlaOp param = builder.Parameter(i, literal->shape(), "param");
+ XlaOp param = Parameter(&builder, i, literal->shape(), "param");
params.push_back(param);
- sum_handle = builder.Add(sum_handle, param);
+ sum_handle = Add(sum_handle, param);
}
std::vector<XlaOp> outputs;
for (int i = 0; i < kParamCount; ++i) {
- outputs.push_back(builder.Add(params[i], sum_handle));
+ outputs.push_back(Add(params[i], sum_handle));
}
- builder.Tuple(outputs);
+ Tuple(&builder, outputs);
std::vector<GlobalData*> param_data;
param_data.reserve(param_data_owner.size());
@@ -321,10 +324,10 @@ XLA_TEST_F(ParamsTest, DISABLED_ON_CPU(DISABLED_ON_GPU(
std::vector<std::unique_ptr<Literal>> elements;
std::vector<const Literal*> ptrs;
for (int i = 0; i < kParamCount; ++i) {
- elements.push_back(Literal::CreateR1<int32>({target + i, target + i}));
+ elements.push_back(LiteralUtil::CreateR1<int32>({target + i, target + i}));
ptrs.push_back(elements.back().get());
}
- ComputeAndCompareTuple(&builder, *Literal::MakeTuple(ptrs), param_data);
+ ComputeAndCompareTuple(&builder, *LiteralUtil::MakeTuple(ptrs), param_data);
}
// Test large number of parameters flowing into a while-loop.
@@ -353,25 +356,25 @@ XLA_TEST_F(ParamsTest,
std::vector<XlaOp> params;
std::vector<Shape> parameter_shapes;
for (int i = 0; i < kParamCount; ++i) {
- std::unique_ptr<Literal> literal = Literal::CreateR1<int32>({i, i});
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR1<int32>({i, i});
param_data_owner.push_back(
std::move(client_->TransferToServer(*literal)).ValueOrDie());
- XlaOp param = builder.Parameter(i, literal->shape(), "param");
+ XlaOp param = Parameter(&builder, i, literal->shape(), "param");
params.push_back(param);
parameter_shapes.push_back(literal->shape());
}
// Add bool parameter for the loop condition. Use a parameter HLO instead of a
// constant because DCE may eliminate the while-body otherwise.
- std::unique_ptr<Literal> bool_literal = Literal::CreateR0<bool>(false);
+ std::unique_ptr<Literal> bool_literal = LiteralUtil::CreateR0<bool>(false);
param_data_owner.push_back(
std::move(client_->TransferToServer(*bool_literal)).ValueOrDie());
XlaOp bool_param =
- builder.Parameter(kParamCount, bool_literal->shape(), "bool_param");
+ Parameter(&builder, kParamCount, bool_literal->shape(), "bool_param");
params.push_back(bool_param);
parameter_shapes.push_back(bool_literal->shape());
- auto init = builder.Tuple(params);
+ auto init = Tuple(&builder, params);
// Create a computation for the condition: while(bool_param).
Shape while_shape = ShapeUtil::MakeTupleShape(parameter_shapes);
@@ -379,8 +382,8 @@ XLA_TEST_F(ParamsTest,
{
XlaBuilder builder("condition");
auto condition_parameter =
- builder.Parameter(0, while_shape, "condition_parameter");
- builder.GetTupleElement(condition_parameter, kParamCount);
+ Parameter(&builder, 0, while_shape, "condition_parameter");
+ GetTupleElement(condition_parameter, kParamCount);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -389,27 +392,27 @@ XLA_TEST_F(ParamsTest,
XlaComputation body;
{
XlaBuilder builder("body");
- auto body_parameter = builder.Parameter(0, while_shape, "body_parameter");
+ auto body_parameter = Parameter(&builder, 0, while_shape, "body_parameter");
std::vector<XlaOp> updates;
for (int i = 0; i < kParamCount; ++i) {
- auto add = builder.Add(builder.GetTupleElement(body_parameter, i),
- builder.ConstantR1<int32>({1, 1}));
+ auto add = Add(GetTupleElement(body_parameter, i),
+ ConstantR1<int32>(&builder, {1, 1}));
updates.push_back(add);
}
// Add bool parameter.
- updates.push_back(builder.GetTupleElement(body_parameter, kParamCount));
+ updates.push_back(GetTupleElement(body_parameter, kParamCount));
- builder.Tuple(updates);
+ Tuple(&builder, updates);
body = builder.Build().ConsumeValueOrDie();
}
- auto loop = builder.While(condition, body, init);
+ auto loop = While(condition, body, init);
std::vector<XlaOp> outputs;
for (int i = 0; i < kParamCount; ++i) {
- outputs.push_back(builder.GetTupleElement(loop, i));
+ outputs.push_back(GetTupleElement(loop, i));
}
- builder.Tuple(outputs);
+ Tuple(&builder, outputs);
std::vector<GlobalData*> param_data;
param_data.reserve(param_data_owner.size());
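The loop construction above follows a fixed recipe: build the condition and body as standalone XlaComputations over the loop-carried tuple shape, then combine them with While. A condensed sketch, with kParamCount and while_shape as defined in the test:

  // Condition: keep looping while the bool at tuple index kParamCount is true.
  XlaComputation condition;
  {
    XlaBuilder cb("condition");
    auto state = Parameter(&cb, 0, while_shape, "condition_parameter");
    GetTupleElement(state, kParamCount);  // last op becomes the predicate
    condition = cb.Build().ConsumeValueOrDie();
  }
  // Body: increment each vector element; pass the bool through unchanged.
  XlaComputation body;
  {
    XlaBuilder bb("body");
    auto state = Parameter(&bb, 0, while_shape, "body_parameter");
    std::vector<XlaOp> updates;
    for (int i = 0; i < kParamCount; ++i) {
      updates.push_back(
          Add(GetTupleElement(state, i), ConstantR1<int32>(&bb, {1, 1})));
    }
    updates.push_back(GetTupleElement(state, kParamCount));
    Tuple(&bb, updates);
    body = bb.Build().ConsumeValueOrDie();
  }
  auto loop = While(condition, body, init);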
@@ -420,10 +423,10 @@ XLA_TEST_F(ParamsTest,
std::vector<std::unique_ptr<Literal>> elements;
std::vector<const Literal*> ptrs;
for (int i = 0; i < kParamCount; ++i) {
- elements.push_back(Literal::CreateR1<int32>({i, i}));
+ elements.push_back(LiteralUtil::CreateR1<int32>({i, i}));
ptrs.push_back(elements.back().get());
}
- ComputeAndCompareTuple(&builder, *Literal::MakeTuple(ptrs), param_data);
+ ComputeAndCompareTuple(&builder, *LiteralUtil::MakeTuple(ptrs), param_data);
}
#endif
@@ -433,16 +436,16 @@ XLA_TEST_F(ParamsTest, TupleOfR1ParametersAddedTogether) {
Shape r1f32_3 = ShapeUtil::MakeShape(F32, {3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({r1f32_3, r1f32_3});
- auto input = builder.Parameter(0, tuple_shape, "input");
- auto lhs = builder.GetTupleElement(input, 0);
- auto rhs = builder.GetTupleElement(input, 1);
- builder.Add(lhs, rhs);
+ auto input = Parameter(&builder, 0, tuple_shape, "input");
+ auto lhs = GetTupleElement(input, 0);
+ auto rhs = GetTupleElement(input, 1);
+ Add(lhs, rhs);
std::unique_ptr<GlobalData> data =
client_
- ->TransferToServer(*Literal::MakeTuple({
- Literal::CreateR1<float>({1, 2, 3}).get(),
- Literal::CreateR1<float>({4, 5, 6}).get(),
+ ->TransferToServer(*LiteralUtil::MakeTuple({
+ LiteralUtil::CreateR1<float>({1, 2, 3}).get(),
+ LiteralUtil::CreateR1<float>({4, 5, 6}).get(),
}))
.ConsumeValueOrDie();
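Tuple-shaped parameters transfer the same way as arrays: assemble the tuple literal host-side with LiteralUtil::MakeTuple, then unpack device-side with GetTupleElement. A condensed sketch of the test above:

  Shape r1f32_3 = ShapeUtil::MakeShape(F32, {3});
  Shape tuple_shape = ShapeUtil::MakeTupleShape({r1f32_3, r1f32_3});
  auto input = Parameter(&builder, 0, tuple_shape, "input");
  // Element-wise sum of the two tuple components.
  Add(GetTupleElement(input, 0), GetTupleElement(input, 1));
  std::unique_ptr<GlobalData> data =
      client_
          ->TransferToServer(*LiteralUtil::MakeTuple(
              {LiteralUtil::CreateR1<float>({1, 2, 3}).get(),
               LiteralUtil::CreateR1<float>({4, 5, 6}).get()}))
          .ConsumeValueOrDie();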
@@ -454,10 +457,10 @@ XLA_TEST_F(ParamsTest, TupleOfR1ParametersAddedTogether) {
// Verifies that passing a 2x2 with {0, 1} layout returns the same value back
// when (transferred to the server and) passed through a parameter.
XLA_TEST_F(ParamsTest, R2_2x2_Layout_01) {
- std::unique_ptr<Literal> literal = Literal::CreateR2WithLayout<float>(
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR2WithLayout<float>(
{{1, 2}, {3, 4}}, LayoutUtil::MakeLayout({0, 1}));
XlaBuilder builder(TestName());
- builder.Parameter(0, literal->shape(), "input");
+ Parameter(&builder, 0, literal->shape(), "input");
std::unique_ptr<GlobalData> data =
client_->TransferToServer(*literal).ConsumeValueOrDie();
@@ -466,10 +469,10 @@ XLA_TEST_F(ParamsTest, R2_2x2_Layout_01) {
// As above, but for {1, 0} layout.
XLA_TEST_F(ParamsTest, R2_2x2_Layout_10) {
- std::unique_ptr<Literal> literal = Literal::CreateR2WithLayout<float>(
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR2WithLayout<float>(
{{1, 3}, {2, 4}}, LayoutUtil::MakeLayout({1, 0}));
XlaBuilder builder(TestName());
- builder.Parameter(0, literal->shape(), "input");
+ Parameter(&builder, 0, literal->shape(), "input");
std::unique_ptr<GlobalData> data =
client_->TransferToServer(*literal).ConsumeValueOrDie();
@@ -477,8 +480,9 @@ XLA_TEST_F(ParamsTest, R2_2x2_Layout_10) {
}
XLA_TEST_F(ParamsTest, R2_2x2_TryToPassReverseLayoutToParameter) {
- std::unique_ptr<Literal> literal = Literal::CreateR2<float>({
- {1, 3}, {2, 4},
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR2<float>({
+ {1, 3},
+ {2, 4},
});
const Shape original = literal->shape();
{
@@ -494,9 +498,9 @@ XLA_TEST_F(ParamsTest, R2_2x2_TryToPassReverseLayoutToParameter) {
}
// Use the original shape in building the computation.
XlaBuilder builder(TestName());
- auto input = builder.Parameter(0, original, "input");
+ auto input = Parameter(&builder, 0, original, "input");
// Use the slice operator to get an off-diagonal element.
- builder.Slice(input, {0, 1}, {1, 2}, {1, 1});
+ Slice(input, {0, 1}, {1, 2}, {1, 1});
std::unique_ptr<GlobalData> data =
client_->TransferToServer(*literal).ConsumeValueOrDie();
diff --git a/tensorflow/compiler/xla/tests/pred_test.cc b/tensorflow/compiler/xla/tests/pred_test.cc
index 77159efb26..5c351b2d11 100644
--- a/tensorflow/compiler/xla/tests/pred_test.cc
+++ b/tensorflow/compiler/xla/tests/pred_test.cc
@@ -29,64 +29,63 @@ namespace {
class PredTest : public ClientLibraryTestBase {
protected:
- void TestCompare(
- bool lhs, bool rhs, bool expected,
- XlaOp (XlaBuilder::*op)(const xla::XlaOp&, const xla::XlaOp&,
- tensorflow::gtl::ArraySlice<int64>)) {
+ void TestCompare(bool lhs, bool rhs, bool expected,
+ std::function<XlaOp(const xla::XlaOp&, const xla::XlaOp&,
+ tensorflow::gtl::ArraySlice<int64>)>
+ op) {
XlaBuilder builder(TestName());
- XlaOp lhs_op = builder.ConstantR0<bool>(lhs);
- XlaOp rhs_op = builder.ConstantR0<bool>(rhs);
- XlaOp result = (builder.*op)(lhs_op, rhs_op, {});
+ XlaOp lhs_op = ConstantR0<bool>(&builder, lhs);
+ XlaOp rhs_op = ConstantR0<bool>(&builder, rhs);
+ op(lhs_op, rhs_op, {});
ComputeAndCompareR0<bool>(&builder, expected, {});
}
};
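Because the comparison ops are now free functions rather than XlaBuilder members, the helper takes a std::function instead of a member-function pointer, so plain function pointers such as &Eq bind to it directly. A minimal sketch with a hypothetical RunCompare helper (not a name from this diff):

  using CompareOp = std::function<XlaOp(const xla::XlaOp&, const xla::XlaOp&,
                                        tensorflow::gtl::ArraySlice<int64>)>;
  // Eq, Ne, Le, Lt, Ge, and Gt all share this signature.
  void RunCompare(XlaBuilder* builder, bool lhs, bool rhs, CompareOp op) {
    XlaOp lhs_op = ConstantR0<bool>(builder, lhs);
    XlaOp rhs_op = ConstantR0<bool>(builder, rhs);
    op(lhs_op, rhs_op, {});  // the comparison becomes the builder's root
  }
  // Usage: RunCompare(&builder, true, false, &Ne);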
TEST_F(PredTest, ConstantR0PredTrue) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR0<bool>(true);
+ ConstantR0<bool>(&builder, true);
ComputeAndCompareR0<bool>(&builder, true, {});
}
TEST_F(PredTest, ConstantR0PredFalse) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR0<bool>(false);
+ ConstantR0<bool>(&builder, false);
ComputeAndCompareR0<bool>(&builder, false, {});
}
TEST_F(PredTest, ConstantR0PredCompareEq) {
- TestCompare(true, false, false, &XlaBuilder::Eq);
+ TestCompare(true, false, false, &Eq);
}
TEST_F(PredTest, ConstantR0PredCompareNe) {
- TestCompare(true, false, true, &XlaBuilder::Ne);
+ TestCompare(true, false, true, &Ne);
}
TEST_F(PredTest, ConstantR0PredCompareLe) {
- TestCompare(true, false, false, &XlaBuilder::Le);
+ TestCompare(true, false, false, &Le);
}
TEST_F(PredTest, ConstantR0PredCompareLt) {
- TestCompare(true, false, false, &XlaBuilder::Lt);
+ TestCompare(true, false, false, &Lt);
}
TEST_F(PredTest, ConstantR0PredCompareGe) {
- TestCompare(true, false, true, &XlaBuilder::Ge);
+ TestCompare(true, false, true, &Ge);
}
TEST_F(PredTest, ConstantR0PredCompareGt) {
- TestCompare(true, false, true, &XlaBuilder::Gt);
+ TestCompare(true, false, true, &Gt);
}
TEST_F(PredTest, ConstantR1Pred) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({true, false, false, true});
+ ConstantR1<bool>(&builder, {true, false, false, true});
ComputeAndCompareR1<bool>(&builder, {true, false, false, true}, {});
}
TEST_F(PredTest, ConstantR2Pred) {
XlaBuilder builder(TestName());
- auto a =
- builder.ConstantR2<bool>({{false, true, true}, {true, false, false}});
+ ConstantR2<bool>(&builder, {{false, true, true}, {true, false, false}});
const string expected = R"(pred[2,3] {
{ 011 },
{ 100 }
@@ -96,44 +95,44 @@ TEST_F(PredTest, ConstantR2Pred) {
TEST_F(PredTest, AnyR1True) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({true, false});
- TF_ASSERT_OK(Any(a, &builder).status());
+ auto a = ConstantR1<bool>(&builder, {true, false});
+ Any(a);
ComputeAndCompareR0<bool>(&builder, true, {});
}
TEST_F(PredTest, AnyR1False) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({false, false});
- TF_ASSERT_OK(Any(a, &builder).status());
+ auto a = ConstantR1<bool>(&builder, {false, false});
+ Any(a);
ComputeAndCompareR0<bool>(&builder, false, {});
}
TEST_F(PredTest, AnyR1VacuouslyFalse) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR1<bool>({});
- TF_ASSERT_OK(Any(a, &builder).status());
+ auto a = ConstantR1<bool>(&builder, {});
+ Any(a);
ComputeAndCompareR0<bool>(&builder, false, {});
}
TEST_F(PredTest, AnyR2True) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<bool>({
- {false, false, false},
- {false, false, false},
- {false, false, true},
- });
- TF_ASSERT_OK(Any(a, &builder).status());
+ auto a = ConstantR2<bool>(&builder, {
+ {false, false, false},
+ {false, false, false},
+ {false, false, true},
+ });
+ Any(a);
ComputeAndCompareR0<bool>(&builder, true, {});
}
TEST_F(PredTest, AnyR2False) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<bool>({
- {false, false, false},
- {false, false, false},
- {false, false, false},
- });
- TF_ASSERT_OK(Any(a, &builder).status());
+ auto a = ConstantR2<bool>(&builder, {
+ {false, false, false},
+ {false, false, false},
+ {false, false, false},
+ });
+ Any(a);
ComputeAndCompareR0<bool>(&builder, false, {});
}
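Any now takes just the operand; the old form took the builder and returned a StatusOr, hence the dropped TF_ASSERT_OK wrappers. A short sketch (assuming, as these tests suggest, that the result simply becomes the builder's root):

  // true iff any element of the PRED array is true; an empty array gives false.
  auto a = ConstantR1<bool>(&builder, {true, false});
  Any(a);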
diff --git a/tensorflow/compiler/xla/tests/prng_test.cc b/tensorflow/compiler/xla/tests/prng_test.cc
index 1a2de6937c..5ebf8344d2 100644
--- a/tensorflow/compiler/xla/tests/prng_test.cc
+++ b/tensorflow/compiler/xla/tests/prng_test.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/primitive_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
@@ -53,8 +53,8 @@ template <typename T>
std::unique_ptr<Literal> PrngTest::UniformTest(
T a, T b, tensorflow::gtl::ArraySlice<int64> dims, int64 seed) {
XlaBuilder builder(TestName());
- builder.RngUniform(
- builder.ConstantR0<T>(a), builder.ConstantR0<T>(b),
+ RngUniform(
+ ConstantR0<T>(&builder, a), ConstantR0<T>(&builder, b),
ShapeUtil::MakeShape(primitive_util::NativeToPrimitiveType<T>(), dims));
SetSeed(seed);
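RngUniform follows the same migration: both bounds are XlaOps built with the free constant helpers, and the shape argument fixes the element type and dimensions of the draw. A minimal sketch, with illustrative bounds and sample count:

  // Draw 10000 int32 samples uniformly from [0, 1000).
  RngUniform(ConstantR0<int32>(&builder, 0),
             ConstantR0<int32>(&builder, 1000),
             ShapeUtil::MakeShape(S32, {10000}));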
@@ -141,9 +141,9 @@ double PrngTest::UniformChiSquared(int32 range_size, int32 expected_count,
int32 sample_size = range_size * expected_count;
XlaBuilder builder(TestName());
- builder.RngUniform(builder.ConstantR0<int32>(0),
- builder.ConstantR0<int32>(range_size),
- ShapeUtil::MakeShape(S32, {sample_size}));
+ RngUniform(ConstantR0<int32>(&builder, 0),
+ ConstantR0<int32>(&builder, range_size),
+ ShapeUtil::MakeShape(S32, {sample_size}));
SetSeed(seed);
auto actual =
@@ -184,21 +184,22 @@ XLA_TEST_F(PrngTest, MapUsingRng) {
// Build a x -> (x + U[0,1)) computation.
auto build_sum_rng = [this](XlaBuilder& builder) {
auto b = builder.CreateSubBuilder("sum_with_rng");
- auto x = b->Parameter(0, ShapeUtil::MakeShape(F32, {}), "input");
- b->Add(x, b->RngUniform(b->ConstantR0<float>(0), b->ConstantR0<float>(1),
- ShapeUtil::MakeShape(F32, {})));
+ auto x = Parameter(b.get(), 0, ShapeUtil::MakeShape(F32, {}), "input");
+ Add(x,
+ RngUniform(ConstantR0<float>(b.get(), 0), ConstantR0<float>(b.get(), 1),
+ ShapeUtil::MakeShape(F32, {})));
return b->BuildAndNoteError();
};
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR1<float>({2.2f, 5.3f, 4.4f, 5.5f});
+ LiteralUtil::CreateR1<float>({2.2f, 5.3f, 4.4f, 5.5f});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> param0_data,
client_->TransferToServer(*param0_literal));
- auto param0 = builder.Parameter(0, param0_literal->shape(), "param0");
+ auto param0 = Parameter(&builder, 0, param0_literal->shape(), "param0");
auto fn = build_sum_rng(builder);
- builder.Map({param0}, fn, {0});
+ Map(&builder, {param0}, fn, {0});
TF_ASSERT_OK_AND_ASSIGN(auto computation, builder.Build());
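The sub-builder pattern above is how scalar computations for Map are assembled: CreateSubBuilder shares error state with the parent builder, and BuildAndNoteError finalizes without surfacing a StatusOr. A condensed sketch of the x -> x + U[0,1) mapper:

  auto b = builder.CreateSubBuilder("sum_with_rng");
  auto x = Parameter(b.get(), 0, ShapeUtil::MakeShape(F32, {}), "input");
  // Add a fresh U[0,1) draw to the scalar input.
  Add(x, RngUniform(ConstantR0<float>(b.get(), 0), ConstantR0<float>(b.get(), 1),
                    ShapeUtil::MakeShape(F32, {})));
  XlaComputation fn = b->BuildAndNoteError();
  // Apply the mapper elementwise over dimension 0 of param0.
  Map(&builder, {param0}, fn, {0});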
@@ -226,9 +227,8 @@ XLA_TEST_F(PrngTest, PassInGlobalRngSeed) {
// Build a U[0,1) computation.
auto build_computation = [this]() {
XlaBuilder builder(TestName());
- builder.RngUniform(builder.ConstantR0<float>(0),
- builder.ConstantR0<float>(1),
- ShapeUtil::MakeShape(F32, {10}));
+ RngUniform(ConstantR0<float>(&builder, 0), ConstantR0<float>(&builder, 1),
+ ShapeUtil::MakeShape(F32, {10}));
return builder.Build();
};
@@ -282,8 +282,8 @@ XLA_TEST_F(PrngTest, PassInGlobalRngSeed) {
XLA_TEST_F(PrngTest, TenValuesN01) {
XlaBuilder builder(TestName());
- builder.RngNormal(builder.ConstantR0<float>(0), builder.ConstantR0<float>(1),
- ShapeUtil::MakeShape(F32, {10}));
+ RngNormal(ConstantR0<float>(&builder, 0), ConstantR0<float>(&builder, 1),
+ ShapeUtil::MakeShape(F32, {10}));
SetSeed(42);
ExecuteAndTransfer(&builder, /*arguments=*/{}).ConsumeValueOrDie();
@@ -294,9 +294,9 @@ XLA_TEST_F(PrngTest, RngUniformCrash) {
XlaBuilder builder(TestName());
// This used to crash XLA during LLVM IR generation for CPUs.
- auto rng_uniform = builder.RngUniform(builder.ConstantR0<int32>(0),
- builder.ConstantR0<int32>(1000 * 1000),
- ShapeUtil::MakeShape(S32, {}));
+ RngUniform(ConstantR0<int32>(&builder, 0),
+ ConstantR0<int32>(&builder, 1000 * 1000),
+ ShapeUtil::MakeShape(S32, {}));
SetSeed(0);
ExecuteAndTransfer(&builder, /*arguments=*/{}).ConsumeValueOrDie();
}
diff --git a/tensorflow/compiler/xla/tests/query_inferred_shape_test.cc b/tensorflow/compiler/xla/tests/query_inferred_shape_test.cc
index f95e756483..526a38e8d1 100644
--- a/tensorflow/compiler/xla/tests/query_inferred_shape_test.cc
+++ b/tensorflow/compiler/xla/tests/query_inferred_shape_test.cc
@@ -31,8 +31,8 @@ class QueryInferredShapeTest : public ClientLibraryTestBase {};
TEST_F(QueryInferredShapeTest, OnePlusOneShape) {
XlaBuilder builder("one_plus_one");
- auto one = builder.ConstantR0<float>(1.0);
- auto result = builder.Add(one, one);
+ auto one = ConstantR0<float>(&builder, 1.0);
+ auto result = Add(one, one);
StatusOr<Shape> shape_status = builder.GetShape(result);
ASSERT_IS_OK(shape_status.status());
auto shape = shape_status.ConsumeValueOrDie();
diff --git a/tensorflow/compiler/xla/tests/reduce_hlo_test.cc b/tensorflow/compiler/xla/tests/reduce_hlo_test.cc
index 9052b188ed..a080dd1732 100644
--- a/tensorflow/compiler/xla/tests/reduce_hlo_test.cc
+++ b/tensorflow/compiler/xla/tests/reduce_hlo_test.cc
@@ -95,21 +95,21 @@ XLA_TEST_P(ReduceWithLayoutTest, DISABLED_ON_GPU(Reduce)) {
*reduce_input_shape->mutable_layout() =
LayoutUtil::MakeLayout(reduce_layout.input_minor_to_major);
- std::unique_ptr<Literal> reduce_input =
- Literal::CreateR4<float>({{ /*i0=0*/
- {/*i1=0*/
- {-0.246092796, -0.179497838, -0.161181688},
- {-0.151643038, -0.240213156, -0.198156}},
- {/*i1=1*/
- {-0.14222312, -0.162200093, -0.193907976},
- {-0.239411, -0.198166847, -0.172471642}}},
- { /*i0=1*/
- {/*i1=0*/
- {-0.22965157, -0.218723893, -0.129257083},
- {-0.188762426, -0.16123569, -0.181166649}},
- {/*i1=1*/
- {-0.241772294, -0.245131493, -0.160247207},
- {-0.179881215, -0.23383224, -0.121976733}}}});
+ std::unique_ptr<Literal> reduce_input = LiteralUtil::CreateR4<float>(
+ {{ /*i0=0*/
+ {/*i1=0*/
+ {-0.246092796, -0.179497838, -0.161181688},
+ {-0.151643038, -0.240213156, -0.198156}},
+ {/*i1=1*/
+ {-0.14222312, -0.162200093, -0.193907976},
+ {-0.239411, -0.198166847, -0.172471642}}},
+ { /*i0=1*/
+ {/*i1=0*/
+ {-0.22965157, -0.218723893, -0.129257083},
+ {-0.188762426, -0.16123569, -0.181166649}},
+ {/*i1=1*/
+ {-0.241772294, -0.245131493, -0.160247207},
+ {-0.179881215, -0.23383224, -0.121976733}}}});
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), ErrorSpec(1e-5)));
}
diff --git a/tensorflow/compiler/xla/tests/reduce_precision_test.cc b/tensorflow/compiler/xla/tests/reduce_precision_test.cc
index b311785449..04c7f31646 100644
--- a/tensorflow/compiler/xla/tests/reduce_precision_test.cc
+++ b/tensorflow/compiler/xla/tests/reduce_precision_test.cc
@@ -24,7 +24,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/reduce_precision_insertion.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/test.h"
@@ -230,12 +230,13 @@ XLA_TEST_P(ReducePrecisionAccuracyTest, ReducePrecisionF32) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> a_literal = Literal::CreateR1<float>({input_values});
+ std::unique_ptr<Literal> a_literal =
+ LiteralUtil::CreateR1<float>({input_values});
std::unique_ptr<GlobalData> a_data =
client_->TransferToServer(*a_literal).ConsumeValueOrDie();
- auto a = builder.Parameter(0, a_literal->shape(), "a");
+ auto a = Parameter(&builder, 0, a_literal->shape(), "a");
- builder.ReducePrecision(a, exponent_bits, mantissa_bits);
+ ReducePrecision(a, exponent_bits, mantissa_bits);
ComputeAndCompareR1<float>(&builder, expected_values, {a_data.get()});
}
@@ -253,18 +254,18 @@ XLA_TEST_F(ReducePrecisionInsertionTest,
DISABLED_ON_INTERPRETER(ReducePrecisionBeforeFusion)) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> a_literal = Literal::CreateR1<float>({1.00001});
+ std::unique_ptr<Literal> a_literal = LiteralUtil::CreateR1<float>({1.00001});
std::unique_ptr<GlobalData> a_data =
client_->TransferToServer(*a_literal).ConsumeValueOrDie();
- auto a = builder.Parameter(0, a_literal->shape(), "a");
+ auto a = Parameter(&builder, 0, a_literal->shape(), "a");
// Abs doesn't affect resolution.
- auto abs = builder.Abs(a);
+ auto abs = Abs(a);
// Near 1.0, Log(x) approximates x - 1; this lets us confirm that the
// reduce-precision operation showed up in the correct place in the
// graph.
- builder.Log(abs);
+ Log(abs);
// Insert precision-reduction after the Abs(x) operation, rounding that
// result to exactly 1.0f.
@@ -282,14 +283,14 @@ XLA_TEST_F(ReducePrecisionInsertionTest,
DISABLED_ON_INTERPRETER(ReducePrecisionSkippedAfterFusion)) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> a_literal = Literal::CreateR1<float>({1.00001});
+ std::unique_ptr<Literal> a_literal = LiteralUtil::CreateR1<float>({1.00001});
std::unique_ptr<GlobalData> a_data =
client_->TransferToServer(*a_literal).ConsumeValueOrDie();
- auto a = builder.Parameter(0, a_literal->shape(), "a");
+ auto a = Parameter(&builder, 0, a_literal->shape(), "a");
// These two operations should be fused by any reasonable backend.
- auto abs = builder.Abs(a);
- builder.Neg(abs);
+ auto abs = Abs(a);
+ Neg(abs);
// Add a pass after operation fusion, suffixing kAbs operations. This
// should not see into the fusion nodes and thus should not affect the
@@ -308,14 +309,14 @@ XLA_TEST_F(ReducePrecisionInsertionTest,
DISABLED_ON_INTERPRETER(ReducePrecisionAddedAfterFusion)) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> a_literal = Literal::CreateR1<float>({1.00001});
+ std::unique_ptr<Literal> a_literal = LiteralUtil::CreateR1<float>({1.00001});
std::unique_ptr<GlobalData> a_data =
client_->TransferToServer(*a_literal).ConsumeValueOrDie();
- auto a = builder.Parameter(0, a_literal->shape(), "a");
+ auto a = Parameter(&builder, 0, a_literal->shape(), "a");
// These two operations should be fused by any reasonable backend.
- auto abs = builder.Abs(a);
- builder.Neg(abs);
+ auto abs = Abs(a);
+ Neg(abs);
// Add a pass after operation fusion, suffixing kFusion operations.
auto reduce_precision_pass = execution_options_.mutable_debug_options()
@@ -332,14 +333,14 @@ XLA_TEST_F(ReducePrecisionInsertionTest,
DISABLED_ON_INTERPRETER(ReducePrecisionSkippedFusionContains)) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> a_literal = Literal::CreateR1<float>({1.00001});
+ std::unique_ptr<Literal> a_literal = LiteralUtil::CreateR1<float>({1.00001});
std::unique_ptr<GlobalData> a_data =
client_->TransferToServer(*a_literal).ConsumeValueOrDie();
- auto a = builder.Parameter(0, a_literal->shape(), "a");
+ auto a = Parameter(&builder, 0, a_literal->shape(), "a");
// These two operations should be fused by any reasonable backend.
- auto abs = builder.Abs(a);
- builder.Neg(abs);
+ auto abs = Abs(a);
+ Neg(abs);
// Add a pass suffixing fusion nodes containing kCos operations. This
// should have no effect.
@@ -357,14 +358,14 @@ XLA_TEST_F(ReducePrecisionInsertionTest,
DISABLED_ON_INTERPRETER(ReducePrecisionAddedFusionContains)) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> a_literal = Literal::CreateR1<float>({1.00001});
+ std::unique_ptr<Literal> a_literal = LiteralUtil::CreateR1<float>({1.00001});
std::unique_ptr<GlobalData> a_data =
client_->TransferToServer(*a_literal).ConsumeValueOrDie();
- auto a = builder.Parameter(0, a_literal->shape(), "a");
+ auto a = Parameter(&builder, 0, a_literal->shape(), "a");
// These two operations should be fused by any reasonable backend.
- auto abs = builder.Abs(a);
- builder.Neg(abs);
+ auto abs = Abs(a);
+ Neg(abs);
// Add a pass suffixing fusion nodes containing kAbs operations. This
// should see the kAbs operation within the above fusion node.
diff --git a/tensorflow/compiler/xla/tests/reduce_test.cc b/tensorflow/compiler/xla/tests/reduce_test.cc
index d671d40456..1407fca72f 100644
--- a/tensorflow/compiler/xla/tests/reduce_test.cc
+++ b/tensorflow/compiler/xla/tests/reduce_test.cc
@@ -67,12 +67,12 @@ class ReduceTest : public ClientLibraryTestBase {
ReduceTest() {
// Implementation note: laid out z >> y >> x by default.
// clang-format off
- literal_2d_ = Literal::CreateR2<float>({
+ literal_2d_ = LiteralUtil::CreateR2<float>({
// x0 x1 x2
{ 1.f, 2.f, 3.f}, // y0
{ 4.f, 5.f, 6.f}, // y1
});
- literal_3d_ = Literal::CreateR3Projected<float>({
+ literal_3d_ = LiteralUtil::CreateR3Projected<float>({
// x0 x1 x2
{ 1.f, 2.f, 3.f}, // y0
{ 4.f, 5.f, 6.f}, // y1
@@ -89,9 +89,9 @@ class ReduceTest : public ClientLibraryTestBase {
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {element_count});
- auto input = builder.Parameter(0, input_shape, "input");
- auto zero = builder.ConstantR0<float>(0.0);
- builder.Reduce(input, zero, add_f32, /*dimensions_to_reduce=*/{0});
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto zero = ConstantR0<float>(&builder, 0.0);
+ Reduce(input, zero, add_f32, /*dimensions_to_reduce=*/{0});
std::vector<float> input_data(element_count);
for (int64 i = 0; i < element_count; ++i) {
@@ -101,7 +101,7 @@ class ReduceTest : public ClientLibraryTestBase {
}
}
std::unique_ptr<Literal> input_literal =
- Literal::CreateR1(AsSlice(input_data));
+ LiteralUtil::CreateR1(AsSlice(input_data));
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(*input_literal).ConsumeValueOrDie();
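Every reduction in this file has the same three ingredients: a scalar combiner computation, an init value, and the dimensions to collapse. A minimal sketch of the R1 -> R0 sum this helper performs, with an illustrative element count:

  XlaBuilder builder("reduce_sketch");
  XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
  const Shape input_shape = ShapeUtil::MakeShape(F32, {1024});
  auto input = Parameter(&builder, 0, input_shape, "input");
  auto zero = ConstantR0<float>(&builder, 0.0f);
  // Sum all 1024 elements down to a scalar, starting from zero.
  Reduce(input, zero, add_f32, /*dimensions_to_reduce=*/{0});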
@@ -118,22 +118,22 @@ class ReduceTest : public ClientLibraryTestBase {
const int element_count = input_data.size();
XlaBuilder builder(TestName());
const Shape input_shape = ShapeUtil::MakeShape(S32, {element_count});
- auto input_par = builder.Parameter(0, input_shape, "input");
+ auto input_par = Parameter(&builder, 0, input_shape, "input");
auto pred_values =
- builder.Eq(input_par, builder.ConstantR1<int>(element_count, 1));
+ Eq(input_par, ConstantR1<int>(&builder, element_count, 1));
XlaOp init_value;
XlaComputation reduce;
if (and_reduce) {
- init_value = builder.ConstantR0<bool>(true);
+ init_value = ConstantR0<bool>(&builder, true);
reduce = CreateScalarAndComputation(&builder);
} else {
- init_value = builder.ConstantR0<bool>(false);
+ init_value = ConstantR0<bool>(&builder, false);
reduce = CreateScalarOrComputation(&builder);
}
- builder.Reduce(pred_values, init_value, reduce,
- /*dimensions_to_reduce=*/{0});
+ Reduce(pred_values, init_value, reduce,
+ /*dimensions_to_reduce=*/{0});
- std::unique_ptr<Literal> input_literal = Literal::CreateR1(input_data);
+ std::unique_ptr<Literal> input_literal = LiteralUtil::CreateR1(input_data);
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(*input_literal).ConsumeValueOrDie();
@@ -156,26 +156,26 @@ class ReduceTest : public ClientLibraryTestBase {
int64 major = 0) {
XlaBuilder builder(TestName());
const Shape input_shape = ShapeUtil::MakeShape(U8, {rows, cols});
- auto input = builder.Parameter(0, input_shape, "input");
- auto input_pred = builder.Eq(input, builder.ConstantR0<uint8>(1));
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto input_pred = Eq(input, ConstantR0<uint8>(&builder, 1));
XlaOp init_value;
XlaComputation reduce_op;
if (and_reduce) {
- init_value = builder.ConstantR0<bool>(true);
+ init_value = ConstantR0<bool>(&builder, true);
reduce_op = CreateScalarAndComputation(&builder);
} else {
- init_value = builder.ConstantR0<bool>(false);
+ init_value = ConstantR0<bool>(&builder, false);
reduce_op = CreateScalarOrComputation(&builder);
}
- builder.Reduce(input_pred, init_value, reduce_op,
- /*dimensions_to_reduce=*/{0});
+ Reduce(input_pred, init_value, reduce_op,
+ /*dimensions_to_reduce=*/{0});
Array2D<uint8> input_data(rows, cols);
input_data.FillRandom(0, 1);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR2FromArray2D(input_data);
+ LiteralUtil::CreateR2FromArray2D(input_data);
input_literal =
input_literal->Relayout(LayoutUtil::MakeLayout({minor, major}));
std::unique_ptr<GlobalData> input_global_data =
@@ -202,14 +202,14 @@ class ReduceTest : public ClientLibraryTestBase {
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, cols});
- auto input = builder.Parameter(0, input_shape, "input");
- auto zero = builder.ConstantR0<float>(0.0);
- builder.Reduce(input, zero, add_f32, /*dimensions_to_reduce=*/{0, 1});
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto zero = ConstantR0<float>(&builder, 0.0);
+ Reduce(input, zero, add_f32, /*dimensions_to_reduce=*/{0, 1});
Array2D<float> input_data(rows, cols);
input_data.FillRandom(3.14f, 0.04);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR2FromArray2D(input_data);
+ LiteralUtil::CreateR2FromArray2D(input_data);
input_literal =
input_literal->Relayout(LayoutUtil::MakeLayout({minor, major}));
std::unique_ptr<GlobalData> input_global_data =
@@ -230,14 +230,14 @@ class ReduceTest : public ClientLibraryTestBase {
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, cols});
- auto input = builder.Parameter(0, input_shape, "input");
- auto zero = builder.ConstantR0<float>(0.0);
- builder.Reduce(input, zero, add_f32, /*dimensions_to_reduce=*/{0});
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto zero = ConstantR0<float>(&builder, 0.0);
+ Reduce(input, zero, add_f32, /*dimensions_to_reduce=*/{0});
Array2D<float> input_data(rows, cols);
input_data.FillRandom(3.14f, 0.04);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR2FromArray2D(input_data);
+ LiteralUtil::CreateR2FromArray2D(input_data);
input_literal =
input_literal->Relayout(LayoutUtil::MakeLayout({minor, major}));
std::unique_ptr<GlobalData> input_global_data =
@@ -287,15 +287,15 @@ class ReduceTest : public ClientLibraryTestBase {
XlaComputation reduction_function = reduction_function_generator(&builder);
const Shape input_shape = ShapeUtil::MakeShape(
xla::primitive_util::NativeToPrimitiveType<NativeT>(), {rows, cols});
- auto input = builder.Parameter(0, input_shape, "input");
- auto zero = builder.ConstantR0<NativeT>(initial_value);
- builder.Reduce(input, zero, reduction_function,
- /*dimensions_to_reduce=*/{0});
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto zero = ConstantR0<NativeT>(&builder, initial_value);
+ Reduce(input, zero, reduction_function,
+ /*dimensions_to_reduce=*/{0});
Array2D<NativeT> input_data(rows, cols);
input_data.FillUnique(initial_value);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR2FromArray2D(input_data);
+ LiteralUtil::CreateR2FromArray2D(input_data);
input_literal =
input_literal->Relayout(LayoutUtil::MakeLayout({minor, major}));
std::unique_ptr<GlobalData> input_global_data =
@@ -442,15 +442,15 @@ XLA_TEST_F(ReduceTest, ReduceElementwiseR2_111x50_To_R1) {
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, cols});
- auto input = builder.Parameter(0, input_shape, "input");
- auto zero = builder.ConstantR0<float>(0.0);
- auto log_ = builder.Log(input);
- builder.Reduce(log_, zero, add_f32, /*dimensions_to_reduce=*/{0});
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto zero = ConstantR0<float>(&builder, 0.0);
+ auto log_ = Log(input);
+ Reduce(log_, zero, add_f32, /*dimensions_to_reduce=*/{0});
Array2D<float> input_data(rows, cols);
input_data.FillRandom(3.14f, 0.04);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR2FromArray2D(input_data);
+ LiteralUtil::CreateR2FromArray2D(input_data);
input_literal = input_literal->Relayout(LayoutUtil::MakeLayout({0, 1}));
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(*input_literal).ConsumeValueOrDie();
@@ -473,16 +473,16 @@ XLA_TEST_F(ReduceTest, TransposeAndReduceElementwiseR2_111x50_To_R1) {
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, cols});
- auto input = builder.Parameter(0, input_shape, "input");
- auto zero = builder.ConstantR0<float>(0.0);
- auto log_ = builder.Log(input);
- auto transpose = builder.Transpose(log_, {1, 0});
- builder.Reduce(transpose, zero, add_f32, /*dimensions_to_reduce=*/{1});
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto zero = ConstantR0<float>(&builder, 0.0);
+ auto log_ = Log(input);
+ auto transpose = Transpose(log_, {1, 0});
+ Reduce(transpose, zero, add_f32, /*dimensions_to_reduce=*/{1});
Array2D<float> input_data(rows, cols);
input_data.FillRandom(3.14f, 0.04);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR2FromArray2D(input_data);
+ LiteralUtil::CreateR2FromArray2D(input_data);
input_literal = input_literal->Relayout(LayoutUtil::MakeLayout({0, 1}));
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(*input_literal).ConsumeValueOrDie();
@@ -505,10 +505,10 @@ XLA_TEST_F(ReduceTest, TransposeAndReduceR3_12x111x50_To_R2) {
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {12, 111, 50});
- XlaOp input = builder.Parameter(0, input_shape, "input");
- XlaOp zero = builder.ConstantR0<float>(0.0);
- XlaOp transpose = builder.Transpose(input, /*permutation=*/{1, 0, 2});
- builder.Reduce(transpose, zero, add_f32, /*dimensions_to_reduce=*/{0});
+ XlaOp input = Parameter(&builder, 0, input_shape, "input");
+ XlaOp zero = ConstantR0<float>(&builder, 0.0);
+ XlaOp transpose = Transpose(input, /*permutation=*/{1, 0, 2});
+ Reduce(transpose, zero, add_f32, /*dimensions_to_reduce=*/{0});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> input_data,
MakeFakeLiteral(input_shape));
@@ -522,16 +522,16 @@ XLA_TEST_F(ReduceTest, Reshape_111x2x25Reduce_111x50_To_R1) {
XlaBuilder builder(TestName());
XlaComputation add_f32 = CreateScalarAddComputation(F32, &builder);
const Shape input_shape = ShapeUtil::MakeShape(F32, {rows, 2, cols / 2});
- auto input = builder.Parameter(0, input_shape, "input");
- auto zero = builder.ConstantR0<float>(0.0);
- auto log_ = builder.Tanh(input);
- auto reshape = builder.Reshape(log_, {rows, cols});
- builder.Reduce(reshape, zero, add_f32, /*dimensions_to_reduce=*/{0});
+ auto input = Parameter(&builder, 0, input_shape, "input");
+ auto zero = ConstantR0<float>(&builder, 0.0);
+ auto log_ = Tanh(input);
+ auto reshape = Reshape(log_, {rows, cols});
+ Reduce(reshape, zero, add_f32, /*dimensions_to_reduce=*/{0});
Array3D<float> input_data(rows, 2, cols / 2);
input_data.FillRandom(3.14f, 0.04);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR3FromArray3D(input_data);
+ LiteralUtil::CreateR3FromArray3D(input_data);
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(*input_literal).ConsumeValueOrDie();
@@ -568,9 +568,9 @@ void PrintTo(const BoundsLayout& spec, std::ostream* os) {
XLA_TEST_F(ReduceTest, AddReduce2DScalarToR0) {
XlaBuilder builder(TestName());
auto add = CreateScalarAddComputation(F32, &builder);
- auto scalar = builder.ConstantR0<float>(42.0);
- auto broadcasted = builder.Broadcast(scalar, {500, 500});
- builder.Reduce(broadcasted, builder.ConstantR0<float>(0.0f), add, {0, 1});
+ auto scalar = ConstantR0<float>(&builder, 42.0);
+ auto broadcasted = Broadcast(scalar, {500, 500});
+ Reduce(broadcasted, ConstantR0<float>(&builder, 0.0f), add, {0, 1});
float expected = 42.0f * static_cast<float>(500 * 500);
ComputeAndCompareR0<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -580,9 +580,9 @@ XLA_TEST_F(ReduceTest, AddReduce2DScalarToR0) {
XLA_TEST_F(ReduceTest, MaxReduce2DScalarToR0) {
XlaBuilder builder(TestName());
auto max = CreateScalarMaxComputation(F32, &builder);
- auto scalar = builder.ConstantR0<float>(42.0);
- auto broadcasted = builder.Broadcast(scalar, {500, 500});
- builder.Reduce(broadcasted, builder.ConstantR0<float>(0.0f), max, {0, 1});
+ auto scalar = ConstantR0<float>(&builder, 42.0);
+ auto broadcasted = Broadcast(scalar, {500, 500});
+ Reduce(broadcasted, ConstantR0<float>(&builder, 0.0f), max, {0, 1});
float expected = 42.0f;
ComputeAndCompareR0<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -594,9 +594,9 @@ XLA_TEST_F(ReduceTest, MaxReduce2DToR0) {
auto max = CreateScalarMaxComputation(F32, &builder);
Array2D<float> input(300, 250);
input.FillRandom(214.0f);
- auto input_literal = Literal::CreateR2FromArray2D(input);
- builder.Reduce(builder.ConstantLiteral(*input_literal),
- builder.ConstantR0<float>(FLT_MIN), max, {0, 1});
+ auto input_literal = LiteralUtil::CreateR2FromArray2D(input);
+ Reduce(ConstantLiteral(&builder, *input_literal),
+ ConstantR0<float>(&builder, FLT_MIN), max, {0, 1});
auto input_max = FLT_MIN;
input.Each(
[&](int64, int64, float* v) { input_max = std::max(input_max, *v); });
@@ -609,9 +609,9 @@ XLA_TEST_F(ReduceTest, MinReduce2DToR0) {
auto min = CreateScalarMinComputation(F32, &builder);
Array2D<float> input(150, 130);
input.FillRandom(214.0f);
- auto input_literal = Literal::CreateR2FromArray2D(input);
- builder.Reduce(builder.ConstantLiteral(*input_literal),
- builder.ConstantR0<float>(FLT_MAX), min, {0, 1});
+ auto input_literal = LiteralUtil::CreateR2FromArray2D(input);
+ Reduce(ConstantLiteral(&builder, *input_literal),
+ ConstantR0<float>(&builder, FLT_MAX), min, {0, 1});
auto input_min = FLT_MAX;
input.Each(
@@ -623,12 +623,11 @@ XLA_TEST_F(ReduceTest, UnsignedInt_MinReduce) {
XlaBuilder builder(TestName());
Array2D<uint32> input({{1}, {2}});
auto min = CreateScalarMinComputation(U32, &builder);
- auto input_literal = Literal::CreateR2FromArray2D(input);
+ auto input_literal = LiteralUtil::CreateR2FromArray2D(input);
auto initial_value =
- builder.ConstantR0<uint32>(std::numeric_limits<uint32>::max());
+ ConstantR0<uint32>(&builder, std::numeric_limits<uint32>::max());
- builder.Reduce(builder.ConstantLiteral(*input_literal), initial_value, min,
- {0, 1});
+ Reduce(ConstantLiteral(&builder, *input_literal), initial_value, min, {0, 1});
ComputeAndCompareR0<uint32>(&builder, 1, {});
}
@@ -636,21 +635,20 @@ XLA_TEST_F(ReduceTest, UnsignedInt_MaxReduce) {
XlaBuilder builder(TestName());
Array2D<uint32> input({{1}, {2}});
auto max = CreateScalarMaxComputation(U32, &builder);
- auto input_literal = Literal::CreateR2FromArray2D(input);
+ auto input_literal = LiteralUtil::CreateR2FromArray2D(input);
auto initial_value =
- builder.ConstantR0<uint32>(std::numeric_limits<uint32>::min());
+ ConstantR0<uint32>(&builder, std::numeric_limits<uint32>::min());
- builder.Reduce(builder.ConstantLiteral(*input_literal), initial_value, max,
- {0, 1});
+ Reduce(ConstantLiteral(&builder, *input_literal), initial_value, max, {0, 1});
ComputeAndCompareR0<uint32>(&builder, 2, {});
}
// Reduces a matrix among dimension 1.
XLA_TEST_F(ReduceTest, Reduce2DAmong1) {
XlaBuilder builder(TestName());
- auto m = builder.ConstantLiteral(*literal_2d_);
+ auto m = ConstantLiteral(&builder, *literal_2d_);
auto add = CreateScalarAddComputation(F32, &builder);
- builder.Reduce(m, builder.ConstantR0<float>(0.0f), add, {1});
+ Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {1});
std::vector<float> expected = {6.f, 15.f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -659,9 +657,9 @@ XLA_TEST_F(ReduceTest, Reduce2DAmong1) {
XLA_TEST_F(ReduceTest, Reduce2DAmong0and1) {
// Reduce a matrix among dimensions 0 and 1 (sum it up to a scalar).
XlaBuilder builder(TestName());
- auto m = builder.ConstantLiteral(*literal_2d_);
+ auto m = ConstantLiteral(&builder, *literal_2d_);
auto add = CreateScalarAddComputation(F32, &builder);
- builder.Reduce(m, builder.ConstantR0<float>(0.0f), add, {0, 1});
+ Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {0, 1});
ComputeAndCompareR0<float>(&builder, 21.0f, {}, ErrorSpec(0.0001, 1e-4));
}
@@ -669,9 +667,9 @@ XLA_TEST_F(ReduceTest, Reduce2DAmong0and1) {
// Tests 2D matrix ReduceToRow operation.
XLA_TEST_F(ReduceTest, Reduce2DAmongY) {
XlaBuilder builder("reduce_among_y");
- auto m = builder.ConstantLiteral(*literal_2d_);
+ auto m = ConstantLiteral(&builder, *literal_2d_);
auto add = CreateScalarAddComputation(F32, &builder);
- builder.Reduce(m, builder.ConstantR0<float>(0.0f), add, {0});
+ Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {0});
std::vector<float> expected = {5.f, 7.f, 9.f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -679,9 +677,9 @@ XLA_TEST_F(ReduceTest, Reduce2DAmongY) {
XLA_TEST_F(ReduceTest, ReduceR3AmongDims_1_2) {
XlaBuilder builder(TestName());
- auto m = builder.ConstantLiteral(*literal_3d_);
+ auto m = ConstantLiteral(&builder, *literal_3d_);
auto add = CreateScalarAddComputation(F32, &builder);
- builder.Reduce(m, builder.ConstantR0<float>(0.0f), add, {1, 2});
+ Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {1, 2});
std::vector<float> expected = {21.f, 21.f, 21.f, 21.f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -689,9 +687,9 @@ XLA_TEST_F(ReduceTest, ReduceR3AmongDims_1_2) {
XLA_TEST_F(ReduceTest, ReduceR3AmongDims_0_1) {
XlaBuilder builder(TestName());
- auto m = builder.ConstantLiteral(*literal_3d_);
+ auto m = ConstantLiteral(&builder, *literal_3d_);
auto add = CreateScalarAddComputation(F32, &builder);
- builder.Reduce(m, builder.ConstantR0<float>(0.0f), add, {0, 1});
+ Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {0, 1});
std::vector<float> expected = {20.f, 28.f, 36.f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -699,9 +697,9 @@ XLA_TEST_F(ReduceTest, ReduceR3AmongDims_0_1) {
XLA_TEST_F(ReduceTest, ReduceR3ToR0) {
XlaBuilder builder(TestName());
- auto m = builder.ConstantLiteral(*literal_3d_);
+ auto m = ConstantLiteral(&builder, *literal_3d_);
auto add = CreateScalarAddComputation(F32, &builder);
- builder.Reduce(m, builder.ConstantR0<float>(0.0f), add, {0, 1, 2});
+ Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {0, 1, 2});
float expected = 21.0f * 4.0;
ComputeAndCompareR0<float>(&builder, expected, {}, ErrorSpec(0.0001));
@@ -709,9 +707,9 @@ XLA_TEST_F(ReduceTest, ReduceR3ToR0) {
XLA_TEST_F(ReduceTest, ReduceR3AmongDim0) {
XlaBuilder builder(TestName());
- auto m = builder.ConstantLiteral(*literal_3d_);
+ auto m = ConstantLiteral(&builder, *literal_3d_);
auto add = CreateScalarAddComputation(F32, &builder);
- builder.Reduce(m, builder.ConstantR0<float>(0.0f), add, {0});
+ Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {0});
// clang-format off
Array2D<float> expected({
@@ -724,9 +722,9 @@ XLA_TEST_F(ReduceTest, ReduceR3AmongDim0) {
XLA_TEST_F(ReduceTest, ReduceR3AmongDim1) {
XlaBuilder builder(TestName());
- auto m = builder.ConstantLiteral(*literal_3d_);
+ auto m = ConstantLiteral(&builder, *literal_3d_);
auto add = CreateScalarAddComputation(F32, &builder);
- builder.Reduce(m, builder.ConstantR0<float>(0.0f), add, {1});
+ Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {1});
// clang-format off
Array2D<float> expected({
@@ -741,9 +739,9 @@ XLA_TEST_F(ReduceTest, ReduceR3AmongDim1) {
XLA_TEST_F(ReduceTest, ReduceR3AmongDim2) {
XlaBuilder builder(TestName());
- auto m = builder.ConstantLiteral(*literal_3d_);
+ auto m = ConstantLiteral(&builder, *literal_3d_);
auto add = CreateScalarAddComputation(F32, &builder);
- builder.Reduce(m, builder.ConstantR0<float>(0.0f), add, {2});
+ Reduce(m, ConstantR0<float>(&builder, 0.0f), add, {2});
// clang-format off
Array2D<float> expected({
@@ -820,17 +818,17 @@ XLA_TEST_P(ReduceR3ToR2Test, ReduceR3ToR2) {
// input_array.FillRandom(3.14f, 0.05);
input_array.Fill(1.0f);
- auto input_literal = Literal::CreateR3FromArray3D(input_array);
+ auto input_literal = LiteralUtil::CreateR3FromArray3D(input_array);
input_literal =
input_literal->Relayout(LayoutUtil::MakeLayout(GetParam().layout));
std::unique_ptr<GlobalData> input_data =
client_->TransferToServer(*input_literal).ConsumeValueOrDie();
auto input_activations =
- builder.Parameter(0, input_literal->shape(), "input");
+ Parameter(&builder, 0, input_literal->shape(), "input");
XlaComputation add = CreateScalarAddComputation(F32, &builder);
- auto sum = builder.Reduce(input_activations, builder.ConstantR0<float>(0.0f),
- add, GetParam().reduce_dims);
+ Reduce(input_activations, ConstantR0<float>(&builder, 0.0f), add,
+ GetParam().reduce_dims);
auto expected =
ReferenceUtil::Reduce3DTo2D(input_array, 0.0f, GetParam().reduce_dims,
@@ -871,14 +869,15 @@ XLA_TEST_F(ReduceTest, DISABLED_ON_GPU(OperationOnConstantAsInitValue)) {
XlaBuilder builder(TestName());
XlaComputation max_f32 = CreateScalarMaxComputation(F32, &builder);
- auto a = builder.ConstantR0<float>(2.0f);
- auto a2 = builder.Abs(a);
+ auto a = ConstantR0<float>(&builder, 2.0f);
+ auto a2 = Abs(a);
- std::unique_ptr<Literal> b_literal = Literal::CreateR1<float>({1.0f, 4.0f});
+ std::unique_ptr<Literal> b_literal =
+ LiteralUtil::CreateR1<float>({1.0f, 4.0f});
std::unique_ptr<GlobalData> b_data =
client_->TransferToServer(*b_literal).ConsumeValueOrDie();
- auto b = builder.Parameter(0, b_literal->shape(), "b");
- auto max = builder.Reduce(b, a2, max_f32, {0});
+ auto b = Parameter(&builder, 0, b_literal->shape(), "b");
+ Reduce(b, a2, max_f32, {0});
ComputeAndCompareR0<float>(&builder, 4.0f, {b_data.get()});
}
@@ -900,13 +899,13 @@ class ReduceInitializerTest : public ReduceTest {
XlaComputation max_fn = CreateScalarMaxComputation(
primitive_util::NativeToPrimitiveType<T>(), &builder);
- auto init = builder.ConstantR0<T>(initializer);
+ auto init = ConstantR0<T>(&builder, initializer);
std::vector<T> input_arr(num_elems, std::numeric_limits<T>::lowest());
- auto input_literal = Literal::CreateR1<T>(input_arr);
+ auto input_literal = LiteralUtil::CreateR1<T>(input_arr);
auto input_data =
client_->TransferToServer(*input_literal).ConsumeValueOrDie();
- builder.Reduce(builder.Parameter(0, input_literal->shape(), "input"), init,
- max_fn, {0});
+ Reduce(Parameter(&builder, 0, input_literal->shape(), "input"), init,
+ max_fn, {0});
ComputeAndCompareR0<T>(&builder, initializer, {input_data.get()});
}
@@ -939,23 +938,24 @@ XLA_TEST_F(ReduceInitializerTest, U64InitializerBigValue) {
XLA_TEST_F(ReduceTest, ReduceIdentity) {
XlaBuilder builder(TestName());
Shape single_float = ShapeUtil::MakeShape(F32, {});
- builder.Parameter(0, single_float, "lhs-unused");
- builder.Parameter(1, single_float, "rhs-used");
+ Parameter(&builder, 0, single_float, "lhs-unused");
+ Parameter(&builder, 1, single_float, "rhs-used");
auto computation_status = builder.Build();
TF_ASSERT_OK(computation_status.status());
Shape operand_shape = ShapeUtil::MakeShape(F32, {1});
- builder.Reduce(builder.Parameter(0, operand_shape, "operand"),
- builder.Parameter(1, single_float, "init"),
- computation_status.ValueOrDie(), {0});
+ Reduce(Parameter(&builder, 0, operand_shape, "operand"),
+ Parameter(&builder, 1, single_float, "init"),
+ computation_status.ValueOrDie(), {0});
float operand[] = {42.0f};
float init = 58.5f;
float expected = 42.0f;
- std::unique_ptr<Literal> input_literal = Literal::CreateR1<float>(operand);
+ std::unique_ptr<Literal> input_literal =
+ LiteralUtil::CreateR1<float>(operand);
std::unique_ptr<GlobalData> input_global_data =
client_->TransferToServer(*input_literal).ConsumeValueOrDie();
- std::unique_ptr<Literal> input_literal2 = Literal::CreateR0<float>(init);
+ std::unique_ptr<Literal> input_literal2 = LiteralUtil::CreateR0<float>(init);
std::unique_ptr<GlobalData> input_global_data2 =
client_->TransferToServer(*input_literal2).ConsumeValueOrDie();
ComputeAndCompareR0<float>(
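Every hunk in reduce_test.cc above applies the same mechanical rewrite: XlaBuilder member functions become free functions that take the builder (or an operand already bound to it) as an argument, and literal factories move from Literal:: to LiteralUtil::. A minimal sketch of the new call style, assuming the includes and namespace of these tests (names and values are illustrative):

  // Sum a constant vector down to a scalar with the free-function API.
  XlaComputation BuildReduceExample() {
    XlaBuilder builder("reduce_example");
    auto input = ConstantR1<float>(&builder, {1.0f, 2.0f, 3.0f});
    // The shared scalar-add helper, as used throughout these tests.
    XlaComputation add = CreateScalarAddComputation(F32, &builder);
    // Formerly: builder.Reduce(input, builder.ConstantR0<float>(0.0f), add, {0});
    Reduce(input, ConstantR0<float>(&builder, 0.0f), add, {0});
    return builder.Build().ConsumeValueOrDie();
  }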
diff --git a/tensorflow/compiler/xla/tests/reduce_window_test.cc b/tensorflow/compiler/xla/tests/reduce_window_test.cc
index 266760e820..c2681f70f7 100644
--- a/tensorflow/compiler/xla/tests/reduce_window_test.cc
+++ b/tensorflow/compiler/xla/tests/reduce_window_test.cc
@@ -70,31 +70,33 @@ class ReduceWindowTest : public ::testing::WithParamInterface<bool>,
tensorflow::gtl::ArraySlice<int64> window_dimensions,
tensorflow::gtl::ArraySlice<int64> window_strides,
Padding padding) {
- auto init =
- CreateConstantFromLiteral(*Literal::CreateR0<float>(0.0f), &builder_);
- builder_.ReduceWindow(input, init,
- CreateScalarAddComputation(FloatType(), &builder_),
- window_dimensions, window_strides, padding);
+ auto init = CreateConstantFromLiteral(*LiteralUtil::CreateR0<float>(0.0f),
+ &builder_);
+ ReduceWindow(input, init,
+ CreateScalarAddComputation(FloatType(), &builder_),
+ window_dimensions, window_strides, padding);
}
void ReduceWindowMax(const XlaOp& input,
tensorflow::gtl::ArraySlice<int64> window_dimensions,
tensorflow::gtl::ArraySlice<int64> window_strides,
Padding padding) {
- auto init = CreateConstantFromLiteral(Literal::MinValue(F32), &builder_);
- builder_.ReduceWindow(input, init,
- CreateScalarMaxComputation(FloatType(), &builder_),
- window_dimensions, window_strides, padding);
+ auto init =
+ CreateConstantFromLiteral(LiteralUtil::MinValue(F32), &builder_);
+ ReduceWindow(input, init,
+ CreateScalarMaxComputation(FloatType(), &builder_),
+ window_dimensions, window_strides, padding);
}
void ReduceWindowMin(const XlaOp& input,
tensorflow::gtl::ArraySlice<int64> window_dimensions,
tensorflow::gtl::ArraySlice<int64> window_strides,
Padding padding) {
- auto init = CreateConstantFromLiteral(Literal::MaxValue(F32), &builder_);
- builder_.ReduceWindow(input, init,
- CreateScalarMinComputation(FloatType(), &builder_),
- window_dimensions, window_strides, padding);
+ auto init =
+ CreateConstantFromLiteral(LiteralUtil::MaxValue(F32), &builder_);
+ ReduceWindow(input, init,
+ CreateScalarMinComputation(FloatType(), &builder_),
+ window_dimensions, window_strides, padding);
}
XlaBuilder builder_;
@@ -102,14 +104,14 @@ class ReduceWindowTest : public ::testing::WithParamInterface<bool>,
TEST_P(ReduceWindowTest, MismatchedRanksGivesErrorStatus) {
const auto input = CreateConstantFromLiteral(
- *Literal::CreateR1<float>({1, 1, 1, 1}), &builder_);
+ *LiteralUtil::CreateR1<float>({1, 1, 1, 1}), &builder_);
const auto init_value =
- CreateConstantFromLiteral(*Literal::CreateR0<float>(0), &builder_);
+ CreateConstantFromLiteral(*LiteralUtil::CreateR0<float>(0), &builder_);
TF_ASSERT_OK(builder_.first_error());
- builder_.ReduceWindow(input, init_value,
- CreateScalarAddComputation(FloatType(), &builder_),
- /*window_dimensions=*/{1, 2},
- /*window_strides=*/{1}, Padding::kValid);
+ ReduceWindow(input, init_value,
+ CreateScalarAddComputation(FloatType(), &builder_),
+ /*window_dimensions=*/{1, 2},
+ /*window_strides=*/{1}, Padding::kValid);
ASSERT_EQ(builder_.first_error().code(), tensorflow::error::INVALID_ARGUMENT)
<< builder_.first_error();
ASSERT_THAT(builder_.first_error().error_message(),
@@ -119,33 +121,32 @@ TEST_P(ReduceWindowTest, MismatchedRanksGivesErrorStatus) {
// Regression test for b/68964348.
TEST_P(ReduceWindowTest, R0ReduceWindow) {
const auto input =
- CreateConstantFromLiteral(*Literal::CreateR0<float>(42.0), &builder_);
+ CreateConstantFromLiteral(*LiteralUtil::CreateR0<float>(42.0), &builder_);
const auto init =
- CreateConstantFromLiteral(*Literal::CreateR0<float>(1.0), &builder_);
- builder_.ReduceWindow(input, init,
- CreateScalarAddComputation(FloatType(), &builder_),
- /*window_dimensions=*/{},
- /*window_strides=*/{}, Padding::kSame);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateR0<float>(43.0), {},
+ CreateConstantFromLiteral(*LiteralUtil::CreateR0<float>(1.0), &builder_);
+ ReduceWindow(input, init, CreateScalarAddComputation(FloatType(), &builder_),
+ /*window_dimensions=*/{},
+ /*window_strides=*/{}, Padding::kSame);
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateR0<float>(43.0), {},
ErrorSpec(0.00001));
}
TEST_P(ReduceWindowTest, Min3In5Stride2) {
const auto input = CreateConstantFromLiteral(
- *Literal::CreateR1<float>({10000, 1000, 100, 10, 1}), &builder_);
+ *LiteralUtil::CreateR1<float>({10000, 1000, 100, 10, 1}), &builder_);
ReduceWindowMin(input, {3}, {2}, Padding::kValid);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateR1<float>({100, 1}), {},
- ErrorSpec(0.00001));
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateR1<float>({100, 1}),
+ {}, ErrorSpec(0.00001));
}
TEST_P(ReduceWindowTest, Min3In5Stride1WithSamePadding) {
const auto input = CreateConstantFromLiteral(
- *Literal::CreateR1<float>({10000, 1000, 100, 10, 1}), &builder_);
+ *LiteralUtil::CreateR1<float>({10000, 1000, 100, 10, 1}), &builder_);
ReduceWindowMin(input, /*window_dimensions=*/{3}, /*window_strides=*/{1},
Padding::kSame);
ComputeAndCompareLiteral(&builder_,
- *Literal::CreateR1<float>({1000, 100, 10, 1, 1}), {},
- ErrorSpec(0.00001));
+ *LiteralUtil::CreateR1<float>({1000, 100, 10, 1, 1}),
+ {}, ErrorSpec(0.00001));
}
XLA_TEST_P(ReduceWindowTest, ZeroElementSmall) {
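The reduce_window_test.cc hunks follow the same pattern. A hedged sketch of the free-function ReduceWindow behind the ReduceWindowAdd helper above (window shape, stride, and input values are illustrative):

  // Windowed sum: window 3, stride 2, valid padding, over a 1-D input.
  XlaComputation BuildReduceWindowExample() {
    XlaBuilder builder("reduce_window_example");
    auto input = ConstantR1<float>(&builder, {10000.f, 1000.f, 100.f, 10.f, 1.f});
    auto init = ConstantR0<float>(&builder, 0.0f);
    // Formerly: builder_.ReduceWindow(input, init, add_computation, {3}, {2}, ...);
    ReduceWindow(input, init, CreateScalarAddComputation(F32, &builder),
                 /*window_dimensions=*/{3}, /*window_strides=*/{2},
                 Padding::kValid);
    return builder.Build().ConsumeValueOrDie();
  }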
@@ -157,7 +158,7 @@ XLA_TEST_P(ReduceWindowTest, ZeroElementSmall) {
auto res = ReferenceUtil::ReduceWindow4DAdd(input_array, 0.0f, {1, 1, 2, 1},
{1, 1, 1, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*res), {},
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*res), {},
DefaultErrorSpec());
}
@@ -172,7 +173,7 @@ TEST_P(ReduceWindowTest, NonSquareSmall) {
auto res = ReferenceUtil::ReduceWindow4DAdd(input_array, 0.0f, {1, 1, 2, 1},
{1, 1, 1, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*res), {},
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*res), {},
DefaultErrorSpec());
}
@@ -186,7 +187,7 @@ TEST_P(ReduceWindowTest, MiddleDimsSmall) {
auto res = ReferenceUtil::ReduceWindow4DAdd(input_array, 0.0f, {1, 1, 1, 1},
{1, 2, 2, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*res), {},
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*res), {},
DefaultErrorSpec());
}
@@ -203,7 +204,7 @@ TEST_P(ReduceWindowTest, Along2ndMinorDim) {
auto res = ReferenceUtil::ReduceWindow4DAdd(
input_array, 0.0f, {1, 1, lrn_diameter, 1}, {1, 1, 1, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*res), {},
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*res), {},
DefaultErrorSpec());
}
@@ -225,8 +226,8 @@ TEST_P(ReduceWindowTest, AmongMajor2Dims) {
input_array, 0.0f, {win_len, win_len, 1, 1},
{win_stride, win_stride, 1, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*result), {},
- DefaultErrorSpec());
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*result),
+ {}, DefaultErrorSpec());
}
TEST_P(ReduceWindowTest, AmongMajor2DimsMediumSize) {
@@ -248,8 +249,8 @@ TEST_P(ReduceWindowTest, AmongMajor2DimsMediumSize) {
input_array, 0.0f, {win_len, win_len, 1, 1},
{win_stride, win_stride, 1, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*result), {},
- DefaultErrorSpec());
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*result),
+ {}, DefaultErrorSpec());
}
// Tests the super windowing logic w.r.t. handling a prime number of windows in a
@@ -273,8 +274,8 @@ TEST_P(ReduceWindowTest, PrimeWindowsInReductionDimension) {
input_array, 0.0f, {win_len, win_len, 1, 1},
{win_stride, win_stride, 1, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*result), {},
- DefaultErrorSpec());
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*result),
+ {}, DefaultErrorSpec());
}
TEST_P(ReduceWindowTest, ReduceAlongLaneDimension) {
@@ -290,8 +291,8 @@ TEST_P(ReduceWindowTest, ReduceAlongLaneDimension) {
auto result = ReferenceUtil::ReduceWindow4DAdd(
input_array, 0.0f, {1, 1, 1, 11}, {1, 1, 1, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*result), {},
- DefaultErrorSpec());
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*result),
+ {}, DefaultErrorSpec());
}
// Tests a reduction function that is not a simple add/min/max/etc.
@@ -306,15 +307,15 @@ XLA_TEST_P(ReduceWindowTest, NonstandardReduceFunction) {
Padding padding = Padding::kValid;
const Shape scalar = ShapeUtil::MakeShape(FloatType(), {});
auto b = builder_.CreateSubBuilder("unusual");
- auto lhs = b->Parameter(0, scalar, "lhs");
- auto rhs = b->Parameter(1, scalar, "rhs");
- b->Min(b->Add(lhs, rhs),
- CreateConstantFromLiteral(*Literal::CreateR0<float>(8.0f), b.get()));
+ auto lhs = Parameter(b.get(), 0, scalar, "lhs");
+ auto rhs = Parameter(b.get(), 1, scalar, "rhs");
+ Min(Add(lhs, rhs),
+ CreateConstantFromLiteral(*LiteralUtil::CreateR0<float>(8.0f), b.get()));
XlaComputation reduce_fn = b->BuildAndNoteError();
- builder_.ReduceWindow(
+ ReduceWindow(
input,
- CreateConstantFromLiteral(*Literal::CreateR0<float>(0.0f), &builder_),
+ CreateConstantFromLiteral(*LiteralUtil::CreateR0<float>(0.0f), &builder_),
reduce_fn,
/*window_dimensions=*/{1, 1, 2, 1},
/*window_strides=*/{1, 1, 1, 1}, padding);
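NonstandardReduceFunction above shows the sub-builder side of the rewrite: ops created on a child builder are also free functions, taking the sub-builder or its ops directly. The reducer in isolation, as a sketch under the same assumptions (the 8.0f clamp mirrors the test; the plain ConstantR0 stands in for its bfloat16-aware helper):

  // Reducer computing min(lhs + rhs, 8.0f), built on a sub-builder.
  XlaComputation BuildClampedAddReducer(XlaBuilder* builder) {
    const Shape scalar = ShapeUtil::MakeShape(F32, {});
    auto b = builder->CreateSubBuilder("clamped_add");
    auto lhs = Parameter(b.get(), 0, scalar, "lhs");
    auto rhs = Parameter(b.get(), 1, scalar, "rhs");
    // Formerly: b->Min(b->Add(lhs, rhs), <constant 8.0f>);
    Min(Add(lhs, rhs), ConstantR0<float>(b.get(), 8.0f));
    return b->BuildAndNoteError();
  }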
@@ -328,15 +329,15 @@ XLA_TEST_P(ReduceWindowTest, NonstandardReduceFunction) {
/*window=*/{1, 1, 2, 1},
/*stride=*/{1, 1, 1, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*expected), {},
- DefaultErrorSpec());
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*expected),
+ {}, DefaultErrorSpec());
}
TEST_P(ReduceWindowTest, R4UnitWindow) {
Array4D<float> input_array(13, 12, 8, 15);
input_array.FillRandom(2.f, 2.f);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input_array, LayoutUtil::MakeLayout({0, 3, 2, 1}));
XlaOp input;
auto input_data = CreateParameterAndTransferLiteral(
@@ -348,7 +349,7 @@ TEST_P(ReduceWindowTest, R4UnitWindow) {
auto res = ReferenceUtil::ReduceWindow4DAdd(input_array, 0.0f, {1, 1, 7, 1},
{1, 4, 1, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*res),
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*res),
{input_data.get()}, DefaultErrorSpec());
}
@@ -377,7 +378,7 @@ XLA_TEST_P(ReduceWindowTest, R6Add) {
auto shape = ShapeUtil::MakeShape(F32, input_dims);
std::unique_ptr<Literal> arg_literal =
- Literal::CreateFullWithDescendingLayout<float>(input_dims, 1.0f);
+ LiteralUtil::CreateFullWithDescendingLayout<float>(input_dims, 1.0f);
const auto input = CreateConstantFromLiteral(*arg_literal, &builder_);
@@ -386,7 +387,7 @@ XLA_TEST_P(ReduceWindowTest, R6Add) {
std::vector<int64> output_dims = {8, 8, 6, 6, 8, 8};
std::unique_ptr<Literal> expected =
- Literal::CreateFullWithDescendingLayout<float>(output_dims, 9.0f);
+ LiteralUtil::CreateFullWithDescendingLayout<float>(output_dims, 9.0f);
ComputeAndCompareLiteral(&builder_, *expected, {}, DefaultErrorSpec());
}
@@ -395,7 +396,7 @@ XLA_TEST_P(ReduceWindowTest, R4SecondMinorStride) {
Array4D<float> input_array(2, 1, 27, 119);
input_array.FillRandom(2.0f);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input_array, LayoutUtil::MakeLayout({3, 2, 1, 0}));
XlaOp input;
auto input_data = CreateParameterAndTransferLiteral(
@@ -409,7 +410,7 @@ XLA_TEST_P(ReduceWindowTest, R4SecondMinorStride) {
auto res = ReferenceUtil::ReduceWindow4DAdd(
input_array, 0.0f, {1, 1, win_len, 1}, {1, 1, stride, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*res),
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*res),
{input_data.get()}, DefaultErrorSpec());
}
@@ -417,7 +418,7 @@ XLA_TEST_P(ReduceWindowTest, R4SecondMinorUnitStride) {
Array4D<float> input_array(3, 2, 4, 64);
input_array.FillRandom(2.0f);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input_array, LayoutUtil::MakeLayout({3, 2, 1, 0}));
XlaOp input;
auto input_data = CreateParameterAndTransferLiteral(
@@ -431,7 +432,7 @@ XLA_TEST_P(ReduceWindowTest, R4SecondMinorUnitStride) {
auto res = ReferenceUtil::ReduceWindow4DAdd(
input_array, 0.0f, {1, 1, win_len, 1}, {1, 1, stride, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*res),
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*res),
{input_data.get()}, DefaultErrorSpec());
}
@@ -439,7 +440,7 @@ XLA_TEST_P(ReduceWindowTest, R4SecondMinorWin) {
Array4D<float> input_array(1, 3, 12, 200);
input_array.FillRandom(2.0f);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input_array, LayoutUtil::MakeLayout({3, 2, 1, 0}));
XlaOp input;
auto input_data = CreateParameterAndTransferLiteral(
@@ -453,7 +454,7 @@ XLA_TEST_P(ReduceWindowTest, R4SecondMinorWin) {
auto res = ReferenceUtil::ReduceWindow4DAdd(
input_array, 0.0f, {1, 1, win_len, 1}, {1, 1, stride, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*res),
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*res),
{input_data.get()}, DefaultErrorSpec());
}
@@ -474,18 +475,18 @@ TEST_P(ReduceWindowTest, AmongMajor2DimsMultipleMinor) {
auto result = ReferenceUtil::ReduceWindow4DAdd(
input_array, 0.0f, {win_len, win_len, 1, 1},
{win_stride, win_stride, 1, 1}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray(*result), {},
- DefaultErrorSpec());
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateFromArray(*result),
+ {}, DefaultErrorSpec());
}
XLA_TEST_P(ReduceWindowTest, Add24In1152_NoOverlap) {
std::vector<float> input_vector(128 * 9, 1);
const auto input = CreateConstantFromLiteral(
- *Literal::CreateR1<float>(input_vector), &builder_);
+ *LiteralUtil::CreateR1<float>(input_vector), &builder_);
ReduceWindowAdd(input, {32}, {128}, Padding::kValid);
ComputeAndCompareLiteral(
&builder_,
- *Literal::CreateR1<float>({32, 32, 32, 32, 32, 32, 32, 32, 32}), {},
+ *LiteralUtil::CreateR1<float>({32, 32, 32, 32, 32, 32, 32, 32, 32}), {},
DefaultErrorSpec());
}
@@ -500,9 +501,9 @@ XLA_TEST_P(ReduceWindowTest, Add128In128Stride128) {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
const auto input = CreateConstantFromLiteral(
- *Literal::CreateR1<float>(input_vector), &builder_);
+ *LiteralUtil::CreateR1<float>(input_vector), &builder_);
ReduceWindowAdd(input, {128}, {128}, Padding::kValid);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateR1<float>({1088}), {},
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateR1<float>({1088}), {},
DefaultErrorSpec());
}
@@ -517,9 +518,9 @@ XLA_TEST_P(ReduceWindowTest, Add128In128) {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
const auto input = CreateConstantFromLiteral(
- *Literal::CreateR1<float>(input_vector), &builder_);
+ *LiteralUtil::CreateR1<float>(input_vector), &builder_);
ReduceWindowAdd(input, {128}, {1}, Padding::kValid);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateR1<float>({1088}), {},
+ ComputeAndCompareLiteral(&builder_, *LiteralUtil::CreateR1<float>({1088}), {},
DefaultErrorSpec());
}
@@ -536,14 +537,15 @@ TEST_P(ReduceWindowTest, R2ReduceWindowInceptionFromBroadcast) {
auto res = ReferenceUtil::ReduceWindow2DAdd(
input_array, 0.0f, {win_len, win_len}, {stride, stride}, padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray<float>(*res),
- {}, DefaultErrorSpec());
+ ComputeAndCompareLiteral(&builder_,
+ *LiteralUtil::CreateFromArray<float>(*res), {},
+ DefaultErrorSpec());
}
TEST_P(ReduceWindowTest, R2ReduceWindowNonOverlappingFromBroadcast) {
Array2D<float> input_array(6, 4, 1.0f);
- XlaOp input = builder_.Broadcast(
- CreateConstantFromLiteral(Literal::One(F32), &builder_), {6, 4});
+ XlaOp input = Broadcast(
+ CreateConstantFromLiteral(LiteralUtil::One(F32), &builder_), {6, 4});
Padding padding = Padding::kSame;
ReduceWindowAdd(input, {4, 2}, {3, 3}, padding);
@@ -551,8 +553,9 @@ TEST_P(ReduceWindowTest, R2ReduceWindowNonOverlappingFromBroadcast) {
auto res = ReferenceUtil::ReduceWindow2DAdd(input_array, 0.0f, {4, 2}, {3, 3},
padding);
- ComputeAndCompareLiteral(&builder_, *Literal::CreateFromArray<float>(*res),
- {}, DefaultErrorSpec());
+ ComputeAndCompareLiteral(&builder_,
+ *LiteralUtil::CreateFromArray<float>(*res), {},
+ DefaultErrorSpec());
}
INSTANTIATE_TEST_CASE_P(ReduceWindowTestInstance, ReduceWindowTest,
@@ -610,7 +613,7 @@ class R4ReduceWindowTest : public ReduceWindowTestBase,
param.base_bounds[2], param.base_bounds[3]);
input.FillIota(1);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout(param.layout));
XlaOp parameter;
auto input_arg = CreateParameterAndTransferLiteral(0, *input_literal, "p0",
@@ -622,12 +625,12 @@ class R4ReduceWindowTest : public ReduceWindowTestBase,
}
auto init_value =
- CreateConstantFromLiteral(*Literal::CreateR0(kInitValue), &b);
+ CreateConstantFromLiteral(*LiteralUtil::CreateR0(kInitValue), &b);
CHECK(param.reducer == kAdd || param.reducer == kMax);
auto computation = param.reducer == kAdd
? CreateScalarAddComputation(FloatType(), &b)
: CreateScalarMaxComputation(FloatType(), &b);
- b.ReduceWindowWithGeneralPadding(
+ ReduceWindowWithGeneralPadding(
/*operand=*/parameter,
/*init_value=*/init_value,
/*computation=*/computation,
@@ -648,7 +651,7 @@ class R4ReduceWindowTest : public ReduceWindowTestBase,
/*stride=*/param.strides,
/*padding=*/padding);
std::unique_ptr<Literal> expected_literal =
- Literal::CreateFromArray(*expected);
+ LiteralUtil::CreateFromArray(*expected);
const Shape& expected_shape_with_layout = ShapeUtil::MakeShapeWithLayout(
input_literal->shape().element_type(),
AsInt64Slice(expected_literal->shape().dimensions()), param.layout);
@@ -960,25 +963,25 @@ TEST_P(R3ReduceWindowTest, Add) {
Array3D<float> input(param.base_bounds[0], param.base_bounds[1],
param.base_bounds[2], 1.0f);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR3FromArray3DWithLayout(
+ LiteralUtil::CreateR3FromArray3DWithLayout(
input, LayoutUtil::MakeLayout(param.layout));
XlaOp parameter;
auto input_arg = CreateParameterAndTransferLiteral(0, *input_literal, "p0",
&b, &parameter);
auto init_value =
- CreateConstantFromLiteral(*Literal::CreateR0(kInitValue), &b);
- b.ReduceWindow(/*operand=*/parameter,
- /*init_value=*/init_value,
- /*computation=*/CreateScalarAddComputation(FloatType(), &b),
- /*window_dimensions=*/param.window_bounds,
- /*window_strides=*/param.strides, /*padding=*/param.padding);
+ CreateConstantFromLiteral(*LiteralUtil::CreateR0(kInitValue), &b);
+ ReduceWindow(/*operand=*/parameter,
+ /*init_value=*/init_value,
+ /*computation=*/CreateScalarAddComputation(FloatType(), &b),
+ /*window_dimensions=*/param.window_bounds,
+ /*window_strides=*/param.strides, /*padding=*/param.padding);
auto expected = ReferenceUtil::ReduceWindow3DAdd(
/*operand=*/input, /*init=*/kInitValue, /*window=*/param.window_bounds,
/*stride=*/param.strides, /*padding=*/param.padding);
- ComputeAndCompareLiteral(&b, *Literal::CreateFromArray(*expected),
+ ComputeAndCompareLiteral(&b, *LiteralUtil::CreateFromArray(*expected),
{input_arg.get()}, DefaultErrorSpec());
}
@@ -1094,7 +1097,7 @@ class R2ReduceWindowTest : public ReduceWindowTestBase,
const float kInitValue = 0.0f;
Array2D<float> input(param.base_bounds[0], param.base_bounds[1], 1.0f);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR2FromArray2DWithLayout(
+ LiteralUtil::CreateR2FromArray2DWithLayout(
input, LayoutUtil::MakeLayout(param.layout));
XlaOp parameter;
@@ -1108,8 +1111,8 @@ class R2ReduceWindowTest : public ReduceWindowTestBase,
? CreateScalarAddComputation(FloatType(), &b)
: CreateScalarMaxComputation(FloatType(), &b);
auto init_value =
- CreateConstantFromLiteral(*Literal::CreateR0(kInitValue), &b);
- b.ReduceWindowWithGeneralPadding(
+ CreateConstantFromLiteral(*LiteralUtil::CreateR0(kInitValue), &b);
+ ReduceWindowWithGeneralPadding(
/*operand=*/parameter,
/*init_value=*/init_value,
/*computation=*/computation,
@@ -1124,7 +1127,7 @@ class R2ReduceWindowTest : public ReduceWindowTestBase,
/*window=*/param.window_bounds,
/*stride=*/param.strides, /*padding=*/padding);
- ComputeAndCompareLiteral(&b, *Literal::CreateFromArray(*expected),
+ ComputeAndCompareLiteral(&b, *LiteralUtil::CreateFromArray(*expected),
{input_arg.get()}, DefaultErrorSpec());
}
};
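R2ReduceWindowTest above uses the general-padding variant, which takes explicit (low, high) padding pairs per dimension instead of a Padding enum. A sketch with illustrative padding:

  XlaComputation BuildGeneralPaddingExample() {
    XlaBuilder builder("general_padding_example");
    auto input = ConstantR1<float>(&builder, {1.0f, 2.0f, 3.0f, 4.0f});
    auto init = ConstantR0<float>(&builder, 0.0f);
    ReduceWindowWithGeneralPadding(
        /*operand=*/input, /*init_value=*/init,
        /*computation=*/CreateScalarAddComputation(F32, &builder),
        /*window_dimensions=*/{3}, /*window_strides=*/{1},
        /*padding=*/{{1, 1}});  // pad one element on each side of dim 0
    return builder.Build().ConsumeValueOrDie();
  }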
@@ -1293,7 +1296,7 @@ TEST_P(R1ReduceWindowTest, DoIt) {
std::vector<float> input_vector(param.base_bounds[0]);
std::iota(std::begin(input_vector), std::end(input_vector), 0);
std::unique_ptr<Literal> input_literal =
- Literal::CreateR1(tensorflow::gtl::ArraySlice<float>(input_vector));
+ LiteralUtil::CreateR1(tensorflow::gtl::ArraySlice<float>(input_vector));
XlaOp parameter;
auto input_arg = CreateParameterAndTransferLiteral(0, *input_literal, "p0",
&b, &parameter);
@@ -1305,8 +1308,8 @@ TEST_P(R1ReduceWindowTest, DoIt) {
? CreateScalarAddComputation(FloatType(), &b)
: CreateScalarMaxComputation(FloatType(), &b);
auto init_value =
- CreateConstantFromLiteral(*Literal::CreateR0(kInitValue), &b);
- b.ReduceWindowWithGeneralPadding(
+ CreateConstantFromLiteral(*LiteralUtil::CreateR0(kInitValue), &b);
+ ReduceWindowWithGeneralPadding(
/*operand=*/parameter,
/*init_value=*/init_value,
/*computation=*/computation,
@@ -1324,7 +1327,7 @@ TEST_P(R1ReduceWindowTest, DoIt) {
/*stride=*/param.strides,
/*padding=*/padding);
- ComputeAndCompareLiteral(&b, *Literal::CreateR1<float>(*expected),
+ ComputeAndCompareLiteral(&b, *LiteralUtil::CreateR1<float>(*expected),
{input_arg.get()}, DefaultErrorSpec());
}
diff --git a/tensorflow/compiler/xla/tests/replay_test.cc b/tensorflow/compiler/xla/tests/replay_test.cc
index 36d763b0f7..d544968648 100644
--- a/tensorflow/compiler/xla/tests/replay_test.cc
+++ b/tensorflow/compiler/xla/tests/replay_test.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/protobuf_util.h"
#include "tensorflow/compiler/xla/service/hlo.pb.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -39,8 +39,8 @@ class ReplayTest : public ClientLibraryTestBase {};
TEST_F(ReplayTest, TwoPlusTwoReplay) {
// Make 2+2 computation.
XlaBuilder builder(TestName());
- auto two = builder.ConstantR0<int32>(2);
- builder.Add(two, two);
+ auto two = ConstantR0<int32>(&builder, 2);
+ Add(two, two);
XlaComputation computation = builder.Build().ConsumeValueOrDie();
// Serialize it out.
@@ -70,9 +70,9 @@ TEST_F(ReplayTest, TwoPlusTwoReplay) {
XLA_TEST_F(ReplayTest, XPlusYReplayWithParameters) {
// Make computation.
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(S32, {}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(S32, {}), "y");
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(S32, {}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(S32, {}), "y");
+ Add(x, y);
XlaComputation computation = builder.Build().ConsumeValueOrDie();
// Serialize it out.
@@ -91,10 +91,10 @@ XLA_TEST_F(ReplayTest, XPlusYReplayWithParameters) {
// Run it.
std::unique_ptr<GlobalData> x_data =
- client_->TransferToServer(*Literal::CreateR0<int32>(2))
+ client_->TransferToServer(*LiteralUtil::CreateR0<int32>(2))
.ConsumeValueOrDie();
std::unique_ptr<GlobalData> y_data =
- client_->TransferToServer(*Literal::CreateR0<int32>(3))
+ client_->TransferToServer(*LiteralUtil::CreateR0<int32>(3))
.ConsumeValueOrDie();
std::unique_ptr<Literal> literal =
client_
@@ -111,13 +111,13 @@ TEST_F(ReplayTest, MapPlusTwoOverR1) {
// As above, but with map(+2) over some constant array.
XlaBuilder plus_two_builder("plus two");
auto input =
- plus_two_builder.Parameter(0, ShapeUtil::MakeShape(S32, {}), "input");
- plus_two_builder.Add(input, plus_two_builder.ConstantR0<int32>(2));
+ Parameter(&plus_two_builder, 0, ShapeUtil::MakeShape(S32, {}), "input");
+ Add(input, ConstantR0<int32>(&plus_two_builder, 2));
XlaComputation plus_two = plus_two_builder.Build().ConsumeValueOrDie();
XlaBuilder mapper_builder(TestName());
- auto original = mapper_builder.ConstantR1<int32>({1, 2, 3});
- mapper_builder.Map({original}, plus_two, {0});
+ auto original = ConstantR1<int32>(&mapper_builder, {1, 2, 3});
+ Map(&mapper_builder, {original}, plus_two, {0});
XlaComputation computation = mapper_builder.Build().ConsumeValueOrDie();
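Map keeps an explicit builder argument in the free-function API. A compact sketch of the map(+2) pattern from MapPlusTwoOverR1 above:

  XlaComputation BuildMapPlusTwo() {
    // Sub-computation: input + 2.
    XlaBuilder plus_two_builder("plus_two");
    auto input =
        Parameter(&plus_two_builder, 0, ShapeUtil::MakeShape(S32, {}), "input");
    Add(input, ConstantR0<int32>(&plus_two_builder, 2));
    XlaComputation plus_two = plus_two_builder.Build().ConsumeValueOrDie();
    // Map it over a constant vector; note the leading builder argument.
    XlaBuilder mapper_builder("map_example");
    auto original = ConstantR1<int32>(&mapper_builder, {1, 2, 3});
    Map(&mapper_builder, {original}, plus_two, /*dimensions=*/{0});
    return mapper_builder.Build().ConsumeValueOrDie();
  }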
diff --git a/tensorflow/compiler/xla/tests/reshape_motion_test.cc b/tensorflow/compiler/xla/tests/reshape_motion_test.cc
index da1b588ec4..7c0389cfa3 100644
--- a/tensorflow/compiler/xla/tests/reshape_motion_test.cc
+++ b/tensorflow/compiler/xla/tests/reshape_motion_test.cc
@@ -24,7 +24,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/reference_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@@ -44,11 +44,11 @@ using ReshapeMotionTest = ClientLibraryTestBase;
TEST_F(ReshapeMotionTest, ElementwiseOfReshapesWithNonSameInputShapes) {
XlaBuilder builder(TestName());
- auto a = builder.ConstantR2<int32>({{2, 3, 5}, {7, 11, 13}});
- auto b = builder.ConstantR2<int32>({{17, 19}, {23, 29}, {31, 37}});
- auto c = builder.Reshape(a, {6});
- auto d = builder.Reshape(b, {6});
- auto e = builder.Mul(c, d);
+ auto a = ConstantR2<int32>(&builder, {{2, 3, 5}, {7, 11, 13}});
+ auto b = ConstantR2<int32>(&builder, {{17, 19}, {23, 29}, {31, 37}});
+ auto c = Reshape(a, {6});
+ auto d = Reshape(b, {6});
+ Mul(c, d);
ComputeAndCompareR1<int32>(&builder, {34, 57, 115, 203, 341, 481}, {});
}
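Note the dropped "auto e =" in the hunk above: XlaBuilder::Build takes the last instruction added as the computation root, so bindings whose values are never reused can simply disappear. A sketch under the same assumptions:

  XlaComputation BuildReshapeMotionExample() {
    XlaBuilder builder("reshape_motion_example");
    auto a = ConstantR2<int32>(&builder, {{2, 3, 5}, {7, 11, 13}});
    auto b = ConstantR2<int32>(&builder, {{17, 19}, {23, 29}, {31, 37}});
    auto c = Reshape(a, {6});  // new-sizes-only overload flattens in order
    auto d = Reshape(b, {6});
    Mul(c, d);  // becomes the root; its handle needs no name
    return builder.Build().ConsumeValueOrDie();
  }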
diff --git a/tensorflow/compiler/xla/tests/reshape_test.cc b/tensorflow/compiler/xla/tests/reshape_test.cc
index a4580cd71d..46d91711a5 100644
--- a/tensorflow/compiler/xla/tests/reshape_test.cc
+++ b/tensorflow/compiler/xla/tests/reshape_test.cc
@@ -55,39 +55,39 @@ XLA_TEST_P(ReshapeTest, CollapseTrivial1x1) {
XlaBuilder builder(TestName());
Array2D<float> input_array(1, 1);
input_array.Fill(1.0f);
- auto input_literal = Literal::CreateR2FromArray2D(input_array);
+ auto input_literal = LiteralUtil::CreateR2FromArray2D(input_array);
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "parameter",
&builder, &parameter);
- builder.Collapse(/*operand=*/parameter, /*dimensions=*/{0, 1});
+ Collapse(/*operand=*/parameter, /*dimensions=*/{0, 1});
- auto expected_literal = Literal::CreateR1<float>({1.0f});
+ auto expected_literal = LiteralUtil::CreateR1<float>({1.0f});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, CollapseTrivialR1EmptyDims) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateR1<float>({1.0f});
+ auto input_literal = LiteralUtil::CreateR1<float>({1.0f});
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "parameter",
&builder, &parameter);
- builder.Collapse(/*operand=*/parameter, /*dimensions=*/{});
+ Collapse(/*operand=*/parameter, /*dimensions=*/{});
- auto expected_literal = Literal::CreateR1<float>({1.0f});
+ auto expected_literal = LiteralUtil::CreateR1<float>({1.0f});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, CollapseTrivialR1OnlyDim) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateR1<float>({1.0f});
+ auto input_literal = LiteralUtil::CreateR1<float>({1.0f});
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "parameter",
&builder, &parameter);
- builder.Collapse(/*operand=*/parameter, /*dimensions=*/{0});
+ Collapse(/*operand=*/parameter, /*dimensions=*/{0});
- auto expected_literal = Literal::CreateR1<float>({1.0f});
+ auto expected_literal = LiteralUtil::CreateR1<float>({1.0f});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
@@ -97,15 +97,15 @@ XLA_TEST_P(ReshapeTest, SingleElementArrayToScalar) {
XlaBuilder builder(TestName());
Array2D<float> input_array(1, 1);
input_array.Fill(1.0f);
- auto input_literal = Literal::CreateR2FromArray2D(input_array);
+ auto input_literal = LiteralUtil::CreateR2FromArray2D(input_array);
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "parameter",
&builder, &parameter);
- auto reshape = builder.Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1},
- /*new_sizes=*/{});
+ auto reshape = Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1},
+ /*new_sizes=*/{});
auto new_shape = builder.GetShape(reshape).ConsumeValueOrDie();
- auto expected_literal = Literal::CreateR0<float>(1.0f);
+ auto expected_literal = LiteralUtil::CreateR0<float>(1.0f);
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
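These reshape_test.cc hunks also pick up the literal split: the Literal data type now comes from literal.h, while the construction helpers are static members of LiteralUtil in literal_util.h. Hedged examples of the factory calls used above (each returns std::unique_ptr<Literal> at this revision; values are illustrative):

  auto zero  = LiteralUtil::CreateR0<float>(0.0f);                  // rank 0
  auto row   = LiteralUtil::CreateR2<float>({{1.0f, 2.0f, 3.0f}});  // 1x3
  auto empty = LiteralUtil::CreateR2FromArray2D<float>(Array2D<float>(0, 3));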
@@ -113,63 +113,54 @@ XLA_TEST_P(ReshapeTest, SingleElementArrayToScalar) {
XLA_TEST_P(ReshapeTest, ScalarToSingleElementArray) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> param0_literal = Literal::CreateR0<float>(1.0f);
+ std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR0<float>(1.0f);
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *param0_literal, "param0",
&builder, &parameter);
- auto a = builder.Neg(parameter);
- builder.Reshape(/*operand=*/a, /*dimensions=*/{}, /*new_sizes=*/{1});
+ auto a = Neg(parameter);
+ Reshape(/*operand=*/a, /*dimensions=*/{}, /*new_sizes=*/{1});
- auto expected_literal = Literal::CreateR1<float>({-1.0f});
+ auto expected_literal = LiteralUtil::CreateR1<float>({-1.0f});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
-// TODO(b/29185393): Make this work with the GPU backend. The GPU backend
-// does not handle zero-sized shapes correctly. Failed last on 2017-11-30
-// with an incorrect result rank.
-XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(Trivial0x3)) {
+XLA_TEST_P(ReshapeTest, Trivial0x3) {
XlaBuilder builder(TestName());
Array2D<float> input_array(0, 3);
- auto input_literal = Literal::CreateR2FromArray2D(input_array);
+ auto input_literal = LiteralUtil::CreateR2FromArray2D(input_array);
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Collapse(/*operand=*/parameter, /*dimensions=*/{0, 1});
- auto expected_literal = Literal::CreateR1<float>({});
+ Collapse(/*operand=*/parameter, /*dimensions=*/{0, 1});
+ auto expected_literal = LiteralUtil::CreateR1<float>({});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
-// TODO(b/29185393): Make this work with the GPU backend. The GPU backend
-// does not handle zero-sized shapes correctly. Failed last on 2017-05-15
-// with an incorrect result rank.
-XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(Trivial0x3WithParameter)) {
+XLA_TEST_P(ReshapeTest, Trivial0x3WithParameter) {
XlaBuilder builder(TestName());
std::unique_ptr<Literal> param0_literal =
- Literal::CreateR2FromArray2D<float>(Array2D<float>(0, 3));
+ LiteralUtil::CreateR2FromArray2D<float>(Array2D<float>(0, 3));
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *param0_literal, "param0",
&builder, &parameter);
- builder.Collapse(/*operand=*/parameter, /*dimensions=*/{0, 1});
- auto expected_literal = Literal::CreateR1<float>({});
+ Collapse(/*operand=*/parameter, /*dimensions=*/{0, 1});
+ auto expected_literal = LiteralUtil::CreateR1<float>({});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
-// TODO(b/29185393): Make this work with the GPU backend. The GPU backend
-// does not handle zero-sized shapes correctly. Failed last on 2017-11-30
-// with an incorrect result rank.
-XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(Trivial3x0)) {
+XLA_TEST_P(ReshapeTest, Trivial3x0) {
XlaBuilder builder(TestName());
Array2D<float> input_array(3, 0);
- auto input_literal = Literal::CreateR2FromArray2D(input_array);
+ auto input_literal = LiteralUtil::CreateR2FromArray2D(input_array);
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Collapse(/*operand=*/parameter, /*dimensions=*/{0, 1});
- auto expected_literal = Literal::CreateR1<float>({});
+ Collapse(/*operand=*/parameter, /*dimensions=*/{0, 1});
+ auto expected_literal = LiteralUtil::CreateR1<float>({});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
@@ -177,12 +168,12 @@ XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(Trivial3x0)) {
// Collapses a 2-dimensional row vector to 1 dimension.
XLA_TEST_P(ReshapeTest, Trivial1x3) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateR2<float>({{1.0f, 2.0f, 3.0f}});
+ auto input_literal = LiteralUtil::CreateR2<float>({{1.0f, 2.0f, 3.0f}});
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Collapse(/*operand=*/parameter, /*dimensions=*/{0, 1});
- auto expected_literal = Literal::CreateR1<float>({1.0f, 2.0f, 3.0f});
+ Collapse(/*operand=*/parameter, /*dimensions=*/{0, 1});
+ auto expected_literal = LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
@@ -190,30 +181,26 @@ XLA_TEST_P(ReshapeTest, Trivial1x3) {
// Collapses a 2-dimensional column vector to 1 dimension.
XLA_TEST_P(ReshapeTest, Trivial3x1) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateR2<float>({{1.0f}, {2.0f}, {3.0f}});
+ auto input_literal = LiteralUtil::CreateR2<float>({{1.0f}, {2.0f}, {3.0f}});
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Collapse(/*operand=*/parameter, /*dimensions=*/{0, 1});
- auto expected_literal = Literal::CreateR1<float>({1.0f, 2.0f, 3.0f});
+ Collapse(/*operand=*/parameter, /*dimensions=*/{0, 1});
+ auto expected_literal = LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
-// TODO(b/29185393): Make this work with the GPU backend. The GPU backend
-// does not handle zero-sized shapes correctly. Failed last on 2017-11-30
-// with an incorrect result rank.
-//
// Splits an empty vector into an empty matrix.
-XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(R1ToR2_0_To_2x0)) {
+XLA_TEST_P(ReshapeTest, R1ToR2_0_To_2x0) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateR1<float>({});
+ auto input_literal = LiteralUtil::CreateR1<float>({});
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{0},
- /*new_sizes=*/{2, 0});
- auto expected_literal = Literal::CreateR2<float>({{}, {}});
+ Reshape(/*operand=*/parameter, /*dimensions=*/{0},
+ /*new_sizes=*/{2, 0});
+ auto expected_literal = LiteralUtil::CreateR2<float>({{}, {}});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
@@ -222,32 +209,28 @@ XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(R1ToR2_0_To_2x0)) {
XLA_TEST_P(ReshapeTest, R1ToR2_6_To_2x3) {
XlaBuilder builder(TestName());
auto input_literal =
- Literal::CreateR1<float>({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f});
+ LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f});
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{0},
- /*new_sizes=*/{2, 3});
+ Reshape(/*operand=*/parameter, /*dimensions=*/{0},
+ /*new_sizes=*/{2, 3});
auto expected_literal =
- Literal::CreateR2<float>({{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}});
+ LiteralUtil::CreateR2<float>({{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
-// TODO(b/29185393): Make this work with the GPU backend. The GPU backend
-// does not handle zero-sized shapes correctly. Failed last on 2017-11-30
-// with an incorrect result rank.
-//
// Transposes a 2x0 array to a 0x2 array.
-XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(Reshape0x2To2x0)) {
+XLA_TEST_P(ReshapeTest, Reshape0x2To2x0) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateFromArray(Array2D<float>(0, 2));
+ auto input_literal = LiteralUtil::CreateFromArray(Array2D<float>(0, 2));
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1},
- /*new_sizes=*/{2, 0});
- auto expected_literal = Literal::CreateR2<float>({{}, {}});
+ Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1},
+ /*new_sizes=*/{2, 0});
+ auto expected_literal = LiteralUtil::CreateR2<float>({{}, {}});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
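The three-argument Reshape first permutes the operand dimensions by its dimensions argument, then refolds the elements into new_sizes. A minimal sketch of the row-major 6 -> 2x3 case exercised by R1ToR2_6_To_2x3 above:

  XlaComputation BuildReshapeExample() {
    XlaBuilder builder("reshape_example");
    auto v = ConstantR1<float>(&builder, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f});
    // Formerly: builder.Reshape(parameter, {0}, {2, 3});
    Reshape(v, /*dimensions=*/{0}, /*new_sizes=*/{2, 3});  // {{1,2,3},{4,5,6}}
    return builder.Build().ConsumeValueOrDie();
  }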
@@ -256,15 +239,15 @@ XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(Reshape0x2To2x0)) {
XLA_TEST_P(ReshapeTest, ReshapeRowToCol) {
XlaBuilder builder(TestName());
auto simple = MakeLinspaceArray2D(1.0f, 3.0f, 1, 3);
- auto input_literal = Literal::CreateFromArray(*simple);
+ auto input_literal = LiteralUtil::CreateFromArray(*simple);
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1},
- /*new_sizes=*/{3, 1});
+ Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1},
+ /*new_sizes=*/{3, 1});
auto expected = ReferenceUtil::TransposeArray2D(*simple);
- auto expected_literal = Literal::CreateFromArray(*expected);
+ auto expected_literal = LiteralUtil::CreateFromArray(*expected);
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
@@ -273,32 +256,28 @@ XLA_TEST_P(ReshapeTest, ReshapeRowToCol) {
XLA_TEST_P(ReshapeTest, TransposeAsReshape) {
XlaBuilder builder(TestName());
auto a4x3 = MakeLinspaceArray2D(1.0f, 12.0f, 4, 3);
- auto input_literal = Literal::CreateFromArray(*a4x3);
+ auto input_literal = LiteralUtil::CreateFromArray(*a4x3);
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{1, 0},
- /*new_sizes=*/{3, 4});
+ Reshape(/*operand=*/parameter, /*dimensions=*/{1, 0},
+ /*new_sizes=*/{3, 4});
auto expected = ReferenceUtil::TransposeArray2D(*a4x3);
- auto expected_literal = Literal::CreateFromArray(*expected);
+ auto expected_literal = LiteralUtil::CreateFromArray(*expected);
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
-// TODO(b/29185393): Make this work with the GPU backend. The GPU backend
-// does not handle zero-sized shapes correctly. Failed last on 2017-11-30
-// with an incorrect result rank.
-//
// Transposes a 0x4 array with XlaBuilder::Transpose.
-XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(Transpose0x4)) {
+XLA_TEST_P(ReshapeTest, Transpose0x4) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateFromArray(Array2D<float>(0, 4));
+ auto input_literal = LiteralUtil::CreateFromArray(Array2D<float>(0, 4));
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Transpose(parameter, {1, 0});
- auto expected_literal = Literal::CreateR2<float>({{}, {}, {}, {}});
+ Transpose(parameter, {1, 0});
+ auto expected_literal = LiteralUtil::CreateR2<float>({{}, {}, {}, {}});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
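Collapse and Transpose follow the same free-function form, reading left to right from the operand. A sketch with illustrative shapes:

  XlaComputation BuildCollapseTransposeExample() {
    XlaBuilder builder("collapse_transpose_example");
    auto m = ConstantR2<float>(&builder, {{1.0f, 2.0f, 3.0f},
                                          {4.0f, 5.0f, 6.0f}});
    auto t = Transpose(m, /*permutation=*/{1, 0});  // 2x3 -> 3x2
    Collapse(t, /*dimensions=*/{0, 1});             // 3x2 -> length-6 vector
    return builder.Build().ConsumeValueOrDie();
  }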
@@ -307,49 +286,43 @@ XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(Transpose0x4)) {
XLA_TEST_P(ReshapeTest, Transpose4x3) {
XlaBuilder builder(TestName());
auto a4x3 = MakeLinspaceArray2D(1.0f, 12.0f, 4, 3);
- auto input_literal = Literal::CreateFromArray(*a4x3);
+ auto input_literal = LiteralUtil::CreateFromArray(*a4x3);
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Transpose(parameter, {1, 0});
+ Transpose(parameter, {1, 0});
auto expected = ReferenceUtil::TransposeArray2D(*a4x3);
- auto expected_literal = Literal::CreateFromArray(*expected);
+ auto expected_literal = LiteralUtil::CreateFromArray(*expected);
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
-// TODO(b/29185393): Make this work with the GPU backend. The GPU backend
-// does not handle zero-sized shapes correctly. Failed last on 2017-11-30
-// with an incorrect result rank.
-//
// Reshapes an empty 2-dimensional array with dimensions that are not just a
// rearrangement of the originals (split), but no reordering (no shuffle).
-XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(ReshapeSplitNoShuffleZeroElements)) {
+XLA_TEST_P(ReshapeTest, ReshapeSplitNoShuffleZeroElements) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateFromArray(Array2D<float>(6, 0));
+ auto input_literal = LiteralUtil::CreateFromArray(Array2D<float>(6, 0));
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1},
- /*new_sizes=*/{2, 3, 0, 0});
- auto expected_literal = Literal::CreateFromArray(Array4D<float>(2, 3, 0, 0));
+ Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1},
+ /*new_sizes=*/{2, 3, 0, 0});
+ auto expected_literal =
+ LiteralUtil::CreateFromArray(Array4D<float>(2, 3, 0, 0));
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
-// TODO(b/29185393): Make this work with the GPU backend. The GPU backend
-// does not handle zero-sized shapes correctly. Failed last on 2017-11-30
-// with an incorrect result rank.
-XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(ReshapeR4ToR2ZeroElements)) {
+XLA_TEST_P(ReshapeTest, ReshapeR4ToR2ZeroElements) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateFromArray(Array4D<float>(2, 3, 4, 0));
+ auto input_literal = LiteralUtil::CreateFromArray(Array4D<float>(2, 3, 4, 0));
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1, 2, 3},
- /*new_sizes=*/{24, 0});
- auto expected_literal = Literal::CreateFromArray(Array2D<float>(24, 0));
+ Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1, 2, 3},
+ /*new_sizes=*/{24, 0});
+ auto expected_literal = LiteralUtil::CreateFromArray(Array2D<float>(24, 0));
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
@@ -359,32 +332,28 @@ XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(ReshapeR4ToR2ZeroElements)) {
XLA_TEST_P(ReshapeTest, ReshapeSplitNoShuffle) {
XlaBuilder builder(TestName());
auto a4x3 = MakeLinspaceArray2D(1.0f, 12.0f, 4, 3);
- auto input_literal = Literal::CreateFromArray(*a4x3);
+ auto input_literal = LiteralUtil::CreateFromArray(*a4x3);
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1},
- /*new_sizes=*/{2, 6});
+ Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1},
+ /*new_sizes=*/{2, 6});
auto expected = MakeLinspaceArray2D(1.0f, 12.0f, 2, 6);
- auto expected_literal = Literal::CreateFromArray(*expected);
+ auto expected_literal = LiteralUtil::CreateFromArray(*expected);
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
-// TODO(b/29185393): Make this work with the GPU backend. The GPU backend
-// does not handle zero-sized shapes correctly. Failed last on 2017-11-30
-// with an incorrect result rank.
-//
-XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(ReshapeSplitAndShuffleZeroElements)) {
+XLA_TEST_P(ReshapeTest, ReshapeSplitAndShuffleZeroElements) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateFromArray(Array2D<float>(0, 6));
+ auto input_literal = LiteralUtil::CreateFromArray(Array2D<float>(0, 6));
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{1, 0},
- /*new_sizes=*/{3, 0});
- auto expected_literal = Literal::CreateFromArray(Array2D<float>(3, 0));
+ Reshape(/*operand=*/parameter, /*dimensions=*/{1, 0},
+ /*new_sizes=*/{3, 0});
+ auto expected_literal = LiteralUtil::CreateFromArray(Array2D<float>(3, 0));
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
@@ -394,15 +363,15 @@ XLA_TEST_P(ReshapeTest, DISABLED_ON_GPU(ReshapeSplitAndShuffleZeroElements)) {
XLA_TEST_P(ReshapeTest, ReshapeSplitAndShuffle) {
XlaBuilder builder(TestName());
auto a4x3 = MakeLinspaceArray2D(1.0f, 12.0f, 4, 3);
- auto input_literal = Literal::CreateFromArray(*a4x3);
+ auto input_literal = LiteralUtil::CreateFromArray(*a4x3);
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{1, 0},
- /*new_sizes=*/{2, 6});
+ Reshape(/*operand=*/parameter, /*dimensions=*/{1, 0},
+ /*new_sizes=*/{2, 6});
Array2D<float> expected({{1.0f, 4.0f, 7.0f, 10.0f, 2.0f, 5.0f},
{8.0f, 11.0f, 3.0f, 6.0f, 9.0f, 12.0f}});
- auto expected_literal = Literal::CreateFromArray(expected);
+ auto expected_literal = LiteralUtil::CreateFromArray(expected);
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
@@ -420,13 +389,13 @@ static Array3D<float> ArrayForDocR3Tests() {
XLA_TEST_P(ReshapeTest, DocR3_R1_Collapse_012) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateFromArray(ArrayForDocR3Tests());
+ auto input_literal = LiteralUtil::CreateFromArray(ArrayForDocR3Tests());
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1, 2},
- /*new_sizes=*/{24});
- auto expected_literal = Literal::CreateR1<float>(
+ Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1, 2},
+ /*new_sizes=*/{24});
+ auto expected_literal = LiteralUtil::CreateR1<float>(
{10, 11, 12, 15, 16, 17, 20, 21, 22, 25, 26, 27,
30, 31, 32, 35, 36, 37, 40, 41, 42, 45, 46, 47});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
@@ -435,33 +404,33 @@ XLA_TEST_P(ReshapeTest, DocR3_R1_Collapse_012) {
XLA_TEST_P(ReshapeTest, DocR3_R2_Collapse_012_Refine_83) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateFromArray(ArrayForDocR3Tests());
+ auto input_literal = LiteralUtil::CreateFromArray(ArrayForDocR3Tests());
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1, 2},
- /*new_sizes=*/{8, 3});
- auto expected_literal = Literal::CreateR2<float>({{10, 11, 12},
- {15, 16, 17},
- {20, 21, 22},
- {25, 26, 27},
- {30, 31, 32},
- {35, 36, 37},
- {40, 41, 42},
- {45, 46, 47}});
+ Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1, 2},
+ /*new_sizes=*/{8, 3});
+ auto expected_literal = LiteralUtil::CreateR2<float>({{10, 11, 12},
+ {15, 16, 17},
+ {20, 21, 22},
+ {25, 26, 27},
+ {30, 31, 32},
+ {35, 36, 37},
+ {40, 41, 42},
+ {45, 46, 47}});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, DocR3_R1_Collapse_120) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateFromArray(ArrayForDocR3Tests());
+ auto input_literal = LiteralUtil::CreateFromArray(ArrayForDocR3Tests());
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{1, 2, 0},
- /*new_sizes=*/{24});
- auto expected_literal = Literal::CreateR1<float>(
+ Reshape(/*operand=*/parameter, /*dimensions=*/{1, 2, 0},
+ /*new_sizes=*/{24});
+ auto expected_literal = LiteralUtil::CreateR1<float>(
{10, 20, 30, 40, 11, 21, 31, 41, 12, 22, 32, 42,
15, 25, 35, 45, 16, 26, 36, 46, 17, 27, 37, 47});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
@@ -470,33 +439,33 @@ XLA_TEST_P(ReshapeTest, DocR3_R1_Collapse_120) {
XLA_TEST_P(ReshapeTest, DocR3_R2_Collapse_120_Refine_83) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateFromArray(ArrayForDocR3Tests());
+ auto input_literal = LiteralUtil::CreateFromArray(ArrayForDocR3Tests());
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{1, 2, 0},
- /*new_sizes=*/{8, 3});
- auto expected_literal = Literal::CreateR2<float>({{10, 20, 30},
- {40, 11, 21},
- {31, 41, 12},
- {22, 32, 42},
- {15, 25, 35},
- {45, 16, 26},
- {36, 46, 17},
- {27, 37, 47}});
+ Reshape(/*operand=*/parameter, /*dimensions=*/{1, 2, 0},
+ /*new_sizes=*/{8, 3});
+ auto expected_literal = LiteralUtil::CreateR2<float>({{10, 20, 30},
+ {40, 11, 21},
+ {31, 41, 12},
+ {22, 32, 42},
+ {15, 25, 35},
+ {45, 16, 26},
+ {36, 46, 17},
+ {27, 37, 47}});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, DocR3_R3_Collapse_120_Refine_262) {
XlaBuilder builder(TestName());
- auto input_literal = Literal::CreateFromArray(ArrayForDocR3Tests());
+ auto input_literal = LiteralUtil::CreateFromArray(ArrayForDocR3Tests());
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{1, 2, 0},
- /*new_sizes=*/{2, 6, 2});
- auto expected_literal = Literal::CreateR3<float>(
+ Reshape(/*operand=*/parameter, /*dimensions=*/{1, 2, 0},
+ /*new_sizes=*/{2, 6, 2});
+ auto expected_literal = LiteralUtil::CreateR3<float>(
{{{10, 20}, {30, 40}, {11, 21}, {31, 41}, {12, 22}, {32, 42}},
{{15, 25}, {35, 45}, {16, 26}, {36, 46}, {17, 27}, {37, 47}}});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
@@ -523,12 +492,12 @@ XLA_TEST_P(ReshapeTest, FullyConnectedCollapse) {
Array4D<float> t2x2x2x3(2, 2, 2, 3);
auto filler2x3 = MakeLinspaceArray2D(1.0f, 6.0f, 2, 3);
t2x2x2x3.FillWithYX(*filler2x3);
- auto input_literal = Literal::CreateFromArray(t2x2x2x3);
+ auto input_literal = LiteralUtil::CreateFromArray(t2x2x2x3);
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Collapse(/*operand=*/parameter, /*dimensions=*/{1, 2, 3});
- auto expected_literal = Literal::CreateR2<float>(
+ Collapse(/*operand=*/parameter, /*dimensions=*/{1, 2, 3});
+ auto expected_literal = LiteralUtil::CreateR2<float>(
{{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
6.0f}});
@@ -548,15 +517,15 @@ XLA_TEST_P(ReshapeTest, FullyConnectedCollapseDesugared) {
t(1, 0, 0, 1) = 5;
t(1, 0, 1, 0) = 6;
t(1, 0, 1, 1) = 7;
- auto input_literal = Literal::CreateFromArray(t);
+ auto input_literal = LiteralUtil::CreateFromArray(t);
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1, 2, 3},
- /*new_sizes=*/{2, 4});
+ Reshape(/*operand=*/parameter, /*dimensions=*/{0, 1, 2, 3},
+ /*new_sizes=*/{2, 4});
auto expected_literal =
- Literal::CreateR2<float>({{0, 1, 2, 3}, {4, 5, 6, 7}});
+ LiteralUtil::CreateR2<float>({{0, 1, 2, 3}, {4, 5, 6, 7}});
ComputeAndCompareLiteral(&builder, *expected_literal, {input.get()},
zero_error_spec_);
}
@@ -575,9 +544,9 @@ XLA_TEST_P(ReshapeTest, ToScalar) {
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, input_literal, "input",
&b, &parameter);
- b.Reshape(parameter, dimensions, {});
+ Reshape(parameter, dimensions, {});
- auto expected_literal = Literal::CreateR0<float>(83.0f);
+ auto expected_literal = LiteralUtil::CreateR0<float>(83.0f);
ComputeAndCompareLiteral(&b, *expected_literal, {input.get()},
zero_error_spec_);
}
@@ -585,11 +554,11 @@ XLA_TEST_P(ReshapeTest, ToScalar) {
XLA_TEST_P(ReshapeTest, BadDimensions) {
XlaBuilder b(TestName());
- auto input_literal = Literal::CreateR1<float>({1.0f});
+ auto input_literal = LiteralUtil::CreateR1<float>({1.0f});
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input", &b,
&parameter);
- b.Reshape(parameter, {}, {});
+ Reshape(parameter, {}, {});
EXPECT_THAT(
ExecuteToString(&b, {}),
::testing::HasSubstr("not a permutation of the operand dimensions"));
@@ -597,11 +566,11 @@ XLA_TEST_P(ReshapeTest, BadDimensions) {
XLA_TEST_P(ReshapeTest, BadNewSizes) {
XlaBuilder b(TestName());
- auto input_literal = Literal::CreateR1<float>({1.0f, 2.0f});
+ auto input_literal = LiteralUtil::CreateR1<float>({1.0f, 2.0f});
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input", &b,
&parameter);
- b.Reshape(parameter, {1}, {});
+ Reshape(parameter, {1}, {});
EXPECT_THAT(ExecuteToString(&b, {}),
::testing::HasSubstr("mismatched element counts"));
}
@@ -609,7 +578,8 @@ XLA_TEST_P(ReshapeTest, BadNewSizes) {
XLA_TEST_P(ReshapeTest, R4Dim0MinorLayoutToR2Dim0MajorLayout) {
XlaBuilder builder(TestName());
// clang-format off
- auto input_literal = Literal::CreateR4FromArray4DWithLayout(Array4D<float>{
+ auto input_literal = LiteralUtil::CreateR4FromArray4DWithLayout(
+ Array4D<float>{
{
{
{0, 1},
@@ -637,7 +607,7 @@ XLA_TEST_P(ReshapeTest, R4Dim0MinorLayoutToR2Dim0MajorLayout) {
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{0, 1, 2, 3}, /*new_sizes=*/{2, 8});
+ Reshape(parameter, /*dimensions=*/{0, 1, 2, 3}, /*new_sizes=*/{2, 8});
Array2D<float> expected_array({
{0, 1, 2, 3, 100, 101, 102, 103},
@@ -654,16 +624,16 @@ XLA_TEST_P(ReshapeTest, R4Dim0MinorLayoutToR2Dim0MajorLayout) {
->ExecuteAndTransfer(computation, {input.get()}, &execution_options)
.ConsumeValueOrDie();
std::unique_ptr<Literal> expected =
- Literal::CreateR2FromArray2D<float>(expected_array);
+ LiteralUtil::CreateR2FromArray2D<float>(expected_array);
if (use_bfloat16()) {
- expected = Literal::ConvertF32ToBF16(*expected);
+ expected = LiteralUtil::ConvertF32ToBF16(*expected);
}
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *actual));
}
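// (In bfloat16 mode the F32 expectation is first truncated to bfloat16 --
// 8 exponent bits, 7 fraction bits -- so the equality check runs at the
// same precision the device computed with.)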
XLA_TEST_P(ReshapeTest, R2ToR4_3x8_To_3x2x1x4) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> input_literal = Literal::CreateR2<float>({
+ std::unique_ptr<Literal> input_literal = LiteralUtil::CreateR2<float>({
{0, 1, 2, 3, 4, 5, 6, 7},
{100, 101, 102, 103, 104, 105, 106, 107},
{200, 201, 202, 203, 204, 205, 206, 207},
@@ -671,10 +641,10 @@ XLA_TEST_P(ReshapeTest, R2ToR4_3x8_To_3x2x1x4) {
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{0, 1}, /*new_sizes=*/{3, 2, 1, 4});
+ Reshape(parameter, /*dimensions=*/{0, 1}, /*new_sizes=*/{3, 2, 1, 4});
// clang-format off
- auto expected_literal = Literal::CreateR4<float>({
+ auto expected_literal = LiteralUtil::CreateR4<float>({
{{{0, 1, 2, 3}},
{{4, 5, 6, 7}}},
{{{100, 101, 102, 103}},
@@ -690,7 +660,7 @@ XLA_TEST_P(ReshapeTest, R2ToR4_3x8_To_3x2x1x4) {
// Tests R2->R4 reshape with the reshape dimensions {1, 0}.
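// (Reshape's `dimensions` argument gives the order in which the operand's
// dimensions are traversed before `new_sizes` is applied; {1, 0} therefore
// walks the 3x8 input column by column, producing 0, 100, 200, 1, ... in
// the expected literal below rather than 0, 1, 2, 3, ...)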
XLA_TEST_P(ReshapeTest, R2ToR4_3x8_To_3x2x1x4_Dimensions_10) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> input_literal = Literal::CreateR2<float>({
+ std::unique_ptr<Literal> input_literal = LiteralUtil::CreateR2<float>({
{0, 1, 2, 3, 4, 5, 6, 7},
{100, 101, 102, 103, 104, 105, 106, 107},
{200, 201, 202, 203, 204, 205, 206, 207},
@@ -698,10 +668,10 @@ XLA_TEST_P(ReshapeTest, R2ToR4_3x8_To_3x2x1x4_Dimensions_10) {
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *input_literal, "input",
&builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{1, 0}, /*new_sizes=*/{3, 2, 1, 4});
+ Reshape(parameter, /*dimensions=*/{1, 0}, /*new_sizes=*/{3, 2, 1, 4});
// clang-format off
- auto expected_literal = Literal::CreateR4<float>({
+ auto expected_literal = LiteralUtil::CreateR4<float>({
{{{0, 100, 200, 1}},
{{101, 201, 2, 102}}},
{{{202, 3, 103, 203}},
@@ -723,15 +693,15 @@ XLA_TEST_P(ReshapeTest, R4ToR2_2x1x1x1_To_2x1) {
[&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
XlaOp parameter;
auto input_data = CreateParameterAndTransferLiteral(
0, *input_literal, "input", &builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{0, 1, 2, 3}, /*new_sizes=*/{2, 1});
+ Reshape(parameter, /*dimensions=*/{0, 1, 2, 3}, /*new_sizes=*/{2, 1});
std::unique_ptr<Literal> expected =
- Literal::ReshapeSlice({2, 1}, {1, 0}, *input_literal);
+ LiteralUtil::ReshapeSlice({2, 1}, {1, 0}, *input_literal);
ComputeAndCompareLiteral(&builder, *expected, {input_data.get()},
zero_error_spec_);
}
@@ -745,15 +715,15 @@ XLA_TEST_P(ReshapeTest, R4ToR2_2x1x4x1_To_4x2) {
[&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
XlaOp parameter;
auto input_data = CreateParameterAndTransferLiteral(
0, *input_literal, "input", &builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{0, 1, 2, 3}, /*new_sizes=*/{4, 2});
+ Reshape(parameter, /*dimensions=*/{0, 1, 2, 3}, /*new_sizes=*/{4, 2});
std::unique_ptr<Literal> expected =
- Literal::ReshapeSlice({4, 2}, {1, 0}, *input_literal);
+ LiteralUtil::ReshapeSlice({4, 2}, {1, 0}, *input_literal);
ComputeAndCompareLiteral(&builder, *expected, {input_data.get()},
zero_error_spec_);
}
@@ -768,20 +738,20 @@ XLA_TEST_P(ReshapeTest, R4ToR2_5x10x2x3_To_5x60_Dimensions_0213) {
[&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
XlaOp parameter;
auto input_data = CreateParameterAndTransferLiteral(
0, *input_literal, "input", &builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{0, 2, 1, 3},
- /*new_sizes=*/{5, 60});
+ Reshape(parameter, /*dimensions=*/{0, 2, 1, 3},
+ /*new_sizes=*/{5, 60});
Array2D<float> expected_array(5, 60);
input.Each([&](tensorflow::gtl::ArraySlice<int64> indices, float* cell) {
expected_array(indices[0], indices[2] * 30 + indices[1] * 3 + indices[3]) =
*cell;
});
- auto expected = Literal::CreateR2FromArray2D(expected_array);
+ auto expected = LiteralUtil::CreateR2FromArray2D(expected_array);
ComputeAndCompareLiteral(&builder, *expected, {input_data.get()},
zero_error_spec_);
}
@@ -795,13 +765,13 @@ XLA_TEST_P(ReshapeTest, NoopReshape) {
[&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input_array, LayoutUtil::MakeLayout({1, 2, 3, 0}));
XlaOp parameter;
auto input_data = CreateParameterAndTransferLiteral(
0, *input_literal, "input", &builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{3, 0, 1, 2},
- /*new_sizes=*/{7, 2, 3, 5});
+ Reshape(parameter, /*dimensions=*/{3, 0, 1, 2},
+ /*new_sizes=*/{7, 2, 3, 5});
XlaComputation computation = builder.Build().ConsumeValueOrDie();
ExecutionOptions execution_options = execution_options_;
@@ -817,7 +787,7 @@ XLA_TEST_P(ReshapeTest, NoopReshape) {
// Since the reshape is a no-op, verify that it does not change the underlying
// data.
if (use_bfloat16()) {
- auto expected = Literal::ConvertF32ToBF16(*input_literal);
+ auto expected = LiteralUtil::ConvertF32ToBF16(*input_literal);
EXPECT_EQ(expected->data<bfloat16>(), output_literal->data<bfloat16>());
} else {
EXPECT_EQ(input_literal->data<float>(), output_literal->data<float>());
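    // (The permutation {3, 0, 1, 2} composed with the operand's {1, 2, 3, 0}
    // layout and the explicitly requested output shape leaves the physical
    // element order unchanged, so bitwise buffer equality is the right check.)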
@@ -826,21 +796,21 @@ XLA_TEST_P(ReshapeTest, NoopReshape) {
XLA_TEST_P(ReshapeTest, R4ToR4Reshape_Trivial) {
XlaBuilder builder(TestName());
- auto literal_1x2x3x4 = Literal::CreateR4<float>(
+ auto literal_1x2x3x4 = LiteralUtil::CreateR4<float>(
{{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
{{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}});
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *literal_1x2x3x4, "input",
&builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{0, 1, 2, 3},
- /*new_sizes=*/{1, 2, 3, 4});
+ Reshape(parameter, /*dimensions=*/{0, 1, 2, 3},
+ /*new_sizes=*/{1, 2, 3, 4});
ComputeAndCompareLiteral(&builder, *literal_1x2x3x4, {input.get()});
}
XLA_TEST_P(ReshapeTest, R4ToR4Reshape) {
- auto literal_1x2x3x4 = Literal::CreateR4<float>(
+ auto literal_1x2x3x4 = LiteralUtil::CreateR4<float>(
{{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
{{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}});
@@ -848,11 +818,11 @@ XLA_TEST_P(ReshapeTest, R4ToR4Reshape) {
XlaOp parameter;
auto input = CreateParameterAndTransferLiteral(0, *literal_1x2x3x4, "input",
&builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{1, 3, 2, 0},
- /*new_sizes=*/{2, 4, 3, 1});
+ Reshape(parameter, /*dimensions=*/{1, 3, 2, 0},
+ /*new_sizes=*/{2, 4, 3, 1});
// clang-format off
- auto expected_2x4x3x1 = Literal::CreateR4<float>(
+ auto expected_2x4x3x1 = LiteralUtil::CreateR4<float>(
{{{{1}, {5}, {9}},
{{2}, {6}, {10}},
{{3}, {7}, {11}},
@@ -876,17 +846,17 @@ XLA_TEST_P(ReshapeTest, R4TwoMinorTransposeSimple) {
[&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
XlaBuilder builder(TestName());
XlaOp parameter;
auto input_data = CreateParameterAndTransferLiteral(
0, *input_literal, "input", &builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{0, 1, 3, 2},
- /*new_sizes=*/new_bounds);
+ Reshape(parameter, /*dimensions=*/{0, 1, 3, 2},
+ /*new_sizes=*/new_bounds);
std::unique_ptr<Literal> expected =
- Literal::ReshapeSlice(new_bounds, {2, 3, 1, 0}, *input_literal)
+ LiteralUtil::ReshapeSlice(new_bounds, {2, 3, 1, 0}, *input_literal)
->Relayout(LayoutUtil::MakeLayout({3, 2, 1, 0}));
// Specify the requested output shape explicitly to ensure that this reshape
@@ -905,17 +875,17 @@ XLA_TEST_P(ReshapeTest, R4TwoMinorTransposeMajorFirstEffectiveR2) {
[&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
XlaBuilder builder(TestName());
XlaOp parameter;
auto input_data = CreateParameterAndTransferLiteral(
0, *input_literal, "input", &builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{0, 1, 3, 2},
- /*new_sizes=*/new_bounds);
+ Reshape(parameter, /*dimensions=*/{0, 1, 3, 2},
+ /*new_sizes=*/new_bounds);
std::unique_ptr<Literal> expected =
- Literal::ReshapeSlice(new_bounds, {2, 3, 1, 0}, *input_literal)
+ LiteralUtil::ReshapeSlice(new_bounds, {2, 3, 1, 0}, *input_literal)
->Relayout(LayoutUtil::MakeLayout({3, 2, 1, 0}));
// Specify the requested output shape explicitly to ensure that this reshape
@@ -934,17 +904,17 @@ XLA_TEST_P(ReshapeTest, R4TwoMinorTransposeMajorFirstMinorEffectiveR1) {
[&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
XlaBuilder builder(TestName());
XlaOp parameter;
auto input_data = CreateParameterAndTransferLiteral(
0, *input_literal, "input", &builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{0, 1, 3, 2},
- /*new_sizes=*/new_bounds);
+ Reshape(parameter, /*dimensions=*/{0, 1, 3, 2},
+ /*new_sizes=*/new_bounds);
std::unique_ptr<Literal> expected =
- Literal::ReshapeSlice(new_bounds, {2, 3, 1, 0}, *input_literal)
+ LiteralUtil::ReshapeSlice(new_bounds, {2, 3, 1, 0}, *input_literal)
->Relayout(LayoutUtil::MakeLayout({3, 2, 1, 0}));
// Specify the requested output shape explicitly to ensure that this reshape
@@ -964,17 +934,17 @@ XLA_TEST_P(ReshapeTest, R4TwoMinorTransposeMajorFirstMinorEffectiveR1InR2) {
[&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
XlaBuilder builder(TestName());
XlaOp parameter;
auto input_data = CreateParameterAndTransferLiteral(
0, *input_literal, "input", &builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{0, 1, 3, 2},
- /*new_sizes=*/new_bounds);
+ Reshape(parameter, /*dimensions=*/{0, 1, 3, 2},
+ /*new_sizes=*/new_bounds);
std::unique_ptr<Literal> expected =
- Literal::ReshapeSlice(new_bounds, {2, 3, 1, 0}, *input_literal)
+ LiteralUtil::ReshapeSlice(new_bounds, {2, 3, 1, 0}, *input_literal)
->Relayout(LayoutUtil::MakeLayout({3, 2, 1, 0}));
// Specify the requested output shape explicitly to ensure that this reshape
@@ -993,17 +963,17 @@ XLA_TEST_P(ReshapeTest, R4TwoMinorTransposeTrivialR2) {
[&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
- Literal::CreateR4FromArray4DWithLayout(
+ LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({0, 1, 2, 3}));
XlaBuilder builder(TestName());
XlaOp parameter;
auto input_data = CreateParameterAndTransferLiteral(
0, *input_literal, "input", &builder, &parameter);
- builder.Reshape(parameter, /*dimensions=*/{1, 0, 2, 3},
- /*new_sizes=*/new_bounds);
+ Reshape(parameter, /*dimensions=*/{1, 0, 2, 3},
+ /*new_sizes=*/new_bounds);
std::unique_ptr<Literal> expected =
- Literal::ReshapeSlice(new_bounds, {1, 0, 2, 3}, *input_literal)
+ LiteralUtil::ReshapeSlice(new_bounds, {1, 0, 2, 3}, *input_literal)
->Relayout(input_literal->shape().layout());
// Specify the requested output shape explicitly to ensure that this reshape
diff --git a/tensorflow/compiler/xla/tests/reverse_test.cc b/tensorflow/compiler/xla/tests/reverse_test.cc
index e7bd142dc9..23f0d26d93 100644
--- a/tensorflow/compiler/xla/tests/reverse_test.cc
+++ b/tensorflow/compiler/xla/tests/reverse_test.cc
@@ -82,12 +82,12 @@ TEST_P(FloatReverseTest, Reverses) {
std::vector<float> input_vector(
ShapeUtil::ElementsIn(ShapeUtil::MakeShape(F32, spec.input_dims)));
std::iota(input_vector.begin(), input_vector.end(), 0.0);
- auto r1_literal = Literal::CreateR1<float>(input_vector);
+ auto r1_literal = LiteralUtil::CreateR1<float>(input_vector);
auto input_literal = r1_literal->Reshape(spec.input_dims).ConsumeValueOrDie();
XlaBuilder builder(TestName());
auto a = AddParam(*input_literal, &builder);
- builder.Rev(a, spec.reversal);
+ Rev(a, spec.reversal);
std::unique_ptr<Literal> expected = input_literal->CloneToUnique();
std::vector<int64> output_indices(spec.input_dims.size());
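  // (Rev reverses the element order along each dimension listed in
  // `spec.reversal`; e.g. reversing {{1, 2}, {3, 4}} over dimensions {0, 1}
  // yields {{4, 3}, {2, 1}}.)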
@@ -127,7 +127,7 @@ XLA_TEST_F(ReverseTest, Reverse4DU8ArrayOnDim23) {
}});
// clang-format on
- b.Rev(b.ConstantR4FromArray4D<uint8>(input), {0, 3});
+ Rev(ConstantR4FromArray4D<uint8>(&b, input), {0, 3});
// clang-format off
Array4D<uint8> expected({{
@@ -163,7 +163,7 @@ TEST_F(ReverseTest, Reverse4DFloatArrayOnDim01) {
});
// clang-format on
- b.Rev(b.ConstantR4FromArray4D<float>(input), {0, 1});
+ Rev(ConstantR4FromArray4D<float>(&b, input), {0, 1});
// clang-format off
Array4D<float> expected({
diff --git a/tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc b/tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc
index 7cfca781ac..a620fe1908 100644
--- a/tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc
+++ b/tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/global_data.h"
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/packed_literal_reader.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
diff --git a/tensorflow/compiler/xla/tests/round_trip_transfer_test.cc b/tensorflow/compiler/xla/tests/round_trip_transfer_test.cc
index f334a8c131..a8193c2eac 100644
--- a/tensorflow/compiler/xla/tests/round_trip_transfer_test.cc
+++ b/tensorflow/compiler/xla/tests/round_trip_transfer_test.cc
@@ -23,7 +23,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/array4d.h"
#include "tensorflow/compiler/xla/client/global_data.h"
#include "tensorflow/compiler/xla/client/local_client.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
#include "tensorflow/compiler/xla/tests/literal_test_util.h"
@@ -46,61 +46,62 @@ class RoundTripTransferTest : public ClientLibraryTestBase {
};
TEST_F(RoundTripTransferTest, R0S32) {
- RoundTripTest(*Literal::CreateR0<int32>(42));
+ RoundTripTest(*LiteralUtil::CreateR0<int32>(42));
}
TEST_F(RoundTripTransferTest, R0F32) {
- RoundTripTest(*Literal::CreateR0<float>(42.0));
+ RoundTripTest(*LiteralUtil::CreateR0<float>(42.0));
}
TEST_F(RoundTripTransferTest, R1F32_Len0) {
- RoundTripTest(*Literal::CreateR1<float>({}));
+ RoundTripTest(*LiteralUtil::CreateR1<float>({}));
}
TEST_F(RoundTripTransferTest, R1F32_Len2) {
- RoundTripTest(*Literal::CreateR1<float>({42.0, 64.0}));
+ RoundTripTest(*LiteralUtil::CreateR1<float>({42.0, 64.0}));
}
TEST_F(RoundTripTransferTest, R1F32_Len256) {
std::vector<float> values(256);
std::iota(values.begin(), values.end(), 1.0);
- RoundTripTest(*Literal::CreateR1<float>(values));
+ RoundTripTest(*LiteralUtil::CreateR1<float>(values));
}
TEST_F(RoundTripTransferTest, R1F32_Len1024) {
std::vector<float> values(1024);
std::iota(values.begin(), values.end(), 1.0);
- RoundTripTest(*Literal::CreateR1<float>(values));
+ RoundTripTest(*LiteralUtil::CreateR1<float>(values));
}
TEST_F(RoundTripTransferTest, R1F32_Len1025) {
std::vector<float> values(1025);
std::iota(values.begin(), values.end(), 1.0);
- RoundTripTest(*Literal::CreateR1<float>(values));
+ RoundTripTest(*LiteralUtil::CreateR1<float>(values));
}
TEST_F(RoundTripTransferTest, R1F32_Len4096) {
std::vector<float> values(4096);
std::iota(values.begin(), values.end(), 1.0);
- RoundTripTest(*Literal::CreateR1<float>(values));
+ RoundTripTest(*LiteralUtil::CreateR1<float>(values));
}
TEST_F(RoundTripTransferTest, R2F32_Len10x0) {
- RoundTripTest(*Literal::CreateR2FromArray2D<float>(Array2D<float>(10, 0)));
+ RoundTripTest(
+ *LiteralUtil::CreateR2FromArray2D<float>(Array2D<float>(10, 0)));
}
TEST_F(RoundTripTransferTest, R2F32_Len2x2) {
- RoundTripTest(*Literal::CreateR2<float>({{42.0, 64.0}, {77.0, 88.0}}));
+ RoundTripTest(*LiteralUtil::CreateR2<float>({{42.0, 64.0}, {77.0, 88.0}}));
}
TEST_F(RoundTripTransferTest, R3F32) {
RoundTripTest(
- *Literal::CreateR3<float>({{{1.0, 2.0}, {1.0, 2.0}, {1.0, 2.0}},
- {{3.0, 4.0}, {3.0, 4.0}, {3.0, 4.0}}}));
+ *LiteralUtil::CreateR3<float>({{{1.0, 2.0}, {1.0, 2.0}, {1.0, 2.0}},
+ {{3.0, 4.0}, {3.0, 4.0}, {3.0, 4.0}}}));
}
TEST_F(RoundTripTransferTest, R4F32) {
- RoundTripTest(*Literal::CreateR4<float>({{
+ RoundTripTest(*LiteralUtil::CreateR4<float>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
@@ -108,33 +109,36 @@ TEST_F(RoundTripTransferTest, R4F32) {
}
TEST_F(RoundTripTransferTest, EmptyTuple) {
- RoundTripTest(*Literal::MakeTuple({}));
+ RoundTripTest(*LiteralUtil::MakeTuple({}));
}
TEST_F(RoundTripTransferTest, TupleOfR1F32) {
- RoundTripTest(*Literal::MakeTuple({Literal::CreateR1<float>({1, 2}).get(),
- Literal::CreateR1<float>({3, 4}).get()}));
+ RoundTripTest(
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>({1, 2}).get(),
+ LiteralUtil::CreateR1<float>({3, 4}).get()}));
}
TEST_F(RoundTripTransferTest, TupleOfR1F32_Len0_Len2) {
- RoundTripTest(*Literal::MakeTuple({Literal::CreateR1<float>({}).get(),
- Literal::CreateR1<float>({3, 4}).get()}));
+ RoundTripTest(
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>({}).get(),
+ LiteralUtil::CreateR1<float>({3, 4}).get()}));
}
TEST_F(RoundTripTransferTest, TupleOfR0F32AndR1S32) {
- RoundTripTest(*Literal::MakeTuple({Literal::CreateR0<float>(1.0).get(),
- Literal::CreateR1<int>({2, 3}).get()}));
+ RoundTripTest(
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR0<float>(1.0).get(),
+ LiteralUtil::CreateR1<int>({2, 3}).get()}));
}
// The two tests below measure the cost of large data transfers.

TEST_F(RoundTripTransferTest, R2F32_Large) {
- RoundTripTest(*Literal::CreateR2F32Linspace(-1.0f, 1.0f, 512, 512));
+ RoundTripTest(*LiteralUtil::CreateR2F32Linspace(-1.0f, 1.0f, 512, 512));
}
TEST_F(RoundTripTransferTest, R4F32_Large) {
Array4D<float> array4d(2, 2, 256, 256);
array4d.FillWithMultiples(1.0f);
- RoundTripTest(*Literal::CreateR4FromArray4D<float>(array4d));
+ RoundTripTest(*LiteralUtil::CreateR4FromArray4D<float>(array4d));
}
} // namespace
diff --git a/tensorflow/compiler/xla/tests/scalar_computations_test.cc b/tensorflow/compiler/xla/tests/scalar_computations_test.cc
index 308d3fc78a..3b603c0d31 100644
--- a/tensorflow/compiler/xla/tests/scalar_computations_test.cc
+++ b/tensorflow/compiler/xla/tests/scalar_computations_test.cc
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/statusor.h"
@@ -44,74 +45,75 @@ class ScalarComputationsTest : public ClientLibraryTestBase {
protected:
// A template for building and running a binary comparison test.
template <typename NativeT>
- void TestCompare(
- NativeT lhs, NativeT rhs, bool expected,
- XlaOp (XlaBuilder::*op)(const XlaOp&, const XlaOp&,
- tensorflow::gtl::ArraySlice<int64>)) {
+ void TestCompare(NativeT lhs, NativeT rhs, bool expected,
+ std::function<XlaOp(const XlaOp&, const XlaOp&,
+ tensorflow::gtl::ArraySlice<int64>)>
+ op) {
XlaBuilder builder(TestName());
- XlaOp lhs_op = builder.ConstantR0<NativeT>(lhs);
- XlaOp rhs_op = builder.ConstantR0<NativeT>(rhs);
- XlaOp result = (builder.*op)(lhs_op, rhs_op, {});
+ XlaOp lhs_op = ConstantR0<NativeT>(&builder, lhs);
+ XlaOp rhs_op = ConstantR0<NativeT>(&builder, rhs);
+ op(lhs_op, rhs_op, {});
ComputeAndCompareR0<bool>(&builder, expected, {});
}
template <typename NativeT>
void TestMinMax(NativeT lhs, NativeT rhs, NativeT expected,
- XlaOp (XlaBuilder::*op)(const XlaOp&, const XlaOp&,
- tensorflow::gtl::ArraySlice<int64>)) {
+ std::function<XlaOp(const XlaOp&, const XlaOp&,
+ tensorflow::gtl::ArraySlice<int64>)>
+ op) {
XlaBuilder builder(TestName());
- XlaOp lhs_op = builder.ConstantR0<NativeT>(lhs);
- XlaOp rhs_op = builder.ConstantR0<NativeT>(rhs);
- XlaOp result = (builder.*op)(lhs_op, rhs_op, {});
+ XlaOp lhs_op = ConstantR0<NativeT>(&builder, lhs);
+ XlaOp rhs_op = ConstantR0<NativeT>(&builder, rhs);
+ op(lhs_op, rhs_op, {});
ComputeAndCompareR0<NativeT>(&builder, expected, {});
}
};
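// A std::function parameter accepts free-function pointers and lambdas
// alike, which is what lets the tests below pass &Eq, &Min, etc. instead of
// XlaBuilder member-function pointers. Minimal sketch of the mechanism
// (plain C++, illustration only, not part of this diff):
#include <functional>
int FreeOp(int a, int b) { return a + b; }
int Apply(std::function<int(int, int)> op, int a, int b) {
  return op(a, b);  // plain call; no (builder.*op)(...) member syntax needed
}
int Demo() { return Apply(&FreeOp, 1, 2); }  // returns 3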
XLA_TEST_F(ScalarComputationsTest, ReturnScalarF32) {
XlaBuilder builder(TestName());
- builder.ConstantR0<float>(2.1f);
+ ConstantR0<float>(&builder, 2.1f);
ComputeAndCompareR0<float>(&builder, 2.1f, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, NegateScalarF32) {
XlaBuilder builder(TestName());
- builder.Neg(builder.ConstantR0<float>(2.1f));
+ Neg(ConstantR0<float>(&builder, 2.1f));
ComputeAndCompareR0<float>(&builder, -2.1f, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, NegateScalarS32) {
XlaBuilder builder(TestName());
- builder.Neg(builder.ConstantR0<int32>(2));
+ Neg(ConstantR0<int32>(&builder, 2));
ComputeAndCompareR0<int32>(&builder, -2, {});
}
XLA_TEST_F(ScalarComputationsTest, AddTwoScalarsF32) {
XlaBuilder builder(TestName());
- builder.Add(builder.ConstantR0<float>(2.1f), builder.ConstantR0<float>(5.5f));
+ Add(ConstantR0<float>(&builder, 2.1f), ConstantR0<float>(&builder, 5.5f));
ComputeAndCompareR0<float>(&builder, 7.6f, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, AddTwoScalarsS32) {
XlaBuilder builder(TestName());
- builder.Add(builder.ConstantR0<int32>(2), builder.ConstantR0<int32>(5));
+ Add(ConstantR0<int32>(&builder, 2), ConstantR0<int32>(&builder, 5));
ComputeAndCompareR0<int32>(&builder, 7, {});
}
XLA_TEST_F(ScalarComputationsTest, AddTwoScalarsU32) {
XlaBuilder builder(TestName());
- builder.Add(builder.ConstantR0<uint32>(35), builder.ConstantR0<uint32>(57));
+ Add(ConstantR0<uint32>(&builder, 35), ConstantR0<uint32>(&builder, 57));
ComputeAndCompareR0<uint32>(&builder, 92, {});
}
XLA_TEST_F(ScalarComputationsTest, AddTwoScalarsU8) {
XlaBuilder builder(TestName());
- builder.Add(builder.ConstantR0<uint8>(35), builder.ConstantR0<uint8>(57));
+ Add(ConstantR0<uint8>(&builder, 35), ConstantR0<uint8>(&builder, 57));
ComputeAndCompareR0<uint8>(&builder, 92, {});
}
@@ -120,7 +122,7 @@ XLA_TEST_F(ScalarComputationsTest, AddTwoScalarsU64) {
XlaBuilder builder(TestName());
const uint64 a = static_cast<uint64>(1) << 63;
const uint64 b = a + 1;
- builder.Add(builder.ConstantR0<uint64>(a), builder.ConstantR0<uint64>(b));
+ Add(ConstantR0<uint64>(&builder, a), ConstantR0<uint64>(&builder, b));
ComputeAndCompareR0<uint64>(&builder, a + b, {});
}
@@ -129,40 +131,39 @@ XLA_TEST_F(ScalarComputationsTest, AddTwoScalarsS64) {
XlaBuilder builder(TestName());
const int64 a = static_cast<int64>(1) << 62;
const int64 b = a - 1;
- builder.Add(builder.ConstantR0<int64>(a), builder.ConstantR0<int64>(b));
+ Add(ConstantR0<int64>(&builder, a), ConstantR0<int64>(&builder, b));
ComputeAndCompareR0<int64>(&builder, a + b, {});
}
XLA_TEST_F(ScalarComputationsTest, AddTwoScalarsF64) {
XlaBuilder builder(TestName());
- builder.Add(builder.ConstantR0<double>(0.25),
- builder.ConstantR0<double>(3.5));
+ Add(ConstantR0<double>(&builder, 0.25), ConstantR0<double>(&builder, 3.5));
ComputeAndCompareR0<double>(&builder, 3.75, {});
}
XLA_TEST_F(ScalarComputationsTest, SubtractTwoScalarsF32) {
XlaBuilder builder(TestName());
- builder.Sub(builder.ConstantR0<float>(2.1f), builder.ConstantR0<float>(5.5f));
+ Sub(ConstantR0<float>(&builder, 2.1f), ConstantR0<float>(&builder, 5.5f));
ComputeAndCompareR0<float>(&builder, -3.4f, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, SubtractTwoScalarsS32) {
XlaBuilder builder(TestName());
- builder.Sub(builder.ConstantR0<int32>(2), builder.ConstantR0<int32>(5));
+ Sub(ConstantR0<int32>(&builder, 2), ConstantR0<int32>(&builder, 5));
ComputeAndCompareR0<int32>(&builder, -3, {});
}
XLA_TEST_F(ScalarComputationsTest, CastS64ToF32) {
XlaBuilder builder(TestName());
- auto a = builder.Parameter(0, ShapeUtil::MakeShape(S64, {}), "a");
- builder.ConvertElementType(a, F32);
+ auto a = Parameter(&builder, 0, ShapeUtil::MakeShape(S64, {}), "a");
+ ConvertElementType(a, F32);
int64 value = 3LL << 35;
- std::unique_ptr<Literal> a_literal = Literal::CreateR0<int64>(value);
+ std::unique_ptr<Literal> a_literal = LiteralUtil::CreateR0<int64>(value);
std::unique_ptr<GlobalData> a_data =
client_->TransferToServer(*a_literal).ConsumeValueOrDie();
ComputeAndCompareR0<float>(&builder, static_cast<float>(value),
@@ -171,9 +172,8 @@ XLA_TEST_F(ScalarComputationsTest, CastS64ToF32) {
XLA_TEST_F(ScalarComputationsTest, MulThreeScalarsF32) {
XlaBuilder builder(TestName());
- builder.Mul(builder.Mul(builder.ConstantR0<float>(2.1f),
- builder.ConstantR0<float>(5.5f)),
- builder.ConstantR0<float>(0.5f));
+ Mul(Mul(ConstantR0<float>(&builder, 2.1f), ConstantR0<float>(&builder, 5.5f)),
+ ConstantR0<float>(&builder, 0.5f));
ComputeAndCompareR0<float>(&builder, 5.775f, {}, error_spec_);
}
@@ -190,7 +190,7 @@ XLA_TEST_F(ScalarComputationsTest, MulTwoScalarsS32) {
for (int32 x : data) {
for (int32 y : data) {
XlaBuilder builder(TestName());
- builder.Mul(builder.ConstantR0<int32>(x), builder.ConstantR0<int32>(y));
+ Mul(ConstantR0<int32>(&builder, x), ConstantR0<int32>(&builder, y));
// Signed integer overflow is undefined behavior in C++. Convert the input
// integers to unsigned, perform the multiplication unsigned, and convert
@@ -209,7 +209,7 @@ XLA_TEST_F(ScalarComputationsTest, MulTwoScalarsU32) {
for (uint32 x : data) {
for (uint32 y : data) {
XlaBuilder builder(TestName());
- builder.Mul(builder.ConstantR0<uint32>(x), builder.ConstantR0<uint32>(y));
+ Mul(ConstantR0<uint32>(&builder, x), ConstantR0<uint32>(&builder, y));
uint32 expected = x * y;
ComputeAndCompareR0<uint32>(&builder, expected, {});
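// The S32 variant above sidesteps signed-overflow UB by multiplying in
// unsigned arithmetic, which wraps mod 2^32 by definition. A sketch of that
// trick (illustration only; WrappingMul is a hypothetical helper, not part
// of this diff):
#include <cstdint>
int32_t WrappingMul(int32_t x, int32_t y) {
  // Unsigned multiply is well defined; converting back to int32_t is
  // implementation-defined before C++20 and two's-complement from C++20 on.
  return static_cast<int32_t>(static_cast<uint32_t>(x) *
                              static_cast<uint32_t>(y));
}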
@@ -219,18 +219,17 @@ XLA_TEST_F(ScalarComputationsTest, MulTwoScalarsU32) {
XLA_TEST_F(ScalarComputationsTest, MulThreeScalarsS32) {
XlaBuilder builder(TestName());
- builder.Mul(
- builder.Mul(builder.ConstantR0<int32>(2), builder.ConstantR0<int32>(5)),
- builder.ConstantR0<int32>(1));
+ Mul(Mul(ConstantR0<int32>(&builder, 2), ConstantR0<int32>(&builder, 5)),
+ ConstantR0<int32>(&builder, 1));
ComputeAndCompareR0<int32>(&builder, 10, {});
}
XLA_TEST_F(ScalarComputationsTest, MulThreeScalarsF32Params) {
XlaBuilder builder(TestName());
- std::unique_ptr<Literal> a_literal = Literal::CreateR0<float>(2.1f);
- std::unique_ptr<Literal> b_literal = Literal::CreateR0<float>(5.5f);
- std::unique_ptr<Literal> c_literal = Literal::CreateR0<float>(0.5f);
+ std::unique_ptr<Literal> a_literal = LiteralUtil::CreateR0<float>(2.1f);
+ std::unique_ptr<Literal> b_literal = LiteralUtil::CreateR0<float>(5.5f);
+ std::unique_ptr<Literal> c_literal = LiteralUtil::CreateR0<float>(0.5f);
std::unique_ptr<GlobalData> a_data =
client_->TransferToServer(*a_literal).ConsumeValueOrDie();
@@ -239,10 +238,10 @@ XLA_TEST_F(ScalarComputationsTest, MulThreeScalarsF32Params) {
std::unique_ptr<GlobalData> c_data =
client_->TransferToServer(*c_literal).ConsumeValueOrDie();
- XlaOp a = builder.Parameter(0, a_literal->shape(), "a");
- XlaOp b = builder.Parameter(1, b_literal->shape(), "b");
- XlaOp c = builder.Parameter(2, c_literal->shape(), "c");
- builder.Mul(builder.Mul(a, b), c);
+ XlaOp a = Parameter(&builder, 0, a_literal->shape(), "a");
+ XlaOp b = Parameter(&builder, 1, b_literal->shape(), "b");
+ XlaOp c = Parameter(&builder, 2, c_literal->shape(), "c");
+ Mul(Mul(a, b), c);
ComputeAndCompareR0<float>(&builder, 5.775f,
{a_data.get(), b_data.get(), c_data.get()},
@@ -251,14 +250,14 @@ XLA_TEST_F(ScalarComputationsTest, MulThreeScalarsF32Params) {
XLA_TEST_F(ScalarComputationsTest, DivideTwoScalarsF32) {
XlaBuilder builder(TestName());
- builder.Div(builder.ConstantR0<float>(5.0f), builder.ConstantR0<float>(2.5f));
+ Div(ConstantR0<float>(&builder, 5.0f), ConstantR0<float>(&builder, 2.5f));
ComputeAndCompareR0<float>(&builder, 2.0f, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, RemTwoScalarsF32) {
XlaBuilder builder(TestName());
- builder.Rem(builder.ConstantR0<float>(2.5f), builder.ConstantR0<float>(5.0f));
+ Rem(ConstantR0<float>(&builder, 2.5f), ConstantR0<float>(&builder, 5.0f));
ComputeAndCompareR0<float>(&builder, 2.5f, {}, error_spec_);
}
@@ -281,8 +280,8 @@ class DivS32Test : public ClientLibraryTestBase,
XLA_TEST_P(DivS32Test, DivideTwoScalarsS32) {
DivS32Params p = GetParam();
XlaBuilder builder(TestName());
- builder.Div(builder.ConstantR0<int32>(p.dividend),
- builder.ConstantR0<int32>(p.divisor));
+ Div(ConstantR0<int32>(&builder, p.dividend),
+ ConstantR0<int32>(&builder, p.divisor));
ComputeAndCompareR0<int32>(&builder, p.quotient, {});
}
@@ -290,8 +289,8 @@ XLA_TEST_P(DivS32Test, DivideTwoScalarsS32) {
XLA_TEST_P(DivS32Test, RemainderTwoScalarsS32) {
DivS32Params p = GetParam();
XlaBuilder builder(TestName());
- builder.Rem(builder.ConstantR0<int32>(p.dividend),
- builder.ConstantR0<int32>(p.divisor));
+ Rem(ConstantR0<int32>(&builder, p.dividend),
+ ConstantR0<int32>(&builder, p.divisor));
ComputeAndCompareR0<int32>(&builder, p.remainder, {});
}
@@ -305,7 +304,7 @@ XLA_TEST_P(DivS32Test, DivideTwoScalarsNonConstS32) {
CreateR0Parameter<int32>(p.dividend, 0, "dividend", &builder, &dividend);
auto divisord =
CreateR0Parameter<int32>(p.divisor, 1, "divisor", &builder, &divisor);
- builder.Div(dividend, divisor);
+ Div(dividend, divisor);
ComputeAndCompareR0<int32>(&builder, p.quotient,
{dividendd.get(), divisord.get()});
@@ -320,7 +319,7 @@ XLA_TEST_P(DivS32Test, RemainderTwoScalarsNonConstDivisorS32) {
CreateR0Parameter<int32>(p.dividend, 0, "dividend", &builder, &dividend);
auto divisord =
CreateR0Parameter<int32>(p.divisor, 1, "divisor", &builder, &divisor);
- builder.Rem(dividend, divisor);
+ Rem(dividend, divisor);
ComputeAndCompareR0<int32>(&builder, p.remainder,
{dividendd.get(), divisord.get()});
@@ -367,18 +366,18 @@ XLA_TEST_F(ScalarComputationsTest, DivU32s) {
XlaBuilder builder(TestName());
XlaOp dividend =
- builder.Parameter(0, ShapeUtil::MakeShape(U32, {}), "dividend");
+ Parameter(&builder, 0, ShapeUtil::MakeShape(U32, {}), "dividend");
XlaOp divisor =
- builder.Parameter(1, ShapeUtil::MakeShape(U32, {}), "divisor");
- builder.Div(dividend, divisor);
+ Parameter(&builder, 1, ShapeUtil::MakeShape(U32, {}), "divisor");
+ Div(dividend, divisor);
TF_ASSERT_OK_AND_ASSIGN(div_computation, builder.Build());
}
for (uint32 divisor : vals) {
if (divisor != 0) {
for (uint32 dividend : vals) {
- auto dividend_literal = Literal::CreateR0<uint32>(dividend);
- auto divisor_literal = Literal::CreateR0<uint32>(divisor);
+ auto dividend_literal = LiteralUtil::CreateR0<uint32>(dividend);
+ auto divisor_literal = LiteralUtil::CreateR0<uint32>(divisor);
TF_ASSERT_OK_AND_ASSIGN(auto dividend_data,
client_->TransferToServer(*dividend_literal));
TF_ASSERT_OK_AND_ASSIGN(auto divisor_data,
@@ -389,7 +388,8 @@ XLA_TEST_F(ScalarComputationsTest, DivU32s) {
{dividend_data.get(), divisor_data.get()},
&execution_options_)
.ConsumeValueOrDie();
- auto expected_literal = Literal::CreateR0<uint32>(dividend / divisor);
+ auto expected_literal =
+ LiteralUtil::CreateR0<uint32>(dividend / divisor);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected_literal, *actual_literal));
}
}
@@ -408,18 +408,18 @@ XLA_TEST_F(ScalarComputationsTest, RemU32s) {
XlaBuilder builder(TestName());
XlaOp dividend =
- builder.Parameter(0, ShapeUtil::MakeShape(U32, {}), "dividend");
+ Parameter(&builder, 0, ShapeUtil::MakeShape(U32, {}), "dividend");
XlaOp divisor =
- builder.Parameter(1, ShapeUtil::MakeShape(U32, {}), "divisor");
- builder.Rem(dividend, divisor);
+ Parameter(&builder, 1, ShapeUtil::MakeShape(U32, {}), "divisor");
+ Rem(dividend, divisor);
TF_ASSERT_OK_AND_ASSIGN(rem_computation, builder.Build());
}
for (uint32 divisor : vals) {
if (divisor != 0) {
for (uint32 dividend : vals) {
- auto dividend_literal = Literal::CreateR0<uint32>(dividend);
- auto divisor_literal = Literal::CreateR0<uint32>(divisor);
+ auto dividend_literal = LiteralUtil::CreateR0<uint32>(dividend);
+ auto divisor_literal = LiteralUtil::CreateR0<uint32>(divisor);
TF_ASSERT_OK_AND_ASSIGN(auto dividend_data,
client_->TransferToServer(*dividend_literal));
TF_ASSERT_OK_AND_ASSIGN(auto divisor_data,
@@ -430,7 +430,8 @@ XLA_TEST_F(ScalarComputationsTest, RemU32s) {
{dividend_data.get(), divisor_data.get()},
&execution_options_)
.ConsumeValueOrDie();
- auto expected_literal = Literal::CreateR0<uint32>(dividend % divisor);
+ auto expected_literal =
+ LiteralUtil::CreateR0<uint32>(dividend % divisor);
EXPECT_TRUE(LiteralTestUtil::Equal(*expected_literal, *actual_literal));
}
}
@@ -439,10 +440,10 @@ XLA_TEST_F(ScalarComputationsTest, RemU32s) {
XLA_TEST_F(ScalarComputationsTest, RemainderTwoScalarsNonConstDividendS32) {
XlaBuilder builder(TestName());
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(S32, {}), "x");
- builder.Rem(x, builder.ConstantR0<int32>(80000));
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(S32, {}), "x");
+ Rem(x, ConstantR0<int32>(&builder, 80000));
- std::unique_ptr<Literal> literal = Literal::CreateR0<int32>(87919);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR0<int32>(87919);
TF_ASSERT_OK_AND_ASSIGN(auto input_data, client_->TransferToServer(*literal));
ComputeAndCompareR0<int32>(&builder, 7919, {input_data.get()});
}
@@ -451,15 +452,15 @@ XLA_TEST_F(ScalarComputationsTest, DivideTwoScalarsU32) {
XlaBuilder builder(TestName());
// This verifies 0xFFFFFFFE / 2 = 0x7FFFFFFF. If XLA incorrectly treated U32
// as S32, it would output -2 / 2 = -1 (0xFFFFFFFF).
- builder.Div(builder.ConstantR0<uint32>(0xFFFFFFFE),
- builder.ConstantR0<uint32>(2));
+ Div(ConstantR0<uint32>(&builder, 0xFFFFFFFE),
+ ConstantR0<uint32>(&builder, 2));
ComputeAndCompareR0<uint32>(&builder, 0x7FFFFFFF, {});
}
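// The same bit pattern divided as unsigned vs. signed (plain C++,
// illustration only, not part of this diff; assumes two's complement):
#include <cassert>
#include <cstdint>
int main() {
  uint32_t bits = 0xFFFFFFFEu;
  assert(bits / 2 == 0x7FFFFFFFu);              // U32: 4294967294 / 2
  int32_t as_s32 = static_cast<int32_t>(bits);  // reinterpreted: -2
  assert(as_s32 / 2 == -1);                     // S32: bits 0xFFFFFFFF
  return 0;
}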
XLA_TEST_F(ScalarComputationsTest, RemTwoScalarsU32) {
XlaBuilder builder(TestName());
- builder.Rem(builder.ConstantR0<uint32>(11), builder.ConstantR0<uint32>(3));
+ Rem(ConstantR0<uint32>(&builder, 11), ConstantR0<uint32>(&builder, 3));
ComputeAndCompareR0<uint32>(&builder, 2, {});
}
@@ -468,7 +469,7 @@ XLA_TEST_F(ScalarComputationsTest, AndBool) {
for (bool x : {false, true}) {
for (bool y : {false, true}) {
XlaBuilder builder(TestName());
- builder.And(builder.ConstantR0<bool>(x), builder.ConstantR0<bool>(y));
+ And(ConstantR0<bool>(&builder, x), ConstantR0<bool>(&builder, y));
ComputeAndCompareR0<bool>(&builder, x && y, {});
}
@@ -479,7 +480,7 @@ XLA_TEST_F(ScalarComputationsTest, AndS32) {
for (int32 x : {0, 8}) {
for (int32 y : {1, -16}) {
XlaBuilder builder(TestName());
- builder.And(builder.ConstantR0<int32>(x), builder.ConstantR0<int32>(y));
+ And(ConstantR0<int32>(&builder, x), ConstantR0<int32>(&builder, y));
ComputeAndCompareR0<int32>(&builder, x & y, {});
}
@@ -490,7 +491,7 @@ XLA_TEST_F(ScalarComputationsTest, AndU32) {
for (uint32 x : {0, 8}) {
for (uint32 y : {1, 16}) {
XlaBuilder builder(TestName());
- builder.And(builder.ConstantR0<uint32>(x), builder.ConstantR0<uint32>(y));
+ And(ConstantR0<uint32>(&builder, x), ConstantR0<uint32>(&builder, y));
ComputeAndCompareR0<uint32>(&builder, x & y, {});
}
@@ -501,7 +502,7 @@ XLA_TEST_F(ScalarComputationsTest, OrBool) {
for (bool x : {false, true}) {
for (bool y : {false, true}) {
XlaBuilder builder(TestName());
- builder.Or(builder.ConstantR0<bool>(x), builder.ConstantR0<bool>(y));
+ Or(ConstantR0<bool>(&builder, x), ConstantR0<bool>(&builder, y));
ComputeAndCompareR0<bool>(&builder, x || y, {});
}
@@ -512,7 +513,7 @@ XLA_TEST_F(ScalarComputationsTest, OrS32) {
for (int32 x : {0, 8}) {
for (int32 y : {1, -16}) {
XlaBuilder builder(TestName());
- builder.Or(builder.ConstantR0<int32>(x), builder.ConstantR0<int32>(y));
+ Or(ConstantR0<int32>(&builder, x), ConstantR0<int32>(&builder, y));
ComputeAndCompareR0<int32>(&builder, x | y, {});
}
@@ -523,7 +524,7 @@ XLA_TEST_F(ScalarComputationsTest, OrU32) {
for (uint32 x : {0, 8}) {
for (uint32 y : {1, 16}) {
XlaBuilder builder(TestName());
- builder.Or(builder.ConstantR0<uint32>(x), builder.ConstantR0<uint32>(y));
+ Or(ConstantR0<uint32>(&builder, x), ConstantR0<uint32>(&builder, y));
ComputeAndCompareR0<uint32>(&builder, x | y, {});
}
@@ -533,7 +534,7 @@ XLA_TEST_F(ScalarComputationsTest, OrU32) {
XLA_TEST_F(ScalarComputationsTest, NotBool) {
for (bool x : {false, true}) {
XlaBuilder builder(TestName());
- builder.Not(builder.ConstantR0<bool>(x));
+ Not(ConstantR0<bool>(&builder, x));
ComputeAndCompareR0<bool>(&builder, !x, {});
}
@@ -542,7 +543,7 @@ XLA_TEST_F(ScalarComputationsTest, NotBool) {
XLA_TEST_F(ScalarComputationsTest, NotS32) {
for (int32 x : {-1, 0, 1}) {
XlaBuilder builder(TestName());
- builder.Not(builder.ConstantR0<int32>(x));
+ Not(ConstantR0<int32>(&builder, x));
ComputeAndCompareR0<int32>(&builder, ~x, {});
}
@@ -551,7 +552,7 @@ XLA_TEST_F(ScalarComputationsTest, NotS32) {
XLA_TEST_F(ScalarComputationsTest, NotU32) {
for (uint32 x : {0, 1, 2}) {
XlaBuilder builder(TestName());
- builder.Not(builder.ConstantR0<uint32>(x));
+ Not(ConstantR0<uint32>(&builder, x));
ComputeAndCompareR0<uint32>(&builder, ~x, {});
}
@@ -559,18 +560,18 @@ XLA_TEST_F(ScalarComputationsTest, NotU32) {
XLA_TEST_F(ScalarComputationsTest, SelectScalarTrue) {
XlaBuilder builder(TestName());
- builder.Select(builder.ConstantR0<bool>(true), // The predicate.
- builder.ConstantR0<float>(123.0f), // The value on true.
- builder.ConstantR0<float>(42.0f)); // The value on false.
+ Select(ConstantR0<bool>(&builder, true), // The predicate.
+ ConstantR0<float>(&builder, 123.0f), // The value on true.
+ ConstantR0<float>(&builder, 42.0f)); // The value on false.
ComputeAndCompareR0<float>(&builder, 123.0f, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, SelectScalarFalse) {
XlaBuilder builder(TestName());
- builder.Select(builder.ConstantR0<bool>(false), // The predicate.
- builder.ConstantR0<float>(123.0f), // The value on true.
- builder.ConstantR0<float>(42.0f)); // The value on false.
+ Select(ConstantR0<bool>(&builder, false), // The predicate.
+ ConstantR0<float>(&builder, 123.0f), // The value on true.
+ ConstantR0<float>(&builder, 42.0f)); // The value on false.
ComputeAndCompareR0<float>(&builder, 42.0f, {}, error_spec_);
}
@@ -579,313 +580,311 @@ XLA_TEST_F(ScalarComputationsTest, SelectScalarFalse) {
// templatized comparison tests.
XLA_TEST_F(ScalarComputationsTest, CompareGtScalar) {
XlaBuilder builder(TestName());
- builder.Gt(builder.ConstantR0<float>(2.0f), builder.ConstantR0<float>(1.0f));
+ Gt(ConstantR0<float>(&builder, 2.0f), ConstantR0<float>(&builder, 1.0f));
ComputeAndCompareR0<bool>(&builder, true, {});
}
// S32 comparisons.
XLA_TEST_F(ScalarComputationsTest, CompareEqS32Greater) {
- TestCompare<int32>(2, 1, false, &XlaBuilder::Eq);
+ TestCompare<int32>(2, 1, false, &Eq);
}
XLA_TEST_F(ScalarComputationsTest, CompareEqS32Equal) {
- TestCompare<int32>(3, 3, true, &XlaBuilder::Eq);
+ TestCompare<int32>(3, 3, true, &Eq);
}
XLA_TEST_F(ScalarComputationsTest, CompareNeS32) {
- TestCompare<int32>(2, 1, true, &XlaBuilder::Ne);
+ TestCompare<int32>(2, 1, true, &Ne);
}
XLA_TEST_F(ScalarComputationsTest, CompareGeS32) {
- TestCompare<int32>(2, 1, true, &XlaBuilder::Ge);
+ TestCompare<int32>(2, 1, true, &Ge);
}
XLA_TEST_F(ScalarComputationsTest, CompareGtS32) {
- TestCompare<int32>(1, 5, false, &XlaBuilder::Gt);
+ TestCompare<int32>(1, 5, false, &Gt);
}
XLA_TEST_F(ScalarComputationsTest, CompareLeS32) {
- TestCompare<int32>(2, 1, false, &XlaBuilder::Le);
+ TestCompare<int32>(2, 1, false, &Le);
}
XLA_TEST_F(ScalarComputationsTest, CompareLtS32) {
- TestCompare<int32>(9, 7, false, &XlaBuilder::Lt);
+ TestCompare<int32>(9, 7, false, &Lt);
TestCompare<int32>(std::numeric_limits<int32>::min(),
- std::numeric_limits<int32>::max(), true, &XlaBuilder::Lt);
+ std::numeric_limits<int32>::max(), true, &Lt);
}
// U32 comparisons.
XLA_TEST_F(ScalarComputationsTest, CompareEqU32False) {
- TestCompare<uint32>(2, 1, false, &XlaBuilder::Eq);
+ TestCompare<uint32>(2, 1, false, &Eq);
}
XLA_TEST_F(ScalarComputationsTest, CompareNeU32) {
- TestCompare<uint32>(2, 1, true, &XlaBuilder::Ne);
+ TestCompare<uint32>(2, 1, true, &Ne);
}
XLA_TEST_F(ScalarComputationsTest, CompareGeU32Greater) {
- TestCompare<uint32>(2, 1, true, &XlaBuilder::Ge);
+ TestCompare<uint32>(2, 1, true, &Ge);
}
XLA_TEST_F(ScalarComputationsTest, CompareGeU32Equal) {
- TestCompare<uint32>(3, 3, true, &XlaBuilder::Ge);
+ TestCompare<uint32>(3, 3, true, &Ge);
}
XLA_TEST_F(ScalarComputationsTest, CompareGtU32) {
- TestCompare<uint32>(1, 5, false, &XlaBuilder::Gt);
- TestCompare<uint32>(5, 5, false, &XlaBuilder::Gt);
- TestCompare<uint32>(5, 1, true, &XlaBuilder::Gt);
+ TestCompare<uint32>(1, 5, false, &Gt);
+ TestCompare<uint32>(5, 5, false, &Gt);
+ TestCompare<uint32>(5, 1, true, &Gt);
}
XLA_TEST_F(ScalarComputationsTest, CompareLeU32) {
- TestCompare<uint32>(2, 1, false, &XlaBuilder::Le);
+ TestCompare<uint32>(2, 1, false, &Le);
}
XLA_TEST_F(ScalarComputationsTest, CompareLtU32) {
- TestCompare<uint32>(9, 7, false, &XlaBuilder::Lt);
- TestCompare<uint32>(0, std::numeric_limits<uint32>::max(), true,
- &XlaBuilder::Lt);
+ TestCompare<uint32>(9, 7, false, &Lt);
+ TestCompare<uint32>(0, std::numeric_limits<uint32>::max(), true, &Lt);
}
// F32 comparisons.
XLA_TEST_F(ScalarComputationsTest, CompareEqF32False) {
- TestCompare<float>(2.0, 1.3, false, &XlaBuilder::Eq);
+ TestCompare<float>(2.0, 1.3, false, &Eq);
}
XLA_TEST_F(ScalarComputationsTest, CompareNeF32) {
- TestCompare<float>(2.0, 1.3, true, &XlaBuilder::Ne);
+ TestCompare<float>(2.0, 1.3, true, &Ne);
}
XLA_TEST_F(ScalarComputationsTest, CompareGeF32Greater) {
- TestCompare<float>(2.0, 1.9, true, &XlaBuilder::Ge);
+ TestCompare<float>(2.0, 1.9, true, &Ge);
}
XLA_TEST_F(ScalarComputationsTest, CompareGeF32Equal) {
- TestCompare<float>(3.5, 3.5, true, &XlaBuilder::Ge);
+ TestCompare<float>(3.5, 3.5, true, &Ge);
}
XLA_TEST_F(ScalarComputationsTest, CompareGtF32) {
- TestCompare<float>(1.0, 5.2, false, &XlaBuilder::Gt);
+ TestCompare<float>(1.0, 5.2, false, &Gt);
}
XLA_TEST_F(ScalarComputationsTest, CompareLeF32) {
- TestCompare<float>(2.0, 1.2, false, &XlaBuilder::Le);
+ TestCompare<float>(2.0, 1.2, false, &Le);
}
XLA_TEST_F(ScalarComputationsTest, CompareLtF32) {
- TestCompare<float>(9.0, 7.2, false, &XlaBuilder::Lt);
+ TestCompare<float>(9.0, 7.2, false, &Lt);
}
// F32 comparisons with exceptional values. The test names encode the
// left/right operands at the end, and use Minf and Mzero for -inf and -0.0.
XLA_TEST_F(ScalarComputationsTest, CompareLtF32MinfMzero) {
- TestCompare<float>(-INFINITY, -0.0, true, &XlaBuilder::Lt);
+ TestCompare<float>(-INFINITY, -0.0, true, &Lt);
}
XLA_TEST_F(ScalarComputationsTest, CompareLtF32MzeroZero) {
// Comparisons of 0.0 to -0.0 consider them equal in IEEE 754.
- TestCompare<float>(-0.0, 0.0, false, &XlaBuilder::Lt);
+ TestCompare<float>(-0.0, 0.0, false, &Lt);
}
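// The IEEE 754 signed-zero rule the two Mzero tests rely on, in plain C++
// (illustration only, not part of this diff):
#include <cassert>
int main() {
  assert(-0.0f == 0.0f);    // signed zeros compare equal,
  assert(!(-0.0f < 0.0f));  // so Lt(-0.0, 0.0) is false (tested above)
  assert(-0.0f >= 0.0f);    // and Ge(-0.0, 0.0) is true (tested below)
  return 0;
}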
XLA_TEST_F(ScalarComputationsTest, CompareLtF32ZeroInf) {
- TestCompare<float>(0.0, INFINITY, true, &XlaBuilder::Lt);
+ TestCompare<float>(0.0, INFINITY, true, &Lt);
}
XLA_TEST_F(ScalarComputationsTest, CompareGeF32MinfMzero) {
- TestCompare<float>(-INFINITY, -0.0, false, &XlaBuilder::Ge);
+ TestCompare<float>(-INFINITY, -0.0, false, &Ge);
}
XLA_TEST_F(ScalarComputationsTest, CompareGeF32MzeroZero) {
// Comparisons of 0.0 to -0.0 consider them equal in IEEE 754.
- TestCompare<float>(-0.0, 0.0, true, &XlaBuilder::Ge);
+ TestCompare<float>(-0.0, 0.0, true, &Ge);
}
XLA_TEST_F(ScalarComputationsTest, CompareGeF32ZeroInf) {
- TestCompare<float>(0.0, INFINITY, false, &XlaBuilder::Ge);
+ TestCompare<float>(0.0, INFINITY, false, &Ge);
}
XLA_TEST_F(ScalarComputationsTest, ExpScalar) {
XlaBuilder builder(TestName());
- builder.Exp(builder.ConstantR0<float>(2.0f));
+ Exp(ConstantR0<float>(&builder, 2.0f));
ComputeAndCompareR0<float>(&builder, 7.3890562, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, LogScalar) {
XlaBuilder builder("log");
- builder.Log(builder.ConstantR0<float>(2.0f));
+ Log(ConstantR0<float>(&builder, 2.0f));
ComputeAndCompareR0<float>(&builder, 0.6931471, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, TanhScalar) {
XlaBuilder builder(TestName());
- builder.Tanh(builder.ConstantR0<float>(2.0f));
+ Tanh(ConstantR0<float>(&builder, 2.0f));
ComputeAndCompareR0<float>(&builder, 0.96402758, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, TanhDoubleScalar) {
XlaBuilder builder(TestName());
- builder.Tanh(builder.ConstantR0<double>(2.0));
+ Tanh(ConstantR0<double>(&builder, 2.0));
ComputeAndCompareR0<double>(&builder, 0.96402758, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, PowScalar) {
XlaBuilder builder(TestName());
- builder.Pow(builder.ConstantR0<float>(2.0f), builder.ConstantR0<float>(3.0f));
+ Pow(ConstantR0<float>(&builder, 2.0f), ConstantR0<float>(&builder, 3.0f));
ComputeAndCompareR0<float>(&builder, 8.0, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, ClampScalarHighS32) {
XlaBuilder builder(TestName());
- builder.Clamp(builder.ConstantR0<int32>(-1), // The lower bound.
- builder.ConstantR0<int32>(5), // The operand to be clamped.
- builder.ConstantR0<int32>(3)); // The upper bound.
+ Clamp(ConstantR0<int32>(&builder, -1), // The lower bound.
+ ConstantR0<int32>(&builder, 5), // The operand to be clamped.
+ ConstantR0<int32>(&builder, 3)); // The upper bound.
ComputeAndCompareR0<int32>(&builder, 3, {});
}
XLA_TEST_F(ScalarComputationsTest, ClampScalarMiddleS32) {
XlaBuilder builder(TestName());
- builder.Clamp(builder.ConstantR0<int32>(-1), // The lower bound.
- builder.ConstantR0<int32>(2), // The operand to be clamped.
- builder.ConstantR0<int32>(3)); // The upper bound.
+ Clamp(ConstantR0<int32>(&builder, -1), // The lower bound.
+ ConstantR0<int32>(&builder, 2), // The operand to be clamped.
+ ConstantR0<int32>(&builder, 3)); // The upper bound.
ComputeAndCompareR0<int32>(&builder, 2, {});
}
XLA_TEST_F(ScalarComputationsTest, ClampScalarLowS32) {
XlaBuilder builder(TestName());
- builder.Clamp(builder.ConstantR0<int32>(-1), // The lower bound.
- builder.ConstantR0<int32>(-5), // The operand to be clamped.
- builder.ConstantR0<int32>(3)); // The upper bound.
+ Clamp(ConstantR0<int32>(&builder, -1), // The lower bound.
+ ConstantR0<int32>(&builder, -5), // The operand to be clamped.
+ ConstantR0<int32>(&builder, 3)); // The upper bound.
ComputeAndCompareR0<int32>(&builder, -1, {});
}
XLA_TEST_F(ScalarComputationsTest, ClampScalarHighU32) {
XlaBuilder builder(TestName());
- builder.Clamp(builder.ConstantR0<uint32>(1), // The lower bound.
- builder.ConstantR0<uint32>(5), // The operand to be clamped.
- builder.ConstantR0<uint32>(3)); // The upper bound.
+ Clamp(ConstantR0<uint32>(&builder, 1), // The lower bound.
+ ConstantR0<uint32>(&builder, 5), // The operand to be clamped.
+ ConstantR0<uint32>(&builder, 3)); // The upper bound.
ComputeAndCompareR0<uint32>(&builder, 3, {});
}
XLA_TEST_F(ScalarComputationsTest, ClampScalarMiddleU32) {
XlaBuilder builder(TestName());
- builder.Clamp(builder.ConstantR0<uint32>(1), // The lower bound.
- builder.ConstantR0<uint32>(2), // The operand to be clamped.
- builder.ConstantR0<uint32>(3)); // The upper bound.
+ Clamp(ConstantR0<uint32>(&builder, 1), // The lower bound.
+ ConstantR0<uint32>(&builder, 2), // The operand to be clamped.
+ ConstantR0<uint32>(&builder, 3)); // The upper bound.
ComputeAndCompareR0<uint32>(&builder, 2, {});
}
XLA_TEST_F(ScalarComputationsTest, ClampScalarLowU32) {
XlaBuilder builder(TestName());
- builder.Clamp(builder.ConstantR0<uint32>(1), // The lower bound.
- builder.ConstantR0<uint32>(0), // The operand to be clamped.
- builder.ConstantR0<uint32>(3)); // The upper bound.
+ Clamp(ConstantR0<uint32>(&builder, 1), // The lower bound.
+ ConstantR0<uint32>(&builder, 0), // The operand to be clamped.
+ ConstantR0<uint32>(&builder, 3)); // The upper bound.
ComputeAndCompareR0<uint32>(&builder, 1, {});
}
XLA_TEST_F(ScalarComputationsTest, ClampScalarHighF32) {
XlaBuilder builder(TestName());
- builder.Clamp(builder.ConstantR0<float>(2.0f), // The lower bound.
- builder.ConstantR0<float>(5.0f), // The operand to be clamped.
- builder.ConstantR0<float>(3.0f)); // The upper bound.
+ Clamp(ConstantR0<float>(&builder, 2.0f), // The lower bound.
+ ConstantR0<float>(&builder, 5.0f), // The operand to be clamped.
+ ConstantR0<float>(&builder, 3.0f)); // The upper bound.
ComputeAndCompareR0<float>(&builder, 3.0, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, ClampScalarMiddleF32) {
XlaBuilder builder(TestName());
- builder.Clamp(builder.ConstantR0<float>(2.0f), // The lower bound.
- builder.ConstantR0<float>(2.5f), // The operand to be clamped.
- builder.ConstantR0<float>(3.0f)); // The upper bound.
+ Clamp(ConstantR0<float>(&builder, 2.0f), // The lower bound.
+ ConstantR0<float>(&builder, 2.5f), // The operand to be clamped.
+ ConstantR0<float>(&builder, 3.0f)); // The upper bound.
ComputeAndCompareR0<float>(&builder, 2.5, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, ClampScalarLowF32) {
XlaBuilder builder(TestName());
- builder.Clamp(builder.ConstantR0<float>(2.0f), // The lower bound.
- builder.ConstantR0<float>(-5.0f), // The operand to be clamped.
- builder.ConstantR0<float>(3.0f)); // The upper bound.
+ Clamp(ConstantR0<float>(&builder, 2.0f), // The lower bound.
+ ConstantR0<float>(&builder, -5.0f), // The operand to be clamped.
+ ConstantR0<float>(&builder, 3.0f)); // The upper bound.
ComputeAndCompareR0<float>(&builder, 2.0, {}, error_spec_);
}
XLA_TEST_F(ScalarComputationsTest, MinS32Above) {
- TestMinMax<int32>(10, 3, 3, &XlaBuilder::Min);
+ TestMinMax<int32>(10, 3, 3, &Min);
}
XLA_TEST_F(ScalarComputationsTest, MinS32Below) {
- TestMinMax<int32>(-100, 3, -100, &XlaBuilder::Min);
+ TestMinMax<int32>(-100, 3, -100, &Min);
}
XLA_TEST_F(ScalarComputationsTest, MaxS32Above) {
- TestMinMax<int32>(10, 3, 10, &XlaBuilder::Max);
+ TestMinMax<int32>(10, 3, 10, &Max);
}
XLA_TEST_F(ScalarComputationsTest, MaxS32Below) {
- TestMinMax<int32>(-100, 3, 3, &XlaBuilder::Max);
+ TestMinMax<int32>(-100, 3, 3, &Max);
}
XLA_TEST_F(ScalarComputationsTest, MinU32Above) {
const uint32 large = std::numeric_limits<int32>::max();
- TestMinMax<uint32>(large, 3, 3, &XlaBuilder::Min);
+ TestMinMax<uint32>(large, 3, 3, &Min);
}
XLA_TEST_F(ScalarComputationsTest, MinU32Below) {
- TestMinMax<uint32>(0, 5, 0, &XlaBuilder::Min);
+ TestMinMax<uint32>(0, 5, 0, &Min);
}
XLA_TEST_F(ScalarComputationsTest, MaxU32Above) {
const uint32 large = std::numeric_limits<int32>::max();
- TestMinMax<uint32>(large, 3, large, &XlaBuilder::Max);
+ TestMinMax<uint32>(large, 3, large, &Max);
}
XLA_TEST_F(ScalarComputationsTest, MaxU32Below) {
- TestMinMax<uint32>(0, 5, 5, &XlaBuilder::Max);
+ TestMinMax<uint32>(0, 5, 5, &Max);
}
XLA_TEST_F(ScalarComputationsTest, MinF32Above) {
- TestMinMax<float>(10.1f, 3.1f, 3.1f, &XlaBuilder::Min);
+ TestMinMax<float>(10.1f, 3.1f, 3.1f, &Min);
}
XLA_TEST_F(ScalarComputationsTest, MinF32Below) {
- TestMinMax<float>(-100.1f, 3.1f, -100.1f, &XlaBuilder::Min);
+ TestMinMax<float>(-100.1f, 3.1f, -100.1f, &Min);
}
XLA_TEST_F(ScalarComputationsTest, MinPropagatesNan) {
SetFastMathDisabled(true);
- TestMinMax<float>(NAN, 3.1f, NAN, &XlaBuilder::Min);
- TestMinMax<float>(-3.1f, NAN, NAN, &XlaBuilder::Min);
+ TestMinMax<float>(NAN, 3.1f, NAN, &Min);
+ TestMinMax<float>(-3.1f, NAN, NAN, &Min);
}
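// Both operand orders are exercised because a naive comparison-based min
// propagates NaN from only one side; e.g. std::min, compiled without
// fast-math (plain C++, illustration only, not part of this diff):
#include <algorithm>
#include <cassert>
#include <cmath>
int main() {
  // std::min(a, b) is (b < a) ? b : a, and comparisons with NaN are false:
  assert(std::isnan(std::min(NAN, 3.1f)));   // returns first arg: NaN
  assert(!std::isnan(std::min(3.1f, NAN)));  // returns first arg: 3.1f
  return 0;
}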
XLA_TEST_F(ScalarComputationsTest, MaxF32Above) {
- TestMinMax<float>(10.1f, 3.1f, 10.1f, &XlaBuilder::Max);
+ TestMinMax<float>(10.1f, 3.1f, 10.1f, &Max);
}
XLA_TEST_F(ScalarComputationsTest, MaxF32Below) {
- TestMinMax<float>(-100.1f, 3.1f, 3.1f, &XlaBuilder::Max);
+ TestMinMax<float>(-100.1f, 3.1f, 3.1f, &Max);
}
XLA_TEST_F(ScalarComputationsTest, MaxPropagatesNan) {
SetFastMathDisabled(true);
- TestMinMax<float>(NAN, 3.1f, NAN, &XlaBuilder::Max);
- TestMinMax<float>(-3.1f, NAN, NAN, &XlaBuilder::Max);
+ TestMinMax<float>(NAN, 3.1f, NAN, &Max);
+ TestMinMax<float>(-3.1f, NAN, NAN, &Max);
}
XLA_TEST_F(ScalarComputationsTest, ComplicatedArithmeticExpressionF32) {
// Compute the expression (1 * (3 - 1) * (7 + 0) - 4) / 20.
XlaBuilder b(TestName());
- b.Div(
- b.Sub(b.Mul(b.ConstantR0<float>(1),
- b.Mul(b.Sub(b.ConstantR0<float>(3), b.ConstantR0<float>(1)),
- b.Add(b.ConstantR0<float>(7), b.ConstantR0<float>(0)))),
- b.ConstantR0<float>(4)),
- b.ConstantR0<float>(20));
+ Div(Sub(Mul(ConstantR0<float>(&b, 1),
+ Mul(Sub(ConstantR0<float>(&b, 3), ConstantR0<float>(&b, 1)),
+ Add(ConstantR0<float>(&b, 7), ConstantR0<float>(&b, 0)))),
+ ConstantR0<float>(&b, 4)),
+ ConstantR0<float>(&b, 20));
ComputeAndCompareR0<float>(&b, 0.5, {}, error_spec_);
}
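// (Worked out: 1 * (3 - 1) * (7 + 0) = 14, and (14 - 4) / 20 = 0.5.)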
@@ -893,30 +892,18 @@ XLA_TEST_F(ScalarComputationsTest, ComplicatedArithmeticExpressionF32) {
XLA_TEST_F(ScalarComputationsTest, ComplicatedArithmeticExpressionS32) {
// Compute the expression 1 * (3 - 1) * (7 + 0) - 4.
XlaBuilder b(TestName());
- b.Sub(b.Mul(b.ConstantR0<int32>(1),
- b.Mul(b.Sub(b.ConstantR0<int32>(3), b.ConstantR0<int32>(1)),
- b.Add(b.ConstantR0<int32>(7), b.ConstantR0<int32>(0)))),
- b.ConstantR0<int32>(4));
+ Sub(Mul(ConstantR0<int32>(&b, 1),
+ Mul(Sub(ConstantR0<int32>(&b, 3), ConstantR0<int32>(&b, 1)),
+ Add(ConstantR0<int32>(&b, 7), ConstantR0<int32>(&b, 0)))),
+ ConstantR0<int32>(&b, 4));
ComputeAndCompareR0<int32>(&b, 10, {});
}
-XLA_TEST_F(ScalarComputationsTest, SqrtF320) {
- XlaBuilder builder(TestName());
- Literal zero_literal = Literal::Zero(PrimitiveType::F32);
-
- std::unique_ptr<GlobalData> zero_data =
- client_->TransferToServer(zero_literal).ConsumeValueOrDie();
-
- XlaOp zero = builder.Parameter(0, zero_literal.shape(), "zero");
- builder.SqrtF32(zero);
-
- ComputeAndCompareR0<float>(&builder, 0.0f, {zero_data.get()}, error_spec_);
-}
XLA_TEST_F(ScalarComputationsTest, RoundScalar) {
XlaBuilder builder(TestName());
- builder.Round(builder.ConstantR0<float>(1.4f));
+ Round(ConstantR0<float>(&builder, 1.4f));
ComputeAndCompareR0<float>(&builder, 1.0f, {}, error_spec_);
}
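The hunks above all apply one mechanical pattern: XlaBuilder member functions (b.Add, b.ConstantR0, b.Round, ...) become free functions in the xla namespace that take the builder, or an XlaOp that remembers it, as their first argument. A minimal sketch of the new style, assuming only the xla_builder.h header these tests already include (BuildAddOne is a hypothetical name):

#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"

xla::XlaComputation BuildAddOne() {
  xla::XlaBuilder b("add_one");
  // Old, removed style: b.Add(b.ConstantR0<float>(1.0f), b.ConstantR0<float>(41.0f));
  // New style: free functions take the builder (or XlaOps that carry it).
  xla::Add(xla::ConstantR0<float>(&b, 1.0f), xla::ConstantR0<float>(&b, 41.0f));
  return b.Build().ValueOrDie();  // the last op built becomes the root
}

One apparent benefit of the free-function spelling is that binary ops can be built from their XlaOp operands alone, so helpers no longer need a builder pointer threaded through every call.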
diff --git a/tensorflow/compiler/xla/tests/select_and_scatter_test.cc b/tensorflow/compiler/xla/tests/select_and_scatter_test.cc
index 7015e5a6a3..b1f1e69d3c 100644
--- a/tensorflow/compiler/xla/tests/select_and_scatter_test.cc
+++ b/tensorflow/compiler/xla/tests/select_and_scatter_test.cc
@@ -25,7 +25,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/reference_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
@@ -73,16 +73,16 @@ XLA_TEST_P(SelectAndScatterTest, ParamTest) {
auto operand_shape = GetParam().operand_shape;
Array<float> o(operand_shape);
o.FillRandom(1.5f);
- auto operand = builder_.ConstantFromArray(o);
+ auto operand = ConstantFromArray(&builder_, o);
auto source_shape = GetParam().source_shape;
Array<float> s(source_shape);
s.FillRandom(12.0f);
- auto source = builder_.ConstantFromArray(s);
+ auto source = ConstantFromArray(&builder_, s);
- builder_.SelectAndScatter(operand, ge_f32_, GetParam().window_dimensions,
- GetParam().window_strides, GetParam().padding_type,
- source, builder_.ConstantR0<float>(0.0f), add_f32_);
+ SelectAndScatter(operand, ge_f32_, GetParam().window_dimensions,
+ GetParam().window_strides, GetParam().padding_type, source,
+ ConstantR0<float>(&builder_, 0.0f), add_f32_);
ComputeAndCompare(&builder_, {}, ErrorSpec(1e-5));
}
@@ -197,110 +197,110 @@ INSTANTIATE_TEST_CASE_P(
// Test for F32 1D array, with a zero-element input.
XLA_TEST_F(SelectAndScatterTest, R1S0F32) {
- const auto operand = builder_.ConstantR1<float>({});
- const auto source = builder_.ConstantR1<float>({});
- builder_.SelectAndScatter(operand, ge_f32_, /*window_dimensions=*/{3},
- /*window_strides=*/{3}, Padding::kValid, source,
- builder_.ConstantR0<float>(0.0f), add_f32_);
+ const auto operand = ConstantR1<float>(&builder_, {});
+ const auto source = ConstantR1<float>(&builder_, {});
+ SelectAndScatter(operand, ge_f32_, /*window_dimensions=*/{3},
+ /*window_strides=*/{3}, Padding::kValid, source,
+ ConstantR0<float>(&builder_, 0.0f), add_f32_);
ComputeAndCompareR1<float>(&builder_, {}, {}, ErrorSpec(1e-7));
}
// Test for F32 1D array, when windows do not overlap.
XLA_TEST_F(SelectAndScatterTest, R1F32) {
const auto operand =
- builder_.ConstantR1<float>({1.f, 9.f, 3.f, 7.f, 5.f, 6.f});
- const auto source = builder_.ConstantR1<float>({34.f, 42.f});
+ ConstantR1<float>(&builder_, {1.f, 9.f, 3.f, 7.f, 5.f, 6.f});
+ const auto source = ConstantR1<float>(&builder_, {34.f, 42.f});
const std::vector<float> expected = {0.f, 34.f, 0.f, 42.f, 0.f, 0.f};
- builder_.SelectAndScatter(operand, ge_f32_, /*window_dimensions=*/{3},
- /*window_strides=*/{3}, Padding::kValid, source,
- builder_.ConstantR0<float>(0.0f), add_f32_);
+ SelectAndScatter(operand, ge_f32_, /*window_dimensions=*/{3},
+ /*window_strides=*/{3}, Padding::kValid, source,
+ ConstantR0<float>(&builder_, 0.0f), add_f32_);
ComputeAndCompareR1<float>(&builder_, expected, {}, ErrorSpec(1e-7));
}
// Test for S32 1D array, when windows do not overlap and the init value is 1.
XLA_TEST_F(SelectAndScatterTest, R1S32) {
- const auto operand = builder_.ConstantR1<int32>({-1, 0, 6, 4, -4, 10});
- const auto source = builder_.ConstantR1<int32>({-10, 20});
+ const auto operand = ConstantR1<int32>(&builder_, {-1, 0, 6, 4, -4, 10});
+ const auto source = ConstantR1<int32>(&builder_, {-10, 20});
const std::vector<int32> expected = {1, 1, -9, 1, 1, 21};
- builder_.SelectAndScatter(operand, ge_s32_, /*window_dimensions=*/{3},
- /*window_strides=*/{3}, Padding::kValid, source,
- builder_.ConstantR0<int32>(1), add_s32_);
+ SelectAndScatter(operand, ge_s32_, /*window_dimensions=*/{3},
+ /*window_strides=*/{3}, Padding::kValid, source,
+ ConstantR0<int32>(&builder_, 1), add_s32_);
ComputeAndCompareR1<int32>(&builder_, expected, {});
}
// Test for S32 1D array, when windows overlap with each other.
XLA_TEST_F(SelectAndScatterTest, R1S32OverlappingWindow) {
- const auto operand = builder_.ConstantR1<int32>({1, 9, 3, 7, 5, 6});
- const auto source = builder_.ConstantR1<int32>({34, 42, 53, 19});
+ const auto operand = ConstantR1<int32>(&builder_, {1, 9, 3, 7, 5, 6});
+ const auto source = ConstantR1<int32>(&builder_, {34, 42, 53, 19});
const std::vector<int32> expected = {0, 76, 0, 72, 0, 0};
- builder_.SelectAndScatter(operand, ge_s32_, /*window_dimensions=*/{3},
- /*window_strides=*/{1}, Padding::kValid, source,
- builder_.ConstantR0<int32>(0), add_s32_);
+ SelectAndScatter(operand, ge_s32_, /*window_dimensions=*/{3},
+ /*window_strides=*/{1}, Padding::kValid, source,
+ ConstantR0<int32>(&builder_, 0), add_s32_);
ComputeAndCompareR1<int32>(&builder_, expected, {});
}
// Test for S32 2D array, when windows do not overlap.
XLA_TEST_F(SelectAndScatterTest, R2S32) {
const auto operand =
- builder_.ConstantR2<int32>({{7, 2, 5, 3, 10, 2}, {3, 8, 9, 3, 4, 2}});
- const auto source = builder_.ConstantR2<int32>({{2, 6}});
+ ConstantR2<int32>(&builder_, {{7, 2, 5, 3, 10, 2}, {3, 8, 9, 3, 4, 2}});
+ const auto source = ConstantR2<int32>(&builder_, {{2, 6}});
Array2D<int32> expected({{0, 0, 0, 0, 6, 0}, {0, 0, 2, 0, 0, 0}});
- builder_.SelectAndScatter(operand, ge_s32_, /*window_dimensions=*/{2, 3},
- /*window_strides=*/{2, 3}, Padding::kValid, source,
- builder_.ConstantR0<int32>(0), add_s32_);
+ SelectAndScatter(operand, ge_s32_, /*window_dimensions=*/{2, 3},
+ /*window_strides=*/{2, 3}, Padding::kValid, source,
+ ConstantR0<int32>(&builder_, 0), add_s32_);
ComputeAndCompareR2<int32>(&builder_, expected, {});
}
// Test for tie breaking rule in ge_f32_. When a tie is present, the operand
// that has the lower lexicographical order (smaller index) should be chosen.
XLA_TEST_F(SelectAndScatterTest, R2F32Tie) {
- const auto operand = builder_.ConstantR2<float>(
- {{0.f, 0.f, 0.f}, {0.f, 0.f, 0.f}, {0.f, 0.f, 0.f}});
- const auto source = builder_.ConstantR2<float>(
- {{1.0f, 2.0f, 3.0f}, {4.f, 5.0f, 6.0f}, {7.0f, 8.0f, 9.0f}});
+ const auto operand = ConstantR2<float>(
+ &builder_, {{0.f, 0.f, 0.f}, {0.f, 0.f, 0.f}, {0.f, 0.f, 0.f}});
+ const auto source = ConstantR2<float>(
+ &builder_, {{1.0f, 2.0f, 3.0f}, {4.f, 5.0f, 6.0f}, {7.0f, 8.0f, 9.0f}});
Array2D<float> expected(
{{12.f, 9.f, 0.f}, {15.f, 9.f, 0.f}, {0.f, 0.f, 0.f}});
- builder_.SelectAndScatter(operand, ge_f32_, /*window_dimensions=*/{3, 3},
- /*window_strides=*/{1, 1}, Padding::kSame, source,
- builder_.ConstantR0<float>(0.0f), add_f32_);
+ SelectAndScatter(operand, ge_f32_, /*window_dimensions=*/{3, 3},
+ /*window_strides=*/{1, 1}, Padding::kSame, source,
+ ConstantR0<float>(&builder_, 0.0f), add_f32_);
ComputeAndCompareR2<float>(&builder_, expected, {}, ErrorSpec(1e-7));
}
// Similar to SelectAndScatterTest.R2S32 but the input is transposed.
XLA_TEST_F(SelectAndScatterTest, ReshapeR2S32) {
- const auto operand = builder_.ConstantR2<int32>(
- {{7, 3}, {2, 8}, {5, 9}, {3, 3}, {10, 4}, {2, 2}});
+ const auto operand = ConstantR2<int32>(
+ &builder_, {{7, 3}, {2, 8}, {5, 9}, {3, 3}, {10, 4}, {2, 2}});
const auto reshape =
- builder_.Reshape(operand, /*dimensions=*/{1, 0}, /*new_sizes=*/{2, 6});
- const auto source = builder_.ConstantR2<int32>({{2, 6}});
+ Reshape(operand, /*dimensions=*/{1, 0}, /*new_sizes=*/{2, 6});
+ const auto source = ConstantR2<int32>(&builder_, {{2, 6}});
Array2D<int32> expected({{0, 0, 0, 0, 6, 0}, {0, 0, 2, 0, 0, 0}});
- builder_.SelectAndScatter(reshape, ge_s32_, /*window_dimensions=*/{2, 3},
- /*window_strides=*/{2, 3}, Padding::kValid, source,
- builder_.ConstantR0<int32>(0), add_s32_);
+ SelectAndScatter(reshape, ge_s32_, /*window_dimensions=*/{2, 3},
+ /*window_strides=*/{2, 3}, Padding::kValid, source,
+ ConstantR0<int32>(&builder_, 0), add_s32_);
ComputeAndCompareR2<int32>(&builder_, expected, {});
}
// Test for S32 2D array, when windows overlap with each other.
XLA_TEST_F(SelectAndScatterTest, R2S32OverlappingWindow) {
const auto operand =
- builder_.ConstantR2<int32>({{7, 2, 5, 3, 8}, {3, 8, 9, 3, 4}});
- const auto source = builder_.ConstantR2<int32>({{2, 6, 4}});
+ ConstantR2<int32>(&builder_, {{7, 2, 5, 3, 8}, {3, 8, 9, 3, 4}});
+ const auto source = ConstantR2<int32>(&builder_, {{2, 6, 4}});
Array2D<int32> expected({{0, 0, 0, 0, 0}, {0, 0, 12, 0, 0}});
- builder_.SelectAndScatter(operand, ge_s32_, /*window_dimensions=*/{2, 3},
- /*window_strides=*/{1, 1}, Padding::kValid, source,
- builder_.ConstantR0<int32>(0), add_s32_);
+ SelectAndScatter(operand, ge_s32_, /*window_dimensions=*/{2, 3},
+ /*window_strides=*/{1, 1}, Padding::kValid, source,
+ ConstantR0<int32>(&builder_, 0), add_s32_);
ComputeAndCompareR2<int32>(&builder_, expected, {});
}
// Test for S32 2D array, when the padding is Padding::kSame.
XLA_TEST_F(SelectAndScatterTest, R2S32SamePadding) {
const auto operand =
- builder_.ConstantR2<int32>({{7, 2, 5, 3, 8}, {3, 8, 9, 3, 4}});
- const auto source = builder_.ConstantR2<int32>({{2, 6, 4}});
+ ConstantR2<int32>(&builder_, {{7, 2, 5, 3, 8}, {3, 8, 9, 3, 4}});
+ const auto source = ConstantR2<int32>(&builder_, {{2, 6, 4}});
Array2D<int32> expected({{0, 0, 0, 0, 4}, {0, 2, 6, 0, 0}});
- builder_.SelectAndScatter(operand, ge_s32_, /*window_dimensions=*/{2, 2},
- /*window_strides=*/{2, 2}, Padding::kSame, source,
- builder_.ConstantR0<int32>(0), add_s32_);
+ SelectAndScatter(operand, ge_s32_, /*window_dimensions=*/{2, 2},
+ /*window_strides=*/{2, 2}, Padding::kSame, source,
+ ConstantR0<int32>(&builder_, 0), add_s32_);
ComputeAndCompareR2<int32>(&builder_, expected, {});
}
@@ -308,25 +308,26 @@ XLA_TEST_F(SelectAndScatterTest, R2S32SamePadding) {
// with each other.
XLA_TEST_F(SelectAndScatterTest, R2S32SamePaddingOverlappingWindow) {
const auto operand =
- builder_.ConstantR2<int32>({{7, 2, 5, 3, 8}, {3, 8, 9, 3, 4}});
+ ConstantR2<int32>(&builder_, {{7, 2, 5, 3, 8}, {3, 8, 9, 3, 4}});
const auto source =
- builder_.ConstantR2<int32>({{2, 6, 4, 7, 1}, {3, 5, 8, 9, 10}});
+ ConstantR2<int32>(&builder_, {{2, 6, 4, 7, 1}, {3, 5, 8, 9, 10}});
Array2D<int32> expected({{0, 0, 0, 0, 8}, {0, 5, 23, 0, 19}});
- builder_.SelectAndScatter(operand, ge_s32_, /*window_dimensions=*/{2, 2},
- /*window_strides=*/{1, 1}, Padding::kSame, source,
- builder_.ConstantR0<int32>(0), add_s32_);
+ SelectAndScatter(operand, ge_s32_, /*window_dimensions=*/{2, 2},
+ /*window_strides=*/{1, 1}, Padding::kSame, source,
+ ConstantR0<int32>(&builder_, 0), add_s32_);
ComputeAndCompareR2<int32>(&builder_, expected, {});
}
XLA_TEST_F(SelectAndScatterTest, R2F32OverlappingR2Source) {
- const auto operand = builder_.ConstantR2<float>(
- {{1.5f, 2.5f, 1.5f}, {3.5f, 1.5f, 3.5f}, {4.5f, 2.5f, 4.5f}});
- const auto source = builder_.ConstantR2<float>({{1.0f, 2.0f}, {3.0f, 4.0f}});
+ const auto operand = ConstantR2<float>(
+ &builder_, {{1.5f, 2.5f, 1.5f}, {3.5f, 1.5f, 3.5f}, {4.5f, 2.5f, 4.5f}});
+ const auto source =
+ ConstantR2<float>(&builder_, {{1.0f, 2.0f}, {3.0f, 4.0f}});
Array2D<float> expected(
{{0.0f, 0.0f, 0.0f}, {1.0f, 0.0f, 2.0f}, {3.0f, 0.0f, 4.0f}});
- builder_.SelectAndScatter(operand, ge_f32_, /*window_dimensions=*/{2, 2},
- /*window_strides=*/{1, 1}, Padding::kValid, source,
- builder_.ConstantR0<float>(0.0f), add_f32_);
+ SelectAndScatter(operand, ge_f32_, /*window_dimensions=*/{2, 2},
+ /*window_strides=*/{1, 1}, Padding::kValid, source,
+ ConstantR0<float>(&builder_, 0.0f), add_f32_);
ComputeAndCompareR2<float>(&builder_, expected, {}, ErrorSpec(1e-7));
}
@@ -342,16 +343,16 @@ TEST_F(SelectAndScatterTest, R4F32Valid) {
{0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f}};
Array4D<float> o(4, 6, 15, 220);
o.FillWithPZ(pzo);
- auto operand = builder_.ConstantR4FromArray4D(o);
+ auto operand = ConstantR4FromArray4D(&builder_, o);
Array4D<float> e(4, 6, 15, 220);
e.FillWithPZ(pze);
Array4D<float> s(2, 2, 15, 220);
s.FillWithPZ(pzs);
- auto source = builder_.ConstantR4FromArray4D(s);
+ auto source = ConstantR4FromArray4D(&builder_, s);
s.FillWithPZ(pzs);
- builder_.SelectAndScatter(operand, ge_f32_, {2, 3, 1, 1}, {2, 3, 1, 1},
- Padding::kValid, source,
- builder_.ConstantR0<float>(0.0f), add_f32_);
+ SelectAndScatter(operand, ge_f32_, {2, 3, 1, 1}, {2, 3, 1, 1},
+ Padding::kValid, source, ConstantR0<float>(&builder_, 0.0f),
+ add_f32_);
ComputeAndCompareR4<float>(&builder_, e, {}, ErrorSpec(1e-7));
}
@@ -367,16 +368,16 @@ TEST_F(SelectAndScatterTest, R4F32Overlap) {
{0.0f, 0.0f, 0.0f, 1.0f, 0.0f}};
Array4D<float> o(4, 5, 17, 128);
o.FillWithPZ(pzo);
- auto operand = builder_.ConstantR4FromArray4D(o);
+ auto operand = ConstantR4FromArray4D(&builder_, o);
Array4D<float> e(4, 5, 17, 128);
e.FillWithPZ(pze);
Array4D<float> s(2, 2, 17, 128);
s.FillWithPZ(pzs);
- auto source = builder_.ConstantR4FromArray4D(s);
+ auto source = ConstantR4FromArray4D(&builder_, s);
s.FillWithPZ(pzs);
- builder_.SelectAndScatter(operand, ge_f32_, {2, 3, 1, 1}, {2, 2, 1, 1},
- Padding::kValid, source,
- builder_.ConstantR0<float>(0.0f), add_f32_);
+ SelectAndScatter(operand, ge_f32_, {2, 3, 1, 1}, {2, 2, 1, 1},
+ Padding::kValid, source, ConstantR0<float>(&builder_, 0.0f),
+ add_f32_);
ComputeAndCompareR4<float>(&builder_, e, {}, ErrorSpec(1e-7));
}
@@ -392,16 +393,16 @@ TEST_F(SelectAndScatterTest, R4F32OverlapSmall) {
{0.0f, 0.0f, 0.0f, 1.0f, 0.0f}};
Array4D<float> o(4, 5, 1, 1);
o.FillWithPZ(pzo);
- auto operand = builder_.ConstantR4FromArray4D(o);
+ auto operand = ConstantR4FromArray4D(&builder_, o);
Array4D<float> e(4, 5, 1, 1);
e.FillWithPZ(pze);
Array4D<float> s(2, 2, 1, 1);
s.FillWithPZ(pzs);
- auto source = builder_.ConstantR4FromArray4D(s);
+ auto source = ConstantR4FromArray4D(&builder_, s);
s.FillWithPZ(pzs);
- builder_.SelectAndScatter(operand, ge_f32_, {2, 3, 1, 1}, {2, 2, 1, 1},
- Padding::kValid, source,
- builder_.ConstantR0<float>(0.0f), add_f32_);
+ SelectAndScatter(operand, ge_f32_, {2, 3, 1, 1}, {2, 2, 1, 1},
+ Padding::kValid, source, ConstantR0<float>(&builder_, 0.0f),
+ add_f32_);
ComputeAndCompareR4<float>(&builder_, e, {}, ErrorSpec(1e-7));
}
@@ -414,39 +415,39 @@ TEST_F(SelectAndScatterTest, R4F32RefValidFixedSmall) {
Array2D<float> pzs = {{2.0f, 6.0f}, {3.0f, 1.0f}};
Array4D<float> o(4, 6, 4, 4);
o.FillWithPZ(pzo);
- auto operand = builder_.ConstantR4FromArray4D(o);
+ auto operand = ConstantR4FromArray4D(&builder_, o);
Array4D<float> s(2, 2, 4, 4);
s.FillWithPZ(pzs);
- auto source = builder_.ConstantR4FromArray4D(s);
+ auto source = ConstantR4FromArray4D(&builder_, s);
s.FillWithPZ(pzs);
- builder_.SelectAndScatter(operand, ge_f32_, {2, 3, 1, 1}, {2, 3, 1, 1},
- Padding::kValid, source,
- builder_.ConstantR0<float>(0.0f), add_f32_);
+ SelectAndScatter(operand, ge_f32_, {2, 3, 1, 1}, {2, 3, 1, 1},
+ Padding::kValid, source, ConstantR0<float>(&builder_, 0.0f),
+ add_f32_);
auto e = ReferenceUtil::SelectAndScatter4DGePlus(o, s, 0.0f, {2, 3, 1, 1},
{2, 3, 1, 1}, false);
ComputeAndCompareR4<float>(&builder_, *e, {}, ErrorSpec(1e-7));
}
XLA_TEST_F(SelectAndScatterTest, R1F32OverlappingWindowMaxScatter) {
- const auto operand = builder_.ConstantR1<float>({1, 2, 3, 100, 3, 2, 1});
- const auto source = builder_.ConstantR1<float>({34, 42, 53, 19});
+ const auto operand = ConstantR1<float>(&builder_, {1, 2, 3, 100, 3, 2, 1});
+ const auto source = ConstantR1<float>(&builder_, {34, 42, 53, 19});
const std::vector<float> expected = {0, 0, 0, 53, 0, 0, 0};
- builder_.SelectAndScatter(operand, ge_f32_, /*window_dimensions=*/{4},
- /*window_strides=*/{1}, Padding::kValid, source,
- builder_.ConstantR0<float>(0), max_f32_);
+ SelectAndScatter(operand, ge_f32_, /*window_dimensions=*/{4},
+ /*window_strides=*/{1}, Padding::kValid, source,
+ ConstantR0<float>(&builder_, 0), max_f32_);
ComputeAndCompareR1<float>(&builder_, expected, {}, ErrorSpec(1e-7));
}
XLA_TEST_F(SelectAndScatterTest, R1F32OverlappingWindowMinScatter) {
- const auto operand = builder_.ConstantR1<float>({1, 2, 3, 100, 3, 2, 1});
- const auto source = builder_.ConstantR1<float>({34, 42, 53, 19});
+ const auto operand = ConstantR1<float>(&builder_, {1, 2, 3, 100, 3, 2, 1});
+ const auto source = ConstantR1<float>(&builder_, {34, 42, 53, 19});
const float max_float = std::numeric_limits<float>::max();
const std::vector<float> expected = {max_float, max_float, max_float, 19,
max_float, max_float, max_float};
- builder_.SelectAndScatter(operand, ge_f32_, /*window_dimensions=*/{4},
- /*window_strides=*/{1}, Padding::kValid, source,
- builder_.ConstantR0<float>(max_float), min_f32_);
+ SelectAndScatter(operand, ge_f32_, /*window_dimensions=*/{4},
+ /*window_strides=*/{1}, Padding::kValid, source,
+ ConstantR0<float>(&builder_, max_float), min_f32_);
ComputeAndCompareR1<float>(&builder_, expected, {}, ErrorSpec(1e-7));
}
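The expected vectors in these tests follow directly from the select-and-scatter semantics: for each source element, the ge selector picks the first maximal position in the corresponding operand window (ties keep the lower index, per the comment above R2F32Tie), and the scatter computation folds the source value into the init value at that position. A rough reference sketch for the valid-padded 1-D case with an add scatter, not the XLA implementation:

#include <cstddef>
#include <vector>

// Reference semantics only; assumes ge-select, add-scatter, valid padding.
std::vector<float> SelectAndScatter1D(const std::vector<float>& operand,
                                      const std::vector<float>& source,
                                      std::size_t window, std::size_t stride,
                                      float init) {
  std::vector<float> result(operand.size(), init);
  for (std::size_t s = 0; s < source.size(); ++s) {
    std::size_t best = s * stride;
    for (std::size_t i = best + 1; i < s * stride + window; ++i) {
      // Strict '>' means ties keep the lower index, mirroring ge-select.
      if (operand[i] > operand[best]) best = i;
    }
    result[best] += source[s];  // add-scatter onto the init value
  }
  return result;
}

// SelectAndScatter1D({1, 9, 3, 7, 5, 6}, {34, 42}, 3, 3, 0.0f) yields
// {0, 34, 0, 42, 0, 0}, matching SelectAndScatterTest.R1F32 above; with
// stride 1 it also reproduces the overlapping-window accumulations.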
diff --git a/tensorflow/compiler/xla/tests/select_test.cc b/tensorflow/compiler/xla/tests/select_test.cc
index 72707f2244..59409ab26e 100644
--- a/tensorflow/compiler/xla/tests/select_test.cc
+++ b/tensorflow/compiler/xla/tests/select_test.cc
@@ -35,50 +35,52 @@ class SelectTest : public ClientLibraryTestBase {
TEST_F(SelectTest, SelectScalarF32True) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(true);
- auto on_true = builder.ConstantR0<float>(123.0f);
- auto on_false = builder.ConstantR0<float>(42.0f);
- auto result = builder.Select(pred, on_true, on_false);
+ auto pred = ConstantR0<bool>(&builder, true);
+ auto on_true = ConstantR0<float>(&builder, 123.0f);
+ auto on_false = ConstantR0<float>(&builder, 42.0f);
+ Select(pred, on_true, on_false);
ComputeAndCompareR0<float>(&builder, 123.0f, {}, error_spec_);
}
TEST_F(SelectTest, SelectScalarS32True) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(true);
- auto on_true = builder.ConstantR0<int32>(-42);
- auto on_false = builder.ConstantR0<int32>(42);
- auto result = builder.Select(pred, on_true, on_false);
+ auto pred = ConstantR0<bool>(&builder, true);
+ auto on_true = ConstantR0<int32>(&builder, -42);
+ auto on_false = ConstantR0<int32>(&builder, 42);
+ Select(pred, on_true, on_false);
ComputeAndCompareR0<int32>(&builder, -42, {});
}
TEST_F(SelectTest, SelectScalarF32False) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(false);
- auto on_true = builder.ConstantR0<float>(123.0f);
- auto on_false = builder.ConstantR0<float>(42.0f);
- auto result = builder.Select(pred, on_true, on_false);
+ auto pred = ConstantR0<bool>(&builder, false);
+ auto on_true = ConstantR0<float>(&builder, 123.0f);
+ auto on_false = ConstantR0<float>(&builder, 42.0f);
+ Select(pred, on_true, on_false);
ComputeAndCompareR0<float>(&builder, 42.0f, {}, error_spec_);
}
XLA_TEST_F(SelectTest, SelectR1S0F32WithConstantR1S0PRED) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR1<bool>({});
- auto on_true = builder.ConstantR1<float>({});
- auto on_false = builder.ConstantR1<float>({});
- auto select = builder.Select(pred, on_true, on_false);
+ auto pred = ConstantR1<bool>(&builder, {});
+ auto on_true = ConstantR1<float>(&builder, {});
+ auto on_false = ConstantR1<float>(&builder, {});
+ Select(pred, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
TEST_F(SelectTest, SelectR1F32WithConstantR1PRED) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR1<bool>({false, true, false, true, false});
- auto on_true = builder.ConstantR1<float>({-2.5f, 25.5f, 2.25f, -10.0f, 6.0f});
- auto on_false = builder.ConstantR1<float>({10.0f, 5.0f, 1.0f, 10.0f, -6.0f});
- auto select = builder.Select(pred, on_true, on_false);
+ auto pred = ConstantR1<bool>(&builder, {false, true, false, true, false});
+ auto on_true =
+ ConstantR1<float>(&builder, {-2.5f, 25.5f, 2.25f, -10.0f, 6.0f});
+ auto on_false =
+ ConstantR1<float>(&builder, {10.0f, 5.0f, 1.0f, 10.0f, -6.0f});
+ Select(pred, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {10.0f, 25.5f, 1.0f, -10.0f, -6.0f}, {},
error_spec_);
@@ -88,12 +90,12 @@ XLA_TEST_F(SelectTest, SelectR1S0F32WithCmpR1S0S32s) {
// Similar to SelectR1S0F32WithConstantR1S0PRED, except that the pred vector
// is not a constant, but rather the result of comparing two other vectors.
XlaBuilder builder(TestName());
- auto v1 = builder.ConstantR1<int32>({});
- auto v2 = builder.ConstantR1<int32>({});
- auto cmp = builder.Eq(v1, v2);
- auto on_true = builder.ConstantR1<float>({});
- auto on_false = builder.ConstantR1<float>({});
- auto select = builder.Select(cmp, on_true, on_false);
+ auto v1 = ConstantR1<int32>(&builder, {});
+ auto v2 = ConstantR1<int32>(&builder, {});
+ auto cmp = Eq(v1, v2);
+ auto on_true = ConstantR1<float>(&builder, {});
+ auto on_false = ConstantR1<float>(&builder, {});
+ Select(cmp, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
@@ -102,12 +104,14 @@ TEST_F(SelectTest, SelectR1F32WithCmpR1S32s) {
// Similar to SelectR1F32WithConstantR1PRED, except that the pred vector is
// not a constant, but rather the result of comparing two other vectors.
XlaBuilder builder(TestName());
- auto v1 = builder.ConstantR1<int32>({1, 2, 3, 4, 5});
- auto v2 = builder.ConstantR1<int32>({9, 2, 9, 4, 9});
- auto cmp = builder.Eq(v1, v2);
- auto on_true = builder.ConstantR1<float>({-2.5f, 25.5f, 2.25f, -10.0f, 6.0f});
- auto on_false = builder.ConstantR1<float>({10.0f, 5.0f, 1.0f, 10.0f, -6.0f});
- auto select = builder.Select(cmp, on_true, on_false);
+ auto v1 = ConstantR1<int32>(&builder, {1, 2, 3, 4, 5});
+ auto v2 = ConstantR1<int32>(&builder, {9, 2, 9, 4, 9});
+ auto cmp = Eq(v1, v2);
+ auto on_true =
+ ConstantR1<float>(&builder, {-2.5f, 25.5f, 2.25f, -10.0f, 6.0f});
+ auto on_false =
+ ConstantR1<float>(&builder, {10.0f, 5.0f, 1.0f, 10.0f, -6.0f});
+ Select(cmp, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {10.0f, 25.5f, 1.0f, -10.0f, -6.0f}, {},
error_spec_);
@@ -116,12 +120,14 @@ TEST_F(SelectTest, SelectR1F32WithCmpR1S32s) {
TEST_F(SelectTest, SelectR1F32WithCmpR1F32s) {
// Similar to SelectR1F32WithCmpR1S32s, except "gt"-comparing two R1F32s.
XlaBuilder builder(TestName());
- auto v1 = builder.ConstantR1<float>({1.0f, 2.0f, 3.0f, 4.0f, 5.0f});
- auto v2 = builder.ConstantR1<float>({-1.0f, -2.0f, 13.0f, 14.0f, 4.4f});
- auto cmp = builder.Gt(v1, v2);
- auto on_true = builder.ConstantR1<float>({-2.5f, 25.5f, 2.25f, -10.0f, 6.0f});
- auto on_false = builder.ConstantR1<float>({10.0f, 5.0f, 1.0f, 10.0f, -6.0f});
- auto select = builder.Select(cmp, on_true, on_false);
+ auto v1 = ConstantR1<float>(&builder, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f});
+ auto v2 = ConstantR1<float>(&builder, {-1.0f, -2.0f, 13.0f, 14.0f, 4.4f});
+ auto cmp = Gt(v1, v2);
+ auto on_true =
+ ConstantR1<float>(&builder, {-2.5f, 25.5f, 2.25f, -10.0f, 6.0f});
+ auto on_false =
+ ConstantR1<float>(&builder, {10.0f, 5.0f, 1.0f, 10.0f, -6.0f});
+ Select(cmp, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {-2.5f, 25.5f, 1.0f, 10.0f, 6.0f}, {},
error_spec_);
@@ -140,8 +146,8 @@ TEST_F(SelectTest, SelectR1F32WithCmpR1F32sFromParamsSmall) {
{21.0f, 22.0f, 23.0f, 24.0f}, /*parameter_number=*/1, /*name=*/"v2",
/*builder=*/&builder, /*data_handle=*/&v2);
- auto cmp = builder.Gt(v1, v2);
- auto select = builder.Select(cmp, v1, v2);
+ auto cmp = Gt(v1, v2);
+ Select(cmp, v1, v2);
ComputeAndCompareR1<float>(&builder, {41.0f, 22.0f, 23.0f, 84.0f},
{param0_data.get(), param1_data.get()},
error_spec_);
@@ -181,8 +187,8 @@ TEST_F(SelectTest, SelectR1F32WithCmpR1F32sFromParamsLarge) {
CreateR1Parameter<float>(v2vec, /*parameter_number=*/1, /*name=*/"v2",
/*builder=*/&builder, /*data_handle=*/&v2);
- auto cmp = builder.Gt(v1, v2);
- auto select = builder.Select(cmp, v1, v2);
+ auto cmp = Gt(v1, v2);
+ Select(cmp, v1, v2);
ComputeAndCompareR1<float>(&builder, expected_vec,
{param0_data.get(), param1_data.get()},
error_spec_);
@@ -192,14 +198,14 @@ TEST_F(SelectTest, SelectR1F32WithCmpR1S32ToScalar) {
// "gt"-compares a R1S32 with a S32 scalar, and uses the resulting R1PRED to
// select between two R1F32s.
XlaBuilder builder(TestName());
- auto v = builder.ConstantR1<int32>({1, -1, 2, -2});
- auto s = builder.ConstantR0<int32>(0);
- auto cmp = builder.Gt(v, s);
+ auto v = ConstantR1<int32>(&builder, {1, -1, 2, -2});
+ auto s = ConstantR0<int32>(&builder, 0);
+ auto cmp = Gt(v, s);
- auto on_true = builder.ConstantR1<float>({11.0f, 22.0f, 33.0f, 44.0f});
+ auto on_true = ConstantR1<float>(&builder, {11.0f, 22.0f, 33.0f, 44.0f});
auto on_false =
- builder.ConstantR1<float>({-111.0f, -222.0f, -333.0f, -444.0f});
- auto select = builder.Select(cmp, on_true, on_false);
+ ConstantR1<float>(&builder, {-111.0f, -222.0f, -333.0f, -444.0f});
+ Select(cmp, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {11.0f, -222.0f, 33.0f, -444.0f}, {},
error_spec_);
@@ -209,14 +215,14 @@ TEST_F(SelectTest, SelectR1F32WithCmpR1F32ToScalar) {
// "gt"-compares a R1F32 with a F32 scalar, and uses the resulting R1PRED to
// select between two R1F32s.
XlaBuilder builder(TestName());
- auto v = builder.ConstantR1<float>({1.0f, 2.0f, 3.0f, 4.0f});
- auto s = builder.ConstantR0<float>(2.5f);
- auto cmp = builder.Gt(v, s);
+ auto v = ConstantR1<float>(&builder, {1.0f, 2.0f, 3.0f, 4.0f});
+ auto s = ConstantR0<float>(&builder, 2.5f);
+ auto cmp = Gt(v, s);
- auto on_true = builder.ConstantR1<float>({11.0f, 22.0f, 33.0f, 44.0f});
+ auto on_true = ConstantR1<float>(&builder, {11.0f, 22.0f, 33.0f, 44.0f});
auto on_false =
- builder.ConstantR1<float>({-111.0f, -222.0f, -333.0f, -444.0f});
- auto select = builder.Select(cmp, on_true, on_false);
+ ConstantR1<float>(&builder, {-111.0f, -222.0f, -333.0f, -444.0f});
+ Select(cmp, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {-111.0f, -222.0f, 33.0f, 44.0f}, {},
error_spec_);
@@ -225,10 +231,10 @@ TEST_F(SelectTest, SelectR1F32WithCmpR1F32ToScalar) {
XLA_TEST_F(SelectTest, SelectR1S0F32WithScalarPredicate) {
for (bool which : {false, true}) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(which);
- auto on_true = builder.ConstantR1<float>({});
- auto on_false = builder.ConstantR1<float>({});
- auto select = builder.Select(pred, on_true, on_false);
+ auto pred = ConstantR0<bool>(&builder, which);
+ auto on_true = ConstantR1<float>(&builder, {});
+ auto on_false = ConstantR1<float>(&builder, {});
+ Select(pred, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {}, {}, error_spec_);
}
@@ -236,20 +242,20 @@ XLA_TEST_F(SelectTest, SelectR1S0F32WithScalarPredicate) {
TEST_F(SelectTest, SelectR1F32WithScalarPredicateTrue) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(true);
- auto on_true = builder.ConstantR1<float>({-2.5f, 25.5f});
- auto on_false = builder.ConstantR1<float>({10.0f, 5.0f});
- auto select = builder.Select(pred, on_true, on_false);
+ auto pred = ConstantR0<bool>(&builder, true);
+ auto on_true = ConstantR1<float>(&builder, {-2.5f, 25.5f});
+ auto on_false = ConstantR1<float>(&builder, {10.0f, 5.0f});
+ Select(pred, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {-2.5f, 25.5f}, {}, error_spec_);
}
TEST_F(SelectTest, SelectR1F32WithScalarPredicateFalse) {
XlaBuilder builder(TestName());
- auto pred = builder.ConstantR0<bool>(false);
- auto on_true = builder.ConstantR1<float>({-2.5f, 25.5f});
- auto on_false = builder.ConstantR1<float>({10.0f, 5.0f});
- auto select = builder.Select(pred, on_true, on_false);
+ auto pred = ConstantR0<bool>(&builder, false);
+ auto on_true = ConstantR1<float>(&builder, {-2.5f, 25.5f});
+ auto on_false = ConstantR1<float>(&builder, {10.0f, 5.0f});
+ Select(pred, on_true, on_false);
ComputeAndCompareR1<float>(&builder, {10.0f, 5.0f}, {}, error_spec_);
}
diff --git a/tensorflow/compiler/xla/tests/slice_test.cc b/tensorflow/compiler/xla/tests/slice_test.cc
index 5653bf11a7..48138e7b07 100644
--- a/tensorflow/compiler/xla/tests/slice_test.cc
+++ b/tensorflow/compiler/xla/tests/slice_test.cc
@@ -42,8 +42,8 @@ TEST_F(SliceTest, Slice3x3x3_To_3x3x1_F32) {
values.FillIota(0);
XlaBuilder builder(TestName());
- auto original = builder.ConstantR3FromArray3D<float>(values);
- builder.Slice(original, {0, 0, 0}, {3, 3, 1}, {1, 1, 1});
+ auto original = ConstantR3FromArray3D<float>(&builder, values);
+ Slice(original, {0, 0, 0}, {3, 3, 1}, {1, 1, 1});
Array3D<float> expected{
{{0.0}, {3.0}, {6.0}}, {{9.0}, {12.0}, {15.0}}, {{18.0}, {21.0}, {24.0}}};
@@ -55,8 +55,8 @@ TEST_F(SliceTest, Slice3x3x3_To_3x1x3_F32) {
values.FillIota(0);
XlaBuilder builder(TestName());
- auto original = builder.ConstantR3FromArray3D<float>(values);
- builder.Slice(original, {0, 0, 0}, {3, 1, 3}, {1, 1, 1});
+ auto original = ConstantR3FromArray3D<float>(&builder, values);
+ Slice(original, {0, 0, 0}, {3, 1, 3}, {1, 1, 1});
Array3D<float> expected{
{{0.0, 1.0, 2.0}}, {{9.0, 10.0, 11.0}}, {{18.0, 19.0, 20.0}}};
@@ -68,8 +68,8 @@ TEST_F(SliceTest, Slice3x3x3_To_1x3x3_F32) {
values.FillIota(0);
XlaBuilder builder(TestName());
- auto original = builder.ConstantR3FromArray3D<float>(values);
- builder.Slice(original, {0, 0, 0}, {1, 3, 3}, {1, 1, 1});
+ auto original = ConstantR3FromArray3D<float>(&builder, values);
+ Slice(original, {0, 0, 0}, {1, 3, 3}, {1, 1, 1});
Array3D<float> expected{
{{{0.0, 1.0, 2.0}, {3.0, 4.0, 5.0}, {6.0, 7.0, 8.0}}}};
@@ -78,24 +78,24 @@ TEST_F(SliceTest, Slice3x3x3_To_1x3x3_F32) {
XLA_TEST_F(SliceTest, Slice0x0to0x0F32) {
XlaBuilder builder(TestName());
- auto original = builder.ConstantR2FromArray2D<float>(Array2D<float>(0, 0));
- builder.Slice(original, {0, 0}, {0, 0}, {1, 1});
+ auto original = ConstantR2FromArray2D<float>(&builder, Array2D<float>(0, 0));
+ Slice(original, {0, 0}, {0, 0}, {1, 1});
ComputeAndCompareR2<float>(&builder, Array2D<float>(0, 0), {});
}
XLA_TEST_F(SliceTest, Slice0x20to0x5F32) {
XlaBuilder builder(TestName());
- auto original = builder.ConstantR2FromArray2D<float>(Array2D<float>(0, 20));
- builder.Slice(original, {0, 15}, {0, 20}, {1, 1});
+ auto original = ConstantR2FromArray2D<float>(&builder, Array2D<float>(0, 20));
+ Slice(original, {0, 15}, {0, 20}, {1, 1});
ComputeAndCompareR2<float>(&builder, Array2D<float>(0, 5), {});
}
XLA_TEST_F(SliceTest, Slice3x0to2x0F32) {
XlaBuilder builder(TestName());
- auto original = builder.ConstantR2FromArray2D<float>(Array2D<float>(3, 0));
- builder.Slice(original, {1, 0}, {3, 0}, {1, 1});
+ auto original = ConstantR2FromArray2D<float>(&builder, Array2D<float>(3, 0));
+ Slice(original, {1, 0}, {3, 0}, {1, 1});
ComputeAndCompareR2<float>(&builder, Array2D<float>(2, 0), {});
}
@@ -109,8 +109,8 @@ XLA_TEST_F(SliceTest, SliceQuadrantOf256x256) {
}
XlaBuilder builder(TestName());
- auto original = builder.ConstantR2FromArray2D<float>(values);
- builder.Slice(original, {128, 128}, {256, 256}, {1, 1});
+ auto original = ConstantR2FromArray2D<float>(&builder, values);
+ Slice(original, {128, 128}, {256, 256}, {1, 1});
Array2D<float> expected(128, 128);
for (int row = 0; row < 128; ++row) {
@@ -127,8 +127,8 @@ TEST_F(SliceTest, Slice_1x4096_To_1x1024) {
std::iota(values.data(), values.data() + 4096, 0.0);
XlaBuilder builder(TestName());
- auto original = builder.ConstantR2FromArray2D<float>(values);
- builder.Slice(original, {0, 3072}, {1, 4096}, {1, 1});
+ auto original = ConstantR2FromArray2D<float>(&builder, values);
+ Slice(original, {0, 3072}, {1, 4096}, {1, 1});
Array2D<float> expected(1, 1024);
std::iota(expected.data(), expected.data() + 1024, 3072.0);
@@ -148,8 +148,8 @@ TEST_F(SliceTest, Slice_16x4_To_16x2) {
}
}
XlaBuilder builder(TestName());
- auto original = builder.ConstantR2FromArray2D<float>(values);
- builder.Slice(original, {0, 0}, {16, 2}, {1, 1});
+ auto original = ConstantR2FromArray2D<float>(&builder, values);
+ Slice(original, {0, 0}, {16, 2}, {1, 1});
ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.000001));
}
@@ -160,8 +160,8 @@ TEST_F(SliceTest, SliceR4ThreeDimsMiddleMinor) {
auto expected = ReferenceUtil::Slice4D(
values, {{1, 0, 8, 0}}, {{2, 2, 16, 128}}, /*strides=*/{{1, 1, 1, 1}});
XlaBuilder builder(TestName());
- auto original = builder.ConstantR4FromArray4D(values);
- builder.Slice(original, {1, 0, 8, 0}, {2, 2, 16, 128}, {1, 1, 1, 1});
+ auto original = ConstantR4FromArray4D(&builder, values);
+ Slice(original, {1, 0, 8, 0}, {2, 2, 16, 128}, {1, 1, 1, 1});
ComputeAndCompareR4(&builder, *expected, {}, ErrorSpec(0.000001));
}
@@ -170,11 +170,11 @@ XLA_TEST_F(SliceTest, StridedSliceR4WithOutputLayout) {
values.FillRandom(3.14f);
auto expected = ReferenceUtil::Slice4D(values, {{0, 0, 0, 0}}, {{2, 4, 6, 8}},
/*strides=*/{{1, 1, 2, 1}});
- auto expected_literal = Literal::CreateR4FromArray4DWithLayout(
+ auto expected_literal = LiteralUtil::CreateR4FromArray4DWithLayout(
*expected, LayoutUtil::MakeLayout({0, 1, 2, 3}));
XlaBuilder builder(TestName());
- auto original = builder.ConstantR4FromArray4D(values);
- builder.Slice(original, {0, 0, 0, 0}, {2, 4, 6, 8}, {1, 1, 2, 1});
+ auto original = ConstantR4FromArray4D(&builder, values);
+ Slice(original, {0, 0, 0, 0}, {2, 4, 6, 8}, {1, 1, 2, 1});
ComputeAndCompareLiteral(&builder, *expected_literal, {}, ErrorSpec(0.000001),
&expected_literal->shape());
}
@@ -197,12 +197,12 @@ class SliceR1Test : public ClientLibraryTestBase,
// vector<bool>.
tensorflow::gtl::InlinedVector<NativeT, 1> input(spec.input_dim0);
std::iota(input.begin(), input.end(), NativeT());
- auto literal = Literal::CreateR1<NativeT>(input);
+ auto literal = LiteralUtil::CreateR1<NativeT>(input);
XlaBuilder builder(TestName());
- auto original = builder.Parameter(0, literal->shape(), "p0");
- builder.Slice(original, {spec.slice_start}, {spec.slice_limit},
- {spec.slice_stride});
+ auto original = Parameter(&builder, 0, literal->shape(), "p0");
+ Slice(original, {spec.slice_start}, {spec.slice_limit},
+ {spec.slice_stride});
// Ditto.
tensorflow::gtl::InlinedVector<NativeT, 1> expected;
@@ -368,12 +368,12 @@ XLA_TEST_P(SliceR2Test, DoIt) {
const R2Spec& spec = GetParam();
Array2D<int32> input(spec.input_dim0, spec.input_dim1);
input.FillUnique();
- auto literal = Literal::CreateR2FromArray2DWithLayout(
+ auto literal = LiteralUtil::CreateR2FromArray2DWithLayout(
input, LayoutUtil::MakeLayout(spec.layout));
XlaBuilder builder(TestName());
- auto a = builder.Parameter(0, literal->shape(), "p0");
- builder.Slice(a, spec.slice_starts, spec.slice_limits, spec.slice_strides);
+ auto a = Parameter(&builder, 0, literal->shape(), "p0");
+ Slice(a, spec.slice_starts, spec.slice_limits, spec.slice_strides);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> arg,
client_->TransferToServer(*literal));
@@ -463,13 +463,12 @@ class SliceR4Test : public ClientLibraryTestBase,
auto expected = ReferenceUtil::Slice4D(
values, spec.slice_starts, spec.slice_limits, spec.slice_strides);
XlaBuilder builder(TestName());
- auto literal = Literal::CreateR4FromArray4DWithLayout(
+ auto literal = LiteralUtil::CreateR4FromArray4DWithLayout(
values, LayoutUtil::MakeLayout(spec.input_layout));
- auto parameter = builder.Parameter(0, literal->shape(), "p0");
+ auto parameter = Parameter(&builder, 0, literal->shape(), "p0");
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> arg,
client_->TransferToServer(*literal));
- builder.Slice(parameter, spec.slice_starts, spec.slice_limits,
- spec.slice_strides);
+ Slice(parameter, spec.slice_starts, spec.slice_limits, spec.slice_strides);
ComputeAndCompareR4(&builder, *expected, {arg.get()}, ErrorSpec(0.000001));
}
};
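For the slice tests, the expected shapes follow from the start/limit/stride triples: limits are exclusive, each output dimension holds ceil((limit - start) / stride) elements, and element i of the output reads input[start + i * stride] along that dimension. A sketch of that arithmetic, under the assumption that these are the semantics the expected arrays encode:

#include <cstdint>

// Output dimension size for one (start, limit, stride), limits exclusive.
int64_t SliceOutputDim(int64_t start, int64_t limit, int64_t stride) {
  return (limit - start + stride - 1) / stride;
}

// Slice_1x4096_To_1x1024 above: SliceOutputDim(3072, 4096, 1) == 1024, and
// output element i reads input[0][3072 + i], hence the iota starting at 3072.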
diff --git a/tensorflow/compiler/xla/tests/test_utils.cc b/tensorflow/compiler/xla/tests/test_utils.cc
index 000535a982..2647937013 100644
--- a/tensorflow/compiler/xla/tests/test_utils.cc
+++ b/tensorflow/compiler/xla/tests/test_utils.cc
@@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/tests/test_utils.h"
+#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/primitive_util.h"
#include "tensorflow/compiler/xla/service/hlo_dataflow_analysis.h"
#include "tensorflow/compiler/xla/service/hlo_verifier.h"
@@ -110,7 +111,7 @@ StatusOr<std::unique_ptr<Literal>> MakeFakeLiteralInternal(
MakeFakeLiteralInternal(element_shape, engine));
elements.push_back(std::move(element));
}
- return Literal::MakeTupleOwned(std::move(elements));
+ return LiteralUtil::MakeTupleOwned(std::move(elements));
}
if (engine == nullptr) {
return Literal::CreateFromShape(shape);
@@ -161,6 +162,9 @@ StatusOr<std::unique_ptr<Literal>> MakeFakeLiteralInternal(
}));
break;
}
+ // Token requires no data.
+ case TOKEN:
+ break;
default:
return Unimplemented("Unsupported type for fake literal generation: %s",
ShapeUtil::HumanString(shape).c_str());
@@ -217,7 +221,7 @@ std::unique_ptr<Literal> MakeRandomNonwrappingSliceIndex(
start_indices[i] = generator(*engine);
}
}
- return Literal::CreateR1<int32>(start_indices);
+ return LiteralUtil::CreateR1<int32>(start_indices);
}
// Use dataflow analysis on each parameter to see if there are uses that would
@@ -315,9 +319,9 @@ StatusOr<std::unique_ptr<Literal>> CreateLiteralForConstrainedUses(
} else if (needs_constant != nullptr) {
switch (constant_type) {
case ConstantType::kZero:
- return Literal::Zero(param.shape().element_type()).CloneToUnique();
+ return LiteralUtil::Zero(param.shape().element_type()).CloneToUnique();
case ConstantType::kOne:
- return Literal::One(param.shape().element_type()).CloneToUnique();
+ return LiteralUtil::One(param.shape().element_type()).CloneToUnique();
case ConstantType::kUnknown:
// We want the identity element for the computation, but we don't really
// know what it is - so any value we generate will be just as wrong.
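This file picks up the other mechanical rename in the patch: the static factory helpers move from Literal to LiteralUtil, and the old monolithic include is split so that literal.h declares the Literal class itself while literal_util.h carries the factories. A minimal sketch of the new spelling (MakeExampleLiteral is a hypothetical name):

#include <memory>

#include "tensorflow/compiler/xla/literal_util.h"

std::unique_ptr<xla::Literal> MakeExampleLiteral() {
  // Old, removed spelling: Literal::CreateR1<float>({1.f, 2.f});
  return xla::LiteralUtil::CreateR1<float>({1.f, 2.f});
}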
diff --git a/tensorflow/compiler/xla/tests/test_utils.h b/tensorflow/compiler/xla/tests/test_utils.h
index a8689f6498..e59f215a9a 100644
--- a/tensorflow/compiler/xla/tests/test_utils.h
+++ b/tensorflow/compiler/xla/tests/test_utils.h
@@ -21,7 +21,7 @@ limitations under the License.
#include <random>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
diff --git a/tensorflow/compiler/xla/tests/test_utils_test.cc b/tensorflow/compiler/xla/tests/test_utils_test.cc
index 59afd28a80..8f424ae81f 100644
--- a/tensorflow/compiler/xla/tests/test_utils_test.cc
+++ b/tensorflow/compiler/xla/tests/test_utils_test.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/tests/test_utils.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/tests/local_client_test_base.h"
#include "tensorflow/compiler/xla/tests/test_macros.h"
@@ -31,16 +32,16 @@ XLA_TEST_F(TestUtilsTest, UnusedParam) {
XlaBuilder builder(TestName());
// Make the reduction lambda.
Shape single_float = ShapeUtil::MakeShape(F32, {});
- builder.Parameter(0, single_float, "unused");
- builder.Parameter(1, single_float, "used");
+ Parameter(&builder, 0, single_float, "unused");
+ Parameter(&builder, 1, single_float, "used");
auto computation_status = builder.Build();
TF_ASSERT_OK(computation_status.status());
// Make the reduction.
Shape pair_float = ShapeUtil::MakeShape(F32, {2});
- builder.Reduce(builder.Parameter(0, pair_float, "operand"),
- builder.Parameter(1, single_float, "init"),
- computation_status.ValueOrDie(), {0});
+ Reduce(Parameter(&builder, 0, pair_float, "operand"),
+ Parameter(&builder, 1, single_float, "init"),
+ computation_status.ValueOrDie(), {0});
computation_status = builder.Build();
TF_ASSERT_OK(computation_status.status());
@@ -53,5 +54,23 @@ XLA_TEST_F(TestUtilsTest, UnusedParam) {
TF_ASSERT_OK(MakeFakeArguments(&module).status());
}
+XLA_TEST_F(TestUtilsTest, Token) {
+ auto module = ParseHloString(
+ R"(HloModule outfeed_module
+
+ ENTRY InfeedToOutfeed {
+ token = token[] parameter(0)
+ infeed = ((u32[3]{0}, pred[]), token[]) infeed(token)
+ infeed.data = (u32[3]{0}, pred[]) get-tuple-element(infeed), index=0
+ outfeed = token[] outfeed(infeed.data, token)
+ ROOT infeed.1 = ((u32[3]{0}, pred[]), token[]) infeed(token)
+ infeed.1.data = (u32[3]{0}, pred[]) get-tuple-element(infeed.1), index=0
+ infeed.1.token = token[] get-tuple-element(infeed.1), index=1
+ outfeed.1 = token[] outfeed(infeed.1.data, infeed.1.token)
+ })")
+ .ValueOrDie();
+ TF_ASSERT_OK(MakeFakeArguments(module.get()).status());
+}
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/tests/token_hlo_test.cc b/tensorflow/compiler/xla/tests/token_hlo_test.cc
index 8541698576..2bdbd08309 100644
--- a/tensorflow/compiler/xla/tests/token_hlo_test.cc
+++ b/tensorflow/compiler/xla/tests/token_hlo_test.cc
@@ -31,27 +31,29 @@ class TokenHloTest : public HloTestBase {};
XLA_TEST_F(TokenHloTest, SingleTokenInstruction) {
std::unique_ptr<HloModule> module = CreateNewModule();
auto builder = HloComputation::Builder(TestName());
- builder.AddInstruction(HloInstruction::CreateGenerateToken({}));
- builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(42)));
+ builder.AddInstruction(HloInstruction::CreateToken());
module->AddEntryComputation(builder.Build());
- EXPECT_IS_OK(HloVerifier().Run(module.get()).status());
+
+ TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> result,
+ Execute(std::move(module), {}));
+ EXPECT_TRUE(LiteralTestUtil::Equal(*result, *LiteralUtil::CreateToken()));
}
XLA_TEST_F(TokenHloTest, TokenTree) {
std::unique_ptr<HloModule> module = CreateNewModule();
auto builder = HloComputation::Builder(TestName());
- auto token0 = builder.AddInstruction(HloInstruction::CreateGenerateToken({}));
- auto token1 = builder.AddInstruction(HloInstruction::CreateGenerateToken({}));
- auto token2 = builder.AddInstruction(HloInstruction::CreateGenerateToken({}));
+ auto token0 = builder.AddInstruction(HloInstruction::CreateToken());
+ auto token1 = builder.AddInstruction(HloInstruction::CreateToken());
+ auto token2 = builder.AddInstruction(HloInstruction::CreateToken());
builder.AddInstruction(
- HloInstruction::CreateGenerateToken({token0, token0, token1, token2}));
- builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(42)));
+ HloInstruction::CreateAfterAll({token0, token0, token1, token2}));
module->AddEntryComputation(builder.Build());
- EXPECT_IS_OK(HloVerifier().Run(module.get()).status());
+
+ TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> result,
+ Execute(std::move(module), {}));
+ EXPECT_TRUE(LiteralTestUtil::Equal(*result, *LiteralUtil::CreateToken()));
}
XLA_TEST_F(TokenHloTest, InvalidTokenShapedEntryParameter) {
@@ -62,7 +64,7 @@ XLA_TEST_F(TokenHloTest, InvalidTokenShapedEntryParameter) {
builder.AddInstruction(
HloInstruction::CreateParameter(1, ShapeUtil::MakeTokenShape(), "p1"));
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(42)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(42)));
module->AddEntryComputation(builder.Build());
Status status = HloVerifier().Run(module.get()).status();
@@ -89,26 +91,14 @@ XLA_TEST_F(TokenHloTest, InvalidTupleTokenShapedEntryParameter) {
::testing::HasSubstr("Entry parameter 0 is or contains a token shape"));
}
-XLA_TEST_F(TokenHloTest, InvalidTokenRoot) {
- std::unique_ptr<HloModule> module = CreateNewModule();
- auto builder = HloComputation::Builder(TestName());
- builder.AddInstruction(HloInstruction::CreateGenerateToken({}));
- module->AddEntryComputation(builder.Build());
-
- Status status = HloVerifier().Run(module.get()).status();
- ASSERT_IS_NOT_OK(status);
- EXPECT_THAT(status.error_message(),
- ::testing::HasSubstr("Entry root is or contains a token shape"));
-}
-
XLA_TEST_F(TokenHloTest, InvalidOperandToTokenInstruction) {
std::unique_ptr<HloModule> module = CreateNewModule();
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p0"));
- builder.AddInstruction(HloInstruction::CreateGenerateToken({param}));
+ builder.AddInstruction(HloInstruction::CreateAfterAll({param}));
builder.AddInstruction(
- HloInstruction::CreateConstant(Literal::CreateR0<int32>(123)));
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(123)));
module->AddEntryComputation(builder.Build());
Status status = HloVerifier().Run(module.get()).status();
@@ -120,7 +110,7 @@ XLA_TEST_F(TokenHloTest, InvalidOperandToTokenInstruction) {
XLA_TEST_F(TokenHloTest, TokenInWhileLoop) {
- // Thread a token around a while loop. Token is created and consumed by a
- // GenerateToken instruction in the while body.
+ // Thread a token around a while loop. Token is created and consumed by an
+ // AfterAll instruction in the while body.
string module_string = R"(
HloModule TokenInWhileLoop
@@ -130,8 +120,8 @@ HloModule TokenInWhileLoop
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
- %generate-token = token[] generate-token(token[] %get-tuple-element.2)
- ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %generate-token)
+ %after-all = token[] after-all(token[] %get-tuple-element.2)
+ ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
@@ -143,7 +133,7 @@ HloModule TokenInWhileLoop
ENTRY %TokenInWhileLoop () -> s32[] {
%zero = s32[] constant(0)
- %init_token = token[] generate-token()
+ %init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
ROOT %root = s32[] get-tuple-element((s32[], token[]) %while), index=0
@@ -172,13 +162,13 @@ HloModule TokenInConditional
%False (param.2: s32[]) -> (s32[], token[]) {
%param.2 = s32[] parameter(0)
- %new_token = token[] generate-token()
+ %new_token = token[] after-all()
ROOT %tuple = (s32[], token[]) tuple(s32[] %param.2, token[] %new_token)
}
ENTRY %TokenInConditional (param.3: pred[]) -> s32[] {
%param.3 = pred[] parameter(0)
- %init_token = token[] generate-token()
+ %init_token = token[] after-all()
%seven = s32[] constant(7)
%cond = (s32[], token[]) conditional(pred[] %param.3, token[] %init_token, s32[] %seven), true_computation=True, false_computation=False
ROOT %root = s32[] get-tuple-element((s32[], token[]) %cond), index=0
@@ -194,7 +184,7 @@ ENTRY %TokenInConditional (param.3: pred[]) -> s32[] {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
HloRunner::CreateModuleFromString(module_string, debug_options));
- auto arg = Literal::CreateR0<bool>(true);
+ auto arg = LiteralUtil::CreateR0<bool>(true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> result,
Execute(std::move(module), {arg.get()}));
EXPECT_EQ(42, result->Get<int32>({}));
@@ -205,7 +195,7 @@ ENTRY %TokenInConditional (param.3: pred[]) -> s32[] {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
HloRunner::CreateModuleFromString(module_string, debug_options));
- auto arg = Literal::CreateR0<bool>(false);
+ auto arg = LiteralUtil::CreateR0<bool>(false);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Literal> result,
Execute(std::move(module), {arg.get()}));
EXPECT_EQ(7, result->Get<int32>({}));
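The token hunks rename the HLO instruction in both APIs: the nullary generate-token becomes HloInstruction::CreateToken(), the variadic joining form becomes HloInstruction::CreateAfterAll(...), and both print as after-all in HLO text. A small sketch using only the builder calls visible above (JoinTokens is a hypothetical name):

#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"

void JoinTokens(xla::HloComputation::Builder* b) {
  // Old, removed spellings: CreateGenerateToken({}) and
  // CreateGenerateToken({t0, t1}).
  auto* t0 = b->AddInstruction(xla::HloInstruction::CreateToken());
  auto* t1 = b->AddInstruction(xla::HloInstruction::CreateToken());
  // Joins token dependencies; prints as `token[] after-all(...)` in HLO text.
  b->AddInstruction(xla::HloInstruction::CreateAfterAll({t0, t1}));
}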
diff --git a/tensorflow/compiler/xla/tests/transfer_manager_test.cc b/tensorflow/compiler/xla/tests/transfer_manager_test.cc
index 85799d4cfb..0f86b7f20f 100644
--- a/tensorflow/compiler/xla/tests/transfer_manager_test.cc
+++ b/tensorflow/compiler/xla/tests/transfer_manager_test.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/compiler/xla/layout_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/device_memory_allocator.h"
#include "tensorflow/compiler/xla/service/generic_transfer_manager.h"
#include "tensorflow/compiler/xla/service/shaped_buffer.h"
@@ -68,7 +68,7 @@ class TransferManagerTest : public LocalClientTestBase {
};
XLA_TEST_F(TransferManagerTest, TransferR0U32) {
- std::unique_ptr<Literal> literal = Literal::CreateR0<uint32>(42);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR0<uint32>(42);
const Shape& shape = literal->shape();
auto device_buffer = AllocateDeviceBuffer(shape);
@@ -84,7 +84,7 @@ XLA_TEST_F(TransferManagerTest, TransferR0U32) {
XLA_TEST_F(TransferManagerTest, TransferR1F32) {
std::unique_ptr<Literal> literal =
- Literal::CreateR1<float>({1.25f, 2.5f, -17.0f, -20.125f});
+ LiteralUtil::CreateR1<float>({1.25f, 2.5f, -17.0f, -20.125f});
const Shape& shape = literal->shape();
auto device_buffer = AllocateDeviceBuffer(shape);
@@ -102,7 +102,7 @@ XLA_TEST_F(TransferManagerTest, TransferR1F32) {
XLA_TEST_F(TransferManagerTest, TransferR1LargeF32) {
std::vector<float> test_vector(1024 * 1024);
std::iota(test_vector.begin(), test_vector.end(), 0);
- std::unique_ptr<Literal> literal = Literal::CreateR1<float>(test_vector);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR1<float>(test_vector);
const Shape& shape = literal->shape();
auto device_buffer = AllocateDeviceBuffer(shape);
@@ -118,7 +118,7 @@ XLA_TEST_F(TransferManagerTest, TransferR1LargeF32) {
XLA_TEST_F(TransferManagerTest, TransferR1U8) {
const char* test_string = "0123456789abcdef";
- std::unique_ptr<Literal> literal = Literal::CreateR1U8(test_string);
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR1U8(test_string);
const Shape& shape = literal->shape();
auto device_buffer = AllocateDeviceBuffer(shape);
@@ -134,7 +134,7 @@ XLA_TEST_F(TransferManagerTest, TransferR1U8) {
XLA_TEST_F(TransferManagerTest, TransferR2F32) {
std::unique_ptr<Literal> literal =
- Literal::CreateR2<float>({{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}});
+ LiteralUtil::CreateR2<float>({{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}});
const Shape& shape = literal->shape();
auto device_buffer = AllocateDeviceBuffer(shape);
@@ -151,7 +151,7 @@ XLA_TEST_F(TransferManagerTest, TransferR2F32) {
XLA_TEST_F(TransferManagerTest,
TransferR2F32AndChangeLayoutTransferringToDevice) {
- std::unique_ptr<Literal> literal = Literal::CreateR2WithLayout<float>(
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR2WithLayout<float>(
{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}, LayoutUtil::MakeLayout({0, 1}));
const Shape ondevice_shape =
ShapeUtil::MakeShapeWithLayout(F32, {2, 3}, {1, 0});
@@ -172,10 +172,10 @@ XLA_TEST_F(TransferManagerTest,
}
XLA_TEST_F(TransferManagerTest, TransferTuple) {
- std::unique_ptr<Literal> literal = Literal::MakeTuple(
- {Literal::CreateR0<float>(123.0f).get(),
- Literal::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}).get(),
- Literal::CreateR1<float>({44.0f, -10.0f, 3333333.3f}).get()});
+ std::unique_ptr<Literal> literal = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<float>(123.0f).get(),
+ LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}).get(),
+ LiteralUtil::CreateR1<float>({44.0f, -10.0f, 3333333.3f}).get()});
auto device_buffer = AllocateDeviceBuffer(literal->shape());
// Round trip literal through device.
@@ -189,7 +189,7 @@ XLA_TEST_F(TransferManagerTest, TransferTuple) {
}
XLA_TEST_F(TransferManagerTest, TransferEmptyTuple) {
- std::unique_ptr<Literal> literal = Literal::MakeTuple({});
+ std::unique_ptr<Literal> literal = LiteralUtil::MakeTuple({});
auto device_buffer = AllocateDeviceBuffer(literal->shape());
// Round trip literal through device.
@@ -203,13 +203,13 @@ XLA_TEST_F(TransferManagerTest, TransferEmptyTuple) {
}
XLA_TEST_F(TransferManagerTest, TransferNestedTuple) {
- std::unique_ptr<Literal> literal = Literal::MakeTuple(
- {Literal::CreateR0<float>(123.0f).get(),
- Literal::MakeTuple(
- {Literal::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}).get(),
- Literal::CreateR1<float>({44.0f, -10.0f, 3333333.3f}).get()})
+ std::unique_ptr<Literal> literal = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<float>(123.0f).get(),
+ LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}).get(),
+ LiteralUtil::CreateR1<float>({44.0f, -10.0f, 3333333.3f}).get()})
.get(),
- Literal::CreateR1<float>({-10.0f, 123.0f}).get()});
+ LiteralUtil::CreateR1<float>({-10.0f, 123.0f}).get()});
auto device_buffer = AllocateDeviceBuffer(literal->shape());
// Round trip literal through device.
@@ -223,7 +223,7 @@ XLA_TEST_F(TransferManagerTest, TransferNestedTuple) {
}
XLA_TEST_F(TransferManagerTest, TransferComplexValue) {
- std::unique_ptr<Literal> literal = Literal::CreateR1<complex64>(
+ std::unique_ptr<Literal> literal = LiteralUtil::CreateR1<complex64>(
{complex64(1.0f, 2.0f), complex64(42.0f, -123.4f)});
auto device_buffer = AllocateDeviceBuffer(literal->shape());
@@ -238,12 +238,12 @@ XLA_TEST_F(TransferManagerTest, TransferComplexValue) {
}
XLA_TEST_F(TransferManagerTest, TransferComplexValueInTuple) {
- std::unique_ptr<Literal> literal = Literal::MakeTuple(
- {Literal::CreateR1<complex64>(
+ std::unique_ptr<Literal> literal = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR1<complex64>(
{complex64(1.0f, 2.0f), complex64(42.0f, -123.4f)})
.get(),
- Literal::CreateR1<int32>({1, 2, 3, 4, 5, 6}).get(),
- Literal::CreateR0<complex64>(complex64(0.3f, -0.4f)).get()});
+ LiteralUtil::CreateR1<int32>({1, 2, 3, 4, 5, 6}).get(),
+ LiteralUtil::CreateR0<complex64>(complex64(0.3f, -0.4f)).get()});
auto device_buffer = AllocateDeviceBuffer(literal->shape());
// Round trip literal through device.
@@ -256,22 +256,34 @@ XLA_TEST_F(TransferManagerTest, TransferComplexValueInTuple) {
EXPECT_TRUE(LiteralTestUtil::Equal(*literal, *result));
}
+XLA_TEST_F(TransferManagerTest, TransferTokenFromDevice) {
+ // "Copy" a token from the device. The token has no physical representation so
+ // no copying is actually performed, but it shouldn't fail.
+ // TODO(b/110532604): Add transferring the token to device when this is
+ // supported.
+ auto device_buffer = AllocateDeviceBuffer(ShapeUtil::MakeTokenShape());
+ TF_ASSERT_OK_AND_ASSIGN(
+ std::unique_ptr<Literal> result,
+ transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
+ EXPECT_TRUE(LiteralTestUtil::Equal(*LiteralUtil::CreateToken(), *result));
+}
+
XLA_TEST_F(TransferManagerTest, MultiStreamRoundTripSoak) {
const int64 kIterationCount = 5000;
- std::unique_ptr<Literal> literal1 = Literal::MakeTuple(
- {Literal::CreateR0<float>(123.0f).get(),
- Literal::MakeTuple(
- {Literal::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}).get(),
- Literal::CreateR1<float>({44.0f, -10.0f, 3333333.3f}).get()})
+ std::unique_ptr<Literal> literal1 = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<float>(123.0f).get(),
+ LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}).get(),
+ LiteralUtil::CreateR1<float>({44.0f, -10.0f, 3333333.3f}).get()})
.get(),
- Literal::CreateR1<float>({-10.0f, 123.0f}).get()});
- std::unique_ptr<Literal> literal2 = Literal::MakeTuple(
- {Literal::CreateR0<float>(456.0f).get(),
- Literal::MakeTuple(
- {Literal::CreateR2<float>({{5.0f, 7.0f}, {9.0f, 4.0f}}).get(),
- Literal::CreateR1<float>({44.0f, -11.0f, 3333333.3f}).get()})
+ LiteralUtil::CreateR1<float>({-10.0f, 123.0f}).get()});
+ std::unique_ptr<Literal> literal2 = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<float>(456.0f).get(),
+ LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR2<float>({{5.0f, 7.0f}, {9.0f, 4.0f}}).get(),
+ LiteralUtil::CreateR1<float>({44.0f, -11.0f, 3333333.3f}).get()})
.get(),
- Literal::CreateR1<float>({-98.0f, 153.0f}).get()});
+ LiteralUtil::CreateR1<float>({-98.0f, 153.0f}).get()});
auto device_buffer1 = AllocateDeviceBuffer(literal1->shape());
auto device_buffer2 = AllocateDeviceBuffer(literal2->shape());
@@ -313,10 +325,10 @@ class TransferDeviceToHostBenchmark : public TransferManagerTest {
std::vector<std::unique_ptr<Literal>> tuple_elements;
for (int i = 0; i < num_tuple_elements; ++i) {
tuple_elements.push_back(
- Literal::CreateR2F32Linspace(0.0f, 1.0f, array_size, array_size));
+ LiteralUtil::CreateR2F32Linspace(0.0f, 1.0f, array_size, array_size));
}
std::unique_ptr<Literal> literal =
- Literal::MakeTupleOwned(std::move(tuple_elements));
+ LiteralUtil::MakeTupleOwned(std::move(tuple_elements));
auto device_buffer = AllocateDeviceBuffer(literal->shape());
TF_CHECK_OK(transfer_manager_->TransferLiteralToDevice(stream_, *literal,
device_buffer));
@@ -345,10 +357,10 @@ class TransferHostToDeviceBenchmark : public TransferManagerTest {
std::vector<std::unique_ptr<Literal>> tuple_elements;
for (int i = 0; i < num_tuple_elements; ++i) {
tuple_elements.push_back(
- Literal::CreateR2F32Linspace(0.0f, 1.0f, array_size, array_size));
+ LiteralUtil::CreateR2F32Linspace(0.0f, 1.0f, array_size, array_size));
}
std::unique_ptr<Literal> literal =
- Literal::MakeTupleOwned(std::move(tuple_elements));
+ LiteralUtil::MakeTupleOwned(std::move(tuple_elements));
auto device_buffer = AllocateDeviceBuffer(literal->shape());
tensorflow::testing::StartTiming();
for (int i = 0; i < iters; ++i) {
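Across the transfer-manager hunks above the change is purely mechanical: the static literal factories move from Literal to LiteralUtil, while the Literal data type itself is untouched. A minimal sketch of the new spelling (the literal_util.h header path is an assumption inferred from the class name, not taken from this excerpt):

    #include "tensorflow/compiler/xla/literal_util.h"  // assumed location of LiteralUtil

    // Was: Literal::CreateR0<float>(123.0f), Literal::MakeTuple({...}).
    std::unique_ptr<Literal> scalar = LiteralUtil::CreateR0<float>(123.0f);
    std::unique_ptr<Literal> pair = LiteralUtil::MakeTuple(
        {scalar.get(), LiteralUtil::CreateR1<float>({1.0f, 2.0f}).get()});
    // MakeTuple copies through raw pointers, so the caller keeps ownership;
    // MakeTupleOwned instead consumes its arguments, as in the soak test above.
    std::unique_ptr<Literal> owned =
        LiteralUtil::MakeTupleOwned(LiteralUtil::CreateR1<float>({3.0f, 4.0f}));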
diff --git a/tensorflow/compiler/xla/tests/transpose_test.cc b/tensorflow/compiler/xla/tests/transpose_test.cc
index fe1e3da7ec..6ebb4324f8 100644
--- a/tensorflow/compiler/xla/tests/transpose_test.cc
+++ b/tensorflow/compiler/xla/tests/transpose_test.cc
@@ -38,34 +38,35 @@ class TransposeTest : public ClientLibraryTestBase {
XLA_TEST_F(TransposeTest, Transpose0x0) {
XlaBuilder builder("Transpose");
- auto lhs = builder.ConstantR2FromArray2D<float>(Array2D<float>(0, 0));
- auto result = builder.Transpose(lhs, {1, 0});
+ auto lhs = ConstantR2FromArray2D<float>(&builder, Array2D<float>(0, 0));
+ Transpose(lhs, {1, 0});
ComputeAndCompareR2<float>(&builder, Array2D<float>(0, 0), {}, error_spec_);
}
XLA_TEST_F(TransposeTest, Transpose0x42) {
XlaBuilder builder("Transpose");
- auto lhs = builder.ConstantR2FromArray2D<float>(Array2D<float>(0, 42));
- auto result = builder.Transpose(lhs, {1, 0});
+ auto lhs = ConstantR2FromArray2D<float>(&builder, Array2D<float>(0, 42));
+ Transpose(lhs, {1, 0});
ComputeAndCompareR2<float>(&builder, Array2D<float>(42, 0), {}, error_spec_);
}
XLA_TEST_F(TransposeTest, Transpose7x0) {
XlaBuilder builder("Transpose");
- auto lhs = builder.ConstantR2FromArray2D<float>(Array2D<float>(7, 0));
- auto result = builder.Transpose(lhs, {1, 0});
+ auto lhs = ConstantR2FromArray2D<float>(&builder, Array2D<float>(7, 0));
+ Transpose(lhs, {1, 0});
ComputeAndCompareR2<float>(&builder, Array2D<float>(0, 7), {}, error_spec_);
}
TEST_F(TransposeTest, Transpose2x2) {
XlaBuilder builder("Transpose");
- auto lhs = builder.ConstantR2<float>({
- {1.0, 2.0}, {3.0, 4.0},
- });
- auto result = builder.Transpose(lhs, {1, 0});
+ auto lhs = ConstantR2<float>(&builder, {
+ {1.0, 2.0},
+ {3.0, 4.0},
+ });
+ Transpose(lhs, {1, 0});
Array2D<float> expected({{1.0f, 3.0f}, {2.0f, 4.0f}});
@@ -74,16 +75,18 @@ TEST_F(TransposeTest, Transpose2x2) {
XLA_TEST_F(TransposeTest, Transpose0x2x3_2x3x0) {
XlaBuilder builder("Transpose");
- auto operand = builder.ConstantR3FromArray3D<int32>(Array3D<int32>(0, 2, 3));
- auto result = builder.Transpose(operand, {1, 2, 0});
+ auto operand =
+ ConstantR3FromArray3D<int32>(&builder, Array3D<int32>(0, 2, 3));
+ Transpose(operand, {1, 2, 0});
ComputeAndCompareR3<int32>(&builder, Array3D<int32>(2, 3, 0), {});
}
TEST_F(TransposeTest, Transpose1x2x3_2x3x1) {
XlaBuilder builder("Transpose");
- auto operand = builder.ConstantR3FromArray3D<int32>({{{1, 2, 3}, {4, 5, 6}}});
- auto result = builder.Transpose(operand, {1, 2, 0});
+ auto operand =
+ ConstantR3FromArray3D<int32>(&builder, {{{1, 2, 3}, {4, 5, 6}}});
+ Transpose(operand, {1, 2, 0});
Array3D<int32> expected({{{1}, {2}, {3}}, {{4}, {5}, {6}}});
@@ -92,8 +95,9 @@ TEST_F(TransposeTest, Transpose1x2x3_2x3x1) {
TEST_F(TransposeTest, Transpose1x2x3_3x2x1) {
XlaBuilder builder("Transpose");
- auto operand = builder.ConstantR3FromArray3D<int32>({{{1, 2, 3}, {4, 5, 6}}});
- auto result = builder.Transpose(operand, {2, 1, 0});
+ auto operand =
+ ConstantR3FromArray3D<int32>(&builder, {{{1, 2, 3}, {4, 5, 6}}});
+ Transpose(operand, {2, 1, 0});
Array3D<int32> expected({{{1}, {4}}, {{2}, {5}}, {{3}, {6}}});
@@ -102,8 +106,9 @@ TEST_F(TransposeTest, Transpose1x2x3_3x2x1) {
TEST_F(TransposeTest, Transpose1x2x3_1x2x3) {
XlaBuilder builder("Transpose");
- auto operand = builder.ConstantR3FromArray3D<int32>({{{1, 2, 3}, {4, 5, 6}}});
- auto result = builder.Transpose(operand, {0, 1, 2});
+ auto operand =
+ ConstantR3FromArray3D<int32>(&builder, {{{1, 2, 3}, {4, 5, 6}}});
+ Transpose(operand, {0, 1, 2});
Array3D<int32> expected({{{1, 2, 3}, {4, 5, 6}}});
@@ -116,9 +121,9 @@ TEST_F(TransposeTest, MultiTranspose3x2) {
for (int transposes = 0; transposes <= 10; ++transposes) {
XlaBuilder builder("Transpose");
- auto computed = builder.ConstantR2FromArray2D<float>(input);
+ auto computed = ConstantR2FromArray2D<float>(&builder, input);
for (int i = 0; i < transposes; ++i) {
- computed = builder.Transpose(computed, {1, 0});
+ computed = Transpose(computed, {1, 0});
}
const Array2D<float>& expected = transposes % 2 == 0 ? input : transposed;
ComputeAndCompareR2<float>(&builder, expected, {}, error_spec_);
@@ -130,8 +135,8 @@ TEST_F(TransposeTest, Small_1x1) {
auto aoperand = MakeLinspaceArray2D(0.0, 1.0, 1, 1);
XlaBuilder builder("transpose_1x1");
- auto operand = builder.ConstantR2FromArray2D<float>(*aoperand);
- builder.Transpose(operand, {1, 0});
+ auto operand = ConstantR2FromArray2D<float>(&builder, *aoperand);
+ Transpose(operand, {1, 0});
auto expected = ReferenceUtil::TransposeArray2D(*aoperand);
ComputeAndCompareR2<float>(&builder, *expected, {}, ErrorSpec(1e-4));
@@ -142,8 +147,8 @@ TEST_F(TransposeTest, Small_2x2) {
auto aoperand = MakeLinspaceArray2D(0.0, 4.0, 2, 2);
XlaBuilder builder("transpose_2x2");
- auto operand = builder.ConstantR2FromArray2D<float>(*aoperand);
- builder.Transpose(operand, {1, 0});
+ auto operand = ConstantR2FromArray2D<float>(&builder, *aoperand);
+ Transpose(operand, {1, 0});
auto expected = ReferenceUtil::TransposeArray2D(*aoperand);
ComputeAndCompareR2<float>(&builder, *expected, {}, ErrorSpec(1e-4));
@@ -162,8 +167,8 @@ void TransposeTest::TestTransposeConstant021(size_t n1, size_t n2, size_t n3) {
}
XlaBuilder builder(TestName());
- auto operand = builder.ConstantR3FromArray3D(aoperand);
- builder.Transpose(operand, {0, 2, 1});
+ auto operand = ConstantR3FromArray3D(&builder, aoperand);
+ Transpose(operand, {0, 2, 1});
ComputeAndCompareR3<int32>(&builder, expected, {});
}
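The transpose hunks show the other half of the migration: XlaBuilder's op-building member functions become free functions. Ops with XlaOp operands recover the builder from them; only builder-seeded ops such as the ConstantR* helpers and Parameter take &builder explicitly. Bindings like "auto result = ..." disappear because the builder records every op and treats the last one as the computation root. A condensed sketch in the style of the tests above:

    XlaBuilder builder("Transpose");
    // Was: auto lhs = builder.ConstantR2FromArray2D<float>(...);
    //      auto result = builder.Transpose(lhs, {1, 0});
    auto lhs = ConstantR2FromArray2D<float>(&builder, Array2D<float>(0, 0));
    Transpose(lhs, {1, 0});  // recorded on the builder reached through lhs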
diff --git a/tensorflow/compiler/xla/tests/tuple_test.cc b/tensorflow/compiler/xla/tests/tuple_test.cc
index 220d9f6320..bf86c5dfb6 100644
--- a/tensorflow/compiler/xla/tests/tuple_test.cc
+++ b/tensorflow/compiler/xla/tests/tuple_test.cc
@@ -49,12 +49,12 @@ XLA_TEST_F(TupleTest, TupleConstant) {
{1.1f, 2.2f, 3.5f}, // row 0
{4.8f, 5.0f, 6.7f}, // row 1
};
- auto value =
- Literal::MakeTuple({Literal::CreateR0<float>(constant_scalar).get(),
- Literal::CreateR1<float>(constant_vector).get(),
- Literal::CreateR2<float>(constant_matrix).get()});
+ auto value = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<float>(constant_scalar).get(),
+ LiteralUtil::CreateR1<float>(constant_vector).get(),
+ LiteralUtil::CreateR2<float>(constant_matrix).get()});
- builder.ConstantLiteral(*value);
+ ConstantLiteral(&builder, *value);
ComputeAndCompareTuple(&builder, *value, {}, error_spec_);
}
@@ -64,11 +64,11 @@ XLA_TEST_F(TupleTest, TupleScalarConstant) {
const float constant_scalar1 = 7.3f;
const float constant_scalar2 = 1.2f;
- auto value =
- Literal::MakeTuple({Literal::CreateR0<float>(constant_scalar1).get(),
- Literal::CreateR0<float>(constant_scalar2).get()});
+ auto value = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<float>(constant_scalar1).get(),
+ LiteralUtil::CreateR0<float>(constant_scalar2).get()});
- builder.ConstantLiteral(*value);
+ ConstantLiteral(&builder, *value);
ComputeAndCompareTuple(&builder, *value, {}, error_spec_);
}
@@ -82,14 +82,14 @@ XLA_TEST_F(TupleTest, TupleCreate) {
{1.1f, 2.2f, 3.5f}, // row 0
{4.8f, 5.0f, 6.7f}, // row 1
};
- builder.Tuple({builder.ConstantR0<float>(constant_scalar),
- builder.ConstantR1<float>(constant_vector),
- builder.ConstantR2<float>(constant_matrix)});
-
- auto expected =
- Literal::MakeTuple({Literal::CreateR0<float>(constant_scalar).get(),
- Literal::CreateR1<float>(constant_vector).get(),
- Literal::CreateR2<float>(constant_matrix).get()});
+ Tuple(&builder, {ConstantR0<float>(&builder, constant_scalar),
+ ConstantR1<float>(&builder, constant_vector),
+ ConstantR2<float>(&builder, constant_matrix)});
+
+ auto expected = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<float>(constant_scalar).get(),
+ LiteralUtil::CreateR1<float>(constant_vector).get(),
+ LiteralUtil::CreateR2<float>(constant_matrix).get()});
ComputeAndCompareTuple(&builder, *expected, {}, error_spec_);
}
@@ -97,19 +97,20 @@ XLA_TEST_F(TupleTest, TupleCreate) {
XLA_TEST_F(TupleTest, TupleCreateWithZeroElementEntry) {
XlaBuilder builder(TestName());
- builder.Tuple(
- {builder.ConstantR0<float>(7.0), builder.ConstantR1<float>({})});
+ Tuple(&builder,
+ {ConstantR0<float>(&builder, 7.0), ConstantR1<float>(&builder, {})});
- auto expected = Literal::MakeTuple({Literal::CreateR0<float>(7.0).get(),
- Literal::CreateR1<float>({}).get()});
+ auto expected =
+ LiteralUtil::MakeTuple({LiteralUtil::CreateR0<float>(7.0).get(),
+ LiteralUtil::CreateR1<float>({}).get()});
ComputeAndCompareTuple(&builder, *expected, {}, error_spec_);
}
// Tests the creation of an empty tuple.
XLA_TEST_F(TupleTest, EmptyTupleCreate) {
XlaBuilder builder(TestName());
- builder.Tuple({});
- auto expected = Literal::MakeTuple({});
+ Tuple(&builder, {});
+ auto expected = LiteralUtil::MakeTuple({});
ComputeAndCompareTuple(&builder, *expected, {}, error_spec_);
}
@@ -121,9 +122,10 @@ XLA_TEST_F(TupleTest, GetTupleElement) {
{1.f, 2.f, 3.f}, // row 0
{4.f, 5.f, 6.f}, // row 1
};
- auto tuple_data = builder.Tuple({builder.ConstantR1<float>(constant_vector),
- builder.ConstantR2<float>(constant_matrix)});
- builder.GetTupleElement(tuple_data, 1);
+ auto tuple_data =
+ Tuple(&builder, {ConstantR1<float>(&builder, constant_vector),
+ ConstantR2<float>(&builder, constant_matrix)});
+ GetTupleElement(tuple_data, 1);
ComputeAndCompareR2<float>(&builder, Array2D<float>(constant_matrix), {},
error_spec_);
}
@@ -131,17 +133,18 @@ XLA_TEST_F(TupleTest, GetTupleElement) {
// Trivial test for extracting a tuple element with GetTupleElement.
XLA_TEST_F(TupleTest, GetTupleElementWithZeroElements) {
XlaBuilder builder(TestName());
- auto tuple_data = builder.Tuple(
- {builder.ConstantR1<float>({}),
- builder.ConstantR2FromArray2D<float>(Array2D<float>(0, 101))});
- builder.GetTupleElement(tuple_data, 1);
+ auto tuple_data =
+ Tuple(&builder,
+ {ConstantR1<float>(&builder, {}),
+ ConstantR2FromArray2D<float>(&builder, Array2D<float>(0, 101))});
+ GetTupleElement(tuple_data, 1);
ComputeAndCompareR2<float>(&builder, Array2D<float>(0, 101), {}, error_spec_);
}
XLA_TEST_F(TupleTest, GetTupleElementOfNonTupleFailsGracefully) {
XlaBuilder builder(TestName());
- auto value = builder.ConstantR1<float>({4.5f});
- builder.GetTupleElement(value, 1);
+ auto value = ConstantR1<float>(&builder, {4.5f});
+ GetTupleElement(value, 1);
auto result_status = builder.Build();
EXPECT_FALSE(result_status.ok());
EXPECT_THAT(
@@ -158,14 +161,15 @@ XLA_TEST_F(TupleTest, AddTupleElements) {
{1.f, 2.f, 3.f}, // row 0
{4.f, 5.f, 6.f}, // row 1
};
- auto tuple_data = builder.Tuple({builder.ConstantR1<float>(constant_vector),
- builder.ConstantR2<float>(constant_matrix)});
- auto vector_element = builder.GetTupleElement(tuple_data, 0);
- auto matrix_element = builder.GetTupleElement(tuple_data, 1);
+ auto tuple_data =
+ Tuple(&builder, {ConstantR1<float>(&builder, constant_vector),
+ ConstantR2<float>(&builder, constant_matrix)});
+ auto vector_element = GetTupleElement(tuple_data, 0);
+ auto matrix_element = GetTupleElement(tuple_data, 1);
auto vector_shape = builder.GetShape(vector_element).ConsumeValueOrDie();
auto matrix_shape = builder.GetShape(matrix_element).ConsumeValueOrDie();
- builder.Add(matrix_element, vector_element,
- /*broadcast_dimensions=*/{1});
+ Add(matrix_element, vector_element,
+ /*broadcast_dimensions=*/{1});
Array2D<float> expected({
{2.f, 4.f, 6.f}, // row 0
@@ -185,13 +189,14 @@ XLA_TEST_F(TupleTest, TupleGTEToTuple) {
{1.f, 2.f, 3.f}, // row 0
{4.f, 5.f, 6.f}, // row 1
};
- auto tuple_data = builder.Tuple({builder.ConstantR1<float>(constant_vector),
- builder.ConstantR2<float>(constant_matrix)});
- builder.Tuple({builder.GetTupleElement(tuple_data, 1),
- builder.GetTupleElement(tuple_data, 0)});
- auto expected =
- Literal::MakeTuple({Literal::CreateR2<float>(constant_matrix).get(),
- Literal::CreateR1<float>(constant_vector).get()});
+ auto tuple_data =
+ Tuple(&builder, {ConstantR1<float>(&builder, constant_vector),
+ ConstantR2<float>(&builder, constant_matrix)});
+ Tuple(&builder,
+ {GetTupleElement(tuple_data, 1), GetTupleElement(tuple_data, 0)});
+ auto expected = LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR2<float>(constant_matrix).get(),
+ LiteralUtil::CreateR1<float>(constant_vector).get()});
ComputeAndCompareTuple(&builder, *expected, {}, error_spec_);
}
@@ -206,14 +211,14 @@ XLA_TEST_F(TupleTest, SelectBetweenPredTuples) {
std::unique_ptr<GlobalData> v2_data =
CreateR0Parameter<float>(1.0f, /*parameter_number=*/1, /*name=*/"v2",
/*builder=*/&b, /*data_handle=*/&v2);
- auto v1_gt = b.Gt(v1, v2); // false
- auto v2_gt = b.Gt(v2, v1); // true
- auto v1_v2 = b.Tuple({v1_gt, v2_gt}); // {false, true}
- auto v2_v1 = b.Tuple({v2_gt, v1_gt}); // {true, false}
- b.Select(direction ? v1_gt : v2_gt, v1_v2, v2_v1);
+ auto v1_gt = Gt(v1, v2); // false
+ auto v2_gt = Gt(v2, v1); // true
+ auto v1_v2 = Tuple(&b, {v1_gt, v2_gt}); // {false, true}
+ auto v2_v1 = Tuple(&b, {v2_gt, v1_gt}); // {true, false}
+ Select(direction ? v1_gt : v2_gt, v1_v2, v2_v1);
auto expected =
- Literal::MakeTuple({Literal::CreateR0<bool>(direction).get(),
- Literal::CreateR0<bool>(!direction).get()});
+ LiteralUtil::MakeTuple({LiteralUtil::CreateR0<bool>(direction).get(),
+ LiteralUtil::CreateR0<bool>(!direction).get()});
ComputeAndCompareTuple(&b, *expected, {v1_data.get(), v2_data.get()},
error_spec_);
@@ -243,22 +248,23 @@ XLA_TEST_F(TupleTest, TupleGTEToTupleToGTEAdd) {
{1.f, 2.f, 3.f}, // row 0
{4.f, 5.f, 6.f}, // row 1
};
- auto tuple_data = builder.Tuple({builder.ConstantR1<float>(constant_vector),
- builder.ConstantR2<float>(constant_matrix)});
- auto new_tuple01 = builder.Tuple({builder.GetTupleElement(tuple_data, 0),
- builder.GetTupleElement(tuple_data, 1)});
- auto new_tuple10 = builder.Tuple({builder.GetTupleElement(tuple_data, 1),
- builder.GetTupleElement(tuple_data, 0)});
- auto vector_from_01 = builder.GetTupleElement(new_tuple01, 0);
- auto vector_from_10 = builder.GetTupleElement(new_tuple10, 1);
- auto matrix_from_01 = builder.GetTupleElement(new_tuple01, 1);
- auto matrix_from_10 = builder.GetTupleElement(new_tuple10, 0);
-
- auto addvectors = builder.Add(vector_from_01, vector_from_10);
- auto addmatrices = builder.Add(matrix_from_01, matrix_from_10);
-
- builder.Add(addmatrices, addvectors,
- /*broadcast_dimensions=*/{1});
+ auto tuple_data =
+ Tuple(&builder, {ConstantR1<float>(&builder, constant_vector),
+ ConstantR2<float>(&builder, constant_matrix)});
+ auto new_tuple01 = Tuple(&builder, {GetTupleElement(tuple_data, 0),
+ GetTupleElement(tuple_data, 1)});
+ auto new_tuple10 = Tuple(&builder, {GetTupleElement(tuple_data, 1),
+ GetTupleElement(tuple_data, 0)});
+ auto vector_from_01 = GetTupleElement(new_tuple01, 0);
+ auto vector_from_10 = GetTupleElement(new_tuple10, 1);
+ auto matrix_from_01 = GetTupleElement(new_tuple01, 1);
+ auto matrix_from_10 = GetTupleElement(new_tuple10, 0);
+
+ auto addvectors = Add(vector_from_01, vector_from_10);
+ auto addmatrices = Add(matrix_from_01, matrix_from_10);
+
+ Add(addmatrices, addvectors,
+ /*broadcast_dimensions=*/{1});
Array2D<float> expected({
{4.f, 8.f, 12.f}, // row 0
@@ -273,14 +279,15 @@ XLA_TEST_F(TupleTest, SelectBetweenTuplesOnFalse) {
std::initializer_list<float> vec1 = {1.f, 2.f, 3.f};
std::initializer_list<float> vec2 = {2.f, 4.f, 6.f};
- auto tuple12 = builder.Tuple(
- {builder.ConstantR1<float>(vec1), builder.ConstantR1<float>(vec2)});
- auto tuple21 = builder.Tuple(
- {builder.ConstantR1<float>(vec2), builder.ConstantR1<float>(vec1)});
-
- builder.Select(builder.ConstantR0<bool>(false), tuple12, tuple21);
- auto expected = Literal::MakeTuple({Literal::CreateR1<float>(vec2).get(),
- Literal::CreateR1<float>(vec1).get()});
+ auto tuple12 = Tuple(&builder, {ConstantR1<float>(&builder, vec1),
+ ConstantR1<float>(&builder, vec2)});
+ auto tuple21 = Tuple(&builder, {ConstantR1<float>(&builder, vec2),
+ ConstantR1<float>(&builder, vec1)});
+
+ Select(ConstantR0<bool>(&builder, false), tuple12, tuple21);
+ auto expected =
+ LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>(vec2).get(),
+ LiteralUtil::CreateR1<float>(vec1).get()});
ComputeAndCompareTuple(&builder, *expected, {}, error_spec_);
}
@@ -292,22 +299,22 @@ XLA_TEST_F(TupleTest, TuplesInAMap) {
// Need to put a select in there to prevent HLO-level optimizations from
// optimizing out the tuples.
XlaBuilder b("sort_square");
- auto x = b.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto x2 = b.Mul(x, x);
- auto x_smaller_tuple = b.Tuple({x, x2});
- auto x2_smaller_tuple = b.Tuple({x2, x});
- auto sorted = b.Select(b.Lt(x, x2), x_smaller_tuple, x2_smaller_tuple);
- auto smaller = b.GetTupleElement(sorted, 0);
- auto greater = b.GetTupleElement(sorted, 1);
- b.Add(greater, b.Mul(b.ConstantR0<float>(100.0f), smaller));
+ auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto x2 = Mul(x, x);
+ auto x_smaller_tuple = Tuple(&b, {x, x2});
+ auto x2_smaller_tuple = Tuple(&b, {x2, x});
+ auto sorted = Select(Lt(x, x2), x_smaller_tuple, x2_smaller_tuple);
+ auto smaller = GetTupleElement(sorted, 0);
+ auto greater = GetTupleElement(sorted, 1);
+ Add(greater, Mul(ConstantR0<float>(&b, 100.0f), smaller));
auto computation_status = b.Build();
ASSERT_IS_OK(computation_status.status());
tuple_computation = computation_status.ConsumeValueOrDie();
}
XlaBuilder b(TestName());
- auto input = b.ConstantR1<float>({-1.0f, 1.0f, 2.1f});
- b.Map({input}, tuple_computation, {0});
+ auto input = ConstantR1<float>(&b, {-1.0f, 1.0f, 2.1f});
+ Map(&b, {input}, tuple_computation, {0});
ComputeAndCompareR1<float>(&b, {-99.0f, 101.0f, 214.41f}, {}, error_spec_);
}
@@ -317,14 +324,15 @@ XLA_TEST_F(TupleTest, SelectBetweenTuplesOnTrue) {
std::initializer_list<float> vec1 = {1.f, 2.f, 3.f};
std::initializer_list<float> vec2 = {2.f, 4.f, 6.f};
- auto tuple12 = builder.Tuple(
- {builder.ConstantR1<float>(vec1), builder.ConstantR1<float>(vec2)});
- auto tuple21 = builder.Tuple(
- {builder.ConstantR1<float>(vec2), builder.ConstantR1<float>(vec1)});
-
- builder.Select(builder.ConstantR0<bool>(true), tuple12, tuple21);
- auto expected = Literal::MakeTuple({Literal::CreateR1<float>(vec1).get(),
- Literal::CreateR1<float>(vec2).get()});
+ auto tuple12 = Tuple(&builder, {ConstantR1<float>(&builder, vec1),
+ ConstantR1<float>(&builder, vec2)});
+ auto tuple21 = Tuple(&builder, {ConstantR1<float>(&builder, vec2),
+ ConstantR1<float>(&builder, vec1)});
+
+ Select(ConstantR0<bool>(&builder, true), tuple12, tuple21);
+ auto expected =
+ LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>(vec1).get(),
+ LiteralUtil::CreateR1<float>(vec2).get()});
ComputeAndCompareTuple(&builder, *expected, {}, error_spec_);
}
@@ -335,14 +343,13 @@ XLA_TEST_F(TupleTest, SelectBetweenTuplesElementResult) {
std::initializer_list<float> vec1 = {1.f, 2.f, 3.f};
std::initializer_list<float> vec2 = {2.f, 4.f, 6.f};
- auto tuple12 = builder.Tuple(
- {builder.ConstantR1<float>(vec1), builder.ConstantR1<float>(vec2)});
- auto tuple21 = builder.Tuple(
- {builder.ConstantR1<float>(vec2), builder.ConstantR1<float>(vec1)});
+ auto tuple12 = Tuple(&builder, {ConstantR1<float>(&builder, vec1),
+ ConstantR1<float>(&builder, vec2)});
+ auto tuple21 = Tuple(&builder, {ConstantR1<float>(&builder, vec2),
+ ConstantR1<float>(&builder, vec1)});
- auto select =
- builder.Select(builder.ConstantR0<bool>(false), tuple12, tuple21);
- builder.GetTupleElement(select, 0);
+ auto select = Select(ConstantR0<bool>(&builder, false), tuple12, tuple21);
+ GetTupleElement(select, 0);
ComputeAndCompareR1<float>(&builder, vec2, {}, error_spec_);
}
@@ -371,19 +378,16 @@ XLA_TEST_F(TupleTest, SelectBetweenTuplesCascaded) {
std::initializer_list<float> vec1 = {1.f, 2.f, 3.f};
std::initializer_list<float> vec2 = {2.f, 4.f, 6.f};
- auto pred_tuple = builder.Tuple(
- {builder.ConstantR0<bool>(true), builder.ConstantR0<bool>(false)});
- auto tuple12 = builder.Tuple(
- {builder.ConstantR1<float>(vec1), builder.ConstantR1<float>(vec2)});
- auto tuple21 = builder.Tuple(
- {builder.ConstantR1<float>(vec2), builder.ConstantR1<float>(vec1)});
+ auto pred_tuple = Tuple(&builder, {ConstantR0<bool>(&builder, true),
+ ConstantR0<bool>(&builder, false)});
+ auto tuple12 = Tuple(&builder, {ConstantR1<float>(&builder, vec1),
+ ConstantR1<float>(&builder, vec2)});
+ auto tuple21 = Tuple(&builder, {ConstantR1<float>(&builder, vec2),
+ ConstantR1<float>(&builder, vec1)});
- auto select1 =
- builder.Select(builder.GetTupleElement(pred_tuple, 0), tuple12, tuple21);
- auto select2 =
- builder.Select(builder.GetTupleElement(pred_tuple, 1), tuple21, select1);
- builder.Add(builder.GetTupleElement(select2, 0),
- builder.GetTupleElement(select2, 1));
+ auto select1 = Select(GetTupleElement(pred_tuple, 0), tuple12, tuple21);
+ auto select2 = Select(GetTupleElement(pred_tuple, 1), tuple21, select1);
+ Add(GetTupleElement(select2, 0), GetTupleElement(select2, 1));
ComputeAndCompareR1<float>(&builder, {3.f, 6.f, 9.f}, {}, error_spec_);
}
@@ -395,31 +399,32 @@ XLA_TEST_F(TupleTest, SelectBetweenTuplesReuseConstants) {
std::initializer_list<float> vec1 = {1.f, 2.f, 3.f};
std::initializer_list<float> vec2 = {2.f, 4.f, 6.f};
- auto c1 = builder.ConstantR1<float>(vec1);
- auto c2 = builder.ConstantR1<float>(vec2);
- auto tuple12 = builder.Tuple({c1, c2});
- auto tuple21 = builder.Tuple({c2, c1});
+ auto c1 = ConstantR1<float>(&builder, vec1);
+ auto c2 = ConstantR1<float>(&builder, vec2);
+ auto tuple12 = Tuple(&builder, {c1, c2});
+ auto tuple21 = Tuple(&builder, {c2, c1});
- builder.Select(builder.ConstantR0<bool>(false), tuple12, tuple21);
+ Select(ConstantR0<bool>(&builder, false), tuple12, tuple21);
- auto expected = Literal::MakeTuple({Literal::CreateR1<float>(vec2).get(),
- Literal::CreateR1<float>(vec1).get()});
+ auto expected =
+ LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>(vec2).get(),
+ LiteralUtil::CreateR1<float>(vec1).get()});
ComputeAndCompareTuple(&builder, *expected, {}, error_spec_);
}
XLA_TEST_F(TupleTest, NestedTuples) {
XlaBuilder builder(TestName());
- auto inner_tuple = builder.Tuple(
- {builder.ConstantR1<float>({1.0, 2.0}), builder.ConstantR0<float>(42.0)});
- builder.Tuple({inner_tuple, builder.ConstantR1<float>({22.0, 44.0})});
+ auto inner_tuple = Tuple(&builder, {ConstantR1<float>(&builder, {1.0, 2.0}),
+ ConstantR0<float>(&builder, 42.0)});
+ Tuple(&builder, {inner_tuple, ConstantR1<float>(&builder, {22.0, 44.0})});
- auto expected_v1 = Literal::CreateR1<float>({1.0, 2.0});
- auto expected_s = Literal::CreateR0<float>(42.0);
+ auto expected_v1 = LiteralUtil::CreateR1<float>({1.0, 2.0});
+ auto expected_s = LiteralUtil::CreateR0<float>(42.0);
auto expected_inner_tuple =
- Literal::MakeTuple({expected_v1.get(), expected_s.get()});
- auto expected_v2 = Literal::CreateR1<float>({22.0, 44.0});
+ LiteralUtil::MakeTuple({expected_v1.get(), expected_s.get()});
+ auto expected_v2 = LiteralUtil::CreateR1<float>({22.0, 44.0});
auto expected =
- Literal::MakeTuple({expected_inner_tuple.get(), expected_v2.get()});
+ LiteralUtil::MakeTuple({expected_inner_tuple.get(), expected_v2.get()});
ComputeAndCompareTuple(&builder, *expected, {}, error_spec_);
}
@@ -432,21 +437,21 @@ XLA_TEST_F(TupleTest, GetTupleElementOfNestedTuple) {
Shape outer_tuple_shape =
ShapeUtil::MakeTupleShape({inner_tuple_shape, data_shape});
- auto input = builder.Parameter(0, outer_tuple_shape, "input");
- auto gte0 = builder.GetTupleElement(input, 0);
- auto gte1 = builder.GetTupleElement(gte0, 1);
- builder.Add(gte1, builder.ConstantR1<float>({10.0, 11.0, 12.0}));
+ auto input = Parameter(&builder, 0, outer_tuple_shape, "input");
+ auto gte0 = GetTupleElement(input, 0);
+ auto gte1 = GetTupleElement(gte0, 1);
+ Add(gte1, ConstantR1<float>(&builder, {10.0, 11.0, 12.0}));
std::unique_ptr<GlobalData> data =
client_
- ->TransferToServer(*Literal::MakeTuple({
- Literal::MakeTuple(
+ ->TransferToServer(*LiteralUtil::MakeTuple({
+ LiteralUtil::MakeTuple(
{
- Literal::CreateR1<float>({1.0, 2.0, 3.0}).get(),
- Literal::CreateR1<float>({4.0, 5.0, 6.0}).get(),
+ LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0}).get(),
+ LiteralUtil::CreateR1<float>({4.0, 5.0, 6.0}).get(),
})
.get(),
- Literal::CreateR1<float>({7.0, 8.0, 9.0}).get(),
+ LiteralUtil::CreateR1<float>({7.0, 8.0, 9.0}).get(),
}))
.ConsumeValueOrDie();
@@ -463,25 +468,26 @@ XLA_TEST_F(TupleTest, ComplexTuples) {
Shape c64r2 = ShapeUtil::MakeShape(C64, {3, 2});
Shape arg0_shape = ShapeUtil::MakeTupleShape(
{c64r0, ShapeUtil::MakeTupleShape({c64r1, c64r2})});
- auto input0 = builder.Parameter(0, arg0_shape, "input0");
- auto t0 = builder.GetTupleElement(input0, 0);
- auto t1 = builder.GetTupleElement(input0, 1);
- auto t10 = builder.GetTupleElement(t1, 0);
- auto t11 = builder.GetTupleElement(t1, 1);
- auto sum = builder.Add(builder.Add(t10, t11, {1}), t0);
- auto input1 = builder.Parameter(1, c64r1, "input1");
- auto prod = builder.Mul(input1, sum, {1});
- builder.Tuple({builder.Tuple({prod, sum}),
- builder.ConstantR0<complex64>({123, 456})});
+ auto input0 = Parameter(&builder, 0, arg0_shape, "input0");
+ auto t0 = GetTupleElement(input0, 0);
+ auto t1 = GetTupleElement(input0, 1);
+ auto t10 = GetTupleElement(t1, 0);
+ auto t11 = GetTupleElement(t1, 1);
+ auto sum = Add(Add(t10, t11, {1}), t0);
+ auto input1 = Parameter(&builder, 1, c64r1, "input1");
+ auto prod = Mul(input1, sum, {1});
+ Tuple(&builder, {Tuple(&builder, {prod, sum}),
+ ConstantR0<complex64>(&builder, {123, 456})});
}
std::unique_ptr<GlobalData> arg0 =
client_
- ->TransferToServer(*Literal::MakeTuple(
- {Literal::CreateR0<complex64>({1, 2}).get(),
- Literal::MakeTuple(
- {Literal::CreateR1<complex64>({{10, 20}, {30, 40}}).get(),
- Literal::CreateR2<complex64>(
+ ->TransferToServer(*LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR0<complex64>({1, 2}).get(),
+ LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR1<complex64>({{10, 20}, {30, 40}})
+ .get(),
+ LiteralUtil::CreateR2<complex64>(
{{{100, 200}, {300, 400}},
{{1000, 2000}, {3000, 4000}},
{{10000, 20000}, {30000, 40000}}})
@@ -490,11 +496,13 @@ XLA_TEST_F(TupleTest, ComplexTuples) {
.ConsumeValueOrDie();
std::unique_ptr<GlobalData> arg1 =
client_
- ->TransferToServer(*Literal::CreateR1<complex64>({{1, 2}, {1, -2}}))
+ ->TransferToServer(
+ *LiteralUtil::CreateR1<complex64>({{1, 2}, {1, -2}}))
.ConsumeValueOrDie();
- auto sum = Literal::CreateR2<complex64>({{{111, 222}, {331, 442}},
- {{1011, 2022}, {3031, 4042}},
- {{10011, 20022}, {30031, 40042}}});
+ auto sum =
+ LiteralUtil::CreateR2<complex64>({{{111, 222}, {331, 442}},
+ {{1011, 2022}, {3031, 4042}},
+ {{10011, 20022}, {30031, 40042}}});
auto prod = MakeUnique<Literal>(sum->shape());
ASSERT_TRUE(prod->Populate<complex64>(
[&sum](tensorflow::gtl::ArraySlice<int64> indexes) {
@@ -504,9 +512,9 @@ XLA_TEST_F(TupleTest, ComplexTuples) {
: complex64(1, -2));
})
.ok());
- auto expected =
- Literal::MakeTuple({Literal::MakeTuple({prod.get(), sum.get()}).get(),
- Literal::CreateR0<complex64>({123, 456}).get()});
+ auto expected = LiteralUtil::MakeTuple(
+ {LiteralUtil::MakeTuple({prod.get(), sum.get()}).get(),
+ LiteralUtil::CreateR0<complex64>({123, 456}).get()});
ComputeAndCompareTuple(&builder, *expected, {arg0.get(), arg1.get()},
error_spec_);
}
@@ -529,10 +537,11 @@ XLA_TEST_F(TupleHloTest, DISABLED_ON_INTERPRETER(BitcastAfterGTE)) {
auto module =
HloRunner::CreateModuleFromString(testcase, GetDebugOptionsForTest())
.ValueOrDie();
- auto param = Literal::MakeTupleOwned(Literal::CreateR1<float>({1, 2, 3}));
+ auto param =
+ LiteralUtil::MakeTupleOwned(LiteralUtil::CreateR1<float>({1, 2, 3}));
auto result = ExecuteNoHloPasses(std::move(module), {param.get()});
EXPECT_TRUE(LiteralTestUtil::Equal(
- *Literal::MakeTupleOwned(Literal::CreateR2<float>({{1, 2, 3}})),
+ *LiteralUtil::MakeTupleOwned(LiteralUtil::CreateR2<float>({{1, 2, 3}})),
*result));
}
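In the tuple tests the same free-function style covers Tuple, GetTupleElement, Select, Map, and Parameter. Tuple keeps an explicit &builder even though its elements are XlaOps, which keeps the empty case Tuple(&builder, {}) well defined. A sketch:

    XlaBuilder builder("tuple_example");
    auto t = Tuple(&builder, {ConstantR1<float>(&builder, {1.f, 2.f}),
                              ConstantR0<float>(&builder, 3.f)});
    GetTupleElement(t, 1);  // root: the scalar element of the pair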
diff --git a/tensorflow/compiler/xla/tests/unary_op_test.cc b/tensorflow/compiler/xla/tests/unary_op_test.cc
index c3abe22797..a90a6fb0a5 100644
--- a/tensorflow/compiler/xla/tests/unary_op_test.cc
+++ b/tensorflow/compiler/xla/tests/unary_op_test.cc
@@ -38,8 +38,8 @@ class UnaryOpTest : public ClientLibraryTestBase {
template <typename T>
void AbsSize0TestHelper() {
XlaBuilder builder(TestName());
- auto arg = builder.ConstantR1<T>({});
- auto abs = builder.Abs(arg);
+ auto arg = ConstantR1<T>(&builder, {});
+ Abs(arg);
if (primitive_util::NativeToPrimitiveType<T>() == C64) {
ComputeAndCompareR1<float>(&builder, {}, {});
@@ -51,8 +51,8 @@ class UnaryOpTest : public ClientLibraryTestBase {
template <typename T>
void AbsTestHelper() {
XlaBuilder builder(TestName());
- auto arg = builder.ConstantR1<T>({-2, 25, 0, -123, inf<T>(), -inf<T>()});
- auto abs = builder.Abs(arg);
+ auto arg = ConstantR1<T>(&builder, {-2, 25, 0, -123, inf<T>(), -inf<T>()});
+ Abs(arg);
ComputeAndCompareR1<T>(&builder, {2, 25, 0, 123, inf<T>(), inf<T>()}, {});
}
@@ -60,9 +60,9 @@ class UnaryOpTest : public ClientLibraryTestBase {
template <typename T>
void SignTestHelper() {
XlaBuilder builder(TestName());
- auto arg = builder.ConstantR1<T>(
- {-2, 25, 0, static_cast<T>(-0.0), -123, inf<T>(), -inf<T>()});
- auto sign = builder.Sign(arg);
+ auto arg = ConstantR1<T>(
+ &builder, {-2, 25, 0, static_cast<T>(-0.0), -123, inf<T>(), -inf<T>()});
+ Sign(arg);
ComputeAndCompareR1<T>(&builder, {-1, 1, 0, 0, -1, 1, -1}, {});
}
@@ -70,10 +70,10 @@ class UnaryOpTest : public ClientLibraryTestBase {
template <typename T>
void SignAbsTestHelper() {
XlaBuilder builder(TestName());
- auto arg = builder.ConstantR1<T>({-2, 25, 0, -123});
- auto sign = builder.Sign(arg);
- auto abs = builder.Abs(arg);
- builder.Sub(builder.Mul(sign, abs), arg);
+ auto arg = ConstantR1<T>(&builder, {-2, 25, 0, -123});
+ auto sign = Sign(arg);
+ auto abs = Abs(arg);
+ Sub(Mul(sign, abs), arg);
ComputeAndCompareR1<T>(&builder, {0, 0, 0, 0}, {});
}
@@ -92,27 +92,28 @@ int64 UnaryOpTest::inf<int64>() {
template <>
void UnaryOpTest::AbsTestHelper<complex64>() {
XlaBuilder builder(TestName());
- auto arg = builder.ConstantR1<complex64>({{-2, 0},
- {0, 25},
- {0, 0},
- {-0.3f, 0.4f},
- {0, inf<float>()},
- {-inf<float>(), 0}});
- auto abs = builder.Abs(arg);
+ auto arg = ConstantR1<complex64>(&builder, {{-2, 0},
+ {0, 25},
+ {0, 0},
+ {-0.3f, 0.4f},
+ {0, inf<float>()},
+ {-inf<float>(), 0}});
+ Abs(arg);
std::unique_ptr<Literal> expected =
- Literal::CreateR1<float>({2, 25, 0, 0.5, inf<float>(), inf<float>()});
+ LiteralUtil::CreateR1<float>({2, 25, 0, 0.5, inf<float>(), inf<float>()});
ComputeAndCompareLiteral(&builder, *expected, {}, ErrorSpec(1e-6f));
}
template <>
void UnaryOpTest::SignTestHelper<complex64>() {
XlaBuilder builder(TestName());
- auto arg = builder.ConstantR1<complex64>(
+ auto arg = ConstantR1<complex64>(
+ &builder,
{{-2, 0}, {0, 25}, {0, 0}, {static_cast<float>(-0.0), 0}, {-1, 1}});
- auto sign = builder.Sign(arg);
+ Sign(arg);
- std::unique_ptr<Literal> expected = Literal::CreateR1<complex64>(
+ std::unique_ptr<Literal> expected = LiteralUtil::CreateR1<complex64>(
{{-1, 0}, {0, 1}, {0, 0}, {0, 0}, {-std::sqrt(0.5f), std::sqrt(0.5f)}});
ComputeAndCompareLiteral(&builder, *expected, {}, ErrorSpec(1e-6f));
}
@@ -121,13 +122,13 @@ template <>
void UnaryOpTest::SignAbsTestHelper<complex64>() {
XlaBuilder builder(TestName());
auto arg =
- builder.ConstantR1<complex64>({{-2, 0}, {0, 25}, {0, 0}, {-0.4, 0.3}});
- auto sign = builder.Sign(arg);
- auto abs = builder.Abs(arg);
- builder.Sub(builder.Mul(sign, builder.ConvertElementType(abs, C64)), arg);
+ ConstantR1<complex64>(&builder, {{-2, 0}, {0, 25}, {0, 0}, {-0.4, 0.3}});
+ auto sign = Sign(arg);
+ auto abs = Abs(arg);
+ Sub(Mul(sign, ConvertElementType(abs, C64)), arg);
std::unique_ptr<Literal> expected =
- Literal::CreateR1<complex64>({0, 0, 0, 0});
+ LiteralUtil::CreateR1<complex64>({0, 0, 0, 0});
ComputeAndCompareLiteral(&builder, *expected, {}, ErrorSpec(1e-6f));
}
@@ -145,37 +146,34 @@ XLA_TEST_F(UnaryOpTest, AbsTestR1) {
XLA_TEST_F(UnaryOpTest, AbsTestR0) {
XlaBuilder builder(TestName());
- auto argi = builder.ConstantR0<int>(-5);
- auto absi = builder.Abs(argi);
- auto argf = builder.ConstantR0<float>(-3.0f);
- auto absf = builder.Abs(argf);
- auto argf0 = builder.ConstantR0<float>(-0.0f);
- auto absf0 = builder.Abs(argf0);
- auto argc = builder.ConstantR0<complex64>({-0.3f, 0.4f});
- auto absc = builder.Abs(argc);
- builder.Add(builder.Add(absc, absf0),
- builder.Add(absf, builder.ConvertElementType(absi, F32)));
+ auto argi = ConstantR0<int>(&builder, -5);
+ auto absi = Abs(argi);
+ auto argf = ConstantR0<float>(&builder, -3.0f);
+ auto absf = Abs(argf);
+ auto argf0 = ConstantR0<float>(&builder, -0.0f);
+ auto absf0 = Abs(argf0);
+ auto argc = ConstantR0<complex64>(&builder, {-0.3f, 0.4f});
+ auto absc = Abs(argc);
+ Add(Add(absc, absf0), Add(absf, ConvertElementType(absi, F32)));
ComputeAndCompareR0<float>(&builder, 8.5f, {});
}
XLA_TEST_F(UnaryOpTest, SignTestR0) {
XlaBuilder builder(TestName());
- auto argi = builder.ConstantR0<int>(-5);
- auto sgni = builder.Sign(argi); // -1
- auto argf = builder.ConstantR0<float>(-4.0f);
- auto sgnf = builder.Sign(argf); // -1
- auto argf0 = builder.ConstantR0<float>(-0.0f);
- auto sgnf0 = builder.Sign(argf0); // 0
- auto argc = builder.ConstantR0<complex64>({-.3, .4});
- auto sgnc = builder.Sign(argc); // (-.6, .8)
- builder.Add(sgnc, builder.ConvertElementType(
- builder.Add(builder.Add(sgnf0, sgnf),
- builder.ConvertElementType(sgni, F32)),
- C64));
+ auto argi = ConstantR0<int>(&builder, -5);
+ auto sgni = Sign(argi); // -1
+ auto argf = ConstantR0<float>(&builder, -4.0f);
+ auto sgnf = Sign(argf); // -1
+ auto argf0 = ConstantR0<float>(&builder, -0.0f);
+ auto sgnf0 = Sign(argf0); // 0
+ auto argc = ConstantR0<complex64>(&builder, {-.3, .4});
+ auto sgnc = Sign(argc); // (-.6, .8)
+ Add(sgnc, ConvertElementType(
+ Add(Add(sgnf0, sgnf), ConvertElementType(sgni, F32)), C64));
std::unique_ptr<Literal> expected =
- Literal::CreateR0<complex64>({-2.6f, 0.8f});
+ LiteralUtil::CreateR0<complex64>({-2.6f, 0.8f});
ComputeAndCompareLiteral(&builder, *expected, {}, ErrorSpec(1e-6f));
}
@@ -194,9 +192,9 @@ XLA_TEST_F(UnaryOpTest, SignAbsTestR1) {
XLA_TEST_F(UnaryOpTest, UnsignedAbsTestR1) {
XlaBuilder builder(TestName());
- auto arg = builder.ConstantR1<unsigned int>(
- {2, 25, 0, 123, std::numeric_limits<unsigned int>::max()});
- auto abs = builder.Abs(arg);
+ auto arg = ConstantR1<unsigned int>(
+ &builder, {2, 25, 0, 123, std::numeric_limits<unsigned int>::max()});
+ Abs(arg);
ComputeAndCompareR1<unsigned int>(
&builder, {2, 25, 0, 123, std::numeric_limits<unsigned int>::max()}, {});
@@ -204,37 +202,37 @@ XLA_TEST_F(UnaryOpTest, UnsignedAbsTestR1) {
XLA_TEST_F(UnaryOpTest, UnsignedSignTestR1) {
XlaBuilder builder(TestName());
- auto arg = builder.ConstantR1<unsigned int>(
- {2, 25, 0, 123, std::numeric_limits<unsigned int>::max()});
- auto sign = builder.Sign(arg);
+ auto arg = ConstantR1<unsigned int>(
+ &builder, {2, 25, 0, 123, std::numeric_limits<unsigned int>::max()});
+ Sign(arg);
ComputeAndCompareR1<unsigned int>(&builder, {1, 1, 0, 1, 1}, {});
}
XLA_TEST_F(UnaryOpTest, SignAbsTestR2) {
XlaBuilder builder(TestName());
- auto arg = builder.ConstantR2<float>({{1.0, -2.0}, {-3.0, 4.0}});
- auto sign = builder.Sign(arg);
- auto abs = builder.Abs(arg);
- builder.Sub(builder.Mul(sign, abs), arg);
+ auto arg = ConstantR2<float>(&builder, {{1.0, -2.0}, {-3.0, 4.0}});
+ auto sign = Sign(arg);
+ auto abs = Abs(arg);
+ Sub(Mul(sign, abs), arg);
ComputeAndCompareR2<float>(&builder, {{0, 0}, {0, 0}}, {});
}
XLA_TEST_F(UnaryOpTest, ConvertElementTypePredToS32) {
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<int32>({0, 1});
- auto rhs = builder.ConstantR1<int32>({1, 1});
- builder.ConvertElementType(builder.Eq(lhs, rhs), S32);
+ auto lhs = ConstantR1<int32>(&builder, {0, 1});
+ auto rhs = ConstantR1<int32>(&builder, {1, 1});
+ ConvertElementType(Eq(lhs, rhs), S32);
ComputeAndCompareR1<int32>(&builder, {0, 1}, {});
}
XLA_TEST_F(UnaryOpTest, ConvertElementTypePredToF32) {
XlaBuilder builder(TestName());
- auto lhs = builder.ConstantR1<int32>({0, 1});
- auto rhs = builder.ConstantR1<int32>({1, 1});
- builder.ConvertElementType(builder.Eq(lhs, rhs), F32);
+ auto lhs = ConstantR1<int32>(&builder, {0, 1});
+ auto rhs = ConstantR1<int32>(&builder, {1, 1});
+ ConvertElementType(Eq(lhs, rhs), F32);
ComputeAndCompareR1<float>(&builder, {0.0, 1.0}, {});
}
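For the unary tests, the operand alone carries the builder, so Abs(arg), Sign(arg), Neg(x), and ConvertElementType(op, F32) take no builder argument at all, and the unused "auto sign"/"auto abs" bindings are dropped unless the value is reused. The sign(x)*|x| - x identity exercised by the SignAbs helpers, condensed:

    XlaBuilder builder("sign_abs");
    auto arg = ConstantR1<float>(&builder, {-2.f, 0.f, 25.f});
    Sub(Mul(Sign(arg), Abs(arg)), arg);  // elementwise zero for real inputs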
diff --git a/tensorflow/compiler/xla/tests/vector_ops_reduce_test.cc b/tensorflow/compiler/xla/tests/vector_ops_reduce_test.cc
index 82d301983f..ea3aba6df1 100644
--- a/tensorflow/compiler/xla/tests/vector_ops_reduce_test.cc
+++ b/tensorflow/compiler/xla/tests/vector_ops_reduce_test.cc
@@ -46,7 +46,7 @@ class VecOpsReduceTest : public ClientLibraryTestBase {
{{1.0, 2.0, 3.0}, // } plane 2 in dim 0
{4.0, 5.0, 6.0}}});
// clang-format on
- return builder_.ConstantR3FromArray3D<float>(x3d);
+ return ConstantR3FromArray3D<float>(&builder_, x3d);
}
XlaBuilder builder_;
@@ -56,11 +56,10 @@ class VecOpsReduceTest : public ClientLibraryTestBase {
TEST_F(VecOpsReduceTest, AddReduceR1F32) {
auto sum_reducer = CreateScalarAddComputation(F32, &builder_);
- auto x = builder_.ConstantR1<float>(
- {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
- auto add_reduce =
- builder_.Reduce(x, builder_.ConstantR0<float>(0.0f), sum_reducer,
- /*dimensions_to_reduce=*/{0});
+ auto x = ConstantR1<float>(
+ &builder_, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
+ Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer,
+ /*dimensions_to_reduce=*/{0});
ComputeAndCompareR0<float>(&builder_, -4.2f, {}, errspec_);
}
@@ -71,10 +70,9 @@ TEST_F(VecOpsReduceTest, AddReduceBigR1F32) {
std::vector<float> input(3000);
std::iota(input.begin(), input.end(), 100.0f);
- auto x = builder_.ConstantR1<float>(input);
- auto add_reduce =
- builder_.Reduce(x, builder_.ConstantR0<float>(0.0f), sum_reducer,
- /*dimensions_to_reduce=*/{0});
+ auto x = ConstantR1<float>(&builder_, input);
+ Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer,
+ /*dimensions_to_reduce=*/{0});
float expected = std::accumulate(input.begin(), input.end(), 0.0f);
ComputeAndCompareR0<float>(&builder_, expected, {}, errspec_);
@@ -83,11 +81,10 @@ TEST_F(VecOpsReduceTest, AddReduceBigR1F32) {
TEST_F(VecOpsReduceTest, MaxReduceR1F32) {
auto max_reducer = CreateScalarMax();
- auto x = builder_.ConstantR1<float>(
- {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
- auto max_reduce =
- builder_.Reduce(x, builder_.ConstantR0<float>(0.0f), max_reducer,
- /*dimensions_to_reduce=*/{0});
+ auto x = ConstantR1<float>(
+ &builder_, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
+ Reduce(x, ConstantR0<float>(&builder_, 0.0f), max_reducer,
+ /*dimensions_to_reduce=*/{0});
ComputeAndCompareR0<float>(&builder_, 2.6f, {}, errspec_);
}
@@ -95,11 +92,10 @@ TEST_F(VecOpsReduceTest, MaxReduceR1F32) {
TEST_F(VecOpsReduceTest, MaxReduceR1F32WithNontrivialInit) {
auto max_reducer = CreateScalarMax();
- auto x = builder_.ConstantR1<float>(
- {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
- auto max_reduce =
- builder_.Reduce(x, builder_.ConstantR0<float>(4.0f), max_reducer,
- /*dimensions_to_reduce=*/{0});
+ auto x = ConstantR1<float>(
+ &builder_, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
+ Reduce(x, ConstantR0<float>(&builder_, 4.0f), max_reducer,
+ /*dimensions_to_reduce=*/{0});
ComputeAndCompareR0<float>(&builder_, 4.0f, {}, errspec_);
}
@@ -108,15 +104,14 @@ TEST_F(VecOpsReduceTest, AddReduceR2F32Dim1) {
auto sum_reducer = CreateScalarAddComputation(F32, &builder_);
// clang-format off
- auto x = builder_.ConstantR2<float>({
+ auto x = ConstantR2<float>(&builder_, {
{1.0, 2.0, 3.0}, // | dim 0
{4.0, 5.0, 6.0}}); // |
// ------ dim 1 ----------
// clang-format on
- auto add_reduce =
- builder_.Reduce(x, builder_.ConstantR0<float>(0.0f), sum_reducer,
- /*dimensions_to_reduce=*/{1});
+ Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer,
+ /*dimensions_to_reduce=*/{1});
ComputeAndCompareR1<float>(&builder_, {6.0, 15.0}, {}, errspec_);
}
@@ -125,13 +120,12 @@ TEST_F(VecOpsReduceTest, AddReduceR2F32Dim0) {
auto sum_reducer = CreateScalarAddComputation(F32, &builder_);
// clang-format off
- auto x = builder_.ConstantR2<float>({
+ auto x = ConstantR2<float>(&builder_, {
{1.0, 2.0, 3.0},
{4.0, 5.0, 6.0}});
// clang-format on
- auto add_reduce =
- builder_.Reduce(x, builder_.ConstantR0<float>(0.0f), sum_reducer,
- /*dimensions_to_reduce=*/{0});
+ Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer,
+ /*dimensions_to_reduce=*/{0});
ComputeAndCompareR1<float>(&builder_, {5.0, 7.0, 9.0}, {}, errspec_);
}
@@ -139,9 +133,8 @@ TEST_F(VecOpsReduceTest, AddReduceR2F32Dim0) {
TEST_F(VecOpsReduceTest, AddReduceR3F32Dim2) {
auto sum_reducer = CreateScalarAddComputation(F32, &builder_);
auto x = BuildSampleConstantCube();
- auto add_reduce =
- builder_.Reduce(x, builder_.ConstantR0<float>(0.0f), sum_reducer,
- /*dimensions_to_reduce=*/{2});
+ Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer,
+ /*dimensions_to_reduce=*/{2});
Array2D<float> expected_array({{6.0f, 15.0f}, {6.0f, 15.0f}, {6.0f, 15.0f}});
@@ -151,9 +144,8 @@ TEST_F(VecOpsReduceTest, AddReduceR3F32Dim2) {
TEST_F(VecOpsReduceTest, AddReduceR3F32Dim1) {
auto sum_reducer = CreateScalarAddComputation(F32, &builder_);
auto x = BuildSampleConstantCube();
- auto add_reduce =
- builder_.Reduce(x, builder_.ConstantR0<float>(0.0f), sum_reducer,
- /*dimensions_to_reduce=*/{1});
+ Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer,
+ /*dimensions_to_reduce=*/{1});
Array2D<float> expected_array(
{{5.0f, 7.0f, 9.0f}, {5.0f, 7.0f, 9.0f}, {5.0f, 7.0f, 9.0f}});
@@ -164,9 +156,8 @@ TEST_F(VecOpsReduceTest, AddReduceR3F32Dim1) {
TEST_F(VecOpsReduceTest, AddReduceR3F32Dim0) {
auto sum_reducer = CreateScalarAddComputation(F32, &builder_);
auto x = BuildSampleConstantCube();
- auto add_reduce =
- builder_.Reduce(x, builder_.ConstantR0<float>(0.0f), sum_reducer,
- /*dimensions_to_reduce=*/{0});
+ Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer,
+ /*dimensions_to_reduce=*/{0});
Array2D<float> expected_array({{3.0f, 6.0f, 9.0f}, {12.0f, 15.0f, 18.0f}});
@@ -176,9 +167,8 @@ TEST_F(VecOpsReduceTest, AddReduceR3F32Dim0) {
TEST_F(VecOpsReduceTest, AddReduceR3F32Dims1and2) {
auto sum_reducer = CreateScalarAddComputation(F32, &builder_);
auto x = BuildSampleConstantCube();
- auto add_reduce =
- builder_.Reduce(x, builder_.ConstantR0<float>(0.0f), sum_reducer,
- /*dimensions_to_reduce=*/{1, 2});
+ Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer,
+ /*dimensions_to_reduce=*/{1, 2});
ComputeAndCompareR1<float>(&builder_, {21.0, 21.0, 21.0}, {}, errspec_);
}
@@ -186,9 +176,8 @@ TEST_F(VecOpsReduceTest, AddReduceR3F32Dims1and2) {
XLA_TEST_F(VecOpsReduceTest, AddReduceR3F32Dims0and2) {
auto sum_reducer = CreateScalarAddComputation(F32, &builder_);
auto x = BuildSampleConstantCube();
- auto add_reduce =
- builder_.Reduce(x, builder_.ConstantR0<float>(0.0f), sum_reducer,
- /*dimensions_to_reduce=*/{0, 2});
+ Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer,
+ /*dimensions_to_reduce=*/{0, 2});
ComputeAndCompareR1<float>(&builder_, {18.0, 45.0}, {}, errspec_);
}
@@ -196,9 +185,8 @@ XLA_TEST_F(VecOpsReduceTest, AddReduceR3F32Dims0and2) {
TEST_F(VecOpsReduceTest, AddReduceR3F32Dims0and1) {
auto sum_reducer = CreateScalarAddComputation(F32, &builder_);
auto x = BuildSampleConstantCube();
- auto add_reduce =
- builder_.Reduce(x, builder_.ConstantR0<float>(0.0f), sum_reducer,
- /*dimensions_to_reduce=*/{0, 1});
+ Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer,
+ /*dimensions_to_reduce=*/{0, 1});
ComputeAndCompareR1<float>(&builder_, {15.0, 21.0, 27.0}, {}, errspec_);
}
@@ -206,9 +194,8 @@ TEST_F(VecOpsReduceTest, AddReduceR3F32Dims0and1) {
TEST_F(VecOpsReduceTest, AddReduceR3F32AllDims) {
auto sum_reducer = CreateScalarAddComputation(F32, &builder_);
auto x = BuildSampleConstantCube();
- auto add_reduce =
- builder_.Reduce(x, builder_.ConstantR0<float>(0.0f), sum_reducer,
- /*dimensions_to_reduce=*/{0, 1, 2});
+ Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer,
+ /*dimensions_to_reduce=*/{0, 1, 2});
ComputeAndCompareR0<float>(&builder_, 63.0, {}, errspec_);
}
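Reduce keeps its argument order under the migration; only the builder_. prefix goes away, and the init value is built with the free ConstantR0. Condensed from the R1 cases above:

    auto sum_reducer = CreateScalarAddComputation(F32, &builder_);
    auto x = ConstantR1<float>(&builder_, {1.f, 2.f, 3.f});
    Reduce(x, ConstantR0<float>(&builder_, 0.0f), sum_reducer,
           /*dimensions_to_reduce=*/{0});  // scalar result 6.0f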
diff --git a/tensorflow/compiler/xla/tests/vector_ops_simple_test.cc b/tensorflow/compiler/xla/tests/vector_ops_simple_test.cc
index 5cce7a2bf8..79bae22dac 100644
--- a/tensorflow/compiler/xla/tests/vector_ops_simple_test.cc
+++ b/tensorflow/compiler/xla/tests/vector_ops_simple_test.cc
@@ -50,9 +50,9 @@ class VecOpsSimpleTest : public ClientLibraryTestBase {
XLA_TEST_F(VecOpsSimpleTest, ExpTenValues) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>(
- {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
- auto exp = builder.Exp(x);
+ auto x = ConstantR1<float>(
+ &builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
+ Exp(x);
std::vector<float> expected = {8.1662, 7.4274e-02, 13.4637, 1.8316e-02,
8.1662, 9.9742, 6.7379e-03, 4.0657e-01,
@@ -69,8 +69,8 @@ XLA_TEST_F(VecOpsSimpleTest, ExpManyValues) {
for (int i = 0; i < count; ++i) {
exponents.push_back(i / static_cast<float>(count));
}
- auto x = builder.ConstantR1<float>(exponents);
- auto exp = builder.Exp(x);
+ auto x = ConstantR1<float>(&builder, exponents);
+ Exp(x);
std::vector<float> expected;
expected.reserve(exponents.size());
@@ -98,8 +98,8 @@ XLA_TEST_F(VecOpsSimpleTest, ExpIn4D) {
Array4D<float> expected(2, 2, 2, 2, expected_vector);
- auto x = builder.ConstantR4FromArray4D<float>(exponents);
- auto exp = builder.Exp(x);
+ auto x = ConstantR4FromArray4D<float>(&builder, exponents);
+ Exp(x);
ComputeAndCompareR4<float>(&builder, expected, {},
ErrorSpec(/*aabs=*/1e-2, /*arel=*/1e-3));
@@ -107,9 +107,9 @@ XLA_TEST_F(VecOpsSimpleTest, ExpIn4D) {
XLA_TEST_F(VecOpsSimpleTest, NegateTenFloatValues) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>(
- {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
- builder.Neg(x);
+ auto x = ConstantR1<float>(
+ &builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
+ Neg(x);
std::vector<float> expected = {-2.1, 2.6, -2.6, 4.0, -2.1,
-2.3, 5.0, 0.9, 2.4, -1.6};
@@ -118,8 +118,8 @@ XLA_TEST_F(VecOpsSimpleTest, NegateTenFloatValues) {
XLA_TEST_F(VecOpsSimpleTest, NegateTenInt32Values) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<int32>({2, -2, 12, -4, 5, 20, -15, 0, -2, 1});
- builder.Neg(x);
+ auto x = ConstantR1<int32>(&builder, {2, -2, 12, -4, 5, 20, -15, 0, -2, 1});
+ Neg(x);
std::vector<int> expected = {-2, 2, -12, 4, -5, -20, 15, 0, 2, -1};
ComputeAndCompareR1<int32>(&builder, expected, {});
@@ -127,59 +127,19 @@ XLA_TEST_F(VecOpsSimpleTest, NegateTenInt32Values) {
XLA_TEST_F(VecOpsSimpleTest, NegateUint32Values) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<uint32>(
- {0, 1, 42, static_cast<uint32>(-1), static_cast<uint32>(-12)});
- builder.Neg(x);
+ auto x = ConstantR1<uint32>(
+ &builder, {0, 1, 42, static_cast<uint32>(-1), static_cast<uint32>(-12)});
+ Neg(x);
std::vector<uint32> expected = {0, static_cast<uint32>(-1),
static_cast<uint32>(-42), 1, 12};
ComputeAndCompareR1<uint32>(&builder, expected, {});
}
-XLA_TEST_F(VecOpsSimpleTest, SquareTenValues) {
- XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>(
- {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
- builder.SquareF32(x);
-
- std::vector<float> expected = {4.41, 6.76, 6.76, 16., 4.41,
- 5.29, 25., 0.81, 5.76, 2.56};
- ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
-}
-
-XLA_TEST_F(VecOpsSimpleTest, ReciprocalTenValues) {
- XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>(
- {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
- builder.ReciprocalF32(x);
-
- std::vector<float> expected = {
- 0.47619048, -0.38461538, 0.38461538, -0.25, 0.47619048,
- 0.43478261, -0.2, -1.11111111, -0.41666667, 0.625};
- ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
-}
-
-XLA_TEST_F(VecOpsSimpleTest, SqrtZeroes) {
- XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>({0.0, -0.0});
- auto exp = builder.SqrtF32(x);
-
- ComputeAndCompareR1<float>(&builder, {0, 0}, {}, error_spec_);
-}
-
-XLA_TEST_F(VecOpsSimpleTest, SqrtSixValues) {
- XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>({16.0, 1.0, 1024.0, 0.16, 0.2, 12345});
- auto exp = builder.SqrtF32(x);
-
- std::vector<float> expected = {4, 1, 32, 0.4, 0.4472, 111.1080};
- ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
-}
-
XLA_TEST_F(VecOpsSimpleTest, InvSqrtSevenValues) {
XlaBuilder builder(TestName());
- auto x =
- builder.ConstantR1<float>({16.0, 1.0, 1024.0, 0.16, 0.2, 12345, 1.2345});
- auto exp = builder.Pow(x, builder.ConstantR0<float>(-.5f));
+ auto x = ConstantR1<float>(&builder,
+ {16.0, 1.0, 1024.0, 0.16, 0.2, 12345, 1.2345});
+ Pow(x, ConstantR0<float>(&builder, -.5f));
std::vector<float> expected = {.25, 1, .03125, 2.5,
2.23607, .009000, .900025};
@@ -191,11 +151,11 @@ XLA_TEST_F(VecOpsSimpleTest, AddTenValuesViaMap) {
XlaBuilder builder(TestName());
auto add = CreateScalarAddComputation(F32, &builder);
- auto x = builder.ConstantR1<float>(
- {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
- auto y = builder.ConstantR1<float>(
- {-0.4, -0.6, -3.0, 0.2, 3.8, -2.2, -1.8, 4.9, 1.4, 0.6});
- auto max = builder.Map({x, y}, add, {0});
+ auto x = ConstantR1<float>(
+ &builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
+ auto y = ConstantR1<float>(
+ &builder, {-0.4, -0.6, -3.0, 0.2, 3.8, -2.2, -1.8, 4.9, 1.4, 0.6});
+ Map(&builder, {x, y}, add, {0});
std::vector<float> expected = {1.7, -3.2, -0.4, -3.8, 5.9,
0.1, -6.8, 4., -1., 2.2};
@@ -204,11 +164,11 @@ XLA_TEST_F(VecOpsSimpleTest, AddTenValuesViaMap) {
XLA_TEST_F(VecOpsSimpleTest, MaxTenValues) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>(
- {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
- auto y = builder.ConstantR1<float>(
- {-0.4, -0.6, -3.0, 0.2, 3.8, -2.2, -1.8, 4.9, 1.4, 0.6});
- auto max = builder.Max(x, y);
+ auto x = ConstantR1<float>(
+ &builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
+ auto y = ConstantR1<float>(
+ &builder, {-0.4, -0.6, -3.0, 0.2, 3.8, -2.2, -1.8, 4.9, 1.4, 0.6});
+ Max(x, y);
std::vector<float> expected = {2.1, -0.6, 2.6, 0.2, 3.8,
2.3, -1.8, 4.9, 1.4, 1.6};
@@ -227,7 +187,7 @@ XLA_TEST_F(VecOpsSimpleTest, MaxTenValuesFromParams) {
{21.0f, 22.0f, 23.0f, 24.0f}, /*parameter_number=*/1, /*name=*/"v2",
/*builder=*/&builder, /*data_handle=*/&v2);
- auto max = builder.Max(v1, v2);
+ Max(v1, v2);
ComputeAndCompareR1<float>(&builder, {41.0f, 22.0f, 23.0f, 84.0f},
{param0_data.get(), param1_data.get()},
error_spec_);
@@ -267,7 +227,7 @@ XLA_TEST_F(VecOpsSimpleTest, Max15000ValuesFromParams) {
CreateR1Parameter<float>(v2vec, /*parameter_number=*/1, /*name=*/"v2",
/*builder=*/&builder, /*data_handle=*/&v2);
- auto max = builder.Max(v1, v2);
+ Max(v1, v2);
ComputeAndCompareR1<float>(&builder, expected_vec,
{param0_data.get(), param1_data.get()},
error_spec_);
@@ -275,10 +235,10 @@ XLA_TEST_F(VecOpsSimpleTest, Max15000ValuesFromParams) {
XLA_TEST_F(VecOpsSimpleTest, MaxTenValuesWithScalar) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>(
- {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
- auto y = builder.ConstantR0<float>(0);
- auto max = builder.Max(x, y);
+ auto x = ConstantR1<float>(
+ &builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
+ auto y = ConstantR0<float>(&builder, 0);
+ Max(x, y);
std::vector<float> expected = {2.1, 0.0, 2.6, 0.0, 2.1,
2.3, 0.0, 0.0, 0.0, 1.6};
@@ -287,11 +247,11 @@ XLA_TEST_F(VecOpsSimpleTest, MaxTenValuesWithScalar) {
XLA_TEST_F(VecOpsSimpleTest, MinTenValues) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<float>(
- {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
- auto y = builder.ConstantR1<float>(
- {-0.4, -0.6, -3.0, 0.2, 3.8, -2.2, -1.8, 4.9, 1.4, 0.6});
- auto min = builder.Min(x, y);
+ auto x = ConstantR1<float>(
+ &builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
+ auto y = ConstantR1<float>(
+ &builder, {-0.4, -0.6, -3.0, 0.2, 3.8, -2.2, -1.8, 4.9, 1.4, 0.6});
+ Min(x, y);
std::vector<float> expected = {-0.4, -2.6, -3.0, -4.0, 2.1,
-2.2, -5.0, -0.9, -2.4, 0.6};
@@ -300,11 +260,11 @@ XLA_TEST_F(VecOpsSimpleTest, MinTenValues) {
XLA_TEST_F(VecOpsSimpleTest, MinMaxTenValues) {
XlaBuilder builder(TestName());
- auto zero = builder.ConstantR0<float>(0);
- auto one = builder.ConstantR0<float>(1);
- auto x = builder.ConstantR1<float>(
- {2.1, -2.6, 2.6, 0.3, 3.1, 0.9, -5.0, 0.1, -2.4, 0.6});
- auto clamp = builder.Min(builder.Max(x, zero), one);
+ auto zero = ConstantR0<float>(&builder, 0);
+ auto one = ConstantR0<float>(&builder, 1);
+ auto x = ConstantR1<float>(
+ &builder, {2.1, -2.6, 2.6, 0.3, 3.1, 0.9, -5.0, 0.1, -2.4, 0.6});
+ Min(Max(x, zero), one);
std::vector<float> expected = {1.0, 0.0, 1.0, 0.3, 1.0,
0.9, 0.0, 0.1, 0.0, 0.6};
@@ -313,11 +273,11 @@ XLA_TEST_F(VecOpsSimpleTest, MinMaxTenValues) {
XLA_TEST_F(VecOpsSimpleTest, ClampTenValuesConstant) {
XlaBuilder builder(TestName());
- auto zero = builder.ConstantR0<float>(0);
- auto one = builder.ConstantR0<float>(1);
- auto x = builder.ConstantR1<float>(
- {2.1, -2.6, 2.6, 0.3, 3.1, 0.9, -5.0, 0.1, -2.4, 0.6});
- auto clamp = builder.Clamp(zero, x, one);
+ auto zero = ConstantR0<float>(&builder, 0);
+ auto one = ConstantR0<float>(&builder, 1);
+ auto x = ConstantR1<float>(
+ &builder, {2.1, -2.6, 2.6, 0.3, 3.1, 0.9, -5.0, 0.1, -2.4, 0.6});
+ Clamp(zero, x, one);
std::vector<float> expected = {1.0, 0.0, 1.0, 0.3, 1.0,
0.9, 0.0, 0.1, 0.0, 0.6};
@@ -326,10 +286,10 @@ XLA_TEST_F(VecOpsSimpleTest, ClampTenValuesConstant) {
XLA_TEST_F(VecOpsSimpleTest, ClampTwoValuesConstant) {
XlaBuilder builder(TestName());
- auto zero = builder.ConstantR1<float>({0.0f, 0.0f});
- auto one = builder.ConstantR1<float>({1.0f, 1.0f});
- auto x = builder.ConstantR1<float>({2.1, -2.6});
- auto clamp = builder.Clamp(zero, x, one);
+ auto zero = ConstantR1<float>(&builder, {0.0f, 0.0f});
+ auto one = ConstantR1<float>(&builder, {1.0f, 1.0f});
+ auto x = ConstantR1<float>(&builder, {2.1, -2.6});
+ Clamp(zero, x, one);
std::vector<float> expected = {1.0, 0.0};
ComputeAndCompareR1<float>(&builder, expected, {});
@@ -337,11 +297,11 @@ XLA_TEST_F(VecOpsSimpleTest, ClampTwoValuesConstant) {
XLA_TEST_F(VecOpsSimpleTest, ClampTenValuesConstantNonzeroLower) {
XlaBuilder builder(TestName());
- auto one = builder.ConstantR0<float>(1);
- auto two = builder.ConstantR0<float>(2);
- auto x = builder.ConstantR1<float>(
- {2.1, -2.6, 2.6, 0.3, 3.1, 0.9, -5.0, 0.1, -2.4, 0.6});
- auto clamp = builder.Clamp(one, x, two);
+ auto one = ConstantR0<float>(&builder, 1);
+ auto two = ConstantR0<float>(&builder, 2);
+ auto x = ConstantR1<float>(
+ &builder, {2.1, -2.6, 2.6, 0.3, 3.1, 0.9, -5.0, 0.1, -2.4, 0.6});
+ Clamp(one, x, two);
std::vector<float> expected = {2.0, 1.0, 2.0, 1.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0};
@@ -350,10 +310,10 @@ XLA_TEST_F(VecOpsSimpleTest, ClampTenValuesConstantNonzeroLower) {
XLA_TEST_F(VecOpsSimpleTest, ClampValuesConstantS64) {
XlaBuilder builder(TestName());
- auto zero = builder.ConstantR0<int64>(0);
- auto one = builder.ConstantR0<int64>(10);
- auto x = builder.ConstantR1<int64>({-3, 3, 9, 13});
- auto clamp = builder.Clamp(zero, x, one);
+ auto zero = ConstantR0<int64>(&builder, 0);
+ auto one = ConstantR0<int64>(&builder, 10);
+ auto x = ConstantR1<int64>(&builder, {-3, 3, 9, 13});
+ Clamp(zero, x, one);
std::vector<int64> expected = {0, 3, 9, 10};
ComputeAndCompareR1<int64>(&builder, expected, {});
@@ -365,9 +325,9 @@ XLA_TEST_F(VecOpsSimpleTest, MapTenValues) {
// add_half(x) = x + 0.5
XlaBuilder builder("add_half");
auto x_value =
- builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x_value");
- auto half = builder.ConstantR0<float>(0.5);
- builder.Add(x_value, half);
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x_value");
+ auto half = ConstantR0<float>(&builder, 0.5);
+ Add(x_value, half);
auto computation_status = builder.Build();
ASSERT_IS_OK(computation_status.status());
add_half = computation_status.ConsumeValueOrDie();
@@ -378,9 +338,9 @@ XLA_TEST_F(VecOpsSimpleTest, MapTenValues) {
// clamp(y) = clamp<0,5>(y)
XlaBuilder builder("clamp");
auto y_value =
- builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "y_value");
- auto zero = builder.ConstantR0<float>(0.0);
- auto clamped = builder.Clamp(zero, y_value, builder.ConstantR0<float>(5));
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "y_value");
+ auto zero = ConstantR0<float>(&builder, 0.0);
+ Clamp(zero, y_value, ConstantR0<float>(&builder, 5));
auto computation_status = builder.Build();
ASSERT_IS_OK(computation_status.status());
clamp = computation_status.ConsumeValueOrDie();
@@ -391,13 +351,13 @@ XLA_TEST_F(VecOpsSimpleTest, MapTenValues) {
// mult_relu_add(z) = clamp(add_half(2 * max(z, 0)))
XlaBuilder builder("mult_relu_add");
auto z_value =
- builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "z_value");
- auto zero = builder.ConstantR0<float>(0.0);
- auto two = builder.ConstantR0<float>(2.0);
- auto max = builder.Max(z_value, zero);
- auto mult = builder.Mul(two, max);
- auto inner = builder.Map({mult}, add_half, {});
- builder.Map({inner}, clamp, {});
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "z_value");
+ auto zero = ConstantR0<float>(&builder, 0.0);
+ auto two = ConstantR0<float>(&builder, 2.0);
+ auto max = Max(z_value, zero);
+ auto mult = Mul(two, max);
+ auto inner = Map(&builder, {mult}, add_half, {});
+ Map(&builder, {inner}, clamp, {});
auto computation_status = builder.Build();
ASSERT_IS_OK(computation_status.status());
mult_relu_add = computation_status.ConsumeValueOrDie();
@@ -405,9 +365,9 @@ XLA_TEST_F(VecOpsSimpleTest, MapTenValues) {
XlaBuilder builder("map10");
{
- auto x = builder.ConstantR1<float>(
- {2.1, -21.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
- auto activations = builder.Map({x}, mult_relu_add, {0});
+ auto x = ConstantR1<float>(
+ &builder, {2.1, -21.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
+ Map(&builder, {x}, mult_relu_add, {0});
}
std::vector<float> expected = {4.7, 0.5, 5.0, 0.5, 4.7,
@@ -417,9 +377,9 @@ XLA_TEST_F(VecOpsSimpleTest, MapTenValues) {
XLA_TEST_F(VecOpsSimpleTest, RemainderTenValuesS32) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<int32>({-5, -4, -3, -2, -1, 0, 1, 2, 3, 4});
- auto y = builder.ConstantR0<int32>(3);
- builder.Rem(x, y);
+ auto x = ConstantR1<int32>(&builder, {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4});
+ auto y = ConstantR0<int32>(&builder, 3);
+ Rem(x, y);
std::vector<int32> expected = {-2, -1, 0, -2, -1, 0, 1, 2, 0, 1};
ComputeAndCompareR1<int32>(&builder, expected, {});
@@ -427,9 +387,9 @@ XLA_TEST_F(VecOpsSimpleTest, RemainderTenValuesS32) {
XLA_TEST_F(VecOpsSimpleTest, VectorPredicateEqual) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<bool>({false, true});
- auto y = builder.ConstantR1<bool>({true, false});
- builder.Eq(x, y);
+ auto x = ConstantR1<bool>(&builder, {false, true});
+ auto y = ConstantR1<bool>(&builder, {true, false});
+ Eq(x, y);
std::array<bool, 2> expected = {{false, false}};
ComputeAndCompareR1<bool>(&builder, expected, {});
@@ -437,9 +397,9 @@ XLA_TEST_F(VecOpsSimpleTest, VectorPredicateEqual) {
XLA_TEST_F(VecOpsSimpleTest, VectorPredicateNotEqual) {
XlaBuilder builder(TestName());
- auto x = builder.ConstantR1<bool>({false, true});
- auto y = builder.ConstantR1<bool>({true, false});
- builder.Ne(x, y);
+ auto x = ConstantR1<bool>(&builder, {false, true});
+ auto y = ConstantR1<bool>(&builder, {true, false});
+ Ne(x, y);
std::array<bool, 2> expected = {{true, true}};
ComputeAndCompareR1<bool>(&builder, expected, {});
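The hunks above all apply the same mechanical migration: XlaBuilder member methods become free functions that either take the builder explicitly or infer it from their operands, and because the last op recorded in a builder becomes the computation root, results that only fed ComputeAndCompare no longer need to be bound to variables. A minimal self-contained sketch of the new style, assuming the free-function declarations in xla_builder.h (signatures inferred from the calls above, not verified against any particular revision):

#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"

namespace xla {

XlaComputation BuildClampExample() {
  XlaBuilder builder("clamp_example");
  // Constants name the builder explicitly; element-wise ops infer it from
  // their operands.
  auto lo = ConstantR0<float>(&builder, 0);
  auto hi = ConstantR0<float>(&builder, 1);
  auto x = ConstantR1<float>(&builder, {2.1f, -2.6f, 0.3f});
  // No `auto clamp =` needed: the last recorded op is the root.
  Clamp(lo, x, hi);
  return builder.Build().ConsumeValueOrDie();
}

}  // namespace xla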
diff --git a/tensorflow/compiler/xla/tests/while_test.cc b/tensorflow/compiler/xla/tests/while_test.cc
index c463f3eac5..29befef92e 100644
--- a/tensorflow/compiler/xla/tests/while_test.cc
+++ b/tensorflow/compiler/xla/tests/while_test.cc
@@ -22,7 +22,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_computation.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/platform_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@@ -55,8 +55,8 @@ TEST_F(WhileTest, WhileWithScalarS32Result) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- builder.Gt(builder.ConstantR0<int32>(5), prev);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ Gt(ConstantR0<int32>(&builder, 5), prev);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -64,16 +64,16 @@ TEST_F(WhileTest, WhileWithScalarS32Result) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto input = builder.ConstantR0<int32>(1);
- builder.Add(input, prev);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto input = ConstantR0<int32>(&builder, 1);
+ Add(input, prev);
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder(TestName());
- auto init = builder.ConstantR0<int32>(0);
- builder.While(condition, body, init);
+ auto init = ConstantR0<int32>(&builder, 0);
+ While(condition, body, init);
ComputeAndCompareR0<int32>(&builder, 5, {});
}
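The while tests that follow share a shape contract the diff leaves implicit: condition and body are each built in their own XlaBuilder, each takes exactly one parameter whose shape matches the init operand, the condition must produce a scalar PRED, and the body must reproduce the loop-carried shape. A rough sketch of the counting loop above, condensed into one helper (names are illustrative):

#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/shape_util.h"

namespace xla {

// Builds a computation that counts from 0 up to n with a While node.
XlaComputation CountToN(int32 n) {
  const Shape s = ShapeUtil::MakeShape(S32, {});
  XlaBuilder cond("cond");
  Gt(ConstantR0<int32>(&cond, n), Parameter(&cond, 0, s, "prev"));   // prev < n
  XlaBuilder body("body");
  Add(ConstantR0<int32>(&body, 1), Parameter(&body, 0, s, "prev"));  // prev + 1
  XlaBuilder entry("count_to_n");
  While(cond.Build().ConsumeValueOrDie(), body.Build().ConsumeValueOrDie(),
        ConstantR0<int32>(&entry, 0));
  return entry.Build().ConsumeValueOrDie();
}

}  // namespace xla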
@@ -91,8 +91,8 @@ TEST_F(WhileTest, WhileWithScalarS64Result) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- builder.Gt(builder.ConstantR0<int64>(5), prev);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ Gt(ConstantR0<int64>(&builder, 5), prev);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -100,16 +100,16 @@ TEST_F(WhileTest, WhileWithScalarS64Result) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto input = builder.ConstantR0<int64>(1);
- builder.Add(input, prev);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto input = ConstantR0<int64>(&builder, 1);
+ Add(input, prev);
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder(TestName());
- auto init = builder.ConstantR0<int64>(0);
- builder.While(condition, body, init);
+ auto init = ConstantR0<int64>(&builder, 0);
+ While(condition, body, init);
ComputeAndCompareR0<int64>(&builder, 5, {});
}
@@ -122,8 +122,8 @@ TEST_F(WhileTest, WhileWithScalarResultNonConstInit) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- builder.Gt(builder.ConstantR0<int32>(5), prev);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ Gt(ConstantR0<int32>(&builder, 5), prev);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -131,18 +131,18 @@ TEST_F(WhileTest, WhileWithScalarResultNonConstInit) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto input = builder.ConstantR0<int32>(1);
- builder.Add(input, prev);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto input = ConstantR0<int32>(&builder, 1);
+ Add(input, prev);
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder(TestName());
- auto init = builder.Reduce(builder.ConstantR1<int32>(2, 1),
- builder.ConstantR0<int32>(0),
- CreateScalarAddComputation(S32, &builder), {0});
- builder.While(condition, body, init);
+ auto init =
+ Reduce(ConstantR1<int32>(&builder, 2, 1), ConstantR0<int32>(&builder, 0),
+ CreateScalarAddComputation(S32, &builder), {0});
+ While(condition, body, init);
ComputeAndCompareR0<int32>(&builder, 5, {});
}
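Two ConstantR1 overloads meet in the hunk above and are easy to conflate. Assuming the overload set implied by these calls, the distinction is:

XlaBuilder builder("constants");
// Fill form: (length, value) -> the length-2 vector {1, 1}.
auto ones = ConstantR1<int32>(&builder, /*length=*/2, /*value=*/1);
// List form: explicit elements -> {0, 7}.
auto vals = ConstantR1<int32>(&builder, {0, 7});

So the init above is a Reduce over {1, 1}, i.e. the scalar 2, and the loop then counts 2 -> 5, matching the expected result.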
@@ -154,8 +154,8 @@ TEST_F(WhileTest, WhileWithPredicateResult) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- builder.Ne(builder.ConstantR0<bool>(true), prev);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ Ne(ConstantR0<bool>(&builder, true), prev);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -163,16 +163,16 @@ TEST_F(WhileTest, WhileWithPredicateResult) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- builder.Or(prev, builder.ConstantR0<bool>(true));
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ Or(prev, ConstantR0<bool>(&builder, true));
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder(TestName());
- auto init = builder.Ne(builder.ConstantR0<bool>(false),
- builder.ConstantR0<bool>(true));
- builder.While(condition, body, init);
+ auto init =
+ Ne(ConstantR0<bool>(&builder, false), ConstantR0<bool>(&builder, true));
+ While(condition, body, init);
ComputeAndCompareR0<bool>(&builder, true, {});
}
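The next hunk replaces a blanket gtest DISABLED_ prefix (plus its stale TODO) with the backend-scoped DISABLED_ON_INTERPRETER macro, so the test now runs everywhere except the interpreter backend. As a rough illustration of the mechanism only (the real definitions live in the XLA test-macros header and may differ), such macros rewrite the test name per backend:

// Hypothetical reconstruction, for illustration:
#ifdef XLA_TEST_BACKEND_INTERPRETER
#define DISABLED_ON_INTERPRETER(X) DISABLED_##X  // gtest skips DISABLED_* tests
#else
#define DISABLED_ON_INTERPRETER(X) X  // runs normally on other backends
#endif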
@@ -184,17 +184,16 @@ TEST_F(WhileTest, WhileWithPredicateResult) {
// while (result.sum() < 15.5f) {
// result = result + vector<float>(0);
// }
-// TODO(b/29185393): does not terminate on CPU.
-TEST_F(WhileTest, DISABLED_WhileWithEmptyVectorResult) {
+TEST_F(WhileTest, DISABLED_ON_INTERPRETER(WhileWithEmptyVectorResult)) {
Shape result_shape = ShapeUtil::MakeShape(F32, {0});
// Create a computation for the reduction.
XlaComputation add;
{
XlaBuilder builder("add");
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(F32, {}), "y");
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
+ Add(x, y);
add = builder.Build().ConsumeValueOrDie();
}
@@ -203,10 +202,10 @@ TEST_F(WhileTest, DISABLED_WhileWithEmptyVectorResult) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto sum = builder.Reduce(prev, builder.ConstantR0<float>(0.0f), add,
- /*dimensions_to_reduce=*/{0});
- builder.Gt(builder.ConstantR0<float>(15.5f), sum);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto sum = Reduce(prev, ConstantR0<float>(&builder, 0.0f), add,
+ /*dimensions_to_reduce=*/{0});
+ Gt(ConstantR0<float>(&builder, 15.5f), sum);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -215,16 +214,16 @@ TEST_F(WhileTest, DISABLED_WhileWithEmptyVectorResult) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto input = builder.ConstantR1<float>({});
- builder.Add(input, prev);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto input = ConstantR1<float>(&builder, {});
+ Add(input, prev);
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder("while");
- auto init = builder.ConstantR1<float>({});
- auto result = builder.While(condition, body, init);
+ auto init = ConstantR1<float>(&builder, {});
+ auto result = While(condition, body, init);
VLOG(2) << "while = "
<< ShapeUtil::HumanString(
builder.GetShape(result).ConsumeValueOrDie());
@@ -246,9 +245,9 @@ TEST_F(WhileTest, WhileWithVectorResult) {
XlaComputation add;
{
XlaBuilder builder("add");
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(F32, {}), "y");
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
+ Add(x, y);
add = builder.Build().ConsumeValueOrDie();
}
@@ -257,10 +256,10 @@ TEST_F(WhileTest, WhileWithVectorResult) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto sum = builder.Reduce(prev, builder.ConstantR0<float>(0.0f), add,
- /*dimensions_to_reduce=*/{0});
- builder.Gt(builder.ConstantR0<float>(15.5f), sum);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto sum = Reduce(prev, ConstantR0<float>(&builder, 0.0f), add,
+ /*dimensions_to_reduce=*/{0});
+ Gt(ConstantR0<float>(&builder, 15.5f), sum);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -269,16 +268,16 @@ TEST_F(WhileTest, WhileWithVectorResult) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto input = builder.ConstantR1<float>(8, 0.125f);
- builder.Add(input, prev);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto input = ConstantR1<float>(&builder, 8, 0.125f);
+ Add(input, prev);
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder("while");
- auto init = builder.ConstantR1<float>(8, 0.f);
- auto result = builder.While(condition, body, init);
+ auto init = ConstantR1<float>(&builder, 8, 0.f);
+ auto result = While(condition, body, init);
VLOG(2) << "while = "
<< ShapeUtil::HumanString(
builder.GetShape(result).ConsumeValueOrDie());
@@ -306,9 +305,9 @@ TEST_F(WhileTest, WhileWithVectorResultIntoTuple) {
XlaComputation add;
{
XlaBuilder builder("add");
- auto x = builder.Parameter(0, ShapeUtil::MakeShape(F32, {}), "x");
- auto y = builder.Parameter(1, ShapeUtil::MakeShape(F32, {}), "y");
- builder.Add(x, y);
+ auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
+ auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
+ Add(x, y);
add = builder.Build().ConsumeValueOrDie();
}
@@ -317,10 +316,10 @@ TEST_F(WhileTest, WhileWithVectorResultIntoTuple) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto sum = builder.Reduce(prev, builder.ConstantR0<float>(0.0f), add,
- /*dimensions_to_reduce=*/{0});
- builder.Gt(builder.ConstantR0<float>(15.5f), sum);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto sum = Reduce(prev, ConstantR0<float>(&builder, 0.0f), add,
+ /*dimensions_to_reduce=*/{0});
+ Gt(ConstantR0<float>(&builder, 15.5f), sum);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -329,27 +328,27 @@ TEST_F(WhileTest, WhileWithVectorResultIntoTuple) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto input = builder.ConstantR1<float>(8, 0.125f);
- builder.Add(input, prev);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto input = ConstantR1<float>(&builder, 8, 0.125f);
+ Add(input, prev);
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder("while");
- auto init = builder.ConstantR1<float>(8, 0.f);
- auto result = builder.While(condition, body, init);
+ auto init = ConstantR1<float>(&builder, 8, 0.f);
+ auto result = While(condition, body, init);
VLOG(2) << "while = "
<< ShapeUtil::HumanString(
builder.GetShape(result).ConsumeValueOrDie());
- builder.Tuple({result});
+ Tuple(&builder, {result});
// Individual elements will increase by 1/8 each time through the loop, so

// the sum will increase by 1.0. It will first be >15.5 when the elements
// have all reached 2.0.
auto expected_data =
- Literal::CreateR1<float>({2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f});
- auto expected = Literal::MakeTuple({expected_data.get()});
+ LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f});
+ auto expected = LiteralUtil::MakeTuple({expected_data.get()});
VLOG(2) << "expected = " << ShapeUtil::HumanString(expected->shape());
ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.0001));
}
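This hunk also picks up the second refactor threaded through the file: literal construction moves from static members of Literal to the LiteralUtil factory namespace. A usage sketch (unique_ptr ownership inferred from the .get() calls above; header layout as of this change, which may differ elsewhere):

#include "tensorflow/compiler/xla/literal_util.h"

auto counter = LiteralUtil::CreateR0<int32>(5);
auto data = LiteralUtil::CreateR1<float>({2.f, 2.f});
// MakeTuple borrows the elements by raw pointer; the unique_ptrs above
// keep ownership for the lifetime of `tuple`.
auto tuple = LiteralUtil::MakeTuple({counter.get(), data.get()});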
@@ -366,9 +365,9 @@ TEST_F(WhileTest, WhileWithPermutationAndTupleResult) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Gt(builder.ConstantR0<int32>(N), iteration);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Gt(ConstantR0<int32>(&builder, N), iteration);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -377,32 +376,34 @@ TEST_F(WhileTest, WhileWithPermutationAndTupleResult) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- auto w1 = builder.GetTupleElement(prev, 1);
- auto w2 = builder.GetTupleElement(prev, 2);
- auto w3 = builder.GetTupleElement(prev, 3);
- builder.Tuple(
- {builder.Add(iteration, builder.ConstantR0<int32>(1)), w3, w1, w2});
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ auto w1 = GetTupleElement(prev, 1);
+ auto w2 = GetTupleElement(prev, 2);
+ auto w3 = GetTupleElement(prev, 3);
+ Tuple(&builder,
+ {Add(iteration, ConstantR0<int32>(&builder, 1)), w3, w1, w2});
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder("while");
- auto init = builder.Tuple(
- {builder.ConstantR0<int32>(0), builder.ConstantR1<float>(3, 1.f),
- builder.ConstantR1<float>(3, 2.f), builder.ConstantR1<float>(3, 3.f)});
- auto result = builder.While(condition, body, init);
+ auto init = Tuple(&builder, {ConstantR0<int32>(&builder, 0),
+ ConstantR1<float>(&builder, 3, 1.f),
+ ConstantR1<float>(&builder, 3, 2.f),
+ ConstantR1<float>(&builder, 3, 3.f)});
+ auto result = While(condition, body, init);
VLOG(2) << "result = "
<< ShapeUtil::HumanString(
builder.GetShape(result).ConsumeValueOrDie());
- auto expected_counter = Literal::CreateR0<int32>(N);
- auto expected_w1 = Literal::CreateR1<float>({1.0f, 1.0f, 1.0f});
- auto expected_w2 = Literal::CreateR1<float>({2.0f, 2.0f, 2.0f});
- auto expected_w3 = Literal::CreateR1<float>({3.0f, 3.0f, 3.0f});
- auto expected = Literal::MakeTuple({expected_counter.get(), expected_w2.get(),
- expected_w3.get(), expected_w1.get()});
+ auto expected_counter = LiteralUtil::CreateR0<int32>(N);
+ auto expected_w1 = LiteralUtil::CreateR1<float>({1.0f, 1.0f, 1.0f});
+ auto expected_w2 = LiteralUtil::CreateR1<float>({2.0f, 2.0f, 2.0f});
+ auto expected_w3 = LiteralUtil::CreateR1<float>({3.0f, 3.0f, 3.0f});
+ auto expected =
+ LiteralUtil::MakeTuple({expected_counter.get(), expected_w2.get(),
+ expected_w3.get(), expected_w1.get()});
VLOG(2) << "expected = " << ShapeUtil::HumanString(expected->shape());
ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.0001));
}
@@ -419,9 +420,9 @@ TEST_F(WhileTest, WhileWithPermutationAndVectorResult) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Gt(builder.ConstantR0<int32>(N), iteration);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Gt(ConstantR0<int32>(&builder, N), iteration);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -430,26 +431,27 @@ TEST_F(WhileTest, WhileWithPermutationAndVectorResult) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- auto w1 = builder.GetTupleElement(prev, 1);
- auto w2 = builder.GetTupleElement(prev, 2);
- auto w3 = builder.GetTupleElement(prev, 3);
- builder.Tuple(
- {builder.Add(iteration, builder.ConstantR0<int32>(1)), w3, w1, w2});
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ auto w1 = GetTupleElement(prev, 1);
+ auto w2 = GetTupleElement(prev, 2);
+ auto w3 = GetTupleElement(prev, 3);
+ Tuple(&builder,
+ {Add(iteration, ConstantR0<int32>(&builder, 1)), w3, w1, w2});
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder("while");
- auto init = builder.Tuple(
- {builder.ConstantR0<int32>(0), builder.ConstantR1<float>(3, 1.f),
- builder.ConstantR1<float>(3, 2.f), builder.ConstantR1<float>(3, 3.f)});
- auto xla_while = builder.While(condition, body, init);
-
- auto add12 = builder.Add(builder.GetTupleElement(xla_while, 1),
- builder.GetTupleElement(xla_while, 2));
- auto result = builder.Add(add12, builder.GetTupleElement(xla_while, 3));
+ auto init = Tuple(&builder, {ConstantR0<int32>(&builder, 0),
+ ConstantR1<float>(&builder, 3, 1.f),
+ ConstantR1<float>(&builder, 3, 2.f),
+ ConstantR1<float>(&builder, 3, 3.f)});
+ auto xla_while = While(condition, body, init);
+
+ auto add12 =
+ Add(GetTupleElement(xla_while, 1), GetTupleElement(xla_while, 2));
+ auto result = Add(add12, GetTupleElement(xla_while, 3));
VLOG(2) << "result = "
<< ShapeUtil::HumanString(
builder.GetShape(result).ConsumeValueOrDie());
@@ -474,9 +476,9 @@ TEST_F(WhileTest, WhileWithTupleResult) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Gt(builder.ConstantR0<int32>(5), iteration);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Gt(ConstantR0<int32>(&builder, 5), iteration);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -486,30 +488,30 @@ TEST_F(WhileTest, WhileWithTupleResult) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- auto weights = builder.GetTupleElement(prev, 1);
- auto input = builder.ConstantR1<float>(10, 1.f);
- auto new_weights = builder.Add(weights, input);
- builder.Tuple(
- {builder.Add(iteration, builder.ConstantR0<int32>(1)), new_weights});
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ auto weights = GetTupleElement(prev, 1);
+ auto input = ConstantR1<float>(&builder, 10, 1.f);
+ auto new_weights = Add(weights, input);
+ Tuple(&builder,
+ {Add(iteration, ConstantR0<int32>(&builder, 1)), new_weights});
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder("while");
- auto init = builder.Tuple(
- {builder.ConstantR0<int32>(0), builder.ConstantR1<float>(10, 0.f)});
- auto result = builder.While(condition, body, init);
+ auto init = Tuple(&builder, {ConstantR0<int32>(&builder, 0),
+ ConstantR1<float>(&builder, 10, 0.f)});
+ auto result = While(condition, body, init);
VLOG(2) << "while = "
<< ShapeUtil::HumanString(
builder.GetShape(result).ConsumeValueOrDie());
- auto expected_counter = Literal::CreateR0<int32>(5);
- auto expected_data = Literal::CreateR1<float>(
+ auto expected_counter = LiteralUtil::CreateR0<int32>(5);
+ auto expected_data = LiteralUtil::CreateR1<float>(
{5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f});
auto expected =
- Literal::MakeTuple({expected_counter.get(), expected_data.get()});
+ LiteralUtil::MakeTuple({expected_counter.get(), expected_data.get()});
VLOG(2) << "expected = " << ShapeUtil::HumanString(expected->shape());
ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.0001));
}
@@ -524,9 +526,9 @@ TEST_F(WhileTest, WhileWithPredicateTupleResult) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Gt(builder.ConstantR0<int32>(5), iteration);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Gt(ConstantR0<int32>(&builder, 5), iteration);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -535,29 +537,28 @@ TEST_F(WhileTest, WhileWithPredicateTupleResult) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- auto pred = builder.GetTupleElement(prev, 1);
- auto new_pred = builder.Or(pred, builder.ConstantR0<bool>(true));
- builder.Tuple(
- {builder.Add(iteration, builder.ConstantR0<int32>(1)), new_pred});
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ auto pred = GetTupleElement(prev, 1);
+ auto new_pred = Or(pred, ConstantR0<bool>(&builder, true));
+ Tuple(&builder, {Add(iteration, ConstantR0<int32>(&builder, 1)), new_pred});
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder("while");
- auto init = builder.Tuple({builder.ConstantR0<int32>(0),
- builder.Ne(builder.ConstantR0<bool>(false),
- builder.ConstantR0<bool>(true))});
- auto result = builder.While(condition, body, init);
+ auto init = Tuple(&builder, {ConstantR0<int32>(&builder, 0),
+ Ne(ConstantR0<bool>(&builder, false),
+ ConstantR0<bool>(&builder, true))});
+ auto result = While(condition, body, init);
VLOG(2) << "while = "
<< ShapeUtil::HumanString(
builder.GetShape(result).ConsumeValueOrDie());
- auto expected_counter = Literal::CreateR0<int32>(5);
- auto expected_predicate = Literal::CreateR0<bool>(true);
- auto expected =
- Literal::MakeTuple({expected_counter.get(), expected_predicate.get()});
+ auto expected_counter = LiteralUtil::CreateR0<int32>(5);
+ auto expected_predicate = LiteralUtil::CreateR0<bool>(true);
+ auto expected = LiteralUtil::MakeTuple(
+ {expected_counter.get(), expected_predicate.get()});
ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0));
}
@@ -571,9 +572,9 @@ TEST_F(WhileTest, WhileWithTupleConstantScalarResult) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Gt(builder.ConstantR0<int32>(5), iteration);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Gt(ConstantR0<int32>(&builder, 5), iteration);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -583,26 +584,26 @@ TEST_F(WhileTest, WhileWithTupleConstantScalarResult) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Tuple({builder.Add(iteration, builder.ConstantR0<int32>(1)),
- builder.ConstantR0<int32>(7)});
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Tuple(&builder, {Add(iteration, ConstantR0<int32>(&builder, 1)),
+ ConstantR0<int32>(&builder, 7)});
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder("while");
- auto init = builder.Tuple(
- {builder.ConstantR0<int32>(0), builder.ConstantR0<int32>(7)});
- auto result = builder.While(condition, body, init);
+ auto init = Tuple(&builder, {ConstantR0<int32>(&builder, 0),
+ ConstantR0<int32>(&builder, 7)});
+ auto result = While(condition, body, init);
VLOG(2) << "while = "
<< ShapeUtil::HumanString(
builder.GetShape(result).ConsumeValueOrDie());
- auto expected_counter = Literal::CreateR0<int32>(5);
- auto expected_data = Literal::CreateR0<int32>(7);
+ auto expected_counter = LiteralUtil::CreateR0<int32>(5);
+ auto expected_data = LiteralUtil::CreateR0<int32>(7);
auto expected =
- Literal::MakeTuple({expected_counter.get(), expected_data.get()});
+ LiteralUtil::MakeTuple({expected_counter.get(), expected_data.get()});
VLOG(2) << "expected = " << ShapeUtil::HumanString(expected->shape());
ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.0001));
}
@@ -632,9 +633,9 @@ TEST_F(WhileTest, TwoWhileWithTupleResult) {
const int c1 = 5;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Lt(iteration, builder.ConstantR0<int32>(c1));
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Lt(iteration, ConstantR0<int32>(&builder, c1));
TF_ASSERT_OK_AND_ASSIGN(condition, builder.Build());
}
@@ -642,9 +643,9 @@ TEST_F(WhileTest, TwoWhileWithTupleResult) {
const int c2 = 7;
{
XlaBuilder builder("condition2");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Lt(iteration, builder.ConstantR0<int32>(c2));
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Lt(iteration, ConstantR0<int32>(&builder, c2));
TF_ASSERT_OK_AND_ASSIGN(condition2, builder.Build());
}
@@ -654,43 +655,43 @@ TEST_F(WhileTest, TwoWhileWithTupleResult) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- auto weights = builder.GetTupleElement(prev, 1);
- auto input = builder.ConstantR1<float>(10, 1.f);
- auto new_weights = builder.Add(weights, input);
- builder.Tuple(
- {builder.Add(iteration, builder.ConstantR0<int32>(1)), new_weights});
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ auto weights = GetTupleElement(prev, 1);
+ auto input = ConstantR1<float>(&builder, 10, 1.f);
+ auto new_weights = Add(weights, input);
+ Tuple(&builder,
+ {Add(iteration, ConstantR0<int32>(&builder, 1)), new_weights});
TF_ASSERT_OK_AND_ASSIGN(body, builder.Build());
}
XlaComputation body2;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- auto weights = builder.GetTupleElement(prev, 1);
- auto input = builder.ConstantR1<float>(10, 1.f);
- auto new_weights = builder.Add(weights, input);
- builder.Tuple(
- {builder.Add(iteration, builder.ConstantR0<int32>(1)), new_weights});
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ auto weights = GetTupleElement(prev, 1);
+ auto input = ConstantR1<float>(&builder, 10, 1.f);
+ auto new_weights = Add(weights, input);
+ Tuple(&builder,
+ {Add(iteration, ConstantR0<int32>(&builder, 1)), new_weights});
TF_ASSERT_OK_AND_ASSIGN(body2, builder.Build());
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder("while");
- auto init = builder.Tuple(
- {builder.ConstantR0<int32>(0), builder.ConstantR1<float>(10, 0.f)});
- auto while1 = builder.While(condition, body, init);
+ auto init = Tuple(&builder, {ConstantR0<int32>(&builder, 0),
+ ConstantR1<float>(&builder, 10, 0.f)});
+ auto while1 = While(condition, body, init);
- auto while2 = builder.While(condition2, body2, while1);
+ auto while2 = While(condition2, body2, while1);
- auto while_result1 = builder.GetTupleElement(while1, 1);
- auto while_result2 = builder.GetTupleElement(while2, 1);
+ auto while_result1 = GetTupleElement(while1, 1);
+ auto while_result2 = GetTupleElement(while2, 1);
VLOG(2) << "while_result2 = "
<< ShapeUtil::HumanString(
builder.GetShape(while_result2).ConsumeValueOrDie());
- auto result = builder.Add(while_result1, while_result2);
+ auto result = Add(while_result1, while_result2);
VLOG(2) << "result = "
<< ShapeUtil::HumanString(
builder.GetShape(result).ConsumeValueOrDie());
@@ -711,9 +712,9 @@ TEST_F(WhileTest, TwoWhileLoopsAndSharedBody) {
const int c1 = 5;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Lt(iteration, builder.ConstantR0<int32>(c1));
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Lt(iteration, ConstantR0<int32>(&builder, c1));
TF_ASSERT_OK_AND_ASSIGN(condition, builder.Build());
}
@@ -721,9 +722,9 @@ TEST_F(WhileTest, TwoWhileLoopsAndSharedBody) {
const int c2 = 7;
{
XlaBuilder builder("condition2");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Lt(iteration, builder.ConstantR0<int32>(c2));
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Lt(iteration, ConstantR0<int32>(&builder, c2));
TF_ASSERT_OK_AND_ASSIGN(condition2, builder.Build());
}
@@ -733,30 +734,30 @@ TEST_F(WhileTest, TwoWhileLoopsAndSharedBody) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- auto weights = builder.GetTupleElement(prev, 1);
- auto input = builder.ConstantR1<float>(10, 1.f);
- auto new_weights = builder.Add(weights, input);
- builder.Tuple(
- {builder.Add(iteration, builder.ConstantR0<int32>(1)), new_weights});
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ auto weights = GetTupleElement(prev, 1);
+ auto input = ConstantR1<float>(&builder, 10, 1.f);
+ auto new_weights = Add(weights, input);
+ Tuple(&builder,
+ {Add(iteration, ConstantR0<int32>(&builder, 1)), new_weights});
TF_ASSERT_OK_AND_ASSIGN(body, builder.Build());
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder("while");
- auto init = builder.Tuple(
- {builder.ConstantR0<int32>(0), builder.ConstantR1<float>(10, 0.f)});
- auto while1 = builder.While(condition, body, init);
+ auto init = Tuple(&builder, {ConstantR0<int32>(&builder, 0),
+ ConstantR1<float>(&builder, 10, 0.f)});
+ auto while1 = While(condition, body, init);
- auto while2 = builder.While(condition2, body, while1);
+ auto while2 = While(condition2, body, while1);
- auto while_result1 = builder.GetTupleElement(while1, 1);
- auto while_result2 = builder.GetTupleElement(while2, 1);
+ auto while_result1 = GetTupleElement(while1, 1);
+ auto while_result2 = GetTupleElement(while2, 1);
VLOG(2) << "while_result2 = "
<< ShapeUtil::HumanString(
builder.GetShape(while_result2).ConsumeValueOrDie());
- auto result = builder.Add(while_result1, while_result2);
+ auto result = Add(while_result1, while_result2);
VLOG(2) << "result = "
<< ShapeUtil::HumanString(
builder.GetShape(result).ConsumeValueOrDie());
@@ -778,9 +779,9 @@ TEST_F(WhileTest, DISABLED_ON_GPU(WhileLoopsWithSharedBodyAndInit)) {
const int c1 = 5;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Lt(iteration, builder.ConstantR0<int32>(c1));
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Lt(iteration, ConstantR0<int32>(&builder, c1));
TF_ASSERT_OK_AND_ASSIGN(condition, builder.Build());
}
@@ -788,9 +789,9 @@ TEST_F(WhileTest, DISABLED_ON_GPU(WhileLoopsWithSharedBodyAndInit)) {
const int c2 = 7;
{
XlaBuilder builder("condition2");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Lt(iteration, builder.ConstantR0<int32>(c2));
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Lt(iteration, ConstantR0<int32>(&builder, c2));
TF_ASSERT_OK_AND_ASSIGN(condition2, builder.Build());
}
@@ -800,29 +801,29 @@ TEST_F(WhileTest, DISABLED_ON_GPU(WhileLoopsWithSharedBodyAndInit)) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- auto weights = builder.GetTupleElement(prev, 1);
- auto input = builder.ConstantR1<float>(10, 1.f);
- auto new_weights = builder.Add(weights, input);
- builder.Tuple(
- {builder.Add(iteration, builder.ConstantR0<int32>(1)), new_weights});
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ auto weights = GetTupleElement(prev, 1);
+ auto input = ConstantR1<float>(&builder, 10, 1.f);
+ auto new_weights = Add(weights, input);
+ Tuple(&builder,
+ {Add(iteration, ConstantR0<int32>(&builder, 1)), new_weights});
TF_ASSERT_OK_AND_ASSIGN(body, builder.Build());
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder("while");
- auto init = builder.Tuple(
- {builder.ConstantR0<int32>(0), builder.ConstantR1<float>(10, 0.f)});
- auto while1 = builder.While(condition, body, init);
- auto while2 = builder.While(condition2, body, init);
+ auto init = Tuple(&builder, {ConstantR0<int32>(&builder, 0),
+ ConstantR1<float>(&builder, 10, 0.f)});
+ auto while1 = While(condition, body, init);
+ auto while2 = While(condition2, body, init);
- auto while_result1 = builder.GetTupleElement(while1, 1);
- auto while_result2 = builder.GetTupleElement(while2, 1);
+ auto while_result1 = GetTupleElement(while1, 1);
+ auto while_result2 = GetTupleElement(while2, 1);
VLOG(2) << "while_result2 = "
<< ShapeUtil::HumanString(
builder.GetShape(while_result2).ConsumeValueOrDie());
- auto result = builder.Add(while_result1, while_result2);
+ auto result = Add(while_result1, while_result2);
VLOG(2) << "result = "
<< ShapeUtil::HumanString(
builder.GetShape(result).ConsumeValueOrDie());
@@ -844,9 +845,9 @@ XLA_TEST_F(WhileTest, WhileWithDynamicUpdateSlice) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Gt(builder.ConstantR0<int32>(5), iteration);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Gt(ConstantR0<int32>(&builder, 5), iteration);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -856,38 +857,37 @@ XLA_TEST_F(WhileTest, WhileWithDynamicUpdateSlice) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
// TupleElement 0
- auto iteration = builder.GetTupleElement(prev, 0);
- auto out0 = builder.Add(iteration, builder.ConstantR0<int32>(1));
+ auto iteration = GetTupleElement(prev, 0);
+ auto out0 = Add(iteration, ConstantR0<int32>(&builder, 1));
// TupleElement 1
- auto input = builder.GetTupleElement(prev, 1);
+ auto input = GetTupleElement(prev, 1);
// Update.
- auto update = builder.ConvertElementType(builder.Broadcast(out0, {2}), F32);
+ auto update = ConvertElementType(Broadcast(out0, {2}), F32);
// Starts = iteration * 2;
- auto starts = builder.Reshape(
- builder.Mul(iteration, builder.ConstantR0<int32>(2)), {1});
+ auto starts = Reshape(Mul(iteration, ConstantR0<int32>(&builder, 2)), {1});
// UpdateSlice.
- auto out1 = builder.DynamicUpdateSlice(input, update, starts);
+ auto out1 = DynamicUpdateSlice(input, update, starts);
- builder.Tuple({out0, out1});
+ Tuple(&builder, {out0, out1});
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder("while");
- auto init = builder.Tuple(
- {builder.ConstantR0<int32>(0), builder.ConstantR1<float>(10, 0.f)});
- auto result = builder.While(condition, body, init);
+ auto init = Tuple(&builder, {ConstantR0<int32>(&builder, 0),
+ ConstantR1<float>(&builder, 10, 0.f)});
+ auto result = While(condition, body, init);
VLOG(2) << "while = "
<< ShapeUtil::HumanString(
builder.GetShape(result).ConsumeValueOrDie());
- auto expected_counter = Literal::CreateR0<int32>(5);
- auto expected_data = Literal::CreateR1<float>(
+ auto expected_counter = LiteralUtil::CreateR0<int32>(5);
+ auto expected_data = LiteralUtil::CreateR1<float>(
{1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f});
auto expected =
- Literal::MakeTuple({expected_counter.get(), expected_data.get()});
+ LiteralUtil::MakeTuple({expected_counter.get(), expected_data.get()});
VLOG(2) << "expected = " << ShapeUtil::HumanString(expected->shape());
ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.0001));
}
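It is worth spelling out what the body above computes, since it explains the expected literal: each iteration broadcasts the incremented counter to a length-2 update and writes it into the length-10 buffer at offset iteration * 2, producing {1,1,2,2,3,3,4,4,5,5} after five trips. The relevant fragment in isolation (mirrors the hunk; not standalone):

auto out0 = Add(iteration, ConstantR0<int32>(&builder, 1));   // next counter
auto update = ConvertElementType(Broadcast(out0, {2}), F32);  // {i+1, i+1}
auto starts =                                                 // offset 2*i, rank 1
    Reshape(Mul(iteration, ConstantR0<int32>(&builder, 2)), {1});
auto out1 = DynamicUpdateSlice(input, update, starts);        // write the slice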
@@ -913,10 +913,9 @@ TEST_F(WhileTest, DISABLED_ON_INTERPRETER(WhileWithPrngScalarResult)) {
// Create a computation for the condition: repeat for count iterations.
auto build_condition = [this, v6s32](int count) {
XlaBuilder builder(TestName());
- auto prev = builder.Reshape(
- builder.Slice(builder.Parameter(0, v6s32, "prev"), {0}, {1}, {1}), {0},
- {});
- builder.Gt(builder.ConstantR0<int32>(count), prev);
+ auto prev = Reshape(
+ Slice(Parameter(&builder, 0, v6s32, "prev"), {0}, {1}, {1}), {0}, {});
+ Gt(ConstantR0<int32>(&builder, count), prev);
return builder.Build().ConsumeValueOrDie();
};
@@ -924,22 +923,22 @@ TEST_F(WhileTest, DISABLED_ON_INTERPRETER(WhileWithPrngScalarResult)) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, v6s32, "prev");
- auto inc = builder.ConcatInDim(
- {builder.ConstantR1<int32>({1}),
- builder.RngUniform(builder.ConstantR0<int32>(0),
- builder.ConstantR0<int32>(100),
- ShapeUtil::MakeShape(S32, {5}))},
- 0);
- builder.Add(inc, prev);
+ auto prev = Parameter(&builder, 0, v6s32, "prev");
+ auto inc = ConcatInDim(&builder,
+ {ConstantR1<int32>(&builder, {1}),
+ RngUniform(ConstantR0<int32>(&builder, 0),
+ ConstantR0<int32>(&builder, 100),
+ ShapeUtil::MakeShape(S32, {5}))},
+ 0);
+ Add(inc, prev);
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
auto while_loop = [this, &body, build_condition](int count) {
XlaBuilder builder(TestName());
- auto init = builder.ConstantR1<int32>({0, 0, 0, 0, 0, 0});
- builder.While(build_condition(count), body, init);
+ auto init = ConstantR1<int32>(&builder, {0, 0, 0, 0, 0, 0});
+ While(build_condition(count), body, init);
return builder.Build();
};
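The condition above hides a small idiom: the loop state is a length-6 S32 vector whose element 0 is the counter, so the predicate carves it out with Slice and collapses the size-1 result to a scalar with Reshape. The idiom in isolation (argument roles inferred from the calls above):

auto v = Parameter(&builder, 0, ShapeUtil::MakeShape(S32, {6}), "prev");
auto head = Slice(v, /*start_indices=*/{0}, /*limit_indices=*/{1},
                  /*strides=*/{1});                    // shape s32[1]
auto scalar = Reshape(head, /*dimensions=*/{0},
                      /*new_sizes=*/{});               // shape s32[]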
@@ -958,33 +957,30 @@ TEST_F(WhileTest, WhileThatSwapsParameterWithTupleElement) {
auto element_shape = ShapeUtil::MakeShape(F32, {2});
XlaBuilder outer("outer");
- auto p = outer.Parameter(0, element_shape, "param");
- auto t = outer.Tuple({p, outer.ConstantR1<float>({1, 1})});
+ auto p = Parameter(&outer, 0, element_shape, "param");
+ auto t = Tuple(&outer, {p, ConstantR1<float>(&outer, {1, 1})});
TF_ASSERT_OK_AND_ASSIGN(Shape tuple_shape, outer.GetShape(t));
XlaBuilder cond("cond");
- auto cond_t = cond.Parameter(0, tuple_shape, "t");
- TF_ASSERT_OK(Any(cond.Eq(cond.GetTupleElement(cond_t, 0),
- cond.ConstantR1<float>({42, 42})),
- &cond)
- .status());
+ auto cond_t = Parameter(&cond, 0, tuple_shape, "t");
+ Any(Eq(GetTupleElement(cond_t, 0), ConstantR1<float>(&cond, {42, 42})));
XlaBuilder body("body");
- auto body_t = body.Parameter(0, tuple_shape, "t");
- auto e = body.GetTupleElement(body_t, 1);
- body.Tuple({e, e});
+ auto body_t = Parameter(&body, 0, tuple_shape, "t");
+ auto e = GetTupleElement(body_t, 1);
+ Tuple(&body, {e, e});
TF_ASSERT_OK_AND_ASSIGN(auto cond_computation, cond.Build());
TF_ASSERT_OK_AND_ASSIGN(auto body_computation, body.Build());
- outer.While(cond_computation, body_computation, t);
+ While(cond_computation, body_computation, t);
- auto expected_element = Literal::CreateR1<float>({1, 1});
+ auto expected_element = LiteralUtil::CreateR1<float>({1, 1});
auto expected =
- Literal::MakeTuple({expected_element.get(), expected_element.get()});
+ LiteralUtil::MakeTuple({expected_element.get(), expected_element.get()});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> parameter_data,
- client_->TransferToServer(*Literal::CreateR1<float>({42, 42})));
+ client_->TransferToServer(*LiteralUtil::CreateR1<float>({42, 42})));
ComputeAndCompareTuple(&outer, *expected, {parameter_data.get()},
ErrorSpec(1e-6));
}
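A side change rides along in the cond builder above: Any used to take the builder explicitly and return a status that had to be unwrapped, whereas after the refactor it reads as a plain combinator over a PRED operand (builder inferred), so it can sit inline as the condition root. The new shape of the call, isolated from the hunk:

// Any ORs a PRED array down to a scalar PRED.
auto preds = Eq(GetTupleElement(cond_t, 0), ConstantR1<float>(&cond, {42, 42}));
Any(preds);  // becomes the root of the condition computation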
@@ -993,24 +989,23 @@ TEST_F(WhileTest, WhileThatSwapsParameterWithBroadcast) {
auto element_shape = ShapeUtil::MakeShape(F32, {2});
XlaBuilder outer("outer");
- auto p = outer.Parameter(0, element_shape, "param");
+ auto p = Parameter(&outer, 0, element_shape, "param");
XlaBuilder cond("cond");
- auto cond_t = cond.Parameter(0, element_shape, "t");
- TF_ASSERT_OK(
- Any(cond.Eq(cond_t, cond.ConstantR1<float>({42, 42})), &cond).status());
+ auto cond_t = Parameter(&cond, 0, element_shape, "t");
+ Any(Eq(cond_t, ConstantR1<float>(&cond, {42, 42})));
XlaBuilder body("body");
- auto body_t = body.Parameter(0, element_shape, "t");
- auto e = body.Broadcast(body.ConstantR0<float>(1.0), {2});
+ Parameter(&body, 0, element_shape, "t");
+ Broadcast(ConstantR0<float>(&body, 1.0), {2});
TF_ASSERT_OK_AND_ASSIGN(auto cond_computation, cond.Build());
TF_ASSERT_OK_AND_ASSIGN(auto body_computation, body.Build());
- outer.While(cond_computation, body_computation, p);
+ While(cond_computation, body_computation, p);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> parameter_data,
- client_->TransferToServer(*Literal::CreateR1<float>({42, 42})));
+ client_->TransferToServer(*LiteralUtil::CreateR1<float>({42, 42})));
ComputeAndCompareR1<float>(&outer, {1.0f, 1.0f}, {parameter_data.get()},
ErrorSpec(1e-6));
}
@@ -1019,25 +1014,24 @@ TEST_F(WhileTest, WhileThatTurnsScalarParameterToTupleElement) {
auto element_shape = ShapeUtil::MakeShape(F32, {});
XlaBuilder outer("outer");
- auto p = outer.Parameter(0, element_shape, "param");
+ auto p = Parameter(&outer, 0, element_shape, "param");
XlaBuilder cond("cond");
- auto cond_t = cond.Parameter(0, element_shape, "t");
- cond.Eq(cond_t, cond.ConstantR0<float>(42));
+ auto cond_t = Parameter(&cond, 0, element_shape, "t");
+ Eq(cond_t, ConstantR0<float>(&cond, 42));
XlaBuilder body("body");
- auto body_t = body.Parameter(0, element_shape, "t");
- auto tuple =
- body.Tuple({body_t, body.Add(body_t, body.ConstantR0<float>(1))});
- auto e = body.GetTupleElement(tuple, 1);
+ auto body_t = Parameter(&body, 0, element_shape, "t");
+ auto tuple = Tuple(&body, {body_t, Add(body_t, ConstantR0<float>(&body, 1))});
+ GetTupleElement(tuple, 1);
TF_ASSERT_OK_AND_ASSIGN(auto cond_computation, cond.Build());
TF_ASSERT_OK_AND_ASSIGN(auto body_computation, body.Build());
- outer.While(cond_computation, body_computation, p);
+ While(cond_computation, body_computation, p);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> parameter_data,
- client_->TransferToServer(*Literal::CreateR0<float>(42)));
+ client_->TransferToServer(*LiteralUtil::CreateR0<float>(42)));
ComputeAndCompareR0<float>(&outer, 43.0f, {parameter_data.get()},
ErrorSpec(1e-6));
}
@@ -1056,33 +1050,31 @@ TEST_F(WhileTest, WhileWithMixedTupleElements) {
XlaBuilder outer("outer");
auto p =
- outer.Tuple({outer.ConstantR0<int32>(0),
- outer.Parameter(0, ShapeUtil::MakeShape(S32, {}), "t")});
+ Tuple(&outer, {ConstantR0<int32>(&outer, 0),
+ Parameter(&outer, 0, ShapeUtil::MakeShape(S32, {}), "t")});
XlaBuilder cond("cond");
- auto params = cond.Parameter(0, result_shape, "prev");
- auto cond_t = cond.Add(cond.GetTupleElement(params, 1),
- cond.GetTupleElement(params, 0));
- cond.Lt(cond_t, cond.ConstantR0<int32>(30));
+ auto params = Parameter(&cond, 0, result_shape, "prev");
+ auto cond_t = Add(GetTupleElement(params, 1), GetTupleElement(params, 0));
+ Lt(cond_t, ConstantR0<int32>(&cond, 30));
XlaBuilder body("body");
- auto body_t = body.Parameter(0, result_shape, "t");
+ auto body_t = Parameter(&body, 0, result_shape, "t");
- auto tuple = body.Tuple(
- {body.Add(body.GetTupleElement(body_t, 0), body.ConstantR0<int32>(1)),
- body.Add(body.GetTupleElement(body_t, 1), body.ConstantR0<int32>(1))});
+ Tuple(&body, {Add(GetTupleElement(body_t, 0), ConstantR0<int32>(&body, 1)),
+ Add(GetTupleElement(body_t, 1), ConstantR0<int32>(&body, 1))});
TF_ASSERT_OK_AND_ASSIGN(auto cond_computation, cond.Build());
TF_ASSERT_OK_AND_ASSIGN(auto body_computation, body.Build());
- outer.While(cond_computation, body_computation, p);
+ While(cond_computation, body_computation, p);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> parameter_data,
- client_->TransferToServer(*Literal::CreateR0<int32>(1)));
+ client_->TransferToServer(*LiteralUtil::CreateR0<int32>(1)));
- auto add1 = Literal::CreateR0<int32>(15);
- auto add2 = Literal::CreateR0<int32>(16);
- auto expected = Literal::MakeTuple({add1.get(), add2.get()});
+ auto add1 = LiteralUtil::CreateR0<int32>(15);
+ auto add2 = LiteralUtil::CreateR0<int32>(16);
+ auto expected = LiteralUtil::MakeTuple({add1.get(), add2.get()});
ComputeAndCompareTuple(&outer, *expected, {parameter_data.get()},
ErrorSpec(1e-6));
}
@@ -1105,9 +1097,9 @@ XLA_TEST_F(WhileTest, NestedWhileWithScalarResult) {
XlaComputation inner_condition;
{
XlaBuilder builder("inner_condition");
- auto params = builder.Parameter(0, inner_result_shape, "prev");
- auto i = builder.GetTupleElement(params, 0);
- builder.Lt(i, builder.ConstantR0<int32>(7));
+ auto params = Parameter(&builder, 0, inner_result_shape, "prev");
+ auto i = GetTupleElement(params, 0);
+ Lt(i, ConstantR0<int32>(&builder, 7));
inner_condition = builder.Build().ConsumeValueOrDie();
}
@@ -1116,8 +1108,8 @@ XLA_TEST_F(WhileTest, NestedWhileWithScalarResult) {
XlaComputation outer_condition;
{
XlaBuilder builder("outer_condition");
- auto prev = builder.Parameter(0, outer_result_shape, "prev");
- builder.Lt(prev, builder.ConstantR0<int32>(30));
+ auto prev = Parameter(&builder, 0, outer_result_shape, "prev");
+ Lt(prev, ConstantR0<int32>(&builder, 30));
outer_condition = builder.Build().ConsumeValueOrDie();
}
@@ -1126,12 +1118,12 @@ XLA_TEST_F(WhileTest, NestedWhileWithScalarResult) {
XlaComputation inner_body;
{
XlaBuilder builder("inner_body");
- auto params = builder.Parameter(0, inner_result_shape, "prev");
- auto i = builder.GetTupleElement(params, 0);
- auto result = builder.GetTupleElement(params, 1);
- i = builder.Add(builder.ConstantR0<int32>(1), i);
- result = builder.Add(builder.ConstantR0<int32>(2), result);
- builder.Tuple({i, result});
+ auto params = Parameter(&builder, 0, inner_result_shape, "prev");
+ auto i = GetTupleElement(params, 0);
+ auto result = GetTupleElement(params, 1);
+ i = Add(ConstantR0<int32>(&builder, 1), i);
+ result = Add(ConstantR0<int32>(&builder, 2), result);
+ Tuple(&builder, {i, result});
inner_body = builder.Build().ConsumeValueOrDie();
}
@@ -1139,17 +1131,17 @@ XLA_TEST_F(WhileTest, NestedWhileWithScalarResult) {
XlaComputation outer_body;
{
XlaBuilder builder("outer_body");
- auto prev = builder.Parameter(0, outer_result_shape, "prev");
- auto init = builder.Tuple({builder.ConstantR0<int32>(0), prev});
- auto result = builder.While(inner_condition, inner_body, init);
- builder.GetTupleElement(result, 1);
+ auto prev = Parameter(&builder, 0, outer_result_shape, "prev");
+ auto init = Tuple(&builder, {ConstantR0<int32>(&builder, 0), prev});
+ auto result = While(inner_condition, inner_body, init);
+ GetTupleElement(result, 1);
outer_body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder(TestName());
- auto init = builder.ConstantR0<int32>(0);
- builder.While(outer_condition, outer_body, init);
+ auto init = ConstantR0<int32>(&builder, 0);
+ While(outer_condition, outer_body, init);
ComputeAndCompareR0<int32>(&builder, 42, {});
}
@@ -1167,8 +1159,8 @@ TEST_F(WhileTest, DISABLED_ON_INTERPRETER(WhileWithCallInsideCondition)) {
XlaComputation condition_callee;
{
XlaBuilder builder("condition_callee");
- auto prev = builder.Parameter(0, result_shape, "prev");
- builder.Tuple({builder.Gt(builder.ConstantR0<int32>(5), prev)});
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ Tuple(&builder, {Gt(ConstantR0<int32>(&builder, 5), prev)});
condition_callee = builder.Build().ConsumeValueOrDie();
}
@@ -1176,9 +1168,9 @@ TEST_F(WhileTest, DISABLED_ON_INTERPRETER(WhileWithCallInsideCondition)) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto result = builder.Call(condition_callee, {prev});
- builder.GetTupleElement(result, 0);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto result = Call(&builder, condition_callee, {prev});
+ GetTupleElement(result, 0);
condition = builder.Build().ConsumeValueOrDie();
}
@@ -1186,16 +1178,16 @@ TEST_F(WhileTest, DISABLED_ON_INTERPRETER(WhileWithCallInsideCondition)) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, result_shape, "prev");
- auto input = builder.ConstantR0<int32>(1);
- builder.Add(input, prev);
+ auto prev = Parameter(&builder, 0, result_shape, "prev");
+ auto input = ConstantR0<int32>(&builder, 1);
+ Add(input, prev);
body = builder.Build().ConsumeValueOrDie();
}
// Create a While node with computations for the condition and the body.
XlaBuilder builder(TestName());
- auto init = builder.ConstantR0<int32>(0);
- builder.While(condition, body, init);
+ auto init = ConstantR0<int32>(&builder, 0);
+ While(condition, body, init);
ComputeAndCompareR0<int32>(&builder, 5, {});
}
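Call gets the same free-function treatment: the callee above returns a one-element tuple, and the caller unwraps it with GetTupleElement. The call-and-unpack step in isolation (mirrors the hunk above):

auto result = Call(&builder, condition_callee, {prev});  // invoke the callee
GetTupleElement(result, 0);  // unwrap the PRED; last op = condition root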
@@ -1210,34 +1202,34 @@ TEST_F(WhileTest, WhileWithLoopInvariantOperation) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto state = builder.Parameter(0, while_shape, "state");
- builder.Gt(builder.ConstantR0<int32>(5), builder.GetTupleElement(state, 0));
+ auto state = Parameter(&builder, 0, while_shape, "state");
+ Gt(ConstantR0<int32>(&builder, 5), GetTupleElement(state, 0));
TF_ASSERT_OK_AND_ASSIGN(condition, builder.Build());
}
XlaComputation body;
{
XlaBuilder builder("body");
- auto state = builder.Parameter(0, while_shape, "state");
- auto indvar = builder.GetTupleElement(state, 0);
- auto input_0 = builder.GetTupleElement(state, 1);
- auto input_1 = builder.GetTupleElement(state, 2);
- auto output = builder.Tanh(builder.Dot(input_0, input_1));
- auto indvar_next = builder.Add(indvar, builder.ConstantR0<int32>(1));
- builder.Tuple({indvar_next, input_0, input_1, output});
+ auto state = Parameter(&builder, 0, while_shape, "state");
+ auto indvar = GetTupleElement(state, 0);
+ auto input_0 = GetTupleElement(state, 1);
+ auto input_1 = GetTupleElement(state, 2);
+ auto output = Tanh(Dot(input_0, input_1));
+ auto indvar_next = Add(indvar, ConstantR0<int32>(&builder, 1));
+ Tuple(&builder, {indvar_next, input_0, input_1, output});
TF_ASSERT_OK_AND_ASSIGN(body, builder.Build());
}
XlaBuilder builder(TestName());
- auto matrix_input = builder.Parameter(0, matrix_shape, "matrix");
- auto init = builder.Tuple(
- {builder.ConstantR0<int32>(0), matrix_input, matrix_input, matrix_input});
- auto while_instruction = builder.While(condition, body, init);
- builder.GetTupleElement(while_instruction, 3);
+ auto matrix_input = Parameter(&builder, 0, matrix_shape, "matrix");
+ auto init = Tuple(&builder, {ConstantR0<int32>(&builder, 0), matrix_input,
+ matrix_input, matrix_input});
+ auto while_instruction = While(condition, body, init);
+ GetTupleElement(while_instruction, 3);
- TF_ASSERT_OK_AND_ASSIGN(auto param_value,
- client_->TransferToServer(*Literal::CreateR2<float>(
- {{1.0, 2.0}, {-1.0, -2.0}})));
+ TF_ASSERT_OK_AND_ASSIGN(
+ auto param_value, client_->TransferToServer(*LiteralUtil::CreateR2<float>(
+ {{1.0, 2.0}, {-1.0, -2.0}})));
ComputeAndCompareR2<float>(
&builder, {{-0.76159416, -0.96402758}, {0.76159416, 0.96402758}},
@@ -1264,9 +1256,9 @@ void BM_WhileLoop(int num_iters) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto prev = builder.Parameter(0, loop_state_shape, "prev");
- auto iteration = builder.GetTupleElement(prev, 0);
- builder.Lt(iteration, builder.ConstantR0<int32>(loop_limit));
+ auto prev = Parameter(&builder, 0, loop_state_shape, "prev");
+ auto iteration = GetTupleElement(prev, 0);
+ Lt(iteration, ConstantR0<int32>(&builder, loop_limit));
condition = builder.Build().ConsumeValueOrDie();
}
@@ -1274,29 +1266,29 @@ void BM_WhileLoop(int num_iters) {
XlaComputation body;
{
XlaBuilder builder("body");
- auto prev = builder.Parameter(0, loop_state_shape, "prev");
+ auto prev = Parameter(&builder, 0, loop_state_shape, "prev");
// TupleElement 0
- auto iteration = builder.GetTupleElement(prev, 0);
- auto out0 = builder.Add(iteration, builder.ConstantR0<int32>(1));
+ auto iteration = GetTupleElement(prev, 0);
+ auto out0 = Add(iteration, ConstantR0<int32>(&builder, 1));
// TupleElement 1
- auto input = builder.GetTupleElement(prev, 1);
+ auto input = GetTupleElement(prev, 1);
// Update.
- auto one = builder.ConstantR0<float>(1.0);
- auto update = builder.Broadcast(one, {1, 1024, 1024});
+ auto one = ConstantR0<float>(&builder, 1.0);
+ auto update = Broadcast(one, {1, 1024, 1024});
// Starts: constant zeros here, so the same slice is updated each iteration.
- auto starts = builder.ConstantR1<int32>({0, 0, 0});
+ auto starts = ConstantR1<int32>(&builder, {0, 0, 0});
// UpdateSlice.
- auto out1 = builder.DynamicUpdateSlice(input, update, starts);
- builder.Tuple({out0, out1});
+ auto out1 = DynamicUpdateSlice(input, update, starts);
+ Tuple(&builder, {out0, out1});
body = builder.Build().ConsumeValueOrDie();
}
// Create a While instruction.
XlaBuilder builder("while");
- auto zero = builder.ConstantR0<float>(0.0);
- auto input = builder.Broadcast(zero, {seq_len, 1024, 1024});
- auto init = builder.Tuple({builder.ConstantR0<int32>(0), input});
- builder.While(condition, body, init);
+ auto zero = ConstantR0<float>(&builder, 0.0);
+ auto input = Broadcast(zero, {seq_len, 1024, 1024});
+ auto init = Tuple(&builder, {ConstantR0<int32>(&builder, 0), input});
+ While(condition, body, init);
auto computation = builder.Build().ConsumeValueOrDie();
std::unique_ptr<LocalExecutable> executable =
diff --git a/tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc b/tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc
index 0be950cacb..4d4dd62a3f 100644
--- a/tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc
+++ b/tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc
@@ -79,7 +79,9 @@ struct ParsedProfileOutputLine {
Status ParseOneProfileOutputLine(
const string& line, bool expect_hlo,
- gtl::FlatMap<string, ParsedProfileOutputLine>* parsed_results) {
+ gtl::FlatMap<string, ParsedProfileOutputLine>* parsed_results,
+ tensorflow::gtl::ArraySlice<tensorflow::StringPiece> opcodes_to_ignore =
+ {}) {
string separator = "[^:]*:: +";
string match_percentage = "\\d+\\.\\d\\d%";
string match_cycles = "(\\d+) cycles +\\( *(" + match_percentage + ")\\)";
@@ -113,7 +115,9 @@ Status ParseOneProfileOutputLine(
", Regexp: ", regexp_pattern);
}
- InsertOrDie(parsed_results, parsed_line.opcode, parsed_line);
+ if (!c_linear_search(opcodes_to_ignore, parsed_line.opcode)) {
+ InsertOrDie(parsed_results, parsed_line.opcode, parsed_line);
+ }
return Status::OK();
}
@@ -187,9 +191,9 @@ XLA_TEST_F(HloProfileTest, ProfileSingleComputation) {
ClientLibrary::GetOrCreateLocalClient(platform));
XlaBuilder builder(TestName());
- auto result = builder.Tanh(builder.Add(
- builder.Parameter(0, ShapeUtil::MakeShape(F32, {m, k}), "dot_lhs"),
- builder.Parameter(1, ShapeUtil::MakeShape(F32, {k, n}), "dot_rhs")));
+ Tanh(Add(
+ Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {m, k}), "dot_lhs"),
+ Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {k, n}), "dot_rhs")));
TF_ASSERT_OK_AND_ASSIGN(auto computation, builder.Build());
@@ -239,9 +243,7 @@ XLA_TEST_F(HloProfileTest, ProfileSingleComputation) {
EXPECT_TRUE(HasTrops(tanh_profile));
}
-// TODO(b/71544591): The GPU backend does not record cycles spent in on Hlo
-// instructions "interior" to while nodes.
-XLA_TEST_F(HloProfileTest, DISABLED_ON_GPU(ProfileWhileComputation)) {
+XLA_TEST_F(HloProfileTest, ProfileWhileComputation) {
const int64 size = 256;
Shape matrix_shape = ShapeUtil::MakeShape(F32, {size, size});
Shape while_result_shape =
@@ -255,30 +257,30 @@ XLA_TEST_F(HloProfileTest, DISABLED_ON_GPU(ProfileWhileComputation)) {
XlaComputation condition;
{
XlaBuilder builder("condition");
- auto state = builder.Parameter(0, while_result_shape, "state");
- auto iteration = builder.GetTupleElement(state, 0);
- builder.Gt(builder.ConstantR0<int32>(5), iteration);
+ auto state = Parameter(&builder, 0, while_result_shape, "state");
+ auto iteration = GetTupleElement(state, 0);
+ Gt(ConstantR0<int32>(&builder, 5), iteration);
TF_ASSERT_OK_AND_ASSIGN(condition, builder.Build());
}
XlaComputation body;
{
XlaBuilder builder("body");
- auto state = builder.Parameter(0, while_result_shape, "state");
- auto matrix = builder.GetTupleElement(state, 1);
- auto next_iteration = builder.Add(builder.GetTupleElement(state, 0),
- builder.ConstantR0<int32>(1));
- builder.Tuple({next_iteration, builder.Add(matrix, matrix)});
+ auto state = Parameter(&builder, 0, while_result_shape, "state");
+ auto matrix = GetTupleElement(state, 1);
+ auto next_iteration =
+ Add(GetTupleElement(state, 0), ConstantR0<int32>(&builder, 1));
+ Tuple(&builder, {next_iteration, Mul(matrix, matrix)});
TF_ASSERT_OK_AND_ASSIGN(body, builder.Build());
}
XlaBuilder builder(TestName());
auto initial_while_state =
- builder.Tuple({builder.ConstantR0<int32>(0),
- builder.Parameter(0, matrix_shape, "initial_value")});
- auto while_result = builder.While(condition, body, initial_while_state);
- builder.Add(builder.GetTupleElement(while_result, 1),
- builder.Parameter(1, matrix_shape, "other_value"));
+ Tuple(&builder, {ConstantR0<int32>(&builder, 0),
+ Parameter(&builder, 0, matrix_shape, "initial_value")});
+ auto while_result = While(condition, body, initial_while_state);
+ Add(GetTupleElement(while_result, 1),
+ Parameter(&builder, 1, matrix_shape, "other_value"));
TF_ASSERT_OK_AND_ASSIGN(auto computation, builder.Build());
@@ -290,36 +292,50 @@ XLA_TEST_F(HloProfileTest, DISABLED_ON_GPU(ProfileWhileComputation)) {
tensorflow::str_util::Split(profile_output, '\n');
auto while_body_profile_start =
- std::find_if(profile_output_lines.begin(), profile_output_lines.end(),
+ c_find_if(profile_output_lines, [](tensorflow::StringPiece s) {
+ return tensorflow::str_util::StartsWith(s,
+ "Execution profile for body");
+ });
+
+ ASSERT_NE(while_body_profile_start, profile_output_lines.cend());
+
+ auto while_body_profile_end =
+ std::find_if(while_body_profile_start, profile_output_lines.end(),
[](tensorflow::StringPiece s) {
return tensorflow::str_util::StartsWith(
- s, "Execution profile for body");
+ s, "********** microseconds report **********");
});
- ASSERT_NE(while_body_profile_start, profile_output_lines.end());
+ // We emit a blank line before the "********** microseconds report **********"
+ // line.
+ while_body_profile_end--;
- gtl::FlatMap<string, ParsedProfileOutputLine> parsed_profile_lines;
+ ASSERT_NE(while_body_profile_end, profile_output_lines.end());
- TF_ASSERT_OK(
- ParseOneProfileOutputLine(*std::next(while_body_profile_start, 1),
- /*expect_hlo=*/false, &parsed_profile_lines));
+ gtl::FlatMap<string, ParsedProfileOutputLine> parsed_profile_lines;
- TF_ASSERT_OK(
- ParseOneProfileOutputLine(*std::next(while_body_profile_start, 2),
- /*expect_hlo=*/true, &parsed_profile_lines));
+ for (auto while_body_profile_i = while_body_profile_start + 1;
+ while_body_profile_i != while_body_profile_end; while_body_profile_i++) {
+ // There are multiple "get-tuple-element" instructions in the while body so
+ // we ignore them -- we don't want parsed_profile_lines to be a multi-map.
+ TF_ASSERT_OK(ParseOneProfileOutputLine(
+ *while_body_profile_i,
+ /*expect_hlo=*/while_body_profile_i != (while_body_profile_start + 1),
+ &parsed_profile_lines, {"get-tuple-element"}));
+ }
TF_ASSERT_OK_AND_ASSIGN(ParsedProfileOutputLine total_while_body_profile,
MaybeFind(parsed_profile_lines, "[total]"));
- TF_ASSERT_OK_AND_ASSIGN(ParsedProfileOutputLine dot_profile,
- MaybeFind(parsed_profile_lines, "add"));
+ TF_ASSERT_OK_AND_ASSIGN(ParsedProfileOutputLine multiply_profile,
+ MaybeFind(parsed_profile_lines, "multiply"));
EXPECT_GT(total_while_body_profile.cycles, 0);
EXPECT_EQ(total_while_body_profile.opcode, "[total]");
EXPECT_EQ(total_while_body_profile.cycles_percentage, "100.00%");
- EXPECT_GT(total_while_body_profile.cycles, dot_profile.cycles);
- EXPECT_NE(dot_profile.cycles_percentage, "0.00%");
- EXPECT_NE(dot_profile.cycles_percentage, "100.00%");
+ EXPECT_GT(total_while_body_profile.cycles, multiply_profile.cycles);
+ EXPECT_NE(multiply_profile.cycles_percentage, "0.00%");
+ EXPECT_NE(multiply_profile.cycles_percentage, "100.00%");
}
} // namespace
} // namespace xla
@@ -336,8 +352,11 @@ static std::pair<int, char**> AddXlaHloProfileFlag(int argc, char** argv) {
new_argv[argc] = strdup("--xla_hlo_profile");
// Fusion can change the Hlo instructions that show up in the final Hlo
- // executable, so block it here.
- new_argv[argc + 1] = strdup("--xla_disable_hlo_passes=fusion");
+ // executable, so block it here. Also block the WhileLoopInvariantCodeMotion
+ // pass, otherwise a while loop is transformed and we could not match the
+ // original name in the ProfileWhileComputation test.
+ new_argv[argc + 1] = strdup(
+ "--xla_disable_hlo_passes=fusion,while-loop-invariant-code-motion");
return {argc + 2, new_argv};
}
diff --git a/tensorflow/compiler/xla/text_literal_reader.cc b/tensorflow/compiler/xla/text_literal_reader.cc
index 56702feab9..897123d760 100644
--- a/tensorflow/compiler/xla/text_literal_reader.cc
+++ b/tensorflow/compiler/xla/text_literal_reader.cc
@@ -20,7 +20,7 @@ limitations under the License.
#include <utility>
#include <vector>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
diff --git a/tensorflow/compiler/xla/text_literal_reader.h b/tensorflow/compiler/xla/text_literal_reader.h
index e45e5291c9..708e8c80d8 100644
--- a/tensorflow/compiler/xla/text_literal_reader.h
+++ b/tensorflow/compiler/xla/text_literal_reader.h
@@ -18,7 +18,7 @@ limitations under the License.
#include <memory>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
diff --git a/tensorflow/compiler/xla/text_literal_reader_test.cc b/tensorflow/compiler/xla/text_literal_reader_test.cc
index 23070b6638..92f9b4f9f0 100644
--- a/tensorflow/compiler/xla/text_literal_reader_test.cc
+++ b/tensorflow/compiler/xla/text_literal_reader_test.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include <string>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/types.h"
diff --git a/tensorflow/compiler/xla/text_literal_writer.cc b/tensorflow/compiler/xla/text_literal_writer.cc
index 373c0d2d8d..24e0784741 100644
--- a/tensorflow/compiler/xla/text_literal_writer.cc
+++ b/tensorflow/compiler/xla/text_literal_writer.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include <string>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
diff --git a/tensorflow/compiler/xla/text_literal_writer.h b/tensorflow/compiler/xla/text_literal_writer.h
index 0a1235b5e0..159ac1b7e1 100644
--- a/tensorflow/compiler/xla/text_literal_writer.h
+++ b/tensorflow/compiler/xla/text_literal_writer.h
@@ -16,7 +16,7 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_TEXT_LITERAL_WRITER_H_
#define TENSORFLOW_COMPILER_XLA_TEXT_LITERAL_WRITER_H_
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
diff --git a/tensorflow/compiler/xla/text_literal_writer_test.cc b/tensorflow/compiler/xla/text_literal_writer_test.cc
index 70cf2fb1b8..4ea02faffc 100644
--- a/tensorflow/compiler/xla/text_literal_writer_test.cc
+++ b/tensorflow/compiler/xla/text_literal_writer_test.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include <memory>
#include <string>
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/test_helpers.h"
@@ -30,8 +31,9 @@ namespace xla {
namespace {
TEST(TextLiteralWriterTest, WritesFloatLiteral) {
- auto literal = Literal::CreateR2<float>({
- {3.14, 2.17}, {1.23, 4.56},
+ auto literal = LiteralUtil::CreateR2<float>({
+ {3.14, 2.17},
+ {1.23, 4.56},
});
string path =
tensorflow::io::JoinPath(tensorflow::testing::TmpDir(), "/whatever");
diff --git a/tensorflow/compiler/xla/tools/BUILD b/tensorflow/compiler/xla/tools/BUILD
index e4a052c8f1..55501827f2 100644
--- a/tensorflow/compiler/xla/tools/BUILD
+++ b/tensorflow/compiler/xla/tools/BUILD
@@ -74,7 +74,7 @@ cc_library(
srcs = ["replay_computation.cc"],
deps = [
"//tensorflow/compiler/xla:execution_options_util",
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
@@ -123,7 +123,7 @@ tf_cc_binary(
name = "show_literal",
srcs = ["show_literal.cc"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
@@ -145,7 +145,7 @@ tf_cc_binary(
name = "show_text_literal",
srcs = ["show_text_literal.cc"],
deps = [
- "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:text_literal_reader",
"//tensorflow/compiler/xla:types",
diff --git a/tensorflow/compiler/xla/tools/replay_computation.cc b/tensorflow/compiler/xla/tools/replay_computation.cc
index f7574e0b1c..854e797ec2 100644
--- a/tensorflow/compiler/xla/tools/replay_computation.cc
+++ b/tensorflow/compiler/xla/tools/replay_computation.cc
@@ -43,7 +43,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/lib/testing.h"
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/execution_options_util.h"
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/gpu/infeed_manager.h"
#include "tensorflow/compiler/xla/service/hlo.pb.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
@@ -174,6 +174,11 @@ StatusOr<Literal> ReplayComputation(const HloSnapshot& module,
client->Compile(computation, argument_layouts, ExecutableBuildOptions())
.ValueOrDie();
+ // Do not attempt to run the executable if num_runs is less than 1.
+ if (opts.num_runs < 1) {
+ return Cancelled("Cancelled after compilation since --num_runs < 1.");
+ }
+
// Run the computation num_runs times, and return the result from the last
// execution.
StreamExecutorMemoryAllocator allocator(
@@ -191,9 +196,6 @@ StatusOr<Literal> ReplayComputation(const HloSnapshot& module,
<< static_cast<double>(profile.compute_time_ns()) / 1e9 << "s";
}
- // Check that --num_runs > 0, otherwise *result below will fail with an
- // unhelpful error (because the loop didn't run any iterations).
- CHECK_GT(opts.num_runs, 0) << "--num_runs must be > 0";
TF_ASSIGN_OR_RETURN(std::unique_ptr<Literal> result_literal,
client->ShapedBufferToLiteral(*result));
return std::move(*result_literal);
diff --git a/tensorflow/compiler/xla/tools/show_literal.cc b/tensorflow/compiler/xla/tools/show_literal.cc
index fe8e72ba32..51909190a3 100644
--- a/tensorflow/compiler/xla/tools/show_literal.cc
+++ b/tensorflow/compiler/xla/tools/show_literal.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include <stdio.h>
#include <string>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
diff --git a/tensorflow/compiler/xla/tools/show_text_literal.cc b/tensorflow/compiler/xla/tools/show_text_literal.cc
index 8525873e91..48c8374811 100644
--- a/tensorflow/compiler/xla/tools/show_text_literal.cc
+++ b/tensorflow/compiler/xla/tools/show_text_literal.cc
@@ -20,7 +20,7 @@ limitations under the License.
#include <memory>
#include <string>
-#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/text_literal_reader.h"
#include "tensorflow/compiler/xla/types.h"
diff --git a/tensorflow/compiler/xla/util.h b/tensorflow/compiler/xla/util.h
index 6041fae159..5ae099a462 100644
--- a/tensorflow/compiler/xla/util.h
+++ b/tensorflow/compiler/xla/util.h
@@ -500,17 +500,17 @@ bool c_is_sorted(const C& c, Compare&& comp) {
}
template <typename C>
-auto c_adjacent_find(const C& c) -> decltype(std::begin(c)) {
+auto c_adjacent_find(C& c) -> decltype(std::begin(c)) {
return std::adjacent_find(std::begin(c), std::end(c));
}
template <typename C, typename Pred>
-auto c_find_if(const C& c, Pred&& pred) -> decltype(std::begin(c)) {
+auto c_find_if(C& c, Pred&& pred) -> decltype(std::begin(c)) {
return std::find_if(std::begin(c), std::end(c), std::forward<Pred>(pred));
}
template <typename C, typename Value>
-auto c_find(const C& c, Value&& value) -> decltype(std::begin(c)) {
+auto c_find(C& c, Value&& value) -> decltype(std::begin(c)) {
return std::find(std::begin(c), std::end(c), std::forward<Value>(value));
}
@@ -534,6 +534,13 @@ c_count_if(const C& c, Pred&& pred) {
return std::count_if(std::begin(c), std::end(c), std::forward<Pred>(pred));
}
+// Determines whether `value` is present in `c`.
+template <typename C, typename T>
+bool c_linear_search(const C& c, T&& value) {
+ auto last = std::end(c);
+ return std::find(std::begin(c), last, std::forward<T>(value)) != last;
+}
+
template <typename C, typename Value>
int64 FindIndex(const C& c, Value&& value) {
auto it = c_find(c, std::forward<Value>(value));
@@ -555,6 +562,11 @@ void EraseAt(C* c, int64 index) {
c->erase(c->begin() + index);
}
+template <typename T>
+std::vector<T> ArraySliceToVector(tensorflow::gtl::ArraySlice<T> slice) {
+ return std::vector<T>(slice.begin(), slice.end());
+}
+
template <typename T, int N>
std::vector<T> InlinedVectorToVector(
const tensorflow::gtl::InlinedVector<T, N>& inlined_vector) {
diff --git a/tensorflow/contrib/BUILD b/tensorflow/contrib/BUILD
index fffab5a795..1322056d80 100644
--- a/tensorflow/contrib/BUILD
+++ b/tensorflow/contrib/BUILD
@@ -7,8 +7,8 @@ package(default_visibility = ["//tensorflow:__subpackages__"])
load("//third_party/mpi:mpi.bzl", "if_mpi")
load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda")
-load("@local_config_tensorrt//:build_defs.bzl", "if_tensorrt")
load("//tensorflow:tensorflow.bzl", "if_not_windows")
+load("//tensorflow:tensorflow.bzl", "if_not_windows_cuda")
py_library(
name = "contrib_py",
@@ -26,8 +26,6 @@ py_library(
"//tensorflow/contrib/bayesflow:bayesflow_py",
"//tensorflow/contrib/boosted_trees:init_py",
"//tensorflow/contrib/checkpoint/python:checkpoint",
- "//tensorflow/contrib/cloud:cloud_py",
- "//tensorflow/contrib/cluster_resolver:cluster_resolver_pip",
"//tensorflow/contrib/cluster_resolver:cluster_resolver_py",
"//tensorflow/contrib/coder:coder_py",
"//tensorflow/contrib/compiler:compiler_py",
@@ -45,7 +43,6 @@ py_library(
"//tensorflow/contrib/factorization:factorization_py",
"//tensorflow/contrib/feature_column:feature_column_py",
"//tensorflow/contrib/framework:framework_py",
- "//tensorflow/contrib/fused_conv:fused_conv_py",
"//tensorflow/contrib/gan",
"//tensorflow/contrib/graph_editor:graph_editor_py",
"//tensorflow/contrib/grid_rnn:grid_rnn_py",
@@ -105,6 +102,7 @@ py_library(
"//tensorflow/contrib/summary:summary",
"//tensorflow/contrib/tensor_forest:init_py",
"//tensorflow/contrib/tensorboard",
+ "//tensorflow/contrib/tensorrt:init_py",
"//tensorflow/contrib/testing:testing_py",
"//tensorflow/contrib/text:text_py",
"//tensorflow/contrib/tfprof",
@@ -115,15 +113,23 @@ py_library(
"//tensorflow/contrib/util:util_py",
"//tensorflow/python:util",
"//tensorflow/python/estimator:estimator_py",
- ] + if_mpi(["//tensorflow/contrib/mpi_collectives:mpi_collectives_py"]) + if_tensorrt([
- "//tensorflow/contrib/tensorrt:init_py",
- ]) + select({
+ ] + if_mpi(["//tensorflow/contrib/mpi_collectives:mpi_collectives_py"]) + select({
"//tensorflow:with_kafka_support_windows_override": [],
"//tensorflow:with_kafka_support": [
"//tensorflow/contrib/kafka",
],
"//conditions:default": [],
- }) + if_not_windows([
+ }) + select({
+ "//tensorflow:with_aws_support_windows_override": [],
+ "//tensorflow:with_aws_support": [
+ "//tensorflow/contrib/kinesis",
+ ],
+ "//conditions:default": [],
+ }) + if_not_windows_cuda([
+ "//tensorflow/contrib/fused_conv:fused_conv_py", # unresolved symbols, need to export more symbols
+ ]) + if_not_windows([
+ "//tensorflow/contrib/bigtable", # depends on bigtable
+ "//tensorflow/contrib/cloud:cloud_py", # doesn't compile on Windows
"//tensorflow/contrib/ffmpeg:ffmpeg_ops_py",
"//tensorflow/contrib/lite/python:lite", # unix dependency, need to fix code
]),
@@ -154,6 +160,12 @@ cc_library(
"//tensorflow/contrib/kafka:dataset_kernels",
],
"//conditions:default": [],
+ }) + select({
+ "//tensorflow:with_aws_support_windows_override": [],
+ "//tensorflow:with_aws_support": [
+ "//tensorflow/contrib/kinesis:dataset_kernels",
+ ],
+ "//conditions:default": [],
}),
)
@@ -183,5 +195,11 @@ cc_library(
"//tensorflow/contrib/kafka:dataset_ops_op_lib",
],
"//conditions:default": [],
+ }) + select({
+ "//tensorflow:with_aws_support_windows_override": [],
+ "//tensorflow:with_aws_support": [
+ "//tensorflow/contrib/kinesis:dataset_ops_op_lib",
+ ],
+ "//conditions:default": [],
}),
)
diff --git a/tensorflow/contrib/__init__.py b/tensorflow/contrib/__init__.py
index 9aad772f0a..ded05da718 100644
--- a/tensorflow/contrib/__init__.py
+++ b/tensorflow/contrib/__init__.py
@@ -25,7 +25,8 @@ import os
from tensorflow.contrib import batching
from tensorflow.contrib import bayesflow
from tensorflow.contrib import checkpoint
-from tensorflow.contrib import cloud
+if os.name != "nt":
+ from tensorflow.contrib import cloud
from tensorflow.contrib import cluster_resolver
from tensorflow.contrib import coder
from tensorflow.contrib import compiler
diff --git a/tensorflow/contrib/autograph/README.md b/tensorflow/contrib/autograph/README.md
index 7e26f47118..679ab48e5c 100644
--- a/tensorflow/contrib/autograph/README.md
+++ b/tensorflow/contrib/autograph/README.md
@@ -4,7 +4,7 @@ IMPORTANT: AutoGraph is alpha software, and under active development. Expect rou
AutoGraph is a Python to TensorFlow compiler.
-With AutoGraph, you can write [Eager style](https://www.tensorflow.org/guide/eager) code in a concise manner, and run it as a TensorFlow graph. AutoGraph uses source code transformation and partial evaluation to generate Python code that builds an equivalent TensorFlow subgraph. The result is code that behaves like ops and can be freely combined with other TensorFlow ops.
+With AutoGraph, you can write [Eager style](https://www.tensorflow.org/guide/eager) code in a concise manner and run it as a TensorFlow graph. AutoGraph uses source code transformation and partial evaluation to generate Python code that builds an equivalent TensorFlow subgraph. The result is code that behaves like ops and can be freely combined with other TensorFlow ops. Please see [LIMITATIONS.md](LIMITATIONS.md) for the parts of the Python language we currently support.
For example, this Python function:
diff --git a/tensorflow/contrib/autograph/__init__.py b/tensorflow/contrib/autograph/__init__.py
index 361cf2d77c..7821c98f1c 100644
--- a/tensorflow/contrib/autograph/__init__.py
+++ b/tensorflow/contrib/autograph/__init__.py
@@ -29,6 +29,9 @@ from tensorflow.contrib.autograph.impl.api import converted_call
from tensorflow.contrib.autograph.impl.api import do_not_convert
from tensorflow.contrib.autograph.impl.api import RunMode
from tensorflow.contrib.autograph.impl.api import to_code
+from tensorflow.contrib.autograph.core.errors import improved_errors
+from tensorflow.contrib.autograph.core.errors import GraphConstructionError
+from tensorflow.contrib.autograph.core.errors import TfRuntimeError
from tensorflow.contrib.autograph.impl.api import to_graph
from tensorflow.contrib.autograph.lang.directives import set_element_type
from tensorflow.contrib.autograph.lang.directives import set_loop_options
@@ -46,6 +49,10 @@ _allowed_symbols = [
'to_graph',
# Overloaded operators
'operators',
+ # Errors
+ 'improved_errors',
+ 'GraphConstructionError',
+ 'TfRuntimeError',
# Python language "extensions"
'set_element_type',
'set_loop_options',
diff --git a/tensorflow/contrib/autograph/converters/BUILD b/tensorflow/contrib/autograph/converters/BUILD
index b2e2e27673..7cbba71683 100644
--- a/tensorflow/contrib/autograph/converters/BUILD
+++ b/tensorflow/contrib/autograph/converters/BUILD
@@ -21,16 +21,18 @@ py_library(
"break_statements.py",
"builtin_functions.py",
"call_trees.py",
+ "conditional_expressions.py",
"continue_statements.py",
"control_flow.py",
"decorators.py",
- "ifexp.py",
- "list_comprehension.py",
+ "directives.py",
+ "error_handlers.py",
+ "list_comprehensions.py",
"lists.py",
"logical_expressions.py",
"name_scopes.py",
+ "return_statements.py",
"side_effect_guards.py",
- "single_return.py",
"slices.py",
],
srcs_version = "PY2AND3",
@@ -95,6 +97,17 @@ py_test(
)
py_test(
+ name = "conditional_expressions_test",
+ srcs = ["conditional_expressions_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":converters",
+ "//tensorflow/contrib/autograph/core:test_lib",
+ "//tensorflow/python:client_testlib",
+ ],
+)
+
+py_test(
name = "continue_statements_test",
srcs = ["continue_statements_test.py"],
srcs_version = "PY2AND3",
@@ -132,6 +145,18 @@ py_test(
)
py_test(
+ name = "directives_test",
+ srcs = ["directives_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":converters",
+ "//tensorflow/contrib/autograph/core:test_lib",
+ "//tensorflow/contrib/autograph/lang",
+ "//tensorflow/python:client_testlib",
+ ],
+)
+
+py_test(
name = "name_scopes_test",
srcs = ["name_scopes_test.py"],
deps = [
@@ -143,8 +168,8 @@ py_test(
)
py_test(
- name = "list_comprehension_test",
- srcs = ["list_comprehension_test.py"],
+ name = "list_comprehensions_test",
+ srcs = ["list_comprehensions_test.py"],
srcs_version = "PY2AND3",
deps = [
":converters",
@@ -179,11 +204,6 @@ py_test(
name = "side_effect_guards_test",
srcs = ["side_effect_guards_test.py"],
srcs_version = "PY2AND3",
- tags = [
- # TODO(mdan): Fix.
- "flaky",
- "notap",
- ],
deps = [
":converters",
"//tensorflow/contrib/autograph/core:test_lib",
@@ -192,8 +212,8 @@ py_test(
)
py_test(
- name = "single_return_test",
- srcs = ["single_return_test.py"],
+ name = "return_statements_test",
+ srcs = ["return_statements_test.py"],
srcs_version = "PY2AND3",
deps = [
":converters",
@@ -204,8 +224,8 @@ py_test(
)
py_test(
- name = "ifexp_test",
- srcs = ["ifexp_test.py"],
+ name = "error_handlers_test",
+ srcs = ["error_handlers_test.py"],
srcs_version = "PY2AND3",
deps = [
":converters",
diff --git a/tensorflow/contrib/autograph/converters/__init__.py b/tensorflow/contrib/autograph/converters/__init__.py
index e4e8eda42f..6325ac78dc 100644
--- a/tensorflow/contrib/autograph/converters/__init__.py
+++ b/tensorflow/contrib/autograph/converters/__init__.py
@@ -18,5 +18,15 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-# TODO(mdan): Define a base transformer class that can recognize skip_processing
-# TODO(mdan): All converters are incomplete, especially those that change blocks
+# Naming conventions:
+# * each converter should specialize on a single idiom; be consistent with
+# the Python reference for naming
+# * all converters inherit core.converter.Base
+# * module names describe the idiom that the converter covers, plural
+# * the converter class is named consistently with the module, singular and
+# includes the word Transformer
+#
+# Example:
+#
+# lists.py
+# class ListTransformer(converter.Base)
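To make these conventions concrete, a minimal converter skeleton of the kind this package expects might look as follows (the module and class names here are hypothetical; the converter.Base / transform(node, ctx) pattern matches the converters in this directory):

# unary_ops.py (hypothetical sketch)
from tensorflow.contrib.autograph.core import converter


class UnaryOpTransformer(converter.Base):
  """Specializes on a single idiom: unary operator expressions."""

  def visit_UnaryOp(self, node):
    node = self.generic_visit(node)
    # A real converter would rewrite `node` here, e.g. via templates.replace.
    return node


def transform(node, ctx):
  return UnaryOpTransformer(ctx).visit(node)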
diff --git a/tensorflow/contrib/autograph/converters/asserts.py b/tensorflow/contrib/autograph/converters/asserts.py
index e664a403a5..af2f20f267 100644
--- a/tensorflow/contrib/autograph/converters/asserts.py
+++ b/tensorflow/contrib/autograph/converters/asserts.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Converts Assert statements to their corresponding TF calls."""
+"""Converts assert statements to their corresponding TF calls."""
from __future__ import absolute_import
from __future__ import division
@@ -24,8 +24,8 @@ from tensorflow.contrib.autograph.core import converter
from tensorflow.contrib.autograph.pyct import templates
-class AssertsTransformer(converter.Base):
- """Transforms Print nodes to Call so they can be handled as functions."""
+class AssertTransformer(converter.Base):
+ """Transforms Assert nodes to Call so they can be handled as functions."""
def visit_Assert(self, node):
self.generic_visit(node)
@@ -46,4 +46,4 @@ class AssertsTransformer(converter.Base):
def transform(node, ctx):
- return AssertsTransformer(ctx).visit(node)
+ return AssertTransformer(ctx).visit(node)
diff --git a/tensorflow/contrib/autograph/converters/asserts_test.py b/tensorflow/contrib/autograph/converters/asserts_test.py
index 2cd0e626bc..9c58ae3acc 100644
--- a/tensorflow/contrib/autograph/converters/asserts_test.py
+++ b/tensorflow/contrib/autograph/converters/asserts_test.py
@@ -32,8 +32,8 @@ class AssertsTest(converter_testing.TestCase):
def test_fn(a):
assert a > 0
- node = self.parse_and_analyze(test_fn, {})
- node = asserts.transform(node, self.ctx)
+ node, ctx = self.prepare(test_fn, {})
+ node = asserts.transform(node, ctx)
self.assertTrue(isinstance(node.body[0].body[0].value, gast.Call))
diff --git a/tensorflow/contrib/autograph/converters/break_statements.py b/tensorflow/contrib/autograph/converters/break_statements.py
index a990e359a2..2a60750bda 100644
--- a/tensorflow/contrib/autograph/converters/break_statements.py
+++ b/tensorflow/contrib/autograph/converters/break_statements.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Canonicalizes break statements by de-sugaring into a control boolean."""
+"""Lowers break statements to conditionals."""
from __future__ import absolute_import
from __future__ import division
@@ -24,17 +24,22 @@ from tensorflow.contrib.autograph.pyct import templates
from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
-# Tags for local state.
-BREAK_USED = 'break_used'
-CONTROL_VAR_NAME = 'control_var_name'
+class _Break(object):
+ def __init__(self):
+ self.used = False
+ self.control_var_name = None
-class BreakStatementTransformer(converter.Base):
+ def __repr__(self):
+ return 'used: %s, var: %s' % (self.used, self.control_var_name)
+
+
+class BreakTransformer(converter.Base):
"""Canonicalizes break statements into additional conditionals."""
def visit_Break(self, node):
- self.set_local(BREAK_USED, True)
- var_name = self.get_local(CONTROL_VAR_NAME)
+ self.state[_Break].used = True
+ var_name = self.state[_Break].control_var_name
# TODO(mdan): This will fail when expanded inside a top-level else block.
template = """
var_name = True
@@ -57,12 +62,12 @@ class BreakStatementTransformer(converter.Base):
block=block)
return node
- def _track_body(self, nodes, break_var):
- self.enter_local_scope()
- self.set_local(CONTROL_VAR_NAME, break_var)
+ def _process_body(self, nodes, break_var):
+ self.state[_Break].enter()
+ self.state[_Break].control_var_name = break_var
nodes = self.visit_block(nodes)
- break_used = self.get_local(BREAK_USED, False)
- self.exit_local_scope()
+ break_used = self.state[_Break].used
+ self.state[_Break].exit()
return nodes, break_used
def visit_While(self, node):
@@ -70,7 +75,7 @@ class BreakStatementTransformer(converter.Base):
break_var = self.ctx.namer.new_symbol('break_', scope.referenced)
node.test = self.visit(node.test)
- node.body, break_used = self._track_body(node.body, break_var)
+ node.body, break_used = self._process_body(node.body, break_var)
# A break in the else clause applies to the containing scope.
node.orelse = self.visit_block(node.orelse)
@@ -101,7 +106,7 @@ class BreakStatementTransformer(converter.Base):
node.target = self.visit(node.target)
node.iter = self.visit(node.iter)
- node.body, break_used = self._track_body(node.body, break_var)
+ node.body, break_used = self._process_body(node.body, break_var)
# A break in the else clause applies to the containing scope.
node.orelse = self.visit_block(node.orelse)
@@ -138,4 +143,4 @@ class BreakStatementTransformer(converter.Base):
def transform(node, ctx):
- return BreakStatementTransformer(ctx).visit(node)
+ return BreakTransformer(ctx).visit(node)
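A rough before/after sketch of this lowering (the control variable name is generated by the namer, and the rewrite template is only partially visible in this hunk, so treat this as illustrative rather than exact converter output):

# before
while x > 3:
  if x == 5:
    break
  x -= 1

# roughly after: break sets a control boolean, the remainder of the body
# is guarded on it, and the while test re-checks it
break__ = False
while x > 3 and not break__:
  if x == 5:
    break__ = True
  if not break__:
    x -= 1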
diff --git a/tensorflow/contrib/autograph/converters/break_statements_test.py b/tensorflow/contrib/autograph/converters/break_statements_test.py
index dcff1c54c2..c26ca2946c 100644
--- a/tensorflow/contrib/autograph/converters/break_statements_test.py
+++ b/tensorflow/contrib/autograph/converters/break_statements_test.py
@@ -25,7 +25,11 @@ from tensorflow.python.platform import test
class BreakCanonicalizationTest(converter_testing.TestCase):
- def test_basic_while(self):
+ def assertTransformedEquivalent(self, test_fn, *inputs):
+ with self.converted(test_fn, break_statements, {}) as result:
+ self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
+ def test_while_loop(self):
def test_fn(x):
v = []
@@ -36,15 +40,11 @@ class BreakCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v
- node = self.parse_and_analyze(test_fn, {})
- node = break_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- self.assertEqual([], result.test_fn(0))
- self.assertEqual([], result.test_fn(1))
- self.assertEqual([3], result.test_fn(4))
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 1)
+ self.assertTransformedEquivalent(test_fn, 4)
- def test_basic_for(self):
+ def test_for_loop(self):
def test_fn(a):
v = []
@@ -55,18 +55,12 @@ class BreakCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v
- node = self.parse_and_analyze(test_fn, {})
- node = break_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
+ with self.converted(test_fn, break_statements, {}) as result:
# The break is incompletely canonicalized. The loop will not interrupt,
# but the section following the break will be skipped.
- self.assertEqual([], result.test_fn([]))
- self.assertEqual([3, 3], result.test_fn([4, 4]))
- self.assertEqual([3], result.test_fn([4, 5]))
self.assertEqual([3], result.test_fn([5, 4]))
- def test_deeply_nested(self):
+ def test_nested(self):
def test_fn(x):
v = []
@@ -83,13 +77,9 @@ class BreakCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v, u, w
- node = self.parse_and_analyze(test_fn, {})
- node = break_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- self.assertEqual(([], [], []), result.test_fn(0))
- self.assertEqual(([2, 1], [2], [0]), result.test_fn(3))
- self.assertEqual(([10, 9, 8, 7], [10, 8], [6]), result.test_fn(11))
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 3)
+ self.assertTransformedEquivalent(test_fn, 11)
def test_nested_loops(self):
@@ -109,16 +99,12 @@ class BreakCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v, u
- node = self.parse_and_analyze(test_fn, {})
- node = break_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- self.assertEqual(([], []), result.test_fn(0))
- self.assertEqual(([1], []), result.test_fn(2))
- self.assertEqual(([2, 1], [1]), result.test_fn(3))
- self.assertEqual(([4, 3, 2, 1], [3, 1]), result.test_fn(5))
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, 3)
+ self.assertTransformedEquivalent(test_fn, 5)
- def test_loop_else(self):
+ def test_loop_orelse(self):
def test_fn(x):
v = []
@@ -134,13 +120,9 @@ class BreakCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v, u
- node = self.parse_and_analyze(test_fn, {})
- node = break_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- self.assertEqual(([], []), result.test_fn(0))
- self.assertEqual(([], [1]), result.test_fn(2))
- self.assertEqual(([2], [1]), result.test_fn(3))
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, 3)
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/builtin_functions_test.py b/tensorflow/contrib/autograph/converters/builtin_functions_test.py
index e9000e518c..d5c3e2c250 100644
--- a/tensorflow/contrib/autograph/converters/builtin_functions_test.py
+++ b/tensorflow/contrib/autograph/converters/builtin_functions_test.py
@@ -18,8 +18,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import sys
-
import six
from tensorflow.contrib.autograph.converters import builtin_functions
@@ -36,55 +34,39 @@ class BuiltinFunctionsTest(converter_testing.TestCase):
def test_fn(a):
return len(a)
- node = self.parse_and_analyze(test_fn, {'len': len})
- node = builtin_functions.transform(node, self.ctx)
-
- with self.compiled(node, array_ops.shape) as result:
+ with self.converted(test_fn, builtin_functions, {'len': len},
+ array_ops.shape) as result:
with self.test_session() as sess:
- self.assertEqual(3,
- sess.run(
- result.test_fn(constant_op.constant([0, 0, 0]))))
-
- self.assertEqual(3, result.test_fn([0, 0, 0]))
+ ops = result.test_fn(constant_op.constant([0, 0, 0]))
+ self.assertEqual(sess.run(ops), 3)
def test_print(self):
- def test_fn(a):
- print(a)
+ if six.PY2:
+ return
- node = self.parse_and_analyze(test_fn, {'print': print})
- node = builtin_functions.transform(node, self.ctx)
+ def test_fn(a):
+ return print(a)
- with self.compiled(node) as result:
+ with self.converted(test_fn, builtin_functions, {'print': print}) as result:
with self.test_session() as sess:
- try:
- out_capturer = six.StringIO()
- sys.stdout = out_capturer
- result.test_fn(constant_op.constant('a'))
- sess.run(sess.graph.get_operations())
- self.assertEqual(out_capturer.getvalue(), 'a\n')
- finally:
- sys.stdout = sys.__stdout__
+ with self.assertPrints('a\n'):
+ sess.run(result.test_fn('a'))
- def test_print_with_op_multiple_values(self):
+ def test_print_multiple_values(self):
- def test_fn(a, b, c):
- print(a, b, c)
+ if six.PY2:
+ return
- node = self.parse_and_analyze(test_fn, {'print': print})
- node = builtin_functions.transform(node, self.ctx)
+ def test_fn(a, b, c):
+ return print(a, b, c)
- with self.compiled(node) as result:
+ with self.converted(test_fn, builtin_functions, {'print': print}) as result:
with self.test_session() as sess:
- try:
- out_capturer = six.StringIO()
- sys.stdout = out_capturer
- result.test_fn(
- constant_op.constant('a'), constant_op.constant(1), [2, 3])
- sess.run(sess.graph.get_operations())
- self.assertEqual(out_capturer.getvalue(), 'a 1 [2, 3]\n')
- finally:
- sys.stdout = sys.__stdout__
+ with self.assertPrints('a 1 [2, 3]\n'):
+ sess.run(
+ result.test_fn(
+ constant_op.constant('a'), constant_op.constant(1), [2, 3]))
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/call_trees_test.py b/tensorflow/contrib/autograph/converters/call_trees_test.py
index 27d8281b85..8cdba659ee 100644
--- a/tensorflow/contrib/autograph/converters/call_trees_test.py
+++ b/tensorflow/contrib/autograph/converters/call_trees_test.py
@@ -36,37 +36,34 @@ class CallTreesTest(converter_testing.TestCase):
def test_fn_1(_):
raise ValueError('This should not be called in the compiled version.')
- def renamed_test_fn_1(a):
+ def other_test_fn_1(a):
return a + 1
def test_fn_2(a):
return test_fn_1(a) + 1
- node = self.parse_and_analyze(test_fn_2, {'test_fn_1': test_fn_1})
- node = call_trees.transform(node, self.ctx)
+ ns = {'test_fn_1': test_fn_1}
+ node, ctx = self.prepare(test_fn_2, ns)
+ node = call_trees.transform(node, ctx)
- with self.compiled(node) as result:
- # Only test_fn_2 is transformed, so we'll insert renamed_test_fn_1
- # manually.
- result.renamed_test_fn_1 = renamed_test_fn_1
- self.assertEquals(3, result.test_fn_2(1))
+ with self.compiled(node, ns) as result:
+ new_name, _ = ctx.namer.compiled_function_name(('test_fn_1',))
+ setattr(result, new_name, other_test_fn_1)
+ self.assertEquals(result.test_fn_2(1), 3)
def test_dynamic_function(self):
def test_fn_1():
- raise ValueError('This should be masked by the mock.')
+ raise ValueError('This should be masked by the mock in self.compiled.')
def test_fn_2(f):
return f() + 3
- node = self.parse_and_analyze(test_fn_2, {})
- node = call_trees.transform(node, self.ctx)
-
- with self.compiled(node) as result:
+ with self.converted(test_fn_2, call_trees, {}) as result:
# 10 = 7 (from the mock) + 3 (from test_fn_2)
self.assertEquals(10, result.test_fn_2(test_fn_1))
- def test_simple_methods(self):
+ def test_basic_method(self):
class TestClass(object):
@@ -76,49 +73,43 @@ class CallTreesTest(converter_testing.TestCase):
def test_fn_2(self, a):
return self.test_fn_1(a) + 1
- node = self.parse_and_analyze(
- TestClass.test_fn_2, {'TestClass': TestClass},
+ ns = {'TestClass': TestClass}
+ node, ctx = self.prepare(
+ TestClass.test_fn_2,
+ ns,
namer=converter_testing.FakeNoRenameNamer(),
arg_types={'self': (TestClass.__name__, TestClass)})
- node = call_trees.transform(node, self.ctx)
+ node = call_trees.transform(node, ctx)
- with self.compiled(node) as result:
+ with self.compiled(node, ns) as result:
tc = TestClass()
self.assertEquals(3, result.test_fn_2(tc, 1))
- def test_py_func_wrap_no_retval(self):
+ def test_py_func_no_retval(self):
def test_fn(a):
setattr(a, 'foo', 'bar')
- node = self.parse_and_analyze(test_fn, {'setattr': setattr})
- node = call_trees.transform(node, self.ctx)
-
- with self.compiled(node) as result:
+ with self.converted(test_fn, call_trees, {'setattr': setattr}) as result:
with self.test_session() as sess:
- # The function has no return value, so we do some tricks to grab the
- # generated py_func node and ensure its effect only happens at graph
- # execution.
class Dummy(object):
pass
a = Dummy()
result.test_fn(a)
+ py_func_op, = sess.graph.get_operations()
self.assertFalse(hasattr(a, 'foo'))
- sess.run(sess.graph.get_operations()[0])
+ sess.run(py_func_op)
self.assertEquals('bar', a.foo)
- def test_py_func_wrap_known_function(self):
+ def test_py_func_known_function(self):
def test_fn():
return np.random.binomial(2, 0.5)
- node = self.parse_and_analyze(test_fn, {'np': np})
- node = call_trees.transform(node, self.ctx)
-
- with self.compiled(node, dtypes.int64) as result:
- result.np = np
+ with self.converted(test_fn, call_trees, {'np': np},
+ dtypes.int64) as result:
with self.test_session() as sess:
self.assertTrue(isinstance(result.test_fn(), ops.Tensor))
self.assertIn(sess.run(result.test_fn()), (0, 1, 2))
@@ -130,22 +121,17 @@ class CallTreesTest(converter_testing.TestCase):
a = math_ops.add(a, constant_op.constant(1))
return a
- node = self.parse_and_analyze(
- test_fn, {
- 'math_ops': math_ops,
- 'constant_op': constant_op
- },
+ ns = {'math_ops': math_ops, 'constant_op': constant_op}
+ node, ctx = self.prepare(
+ test_fn,
+ ns,
arg_types=set(((math_ops.__name__,), (constant_op.__name__,))))
- node = call_trees.transform(node, self.ctx)
+ node = call_trees.transform(node, ctx)
- with self.compiled(node) as result:
- result.math_ops = math_ops
- result.constant_op = constant_op
+ with self.compiled(node, ns) as result:
with self.test_session() as sess:
- # Not renamed, because the converter doesn't rename the definition
- # itself (the caller is responsible for that).
result_tensor = result.test_fn(constant_op.constant(1))
- self.assertEquals(3, sess.run(result_tensor))
+ self.assertEquals(sess.run(result_tensor), 3)
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/conditional_expressions.py b/tensorflow/contrib/autograph/converters/conditional_expressions.py
new file mode 100644
index 0000000000..63f649dfdf
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/conditional_expressions.py
@@ -0,0 +1,129 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Converts the ternary conditional operator."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import templates
+from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
+
+
+class _FunctionDefs(object):
+
+ def __init__(self):
+ self.nodes = []
+
+
+class _Statement(object):
+
+ def __init__(self):
+ self.scope = None
+
+
+class ConditionalExpressionTransformer(converter.Base):
+ """Converts contitional expressions to functional form."""
+
+ def _postprocess_statement(self, node):
+ """Inserts any separate functions that node may use."""
+ replacements = []
+ for def_node in self.state[_FunctionDefs].nodes:
+ replacements.extend(def_node)
+ replacements.append(node)
+ node = replacements
+ # The corresponding enter is called by self.visit_block (see _process_block)
+ self.state[_FunctionDefs].exit()
+ return node, None
+
+ def _create_branch(self, expr, name_stem):
+ scope = self.state[_Statement].scope
+ name = self.ctx.namer.new_symbol(name_stem, scope.referenced)
+ template = """
+ def name():
+ return expr,
+ """
+ node = templates.replace(template, name=name, expr=expr)
+ self.state[_FunctionDefs].nodes.append(node)
+ return name
+
+ def visit_IfExp(self, node):
+ if anno.hasanno(node.test, anno.Basic.QN):
+ name_root = anno.getanno(node.test, anno.Basic.QN).ssf()
+ else:
+ name_root = 'ifexp'
+
+ true_fn_name = self._create_branch(node.body, '%s_true' % name_root)
+ false_fn_name = self._create_branch(node.orelse, '%s_false' % name_root)
+
+ return templates.replace_as_expression(
+ 'ag__.utils.run_cond(test, true_fn_name, false_fn_name)',
+ test=node.test,
+ true_fn_name=true_fn_name,
+ false_fn_name=false_fn_name)
+
+ def _process_block(self, scope, block):
+ self.state[_Statement].enter()
+ self.state[_Statement].scope = scope
+ block = self.visit_block(
+ block,
+ before_visit=self.state[_FunctionDefs].enter,
+ after_visit=self._postprocess_statement)
+ self.state[_Statement].exit()
+ return block
+
+ def visit_FunctionDef(self, node):
+ node.args = self.generic_visit(node.args)
+ node.decorator_list = self.visit_block(node.decorator_list)
+ node.body = self._process_block(
+ anno.getanno(node, anno.Static.SCOPE), node.body)
+ return node
+
+ def visit_For(self, node):
+ node.target = self.visit(node.target)
+ node.body = self._process_block(
+ anno.getanno(node, NodeAnno.BODY_SCOPE), node.body)
+ node.orelse = self._process_block(
+ anno.getanno(node, NodeAnno.ORELSE_SCOPE), node.orelse)
+ return node
+
+ def visit_While(self, node):
+ node.test = self.visit(node.test)
+ node.body = self._process_block(
+ anno.getanno(node, NodeAnno.BODY_SCOPE), node.body)
+ node.orelse = self._process_block(
+ anno.getanno(node, NodeAnno.ORELSE_SCOPE), node.orelse)
+ return node
+
+ def visit_If(self, node):
+ node.test = self.visit(node.test)
+ node.body = self._process_block(
+ anno.getanno(node, NodeAnno.BODY_SCOPE), node.body)
+ node.orelse = self._process_block(
+ anno.getanno(node, NodeAnno.ORELSE_SCOPE), node.orelse)
+ return node
+
+ def visit_With(self, node):
+ node.items = self.visit_block(node.items)
+ node.body = self._process_block(
+ anno.getanno(node, NodeAnno.BODY_SCOPE), node.body)
+ return node
+
+
+def transform(node, ctx):
+ node = ConditionalExpressionTransformer(ctx).visit(node)
+ return node
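Concretely, this converter rewrites a ternary expression into two generated single-expression functions plus a run_cond call; a rough sketch follows (function names are generated from the test expression's qualified name, falling back to 'ifexp', and each branch returns a one-tuple per the template above):

# before
y = x * x if x > 0 else 0

# roughly after
def ifexp_true():
  return x * x,

def ifexp_false():
  return 0,

y = ag__.utils.run_cond(x > 0, ifexp_true, ifexp_false)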
diff --git a/tensorflow/contrib/autograph/converters/conditional_expressions_test.py b/tensorflow/contrib/autograph/converters/conditional_expressions_test.py
new file mode 100644
index 0000000000..95a3108741
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/conditional_expressions_test.py
@@ -0,0 +1,53 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for conditional_expressions module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.converters import conditional_expressions
+from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.python.platform import test
+
+
+class ConditionalExpressionsTest(converter_testing.TestCase):
+
+ def assertTransformedEquivalent(self, test_fn, *inputs):
+ ns = {}
+ with self.converted(test_fn, conditional_expressions, ns) as result:
+ self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
+ def test_basic(self):
+
+ def test_fn(x):
+ return 1 if x else 0
+
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 3)
+
+ def test_nested_orelse(self):
+
+ def test_fn(x):
+ y = x * x if x > 0 else x if x else 1
+ return y
+
+ self.assertTransformedEquivalent(test_fn, -2)
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 2)
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/autograph/converters/continue_statements_test.py b/tensorflow/contrib/autograph/converters/continue_statements_test.py
index 2ce1837972..3a7c7d1486 100644
--- a/tensorflow/contrib/autograph/converters/continue_statements_test.py
+++ b/tensorflow/contrib/autograph/converters/continue_statements_test.py
@@ -25,7 +25,11 @@ from tensorflow.python.platform import test
class ContinueCanonicalizationTest(converter_testing.TestCase):
- def test_basic_continue(self):
+ def assertTransformedEquivalent(self, test_fn, *inputs):
+ with self.converted(test_fn, continue_statements, {}) as result:
+ self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
+ def test_basic(self):
def test_fn(x):
v = []
@@ -36,17 +40,12 @@ class ContinueCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v
- node = self.parse_and_analyze(test_fn, {})
- node = continue_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- self.assertEqual(test_fn(0), result.test_fn(0))
- self.assertEqual(test_fn(1), result.test_fn(1))
- self.assertEqual(test_fn(2), result.test_fn(2))
- self.assertEqual(test_fn(3), result.test_fn(3))
- self.assertEqual(test_fn(4), result.test_fn(4))
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 1)
+ self.assertTransformedEquivalent(test_fn, 3)
+ self.assertTransformedEquivalent(test_fn, 4)
- def test_basic_continue_for_loop(self):
+ def test_for_loop(self):
def test_fn(a):
v = []
@@ -57,16 +56,12 @@ class ContinueCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v
- node = self.parse_and_analyze(test_fn, {})
- node = continue_statements.transform(node, self.ctx)
+ self.assertTransformedEquivalent(test_fn, [])
+ self.assertTransformedEquivalent(test_fn, [1])
+ self.assertTransformedEquivalent(test_fn, [2])
+ self.assertTransformedEquivalent(test_fn, [1, 2, 3])
- with self.compiled(node) as result:
- self.assertEqual(test_fn([]), result.test_fn([]))
- self.assertEqual(test_fn([1]), result.test_fn([1]))
- self.assertEqual(test_fn([2]), result.test_fn([2]))
- self.assertEqual(test_fn([1, 2, 3]), result.test_fn([1, 2, 3]))
-
- def test_continue_deeply_nested(self):
+ def test_nested(self):
def test_fn(x):
v = []
@@ -83,15 +78,10 @@ class ContinueCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v, u, w
- node = self.parse_and_analyze(test_fn, {})
- node = continue_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- self.assertEqual(test_fn(0), result.test_fn(0))
- self.assertEqual(test_fn(1), result.test_fn(1))
- self.assertEqual(test_fn(2), result.test_fn(2))
- self.assertEqual(test_fn(3), result.test_fn(3))
- self.assertEqual(test_fn(4), result.test_fn(4))
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 1)
+ self.assertTransformedEquivalent(test_fn, 3)
+ self.assertTransformedEquivalent(test_fn, 4)
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/control_flow.py b/tensorflow/contrib/autograph/converters/control_flow.py
index f4a8710627..a25232f713 100644
--- a/tensorflow/contrib/autograph/converters/control_flow.py
+++ b/tensorflow/contrib/autograph/converters/control_flow.py
@@ -25,8 +25,7 @@ from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import templates
-from tensorflow.contrib.autograph.pyct.static_analysis import cfg
-from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
+from tensorflow.contrib.autograph.pyct.static_analysis import annos
class SymbolNamer(object):
@@ -47,6 +46,7 @@ class SymbolNamer(object):
class ControlFlowTransformer(converter.Base):
"""Transforms control flow structures like loops an conditionals."""
+
def _create_cond_branch(self, body_name, aliased_orig_names,
aliased_new_names, body, returns):
if aliased_orig_names:
@@ -90,55 +90,51 @@ class ControlFlowTransformer(converter.Base):
return templates.replace(
template, test=test, body_name=body_name, orelse_name=orelse_name)
- def visit_If(self, node):
- self.generic_visit(node)
+ def _fmt_symbol_list(self, symbol_set):
+ if not symbol_set:
+ return 'no variables'
+ return ', '.join(map(str, symbol_set))
- body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
- orelse_scope = anno.getanno(node, NodeAnno.ORELSE_SCOPE)
- body_defs = body_scope.created | body_scope.modified
- orelse_defs = orelse_scope.created | orelse_scope.modified
- live = anno.getanno(node, 'live_out')
-
- # We'll need to check if we're closing over variables that are defined
- # elsewhere in the function
- # NOTE: we can only detect syntactic closure in the scope
- # of the code passed in. If the AutoGraph'd function itself closes
- # over other variables, this analysis won't take that into account.
- defined = anno.getanno(node, 'defined_in')
-
- # We only need to return variables that are
- # - modified by one or both branches
- # - live (or has a live parent) at the end of the conditional
- modified = []
- for def_ in body_defs | orelse_defs:
- def_with_parents = set((def_,)) | def_.support_set
- if live & def_with_parents:
- modified.append(def_)
-
- # We need to check if live created variables are balanced
- # in both branches
- created = live & (body_scope.created | orelse_scope.created)
-
- # The if statement is illegal if there are variables that are created,
- # that are also live, but both branches don't create them.
- if created:
- if created != (body_scope.created & live):
- raise ValueError(
- 'The main branch does not create all live symbols that the else '
- 'branch does.')
- if created != (orelse_scope.created & live):
- raise ValueError(
- 'The else branch does not create all live symbols that the main '
- 'branch does.')
-
- # Alias the closure variables inside the conditional functions
- # to avoid errors caused by the local variables created in the branch
- # functions.
+ def visit_If(self, node):
+ node = self.generic_visit(node)
+
+ body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
+ orelse_scope = anno.getanno(node, annos.NodeAnno.ORELSE_SCOPE)
+ defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
+ live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
+
+ modified_in_cond = body_scope.modified | orelse_scope.modified
+ returned_from_cond = set()
+ for s in modified_in_cond:
+ if s in live_out:
+ returned_from_cond.add(s)
+ elif s.is_composite():
+ # Special treatment for compound objects: if any of their owner entities
+ # are live, then they are outputs as well.
+ if any(owner in live_out for owner in s.owner_set):
+ returned_from_cond.add(s)
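+    # Example (illustrative): if a branch assigns `obj.a` and `obj` is live
+    # after the if statement, then `obj.a` is also returned from the cond,
+    # even though `obj.a` itself may not appear in live_out.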
+
+ need_alias_in_body = body_scope.modified & defined_in
+ need_alias_in_orelse = orelse_scope.modified & defined_in
+
+ created_in_body = body_scope.modified & returned_from_cond - defined_in
+ created_in_orelse = orelse_scope.modified & returned_from_cond - defined_in
+
+ if created_in_body != created_in_orelse:
+ raise ValueError(
+ 'if statement may not initialize all variables: the true branch'
+ ' creates %s, while the false branch creates %s. Make sure all'
+ ' these variables are initialized either in both'
+ ' branches or before the if statement.' %
+ (self._fmt_symbol_list(created_in_body),
+ self._fmt_symbol_list(created_in_orelse)))
+
+ # Alias the closure variables inside the conditional functions, to allow
+ # the functions access to the respective variables.
# We will alias variables independently for body and orelse scope,
# because different branches might write different variables.
- aliased_body_orig_names = tuple(body_scope.modified - body_scope.created)
- aliased_orelse_orig_names = tuple(orelse_scope.modified -
- orelse_scope.created)
+ aliased_body_orig_names = tuple(need_alias_in_body)
+ aliased_orelse_orig_names = tuple(need_alias_in_orelse)
aliased_body_new_names = tuple(
self.ctx.namer.new_symbol(s.ssf(), body_scope.referenced)
for s in aliased_body_orig_names)
@@ -153,58 +149,47 @@ class ControlFlowTransformer(converter.Base):
node_body = ast_util.rename_symbols(node.body, alias_body_map)
node_orelse = ast_util.rename_symbols(node.orelse, alias_orelse_map)
- if not modified:
+ returned_from_cond = tuple(returned_from_cond)
+ if returned_from_cond:
+ if len(returned_from_cond) == 1:
+ # TODO(mdan): Move this quirk into the operator implementation.
+ cond_results = returned_from_cond[0]
+ else:
+ cond_results = gast.Tuple([s.ast() for s in returned_from_cond], None)
+
+ returned_from_body = tuple(
+ alias_body_map[s] if s in need_alias_in_body else s
+ for s in returned_from_cond)
+ returned_from_orelse = tuple(
+ alias_orelse_map[s] if s in need_alias_in_orelse else s
+ for s in returned_from_cond)
+
+ else:
# When the cond would return no value, we leave the cond called without
# results. That in turn should trigger the side effect guards. The
# branch functions will return a dummy value that ensures cond
# actually has some return value as well.
- results = None
- elif len(modified) == 1:
- results = modified[0]
- else:
- results = gast.Tuple([s.ast() for s in modified], None)
+ cond_results = None
+ # TODO(mdan): This doesn't belong here; it's specific to the operator.
+ returned_from_body = templates.replace_as_expression('1')
+ returned_from_orelse = templates.replace_as_expression('1')
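+      # Illustrative result: both branch functions return the constant 1, and
+      # since cond_results is None the cond's return value is discarded.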
body_name = self.ctx.namer.new_symbol('if_true', body_scope.referenced)
orelse_name = self.ctx.namer.new_symbol('if_false', orelse_scope.referenced)
- if modified:
-
- def build_returns(aliased_names, alias_map, scope):
- """Builds list of return variables for a branch of a conditional."""
- returns = []
- for s in modified:
- if s in aliased_names:
- returns.append(alias_map[s])
- else:
- if s not in scope.created | defined:
- raise ValueError(
- 'Attempting to return variable "%s" from the true branch of '
- 'a conditional, but it was not closed over, or created in '
- 'this branch.' % str(s))
- else:
- returns.append(s)
- return tuple(returns)
-
- body_returns = build_returns(aliased_body_orig_names, alias_body_map,
- body_scope)
- orelse_returns = build_returns(aliased_orelse_orig_names,
- alias_orelse_map, orelse_scope)
-
- else:
- body_returns = orelse_returns = templates.replace('tf.ones(())')[0].value
body_def = self._create_cond_branch(
body_name,
- aliased_orig_names=tuple(aliased_body_orig_names),
- aliased_new_names=tuple(aliased_body_new_names),
+ aliased_orig_names=aliased_body_orig_names,
+ aliased_new_names=aliased_body_new_names,
body=node_body,
- returns=body_returns)
+ returns=returned_from_body)
orelse_def = self._create_cond_branch(
orelse_name,
- aliased_orig_names=tuple(aliased_orelse_orig_names),
- aliased_new_names=tuple(aliased_orelse_new_names),
+ aliased_orig_names=aliased_orelse_orig_names,
+ aliased_new_names=aliased_orelse_new_names,
body=node_orelse,
- returns=orelse_returns)
- cond_expr = self._create_cond_expr(results, node.test, body_name,
+ returns=returned_from_orelse)
+ cond_expr = self._create_cond_expr(cond_results, node.test, body_name,
orelse_name)
return body_def + orelse_def + cond_expr
@@ -212,11 +197,11 @@ class ControlFlowTransformer(converter.Base):
def visit_While(self, node):
self.generic_visit(node)
- body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
+ body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
body_closure = body_scope.modified - body_scope.created
all_referenced = body_scope.referenced
- cond_scope = anno.getanno(node, NodeAnno.COND_SCOPE)
+ cond_scope = anno.getanno(node, annos.NodeAnno.COND_SCOPE)
cond_closure = set()
for s in cond_scope.referenced:
for root in s.support_set:
@@ -277,7 +262,7 @@ class ControlFlowTransformer(converter.Base):
def visit_For(self, node):
self.generic_visit(node)
- body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
+ body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
body_closure = body_scope.modified - body_scope.created
all_referenced = body_scope.referenced
@@ -331,7 +316,5 @@ class ControlFlowTransformer(converter.Base):
def transform(node, ctx):
- cfg.run_analyses(node, cfg.Liveness(ctx.info))
- cfg.run_analyses(node, cfg.Defined(ctx.info))
node = ControlFlowTransformer(ctx).visit(node)
return node
diff --git a/tensorflow/contrib/autograph/converters/control_flow_test.py b/tensorflow/contrib/autograph/converters/control_flow_test.py
index 735eb92a0d..6670b8a66f 100644
--- a/tensorflow/contrib/autograph/converters/control_flow_test.py
+++ b/tensorflow/contrib/autograph/converters/control_flow_test.py
@@ -20,16 +20,22 @@ from __future__ import print_function
from tensorflow.contrib.autograph.converters import control_flow
from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
-from tensorflow.python.ops import array_ops
-from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import test
class ControlFlowTest(converter_testing.TestCase):
- def test_simple_while(self):
+ def assertTransformedResult(self, test_fn, inputs, expected):
+ if not isinstance(inputs, tuple):
+ inputs = (inputs,)
+ with self.converted(test_fn, control_flow, {}) as result:
+ with self.test_session() as sess:
+ self.assertEqual(sess.run(result.test_fn(*inputs)), expected)
+
+ def test_while_basic(self):
def test_fn(n):
i = 0
@@ -39,29 +45,18 @@ class ControlFlowTest(converter_testing.TestCase):
i += 1
return s, i, n
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- with self.test_session() as sess:
- self.assertEqual((10, 5, 5),
- sess.run(result.test_fn(constant_op.constant(5))))
+ self.assertTransformedResult(test_fn, constant_op.constant(5), (10, 5, 5))
- def test_while_single_var(self):
+ def test_while_single_output(self):
def test_fn(n):
while n > 0:
n -= 1
return n
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
+ self.assertTransformedResult(test_fn, constant_op.constant(5), 0)
- with self.compiled(node) as result:
- with self.test_session() as sess:
- self.assertEqual(0, sess.run(result.test_fn(constant_op.constant(5))))
-
- def test_simple_if(self):
+ def test_if_basic(self):
def test_fn(n):
a = 0
@@ -72,114 +67,85 @@ class ControlFlowTest(converter_testing.TestCase):
b = 2 * n
return a, b
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
+ self.assertTransformedResult(test_fn, constant_op.constant(1), (-1, 0))
+ self.assertTransformedResult(test_fn, constant_op.constant(-1), (0, -2))
+
+ def test_if_complex_outputs(self):
+
+ class TestClass(object):
- with self.compiled(node) as result:
+ def __init__(self, a, b):
+ self.a = a
+ self.b = b
+
+ def test_fn(n, obj):
+ obj.a = 0
+ obj.b = 0
+ if n > 0:
+ obj.a = -n
+ else:
+ obj.b = 2 * n
+ return obj
+
+ with self.converted(test_fn, control_flow, {}) as result:
with self.test_session() as sess:
- self.assertEqual((-1, 0),
- sess.run(result.test_fn(constant_op.constant(1))))
- self.assertEqual((0, -2),
- sess.run(result.test_fn(constant_op.constant(-1))))
+ res_obj = result.test_fn(constant_op.constant(1), TestClass(0, 0))
+ self.assertEqual(sess.run((res_obj.a, res_obj.b)), (-1, 0))
+ res_obj = result.test_fn(constant_op.constant(-1), TestClass(0, 0))
+ self.assertEqual(sess.run((res_obj.a, res_obj.b)), (0, -2))
- def test_if_single_var(self):
+ def test_if_single_output(self):
def test_fn(n):
if n > 0:
n = -n
return n
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
+ self.assertTransformedResult(test_fn, constant_op.constant(1), -1)
- with self.compiled(node) as result:
- with self.test_session() as sess:
- self.assertEqual(-1, sess.run(result.test_fn(constant_op.constant(1))))
-
- def test_imbalanced_aliasing(self):
+ def test_if_semi(self):
def test_fn(n):
if n > 0:
n = 3
return n
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
-
- with self.compiled(node, control_flow_ops.cond) as result:
- with self.test_session() as sess:
- self.assertEqual(3, sess.run(result.test_fn(constant_op.constant(2))))
- self.assertEqual(-3, sess.run(result.test_fn(constant_op.constant(-3))))
+ self.assertTransformedResult(test_fn, constant_op.constant(2), 3)
+ self.assertTransformedResult(test_fn, constant_op.constant(-3), -3)
- def test_ignore_unread_variable(self):
+ def test_if_local_var(self):
def test_fn(n):
- b = 3 # pylint: disable=unused-variable
if n > 0:
b = 4
+ n = b + 1
return n
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
+ self.assertTransformedResult(test_fn, constant_op.constant(1), 5)
+ self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)
- with self.compiled(node, control_flow_ops.cond, array_ops.ones) as result:
- with self.test_session() as sess:
- self.assertEqual(3, sess.run(result.test_fn(constant_op.constant(3))))
- self.assertEqual(-3, sess.run(result.test_fn(constant_op.constant(-3))))
+ def test_if_no_outputs(self):
- def test_handle_temp_variable(self):
+ def test_fn(n):
+ if n > 0:
+ b = 4 # pylint:disable=unused-variable
+ return n
- def test_fn_using_temp(x, y, w):
- if x < y:
- z = x + y
- else:
- w = 2
- tmp = w
- z = x - tmp
- return z, w
+ # Without side effect guards, the if statement will stage a cond,
+ # but that will be pruned at execution.
+ self.assertTransformedResult(test_fn, constant_op.constant(1), 1)
+ self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)
- node = self.parse_and_analyze(test_fn_using_temp, {})
- node = control_flow.transform(node, self.ctx)
+ def test_if_imbalanced_outputs(self):
- with self.compiled(node, control_flow_ops.cond, array_ops.ones) as result:
- with self.test_session() as sess:
- z, w = sess.run(
- result.test_fn_using_temp(
- constant_op.constant(-3), constant_op.constant(3),
- constant_op.constant(3)))
- self.assertEqual(0, z)
- self.assertEqual(3, w)
- z, w = sess.run(
- result.test_fn_using_temp(
- constant_op.constant(3), constant_op.constant(-3),
- constant_op.constant(3)))
- self.assertEqual(1, z)
- self.assertEqual(2, w)
-
- def test_fn_ignoring_temp(x, y, w):
- if x < y:
- z = x + y
- else:
- w = 2
- tmp = w
- z = x - tmp
- return z
+ def test_fn(n):
+ if n > 0:
+ b = 4
+ return b
- node = self.parse_and_analyze(test_fn_ignoring_temp, {})
- node = control_flow.transform(node, self.ctx)
-
- with self.compiled(node, control_flow_ops.cond, array_ops.ones) as result:
- with self.test_session() as sess:
- z = sess.run(
- result.test_fn_ignoring_temp(
- constant_op.constant(-3), constant_op.constant(3),
- constant_op.constant(3)))
- self.assertEqual(0, z)
- z = sess.run(
- result.test_fn_ignoring_temp(
- constant_op.constant(3), constant_op.constant(-3),
- constant_op.constant(3)))
- self.assertEqual(1, z)
+ node, ctx = self.prepare(test_fn, {})
+ with self.assertRaises(transformer.AutographParseError):
+ control_flow.transform(node, ctx)
def test_simple_for(self):
@@ -191,22 +157,11 @@ class ControlFlowTest(converter_testing.TestCase):
s2 += e * e
return s1, s2
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
+ self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), (4, 10))
+ empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
+ self.assertTransformedResult(test_fn, empty_vector, (0, 0))
- with self.compiled(node) as result:
- with self.test_session() as sess:
- l = [1, 2, 3]
- self.assertEqual(
- test_fn(l), sess.run(result.test_fn(constant_op.constant(l))))
- l = []
- self.assertEqual(
- test_fn(l),
- sess.run(
- result.test_fn(
- constant_op.constant(l, shape=(0,), dtype=dtypes.int32))))
-
- def test_for_single_var(self):
+ def test_for_single_output(self):
def test_fn(l):
s = 0
@@ -214,22 +169,11 @@ class ControlFlowTest(converter_testing.TestCase):
s += e
return s
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
+ self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), 4)
+ empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
+ self.assertTransformedResult(test_fn, empty_vector, 0)
- with self.compiled(node) as result:
- with self.test_session() as sess:
- l = [1, 2, 3]
- self.assertEqual(
- test_fn(l), sess.run(result.test_fn(constant_op.constant(l))))
- l = []
- self.assertEqual(
- test_fn(l),
- sess.run(
- result.test_fn(
- constant_op.constant(l, shape=(0,), dtype=dtypes.int32))))
-
- def test_for_with_iterated_expression(self):
+ def test_for_iterated_expression(self):
eval_count = [0]
@@ -243,14 +187,13 @@ class ControlFlowTest(converter_testing.TestCase):
s += e
return s
- node = self.parse_and_analyze(test_fn, {'count_evals': count_evals})
- node = control_flow.transform(node, self.ctx)
+ ns = {'count_evals': count_evals}
+ node, ctx = self.prepare(test_fn, ns)
+ node = control_flow.transform(node, ctx)
- with self.compiled(node) as result:
- result.count_evals = count_evals
- self.assertEqual(test_fn(5), result.test_fn(5))
- # count_evals ran twice, once for test_fn and another for result.test_fn
- self.assertEqual(eval_count[0], 2)
+ with self.compiled(node, ns) as result:
+ self.assertEqual(result.test_fn(5), 10)
+ self.assertEqual(eval_count[0], 1)
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/decorators_test.py b/tensorflow/contrib/autograph/converters/decorators_test.py
index d41c7fde24..095abc5edc 100644
--- a/tensorflow/contrib/autograph/converters/decorators_test.py
+++ b/tensorflow/contrib/autograph/converters/decorators_test.py
@@ -61,13 +61,13 @@ class DecoratorsTest(converter_testing.TestCase):
'simple_decorator': simple_decorator,
'converter_testing': converter_testing,
}
- node = self.parse_and_analyze(
+ node, ctx = self.prepare(
f,
namespace,
recursive=False,
autograph_decorators=autograph_decorators)
- node = decorators.transform(node, self.ctx)
- import_line = '\n'.join(self.ctx.program.additional_imports)
+ node = decorators.transform(node, ctx)
+ import_line = '\n'.join(ctx.program.additional_imports)
result, _ = compiler.ast_to_object(node, source_prefix=import_line)
return getattr(result, f.__name__)
@@ -76,11 +76,8 @@ class DecoratorsTest(converter_testing.TestCase):
def test_fn(a):
return a
- node = self.parse_and_analyze(test_fn, {})
- node = decorators.transform(node, self.ctx)
- result, _ = compiler.ast_to_object(node)
-
- self.assertEqual(1, result.test_fn(1))
+ with self.converted(test_fn, decorators, {}) as result:
+ self.assertEqual(1, result.test_fn(1))
def test_function(self):
@@ -124,7 +121,7 @@ class DecoratorsTest(converter_testing.TestCase):
return b + 11
return inner_fn(a)
- # Expected to fail because simple_decorator cannot be imported.
+ # Expected to fail because simple_decorator could not be imported.
with self.assertRaises(transformer.AutographParseError):
test_fn(1)
diff --git a/tensorflow/contrib/autograph/converters/directives.py b/tensorflow/contrib/autograph/converters/directives.py
new file mode 100644
index 0000000000..ccdf79d47b
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/directives.py
@@ -0,0 +1,108 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Handles directives.
+
+This converter removes the directive functions from the code and moves the
+information they specify into AST annotations. It is a specialized form of
+static analysis, one that is specific to AutoGraph.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gast
+
+from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.lang import directives
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.python.util import tf_inspect
+
+ENCLOSING_LOOP = 'enclosing_loop'
+
+
+def _map_args(call_node, function):
+ """Maps AST call nodes to the actual function's arguments.
+
+ Args:
+ call_node: ast.Call
+ function: Callable[..., Any], the actual function matching call_node
+ Returns:
+ Dict[Text, ast.AST], mapping each of the function's argument names to
+ the respective AST node.
+ """
+ args = call_node.args
+ kwds = {kwd.arg: kwd.value for kwd in call_node.keywords}
+ return tf_inspect.getcallargs(function, *args, **kwds)
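+#
+# Example (illustrative): for a call node representing
+# `set_loop_options(parallel_iterations=10)`, _map_args returns a dict that
+# maps 'parallel_iterations' to the AST node for `10`, with any unspecified
+# arguments mapped to their declared defaults.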
+
+
+class DirectivesTransformer(converter.Base):
+ """Parses compiler directives and converts them into AST annotations."""
+
+ def _process_symbol_directive(self, call_node, directive):
+ if len(call_node.args) < 1:
+ raise ValueError('"%s" requires a positional first argument'
+ ' as the target' % directive.__name__)
+ target = call_node.args[0]
+ defs = anno.getanno(target, anno.Static.ORIG_DEFINITIONS)
+ for def_ in defs:
+ def_.directives[directive] = _map_args(call_node, directive)
+ return call_node
+
+ def _process_statement_directive(self, call_node, directive):
+ if self.local_scope_level < 1:
+ raise ValueError(
+ '"%s" must be used inside a statement' % directive.__name__)
+ target = self.get_local(ENCLOSING_LOOP)
+ node_anno = anno.getanno(target, converter.AgAnno.DIRECTIVES, {})
+ node_anno[directive] = _map_args(call_node, directive)
+ anno.setanno(target, converter.AgAnno.DIRECTIVES, node_anno)
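+    # Illustrative: `set_loop_options(parallel_iterations=10)` inside a while
+    # loop records {'parallel_iterations': <AST node for 10>, ...} against
+    # set_loop_options in the enclosing While node's DIRECTIVES annotation.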
+ return call_node
+
+ def visit_Expr(self, node):
+ if isinstance(node.value, gast.Call):
+ call_node = node.value
+ if anno.hasanno(call_node.func, 'live_val'):
+ live_val = anno.getanno(call_node.func, 'live_val')
+
+ if live_val is directives.set_element_type:
+ call_node = self._process_symbol_directive(call_node, live_val)
+ elif live_val is directives.set_loop_options:
+ call_node = self._process_statement_directive(call_node, live_val)
+ else:
+ return self.generic_visit(node)
+
+ return None # Directive calls are not output in the generated code.
+ return self.generic_visit(node)
+
+ # TODO(mdan): This will be insufficient for other control flow.
+  # If we ever have a directive that affects things other than loops, we will
+  # need support for parallel scopes, or multiple converters.
+ def _track_and_visit_loop(self, node):
+ self.enter_local_scope()
+ self.set_local(ENCLOSING_LOOP, node)
+ node = self.generic_visit(node)
+ self.exit_local_scope()
+ return node
+
+ def visit_While(self, node):
+ return self._track_and_visit_loop(node)
+
+ def visit_For(self, node):
+ return self._track_and_visit_loop(node)
+
+
+def transform(node, ctx):
+ return DirectivesTransformer(ctx).visit(node)
diff --git a/tensorflow/contrib/autograph/converters/directives_test.py b/tensorflow/contrib/autograph/converters/directives_test.py
new file mode 100644
index 0000000000..5f798a5b76
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/directives_test.py
@@ -0,0 +1,78 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for directives module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.converters import directives as directives_converter
+from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.core.converter import AgAnno
+from tensorflow.contrib.autograph.lang import directives
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.python.platform import test
+
+
+class DirectivesTest(converter_testing.TestCase):
+
+ def test_local_target(self):
+
+ def test_fn():
+ l = []
+ string_var = 0
+ directives.set_element_type(l, 'a', string_var)
+
+ node, ctx = self.prepare(test_fn, {'directives': directives})
+ node = directives_converter.transform(node, ctx)
+
+ def_, = anno.getanno(node.body[0].body[0].targets[0],
+ anno.Static.DEFINITIONS)
+ d = def_.directives[directives.set_element_type]
+ self.assertEqual(d['dtype'].s, 'a')
+ self.assertEqual(d['shape'].id, 'string_var')
+
+ def test_argument_target(self):
+
+ def test_fn(a):
+ directives.set_element_type(a, 1, shape=2)
+
+ node, ctx = self.prepare(test_fn, {'directives': directives})
+ node = directives_converter.transform(node, ctx)
+
+ def_, = anno.getanno(node.body[0].args.args[0], anno.Static.DEFINITIONS)
+ d = def_.directives[directives.set_element_type]
+ self.assertEqual(d['dtype'].n, 1)
+ self.assertEqual(d['shape'].n, 2)
+
+ def test_loop_target(self):
+
+ def test_fn():
+ a = True
+ while True:
+ directives.set_loop_options(parallel_iterations=10, back_prop=a)
+
+ node, ctx = self.prepare(test_fn, {'directives': directives})
+ node = directives_converter.transform(node, ctx)
+
+ d = anno.getanno(node.body[0].body[1], AgAnno.DIRECTIVES)
+ d = d[directives.set_loop_options]
+ self.assertEqual(d['parallel_iterations'].n, 10)
+ self.assertEqual(d['back_prop'].id, 'a')
+ self.assertEqual(d['swap_memory'], directives.UNSPECIFIED)
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/autograph/converters/error_handlers.py b/tensorflow/contrib/autograph/converters/error_handlers.py
new file mode 100644
index 0000000000..3f23662152
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/error_handlers.py
@@ -0,0 +1,52 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Wraps function bodies with a try/except to rewrite error tracebacks.
+
+Only adds try/except wrappers to functions that have the anno.Basic.ORIGIN
+annotation because these are the functions originally written by the user.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import templates
+
+
+class ErrorRewritingTransformer(converter.Base):
+ """Possibly wraps the body of a function in a try/except.
+
+ Only wraps functions that were originally defined by the user, detected by
+ checking for the anno.Basic.ORIGIN annotation.
+ """
+
+ def visit_FunctionDef(self, node):
+ node = self.generic_visit(node)
+
+ if anno.hasanno(node, anno.Basic.ORIGIN):
+ template = """
+ try:
+ body
+ except:
+ ag__.rewrite_graph_construction_error(ag_source_map__)
+ """
+ node.body = templates.replace(template, body=node.body)
+ return node
+
+
+def transform(node, ctx):
+ return ErrorRewritingTransformer(ctx).visit(node)
diff --git a/tensorflow/contrib/autograph/converters/error_handlers_test.py b/tensorflow/contrib/autograph/converters/error_handlers_test.py
new file mode 100644
index 0000000000..878526c8b4
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/error_handlers_test.py
@@ -0,0 +1,55 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for error_handlers module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.converters import error_handlers
+from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.core import errors
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import origin_info
+from tensorflow.python.platform import test
+
+
+class ErrorHandlersTest(converter_testing.TestCase):
+
+ def test_basic(self):
+
+ def test_fn():
+ raise ValueError()
+
+ node, ctx = self.prepare(test_fn, {})
+ anno.setanno(node.body[0], anno.Basic.ORIGIN,
+ origin_info.OriginInfo('test_path', None, None, None, None))
+ node = error_handlers.transform(node, ctx)
+ with self.compiled(node, {}) as result:
+ with self.assertRaises(errors.GraphConstructionError):
+ result.test_fn()
+
+ def test_no_origin_annotation(self):
+
+ def test_fn():
+ raise ValueError()
+
+ with self.converted(test_fn, error_handlers, {}) as result:
+ with self.assertRaises(ValueError):
+ result.test_fn()
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/autograph/converters/ifexp.py b/tensorflow/contrib/autograph/converters/ifexp.py
deleted file mode 100644
index e996138498..0000000000
--- a/tensorflow/contrib/autograph/converters/ifexp.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Canonicalizes the ternary conditional operator."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from tensorflow.contrib.autograph.core import converter
-from tensorflow.contrib.autograph.pyct import templates
-
-
-class IfExp(converter.Base):
- """Canonicalizes all IfExp nodes into plain conditionals."""
-
- def visit_IfExp(self, node):
- template = """
- ag__.utils.run_cond(test, lambda: (body,), lambda: (orelse,))
- """
- desugared_ifexp = templates.replace_as_expression(
- template, test=node.test, body=node.body, orelse=node.orelse)
- return desugared_ifexp
-
-
-def transform(node, ctx):
- """Desugar IfExp nodes into plain conditionals.
-
- Args:
- node: ast.AST, the node to transform
- ctx: converter.EntityContext
-
- Returns:
- new_node: an AST with no IfExp nodes, only conditionals.
- """
-
- node = IfExp(ctx).visit(node)
- return node
diff --git a/tensorflow/contrib/autograph/converters/ifexp_test.py b/tensorflow/contrib/autograph/converters/ifexp_test.py
deleted file mode 100644
index cdd5a2f591..0000000000
--- a/tensorflow/contrib/autograph/converters/ifexp_test.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for ifexp module."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from tensorflow.contrib.autograph import utils
-from tensorflow.contrib.autograph.converters import ifexp
-from tensorflow.contrib.autograph.core import converter_testing
-from tensorflow.python.platform import test
-
-
-class IfExpTest(converter_testing.TestCase):
-
- def compiled_fn(self, test_fn, *args):
- node = self.parse_and_analyze(test_fn, {})
- node = ifexp.transform(node, self.ctx)
- module = self.compiled(node, *args)
- return module
-
- def test_simple(self):
-
- def test_fn(x):
- return 1 if x else 0
-
- with self.compiled_fn(test_fn) as result:
- result.autograph_util = utils
- for x in [0, 1]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_fn(self):
-
- def f(x):
- return 3 * x
-
- def test_fn(x):
- y = f(x * x if x > 0 else x)
- return y
-
- with self.compiled_fn(test_fn) as result:
- result.autograph_util = utils
- result.f = f
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_exp(self):
-
- def test_fn(x):
- return x * x if x > 0 else x
-
- with self.compiled_fn(test_fn) as result:
- result.autograph_util = utils
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_nested(self):
-
- def test_fn(x):
- return x * x if x > 0 else x if x else 1
-
- with self.compiled_fn(test_fn) as result:
- result.autograph_util = utils
- for x in [-2, 0, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_in_cond(self):
-
- def test_fn(x):
- if x > 0:
- return x * x if x < 5 else x * x * x
- return -x
-
- with self.compiled_fn(test_fn) as result:
- result.autograph_util = utils
- for x in [-2, 2, 5]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_assign_in_cond(self):
-
- def test_fn(x):
- if x > 0:
- x = -x if x < 5 else x
- return x
-
- with self.compiled_fn(test_fn) as result:
- result.autograph_util = utils
- for x in [-2, 2, 5]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
-
-if __name__ == '__main__':
- test.main()
diff --git a/tensorflow/contrib/autograph/converters/list_comprehension.py b/tensorflow/contrib/autograph/converters/list_comprehension.py
deleted file mode 100644
index c4a13ee822..0000000000
--- a/tensorflow/contrib/autograph/converters/list_comprehension.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Canonicalizing list comprehensions into for and if statements.
-
-e.g.
-result = [x * x for x in xs]
-
-becomes
-
-result = []
-for x in xs:
- elt = x * x
- result.append(elt)
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import gast
-
-from tensorflow.contrib.autograph.core import converter
-from tensorflow.contrib.autograph.pyct import parser
-from tensorflow.contrib.autograph.pyct import templates
-
-
-class ListCompCanonicalizationTransformer(converter.Base):
- """NodeTransformer to canonicalize list comprehensions."""
-
- def make_update_list_node(self, list_, elt):
- return templates.replace('list_.append(elt)', list_=list_, elt=elt)[0]
-
- def instantiate_list_node(self):
- return parser.parse_str('[]').body[0].value
-
- def visit_Assign(self, node):
- if not isinstance(node.value, gast.ListComp):
- return node
- if len(node.targets) > 1:
- raise ValueError('Only support single assignment.')
- return self.canonicalize_listcomp(node.targets[0], node.value)
-
- def canonicalize_listcomp(self, result_node, list_comp_node):
-
- make_list = templates.replace(
- 'list_ = create_list',
- list_=result_node,
- create_list=self.instantiate_list_node())
- loop_body = self.make_update_list_node(result_node, list_comp_node.elt)
-
- for gen in reversed(list_comp_node.generators):
- for gen_if in reversed(gen.ifs):
- loop_body = templates.replace(
- 'if test: loop_body', test=gen_if, loop_body=loop_body)
- loop_body = templates.replace(
- 'for target in iter_: loop_body',
- iter_=gen.iter,
- target=gen.target,
- loop_body=loop_body)
-
- return make_list + loop_body
-
-
-def transform(node, ctx):
- return ListCompCanonicalizationTransformer(ctx).visit(node)
diff --git a/tensorflow/contrib/autograph/converters/list_comprehensions.py b/tensorflow/contrib/autograph/converters/list_comprehensions.py
new file mode 100644
index 0000000000..ecf4628816
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/list_comprehensions.py
@@ -0,0 +1,82 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Lowers list comprehensions into for and if statements.
+
+Example:
+
+ result = [x * x for x in xs]
+
+becomes
+
+ result = []
+ for x in xs:
+ elt = x * x
+ result.append(elt)
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gast
+
+from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.pyct import templates
+
+
+# TODO(mdan): This should convert directly to operator calls.
+
+
+class ListCompTransformer(converter.Base):
+ """Lowers list comprehensions into standard control flow."""
+
+ def visit_Assign(self, node):
+ if not isinstance(node.value, gast.ListComp):
+ return self.generic_visit(node)
+ if len(node.targets) > 1:
+ raise NotImplementedError('multiple assignments')
+
+ target, = node.targets
+ list_comp_node = node.value
+
+ template = """
+ target = []
+ """
+ initialization = templates.replace(template, target=target)
+
+ template = """
+ target.append(elt)
+ """
+ body = templates.replace(template, target=target, elt=list_comp_node.elt)
+
+ for gen in reversed(list_comp_node.generators):
+ for gen_if in reversed(gen.ifs):
+ template = """
+ if test:
+ body
+ """
+ body = templates.replace(template, test=gen_if, body=body)
+ template = """
+ for target in iter_:
+ body
+ """
+ body = templates.replace(
+ template, iter_=gen.iter, target=gen.target, body=body)
+
+ return initialization + body
+
+
+def transform(node, ctx):
+ return ListCompTransformer(ctx).visit(node)
diff --git a/tensorflow/contrib/autograph/converters/list_comprehension_test.py b/tensorflow/contrib/autograph/converters/list_comprehensions_test.py
index 2bbee93412..59b5ce9ca0 100644
--- a/tensorflow/contrib/autograph/converters/list_comprehension_test.py
+++ b/tensorflow/contrib/autograph/converters/list_comprehensions_test.py
@@ -12,33 +12,31 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Tests for list_comprehension module."""
+"""Tests for list_comprehensions module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.contrib.autograph.converters import list_comprehension
+from tensorflow.contrib.autograph.converters import list_comprehensions
from tensorflow.contrib.autograph.core import converter_testing
from tensorflow.python.platform import test
class ListCompTest(converter_testing.TestCase):
+ def assertTransformedEquivalent(self, test_fn, *inputs):
+ with self.converted(test_fn, list_comprehensions, {}) as result:
+ self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
def test_basic(self):
def test_fn(l):
s = [e * e for e in l]
return s
- node = self.parse_and_analyze(test_fn, {})
- node = list_comprehension.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- l = [1, 2, 3]
- self.assertEqual(test_fn(l), result.test_fn(l))
- l = []
- self.assertEqual(test_fn(l), result.test_fn(l))
+ self.assertTransformedEquivalent(test_fn, [])
+ self.assertTransformedEquivalent(test_fn, [1, 2, 3])
def test_multiple_generators(self):
@@ -46,29 +44,17 @@ class ListCompTest(converter_testing.TestCase):
s = [e * e for sublist in l for e in sublist]
return s
- node = self.parse_and_analyze(test_fn, {})
- node = list_comprehension.transform(node, self.ctx)
+ self.assertTransformedEquivalent(test_fn, [])
+ self.assertTransformedEquivalent(test_fn, [[1], [2], [3]])
- with self.compiled(node) as result:
- l = [[1], [2], [3]]
- self.assertEqual(test_fn(l), result.test_fn(l))
- l = []
- self.assertEqual(test_fn(l), result.test_fn(l))
-
- def test_conds(self):
+ def test_cond(self):
def test_fn(l):
s = [e * e for e in l if e > 1]
return s
- node = self.parse_and_analyze(test_fn, {})
- node = list_comprehension.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- l = [1, 2, 3]
- self.assertEqual(test_fn(l), result.test_fn(l))
- l = []
- self.assertEqual(test_fn(l), result.test_fn(l))
+ self.assertTransformedEquivalent(test_fn, [])
+ self.assertTransformedEquivalent(test_fn, [1, 2, 3])
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/lists.py b/tensorflow/contrib/autograph/converters/lists.py
index d77a044798..a02fc827b8 100644
--- a/tensorflow/contrib/autograph/converters/lists.py
+++ b/tensorflow/contrib/autograph/converters/lists.py
@@ -33,6 +33,7 @@ from __future__ import print_function
import gast
from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.lang import directives
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import templates
@@ -88,12 +89,12 @@ class ListTransformer(converter.Base):
scope = anno.getanno(node, NodeAnno.ARGS_SCOPE)
target_node = node.func.value
- # Attempt to use a related name if can get one. Otherwise use something
+ # Attempt to use a related name if one exists. Otherwise use something
# generic.
if anno.hasanno(target_node, anno.Basic.QN):
target_name = anno.getanno(target_node, anno.Basic.QN).ssf()
else:
- target_name = 'list'
+ target_name = 'list_'
pop_var_name = self.ctx.namer.new_symbol(target_name, scope.referenced)
pop_uses = self.get_local(POP_USES, [])
@@ -104,9 +105,10 @@ class ListTransformer(converter.Base):
def _replace_stack_call(self, node):
assert len(node.args) == 1
- dtype = anno.getanno(
+ dtype = self.get_definition_directive(
node.args[0],
- 'element_type',
+ directives.set_element_type,
+ 'dtype',
default=templates.replace_as_expression('None'))
template = """
ag__.list_stack(
@@ -134,7 +136,10 @@ class ListTransformer(converter.Base):
node = self._replace_append_call(node)
elif func_name == 'pop' and (len(node.args) <= 1):
node = self._replace_pop_call(node)
- elif func_name == 'stack' and (len(node.args) == 1):
+ elif (func_name == 'stack' and (len(node.args) == 1) and
+ (not node.keywords or node.keywords[0].arg == 'strict')):
+ # This avoids false positives with keyword args.
+ # TODO(mdan): handle kwargs properly.
node = self._replace_stack_call(node)
return node
@@ -146,15 +151,22 @@ class ListTransformer(converter.Base):
pop_element = original_call_node.args[0]
else:
pop_element = parser.parse_expression('None')
+
# The call will be something like "target.pop()", and the dtype is hooked to
# target, hence the func.value.
- dtype = anno.getanno(
+ # TODO(mdan): For lists of lists, this won't work.
+ # The reason why it won't work is because it's unclear how to annotate
+ # the list as a "list of lists with a certain element type" when using
+ # operations like `l.pop().pop()`.
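+  # Example (illustrative): for `s = l.pop()`, the set_element_type directive
+  # attached to the definition of `l` supplies the dtype and shape used to
+  # reconstruct the popped element.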
+ dtype = self.get_definition_directive(
original_call_node.func.value,
- 'element_type',
+ directives.set_element_type,
+ 'dtype',
default=templates.replace_as_expression('None'))
- shape = anno.getanno(
+ shape = self.get_definition_directive(
original_call_node.func.value,
- 'element_shape',
+ directives.set_element_type,
+ 'shape',
default=templates.replace_as_expression('None'))
template = """
diff --git a/tensorflow/contrib/autograph/converters/lists_test.py b/tensorflow/contrib/autograph/converters/lists_test.py
index ea04097b28..447a88bbe2 100644
--- a/tensorflow/contrib/autograph/converters/lists_test.py
+++ b/tensorflow/contrib/autograph/converters/lists_test.py
@@ -18,9 +18,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.contrib.autograph import utils
from tensorflow.contrib.autograph.converters import lists
from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.lang import directives
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import parser
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
@@ -28,6 +30,9 @@ from tensorflow.python.ops import list_ops
from tensorflow.python.platform import test
+tf = None # Will be replaced by a mock.
+
+
class ListTest(converter_testing.TestCase):
def test_empty_list(self):
@@ -35,10 +40,7 @@ class ListTest(converter_testing.TestCase):
def test_fn():
return []
- node = self.parse_and_analyze(test_fn, {})
- node = lists.transform(node, self.ctx)
-
- with self.compiled(node) as result:
+ with self.converted(test_fn, lists, {}) as result:
tl = result.test_fn()
# Empty tensor lists cannot be evaluated or stacked.
self.assertTrue(isinstance(tl, ops.Tensor))
@@ -49,10 +51,7 @@ class ListTest(converter_testing.TestCase):
def test_fn():
return [1, 2, 3]
- node = self.parse_and_analyze(test_fn, {})
- node = lists.transform(node, self.ctx)
-
- with self.compiled(node) as result:
+ with self.converted(test_fn, lists, {}) as result:
with self.test_session() as sess:
tl = result.test_fn()
r = list_ops.tensor_list_stack(tl, dtypes.int32)
@@ -66,10 +65,7 @@ class ListTest(converter_testing.TestCase):
l.append(3)
return l
- node = self.parse_and_analyze(test_fn, {})
- node = lists.transform(node, self.ctx)
-
- with self.compiled(node) as result:
+ with self.converted(test_fn, lists, {}) as result:
with self.test_session() as sess:
tl = result.test_fn()
r = list_ops.tensor_list_stack(tl, dtypes.int32)
@@ -79,23 +75,19 @@ class ListTest(converter_testing.TestCase):
def test_fn():
l = [1, 2, 3]
- utils.set_element_type(l, dtypes.int32, ())
s = l.pop()
return s, l
- node = self.parse_and_analyze(
- test_fn,
- {
- 'utils': utils,
- 'dtypes': dtypes
- },
- include_type_analysis=True,
- )
- node = lists.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- result.utils = utils
- result.dtypes = dtypes
+ node, ctx = self.prepare(test_fn, {})
+ def_, = anno.getanno(node.body[0].body[0].targets[0],
+ anno.Static.ORIG_DEFINITIONS)
+ def_.directives[directives.set_element_type] = {
+ 'dtype': parser.parse_expression('tf.int32'),
+ 'shape': parser.parse_expression('()'),
+ }
+ node = lists.transform(node, ctx)
+
+ with self.compiled(node, {}, dtypes.int32) as result:
with self.test_session() as sess:
ts, tl = result.test_fn()
r = list_ops.tensor_list_stack(tl, dtypes.int32)
@@ -108,10 +100,7 @@ class ListTest(converter_testing.TestCase):
s = l.pop().pop()
return s
- node = self.parse_and_analyze(test_fn, {})
- node = lists.transform(node, self.ctx)
-
- with self.compiled(node) as result:
+ with self.converted(test_fn, lists, {}) as result:
test_input = [1, 2, [1, 2, 3]]
      # TODO(mdan): Pass a list of lists of tensors when we fully support that.
      # For now, we pass a regular Python list of lists just to verify that
@@ -120,29 +109,24 @@ class ListTest(converter_testing.TestCase):
def test_list_stack(self):
- tf = None # Will be replaced with a mock.
-
def test_fn():
l = [1, 2, 3]
- utils.set_element_type(l, dtypes.int32)
return tf.stack(l)
- node = self.parse_and_analyze(
- test_fn,
- {
- 'utils': utils,
- 'dtypes': dtypes
- },
- include_type_analysis=True,
- )
- node = lists.transform(node, self.ctx)
-
- with self.compiled(node, array_ops.stack, dtypes.int32) as result:
- result.utils = utils
- result.dtypes = dtypes
+ node, ctx = self.prepare(test_fn, {})
+ def_, = anno.getanno(node.body[0].body[0].targets[0],
+ anno.Static.ORIG_DEFINITIONS)
+ def_.directives[directives.set_element_type] = {
+ 'dtype': parser.parse_expression('tf.int32')
+ }
+ node = lists.transform(node, ctx)
+
+ with self.compiled(node, {}, array_ops.stack, dtypes.int32) as result:
with self.test_session() as sess:
self.assertAllEqual(sess.run(result.test_fn()), [1, 2, 3])
+ # TODO(mdan): Add a test with tf.stack with axis kwarg.
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/autograph/converters/logical_expressions_test.py b/tensorflow/contrib/autograph/converters/logical_expressions_test.py
index 48186024a9..ca07de5e8a 100644
--- a/tensorflow/contrib/autograph/converters/logical_expressions_test.py
+++ b/tensorflow/contrib/autograph/converters/logical_expressions_test.py
@@ -31,10 +31,8 @@ class GradientsFunctionTest(converter_testing.TestCase):
def test_fn(a, b):
return a == b
- node = self.parse_and_analyze(test_fn, {})
- node = logical_expressions.transform(node, self.ctx)
-
- with self.compiled(node, math_ops.equal) as result:
+ with self.converted(test_fn, logical_expressions, {},
+ math_ops.equal) as result:
with self.test_session() as sess:
self.assertTrue(sess.run(result.test_fn(1, 1)))
self.assertFalse(sess.run(result.test_fn(1, 2)))
@@ -44,11 +42,8 @@ class GradientsFunctionTest(converter_testing.TestCase):
def test_fn(a, b, c):
return (a or b) and (a or b or c)
- node = self.parse_and_analyze(test_fn, {})
- node = logical_expressions.transform(node, self.ctx)
-
- with self.compiled(node, math_ops.logical_or,
- math_ops.logical_and) as result:
+ with self.converted(test_fn, logical_expressions, {}, math_ops.logical_or,
+ math_ops.logical_and) as result:
with self.test_session() as sess:
self.assertTrue(sess.run(result.test_fn(True, False, True)))
diff --git a/tensorflow/contrib/autograph/converters/name_scopes_test.py b/tensorflow/contrib/autograph/converters/name_scopes_test.py
index 444d0bcd46..a329b0db70 100644
--- a/tensorflow/contrib/autograph/converters/name_scopes_test.py
+++ b/tensorflow/contrib/autograph/converters/name_scopes_test.py
@@ -31,17 +31,13 @@ class FunctionNameScopeTransformer(converter_testing.TestCase):
def test_fn(l):
"""This should stay here."""
- a = 5
+ a = 1
l += a
return l
- node = self.parse_and_analyze(test_fn, {})
- node = name_scopes.transform(node, self.ctx)
-
- with self.compiled(node, ops.name_scope) as result:
+ with self.converted(test_fn, name_scopes, {}, ops.name_scope) as result:
result_op = result.test_fn(constant_op.constant(1))
self.assertIn('test_fn/', result_op.op.name)
-
self.assertEqual('This should stay here.', result.test_fn.__doc__)
def test_long_docstring(self):
@@ -54,13 +50,12 @@ class FunctionNameScopeTransformer(converter_testing.TestCase):
Returns:
l
"""
- return l
-
- node = self.parse_and_analyze(test_fn, {})
- node = name_scopes.transform(node, self.ctx)
+ return l + 1
- with self.compiled(node, ops.name_scope) as result:
- self.assertIn('Multi-line', result.test_fn.__doc__)
+ with self.converted(test_fn, name_scopes, {}, ops.name_scope) as result:
+ result_op = result.test_fn(constant_op.constant(1))
+ self.assertIn('test_fn/', result_op.op.name)
+ self.assertIn('Multi-line docstring.', result.test_fn.__doc__)
self.assertIn('Returns:', result.test_fn.__doc__)
def test_nested_functions(self):
@@ -68,21 +63,16 @@ class FunctionNameScopeTransformer(converter_testing.TestCase):
def test_fn(l):
def inner_fn(i):
- return i ** 2
-
- l += 4
- return inner_fn(l)
+ return i + 1
- node = self.parse_and_analyze(test_fn, {})
- node = name_scopes.transform(node, self.ctx)
+ l += 1
+ return l, inner_fn(l)
- with self.compiled(node, ops.name_scope) as result:
- result_op = result.test_fn(constant_op.constant(1))
- first_result_input_name = result_op.op.inputs[0].name
- second_result_input_name = result_op.op.inputs[1].name
- self.assertIn('test_fn/', first_result_input_name)
- self.assertNotIn('inner_fn', first_result_input_name)
- self.assertIn('test_fn/inner_fn/', second_result_input_name)
+ with self.converted(test_fn, name_scopes, {}, ops.name_scope) as result:
+ first, second = result.test_fn(constant_op.constant(1))
+ self.assertIn('test_fn/', first.op.name)
+ self.assertNotIn('inner_fn', first.op.name)
+ self.assertIn('test_fn/inner_fn/', second.op.name)
def test_method(self):
@@ -91,48 +81,20 @@ class FunctionNameScopeTransformer(converter_testing.TestCase):
def test_fn(self, l):
def inner_fn(i):
- return i ** 2
-
- l += 4
- return inner_fn(l)
+ return i + 1
- # Note that 'TestClass' was needed in the namespace here.
- node = self.parse_and_analyze(
- TestClass, {'TestClass': TestClass}, owner_type=TestClass)
- node = name_scopes.transform(node, self.ctx)
+ l += 1
+ return l, inner_fn(l)
- with self.compiled(node, ops.name_scope) as result:
- result_op = result.TestClass().test_fn(constant_op.constant(1))
- first_result_input_name = result_op.op.inputs[0].name
- second_result_input_name = result_op.op.inputs[1].name
- self.assertIn('TestClass/test_fn/', first_result_input_name)
- self.assertNotIn('inner_fn', first_result_input_name)
- self.assertIn('TestClass/test_fn/inner_fn/', second_result_input_name)
+ ns = {'TestClass': TestClass}
+ node, ctx = self.prepare(TestClass, ns, owner_type=TestClass)
+ node = name_scopes.transform(node, ctx)
- def test_operator(self):
-
- class TestClass(object):
-
- def __call__(self, l):
-
- def inner_fn(i):
- return i ** 2
-
- l += 4
- return inner_fn(l)
-
- # Note that 'TestClass' was needed in the namespace here.
- node = self.parse_and_analyze(
- TestClass.__call__, {'TestClass': TestClass}, owner_type=TestClass)
- node = name_scopes.transform(node, self.ctx)
-
- with self.compiled(node, ops.name_scope) as result:
- result_op = result.__call__(TestClass(), constant_op.constant(1))
- first_result_input_name = result_op.op.inputs[0].name
- second_result_input_name = result_op.op.inputs[1].name
- self.assertIn('call__/', first_result_input_name)
- self.assertNotIn('inner_fn', first_result_input_name)
- self.assertIn('call__/inner_fn/', second_result_input_name)
+ with self.compiled(node, {}, ops.name_scope) as result:
+ first, second = result.TestClass().test_fn(constant_op.constant(1))
+ self.assertIn('TestClass/test_fn/', first.op.name)
+ self.assertNotIn('inner_fn', first.op.name)
+ self.assertIn('TestClass/test_fn/inner_fn/', second.op.name)
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/single_return.py b/tensorflow/contrib/autograph/converters/return_statements.py
index a351cd81b8..a351cd81b8 100644
--- a/tensorflow/contrib/autograph/converters/single_return.py
+++ b/tensorflow/contrib/autograph/converters/return_statements.py
diff --git a/tensorflow/contrib/autograph/converters/return_statements_test.py b/tensorflow/contrib/autograph/converters/return_statements_test.py
new file mode 100644
index 0000000000..3c7c8c8a25
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/return_statements_test.py
@@ -0,0 +1,167 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for return_statements module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.converters import return_statements
+from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.python.framework import ops
+from tensorflow.python.platform import test
+
+
+class SingleReturnTest(converter_testing.TestCase):
+
+ def assertTransformedEquivalent(self, test_fn, *inputs):
+ ns = {'ops': ops}
+ with self.converted(test_fn, return_statements, ns) as result:
+ self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
+ def test_straightline(self):
+
+ def test_fn(x):
+ return x * x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+
+ def test_conditional(self):
+
+ def test_fn(x):
+ if x > 0:
+ return x
+ else:
+ return x * x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+ def test_missing_orelse(self):
+
+ def test_fn(x):
+ if x > 0:
+ return x
+
+ node, ctx = self.prepare(test_fn, {})
+ with self.assertRaises(ValueError):
+ return_statements.transform(node, ctx)
+
+  def test_missing_orelse_recoverable(self):
+
+ def test_fn(x):
+ if x > 0:
+ return x
+ return x * x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+ def test_missing_branch_return_recoverable(self):
+
+ def test_fn(x):
+ if x < 0:
+ x *= x
+ else:
+ return x
+ return x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+ def test_conditional_nested(self):
+
+ def test_fn(x):
+ if x > 0:
+ if x < 5:
+ return x
+ else:
+ return x * x
+ else:
+ return x * x * x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+ self.assertTransformedEquivalent(test_fn, 5)
+
+ def test_context_manager(self):
+
+ def test_fn(x):
+ with ops.name_scope(''):
+ return x * x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+ def test_context_manager_in_conditional(self):
+
+ def test_fn(x):
+ if x > 0:
+ with ops.name_scope(''):
+ return x * x
+ else:
+ return x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+  def test_conditional_in_context_manager(self):
+
+ def test_fn(x):
+ with ops.name_scope(''):
+ if x > 0:
+ return x * x
+ else:
+ return x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+ def test_no_return(self):
+
+ def test_fn(x):
+ x *= x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+
+ def test_nested_functions(self):
+
+ def test_fn(x):
+
+ def inner_fn(y):
+ if y > 0:
+ return y * y
+ else:
+ return y
+
+ return inner_fn(x)
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+ def test_loop(self):
+
+ def test_fn(x):
+ for _ in range(10):
+ return x
+ return x
+
+ node, ctx = self.prepare(test_fn, {})
+ with self.assertRaises(ValueError):
+ return_statements.transform(node, ctx)
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/autograph/converters/side_effect_guards_test.py b/tensorflow/contrib/autograph/converters/side_effect_guards_test.py
index a7ad8efed4..de1874321e 100644
--- a/tensorflow/contrib/autograph/converters/side_effect_guards_test.py
+++ b/tensorflow/contrib/autograph/converters/side_effect_guards_test.py
@@ -25,140 +25,138 @@ from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
-from tensorflow.python.ops import variables
+from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
+tf = None # Will be replaced by a mock.
+
+
class SideEffectGuardsTest(converter_testing.TestCase):
def test_side_effect_on_return_only_variable(self):
- tf = None
-
def test_fn(a):
tf.assign(a, a + 1)
return a
- node = self.parse_and_analyze(test_fn, {})
- node = side_effect_guards.transform(node, self.ctx)
+ node, ctx = self.prepare(test_fn, {})
+ node = side_effect_guards.transform(node, ctx)
- with self.compiled(node, state_ops.assign) as result:
- self.assertEqual(len(node.body[0].body), 1)
+ self.assertEqual(len(node.body[0].body), 1)
+
+ with self.compiled(node, {}, state_ops.assign) as result:
with self.test_session() as sess:
- v = variables.Variable(2)
+ v = variable_scope.get_variable('test', initializer=2)
sess.run(v.initializer)
- # NOTE: We don't expect the assignment to execute in this case, because
- # variables cannot be reliably guarded.
- self.assertEqual(2, sess.run(result.test_fn(v)))
+ sess.run(result.test_fn(v))
+ # TODO(mdan): Add support for this use case.
+ # Right now the variable `a` is not conditioned on the `assign` because
+ # there's no way to add control dependencies to a variable object.
+ self.assertEqual(2, sess.run(v))
def test_side_effect_on_used_variable(self):
- tf = None
-
def test_fn(a):
tf.assign(a, a + 1)
return a + 1
- node = self.parse_and_analyze(test_fn, {})
- node = side_effect_guards.transform(node, self.ctx)
+ node, ctx = self.prepare(test_fn, {})
+ node = side_effect_guards.transform(node, ctx)
- with self.compiled(node, state_ops.assign) as result:
- self.assertEqual(len(node.body[0].body), 1)
+ self.assertEqual(len(node.body[0].body), 1)
+
+ with self.compiled(node, {}, state_ops.assign) as result:
with self.test_session() as sess:
- v = variables.Variable(2)
+ v = variable_scope.get_variable('test', initializer=2)
sess.run(v.initializer)
- # NOTE: Unlike test_side_effect_on_return_only_variable, the variable
- # was used in the local scope and so we could catch the assign's side
- # effect.
- self.assertEqual(4, sess.run(result.test_fn(v)))
+ sess.run(result.test_fn(v))
+ # TODO(mdan): Ensure the result of test_fn(v) is also deterministic.
+ # Right now it's 3 or 4 based on whether the read is synchronized.
+ self.assertEqual(3, sess.run(v))
def test_side_effect_on_tensor(self):
- tf = None
-
def test_fn(a):
tf.Assert(a > 0, ['expected in throw'])
return a
- node = self.parse_and_analyze(test_fn, {})
- node = side_effect_guards.transform(node, self.ctx)
+ node, ctx = self.prepare(test_fn, {})
+ node = side_effect_guards.transform(node, ctx)
- with self.compiled(node, control_flow_ops.Assert) as result:
- self.assertEqual(len(node.body[0].body), 1)
+ self.assertEqual(len(node.body[0].body), 1)
+
+ with self.compiled(node, {}, control_flow_ops.Assert) as result:
with self.test_session() as sess:
- # NOTE: In this case we can also capture the side effect because the
- # argument is a tensor ans we can wrap it inside an identity.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'expected in throw'):
sess.run(result.test_fn(constant_op.constant(-1)))
def test_multiline_block(self):
- tf = None
-
def test_fn(a):
- tf.assign(a, a + 1)
+ tf.assign_add(a, 1)
b = a + 1
- tf.assign(a, b + 1)
- c = b + 1
- d = c + 1
- return d
+ tf.assign_add(a, 1)
+ b += 1
+ return b
- node = self.parse_and_analyze(test_fn, {})
- node = side_effect_guards.transform(node, self.ctx)
+ node, ctx = self.prepare(test_fn, {})
+ node = side_effect_guards.transform(node, ctx)
- with self.compiled(node, state_ops.assign) as result:
- self.assertEqual(len(node.body[0].body), 1)
+ self.assertEqual(len(node.body[0].body), 1)
+
+ with self.compiled(node, {}, state_ops.assign_add) as result:
with self.test_session() as sess:
- v = variables.Variable(2)
+ v = variable_scope.get_variable('test', initializer=2)
sess.run(v.initializer)
- self.assertEqual(6, sess.run(result.test_fn(v)))
+ sess.run(result.test_fn(v))
+ # TODO(mdan): Ensure the result of test_fn(v) is also deterministic.
+ self.assertEqual(4, sess.run(v))
def test_multiline_nested_block(self):
- tf = None
-
def test_fn(a):
with tf.name_scope('foo'):
tf.assign(a, a + 1)
b = a + 1
- c = b + 1
- d = c + 1
- return d
+ return b
- node = self.parse_and_analyze(test_fn, {})
- node = side_effect_guards.transform(node, self.ctx)
+ node, ctx = self.prepare(test_fn, {})
+ node = side_effect_guards.transform(node, ctx)
- with self.compiled(node, state_ops.assign, ops.name_scope) as result:
- self.assertEqual(len(node.body[0].body[0].body), 1)
+ self.assertEqual(len(node.body[0].body[0].body), 1)
+
+ with self.compiled(node, {}, state_ops.assign, ops.name_scope) as result:
with self.test_session() as sess:
- v = variables.Variable(2)
+ v = variable_scope.get_variable('test', initializer=2)
sess.run(v.initializer)
- self.assertEqual(6, sess.run(result.test_fn(v)))
+ sess.run(result.test_fn(v))
+ # TODO(mdan): Ensure the result of test_fn(v) is also deterministic.
+ self.assertEqual(3, sess.run(v))
def test_multiline_block_unsafe(self):
- tf = None
-
def test_fn(a):
tf.assign(a, a + 1)
b = a + 1
- tf.assign(a, a + 1)
+ tf.assign_add(a, 1)
c = b + 1
- d = c + 1
- return d
+ return c
+
+ node, ctx = self.prepare(test_fn, {})
+ node = side_effect_guards.transform(node, ctx)
- node = self.parse_and_analyze(test_fn, {})
- node = side_effect_guards.transform(node, self.ctx)
+ self.assertEqual(len(node.body[0].body), 1)
- with self.compiled(node, state_ops.assign) as result:
- self.assertEqual(len(node.body[0].body), 1)
+ with self.compiled(node, {}, state_ops.assign,
+ state_ops.assign_add) as result:
with self.test_session() as sess:
- v = variables.Variable(2)
+ v = variable_scope.get_variable('test', initializer=2)
sess.run(v.initializer)
- # NOTE: This intentionally highlights the flakiness. The test should be
- # tightened down once that is solved.
- self.assertTrue(sess.run(result.test_fn(v)) in (6, 7))
+ sess.run(result.test_fn(v))
+ # TODO(mdan): Ensure the result of test_fn(v) is also deterministic.
+ self.assertEqual(4, sess.run(v))
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/single_return_test.py b/tensorflow/contrib/autograph/converters/single_return_test.py
deleted file mode 100644
index 1f0de4310e..0000000000
--- a/tensorflow/contrib/autograph/converters/single_return_test.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for single_return module."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from tensorflow.contrib.autograph.converters import single_return
-from tensorflow.contrib.autograph.core import converter_testing
-from tensorflow.python.framework.ops import name_scope
-from tensorflow.python.platform import test
-
-
-class SingleReturnTest(converter_testing.TestCase):
-
- def compiled_fn(self, test_fn, *args):
- node = self.parse_and_analyze(test_fn, {})
- node = single_return.transform(node, self.ctx)
- module = self.compiled(node, *args)
- return module
-
- def test_noop(self):
- # Noop
- def test_fn(x):
- return x
-
- with self.compiled_fn(test_fn) as result:
- self.assertEqual(test_fn(2.0), result.test_fn(2.0))
-
- def test_return_expression(self):
- # ANF
- def test_fn(x):
- return x * x
-
- with self.compiled_fn(test_fn) as result:
- x = 2
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_merge(self):
- # Simple merge
- def test_fn(x):
- if x > 0:
- return x
- else:
- return x * x
-
- with self.compiled_fn(test_fn) as result:
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_orphan_branch(self):
-
- def test_fn(x):
- if x > 0:
- return x
-
- with self.assertRaises(ValueError):
- self.compiled_fn(test_fn)
-
- def test_lift_body_into_false_branch(self):
-
- def test_fn(x):
- if x > 0:
- return x
- return x * x
-
- with self.compiled_fn(test_fn) as result:
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_lift_body_into_true_branch(self):
-
- def test_fn(x):
- if x < 0:
- x *= x
- else:
- # TODO(alexbw): linter bug here that requires us suppress this warning.
- return x # pylint: disable=undefined-loop-variable
- return x
-
- with self.compiled_fn(test_fn) as result:
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_nested_if(self):
-
- def test_fn(x):
- if x > 0:
- if x < 5:
- return x
- else:
- return x * x
- else:
- return x * x * x
-
- with self.compiled_fn(test_fn) as result:
- for x in [-2, 2, 5]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_context_manager(self):
-
- def test_fn(x):
-
- with name_scope(''):
- return x * x
-
- with self.compiled_fn(test_fn) as result:
- result.name_scope = name_scope
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_context_manager_in_conditional(self):
-
- def test_fn(x):
- if x > 0:
- with name_scope(''):
- return x * x
- else:
- return x
-
- with self.compiled_fn(test_fn, name_scope) as result:
- result.name_scope = name_scope
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def text_conditional_in_context_manager(self):
-
- def test_fn(x):
- with name_scope(''):
- if x > 0:
- return x * x
- else:
- return x
-
- with self.compiled_fn(test_fn) as result:
- result.name_scope = name_scope
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_no_return(self):
-
- def test_fn(x):
- x *= x
-
- with self.compiled_fn(test_fn) as result:
- self.assertEqual(test_fn(2), result.test_fn(2))
-
- def test_nested_functiondefs(self):
-
- def test_fn(x):
-
- def inner_fn(y):
- if y > 0:
- return y * y
- else:
- return y
-
- return inner_fn(x)
-
- with self.compiled_fn(test_fn) as result:
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_loop(self):
-
- def test_fn(x):
- for _ in range(10):
- return x
- return x
-
- with self.assertRaises(ValueError):
- self.compiled_fn(test_fn)
-
-
-if __name__ == '__main__':
- test.main()
diff --git a/tensorflow/contrib/autograph/converters/slices.py b/tensorflow/contrib/autograph/converters/slices.py
index 3f5fc57125..9cfa066672 100644
--- a/tensorflow/contrib/autograph/converters/slices.py
+++ b/tensorflow/contrib/autograph/converters/slices.py
@@ -21,7 +21,7 @@ from __future__ import print_function
import gast
from tensorflow.contrib.autograph.core import converter
-from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.lang import directives
from tensorflow.contrib.autograph.pyct import templates
@@ -56,17 +56,17 @@ class SliceTransformer(converter.Base):
def visit_Subscript(self, node):
node = self.generic_visit(node)
if not isinstance(node.slice, gast.Index):
- # TODO(mdan): It might make more sense to wave them through.
- raise NotImplementedError('non-index slice')
+ return node
if not isinstance(node.ctx, gast.Load):
# Index writes are handled at a higher level, one at which the rvalue is
# also available.
return node
- dtype = anno.getanno(
+ dtype = self.get_definition_directive(
node.value,
- 'element_type',
+ directives.set_element_type,
+ 'dtype',
default=templates.replace_as_expression('None'))
template = """
diff --git a/tensorflow/contrib/autograph/converters/slices_test.py b/tensorflow/contrib/autograph/converters/slices_test.py
index df9a4c8bab..3c0f81e8bc 100644
--- a/tensorflow/contrib/autograph/converters/slices_test.py
+++ b/tensorflow/contrib/autograph/converters/slices_test.py
@@ -18,9 +18,12 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.contrib.autograph import utils
from tensorflow.contrib.autograph.converters import slices
from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.lang import directives
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import parser
+from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import list_ops
@@ -32,28 +35,42 @@ class SliceTest(converter_testing.TestCase):
def test_index_access(self):
def test_fn(l):
- utils.set_element_type(l, dtypes.int32)
return l[1]
- node = self.parse_and_analyze(
- test_fn,
- {
- 'utils': utils,
- 'dtypes': dtypes
- },
- include_type_analysis=True,
- )
- node = slices.transform(node, self.ctx)
-
- with self.compiled(node, dtypes.int32) as result:
- result.utils = utils
- result.dtypes = dtypes
+ node, ctx = self.prepare(test_fn, {})
+ def_, = anno.getanno(node.body[0].args.args[0], anno.Static.DEFINITIONS)
+ def_.directives[directives.set_element_type] = {
+ 'dtype': parser.parse_expression('tf.int32')
+ }
+ node = slices.transform(node, ctx)
+
+ with self.compiled(node, {}, dtypes.int32) as result:
with self.test_session() as sess:
tl = list_ops.tensor_list_from_tensor(
[1, 2], element_shape=constant_op.constant([], dtype=dtypes.int32))
y = result.test_fn(tl)
self.assertEqual(2, sess.run(y))
+ def test_index_access_multiple_definitions(self):
+
+ def test_fn(l):
+ if l:
+ l = []
+ return l[1]
+
+ node, ctx = self.prepare(test_fn, {})
+ def_, = anno.getanno(node.body[0].args.args[0], anno.Static.DEFINITIONS)
+ def_.directives[directives.set_element_type] = {
+ 'dtype': parser.parse_expression('tf.int32')
+ }
+ def_, = anno.getanno(node.body[0].body[0].body[0].targets[0],
+ anno.Static.DEFINITIONS)
+ def_.directives[directives.set_element_type] = {
+ 'dtype': parser.parse_expression('tf.float32')
+ }
+ with self.assertRaises(transformer.AutographParseError):
+ slices.transform(node, ctx)
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/autograph/core/BUILD b/tensorflow/contrib/autograph/core/BUILD
index 833f9dced8..1873045a92 100644
--- a/tensorflow/contrib/autograph/core/BUILD
+++ b/tensorflow/contrib/autograph/core/BUILD
@@ -19,6 +19,7 @@ py_library(
srcs = [
"config.py",
"converter.py",
+ "errors.py",
"naming.py",
],
srcs_version = "PY2AND3",
@@ -30,6 +31,31 @@ py_library(
],
)
+py_test(
+ name = "errors_test",
+ srcs = ["errors_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":core",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:dtypes",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:random_ops",
+ ],
+)
+
+py_test(
+ name = "naming_test",
+ srcs = ["naming_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":core",
+ "//tensorflow/python:client_testlib",
+ ],
+)
+
py_library(
name = "test_lib",
srcs = [
@@ -47,13 +73,3 @@ py_library(
"@six_archive//:six",
],
)
-
-py_test(
- name = "naming_test",
- srcs = ["naming_test.py"],
- srcs_version = "PY2AND3",
- deps = [
- ":core",
- "//tensorflow/python:client_testlib",
- ],
-)
diff --git a/tensorflow/contrib/autograph/core/converter.py b/tensorflow/contrib/autograph/core/converter.py
index 54e6aa0f3b..a93e4a8064 100644
--- a/tensorflow/contrib/autograph/core/converter.py
+++ b/tensorflow/contrib/autograph/core/converter.py
@@ -64,15 +64,29 @@ from __future__ import division
from __future__ import print_function
import collections
+from enum import Enum
+
from tensorflow.contrib.autograph.core import config
from tensorflow.contrib.autograph.core import naming
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import ast_util
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import compiler
+from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import activity
+from tensorflow.contrib.autograph.pyct.static_analysis import live_values
+from tensorflow.contrib.autograph.pyct.static_analysis import liveness
+from tensorflow.contrib.autograph.pyct.static_analysis import reaching_definitions
+from tensorflow.contrib.autograph.pyct.static_analysis import type_info
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
+# TODO(mdan): Add a test specific to this converter.
+
class ProgramContext(object):
"""ProgramContext keeps track of converting function hierarchies.
@@ -197,6 +211,46 @@ class Base(transformer.Base):
self._used = False
self._ast_depth = 0
+ def get_definition_directive(self, node, directive, arg, default):
+ """Returns the unique directive for a symbol, or a default if none exist.
+
+ See lang/directives.py for details on directives.
+
+ Args:
+ node: ast.AST
+ directive: Callable[..., Any]
+ arg: str
+ default: Any
+
+    Returns:
+      Any, the value of the directive argument, or the default if no
+      definition of the symbol sets it.
+
+    Raises:
+      ValueError: if conflicting annotations have been found.
+    """
+ defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
+ if not defs:
+ return default
+
+ # TODO(mdan): Simplify this.
+ arg_values = []
+ for def_ in defs:
+      if (directive not in def_.directives or
+          arg not in def_.directives[directive]):
+ continue
+ arg_value = def_.directives[directive][arg]
+ for prev_value in arg_values:
+ if not ast_util.matches(arg_value, prev_value):
+ qn = anno.getanno(node, anno.Basic.QN)
+ raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' %
+ (qn, directive.__name__, arg,
+ compiler.ast_to_source(arg_value).strip(),
+ compiler.ast_to_source(prev_value).strip()))
+ arg_values.append(arg_value)
+
+ if not arg_values:
+ return default
+
+ arg_value, = arg_values
+ return arg_value
+
def visit(self, node):
if not self._ast_depth:
if self._used:
@@ -208,3 +262,69 @@ class Base(transformer.Base):
return super(Base, self).visit(node)
finally:
self._ast_depth -= 1
+
+
+class AnnotatedDef(reaching_definitions.Definition):
+
+ def __init__(self):
+ super(AnnotatedDef, self).__init__()
+ self.directives = {}
+
+
+class AgAnno(Enum):
+ """Annotation labels specific to AutoGraph. See anno.py."""
+
+ DIRECTIVES = 'User directives associated with the annotated statement.'
+
+ def __repr__(self):
+ return self.name
+
+
+def standard_analysis(node, context, is_initial=False):
+ """Performs a complete static analysis of the given code.
+
+ Args:
+ node: ast.AST
+ context: converter.EntityContext
+ is_initial: bool, whether this is the initial analysis done on the input
+ source code
+
+ Returns:
+ ast.AST, same as node, with the static analysis annotations added
+ """
+ # TODO(mdan): Clear static analysis here.
+ # TODO(mdan): Consider not running all analyses every time.
+ # TODO(mdan): Don't return a node because it's modified by reference.
+ graphs = cfg.build(node)
+ node = qual_names.resolve(node)
+ node = activity.resolve(node, context.info, None)
+ node = reaching_definitions.resolve(node, context.info, graphs, AnnotatedDef)
+ node = liveness.resolve(node, context.info, graphs)
+ node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
+ node = type_info.resolve(node, context.info)
+ # This second call allows resolving first-order class attributes.
+ node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
+ if is_initial:
+ anno.dup(
+ node,
+ {
+ anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
+ },
+ )
+ return node
+
+
+def apply_(node, context, converter_module):
+ """Applies a converter to an AST.
+
+ Args:
+ node: ast.AST
+ context: converter.EntityContext
+ converter_module: converter.Base
+
+ Returns:
+ ast.AST, the result of applying converter to node
+ """
+ node = standard_analysis(node, context)
+ node = converter_module.transform(node, context)
+ return node
diff --git a/tensorflow/contrib/autograph/core/converter_testing.py b/tensorflow/contrib/autograph/core/converter_testing.py
index 0e46aacc12..2025e32817 100644
--- a/tensorflow/contrib/autograph/core/converter_testing.py
+++ b/tensorflow/contrib/autograph/core/converter_testing.py
@@ -20,19 +20,19 @@ from __future__ import print_function
import contextlib
import imp
+import sys
+
+import six
from tensorflow.contrib.autograph import operators
from tensorflow.contrib.autograph import utils
from tensorflow.contrib.autograph.core import config
from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.core import errors
from tensorflow.contrib.autograph.pyct import compiler
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import pretty_printer
-from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct import transformer
-from tensorflow.contrib.autograph.pyct.static_analysis import activity
-from tensorflow.contrib.autograph.pyct.static_analysis import live_values
-from tensorflow.contrib.autograph.pyct.static_analysis import type_info
from tensorflow.python.platform import test
@@ -74,7 +74,17 @@ class TestCase(test.TestCase):
"""Base class for unit tests in this module. Contains relevant utilities."""
@contextlib.contextmanager
- def compiled(self, node, *symbols):
+ def assertPrints(self, expected_result):
+ try:
+ out_capturer = six.StringIO()
+ sys.stdout = out_capturer
+ yield
+ self.assertEqual(out_capturer.getvalue(), expected_result)
+ finally:
+ sys.stdout = sys.__stdout__
+
+ @contextlib.contextmanager
+ def compiled(self, node, namespace, *symbols):
source = None
self.dynamic_calls = []
@@ -89,7 +99,11 @@ class TestCase(test.TestCase):
fake_ag = self.make_fake_mod('fake_ag', converted_call)
fake_ag.__dict__.update(operators.__dict__)
fake_ag.__dict__['utils'] = utils
+ fake_ag.__dict__['rewrite_graph_construction_error'] = (
+ errors.rewrite_graph_construction_error)
result.__dict__['ag__'] = fake_ag
+ for k, v in namespace.items():
+ result.__dict__[k] = v
yield result
except Exception: # pylint:disable=broad-except
if source is None:
@@ -98,6 +112,13 @@ class TestCase(test.TestCase):
print('Offending compiled code:\n%s' % source)
raise
+ @contextlib.contextmanager
+ def converted(self, entity, converter_module, namespace, *tf_symbols):
+ node, ctx = self.prepare(entity, namespace)
+ node = converter_module.transform(node, ctx)
+ with self.compiled(node, namespace, *tf_symbols) as result:
+ yield result
+
def make_fake_mod(self, name, *symbols):
fake_mod = imp.new_module(name)
for s in symbols:
@@ -114,17 +135,15 @@ class TestCase(test.TestCase):
for k, v in ns.items():
setattr(module, k, v)
- def parse_and_analyze(self,
- test_fn,
- namespace,
- namer=None,
- arg_types=None,
- include_type_analysis=True,
- owner_type=None,
- recursive=True,
- autograph_decorators=()):
+ def prepare(self,
+ test_fn,
+ namespace,
+ namer=None,
+ arg_types=None,
+ owner_type=None,
+ recursive=True,
+ autograph_decorators=()):
node, source = parser.parse_entity(test_fn)
-
if namer is None:
namer = FakeNamer()
program_ctx = converter.ProgramContext(
@@ -141,12 +160,5 @@ class TestCase(test.TestCase):
arg_types=arg_types,
owner_type=owner_type)
ctx = converter.EntityContext(namer, entity_info, program_ctx)
-
- node = qual_names.resolve(node)
- node = activity.resolve(node, entity_info)
- node = live_values.resolve(node, entity_info, {})
- if include_type_analysis:
- node = type_info.resolve(node, entity_info)
- node = live_values.resolve(node, entity_info, {})
- self.ctx = ctx
- return node
+ node = converter.standard_analysis(node, ctx, is_initial=True)
+ return node, ctx
diff --git a/tensorflow/contrib/autograph/core/errors.py b/tensorflow/contrib/autograph/core/errors.py
new file mode 100644
index 0000000000..e58745337a
--- /dev/null
+++ b/tensorflow/contrib/autograph/core/errors.py
@@ -0,0 +1,272 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Error rewriting logic.
+
+Contains the functions responsible for rewriting tracebacks of errors raised
+in AutoGraph (AG) generated code so that they refer only to the original,
+user-written code.
+
+When 'user code' is used in comments it refers to the original source code that
+the user wrote and is converting using AutoGraph.
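+
+The typical flow: converted code calls rewrite_graph_construction_error from
+its except blocks while the graph is being built, and improved_errors wraps
+graph execution, converting OpErrors via rewrite_tf_runtime_error.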
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import contextlib
+import logging
+import sys
+import traceback
+
+from tensorflow.contrib.autograph.pyct.origin_info import CodeLocation
+from tensorflow.python.framework import errors_impl
+from tensorflow.python.util import tf_inspect
+
+
+class GraphConstructionError(Exception):
+ """Error for graph construction errors from AutoGraph generated code."""
+
+ def __init__(self, original_error, custom_traceback):
+ self.original_error = original_error
+ self.custom_traceback = custom_traceback
+ super(GraphConstructionError, self).__init__()
+
+ def __str__(self):
+ traceback_str = ''.join(traceback.format_list(self.custom_traceback))
+ return ('Traceback (most recent call last):\n' + traceback_str + '\n' + str(
+ self.original_error) + '\n')
+
+
+class TfRuntimeError(Exception):
+ """Error wrapper for runtime errors raised by AutoGraph generated code."""
+
+ def __init__(self, op_name, op_message, custom_traceback):
+ self.op_name = op_name
+ self.op_message = op_message
+ self.custom_traceback = custom_traceback
+ super(TfRuntimeError, self).__init__()
+
+ def __str__(self):
+ message = '%s\n\nCaused by op %r, defined at:\n' % (self.op_message,
+ self.op_name)
+ return message + ''.join(traceback.format_list(self.custom_traceback))
+
+
+def _rewrite_frame(source_map, cleaned_traceback, stack_frame_indices):
+ """Rewrites the stack frames at the given indices using the given source map.
+
+ Args:
+ source_map: Dict[CodeLocation, OriginInfo], a mapping between the user and
+ AG generated code.
+    cleaned_traceback: List[Tuple[text, int, text, text]], the current
+      traceback entries as (file_path, line_number, function_name, code).
+ stack_frame_indices: Iterable[Int], frame indices to possibly rewrite if
+ there are matching source mapping keys.
+
+ Returns:
+ None
+ """
+ for frame_index in stack_frame_indices:
+ # (file_path, line number, function name, code)
+ file_path, line_number, _, _ = cleaned_traceback[frame_index]
+ source_map_key = CodeLocation(file_path=file_path, line_number=line_number)
+ found_mapping = source_map_key in source_map
+ if found_mapping:
+ cleaned_traceback[frame_index] = source_map[source_map_key].as_frame()
+
+
+# TODO(znado): Make more robust to name changes in the rewriting logic.
+def _remove_rewrite_frames(tb):
+ """Remove stack frames containing the error rewriting logic."""
+ cleaned_tb = []
+ for f in tb:
+ if 'ag__.rewrite_graph_construction_error' not in f[3]:
+ cleaned_tb.append(f)
+ return cleaned_tb
+
+
+def rewrite_graph_construction_error(source_map):
+ """Rewrites errors raised by non-AG APIs inside AG generated code.
+
+ Meant to be called from the try/except block inside each AutoGraph generated
+ function. Only rewrites the traceback frames corresponding to the function
+  that this is called from. The GraphConstructionError raised at the end is
+  then caught by the calling functions, which are in turn responsible for
+  rewriting their own frames.
+
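+  A rough sketch of the expected call site (the names below are illustrative,
+  and how the source map reaches the handler is up to the code generator;
+  `ag__` is the module alias bound by the test harness in
+  converter_testing.py):
+
+    def converted_fn(...):
+      try:
+        ...  # converted body that builds graph ops
+      except Exception:
+        ag__.rewrite_graph_construction_error(source_map)
+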
+ Args:
+ source_map: Dict[CodeLocation, OriginInfo], a mapping between the user and
+ AG generated code.
+
+ Raises:
+ GraphConstructionError: The rewritten underlying error.
+ Exception: The underlying error, if it could not be rewritten.
+ """
+ error_info = sys.exc_info()
+ _, original_error, e_traceback = error_info
+ assert original_error is not None
+ try:
+ _, _, _, func_name, _, _ = tf_inspect.stack()[1]
+ # The latest function call is added to the beginning of a traceback, but
+ # when rewriting the traceback of multiple function calls the previous
+ # functions' except blocks may have already rewritten their own frames so
+ # we want to copy over all of the previous frames. We may have rewritten
+ # previous frames only if the error is a GraphConstructionError.
+ if isinstance(original_error, GraphConstructionError):
+ cleaned_traceback = traceback.extract_tb(e_traceback)
+ previous_traceback = original_error.custom_traceback
+ cleaned_traceback = [cleaned_traceback[0]] + previous_traceback
+ else:
+ cleaned_traceback = traceback.extract_tb(e_traceback)
+ cleaned_traceback = _remove_rewrite_frames(cleaned_traceback)
+
+ current_frame_indices = []
+ # This code is meant to be called from the try/except block that wraps a
+    # function body. Here we find the first frame that came from the function
+    # this wraps, check for a matching line number in the source mapping, and
+    # rewrite that frame if a match is found.
+ for fi, frame in enumerate(cleaned_traceback):
+ _, _, frame_func_name, _ = frame
+ if frame_func_name == func_name:
+ current_frame_indices.append(fi)
+ break
+ if current_frame_indices:
+ _rewrite_frame(source_map, cleaned_traceback, current_frame_indices)
+
+ if isinstance(original_error, GraphConstructionError):
+ original_error.custom_traceback = cleaned_traceback
+ new_error = original_error
+ else:
+ new_error = GraphConstructionError(original_error, cleaned_traceback)
+ except Exception:
+ logging.exception('Error while rewriting AutoGraph error:')
+ raise original_error
+ else:
+ raise new_error
+ finally:
+ # Addresses warning https://docs.python.org/2/library/sys.html#sys.exc_info.
+ del e_traceback
+
+
+def rewrite_tf_runtime_error(error, source_map):
+ """Rewrites TensorFlow runtime errors raised by ops created in AG code.
+
+ Args:
+    error: errors_impl.OpError, a TensorFlow error whose traceback will be
+      rewritten.
+    source_map: Dict[CodeLocation, OriginInfo], a mapping between the user and
+      AG generated code.
+
+ Returns:
+ A TfRuntimeError with a traceback rewritten according to the given
+ source mapping.
+ """
+ # Check for cases where we leave a user method and re-enter it in the
+ # traceback. This is done by looking at the function names when the
+ # filenames are from any files the user code is in. If we find a case where
+ # we return to a user method after leaving it then we cut out the frames in
+ # between because we assume this means these in between frames are from
+ # internal AutoGraph code that shouldn't be included.
+ #
+ # An example of this is:
+ #
+ # File "file1.py", line 57, in my_func
+ # ...
+ # File "control_flow_ops.py", line 231, in cond
+ # ...
+ # File "control_flow_ops.py", line 1039, in inner_cond
+ # ...
+ # File "file1.py", line 68, in my_func
+ # ...
+ #
+ # Where we would remove the control_flow_ops.py frames because we re-enter
+ # my_func in file1.py.
+ #
+ # The source map keys are (file_path, line_number) so get the set of all user
+ # file_paths.
+ try:
+ all_user_files = set(k.file_path for k in source_map)
+ cleaned_traceback = []
+ last_user_frame_index = None
+    last_user_file_path = None
+    last_user_fn_name = None
+    for fi, frame in enumerate(error.op.traceback):
+      frame_file_path, frame_line_number, _, _ = frame
+      src_map_key = CodeLocation(
+          file_path=frame_file_path, line_number=frame_line_number)
+      if frame_file_path in all_user_files:
+        if src_map_key in source_map:
+          original_fn_name = source_map[src_map_key].function_name
+          if (last_user_frame_index is not None and
+              last_user_file_path == frame_file_path):
+            if last_user_fn_name == original_fn_name:
+              cleaned_traceback = cleaned_traceback[:last_user_frame_index]
+            else:
+              cleaned_traceback = cleaned_traceback[:last_user_frame_index + 1]
+            last_user_fn_name = original_fn_name
+        else:
+          last_user_fn_name = None
+        last_user_frame_index = fi
+        last_user_file_path = frame_file_path
+ cleaned_traceback.append(frame)
+
+ for fi in range(len(cleaned_traceback)):
+ _rewrite_frame(source_map, cleaned_traceback, [fi])
+ op_name = error.op.name
+ op_message = error.message
+ rewritten_error = TfRuntimeError(op_name, op_message, cleaned_traceback)
+ return rewritten_error
+ except Exception: # pylint: disable=broad-except
+ logging.exception('Error while rewriting AutoGraph error:')
+ return error
+
+
+# TODO(znado): Add arg to enable different levels of error rewriting.
+@contextlib.contextmanager
+def improved_errors(converted_function):
+ """Context manager that rewrites runtime errors.
+
+ This context manager will rewrite runtime errors so that their traceback
+ is relative to the original code before conversion.
+
+  Use it with the output of to_graph to wrap the execution of the resulting
+  ops.
+ Example:
+
+ converted_my_func = ag.to_graph(my_func)
+ ops = converted_my_func(...)
+
+ with ag.improved_errors(converted_my_func):
+ sess.run(ops)
+
+ Args:
+ converted_function: Callable[..., Any], the output of a to_graph call
+
+ Yields:
+ None
+
+ Raises:
+ TfRuntimeError: if any OpError originates in the converted code, it will
+ be wrapped into a TfRuntimeError
+ ValueError: If converted_function is not generated by AutoGraph
+ """
+ if (getattr(converted_function, 'ag_source_map', None) is None or
+ not converted_function.ag_source_map):
+ raise ValueError(
+ 'converted_function must be the result of an autograph.to_graph call')
+ try:
+ yield
+ except errors_impl.OpError as e:
+ raise rewrite_tf_runtime_error(e, converted_function.ag_source_map)
diff --git a/tensorflow/contrib/autograph/core/errors_test.py b/tensorflow/contrib/autograph/core/errors_test.py
new file mode 100644
index 0000000000..7be54563a1
--- /dev/null
+++ b/tensorflow/contrib/autograph/core/errors_test.py
@@ -0,0 +1,116 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for errors module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.core import errors
+from tensorflow.contrib.autograph.pyct import origin_info
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors as tf_errors
+from tensorflow.python.ops import array_ops
+from tensorflow.python.platform import test
+from tensorflow.python.util import tf_inspect
+
+
+def zero_div():
+ return array_ops.constant(10, dtype=dtypes.int32) // 0
+
+
+def zero_div_caller():
+ a = zero_div() + 2
+ return a
+
+
+class RuntimeErrorsTest(test.TestCase):
+
+ def setUp(self):
+ self._fake_origin = origin_info.OriginInfo('new file', 'new func', 96, 0,
+ 'print("hello world!")')
+
+ def test_error_replacement(self):
+ _, zero_div_lineno = tf_inspect.getsourcelines(zero_div)
+ src_map = {
+ errors.CodeLocation(
+ file_path=__file__, line_number=zero_div_lineno + 1):
+ self._fake_origin
+ }
+ with self.assertRaises(errors.TfRuntimeError) as cm:
+ z = zero_div_caller()
+ zero_div_caller.ag_source_map = src_map
+ with errors.improved_errors(zero_div_caller):
+ with self.test_session() as sess:
+ sess.run(z)
+ expected = cm.exception
+ current_traceback = expected.custom_traceback
+ for frame in current_traceback:
+ self.assertNotEqual('zero_div', frame[2])
+ self.assertTrue(
+ any(self._fake_origin.as_frame() == frame
+ for frame in current_traceback))
+
+ def test_error_not_found(self):
+ src_map = {
+ errors.CodeLocation(file_path=__file__, line_number=-1):
+ self._fake_origin
+ }
+ with self.assertRaises(errors.TfRuntimeError) as cm:
+ z = zero_div_caller()
+ zero_div_caller.ag_source_map = src_map
+ with errors.improved_errors(zero_div_caller):
+ with self.test_session() as sess:
+ sess.run(z)
+ expected = cm.exception
+ current_traceback = expected.custom_traceback
+ self.assertTrue(any('zero_div' in frame[2] for frame in current_traceback))
+ for frame in current_traceback:
+ self.assertNotEqual(frame, self._fake_origin.as_frame())
+
+ def test_rewriting_error(self):
+ _, zero_div_lineno = tf_inspect.getsourcelines(zero_div)
+ src_map = {
+ errors.CodeLocation(
+ file_path=__file__, line_number=zero_div_lineno + 1):
+ None
+ }
+ with self.assertRaisesRegexp(tf_errors.InvalidArgumentError,
+ 'Integer division by zero'):
+ z = zero_div_caller()
+ zero_div_caller.ag_source_map = src_map
+ with errors.improved_errors(zero_div_caller):
+ with self.test_session() as sess:
+ sess.run(z)
+
+ def test_no_ag_source_map(self):
+ with self.assertRaisesRegexp(
+ ValueError,
+ 'converted_function must be the result of an autograph.to_graph call'):
+ with errors.improved_errors(None):
+ pass
+
+ def test_bad_ag_source_map(self):
+ with self.assertRaisesRegexp(
+ ValueError,
+ 'converted_function must be the result of an autograph.to_graph call'):
+ src_map = None
+ zero_div_caller.ag_source_map = src_map
+      with errors.improved_errors(zero_div_caller):
+ pass
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/autograph/examples/integration_tests/BUILD b/tensorflow/contrib/autograph/examples/integration_tests/BUILD
new file mode 100644
index 0000000000..1368ce244c
--- /dev/null
+++ b/tensorflow/contrib/autograph/examples/integration_tests/BUILD
@@ -0,0 +1,29 @@
+licenses(["notice"]) # Apache 2.0
+
+exports_files(["LICENSE"])
+
+load("//tensorflow:tensorflow.bzl", "py_test")
+
+filegroup(
+ name = "all_files",
+ srcs = glob(
+ ["**/*"],
+ exclude = [
+ "**/METADATA",
+ "**/OWNERS",
+ ],
+ ),
+ visibility = ["//tensorflow:__subpackages__"],
+)
+
+py_test(
+ name = "keras_test",
+ srcs = [
+ "keras_test.py",
+ ],
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//tensorflow:tensorflow_py",
+ ],
+)
diff --git a/tensorflow/contrib/autograph/utils/type_hints.py b/tensorflow/contrib/autograph/examples/integration_tests/keras_test.py
index aeb9e54561..a2fc7c550e 100644
--- a/tensorflow/contrib/autograph/utils/type_hints.py
+++ b/tensorflow/contrib/autograph/examples/integration_tests/keras_test.py
@@ -12,30 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""No-op utilities that provide static type hints.
-
-These are used when the data type is not known at creation, for instance in the
-case of empty lists.
-"""
+"""Keras integration tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import tensorflow as tf
+
+
+class MinimalKeras(tf.keras.Model):
+
+ def call(self, x):
+ return x * 3
+
+
+class KerasTest(tf.test.TestCase):
-def set_element_type(entity, dtype, shape=None):
- """Indicates that the entity is expected hold items of specified type.
+ def test_basic(self):
+ MinimalKeras()
- This function is a no-op. Its presence merely marks the data type of its
- argument. The staged TensorFlow ops will reflect and assert this data type.
- Args:
- entity: A Tensor or TensorArray.
- dtype: TensorFlow dtype value to assert for entity.
- shape: Optional shape to assert for entity.
- Returns:
- The value of entity, unchanged.
- """
- del dtype
- del shape
- return entity
+if __name__ == '__main__':
+ tf.test.main()
diff --git a/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_collatz_speed_test.ipynb b/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_collatz_speed_test.ipynb
new file mode 100644
index 0000000000..c10a5741f6
--- /dev/null
+++ b/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_collatz_speed_test.ipynb
@@ -0,0 +1,299 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "aQkTGc-d8I1k"
+ },
+ "source": [
+ "This notebook runs a basic speed test for a simple algorithm that implements the process described in Collatz Conjecture.\n",
+ "\n",
+ "https://en.wikipedia.org/wiki/Collatz_conjecture"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "x5ChBlH09jk_"
+ },
+ "source": [
+ "### Imports"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "X-QAUpWdPxUh"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install -U -q tf-nightly"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "wiKQu3w05eCa"
+ },
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "from matplotlib import pyplot as plt\n",
+ "import tensorflow as tf\n",
+ "from tensorflow.contrib import autograph as ag\n",
+ "from tensorflow.python.eager import context"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "_cRFTcwT9mnn"
+ },
+ "source": [
+ "### Plotting helpers"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "ww7rc0GQ9pMu"
+ },
+ "outputs": [],
+ "source": [
+ "def plot_results(counts, times, title):\n",
+ " plt.plot(counts, np.array(times) * 1000., 'o')\n",
+ " plt.ylabel('Time (milliseconds)')\n",
+ " plt.xlabel('Collatz counter')\n",
+ " plt.title(title)\n",
+ " plt.ylim(0, 30)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "ESZGw9s9-Y5_"
+ },
+ "source": [
+ "### Collatz function definition"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "qeunWm9m-dT7"
+ },
+ "outputs": [],
+ "source": [
+ "def collatz(a):\n",
+ " count = 0\n",
+ " while a \u003e 1.1:\n",
+ " if a % 2 \u003c 0.1:\n",
+ " a //= 2\n",
+ " else:\n",
+ " a = 3 * a + 1\n",
+ " count += 1\n",
+ " return count\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "nnFmPDvScsDo"
+ },
+ "source": [
+ "# AutoGraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 301
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 9153,
+ "status": "ok",
+ "timestamp": 1531757473651,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "6fU4vlxYcsDe",
+ "outputId": "11b50f28-aced-4506-a743-4b749e9645c3"
+ },
+ "outputs": [
+ {
+ "data": {
+      "image/png": "<base64 PNG elided: scatter plot of run times for the AutoGraph version, 'Time (milliseconds)' vs. 'Collatz counter'>",
eleuXMGlS5cQFxen/660Wi0uXryIPn36AID+IUFELcGQILsWHByMK1euoLKyEl5eXk3e\nFw3Mh29qfvz169dDqVRi7dq1+gdAGevf2LlzJ06ePKl//rOhMshd3lBeFxeXRk8nvH//fqP13N3d\n9X+7uLjon1FsaH/e3t76mT0fJggC2rdvb/A9IjnYcU12rVevXhg3bhwSExP1U6zX1dVhy5YtqKmp\nwciRI5Geno7a2lpotVpkZGQgNDTU6D4rKyvRrVs3CIKA8+fPIzc3V/+eh4cH7t69q3/9ww8/4LPP\nPsMf//hHuLm5Gdzfk08+iQsXLuDHH38EUN9PUlFRgYCAAGi1Wpw6dQoAcPLkSdTW1qJ3797o0aMH\nCgsLUVlZCVEUsW/fPlnfh6enp35qaKD+iqNt27bIzMzUL7t06ZL+uzJ1JUZkCq8kyO6tWbMGn376\nKaZOnQo3NzeIoojRo0fDzc0NM2fOxNWrV/XP7X3mmWf0ndpSfvOb3yA+Ph67d+9Gz549MWzYMP17\nU6ZMwYoVK3DgwAG88sor2LlzJ2pqajB//nz9VUBaWlqjX+cdO3bEpk2bkJSUhOrqari4uCA+Ph4h\nISH45JNPsHr1an3H9aeffgpXV1f4+vpi3rx5iImJgb+/P5544glcuHDB5HfRr18/9OnTB5GRkQgI\nCEBycjL++7//G++//z42b96Muro6+Pj4YOPGjQDs/6lqZP84VTgREUlicxMREUliSBARkSSGBBER\nSWJIEBGRJIYEERFJYkgQEZEkhgQREUliSBARkaT/AzLfG+oMx+5pAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ "\u003cmatplotlib.figure.Figure at 0x7fc3b259add0\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "counts = []\n",
+ "times = []\n",
+ "for n in np.logspace(0, 7, 50):\n",
+ "\n",
+ " with tf.Graph().as_default():\n",
+ " tf_collatz = ag.to_graph(collatz)\n",
+ " count = tf_collatz(tf.constant(n, dtype=tf.float32))\n",
+ " with tf.Session() as sess:\n",
+ " count_value = sess.run(count)\n",
+ "\n",
+ " res = %timeit -n10 -r1 -o -q sess.run(count)\n",
+ " counts.append(count_value)\n",
+ " times.append(res.best)\n",
+ " \n",
+ "plot_results(counts, times, 'AutoGraph')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "RRENYzLRF_f3"
+ },
+ "source": [
+ "# Eager"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 301
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 5003,
+ "status": "ok",
+ "timestamp": 1531757478713,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "dhDf8LLdF_f-",
+ "outputId": "3de0a5a5-7a11-4b41-8ab0-e4e21ce8d59b"
+ },
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYkAAAEcCAYAAAAydkhNAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3XtYVWW+B/Dv2hshBdSQHaighhwas7Qeb6GFDjIyI3LZ\nGphdJLLMzqSlKaPQsTPm5KhZkZ7moKOnManGK17wsUfIS87QNj2jnEnIg5cQEtyAyDWBvdf5g4d9\nBPbaLGCvfeP7+QvW2mvt38tGvq71vut9BVEURRAREZmhsncBRETkuBgSREQkiSFBRESSGBJERCSJ\nIUFERJIYEkREJIkhQUREktzsXQCRvYWHh6OiogJqtRqiKEIQBMyePRtvv/22vUsjsjuGBBGA9PR0\nPPHEE3Z5b4PBALVabZf3JuoMbzcRATA38cCNGzeQmJiISZMmITQ0FMuXL0dtba1p//fffw+tVotx\n48bhjTfewNKlS5GWlmbaf+LECcTFxWHChAmYN28efvjhB9O+8PBwbNu2DTExMXj88cdhNBqVbSBR\nNzEkiCSIoohFixbhb3/7G44ePYqysjJs3rwZANDU1ITFixdjzpw5OHv2LGbNmoXjx4+bjv3++++R\nmpqKd999F2fPnsXcuXPx2muvoampyfSao0ePYtu2bTh37hxUKv5TJMfE30wiAL/97W8xceJETJgw\nARMnTsSePXswbNgwhIaGws3NDffffz8SExPx3XffAQAuXLgAg8GA559/Hmq1Gr/61a8wZswY0/n2\n7NmDZ555Bo8++igEQUBcXBzc3d1x8eJF02vmz58PPz8/uLu727y9RHKxT4IIwCeffNKhT6KyshJr\n167FuXPnUF9fD4PBgIEDBwIA9Ho9/Pz82rx+8ODBpq9/+uknHDx4ELt27QLQclXS3NyMW7dumV7j\n7++vVHOIrIYhQQTzfRKbNm2CIAg4cuQI+vfvj+zsbKxduxYAoNFoUFZW1ub1N2/exLBhwwC0BMCi\nRYvw6quvKl88kYJ4u4lIQl1dHTw9PeHl5YWysjJs377dtO+xxx6DWq1GRkYGDAYDsrOzkZeXZ9qf\nkJCAL7/80rStvr4ep06dQn19vc3bQdQTvJIgAvDaa69BpVKZnpOYMmUK3nzzTaxYsQLjx4/H8OHD\nERsbi08//RQA0KdPH2zevBmpqanYtGkTwsLCEB4ebupfeOSRR/Duu+9izZo1KCoqgoeHB8aNG4cJ\nEyYAAARBsFdTibpEUHLRocbGRjz33HNoamqCwWBAZGQkXn/9dRQXF2PZsmW4c+cORo8ejQ0bNsDN\njXlFzi0hIQHz5s2DVqu1dylEVqPo7SZ3d3fs3LkTmZmZyMzMxOnTp3Hx4kW8//77SEpKwldffQVv\nb2/s3btXyTKIFPHdd9+hvLwcBoMBBw4cwOXLl/HUU0/Zuywiq1K8T6Jv374AWq4qmpubIQgCdDod\nIiMjAQBarbbN+HIiZ3Ht2jXExsZi/Pjx+PTTT/Hxxx/D19fX3mURWZXi93iMRiNmz56NoqIiPPfc\ncwgMDET//v1NDw/5+/u3GRZI5CwSEhKQkJBg7zKIFKX4lYRKpTLdasrLy8OVK1c6vIadeEREjslm\nQ2C9vLwwYcIEXLx4EdXV1aa5akpLS/HAAw90eryC/etERCRB0dtNlZWV6NOnD7y9vfHzzz8jNzcX\nCxcuxKRJk3Ds2DHMnDkTBw4cwPTp0zs9lyAI0OtrlCzXrjQab7bPSbly2wC2z9lpNN49Ol7RkNDr\n9Vi5ciWMRiOMRiNmzpyJqVOnIigoCMuWLUNaWhpGjRqFp59+WskyiIiomxR9TsLaXD3t2T7n5Mpt\nA9g+Z9fTKwlOy0FERJIYEkREJIkhQUREkhgSREQkiSFBRESSGBJERCSJIUFERJIYEkREJIkhQURE\nkhgSREQkiSFBRESSGBJERCSJIUFERJIYEkREJIkhQUREkhgSREQkiSFBRESSGBJERCSJIUFERJIY\nEkREJIkhQUREkhgSREQkiSFBRESSGBJERCSJIUFERJIYEkREJIkhQUREkhgSREQkyU3Jk5eWliI5\nORnl5eVQq9VISEjACy+8gC1btmD37t0YNGgQAGDp0qUICwtTshQiIuoGRUNCrVZj1apVGDVqFOrq\n6jB79mxMnjwZAJCUlISkpCQl356IiHpI0ZDQaDTQaDQAAE9PT4wcORK3bt0CAIiiqORbExGRFdis\nT6K4uBgFBQUYM2YMACAjIwOxsbFITU1FTU2NrcogIqIusElI1NXVYcmSJUhJSYGnpyeeffZZZGdn\n4+DBg/D19cW6detsUQYREXWRICp836e5uRmvvvoqwsLCkJiY2GF/SUkJFi1ahMOHDytZBhERdYOi\nfRIAkJKSguDg4DYBodfrTX0Vx48fR0hIiKxz6fWue1tKo/Fm+5yUK7cNYPucnUbj3aPjFQ2J8+fP\n4/DhwwgJCUFcXBwEQcDSpUtx5MgR5OfnQ6VSYejQoVizZo2SZRARUTcpGhLjxo1Dfn5+h+18JoKI\nyDnwiWsiIpLEkCAiIkkMCSIiksSQICIiSQwJIiKSxJAgIiJJDAkiIpLEkCAiIkkMCSIiksSQICIi\nSQwJIiKSxJAgIiJJDAkiIpLEkCAiIkkMCSIiksSQICIiSQwJIiKSxJAgIiJJspcv/fnnn6HX6+Hh\n4YEHHnhAyZqIiMhBWAwJo9GIzMxM7NmzBwUFBfDy8kJjYyPc3NwQERGBF198EQ8++KCtaiUiIhuz\nGBLz5s3DY489hlWrVmH06NFQq9UAgIqKCnzzzTdYvXo1nnnmGURFRdmkWCIisi1BFEVRamdlZSV8\nfHwsnkDOa6xFr6+xyfvYg0bjzfY5KVduG8D2OTuNxrtHx1vsuDb3x7+iogIXLlyw+BoiInINskY3\nPfvss6ipqUF1dTXi4uKQmpqK9evXK10bERHZmayQqK+vh7e3N06cOIHo6GgcPnwYZ86cUbo2IiKy\nM1kh0djYCADQ6XSYPHkyVCqVqRObiIhcl6yQmDhxIiIjI3Hu3DlMnDgR1dXVUKn4HB4RkauT9TDd\nO++8g4KCAgQGBsLd3R21tbVYu3at0rUREZGdWQyJwsJC09d9+vRBaWmp6Xt3d3flqiIiIodgMSQW\nLlwIQRAgiiJu3rwJLy8vAEBtbS0GDx6Mr7/+2uLJS0tLkZycjPLycqjVasTHx2P+/Pm4c+cOli5d\nipKSEgQEBOCjjz6Ct3fPxvISEZH1WQyJ1hBYu3Ytxo0bh9/85jcAgGPHjuHSpUudnlytVmPVqlUY\nNWoU6urqMHv2bEyZMgX79+9HaGgoXnnlFWzduhXp6elYvny5FZpDRETWJKv3OS8vzxQQAPDrX/8a\n3377bafHaTQajBo1CgDg6emJkSNHoqysDDk5OdBqtQAArVaL7Ozs7tROREQKkxUSDQ0NOHfunOn7\nc+fOoaGhoUtvVFxcjIKCAowdOxYVF
RXw9fUF0BIkt2/f7tK5iIjINmSPblq2bBn69u0LALh79y42\nbdok+03q6uqwZMkSpKSkwNPTE4IgdKvYns5B4ujYPuflym0D2D65Tv+jGHty/hdFZTUY5ueN+On/\ngrDHA6xybnuRFRLjx49HdnY2rl27BlEUERQUJHt0U3NzM5YsWYLY2FhEREQAAAYNGoTy8nL4+vpC\nr9fLnv/J1SfhYvuckyu3DWD75NJdKkP6oe9N31+/WY2Nu86juvpnTHrYr8fn7y5FJ/i7l8FggLu7\nO9zc3FBUVNRmeKwlKSkpCA4ORmJiomlbeHg49u/fDwA4cOAApk+f3sWyiYgcS1budYntP9q0DmuT\ndSWRkZGB999/HwMHDjTdKhIEATk5ORaPO3/+PA4fPoyQkBDExcVBEAQsXboUr7zyCt58803s27cP\nQ4YMQVpaWs9bQkRkRz+V15vdfrOizsaVWJeskNixYweOHDmCoUOHdunk48aNQ35+vtl9n376aZfO\nRUTkyIb49kOxvmMgDB7kaYdqrEfW7SaNRtPlgCAi6k2iQkdIbB9u20KsTNaVxOTJk7FhwwZERUXB\nw8PDtD04OFixwoiIHInuUhmycq/jp/J6DPHth6jQEW06pFu/zsr9ETcr6jB4kCeiQofbtdPaGmSF\nRGZmJoCWJ61byemTICJyBe1HLhXr60zftw8KZw+F9mSFRGdzNBERuTJLI5dcLRTakxUSQMuMsDqd\nDgDwxBNPYOTIkYoVRUTUXZ3dFuoOVx25JIesjuvMzEy8+OKLyM/PR35+PpKSknDo0CGlayMi6pLW\n20LF+joYRdF0W0h3qaxH5x3i28/sdmcfuSSH7CGwBw4cgEajAQDo9XosWLAAMTExihZHRNQVSt0W\nigod0aZP4v+3O/fIJTlk325qDYj2XxMROQqlbgu56sglOWSFxLBhw/Dxxx9j7ty5EAQBu3fvRmBg\noNK1ERF1iZIPtLniyCU5ZPVJ/P73v8e1a9cQExODmJgYXL16FWvWrFG6NiKiLnHVB9rsSdaVxKBB\ng/Dhhx8qXQsRUY/05ttCSpEVElu3bkVCQgIGDhwIALh9+zb27duHl19+WdHiiIi6qrfeFlKKrNtN\nWVlZpoAAgPvvvx9HjhxRrCgiInIMskJCFMUO2wwGg9WLISIixyIrJEaMGIH/+q//giiKMBqN2LFj\nB4YNG6Z0bUREZGeyQiI1NRUnTpzAmDFj8Nhjj+HUqVNYvXq10rUREZGdyeq49vPzw86dO1Ff3/Kg\nSr9+5h9RJyIi1yK7T2LPnj345JNP0K9fPxQXF+O///u/la6NiIjsTFZIrFu3Dt9++y2ys7MBAJ6e\nnnjvvfcULYyIiOxP1u0mnU6HzMxMaLVaAC1DYO/evatoYUREligxJTh1JCskPDw8IAiC6Xuj0ahY\nQUREnZG7Uhz1nKzbTSEhITh06BBEUURxcTH+/d//HePGjVO6NiIisyxNCU7WJSskVq5cibNnz0Kv\n1yM+Ph4GgwErVqxQujYiIrN680pxtibrdpOXlxfWrl2rdC1ERLIoOSU4tSXrSuLo0aOora0FAKSl\npWHBggX45z//qWhhRES6S2VYvV2Hl9efwOrtOtMypJwS3HZkhcSf/vQneHl5IS8vD2fOnEFcXByv\nLIhIUZbWq570sB9ejRmNAI0X1CoBARovvBozmp3WCpB1u8nNreVlf/vb3xAfH4/o6Gjs2LFD0cKI\nqHfrbL1qTgluG7KuJARBwKFDh5CVlYXQ0FAAQFNTk6KFEVHvxs5pxyArJN5++20cO3YM8fHxCAwM\nxPXr1zFp0qROj0tJScHkyZMRHR1t2rZlyxaEhYVBq9VCq9Xi9OnT3a+eiFzWEF/zc8Sxc9q2BNHc\nYhFWcu7cOXh6eiI5ORmHDx8G0BISnp6eSEpK6vL59Poaa5foMDQab7bPSbly2wD7ta/9A3OtrN33\n0Bs+v56w2Cfxl7/8BYmJidiwYYPZ/cnJyRZPPn78eJSUlHTYrmAuEZGD6uo0Glyv2jFYDAkPDw8A\n1p8aPCMjAwcPHsQjjzyClStXwtu7Z0lHRI6tu9NosHPa/hS93QQAJSUlWLRokel2U2VlJe6//34I\ngoAPP/wQer2eM8oSubjF75/A9ZvVHbaPGNwfm5f/0g4VkVwWryQyMjIsHvzcc891+Q19fHxMXyck\nJGDRokWyj3X1+4Zsn3Ny5bYB1mlfUan542+U1dj9Z9cbPr+esBgS1niquv2Fil6vh0ajAQAcP34c\nISEhPX4PInJsnEbDeVkMiXXr1vXo5G+99RZ0Oh2qqqowbdo0LF68GDqdDvn5+VCpVBg6dCjWrFnT\no/cgIscXFTrC7EglTqPh+CyGxKlTpywePHXqVIv7N23a1GHbnDlzZJRFRK6EI5Wcl8WQ+POf/yy5\nTxCETkOCiKgVRyo5J4sh8dlnn9mqDiIickAWQ+LGjRsIDAxEYWGh2f3BwcGKFEVERI7BYkisXbsW\n6enpWLhwYYd9giAgJydHscKIiMj+LIZEeno6AODrr7+2STFERORYZK0nAQANDQ0oLS2FwWAwbePt\nJiLH1NV5koikyAqJnTt34sMPP8SAAQOgUrXMLs7bTUSOqbvzJBGZIysk/vKXv+DYsWPw8+MvGJGj\n62xFN6KukLXokL+/PwOCyElIrehWrK/F6u066C6V2bgicmayriQWL16M1NRUTJ061TR9OND5E9dE\nJM3a/Qat5zNamNiZt56oq2SFxIkTJ3DixAlcv369TZ8EQ4Koe6zdbyC1ipsU3noiuWSFxPHjx/H1\n11/jvvvuU7oeol6hJ/0G5q5ApM4n5WZFxxlZicyRFRKBgYFwc5M9WpaIOiHVb9DZH2+pKxBB6Nr7\nc4pukkvWX/7hw4cjMTERERERcHd3N23vzqJDRNT5+gpS/RVSVwxuKhWaDMYO2328PVBZc7fDdk7R\nTXLJCommpiYMGzYMly9fVroeol7B0voKlvorpK5Amo0dAwIA4n/Z8sArp+im7pIVEj1dfIiI2rK0\nvsLq7Tqzx6Qf+h591AKMho77hvp6ISp0uGQYMBSouzpdvvSRRx6R3N/Y2IgbN25g5MiRVi+MyNVJ\nra8gdbUAAE0G88NbWwOBYUDW1ukEfw0NDZg1axbGjh0LX19f3L17F9euXcM333yDU6dOYeXKlQwJ\nIiuS6q+4Vx+1CkZR5O0jUpzFkNi8eTPy8vLw17/+Ff/xH/+B0tJS9O3bFyEhIYiIiEBGRga8vLxs\nVStRryDVX3EvoyhiW/IvbVQR9Wad9kmMGTMGY8aMsUUtRISW21B7ThSaHZXUikNYyVZkzd1ERLbV\nOipJCoewkq3wCTkiO+hs3qZJD/uhsOQOcs4Xdzh2+rgA9kGQzTAkiGxM7rxNz/0qBMFDB/AZB7Ir\nhgSRjXVl3iYOayV7k9UnUVFRgeXLl5um4SgoKMAXX3yhaGFErqq78zYR2YOskHj77bcxbtw4VF
dX\nAwCCgoLw+eefK1oYkasa4tvP7HaOWCJHJCskysrKMG/ePKjVagCAu7u7aV0JIuqaqNAREts5Yokc\nj6w+ifbThFdXV0O0sPoVUW/TlVXmLM3bRORoZIXEjBkzsHr1atTV1WH//v34/PPPMWfOnE6PS0lJ\nwcmTJzFo0CAcPnwYAHDnzh0sXboUJSUlCAgIwEcffQRvb++etYLIjk7/o7jLq8yxQ5qchax7Ri+/\n/DLGjx+P0aNH49SpU3jhhReQmJjY6XGzZ8/G9u3b22zbunUrQkND8dVXX2HSpElIT0/vXuVEDmJP\nzv+a3Z6V+6ONKyGyPtlDYGNiYhATE9Olk48fPx4lJSVttuXk5GDXrl0AAK1WixdeeAHLly/v0nmJ\nHElRWY3Z7RytRK5AVkhUVFRg165dKCoqQnNzs2l7Wlpal9+wsrISvr6+AACNRoPbt293+RxE1tCV\nfgRLrx3m543rN6s7HMPRSuQKZIXEv/7rv+Lhhx9GaGioaYSTPWg0rt13wfbZjlQ/wtbD32O4f3/E\nT/8XhD0eYPG1/fvfh7DHAxA//V+wcdf5Du8xL/Ihh2pzT7hKO6S4evt6QlZINDQ04J133rHKGw4a\nNAjl5eXw9fWFXq+Hj4+P7GP1evOX9a5Ao/Fm+2zoi68KzG4XReD6zWps3HUe2w/+E/G/DJZ8QvqL\nr37AqIABCHs8ANXVP3cYrTQqYIBDtbm7HO2zs7be0L6ekBUSY8eOxQ8//ICHHnqoy2/QfqhseHg4\n9u/fj4ULF+LAgQOYPn16l89J1FOWVn9rVVlzF+mHvocgmN9/b58DRyuRq5IVEs888wyef/55+Pv7\nw8PDw7R97969Fo976623oNPpUFVVhWnTpmHx4sVYuHAh3njjDezbtw9DhgzpVr8GUU/JWf2tlZtK\nhSaDscN29jlQbyArJFasWIFFixbh4Ycf7lKfxKZNm8xu//TTT2Wfg8iaWjugS8rljzxqNnYMCIBP\nSFPvICskPDw8sGDBAqVrIeoWuaOU2k/RLddQXy9EhQ7nE9LUK8kKiaeeegqnT59GWFiY0vUQdYnc\ntRkA6Sm6AzQtIbDnZCEqqzsuGdoaCAwF6o1khcTu3buxdetWeHp6wt3dHaIoQhAE5ObmKl0fkUWW\n1mZo3d96hSF1i+lmRZ0pBFquSnjFQNRKVkjs27dP6TqIukVqlFJJeW2HKwwp93ZA84qBqC1ZITF0\n6FCl6yDqFqlRSlIjksxhBzSRNIshsWLFCmzcuBFz5syBYGaweGdDYImUcG9H9UAvd7OvkRqRJAgt\nHdG8nUQkj8WQaJ3p9Xe/+51NiiEyp30oVNb8f+dy69c+3h64U9do+sOflXvd7BXGUF8vrFkw0UaV\nEzk/iyHx+eef47333sPEifxHRfbRfvTSvQFxr3739cH7v53SZpu54a68tUTUNRZDIj8/31Z1EJm1\n50ShrNe1n5abq78RWYfs9SSIlNT+gbiHht2PH4puS145tGduigyOVCLqOYshcfnyZYSGhnbYzuck\nyJrMPRAnd16lVryNRKQMiyExYsQIbN261Va1UC8l9UCcHH3UKrwUNYpXDEQKsRgS7u7ufEaCFCdn\n2m4pDAgiZaks7ezTp4+t6qBebIhvP9mv7aNWQSW0zLf0asxoBgSRwixeSezevdtWdVAvFhU6Qtbs\nrAwFItvj6Cayu9Y//FKzsPr090D8tGAGBJEdMCTI6syt7zBrquV1djkLK5FjEsT2i1A7MFdfrNzZ\n2mcuDADzTzq3zJnkKbkgkDNzxs+uK9g+56bRWP4PWmd4JUHdIrXYj4+3h9nXi6LlBYGIyDFZHN1E\nJEXq2QY5T0i3LghERI6PIUHd0pNnG9rPs0REjou3m0iW9v0P/e5zQ21DU7fOZW6eJSJyTAwJ6pS5\n/oee4DxLRM6DIUGd6sncSq1UAjDE14tDWomcDEOCOtWd/of2K8XNmhrs0sMMiVwVQ4I6NcS3n+xb\nTHw6msi1MCSoU3LmVgrQ8FYSkStiSFCnTHMrnSg0+xwEJ94jcl12C4nw8HB4eXlBpVLBzc0Ne/fu\ntVcpJAPnViLqnewWEoIg4LPPPsOAAQPsVUKvZG6+pa78kee60US9i91CQhRFGI1Ge719ryQ13xLA\nuZSIyDy7XkksWLAAgiBg7ty5SEhIsFcpLkfqakHqeYes3B8ZEkRklt1C4ssvv4RGo0FlZSWSkpIQ\nFBSE8ePH26sclyF1tVBYckfyeQfOpUREUhxiPYktW7bA09MTSUlJ9i7F6S1+/wSu36w2u893YF+U\nVzV02D5icH9sXv5LpUsjIidklyuJhoYGGI1GeHp6or6+HmfOnMHrr7/e6XGu/MSutRY+KSqVPodU\nH1DkhEDFf7auvLCLK7cNYPucnVMuOlReXo7XX38dgiDAYDAgOjoaTz75pD1KcTmWno6+U9uIV2NG\ncwgrEclml5AIDAzEwYMH7fHWLs/S09GDB3lyCCsRdQkXHXIxkx72w/RxAWb3cYpuIuoqTsvhgp77\nVQiChw7gbSUi6jGGhIvibSUisgbebiIiIkkMCSIiksSQICIiSeyTsKGezsBKRGRrDAkbyTh+GTnn\ni03fcwZWInIGDAmF6S6VSa7oBnAGViJybAwJBbWfkdUczsBKRI6MIWEl5vobpNZvuNfgQZ5Kl0ZE\n1G0MCSuQWsNBEDo/llNlEJEj4xBYK5C6YnBTWf7xTh8XwP4IInJovJKwAqkV35ol1m/w8fZA/C+D\nGRBE5PAYElYgtYbDUF8vRIUO50R7ROS0GBKdkPMAnNQaDq2BwFAgImfFkLBAqkMaaPsAXOvXvGIg\nIlfDkGjn3isHtUS/s7kH4HjFQESuiCFxj/ZXDkaD+dfxATgi6i04BPYech5+A/gAHBH1HgyJe0gN\nZW2PD8ARUW/B2033kBrK2ketglEU2SFNRL0OQ+IeUkNZX4oaxWAgol6JIXEPDmUlImqLIdEOh7IS\nEf0/dlwTEZEkp76S4JrRRETKctqQkDtlBhERdZ/dbjedPn0av/71rxEZGYmtW7d2+XipB9+ycn/s\nWWFERGRil5AwGo149913sX37dhw5cgRZWVm4cuVKl84h9eAbp8wgIrIeu4REXl4ehg8fjqFDh6JP\nnz6IiopCTk6OxWNiVxzC6u066C6VAWh58M0cTplBRGQ9dgmJsrIyDB482PS9n58fbt26ZfEYo1E0\n9TvoLpUhKnSE2ddxygwiIuuxS0iIotij41un6n41ZjQCNF5QqwQEaLzwasxodloTEVmRXUY3+fv7\n46effjJ9X1ZWhgceeED28Tcr6qDReGPWVG/MmhqsRIl2odF427sERbly+1y5bQDb15vZJSQeffRR\nFBUVoaSkBBqNBllZWfjggw8sHnN4U6yNqiMiolZ2CQm1Wo1/+7d/w0svvQRRFPH0009j5MiR9iiF\niIgsEMSedhAQEZHL4txNREQkiSFBRESSGBJERCTJ4
UOip3M8OaLw8HDExMQgLi4OTz/9NADgzp07\neOmllxAZGYkFCxagpqbGzlXKl5KSgsmTJyM6Otq0zVJ71q5dixkzZiA2Nhb5+fn2KLlLzLVvy5Yt\nCAsLg1arhVarxenTp0370tPTMWPGDPzmN7/BmTNn7FGybKWlpZg/fz5mzpyJ6Oho7Ny5E4DrfH7t\n2/fZZ58BcJ3Pr7GxEfHx8YiLi0N0dDS2bNkCACguLkZCQgIiIyOxbNkyNDc3m16/dOlSzJgxA3Pn\nzm3zKIIk0YEZDAYxIiJCLC4uFhsbG8WYmBixsLDQ3mX1WHh4uFhVVdVm24YNG8StW7eKoiiK6enp\n4saNG+1RWrd899134qVLl8RZs2aZtkm15+TJk+Irr7wiiqIoXrhwQYyPj7d9wV1krn2bN28Wd+zY\n0eG1hYWFYmxsrNjU1CTeuHFDjIiIEI1Goy3L7ZJbt26Jly5dEkVRFGtra8UZM2aIhYWFLvP5SbXP\nVT4/URTF+vp6URRFsbm5WYyPjxcvXLggvvHGG+LRo0dFURTF1atXi1988YUoiqKYkZEhvvPOO6Io\nimJWVpb45ptvdnp+h76S6M4cT85AFEUYjcY223JycqDVagEAWq0W2dnZ9iitW8aPH4/+/fu32da+\nPa2fW04mDIcbAAAJLUlEQVRODuLi4gAAY8eORU1NDcrLy21bcBeZax9gfuaAnJwczJw5E25ubggI\nCMDw4cORl5dnizK7RaPRYNSoUQAAT09PjBw5EmVlZS7z+ZlrX+sUQK7w+QFA3759AbRcJTQ3N0MQ\nBOh0OkRGRgJo+/fk3s81MjISubm5nZ7foUOiO3M8OQNBELBgwQLMmTMHe/bsAQBUVFTA19cXQMsv\n9u3bt+1ZYo9VVla2aU9lZSUA4NatW/D39ze9zs/PD2VlZXapsacyMjIQGxuL1NRU0+0Yc7+zztK+\n4uJiFBQUYOzYsR1+H13h82tt35gxYwC4zudnNBoRFxeHKVOmYMqUKQgMDET//v2hUrX8eff39ze1\n4d7PT61Wo3///qiqqrJ4focOCXNJ7wq+/PJL7N+/H9u2bUNGRgbOnTsHQRDsXZZNmPtMnbHtzz77\nLLKzs3Hw4EH4+vrij3/8IwDnbV9dXR2WLFmClJQUeHp6StbsKu1zpc9PpVIhMzMTp0+fRl5entll\nF1rb0L59oih22j6HDomezvHkqDQaDQDAx8cHERERyMvLw6BBg0yX7Xq9Hj4+PvYsscek2uPn54fS\n0lLT60pLS53yM/Xx8TH940pISDDdkvD398fNmzdNr3OG9jU3N2PJkiWIjY1FREQEANf6/My1z5U+\nv1ZeXl6YMGECLl68iOrqatMt7XvbcO/nZzAYUFtbiwEDBlg8r0OHxL1zPDU2NiIrKwvTp0+3d1k9\n0tDQgLq6loWR6uvrcebMGYSEhCA8PBz79+8HABw4cMDp2tn+fyhS7Zk+fToyMzMBABcuXED//v1N\ntzUcWfv26fV609fHjx9HSEgIgJZ2Hz16FI2Njbhx4waKiopMtzccVUpKCoKDg5GYmGja5kqfn7n2\nucrnV1lZabpV9vPPPyM3NxfBwcGYNGkSjh07BqDt5xceHo4DBw4AAI4dO4Ynnnii0/dw+Gk5Tp8+\njT/84Q+mOZ4WLlxo75J65MaNG3j99dchCAIMBgOio6OxcOFCVFVV4c0338TNmzcxZMgQpKWlme0s\ndURvvfUWdDodqqqq4Ovri8WLFyMiIgJvvPGG2fasWbMG33zzDfr27Yt169Zh9OjRdm6BZebap9Pp\nkJ+fD5VKhaFDh2LNmjWmP5bp6enYu3cv3NzckJqaiieffNLOLZB2/vx5PP/88wgJCYEgCBAEAUuX\nLsWYMWMkfx+d6fOTat+RI0dc4vP74YcfsHLlShiNRhiNRsycOROvvfYabty4gWXLlqG6uhqjRo3C\nxo0b0adPHzQ2NmLFihXIz8/HwIED8cEHHyAgIMDiezh8SBARkf049O0mIiKyL4YEERFJYkgQEZEk\nhgQREUliSBARkSSGBBERSWJIkMNrbm5GWloaIiMjER0djaioKKxfvx4Gg8HicatWrUJGRgaAlqmh\nN2zY0Ol7ZWdn43/+53+sUrcSSkpKsHv3bnuXQb0IQ4Ic3sqVK3HlyhVkZmbi8OHDOHToEIKCgtDY\n2Gj198rJyXHoWT+Li4vx17/+tVvHdhaqROa42bsAIkt+/PFH5OTkmJ7wBVpmr4yPjwfQMgPmxo0b\nTYvDPPnkk0hOTrY4adnly5fx+9//Hg0NDWhsbERCQgLmz5+PM2fO4Ouvv0Zubi727t2LF198EcXF\nxTh+/DgEQUBjYyOuXr2K7777Dl5eXm3O+Y9//AMbN25EXV0dBEFAcnIyJk+ejLy8PLz33ntoaGhA\n3759kZqaikcffRRnz57F+vXrsW/fPgBo8/3Zs2fx3nvvYcyYMbhw4QJUKhU++OADBAUF4d1330VJ\nSQm0Wi2GDRuGtLQ0XL16FevWrUNVVRWampowf/58zJ49GwDwi1/8AitWrMDJkycxYcIELFmyxOqf\nEbk4q6x6QaSQo0ePinFxcZL7P//8czEpKUlsbm4Wm5qaxMTERNMCKytXrhR37dolimLLIkHr168X\nRVEU6+rqxMbGRtPXM2fOFK9cudLhmPZWrFgh/vGPf+ywvaqqSpwyZYp44cIFURRF0Wg0itXV1WJj\nY6M4bdo0MTc3VxRFUfz73/8uTps2TWxqahJ1Op04Z84c0znu/V6n04mjR48W8/PzRVEUxT/96U/i\n8uXLO7xOFFsWmtFqteLVq1dFUWxZWCcyMtL0/UMPPST++c9/lvz5EXWGVxLk0MROZo3Jzc2FVquF\nWq0GAMyePRvZ2dl45plnJI9paGjAO++8g4KCAqhUKuj1ehQUFCAoKEjymI8++ggNDQ343e9+12Hf\nhQsXEBwcjLFjxwJomZbZ29sbly9fhru7u2kStdDQULi7u+PatWudtvvBBx/EL37xCwAti/ucPHnS\n7OuuX7+Oq1evYtmyZaafVVNTE65cuYIHH3wQAEyLBBF1B0OCHNro0aNx/fp11NTUwNvbu8N+0cx8\n+J3Nj//BBx9Ao9Fgw4YNpgWgLPVv7Nu3D99++61p/WdzNcjd3lqvWq1uszrh3bt327zOw8PD9LVa\nrTatUWzufD4+PqaZPdsTBAH9+vUzu49IDnZck0MbPnw4wsPDsXr1atMU6waDATt37kRDQwMmT56M\nAwcOoLm5GU1NTcjMzMSUKVMsnrOmpgaDBw+GIAi4fPkyzp07Z9rn6emJ2tpa0/d///vfsW3bNnzy\nySdwd3c3e77HH38chYWFuHjxIoCWfpLq6moEBQWhqakJZ8+eBQB8++23aG5uxogRIxAQEIDi4mLU\n1NRAFEVkZWXJ+nl4eXmZpoYGWq447rvvPhw8eNC07erVq6afVWdXYkSd4ZUEObz169dj8+bNmD17\nNtzd3SGK
IsLCwuDu7o65c+eiqKjItG7vU089ZerUlvLaa68hOTkZhw4dwrBhwzBhwgTTvtjYWKxa\ntQrHjh3Diy++iH379qGhoQELFiwwXQVkZGS0+d/5gAEDsGXLFqxbtw719fVQq9VITk5GaGgoPv74\nY6xdu9bUcb1582a4ubnBz88PSUlJ0Gq1CAwMxKOPPorCwsJOfxYPPfQQHnzwQURHRyMoKAhpaWn4\nz//8T/zhD3/Ajh07YDAY4Ovri48++giA46+qRo6PU4UTEZEk3m4iIiJJDAkiIpLEkCAiIkkMCSIi\nksSQICIiSQwJIiKSxJAgIiJJDAkiIpL0f3zF2/hGE4QYAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ "\u003cmatplotlib.figure.Figure at 0x7fc3af690a50\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "with context.eager_mode():\n",
+ "\n",
+ " counts = []\n",
+ " times = [] \n",
+ " for n in np.logspace(0, 7, 50):\n",
+ "\n",
+ " n_tensor = tf.constant(n, dtype=tf.float32)\n",
+ " count = collatz(n_tensor)\n",
+ "\n",
+ " res = %timeit -n10 -r1 -o -q collatz(n_tensor)\n",
+ " times.append(res.best)\n",
+ " counts.append(count)\n",
+ " \n",
+ "plot_results(counts, times, 'Eager')\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [
+ "x5ChBlH09jk_",
+ "_cRFTcwT9mnn"
+ ],
+ "default_view": {},
+ "last_runtime": {
+ "build_target": "",
+ "kind": "local"
+ },
+ "name": "Autograph vs. Eager Collatz speed test",
+ "provenance": [
+ {
+ "file_id": "0B8bm7KvwJklpMUQtbnVpYkdJUjRtOTRyWVVfSEhpRl9HYm5n",
+ "timestamp": 1531512047714
+ }
+ ],
+ "version": "0.3.2",
+ "views": {}
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
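Note on the benchmark in the notebook above: the AutoGraph cell times a function that is staged once with `ag.to_graph` and then executed repeatedly through `sess.run`, so graph-construction cost is paid only once. The notebook's own `collatz` and `plot_results` are defined earlier in the file and do not appear in this diff. The sketch below is a plausible stand-in for the staging pattern, assuming the 2018 contrib-era API; the Collatz body is illustrative, not the notebook's exact code:

    import tensorflow as tf
    from tensorflow.contrib import autograph as ag  # contrib-era API, as in this diff

    def collatz(n):
      # Hypothetical stand-in for the notebook's collatz (defined outside this diff).
      count = 0
      while n > 1:
        if n % 2 == 0:
          n = n / 2
        else:
          n = 3 * n + 1
        count += 1
      return count

    tf_collatz = ag.to_graph(collatz)  # stage the Python function as graph-building code
    with tf.Graph().as_default():
      count = tf_collatz(tf.constant(100, dtype=tf.float32))
      with tf.Session() as sess:
        print(sess.run(count))  # runs the staged while-loop in-graph

The eager cell, by contrast, re-executes the Python function on every call, which is what the two timing plots compare.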
diff --git a/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_mnist_speed_test.ipynb b/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_mnist_speed_test.ipynb
new file mode 100644
index 0000000000..952ec091fb
--- /dev/null
+++ b/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_mnist_speed_test.ipynb
@@ -0,0 +1,652 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "etTmZVFN8fYO"
+ },
+ "source": [
+ "This notebook runs a basic speed test for a short training loop of a neural network training on the MNIST dataset."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "eqOvRhOz8SWs"
+ },
+ "source": [
+ "### Imports"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "nHY0tntRizGb"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install -U -q tf-nightly"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "Pa2qpEmoVOGe"
+ },
+ "outputs": [],
+ "source": [
+ "import gzip\n",
+ "import os\n",
+ "import shutil\n",
+ "import time\n",
+ "\n",
+ "import numpy as np\n",
+ "import six\n",
+ "from six.moves import urllib\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "from tensorflow.contrib import autograph as ag\n",
+ "from tensorflow.contrib.eager.python import tfe\n",
+ "from tensorflow.python.eager import context\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "PZWxEJFM9A7b"
+ },
+ "source": [
+ "### Testing boilerplate"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "kfZk9EFZ5TeQ"
+ },
+ "outputs": [],
+ "source": [
+ "# Test-only parameters. Test checks successful completion not correctness. \n",
+ "burn_ins = 1\n",
+ "trials = 1\n",
+ "max_steps = 2\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "k0GKbZBJ9Gt9"
+ },
+ "source": [
+ "### Speed test configuration"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "gWXV8WHn43iZ"
+ },
+ "outputs": [],
+ "source": [
+ "#@test {\"skip\": true} \n",
+ "burn_ins = 3\n",
+ "trials = 10\n",
+ "max_steps = 500\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "kZV_3pGy8033"
+ },
+ "source": [
+ "### Data source setup"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "YfnHJbBOBKae"
+ },
+ "outputs": [],
+ "source": [
+ "def download(directory, filename):\n",
+ " filepath = os.path.join(directory, filename)\n",
+ " if tf.gfile.Exists(filepath):\n",
+ " return filepath\n",
+ " if not tf.gfile.Exists(directory):\n",
+ " tf.gfile.MakeDirs(directory)\n",
+ " url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'\n",
+ " zipped_filepath = filepath + '.gz'\n",
+ " print('Downloading %s to %s' % (url, zipped_filepath))\n",
+ " urllib.request.urlretrieve(url, zipped_filepath)\n",
+ " with gzip.open(zipped_filepath, 'rb') as f_in, open(filepath, 'wb') as f_out:\n",
+ " shutil.copyfileobj(f_in, f_out)\n",
+ " os.remove(zipped_filepath)\n",
+ " return filepath\n",
+ "\n",
+ "\n",
+ "def dataset(directory, images_file, labels_file):\n",
+ " images_file = download(directory, images_file)\n",
+ " labels_file = download(directory, labels_file)\n",
+ "\n",
+ " def decode_image(image):\n",
+ " # Normalize from [0, 255] to [0.0, 1.0]\n",
+ " image = tf.decode_raw(image, tf.uint8)\n",
+ " image = tf.cast(image, tf.float32)\n",
+ " image = tf.reshape(image, [784])\n",
+ " return image / 255.0\n",
+ "\n",
+ " def decode_label(label):\n",
+ " label = tf.decode_raw(label, tf.uint8)\n",
+ " label = tf.reshape(label, [])\n",
+ " return tf.to_int32(label)\n",
+ "\n",
+ " images = tf.data.FixedLengthRecordDataset(\n",
+ " images_file, 28 * 28, header_bytes=16).map(decode_image)\n",
+ " labels = tf.data.FixedLengthRecordDataset(\n",
+ " labels_file, 1, header_bytes=8).map(decode_label)\n",
+ " return tf.data.Dataset.zip((images, labels))\n",
+ "\n",
+ "\n",
+ "def mnist_train(directory):\n",
+ " return dataset(directory, 'train-images-idx3-ubyte',\n",
+ " 'train-labels-idx1-ubyte')\n",
+ "\n",
+ "def mnist_test(directory):\n",
+ " return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')\n",
+ "\n",
+ "def setup_mnist_data(is_training, hp, batch_size):\n",
+ " if is_training:\n",
+ " ds = mnist_train('/tmp/autograph_mnist_data')\n",
+ " ds = ds.cache()\n",
+ " ds = ds.shuffle(batch_size * 10)\n",
+ " else:\n",
+ " ds = mnist_test('/tmp/autograph_mnist_data')\n",
+ " ds = ds.cache()\n",
+ " ds = ds.repeat()\n",
+ " ds = ds.batch(batch_size)\n",
+ " return ds\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "qzkZyZcS9THu"
+ },
+ "source": [
+ "### Keras model definition"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "x_MU13boiok2"
+ },
+ "outputs": [],
+ "source": [
+ "def mlp_model(input_shape):\n",
+ " model = tf.keras.Sequential((\n",
+ " tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape),\n",
+ " tf.keras.layers.Dense(100, activation='relu'),\n",
+ " tf.keras.layers.Dense(10, activation='softmax')))\n",
+ " model.build()\n",
+ " return model\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "DXt4GoTxtvn2"
+ },
+ "source": [
+ "# AutoGraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "W51sfbONiz_5"
+ },
+ "outputs": [],
+ "source": [
+ "def predict(m, x, y):\n",
+ " y_p = m(x)\n",
+ " losses = tf.keras.losses.categorical_crossentropy(y, y_p)\n",
+ " l = tf.reduce_mean(losses)\n",
+ " accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
+ " accuracy = tf.reduce_mean(accuracies)\n",
+ " return l, accuracy\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "CsAD0ajbi9iZ"
+ },
+ "outputs": [],
+ "source": [
+ "def fit(m, x, y, opt):\n",
+ " l, accuracy = predict(m, x, y)\n",
+ " opt.minimize(l)\n",
+ " return l, accuracy\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "RVw57HdTjPzi"
+ },
+ "outputs": [],
+ "source": [
+ "def get_next_batch(ds):\n",
+ " itr = ds.make_one_shot_iterator()\n",
+ " image, label = itr.get_next()\n",
+ " x = tf.to_float(tf.reshape(image, (-1, 28 * 28)))\n",
+ " y = tf.one_hot(tf.squeeze(label), 10)\n",
+ " return x, y\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "UUI0566FjZPx"
+ },
+ "outputs": [],
+ "source": [
+ "def train(train_ds, test_ds, hp):\n",
+ " m = mlp_model((28 * 28,))\n",
+ " opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
+ "\n",
+ " train_losses = []\n",
+ " test_losses = []\n",
+ " train_accuracies = []\n",
+ " test_accuracies = []\n",
+ " ag.set_element_type(train_losses, tf.float32)\n",
+ " ag.set_element_type(test_losses, tf.float32)\n",
+ " ag.set_element_type(train_accuracies, tf.float32)\n",
+ " ag.set_element_type(test_accuracies, tf.float32)\n",
+ "\n",
+ " i = tf.constant(0)\n",
+ " while i \u003c hp.max_steps:\n",
+ " train_x, train_y = get_next_batch(train_ds)\n",
+ " test_x, test_y = get_next_batch(test_ds)\n",
+ " step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt)\n",
+ " step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
+ "\n",
+ " train_losses.append(step_train_loss)\n",
+ " test_losses.append(step_test_loss)\n",
+ " train_accuracies.append(step_train_accuracy)\n",
+ " test_accuracies.append(step_test_accuracy)\n",
+ "\n",
+ " i += 1\n",
+ " return (ag.stack(train_losses), ag.stack(test_losses),\n",
+ " ag.stack(train_accuracies), ag.stack(test_accuracies))\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 215
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 12156,
+ "status": "ok",
+ "timestamp": 1531752050611,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "K1m8TwOKjdNd",
+ "outputId": "bd5746f2-bf91-44aa-9eff-38eb11ced33f"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "('Duration:', 0.6226680278778076)\n",
+ "('Duration:', 0.6082069873809814)\n",
+ "('Duration:', 0.6223258972167969)\n",
+ "('Duration:', 0.6176440715789795)\n",
+ "('Duration:', 0.6309840679168701)\n",
+ "('Duration:', 0.6180410385131836)\n",
+ "('Duration:', 0.6219630241394043)\n",
+ "('Duration:', 0.6183009147644043)\n",
+ "('Duration:', 0.6176400184631348)\n",
+ "('Duration:', 0.6476900577545166)\n",
+ "('Mean duration:', 0.62254641056060789, '+/-', 0.0099792188690656976)\n"
+ ]
+ }
+ ],
+ "source": [
+ "#@test {\"timeout\": 90}\n",
+ "with tf.Graph().as_default():\n",
+ " hp = tf.contrib.training.HParams(\n",
+ " learning_rate=0.05,\n",
+ " max_steps=max_steps,\n",
+ " )\n",
+ " train_ds = setup_mnist_data(True, hp, 500)\n",
+ " test_ds = setup_mnist_data(False, hp, 100)\n",
+ " tf_train = ag.to_graph(train)\n",
+ " losses = tf_train(train_ds, test_ds, hp)\n",
+ "\n",
+ " with tf.Session() as sess:\n",
+ " durations = []\n",
+ " for t in range(burn_ins + trials):\n",
+ " sess.run(tf.global_variables_initializer())\n",
+ "\n",
+ " start = time.time()\n",
+ " (train_losses, test_losses, train_accuracies,\n",
+ " test_accuracies) = sess.run(losses)\n",
+ "\n",
+ " if t \u003c burn_ins:\n",
+ " continue\n",
+ "\n",
+ " duration = time.time() - start\n",
+ " durations.append(duration)\n",
+ " print('Duration:', duration)\n",
+ "\n",
+ " print('Mean duration:', np.mean(durations), '+/-', np.std(durations))\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "A06kdgtZtlce"
+ },
+ "source": [
+ "# Eager"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "hBKOKGrWty4e"
+ },
+ "outputs": [],
+ "source": [
+ "def predict(m, x, y):\n",
+ " y_p = m(x)\n",
+ " losses = tf.keras.losses.categorical_crossentropy(tf.cast(y, tf.float32), y_p)\n",
+ " l = tf.reduce_mean(losses)\n",
+ " accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
+ " accuracy = tf.reduce_mean(accuracies)\n",
+ " return l, accuracy\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "HCgTZ0MTt6vt"
+ },
+ "outputs": [],
+ "source": [
+ "def train(ds, hp):\n",
+ " m = mlp_model((28 * 28,))\n",
+ " opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
+ "\n",
+ " train_losses = []\n",
+ " test_losses = []\n",
+ " train_accuracies = []\n",
+ " test_accuracies = []\n",
+ "\n",
+ " i = 0\n",
+ " train_test_itr = tfe.Iterator(ds)\n",
+ " for (train_x, train_y), (test_x, test_y) in train_test_itr:\n",
+ " train_x = tf.to_float(tf.reshape(train_x, (-1, 28 * 28)))\n",
+ " train_y = tf.one_hot(tf.squeeze(train_y), 10)\n",
+ " test_x = tf.to_float(tf.reshape(test_x, (-1, 28 * 28)))\n",
+ " test_y = tf.one_hot(tf.squeeze(test_y), 10)\n",
+ "\n",
+ " if i \u003e hp.max_steps:\n",
+ " break\n",
+ "\n",
+ " with tf.GradientTape() as tape:\n",
+ " step_train_loss, step_train_accuracy = predict(m, train_x, train_y)\n",
+ " grad = tape.gradient(step_train_loss, m.variables)\n",
+ " opt.apply_gradients(zip(grad, m.variables))\n",
+ " step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
+ "\n",
+ " train_losses.append(step_train_loss)\n",
+ " test_losses.append(step_test_loss)\n",
+ " train_accuracies.append(step_train_accuracy)\n",
+ " test_accuracies.append(step_test_accuracy)\n",
+ "\n",
+ " i += 1\n",
+ " return train_losses, test_losses, train_accuracies, test_accuracies\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 215
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 52499,
+ "status": "ok",
+ "timestamp": 1531752103279,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "plv_yrn_t8Dy",
+ "outputId": "55d5ab3d-252d-48ba-8fb4-20ec3c3e6d00"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "('Duration:', 3.9973549842834473)\n",
+ "('Duration:', 4.018772125244141)\n",
+ "('Duration:', 3.9740989208221436)\n",
+ "('Duration:', 3.9922947883605957)\n",
+ "('Duration:', 3.9795801639556885)\n",
+ "('Duration:', 3.966722011566162)\n",
+ "('Duration:', 3.986541986465454)\n",
+ "('Duration:', 3.992305040359497)\n",
+ "('Duration:', 4.012261867523193)\n",
+ "('Duration:', 4.004716157913208)\n",
+ "('Mean duration:', 3.9924648046493529, '+/-', 0.015681688635624851)\n"
+ ]
+ }
+ ],
+ "source": [
+ "#@test {\"timeout\": 90}\n",
+ "with context.eager_mode():\n",
+ " durations = []\n",
+ " for t in range(burn_ins + trials):\n",
+ " hp = tf.contrib.training.HParams(\n",
+ " learning_rate=0.05,\n",
+ " max_steps=max_steps,\n",
+ " )\n",
+ " train_ds = setup_mnist_data(True, hp, 500)\n",
+ " test_ds = setup_mnist_data(False, hp, 100)\n",
+ " ds = tf.data.Dataset.zip((train_ds, test_ds))\n",
+ " start = time.time()\n",
+ " (train_losses, test_losses, train_accuracies,\n",
+ " test_accuracies) = train(ds, hp)\n",
+ " \n",
+ " train_losses[-1].numpy()\n",
+ " test_losses[-1].numpy()\n",
+ " train_accuracies[-1].numpy()\n",
+ " test_accuracies[-1].numpy()\n",
+ "\n",
+ " if t \u003c burn_ins:\n",
+ " continue\n",
+ "\n",
+ " duration = time.time() - start\n",
+ " durations.append(duration)\n",
+ " print('Duration:', duration)\n",
+ "\n",
+ " print('Mean duration:', np.mean(durations), '+/-', np.std(durations))\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [
+ "eqOvRhOz8SWs",
+ "PZWxEJFM9A7b",
+ "kZV_3pGy8033"
+ ],
+ "default_view": {},
+ "name": "Autograph vs. Eager MNIST speed test",
+ "provenance": [
+ {
+ "file_id": "1tAQW5tHUgAc8M4-iwwJm6Xs6dV9nEqtD",
+ "timestamp": 1530297010607
+ },
+ {
+ "file_id": "18dCjshrmHiPTIe1CNsL8tnpdGkuXgpM9",
+ "timestamp": 1530289467317
+ },
+ {
+ "file_id": "1DcfimonWU11tmyivKBGVrbpAl3BIOaRG",
+ "timestamp": 1522272821237
+ },
+ {
+ "file_id": "1wCZUh73zTNs1jzzYjqoxMIdaBWCdKJ2K",
+ "timestamp": 1522238054357
+ },
+ {
+ "file_id": "1_HpC-RrmIv4lNaqeoslUeWaX8zH5IXaJ",
+ "timestamp": 1521743157199
+ },
+ {
+ "file_id": "1mjO2fQ2F9hxpAzw2mnrrUkcgfb7xSGW-",
+ "timestamp": 1520522344607
+ }
+ ],
+ "version": "0.3.2",
+ "views": {}
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
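The least obvious piece of the AutoGraph training loop in the notebook above is the list handling: plain Python lists are annotated with `ag.set_element_type` so they can be staged as TensorArrays, then collapsed into a dense tensor with `ag.stack`. A minimal, self-contained sketch of that pattern under the same contrib-era API (the function and variable names here are illustrative only):

    import tensorflow as tf
    from tensorflow.contrib import autograph as ag

    def collect_squares(n):
      values = []
      ag.set_element_type(values, tf.int32)  # staged as a TensorArray of int32
      i = 0
      while i < n:
        values.append(i * i)
        i += 1
      return ag.stack(values)  # TensorArray -> dense Tensor

    with tf.Graph().as_default():
      squares = ag.to_graph(collect_squares)(tf.constant(5))
      with tf.Session() as sess:
        print(sess.run(squares))  # expected: [ 0  1  4  9 16]

Without the `set_element_type` annotation, AutoGraph has no dtype to give the staged list, which is why the training loop declares it for all four metric lists up front.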
diff --git a/tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb b/tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb
index 0702273fac..86e38c3490 100644
--- a/tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb
+++ b/tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb
@@ -1,49 +1,20 @@
{
- "nbformat": 4,
- "nbformat_minor": 0,
- "metadata": {
- "colab": {
- "name": "Dev Summit 2018 - Autograph",
- "version": "0.3.2",
- "views": {},
- "default_view": {},
- "provenance": [
- {
- "file_id": "1wCZUh73zTNs1jzzYjqoxMIdaBWCdKJ2K",
- "timestamp": 1522238054357
- },
- {
- "file_id": "1_HpC-RrmIv4lNaqeoslUeWaX8zH5IXaJ",
- "timestamp": 1521743157199
- },
- {
- "file_id": "1mjO2fQ2F9hxpAzw2mnrrUkcgfb7xSGW-",
- "timestamp": 1520522344607
- }
- ],
- "collapsed_sections": []
- },
- "kernelspec": {
- "name": "python2",
- "display_name": "Python 2"
- }
- },
"cells": [
{
+ "cell_type": "markdown",
"metadata": {
- "id": "g7nGs4mzVUHP",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "g7nGs4mzVUHP"
},
- "cell_type": "markdown",
"source": [
- "# Experimental: TF Autograph\n",
+ "# Experimental: TF AutoGraph\n",
"**TensorFlow Dev Summit, 2018.**\n",
"\n",
- "This interactive notebook demonstrates **autograph**, an experimental source-code transformation library to automatically convert TF.Eager and Python code to TensorFlow graphs.\n",
+ "This interactive notebook demonstrates **AutoGraph**, an experimental source-code transformation library to automatically convert Python, TensorFlow and NumPy code to TensorFlow graphs.\n",
"\n",
"**Note: this is pre-alpha software!** The notebook works best with Python 2, for now.\n",
"\n",
- "> ![alt text](https://lh3.googleusercontent.com/QOvy0clmg7siaVKzwmSPAjicWWNQ0OeyaB16plDjSJMf35WD3vLjF6mz4CGrhSHw60HnlZPJjkyDCBzw5XOI0oBGSewyYw=s688)\n",
+ "\u003e ![alt text](https://lh3.googleusercontent.com/QOvy0clmg7siaVKzwmSPAjicWWNQ0OeyaB16plDjSJMf35WD3vLjF6mz4CGrhSHw60HnlZPJjkyDCBzw5XOI0oBGSewyYw=s688)\n",
"\n",
"### Table of Contents\n",
"1. _Write Eager code that is fast and scalable._\n",
@@ -53,37 +24,39 @@
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "uFcgBENZqkB2",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "uFcgBENZqkB2"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"# Install TensorFlow; note that Colab notebooks run remotely, on virtual\n",
"# instances provided by Google.\n",
"!pip install -U -q tf-nightly"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "Pa2qpEmoVOGe",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "Pa2qpEmoVOGe"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"import os\n",
"import time\n",
@@ -96,170 +69,172 @@
"import six\n",
"\n",
"from google.colab import widgets"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "ZVKfj5ttVkqz",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "ZVKfj5ttVkqz"
},
- "cell_type": "markdown",
"source": [
"# 1. Write Eager code that is fast and scalable\n",
"\n",
"TF.Eager gives you more flexibility while coding, but at the cost of losing the benefits of TensorFlow graphs. For example, Eager does not currently support distributed training, exporting models, and a variety of memory and computation optimizations.\n",
"\n",
- "Autograph gives you the best of both worlds: write your code in an Eager style, and we will automatically transform it into the equivalent TF graph code. The graph code can be executed eagerly (as a single op), included as part of a larger graph, or exported."
+ "AutoGraph gives you the best of both worlds: you can write your code in an Eager style, and we will automatically transform it into the equivalent TF graph code. The graph code can be executed eagerly (as a single op), included as part of a larger graph, or exported."
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "snaZRFdWd9ym",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "snaZRFdWd9ym"
},
- "cell_type": "markdown",
"source": [
- "For example, autograph can convert a function like this:"
+ "For example, AutoGraph can convert a function like this:"
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "9__n8cSIeDnD",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "9__n8cSIeDnD"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def g(x):\n",
- " if x > 0:\n",
+ " if x \u003e 0:\n",
" x = x * x\n",
" else:\n",
" x = 0\n",
" return x"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "gq0eQcuReHET",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "gq0eQcuReHET"
},
- "cell_type": "markdown",
"source": [
"... into a TF graph-building function:"
]
},
{
+ "cell_type": "code",
+ "execution_count": 4,
"metadata": {
- "id": "sELSn599ePUF",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
- "height": 413
+ "height": 431
},
- "outputId": "bb0c7216-1ca3-4da1-d1fb-589902cdcd1a",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 69,
"status": "ok",
- "timestamp": 1522345737505,
- "user_tz": 240,
- "elapsed": 243,
+ "timestamp": 1531750911837,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "sELSn599ePUF",
+ "outputId": "2858bde5-ae05-4c32-be01-7770ac914f02"
},
- "cell_type": "code",
- "source": [
- "print(autograph.to_code(g))"
- ],
- "execution_count": 0,
"outputs": [
{
+ "name": "stdout",
"output_type": "stream",
"text": [
"from __future__ import print_function\n",
"import tensorflow as tf\n",
- "from tensorflow.contrib.autograph.impl import api as autograph_api\n",
- "from tensorflow.contrib.autograph import utils as autograph_utils\n",
"\n",
"def tf__g(x):\n",
- " with tf.name_scope('g'):\n",
+ " try:\n",
+ " with tf.name_scope('g'):\n",
"\n",
- " def if_true():\n",
- " with tf.name_scope('if_true'):\n",
- " x_1, = x,\n",
- " x_1 = x_1 * x_1\n",
- " return x_1,\n",
+ " def if_true():\n",
+ " with tf.name_scope('if_true'):\n",
+ " x_1, = x,\n",
+ " x_1 = x_1 * x_1\n",
+ " return x_1,\n",
"\n",
- " def if_false():\n",
- " with tf.name_scope('if_false'):\n",
- " x_1, = x,\n",
- " x_1 = 0\n",
- " return x_1,\n",
- " x = autograph_utils.run_cond(tf.greater(x, 0), if_true, if_false)\n",
- " return x\n",
+ " def if_false():\n",
+ " with tf.name_scope('if_false'):\n",
+ " x_2, = x,\n",
+ " x_2 = 0\n",
+ " return x_2,\n",
+ " x = ag__.utils.run_cond(tf.greater(x, 0), if_true, if_false)\n",
+ " return x\n",
+ " except:\n",
+ " ag__.rewrite_graph_construction_error(ag_source_map__)\n",
"\n"
- ],
- "name": "stdout"
+ ]
}
+ ],
+ "source": [
+ "print(autograph.to_code(g))"
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "j74n-8hEe6dk",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "j74n-8hEe6dk"
},
- "cell_type": "markdown",
"source": [
"You can then use the converted function as you would any regular TF op -- you can pass `Tensor` arguments and it will return `Tensor`s:"
]
},
{
+ "cell_type": "code",
+ "execution_count": 5,
"metadata": {
- "id": "AkVaY0-dfEbH",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
"height": 53
},
- "outputId": "4ffe3757-c44d-424c-c2a8-7ddc973bfcce",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 83,
"status": "ok",
- "timestamp": 1522345737841,
- "user_tz": 240,
- "elapsed": 257,
+ "timestamp": 1531750911965,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "AkVaY0-dfEbH",
+ "outputId": "f04541ad-b1d3-4663-bf27-4d902648283d"
},
- "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "g(9) = 81\n",
+ "tf_g(9) = 81\n"
+ ]
+ }
+ ],
"source": [
"tf_g = autograph.to_graph(g)\n",
"\n",
@@ -272,77 +247,72 @@
"\n",
" print('g(9) = %s' % g(9))\n",
" print('tf_g(9) = %s' % tf_g_result)"
- ],
- "execution_count": 0,
- "outputs": [
- {
- "output_type": "stream",
- "text": [
- "g(9) = 81\n",
- "tf_g(9) = 81\n"
- ],
- "name": "stdout"
- }
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "trrHQBM1VnD0",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "trrHQBM1VnD0"
},
- "cell_type": "markdown",
"source": [
"# 2. Case study: complex control flow\n",
"\n",
- "Autograph can convert a large chunk of the Python language into graph-equivalent code, and we're adding new supported language features all the time. In this section, we'll give you a taste of some of the functionality in autograph.\n",
- "Autograph will automatically convert most Python control flow statements into their correct graph equivalent.\n",
+ "Autograph can convert a large subset of the Python language into graph-equivalent code, and we're adding new supported language features all the time. In this section, we'll give you a taste of some of the functionality in AutoGraph.\n",
+ "AutoGraph will automatically convert most Python control flow statements into their graph equivalent.\n",
" "
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "u0YG3DPgZxoW",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "u0YG3DPgZxoW"
},
- "cell_type": "markdown",
"source": [
"We support common statements like `while`, `for`, `if`, `break`, `return` and more. You can even nest them as much as you like. Imagine trying to write the graph version of this code by hand:"
]
},
{
+ "cell_type": "code",
+ "execution_count": 6,
"metadata": {
- "id": "xJYDzOcrZ8pI",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
"height": 35
},
- "outputId": "6c244ee4-b141-4ad6-eefa-cfffa71f33c6",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 169,
"status": "ok",
- "timestamp": 1522345738402,
- "user_tz": 240,
- "elapsed": 483,
+ "timestamp": 1531750912183,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "xJYDzOcrZ8pI",
+ "outputId": "f392b475-bf87-4d90-919d-44f895ee9fc7"
},
- "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Sum of even numbers: 42\n"
+ ]
+ }
+ ],
"source": [
"def sum_even(numbers):\n",
" s = 0\n",
" for n in numbers:\n",
- " if n % 2 > 0:\n",
+ " if n % 2 \u003e 0:\n",
" continue\n",
" s += n\n",
" return s\n",
@@ -358,77 +328,74 @@
" \n",
"# Uncomment the line below to print the generated graph code\n",
"# print(autograph.to_code(sum_even))"
- ],
- "execution_count": 0,
- "outputs": [
- {
- "output_type": "stream",
- "text": [
- "Sum of even numbers: 42\n"
- ],
- "name": "stdout"
- }
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "_YXo4KOcbKrn",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "_YXo4KOcbKrn"
},
- "cell_type": "markdown",
"source": [
"Try replacing the `continue` in the above code with `break` -- Autograph supports that as well!"
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "xHmC0rBIavW_",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "xHmC0rBIavW_"
},
- "cell_type": "markdown",
"source": [
"The Python code above is much more readable than the matching graph code. Autograph takes care of tediously converting every piece of Python code into the matching TensorFlow graph version for you, so that you can quickly write maintainable code, but still benefit from the optimizations and deployment benefits of graphs."
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "UEHWGpBXbS7g",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "UEHWGpBXbS7g"
},
- "cell_type": "markdown",
"source": [
"Let's try some other useful Python constructs, like `print` and `assert`. We automatically convert Python `assert` statements into the equivalent `tf.Assert` code. "
]
},
{
+ "cell_type": "code",
+ "execution_count": 7,
"metadata": {
- "id": "qUU57xlEbauI",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
"height": 53
},
- "outputId": "add3db4a-2077-4dd5-f7a7-a5b5a4529c26",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 56,
"status": "ok",
- "timestamp": 1522345738697,
- "user_tz": 240,
- "elapsed": 253,
+ "timestamp": 1531750912292,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "qUU57xlEbauI",
+ "outputId": "c9cd536a-4a95-4eb0-98c0-aafce5d79580"
},
- "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Got error message: assertion failed: [Do not pass zero!]\n",
+ "\t [[Node: f/Assert/Assert = Assert[T=[DT_STRING], summarize=3, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](f/NotEqual, f/Assert/Assert/data_0)]]\n"
+ ]
+ }
+ ],
"source": [
"def f(x):\n",
" assert x != 0, 'Do not pass zero!'\n",
@@ -444,61 +411,35 @@
" \n",
"# Uncomment the line below to print the generated graph code\n",
"# print(autograph.to_code(f))"
- ],
- "execution_count": 0,
- "outputs": [
- {
- "output_type": "stream",
- "text": [
- "Got error message: assertion failed: [Do not pass zero!]\n",
- "\t [[Node: f/Assert/Assert = Assert[T=[DT_STRING], summarize=3, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](f/NotEqual, f/Assert/Assert/data_0)]]\n"
- ],
- "name": "stdout"
- }
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "w5hBZaVJbck4",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "w5hBZaVJbck4"
},
- "cell_type": "markdown",
"source": [
"You can also use `print` functions in-graph:"
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "6NdzRKLEboRv",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
- },
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
- "height": 35
- },
- "outputId": "fb82dfc3-790f-4127-87f6-361805be9e9b",
- "executionInfo": {
- "status": "ok",
- "timestamp": 1522345739013,
- "user_tz": 240,
- "elapsed": 247,
- "user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
}
- }
+ },
+ "colab_type": "code",
+ "id": "6NdzRKLEboRv"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def print_sign(n):\n",
- " if n >= 0:\n",
+ " if n \u003e= 0:\n",
" print(n, 'is positive!')\n",
" else:\n",
" print(n, 'is negative!')\n",
@@ -512,62 +453,58 @@
" \n",
"# Uncomment the line below to print the generated graph code\n",
"# print(autograph.to_code(print_sign))"
- ],
- "execution_count": 0,
- "outputs": [
- {
- "output_type": "stream",
- "text": [
- "1 is positive!\n"
- ],
- "name": "stdout"
- }
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "9u_Z3i3AivLA",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "9u_Z3i3AivLA"
},
- "cell_type": "markdown",
"source": [
- "We can convert lists to TensorArray, so appending to lists also works, with a few modifications:"
+ "Appending to lists also works, with a few modifications:"
]
},
{
+ "cell_type": "code",
+ "execution_count": 9,
"metadata": {
- "id": "MjhCQJVuiTNR",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
"height": 35
},
- "outputId": "dc320b87-595b-4392-d29c-994486fd8a0a",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 148,
"status": "ok",
- "timestamp": 1522345744470,
- "user_tz": 240,
- "elapsed": 5391,
+ "timestamp": 1531750912595,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "MjhCQJVuiTNR",
+ "outputId": "96bf9131-c7c1-4359-ee82-9c38575e7ab4"
},
- "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[0 1 2 3 4]\n"
+ ]
+ }
+ ],
"source": [
"def f(n):\n",
" numbers = []\n",
" # We ask you to tell us about the element dtype.\n",
- " autograph.utils.set_element_type(numbers, tf.int32)\n",
+ " autograph.set_element_type(numbers, tf.int32)\n",
" for i in range(n):\n",
" numbers.append(i)\n",
" return autograph.stack(numbers) # Stack the list so that it can be used as a Tensor\n",
@@ -580,65 +517,62 @@
" \n",
"# Uncomment the line below to print the generated graph code\n",
"# print(autograph.to_code(f))"
- ],
- "execution_count": 0,
- "outputs": [
- {
- "output_type": "stream",
- "text": [
- "[0 1 2 3 4]\n"
- ],
- "name": "stdout"
- }
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "UdG8ZFrkTAF2",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "UdG8ZFrkTAF2"
},
- "cell_type": "markdown",
"source": [
"And all of these functionalities, and more, can be composed into more complicated code:\n"
]
},
{
+ "cell_type": "code",
+ "execution_count": 10,
"metadata": {
- "id": "DVs6wt8NKaGQ",
- "colab_type": "code",
+ "cellView": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
"height": 53
},
- "cellView": "code",
- "outputId": "0a4b8d08-8f65-4bbc-85ba-dc4c60563519",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 555,
"status": "ok",
- "timestamp": 1522345745186,
- "user_tz": 240,
- "elapsed": 658,
+ "timestamp": 1531750913176,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "DVs6wt8NKaGQ",
+ "outputId": "8729229c-4f08-4640-d3a1-0d3f9c697a87"
},
- "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The prime numbers less than 50 are:\n",
+ "[ 2 3 5 7 11 13 17 19 23 29 31 37 41 43 47]\n"
+ ]
+ }
+ ],
"source": [
"def print_primes(n):\n",
" \"\"\"Returns all the prime numbers less than n.\"\"\"\n",
- " assert n > 0\n",
+ " assert n \u003e 0\n",
" \n",
" primes = []\n",
- " autograph.utils.set_element_type(primes, tf.int32)\n",
+ " autograph.set_element_type(primes, tf.int32)\n",
" for i in range(2, n):\n",
" is_prime = True\n",
" for k in range(2, i):\n",
@@ -663,45 +597,36 @@
" \n",
"# Uncomment the line below to print the generated graph code\n",
"# print(autograph.to_code(print_primes))"
- ],
- "execution_count": 0,
- "outputs": [
- {
- "output_type": "stream",
- "text": [
- "The prime numbers less than 50 are:\n",
- "[ 2 3 5 7 11 13 17 19 23 29 31 37 41 43 47]\n"
- ],
- "name": "stdout"
- }
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "JQ8kQT99VqDk",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "JQ8kQT99VqDk"
},
- "cell_type": "markdown",
"source": [
"# 3. Case study: training MNIST with Keras\n",
"\n",
- "As we've seen, writing control flow in Autograph is easy. So running a training loop in graph should be easy as well!\n",
+ "As we've seen, writing control flow in AutoGraph is easy. So running a training loop in graph mode should be easy as well!\n",
"\n",
"Here, we show an example of such a training loop for a simple Keras model that trains on MNIST."
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "0CrtGWgwuLJr",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "0CrtGWgwuLJr"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"import gzip\n",
"import shutil\n",
@@ -754,66 +679,67 @@
"\n",
"def mnist_test(directory):\n",
" return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "2zu1U9Nqir6L",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "2zu1U9Nqir6L"
},
- "cell_type": "markdown",
"source": [
"First, we'll define a small three-layer neural network using the Keras API."
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "x_MU13boiok2",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "x_MU13boiok2"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def mlp_model(input_shape):\n",
- " model = tf.keras.Sequential([\n",
+ " model = tf.keras.Sequential((\n",
" tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape),\n",
" tf.keras.layers.Dense(100, activation='relu'),\n",
- " tf.keras.layers.Dense(10, activation='softmax')])\n",
+ " tf.keras.layers.Dense(10, activation='softmax'),\n",
+ " ))\n",
" model.build()\n",
" return model"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
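
(Aside: a quick illustrative check of the model defined above; `output_shape` and `count_params` are standard Keras model methods, and the parameter count follows from the layer sizes:)

m = mlp_model((28 * 28,))
print(m.output_shape)    # (None, 10): ten class scores per example
print(m.count_params())  # 89610 = (784*100 + 100) + (100*100 + 100) + (100*10 + 10)
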
{
+ "cell_type": "markdown",
"metadata": {
- "id": "Wuqg3H8mi0Xj",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "Wuqg3H8mi0Xj"
},
- "cell_type": "markdown",
"source": [
"Let's connect the model definition (here abbreviated as `m`) to a loss function, so that we can train our model."
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "W51sfbONiz_5",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "W51sfbONiz_5"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def predict(m, x, y):\n",
" y_p = m(x)\n",
@@ -822,63 +748,63 @@
" accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
" accuracy = tf.reduce_mean(accuracies)\n",
" return l, accuracy"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "035tNWQki9tr",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "035tNWQki9tr"
},
- "cell_type": "markdown",
"source": [
"Now the final piece of the problem specification (before loading the data and clicking everything together) is backpropagating the loss through the model and optimizing the weights using the gradient."
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "CsAD0ajbi9iZ",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "CsAD0ajbi9iZ"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def fit(m, x, y, opt):\n",
" l, accuracy = predict(m, x, y)\n",
" opt.minimize(l)\n",
" return l, accuracy"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
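
(Aside: `opt.minimize(l)` above is shorthand for the two-step TF 1.x optimizer API; a rough equivalent:)

grads_and_vars = opt.compute_gradients(l)       # backpropagate d(loss)/d(weights)
train_op = opt.apply_gradients(grads_and_vars)  # apply the momentum-SGD update
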
{
+ "cell_type": "markdown",
"metadata": {
- "id": "PcVRIacKjSwb",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "PcVRIacKjSwb"
},
- "cell_type": "markdown",
"source": [
"These are some utility functions to download data and generate batches for training."
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "RVw57HdTjPzi",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "RVw57HdTjPzi"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def setup_mnist_data(is_training, hp, batch_size):\n",
" if is_training:\n",
@@ -896,16 +822,14 @@
" x = tf.to_float(tf.reshape(image, (-1, 28 * 28)))\n",
" y = tf.one_hot(tf.squeeze(label), 10)\n",
" return x, y"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
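
(Aside: the `get_next_batch` helper used by the training loop below is mostly hidden by the hunk above; a plausible reconstruction, with the assumed line marked:)

def get_next_batch(ds):
  image, label = ds.make_one_shot_iterator().get_next()  # assumed: standard tf.data idiom in TF 1.x
  x = tf.to_float(tf.reshape(image, (-1, 28 * 28)))      # flatten to (batch, 784); visible in the hunk tail
  y = tf.one_hot(tf.squeeze(label), 10)                  # one-hot class labels; visible in the hunk tail
  return x, y
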
{
+ "cell_type": "markdown",
"metadata": {
- "id": "2zEJH5XNjgFz",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "2zEJH5XNjgFz"
},
- "cell_type": "markdown",
"source": [
"This function specifies the main training loop. We instantiate the model (using the code above), an optimizer (here we'll use SGD with momentum, nothing too fancy), and some lists to keep track of the training and test loss and accuracy over time.\n",
"\n",
@@ -913,33 +837,35 @@
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "UUI0566FjZPx",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "UUI0566FjZPx"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def train(train_ds, test_ds, hp):\n",
" m = mlp_model((28 * 28,))\n",
" opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
+ "\n",
" train_losses = []\n",
- " train_losses = autograph.utils.set_element_type(train_losses, tf.float32)\n",
+ " autograph.set_element_type(train_losses, tf.float32)\n",
" test_losses = []\n",
- " test_losses = autograph.utils.set_element_type(test_losses, tf.float32)\n",
+ " autograph.set_element_type(test_losses, tf.float32)\n",
" train_accuracies = []\n",
- " train_accuracies = autograph.utils.set_element_type(train_accuracies,\n",
- " tf.float32)\n",
+ " autograph.set_element_type(train_accuracies, tf.float32)\n",
" test_accuracies = []\n",
- " test_accuracies = autograph.utils.set_element_type(test_accuracies,\n",
- " tf.float32)\n",
- " i = tf.constant(0)\n",
- " while i < hp.max_steps:\n",
+ " autograph.set_element_type(test_accuracies, tf.float32)\n",
+ "\n",
+ " i = 0\n",
+ " while i \u003c hp.max_steps:\n",
" train_x, train_y = get_next_batch(train_ds)\n",
" test_x, test_y = get_next_batch(test_ds)\n",
" step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt)\n",
@@ -956,173 +882,144 @@
" return (autograph.stack(train_losses), autograph.stack(test_losses),\n",
" autograph.stack(train_accuracies),\n",
" autograph.stack(test_accuracies))"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
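
(Aside: note the diff changes `i` from `tf.constant(0)` to a plain Python `0`; AutoGraph lowers the `while i < hp.max_steps` loop to a `tf.while_loop`, so the whole training run stays inside the graph. A hand-written analogue for illustration, not the exact generated code:)

def count_to(max_steps):
  i = tf.constant(0)
  return tf.while_loop(lambda i: i < max_steps,  # loop condition, staged in the graph
                       lambda i: i + 1,          # loop body
                       [i])

with tf.Graph().as_default():
  with tf.Session() as sess:
    print(sess.run(count_to(tf.constant(500))))  # 500
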
{
+ "cell_type": "markdown",
"metadata": {
- "id": "cYiUQ1ppkHzk",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "cYiUQ1ppkHzk"
},
- "cell_type": "markdown",
"source": [
"Everything is ready to go; let's train the model and plot its performance!"
]
},
{
+ "cell_type": "code",
+ "execution_count": 17,
"metadata": {
- "id": "K1m8TwOKjdNd",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {},
- {},
- {}
- ],
- "base_uri": "https://localhost:8080/",
- "height": 988
+ "height": 585
},
- "outputId": "f9d3eef3-5bea-45c1-ddf9-4edee73e4436",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 17094,
"status": "ok",
- "timestamp": 1522345800262,
- "user_tz": 240,
- "elapsed": 52391,
+ "timestamp": 1531750930585,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "K1m8TwOKjdNd",
+ "outputId": "9f63da19-c3bf-498b-cf00-29090bf3b4f0"
},
- "cell_type": "code",
- "source": [
- "with tf.Graph().as_default():\n",
- " hp = tf.contrib.training.HParams(\n",
- " learning_rate=0.05,\n",
- " max_steps=500,\n",
- " )\n",
- " train_ds = setup_mnist_data(True, hp, 50)\n",
- " test_ds = setup_mnist_data(False, hp, 1000)\n",
- " tf_train = autograph.to_graph(train)\n",
- " (train_losses, test_losses, train_accuracies,\n",
- " test_accuracies) = tf_train(train_ds, test_ds, hp)\n",
- "\n",
- " with tf.Session() as sess:\n",
- " sess.run(tf.global_variables_initializer())\n",
- " (train_losses, test_losses, train_accuracies,\n",
- " test_accuracies) = sess.run([train_losses, test_losses, train_accuracies,\n",
- " test_accuracies])\n",
- " plt.title('MNIST train/test losses')\n",
- " plt.plot(train_losses, label='train loss')\n",
- " plt.plot(test_losses, label='test loss')\n",
- " plt.legend()\n",
- " plt.xlabel('Training step')\n",
- " plt.ylabel('Loss')\n",
- " plt.show()\n",
- " plt.title('MNIST train/test accuracies')\n",
- " plt.plot(train_accuracies, label='train accuracy')\n",
- " plt.plot(test_accuracies, label='test accuracy')\n",
- " plt.legend(loc='lower right')\n",
- " plt.xlabel('Training step')\n",
- " plt.ylabel('Accuracy')\n",
- " plt.show()"
- ],
- "execution_count": 0,
"outputs": [
{
- "output_type": "stream",
- "text": [
- "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/train-images-idx3-ubyte.gz to /tmp/autograph_mnist_data/train-images-idx3-ubyte.gz\n",
- "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/train-labels-idx1-ubyte.gz to /tmp/autograph_mnist_data/train-labels-idx1-ubyte.gz\n",
- "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/t10k-images-idx3-ubyte.gz to /tmp/autograph_mnist_data/t10k-images-idx3-ubyte.gz\n",
- "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/t10k-labels-idx1-ubyte.gz to /tmp/autograph_mnist_data/t10k-labels-idx1-ubyte.gz\n",
- "Step 0 train loss: 2.244329 test loss: 2.2499208 train accuracy: 0.12 test accuracy: 0.161\n",
- "Step 50 train loss: 0.64771986 test loss: 0.56013924 train accuracy: 0.82 test accuracy: 0.836\n",
- "Step 100 train loss: 0.49011207 test loss: 0.42143965 train accuracy: 0.84 test accuracy: 0.879\n",
- "Step 150 train loss: 0.3768609 test loss: 0.39319593 train accuracy: 0.88 test accuracy: 0.883\n",
- "Step 200 train loss: 0.36007702 test loss: 0.37089333 train accuracy: 0.9 test accuracy: 0.881\n",
- "Step 250 train loss: 0.182115 test loss: 0.28543878 train accuracy: 0.94 test accuracy: 0.915\n",
- "Step 300 train loss: 0.2119576 test loss: 0.22305593 train accuracy: 0.92 test accuracy: 0.93\n",
- "Step 350 train loss: 0.12932214 test loss: 0.29057172 train accuracy: 0.96 test accuracy: 0.906\n",
- "Step 400 train loss: 0.22937602 test loss: 0.2200287 train accuracy: 0.92 test accuracy: 0.925\n",
- "Step 450 train loss: 0.23444137 test loss: 0.19857481 train accuracy: 0.94 test accuracy: 0.94\n"
- ],
- "name": "stdout"
- },
- {
- "output_type": "display_data",
"data": {
"image/png": "[... base64 PNG data omitted; the training/test loss and accuracy plots rendered by the cell above ('MNIST train/test losses' and 'MNIST train/test accuracies' vs. training step) ...]",
bGsVVm30\nPQSliYeXT/kICm4xkMAYhpHDtOfdEy47bRLufvRNXHRSeV7D28XMhmn45lFfxdeefx1AeF+Bs44e\nicqkhWOOaA58fvnpk/Dy6i6MaWh6R9LE1bEqhFG/rqtQMrUoLF+A6qnhYYFKSU2wKhHufeClm2Hh\nkwOFiLwjRHg/4GCw9x5Qmhl/uCFhxHHxxPMO6Hc01iSwamOv+LvU8k4YiVBteY5vXDoHb27uxbi2\nfVNIbKlL4tpLyj0Pe4vGZD14NnxYuMTQNZEhLmPhEc1YWELo+xO6poqSjPgwuvNT6yahVZuADVuK\n+MAJ4eWtnLwjt3mECBH2Cj/43AK4XnnSz26SzQ869kfM+72OxupgedecCeHW4XAY3VKB0S37ZnUf\nKBxKizZFAbhBP5znPWkkcN2xl9Me4lY4ZY6pot6RltSBW2iUIiLvCBHeA6geJpt4yqgavPRmF2aN\nrwvdfjBxqLnND0U0VvtW9e1XH79XMfxDFeYwCmXvJj555iS8sbGPlX3tObSkKsqwxA0A46vH4gfH\n/ntkeUeIEGH/4NjpLRjVVPG26qbfbZjvASI60JgyqhqTOqpx9NSm9wRxA76lezBx9NRmHD2VWsn7\nK6fz3SRuICLvCBHe01AVBR1N+67xfiDwqbMmY/32gVDVtAhBGLqGr148fKnZ4YTT53Xg2eXbUfUu\nqDjuDfzQ0qEYXBoeB3Qpd/PNN+PDH/4wLrroIrz++uuBbc8++yzOP/98fPjDH8Z///d/H8hhRIgQ\n4RDC/ClN+MjJ4/e8Y4T3FM4/fgx+eOXCQBOZQwEXn0wz6WVltsMBB8zyfv7557Fx40bcc889WLt2\nLa6//nrcc889Yvt3vvMd3HnnnWhsbMTixYvxgQ98AGPHjj1Qw4kQIUKECBHKILvQDyccsCXQkiVL\ncPLJJwMAxowZg/7+fgwNDQEANm/ejMrKSjQ3N0NVVRx33HFYsmTJgRpKhAgRIkSI8J7CAbO8u7u7\nMWWK322lpqYGXV1dSKVS6OrqQk1NTWDb5s2bd3u+6uoE9P0cI6uvP7RigYcronl854jm8J0jmsP9\ng2ge3znejTl81xLWSjV59xa9vdn9NBKK+vo0uroG9+s534+I5vGdI5rDd45oDvcPonl859jfczjc\nQuCAuc0bGhrQ3d0t/u7s7ER9fX3otp07d6KhYe/EByJEiBAhQoT3Kw4YeS9YsACPPvooAGDFihVo\naGhAKkVrTdva2jA0NIQtW7bAcRw8/vjjWLBgwYEaSoQIESJEiPCewgFzm8+aNQtTpkzBRRddBEVR\ncOONN+L+++9HOp3GKaecgptuuglf+cpXAACnn346Ro0atYczRogQIUKECBEAQCHvNBj9LmF/x2Gi\n2M7+QTSP7xzRHL5zRHO4fxDN4zvHYR/zjhAhQoQIESIcGETkHSFChAgRIhxmiMg7QoQIESJEOMwQ\nkXeECBEiRIhwmCEi7wgRIkSIEOEww2GTbR4hQoQIESJEoIgs7wgRIkSIEOEwQ0TeESJEiBAhwmGG\niLwjRIgQIUKEwwwReUeIECFChAiHGSLyjhAhQoQIEQ4zROQdIUKECBEiHGY4YF3FDmXcfPPNeO21\n16AoCq6//nocccQRB3tIhzRWr16NK664Ah//+MexePFibN++Hddccw1c10V9fT3+4z/+A6Zp4sEH\nH8RvfvMbqKqKCy+8EBdccMHBHvohg1tuuQUvvfQSHMfBZz7zGUybNi2aw71ALpfDddddh56eHhQK\nBVxxxRWYOHFiNIf7iHw+jzPPPBNXXHEF5s+fH83jXmDp0qX4whe+gHHjxgEAxo8fj09+8pPv/hyS\n9xmWLl1KPv3pTxNCCFmzZg258MILD/KIDm1kMhmyePFi8o1vfIPcfffdhBBCrrvuOvJ///d/hBBC\nfvCDH5Df/va3JJPJkEWLFpGBgQGSy+XIGWecQXp7ew/m0A8ZLFmyhHzyk58khBCya9cuctxxx0Vz\nuJd46KGHyB133EEIIWTLli1k0aJF0Ry+A/zwhz8k5557LvnTn/4UzeNe4rnnniOf//znA58djDl8\n37nNlyxZgpNPPhkAMGbMGPT392NoaOggj+rQhWma+PnPf46Ghgbx2dKlS3HSSScBAE444QQsWbIE\nr732GqZNm4Z0Oo1YLIZZs2bh5ZdfPljDPqQwd+5c/PjHPwYAVFRUIJfLRXO4lzj99NPxqU99CgCw\nfft2NDY2RnO4j1i7di3WrFmD448/HkD0e94fOBhz+L4j7+7ublRXV4u/a2pq0NXVdRBHdGhD13XE\nYrHAZ7lcDqZpAgBqa2vR1dWF7u5u1NTUiH2iefWhaRoSiQQA4L777sOxxx4bzeE+4qKLLsLVV1+N\n66+/PprDfcT3v/99XHfddeLvaB73HmvWrMFnP/tZXHzxxXjmmWcOyhy+L2PeMkikDvuOMNz8RfNa\njn/84x+477778Mtf/hKLFi0Sn0dz+Pbxhz/8AatWrcJXv/rVwPxEc/j28Oc//xkzZszAiBEjQrdH\n87hnjBw5EldeeSVOO+00bN68GZdeeilc1xXb3605fN+Rd0NDA7q7u8XfnZ2dqK+vP4gjOvyQSCSQ\nz+cRi8Wwc+dONDQ0hM7rjBkzDuIoDy089dRT+NnPfoZf/OIXSKfT0RzuJZYvX47a2lo0Nzdj0qRJ\ncF0XyWQymsO9xBNPPIHNmzfjiSeewI4dO2CaZvQs7iUaGxtx+umnAwDa29tRV1eHZcuWvetz+L5z\nmy9YsACPPvooAGDFihVoaGhAKpU6yKM6vHD00UeLOfzb3/6GY445BtOnT8eyZcswMDCATCaDl19+\nGXPmzDnIIz00MDg4iFtuuQW33347qqqqAERzuLd48cUX8ctf/hIADX1ls9loDvcBP/rRj/CnP/0J\n9957Ly644AJcccUV0TzuJR588EHceeedAICuri709PTg3HPPfdfn8H3ZVezWW2/Fiy++CEVRcOON\nN2LixIkHe0iHLJYvX47vf//72Lp1K3RdR2NjI2699VZcd911KBQKaGlpwXe/+10YhoFHHnkEd955\nJxRFweLFi3H22Wcf7OEfErjnnntw2223YdSoUeKz733ve/jGN74RzeHbRD6fx9e//nVs374d+Xwe\nV155JaZOnYprr702msN9xG233YbW1lYsXLgwmse9wNDQEK6++moMDAzAtm1ceeWVmDRp0rs+h+9L\n8o4QIUKECBEOZ7zv3OYRIkSIECHC4Y6IvCNEiBAhQoTDDBF5R4gQIUKECIcZIvKOECFChAgRDjNE\n5B0hQoQIESIcZnjfibREiHC44ZZbbsGyZctQKBSwcuVKzJw5EwBw3nnn4UMf+tDbOscdd9yB8ePH\nCz3rMHz0ox/Fr3/9a2iatj+GHcDOnTuxbt06zJ8/f7+fO0KE9yOiUrEIEQ4TbNmyBR/5yEfw5JNP\nHuyh7DUefPBBrF27Fl/60pcO9lAiRHhPILK
8I0Q4jHHbbbdhy5Yt2LZtG6699lrk83nceuutME0T\n+XweN954I6ZMmYLrrrsOs2fPxvz58/Fv//ZvWLhwIV5//XVkMhncfvvtaGxsxIQJE7BixQr89Kc/\nRV9fH3bs2IGNGzfiqKOOwg033IBCoYBrr70WW7duRVNTEzRNw4IFCwI9ijOZDL7yla9gYGAAjuPg\nhBNOwJlnnokf/ehHIISgqqoKl1xyCb797W9j48aNyGQyOPPMM3H55Zfj/vvvx9///ncoioKdO3di\n9OjRuPnmm2EYxkGc4QgRDk1EMe8IEQ5zbNmyBXfddRemTp2Kvr4+3HTTTbjrrrtw6aWX4vbbby/b\nf+3atTj33HPx29/+FpMmTcLDDz9cts/KlSvxk5/8BPfddx/uv/9+9Pf348EHH4TjOPjjH/+Ib37z\nm3jmmWfKjnv22WfhOA5+97vf4Q9/+AMSiQRaW1txzjnn4Oyzz8Zll12Gu+66Cw0NDbj77rvxxz/+\nEQ899BDeeOMNAMCyZctw66234r777sO2bdsOSy9DhAjvBiLLO0KEwxzTp0+HoigAgLq6Otxyyy0o\nFAoYHBxEZWVl2f7V1dUYN24cAKClpQV9fX1l+8yePRuapkHTNFRXV6O/vx+rVq3CkUceCQCor6/H\n7Nmzy46bNWsWfvKTn+ALX/gCjjvuOFxwwQVQ1aCNsHTpUuzYsQMvvPACAKBYLGLTpk3ieN4+debM\nmVi7dq3okxwhQgQfEXlHiHCYQ3YrX3PNNfjWt76F+fPn4/HHHxfNPGSUJqSFpb2E7eN5XoCIS0kZ\noL2M//KXv+CVV17BP//5T5x33nl44IEHAvuYponPfe5zOPXUUwOf33///fA8b7fjihAhAkXkNo8Q\n4T2E7u5ujBs3Dq7r4pFHHkGxWNxv5x49ejReeeUVAEBPTw9eeun/t3eHOAoDYRTHHyGYJlwAMAjg\nAFROSC0STCWCIJCYBhwOwxEqegIkuqLBbRN0LQaBxkBZsdkaDJutmeb/05PJ517eZCbz9bYmSRLF\ncazhcKggCOQ4jm63m2q1mh6Ph6SfVv97VJ/nuXa7XdH+z+ez7ve7Xq+X0jTVYDAobX6gSmjeQIUs\nFgvNZjO1Wi3N53MFQaAoikrZezqdKo5j+b6vTqcj13XfGnq329V6vVYYhqrX6zLGqN1uy3VdrVYr\nNRoNLZdLZVkm3/f1fD7leV7xVWq/39dms9HlclGv15MxppTZgarhqRiAj1yvV6VpqvF4rDzPNZlM\ntN1ui3fn/3U4HHQ6nbTf70vZD6gymjeAjzSbTR2Px+J/4tFoVFpwA/gbmjcAAJbhwhoAAJYhvAEA\nsAzhDQCAZQhvAAAsQ3gDAGAZwhsAAMt8AxJ5C+54P8QOAAAAAElFTkSuQmCC\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAEcCAYAAADUX4MJAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXeAVNXd//++ZdrONsqyNBUECxZQRBHUoKLoE+lP0F+i\nxMT4tRDFWBKVxG7UJPaK8mBBE40lQBAVFAQE6bAU6WWBZXvf6bec3x+3zu7M7iw7w+4Onxd/MDO3\nnXtn9rzPp5zP4RhjDARBEATRAnx7N4AgCILoHJBgEARBEAlBgkEQBEEkBAkGQRAEkRAkGARBEERC\nkGAQBEEQCUGCQRDtzLp16zBq1KiE9n399dfxxz/+sc3nIYhjgQSDaHeuvPJKnHvuuaitrY36fMKE\nCTjzzDNRXFwMAHjooYdw5plnYtu2beY+hw8fxplnnmm+nzp1Kj7//HPz/cyZMzF69GgMHToUl19+\nOe677z4AwNixYzF06FAMHToUZ511FgYPHozzzz8fQ4cOxTvvvJPK240Jx3FJ2bc15yGI1iK2dwMI\nAgD69u2LhQsX4sYbbwQA7NmzB+FwOKoD5DgOubm5ePnllzF79uyoz2Mxd+5cLFiwAB988AH69u2L\nqqoqLF26FADw5ZdfmvtNnToVEydOxP/+7/+m4tYIIm0gC4PoEEyYMAFz584138+dOxeTJk1qst+k\nSZOwe/dubNiwocVzbt++HZdeein69u0LAOjWrRumTJkSc9+WCh68/vrruOeee/DHP/4RQ4cOxfjx\n41FYWIh33nkHI0eOxBVXXIEff/zR3L+8vBx33nknhg8fjmuuuQafffaZuS0cDuOhhx7CRRddhLFj\nx0ZZTMax06dPx4gRI3DVVVfhww8/bPFeY7F//35MnToVF154IcaNG2eKJQAsX74c1113HYYOHYpR\no0bhvffeAwDU1NTgjjvuwIUXXojhw4fjpptuOqZrE+kJCQbRIRgyZAj8fj8OHDgAVVXxzTffYPz4\n8U06crfbjTvuuAMvvvhiQuecN28eZs+eje3bt0NV1Ta1cdmyZZg0aRI2bNiAQYMG4Xe/+x0YY/jh\nhx8wbdo0PPLII+a+9913H3r37o2VK1filVdewYsvvog1a9YAAF577TUUFRVhyZIlmD17NubNm2ce\nxxjDHXfcgUGDBmHlypV4//33MWfOHKxatapVbZVlGXfeeScuu+wyrF69Gn/+85/xwAMPoLCwEADw\n5z//GU899RQ2bdqEL7/8EhdffDEA4L333kPPnj2xdu1a/Pjjj7j33nvb9MyI9IIEg+gwTJgwAfPm\nzcOqVatw6qmnokePHjH3u/7661FSUoIffvih2fONHz8ejzzyCFatWoWpU6di5MiRbYpPDBs2DCNH\njgTP87j22mtRU1OD2267DYIg4Oc//zmKi4vh8/lQUlKCzZs344EHHoDD4cCZZ56JKVOmYP78+QCA\nb775BnfeeSeysrKQn5+PqVOnmtfYunUramtrceedd0IQBPTt2xdTpkzBwoULW9XWgoICBAIB3Hbb\nbRBFERdffDGuuOIK0xXndDqxb98++Hw+ZGVlYdCgQQAAURRRUVGBoqIiCIKACy644JifF5F+kGAQ\nHYbx48fjyy+/xNy5czFhwoS4+zmdTkybNg2vvPJKi66ksWPH4t1338WGDRvwxBNP4NVXX231aN2g\nW7du5mu3240uXbqY8RO32w3GGPx+PyoqKpCTkwOPx2Pu37t3b5SXlwPQXE49e/aM2mZQXFyMsrIy\nXHTRRbjoootw4YUX4u2330Z1dXWr2lpeXo5evXpFfWZvw6uvvoply5bhyiuvxNSpU1FQUAAAuPXW\nW3HyySfjlltuwdVXX90uCQBEx4UEg+gw9O7dG3369MGKFSswZsyYZvedPHkyGhoa8O233yZ0bkEQ\ncM011+CMM87A3r17k9HcuPTo0QN1dXUIBALmZyUlJabFlJeXh5KSEnObkQUGAL169ULfvn2xbt06\nrFu3DuvXr8fGjRsxc+bMVrfBfg3jOkYbzjnnHLz55ptYvXo1Ro8ejT/84Q8AgIyMDDz44IP47rvv\nMHPmTLz//vumK40gSDCIDsUzzzyDDz74AG63u9n9BEHAXXfdhVmzZsXdZ+7cuVi+fDn8fj8YY1i+\nfDn279+PwYMHJ7vZUfTs2RPnn38+XnzxRUQiEezatQuff/45xo8fDwD4n//5H7z99tuor69HaWkp\nPvroI/PYwYMHIzMzE7NmzUI4HIaiKNi7d2+TwHhLDBkyBBkZGZg1axZkWcbatWuxbNkyjB07FpIk\nYcGCBfD5fBAEAV6vF4IgANDiNIcPHwagiYcgCOY2gqC0WqLdsafFnnTSSXG3NWbs2LF455130NDQ\nEHP/zMxMzJw5EwcOHICiKOjduzcef/xxDB06NOFrtAb7eV544QU89thjuOyyy5CTk4N77rkHI0aM\nAADcddddeOyxxzB69Gjk5+dj8uTJmDNnDgCA53nMnDkTzz33HEaPHg1JktC/f3/cc889rWqLw+HA\nW2+9hccffxxvv/02evbsib///e/o168fJEnC/Pnz8fTTT0NRFPTv3x/PP/88AKCwsBBPPvkkampq\nkJOTgxtvvBEXXnhhUp4P0fnhUrmAUmlpKf70pz+hsrISgiBgypQp+PWvfx21z7p16zBt2jSzo7j6\n6qsxbdq0VDWJIAiCOEZSamEIgoCHH34YgwYNgt/vx+TJk3HJJZdgwIABUfsNGzas1T5agiAI4viS\n0hhGXl6ema7n9XoxYMAAM0uDIAiC6Fwct6B3UVERdu3aFTPgWFBQgIkTJ+K2227Dvn37jleTCIIg\niFaQ0hiGgd/vx9SpUzFt2jRcddVVTbbxPA+Px4Ply5fjmWeewaJFi1LdJIIgCKKVpNzCkGUZ06dP\nx4QJE5qIBaC5qowJTqNGjYIkSU2qljbmOGgcQRAE0YiUp9XOmDEDAwcOxM033xxze2VlJbp37w5A\nK4sAALm5uc2ek+M4VFQ0NLvPiUJeXhY9Cx16Fhb0LCzoWVjk5WW16fiUCsbGjRuxYMECnH766Zg4\ncSI4jsO9996L4uJicByHG264AYsWLcLHH38MURThdrvx0ksvpbJJBEEQxDFyXGIYqYBGDBo0erKg\nZ2FBz8KCnoVFWy0MKg1CEARBJAQJBkEQBJEQJBgEQRBEQpBgEARBEAlBgkEQBEEkBAkGQRCEDZ/P\nh7lzPz+mY//0pz/A7/clvP+7776DTz75qOUdOwgkGARBEDYaGuoxd+5nMbepqtrssX//+8vwejNT\n0awOQadcQOmR715AqDIHd1w8CdleZ3s3hyCINGLmzNdRXHwUt9xyI4YNG44RIy7Be+/NQrdu3bFv\n3x58+OGnePjhB1BRUY5IJIwpU36JceMmAgCmTBmP2bM/RCAQwAMPTMe5556H7du3IC8vH8899wKc\nzvj91d69u/H8888hHA6jT58+ePjhx
5CZmYnPPvsE8+f/B6Iool+//nj88b9i8+aNePXVF/RFuzi8\n8casqDXkU0WnFIzdVfsADvhg0SDcPTm1y20SBNF+fLp0H9bvatuSCILAQVGs+ckXntkD1185MO7+\nd955NwoLD+Ddd/8JANi8eSN27tyBDz/8FD179gQAzJjxGLKyshAOh/H//t+vMWrUlcjOzgZgrbpY\nVHQETzzxLB588M949NGHsWzZUowZc23c6z799OO4774HMWTIeZg9+2289947uPvu+/DPf36Azz9f\nAFEUTXfXJ598hPvvfwjnnDMYoVCoWSFKJp3aJVUfab5IIUEQRDI466yzTbEAgE8//Rd+85tf4fbb\nf4vy8nIUFR3Wt1jC1KtXbwwYoAnTGWecidLS4rjn9/t98Pt9GDLkPADAtddeh4KCzQCAgQNPw+OP\n/xmLF38NntfWVz/33CF49dUX8fnnn6ChoR48f3y68k5pYUQOnQnnKbvAvDXt3RSCIFLI9VcObNYa\nSIRklAZxu93m682bN2LTpg1455334XQ6cffdtyMSiTQ5xj7q53kh5j524lVp+sc/XkFBwSasXLkc\n77//f/joo89w002/wciRl2H16pW4/fbf4uWX38TJJ59yjHeXOJ3SwmD+HACA5CDBIAgiuWRkZCAQ\nCMTd7vf7kJWVBafTiUOHCvHTT9tj7teaMn1ebyays7OxdWsBAGDRoq9w3nlDAQBlZaU4//wLcOed\n0+H3+xAMBnD0aBFOPXUAbrzxZpxxxiAcPlyY+A22gU5pYagBrYCWH1Xt3BKCINKN7OwcnHvuENx8\n8/+H4cNHYsSIS6K2Dx8+EvPmfYHf/OZXOPnkU3DOOefatloxDC0gnTgzZjyO559/FuFwGL1798GM\nGY9BlmU8+eQj8Pv9ABhuuOFGeL2ZmDXrLWzatAGCIKBfv1Nx8cWXtHj+ZNApq9Xe/MQi+PsthuCQ\n8dpVT7X6i0knqBKnBT0LC3oWFvQsLE7IarXvPzoGQiQHTIigLlLf3s0hCII4IeiUgsFxHFyqppSV\nwep2bg1BEMSJQacUDADwIBsAUBGgOAZBEMTxoNMKRqagZUrtrYif20wQBEEkj04rGJeeoeVmbz96\npJ1bQhAEcWLQaQVjxOn9AAaEWOKVIQmCIIhjp9MKhsAL4FQnFK752ZMEQRCtoS3lzQHg008/Rjgc\njrnt7rtvx+7du4753O1NpxUMABCYE4yPQFaaLzlMEASRKM2VN0+Ezz77GOFwKIkt6jh0ypneBg7O\nBUkMoCEgoUuWq72bQxBEGtC4vPm0adPxr399iO+//xaSJONnP7sct9xyG0KhEB599CFUVJRDVVXc\nfPOtqK6uRGVlBe6++w7k5ubilVfeinudb7/9Bh999D4A4OKLL8Gdd94NVVXx3HNPYffunQA4XHfd\neFx//S9jljhvDzq1YLh4D4JcFap9fhIMgkhD/rPvS2wu39amcwg8B0W1Clqc3+NcTB44Nu7+jcub\nr1+/BkVFhzFr1hwwxvDgg/dhy5YC1NZWo3v3PPz97y8DAAIBPzIyvPj3vz/Ga6+9rZc7j01lZSVm\nznwd7733T2RmZuHee3+PlSuXIy8vHxUV5fjgg08AwCxnHqvEeXvQqV1SHkFbMKTCR7O9CYJIDevW\nrcX69etwyy034pZbbsThw4dQVHQYp546EBs2rMPMma9jy5YCZGR49SMY7GXOY7Fr108YOnQYsrNz\nwPM8rr76WhQUbEbv3n1QUlKMl19+HmvXrjbPGavEeXvQqS2MDIcHCAM17ai4BEGkjskDxzZrDSRC\nW2tJMcYwdepvMH78pCbbZs/+CKtXr8Lbb7+Oiy66GL/5za0JnzNWGb+srCy8//7HWLt2Nf7zn0+x\ndOm3ePjhR2OWOD9ea2DY6dQWRpZTU9/qABUWIwgiOTQubz58+MVYuPC/CAaDAIDKygrU1NSgsrIS\nLpcLY8Zci1/+8ibs2bNbP96rV5eNz1lnnYMtWzajvr4OiqLgu+8W4bzzhqKurhaqqmDUqCtw6613\nYu9e7ZyxSpy3B53awsh2e4EGoC7U/JdDEASRKI3Lm0+bNh2FhYW4447fAtAE5ZFHnkJR0RG88cYr\n4HkOoujAAw88DAAYP34iHnhgOrp3z2sS9DYqa3fr1h233/573H337QCAESMuxaWX/gz79u3FM888\nAcZUcByHO+64O26J8/agU5Y3B4CKigYs3rcK8w/PxymRS/Cnaye0d5PaBSrdbEHPwoKehQU9C4sT\nsry5QY+sLgCABpl+DARBEKmmUwtG36x8AECA0VKtBEEQqaZTC0ZXTxdAFRAWKK2WIAgi1XRqweA5\nHqKcBdXZAEVV2rs5BEEQaU2nFgwAcLMccLyKcl9tezeFIAgiren0gpHt1KL+e0vL2rklBEEQ6U2n\nF4zeOV0BAHtKy9u5JQRBEOlNSgWjtLQUv/71r/Hzn/8c48aNw5w5c2Lu9/TTT2PMmDGYMGECdu7c\n2apr9M/LAwAcqa5sc3sJgiCI+KR0prcgCHj44YcxaNAg+P1+TJ48GZdccgkGDBhg7rN8+XIcPnwY\nixcvxpYtW/DYY4/h008/TfgaeZm5AID6CNWTIgiCSCUptTDy8vIwaNAgAIDX68WAAQNQXh7tOlqy\nZAkmTpwIABgyZAgaGhpQWZm4tZDt1KbIB1U/1M45aZ0gCKJTcNxiGEVFRdi1axcGDx4c9Xl5eTl6\n9uxpvs/Pz0dZWeIBbCPozYQwGgJSchpLEARBNOG4FB/0+/2YPn06ZsyYAa/XG7UtVikro0BXcxg1\nUbqqGdoxjjCYwLe5Vkpn5ES853jQs7CgZ2FBzyI5pFwwZFnG9OnTMWHCBFx11VVNtufn56O0tNR8\nX1paih49erR4XnsxMSc8CDnC2H+oBrnuTl2At9VQYTULehYW9Cws6FlYdPjigzNmzMDAgQNx8803\nx9w+evRozJs3DwBQUFCA7OxsdO/evVXX8IpecI4I9hyhyXsEQRCpIqXD8Y0bN2LBggU4/fTTMXHi\nRHAch3vvvRfFxcXgOA433HADRo0aheXLl+Pqq6+Gx+PBs88+2+rr5GXmokauxNKNhzDh0n7IcDtS\ncDcEQRAnNikVjAsuuCCheRWPPvpom66T49LMLFUIo6o+TIJBEASRAjr9TG/AypSCI4x6f6R9G0MQ\nBJGmpIVgZOlzMThHBHX+cDu3hiAIIj1JC8EwLAzOEUIdWRgEQRApIS0EIy+jGwCA9/hR5yPBIAiC\nSAVpIRi9vb3AgQOfW47qAOVbEwRBpIK0EAy36AIDA+8O4oC4or2bQxAEkZakhWAAwPCeFwAAgq7i\ndm4JQRBEepI2gvGL08YBAHjJ28KeBEEQxLGQNoKR4cgAwplgnNzeTSEIgkhL0kYwAIBXBTCeBIMg\nCCIVpJVgcMwB8ApUprZ3UwiCINKOtBIMXi+NFVFoISWCIIhkk16CwbSig2GFJu8RBEEkm7QSDE
G3\nMMIK1ZMiCIJINmkmGJqFEZRC7dwSgiCI9COtBEPkNMHwR0gwCIIgkk2aCYYTABCUyCVFEASRbNJM\nMDQLI0AuKYIgiKSTVoLh5HXBkMnCIAiCSDZpJRgO3SUVIguDIAgi6aSVYLh4FwAgKJNgEARBJJv0\nEgzBAwAISMF2bglBEET6kVaC4RbcAICgTIJBEASRbNJLMETNwggq5JIiCIJINmklGB7dwggpZGEQ\nBEEkm7QSDJdDBFMEqiVFEASRAtJKMESBA5MdCKvkkiIIgkg2aSUYDpEHFAcijASDIAgi2aSVYIgC\nDyaLkFiEVt0jCIJIMmklGE6HACh6iXOavEcQBJFU0kowsjwOMFkvcS4F2rk1BEEQ6UV6CUaGA0zS\n6kn5JH87t4YgCCK9SDPBcAKyLhgRXzu3hiAIIr1IK8FwOwVwClkYBEEQqSCtBIPjOHjEDACAL0KC\nQRAEkUxSKhgzZszAyJEjMW7cuJjb161bh2HDhmHSpEmYNGkS3nzzzTZf0yt4AQANErmkCIIgkomY\nypNPnjwZU6dOxZ/+9Ke4+wwbNgwzZ85M2jWznF7UAqgPk2AQBEEkk5RaGMOGDUN2dnYqL9GEHHcW\nAKAu1HBcr0sQBJHutHsMo6CgABMnTsRtt92Gffv2tfl8mW43mCKgLkKCQRAEkUxS6pJqibPPPhvf\nf/89PB4Pli9fjt///vdYtGhRm87pdTvA6jNQI1aDMQaO45LUWoIgiBObdhUMr9drvh41ahSeeOIJ\n1NbWIjc3t8Vj8/KyYn/eNROsPANSRgMcWQxdPMfXJdYexHsWJyL0LCzoWVjQs0gOKRcMxljcbZWV\nlejevTsAYOvWrQCQkFgAQEVFbJcTUxSooQwIAHYeKcRpXU5tXYM7GXl5WXGfxYkGPQsLehYW9Cws\n2iqcKRWM+++/H2vXrkVtbS0uv/xy3H333ZAkCRzH4YYbbsCiRYvw8ccfQxRFuN1uvPTSS22+ZoZL\nBAtplktFsDLtBYMgCOJ4kVLBeOGFF5rdfuONN+LGG29M6jW9btGsJ0UFCAmCIJJHu2dJJZsMtwNg\n2m3JqtLOrSEIgkgf0k4wPG7RFAyFyQkdwxhrNtZCEARBpKFgeN0imNo6C+Optc/jlc1vp7JZBEEQ\nnZ52TatNBR6nCDBt7oWcoIVRFqhAWaAilc0iCILo9KSdhcHzHDwOLeitUAyDIAgiaaSdYABAhtsF\nAJDVxCwMgiAIomXSUjCy3JqFISVgYahMTXVzCIIg0oI0FQw3ACAsSy3uS24rgiCIxEhLwcj2aC6p\nkNSyYMiMBIMgCCIR0lIwMj2ahREhC4MgCCJpJCQYX331FXw+bQW7V155Bb/73e+wffv2lDasLeRk\naBZGRG456J1o6i1BEMSJTkKC8dZbbyEzMxNbt27FypUrMXHiRDz99NOpbtsxk5OhWxhK6ywMCoAT\nBEHEJyHBEEVtft+qVaswZcoUjBs3DuFwOKUNawtZXieYykFKIK3WHsMg9xRBEER8EhIMjuPw3//+\nFwsXLsSIESMAAFICAeX2IlMvQJhIaRC7SCgUACcIgohLQoLxl7/8Bd988w2mTJmCk046CYWFhRg+\nfHiq23bMeN0ioPIJCYA9hqGQS4ogCCIuCdWSGjp0KN58803zfb9+/fDII4+krFFtxevRLIxEBIMs\nDIIgiMRIyMJ47rnn0NDQAFmW8atf/QrnnXce5s+fn+q2HTNupwAwHmoiFoZKMQyCIIhESEgwfvzx\nR2RlZWHlypXIz8/HokWL8O6776a6bccMx3HgwENFAhYGIwuDIAgiEVo1cW/9+vW4+uqrkZ+fD47j\nUtWmpMBDAEPLMQl7gUKyMAiCIOKTkGB069YNf/nLX/DVV1/hkksugSzLUJSO3bkKnADGqVBbWEkv\n2sKgoDdBEEQ8EhKMF154AQMHDsRLL72EnJwclJaW4re//W2q29YmBE4AOBWhcPPCJlPQmyAIIiES\nEoyuXbvipptugtfrxb59+9CzZ09Mnjw51W1rEyIvgOMZGgLNTzC0i0SiS7oSBEGciCSUVrtt2zZM\nnz4dTqcTjDHIsozXXnsNZ599dqrbd8w4BAcAoMYfQn5Xb9z9yMIgCIJIjIQE469//SueeeYZc5b3\nmjVr8NRTT+GTTz5JaePaglMQAQWo9Yea3U+xTdxLJA2XIAjiRCUhl1QwGDTFAgAuvvhiBIPBlDUq\nGTj1+ld1gebbGT0Pg4LeBEEQ8UhIMDweD9asWWO+X7duHTweT8oalQzcorZMa12gJQvDFsOgUucE\nQRBxScglNWPGDNxzzz1wOvW1siUJr776akob1lY8DkMwAs3uFx3DIAuDIAgiHgkJxuDBg7F48WIc\nPHgQjDH0798fY8aMwbJly1LcvGOniycbqAXqwr5m91No4h5BEERCJCQYAOBwOHD66aeb71kLE+La\nm64ZWQCA+hYEQ6bSIARBEAlxzGt6d/TSINkuTTBCavMuKYWKDxIEQSREsxbGvn374m6TE1gvuz3J\ncmYCACJoIYZBFgZBEERCNCsYt912W9xtLpcr6Y1JJlkOTTBkNJ8lJavWyoEU9CYIgohPs4KxdOnS\n49WOpJPp1GZ3K0LzpUHCSsR8TRYGQRBEfI45htHRyXRogsGEMBRVhSSrqGloKh4h2fqMYhgEQRDx\nSVvBEHkRHBPBCRJCEQUvf7YF97+xCrW+aNE4UFpjviYLgyAIIj5pKxgAIDAR4BWEIwp2HtKEoSEg\nRe3jC1ulQ+yLKREEQRDRpFQwZsyYgZEjR2LcuHFx93n66acxZswYTJgwATt37kzq9QXOAfAqghHL\nchD4RunAvLUtKDcfICcIgjiRSalgTJ48GbNnz467ffny5Th8+DAWL16MJ598Eo899lhSry9wIjhB\nwaqtJRC6lsB5xjqE5UjUPpxoWRUkGARBEPFJqWAMGzYM2dnZcbcvWbIEEydOBAAMGTIEDQ0NqKys\nTNr1Rc4B8Aq+WXcYzoFbIORUY1/DXgDAtxuO4I7nlwG8AjXsBgAE5I5dgZcgCKI9adcYRnl5OXr2\n7Gm+z8/PR1lZWdLO7+Qd4HgVgFXGpCpUDQD4+Lu9iMgqwMuA5AIYcNRXgtpwXdKuTxAEkU60q2DE\nqkeVzJIjDl5bdc8epygLldgupoLjGZgigmMOVIdq8OdVf03a9QmCINKJhIsPpoL8/HyUlpaa70tL\nS9GjR4+Ejs3Ly2pxnwyXG5AAzmW5mioj5fDk8HCduR5SST/tQ0UAVN6Uz0TO3ZHobO1NJfQsLOhZ\nWNCzSA4pF4zmqtqOHj0a//znP/Hzn/8cBQUFyM7ORvfu3RM6b0VFQ4v78EzQ/s+w9q0JV2PJzjXg\ns6vgyq7S2qiKYLyVbpvIuTsKeXlZnaq9qYSehQU9Cwt6FhZtFc6UCsb999+PtWvXora2Fpdffjnu\nvvtuSJIEjuNwww03YNSoUVi+fDmuvvpqeDwePPvss
0m9vkvQFlHiXFYBQgaGf+76PHpHRQD42HWk\n/FIAHtENnkvrKSsEQRAtklLBeOGFF1rc59FHH03Z9Z2GYDi1dFk1kBVlbRgwVYh5fGWwGk+u+Qem\nnD4el/UZEXMfgiCIE4W0Hja7BC3obcQwlOr82DvGEYxSfxkUpqDEX56S9hEEQXQm0lswRK0EO+fU\nBaO2B/p5Tmu6oyqALxze5OMGyQ8ACMvNV7wlCII4EUhrwXCLRgxDn8EtO9DV0TQLi6k8uIYe6J99\nSlSswhfRlncNKyQYbUVVGRSV1hshiM5MmguGbmHoAW0mO83MqShUHqrKIPA8VKaamV0+3cIIkWC0\nmfvfWIXpr6xs72YQBNEG2nUeRqrxOJzWG1XQXE9cDMFgAhTGIOjbVKZC4ARTMMjCaDt1/kjLOxEE\n0aFJawvD47CWkeUUTTyatTB0wTDWxfBFdAsjRgxj/9E6vDF3G8IRWkODIIgTg7QWjAyn23zNq5p4\n8FxTo4rZXFKATTBMC6Pp6PivH27Ext0VWLmtpMk2giCIdCStBcMreszXAtMD4LEsDMZDsVkYsmpY\nGC0HvRWFArkEQZwYpLVguEXLwjAEg48VtlG1x8A3dklJ2gzx5oLe8QufEARBpBdpLRgem2A4OD2e\nwZresjHDQ/F7AAAgAElEQVTTW9HDEXuO1EBSZYQULR1XVmUoKsUqCII4sTlhBEPUBSNm0FsXkcMl\nWsziixV74dfjFwaUKUUQxIlOWguGyFvuJ5cuGDFjGLpLqs6nVazNyXKgIRItGDQXgyCIE520Fgw7\nTkGzNjjEF4yIpEUkvB6hiYURK7WWIAjiROKEEQw3r1sYMQoNMt3qYLpwhCXZzJDyihkArBTbpscm\nvakEQRAdkhNGMFy6haEqMZaA1YUCTNsWliWz8ODJ2X0BANWhmqhDhG7FcA9bBL+anmuAl9UEUOtL\nvlXV3IJaBEF0bE4YwRD1dNqv1xxtutEUDO3/kCybFsXJWbEFw3HqVnA8Q6G0LUUtbl8efnsN7nt9\nVdLPS3pBEJ2XE0YwOF63LNQYt8yiLYyILCMoaym1fTJ7AQCqQ7XRh4S1SYFBRks/tgaVFIMgOi1p\nLxhy2ckAgGxeXys8xjwMQBcTm2AoqgwA6JGhHdfYwjAEI8Dqk9zi9IZcUgTReUl7wegdvgjBddeg\nb9cu+icxYhgGuphEFBmyPtvbI7qR5chsIhjGefxqHVRG5UGawy4SKukFQXRa0rq8OQA89KsLUFUf\nQqhRVVnGAK6RdjDdwpAUGbKqvRZ5EV3dXXDUVwyVqeYCS5ygWSAyIjjqK8VJWb1TfCfHj2RbAXY3\nlEqKQRCdlrS3MFxOAb27e8Hb7jS48UqENl7VdGfdwuA4hoisTeITOAFO5oXMFDToqbbaBtl8uat6\nT0ra3l4kO85gPx25pAii85L2gmHA280JxQmoMYwr3cIApyKiaIIg8gJ27NUC4Ha3FCfIYLIIDjzW\nlW5Kq44w2Sup2q2KjmxgbNxdgcNllMRAEPE4cQSDbyZ2YWAExDkGSRcMgRPBIlqA2xAMSVYAQQYL\nZyCf749ifymK/aUpaXd7kGy3kV1LO2qWlKyoeGPuNjz+3vr2bgpBdFjSPoZhIMQQjOCmK6PTbG0W\nRlh3SfEcb2ZEVYdqMe+HA/jvqoPwXKRAVQR4oQXTG5cS6cwku1O3n491UBND6aDtIoiOxIljYTSO\ncAOAHO2aMkqDgGOIyDI4cJAkBiZpa2k0RHz476pCK36hiDAeoWwrf76zeg8KKran4jaOC8mPYXR8\nl1Q6uRQJIlWcOIIRw8JwORvVlTItDIaIIkHkRS27Sq8/JTNNKIwMKaaI4Fj0sq4A8HrB/2HWtjnJ\nvoXjht0llYyOVO0EQe9kx20IIh05cQQjhoXhaSIYepYUr0JSZIi8gFBENoVE1ifzRVkYrKmFYWDM\nFu9s2AUjGa6aqLTajioYHbRdBNGROHEEI4aF4XE1CuHYLAxZVSBwAgJh2YxzGIFwu4VxqFSLXZhi\nYqM23DkLE9o1QlHa3pGyTpAlRfNDCKJlTmjBcDtjC4bgrYfMFPgDCv46ZyMYixYMiFpAHLIT/oBm\nWRgzw+2zvjutYERZGG331US5pFrRMW/YVY7f/W0pSqpSn1BAFgZBtMyJIxgxYt4ZrmiXlOrPQaYj\nE3yXMiiiD4oSXbAwohoWhiYYTHaYLimj9lRYiZjn++7QcqhMBWMMSzcVobQ6kNR7ShX2zlNOhoVx\njC6pd7/aCcaAZZuL29yGlkimhaGoKpZtPor6QKTlnQmiE3HCCEastFp3Y5eUKmJkrwut92YV29gW\nBlMcZmaVYWGEbHGLXTV7sbN6Dw6U1OOjxXvwl1lrk3ErKSdeDONQaQO+WL6/1aNxtY1ZUrES3Foi\nLDWNKTVHMgXjhy0lmLNoN96c23kz5QgiFieMYHAxg95Np6FkOb3WG0MwDFFQNaHgTJeUPehtWBjR\niw4V+0oRCGnbVCh4o2A2NpVvPfYbOQ7YO3hFsVxSywuOYuHqQyipap2ldKwuqWPtwn/YWow7X1iO\nzXsrEj4mmS6pitogAOBAMVUyJtKLE0YwYloYjbOkAHgcGdYb1TiGB2OApGdCGYLBZKdtlT5NFEK6\nYAicdu4Sf5lZH5f31mFH9W7M3v5RW28npcSzMCKyJh6hSNMAf3Mcq0vqWBXj2/VFAICVW0sSPiap\nMW/zp0ZxESK9OGEEI1bQWxCafpYheszXzL52hsrb0mptMQzd+vAFNVdUSNYEY8wpV0DkRZT4y6zl\nNljT69U0hBFppfsk1dhFQra/1q2NcOTY3T0dNbaczJnenP6F2+91b1EtZi/cYT7D9mDVthJ8vmx/\nu12f6Pyc0ILBxVgbwy4YUYstMR6SEfQ2XFK2eRiBiPaZYWFkODzIEbvgaH05Xvz3Fu043upoGWOo\nrg/h/jdW4c15HcvXbe/o7C4pST42wYhXSyoQkrBxd0XcyXxMH6G3NobBmQLdijYmUzBiXP/ZjzZh\n1bZSbNlXlbTrtJbZC3fiqzWH2u36RGIEQhLmfLML5bprsyNx4ghGgr1OhsMuGBxGnN0TYy48CVB5\nax6GKIGpHKAKtpRbXTD0oLdbcKG8SoLMJJiuCcHqaINyEAdLtMqoW/e3XycSi6gYRpSFob1ubUA5\nnkvqrXnb8cbcbVi/qxwAsPtwDZ58fz3qfNFxoFjC3hzHECNPWgxj/9E6LFwdv1OWlPa3JimFuGPz\n5epDWFZQjDf/s629m9KElAvGihUrcO211+Kaa67BO++802T73LlzMWLECEyaNAmTJk3C559/nuom\nIcMl4vSTcmP2LN6oGAaPrAwHHCIPxmwuKVECZAcAzoxzGGJiWBhu0Q2oAjieAZw+UrZZGIX1R9Bg\nS7sMyiGsOroWSowZ45v2lmHBmn1tueVWERXDUJq6pEKtzUCKCnpbr38q1Kr/Fldq8yx2FNagsLTB\nFNLGIYBV
20pQVpOa1ORkdaJ//XCj+Zp10BgGTVLs2BhJMnX+jpeWndJqtaqq4qmnnsL777+PHj16\n4Be/+AVGjx6NAQMGRO133XXX4S9/+UsqmxLFq3+4DDzH4bPvm3bCjV1STgevWSeqzSXFK2BG0ULT\nwtA60bAew3AJLr04IbRSIrIzatGlN7bMxmnCRQC6AgDm7luIVcVrURWqwfgB10a16Z0d70LIqsGo\nwFPIznA1afPhsgYcrfBjxDk9W/8wYqDEmbgnHWMMI56FwXGa28b4LCJr5/WHpOgTcEBRhQ+zF+4E\nALz70JXNX9B0CSXeMaakWm2sU3aAvlpRGMSm+R5EB8HwnneAn0oTUmphbN26Faeccgr69OkDh8OB\n6667DkuWLGmy3/EuSGe6p2JYGA7eYb5mjIfLIUAUeIDxCMu64vNWQUJTMHQx8ellzr0OD5i+j2lZ\n6P93dWsl0asjWtqnxyWgMqi5pbZX7WzSJiFLG4nvKyuLeT+Pv7ces77c0bSjPUZYnIl78jHGMKLK\nm9teG9+DoUkRSXvhD+pJBbZzGKOu+NdQ8fyqt7GudJMVdNa3lQcq8fKmmagKNl6X3SIVy7J3UL1I\nyuz9YyEQknCwhFKNW4QzkiY6wq8lmpQKRllZGXr16mW+z8/PR3l5eZP9Fi9ejAkTJuCee+5BaWnq\nFiK6dvjJ+OVVp8XdfuGZPaLna6gcnKIuGCoPcCq6ZLnA8aqZHWVO3NNdSUY5kFxXDqDooqILhSEc\nkwZeBwAI6gKU5XGaIlIeqIzbvgOVzT8bo8NtK/FKg5hZUq2OYcQ+t5GIYFoY+nl9IQm+oCV+HGIn\nLdgpD1RgXVEBPtjxSZNtH+38DHtrD+CLvf+Ne/xx8+u34jLBsIxguPUpzC11NO219seTH2zAUx9s\nMOepELExfukdUC9S65JKRCGvvPJKjB07Fg6HA5988gkefPBBfPDBBy0el5eX1er2/P7686Pee3X3\njihw+Ntdl6Ffr2w4HQLcQgZCiuYr79Y1AxFJBSvlwfEq8rt5cIhXwQwxMFJleRV5eVnwq35wHIeT\n8/NNC8N0RelB7755eQAAf1hzXzkcAjiH9qwkVcL+0F5c1Oc88Hy0nleEapq97+wcD/K6eWNuKyyp\nxyNv/4iHfn0hzj61W7PPqaja+oP2ZrrNaxrfJifwLT7/3YeqwXEcTj+5C6oCVuefneMxjxV4DhIA\nt9uhfSZo9/vlj4fw5Y9W4Dgjw4luXa37inXtBsGyHhwO7TxOp4i8vCwonL4YliP+76akzpqh35rf\nViAkweMSY04MjXWuzCztee46VI1Fqw9h2i+GwCHGHreNu38+AGDBCxMSaosvEMEvH/kaYy/tj9sn\nDY66tj2dN7eLF12z3QmdM5mU12i/KyYIx/T32xaO9/XagsejeTk4jutw7U6pYPTs2RPFxVYdoLKy\nMvTo0SNqn5ycHPP19ddfj+effz6hc1dUtH3t5WBQG+EzBnTxiKir1UQii89BSAmAc4YQDkmaC0bl\nwXGAIOqdfyOXVEiSUFHRgEpfNbIdWSgr85kxDI5XwABwvHZsxMfAgYfC6YHysIQan2Wqv/jjLNx1\n3q0Y1PV0RGy1qUrrK5rct/0HVVJWDyGOu+H9/25HbUMYr/17M566dXizz6XaFliuqQmY1wyFdSuq\nPhTVjj1HarFs81H89ueDzM7vgVd/AKDFG2qqY5/P6GN9/jAqKhrQ0Cg7yiAQjKC21jrHrP9swbkD\numFAb+u3c7TassxM11lYRkVFAyKS9pyliBL3d2O/55Z+W2t+KkVOpgsOkcczH27EL0efhqsvPCnm\nvo3PtftgFQ4eqcG8lQcBAKf3ycawM7W/CZWxmNl8if7W9xdr1u2XKw/i9kmDo46zWyrl5Q1Qwslx\nXx4LtbUBVFQ4j9v18vKyktJfHC9CumtZVdWkt7utApRSl9S5556Lw4cP4+jRo4hEIli4cCFGjx4d\ntU9FhVW+YcmSJRg4cGAqmxTFWf20gPMVQ/tEfZ7j1DoizhWEUxS0CX66MDhdWmdkWg+qVXyQMYa6\ncD1yXTnarGjTwlCi/j941A+m8KaLKhRRcKAsOrW2Pqz9UHy2pV+DsERlR2E1bnluKXYVVpuf2V1S\nQTmIz/bMR0PEpzWTGXMaWk46jSo+aA96c34ArIlL6rl/bsKaHWUo2NfUnSYratxaUlYMI9ol1aQ9\nKovK1vrvqkL8dc7GqH0CsmUVMS76PEYFYaWZQEVr5mG8s2AH/vHxZmzao/12P16yF8s2H03o2K/W\nHDLFArC+j6/XHsKtf/selbq7Jl7cpzliVTMwsD/b9ophdAa2H6jCii2pL3bZHJwZw2jXZsQkpRaG\nIAh45JFHcMstt4Axhl/84hcYMGAAXn31VZx77rm44oor8OGHH2Lp0qUQRRE5OTl49tlnU9mkKAad\n0gUv/P4S5GZGj3Z6ZuZhn38XOF6By8EjIvOmMBRlaCPnxhaGwhT4JD9kpsDDe3GkrMF0WxmWhRHL\neG/hPrjPFgBBQddsF6rrw3ApQXDMiQGu83CArTNX9/NL1sjX7zqChogPWc5MfLxkLwDgw+WrAUcI\nkNxRncJXB7/DsqJVOOorwR+G3mF2ynwCQwSmMkCMACpvdtQH6gohnfEtxOL+CEeiXVqc2w+hSxlU\n9awm56ppCMfNkrJiGNp7o/RIYxSFtdjJ2Z+TKuiuD92JVhfSRLesPn7QO9EYhr0dWR4rQWLOot24\n/Pw+sQ5pFpdT+0I++16bgb1lfxVGX9A3KsgvySqcjpbTmuwDhsaps2Hbs23v9cuPpZjk8eLFT7VJ\ntj8b0rvd2mDGMIz/GcPhMh/65Hm1eGo7klLBAICf/exn+NnPfhb12fTp083X9913H+67775UNyMu\nXbKapqlOPP1qLN+5B1JJfzjO4iEKvDlBz8fpFpEhGODAmCYYxmh++x4/Nh/aAqGbkVarB70NS0MR\nwFTNwuia5UZ1fRicIIHJTuzcE4HrNGteh3FOJjnAOSSsKdmAq0+5XAsK8zL2ur+B53wguO7aqJG/\nsWTsoYYi/b0uGLYMjHjWhqKq8AxdCjXshqKeAwDYXa11aI7eBxEqvihqf9fZP4ITFBRHDgKITu39\nfN9cfW6J1pnaR/JxLQxBgpBbAaWqFwAOispa7OQCNsFo6FIAFA0BGKCoCoKKH+CA2kj8DB1FjR7R\nx3s2stx0XkpbaDwp0RBXvy3oH4ooCQmGZBOFmobo1R6jLYz2FYyOOHJujKqyFhMtUkajWmTrd5Vj\n5vyfcOXQPrhpzBnt0yadE2amd2vwONyI7DsfzJ8LABB5DpwjehKNkR2lveGhMsWsVMv02AVrlCUF\n3hb/UAWAV3WfPwNEOao2VUTVrlcf0gRDLusHMA4bywoAaB1K4zbZR+heUZuAaMRAmGlhcFh86HvM\nWPV0lLvLjk/Wrsm7QmZpEJG3OqzGabWGEMpoOtFob8Mu7Pb9BHDae
aJcUk2ypLR9nP23wTlgK4S8\nIwA0AWtRMGwuqbCnBJxX8+f7pID5B6jy4ZgrIwLRa3o3Z21INpEoScL6Jo1Fx7i0L2QXjMQypYx5\nLABQ2qiisN36SMYqiolSWh1oMgGtvQUrEaQ41u7xoHEtst1HagEA63Y2zTA93pBgxOGZ2y7GpMv6\n47STciEIPDhno1RA1TbiU3koULG7qCp6m/4/7w4AnArOFdRFhNMFQ4HLof3PcQxQHOYxRqmRyqDW\n8alBL5i/C474iiGpsjY/QrT9IYqRqFFkWLW2qUw1O2qe4zB//9eojzRgdfH6mPdeI1nxFKP4oGHp\nAEBIit2BSUwTTCvXniHMQlChgvPolpIhDkoEkZNWg8+pMMXM6PB4fd4J79XOoygsZicXDMtm525Y\nGMPyz9MeR14RGLRYjp36SHQQsSZUi/21hdFus2Y6NPszLqlsWTBueW4pNu6O/4feeIEqNY6FkQj2\nTs5+vLbNOsfxSiFmjGHGO2tw72sroz5XErTMUj0PISwp2FFYbT4P+/Xs4nu8MWuR6e+NZnUEVx4J\nRhx6ds3AuEv6g+c4OAQOnKsZwWA8GBSs31MStc2wNMT8w3CctFsbsVf1wvCztJRbjlchijAtBfsK\nfuV1flTUBrFgk7Z2BgtmQY1o5zOsBrMIIgBnv59QHCwy39sXclpyeAVUPSbCc0A3txbs31a5I+a9\n19kEw+ioa8OWOyckWokKhrABgE9uwM5DNXjqgw3aB7b28Rna8cYf5+6afVAzy+E6Y6M54jRGwcw+\nQx7QXVJNO5nfv7QCr3+h1dvx68Iw/lRtljznCAGMNRGMunC0W+qx1X/Di5vehF+2xUD0Symq0qRU\ni6SoAKdC7HkQxbVWTCTbGz/rZ86i3XG3Nb4v08I4BsGwWxGSrKLWlnUWbgcLI968oHirODYEIman\nXV4bxO/+9j1WbYsuUb+vqM7MBmsrc77Zjec/KcCan0qbtPd4WBjltUE88OYq7C2qjfrcKl4ZLWSJ\nJKykGhKMBBAEHkp1tG+eRQkGh7As4XBFbfQ22z5Cdy2LRqnqhT7dvRh0UncAgCgycE6tc2cRt+nG\nWrPzKB6cuRp8Rj2YIoCFMsxyJIYY2F1SQtcyfF8zD/uOan9MQZtgzNv/FeoytE6L5zm4BK1zO1h/\nOGo/QAtu7w/9ZL43Rlp1YWtkLuVr1XUDIQlPrPmH+Xm1VInVRwrM95zNAuIztOMNwbB3xEfFjTjS\nUGyN6owZ8oarq5kYhpGZZVgYOa5s/YLasQH9/pik3XNdIwvDiPUEFMuCUvXJbw+seBSvFcyK2l+S\nVYh99sJx8m6gj/accjKdZipvLLo1M+ehqUvKsDAsKy5Rl5TdXbZ43SHc9/oqFJZqAtkeWVLxKg/E\niv3sOVKLe15dif+sOAAAWLNd68SNcjAGz3y0sUmGXCzqfGHUNMRO0zbYul/77Rws1n4TgXB0okGq\nWbDqIKrrw5g5/6eoz824lv6TtwSj9dc4WuHD58v2J+07J8FIgByvE9LBcxAptGUBKZYYaAFs1YpV\nGBZGyAsW0YLqnD5/g6kC3E4BXpfWiQgOm2CE3QAz4h4qwCngPH6ogSyYbiwADSFdMMRGMQNexfwf\ntD84Y10OA5nXOlSO40x/v8pU7KiyRr+MMby48S1Uy5YLxReQsLNqD/bVaUFvpa4buIw6bDlUhLte\n/gE1YWt0dCS0HxsjX4PTrQm7BcRn1gK8bJbgsLuGKt3b8fGuL6yRp25lGfenKGqLo+KAHESGwwOR\n10rOc4IClVkuKTWoTfybtW0OygNNV+Lzyz4Yf6EqYwjIQURUCXtrD5hpuYDWkQi52vGcU3vGWR5H\nVPpxY5oXjNguqZYsjFjuGskmCkfKtOdbU6+10e5ikY9TDMEXjC0YscR/2wHNqv16zeGkXPve11fh\n/jdWNbuPEUMzEkKOt2Bk6ll2TZ6TEXMzBUP7P9GK23aemrMBX605hI27E199sjlIMBIgv2sGHvv1\nCPx53HXWh3pw+qW7LwWLeMA5w+Yo+srzTtb2YTxCWy+LPpkqwO0U4eK1Ea8gKmZ8hEU85nnBK4Ao\ngeMYWETrcAzrwxcOAWBaJwwgvGeotl1yIcOt/QgbWw6GEPE8h5AcAqf/+2zPfEiKBEVVsXD9XjMV\nlelVeOsCIawqXqt/xkOp1WapL9tfgHhwDv3adgsjsw6us9aYHWLjWIKTt7l09OM4dwDgFCgqMztk\nvksphPzCJtf0SwFkOrVAP8e0uJCiqKZgsGCmue+yoqYdyfd1/4F76BKAU6GqDA229hX7rJIskqxq\n7QIA2QEOgNspQpaZfm/MDPAbZGVo34nj1K1wnBztBmw82pbMVQ2tDn7TngoU7LXmuDw6ex2mv/JD\nk3uwJz3U+bRnaIjDdxssd+Xxqlbrj1P/K5aFITRKgEgWzcVBGmfpBW3ttT/LzXsrzPkxycSr/602\nFifr+7EGMEDiFgZjDDsLqxGKyKabraVabIlCgpEgp/TMQj+9pAegFSn87f+ciRyvE0q5NstX6KGN\njjJdthGlKkIN299rFoZT0Kf/Cyo4l80lZRYsVK2ihYZPX9+29WAZ+NwKCF21YoQs5IUayAQnSvC6\ntX2DSgheMQMPXHCXdgpO74Q5hpAShlzfBVJlTzRIPtSG6/HV6kOYu0brzOSyk6HWafdaFwiA5/TM\nrf2DwfzapMafiovMjlGp64bIviHmLRqussZZXHyGDypjqAnV4utCrQilGtKqAwucNafBsEw4UYLn\nwm8R5GrNUanrtAI4T9llrnpoEJACyNTXY+f0DDRZZQhKIf06lmB0cWnZb/b4i3Y9GRAjUBlQbwvy\n76zeY7tOSLMmAUCMgEErLaMyBklS4Th5FzwXLgbfpRTiSbugTXRUAV6G2L0YYs/oEbSsMESUCBwD\nCsB5a82OKmxLLFi3sxyvfmGtA19U4YM/pE0U3by3An94bSWq6kJRnZzhglIUFTUNYRSWWgJ4vGIY\ngTguqVgWRpMU1iS565uL/5hZemosC0M7rrIuiNe+2IYH3159zG2org/h9f9sQ3mj0vweV+xZDcbz\nYY0sjETXhdm6vwr/+KQgytWVLCEmwThGpk0cjMv0yT0PTbgaALS1LwBkuT3RO0vWXA+m8nA7BTh0\nwdiozrO5pBpZGKaLS/9MtzCWFhwC77UCf0xyaIFiQTLrKIXkENyiG909WoBb5YxKu/ofsSzqa3oA\nYSWMo5V+0zLQhEs7T00giCMVesC6vqs1GVGQo1xwSnUvXOwZq20zBEO3FOQqK/7DGPCZrQhgeMcI\nvb36WuiCas1X0QnxNXonZ/3oxV4HIHTT4kKSIiGiSqZggIngeAWKwkz3GwtYgmEUioyVVszxShML\nY8XR1WY6bnXIcsFxDq3NxmSqyrogxJ5aDSzXaQVw9CoE5/Fh9U+lZsaXdpNWR6ooKjaUbYHYrRTu\ns9eYHX04gUKSEUnFa19s
Q70/ghVbiqMyoczzq6xJxlQy/NmL1x/B9gPNL/zVGgujsbulpa4x0Tkw\nDXHcYgDA8wycK2B20AFbuRQjHmTEkuz9bWFpfdRaNi3xr+/2YtOeCnzwTXTyQ7y4XGMLsLUxjCPl\n2mDHvjBbsgw3EoxjxO7yObVHHpyC5VLJz8mE02E9WjPrBzBdUkbgWoEEzhnSOmhVNK0IIbcSYm+t\nhARTBW3tCHswWD+nQ83U0nEVBzgOCEohHK30IygH4RHd5voeKq9nYhnrkSsOK4iuhBGKKFHBd6Md\nlfV+lNQYCxoJWhsBbU6JLhiGiDiYdi2tI2Xgs7UfrFw8AEqdJlyyKkfFBKDPPTHmsLgztG1yVU8z\n/lNcXY/NeysAm8Xi6H0QzgHbAFiikOnMwIotxZAlDuAVyKrNJSW5ENl9AQCY14o5D0VQwBhDQ0Tb\nluPMRnWoBosOfQ8AqI1YmVG8JwDnoDWQnFp5lkdmr2tyOk7P9LILPOeyRpolVQFsO2C5mzbzX6Cg\nfJs114VTwGXoqdVqdCVae1B5wY+F2HbAKBOjwtF/G/icCpTXBPHOAm2kaYxo2zoPIiwp+GTJXrz4\n6ZZmR66+QMQaVNiIlSUVq6wJn10F8DIW/FioXVeWwLm076W5GIO9w20sllHt6LYP7iErUMNrIh8M\nW22V4gh2MCzjr3M24tMYa+nEw6jj1bjN9vRi+/ca18JIUDBifSfJckOSYLSS+4ZOw+DuZ2NI3tnm\nZxzHobueqgoAORkZeHX6ZfjddYO0D6LmbAhwOQUInPUZ5wrYgujWr0LsZqXpCjxvncdmfXRvGA6A\n01JyAazacQRPfPkJwkoEHtENgRfgFJxgumWh8LovVhHNa+4trsK2I0Vw9tdcUizianQt/Yet8qb4\ncYJiWQL6viLTXW9iBJzHByGnGkpdNy1+oAuNpESQ49QzmcKZ2v2qIsJ6qrA3Wxe0iBuRQ/rzE2Ts\nKKwB74ox74FXTMEoLAri/a93aRYZr0BWmCnsTHFA1WNBxrViWxgyFJuF8asz/xcCJ2BbxU6tVlgk\nuryIkFWLqowt+sEx3B+6YHA2weAzGsx9V24rwcb9tnRovhabyreas/Yd/XbAfY42XyUsKVEj68bB\nUmPlQj6rBmLeUbjO2IgFPxaiqEJfo8V9bILhC0pRKx3a/eH7iuKnuG4J/gDPsG8BRzjKqoll4TRO\nGdUBXG0AACAASURBVC1R9sN15no4T92GuXrm1OJD38M95AfwXUpRVF/S5BwG9ooHDXql5IgiYfb2\nj7CzynIvylnac68QdkNVWVSBRimOBdMQiEB116ImmHhRQEMMGmui/bu0W2OqKRjG/CRtv+bSarcf\nrDLvO5Y4JGtOCwlGKxmQ2w+3D745yqIAgG4eSzCcghNOh2CO6KIsDKa5pK7rP8b8iBOU6DTdxqgC\nnCJvm59gCUbEGHTLxjbJHNn3d2mi5hE8UFy14Lx1OJS1WGuGKpjn++KHPVEdGot4rJnsvKKvMMgB\n4CAw2xyJRllhX63UgsOcI2LGIVRfLgDOtEIiTMKuI1r7QruG6seL5qi/78n6XIxAllXt1yitos+F\nUX1WlVoICvy6NXDoqCEO2tK4ZTU+lNbVWc9Hb0NIDmPzngrUhzXTfdzJ43CaMNw8X3lNED/sKAQA\ndPd0g4Nz4VBFDZYXFKNe1s7X3WG52VzQ3F1mMNyGZWFYLinnqdvhOtcKvBvZVgYO3mH63oXuWiE8\nMf8wnl/xEd7+cru5X60vjltEjD2qNrJyYsUwGGPYWbUHSw+vaLLtyffX4+G315hCYff1Hy5rwJqd\nxZAVFXW+6LphRdDmyAi5ZVGxhFgWRuNRcbWi/Zb4HMv6MuYNuU4rwMvbXjMXHWuMPYXYsDCe+Wou\nNpVvxZtb3zW3CbL2vQWcxXhj0cqoCgZGsLix66vSXwf3OatxOGdhzGvHwhDoxnEa+3Owx3tMC6PR\n/cQTjK37K/Hiv7fgrXnabyOWNiQrz4EEI0nYLQxXIzGxp+ACgNspINPpxbhTr7HtE7+sF1MFnHda\nd2t+Aq+YgdeI3tf07qIFcjlRMjvy+Qsi+GFrMer8WkfrPtsWuJOtWeWcIJsBdqUhFyycYbuWNlHN\neJ/tdWvioQuJ0T7thQAmi5pLqpGYGBZGfTCA0toG/Tjt56fKvFkKJcRrI3g1mNVkAp8xW1wqOg1y\neV+z7XVhv3VP9mtyKg5X1gJMqzZsuOB2HKnAa//ZhrV7tNIjny85gp8O6B06r+CNudvMLK5sZxYU\nSQAnyPh+81EE9LIpl+Zei1OyTjKvA6Dp5E6j7YIE3h2AGrTWi+dt4mK4Ag3CShgRSdFcEJJ2T0Ju\nBUqFHdhSaQW/lx9dYQbW7fEd3tYOzuU3S9JogsFQK0V3tC99ugW/+9v3eH3L/+GLfV/CF4m2vCrr\nAnCduwKf7JoHILpU+vyCDfiw5GX88V+f497XV2LpJqtqL6enR/NZtVHHxJrpbc1jYfj20DL4mR4r\nsv1d5Dpzo46xx5PsRFkYumCUBLV2uQUrAYVj1t/lT8HVUccZFkYTwQhoAwZFaJSFqBOQAiitDuDl\nz7Y0qTzcuMO3p2LbRbixBRavirNBlb6WixGzUE2LxroeBb07GD0yojOoAFvVyUZi4HZqP9QshxWI\nRTMWxjmn5GHcyH4xXVL6GkxwGX8IeufPFB4Ah/e+2hXl+zeQy0+K7pANwSg7Obo9hktK/+PPyXAB\nqhgd9LYJourPBp/hM+cqGGJiWBhLtxy2soxsM+IlFobY8wB8rBo8eM2NZVoY+ig9s0ZbA9yfY2uf\njJqAXnZEFwxr4SpN1DgmAuDQv4fW4dQFtM66rF7vcGSneQ+cIOvxnDB4JsIjujWxE2R4XCIkpnVA\nua4c3HXerdrt81qasxEEz/QPtGacCzJ4XeiU2vwm3wOXUQehizbvxUiPNmJKOV5nk98ObDXMdsmr\n4ehVCPf5S+GyDQbswuUe8gOcp23W2uVxQMg/hK/r5mBd6Sb4ghIqa4PmHAiDssbzVBwR8J4ANlZr\nMZoo102u5i6K9NkA99Al+HanlW4tqppA8hn15loqQPTI+l+7Psc3hUvMDlroWoJ5+79CNbRsMuO7\nNObG2JHi1AWzz9iuD4Qwc/1HELtrLqwcwVZpmbfNE8poiBKMQCQIX8TfpIJybSh+ActN5Vvxxx8e\nxxNz/4ut+6uwYqt2TaP/53kOBeXb8O/d88BYdLmbYEiGrKhY81NplHvqQHE99uhuP9mW1LC35gD+\ntesLKKrSRIhCakBzC9s+TkahTOA4VKs9UejltToDI2X27P5dMbBvDnqc0RubGw6a242smkynJRjN\nuaT69chFZobDCi732W8GkcMh7Ufn5LSAM++t1zp42/mYypkZXAAwKOdsbLIF2DleAThj/oXuRtM7\nJtcZG8Fk69ouhwCmCOAzfHD029Gk7XLJAAg51RC66nMXDDExr2V3ZfFR2xwn70GY5
SLT6YWf8dGC\nxqngM+t0V5XDFkuR8e/lO+E8xSbM9vsSFPD6SHLMsH74oASmxcIEW0kW0Qjm6+a/IwQnMsBxnGY1\neRR4XDwa5AjAAR6HC27RBQ4c6vijcJ3lg1KjLw5Wn49Tc06x2m6Ufom4oFTnm+nQAIOYd9Rsg1qb\nBzAtxhKWFGRlOBBq7F4y53hYHQDnkMA5bEvaNrJ0hBxNELxuh/m9fLjzU8gNXyOy7zwA0Vl9ZYEK\nDMjthyUbi7CjsNoUQkCb7GkXDHsRTk6UEcksQllNAHm5HiicdhznDkRlIBmdl0/yY1WxJkKXsf+n\nP/9GI2H9O919uBZ1IT+YIkAqOg3OU3aZKdONsXf8FeEybD9guf/s7WC8BMY4qHXdIORWwu+zLKvl\ngU/x1cpaBNddA3tc0V5aRlEVCLainKuOavOVkLcfqOhhDgzNET8PzNr+IQDg2n5XRnXitb4IPl6y\nF9/bLDQAeHrOBvO1PWj+8uaZAICzup0Bvy1lPBiWsZZ9As9QCcr+oWAKoNb2SNpERLIwkkSvTEsw\njHkLToeAGTddgFN7Wu6qHrkec0SQZROMU/O7xD232+HS4iH2UiM5WkZMSP9bzuP6AbILYs9CcKIU\n1YkbqasGP+3X/fpGp+uIxO3EAd3Npb93OQXzOMOtkuG0pQ0bM9sNq8ZMCbbiL11zdNcR49Et22Va\nLwAQVANwCa6oY3i3D5zbD45X9Vnv0efjmri/LOuI4xWAieA5Dh6XA1BEy5VmCIbkjC5HwqmAIwIH\n00bITI8PFVXVotrvB1M5uByi+T0D2sREo2MNBUS49ew0TpDBGWm0iojIgcHWvBxeMcUrvPsCaAkA\nDoSVMMKSApcTUUJg3C/QNO6hfWi4xmIXRfR6RG1yKLSOn8+sg5CnB9xtAfsVR3/E90dW4p/f7sHm\nvZVR82nW7i1sNAksenTr8zE8/PYarN5eAlUfwXO8ioKqzeY+isowf//X+GjnZ+ZnZufJR1sNxiDg\nHx9vRqWvQRN3Q0SKLMuoMliF8oAW77ALRkiJFpWGUNDswFVeAhQRql9LwqhnVrzEcInxuRXgnNbz\ntE84/XL38qhzu0Xtd8tn1kHIPwSfXpGBqQyctxZlmZZwheSQaWnxORX4v6VrmoiF7SmAzy2HxJp6\nCurD9VHJD5V1ISjQ3gsDNsF1+iYA8et6tRayMJJEpiP2WtoA4BasDvW5O6zO236M8WMDADWUEeXj\ndotO8BzX1D0BmCN4t+CCw5cPKfuwlhoatM7NAtlQ6rtAyNbjA4rW0ZnFEXseMjv6Jp2u2SjtmHNP\n7YbdpdGjwAyHC8afUeM2mi4po3JvVg04XgWTNZdZbpYLDbZ5CRKLWM/COMbbAOdA3dWhOKKuY3eN\n9evRBQfqbBaP7rrjVDdEgYPLoWea6Z1uhOkjcVs8B7wCzhEGxwG8rHWuiqRtq/b74OQVPQlBvy9b\n7MCwJP535CB4jHsQ5P+/vTMPr6LK8/631rvl3pt9D1khJEAgAcIWdmQTJGkWhRe1WxRFWxRwQXrU\nntHWmcYHp/vpx8exfbrtxWec0R573ufFcXoGX0VfEW1axBZwWFQSIAkhZM9dquq8f5yqU1X3XiAo\niCT1+QdS66lTt36/81vO77AEAKLfR+tJBu9qpsrOsHZCPvbM/dEQrVnl1kujdKRDSG4znxdm3EPr\nCYJP6mT3giLFVVY2FF6SR2Lns316jS2rUmjsPonG7pMAP4+6Hy0Wxnst/w/DBLNEjnWftX0ff3EK\nsHhc32l/E5BmAVE3FFXBn/Q0ZQNjBMydJ2BPrx21pXwrxDz28T3/AABQT1SiZkQGDKupX6XfEYnI\nAE+gIIrX/u8x1FXlQOOiIKoIEnXrx/YDoHEeA9eIv4BEJYQ+ngPALP0PAH86/SamFVYj2RXEZ1+0\nQ+bNb1guPISmaAaAcmiEwD3qA1jzqvpVY+kADa5yWhur/8OF9gcWopCKPgNUEWJmE5TmQrx3oBJ1\nVTnskLZQO3r7zcFmbyix1ZVojs7XwbEwLiO3jVqDtRWr4rbHBcF1rBaGWzSPCf91KrIj49jfXsk+\n4rahj84FgYdILItBxQp8y7lMoFpiD8aIlQniGEFg3Ke2IhMen/3H55XNQGJlfoZtH2LuJeV8gQ6t\nlSkgr0sCHxNjMRWsOXrlPb0xbTfdVUYW1aIpZbhr2Sh2bcMlBU2EKPCQJUGvEkyPjyIEmXMB4G0T\nEg1hzOmCRInw7F6Gu08U4z8do8+mVRSy4CqnB71pmyVb2zneuk93BSoCzhquEd1S0HqDCB80srgM\nhUGVgtKWixJ3JbuX26uAEzSo58z3YMR2fB7pvAI+UZzLmHhpVSZfRj/F7tC/xD0zQxf41gWtzOvR\nfbGjfoAuGMbJ5sRHdg6bx0EAga4Zw+IaMCbVmQJeGHYQn4TMkf8XZ6gVEvmqki5Cxit488MT+JsX\n99K5SYrI+j6il+ePrSLASVHW7/2qPSHgz6cP4L/+3IifvXYAB0/YYz/tfZ147vVPcbYr3hrsV0K0\nbIulb901u2wWlpjRCDGtGWImtQI5bxd+9QYtxmik5Z/oakJbxKz91hEyFZoJGdBE0IHgKIzLyPis\ncZiSMyFuO88JCY6mglHk6Y/VI1mFvYhUyfzgPYbLRxMR+nQa1G5rtggVqqLAsTgGEB8TIYawAphA\nNeIVNoyPsTdo20w0ARyoHzys2T94aymUZdOG29P6mIURG7wVWLtjS4hYra04Yhan4t294EX6MYwp\nzqJ+Y0MR6rW4NJWn/SPy9HyjbDofhgS97WxCogroQlDTrS6j76Sig3pCgZBwohnn6odP8kLgBfMZ\nBMUsPBmbxaUrO7ZGivFcutDo1Vd31HoDccv9GnEKEvGYmT+CAneSns4c8qH/41n6OVTo+twSIEbg\nhh/jhRvYOQDAiQlcXEaJllilYH1mfaARbRyuX4ee09qjB2rbzNGwoZxCoEJ3Zv5UjEmnc23CSgTy\nyA/Z+ZEv9Tk4hsIQFBrEVU3XrFEdIOEETMM9Z9QzUyQ9ecFUQLTvRWaBRbWw7ZlslxMj0DQN3Rq1\n9JRmGqP6n45jaGmn76IzRli3dnfhz5+fscV8DPqVEBRVs/32OTFqm+BpxBUNSJi6SOn6NvT5jnQc\nx1fBnWwA0R1O3BeOhXENcb4qkxzHsUwpm8IAkOo2zUyfvu+Bm8ZhTd14WyE9A1HgIXNm2mZsKq/N\nOlHjLQwDluranYolefXmDo2H1y0mXLbS6zKVkUcWY6wZe1mT2O0CzyGpc7Rtn0s4v8Jgqxhqpjst\nJZd+ZC5RhiQKTFm6hlM3lhrlIQg8C9hTIURAhAibbMgEsqufCS0lIlF/uP48gr+DChNNQDDB+he8\nKwS/TGMsIi9C4kXwvk5zBr0S605T6WjW1l8iOEEDQNBJ6MhR6w3aLCoA4APt+r4AzeQCVSYun24p\nhj1A1A2tJ8DOccs8OCkMGR5oUaOWma4w
dMGlfFUJF9HnlRgKw514ZUYIUXBiFEJvJpTTpbRqs369\n3sAh1nZDmRjtCGnU+gjIfrgFOsiJkAh4t+lKU9vyqJvUSEKwVihg82ki6O6L4Me/j587wgpYWtyB\nRBV0a5SwthhVEgAgrIXAwVSQysnhiJ4u0q8XxeftxxHiu6C05SB6YiQIAXojvTQOZ7lXibfc9nei\n2e4hJQRVJXHK2FYRQE5sDfZG+0BAQIjlW9Sv0xOhfcsSMAA6V0nrwtuNF67eOxAchfEtYA2OxmLU\nP/LGKIwMrxko9+gun8qiVMwdn4/a4QVx1xF5Dh7eku0SY2EE3JaYBps3kaBdFuFV4LPcR+Ph0yd+\nzR1mX6PdWgbFJQuILYUCIP6jMUqgCDwy1QpEjlWxXdaYT13WjITnWe/Rqa/V7RJkyBIPrSPTdoqq\n6BaGJACKTOMT/nZwHEE0bM+sEoJn2WS5aJhHR3fY/mECyEsNskmZPxi1xrYvYEmVLg0WgxMVljbL\n+tbmTlPs/WVZ1pfz9FDfe9TNLLTcLBe23TIWQuAcDdZG3eZgQ1DBu43yLh7WT5yg4YHVVYiQMDie\nQIIHasS0cgBTKah9SVCbS+g2MQLe3w4huQ2C6mGVio1Kxka1ZD90a1iRqFXCaeCDbbS6cVuePd4E\noF9feyQgm8ouooXNwYD+PoxFxgBAyGw076G/q7AaxpHTbQgVvQ0AiDaOgHKWWjRs5C4amXAyJE42\n+1ZXQD7Zg1HDMlkbkv0uJqi1sAuwxHi+7KJtUM9lAeAAVaJl8I0MJDEKLezBkgK9phqz0CxlhCTq\nau6N9OsWhm6hnSzV+9SqMOh5N5Wstr0rowqB2poPtbnYdq8e3RWodadAOZPH+r1d/hyvHvl3fFMc\nhfEtkJdEf8RjM0bH7TMsDLfooi4TnaJMM188yWVXJiWZMXECUMHrEUylIPEinrl7KsaU0OvwmtUl\nZZYhCR+eyASAfR/g4k2LhRAeyUm0HfWli/HszCcROlCHaONw5LoKzXMkIa4UCkAtFhvEtDACXpmu\nBWJcw+KSWla6ALnuYeZpTOjGW0cuQYZLFEAiHuYyMNpAYxg8lDN0wp9r2BEAQJeRJWkR2oK+RGxP\nN/DBwRZz3ghrn2ldTMgahxJ+PPvbGpdaVW5aaETjLMrO4l4SFHbv5CSZKYZ5tdmQ3SqIItvbxysQ\nfL0Ap0HrplaoVzLjJWYBSSPuRd/7sFwP2iI0pdZDUqBEeXYOADZXROtPQl+v/nsQo+D0kv1ZkXGI\nHKmG1pcEcATJfpkpjGx3nv5cIjghimBArwLbka6P4O3W0alOah0F5CTmumtKfSMmRsfRd6wPNIz0\nY63Pb1EYUXzRaVb/JRE3KzLJSWHw/rMQ0/T0bkWCZJTQFxQ2CVRWk1i9NQgKAl6ZlaAhETfrf06M\nsNG7UUyUKCJCagjhqAo+0AbeFQJUAZnBII2b68rKmOXv66zAoQP0fby57zhaO/qZwtB6kvX0bdou\nztcBIaUVhAATc8aA0wSmFA6dpXWsSNQFrd/IxqP7enWFYc0mg6BAwYUXkxoojsL4Fkh1p+Dv6x7D\n7aPXxu0zBIwsyCxv+4l1tSjLM2MIse4qq1Ay6Asr8ImmgJ9YnoPUgNucvGONIVhy57WuNFbKnGIq\nD06zWwrpQSqYeI6n9alCSVBOl7J2A/a0W8AapJYQ+mSGZTttgyhwtFS3Ygphq4XhcYnwy5YMNEPo\nRjzQQubzcuAgCRJy032oHp6OCaWmdURUEQLPQxYFaJ3pVGD79NURozK7QviwPf5EVBH//t4XUFqG\n2awMq8IAAAHm3wHdJQUAGZ40UxmrElyyiCdvn2S+CykCjifI8Adw45wyOjlTVyZzJmYjrIbN2JOx\ndK/SiLcaqQtGC1NhkSSbAk8TdJeUIdQs5VBO9dMRcpKWgUiEp7Emyyx6EpUBRbYIySgTRElCgM6W\nD3vAccC9KyvY+i9FQdrXdD6LgtxMe+wn1sIw4i9BVwAe68zrGBcM0QRbDEMLeaC25bPf1LmeXvzH\nXjNIThTJJuDFLEspeSLArSefcLzKhLhXS4NsJCiICqKqBjFJXx2yz2/ri96IRRgDgCIhrPUjElUh\nDTvM3lPQ6wKnSbqA1yCX6bXGoh7WFz3hPpwJtUIqOKK/Lxkk5NNTogmkAlrZluMAt0tEwO2DKNP+\n+/cj/wWoEtS2XNbHctkBCKmn0aN0mX1h6XcFA6+ueyEchfEt4ZeTErqmDJcUVRj0BWemUEG4bvRa\nLCmeb5scBMTMENcJeCX4JGvWlV3J8Kop1OIC4uGYcuw6GiFmmwmf0G8PmAvBALDXvALsEwiVeCtH\n4HlMr8qlAslou+DC/SvH4o4lleA4DgGX5XmNaxMe4QPTWbaIJEjgOA48z+He5VUYV2h1pwkQBU6P\nv3BmCitAZ3kbh/WZwt7WXk1E1OYys/cDT8y/rcqc53hz8StFgqpqyE33MSEuF1IhU5CejAW1w+Bx\ni6zv2kPnQECQ4be3CaAzigGwa/tc9P2JGY1QjOKSMZbJrz57Gaf08hj97X5EIhqdk6KnJfPufmj9\nSRg5LBmV+XROESdGzPiHYMR6aJ8E/BxdD4RwyPYnY9a4XL1iMkEwzZ75Zfw7rzYbnLsHQkYTBM2N\nTG9GwgQHI1gPjdYE41x0Do4R9F08kbphQmrETEuOuOhgQFeUvByBqCtytZNa2Sw2JqjwpOjVAfqC\nOHUmRAcEQhSn2nrpipFRN6C4zHIzYoSt+24qQgkqVOw+0MQC1NHGEeB5DjxxUYUhmgFvviPPzJQT\nFPsSBRE3tLCXPq8cYuO26CnqHvRJXmhSH4TMr6DyIag9AZCI1/atyWWf4AuiL1+rSKYVLijQuPOn\nK18KjsK4yhiL+filJDZSFwX6a6nJrMKi4nlx5/gkc2RdNzYX96+swrQxObYJdMYo2Ai4c+r54xux\nghIApo3JRllekNXI4sRInMJ49NYJmFOTh+oR6Wwbx3F214LFmkm03e+VML48A//4w5mWtrtQVZqG\nKaNpgb9kt2VGfIzbIsVFLTGZtygjAMkuUwEZLin2p8UymVJRgBEFetaZItvjFVZLyaLs3DEWn6BZ\nFYa9L41ArtaTjPJhKXHXBQCvaFhunF6sEfiwmU64Ks1Kwy/un2GLE7E26QojoK+/wid1oVdoocrC\niE/pM6e/6mpEe+QsSFTG4S/68HljB+1L0eLGCnuQnuzBD+brylGMMjcIc9vo/dCn9FOFokjweiTc\nsnAkND1773/wLr0ey2jTkwYkDby3GxwH5ChVcAkyunrtgkwLeYGoG5nJHowtobEF99h3bf2W7NXf\nn2XiY7SxHADHLEZXcjdLj40co4t7ifpvhE86B9HTD6JIaDsLLJtabCpPMQIihaD20PdoXE/K/QLH\nO7+09QH7Tej127SwG1oX/RYkuMHJYXNNmDN5UBXebm3pbZd68gDFBaL/LqXSA+AFDUTlkROh5WKM\
n9VjkokO2e9sGYRZIxLRmeF8HVC5qq5D9dXEUxlVmWm4tvl+5GuWpZSjM9iMvw3fBMsaAXWE8fMtE\nVJWmQxR4uCXLaD6m2JgxpwBAvMJIkHW17vpK8DzHsrU4Vz8CMQqjOCeAtfPLael1C7bgpW0mMGdZ\n4J6ek5mi+2CtzxxTHSLda5kFH9P2FDcVUrFzXZJdZuoxUQWmhAEzPREAJpTms7LfAMeCnDzHo7LQ\ndNVZLTSPZL9Xrs+sXBuIcRdGvhoJEpUw0lVL54ggPmXZrSuMmhEZmFkyDi7ehY9a6Mxoj+iB1y3i\nb2+rxTBSYzvPUBheyW4hcqpl5r3lXXRGOi0uOIDjCHhXP3PdkKgMTSPwy0m0/Iv/HBN4xj0MIdQX\npdlkRJFoZhyA22uXQiAy+jU9vVQVMboklQn6kBKCKOuz7BUZR5o68L8/OGJruzEq9rpFdITtpdON\nZ/G4JJqRxZsTH3m9ijLpC0DrS4LqPwWS1MbaAQBpoHEtKfc4iEDb3tMfxeiSNKT5/OBFBYvqqJIy\nrG4jeQAAQoSWJmHKWBfW1HWnmNYDAJ8eT5RLP2FtiCrUqiME4LzdZu2xvjL9nvR3KfjPgfN2IuD2\n4cHV1QCA1n5zFjq9d0xsy0L0VIm+3DPdJ+UdB/GcYwkG3wRHYVxlZEHGxOxq8ByP7y8aiR//YOJF\nz0lxJ2Pl8GXYMv5u+7VkgZWdOBemPvrKIipsq4vy2XFG/MDjElGSG7C7aGIo8NOApkeSUD0iPtie\nkPNU3r11YTmCxshfz5PPTDaFtzECCql2X3a2pU6XITSeuXsq/v7OyUyhSTEKI9Vtr2wqWCwM6yz4\nJMlnsz4Ml4ZHcCPoNa+Z7DUtB3dM2u+SCebaKF6LMgcAtaUIoY/nYFp5iem600SED9WyY9J0K04U\neNx8XSUKg6Y7zbA+slK8eHjuTfh+5Wrz4npbjbXMDXjVFAzKqVJInEWBxKz+CIBNliNRF8JRFSIv\nguvIB+8KQUg5oz+XBwGfzEa0Lx9+FZwUBYnKdAY9gNqKbGQlmckNRJFQlO3HjrtnAQDeP/0R5k2l\n7qHjjf14+vd/gdKab6tHZQT9PS4RTT2nbM/F0psFDhyvlzZJp242v5EFSMzEBgiKTcCnCFkYnlwG\nTg4jTPoARURuuk/vQw8kt4KRZW57PxEeoU+m256J/V+1KAxRQW5yEE+so++1NjgbRBHB+2g8ROZd\ntMAi4aGeyQfv7oOoZ34ZvycjC83AL/tYSfqCpFzbPkPx2+ZX6Rjl/+OKnl4gXX2gOArjOwTHcXGj\n9fMxq2AaSoJFtm1uSWCZM8aodU5NPratHY/66aXmgfoo/b4VVchJ8wKqBKGtDBVCTAorgMXF12F2\nQR22zbyDZkCdh4fXVONHN9NsIbUzsWLRNIKyZOp/NjJsMlLMEdyDE36IMemVmJpbazsv22dJk9U/\nghS/C5kpXqYwYl1SAi+gIf8maCEPtO5UiJb5I2qXmYHmO4/CUImKJI+pMFycKYRjlZMk8phTMB0i\nLyLHZ0/ppXA2C+p7M0qw1KJkioL2NGlrIUt3zKhwQtY4TM6egNrsGty3YizuvGEU0j1p4L+YxALs\ngmY5R5Uw1jeV/TkqP4cJoegXMVl7UReum0DbInXnsc1EFeCWJTyxrhar6uis8rMhfSEpRbYp46DF\nFUhUEa3n+pnSA4C3mvRZ2Cy+ISP8V7N9rNSNLGBa7iRb8wxlIvI8KyjJe6k147aU5bAtiWwRxpkc\nvgAAFtxJREFU8KLAI91jWqsF6SnYciNNcy0JFiGqKdjXoseHrNcIe01LzSqg9Wu7KmgBxaxAEHkZ\n1MIszciB0mJm6vkkD1vRT23T0131+AYb+SsyIkfNWJnVcrxr7A+QKZjXY0kiSoLBGYvL2U11NdGx\nl4ijMAYRLklA9MtRiDaVob50EQBaUrksP2gTikb8QBJ55iKSzoxCTdr4uGvKgoQVw29ga4Ofj/Jh\nKSjVM7u0zgxET5aiyluHm+ePYMeoGsGkbF2p6EI74DU/wAJ/Hu6q+n6c6Wyr06WPFg0BnHoelxQA\njEorR/jATJB+P7xuy8dicc/5ZR8k0bJuQBd9zpAaZusVGGuwGyNhI25i5XtlS/DszCfhERMnEFgd\nc0umFmFpbTn7Oy9m9GhVOt6Y63Ech5srV+HWypswtiwdkyqpciFdGQgfnIwcbTRSQxVI8ZsCLyCb\nQjw3OYWlWmvdabbFqB5YMYnFc1xRM8OLKBJ8bhF+r4yZJTU2F5zEuVj2HGBXGOlJSbhuYgGrZmDF\nNlK3JB4Ygxm3LGLliGUYlzHG3GfUPhN4/GjyJtv1rKnYVrebVcBLAm+LbWX6A6yfjBU0P2r5S/w1\nwLE4H5s5DqA4RtFb331mssfWt92WQlKGK9Fg0cQy9n/N4i61Zj0mu4IYhrHmSUb/kfhBnNG3WmeG\nrSpEW/s3D3w7CmMQ4ZIFQBOhnCpLKLhMU1VflIfnMFUPLC+ZWmRzD31TlJPDUe4ej9k1+Vh/QyUC\nPhkTK7JQmVaOB8bfg++PXoX1SysvGq8xmJozEZWp5Vh3fSVumFbEtpsuqXjT3Mg6AwC/165QNo3d\niDtG3wyP6LEpU8OdUZlWztYrCPpkyIKA0P6ZSGqahYnZ1XH34jguYRbcTXOHQxJ5jCy0VyPmOR4z\n86diTsF0SDECNccikD1SYgUUiyjyIH1BjOCn4oeLpuMnG6axfUGX6U4LyH4MyzLjLJolQ84q7GVB\nBukz/67Q2y8LMlaOWMa2zxtXauu/ZIsy3bC0GqW5QXAch3UxKeW2YK3NzWO4pARIvIiiQEHcPlHg\nkO3LtGULWgcZomYpkWMZVQsCh6ClfdZvpDhQaMbXABDF7r5RWwtsbQAAn5qNGcEl7G+rRZAacGFq\neTH7Oxw2r11XUQTrEGJ4bjqeXj+ZXt+iMGLffapo/i58kg8TR9KBhW0FSgAjcvQkFMIjcthirV9g\nkbaB4lSrHURcyGUEAOHDtXopCl1hCFSQPb9lJmRJQGfv5cnVZugW8eTKbEyuNH/sxcFCFMcP0i/I\n/6pYmXA7C3rz8RaG12X+vA03zMYVVTh2shNlafkAqHKwWV+qBOHQfKy7czr6iglOn+3FzQvK8S+7\njgCKC66o74Iz92OZP7EA8yfGz8wHgFUj6hNutwpJcYCZLRuXV+EP7xzDwknDEPDJyMgwlYTVOgzI\nfvgyLLP+LQLKOodEFHmo57LAJ3XSkicWhZtvsYh8MTGboMWasQrxmswq/IvkM+s+2Xzv1oQH+n9D\n2VvbZMYwaP+nulPQHaUuKTp5sRcuWcDT62bhRx/sjrsPz3M2C8Mq4CVBQoY3jZVIJxEXinMCKM0N\n4PjpLvzo5tlY/+vfQOtJxuiSVBz+6hyWTivCud4e7NZj81ZrkOM4rJo+Gvve/QN9HsENoxwjITy8\nfBL6tG4QjYfEi5CM9FuL8pRjftMLa4vxJz1hbNPy8Th4
UMNHh1sRPjgFj24owzP7fgEAaD5jqV1F\neD1BQLvwMtADxLEwBhEu+SI/CE2wuWOIvtCvrCsawz1kdWd8HQwBWT4s+SJHfnMyPelYWDgHM/On\nxu2zpqL69WcbV5aO5TNLbcfZFAYAF+eFW3QhNeDGI2vHIz8jCafO0s89mPTNA4cXQ+RFXDdsFoB4\nd9X5KMkN4MHV1XGZbACQ6gli1Yh6ZHrSURwchorCFEwbnY07llSymFehv8Am4JfPLIHSRu9dmVJp\nu55X8uDGEfVIknwoCgyz7Svw03MkXrQLe1jWvdd4WMvSrJk3nC13y3vpxDPjt2i9htXCAOyJDR7Z\nFLRBj0UhWq0XYs+ei7XCc7xm7GjNrFF4aHU11lw3An9zywRwHAe1dRhIXwCFWX688OBsFOcEkOIz\nrZzYZA2rS2ndItO11tsfZen0RsxGZoM9jrmR+hR7xV+XJGBTzQbMyp+GAn8eJlZkQuA53LGkEmmW\n2MzyGcNt5xluViNu+E1wLIxBxMUsjFnjcvH2fjPzJLZEN8dx+NnGujgBeqncNHc4GqaXXFyBXQY4\njsPS0oXn3WcQ65KyYk25BZBwzkN3H7W+aisSBbUvP8tKF2FR8bzzlsa/FFySgJlZU21Kdd0SqgQq\ni+ohSEvhkz22/hpdnIZfbVmMzvB0c20PCzPyp2J63pQ4l2JxsBBPTfsbcBwXF7BPd6fiq65GWiLe\nQpJHwuphy7Fj33PoPkWVeWYqFbb5fovCVI15SnpKttda0ZkqBlUltjZpFrcaIQRZlnO8Me0zrpfm\nTsG88YWIZerobLz/12akBszz3LKA6MlSSHnH4jKZrO3IS0nB8PwuHGnqRG9/FHmBTJzsb2QLZFnL\nAkWOjkNwxP9gYdHcuDaUJRezxJH0oAe/fGg2ezaDuqocVgZ9XFk6XMHJOID/w9xq3wRHYQwiLqYw\nblk4Emvnl6OxtQefN3YgOzU+ZnEhwXpJbfkWlMWlYLikEiHFKM7Z1Xlxx2xcUYX9R9owZVR23L4r\nAcdxl0VZANbRazwXs5is8Y9Yzhd/ssZCrFRnVmFf6ydx271uEQX+dPy07m9x5wc0iypLz57z25Yx\npuLKr7/L2QV12NW4GwVJuZB66TtU9USFm8ob8Lu3Dphr1INaGLIl1hUbjJ+ZPxUCL2B2QV3C9n9/\n0UhUD09H9XBT6QR8MpSTZfBHCzB+9riE5wF0lvniyYX42WsHMG9CATwZSfiwZR/bb/0NPnvnXEji\nfHuixkXgOA4NZdfHpc5WFKVAVZOx973ZQPT838BAcRTGIOJiCgOgftzCbD8Ks88vCAYjfu/5Pxbr\n8pX/eG9dQrfO6OI0jC5Oi9t+LTCQ38W3wdiMUZiQNQ4dZ2R8atlupJJLotnOjGTTXfS3Ux7Gm4c/\nxFu99L2kJ5vK5KfTfwwOHP5jD11kyBhoT8+bgvTp5XjtnWP44nS3vo/uTPekoa3/LOy5azQetrRk\nwXnbLwo8xpfbLUyfW8LT66cgySslVKC3j74ZRzqOISAnYWyZn8ULVS0NxYFClKdQi4rjOFw/pRA5\nad6v7facN2xm3Dae4yDKvC3V+JvgKIxBhEsWsKF+NBudOZhcyMIw1qj2uMSEyuJaxyV/N0KVPMez\ncvDaTILb/4Eu0+pOYI1a3aLpnjRMzpiKt7APxTl268WwwhItaFVRlIpHi1Jx29+/BcBUJptrNuC9\nU3sxKbsm7pyvQ1YCS92gOnMMqjPN+IVh7Qm8gAcm3GM7Nja2djkQeA6CMLBMxIFwxRXG7t278dRT\nT4EQguXLl2P9+vW2/ZFIBA8//DA+++wzpKSk4Nlnn0Vu7sACfQ7xGKl2DpQlU4vw2RftbC2PRPSF\n9bURLsEFcC0x0Mmg3yY8x+HJ2yfh4JftKMk1lcC9y8fYStwYlOUHsfnGsRienziRYiBC0fDyB10B\nXF983ddq97UGz3PQNHLxAwfIFf1CNE3DE088gZdeegmZmZlYsWIF5s6di9JSU5O+9tprCAaD+NOf\n/oQ33ngD27dvx7PPPnslm+UwhPjejBJ8b0bJBY8xXDaJYjrXMo/eOoEF67+L5Kb7WGkOA2t8IJYL\nuQQvpBR9Hgm9/dG45IahgFsW0NN/eSrVAlc4rfbAgQMoLCxEXl4eJEnC9ddfj127dtmO2bVrFxoa\nGgAACxYswJ49e65kkxwc4lg5uwzXTSjA+htGXfzga4jinACqStMvfuAgIJFLyuAnd03FxJGZmFOT\nf95jBhuP3joBs6vzUDMigxX4HJ5/iZOfEnBFLYyWlhbk5JiLwGdlZeHTTz+1HdPa2orsbJp5IggC\nAoEAOjo6kJx85XP4HRwAGrtYPW/4xQ90+M7i8+iT+hIojtL8ZGyoj1/tcjBTnBNg8Z7RxWnYfONY\nlOR8xxVGbIntgRxDCBlwuQgHBwcHAJhQnonjE7pQNybn4gcPQS5Xht8VVRjZ2dk4dcqcKNbS0oLM\nzMy4Y5qbm5GVlQVVVdHT04Ng8OKa0Fr6YKjj9IWJ0xcmQ60v7lsdXzzTYKj1xZXiisYwxowZgxMn\nTuDkyZOIRCLYuXMn5s61z16cPXs2Xn/9dQDAm2++icmTJ1/JJjk4ODg4fE04MhC/0Tdg9+7d+MlP\nfgJCCFasWIH169fj5z//OcaMGYPZs2cjEongwQcfxKFDh5CcnIwdO3YgP3/oBKccHBwcrhWuuMJw\ncHBwcBgcfPdm9Dg4ODg4fCdxFIaDg4ODw4BwFIaDg4ODw4C45hTG7t27sXDhQixYsAAvvPDC1W7O\nFWfbtm2YOnUqli5dyrZ1dnbitttuw4IFC7Bu3Tp0WxYMfvLJJzF//nwsW7YMhw4duhpNviI0Nzfj\nlltuweLFi7F06VL89re/BTA0+yISiWDlypWor6/H0qVL8Ytf0JXWmpqasGrVKixYsACbN2+Goijs\n+E2bNmH+/Pm48cYbbanugwVN09DQ0IC77roLwNDtizlz5uCGG25AfX09VqxYAeAyfyPkGkJVVTJv\n3jzS1NREIpEIueGGG8jRo0evdrOuKB999BE5ePAgWbJkCdv205/+lLzwwguEEEL+6Z/+iWzfvp0Q\nQsjbb79N7rjjDkIIIfv37ycrV6789ht8hWhtbSUHDx4khBDS09ND5s+fT44ePTok+4IQQvr6+ggh\nhCiKQlauXEn2799P7rvvPvLGG28QQgh57LHHyD//8z8TQgh5+eWXyeOPP04IIWTnzp3k/vvvvypt\nvpL8+te/Jlu2bCF33nknIYQM2b6YM2cO6ejosG27nN/INWVhDKQ21WBjwoQJCATsJZ2t9bcaGhpY\nH+zatQv19XSd6LFjx6K7uxttbW3fboOvEBkZGaioqAAA+Hw+lJaWoqWlZUj2BQB4PLQ+UCQSgaIo\n4DgOe/fuxYIFdD2HhoYG/Pd//zeAwV+vrbm5Ge+88w5WrjTXff/ggw+GZF8QQqBp9hUNL+c3ck0p\njES1qVpbW69
ii64O7e3tSE+nReUyMjLQ3t4OwF6XC6D909LSclXaeCVpamrC4cOHMXbsWJw9e3ZI\n9oWmaaivr8e0adMwbdo0FBQUIBAIgNertmZnZ7PnPV+9tsHCU089hYceeoiVFDp37hyCweCQ7AuO\n47Bu3TosX74cr776KgBc1m/kmloAgDhTRi5Iov4ZbHW5ent7sXHjRmzbtg0+n++8zzfY+4Lnefzx\nj39ET08P7rnnHhw7dizuGON5Y/uCDKJ6bW+//TbS09NRUVGBvXv3AqDPF/vMQ6EvAOCVV15hSuG2\n225DcXHxZf1GrimFMZDaVEOBtLQ0tLW1IT09HWfOnEFqaioAOkJobm5mxzU3Nw+q/lEUBRs3bsSy\nZcswb948AEO3LwySkpIwceJEfPLJJ+jq6oKmaeB53va8Rl9car22a4G//OUveOutt/DOO+8gHA6j\nt7cXTz31FLq7u4dcXwDUggCA1NRUzJs3DwcOHLis38g15ZIaSG2qwUjsSGDOnDn4t3/7NwDA66+/\nzvpg7ty5+OMf/wgA2L9/PwKBADNFBwPbtm1DWVkZbr31VrZtKPZFe3s7y3QJhULYs2cPysrKMGnS\nJLz55psA7H0xZ86cQVuvbfPmzXj77bexa9cu7NixA5MmTcIzzzwzJPuiv78fvb29AIC+vj689957\nGDFixGX9Rq650iCJalMNZrZs2YK9e/eio6MD6enpuPfeezFv3jzcd999OH36NHJzc/Gzn/2MBcb/\n7u/+Du+++y48Hg+efvppjBo1OBYF2rdvH9auXYsRI0aA4zhwHIdNmzahqqoK999//5Dqi88//xxb\nt26FpmnQNA2LFy/Ghg0b0NjYiM2bN6OrqwsVFRXYvn07JEkaMvXaPvzwQ/zqV7/C888/PyT7orGx\nET/84Q/BcRxUVcXSpUuxfv16dHR0XLZv5JpTGA4ODg4OV4dryiXl4ODg4HD1cBSGg4ODg8OAcBSG\ng4ODg8OAcBSGg4ODg8OAcBSGg4ODg8OAcBSGg4ODg8OAcBSGwzXNqlWr0NDQgOuvvx6jRo1CQ0MD\nGhoasG3btku+1u233z6gctePPPII9u/f/3Wae0kcPHgQ//mf/3nF7+PgMFCceRgOg4KTJ09ixYoV\nF6w+apSKuFZ49dVXsWfPHuzYseNqN8XBAcA1VkvKweFS2LNnD7Zv345x48bh4MGDuOeee9De3o6X\nX36ZLaizdetW1NbWAgBmzpyJl156CcXFxVizZg2qq6vx8ccfo7W1FUuWLMH9998PAFizZg3uvvtu\n1NXV4cEHH0RSUhKOHTuGlpYW1NTU4OmnnwZAa/M89NBDOHfuHAoKCqCqKubMmYMbb7zR1s62tjZs\n2bIF586dAwDU1dXh9ttvx3PPPYe+vj40NDRg0qRJ2Lp1Kz7++GPs2LED/f39AICNGzdixowZOHHi\nBNasWYMlS5Zg3759iEQiePzxx1FTU/Ot9LXDEOGbLNbh4PBdoampiUyePNm27f333yeVlZXk008/\nZdusi8scPXqUzJo1i/09Y8YMcvz4cUIIIatXryZbtmwhhBDS1dVFamtrSVNTE9v37rvvEkIIeeCB\nB8jatWtJNBol4XCYLFy4kOzdu5cQQsiGDRvIL3/5S0IIIY2NjaS6upq88sorcW1/8cUXyWOPPcb+\n7urqIoQQ8q//+q9k8+bNtrbX19eTs2fPEkIIaW5uJjNmzCA9PT3kq6++IuXl5WTnzp3s2WfNmkUU\nRRl4Jzo4XATHwnAY1JSUlGD06NHs7y+//BI///nP0draCkEQ0Nraio6ODiQnJ8edu2jRIgCA3+9H\ncXExTpw4gby8vLjjrrvuOogi/ZQqKytx4sQJ1NbWYu/evXjyyScBAPn5+cySiWXcuHH4/e9/j2ee\neQYTJ05EXV1dwuP27duHpqYmrFu3jhWkFAQBjY2N8Hq98Hg8WLx4MQBgypQpEAQBX375JUpLSwfa\nXQ4OF8RRGA6DGp/PZ/t706ZNePzxxzFz5kxomoaqqiqEw+GE57pcLvZ/nuehquolHTfQdRbGjx+P\n119/He+//z7+8Ic/4MUXX8Tvfve7uOMIIRg1ahReeumluH0nTpyI26Zp2qBa68Hh6nPtRAAdHC4C\nGUD+Rk9PD6tO+sorr5xXCVwOamtrWVnpkydP4sMPP0x4XFNTE5KSkrB48WJs3boVf/3rXwHQtS6M\nMuYAUFNTg6NHj+LPf/4z23bgwAH2//7+frzxxhsA6BKlAFBYWHh5H8phSONYGA6DhoGMprdt24b1\n69cjJycHkyZNgt/vT3h+7LXOt+9Cxz366KN4+OGHsXPnTpSUlKCmpsZ2P4M9e/bgt7/9LQRBACEE\nTzzxBABg2rRp+M1vfoP6+npMnjwZW7duxXPPPYft27eju7sb0WgUBQUFeP755wEA6enpOHLkCFau\nXIlIJIIdO3ZAEISL9omDw0Bx0modHK4Q4XAYkiSB53m0tLRg5cqVePnll1FQUHDZ72VkSb333nuX\n/doODgaOheHgcIU4fvw4HnnkERBCoGkaNm3adEWUhYPDt4VjYTg4ODg4DAgn6O3g4ODgMCAcheHg\n4ODgMCAcheHg4ODgMCAcheHg4ODgMCAcheHg4ODgMCAcheHg4ODgMCD+P4xSKOOE0RxSAAAAAElF\nTkSuQmCC\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x7f72fab5e290>"
+ "\u003cmatplotlib.figure.Figure at 0x7f97f1e98d90\u003e"
]
},
"metadata": {
"tags": []
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAe8AAAFnCAYAAACPasF4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzsvXe8XVWZ///e5dTba3pCQiAJCSWE\nIJGmoSSgjsg4gmCb4Tf+dCwURUdEQXGs41gYFQvDiIyIiKIIJIAgEBJCgJBKertpt59z76m7fv9Y\nu55zboiQBCL783rllXt2WXvttfden6et55Fs27aJECFChAgRIhw1kF/vDkSIECFChAgR/jZE5B0h\nQoQIESIcZYjIO0KECBEiRDjKEJF3hAgRIkSIcJQhIu8IESJEiBDhKENE3hEiRIgQIcJRhoi8I7yp\nMW3aND796U9Xbf/iF7/ItGnTQsfdcMMNoWOWL1/OBz/4QQB2797NCSec4O3btWsXH/vYx1iwYAEL\nFizgkksu4bHHHgPgpptuYuHChSxcuJCZM2fy9re/3fudy+VC19A0jfvvv/9vvq/Vq1dz1VVXHdSx\nDzzwAF/72tde9bVcvNbz3wi46667+P73v/96dyNChFeE+np3IEKE1xsbN24kl8tRX18PCBJas2ZN\n1XErVqxg/fr1IZIeCZ/97Gd597vfzW233QbAqlWr+PCHP8zDDz/MV77yFe+4+fPn8+1vf5vTTjut\nZjvr16/n/vvv55JLLvmb7umkk07i9ttvP6hjly5dyvnnn/+qr+XitZ7/RsAHPvCB17sLESIcFCLN\nO8KbHm95y1t49NFHvd9LlizhxBNPrDruuuuu4+tf//pBtblp0yZOPvlk7/fJJ5/M4sWLGT169EH3\nq6+vj09+8pO89NJLXHHFFYCwAPz0pz9lwYIFmKbJypUrufTSS1m4cCEXX3wxS5cuBYRV4IILLgDg\n1ltv5atf/Sqf+MQnOO+883jve99LT0+Pd53ly5czffr0qmu98MIL/OM//iMXXHAB73vf++jq6gKg\nu7ubD3/4w1x88cWcf/75fO9736vZ18p7ueqqq1i4cCHz58/njjvu8PatXbuWSy+9lAULFvCBD3zA\nu85I26dNm8b+/fu9893fy5cv5/LLL+fqq6/mM5/5DAD33nsvF110ERdeeCFXXnkle/bsAcC2bb7x\njW8wf/58FixYwC9+8QtvrL74xS8CsH///pD15MknnwTAMAy++MUvsmDBAi644AI++clPVllMIkQ4\n3IjIO8KbHhdddBF//vOfvd8PPvggCxcurHmcbdssWrToFds855xz+PSnP82dd97J1q1bARg1ahSS\nJB10v9rb27nuuus45ZRT+PWvf+1tt22bxYsXoygKX/7yl7nqqqtYtGgRH/3oR7nppptqtrVo0SJu\nuOEGHnvsMdra2rjvvvsA2Lp1Kx0dHYwbNy50rVwux8c//nGuu+46Hn30UT70oQ9x9dVXA/C///u/\nzJ07l4ceeogHHniArq4uLMuq2VcXP/nJTxg/fjyLFi3il7/8Jd/97nfZt28fIISiq6++msWLF3P+\n+edzyy23HHD7gbB+/Xouv/xyvvvd79Lf389Xv/pV7rjjDh555BEmTpzIj3/8YwD+9Kc/sXr1ahYv\nXsx9993HXXfdxerVq0Ntff7zn2f69OksXryYn/3sZ3zuc59jcHCQJUuWsHv3bhYtWsQjjzzC1KlT\nWbly5Sv2LUKEQ4mIvCO86XH66aezefNm+vv7KRaLrFy5knnz5tU89oYbbuA///M/KZfLB2zzO9/5\nDldeeSUPPPAA73znO5k/fz533333Ienv2972Nu/v+++/n4suugiAOXPmeNppJU477TTGjRuHJEnM\nmDHDI85ly5bVvNcXXniBUaNGceaZZwLwzne+k127drF3717a2tpYsmQJzz//PPF4nP/6r/+is7Pz\ngH2+8cYb+dKXvgTAhAkT6OjoYPfu3Wzfvp3BwUHOPfdcQJitb7311hG3vxKSyaR3P21tbbzwwgue\nteO0007zxuepp55iwYIFxGIx6uvreeihh0LWlkKhwPLly/nIRz4CwKRJk5gzZw5PPvkkra2tbN26\nlUcffZRiscg111zD2Wef/Yp9ixDhUCLyeUd400NRFC688EIefvhhWltbOeuss1DV2p/GzJkzmTt3\nLnfccQezZ88esc1EIsFVV13FVVddxdDQEIsWLeLrX/8648ePf80TfXNzs/f3Aw88wJ133kk+n8ey\nLEYqVdDQ0OD9rSgKpmkC8Mwzz3gEFcTQ0BBdXV0hC0Q8HmdgYICPfOQjWJbFV77yFXp6erjyyiv5\n1Kc+dcA+r1mzxtO2ZVmmt7cXy7IYHBwM9U1VVVRVHXH7K6Gpqcn72zRNfvjDH/L4449jmib5fJ7J\nkycDMDg4SGNjo3dsOp0OtTM8PIxt21x++eXetkKhwBlnnMFJJ53EjTfeyK9+9Ss+//nPM3/+fG66\n6aZQexEiHG5E5B0hAnDxxRfzve99j5aWlpo+2yCuvfZaLr30UsaPH19z/8DAAC+//LKntTY2NvK+\n972Pp59+mk2bNh0yLa27u5sbb7yRe++9lxkzZrBjxw4WLFhw0OcbhsGaNWtqCiGdnZ1MmTKF3//+\n9zXP/ehHP8pHP/pRtm/fzr/+678yZ86cA17r+uuv58Mf/jDvf//7kSTJG4OWlhYymQyWZSHLMrqu\n093dPeL28ePHI8uyJ3xks9kRr/nQQw/x+OOPc9ddd9Ha2spvf/tbHnjgAe+6g4OD3rF9fX0kk0nv\nd1tbG4qicN9991FXV1fVtrs6IJPJcMMNN3D77bdz7bXXHnAMIkQ4lIjM5hEiALNnz6anp4fNmzdz\n+umnH/DYzs5OrrzyyhHNuKVSiU9/+tM8/fTT3radO3eyatWqEaPKR4KqquRyuZoa9cDAAOl0milT\npmAYBvfccw8A+Xz+oNpevXo106ZNIx6PV13r5JNPpre3l1WrVgHQ1dXF9ddfj23bfPnLX+aZZ54B\nYOLEibS3tyNJ0gH72t/fz6xZs5AkiT/84Q8Ui0UKhQLHHHMMo0eP5pFHHgHgd7/7HV/+8pdH3A7Q\n0dHBhg0bALjvvvuQ5drTWH9/P+PGjaO1tZXBwUEefvhhb2zmz5/Pgw8+iKZpFAoFrrjiCjZt2hQa\n93PPPZff/OY3ABSLRb7whS+wb98+7rvvPn70ox8BwgoyZcqUgxrvCBEOJSLyjhABkCSJCy64gLe+\n9a0jkkEQ//Iv/4Ku6zX3jR07lp/85CdeVPiFF17Itddeyxe+8IVQBPrBYM6cOfT09HD22Wd72qaL\n6dOnc84557BgwQIuu+wy5s+fzymnnOKtPX8lLF26NOTvDl4rFovxwx/+kFtuuYWLLrqIT3ziEyxc\nuBBJkrj88sv53ve+50W4z549m3nz5
h2wr1dffTWf+MQneNe73kWhUOCyyy7jS1/6El1dXfzgBz/g\ntttu48ILL+TPf/4zN998M5Ik1dwOwvJx88038+53v5tUKuUt8avEO9/5TjKZDBdccAGf+cxnuOaa\na9i/fz/f/OY3ufjiiznrrLO48MILec973sN73/teTj311ND5N998MytWrGDhwoW85z3vYcKECYwZ\nM4bzzjuPdevWceGFF3LRRRexZcsW/vmf//mgxjxChEMFKarnHSFChAgRIhxdiDTvCBEiRIgQ4ShD\nRN4RIkSIECHCUYaIvCNEiBAhQoSjDBF5R4gQIUKECEcZIvKOECFChAgRjjIcNUlaenuHD2l7LS1p\nBgcLh7TNNyOicXztiMbwtSMaw0ODaBxfOw71GHZ0NNTc/qbVvFVVeb278HeBaBxfO6IxfO2IxvDQ\nIBrH144jNYZvWvKOECFChAgRjlZE5B0hQoQIESIcZYjIO0KECBEiRDjKEJF3hAgRIkSIcJQhIu8I\nESJEiBDhKENE3hEiRIgQIcJRhoi8I0SIECFChKMMEXlHiBAhQoQIRxkOK3lv2rSJ888/n7vuuqtq\n39KlS3nve9/LZZddxo9+9KPD2Y0IESJEiBDh7wqHjbwLhQK33HIL8+bNq7n/a1/7Grfeeit33303\nzzzzDFu2bDlcXYkQIUKECBH+rnDYyDsej/Pzn/+czs7Oqn1dXV00NTUxZswYZFnm3HPPZdmyZYer\nKxEivGmhGxZL1+6jWDZe76542NuXZ822/te7G0cNXtjYy879wyxduw/Lsl/v7rxq9GWKrN8x8Hp3\nA4D9AwVWbekDoKyZPPdyN7Y98tjmSzovbOw54DFHGoetMImqqqhq7eZ7e3tpbW31fre2ttLV1XXA\n9lpa0oc8Z+xICd8j/G2IxvG143CN4d2PbOTXizdw3twc11x+6mG5xt+Kf/nm4wDc/+13oSiHTn/4\ne3wP9/Tm+NEf1ni/48k4F8075rBe83CNo/vcf3XzQpobEoflGn9rX+79+jv4+d0vsmzNPmRV4aK3\nTq55/I9/8SzPv9zNdVecytvnTHjF9o/Eu3jUVBU71JVuOjoaDnmlsjcjonF87TicY7hhu9BwN+wY\neMM9p737syTjh2YK+nt9D7dWaKobt/dz2tS2w3a9IzGOXXsz6K3pw3qNg0V3zzArN/YAsGnnAKcd\n117zuA3Oc3hh/X5mTWw+YJuHegzfUFXFOjs76evr8353d3fXNK9HiBDhtcE180lIr3NPqqEZ1uvd\nhTc8SroZ+m2aR/+YvZFcOJZtY5jiG1EPYAVqrheWgsHh8hHp18HgdSHv8ePHk8vl2L17N4Zh8MQT\nT3DmmWe+Hl2JEOHvGq6LTnrjcTdGRN6viHIFeRtHsc/bRb6kv95d8GBaticQqcrIH0mLY+bP5N44\n5H3YzOZr167lW9/6Fnv27EFVVRYvXsz8+fMZP348F1xwATfffDOf+cxnALj44ouZPLm2ryFChAiv\nHW9E8tYj8n5FaHp4jEzz6CfvQun11byDQWeWZeP+UuSRddn6VAyAzAE072x5iKZE4yHp48HgsJH3\nrFmz+NWvfjXi/rlz53LPPfccrstHiPCGwf6BAo3pOOmk+Nx6MkXSCdWbEGqhe6BAQzpGOukf0z1Y\noLk+QSJWHbiZzZUxLZvWxmRou+Wazd+A7H0kzOYDQyUUWaKp/rUHSFm2TVd3jgmj6pEliZ7BAk11\nCRLx8PMoayZ9QyXGtde9pusVSjq7e3OhbYPDJbJ5jaa6uLetN1MkGVdoSMcrm6BYNtiyJ8u49rqq\ndwOEANWXLTKmrbqvA0Ml4jGF/mzJu+dK2LZNV0+Ose11ntnZtm329OUZ116H5IxTXeBdz5cM9vbl\n6WxJeedYts2W3VniMZljRjfSkynSmI6FYiJ2dQ8zpq2OmFqbZGudUwslzbdmmJZV9bdl2WzenSGV\nUEknVOJxxfuOhgo6fZkidakYqYR/naV7V/B/G+7lIye8n4s7zjng9Q8VjpqAtQgRjkaUNZMbfvYs\njXVxvv+pswD499uWIQG3//v8mufohsXNd6xg9nHtfPQfZgLQny1x48+X8455k7jk7ClV51z7388A\n8D8VbbpKhvw6cLdpmWzL7mBq85SawsOR0Lw/++OlQPW4vBosfm4X9z6xlcvPO45Tj2vn33/6LBOm\nZ7A7N3LdnH+jOdEEwDf/70V2dg/z7Y/No7059aqvd/MdK+jLlkLbNuzKcO2tS0L3c8Mf7sYup/nF\nxy6vauOexzezZNdKmuoVvnvlZVX7n1i5h3v+spmvXHU64zvqve2mZXljB3DlBcdz3pzxVeev2TbA\n9+9dxZknjuaqd5wAwOMv7uH/Ht3E+88/jtOmdfLvP32W9iZfcFi5uZdfLd7I208dxwcvnAbAqi19\n3HqfiKq//v2z+c7dK5k+sZnPXSFWSGzcNci3fr2SudM7+fgls6r6kc2V+ffbljF1XBM3fHBOjdH0\nEdT8g0vvXFJ/YVMvP7l/rbddSuZomLUSuXkaVqaTz922jHHtddzy/73FO+avu5cAsKJ7JRefeGTI\nO0qPGiHCYYRmiAlhKK8BYDj+tQMZP4tlg7Juhvxre/pymJb9N/vcfBPhkWfvezf/ie+v/CkrulfW\n3K8bZs3tbyT8cevDfGHJLWimxsrNIsh21ZY+9vYXIFair/FZ+kuDdA3v8c7Z2S0ijQcOMrhJt2qb\nkSuJuxZ2De0hPmkDieNfrLm/qzdH4riXKI15ofY1MkVsoKsnrOFXmrbXjrAuf9veLADPrNnvbXtx\nUy8AK17uYSivIaWHyE+7H7mpx2lLRG4/8aI/Zv2Be3VzAGzYlfHb3LsRKV5gxYaemv3oHiwCsGVP\ntuZ+0zIxLfG+BX3uZoC8NSe+oC9bDJ0rN/Wjy3kxxoo4d09fPnRM2RDPOqkcuSVwEXlHiHAYURlf\ndDDapghSssnGtzGsiUm1NyMmt6DPc1XvWnoLB0524pL366F5P71HJF7al+/2tlkBf+PR4PN+ZOcT\nDGnD9BbD45zJlVEa/WVcRaOaaJUDBEC52DW8m2v+egNL9jxb+wDJIjZlNXJzd2iz+1yf6FpywPZ7\nC/6qHsuuHm83IK43EyasSvIeyVRdy6Li3rdhWpiWTWzsVtHGpA0j9jN4vf4KoaW/OMCSwu+Jz3hu\nxPMrz6nEf734E7723HeBcLR7Tisi1WWIH/8Cw8ZwVV8AJFXz/pbragsHZVMck1CqXReHCxF5R4hw\nGFG5tEc/iKU+Zd1Ead9LpvU5/mfdrwF/cnU1hf35Hn625k7+47n/8jQGoCoDl6d315hkNw5sYU3f\n+oO+l1eLxri/TjVI2G/0pWLuhAygW+EI6d5MESnuE0ZBD5MfgHIQEtOjO/8KwIPbH625X2ndj9q+\nl8TxYeuFu7xpW3YnALZR7QEtaQZFxSfvWn0cibzzAQKT6rIMpNbXJH+5xj2qTuCXYdqifcVpq0
Yf\na12vp6Ivq3qFCVtOjEzQwf6XtDD5dg3vYcfQLnoKfWim7l9L0fn+y98mOfNZlOZetiUfreoLgBQr\nB/7WqIWyeeSj0CPyjvC64I2UZvBwwqwgU10/OPKWG4RWtze3D/AnJ3epUG9RTMq6pYcmm0rh4EBW\n8x++9DNuW/2/r9ifkfBiz2q+8dz3KRrVpBCc6IPEFyTv16p5W7bFrSt/7hFg9X4bpW0vcsv+mvtf\nCbuGdnt/l4zw5NybKSLFAuRtVCeRMi2bIW2Y32/+MzktX7U/eI2JDeOq+g4gN4nnbNvhB1jWTWzb\nJlN2TMuKUUWufZkScr2vKeZr9NGNZu/LhImx4JiWpbosyZnL2Bd/gd3De6vOryWfuEuuTMuirJtI\nqmjLNqsDNF3BsxAwZe9zTNKphAgEfMkhb9saWRjakFvtPefKe1m+z3cZ5PScZzavJGJNyZLT86G+\nACEhDTV8zu83/5lfrL0LzXnHa1lgDhci8n6TwPUvWrZdMYGaNY97pW2vBat61/LJJz7PtuyOmvst\n2+Kl3rUU9dIBr23VEAAOVV9/uf433LT0m6/6fLcfQfI2TCtEriMJMJpmIsUFIbYmW4Cg2VycP1jy\n/YHByaaSED2zeeC3bdvkdJ9MKv3ouiGIwbKtmtqWi9vX3sXu3F5W9673zgHxXILm/JAGG+hfppzh\nzvX30F3oxbQsBofL2LZd9QxFX6rHqrfYz4bBzdy/9aGq4yzbxjBMYsesIzaxtrm2ss18SQ+ZTHcN\n++RdOSn3ZkpIcX/cCjUEGNO0+c7z/81fup7imb3Lvevphskftz7MZ578Mn0lIaTJkh+xXiwb7O4f\nRG7sQ2l0yLscDnzTdJMhLYdhi/5KEhR1v49lzWT7/iGkhE/Y+RoCxLDdR+yYtfQMD4W254o6iZlL\nSc70a04MlAZDxzyzdzm7zZeRm3tInb6IPbl92LbtRZAXk7tZMfCMr3lXQB2zleuXfImCXgwJoK5F\nJp2IYds2e/Ou8CXh2pJ0w2JgqESuqDNYyrIz/gyJ414C/JgDwzK4e+PveaFntX9fWt5/xnJ1v7Kl\noWqzeby25m1ZNn/peoqVgfaPJHlH0eZvAnQPFvjCT5/l4jMm8fLOQbbvG+J//n0+Dy7bwX1PbuPm\nf57LxFEN/PWlPdy5aCPXv382MyYJ0vjNXzbzyIouvvWxeXS8hsjZIO7fIibbv3Y9w5SmY6r2L937\nHHdv/D2N+jF0r5zOj649J7QsA2BTV4Zv/t+LfPySWcydLrLzPfp8F3c/tpkbPjgHPdVNS6KZ0XWv\nLnPfc/tFAJBhGajy3/aZvLxzkO/cvZIPLZzGceP9VIr5khEycRumTUyt1iZKuomUEGSwfVeZNW39\nXhCNaxbvCfgyQ5p3gBwf2LaY4eQQ0IYkSZR1k49/90nOOGEU557lB9Zc96On+MAFM5h/6niG8hrX\n3LqEc04eiz3xRV7u38R/nHUjsQOMgaZb/P//+SRnnTiGf3nHDD73k6VkpN0kRCBxyKToE7PFnwZv\nB6A50cSG5Z1s2JUhEVcoayafuewUZk5u5cWe1WzdrPDw091V0dvd+XDw0n/d8xLb9g3xb+85ke/+\n5iWuuGgikmKCbGFaJorsE+SqLX384Herue59JzNrShvb9w1xyy+fB+CbH5tHZ3OKVV27vON/8dBq\nxqnT/WsnXkJp9f3QtUzSmfKgR3j3P72de34Dn79iNt/69UpSpz8ROvalbfvZ2TlMc32cz922DOnY\n5SSm+wKQVKHxLVu3n98/v5LkTH9btpynLp6mpBlc/+Ol5EsGiVk+mQxr1Zp3X/ol1NR+8oqBbvhR\n0kPlAnJdmNAHy2F/76833AdAfLLQqH+y5EFSPafQ0SKeUXncc7yUAzlZ+x5iEzZj2LBpcAu5cnXf\nDNPi9kWrKDrmckm2QBZC4pduX06PE6TWcEwXeJ+5ze0PvsykUQ3stzZXxRIM6znyJdFfqYZQ8Z3f\nPUeb7FtB5MZ+5PQw2BJIdugeilr1+bWEuMOFSPN+E2CjE7X50LM72b5PfJCWbXPfk9sAPzr0waXC\nf7Z07T7v3EdWiIIxm7p8Te+1wtXmRlp77EbuZhFmuv6hamn2ryvFMfc+4ZeSve+vIjBm+cbd/PdL\nv+CW5f/5mvsa1BoPFktWi34/tGxnyOddKOkhzbsye5aLkmYguf492eLhZ3d6y1hcTb7HMZvHlXhI\nU3DJ0bAMFu34CwPNKwDxvIediPdn13fTlfMjfVEML3p2l6O1PLVqN893v0TeKJAp1Q7ScbEvI/Yv\nWbMPy7YZGCqHTI1lI0jezrNP+pO1bulelHDZuc+la/ezYWAzt6+9i8cHBUm8vCus+e2vIO91OwYp\nlk1+/dgG1NHbWbRGmFslSZivg1i0XBDzn5buAMS6eq/d/gLL973Attxmb5tml0NLBOzOTeJ/UwgE\ntczmPaVe729TEtaRxc+NUIBJ0dm0O0NXTw7dsFCawgFykmqA5L87f1ixKqQVA2RLeedehCY7arRN\nLOW/vzm9uo+u8UFp2093xo84HypWa+lBzTtoNZJi4t4yWYOd3TnvGVYimbb454unM2/mKN4+2yfI\nn6/9FXvG/L7KvVHWTZZt3hHaJsU0hgs6PYNFL9+BlvYtJA2NYoy27xtClqvzIeS0PANDzvtYg7yF\nZu5bshLTxfcjmXHn+v67nCtWZ4orRWbzCIcSlZGiUrzAQCHrmbfcCdUNsKn000LtwJRXCzenkSzV\nfv08U63j56t15ZST8CS47MM1t9nqa5N+g6biVxOIogdyJQfHslAyQj5vbQTyHtZySJK7QNsMCS8e\neRd8Yqg1BtlymKzcyF8Xe3P+RCkpJprmulWcyzb4wlqmXE3ewTEKEpebgSpE3gEBSDMsUDXkQKT2\nYDHnBWC5SMQVT4iT04JUKpPT7A1EsQ+X/D70JlcTm7iR4lh/nfJAKSx8uglz+lIr+dnqX4aEqoHC\nEHe+fA92zH+PJMW3mtiYge2Oz7aW5q0NBI4TRDE40lI/xaA3U/RiG1yhIISA1hcb5QsBVkEEBA47\nhNubKSI39DM0cTGmFCDvcjUhm5Lfn64B35ozXK6+n6DmXWt5m6aJ96w0AnnbisbZJ43lX981k7NO\nGlO1X2kJC2NlKUd8imOSdueCWJk9TuKaGcc0ITf1hPz6l54nhILebBHd9L8LN2ZgWM95Yyyp1fcg\nqToZR8gNCktSsQXbkpDrhjyXVqYQHs/GeEOkeUc4tIhVJNxPnvIUNy3/ukfq7oTvEnStmsG1siu9\nWnjBOCO8fqZDDF6QTsW1C3qRVervUNp3UyxXTxSGUjs46GAR/AC1V6F5uzm7K8k7XzIOSvMeChCv\npBj0Z/0J1jQtbNv2JlLN1MgXq33Kg+UwWelGONZhMKhNK4bXF89H3uATT7YGebtL2AAKZoA43Ykx\noKGEzeYWyROXED/Gj3LPFKsrMCVisqfpuYFKlQUt9
hd88t7W7U/87uQaxEDRHw/TMulveB65foBy\n0xZW9a1DM/yJfqhUYwJWDIYdTasQ8C3bloRqJ8g774zhPV+bQS2gPTvrg0dapy8pOn2ZkhfbYBuB\n4C6XuFS/j5Ll77cKIrmKaxbvzRRDhKZaooJXvkLz3jS4FSvhH7c34z/zXMDEbmbbkGyZTDDOooal\nwRVkhgvV38yY5Hh0S/e+p2DSFu/8eFhrjU3Y4AluFB33k6qxs1tsax6TJTFNuLdkW4yHkhTj25sp\nUTQDz0lLOPeV9yPTlWrNWVI133IQ0MylvTPBjCHFyyROehqAgYL/DZzVeiGtyRaKRumIBeNG5P1m\nQ0CajKsVmrdyZDXvkczmXiCRM2lVLrfaNbybItmQ9haEJudqbg9ix9CuUDRxELkAMb0as7k7gcdU\nudpsHiDQkTSUIT3Qf8UMBVaZlk1eL2AENJ+hgJbktl+pLZtWOFguUwoICLJBWQ8njwlOpBkt7PuE\nsHBQNH1hyU0sEgzyqQxYq4zyHa6hESZiCrudSHsMYbKsDCQKmnF39PmWCKxqrTWoea8f2EivuoHE\nCc+BLO47GImdr0HekmJ4VoVcYLy1DXNRiFN0iNHVzmPHrmJDYZV/vqPlDeVqvE+2BIpBT7ZAr5sg\nJBCZHdNF/Elw3EzE3+ZQC9aQKBE6rPmad5D8k5YgviB59xT6+MHKn3r3D9CdC0SmOwKKOdSKtuUU\nVCsVGsOagVmOcDGU10LzjLZsY2voAAAgAElEQVRtFi0J0Qc3ULJWamC5gryDEfbGUKM3Bjv2i/dR\nSfnHj5VEPIIuFVBkib5MMWzCdsZzqJwj4zyDWj5vggKSs9/oHYdeSHrjLzlj1p0V42V0T6TdmEZK\nTWLaZkjjP5yIyPvvEAOlQTQzaEoNkETg5Yx55C32u2ZzzSpXSemHMsmHa3IdSZu3bLe/Yn/l8ifX\nZOx+XJU+tqLtE9NI0dI/X/Mrfvly7dz6w4Go3FdjNvfIW5FCVox8yXCehY0yagd7ctVLbwAKhk/e\nUkVErGnZVcQ8FCC/2uQttO6gmX446ANWzCrNO6g51zKbBzX3vOFf39e8S9imgm0qlMxqn3cQtZaa\nxWOyPz6KDtgVS+L00Du6dzAgyFnV01qwv7XKoxYD1oOc5vfX1WpRDE+wcTXvuvxUrFwrshX3rDWu\nEKS2+W4J28bT8mwIERtA0m5Ckm36snl6B4uOUO2/N0nTIe9gwJfzHWtbT8Z2hJu8JvrQmy2FiClh\nC7N60KKU06sF3IGCP0Yll7wHRoMZQzHTDGnDXpayWm4C1zIwVNA9rdUcGIXZN576mMidviWzXRxb\n49sXAl8wsMA/xsoJ8pcSBU/zjiXEOOp7jmVS8ngAslqWtqYkvZli2IK2XUT2DRQDgmhgjLRts5x7\nEGOcSqj+flMV30dIKLTodSL0bSNGb6ZIWhWBevkaY3M4EEWbH4XY1T3ML/68nk9eeiKdLeGi9nm9\nwJeWfoPx9WP5wunX8H+PbOIvL/oaZnACiFVq3g5Db2m+l889bdO89VLv2B/9YS3nzxnPFRccf1B9\nfOCZ7by8c5Dr3z/b+1DveXwz2ZxGLqWBAiDxg3tXsdkpSPCpfzyJyWMasQhr3pWlI7tdf6/zcXUP\nFkKBa3nTn4SeXtPFI8v3ceOHTvMi1otGkUw5S4MzET350h4ef3EPLQ0Jpk9soXNyteb98PKdrHi5\nhxs/dNorWiFcYUOpMJs//uJu9vUXkFI54pM2cHfXBo4ZfQ0dHdNC5xfMvC9WKz7hqopMMbGXb6y4\nN3R8vkLzfu7lbh7ZvAncVNWySV+2xH/+5iVQyySmr6Bo+WSlxk1PAHIzuAXJ+4k1W2nMdHHh3An8\n/qmtDA6VGTfL13SDxPenZ3aI8+MlbC2JpOrsGxxC003iMSUsSOL4CZ3+j++oZ3dvDqV1L08Ul1G2\nXQ1JRBkPFzRu+eXzDAyVuPyiseLWJBnLtugeGgScSHTJH3NbjyPFNJ7esI1ZiX5mTWmrGVQUFCCW\nb9hLYgZItoK2+VSSJz8VIkPN0kgACScVpmzF0S2DsqlVuUJsSwJLDWt5FRpfPpNAaRVBcXv6ZEa1\npukPHJM0WxlmK/Gpqyi91Iytpfz2jJj4B+zoHeCWX66gN1MiMcrC/WpiOLWoy4M8u24/v35sM23j\nstDqdlJEUvfkMlzzvb/S2ZRkz2CWeDNgim9GNtPY2GTKQ7SlWnjmZT8S37+voNbqru0W53em2wG4\nc/09nNwxq3a8i2yKNtzgMMdaUVpzJraewDZU1NE76Fk3FmjwrmFmOmlKNEFJBLt2NI9l3fYBVm7d\nBzEorT4Lu1SHjEJ/UVhrmuvj5J0xHNf/DrYMlGHKWi/4rqkuTlmvuIf++fR0OMl0VIP+vAZpMUZ9\nmRId44UrIK8XSODniT9ciDTvoxD//fs17O7Nc/+S7VX7smUhDe52tJYgcYP/UUE1eXuk5Ex++/rD\n2vdjL4i2Hti6iJuWfjNkuq3EH57ezoZdmdBktvi5Lp5d3+1V7zEsg1Vb+ymUDTI5jXXbhfZkOaTq\nkXeFGd9dJuVOYNv2DbFuh29CzZm+VnnnY2vZ11/w2ga8NchlS5DDLxdtpKsnx+qt/fz2iS0hs7mr\nzdz7xFZ27B9mYFhM/I/tepIvLf1GTbO6YYj+xlQ51Hd3PIPEuLXGWveiJTRZ24gJE51kkUooJOMK\nQx3LveMkXZBVIUBGmmFy2x/XMRQ0dct+pLrascf3IzqIxy3vOXmacUwTpk5bwlSKvLBR+JT/vHQn\nz6zd7yWPUWWVklXh/5QNpJiOrSWxTRXd0ti0W5hcS3p4vNpTbRhogM2oVnE/8amrKdhhbV+KldnX\nX2D7viGyeY2N+4VmO8FJbtJfEM9/zrQOkulAycfhZmxLwlIK/OB3Ivgp6Au1ikIjdCO1g+M1Sj8Z\nu5wS5tsa5OvmsVY1oRWu69/gCUFuxrPyunnYhhr2V1eQt2vilhQD07LpaEqScuSQs8fOo8mY6B3b\n0C76Kak6tiWDrXiad1e+i+37hsgVdU8rbUk0M8Y6EXO4ha58F89u3k6uqLN70DeB1xtCECrbBbbu\nzrJsXbfXx6ljBOm675rrLtm63/fne/0P3CMO8br7zh53BhMbxmNjM1QerhKgZEMoIak5j3uBeZKq\nYVsSdrEejDj67uORZBu5LksqoVK2xHc0a2In582ayrFNk9kwuJlp08XzH8yL91wkh5EYk5jAgN6L\nlMgzujXtPceGRJpPvWc2tiV5yk1bU9IXnB3yTlvtTFBO8PqWLfrfaaGkM7vzJE5sn0FnXTtHAhF5\nH4UYcgJC6pPVfiP7gCUvCJnN3UxIru9VqTRlSbVNzot2Pk5faaAqorkWatbudYSDsiHuo9NZu+ul\nAK2INh9Z8xb34i6BclG2AxOxc7/BW3Ozk2mmVm1Wl02yAZPycCk8ybhc/IctDzJQGqyZdcowLZAN\nCsmuqr7H
j3+exPTnvd+1AuLKtiBDu5T2+pSIKSiyhGwEAn1KQrovB8hINyykZA65MbBGOKC9h9Jo\n6qItJRYgb9MCbKRYmeZEI7aeQIqXqtJn7sntJ6HEmdQwAc0uhd6Vjsni2tZwC5gKyCb5ongPKrN8\n1cXS4n1QjFCZSxcJSbwbUkwjmw8kRTHFxDyrbTqqpNDPDuIxiX+7ZBbHT/K1HqtUJywA8ZJnBXH9\ntbGhiRyfOA2A/ny1sCNZCiCBWaE5O/uTagJFlogPC3J9dt/zvrAqmyT0Nuxio/C31iB/M9sqtErX\nv+1sb29OolllpjQdw+XT30NCrqO8+RQAzjhFVC5D1T2N2y6lMTPtKI0DyM3i25Ad8rzm1I+RUJKY\nvULI2Ws6Firn2zH6RzPJnOe0GXgXnb5ceubxpBMqaOJdcZMDlZx3Ttt6EqUX345VrAuR97hRTh4B\nh/iS8RjHNYtqeHkjXxWVPaV9lD+8DQNCaFUMMGOeddF2+iCpGsm44rXxrxefQjKhcsGkc0UDDb1M\n7KzHkp3+OO/8xLiwcClt+xjVmvb6m5QTzD6uQ5i9nW1j2tJV1gNVkZkxfpTTB92L1bDNGLppM731\nOD520j8TV0Yu9XsoEZH3UQg3pWFDuka6wVcIsAp+YO5yD9eXqCgyECAb+cDZygyrOjBDMzVRLEEO\ntx3KmuWQd8kh77FO3WOXICqXihkBn7duGV6gkrf8JlS9yaZsB5f4iD4G/es9gexfwSUvUqJA6rRH\nWbTjL962XLmCvKtyh9fI8mZaxI9byZ66p9haeDm0T2nuC/2u5VPXKGDbYDlZtSTFEOStSEiaX3fZ\ndLRGzfKfeX+5j8TMZUiq4S83CvrNAwFKdllMikrM94drugmqjiTbNMQbsEsppHiJTD6Q7U6y6Cn2\nMrZuNC1JQSZBa4LWuB3bkjB6JmBbigjGGhSknXeimG1TYczwOYK8Ee9lMmVWvXMtsrOkKFYmGxDS\nipYg77H1Yzix/QSM2BAtHWUkSfKIxb1HW0tCrOwJGG4msobisXTUif5nQwF8jvbs+DhtUw2Rr/ve\nJeQE8ZiCVaynM93OjuwuQd6ShSTbWIZ/vhCgrND5Vq4Fu9jgkYvSIN7rlqYYNjZJ1dHsFckjLtcq\nIyl6IChNwth/DBAonOFcI6UmUWQJa0jYyPP0e+MNYPaOp0FtAFsKPUM1bnrnx2Iylkve5QyWbVGS\nRTu2HgdkQdJObAKAGjP8sUMQn/usl+xZ7uVkd3FSu59tJnHcS6RmPYuk6khmjNaGROBaQEwnHlMo\nODEPKUX0rTMlNN5sOYuqytiyLtwWtqC5MbFjkWwFdew28vWbkRQD25KIqWIcG2PNSIkCclOvqG8e\n8Hm7z8H1a8tNfdhjnRUThloVVHskEJH3UYxaUeFBM26tJQth8hbHFsoOwcmSZ+6CEaIxAyjVIJ4l\ne5fzu81/Iu6UKHQTHlQm+we8oLr6VIzm+rgXqeznwq4OWOst9PmEqRiA7ZVelOoyJE56GjsogDj3\nIAX81K7mDWHylOurE9G4ZnMXlZp0Lf+pYfpJNoaMAye3qWV216WiiLB2J+eA5m1LgQxteYe8bf8e\n9pZ3ICkmetfxGN2TgLDmHXz+linGRFZNz/pSMjSSJz0FQL1aj1VOIUli+ZUXSZ7MY9kWY+pGe+lb\ng9HphprHLtWBkRBaqwQ9Q4Jsi6azpGr/MSQK47wJXU6UeEL/XxLTnwtZB1plx7edyntCK0DRsa40\nJxqZ2ijiMFIteeceAuRddDRvyRcw3ICidCzFqAZB3iUr8Jwd8rZ0550xYk6kcfC9EwlyknGFsm7S\nnmwjbxTIlYre+YYhuwMi/ne/rQpSsDVBCLGJGyFWoqlRXNclJUWWQBcEVrByoh+q7hEj+OlTvWVy\nAdO+KsvYWgoZGSvmm91BmHx1A5JyGjk9jNK2l8TMZ5AdQSKlJokpMmbJ1byz/HHrwxjNwuftWg2E\ni8dGaXfW5scCPnkH7rNetm8Fd738W4KYN2Yu7516iX8/ySFQdFRJCEiiLdcXrpGIyRSMIkkl4WXO\na3LqqWfKQyKHhaO5e5kiTJVYYTSSbLFOe1ospzNVYoo4/8KxFyFJoI7ewdi2tDf/ueMcU2XqnMC7\n2Litfl/N2EEVHDrUiMj7KEN/IUPylCeQW/bXXCccJCOj1gtVg7xdYrVtO+QTr6V5u/5qqC7WAHjL\nJJTGAZANr22fvG1PA3LN5om4Qkdziv6hEoZp+ZHyjoYeLIPZEyBeSRZtuZp3/NhVyE7mLjcgxtMw\nAm0EyTtoqQhOhi4KevgeNcMK+fprJWUIErxsB9s8sLAFIg7AUHJYpTS25ZyrGCTiCrIsYztadHnT\nqZ42plt+H12t08o3CpO1c76LEHk7yT0kxcS0bAzTIqsNeoFCti152rmUKLK/wmffnGyixZkw3XSu\nSBaWpHt+WDdCtzebc8bL0byNGLph0eFoS7HxIpuZXJ8N9bfDnoptg9wUWAoGDNvid0uiBVsT10qk\nxHlFowSWjLZtltBuXXOrI2C4qTjr4knGNAvhQx29E6VjlzceAGXNyXtQSiPJlne+uz+pCGIp6xYt\nSeH3HigP+uTtnO8SnEsGleZYs38Mck6YY+Vkgfp6cZ6vecvYegJsWJ9ZizJqJ5IEiuW7GWwthW0H\nnoOsE1fiKLLiLAGVSMuNSIkCx09o8oPLzBjDeY3ZTWcgqQbxY1cj1w1jJ7NOH5IidqMk+rJjaCeP\n7XrSfxCGK4CIMY5PWRsao+A35Uac10JKTXJK58zQNkm2ScpJLzmPa2lQO/YwNGoJBb1ISvXT5SbV\nBEklSaacJaZIQrM2VRJxcb5hWpT2jQ9dwzZjnvvw2JaJIj4hVqalIeG9h+51Fdm3HoTa0OO159rD\njIi8jzI8vnOZSBRw3Es10xAGyeBHf1hbtT84eWu2+LusmTy8fKfIchUMOlGq28+X/PaD5kkXwQpS\nUsqv4OOlHJRsQbrgVeJJxAR52zbc9sd1dGcdE6ZD8oWSwc/+tI7t+4ZYvlVIvHaAmAbcDGSBicI1\nobkfYFk3uOuRDdz57OPszPjpX3/1WKAkZg0ff7DYA8AdD7/MMxt2+PsDWt7zG3q4/+ltXoY1gBUv\nB7JG1bBkDBULWJbN/zz0Mt/9zUrW7u0CyRZBOs49SorQvFVZwpZ1UnIdVqbTm1SCfmQ3iMc2Yz75\nB4Uwl0D2noDpaeZim6abFAMa6JT6qb5Glyiw28ls5b5DxbzM48sdM6yreTt+U9fE6fZxy0AXS9fu\n8zVcI0ZXT47lS2JYxXqkej/gUJIgZTfz2TmfADOOlWsRVhGnbbmhnyFpPzNaj6cp0YBWEtey4jl+\nt/lPDJYzKHoDZt94QAqR992PbWbzfiG8NSTqGN/a4l03Ptl5FxzXgusxsYvChy4lnVgKR4BKqkkS\nMZmybnoWiKyWDWhsYdLxfMoBUhjVmgYkFIe8pXiJtMNHSVX0W5Yc0
6/zPONOXeykEqg1YMvCwuCQ\ntyXrYc0dyA6oSDGd3WN+i5zyg62GCjpnj52HbVbTQVJJEFcV8jlQibNreE9ovyuY6Hum+hslv5JY\ncL16LeKb0Xo8lxx7MZIk0ZRoqNqfjqX9zHqm6iXsKSf3M1jOkI6F6y00JxrZm9/PQMOLYi4zVS+W\n4p7Ht1AeaGFy9p3e8dZwi5dpsi6pYhtxJFVHVWTf8uhp3lLoHqxcE+WNp4KexDBtNnVl+PkD6w95\nIaeREJH3UQYvwtGSR9C8fXJdvbWvan9wOYcZINpHV3RhmFY4KrZm1R0/GKyW5h3URCXVr9Lk19AN\nrNX1yFtm1mThk3txU6+fwcohnadW7eXZ9d3c8svneXHnDsDRLBHLSTzLgeFrIp3pDq8PAC9s7OXJ\nrmdZXlgUShm5bmcgM1egb66/2fXLu9jTm+dXj/vJN4JLjH58/1r+9MyOUCYwSxJ/nzdnfDga10Hf\ncJ6+TJElq/exbscgK7YJ4cQq1vtai2z4ZnPZICY596m7+Zb9PnqWF1MNkH+15l3YO9Zr31svr1ue\nyVnfNY2J6SkhzXt3r/PsHRLq2qvRtdsJ7nK1UvcenWdh9ApNRx23lb+8sMeL5G9K1lPWTdZvz2L2\nj64al3pzFJObJqEbFtZwC5IEckoIdW5ynrdPOFvcS05MY7uNjSLeApDxScMn7yKPPt/FcKmIbcP0\n8e00pfzJ2DYVLjhtgvfeubGK9YrQqpV0nqb6uDdeKVVohZpmehaIgVLGF5Zcn7kTeCincsSOXYXa\nIVZttNbVMXl0g9NH8b6NHS15edBd8vVQIfx1NjaGfttaCjlRIjZlFSYaKZf8HfK2HdO7jS0sHDbU\nxVNccf5xjG2vJ2ZWL29SZMVZlSJR6vOjqMubT0HvOs57zu3pZibEnWWkqk5RFXPP5I4OLxVqXQ3N\n+x+mLOSCSW8T/ayxfGx0U6OnOYPvv3aRVivJ2zGdpzYiyRa2odJQkRBmzqQp3t9mpsPLQJlMqCTk\nJHJcFwKPa4Gq4bcHMHomYGU7aUzHMEyLp1ftZdm6/fRnj0x+84i8jzJ4ZGHEvIQQQYQCoCo0ye99\n8kzGjvJfZMM2mDymkUmjGiiUDQzDqliPWi0cZEv+MqNaPu9g6khJMTzy9uoDy7XIW+GMmaOZM80h\nXOe6biajYPUeOT0szLmOyTc4oU0d3eH9PaqCvLsHi6GsX36DZu2/HXPgLv1lnt+/MnxOILCndi7j\ngHnc6d+0Cc186rJpVUdqZjkkhA3oTgnIYr0n8UuqCNBRFAkUHQXXzxj0AYoJzrUE2IbqTToN9YHP\nXNHF0idLEe3bEpYsyLikGb7mbsQo66YnxMjJgle8xBUWTC0WIka3L+75AP/90X9gUuMElPpBhu0e\n9sbEWH78nbO5/v2zAbDyTVXjIluOS8AwPdLxVg441291TNXZrF1V79pSfWuEbz1wBQwDhRjzZo5B\nkiTU3aeKZUKKiT12LRPGiOuVyqKm9LX/cBYA8+c1M3/2uEAwWIJETMEGGmMueQ/6AW+u5u1o7kpr\nN2rbPuQ6IYTc8E9v83IPWI5PefrxKe+7cjVvL6eMHo7GnzVugvf3tz8+jwmdghzV9n0YUtkjb1fz\ndp+Vi3Qsxa1Xn8Ox45qIqTIzx4r2bEOl05jBW8ecDvhLSs0+EX9QrzZgDY5G6fdzPnz742+lNS2+\nydiY7QzYuzm+ZSpffN+5/MvFM4BqzbshVs/ExrAZ28qFBZJxLS2hnPZSxZyUrqHNB2FrqVBFwpnH\ntPD2U8czPjlZXG+oDdW5P1mSOG5MBzYW31/zQz/4L0DeDTFfwDH7x5KMKzTVJzBMS9R4l6Ct6dBU\nX3wlROR9lMElC3dyrUTIh1rxosdUORSJbdg6MVUmnVTRdKeggHJgzXsoQN7lGpp3KFtWgLx9zdvv\nk0fejmTtfaTudR3ydgPzpGQOuW4IK9vmE1egv2ogAdKYOmfpiUMm/dlSyKzuJVEIEHZQcLED2ZTu\nWH936B6DwVmVZnWlfTdKu798zG1TUYTJOwjbktCscMrUYVMEuNmlOo+0pFiZZFxBloVA4+ZxxlKw\nLRkppnnpJstWwIXg3INhB56pE8ErGEEiIdWhSeKZarqFZrmFMWLifdATyCjIyYDP2yFRvaSCGcM2\nFc9c6xKrazaPqTJtyRaQID9KVGhS+o9lcvNEjxRcK0pojB0TsW5YHmkpDYNI8aInILgTaX8mXPEL\nwFQC5K255F1EqsuKwCzbJ8J0cRJG9zEAPLN/GT3SJm98VUVmVLodWZJZ2buGPnmrFxOQiiW9dzet\nOMVB9Kz/jjvjbzlL+jwyQBBZc6LJS0lslcWzzpQy3mqKSh+xvP2tnNJxovd7Zsdx3t8xVWFWy6zQ\n8UmPvMU4G/smc0LsLG99eqXw3ZRwnoNkc4w1jytnvFcc5xatGWrj7JaFvHvM+wHoqCCphrgjPIze\nSVxOcOnUd4b2B8n7golv4wunX0MlyhtOp7R2nve7M91BIjYyTbkCigvTDs95drE+RN5u8NtFoy6l\n+OLbwYyhBoJZ61RxDz3FXuRkwUmyI85RFZn6eB0fPuFyJmbeAbZMQzqGqsjohk1vtkRrQ7KqENTh\nQkTebzDolsEL3atGXPJVMv3JtbbPO1A4voJ8Y6rirSEGQLZEBKVTYSlb0MKm3Rqad1+gwENNzTto\nNlcM8k4ke9Eh76CJ17CdJTfOB5WIK0h1Wc8n7loO3OVZSpvwVZt943yTslJtogY/o5N7P2U9LJgk\nqHP6GPQHB9ZDl/2JqXISDd5DZWrP+JS1xKesCbTpkLcsoVNhTrNUdFsLZR3TLFdzjnvFFKR4mURM\n8QOeLH+JkK3HQfXJW7c1Z3mM4pnNTcIJQkLEJTWiSQWQRIpUL3LdUB3BSyItNSKlh4jPWYSUzHkC\nUbkozKlWoQEplUNp24OccM+Pe/fdFHdIIZHHzHQwpnwasiT7BXMMv7b4jBZhnVBNJ5LesDxBQB29\nk+QpTwrLhy15/s7eTMl7Z+aOOhWAMaXT/HE2Y9iGitLc65fRDFilEjEFK+dr/xa+5qwqMnElzsJj\nzmNYy/F88REUZy11OuYHU6WoR5UUitKQJxDalkIqoYAR9zK9uVAlX5sDMHSFhBJnsJxlq5NCdErT\nJIKQyg28a8qF3u+JjX5lrrgqc+boed56cPDJ0hUQsFSmpWbzDqeNyY1+8hcARXIEViksCfnr6yVa\n9anETfE8O5rDxOmSN8C5o89hQsPY0H41UBP+H45d6AsLQVgqdqGJy6dcwTWzP8bcUbOrqskFMXfU\n7NDv9x1/ifftg1jn71aQA0g6wlZSjXvvnRog2/p4WJMXFj4xfm5g2+mjT0XVxfuSTsaIKRKGKQJn\nK8fkcCIi7zcYFu94nP9Z93/cv/Xhmvu9IDFbqql5B3OaV5KvLNuUrTC5xlWZdDIG2BhNO/xoVaiK\nNpcb+1jc/cfqvgQQIrMK
n7c6ertXHxfAQiz18j5OtRSuUSyHydsNGDKHW3yTskNoqiJj4pN3a7KV\nhJIIpYN1NSZzqIVOyzFhBwQc19xp5ZrQd87wtlea+4Jthgs01Fiap7h542VPsDGHm/nQ8R/ANhVM\nWw/lHNesssiFbckhzTsekz1BwF0/DIARR1I16p01/yaaFyTkBqwFyRtVJyb5ZFkvi0lIbhhk7eAa\nj7xtM0beqaLVoDoR5bKN2tnlkVCx4JiF841IEsSPXYM6QQRTeZYRSQpN0ma2jQ4nKU88oFG17lnI\nl97yWT4y/QOUN84hVRJJRXTDCsUyiPHQUOwEsiRjWlaoZOqxzZP477d/i3GcGDonKIyBsxzPQTyu\nYA2OQt80h45Um3+Q5QsYFx9zPh+Y8T5/V66JZEz1a0obNu2pdqz4MI0Nzn2ZKumEeBZuJjcXHzxB\ntOUSgmHatCRb6C8OsGlwK82JJi8ILvhadaTamdQwgYXHnIcs++MXU2WSCVUkxnFwaufJ4hoBzTKm\nysyfcDafnfMJPjDjn0J9mtYqgs7M3rApOzPsv++9mZIXhNpWURmsMemblMc3d3IgjFQO2MVJ7TM4\nrmUKkiQRj/vvu7Z9JlY5yWzlHXz5LZ9leutxofPG1o/m+jmf8n7bxbqQ5u0+r2CKYzVQddHVvF2Y\nw63e30GN2nUDphNqiPzbm4+MyRwi8j6ieOz5Lrp6wqkphwoaDyz1g5y2O8kLdgyJZSuPrOjinsc3\ne8uhvCQNstCUlq3bz12PbOS3T2xhqKCFfd4V5Js3CuGkIgHNW27qJT55HWpnIA96KEDGJhYo4wjV\nAWuWbYfK5EmKEYo2V8eJ7E5mthUz60ySkuV9nHvl1aH2RE1rvw61p7kYcZ/YHD92XVL1lr5JG9/G\njq4yacXPmAR45KdvO9ExHVdq3k7U9daTwIxTeukcZDPBkBZ+ZkENasPgZm596o/c+9ctxBM1stsF\nNG+3kIax5zjmjj0RLAXN0vnLCr82s47uCCaSuE9bglhZWCVc4UMPrO/V40iK5UUoIxu+VcJdR+ya\n6yUTSbZIyP6k26AKv3HsmHX8pe8BhmNOX4yY9+wanWPASTiiashWjGLJoqUhUdNnbet+bEWQvO1S\n2iPvYKnaJI2MruskEVOxsh1YTlSxHtC8XcjJAoolnv/9T2/HtGxithCwOlLtSJJUpa25goxVrMPM\ntnGccoa3TxwrYWY7mBYkA0vxtFZJkpg35jRa4+K9NbonElMV7zovbupF1uqRFJN0Y9k737VqeX57\nQNtyMjNahb/YNWnbNqT0jnwAACAASURBVExrOZaSWaZgFJnaPLlm8Q5FVvjc3E/xrikLKrZLwrxs\nJLC1BAoqJ7YLAVRRwiQPMLlpkhfU6eLE9hOIbTsXfdf00PZgVbvebJGCM1e11CdCxzUHyLsj3Uot\n/MeZX+Rrb72h5r4g4oHnlwz8bfZOoLzqbYxPTmZUXW0BIRiBbpdTNc3mSlCgCZJ3haBuF/0I+CDJ\nu0pJXSoW2t4RkfffH/b05vj1Y5u56X+eC23/5cMb+MNT2/ijk6fc9dmokkJfpshv/rKZxc+JZTa6\nqfumV4e871y0kcdf3MOi5bt4fkNPyOddGdzh1om2yk6QkWx6Pu9ahelD/uBE0VtD7aLSbL5++wAl\no+RPtorOcN5J0lLWQRITsbb5VH8Nsmx6H+cwvdiWRGnVOZhZ5+OXA+StaiKBhy37EbxOn+rTMXRL\nx9bjFLJJfvC71aTUdCi5hr+EJ4ahy1X36Js7nWIMRh2y1iisCZIFWMSnP4fS0uMtWQHYYDzDw8/u\n8pa+ubBtKeTzdjNC2UYMWZbEGnDZ4PHnffK2EMk3Jo6qByTQ48LnHTCbG5r/2bpaaSJtihSwiu6T\ntvMcOjqcNe+uoBPQLprjzc44OlYBxe+jm9K0PuaTr6Tqjt88TqFk0NaYrE3eAW3ZM5sjfPluLedY\nYFJWHRLz/LPOulndsJDNMEkAyGaSkmbw4DIh7F7UcQVXTv8nprUI7TFoKhX37rgjSmm0jXOZnvTN\n6u77Z9swJu2n6cSWQxM7wPunXIm2YwZm/1hiqkzKuc79T29n5y4np3Z6nTjdUkgnVc49ZayXZAXC\na59PmSpMvP9w5jGcMdrv07njzwx0vur2PbQ42cckSfJIpLT2TK4c93FPuw1mF4yrI5ugAS47ay7Y\nMmec4I/DhXP9wLjeTNEjrvGdgqynTxTvUFOAvNuStcm7OdHkrYmvhfEd4t0MCl+1zOYH8oMDTJFP\nQ983GZBFeteKtoKat+dWIOxDNzPtmAP+OAQ17wWnC5fD204ZGyJvNxvckUBUVewIoViuvfZv/4CY\nLF3Tn0veiqzSE8gnPVzQ6Q/UL0a2KGtmyHReLBuUpaDmHSbkYUeDtMtpSJRANompCnXJcO7lifHj\n2aVtCpO/G6S07xhGG7Pon/DnqoC1fFlDUkyxbjemISkG/UMlLMumZJSRZJvpbZP5+GfO56tPbKef\nHpAt74PSEZnF7HLaXx8qW77ZPKb564fLKSRkSAhLREdTim5LCwWaqXZSRKzLplgj6yWmUCmXbVER\nyCHsKWMbSU2qZ1sOT7CoS6pCY0oBqoYkWSL5DAifbkX0uhtjEJfjDL94BvETlgc0b9kb/1s+IqKX\nZTuGpYhc4u4MLSkGthHnuHHNfPby2Vz/2FKkZE5MHE77Wjkwm7sJJGI6LQ0xioqF5Wb0slQScoJU\nyuCmj8zllj8+AAjTopsfqjXRChUp6t1o9JyjeadUn4ileAlUDb2QwrJt0kmVn3ziHfz48TgbSi+i\nNGRoUBspBsgqpHlrqZqatxfxK0tIkh+kqBsWsRqEI5kJT7iYPKaBK+efSl+fbyFprwim0ndNI3Hc\nS+h7BbknAqbYoJY3OqTNSSGTKMC4pk7MHuGLjqkyHQHTsV1hGscU39aHFkyjdetuFu0Sgkaw1vak\n0Q386NpzhGUFeNv4M2lPtYX93QcoV/Ctj83zAh49Td2Ih4g0SE6V91OJd501hVMmt4a01ffNn8q7\nz5rMt3+9kr39ec+d0tqQ4NZrziYVF8cGY0Nqrek+GHz5I3PRdCtErkGzubftAH5wgOPUuazrEgpR\nkLxdn/dImncwT4W2KRA3QVggPPeUsZw+YxTppMpTq/wA1WT8yFFqpHkfIVg1UpUCVaYxw3KLhMih\nYhCFkkFf0c/JLSt+SktX8ivr1oE1b6fghuf/U0zH5+1XParPT+WczvnO/mCqVD/pQn3MCc6p8Hkv\nyQo/va0lhd9WFVWSBoZLXtnIpkQ9qiLTXu8kvohp3sdZtovexGa7NZklV/O2QdWwveAmmTq5Ednx\ng7c3J0WQn+l/1JYernYkMi4JE2nZ4V3h47dJxhUM1zfsCACphIrlraUuh0zwthFnsiYKIXjJLZzx\nPnPs6SiWWKftEroiS2TKQ0hIjKpvce6gVhIVE0yVeFymPhUThUEUC1s2sJVgoJjTD
yeoTZcLtLW4\n5nKfHBpiDWS1Idqakshp8fyntvqaVGstDckQZnt3kp7deiqzW+eIcUgNI8m2l9WsLqkSjym0SZPR\nd8yEgQl8aMpVBNXFUGCSLdf0eYeIXJFFwiBElbRa0buSmfDM+lPGNFV9R5WBQ9bgaN7ffg22YyUI\nam5BIvdWKQT6EkRdYAKPq3LIxxn0N4MTsJZUkSSJ9nTAOmGE1x2nEiqyJCFJEv90/Lt5+4Szqu53\nJKiKHCLaWvcUJKr4K5C3JElV7cnOto7mJLphsddZMphOxqhLxjyiDa7jrmXyPxioilxlNamteR+Y\nvINCyiuZzYPHnth+AnVqmium/2NVm3WBQlCSJHn9DL67ifiRo9SIvI8QauUZr7Xd07wlhd6MT475\nUrXm7aIuJV4iTTdDRSrCmrfN5sw28Zdjcg6bzZ3lN6UpXuIKKWg293Ihq6QSKkk1GdK8dctgW2GD\nc7AFZswj/L5Myat85Urnk5pEQJJclyURU9AtQ0RKuyZ3JxmDu9YbVRdm4YD/s0FpEfV3FZ2OppQg\n74DmrZXcQLhAZivHZFly5CK1bT9Kx26x3MPSPHJXFYlEXMEoOwJATAv5usHGGhjDhORkp9604Res\nUBNiQgsUtFBkiaw2RH28zsvFbLqme1dIkiyRWMJUfFOuQ85lCuiKIF+9GCAMxyeXNftobnL8pwGz\nbGO8kbxeYG+xy8vHPGOUr9U1JeqIyeIeZdstpOFkbnPMo+lEgg/NfC9WOemZ122nIlk66QpbNnax\nAXXvKbSkwlHESSXBuPix6LunoioyTfV+JLoLNaAdKrLkFXoQmncN8jYSnvm2crKH2r7HukCyjrBZ\n1m+/MR7O8hXsl/gd9h8Hr2NraYrPBXzRAZ93Q9zXhG0zTN6viFfBg0HNVKkIbHu1cO91pxO3U1dJ\nskqck9pnVvnjXytqEXWlUHWg/alEtQl+pIC1hng93z7nZs4c+5aqNmu9ZxAm/1cSKg4lIvI+QhiB\nu6vglsNUZYW+rK9590gb+O2m+/0DA8TqlgYtaWaIUIOat9zcy7J9ItLbKjnLpGJlYorsmM2dNddy\niua0Y/JSqoO9MEVO6sZ4AwOlQTQnA1le9zOvGb0TsE0VJSau35sp8v/Yu/P4qMqzf/yfs81MJpls\nkAAJ+yabICgo4i5Qt69WWxUXcKlaRVu1daFUpbUPuFT9Wbva1trqQ12hllddeLpp1YLWlcUVtAjI\nkkD2zHaW3x9nmXMmM5mQZCYZ5vP+h8xkZnLmJMx1rvu+7uuOWevL7eA9uXqMeVyl+/CrDx7Gqzv+\nbZ6npJ7Y/knrzKBmNUZxFy/ZhVRCoB0DyvxQDc0zbG533rKDrjkkbZ6rcLsrWJTvRWNwM3a173Iy\nd0kU4VckaBFX8PZUrsdR3xSBz96yUo45vxO/5IMkCOb6Z8mcKxdFc7cjuwMUAGiqfYFiPq+4OLGk\nx+nnbL3fiNaGmGiNnESCieYeVrOaFr0egZB1fK7gXR4wA+nKj58xHx8NYGBxYs4x4JfNoXMAJdoQ\nCLGg01TEzmz9PslsEqO5A5V5UWF/gNt75IiC0GGeWBAEnFJ9DtQvx6KqPODMwbqzMzkp8/YOm4uY\nV3YhYlumIbLpaMS3j4PSNNK5uEgOIgBQXtJx7tGdOaWbUxUEAQtGXYzohzM7HFcyRRZR2mFnP8HZ\nIcuI+Z2LG89FQYoe+r3NfUHiHjbvjeAdjWnmErqkQCUIAr459RKcMvLkbv+MVFLOb2e4oHFfdLmH\nsv0phs2TL9DSCaYY4QAS9RoAg/dByT1s/t6n9dANsxfuftd2llu/bEJMtTJcw0BdY9jJAPeXJ/aA\n1ttLrAIq8zXNDy8De/WtqI/sT/xQV/C1h5dlQYbeWG0uMSpuhqJ4h819QgClwSLo4SDEUKPzGu7M\ne2d9G0q1oYjpcTyx/jWs/2C3s7etumeY9fqJIri6pjBiVqFdsbWOcnRlrbmOdsBufNGyHau2/MU8\nUCt428PmghL3NOZwF0KVyFaLVCWC0lJ7eU7iP09Ts13oZm1VKCUqsdvaBGCH+SErVdShrug980nW\n/2NZMiuW7QsdqWq7N/OW4tjXHIEMa3jWmuMHzEzTzLytD3YljrgRQVxXUe4aQraXfCnDzKYgpSF7\nIwvZmUqwq5TbjVZExCYYutnD2mn5GPfDiPuwH19ik/oP8xQ0JqqI7S077SmX2JbEOmDAXPs/sMgM\n3pE2H9o3HO08xh42d9bhC4lhUfu47OBk/32bc9YdPwztAJuuGtcdJCVR8BSs+WQRgwODoe0fAqO9\nFOquMdBVn7MbXjDFvvbuzMo5Blfm7Q48/qQ51YmV46C3mFXlyRciboospXyvV0y5GPH35gGaL2Xm\nndziMxv8nmJAd/DufnAZ6JqKSHXBlC2ZsuxMz3FPz9gXAuky784Up/g7AwBZTrwWg/dByJ15P7Rq\nA155dyfuXvmOp9HK8sfeRn2zOTcc0+PY1xxFZWkAJQEFQsRd9GP9J7IztiIFUtUObCsyd/s5acDp\nAOBtB2oF4UvGLwIMEXpbGUR/GIZoNfiQzbaZPsmHoF+GVl8LQdQhVe72PB+agoaWKN57y/xD/vN7\nr+PXaz7Axm3m4+zgamgydMEMmvubo4gLVvC2Mm9REJ0Mz3OeUvTrdg9ZuzPvErnEeZ8lQSvwuTJv\nLe7q2Caaeyy7s57wl0M9VePun2suvZGgt1ZgQukkSKFGSFWJZXSxz6bCMIC4nZl7Mm+/uYey9f7E\nYDPaNHsLy0TWO7bYWspTuQcQVZSUJC5A7A+BgUHz8a/sfx5hcb815SGgusIOgmaTFA0xRIw2xHeM\nxfTBiTXqdvAGzPXtRpv5evaccNAvozpoBqq2Fsks7LOCS0u7+Tu3i3wG+BJroO3gbVfXjq01f85h\nYwc6owLuoFhZGoAAYGhVx9854B16lCUhkXlrZuadnDFqmp5YrpNuODPpQzlV4RLQ8QPXPUfaWYGX\nnbFVliay/AGl5haVMsy/02CK4J343XXN6CHm//3Dxg3M8MhERul+T6LYu5k3kH4IORvsn+WTRQyz\nKtyryjpvhuK+6PLMSSuJkbVU3+/KcSSTPXPeuQverDbPESczKWmAPPQTfLjTu7ymTdgLsbzOmeON\najFEYqq5jlY30KaJEACE6meiWbaWFllV1MGA7GzacP74ryKyx9zooXaIjMtPnAXdMPBKXQPW7/3M\nyXy11lKIZXUIi/sQ9I81s1PV3NtWFAV8e958/PKjTzF1qoh3/55ocFLiC6IZifWPdr/o3U1WW0+7\nGMfOOiUVMVWHjihEeCtSJ9YOxsdNiZaR5vPNDz17pACAOd/tt+daXQ1GrOB95IwifN76mfVzXWug\nnUYuGgTr/BiajOKAbA25CmZBmL9jsxnJNSw4o2w2Pmr+wNmJKbLhWHO/agDtLQJQYgV915y3JEWc\nJVRicTNaVfPnuzPvb596PJa/vBX75E8h
KDFUlgWxwzpGe8574UmH4uebXI1rrO5XQwYU47yTxiIU\n9OGdLwdgXcM/UOoL4ZRDvo5h1SFcEDYb5OwT/us89fCRI3HWCWbryTsunYn9zebWh9VNZlCwh8JP\nnjEUf39nBzTdQHFAdoYd506agj98bG7KcsnJhyEkDMCU0WbWfszUIRhUUYTRNWaf7GWXzkSFK6hV\nlRfh9kuPwODK1FXInjlvSUQsrsEwDKfaPDljVDU9MSef5kP1vmuPxusbduGZl825fk+Rmiu4JQc0\nd5CXU2Tw9187B63huJN1L7t0JprazIs+ewcrO4ja2Zq7u9hti7xVzJnMnjIYA8sCGF2ToiNZkvsW\nH43G1ljSnHfXC9Y6M6DU3BfdMHIbvAM+GcsunYnykB+KJODLfe2oTXMRaHNfdCkpRlnSFay53X/t\nHLz18V488Tdzu9p0GXqqi4NcYPDOEbswTSzdB6m0Ac2tewAkrh63la6Fuyg3psUQi5vLqEQB2Cuo\nKJaLIDQMhVi1y1xcJOowAAQUGYJsZn2TB0zA3z7ZZ3bv8oedtZhavVWQ5jOvnu250jZhPwRBgCDH\nYaiJhgMTBtdC/FhE3JpntTPvimAJmvfHzbXWmuQUpe1vbzH/muxqcSdwxs0GNIpVze5aQlJRFAK8\nsdvJrGPbJiIweb35GnIMorVlpN6ayFxDivke3t3/Nt7d/7Z5pyvzdobQ5Rj8483vG9EihII+TB0z\nEOs273aCfak+BOMGV2P9f8zzJEuCk50FjUrobaUQi5s9xwgATc0wg7ccd4oI/ZIPoiA4PbvF4ia0\nqOZzy1xz3n6fhOpQGfaFzfqD8jLRXLalJ4bNq0PeavD4DrOJSHFAdrLYE8dNw4mY5nmcX5FQWQpU\nxkc79w0vH4RqK3sqtiqFAWDW4MPx7KsfQ9s/BJNHVngyQ3e2NW5AotBt+qihngsxURBwyPBEtfWI\nwR23dxw5OH3w6ThsbjhD58mZt98nQdONRJerNMOZpUGf50PeHdB8nmHljnP0smQeQ6oP9oqQ31lf\nDQChoA+hoLeRjP1+3BcCK+bcDkkUUaIcWMFa8rntTFmJH2VJ8/2pmrR0hyyJqAz5sa85mnYIOVvc\nf0/2KE9n5DRLwVIWrKW4QAPM3/PIFH/Hydw1BRw2Pwg5w+ZWT+WInmKHKxd7yZdfkcwPJ1GDIvoQ\njWuQkpYYybIASUlsU1jXGIERC6BdS6x7tVtzhvzmB669WUMM7TAMwwrePkSsHbxkUUaFvxx14X0Q\ny/dCHmAPi7u2WlQVZ/mUvduYMyft7GGsojUchVhsZuYhV+FOyrWg9rB7WzmiH1vLk5QoxFAj9EgR\nEHd1B/OluPp27fhlX0BIFXsgKHFIrYOh7jSDn7OUyPp9+EQ/Lp9yEcT95m5DdsEaYFZdq9aOSoYu\neLL7BmsBgFi6z5mXtzd+QDwAI+aHEGzGjlZzyL0maSlSib1LkRxDaYld7Z0YNncXOk0UToTeYI6q\ndDXzce+6lG7tbUD2I/blKECXMLC8KG27R3exXbHcvXW86aQqWItZ65cVyRu8Az4JqmZkHDYHktY4\np8mQUs2P2wVv7u1dD4Q9kuD+PZX5Qx365OdCb2XeQOJiLpeZd3d4Mu8U1eBdybzNx2U+X+6Lg1R/\nS9nC4J0jTsGatYGCs/uTLWlLQ7tHud9ndmkSJA2KYG4Dam9q4ARvSXSGtQNyAPWNYQhqAO1qO1Td\nvD+shiEKIoKKGbTsIdKI0WbuYiQYgKZ4CuiqigagOdYC//h3nPvawq41yZriFGm1WNXmRofMW0Wj\n8hnE4hZUxMd4AkiqYOLeYcoZQg81QJDjHdbRFrn28lWsYUnPPLp1DGKRtca8bZIzn+tklFa2LMIq\nHrP+I8qS4BS6tEfi0PbVmIFb9cFd6mq0l0JvLYNUXg+p2mxp65f8zsWa3h6C6I9g475NKJKLMCxU\n63kPIcVV+e/XneO2P2R8kmvNtpgYdTiQzOeiCeeiSC7C5AET0j4mZm0vW1bs82Qi7vXSgiDg4gnn\n4uyxp3d7HW86SoqlYnbzEZ8ieoJOQJGg6ZmHzYH0WZV7Pa6U4jF2Zt3Y0vlFdjr2h36uM9RUpG4U\nZ6VjX8wV+/v+fXXGezHoyox9nS8VS9aF2J2x8U22MHhnQTiqoqXduyuY0yXMyvTiRlJ3LtVbgOEE\nb8Xa9UtUAV1GLK5Bttbl2vPjih28NRkCBNQ1heEXzMBoN2Zpj4dRJAcgSaK5VCfuh2EIaFWb8ceP\nVgEAtP2DPB9WA4OuTRosEVenOMOqKPcrIiL2Bh3OnLcVOBUVMdnMumvh3bIwOXifNOxYs2DKZjVZ\nEcvMPa6T23C650EXTVqAG2dcA3XXqMTx6e75bxHlYiLrtT+c7WAfMMzXtocYJUl0/qO3RVRA9SH+\n3ymIb0/sya3IImCIiG01h6ztna38kh/2SgB7HXZYi2B8+egOGzKU+q3aASXmbCBiaHLK5TElUuLi\npegAMp+ja2bivuN+2GlbSltxkeL5MEquDp9dMxNzhx/f5Z/dZUnLxlQtfebt90nQNHPY3C4sTCdd\n5uS+v7Pg3dDazeCdIvPuK+5h855edOVL5q2kec+pMu/OCtbc2/Wm09MLou5i8M6C6x96Ddc/9Jrn\nPrt6Fk7w9gZ3Q9CgR4LmGlZRcTbZ8CsSSopkCJKOPfti5iYMgt061GroIgnOhhRtERXhqIZia3/h\npqg51xpWwwhamar5QWj2zd7eth0fNXyKQfIIaPW1GDwgEVA9OyxZhg8yX3dAqd8pShs62Oc0QnEy\nb6dtpwpVtIbsFe/8kXsI8VuHXdlh/99xQ8xWlfb/PfcmAYD3P+DQkhqMLR8FGCnmvGF1RLOqdodV\nlzgZROyzqYhvH4ehhrk8ys4AZVFwisbs4VmtvhbaPnP4fNSQUqdHtxEtcvrFA4Bf9jkdLY32xEhA\nqsy3LJAI3s7fhC55AtL/G30Kjhx8OAJS9pbq2PPcQyqDnkrsQTnaaMHdrEiWBOiG4azEUBTJO2yu\nmHPebRHVHJXqJCBJXVjDW2o1jXFn92NqzIu5IQO6N8ztVyQU+eU++2B3S3Vx0l2DrL+T0mJfhkf2\nrXS/d/v3ka63eTK7F7zYyd9YV9eJ97b+ffmUp+xCG8MwnA+WRPA2/9XQMXhDC0DdNQbV46LYGfkC\nsLbLnDV5IF54C04wcipXreCtSCIgxWFEfE5L1XJ/KRoANESbMArmnLddLFUe8mPP/nZz/tgXRZEc\nwHdnX463yxow3bUcZXz5mMTxqTK+MuJknHTUZLz7aT3iqo6nt5hrzysrJWyPxc1kU1NwzNQhEMsF\n/CeyCZKswfBFYBhCh0zbfXt8xRgIgoAbzp0Gnyxi9/52zJo4CDe/9ifnfert3jluWRKwaOL52NL4\neYcLjbISH5paDRiGGfyrS8px+lEjURr0YfaUwSgOKLj27EPx8z9thLprDIQae7g8kXnbRU32nuTj\
nhpbhzGNGYV9TBDPGV+FnqzbA3GFcgN40EKK1I5tn2Nx1wTFj0FQkq7CaqMiDvsCGfebPOe+YyZ6i\no1NGmu1qX3rjC+e+AaW9u2/wLRdMx0dfNGDK6AGIxTV8/YQxB1Qo1VPupZR2sLH/litCfs8oix3I\nW9oT+5ink/yhe+uF0xGNe7OpMTVluPTUCc4GGwBw8hFD4fdJmDHeu/NWV10wd5wzrN/XejN4H35I\nFRbOH4+jJg/utdfMhuRs+vuLDkdTa+Iz1/130dn5GTE4hEtPnYBDhqcfteqrCzQG7yxStcSmCppm\nz3mbHxya4A3eEPREYxIkdtzy+yRnq0l7DbOdeTt7RUsCdCEOQwtiZ521UUdxJT4PA/sjDeZuZLrq\nZN5V5QEzeFsXEjXFQ1CsFOG4ad4sa3jpUAwOVmN3+15EP5yFY4+ag1DQh+Om1WD9B4lK7WDQgICw\ntbm9gLOPHY09cRn/ec8cNheUKBD3IVDi/aAt9lQrm+996hgzCE8YYfX/1v3QxXarUKxjRe+RVYfj\nyCGHdzj35SV+8z+rIQCCgepQGfyKhLlHJPp6H35IFYJ+Ge1R1cmU7Yst93CsnXkfOnoAJo9MVH+7\nq5zj28cDhoihlRVQRNmpcTDCJdAjRThhzHTPHL1znEWJC5KdrealwNETRnV4HODNEFJ1EOuJytIA\njp4yBIBZiX3aUSMyPKN3uTNve5jX3rSnqjzgyYrt77dFVFRXdF44l5xVpbsYOW5ajee2KAgd7jsQ\n44ZmnqLIld4M3pIo4sQZQzM/sI8lz0PbIympZJpKyPR30JWitmzo+zGdg5j7Cl/Tra+tYXNnj2Xz\nljlfavfztpc7WTtuOXt0W8HSZ+/HbDdOkTSn4GxHnVn1XVtqZtANkUa0W/PRRdY+t1X2jkuy+bo1\nJemvom8+4jpEP5wJI1zq3bQ+oCSGyJUwRH8EmpUZK7LobK0nyCoEXxRGzO/Zlxfo2s5DJa3mHLPe\n2DED6uxDKRS06wLMoFDiSz386QzJG4bntuyZ844797l55v00H+LbJmGYPsN6Qet+Q0R0w3E4b/xZ\nKX9+kc/nFA8CZuFdukpud/FVLqtac8Gdedvnede+xI5x7mFz9+890/RBbwaufCX1g6H7XMvlUHa6\nTaeyrfB+qzlkL7sCEsPmguBu2WmxN9+wMm87wxZE1QreiblQAAiIAc9rGFYWb2gydlrBe0SlOV+8\nP9Jo7kcNIGgFVLvNYeyzQzEiNAynjpyb9j0E5IDTKtL9HyIYkJ3g/Z+ItZtYuGPwhq/dXI8eD3To\nhdyV4F0ePgSRjXMQ+++UDt/r7EMped/idEt07Kvu5P9/dntUIJF5JweCVEU79lW49+VStwwFzGVP\n0Q+OcgJ4mb/jDlm2rhTP5Bv7nRquM2af50TmXeQ59+7fe6bCqT76XO1XCvECJpdD2U5ilmMcNu9F\numF45lI8mbfmLVhzb7cJwargtoKz0SHztrqLWXPefsneDMMM3ppgXQioMnZY2/UNG1AJn6hgQ/1m\nJxjYa4ZDRebws948ELfMPK/L7y8580bS7kh24xdFFlFkWM1gfFZns5i/Q1WwLMo4YegcDAqmn1eU\nZbFDoZqts0KT5Cvv9MHb/Dd52FwUEsHbnitLfs1Uy4DsD8p0u8h1PE4RRqzIXG5WuRdiJzsu2Mv4\netJoo78RBAGGYSRl3lbw3tcOvyIhFFSSNjFxZ96dz3l39fdwMGPwzi7nsz3HDp5PgT62e387rrjn\nn/j7267+1/HEsqrkgjVB6ph5G9awuW7vNuWLwO9zZ97mtZZTdWy9hu7KvJvaYigt9iHgk+GTzCD9\nft0mAMCospEA9OeBHQAAIABJREFUgPJQ9ypFk5tcOHtuW/RwCLIkmPv/Wpm37rOat8T9Kfv+njv+\nLBw39Oi0P7OzZRzJnbHcyoq9c8IBOXWBlz13XGQdWyITTPS/brcadSRn+ikzbyl1Jp+JfcEW19MX\nOdmvOXxQ560h84m9JMtd4S675rQHlgc6jES4f++ZMu+DbXqhOwrxHOTygqWvLqaZefeStz7aCwBY\n+ddPnPvcm444QyuiO/M2AAiJPautYFgSrwX8m6AM+wQ++WRnztvOvINyEIh3zLxrKspR7h/gVMi2\nurbpBIDRZWYR0qSRlTh99ghMH9e1Stprzz4UX9a3ej4EKkJ+nHroNLypbcWYkrF4+8P9MNpKofgT\nFfHmkjd7NzJft1oHdjZ3lep7t1wwHe9vrcfXjh8DUQT+ZT9WTP2zrz17Cl5Y/wVOn20VaLlesqqi\nCKOGhPD5LnP0IPkDIdV8qzNsbkVaWRJxwdxxad8DAFx51hSsa9iBrZFdnuHjZGcdMwqqpuOsY1IX\ntOWj75w/Df/3n+04+fBEEdSx02rQ0h6Hbhg4ekqiHuPCueMQ8MnY+mWip26m4D24MohTjhyOKaMq\nO33cwcyvSDhzzshO29MebIr8svmeh6R/zxfPH9/lTUk6M2N8FU6cXotjpw3p8WsdCAbvA7S18b+o\nC9fjqCHezQXswCYEWiGW1UPbMwLRlJm3GagF0XA2FnGG0q3g7YsOQoV/KBqKdwBSvMOcd5EcgBAX\nnNakLaq5DehXpo/F7JpEj+sLJ3wNL3z+NzRGm5znAeaQ8NeOTywDy+TwQ6pw+CEdA/3Xjp6Mq6uO\nwsdb67B+rbkft/sqNCgH0BSzh/SVbgZvMem22XMaSD1sPmFEhVOpfv5J4/Avc5dMKGLq4dXqiiAu\nPTWx/lpAYthbFARcd85UfPfnr6c8lmCKLlPJAf74aTU4cXpth8e5nXncGEzYfiZ+uWEfFhxydtrH\nBQMyFn7lkLTfz0dDBhTjklO869/H1pbh21/vuKzOXimwbXeLc1+mYXNBEHDeiWN74Ujz21ePHZ35\nQQeZTO/5pF6qmpclsU/+XzJ4H6AH3vkFAGDW4Bmebln2XHdgqtmcJdJa4Q3emrdgDYCZfetyIhu3\nhs1jcQ2KHgREQBOirszbqjZXJBTFi9BqBe9P2z+CKIiYPND7ITin5kjMqTkSb+95DwNTNFzpLe4P\nUPeSnqASRFMssZtXd7bLSw6YPlmCqplDy501TrAdWzsbr+5ch9HWlEEmiepz89+yksQUQ3Kmn7pg\nzTts3tWGVhWBciyddWPXHlzg3Bdt7o0/iAoJ//K7SdVVZ04ZAJKnWARRSxo2TypYgznsbcQDEKwm\nJPYcciSmQdB9ZvAWI4iq1lIxe523LCIoF6FNaoXgb8OeyJeYWDnes4mF2+GDDuvRe83Ep4hmP2rd\n8GTe7mpyo7uZd9J8kt8nOXPQXWn1eN74s/DVMachIHdvXbS3mYP3WFIOm9tz3vbwd+FNN2ad5ClY\n40cYFSYWrHWTmlRY1CGQGELSsLm9zjuRedubeiSGzc3gFo1rEKyGJHEjirC1TttemuWTRQSVIkCO\nQRpgNvaYOWh6z99UNwmC4HyIuueQPOuVXZttHIjkbPdA
d0USBfGAAnfyum+3mKp5bqfaijIx523/\nfEbv3uYtWOvfG2QQZQuDdzfFda3zBwg6/vi3T7Hps30AXJm36FoTaFecJw2bb9vdgi92mvPcUSOM\ntri53tVu0qLIEkqUIATRgFS1A7IgY2rV5J6/qR6wP0QVxTtsbjNUxbOTU1clD5tne79cZ847xfda\n2uOe26kL1rpXbU5dx8ybiMG725Izb7ufucMKyA88/T6AdMPmKgZVFGFghbWHtWvplb0dZtQIO3tx\n25m3IotOYBT9EQwtHppoitJHjpo0CANKAzh8fLVzX1BJtAOVoXSrjaA7eI8cHMKF88b37EAzSZEo\nf+/iGZgwvByzJ3v34lZkEbMmVnsKopKHzZl4976JIyowqDKISSMrUFHau21iifIFL1u7SdW9WVgs\nufuVNY9tf3inK1i7Y9FMPP3uK3izHYAuosgvIRzVnK0129VE8LaboiiyiGI90XQk5Ov7db9nHjMK\nZyYtYXIPm/vl7q0td+/zfPslR2R9swdnnbfr1zRuaDluuXBGx8cKAq4+y+z89vQ/twBwVZsbicdQ\n7xo3tBx3XXVUXx8GUZ9i5t1NquEdNjdbV7rms63MuzJkZsTJvc0Bs1GLTxFRVGT9GgzRqdy2s+zW\nWBva4+3wiT5nWN0niyj3JdYvhtIUqvU1d8FadyrNgUTBmiSaLUaz3Xwh0XGte+Pe9pJBnfVqRJRF\nWc28V6xYgffffx+CIGDp0qWYOjWxdnPlypVYs2YNRFHElClT8P3vfz+bh9LrkofN46ruZNsAnK8H\nWMN6qea8BVmFJIoIBqzgrZutIOubIs6weVu8De1qGAEpALs1hSKLKJMSwbs0zaYbfc09593duWq7\nOMkO2tnecEBI7pd6gOSkJi3MvIkoG7KWeb/55pvYtm0bnnrqKSxfvhzLly93vtfa2opHHnkEK1eu\nxBNPPIGtW7fivffey9ahZEVyG8u4qnn7lVvBu9Rqv6m72qMaqnnNJPniePHzv6MdjQDMOe9ia39i\nSfdBgIDWeBva42FnO0/ALFgr8yeCd1mgf3ZOch9z8qYkXWVn3nZGm+3t9xLD5t2M3slLBhm7iSgL\nsvZJuG7dOsyda+5WNWbMGDQ1NaG11exzrSgKFEVBe3s7VFVFOBxGWVn6/Vb7Ql1jGI+t/djZDjJZ\nqszbvVOY0/LUCgLujUnsrFoYsAN/+XwtXtn5uvVY0Wk6IUkiipUgGqNNiGgRTxaryKI3ePeDOe9U\nZDExsNPtzFtK7K8N5K5Pc7eLxa0n9tU2gURUGLI2bF5fX4/JkxPLlyorK1FXV4eSkhL4/X5ce+21\nmDt3Lvx+P04//XSMGtV5v+aKiiBkuXeXCVVVpZ8rXrHyHWzZ3oiyUABXnNVxO8rikOJ5viCJiXXb\ngJN5y4qEqqoQJFkCYEAQAD3uAwLtHY+nrBhl1hy5IokYXl6DD+o+BQBUliSC9eDqEAJFiYA9fNAg\nVA3su3nvdOfRCNYC7wB6OIjSEn+n5zudygpzSkCWRef5AZ+E8cMruvV6mVx46kT86JE3cN68Q7r1\n+iWhAKqqQrj6nKn45aoNOHXO6C69TjbeS6HhOewdPI89l4tzmLNqc/cwZGtrKx5++GG89NJLKCkp\nwSWXXIKPPvoIEyZMSPv8hoaOwa4nqqpCqKtrSfv9fY1h69/2lI/b19CCOiVxf2tbzDNsPn54CB/s\nBMLhOOrqWtAeiSWK1TQZhi4msnPLVacfin+tM3+uKAoY5B+ED2AGb9lINKNoaQ5DiyZ+dVq72Ol7\nyabOzqMAH04oPh8vvl0HjDO6dYzhtqj1WnCe/7MbjoMgICvveVRVMX57y4kQRaFbr9/cHEZdXQtm\njhuIw7v4Opn+FikznsPewfPYc719DtNdCGRt2Ly6uhr19fXO7b1796KqytzcYuvWrRg2bBgqKyvh\n8/lwxBFHYNOmTdk6lG6xLzbSjdJ2GDbX9KRtPs3MW7NeJ2q0QvBHrBcXALXjdZMsys7wuiyJqA3V\nON9zL7tSJNFTCFWi9M9hcwCo8g0GNB/8Svf+1Ox13u4qc9GqPM+W3hqaL8StGIkoN7IWvOfMmYO1\na9cCADZv3ozq6mqUlJhBpra2Flu3bkUkYgazTZs2YeTIkdk6lG4xUqzTdY8edChYi2uA7B42N7Nq\nOxjvqFqDwNRXrRcSYcQ7NpdQRBmqtaRMEgUMK0kEb/dabrt/+IiQuctSd/t254IdfANK9wZ5ZDm3\nc91ERPkga8PmM2bMwOTJk7FgwQIIgoBly5Zh9erVCIVCmDdvHr7xjW9g0aJFkCQJ06dPxxFHHJH5\nRXPIvdRH1VX88aNVmO3aBlQ1OmbeYlFiqMQQzO87Veae1xZgtJVBLDYff+GEr+HThs9RVTQQmlYH\nwCxYqykZjONqZ0MWZcypmYUnsN45JgD47uGLu70eOVfsgjNfN1qjAole6WKWq8x7C+vUiCgXsjrn\nfdNNN3luu+e0FyxYgAULFmTzx/eIu8nGxvoP8cbut/HG7red76tJvc1jqg6xsinxfGgQBQGaYeCL\nvc3eFzdE6K3lQPUOAImtO4FEm1VZFCAKIs7vZH9nScxun+/eYGfe3a02l6zny3mSeff3iykiOjjk\nRzrTBxKZd+pGG8lz3jEtBiHYAr3NrArXoEIUBei6gR/8YZ33yboIvS310rhZE83+2ccfVpPy+/mm\nImQO6Q8o7V7v9UTm3b+D9xGHmPUcIwf3zzX3RHRwYW/zDARBgF/s2Jc7ntzbXG6EIBjQWiogBJuh\nG6qzx7Wn8xoAASLu/8ZXsOaLCMaVj/Z878hJgzC2tgyVKTZc+NkNxyY6teWJMbVluPvq2RhY1r3g\nbQ+79/fg/c2zJuO8ligGlhVlfjARUQ8xeKfhDJsLqbt6JQ+bq4K5lE2PBiHpkpN5R2Kad/03zD2m\ny0sCWDTp/JQ/e0CaQJevexdXl3c/oLl7m/dnkigycBNRznDYPI3EUjEBmqF3+H6HLUFFa9vOmB/Q\nRWiGBkkUsLehvUPmDZ2nvavsXuH9PfMmIsolRpE03FXDWlKWDXirzQ3DgC5Za7jjPhi6BNWIQxIF\nGAY6ZN727mCUmZ1550vBGhFRLjCKpJEp845riYC8e387oJidwIy4HzBEqIaayBalpODP4N1lSp7M\neRMR5RKjSBruOW/N6Dzz/vmfNkFQYgCAUn8I0BKZNwAIHQrWGIi6yqdIkCUBRT6WZxAR2Ri803A3\nadFTDZu75rwjMRWCEoUiKvjhJbNRO6AUcT0OwT67ycPmev9fn91fyJKI755/GM4/aWxfHwoRUb/B\ndCYDM/NOVbCWCOiabkDyx1DmC6G02I/yYDF2RXRIkpW+JxesaflZNd5XDhle0deHQETUrzDzTkN3\nNWlJOeftWuet6ToMKWoOmQPwS+YabdGa6xaS57w1XjMREVH3MXin47RHFVLPebuGzXUhBggGQtbu\nXgEreAv2RiV
i0rC51rHpCxERUVcxeKdhrxRLV7AW01QnOzdEs9K8WDG37fRbu3wJaTNvDpsTEVH3\nMXhnIKYpWPt8dyN+9Zy5B7kmmpXmQTt4S1ZmbQftDpk3h82JiKj7GLy7INWcN0QNb31sbt9pWMG7\nWDaDtzNszsybiIiygME7A90wUg6bu7umGZJZvNZh2FxUARgQ/OGkF2XmTURE3cfgnYFupMm8reBt\nGAYMKXnY3NoRTFQhDdoGsbjZ05iFTVqIiKgnGLwzMAwj5Zy3ORRuQDcMCLKdeZu7StnD5pBUSKX7\nAQCXTlqQk+MlIqKDH4N3BuaweYrMGwAkFf/e+SaU2q0AgGDSnLchqBCKWmDEFVQFB+bkeImI6ODH\n4J2BYaRYKmavAZdUPPnpaufu5DlvTYpADIRhREKQRc5zExFR72DwzkDXOxasybDntL33FyctFYvI\n9eY3wqEO+38TERF1F4N3BobRcT9vUU/MaeuRIud+RTSXgNnD5mFxHwBAiIYwpHgQJMOH+M4xEFiv\nRkREPcDgnYFhGNCT5rwF3cysBUmFEU0Eb8GKyvawuV1ULmh++CQfZukLoe4cl/2DJiKigxqDdwYp\nC9bsJiuSCgjmBPjo8Hzn285SMYtgPV4wmHITEVHPMXin8NH+TyH4zMYqqQrWjLgVjK3gbRgCSvUa\n5/uKKENxFagJujeYExER9QSDd5KWWCt++t5v4J/2CgAz8/7vnibPY1S79kxSIQgGYAiQRG9WHfKF\nnK9FnbuIERFR72HwTtIWbwcAp6hM1XTsaWjzPMauX7MzbxgCxKQzGfKVOF+LOnuZExFR72HwThLT\nY57bcVV35rVtumadNsnsXW4Gb++pLHUFbwHmELoB7+sQERF1B4N3koga8dyOxXVAMAvWoh8dgaJw\nLbR6c37bnXlLSeu/Qkpi2Dz5e0RERD3B4J2kPSl4x1XNybz15gEI7T0aajQAABCUqBO8haQzWepP\nBG8hKXgn3yYiIjoQ7NmZJBz3bt8Zs4bNDQMABLRH44Dqg6EqEAJt1lruFAVrimvOW2SwJiKi3sPM\nO0lY6zhsLgg6YJinqj1ilprr4WIIgTAEUYNhCB0CtGepGDNtIiLqRQzeSbyZt4G4pjtD4wAQjpql\n5kakGIJgQPBFAUPskHlLouR8bX+L5WpERNQbGLyTeDJvwUAsrgGCDlmUMLqmFLo5fg4jXJx4nCFA\nTMquJw+YABgCYtsmdPgeERFRTzB4JwnHXcFbVJ05b1EQ4VcS2bQeDSYel2LYPOQrwYTGi6DtGclh\ncyIi6lUM3knCqmvYXNSdanMR3uAN3fV1ig5rAKwit8SwORERUW9g8E4Sdi0VEyTVWectQoLf5w7Y\n7lPXMfMG4AyxC4zeRETUixi8k3gzbw2abkBwhs1dp8u9Q1iKOW8gEbyd2M2KNSIi6gUM3kncTVoE\n0W5ibgZvn2vY3NATpy7VUjEAmDKyEgBw2NiBnvs5BU5ERD3BJi1JYpqrt7lkB28doiBBkdJn3qnm\nvOfOHIZDhldgWHVJh+8RERF1F4N3kqh7Y5KkzFv2BG9vIE+VeYuCgBGDQx3uJyIi6gkGbxfDMBDX\n4s7txLC5DkmQkoK3O1innvMmIiLKBs55u8R11bttp5TIvCVBhCy5h8q9WXiqYfNkrFcjIqLewODt\nYs93S4JZmCaIGiCqEATAJ/o9mbe7YC3dsHk6zNGJiKgnGLxdolbwDohW9zRRg+Azq89LlNABF6wl\nG1xpvu7omrLeOWAiIipInPN2iVvFakVSEG1aCyCp5sYjAEqVEGQxdcGakWadd7KTZtSiOCBj+riB\nGR9LRESUDoO3i5N5C2aGLEgaBMXMvEt9pZCTsm33110ZNpclEXMOHdJ7B0xERAWJw+YuMavS3O8M\nm6vOsHmZrzT9UrE07VGJiIiyIWPw3rp1ay6Oo1+IWcPmPqMIgJV5W8Pm5f4yyHLP5ryJiIh6Q8bg\n/e1vfxsXXHABVq1ahXA4nOnhec0eNldgBm9zztvMvCuKyrwFa8jc25yIiCgbMs55P//88/jkk0/w\n4osvYuHChZg4cSLOPfdcTJ06NRfHl1N2gxZBV2BoIgRJBXwRGLqIkFKMqBRJ/URD5LA5ERHlTJfm\nvMePH4/rr78eS5YswdatW7F48WJcdNFF+O9//5vlw8stO/OGIQG6DLG4GWKgHdq+wVCUpA5rbhw2\nJyKiHMqYee/cuRN/+tOf8Je//AVjx47F1VdfjWOPPRYbN27EzTffjGeeeSYXx5kT9py3oUkwNAmC\nYt6v7hwHRfL2Nvd0WwOYeRMRUc5kDN4LFy7E17/+dfzhD3/AoEGDnPunTp2aceh8xYoVeP/99yEI\nApYuXep5/K5du/Cd73wH8XgckyZNwp133tmDt9E77A5rhi4CmnlqDEOAEQtAlrztURXZvc5b5Jw3\nERHlTMZh8zVr1mDkyJFO4H7iiSfQ1tYGALj99tvTPu/NN9/Etm3b8NRTT2H58uVYvny55/t33303\nLr/8cjz77LOQJAlffvllT95Hr7CXihmqBEO39u6OKwAESJLgqTZP7rbGYXMiIsqVjMH7e9/7Hurr\n653bkUgEt9xyS8YXXrduHebOnQsAGDNmDJqamtDa2goA0HUdb7/9Nk466SQAwLJly1BTU9OtN9Cb\n7DlvXXNl3roMSTSryd0BO3nZGIfNiYgoVzIG78bGRixatMi5fdlll6G5uTnjC9fX16OiosK5XVlZ\nibq6OgDA/v37UVxcjLvuugsXXHAB7r///u4ce6+z57x1VYSzFEyTnEDtnvNOzrwZvImIKFcyznnH\n43Fs3boVY8aMAQBs2rQJ8Xg8w7M6MgzD8/WePXuwaNEi1NbW4qqrrsLLL7+ME044Ie3zKyqCkGXp\ngH9uZ6qqQp7bwqfmMUqiD7D28jZ0CQFFQlVVCEUlifddFFDgXMIYAgYOKO7weoWiUN93b+I57Dme\nw97B89hzuTiHGYP39773PSxevBgtLS3QNA2VlZW49957M75wdXW1Z7h97969qKqqAgBUVFSgpqYG\nw4cPBwDMnj0bn376aafBu6GhPePPPBBVVSHU1bV47mtpN39GuN2A4Lf28tYlSKKAuroWxOJa4sGu\nixEYApoa2+EvwOQ71XmkA8Nz2HM8h72D57HnevscprsQyDhsPm3aNKxduxbPP/881q5dixdffLFL\nmfecOXOwdu1aAMDmzZtRXV2NkpISAIAsyxg2bJizTnzz5s0YNWpUV99L1tjV5mocTuYNXXIqy93z\n3JJnqRg7rBERUe5kzLxbW1vx5z//GQ0NDQDMYfRVq1bhtdde6/R5M2bMwOTJk7FgwQIIgoBly5Zh\n9erVCIVCmDdvHpYuXYolS5bAMAyMHz/eKV7rS1E9BlmUoaqAPedtqDJ8VtB2B2jJ9bVhCDBARESU\nGxmD9w033ICamhq89tpr+MpXvoLXX38dP/jBD7r04jfddJPn9oQJE5yv
R4wYgSeeeOLAjjbL4loc\nPlFBXNUhfjEdyvCPEd5+CJSqjgMU7gI1QTAYvImIKGcyDptHo1HceeedqK2txa233orHHnsML774\nYi6OLeeiWgw+yYeYqkNRy1C291hA9UNJUSgnJbdKNRi+iYgoNzIG73g8jvb2dui6joaGBpSXl2P7\n9u25OLaci2kx+CQz81ZkCbpuBmR3NzWbuymLJAuoCAVydpxERFTYMg6bn3XWWXj66adx7rnn4rTT\nTkNlZSVGjBiRi2PLuZgeQ7lYigZVQzCgQNV0AHDmvN3cwfvsY0alDPBERETZkDF42wVngLmka9++\nfZg4cWLWDyzXDMNATIvDJ/kQ13Qosoj2iAogc+bNGW8iIsqljOmiu7vaoEGDMGnSJCeYH0xUXYUB\nwwzeqg6fLELVzcxbSbEVqOgJ3kRERLmTMfOeOHEifvKTn2D69OlQFMW5f/bs2Vk9sFyLWq1RFVGB\nqhlQZBGaZs15KykK1kT3rmIM30RElDsZg/eHH34IAHjrrbec+wRBOOiCt92gRbY28VZkyZnzTpV5\ne4fN9RwcIRERkSlj8H788cdzcRx9zt4ONBG8RSd4y3IiUFeVB1DXGPEOmzPzJiKiHMoYvC+88MKU\nc9wrV67MygH1leTM2yeLUK1hc9k1RL78yqMQi2t4+p9bnftYsEZERLnUpQ5rtng8jvXr1yMYDGb1\noPqCvZe3CHN+293HXHb1MZcl0dkaVI8UQQyEUSQX5fBIiYio0GUM3rNmzfLcnjNnDq688sqsHVBf\nienmsLmExLC5rUM3Nfs5Hx+B4NCdOPb4g2v+n4iI+reMwTu5m9quXbvw+eefZ+2A+krMybzNU+Ju\nzCKLqZbGGTCixZD3TIFPUlJ8n4iIKDsyBu9LLrnE+VoQBJSUlOC6667L6kH1BSd4GzIA3ZN5y510\nTzv4VrwTEVF/lzF4/+Mf/4Cu6xCtoq14PO5Z732wiOnu4B3zbEYipxk2JyIi6gsZo9LatWuxePFi\n5/ZFF12El156KasH1RfsgjUY5ilxr+2WUgybc3UYERH1lYzB+9FHH8WPf/xj5/bvfvc7PProo1k9\nqL4Qt9Z5Q7fmvBV3wVr6wfGDsVUsERH1bxmDt2EYCIVCzu2SkpKDMmBFtCgAQLCCtzvzdq/ztjHx\nJiKivpJxznvKlCm44YYbMGvWLBiGgVdffRVTpkzJxbHllB287cxbUdzrvDnnTURE/UfG4H3bbbdh\nzZo12LBhAwRBwJlnnolTTjklF8eWU1HVCt6anXm7C9YOvpEGIiLKXxmDdzgchqIouP322wEATzzx\nBMLhMIqLi7N+cLlkZ96GZgZt91KxUNDX4fFVZQEAQG3VwXUeiIio/8s4Hnzrrbeivr7euR2JRHDL\nLbdk9aD6gp1566oZvH2yiOVXHolLTjkEIwaHOjz+lCOH44KTx+HKMybl9DiJiIgyBu/GxkYsWrTI\nuX3ZZZehubk5qwfVFyJaBIqoQNPM24osYsiAYhx/WG3KxyuyhHkzh6XMyomIiLIpY/COx+PYujWx\ng9bGjRsRj8ezelB9IaJFEZD8iKvWHt6ddFUjIiLqSxnnvL/3ve9h8eLFaGlpga7rqKiowL333puL\nY8upqBpFQPYjxuBNRET9XMYINW3aNKxduxarVq3CkiVLUF1djWuuuSYXx5ZTyZm3z9UelYiIqD/J\nmHm/9957WL16NV544QXouo4f/ehHmD9/fi6OLWd0Q0dUi8Ev+xHXmHkTEVH/ljZC/eY3v8Fpp52G\nG2+8EZWVlVi1ahWGDx+O008//aDbmMTuax6Q/IjHzYo1Bm8iIuqv0mbeDz74IMaOHYs77rgDRx11\nFICDt4931FrjHZADaGPmTURE/Vza4P3yyy/jT3/6E5YtWwZd13H22WcflFXmABCx1nj7JbNgTRBS\n7yRGRETUH6RNL6uqqnDVVVdh7dq1WLFiBb744gvs3LkTV199NV555ZVcHmPWOZm3VbDmk6WDdpSB\niIjyX5fGhmfOnIm7774br776Kk444QT8/Oc/z/Zx5VRYjQCAWbCm6hwyJyKifu2AolRJSQkWLFiA\np59+OlvH0ye8mbfG4E1ERP0aoxSA9ngYAFAkB9DcHkdx4OCqpiciooMLgzeAdtUM3qLuRzSmoao8\n0MdHRERElB6DN4D2eDsAIBoxT0dVeVFfHg4REVGnGLwBtFmZd6SNwZuIiPo/Bm8kMu+WVvM2gzcR\nEfVnDN5IzHk3NZnd1TjnTURE/RmDN4C2eDsUUUFLmxm8K0L+Pj4iIiKi9Bi8YQ6bFytBRGPmpiQ+\nhduBEhFR/8XgDXPYPCgXIRLX4FNEiGyNSkRE/VjBB2/d0BFWIwgqRYjFNfiZdRMRUT9X8ME7rEZg\nwECxHEQ6NoGoAAAYmElEQVSUwZuIiPIAg7ddad6so6E5Cr+PwZuIiPq3gg/eMc3co3zL9jYYADNv\nIiLq9wo+eMd1M3gbunkqGLyJiKi/Y/DWVfMLBm8iIsoTDN5W5g3dDNo+peBPCRER9XMFH6ni1pw3\nDPNUBFiwRkRE/RyDtzPnbWfeDN5ERNS/MXhzzpuIiPIMg7cz583gTURE+YHBW/MOmzN4ExFRf5fV\n4L1ixQqcf/75WLBgATZs2JDyMffffz8WLlyYzcPolDNsbhWsiSI3JSEiov4ta8H7zTffxLZt2/DU\nU09h+fLlWL58eYfHbNmyBf/5z3+ydQhdkrxUTNP0PjwaIiKizLIWvNetW4e5c+cCAMaMGYOmpia0\ntrZ6HnP33XfjxhtvzNYhdEksqcOapht9eThEREQZZS1419fXo6KiwrldWVmJuro65/bq1asxa9Ys\n1NbWZusQukR1qs3NzLu4SOnDoyEiIspMztUPMoxERtvY2IjVq1fj0UcfxZ49e7r0/IqKIGS5d4vJ\nqqpCkD63bugi5h85Al89aTwkznsfkKqqUF8fQt7jOew5nsPewfPYc7k4h1kL3tXV1aivr3du7927\nF1VVVQCA9evXY//+/bjooosQi8XwxRdfYMWKFVi6dGna12toaO/V46uqCqGurgXN7ebrGrqEuTNq\nsH9fa4Znkpt9Hqn7eA57juewd/A89lxvn8N0FwJZGzafM2cO1q5dCwDYvHkzqqurUVJSAgA45ZRT\n8MILL+Dpp5/Gz372M0yePLnTwJ1NqqvaXBILfuUcERHlgaxl3jNmzMDkyZOxYMECCIKAZcuWYfXq\n1QiFQpg3b162fuwBi7matMgSh8uJiKj/y+qc90033eS5PWHChA6PGTp0KB5//PFsHkannI1JdAmy\nxMybiIj6v4KPVqquWg1aBBaqERFRXij44B3T4xAMs4qdmTcREeWDgo9WcSt4CwJboxIRUX5g8NZU\nCKw0JyKiPFLwESuuxwFDYqU5ERHlDQZvPW4tEyv4U0FERHmioCOWYRiIaXFAl1lpTkREeaOgg7dq\naDBgwGCDFiIiyiMFHbxjWsz
8QpcgcdiciIjyREFHLDt4G5rEYXMiIsobhR28rb7mhsaCNSIiyh8F\nHbHszFtn5k1ERHmkwIM3M28iIso/BR2xYnoi82a1ORER5YvCDt4sWCMiojxU4MHb3stb5FIxIiLK\nGwUdsRLrvGXOeRMRUd4o6IjlLBXTRQ6bExFR3ijs4O3qsMaCNSIiyhcM3gCgSdzPm4iI8kZBR6zE\nsLkERSnoU0FERHmkoCOWe9hcYcEaERHliYKOWFFnqZgEHzNvIiLKEwUdseJWhzWDmTcREeWRgo5Y\nMVfmrchS3x4MERFRFxV08I46c94iFLmgTwUREeWRgo5Yqq5CggRAgI/Bm4iI8kRBRyzVUCEKMgAw\n8yYiorxR0BErrschwpzrZvAmIqJ8UdARK66pruDNgjUiIsoPBR28VUOFYJingJk3ERHli4KOWKqu\nQrAybxasERFRvijoiBXXVQgG57yJiCi/FGzEMgzDzLw5bE5ERHmmYCOWqqvmF8y8iYgozxRsxIpr\nVvDW7cyb1eZERJQfCjd423t5W8PmLFgjIqJ8UbARy868DZ1z3kRElF8KNmLFrMwbmghBACRR6NsD\nIiIi6qKCDd6qlXnrugBFFiEIDN5ERJQfCjZ423t5G5oIRSrY00BERHmoYKOWXbCmaQJ8CivNiYgo\nfxRu8LaHzTWBmTcREeWVgo1acd0O3iIUpWBPAxER5aGCjVpxa85bUwXIYsGeBiIiykMFG7Xcw+ay\nzEpzIiLKH4UbvK2CNV0TITHzJiKiPFKwUcvpbW6IkCVm3kRElD8KN3jbvc11ETKrzYmIKI8UbNSy\nm7TAENkalYiI8krBBm9nP29dhMTMm4iI8kjBRq2Ys6uYBJmZNxER5ZGCDd5x97A5C9aIiCiPyNl8\n8RUrVuD999+HIAhYunQppk6d6nxv/fr1eOCBByCKIkaNGoXly5dDzOGSrYgaNb/QJBasERFRXsla\n1HrzzTexbds2PPXUU1i+fDmWL1/u+f4dd9yBhx56CE8++STa2trw6quvZutQUgqrEQCAocssWCMi\norySteC9bt06zJ07FwAwZswYNDU1obW11fn+6tWrMXjwYABAZWUlGhoasnUoKUXiZvCGJjPzJiKi\nvJK1qFVfX4+KigrndmVlJerq6pzbJSUlAIC9e/fi9ddfx/HHH5+tQ0kprEYhQLCqzZl5ExFR/sjq\nnLebYRgd7tu3bx+uvvpqLFu2zBPoU6moCEKWe2/f7XA8Ap/kRzsElJYEUFUV6rXXLjQ8dz3Hc9hz\nPIe9g+ex53JxDrMWvKurq1FfX+/c3rt3L6qqqpzbra2tuPLKK3HDDTfgmGOOyfh6DQ3tvXp8YTUC\nBQoAIBqNo66upVdfv1BUVYV47nqI57DneA57B89jz/X2OUx3IZC1YfM5c+Zg7dq1AIDNmzejurra\nGSoHgLvvvhuXXHIJjjvuuGwdQqci8QgU0QcALFgjIqK8krXMe8aMGZg8eTIWLFgAQRCwbNkyrF69\nGqFQCMcccwyee+45bNu2Dc8++ywA4IwzzsD555+frcPpIKxGUSGXAgAL1oiIKK9kdc77pptu8tye\nMGGC8/WmTZuy+aM7FddVqLoKQTffPoM3EVHfevnlv+OEE07u0mN/8pP7ce65C1BTU5vlo+q/CjJq\nRa0GLbv2xgBw2JyIqC/t2vUl/va3tV1+/PXXf7egAzeQw2rz/iSimcHb0MzqdS4VIyLqOw88cA8+\n/HAzHn30N9B1HV9+uRO7dn2JBx/8Be66607U1e1FOBzG5ZdfhTlzjsV1112F73znFvzzn39HW1sr\nvvhiG3bu3IFvf/u7mD17jvO6qqpi+fIfdHj+J598hPvvvweiKGDKlGm49trrU95n/5zRo8di1aqn\n0NjYiOnTD8eTT/4v2tvbcd11N+Ldd9/Gyy//HbquY/bsObj11u+ipaUFd955G9ra2lBSUoI77vgf\nXH75Rfj9759AMBjEhg3v4cknV2LFih93+5wVZPCOWsEb9rB5DtuyEhH1Z0//Ywv+89HeXn3NmROq\ncd5JY9N+/4ILFmL16qdx2WVX4pFHHoaqxvGLX/wWDQ37MWvWUTj11DOwc+cO3H77EsyZc6znuXv3\n7sF99z2E9ev/jT//eZUneLe0NKd8/oMP3oebb16KsWPH4Uc/ugO7d+9KeV86W7duwRNPrIbP58O7\n776NX/zitxBFEeeddxauvfabeOKJxzFr1myce+4CPPXUSrzzzls47rgT8dpr/8L8+afgtddewbx5\nX+nROS3I4G33NTc08+0z8yYi6j8mTpwMAAiFSvHhh5uxZs1qCIKI5uamDo+dOvUwAObyZHcXz86e\n/8UX2zB27DgAwO2335n2vnTGjh0Hn89crRQIBHDddVdBkiQ0NjaisbERn3zyEa644hoAwPnnXwQA\nqKmpxW9/+0vMn38K3n33bXzjG1cf+IlxKczgrSU2JQFYsEZEZDvvpLGdZsm5oChmD46//vUlNDc3\n4+c//y2am5txxRULOzxWkhLNu5KbgaV7fqpNsFLdJwiJxE5V1Q7Ht3v3Ljz11Er87ncrEQwGsXDh\nedZrSTAM3fNaY8eOw759+/Dhh5sxatQY+P3+zk9CBgUZtSL2piR25s2CNSKiPiOKIjRN63B/Y2Mj\nhgypgSiKeOWVfyAejx/Q66Z7/siRo7B5s7ni6a677sR///t5yvuKi4uxb5/ZbGzjxvdTvn5FRQWC\nwSA+/vgj7N69G/F4HBMnTsLbb/8HAPDcc6vw4ot/AQCcdNI8PPDAPZg375QDeh+pFGTwthlx88qH\nmTcRUd8ZMWIUPv74Izz00P2e+0844ST8+9+v4vrrr0FRURGqq6vx6KO/6fLrpnv+9dffhJ/97P/D\nNdd8A6FQKUaOHJXyvjPPPAf3338vbr75egwcWNXh9ceNG4+ioiCuueZy/P3v/4ezzjoHP/zhD3Hu\nuRdg06YNuO66q/Dvf7+G448/EQBw8snzsHfvXhx++MyenTAAgpGq6Xg/1Jvt5uJaHNf89hnojdWA\nIeKWC6ZjwojOe6tTamyn2HM8hz3Hc9g7eB57rrNz+Pzza7B79y584xvfPKDXS6Ug57wVSYHeMNi5\nzcybiIiy6Z57/gdffrkTd911X6+8XkEG72SsNiciomy69dbbevX1CjLl1HXvTAEL1oiIKJ8UZPCO\nxr1VjRw2JyKifFKQUSvWIXgz8yYiovxRkME7OfOW2B6ViIjySEFGrWjc2/mGmTcRUd96+eW/H/Bz\n3nvvHTQ07M/C0fR/hRm8Y0mZN+e8iYj6zIFuCWp7/vk1BRu8C3KpWMdhc2beRER9xb0l6PnnX4gV\nK36IlpYWaJqGG264GWPHjsP//u/v8cor/4Qoipgz51hMnDgJr776Mj7//DP8z//ci8GDzd4dfbEN\n6OWXX+VsAxqLReD3F2VlG1A3Bm+w2pyIyLZ6y1/w7t6Nvfqa06sPxTljz0j7ffeWoL//
/W9x5JFH\n4//9v6/i888/w09+ch8efPAXePLJ/8Vzz70ESZLw3HOrMHPmURg7djy+851bnMAN9M02oOeff6Gz\nDejixVfiZz/7VVa2AXVj8AabtBAR9RcbN25AY2MD1q59AQAQjZobSZ1wwsm44YbFmDfvFMyfn35j\nj77YBrS5uTkn24C6FWTwrgz54ZNF6IYBVTMgCgzeREQAcM7YMzrNkrNNUWTceOPNmDJlquf+m276\nHrZt+y/+8Y+/4lvf+iZ+/es/pHz+wbwNqOfYe+2V8sghwyvw1IrT8fBNJ+DXN5/Q14dDRFTQ3FuC\nTpo0Bf/618sAgM8//wxPPvm/aG1txaOP/gYjRozEZZddiVCoDO3tbSm3Ej2YtwH1nLNefbU8Iksi\nBEHgfDcRUR9zbwn69a+fj507t2Px4itwzz3/g8MOm4GSkhI0NjbgyisX4dvfvhqTJ09BaWkZDjts\nBm677VZ89tlW57X6YhvQ+++/x9kGdOHChVnbBtStILcEBbj1XW/heew5nsOe4znsHTyPPZd8Druz\nDWjy66VSkHPeRERE2dbb24C6MXgTERFlQW9vA+rGCV8iIqI8w+BNRESUZxi8iYiI8gyDNxERUZ5h\n8CYiIsozDN5ERER5hsGbiIgozzB4ExER5Zm8aY9KREREJmbeREREeYbBm4iIKM8weBMREeUZBm8i\nIqI8w+BNRESUZxi8iYiI8kxB7ue9YsUKvP/++xAEAUuXLsXUqVP7+pD6tU8++QSLFy/GpZdeiosv\nvhi7du3CLbfcAk3TUFVVhR//+Mfw+XxYs2YN/vCHP0AURZx33nk499xz+/rQ+417770Xb7/9NlRV\nxTe/+U0ceuihPIcHIBwOY8mSJdi3bx+i0SgWL16MCRMm8Bx2UyQSwRlnnIHFixdj9uzZPI8H4I03\n3sD111+PcePGAQDGjx+PK664Ivfn0Cgwb7zxhnHVVVcZhmEYW7ZsMc4777w+PqL+ra2tzbj44ouN\n2267zXj88ccNwzCMJUuWGC+88IJhGIZx//33GytXrjTa2tqM+fPnG83NzUY4HDZOP/10o6GhoS8P\nvd9Yt26dccUVVxiGYRj79+83jj/+eJ7DA/T8888bv/71rw3DMIwdO3YY8+fP5znsgQceeMA455xz\njFWrVvE8HqD169cb3/rWtzz39cU5LLhh83Xr1mHu3LkAgDFjxqCpqQmtra19fFT9l8/nw29+8xtU\nV1c7973xxhs4+eSTAQAnnngi1q1bh/fffx+HHnooQqEQAoEAZsyYgXfeeaevDrtfmTlzJn7yk58A\nAEpLSxEOh3kOD9Bpp52GK6+8EgCwa9cuDBo0iOewm7Zu3YotW7bghBNOAMD/z72hL85hwQXv+vp6\nVFRUOLcrKytRV1fXh0fUv8myjEAg4LkvHA7D5/MBAAYMGIC6ujrU19ejsrLSeQzPa4IkSQgGgwCA\nZ599FscddxzPYTctWLAAN910E5YuXcpz2E333HMPlixZ4tzmeTxwW7ZswdVXX40LLrgAr7/+ep+c\nw4Kc83Yz2B22R9KdP57Xjv72t7/h2Wefxe9+9zvMnz/fuZ/nsOuefPJJfPjhh7j55ps954fnsGue\ne+45HHbYYRg2bFjK7/M8ZjZy5Ehcd911OPXUU7F9+3YsWrQImqY538/VOSy44F1dXY36+nrn9t69\ne1FVVdWHR5R/gsEgIpEIAoEA9uzZg+rq6pTn9bDDDuvDo+xfXn31VfzqV7/Cb3/7W4RCIZ7DA7Rp\n0yYMGDAAQ4YMwcSJE6FpGoqLi3kOD9DLL7+M7du34+WXX8bu3bvh8/n4t3iABg0ahNNOOw0AMHz4\ncAwcOBAbN27M+TksuGHzOXPmYO3atQCAzZs3o7q6GiUlJX18VPnl6KOPds7h//3f/+HYY4/FtGnT\nsHHjRjQ3N6OtrQ3vvPMOjjjiiD4+0v6hpaUF9957Lx5++GGUl5cD4Dk8UG+99RZ+97vfATCnvtrb\n23kOu+HBBx/EqlWr8PTTT+Pcc8/F4sWLeR4P0Jo1a/DII48AAOrq6rBv3z6cc845OT+HBbmr2H33\n3Ye33noLgiBg2bJlmDBhQl8fUr+1adMm3HPPPdi5cydkWcagQYNw3333YcmSJYhGo6ipqcFdd90F\nRVHw0ksv4ZFHHoEgCLj44otx5pln9vXh9wtPPfUUfvrTn2LUqFHOfXfffTduu+02nsMuikQi+P73\nv49du3YhEonguuuuw5QpU3DrrbfyHHbTT3/6U9TW1uKYY47heTwAra2tuOmmm9Dc3Ix4PI7rrrsO\nEydOzPk5LMjgTURElM8KbticiIgo3zF4ExER5RkGbyIiojzD4E1ERJRnGLyJiIjyTME1aSHKN/fe\ney82btyIaDSKDz74ANOnTwcAfO1rX8NXv/rVLr3Gr3/9a4wfP97pZ53KwoUL8fvf/x6SJPXGYXvs\n2bMHn332GWbPnt3rr01UiLhUjChP7NixAxdeeCH+9a9/9fWhHLA1a9Zg69atuPHGG/v6UIgOCsy8\nifLYT3/6U+zYsQNffvklbr31VkQiEdx3333w+XyIRCJYtmwZJk+ejCVLluDwww/H7Nmzcc011+CY\nY47Bhg0b0NbWhocffhiDBg3CIYccgs2bN+OXv/wlGhsbsXv3bmzbtg1HHnkkbr/9dkSjUdx6663Y\nuXMnBg8eDEmSMGfOHM8exW1tbfjud7+L5uZmqKqKE088EWeccQYefPBBGIaB8vJyXHTRRbjzzjux\nbds2tLW14YwzzsDll1+O1atX469//SsEQcCePXswevRorFixAoqi9OEZJuqfOOdNlOd27NiBxx57\nDFOmTEFjYyN+8IMf4LHHHsOiRYvw8MMPd3j81q1bcc4552DlypWYOHEiXnzxxQ6P+eCDD/DQQw/h\n2WefxerVq9HU1IQ1a9ZAVVU888wzuOOOO/D66693eN6///1vqKqKP/7xj3jyyScRDAZRW1uLs88+\nG2eeeSYuu+wyPPbYY6iursbjjz+OZ555Bs8//zw++ugjAMDGjRv///bu2CW1MIzj+NcONQQRQi3W\nYnBsjDoSBFKNOVaEo0M4REO4HGyrKQin5ob+gDBaoiVyECEipakhWkKkQKFoiERPd5DOzYxLlysX\njvw+4+F5X97tx/PyHh7S6TSHh4eUy2VP3jKI/A/qvEU8bmJiAp/PB8DQ0BC7u7u8vb3x8vLC4OBg\nW73f78c0TQACgQBPT09tNZZlYRgGhmHg9/t5fn7m5uaG6elpAIaHh7Esq23d1NQUe3t7bGxsMDc3\nx8rKCj09rT3CxcUFDw8PXF5eAlCr1bi/v3fXf4xPnZyc5O7uzp2TLCK/KbxFPO7ztbJt22xvbzMz\nM8P5+bk7zOOzrw/Svnv28l2N4zgtQfw1lKE5y/j4+JhiscjZ2RnLy8scHR211PT19bG+vs7CwkLL\n90wmg+M4fzyXiDTp2lyki1QqFUzTpNFocHp
6Sq1W69jeY2NjFItFAKrVKldXV201uVyObDaLZVnY\ntk1/fz/VahWfz0e9XgeaXf3HVb3jOOzs7Ljd//X1Na+vr7y/v1MoFBgfH+/Y+UW6iTpvkS6SSCSI\nx+MEAgFWV1exbZuDg4OO7L20tEQ2myUWizE6Oko4HG7r0IPBIKlUiv39fQzDIBKJMDIyQjgcJplM\n0tvby9raGre3t8RiMRqNBvPz8+6o1FAoxObmJqVSCdM0iUQiHTm7SLfRr2Ii8iOPj48UCgWi0SiO\n47C4uMjW1pb73/m/ymQy5PN50ul0R/YT6WbqvEXkRwYGBjg5OXHnE8/OznYsuEXk76jzFhER8Rg9\nWBMREfEYhbeIiIjHKLxFREQ8RuEtIiLiMQpvERERj1F4i4iIeMwvRph4T/csGFUAAAAASUVORK5C\nYII=\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAEcCAYAAADUX4MJAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXeAHMWZ/v/pNGlnc5S0ymmFUE6WEAgQ2UJkGxtjsMEG\nbDD+YnNwZ3PnH+fD2GcwnDFHMBmcwETLIiMJ5YByzqvd1eY0eTr9/uie7p7dlRACHQ7z/LM73dVV\n1dXd71NvqLcE0zRNcsghhxxyyOFjIH7eHcghhxxyyOHvAznCyCGHHHLI4ZiQI4wccsghhxyOCTnC\nyCGHHHLI4ZiQI4wccsghhxyOCTnCyCGHHHLI4ZiQI4wccviUWL16NXPmzDmmsg899BC33377Ce5R\nDjmcGOQII4dPjTPPPJNx48bR2dmZdfyiiy6ipqaGhoYGAO68805qamrYvHmzU6a2tpaamhrn99VX\nX81LL73k/H7kkUeYO3cukydP5vTTT+e2224DYN68eUyePJnJkydz0kknMX78eCZNmsTkyZN57LHH\nTuTt9glBEE5I2Rxy+FuC/Hl3IId/DFRXV7NgwQKuuuoqAHbt2kUqlcoSjoIgUFRUxAMPPMATTzyR\ndbwvvPLKK7zxxhs888wzVFdX09bWxvvvvw/AX/7yF6fc1VdfzcUXX8xll112Im7tnwa6riNJ0ufd\njRz+hpHTMHL4THDRRRfxyiuvOL9feeUVLrnkkl7lLrnkEnbu3MnatWs/ts4tW7Ywe/ZsqqurASgt\nLeWKK67os+zHJSx46KGHuPXWW7n99tuZPHky8+fP58CBAzz22GPMmjWLM844g+XLlzvlm5ubuemm\nm5gxYwbnnnsuL774onMulUpx5513Mn36dObNm5elMWWu/d73vsfMmTM566yzeO655z72XgG6u7u5\n8cYbmTlzJjNmzODGG2+kqanJOd/V1cW//uu/cuqppzJjxgxuvvlm59y7777LxRdfzJQpUzjnnHNY\nunQpYGl/K1asyBqHjEmsvr6empoaXnrpJc444wyuvfZaAG699VZmz57NtGnTuPrqq9mzZ0/Wvd97\n772ceeaZTJ06lauuuopUKsUNN9zACy+8kHU/8+fP57333jume8/h7wM5wsjhM8GECROIxWLs27cP\nwzB48803mT9/fi9BHggEuPHGG7n//vuPqc5XX32VJ554gi1btmAYxqfq46JFi7jkkktYu3YtY8aM\n4brrrsM0TT788EO+853vcNdddzllb7vtNvr378/SpUt58MEHuf/++1m5ciUAv/71r6mrq+O9997j\niSee4NVXX3WuM02TG2+8kTFjxrB06VKefvppnn32WZYtW/ax/TMMg8suu4zFixfzwQcfEAgEuPvu\nu53zt99+O6lUioULF7J8+XJHwG/atIk777yTO+64g3Xr1vH8888zYMCAI7bTU6Nbu3YtCxcudLS+\nOXPm8M4777B8+XJOOukkfvjDHzpl7733XrZt28Yf//hH1qxZw+23344oilx88cW89tprTrkdO3bQ\n3Nx8zL6dHP4+kCOMHD4zXHTRRbz66qssW7aMYcOGUVFR0We5L33pSxw+fJgPP/zwqPXNnz+fu+66\ni2XLlnH11Vcza9asT+WfmDp1KrNmzUIURc477zw6Ojr49re/jSRJXHDBBTQ0NBCNRjl8+DDr16/n\nhz/8IYqiUFNTwxVXXOEIxDfffJObbrqJ/Px8Kisrufrqq502Nm3aRGdnJzfddBOSJFFdXc0VV1zB\nggULPrZ/RUVFnH322fh8PkKhEDfccIOjiTU3N7N06VLuvvtuwuEwkiQxdepUAF566SUuv/xyZs6c\nCUBFRQVDhw49pjERBIFbbrmFQCCAz+cD4NJLLyUYDKIoCt/97nfZsWMH0WgU0zR5+eWX+fGPf0x5\neTmCIDBx4kQURWHu3LkcPHiQ2tpaAF577TUuuOACZDln9f5HQu5p5vCZYf78+Xzta1+jrq6Oiy66\n6IjlfD4f3/nOd3jwwQe57777jlrnvHnzmDdvHrqu8+677/KDH/yAsWPHcsopp3zi/pWWljr/BwIB\niouLndl2IBDANE1isRgtLS0UFhYSDAad8v3792fr1q2AJbyrqqqyzmXQ0NBAU1MT06dPByyNwzAM\npk2b9rH9SyaT3HPPPSxdupTu7m5M0yQej2OaJo2NjRQWFhIOh3td19jY+Klm8t57MQyD+++/n7fe\neouOjg4EQUAQBDo6Okin06TTaQYOHNirDp/Px/nnn8/rr7/Od7/7XRYsWMCvf/3r4+5TDn+byGkY\nOXxm6N+/PwMGDGDJkiWcc845Ry176aWXEolEeOedd46pbkmSOPfccxk9ejS7d+/+LLp7RFRUVNDV\n1UU8HneOHT582NGYysvLOXz4sHMuEwUG0K9fP6qrq1m9ejWrV69mzZo1rFu3jkceeeRj233yySc5\ncOAAL730EmvXrnV8AqZp0q9fP7q6uohGo72uq6qq4tChQ33WGQqFSCaTzu+WlpZeZbwmqjfeeIMP\nPviAZ555hrVr1/L+++87ZsXi4mL8fr+jRfTExRdfzOuvv86KFSsIBoNMmDDhY+85h78v5Agjh88U\n99xzD8888wyBQOCo5SRJ4uabb+bxxx8/YplXXnmFxYsXE4vFME2TxYsXs3fvXsaPH/9ZdzsLVVVV\nTJo0ifvvv590Os2OHTt46aWXmD9/PgDnn38+jz76KN3d3TQ2NvL88887144fP55wOMzjjz9OKpVC\n13V2797dyzHeF2KxGIFAgHA4TGdnZ9YMvby8nNNOO42f/OQndHd3o2maY666/PLLefnll1m5ciWm\nadLU1MS+ffsAqKmpYcGCBWiaxubNm3nrrbey2uzpY4rFYvh8PgoKCojH49x3330OoQiCwKWXXsq9\n995Lc3MzhmGwYcMGVFUFYOLEiQiCwL333ntUDTOHv1/kCCOHTw3vDHXgwIGMHTu2z3M9MW/ePCoq\nKnqF3mYQDod55JFHOPPMM5k2bRr33XcfP/nJT5g8efIR2/808NZz3333UVdXx6mnnsr3vvc9br31\nVsdHcPPNN9O/f3/mzp3L9ddfz8UXX+xcJ4oijzzyCDt27GDu3LnMmjWLu+66q0/NoCeuueYaEokE\nM2bM4Morr+xlZvrFL36BLMucf/75nHLKKTz77LOARVL33HMP99xzD1OmTOHrX/+6owHdeuut1NbW\nMn36dH7zm99w4YUXHvGewdIS+vXrx2mnnca8efOYNGlS1vk77riDUaNGcfnllzNjxgzuu+++LNK5\n+OKL2b17t0OuOfxjQTiRGyj927/9G4sWLaK0tJQ33nijzzI//elPWbJkCcFgkHvvvZcxY8acqO7k\nkEMOJxivvvoqL774Yq8Q2xz+MXBCNYxLL700a4FWTyxevJja2lrefvtt7r77bv7jP/7jRHYnhxxy\nOIFIJBL8/ve/58tf/vLn3ZUcThBOK
GFMnTqVgoKCI55/7733HHV+woQJRCIRWltbT2SXcsghhxOA\npUuXMmvWLMrLy5k3b97n3Z0cThA+17DanuGJlZWVNDU1UVZW9jn2KocccvikmD17NuvXr/+8u5HD\nCcbn6vTuy32SS8yWQw455PC3ic9Vw6isrKSxsdH53djYeMTVwV6Yppkjlj6w+1AHtz2whLOmDeLW\nKycdsdyFP7BWLL/+y/m5cQSu+veFdMfSnDdzCN+9vO+1A5kxe+JHZ1NREvq/7N5x48ofLSAYUHjq\nrqOvifm/RmYsM3juJ+dRlO//VHXe98I6Fn1UR2VJiN/+6Ow+2/vpjbOYMLL8U7Xz94LMPT/6r3N5\n8vWtrNrayOCqfB66/cxPVe8JJ4yjBWHNnTuXF154gQsuuIANGzZQUFBwTOYoQRBoaYl8lt38u0V5\neb4zFoebrL+xeOqYxudwYzeK/I8TWe0di08Cv2KNQWdX4mOv37anBWFoyXH17/8S5eX5pFQD01T/\n5r+Vg3UdqGV5n6qOZMpaC2IYxhHvt+sYnu8/Glpbo6TTGgC6/ulyscEJJowf/OAHrFq1is7OTk4/\n/XRuueUWVFVFEAS+/OUvM2fOHBYvXszZZ59NMBjkZz/72Ynszj88kpkXwzi2SGlVM/6hCON4EfBZ\nn0EipX1s2cb2OGP/DgjDNE003cAwzL95jTyaUP9vGjpxKwj+ZqHrn+09n1DC+Lg8QQD//u//fiK7\n8HeFV5bsQ5FF5s0aclzXJ9M6AFqPl0TTDR59fSunnNyPiSNdDU7tMeMwTJPH39jG5FHlTKv5eNPg\nkbB6exMbdrdy/YUnIX4Ggmr7wQ4+3NjANy4Y84kJ7u3Vtai6wRdnDgFg4cqDxFMal80Z7pQJ+qw9\nIHYe6uRXf9rI9fPGkB/y9VlfY1u8z+PHCtM0eXrhDkZWFzF7fL9jvm7jnlbW7mzmG+ePQRSPPqYv\nLdpLSZGVB8swTVTNwKdIJFIaD7+yme64ypTR5Zw7bRCPvLaFc6cPomZwcVYde+q7+MvyA5w7fRDv\nravj+nljHGI9XpimydKGlQihbsy4Gz0ZswnDNE1eeGcXowYWMX1MZda1ndEUzyzcwVfPHkV5UZBn\n39zBkH4FnDahP0fC+l0trNja2Ov42wc/YEndCn404zaCspWRIJnWeGLBdubNHMLgqnyn7CtL9iHL\nIudOG8hv/7KNc6YPQhDg7dWH+OYXxyAKAk/+dTtnTBrAqIFFR+zLB+vr2X6gHd0wue6LJxHwSTy5\ncAszavpRVhTkpUV7+cYFYwgHlaOOoWGaPPfWTsYMLu41Ru3dSZ57aydfO2e0c+wXv19Pfsiq87Og\njlzywb8hvLH8AMBxE0bKJoyequfO2k7W7Wxh3c4WnrzTtWFqWna5uuYoq7Y1sWpbE9PuPH5b5yOv\nWUn65s8eStVnYO//799b0TcnDyth1snHLmQB/vC+tZdDhjBeXLQXIIswAn7rM0imdTbva2PBioNc\nOXdkVj2CYE1Qu2Kp47qHDJJpnQ83HebDTYc/EWE8+NImAM6ZNoiBFb0TEHrx15UHs9tUdXyKxKHm\nKFsPdADQHUtTkh9g4942Nu5ty3ovAH7+wkfohsmmvW0ArNhSzBmTq4+5v31hS9t2/rDzFQKjAyTW\nn+4cz2gY0YTK+x/V8/5H9VnCcEf7bl5eu5Y9eytpj2zmrmumsmhDA2xoOCJh7O7Yy0MLt2AmXOGv\n2xrGa3sXAlAfPcyIIiur73vr6li3s4Wt+9t5+DZ3hX3mmxxVXcjanS0E/DLLNh3GBEYMKCQUkFm9\nby8fRRfxmytvQBazReob+94iko7y7lsuIb+5+iCD+iusDz7HmlWDKY9PpaE1RuHivXz9vBqOhuaO\nBIs3NLB4Q0MvwvjtX7axo7YT5QN3/5JoQiWaUBH8cUw+/beYs0f8A8HRMHqYpOJHMLX01DAy139W\n+KyTCLR19y2sNUPrs62jte89F/Rnf+R9jYMsiUc890lgfMoxMY7R3OhFMq2zs30Pf2n4MwhW/6MJ\nFekomkrGrCnkdYKkEgpkz3y70xHePvgBunHs47Hy8DrrHymddTyaUDFMgx1te+hrHvzrDY9TL68H\nOUVje5y0mmnT4L9W3c/Le/6SVd4wDR5Y/yiBccuy6lM1g7Tumr/SejrrnFjYgjFkFRtatvTqQyRu\nXdfUHndqTKQ1UqqO/+RlyFUH2dDcO1/YmwfeY1nDqqx+mCbsi1i5vuSqg6iabtenO/3f3bG3z/e3\nuaNvDVc3dGqL30Cu3oUiZYt1sbCFwIQlJPx1fV77SZAjjL8RaJ+BQyp5BOdW0kMY3pdQ7aFhpNTP\nmjA+m3qCfstk1BntTRgN0UZuXfRvvFu7uNe5tOreX8/xTXvuvafZrOe4eMt8WsI4VptyR7KTrW07\nevUn84w/CZIpjf/Z8Bh7YzsR861913XDxMRECHXjG7GepfUre10nFjcSGLsSZeBOeloWf7n2N7y2\ndyEfNW/qdd2KhjV9Po+D3XZG3VS2hhRNqLy463We3v00UqmV+TcSt4R5d9p1Uguyagl9ezzEgg4a\nYo28V7skS1uOqwn3HvLbnf9VzeBQpN75HVOzha/cfy9SUSsv7sqO4gKIJlUEX5zD7VGHaFXNQNMM\nBNF+pj0GyUtOSO5zkyWRjnRH1m9w39E/7XqNB9Y/yrrmjb364TWJxpJu/S2JNkx/DKX/Pgrzss2p\nUqG1GDoldvWq75MiRxh/IzheQaRqOnsbupw6hGCEpsKlJDUrpfXuuk7W7Gy2CitJNh5yzRVb9reh\n6Qa76zqtvReSfQuj7liaw22xY+pPbVME5DRCsBvdMDnUHKUj0lvQ17VE+WhXizO7OhqKwn7kfnvZ\nl9rE4bYYLZ2uQFh52MrY+vq+N3td53Wm9iTDlGe89R47+Xn71BlN0dQedz7mhJo9O/44dMfTNLS6\nY9fXxKC2KdKLDH+8/B4e3vgknamurPtNqTrN8RY6U9kf//aWvexv7OxTgzkcdVOaC7Lb/2hCQ648\niFTSxO93vuwcb+uy3h2l2kojLxa0O+S7pXU7d3z4/9GWtASxZljvzKHmKA2tMZYcWM/zO17klT0L\n2LinlbSqs35XC3/e9i4dKYusDDF7DOtaoyypt7bHFQKWQFy5rQnTNNnZ7ppXBMW6LqNhSCWufyLz\n/bR0Jlm5y33HMwQJ1thn+g0QVd3nEjHakeyykXSUVdsbWbuj2Tm/uHYVgYlLSBbudrSvnYc62RV3\ntZGE5qaRB9hS56ac9457Q2uMpqSVHNLURYcwDhyO0NAa48N6a0vdPa3ZGoGmG/z10EL8J60ATJra\nrfdibeP6LHI50Nht/aOk8I34CKnEakslu3/Hg5wP428ExzNzBPjv59exYvNhbr9yIsm0hlxeRzJY\nx57O/QwMDONnz3/klA1OWsTjexYB5wHw4gd7WbDccgJ/4/yarFl3Bg3RRn76wROk9kzgkVvOR5
Gl\nI/ZFNwx+8tQafKM2IxW1cChaw2//ZM0Ye9rI//2J1QB8/bzRnD7xyNuJAsgSyAP20KqH+NHjpVn1\ntdtCqMTf2+HoJYxkSifPY1ZJpjUK7JmYN6pMHrCLneHFrG5UmV41mcde30pDWxzdMJEqD9A+eAeH\nIoMZmJ9tO09oSVJ6iiJ/YdbxH/5mGZpu8ugPT0eRxSzCeLd2MZXBCn711OGse1IN913oSnXT2elu\n5BRPqfxy3aP0z6vi+5NvBGB72y4e2vxbzHSA6092d//LoDHu7gsu+JKOP+ZQpA653J1xx9UEISXI\n7f+7HDAQApZANVNB0jaJvrxnQZagTWgJuqIp7n56Dbphogzajmwnb3jwlbV8oWYgK3cdJDhpkdsH\nOTsqavPBRoKZva1MS3j+/t3dHPQvYX2LZ5ZtC92uWBowkIotwhBMKWvC9YclW/DXZNpyiVjVDKIe\noo157mOT9h7YCoJu6jz6149AzaToN2kOWxqYWNgKh4cBsC+6C7/hrm6PezSWw20xHv7ravwZ/7Os\ngt2VNTua8U9oQrSXnkiS1XBbd5IfP7GcoL3X1vvrGri8xnAIZdX2w6jFe61ZvpKmuSNOVbnCU9t+\nnzWeO2o7AAGl/16kEpf0eo778SCnYXzO2Nd1gOe2/4l46thnrpqh8dy2P7Gv6wArNlvCpqUraWkY\n9kee0JJEssIVvTNPj/qe0kDUeLnlCbZE1/Zq65FNTyHkdSH320c0cXRSy3y0UpE1o13ZvKJXmc5U\nF49vfg4hYKX7jnn6uKN9Ny9sf7GXXTxhRhBEE1Po3X5H0iKM4kBvwuiMuTPzbEI2Wd+yyTGneM1E\nctVBNCnBxpatmKbJwaYI3bE0CAa+wZaJ6EB3tlO5NdHGv3z4E/5r1f29+q77u5BKGhytRbXbEnwJ\nXtmzgEc2P9Wr33s79zv/d6W6s3xQjYkmYmqcxpgrCFoSrXadSZ7Y9Thi2DV3AFnaiOBLUhS2JNVH\nvJJVzjv7FgJxx8IiyKqjYfQPV2VdE1XjtEdSrs/D7wpNQVGpa445moFzXNJBcN9B7+wbyX0fssgC\nMIwonQdWEImriMXNCIodXSXoJNLudVnt2f/Xr36SSDRKV6o7q+8ZJOjEiBWgHh5i1eFzZ+OZb6on\npEIrIEA9NArINnF1x9IIfvf9y+qToCP4rHOCZCDJ7jsj5nVnXdNqa3sHuw+xtMPdbEzwWd93T7Oa\n1THrfTH17MndsEFH36PmWJAjjM8Z9617mJWH17KtfYdz7OOcxR81b2Jl41ruW/ewcywvoPQgjESW\nDd/7ASBlCzUx3ElajLBLX96rrbakJXxM1Z8l3PtCMmU77aJWyGRnuqNXmUc2PsWGls3I/Syh6J3d\n/3rD4yw/vIZaj50ZIClYH5Ep9m6/w+6fX8peKdyZ6uKJgw+gDLYitrwzUKmsntfrXubJLVYKbs0x\nSZkgWuW60910x9Ik7HvyCkJnKmpjY8tWDNMgriWIqO6+F2lVJ3DycnwjNtEcs4SLrhuI4Q4CE10b\nf0Z4uPW5Zo7udMTtu6RSn7DGLaJGHRu5LGY7pMViV6MQi5pZ3f2+p62ko1n1RGui3Xaqm/hGeyYP\nctrRMCQhW2TE1FiWJpcxKYFFBLGkmkUIercdLeSx6XuFqSBpDKrsOwrM0KN0HlxBJJ7GN3gbpiFg\nJKwFfwnNHXdBSXn+t+oeMP2byEowizAyGoZmaJiCjqkpmGlLqGa+F8GXIDB+qVufl0jsdvRua11O\nTHPvfWf3dnxDtmeNhXeMvO4OFY/PJeya0JDTNLZbdf732oeoVbdl9SMaV4lrvQnDuX+jhzVA+vQa\nRs4k9TeCtMdubpgm0lHWL/S0lYJlDoqnUwglCadMWnDr9M6SBEnD1F0hk0UmR4IufewCq2TGTyBb\nwiClZ9vldUPnUNQyUZmqJeD7cgKrRnY7qhSxZjaiYc1MbbOFZmh02U5Rb8QLQF2kAR0NufIQWtNg\nkmndIWKxwBLeJiYJLen2QUk5H3JXqtv5WCF7jBJq9nh5TRGdqS7HLNXs8T3s7NjDkNIqVN1ALGrO\nul4saEdvtcxyHzVvYkm9q5l1pSOItnYUnPIe2zy32ZnqpCJUTlLLJhzRQ27+UR9lnUNJUZTn46BH\ny0zvOxnfsC00x1qJhVWEvG5Ev0cwejSMuN3WndO+z71rHiCmxok6zl0zezYuq0SjKkKBdX6wOpO9\nyUNQ0IEgq5iaDyGvC/+Y1VltDanKp7ap94ZTzWtXoMbbePBn38McahKuGknzijUopSbJg1sYdOoP\nqV/zDIZQj2molM0cSPFJFqHse+9nbJ/4bbbVr2bfcxvJG1TE/ob1bB24ih/ffbfVgC5jpizzX6xp\nOy2rX8IUUygb0wy+fCxSyI9pxmnc+CeSnXWIqxNUnjEIfypE9+42/vT4U7znf5WioiLU+WEaP9iP\n5JMoP2UQgpzmwOL7GTD9GyhVm9jx65WEhxQTr+vGd9ZYmjavJdl5COQIReNLqDpjKIKSYv2GjTx6\n3wvsbtmHKIsMu2Yi+5/fSMXkcqLJycRUld2/XUf1haMJ2kQrKGnMZLYJSjDFLBPc8SJHGJ8Cj72+\nleqKMBd8YfDHll27o5n31tXx/740AZ/S2w+wfHMTYAmaX/xuPd+/YgKvr9iNIMCX5libSv158V40\n3aB0RG/BrWoGcbPLEXhr9zTQb6ibF0kIel4WMVvD8M4KAXx2qoy31rqmEUSjF2F8uLGBrQfa+fb8\nsTzxl+2I9uRTsGePqulKt39/YhVCqAsyoeO2SSKjYUTSroCIqjF+984u8kMKF8wcjOGLuqqwpIHm\n44/v70aVXVNLysgmjPakO1MT/AmSac1pSwy5kTc/enoRna3WjNtLCm2JLnYeyq7D6V86gW4YPPDi\nJmaMqaRRd/vRmewCe01aoydQYE/3Ps5ltkVOerZGkJkRNnXEeXbVe5AHp/afyYcNK+hOdRNIZZtw\nMuhIdlERKidhE7PW2h9faTOGp6+mJiPYBB4S84kF4hT4fM4MX2+vwIhZ791bm7YxavoUZzast1eC\npCEVtpHUUqRUnX2NbQiKyLLVUZIb57DCFFlt2DNpSSO1cY6nbQXDMKCznOSGOew2/OjmaLSG4Zj2\n7NdMB0husK6RShoRC1sZXFUAG9090zOonD2BZFOCC274Aet5lc51PhLNbQz68jTMhjkYUeh/6lxC\n47dgqDq7H11HwUjL1yQIAjvTFjGl2xMMu2ISgeIyzPc6efbFV2EwmLqMmbSEbv6IPIK+m5FKGoh0\nvUnj2+30O30cTWtWIPoGMmTObfgnLEZPpkltTlP3wQ5GXDeFSmEW3z3lbH665efW/dn3KfhS9t8E\nUkE7qbYEA848jeoL06jpA5TVnIevsgNl2Eb2PrmZxKgkgao2nv3lvdzz01/wh8hLpFIJ9JbhlExp\noWvPFnYWnsaqvRswdYNgZRhTlxAkHd+odSTXneU8Y
/XQSIoGtmWZ4I4XOcI4TpimycptTbCt6ZgI\n4+FXLTPDtgMdWautM2iLxsgQxu66LhZtqGeJ8RSmIfIl7sU0TRassGznlw/pHVmk6gYJwRVcta0d\nxPpZAr60wE+XR033mgPAQLRnhULm5bZZ509LNxPIbJ8t6r0I46mFlhlt3qwh2StqbdVX8xBGXUsM\nqbKOjDFEsEkrE6FUH3UFRDQd4911ll3+zMnV2dqRqGHi463Vh5BKDuMbYR1P6T0JwzWHCZJGMq1b\nhCEYCEGXnLrT3UAZYKIM3OVWIBi8unwXYAn3LA0jneJwa5yt+9vZur+dorFNYKdC6vD4Cxq6XZ9A\nl+2c13TDGZ9Qqpq4v85x5m7Z105STyIBs8pO48OGFXSlIwiq3uOZ2fdo15mJiNOaBpNXHEf3R7F8\nVgJmOoAgW/dbKJYTV/bhl9IOqZu6jJkIY2oKCV8zb66qdWamelcpYoF1D3E1wZZ97cTVJAISopKZ\nHRhoug6IzkTE1BS7jky4qfXXMHCseYKoW85eUUQ3PSHO4U5KigVkScA0rUhVpWkcauVmRJsIu9Jd\n4ANT9REor8BXFCDVbOeSalzHzqUbQVNIR1Ko3d3IFbVWqpRoAUIgSaikADlcjq5ECBcNYMee/UiD\nsTSMZAgVe2coAAAgAElEQVQjGUIz66lb9Ti61g5SAtlfweDScvbu66BqwjzARPSlkM0COjtqCQ8p\nwlcUoN3YwMrdE62bMUFrGoSpg1RxyHr37O/QVxBEjp2BEVuBmNdN5PA6utd/CIKK2mmS6ugCOQ+l\nSCQiFqGhQrICvWUoRSfvo+n9j6gfGKH90DpKJvWjguHUbqsiMG4ZgqRbwQ32M9ZaqvEPitOuu+/j\n8SLnwzhO9Ey/cawwj7RAX8wWCM2dthAXrY/Jigyx0BrrnUBN1QxU0bXPIqnUtViC4sJThhIMecxT\nGR+GqBOY/D5SiW3zNi3C0DQDwzSz7bWSSxh7Ovez6NAypLI6lOEbaenwmEQE3YlL1/EIDUCyTUHW\nj8yqdOt8c9wN/Yx6/AD3b3jIcS5a11njJPgS+Ea4TtGeJikvYZAhDN1AGbTD0sJMwa7Hnvn540i2\ncDRVW+OwP24hrwtlwF6nuriazHqKpmchmtdG3p5w+xDXrdmdphvOhyx1DrTqtwV0bVPENhdKiIYP\nRZTpSnWRTGsIHvtzRdCacOzptBZ/ZQgDTYZ0yNIoMuYIu63/N/kmCgQr3Uta6XDMhugKgyrzMbqL\nEf0JyyeUaUtXQLPGIqHHiSbSlilJV/jSmSMITFxMYOJifKOsSKHAycvwjV6LGIoQmLgYuf9+EHQC\n45ZaPht/FLGgzbnu9usHMe2sJue3MmgngqTzSsPvqShREATQO8vpPjgAvbMcMa8bQdSJaDYpaz5E\nyeqff9RHjCnvJNVcx8hvT2XIWTcQrCjC0HR8Q2zbv01oAbEAIxFGEE1SZoqUaj3nYZWlgIDRVUb9\nwu0U10xi+BWXUX3haEzN4LSRYyw3V7iLqgofCAZjq/sxwJM4URAN3k09ab0XyUICYhCtfgSCaODz\nPBdBCFptRYtIdyTorHuHEdePY+T1c8grr0HrtFaoC5LGa13/a9Wn+UD1kxcIkz+8hGjzRiJ12yke\nV4nZWo2ZyEdtsFauC/6EO8nQFQrkEnxS376rT4IcYRwn+lrcdSS0JToITPwAsbAl67jXuS30cEQf\naM+2cze1xwETqayO2m7LKWwa7uPTdANN8URYSBqHbDuwTxadWSzgkJPgSzjmCsg4lU10w6S1K5nt\n2/BoGL/66H95cfdr+IZtQS49TG17b4FuNYBr/pJTiIWtGCnbqZjRMOwxaEm4dURSrkbRmMg2TWSE\nrVdLKPIX9iKMTMy/dY1KMq2hGabjP0jvH2t1K2Bf57H3aq22GcMmE9leTJZBQku6z05Oo/pcYsi0\na5om27QPneMZ56Smm05biYjlx8kQxsHGiDV+ukxKMygJFNOe7LSc3vZzUvQwd07/PmXBUtY1bSCu\nJhyflqnL6HEr/YMYtLQMQUkR0ssYUTSUAsEimqjQ7BCQqcvkh3yOWapN2YNUbI2RqfkcX1Pc7Ka2\nvc0aE122khp2WvZFQUkC1n2Zmg9Ts7UyOY3cf58zjqbmw+ioRK21Yk3rY41Zi+wyaEk24Q+a9jWW\nEURrGYDokzH0JHHDevam6sfUPUYSqRMpKCPKIsm2CPGG7KALQdIsTSqlOSlDkkYcVbfGNuy3828l\n8zBSOr5CGUFWad/QCKbA2LIa8keU0LlvFSm/pVH7NR9SuJrI7iRp22el2d+JEixGSDRhJMLEGyJE\nOzsQ82yys18fMxVET+mIPgnRL5FuDBJr2YnePJBgXn/UaJp4g/Vd63ER0zQZVjCUkmkVtGx/hdCA\nAqSgQn2T6tQHOBqGaYhgikzNP527Z97Za6w/KXImqeNEz7QaALXddeyvTzBr5HAUWcQwTdbvamWX\nsRTBl8I3YgO6foZTPsuM4o0aCUY4rLaR8XRs3NtMR0RFzG/HN2wLhzPWJU8UxKY9bZgFUTAES1BL\nGrV1liaiyFLWLNgRut64bDUAStIS8IZMXXO0F2HEEmqfC+0aOj3CWe5hOpE0MGTE/A4E0URrHYA4\nYK9DJJpmsGxLA+vbXHNQdyoKFPRpt3c0DJsAvzr6Mt7Zv4yo1sq2/W3UHe6iJXWY/V3uoilkjb31\n3fQvy0OQVYxYAUbMcjTkF+qk6tyxUA+NtGZyeKJNMuTWVerY8zNrVvwnrcAUVcsMI2mOZlPb1kZM\nsEjQSAVI+ZNEEylWbWtyxj/SKRMwcQiktjlKoFrDTPtZta2J4tJimuIttGh1jqCRuwdxoD7G2KKx\nLD68hHV1u0lmggt0mURHPkopSOWHEIuaEUSTZERm8YZ65GQZpilRl96NMtg1H+UXKZjddhK+4u3O\ne2dqCoYdAXQgtpe60IcgWOGaq3c0kd49Cd+kd0FWEfwJS3NTfZjpgKUlFbZhJj35izQFENC7SlGA\nDc2b2W1rST1hZhb3ZQhB9SGHFEJVFax78vcU1BTiZwiioZA+WINv8A4aBx4kujRG08OrkcVWAkWD\n7f6bCIL1jI2khKabTnRVVypCLKVRSMhJRGimglSeMYT6hS+jbA4QGlhAyhQp8hcy8NRx7PvrWjb+\n+SEQBUZdWU1Mq6by5MvZ/7s/ASZyno/h10wkXF1Dqm45+xc8R94IEX9pyF33YgduGKkgwUFhgv3C\n7HxoNUqgjGDxUECkIDKRwZcdpv4v2zE0A8HMY8DkGkYVj2Br/21IAZGSSVY+MkO1EwymLcKQ++2z\nfJP2+IV8PvJ9R89BdizIEcZxomfiPt3Q+fna/wFg+66vc+NFJ7N002GeXriD/hNbwQcYEgnPegBv\nDLXgIQwrB46LB1/5iEElJb3CYREM8kM+IvE0O+vbCFRGMOMFVtiepNFt57/xKyKm5PVhZMI0M06x\nUYihbqTS
RkfAb2/Zh1LtWWVraxjrdmVrSQDN3RHIiJketnZB0jBVV8CbSVt9t4Xwxr1trGhfhNLf\nDQWNZKI5PGYYPVKElN/pjpMtZNvadRpbU0gFGnc89CEgIA/cgdLPQK0bgVK9B0HS2LCnlQ17mwlO\n0zE0BdNelCVlTFKOTV9xZtWCkrQCADIhlgemwIS3SeopVDsiTAzY8fSyiqiFaEtYZq3/+v1ylLGg\nNVdbZORP8r2H3wfVj2+MhmkIFuHrisfcZIKkYep5vLu2Dt+QBFIFNBa/j8+ORu3qMvn579ZTPcKA\nEvjj7j9jyvYs3RTRuotQALnM1czSCZln3twJgDK8nEhpI2JGjusy4aAbTpoFTcFI52OqCnKFu+pY\nDEb57RvbMUxLQImhCIEJS6wuaAoYMnpbP+sa2+8gdw90TJ5mMoxihhyyEBD4cuVNPL/5dacdof9u\n0HA0iAyJV593CmJBO4KskvxoNGd8fQ5rDlkmp6SQoPqrJxEQQ3SsPM1qt3wHcIARX/4acngHkhxm\nyJzbMFWLgMMnjySUCgHbCSlBQMNMhigcX05hjbvZ0ujpVwKQ5ytm0KUnOcdPH3cm+Wsklqo6pefN\n6hGZlMcPf/Rz/ufljwhOfTdraIecfROyJqLb79qgS6w6UzunYHRZ7QbS+YxWvkno238EQG0Yilbn\n46Sykfxpg/Xe5o8occcd8JsWKYghWwuzAz0CviMvuP0kyJmkjhM9NYxme/EUwOrtlkq/z07Z0RW3\nPhrTELPWA8Q0T+RSH07NDARRo7Y548j0HJd0CsLWiyLmdSGIJka0yI6McV9cRRYxPITRU8MwNcX5\nMDPn6hPZi9MyUVKxPhbvtXt8KkLPWO8+2hJN2THBdcfSjo9CShTjl3xE7Ygp12nXH1oH91lfdwQw\n7HmPTUKZ64yovZgv0yfJ7QOaYglYKZFdRpO5cJplMhF8KQZV5CMoKUxdol9xIaYukVCTpDSjlwYk\nqnl0pSOkdRVDsAlS87k+EZs0BVm1Z36C7SBOO/0XBNOZFRqpIL1gR1jV19rCV/aadATQ/Jjp7DUp\nGQIEMDqzd5zLmKRMtTdhmLZGYESzU5+bhuSkIDHV7Igv2bTqccYea11Ov9gpngpERhinuj8xmTVm\nEN+ddhWTS6xlzo3avqz7dbQ+XxLBH8eI53PV2aO5ft4Yrj9nalYf8hU3Q22GCH1DtyJIumvCsutD\ndgMA8ny2hpHuPe43zLOiP4JiftbxylAFXzt7FP9x7TR8PVwERrSIQRVh9/304AeXTWf2uH4Y8QKM\nuFun939ZEqgqDZHaPg0z7XdCrzd+uJbdj6+l6qxhDM0fglQ3BQyZorCP//z66VntZL6THGF8zujp\nw2iI9g4DzJCDKGUMltmE0Z3yOK+dUNc+nOJStiD0oiDfeoQZk4UeKbY+fiXtCEFJBiTVTQbo+BWs\n8yWhsPsh2W2kNDvssqvUmg3bGkYqbc+OPfDaoTMLpRw7dg+NwNR8Vqih2Nu0JR6YSZG/iC579ud1\nzJaGM05AeywywlfzuStae2hOVeHSrD44JKpbgtBn5qEKcfucq2GcNtZyHKKkGFQVRvAnMdMBqsvD\nln9BT6NqRtaCO61xMELamrZ/cOhDN4eRprjCTk4jD9yBGIw54ZboEoIvbZmQQvYCxcysug/CyNj0\nzXQAI9l3umovQQCOoAHQu0t7FBYI+qS+NQy7jxnzHcDs/jMYFHfNqpnV1hmU5RXafXClp5EMUxTO\nlqaFRjUPnH4PY0tr+GrNZUiiyPjhpVQXZffPebaagmlaa1YEAcx4mJrBxUiiyPQRgykLlCCLMpMq\nxnN6v9Pd6+PZAj4gWfdZErLfJ1l1vpOwL5R131747b1A8noQRnmwFJ8iMbgqH9MT7ZXeOx40n5OO\nRu8qRcKtt6a6wtJeDYnUllMY4B9ivZeeZyeJApUlIYxIKckNZzghv+ef/0V+/L8/Y9bsU7l50nVU\nYKXqLykIUBLOo+DwaaS2zbBuJWb199PuZZJBziR1nPDmBDJNk4Zo741aMoudRMkua4hZSe/aeoR+\nWoV7C1JB0qwgyZ4mKSAUykT72I7PVBCjuwQpvwMxvx2js5LX619yzgmBBMqAvWjNAx1tYES/MtbU\ntme1ldRS4AO1biT+0WudKKmkpiKIJnpnGXpbf3zDN3kcxiZyv/2YJugd5cjlDS7ZZcpoCromZt2n\noKQQ1TxSSYHSQDFN8WYQNQTbOW9qClXFhXR67tNLQJkP3Aq59Ttj6RdDCAh9ajkAQTGPLpoAN9QV\nXabAlw+m1a+64GKEtIqp+qgsCUKHhGqmSau6syq3Mj2eA7X9CA6tBaxEiMpAPP2zSF0qbUSusHwr\n7sI46/n5hm51H6pNGHpXH9sVO2s4BFJbZiGGO/HXrHVs8pBNGKmdUzBTHmJRA4wqqGFXtxUSbSbz\nrDDqPoRkpm+GZ0+Jr9RcxsPbNwMtWWOZQUAMuved6U86QFFpNokZpokiynxnwjezjs+pnoUoiAzM\nH8Db+5ew0dGIBNAUh6CMeIEVzAGIgsh/zPwXTNNEEiUi8TTPYq3OHhQeTO0u3VnAWJIXphsYUFrI\nLl1CUNKIpjVpKA4UAJZ14Cv9b+Cpv+5AGbKNEZXucwhIQSezzpSKCUiiO26Z8GC1YRh6mxU4kVnT\nlN45lSvPG41QVks0HUUUxKy8bFcN/xq/fnkzCVxLgCSJR9xPZu6g05g76DT7/oWsv/5UBUY0yuDO\neezYZ1kxPqudNXMaxnHCq2E89PJmlh305sI3eWt1LRv2WGaqhO7amJdsbOAHv1nGDx9exs5GTwqM\noxBG5pwv4LaZSbFQG7TTPkiuQDZsQZMJE21OWmTmnWlKth0YYOzAKlcjsDWE1qgdhaTLljARdeJJ\njZitTZi67NEi3BBOMRTB6C7FiJTY5/oQ1rpkCUw5TSaSRzaDpFSdPMme+fkTjoYRkP2UBSqsuPxQ\nt3WNrWHEYgJkZqGiq2GYhoBf8hOQ/S5ZeUgLICSFARPfiI1O/itTU5BECUH3I/iTHEpbfhwjUoxf\nkZBQMFDpSHXgG24984BZCAjEunrvlmZqiiOwM2QBONpeJmIo65oMKegKqe3TUBuGec65c7zivDyM\n7jLmyNeS2jrTLWObpExDdOzhXnxp6JepqL2MxPrTMZNhR/CqDUPRGgdz57Tvc8fU7znlja5S9O5i\nrj3pq9b9emar6r5xaI2DLU3UhALJ3r5W9RKGv1fK7SPt6xGQA5w9+HRqSkZy3divY6ZDVBRnk5Bp\nWuG2smffB1EQHeHtTTJZXZ6P0VnhaGahgFWmqiRkmQP9CcTCVoRUPpVhV7vpX1COmQ6S3jWF2cXn\nO8eDinsf3zz5qqy+OyHzpquBC4Jgj69AMmUwp3oWXxx2DuDuJW/1J8+a3HggScIxbUCW2aUv8zez\njsqnF4Lmd+r6LJDTMI4TXsLYcOgAgWKPM1jS+OP7vdMyI2pZi98+O
lBL5vtyBGsfZqfMMcVnkAZS\n26chV1o+hiitWAI0EyapYCZsf4Q/AZjEtThiogitYTi+/vsxRZ3+VQqqItEFDC4rwUzYaQWCEaev\nAKP6l9AgKhiiRgroiEUhz1LfC0tLaAIKC0UKggaGqNEMmKmQa4bx234a2TaJ6bITIRKc/L6V7E0A\nn2l9GPFuxel7hoiCcoCg7MfszkMq6CAw5V3XBxLVPFFNacyEPV6GjF+WCEgBYpLttLZJZkBxMeWF\n5ZRVRjncsMddh4IrkAv9BXTq1jM1Yvmoh0YhjxbxC3kkpC7WR62QWb2rlCADgE6MVO+Pe9zgKjZu\nVjENwVmfotYPR++01kQU0p90ohgj6AkB9chSI1JKuTIQv5yiRavP0h5GDypi5dYmGpu1LDt55h6O\ntD+SLIkEfYqTjfULY6vYUduBblQwqDLfycT7pTMMJ6tvLDmKaVVDgGx7uJkOotZamQgQNQomlAP1\nWRqGqAcJBrJFzbFsJBXyy5wzbSBD+xVwsDHCYqx3yegu4dSThvQyczntiQLzZg2hrDDA5FHlmILA\nTsqI0UhC7GDK6GmcMq4fi1f5EPyWGfCU6slZRBgOevrr6eqXJp3G3sV7OXf4ab3a/ebYq3h++4uc\nVDwJIRxkUKU1+fnxNVN5fel+5kzMznA8fngZm/a2MbgqH38fPgZZFKksDvGFkyqtRcJHwNfOGYUg\nwFVnj7Lv3zpumCY3XXwym/a0UlHUhz/sOJAjjOOE1+mdlTAMPLmaTKSyekQ79YaXDARfAqmkCcGU\n0ZN+O1bd7B0JBc7MOUMY86aP5C9rTDd1sZy2BbLghiEaohUWKqvopo6oBQCBkeo57PIvJL9fC/u7\nLRNKcSjsONsy2TIzff32vAk8vHk1TVFLW+mIRyAPJg/vx1nDJnL3ync4eVQeqxvfdrprpv0Y3SWY\nhkigooVo/UhL+NtOVG+4rtLvAAB+wRK2sYgCPpswbNJSBL/luI8VIgZjDlmYhkB7JIUpZaKabHVe\n0jB1GZ8iEZQDCJLtRLcjdvoVFfLtOeN4v7Z3csSM9jG8tD/rmi3C0BqHgO5DlkTyKSNBA43GXkxV\nIb1zCsnBlmTuy+cwdXg1GzfWYkRKHOe+1jDcIc1pNRXUh8vYF3P74kSSYZkZ7vnWDDRjCk+/s5kV\nmvuuDanMZ+XWJg4191jImVmUeAT7gSQKWcJRkUW+deHYXuXOmzGoz+sD/iM4UL3OXc//kh7o5XQ1\njmEZkyAIzla5M06qpHHdaHZ27USrH8k3bh5z1GsvPc3Vyu78+jSW7/LxwPpHOGfw6cz4wjgg25x2\n/qjZ+D0+hnDQoyF5CCPPH+Cn59zUZ5tTKicwpXJCr+PV5WG+c8m4XscHVoT5169NOeI9SJKAKAp8\ne/5Y5k6p5r+eW9dnuZKCALdcNt75ndEwTNN6v6bVVByxjU+KnEnqOOENq80IIjFtxzlnVvKWHsY3\nzLPdo3fXrSorT5NsBDET1voAb8RGFjL1KVabhUHLFKE1WpFD/lEfWZu/2AIZBGQziOCP4x+zyuqj\nZs0mFXsmnyELsGbw6ApGKmjnWXKJK98fxC/50AXLfNSVtMgvpAQJyZaAbI67EWJg29AN2TJlKBGU\nYZusML+MIOuR7hogKFpj195qvZK+wdtRBlob+CiCD0US0Vtck1p6/1jSuyfRGU31zjBqL37zySJB\nOWiNn5J0MuTmS5Y5r6iPlOgZQd4vz90vORMlZJgmJZnNHsBe7CaSyKQf13rPeDOOVL3Ds/+y6X52\niizik+2oqGSIacp8x/4N1kxXEAQUScHXY0/monzL1JPZutbnmDhs34PZt1SWJfFTRc18UgeqpId6\nXXM8W9VeUHUuh1/194raOhaMLB7G7JbxTCw52TmWeV/KxGqK/IX4PTnegh5S/LTb6h4vvCY3fx/5\n546ETD6549nO9+PwT0UYa3c0H/POcQBbD7Q75ZfVr+LB9Y+x6NAyHlz/GCnVE29tE0Y6Ypt1MkK/\n15oETyimLTTDTV/AsGeUYjCWlSJEtGc8gqSBnKbbb4UaFoes8hlBKYatqCKvfdtPHoKiWnWCE32h\nkD0LvnnC9c6MxIgUIShppPI6pIJ2ezcw2UkpoAzZSjRtEUbYZxGGX/JxwEM+Vr9sG3pmEZG9JsBU\nA4693IuS5BiqJWvG2NSH5q0IPkvDiJSSPlhDet/J6C0DMboqSKV1d92Eo6VpmJpHwxCsPToE0UBr\nGkSRYtmqi/wFWe3oHRVkhG2VhzAymkNbV5Iqv0tahm3Gc/ercG1AWtMgpGgV+b48u+5K+xpXewCL\nMBz7sikwrGBYFqH0lagyg4BPptJj4y7Jt94Hvd0itUtGfLHP6yRJ+FSE8UmEF1ihtgGlp4bxyYWZ\npBl07l7/8QWPgJdf+hOplKvdavUj0CNFTAudC1imrAwET7boz5IvdP3Yd9b07rkuHmX/9Z5wNYwc\nYRw3uqIpHn51Cz96fNUxlVc1nfv+sMEp/7udf2ZXxx5e3P0auzr20Kl6NqXxxzFNHD+AQxR9PS8x\n21fR1OheJwSijrlletVkTi+4xDouq/g9+xP0K7EEXc9QSO8aiJ6z0YyJQBEUfPb+CacNmMWY0lFO\nkYxQcyN2rBfvi0MtJ51cUYdUbS0AG5jfD0mUmFHVW6XOxPR7yUnvLkatHU1Bno/UzsluWU1mgDrN\nEaz0CAkFCAhhZJto9KYh6K3V2e1lNAwlZa9lsO7Xp9gaBm5Kc729Etk28hb63N3xUtunkd7t9qsq\nz1Lj8+QQX5w5BIAR1YUUhcKOZpcJLvBubas1DsJI5KEeHENB8yn4MpEwqp/k5lmkdkzP6rsiiRT5\n8537KAhlayleQpgwIjtqKuCTGFLlRjBl9pEw4wXcPv7HzB14GtPHWPcxdog7K5dFkZKCQK/6jxVe\nshk+wHoX+9v5lE4aUuwIuglcSGr3RBRJ7mXGOnlYySdu9+knH0GNt1G37H94+GFrkezvfvcc3/rW\n17n22q/y5JOPAZBMJvmXf/k+3/jGV7nmmitZuHAhL730B1pbW7jllhu59VbLpKS39yO9/QsUBQp4\n+unf8q1vXcOBxffTtOnPAEwaWUY61sbzj/yEa6/9KtdddzUNDVagygsvPMM111zJN77xVR599DcA\n3HLLDezcaUWfdXV1csUV8wFYuPAv3HXXndxxx//jtttuIZFIcOut3+G6667mmmu+wtKl1t4oM0+u\npLtuHQcW/4qDSx5g6YLHiMfjXHHFRYSD1viVF4hcccX8oxKP6DFJfdb4p/FhJNVjZ3bg43eX09zw\nN9GfsNIhZGyits9h6thiNtmLuX1qCWmlHUHWkIUApcUSHYaAronki4VowMk1QbbssGZANcUj6U5b\nAk0IRrN24qosCvOf189gb9ce/njQTcAnKCrhoEI0oeLvQRhCOjOzFcj35dOWbGdcmWsHlkQBvbOc\nfMqJ2CGTGfIaXjSEfKmQ
iN6F6E+iNQxl9BlW7Pe4spOy9m8A+NcvzaIsXMi6Vh8v77MIRj00GjNW\nREF/H60NFWiNg5GrDmJqlvbgmizcmZR6aBR6exWhYWEUj3r+X9+awdqdLbyyxNK4fvSVmfxqxyLL\nxODJwqrIEtjpHjIRY0a8wJnRF/o9C6aS2TP/fnmVfHvcNQzKH0CRv5DZ4/tRWRwimlBRa2vQmgc6\ncfEZk9SVZ47gD+5eRUii4BAdgJnI1mgAZFnk4hEXsHhjA2r9CAKTJX51y2zASo8+sMJN5zBxRBnX\nnl/D03aW4IBP4tLThjFpZBmSKFKc73cWjQZ9fgRB4LovnsQVp49g+ZbDbD1g+UkkSeC8GYMYNbDo\nmKJweiLgk5EH7kAqaSSWH6BqqDWrrTIMXm1bReUpJqYJO9M6yiCVuLiLx3Z/gH+CvTo56OPt6Cre\n7rFf16SKcVw6Yt4R273pplvYu28vj//2BYJ+mTVrVlJXV8vjjz+LaZrcccdtbNy4gc7OdsrKyvnF\nLx6wxiIoMHWqyR//+Ht+/etHKSjIfg6yJHDZZV/m2muvJxJP88tf3M3y5Uu56eJZfLTgl3ztm9cx\ne/YcVFXFMAxWrlzO0qVLePzxZ/H5fEQivZOBWnDf5a1bN/Pss38kHA5jGAY/+9kvCYVCdHV1csMN\n32D27DlMGGDyx9YVjD3rFmKqzIzRhYRCISZPnsKm9av4+Y0z+fD9vzLo9LlI0pG1vIwyciI0jH8a\nwhCOsiFRX/i4zYISzqY1JigppEQxWo/V0uEwELdCJwdXF3KQ1Yj57ZSZZdZCsbi12tcvBtEAjZSb\nUVP2kySIqUvZ2VptDCjLo6L4JA7qk1m2qd6J9Mns+RASLPu8IsrcOP4b/O/WRkBFECxhmNbTjCwe\n7tTnUyQSKZPx+kWYFftY3vZ+VnsFvgIiiS7MtB+haYwznhUhd9Z77uAzKQ4UMaLKmtUWxz0fph1m\nmZlBqw3DEQJx1EOjUEb3bVM3U0HMVAhZErPiyPuV5hFQ3FTNAyvCBHblEVNSrjlQl/ErIrKc0T7S\nVuJDXXFsw7Loef378D9MKHcdwZXFlmDND1p+ogxZgDXmfkVi3PBS/uCJjpMlIYvo+oIii4SUIOoB\nq62AX3JCUHuGogIMKHeJLeCT8CkSowdZ2oNXQGSISpFFSgsDTsglWEQmCAKjBvbhwzkGeLUFr9lE\nsl7OvssAACAASURBVO9VEAQrd1MmlTnZ35/8KUI8JVEg6Lee2+rVq1izZjXf/OZVmKZJIpGkrq6W\n8eMn8pvfPMgjjzzEzJmzOeusU0kkbN9cH2q/KAqsW7ea3/3uOVKpJJFIhFEjRzJx4mS6OtuYPdva\nr0NRrDFcu3Y1X/zihfjspd35+fm96uyJadNmEA5b74xhGDz66ENs2LAeURRobW2ho6OdDRvWcdbc\ns9mWCoGaxh+w3rl58y7id797jtmz5/D22wu4444fH7Utx8T88cP5ifFPQxifFD0JwycqpD07waV0\n2xYqWInNArKPZIYwZBVREBDkTNK6MvoNHsRBczVSSSO0jUJHdXwOQTFIDEgZSQT7W/RLfuv1TuYh\n2NpFdbg/pw1w4+0VSeG2U77FB6/+GZ+S4ltf+CK/XW9F0fQXxnDR5AmUBIooCRQjmNZaDEEQuOak\nK1ENFcUjMH2ySCJl7fw3INhjNTBWfDxYkSU+z4KjYr8rdMaW1jC8aIjzO19xhWrGz+Bsh6r5SO+y\nzFmyLGaFFRrRAsRwt+MjUGSxVyoW78zdp0j4hSBxOe5ESpmqgiKJ+GSP2c7ug1fI5UtFdGtdWX6D\noyEv2HutBVhC0DsuVjtiVj/7Qk9C+Tj/gN/TRk9HcpZQFrPr9fb7k06eevVBkdAO1aAdquE/7zzz\niOUWbajn2Td30q8qn3/56iS+c7+Vb+rWG77gEPCngWmaXH31tcyff0mvc0888TwrVizj0UcfYteu\nzVxxxdVHrEfXNO6//xc8+eTzlJWV8+STj5FOW0EeR2oXeo+hJEnOam/rehfBoGuefeedN+ns7OSp\np15AFC0TUyqVdgg/U3OG/8eNm0Bj48/ZsOEjDMNg6NBhHA0nUsP4p/Fh6H1klz0avPtX17VEScaz\nBUU0kw7DdmIHFZ8zSxWVNGWFAYdUTE2hUClCNoIIgTiabpA0kgiGveLYFmppM+msp/BLfnyymJX+\n4bwhczllwIzenTVk0tu/wJTKCc5MMi/gY0TRUEoC1uwzbM/sg36JkBKksIfDt7TQ9jvIEiXB3qYT\nE3vmbkieaByyVro6foi+ftuJ5zIC0jtTVmSRoEf4pXZOJbVthpOCWpFEZ9V8xmneU9AGpCCC5K6+\nNhNhEARnbMFNV+FdxPSV6utJrj271/0eCflHIAxJElGU7D7JkpBFTn2h5wrcjyvvHfujOa5TPUyw\n4SP0+0Qi0z9ZErOIsCeZHStCoRDxuJuwc8aML7BgweskEta3aM3UO2htbcXv93POOefxla98jW3b\nttnX5xGL9Q56EdEQBCgoKCQej7No0XtO+YqKSj78cBH8/+3deXxU1f038M+9d2Yy2ReyEjBCEAWM\nAsomNMgiQcKSFKIsVm1Q3BGiCNIifUqr/YHlKTwqlmKlVV7Sal36M6htQUULYl0ALaKCYkggC4Ts\nyyz3PH/cmTuZbDMJmSQz+bxfr76aO3MzOXNk7ne+59zzPQCsVisaGxswdqz2d50T6FVV2he6pKRk\nHD+u/a133/1Xi7/jVFNTg+joGMiyjM8++wTFxdpNIddcMxbvvvsv2B03ljTUu9qakTELv/jFz5CZ\nOddjPzm/HIQEdf1/8z6TYXT0rozqJgHjnY8L9LBvLUqFMfkkyqqrAARr+0xD+7Y/69qh2Ft3CCkD\nTZgWNxifWBxbVzrWBBgagmBVamG122GxW2CUItEAINhkhFkxo1GthxxdqxW6C43HgCtM2HcmAWeh\nZQfNL/JOt994hT7xuiLnKrzz8WnccO1At3Puy74Sez76AZlt7A54X3Ya3vjwe/w4fTAMBoGYE8lI\n6+cakokLjcF31d9DrQ9zK2kAuLKv5uWTw4zux4C2O194iBGzr7sUj2w76Og7GUMHRmHssHiYTQom\npfXHZ9+U4e2PtbuvDAYZE0Yk4lRxNW64Vpvwbn6hHRAdjeLSH/RbcZ3lLNwDhpZhNL1gpQ2Kw/Uj\nB8JkkPGP/zQpid6GfpFmTB2djJIL9YgMNeHAl45V9HbV7ds/oF38w4KNuHHcJahtsKG8ugH9Isw4\nXlDh2N/EFfhWLRyJ4wUViPOwwKpp37dW7mHdbdfi0LESDLs0BufPu/YM6cqAMbh/BDLGDsS1l7d/\nf78zAzIoknv208kyFRERkUhLuxq33bYQ48Zdh3vvXY5Tp07h7rt/CkALKOvWbUBh4Wk8/fQWyLIE\ng8GIX/96AwBg7twsPPzwcsTGxmHLlm149JbROPhlMa4dMRBz5mTj1ltvRlJSfwwb5
vp3//Of/x9s\n2vQ4duz4PYxGIzZs+A3GjZuAEye+wdKlt8JkMmL8+IlYtuxeLFq0BOvWPYp33nkL11wzps33MWPG\nTKxenYc777wVQ4ZcjpQUrXbZoEGDceutufjt//t/UIUEUTAE98y/1vE7N2LHjmcxffoMj/20cNpl\nMBpkZE1qPxPpjD4TMOwdDBhNh6TCgo1AowrVUacJySfRqDrKYjvmHAySAbPHXIG97wNh4SrGDkvA\nB582aIvpVAVGgwwDTIChEjZo6WqwYkY1AHOQAaHGYJxvOA85SCuJ7RwCmjUyDc99qU1sR5paHytN\nv9p1335yXBhyM1suakrqF4qlmcNbPO4UHR6E22+8Qj/eMOVBt+dzhs7FocNVsBamICjW/QP/iwmr\nUdFYqd+R5KSViwaCJDOcMz4hZgNuv9G9fUaDjBCzAXfPc90jP2RApB4wjI45jFszXGU0mo+qhAe5\nByfn4rembWotw5BlCbdmXI7PvynzKmBIkoRbZriX8zjwZTEaLPYWGYZzTD9nyhC3x785XYHf7NJq\nGzkvnsMujcGwSz3fOdS0nERrQ0uDkiIwKCmixW2YXRkwZEnCzVMv83hecJMMo6mLmcN47LENbsc5\nOQuRk7PQ7bH+/ZMxdux4/TguLhxlZdWYP/9mzJ9/s/74ZQOicNkAbUj1jjvuxh133N3i7w0YMBBb\ntmxr8fiSJbdhyZLb3B675JJL8ac/vaQfO1/vxhtn48YbXZP5kZFRePbZP7b6/mbOzMS/votCeVUj\nJqa51vwcOfI5rr9+GkJDPe9pERUW1O5n/WIwYLShpt41BilLEiRZdZQB1z54+hyGI8MwyAaYFCOC\nDWZUWbS7Jupt9XoZa5NRhkEKgiQJ2GXt22WoY4evIKOCEGOIXozQVurKDgaEuYJBRBsZRncINgQj\nvOpK1Kv1LdYGRAZFtJr9yJKMX123Fke/rcCfoN3RpLRykfNmYri5pkUcASDM6Brisl+Id5UfaSXD\naHUMv5PXMOeF2K4K/XZGp7aGl5rOJ3h67821ty6jPT0xJBXUZsDoMyPhF8dxyfrd7zbho48O4skn\nt/Rse9CXAkaTPbj3HzmDlIRwpCS2/MYuhMAbJ97BB7YPIIeNhloTjeo6K2CyA6pZ2zcZTSe9HUNS\njrUNEaYIVFq0Mc16W4Ne9MxkUGCStAuW3VAHBUBEkHaRM5sUfW2EWh8CUedaHxAb7PrW2XSSuic4\n747pSOXLaHMUgg1NbkFu5SLqaYiitQtM8zH60CYBo+l6ioSQJsX3HHNMVbUtV5o3L/zmrbYmwYG2\nA0bTi3dHq4h6muNoS2cDzcVoOiTV1MVkGH2B89+i84q1YsWqnmtMM30m1NubFK/Z+dZxvPHh962e\n91Hxp/jn6X2QjFZHZVSgqs4CSCqEKusZhr4VqbPOk+NiHmkKR621DjbVhjpbvV6O2qBISIrSAkGQ\no8rpgOh+UGQJ/WND9Q2Y1NpITBntWk0sSzIWXp6Nm4e2vBOkuzk3u4+NbGXvhHa43XrZygWvMxmG\ncyhh2mhtTsOstFzwd/nAKIQYQ/QsbdwQbf5mUFIrmVonr2FtTYK3J7RJIb6Oftt2ZkfhIZ3LGDob\ncDojMtQEk9G1SND576Z5JkbuenP39J0Mo9mQVFVdy2+ZAHDgzMf6zwYDYAdQWdsAqZ+AJBRXcT/F\nimnXDMC732jrAYyKI8NwLAYrrTsHi90CYXUsvpMkJEVG4kgFYIMFI+PSMOuydMxcriA4yIC3/q39\nK8m6ZjRmDnatvgaAHzW5lbYn/XTWMMwcl4Kkfh27JdKt5EJrAcPTraetPJ+SGI7fPTCp1QvnM3np\nsFhVRDjWMeRdcy++vXASI/pdgYUTbK0Oz3T2M9pehtHWIGjTINGZfQqeWpEOo6HjLX4mL73TmVRn\nBAcZsOme6/Ry448vG49Gq/2ib+vtK3qohFW7+mzAaG1hnipUFNWc0Y8jwhSUAaiu14afTIoBjfq2\nmlaMuiwW755wZhiOW9kM2sX0bK1294yzrpIkAcFG1wRs7ojF2i2pjv8Cd111Gw6e+Q+mX3pdr/1A\nGRTZbeWxtzxlGJ6+ZbeVgUQ0Wdg2Kj4NpxtO49p+Wplqc5M1b0GKCVc6VrV39Vh+cHs1mbz4wHcm\nYISYO/ex7apd1zoivEmpE4Mic/7CC66Pf++LGH0nYNjdO7+2WcAQQuBc3Xk02i0wS2FoEDVw7pVS\n3dAIAwBFcmyPaTVBMjZiQHyYPodhUrTnzAYtQJyt1Uo06HWOmq0JaLp+AQAujbgEl0a0Xk7a3zXN\nMFobjvC0uYs3m78YZAPuGrMEZWVtlWloX2djdHsXYW8+7ryAUlt6X7joSwGjWQH+2gYb7KoKxXFP\n/rbXv8TnZV/ANASIkhJQLGrgqAQAq2prGTDMtQgJUiDJzoDhvgjvrVPawh1nwDAbFZQ3qT/VlzS9\nM6q1DKM3jGl3tAKrU2hwOwHDizGFjlQhpb6hX4QZZRUNCA9ufYOonuTzgLF//348/vjjEEJg/vz5\nWLZsmdvzZ8+exerVq1FdXQ1VVZGXl4fJkyd3eTtau622tsGm1zb65OsyKLGOrVCFNrlrNDqW6jdZ\nawEAsJkgSY6tVx0BI8jgWEltcJ8QXnx9Gi6cicDll0QBF5IAoNUKr4HMLcNoLWB4uGh2xxDd0IFR\nmH1dCkYPbbmlaXuS+oUi+0eD9HpOD908Er/9y2GPv7d68Sh8W1jZar0o6tvumD0c//jPacydeGlP\nN6UFnwYMVVWxYcMG7Ny5E/Hx8ViwYAGmTZuG1FRX0btt27Zh1qxZWLhwIU6ePIk777wT+/bta+dV\nO6fVgFFvRUSIybUK3BEYZKF9iA3O4W5HUHBmGMFKCCwAqi01gKT9jp5hKO4BY2hiIpKHaIHiipjL\nsHrMciSHJnXZ+/IHngKGp3jQHd/BJUnCj9NTPZ/YijkTB+k/jxgUg4HxYThdWtPupOXll0TrQYao\nqZgIs77TYG/j0wHUo0ePIiUlBcnJyTAajcjMzMTevXvdzpEkCTU1WgmDqqoqJCQktPZSF635HAbg\nmvgur3Ls1OYIDJLzVliDM5Boj+sbGtm1eYoaa22TDEP7HXOTDCM18lL0D3Wt1gSAS8IHtJi/CHSe\n5jA8DUn1ghGrDvGz5hJ5zacZRklJCZKSXN+mExIS8MUXX7idc//99yM3NxcvvPACGhoa8Pzzz/uk\nLc3nMABXwCi+4Cho5pjA/vpULUypgKw4Aogji5BaCRh6kHEU12saMG4amtVr73jqTp7u/fc8h+Fn\nfehnzSXylk8DhjeTfvn5+Zg/fz5uv/12HD58GKtWrUJ+fr7H34uL81yDvqngkJYLuyRFQVxcOGzf\nOfZWcFz8nWXHTWatrr/z8eEpcTh9BLhueAr2lR2BMFmR0j8UZwAMHhCLuLhw1BtdpcFTkhIQE9yx\ndnZGR/uiu9VYXcG6aVunXjsQ+z45jauu
SEBkWMv/PjdOuBRvHTyFMWlJ6BfZflG+1l6/pxgdE+hG\nk9Kj7ekNfdFbsC+6hk8DRmJiIs6cca1rKCkpQXy8e4XLV155Bc899xwAYOTIkWhsbER5eTliYtov\nxNbR2ycrK+tbPHa2rBplZdU478wwZOdeDY7yHxYLQs1G1Dke7xcWjGcfmoTvq7/HvjKguPw8RgyK\nxJkCQG1UUVZWjfoGV8mKxiqBsprO3ebpLWdhtd6sssJVkrppW5dMG4KbJg+Gpd6CsvqWCylzJg/G\nvOtSoFpsXr3H3tIXNpv276Wx0bt2+0Jv6YvegH3hcrGB06dzGGlpaSgoKEBRUREsFgvy8/Mxbdo0\nt3P69++PAwe0vRpPnjwJi8XiMVh0RmuT3s4hKYujLpHkGJISqgFCAHZhQ2iwUb9LyigbYTIqCHPs\n81BtrYXVsamSwbFwz9xk0tvQw7Wfeou27oKSJMljjaOeqIFERK3z6RVNURSsW7cOubm5EEJgwYIF\nSE1NxdatW5GWloYpU6Zg9erV+PnPf46dO3dClmX8z//8T5e2obSiHmFmY6tzGM7FexbHN0LnXVJQ\nZUBVYBM2hAUbcM6qfft1VkR17vNQY6lBiFFb2e2sJeVcuEcufW2tQfMd04gChc+/AqenpyM9Pd3t\nseXLl+s/p6am4qWXXmr+a13is2/K8NSrXyA4yICMMQNbPF9Tr627cGYYzklvqDIgZNiEHZEhJkj1\n2oK7CMd+FGGOIFFjrdVrSDkDhizJkCAhOaxv3TrbntZKmgeyhJgQnCqu7nCRRqLeLqDHTM5VarfL\n1jfaWpTDBoAaRwFC5/af+qS3UABVhk21Ys7ES1H+5ccoBRDpKCyoyApCDSGottSgzlYPo2xw26ti\n65QnfPiu/E9fyzCW3DAUybGhmOqopEsUKAI6YDTdx7uhlYBRGvI5vjwXAoujUrnzFlmoMoQqw6ra\ncGliBGKLJZSWa3tdOIWZQlFcp9WLuixqsNteFbLE+kBNdWdJ7d4gLFjbgpYo0AT0lc3WZKK7+Q5t\nMDagMeobbDv6vGsOo+mQlKrApmqRpNJShSDF5DY/Ue7YHQ8ALo/unasye4u+lmEQBaqADhhNM4wD\nXxa7PRfUpISPPofhzDCENofhvAOqylKtz184OSe+r44dgWmXuM/RkLu+lmEQBarAHpJqZx/vsHAJ\nztUBFpsKJeYslIhyCFUCIOlDUnbVjhpLLeIj3YvS3X3V7ThR+T0mJ/fe/St6C/YPUWAI6IBhs7e8\nldYpOFi4AobVDtOQIwAASXYEGVWGKlRcaKyAgEC0OdLt9weE98eA8P6+aHbAYYZBFBgCfEiq7QzD\nFOya09DnMJpy1IYqrdP22o4OiuraxvUhnMMgCgyBnWG0MSQlRxfjbJhrzwJLK3dQCUd5kMJqrbRJ\njJkBo7MYMIgCQ4BnGK0PSRmTvnc7Pnu+rsU5wqIVuztZqZ0bzYDRab1hRz0iuniBHTBayTDWLBmN\n/v3Cmj3qfp7JKGP2tcMAACcqTgHgkNTFemB+GtbfPqanm0FEFyGwh6RayTAGxIUhvCwIxU2315bd\nh6TSBvVDapwROAM02LXV4lHNJr2pY0Zd1rGtT4mo9wn4DMOQ9B2MqYfhzCIUWYLavCpcs4DRaLMj\nxuzaPlOChBCDd/sxEBEFqoAOGFa7DcaB38DQrxgwaHWjFEVCjbXG7TypWcCwWlW3gBFiCGa5DyLq\n8wL6KlituDZvks11SEkIhyJLqLI020xFsQNCm5iV6qKxaPplWikQx94WIUZmF0REAT2H0ShX6T9L\nQXVY/9MxsNitqLc1wCAZYLUCksGmD0n1D03E6uuX6xsfRZjC0FDfoO95QUTUlwV0hmEXrm0/JbN2\n62xFYyUAYHjUlbCVpGjPKTZAEgg3hbntkucsNhiscF8DIqLADhiSVf9ZCnIGjAoAQIw5ErBrq7kl\ng3ObVfeES5G051Vw6zQiosAOGHAFDNlchxMV3+O7ygIAQGxIDITq2C/aETCMzQOGrD1vd5Q5JyLq\nywJ6DkN1ZBjCrkAOq8T//Wyb/lxCWAx+lDYQh6q/ajPDMEjasV20XcSQiKivCOgMwxkw1NqIFs9F\nm6NwSZxjMZ7SesBw1o+KbLYXBhFRXxTgGYYNQgCiLhyIuOD2XHRQJIIUbRclSR+SMrqdM/+yOTAb\nzMhImdo9DSYi6sUCNmD885PTsAkLJFWB2hCqPy5Bwk1D58FsMMNs1AKEpGhzFAbHnIWT2WDG/Mvm\ndF+jiYh6sYAdknrpX98Cih2SasSg8MH64z8fl4f0AdcBAMwGxz6tbWQYRETkErABA3BkDnYFa3Mm\n64/FmGP0n4ONQY7zHHMYknuGQURELgE7JAUAUGxQHftaPDZ+FSobK2FSXFlEkJ5haENSzDCIiNoW\nwAFDhSSrUG1a1pAQEoeEEPcS284AoWcYSgB3BxHRRQrcISnHRDbUtoOAHjAcGYZz3QUREbUUsAFD\nMtcDAISl7TpQpmYZRfOV3kRE5BKwAUMO1kqYq3VtL7prPmfRfOEeERG5BG7ACNEChqhvvn+3iyIp\naLr5HjMMIqK2BdwV8u8ffo+TZ6ogBWu76rWXYUiSBKiKtoESmGEQEbUn4K6Qr3/4PQAgaLgNQpVw\n95yr2/8FVWbAICLyQsAOSUFSIUPB2GEJ7Z6mlzgH12EQEbUncAOGrEISXqzcVl1d0LyWFBERuQRu\nwJBUSN68PWYYRERe8XnA2L9/P2bOnImMjAxs37691XP27NmDzMxMzJkzBw8//HCX/F3JywxDbrJY\nj3MYRERt8+kVUlVVbNiwATt37kR8fDwWLFiAadOmITU1VT/nhx9+wI4dO/CXv/wFYWFhKC8v75o/\n7mWGkZoYjZNV2l4ZvK2WiKhtPs0wjh49ipSUFCQnJ8NoNCIzMxN79+51O+evf/0rFi9ejLAwbb1E\nTExMay/VcbI26e2Js2ItwAyDiKg9Pg0YJSUlSEpK0o8TEhJQWlrqds6pU6fw/fffY9GiRVi4cCE+\n+OCDrvnjkgrJi4BhNrgCBjMMIqK2ebxClpSUICGh/VtT2yKaLqNug91uR0FBAXbt2oUzZ85gyZIl\nyM/P1zOOzhGQZAFZ9RwwghRmGERE3vB4hZw/fz5GjRqFxYsXY8KECR168cTERJw5c0Y/LikpQXx8\nvNs5CQkJGDVqFGRZxoABAzBo0CCcOnUKV155ZbuvHRfX9gpuSFqgMshK++cBiC5yPZ8YH6Wt/vYz\nnt5jX8K+cGFfuLAvuobHgLFv3z7s2bMHv/vd77BhwwYsWbIE8+bN8yoDSEtLQ0FBAYqKihAXF4f8\n/Hxs3rzZ7Zzp06cjPz8fWVlZKC8vxw8//ICBAwd6fO2ysuq2n5RU7f+F3P55AFSLK0CcO1fj8e/2\nNnFx4R7fY1/BvnBhX7iwL1wuNnB6DBgmkwlZWVnIysrCZ599hry8PPz2t79FdnY27r33XvTr16/N\
n31UUBevWrUNubi6EEFiwYAFSU1OxdetWpKWlYcqUKfjRj36Ef//738jMzISiKHjkkUcQGRnZ6Tck\nSYCQtYCheDPp3WRIioiI2ubVoH1RURF2796NN998ExMmTEBOTg4++ugjLF26FK+//nq7v5ueno70\n9HS3x5YvX+52vGbNGqxZs6aDTW9JCKFVn3VkGLIXGyIFGRgwiIi84fGKevfdd+Obb77BwoUL8eqr\nryI6OhoAMHr0aOzZs8fnDewIu6rNXUgdyDDMzDCIiLziMWDMmzcPM2bMgKK0vPi++eabPmlUZ9nt\njruy9AyjY3dJERFR2zyuw4iMjERdXZ1+XFVVhYMHD/q0UZ3lzDDgyDAMXgQMs6HtLVyJiMjFY8DY\nuHGj2x1RYWFh2Lhxo08b1Vl21XF3lCPDULyYw+CQFBGRdzwGDCGE29oEWZZht9t92qjOcs1haO1T\nOCRFRNRlPAaM0NBQHDlyRD8+cuQIQkJCfNqoztLnMJyT3l4EDO6BQUTkHY9jNqtWrcJ9992HIUOG\nAABOnDiBp556yucN6wzXkJQWOIQX5c0jTOEwK0EYkzjal00jIvJ7HgPGqFGjkJ+fj8OHD0MIgVGj\nRl3Uwjpf0ie99ZXenst8KLKCJ9N/6ZclQYiIupNXC/ciIyMxefJkX7flojVfh+HVFq0AgwURkRc8\nBozjx49j/fr1OH78OCwWi/74V1995dOGdYbdLgDFCslUrz2gBu4OtERE3c1jwPjFL36BFStW4Ikn\nnsCOHTuwa9cuhIaGdkfbOsyuCgSNOAjZrK0bEQwYRERdxuMV1WKxYMKECRBCID4+HitXruy6TY66\nmF1V9WABAAaVi/KIiLqKx4Ahy9opkZGROH78OC5cuICioiKfN6wzVNV9wyazLbaHWkJEFHg8Dkll\nZmbiwoULWLZsGRYtWgRVVVtUm+0tbM0CRlRocA+1hIgo8LQbMFRVxYQJExAdHY309HR8/PHHaGxs\nvMjtU33HbhcQqgxJVjFWmY/Z4y/t6SYREQWMdoekZFnGz372M/3YaDT22mABAFa7DZKsop+cjNsm\nj0NwEPfoJiLqKh7nMFJTU1FYWNgdbbloFrt2269RMvVwS4iIAo/Hr+Dl5eWYO3currnmGrcaUlu2\nbPFpwzqjwd4IADDKDBhERF3Nq0nvzMzM7mjLRWu0OTIM2djDLSEiCjweA0Z2dnZ3tKNLNNgbAAAm\nmSXLiYi6mseAsXz58lZrLfXGISmLygyDiMhXPAaMKVOm6D83NjbinXfeQWpqqk8b1VnOgBHEDIOI\nqMt1eEjqxz/+Me655x6fNehiOO+SMimc9CYi6modrs4nSVKvvc3Womp3SXGfbiKirtehOQwhBL7+\n+mtMmDDB5w3rjEbHbbVmA4sOEhF1tQ7NYSiKgtzcXIwcOdKnjeqsRlXbByPM2Dv3HCci8mcBdVtt\no3AGjN5bvoSIyF95nMNYtGgRKisr9eOKigosWbLEp43qrEahrcMIN/XODZ6IiPyZx4BRV1eHyMhI\n/TgqKgo1NTU+bVRnWVEPoUoIMXIOg4ioq3kMGKqqoq7OtYtdbW0t7Ha7TxvVWVbRANhMMBqUnm4K\nEVHA8TiHMXv2bOTm5mLRokUAgJdeeglz5871ecM6wyo1QNiCoCgtV6YTEdHF8Rgw7rrrLsTHx2Pf\nvn0QQmDhwoXIysrqjrZ1iF21Q5WsELZwGJUOLy8hIiIPvNphKDs7u9ffLVVcVwoAEFYTFAYMF7dU\nkAAAFDhJREFUIqIu5/HK+sADD6CiokI/vnDhAh588EGfNqoz3jq1FwBgP5/EDIOIyAc8XllPnz6N\nqKgo/Tg6OhoFBQU+bVRnlNaVQVKNUCviOYdBROQDHgOG3W53uyvKarXCYrH4tFGdUW9rgKwaIUGC\nIjNgEBF1NY8BY9KkSVi5ciU++eQTfPLJJ8jLy0N6errXf2D//v2YOXMmMjIysH379jbPe/vtt3HF\nFVfgv//9r9ev3VS9rQGSaoSiyK3u30FERBfH46R3Xl4efv/73+M3v/kNAK221Lhx47x6cVVVsWHD\nBuzcuRPx8fFYsGABpk2b1mI/jdraWrz44oudrlElhECDrQGKGgIDh6OIiHzCY4ZhNBpx//334+mn\nn8YNN9yAv//971i7dq1XL3706FGkpKQgOTkZRqMRmZmZ2Lt3b4vztmzZgjvvvBNGY+d2ymu0WyAg\nALsBBk54ExH5RLsZhs1mw759+/C3v/0Nhw8fhs1mw3PPPed1JlBSUoKkpCT9OCEhAV988YXbOV99\n9RWKi4sxefJk7NixoxNvwbWXN+xGZhhERD7S5tfxJ554Atdffz12796N2bNn4/3330dkZGSHho2E\nEB6ff/zxx7FmzRqvf6c1DTYtYAhmGEREPtNmhvHSSy9h1KhRWLZsGcaPHw8AHZ5MTkxMxJkzZ/Tj\nkpISxMfH68e1tbU4ceIEfvKTn0AIgXPnzuHee+/Ftm3bMGLEiHZfOy4uXP/5glSm/WA3IMhkcHuu\nL+hr77c97AsX9oUL+6JrtBkwPvzwQ/zv//4vNm7ciMrKSmRlZXW46GBaWhoKCgpQVFSEuLg45Ofn\nY/PmzfrzYWFhOHjwoH78k5/8BI8++iiGDx/u8bXLyqr1n8+eLwcA2K0KpGbPBbq4uPA+9X7bw75w\nYV+4sC9cLjZwtjl+ExERgSVLluDVV1/F008/jcrKSjQ0NGDJkiXYvXu3Vy+uKArWrVuH3NxczJ49\nG5mZmUhNTcXWrVvx7rvvtjhfkqTODUk5tmZVbQrnMIiIfEQSHbhCW61W/POf/8Rrr72GP/zhD75s\nl0dlZdUoqCrEy9/+HcNiLkP+9/+E5WQaBpmHY+1PrunRtnUnfntyYV+4sC9c2BcuF5theFV80Mlo\nNGLWrFmYNWvWRf3RrrL7m9fwQ9VpfFd5CoA26W2x9c69OoiI/J1f31IUFRTp/oDdgPOVDT3TGCKi\nAOfXASM2OMbtWNgNqG2w9VBriIgCm18HjBbsnVspTkREnvl1wLCr7vMVwm5AanJED7WGiCiwdWjS\nu7exqe7DT4umXIGJIwb0UGuIiAKbX2cYNuGeYQzpH4MQs1/HQCKiXsuvA0bzISmzSemhlhARBT6/\nDhjlNfVux2YTswsiIl/x64BRXe++5oIZBhGR7/h1wLA3m8MIMjJgEBH5SkAFDFlm4UEiIl/x64Ch\nwhUwbGXJPdgSIqLA59ezxKpQIVQZDZ9NBwSzCyIiX/LrgGEXNkDI2v+IiMin/PpKq0JlsCAi6iZ+\nfbUVsEOofv0WiIj8hl9fbbUMg3MXRETdwa/nMATsgDBg2ugBGDs8vqebQ0QU0Pw8YKiAKuOmqakw\nGrhoj4jIl/x6SEpI2qS3ovj12yAi8gt+faUVjrukZInzGEREvua3AUMVKiAJSP77FoiI/IrfXm2d\nu+1JXIdBRNQt/PZqa3NsniSBk91ERN3BbwOGs1Kt7L9v
gYjIr/jt1VYfkmKGQUTULfw4YDgzDAYM\nIqLu4LcBwy60DINDUkRE3cNvr7ZWZ4YhMcMgIuoOfhswnHMYCgMGEVG38NuAYbE7h6QYMIiIuoP/\nBgybFQCgyAwYRETdwW8DRqMzYDDDICLqFn4ZMKpqLbDYtCEpAzMMIqJu4Zf7YSx57C0kDq4AYjnp\nTUTUXXyeYezfvx8zZ85ERkYGtm/f3uL5nTt3IjMzE/PmzcNPf/pTnD171qvXLausAwAYZL+MeURE\nfsenAUNVVWzYsAHPPfcc3nzzTeTn5+PkyZNu5wwfPhyvvvoq3njjDcyYMQMbN2707sUlFQADBhFR\nd/FpwDh69ChSUlKQnJwMo9GIzMxM7N271+2csWPHIigoCAAwcuRIlJSUePfishYwjAoDBhFRd/Bp\nwCgpKUFSUpJ+nJCQgNLS0jbPf+WVV5Cenu7di+sZBucwiIi6g0+/ngshvD73jTfewH//+1+88MIL\nXp0vSdprh4cEIy4uvFPtCxR9/f03xb5wYV+4sC+6hk8DRmJiIs6cOaMfl5SUID4+vsV5Bw4cwPbt\n2/Hiiy/CaDR69+KOISnVBpSVVXdJe/1RXFx4n37/TbEvXNgXLuwLl4sNnD4dkkpLS0NBQQGKiopg\nsViQn5+PadOmuZ1z7NgxrF+/Htu2bUN0dLT3L+4YkjJxDoOIqFv49GqrKArWrVuH3NxcCCGwYMEC\npKamYuvWrUhLS8OUKVOwadMm1NfX48EHH4QQAv3798czzzzj+cUl56Q35zCIiLqDz7+ep6ent5jI\nXr58uf7z888/36nXlWRnhuHlEBYREV0UvywNAgBwTHqbDAwYRETdwS8DRnR4EOcwiIi6mV8GDINB\n1u+SCvL2rioiIroo/hkwZFnPMIKYYRARdQv/DBgGSV+4Z2KGQUTULfwyYCiya0jKzElvIqJu4ZcB\nw6BI+pCUmRkGEVG38NOA0WQOw8A5DCKi7uCXAUNRZEiyCqHKMBm40puIqDv4ZcAwKjKg2AC7AqPB\nL98CEZHf8curraJIkIyNENYgBgwiom7il1dbWRGQDDYIaxAUxS/fAhGR3/HPq63SCAAQVhNkSerh\nxhAR9Q1+GTBUgzNgBPVwS4iI+g6/DBh2uR4AINkZMIiIuotfBgxVbgAAGEVwD7eEiKjv8MuAcVoc\nAQAEqRE93BIior7DLwNGg1QJ27n+CFHjeropRER9hl8GDABQq2IQEsSyIERE3cV/A0ZdOIIZMIiI\nuo1/BgwBiPowmE2sI0VE1F38MmAEl6cBQkFYMEubExF1F78MGOvmLsaVg2Iwd+Kgnm4KEVGf4ZeT\nAEMGRCHv5pE93Qwioj7FLzMMIiLqfgwYRETkFQYMIiLyCgMGERF5hQGDiIi8woBBREReYcAgIiKv\nMGAQEZFXGDCIiMgrDBhEROQVBgwiIvKKzwPG/v37MXPmTGRkZGD79u0tnrdYLFi5ciVmzJiBm2++\nGWfOnPF1k4iIqBN8GjBUVcWGDRvw3HPP4c0330R+fj5Onjzpds4rr7yCyMhI/OMf/8Btt92GTZs2\n+bJJRETUST4NGEePHkVKSgqSk5NhNBqRmZmJvXv3up2zd+9eZGdnAwAyMjJw8OBBXzaJiIg6yacB\no6SkBElJSfpxQkICSktL3c4pLS1FYmIiAEBRFERERKCiosKXzSIiok7wacAQQnT4HCEEJEnyVZOI\niKiTfLqBUmJiotskdklJCeLj41ucU1xcjISEBNjtdtTU1CAyMtLja8fFhXd5e/0V+8KFfeHCvnBh\nX3QNn2YYaWlpKCgoQFFRESwWC/Lz8zFt2jS3c6ZMmYLXXnsNAPD2229j/PjxvmwSERF1kiS8GTe6\nCPv378evf/1rCCGwYMECLFu2DFu3bkVaWhqmTJkCi8WCVatW4auvvkJUVBQ2b96MAQMG+LJJRETU\nCT4PGEREFBi40puIiLzCgEFERF5hwCAiIq/4XcDwVJsq0KxduxbXXXcd5syZoz9WWVmJ3NxcZGRk\nYOnSpaiurtaf+9WvfoUZM2Zg3rx5+Oqrr3qiyT5RXFyMW2+9FbNmzcKcOXPw5z//GUDf7AuLxYKc\nnBxkZWVhzpw5eOqppwAAhYWFuOmmm5CRkYG8vDzYbDb9/ECv16aqKrKzs3H33XcD6Lt9MXXqVMyd\nOxdZWVlYsGABgC7+jAg/YrfbxfTp00VhYaGwWCxi7ty54sSJEz3dLJ/6z3/+I44dOyZmz56tP7Zx\n40axfft2IYQQv//978WmTZuEEEK899574s477xRCCHH48GGRk5PT/Q32kdLSUnHs2DEhhBA1NTVi\nxowZ4sSJE32yL4QQoq6uTgghhM1mEzk5OeLw4cPiwQcfFHv27BFCCPHYY4+Jl156SQghxK5du8T6\n9euFEELk5+eLFStW9Eibfen5558XDz30kLjrrruEEKLP9sXUqVNFRUWF22Nd+RnxqwzDm9pUgeba\na69FRESE22NN629lZ2frfbB3715kZWUBAK6++mpUV1fj3Llz3dtgH4mLi8OwYcMAAKGhoUhNTUVJ\nSUmf7AsACA4OBqB9Y7bZbJAkCYcOHUJGRgYArS/+9a9/AQj8em3FxcV4//33kZOToz/20Ucf9cm+\nEEJAVVW3x7ryM+JXAcOb2lR9QXl5OWJjYwFoF9Ly8nIA7nW5AK1/SkpKeqSNvlRYWIjjx4/j6quv\nxvnz5/tkX6iqiqysLEycOBETJ07EwIEDERERAVnWPtKJiYn6+w30em2PP/44HnnkEb2k0IULFxAZ\nGdkn+0KSJCxduhTz58/Hyy+/DABd+hnxaWmQria4ZKRdrfVPoNXlqq2txfLly7F27VqEhoa2+f4C\nvS9kWcbrr7+Ompoa3HfffS22DQBc77d5X4gAqtf23nvvITY2FsOGDcOhQ4cAaO+v+XvuC30BALt3\n79aDQm5uLgYNGtSlnxG/Chje1KbqC/r164dz584hNjYWZWVliImJAaB9QyguLtbPKy4uDqj+sdls\nWL58OebNm4fp06cD6Lt94RQWFoYxY8bgyJEjqKqqgqqqkGXZ7f06+6Kj9dr8wWeffYZ9+/bh/fff\nR2NjI2pra/H444+jurq6z/UFoGUQABATE4Pp06fj6NGjXfoZ8ashKW9qUwWi5t8Epk6dildffRUA\n8Nprr+l9MG3aNLz++usAgMOHDyMiIkJPRQPB2rVrMWTIENx22236Y32xL8rLy/U7XRoaGnDw4EEM\nGTIE48aNw9tvvw3AvS+mTp0asPXa8vLy8N5772Hv3r3YvHkzxo0bhyeffLJP9kV9fT1qa2sBAHV1\ndfjwww8xdOjQLv2M+F1pkNZqUwWyhx56CIcOHUJFRQViY2PxwAMPYPr06XjwwQdx9uxZ9O/fH1u2\nbNEnxn/5y1/igw8+QHBwMJ544gmMGDGih99B1/j0009xyy23YOjQoZAkCZIkYeXKlbjqqquwYsWK\nPtUXX3/9Nda
sWQNVVaGqKmbNmoV77rkHp0+fRl5eHqqqqjBs2DBs2rQJRqOxz9Rr+/jjj/HHP/4R\nzz77bJ/si9OnT+P++++HJEmw2+2YM2cOli1bhoqKii77jPhdwCAiop7hV0NSRETUcxgwiIjIKwwY\nRETkFQYMIiLyCgMGERF5hQGDiIi8woBBfu2mm25CdnY2MjMzMWLECGRnZyM7Oxtr167t8Gvdcccd\nXpW7fvTRR3H48OHONLdDjh07hnfeecfnf4fIW1yHQQGhqKgICxYsaLf6qLNUhL94+eWXcfDgQWze\nvLmnm0IEwM9qSRF1xMGDB7Fp0yaMHDkSx44dw3333Yfy8nLs2rVL31BnzZo1GDt2LABg8uTJ2Llz\nJwYNGoTFixdj1KhR+Pzzz1FaWorZs2djxYoVAIDFixfj3nvvxaRJk7Bq1SqEhYXh5MmTKCkpwejR\no/HEE08A0GrzPPLII7hw4QIGDhwIu92OqVOn4uabb3Zr57lz5/DQQw/hwoULAIBJkybhjjvuwDPP\nPIO6ujpkZ2dj3LhxWLNmDT7//HNs3rwZ9fX1AIDly5cjPT0dBQUFWLx4MWbPno1PP/0UFosF69ev\nx+jRo7ulr6mPuJjNOoh6i8LCQjF+/Hi3xw4cOCCGDx8uvvjiC/2xppvLnDhxQlx//fX6cXp6uvju\nu++EEEIsWrRIPPTQQ0IIIaqqqsTYsWNFYWGh/twHH3wghBDi4YcfFrfccouwWq2isbFRzJw5Uxw6\ndEgIIcQ999wj/vCHPwghhDh9+rQYNWqU2L17d4u279ixQzz22GP6cVVVlRBCiL/+9a8iLy/Pre1Z\nWVni/PnzQgghiouLRXp6uqipqRE//PCDuPzyy0V+fr7+3q+//nphs9m870QiD5hhUEAbPHgwrrzy\nSv341KlT2Lp1K0pLS6EoCkpLS1FRUYGoqKgWv3vjjTcCAMLDwzFo0CAUFBQgOTm5xXk33HADDAbt\nozR8+HAUFBRg7NixOHToEH71q18BAAYMGKBnMs2NHDkSL774Ip588kmMGTMGkyZNavW8Tz/9FIWF\nhVi6dKlekFJRFJw+fRohISEIDg7GrFmzAAATJkyAoig4deoUUlNTve0uonYxYFBACw0NdTteuXIl\n1q9fj8mTJ0NVVVx11VVobGxs9XeDgoL0n2VZht1u79B53u6zcM011+C1117DgQMH8Le//Q07duzA\nCy+80OI8IQRGjBiBnTt3tniuoKCgxWOqqgbUXg/U8/xnBpDIA+HF/Rs1NTV6ddLdu3e3GQS6wtix\nY/Wy0kVFRfj4449bPa+wsBBhYWGYNWsW1qxZgy+//BKAtteFs4w5AIwePRonTpzAJ598oj929OhR\n/ef6+nrs2bMHgLZFKQCkpKR07ZuiPo0ZBgUMb75Nr127FsuWLUNSUhLGjRuH8PDwVn+/+Wu19Vx7\n561btw6rV69Gfn4+Bg8ejNGjR7v9PaeDBw/iz3/+MxRFgRACGzZsAABMnDgRf/rTn5CVlYXx48dj\nzZo1eOaZZ7Bp0yZUV1fDarVi4MCBePbZZwEAsbGx+Pbbb5GTkwOLxYLNmzdDURSPfULkLd5WS+Qj\njY2NMBqNkGUZJSUlyMnJwa5duzBw4MAu/1vOu6Q+/PDDLn9tIidmGEQ+8t133+HRRx+FEAKqqmLl\nypU+CRZE3YUZBhEReYWT3kRE5BUGDCIi8goDBhEReYUBg4iIvMKAQUREXmHAICIir/x/apbYj523\no60AAAAASUVORK5CYII=\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x7f72f867ef90>"
+ "\u003cmatplotlib.figure.Figure at 0x7f97f1330850\u003e"
]
},
"metadata": {
"tags": []
- }
+ },
+ "output_type": "display_data"
}
+ ],
+ "source": [
+ "def plot(train, test, label):\n",
+ " plt.title('MNIST model %s' % label)\n",
+ " plt.plot(train, label='train %s' % label)\n",
+ " plt.plot(test, label='test %s' % label)\n",
+ " plt.legend()\n",
+ " plt.xlabel('Training step')\n",
+ " plt.ylabel(label.capitalize())\n",
+ " plt.show()\n",
+ " \n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " hp = tf.contrib.training.HParams(\n",
+ " learning_rate=0.05,\n",
+ " max_steps=tf.constant(500),\n",
+ " )\n",
+ " train_ds = setup_mnist_data(True, hp, 50)\n",
+ " test_ds = setup_mnist_data(False, hp, 1000)\n",
+ " tf_train = autograph.to_graph(train)\n",
+ " all_losses = tf_train(train_ds, test_ds, hp)\n",
+ "\n",
+ " with tf.Session() as sess:\n",
+ " sess.run(tf.global_variables_initializer())\n",
+ " (train_losses, test_losses, train_accuracies,\n",
+ " test_accuracies) = sess.run(all_losses)\n",
+ " \n",
+ " plot(train_losses, test_losses, 'loss')\n",
+ " plot(train_accuracies, test_accuracies, 'accuracy')"
]
},
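For reference, the cell above follows the standard autograph workflow in this notebook: convert a Python training function with autograph.to_graph, call the converted function to build graph ops, then fetch the results with a single Session.run. A minimal sketch of that pattern under the same TF 1.x contrib API; count_to is a hypothetical stand-in for the notebook's train function:

import tensorflow as tf
from tensorflow.contrib import autograph

def count_to(n):
  # Data-dependent Python loop; autograph rewrites it as a tf.while_loop.
  i = 0
  total = 0
  while i < n:
    total += i
    i += 1
  return total

with tf.Graph().as_default():
  tf_count_to = autograph.to_graph(count_to)  # returns a graph-building function
  result = tf_count_to(tf.constant(10))
  with tf.Session() as sess:
    print(sess.run(result))  # 45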
{
+ "cell_type": "markdown",
"metadata": {
- "id": "HNqUFL4deCsL",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "HNqUFL4deCsL"
},
- "cell_type": "markdown",
"source": [
"# 4. Case study: building an RNN\n"
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "YkC1k4HEQ7rw",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "YkC1k4HEQ7rw"
},
- "cell_type": "markdown",
"source": [
"In this exercise we build and train a model similar to the RNNColorbot model that was used in the main Eager notebook. The model is adapted for converting and training in graph mode."
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "7nkPDl5CTCNb",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "7nkPDl5CTCNb"
},
- "cell_type": "markdown",
"source": [
"To get started, we load the colorbot dataset. The code is identical to that used in the other exercise and its details are unimportant."
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "A0uREmVXCQEw",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "A0uREmVXCQEw"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def parse(line):\n",
" \"\"\"Parses a line from the colors dataset.\n",
@@ -1137,7 +1034,7 @@
" A tuple of three tensors (rgb, chars, length), of shapes: (batch_size, 3),\n",
" (batch_size, max_sequence_length, 256) and respectively (batch_size).\n",
" \"\"\"\n",
- " items = tf.string_split([line], \",\").values\n",
+ " items = tf.string_split(tf.expand_dims(line, 0), \",\").values\n",
" rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.0\n",
" color_name = items[0]\n",
" chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)\n",
@@ -1169,23 +1066,21 @@
" dataset = dataset.repeat()\n",
" if training:\n",
" dataset = dataset.shuffle(buffer_size=3000)\n",
- " dataset = dataset.padded_batch(batch_size, padded_shapes=([None], [None, None], []))\n",
+ " dataset = dataset.padded_batch(batch_size, padded_shapes=((None,), (None, None), ()))\n",
" return dataset\n",
"\n",
"\n",
"train_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/train.csv\"\n",
"test_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/test.csv\"\n",
"data_dir = \"tmp/rnn/data\""
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "waZ89t3DTUla",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "waZ89t3DTUla"
},
- "cell_type": "markdown",
"source": [
"Next, we set up the RNNColobot model, which is very similar to the one we used in the main exercise.\n",
"\n",
@@ -1193,17 +1088,19 @@
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "9v8AJouiC44V",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "9v8AJouiC44V"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def model_components():\n",
" lower_cell = tf.contrib.rnn.LSTMBlockCell(256)\n",
@@ -1227,12 +1124,13 @@
" Returns:\n",
" A Tensor of shape (max_sequence_length, batch_size, output_size).\n",
" \"\"\"\n",
- " hidden_outputs = []\n",
- " autograph.utils.set_element_type(hidden_outputs, tf.float32)\n",
+ " hidden_outputs = tf.TensorArray(tf.float32, size=0, dynamic_size=True)\n",
" state, output = cell.zero_state(batch_size, tf.float32)\n",
+ " initial_state_shape = state.shape\n",
+ " initial_output_shape = output.shape\n",
" n = tf.shape(chars)[0]\n",
" i = 0\n",
- " while i < n:\n",
+ " while i \u003c n:\n",
" ch = chars[i]\n",
" cell_output, (state, output) = cell.call(ch, (state, output))\n",
" hidden_outputs.append(cell_output)\n",
@@ -1261,50 +1159,51 @@
" A Tensor of shape (batch_size, 3) - the model predictions.\n",
" \"\"\"\n",
" (chars, length) = inputs\n",
- " chars_time_major = tf.transpose(chars, [1, 0, 2])\n",
+ " chars_time_major = tf.transpose(chars, (1, 0, 2))\n",
" chars_time_major.set_shape((None, batch_size, 256))\n",
"\n",
" hidden_outputs = rnn_layer(chars_time_major, lower_cell, batch_size, training)\n",
" final_outputs = rnn_layer(hidden_outputs, upper_cell, batch_size, training)\n",
"\n",
" # Grab just the end-of-sequence from each output.\n",
- " indices = tf.stack([length - 1, range(batch_size)], axis=1)\n",
+ " indices = tf.stack((length - 1, range(batch_size)), axis=1)\n",
" sequence_ends = tf.gather_nd(final_outputs, indices)\n",
+ " sequence_ends.set_shape((batch_size, 128))\n",
" return relu_layer(sequence_ends)\n",
"\n",
"def loss_fn(labels, predictions):\n",
" return tf.reduce_mean((predictions - labels) ** 2)"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "JjK4gXFvFsf4",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "JjK4gXFvFsf4"
},
- "cell_type": "markdown",
"source": [
"The train and test functions are also similar to the ones used in the Eager notebook. Since the network requires a fixed batch size, we'll train in a single shot, rather than by epoch."
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "ZWQMExk0S6X6",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "ZWQMExk0S6X6"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def train(optimizer, train_data, lower_cell, upper_cell, relu_layer, batch_size, num_steps):\n",
" iterator = train_data.make_one_shot_iterator()\n",
" step = 0\n",
- " while step < num_steps:\n",
+ " while step \u003c num_steps:\n",
" labels, chars, sequence_length = iterator.get_next()\n",
" predictions = model((chars, sequence_length), lower_cell, upper_cell, relu_layer, batch_size, training=True)\n",
" loss = loss_fn(labels, predictions)\n",
@@ -1319,7 +1218,7 @@
" total_loss = 0.0\n",
" iterator = eval_data.make_one_shot_iterator()\n",
" step = 0\n",
- " while step < num_steps:\n",
+ " while step \u003c num_steps:\n",
" labels, chars, sequence_length = iterator.get_next()\n",
" predictions = model((chars, sequence_length), lower_cell, upper_cell, relu_layer, batch_size, training=False)\n",
" total_loss += loss_fn(labels, predictions)\n",
@@ -1340,16 +1239,14 @@
" # Here, we create a no_op that will drive the execution of all other code in\n",
" # this function. Autograph will add the necessary control dependencies.\n",
" return tf.no_op()"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "iopcs5hXG2od",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "iopcs5hXG2od"
},
- "cell_type": "markdown",
"source": [
"Finally, we add code to run inference on a single input, which we'll read from the input.\n",
"\n",
@@ -1357,17 +1254,19 @@
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "DyU0wnnAFEYj",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "DyU0wnnAFEYj"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"@autograph.do_not_convert(run_as=autograph.RunMode.PY_FUNC)\n",
"def draw_prediction(color_name, pred):\n",
@@ -1389,16 +1288,14 @@
" draw_prediction(color_name, pred)\n",
" # Create an op that will drive the entire function.\n",
" return tf.no_op()"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "Nt0Kv5OCHip0",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "Nt0Kv5OCHip0"
},
- "cell_type": "markdown",
"source": [
"Finally, we put everything together.\n",
"\n",
@@ -1406,218 +1303,132 @@
]
},
{
+ "cell_type": "code",
+ "execution_count": 22,
"metadata": {
- "id": "-GmWa0GtYWdh",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {}
- ],
- "base_uri": "https://localhost:8080/",
- "height": 668
+ "height": 415
},
- "outputId": "61f4af1d-c81e-44db-9079-1a7b8ed8ce58",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 15536,
"status": "ok",
- "timestamp": 1522345877153,
- "user_tz": 240,
- "elapsed": 75500,
+ "timestamp": 1531750946373,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "-GmWa0GtYWdh",
+ "outputId": "2e7a9856-9809-43a3-8b43-3c8514ea43e9"
},
- "cell_type": "code",
- "source": [
- "def run_input_loop(sess, inference_ops, color_name_placeholder):\n",
- " \"\"\"Helper function that reads from input and calls the inference ops in a loop.\"\"\"\n",
- "\n",
- " tb = widgets.TabBar([\"RNN Colorbot\"])\n",
- " while True:\n",
- " with tb.output_to(0):\n",
- " try:\n",
- " color_name = six.moves.input(\"Give me a color name (or press 'enter' to exit): \")\n",
- " except (EOFError, KeyboardInterrupt):\n",
- " break\n",
- " if not color_name:\n",
- " break\n",
- " with tb.output_to(0):\n",
- " tb.clear_tab()\n",
- " sess.run(inference_ops, {color_name_placeholder: color_name})\n",
- " plt.show()\n",
- "\n",
- "with tf.Graph().as_default():\n",
- " # Read the data.\n",
- " batch_size = 64\n",
- " train_data = load_dataset(data_dir, train_url, batch_size)\n",
- " eval_data = load_dataset(data_dir, test_url, 50, training=False)\n",
- " \n",
- " # Create the model components.\n",
- " lower_cell, upper_cell, relu_layer = model_components()\n",
- " # Create the helper placeholder for inference.\n",
- " color_name_placeholder = tf.placeholder(tf.string, shape=())\n",
- " \n",
- " # Compile the train / test code.\n",
- " tf_train_model = autograph.to_graph(train_model)\n",
- " train_model_ops = tf_train_model(\n",
- " train_data, eval_data, batch_size, lower_cell, upper_cell, relu_layer, train_steps=100)\n",
- " \n",
- " # Compile the inference code.\n",
- " tf_inference = autograph.to_graph(inference)\n",
- " inference_ops = tf_inference(color_name_placeholder, lower_cell, upper_cell, relu_layer)\n",
- " \n",
- " with tf.Session() as sess:\n",
- " sess.run(tf.global_variables_initializer())\n",
- " \n",
- " # Run training and testing.\n",
- " sess.run(train_model_ops)\n",
- " \n",
- " # Run the inference loop.\n",
- " run_input_loop(sess, inference_ops, color_name_placeholder)"
- ],
- "execution_count": 0,
"outputs": [
{
+ "name": "stdout",
"output_type": "stream",
"text": [
- "('Successfully downloaded', 'train.csv', 28010L, 'bytes.')\n",
- "('Successfully downloaded', 'test.csv', 2414L, 'bytes.')\n",
- "Step 0 train loss 0.37890616\n",
- "Step 10 train loss 0.18515904\n",
- "Step 20 train loss 0.0892782\n",
- "Step 30 train loss 0.07883155\n",
- "Step 40 train loss 0.08585831\n",
- "Step 50 train loss 0.09302989\n",
- "Step 60 train loss 0.089012615\n",
- "Step 70 train loss 0.07275697\n",
- "Step 80 train loss 0.06644974\n",
- "Step 90 train loss 0.0854013\n",
- "Test loss 0.13216865Colorbot is ready to generate colors!\n",
- "\n",
+ "Test loss 0.138294\n",
+ "Colorbot is ready to generate colors!\n",
"\n",
"\n"
- ],
- "name": "stdout"
+ ]
},
{
- "output_type": "display_data",
"data": {
- "text/plain": [
- "<IPython.core.display.HTML object>"
- ],
"text/html": [
- "<link rel=stylesheet type=text/css href='/nbextensions/google.colab/tabbar.css'></link>"
+ "\u003clink rel=stylesheet type=text/css href='/nbextensions/google.colab/tabbar.css'\u003e\u003c/link\u003e"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.HTML at 0x7f97ee42bb90\u003e"
]
},
"metadata": {
"tags": [
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
- "text/plain": [
- "<IPython.core.display.HTML object>"
- ],
"text/html": [
- "<script src='/nbextensions/google.colab/tabbar_main.min.js'></script>"
+ "\u003cscript src='/nbextensions/google.colab/tabbar_main.min.js'\u003e\u003c/script\u003e"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.HTML at 0x7f97ee42be10\u003e"
]
},
"metadata": {
"tags": [
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
- "text/plain": [
- "<IPython.core.display.HTML object>"
- ],
"text/html": [
- "<div id=\"id1\"></div>"
+ "\u003cdiv id=\"id1\"\u003e\u003c/div\u003e"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.HTML at 0x7f97ee42bd90\u003e"
]
},
"metadata": {
"tags": [
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"b102d936-3379-11e8-ac70-0242ac110002\"] = colab_lib.createTabBar({\"contentBorder\": [\"0px\"], \"borderColor\": [\"#a7a7a7\"], \"tabNames\": [\"RNN Colorbot\"], \"initialSelection\": 0, \"location\": \"top\", \"contentHeight\": [\"initial\"], \"elementId\": \"id1\"});\n",
- "//# sourceURL=js_e223a56194"
+ "window[\"a6045494-8903-11e8-99f9-c8d3ffb5fbe0\"] = colab_lib.createTabBar({\"location\": \"top\", \"borderColor\": [\"#a7a7a7\"], \"initialSelection\": 0, \"elementId\": \"id1\", \"contentHeight\": [\"initial\"], \"contentBorder\": [\"0px\"], \"tabNames\": [\"RNN Colorbot\"]});\n",
+ "//# sourceURL=js_02f896cbda"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2ab810\u003e"
]
},
"metadata": {
"tags": [
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"b103532a-3379-11e8-ac70-0242ac110002\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_b8c6a821fb"
+ "window[\"a6045495-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_7e8f9f77a0"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2ab710\u003e"
]
},
"metadata": {
"tags": [
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"b105b28c-3379-11e8-ac70-0242ac110002\"] = google.colab.output.getActiveOutputArea();\n",
- "//# sourceURL=js_44805e254b"
+ "window[\"a6045496-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
+ "//# sourceURL=js_5531553c2f"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2ab6d0\u003e"
]
},
"metadata": {
@@ -1625,17 +1436,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"b106197a-3379-11e8-ac70-0242ac110002\"] = document.querySelector(\"#id1_content_0\");\n",
- "//# sourceURL=js_a63d3c6c47"
+ "window[\"a6045497-8903-11e8-99f9-c8d3ffb5fbe0\"] = document.querySelector(\"#id1_content_0\");\n",
+ "//# sourceURL=js_d1f809ec17"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2ab990\u003e"
]
},
"metadata": {
@@ -1643,17 +1454,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"b1069f44-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"b106197a-3379-11e8-ac70-0242ac110002\"]);\n",
- "//# sourceURL=js_7e203b8bce"
+ "window[\"a6045498-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"a6045497-8903-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_3a3123cadb"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2aba50\u003e"
]
},
"metadata": {
@@ -1661,17 +1472,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"b1070f38-3379-11e8-ac70-0242ac110002\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_d53293d4a7"
+ "window[\"a6045499-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_1a0e1f7d6f"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2ab890\u003e"
]
},
"metadata": {
@@ -1679,17 +1490,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c6d90d5c-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"b105b28c-3379-11e8-ac70-0242ac110002\"]);\n",
- "//# sourceURL=js_3000dc2c05"
+ "window[\"a8e54762-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"a6045496-8903-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_6213539615"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2abad0\u003e"
]
},
"metadata": {
@@ -1697,17 +1508,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c6da872c-3379-11e8-ac70-0242ac110002\"] = google.colab.output.getActiveOutputArea();\n",
- "//# sourceURL=js_4136f669a3"
+ "window[\"a8e54763-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
+ "//# sourceURL=js_0bd7f95c6e"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2ab950\u003e"
]
},
"metadata": {
@@ -1715,17 +1526,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c6dac868-3379-11e8-ac70-0242ac110002\"] = document.querySelector(\"#id1_content_0\");\n",
- "//# sourceURL=js_2f70dd9aee"
+ "window[\"a8e54764-8903-11e8-99f9-c8d3ffb5fbe0\"] = document.querySelector(\"#id1_content_0\");\n",
+ "//# sourceURL=js_215f004f6b"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2abb10\u003e"
]
},
"metadata": {
@@ -1733,17 +1544,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c6db07d8-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"c6dac868-3379-11e8-ac70-0242ac110002\"]);\n",
- "//# sourceURL=js_7226726048"
+ "window[\"a8e54765-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"a8e54764-8903-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_a06186c8ad"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2aba90\u003e"
]
},
"metadata": {
@@ -1751,17 +1562,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c6dcc6fe-3379-11e8-ac70-0242ac110002\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_72e7709865"
+ "window[\"a8e54766-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_383fbaae67"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2abc50\u003e"
]
},
"metadata": {
@@ -1769,14 +1580,14 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAVQAAAFZCAYAAADHDNdrAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAB9JJREFUeJzt3E1Lle0ax+HTF4jeEAyMBhE0DawI\nwsCH0AIlaGBWNJBo0CDoA0TQhmDXuKAGDioiCA2KlEAlnl05FD9Co8BeaGCQoBDa2jPZsXt4Bvu/\n0+o4Rmvd1zW4rsmP84bFamo0Go0C4H/WvNYHAPhVCCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKDy\nUxgeHq5Dhw7V4OBgPXz4sHp7e+vWrVt15cqVOnnyZN2/f78ajUbdvn27+vr6qqenp65du1YrKytV\nVfXhw4e6cOFC9fX1VV9fX01PT1dV1dzcXHV3d9eDBw/q+PHj9ccff9TExMRaXpWfWOtaHwD+zuvX\nr+vOnTs1MTFRbW1tdf78+dW16enpGh8fr/b29hobG6upqal6/Phxbdy4sS5evFgjIyM1NDRUly5d\nqv3799fw8HC9efOmTp8+XVNTU1VV9enTp2pubq5nz57V5ORk3bhxo44dO7ZW1+UnZkJl3Zudna2D\nBw9WR0dHbdiwoQYHB1fX9u7dW+3t7VVV9fLlyxocHKytW7dWa2trnTp1qp4/f16Li4s1MzNT586d\nq6qqXbt21YEDB1an1OXl5Tpx4kRVVe3Zs6fevXv3Yy/IL8OEyrr3+fPnamtrW/2+ffv21c//+Xxh\nYaHu3r1bjx49qqqqlZWVam9vr4WFhWo0GnXmzJnVvYuLi9XV1VVVVS0tLbVp06aqqmpubq6vX7/+\nX+/Dr0tQWfe2bNlSi4uLq98/fvz43X0dHR3V29tbQ0ND3zxfXl6ulpaWevLkSW3evPmbtbm5ufyB\n+W155Wfd6+zsrJmZmZqfn68vX77U2NjYd/cdOXKkxsfHa2lpqaqqRkdH6+nTp9Xa2lqHDx+u0dHR\nqqpaWlqqy5cv1/v373/YHfg9CCrrXmdnZw0MDNTAwECdPXu2enp6vrvv6NGj1dPTUwMDA9Xf318v\nXryo7u7uqqq6evVqzc7OVn9/fw0MDNTOnTtrx44dP/Ia/Aaa/B8qP4NGo1FNTU1VVfXq1au6efPm\nX06qsFZMqKx78/Pz1dXVVW/fvq1Go1GTk5O1b9++tT4W/BcTKj+FkZGRunfvXjU1NdXu3bvr+vXr\ntW3btrU+FnxDUAFCvPIDhAgqQMi6+WH/kX8eXesjAPytf/3jz79cM6EChAgqQIigAoQIKkCIoAKE\nCCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQI\nKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgq\nQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpA\niKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCI\noAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIig\nAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAC\nhAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKE\nCCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQI\nKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgq\nQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpA\niKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCI\noAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIig\nAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAC\nhAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKE\nCCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQI\nKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgq\nQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpA\niKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkBI\nU6PRaKz1IQB+BSZUgBBBBQgRVIAQQQUIEVSAEEEFCBFUgBBBBQgRVIAQQQUIEVSAEEEFCBFUgBBB\nBQgRVIAQQQUIEVSAEEEFCBFUgBBBBQgRVIAQQQUIEVSAkH8D1Aj8lNhhe7QAAAAASUVORK5CYII=\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQwAAAENCAYAAAD60Fs2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAACL9JREFUeJzt3F+IlXUex/Gv2ziiBRGVOQaFd2JBzOg5aiH+IZGoJgmM\n/uhVGIlgFE0QEYHdFQaRGBJ10VX0D5TAi8jKomGmILsYjEAkmBwbRIxKGDV/e7G7w8ouux9jd911\nX6+rcx6e85zveS7e/J7zb0ZrrRVA4A8XewDgf4dgADHBAGKCAcQEA4gJBhATDC6Kp59+urrdbt13\n3301OjpaK1euvNgjERCMS9yaNWtqeHj4Yo9xnq+++qqGh4frs88+q7fffruqqmbMmHGRpyIhGPxH\n/fbbb/XDDz/U9ddfX7NmzbrY43CBBOMS9tRTT9XExERt2bKlBgYG6vXXX69vvvmm7r///up0OrV+\n/foaHR2d3n/Tpk318ssv1wMPPFADAwP18MMP18mTJ6uq6vTp0zU0NFRLly6tTqdTGzZsqBMnTlRV\n1eTkZG3ZsqWWLl1a69atq3feeWf6mDt37qxt27bV0NBQLVmypN5777169tln6+DBgzUwMFA7d+78\nm7kPHz5cmzZtqk6nU3fffXft37+/qqrGx8er0+lM7/fMM8/UrbfeOn1/aGio3nzzzX/tSeR8jUva\n6tWr2/DwcGuttWPHjrVut9sOHDjQWmvtiy++aN1ut504caK11trGjRvb2rVr2/fff9+mpqbaxo0b\n244dO1prrb311lvt0UcfbVNTU+3cuXNtbGys/fLLL6211h566KG2ffv2dvr06Xbo0KG2bNmy6ed8\n5ZVX2k033dQ++uij1lprU1NT7f33328PPvjg9IwjIyNt5cqVrbXWzpw509auXdt2797dzpw504aH\nh1t/f387cuTI9OsZGxtrrbW2bt26dvvtt7fDhw+31lpbtWpVO3To0L/rVNJas8L4P9D+/HOhvXv3\n1qpVq2rFihVVVbV8+fK6+eab69NPP53e9957760bbrihent764477qhDhw5VVVVPT0+dPHmyjhw5\nUjNmzKhFixbV5ZdfXseOHauvv/66nnzyyZo5c2YtXLiwNmzYUHv27Jk+Zn9/f61Zs6aqqnp7e//h\nrAcPHqxTp07VI488Uj09PbVs2bJavXp1ffDBB1VVtWTJkhodHa3jx49XVdW6devqyy+/rPHx8fr1\n119r4cKF/6Kzxt/Tc7EH4D/n6NGjtW/fvvr444+r6k8hOXv2bC1fvnx6n2uuuWb69uzZs+vUqVNV\nVXXPPffUsWPH6oknnqiff/65BgcH6/HHH6/Jycm68sora/bs2dOPmz9/fo2NjU3fnzdvXjzj5ORk\n9fX1nbdt/vz5NTk5WVVVnU6n9u/fX9ddd111u93qdru1Z8+e6u3trcWLF1/A2eD3EIxL3F9/+tDX\n11fr16+v7du3X/Bxenp6auvWrbV169Y6evRobd68uRYsWFC33XZb/fTTT3Xq1KmaM2dOVVVNTEzU\n3Llz/+4M/8zcuXNrYmLivG1Hjx6tBQsWVFVVt9utF198sfr6+qrT6dTAwEA999xz1dvbW91u94Jf\nFxfGJckl7tprr63x8fGqqhocHKz9+/fX559/XufOnaupqakaHR2tH3/88Z8eZ2RkpL777rs6d+5c\nzZkzp3p6euqyyy6refPmVX9/f7300kt1+vTp+vbbb+vdd9+twcHB3zXvLbfcUnPmzKnXXnutzp49\nWyMjI/XJJ5/UnXfeWVVVN954Y82aNav27t1bnU6nrrjiirr66qvrww8/PO8NUf49BOMSt3nz5tq1\na1d1u93at29f7dq1q3bv3l3Lly+v1atX1xtvvDH9Hsc/WgkcP368tm3bVosXL6677rqrli5dOh2F\nHTt21Pj4eK1YsaK2bdtWjz322HmXORdi5syZ9eqrr9aBAwdq2bJl9fzzz9cLL7wwvcKo+tMq46qr\nrpq+1PlLKBYtWvS7npPcjNb8gQ6QscIAYoIBxAQDiAkGEPuv/R7GxN7+iz0C/F/rG/z6b7ZZYQAx\nwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQE\nA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMM\nICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCA\nmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBi\nggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJ\nBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYY\nQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAA\nMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHE\nBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhAT\nDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEww\ngJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEA\nYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOI\nCQYQEwwgNqO11i72EMD/BisMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBi\nggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiP0RoqNMBlokHDIAAAAASUVORK5CYII=\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x7f72f402e850>"
+ "\u003cmatplotlib.figure.Figure at 0x7f97ee42bb90\u003e"
]
},
"metadata": {
@@ -1785,17 +1596,17 @@
"outputarea_id1",
"user_output"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c70592aa-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"c6da872c-3379-11e8-ac70-0242ac110002\"]);\n",
- "//# sourceURL=js_25c3aaf79a"
+ "window[\"a8e54767-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"a8e54763-8903-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_28bd08ac10"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9efc10\u003e"
]
},
"metadata": {
@@ -1803,17 +1614,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c70842c0-3379-11e8-ac70-0242ac110002\"] = google.colab.output.getActiveOutputArea();\n",
- "//# sourceURL=js_984c56b816"
+ "window[\"a8e54768-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
+ "//# sourceURL=js_ae2887f57d"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9efb50\u003e"
]
},
"metadata": {
@@ -1821,17 +1632,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c708dec4-3379-11e8-ac70-0242ac110002\"] = document.querySelector(\"#id1_content_0\");\n",
- "//# sourceURL=js_e0451a1217"
+ "window[\"a8e54769-8903-11e8-99f9-c8d3ffb5fbe0\"] = document.querySelector(\"#id1_content_0\");\n",
+ "//# sourceURL=js_608805a786"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef710\u003e"
]
},
"metadata": {
@@ -1839,17 +1650,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c7092726-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"c708dec4-3379-11e8-ac70-0242ac110002\"]);\n",
- "//# sourceURL=js_7aa23d7385"
+ "window[\"a8e5476a-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"a8e54769-8903-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_3d87cf7d0f"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9efa90\u003e"
]
},
"metadata": {
@@ -1857,17 +1668,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c7099044-3379-11e8-ac70-0242ac110002\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_5722756ddb"
+ "window[\"a8e5476b-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_5e91101199"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9efa50\u003e"
]
},
"metadata": {
@@ -1875,24 +1686,149 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "stream",
- "text": [
- "Give me a color name (or press 'enter' to exit): \n"
- ],
- "name": "stdout"
+ "data": {
+ "text/html": [
+ "\u003cdiv class=id_45185901 style=\"margin-right:10px; display:flex;align-items:center;\"\u003e\u003cspan style=\"margin-right: 3px;\"\u003e\u003c/span\u003e\u003c/div\u003e"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.HTML at 0x7f97ee42bd90\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/javascript": [
+ "window[\"a8e5476c-8903-11e8-99f9-c8d3ffb5fbe0\"] = jQuery(\".id_45185901 span\");\n",
+ "//# sourceURL=js_f43052a94e"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef750\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/javascript": [
+ "window[\"a8e5476d-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"a8e5476c-8903-11e8-99f9-c8d3ffb5fbe0\"].text(\"Give me a color name (or press 'enter' to exit): \");\n",
+ "//# sourceURL=js_bfc0fb76ce"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9efb10\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/javascript": [
+ "window[\"a9e9b8b0-8903-11e8-99f9-c8d3ffb5fbe0\"] = jQuery(\".id_45185901 input\");\n",
+ "//# sourceURL=js_7f167283fa"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef610\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/javascript": [
+ "window[\"a9e9b8b1-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"a9e9b8b0-8903-11e8-99f9-c8d3ffb5fbe0\"].remove();\n",
+ "//# sourceURL=js_016ae4bf21"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef250\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/javascript": [
+ "window[\"a9e9b8b2-8903-11e8-99f9-c8d3ffb5fbe0\"] = jQuery(\".id_45185901 span\");\n",
+ "//# sourceURL=js_e666f179bc"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef550\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c7baac12-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"c70842c0-3379-11e8-ac70-0242ac110002\"]);\n",
- "//# sourceURL=js_cdd622e58f"
+ "window[\"a9e9b8b3-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"a9e9b8b2-8903-11e8-99f9-c8d3ffb5fbe0\"].text(\"Give me a color name (or press 'enter' to exit): \");\n",
+ "//# sourceURL=js_cbb9d14aec"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef1d0\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/javascript": [
+ "window[\"a9e9b8b4-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"a8e54768-8903-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_2967a79665"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef1d0\u003e"
]
},
"metadata": {
@@ -1900,21 +1836,102 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
}
+ ],
+ "source": [
+ "def run_input_loop(sess, inference_ops, color_name_placeholder):\n",
+ " \"\"\"Helper function that reads from input and calls the inference ops in a loop.\"\"\"\n",
+ "\n",
+ " tb = widgets.TabBar([\"RNN Colorbot\"])\n",
+ " while True:\n",
+ " with tb.output_to(0):\n",
+ " try:\n",
+ " color_name = six.moves.input(\"Give me a color name (or press 'enter' to exit): \")\n",
+ " except (EOFError, KeyboardInterrupt):\n",
+ " break\n",
+ " if not color_name:\n",
+ " break\n",
+ " with tb.output_to(0):\n",
+ " tb.clear_tab()\n",
+ " sess.run(inference_ops, {color_name_placeholder: color_name})\n",
+ " plt.show()\n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " # Read the data.\n",
+ " batch_size = 64\n",
+ " train_data = load_dataset(data_dir, train_url, batch_size)\n",
+ " eval_data = load_dataset(data_dir, test_url, 50, training=False)\n",
+ " \n",
+ " # Create the model components.\n",
+ " lower_cell, upper_cell, relu_layer = model_components()\n",
+ " # Create the helper placeholder for inference.\n",
+ " color_name_placeholder = tf.placeholder(tf.string, shape=())\n",
+ " \n",
+ " # Compile the train / test code.\n",
+ " tf_train_model = autograph.to_graph(train_model)\n",
+ " train_model_ops = tf_train_model(\n",
+ " train_data, eval_data, batch_size, lower_cell, upper_cell, relu_layer, train_steps=100)\n",
+ " \n",
+ " # Compile the inference code.\n",
+ " tf_inference = autograph.to_graph(inference)\n",
+ " inference_ops = tf_inference(color_name_placeholder, lower_cell, upper_cell, relu_layer)\n",
+ " \n",
+ " with tf.Session() as sess:\n",
+ " sess.run(tf.global_variables_initializer())\n",
+ " \n",
+ " # Run training and testing.\n",
+ " sess.run(train_model_ops)\n",
+ " \n",
+ " # Run the inference loop.\n",
+ " run_input_loop(sess, inference_ops, color_name_placeholder)"
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "AHJ2c47U-A5W",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "AHJ2c47U-A5W"
},
- "cell_type": "markdown",
"source": [
"# Where do we go next?\n",
"\n",
- "Autograph is available in tensorflow.contrib, but it's still in its early stages. We're excited about the possibilities it brings — write your machine learning code in the flexible Eager style, but still enjoy all the benefits that come with running in graph mode. A beta version will be available soon -- stay tuned!"
+ "AutoGraph is still in its early stages, but is available in [tensorflow.contrib](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/autograph). We're excited about the possibilities it brings. New versions will be available soon — stay tuned!"
]
}
- ]
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [],
+ "default_view": {},
+ "last_runtime": {
+ "build_target": "",
+ "kind": "local"
+ },
+ "name": "Dev Summit 2018 - Autograph",
+ "provenance": [
+ {
+ "file_id": "1wCZUh73zTNs1jzzYjqoxMIdaBWCdKJ2K",
+ "timestamp": 1522238054357
+ },
+ {
+ "file_id": "1_HpC-RrmIv4lNaqeoslUeWaX8zH5IXaJ",
+ "timestamp": 1521743157199
+ },
+ {
+ "file_id": "1mjO2fQ2F9hxpAzw2mnrrUkcgfb7xSGW-",
+ "timestamp": 1520522344607
+ }
+ ],
+ "version": "0.3.2",
+ "views": {}
+ },
+ "kernelspec": {
+ "display_name": "Python 2",
+ "name": "python2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
}
diff --git a/tensorflow/contrib/autograph/examples/notebooks/rnn_keras_estimator.ipynb b/tensorflow/contrib/autograph/examples/notebooks/rnn_keras_estimator.ipynb
index 324b23c24b..44532cb078 100644
--- a/tensorflow/contrib/autograph/examples/notebooks/rnn_keras_estimator.ipynb
+++ b/tensorflow/contrib/autograph/examples/notebooks/rnn_keras_estimator.ipynb
@@ -190,7 +190,6 @@
" self.upper_cell = tf.contrib.rnn.LSTMBlockCell(128)\n",
" self.relu_layer = tf.layers.Dense(3, activation=tf.nn.relu)\n",
"\n",
- "\n",
" def _rnn_layer(self, chars, cell, batch_size, training):\n",
" \"\"\"A single RNN layer.\n",
"\n",
@@ -203,13 +202,12 @@
" Returns:\n",
" A Tensor of shape (max_sequence_length, batch_size, output_size).\n",
" \"\"\"\n",
- " hidden_outputs = []\n",
- " autograph.utils.set_element_type(hidden_outputs, tf.float32)\n",
+ " hidden_outputs = tf.TensorArray(tf.float32, 0, True)\n",
" state, output = cell.zero_state(batch_size, tf.float32)\n",
" for ch in chars:\n",
" cell_output, (state, output) = cell.call(ch, (state, output))\n",
" hidden_outputs.append(cell_output)\n",
- " hidden_outputs = hidden_outputs.stack()\n",
+ " hidden_outputs = autograph.stack(hidden_outputs)\n",
" if training:\n",
" hidden_outputs = tf.nn.dropout(hidden_outputs, 0.5)\n",
" return hidden_outputs\n",
@@ -223,7 +221,7 @@
"\n",
"\n",
" def call(self, inputs, training=False):\n",
- " \"\"\"The RNN model code. Uses Eager and \n",
+ " \"\"\"The RNN model code. Uses Eager.\n",
"\n",
" The model consists of two RNN layers (made by lower_cell and upper_cell),\n",
" followed by a fully connected layer with ReLU activation.\n",
@@ -243,7 +241,8 @@
" seq = self._rnn_layer(seq, self.upper_cell, batch_size, training)\n",
"\n",
" # Grab just the end-of-sequence from each output.\n",
- " indices = tf.stack([length - 1, range(batch_size)], axis=1)\n",
+ " indices = (length - 1, range(batch_size))\n",
+ " indices = tf.stack(indices, 1)\n",
" sequence_ends = tf.gather_nd(seq, indices)\n",
" return self.relu_layer(sequence_ends)\n",
"\n",
@@ -381,7 +380,7 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 107,
"metadata": {
"colab": {
"autoexec": {
@@ -392,9 +391,9 @@
},
"colab_type": "code",
"executionInfo": {
- "elapsed": 10604,
+ "elapsed": 5454,
"status": "ok",
- "timestamp": 1524095272039,
+ "timestamp": 1529952160455,
"user": {
"displayName": "",
"photoUrl": "",
@@ -403,7 +402,7 @@
"user_tz": 240
},
"id": "2pg1AfbxBJQq",
- "outputId": "9c924b4f-06e1-4538-976c-a3e1ddac5660",
+ "outputId": "4aef3052-f7c7-4bb1-a0a2-73fef2e96efb",
"slideshow": {
"slide_type": "-"
}
@@ -413,7 +412,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "Eval loss at step 100: 0.0674834\n"
+ "Eval loss at step 100: 0.0705221\n"
]
}
],
@@ -423,8 +422,8 @@
" 'learning_rate': 0.01,\n",
"}\n",
"\n",
- "train_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/train.csv\"\n",
- "test_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/test.csv\"\n",
+ "train_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv\"\n",
+ "test_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/test.csv\"\n",
"data_dir = \"tmp/rnn/data\"\n",
"\n",
"regressor = tf.estimator.Estimator(\n",
@@ -457,7 +456,7 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 108,
"metadata": {
"colab": {
"autoexec": {
@@ -468,9 +467,9 @@
},
"colab_type": "code",
"executionInfo": {
- "elapsed": 7990,
+ "elapsed": 3432,
"status": "ok",
- "timestamp": 1524095280105,
+ "timestamp": 1529952163923,
"user": {
"displayName": "",
"photoUrl": "",
@@ -479,7 +478,7 @@
"user_tz": 240
},
"id": "dxHex2tUN_10",
- "outputId": "2b889e5a-b9ed-4645-bf03-d98f26c72101",
+ "outputId": "1ff438f2-b045-4f4e-86a0-4dae7503f6b2",
"slideshow": {
"slide_type": "slide"
}
@@ -491,12 +490,12 @@
"\u003clink rel=stylesheet type=text/css href='/nbextensions/google.colab/tabbar.css'\u003e\u003c/link\u003e"
],
"text/plain": [
- "\u003cIPython.core.display.HTML at 0x7f3f36aa6cd0\u003e"
+ "\u003cIPython.core.display.HTML at 0x7fcd7222a110\u003e"
]
},
"metadata": {
"tags": [
- "outputarea_id1"
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -507,12 +506,12 @@
"\u003cscript src='/nbextensions/google.colab/tabbar_main.min.js'\u003e\u003c/script\u003e"
],
"text/plain": [
- "\u003cIPython.core.display.HTML at 0x7f3eca67f7d0\u003e"
+ "\u003cIPython.core.display.HTML at 0x7fcd7222a8d0\u003e"
]
},
"metadata": {
"tags": [
- "outputarea_id1"
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -520,15 +519,15 @@
{
"data": {
"text/html": [
- "\u003cdiv id=\"id1\"\u003e\u003c/div\u003e"
+ "\u003cdiv id=\"id3\"\u003e\u003c/div\u003e"
],
"text/plain": [
- "\u003cIPython.core.display.HTML at 0x7f3eca67f8d0\u003e"
+ "\u003cIPython.core.display.HTML at 0x7fcd7222a050\u003e"
]
},
"metadata": {
"tags": [
- "outputarea_id1"
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -536,16 +535,16 @@
{
"data": {
"application/javascript": [
- "window[\"e8ddfa22-4362-11e8-91ec-c8d3ffb5fbe0\"] = colab_lib.createTabBar({\"contentBorder\": [\"0px\"], \"elementId\": \"id1\", \"borderColor\": [\"#a7a7a7\"], \"contentHeight\": [\"initial\"], \"tabNames\": [\"RNN Colorbot\"], \"location\": \"top\", \"initialSelection\": 0});\n",
- "//# sourceURL=js_71b9087b6d"
+ "window[\"8a03307e-78a7-11e8-99f9-c8d3ffb5fbe0\"] = colab_lib.createTabBar({\"contentBorder\": [\"0px\"], \"elementId\": \"id3\", \"contentHeight\": [\"initial\"], \"tabNames\": [\"RNN Colorbot\"], \"location\": \"top\", \"initialSelection\": 0, \"borderColor\": [\"#a7a7a7\"]});\n",
+ "//# sourceURL=js_dc5d7f2784"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67f950\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222a190\u003e"
]
},
"metadata": {
"tags": [
- "outputarea_id1"
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -553,16 +552,16 @@
{
"data": {
"application/javascript": [
- "window[\"e8ddfa23-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_e390445f33"
+ "window[\"8a03307f-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id3\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_be7950150b"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67f990\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222ac90\u003e"
]
},
"metadata": {
"tags": [
- "outputarea_id1"
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -570,17 +569,17 @@
{
"data": {
"application/javascript": [
- "window[\"e8ddfa24-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
- "//# sourceURL=js_241dd76d85"
+ "window[\"8a033080-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
+ "//# sourceURL=js_d0c3bd4eaa"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fc50\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222aad0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -588,17 +587,17 @@
{
"data": {
"application/javascript": [
- "window[\"e8ddfa25-4362-11e8-91ec-c8d3ffb5fbe0\"] = document.querySelector(\"#id1_content_0\");\n",
- "//# sourceURL=js_60c64e3d50"
+ "window[\"8a033081-78a7-11e8-99f9-c8d3ffb5fbe0\"] = document.querySelector(\"#id3_content_0\");\n",
+ "//# sourceURL=js_f10f6eba86"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fd90\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222aed0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -606,17 +605,17 @@
{
"data": {
"application/javascript": [
- "window[\"e8ddfa26-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"e8ddfa25-4362-11e8-91ec-c8d3ffb5fbe0\"]);\n",
- "//# sourceURL=js_14ea437cbd"
+ "window[\"8a033082-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"8a033081-78a7-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_ff29697179"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fe10\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222abd0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -624,17 +623,17 @@
{
"data": {
"application/javascript": [
- "window[\"e8ddfa27-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_09294c2226"
+ "window[\"8a033083-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id3\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_ff85295dc7"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fcd0\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222ab90\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -642,17 +641,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec965514-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"e8ddfa24-4362-11e8-91ec-c8d3ffb5fbe0\"]);\n",
- "//# sourceURL=js_e5e8266997"
+ "window[\"8b18d8dc-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"8a033080-78a7-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_ed7aabfedb"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fe10\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222a110\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -660,17 +659,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec965515-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
- "//# sourceURL=js_07a097f0ee"
+ "window[\"8b18d8dd-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
+ "//# sourceURL=js_c86f8feaf4"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fc90\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222acd0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -678,17 +677,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec965516-4362-11e8-91ec-c8d3ffb5fbe0\"] = document.querySelector(\"#id1_content_0\");\n",
- "//# sourceURL=js_790d669ca8"
+ "window[\"8b18d8de-78a7-11e8-99f9-c8d3ffb5fbe0\"] = document.querySelector(\"#id3_content_0\");\n",
+ "//# sourceURL=js_4d0fde6662"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67f8d0\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222ae50\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -696,17 +695,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec965517-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"ec965516-4362-11e8-91ec-c8d3ffb5fbe0\"]);\n",
- "//# sourceURL=js_d30df771f0"
+ "window[\"8b18d8df-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"8b18d8de-78a7-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_3f66d52720"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fd90\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222a210\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -714,32 +713,32 @@
{
"data": {
"application/javascript": [
- "window[\"ec965518-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_8a43a2da4b"
+ "window[\"8b18d8e0-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id3\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_375f5ae6d7"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fc50\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd7222a310\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQwAAAENCAYAAAD60Fs2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAACMBJREFUeJzt3F+I1XX+x/G32zjiFERUpgaFd2JBzOg5joX4h0SiMgmM\n/uhVGIlgFBlERGB3hUEkhkRdtDfRP1ACL6KpLBqcguxCjEAkmGamQcSohFHzsxe7O6zssvsydtff\n+ns8rs758j3f8z7fiyef7/k3o7XWCiDwh4s9APC/QzCAmGAAMcEAYoIBxAQDiAkGF8XTTz9d3W63\n7rvvvhoZGakVK1Zc7JEICMYlbvXq1TU8PHyxxzjPV199VcPDw/XZZ5/V22+/XVVVM2bMuMhTkRAM\n/qt+++23+uGHH+r666+vWbNmXexxuECCcQl76qmnanx8vLZs2VIDAwP1+uuv1zfffFP3339/dTqd\nWr9+fY2MjEzvv2nTpnr55ZfrgQceqIGBgXr44Yfr5MmTVVV1+vTp2r59ey1durQ6nU5t2LChTpw4\nUVVVk5OTtWXLllq6dGmtXbu23nnnnelj7tq1q7Zt21bbt2+vJUuW1HvvvVfPPvtsHTp0qAYGBmrX\nrl1/N/fRo0dr06ZN1el06u67766hoaGqqhodHa1OpzO93zPPPFO33nrr9P3t27fXm2+++e89iZyv\ncUlbtWpVGx4ebq21NjEx0brdbjtw4EBrrbUvvviidbvdduLEidZaaxs3bmxr1qxp33//fZuammob\nN25sO3fubK219tZbb7VHH320TU1NtXPnzrXDhw+3X375pbXW2kMPPdR27NjRTp8+3Y4cOdIGBwen\nn/OVV15pN910U/voo49aa61NTU21999/vz344IPTMx48eLCtWLGitdbamTNn2po1a9qePXvamTNn\n2vDwcOvv72/Hjh2bfj2HDx9urbW2du3advvtt7ejR4+21lpbuXJlO3LkyH/qVNJas8L4f6D95edC\n+/btq5UrV9by5curqmrZsmV1880316effjq977333ls33HBD9fb21h133FFHjhypqqqenp46efJk\nHTt2rGbMmFGLFi2qyy+/vCYmJurrr7+uJ598smbOnFkLFy6sDRs21N69e6eP2d/fX6tXr66qqt7e\n3n8666FDh+rUqVP1yCOPVE9PTw0ODtaqVavqgw8+qKqqJUuW1MjISB0/fryqqtauXVtffvlljY6O\n1q+//loLFy78N501/pGeiz0A/z1jY2O1f//++vjjj6vqzyE5e/ZsLVu2bHqfa665Zvr27Nmz69Sp\nU1VVdc8999TExEQ98cQT9fPPP9e6devq8ccfr8nJybryyitr9uzZ04+bP39+HT58ePr+3Llz4xkn\nJydr3rx5522bP39+TU5OVlVVp9OpoaGhuu6666rb7Va32629e/dWb29vLV68+ALOBr+HYFzi/vbT\nh3nz5tX69etrx44dF3ycnp6e2rp1a23durXGxsZq8+bNtWDBgrrtttvqp59+qlOnTlVfX19VVY2P\nj9ecOXP+4Qz/ypw5c2p8fPy8bWNjY7VgwYKqqup2u/Xiiy/WvHnzqtPp1MDAQD333HPV29tb3W73\ngl8XF8YlySXu2muvrdHR0aqqWrduXQ0NDdXnn39e586dq6mpqRoZGakff/zxXx7n4MGD9d1339W5\nc+eqr6+venp66rLLLqu5c+dWf39/vfTSS3X69On69ttv6913361169b9rnlvueWW6uvrq9dee63O\nnj1bBw8erE8++aTuvPPOqqq68cYba9asWbVv377qdDp1xRVX1NVXX10ffvjheW+I8p8hGJe4zZs3\n1+7du6vb7db+/ftr9+7dtWfPnlq2bFmtWrWq3njjjen3OP7ZSuD48eO1bdu2Wrx4cd111121dOnS\n6Sjs3LmzRkdHa/ny5bVt27Z67LHHzrvMuRAzZ86sV199tQ4cOFCDg4P1/PPP1wsvvDC9wqj68yrj\nqquumr7U+WsoFi1a9Luek9yM1vyBDpCxwgBiggHEBAOICQYQ+z/7PYzjf/QRGVxM12z68u+2WWEA\nMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHE\nBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhAT\nDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEww\ngJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEA\nYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOI\nCQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAm\nGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhg\nADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIB\nxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQ\nEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBM\nMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHB\nAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQD\niAkGEBMMIDajtdYu9hDA/wYrDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEA\nYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4j9CY2LTAbbRbWuAAAAAElFTkSuQmCC\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQwAAAENCAYAAAD60Fs2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAABTFJREFUeJzt3C+LV30eh/HP6EZvbP4ZJmkXDA6oQdZRMIhYLIKCMGVA\nyyaLT2ERLMqEDfoUFA2y3WpRrOKoSUSECePcYUEWdsN1OzfOyr5e8ZwT3unie34cfgvb29vbAxDs\n2e0BwK9DMIBMMIBMMIBMMIBMMIBMMPipXrx4MWfOnNntGfwgweCnW1hY2O0J/CDBYEe2trZ2ewI/\nkWDwh509e3bW19fn0qVLc/z48dnY2Jhbt27NyZMn59y5c/Pw4cPvz25ubs7t27dneXl5Ll68OC9f\nvtzF5ezUX3Z7AL+mJ0+ezPr6+uzfv3+uXr0658+fn7t3787GxsbcuHFjjhw5MqdPn5579+7N27dv\n5/nz5/P169dZXV3d7ensgBMGP+T69etz8ODBef369Xz69GnW1tZm7969s7S0NFeuXJnHjx/PzMzT\np09nbW1tfvvttzl48OBcu3Ztl5ezE04Y/JBDhw7NzMy7d+/mw4cPs7y8PDMz29vb8+3btzlx4sTM\nzHz8+PH7szMzi4uLP38sfxrBYEcOHz48S0tL8+zZs/96/8CBA7OxsTFHjx6dmX8Fhl+XVxJ25Nix\nY7Nv375ZX1+fzc3N2dramjdv3nz/cfPChQvz4MGD+fz587x//34ePXq0y4vZCcHgD/v37yj27Nkz\n9+/fn1evXs3KysqcOnVq7ty5M1++fJmZmZs3b87i4uKsrKzM6urqXL58ebdm8ydY8Ac6QOWEAWSC\nAWSCAWSCAWT/s99h/P3GX3d7Avxf+9s//vkf15wwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEww\ngEwwgEwwgEwwgGxhe3t7e7dHAL8GJwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwg\nEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwgEwwg+x1QoZHG4XIe4gAAAABJRU5ErkJggg==\n",
"text/plain": [
- "\u003cmatplotlib.figure.Figure at 0x7f3ecc00bf10\u003e"
+ "\u003cmatplotlib.figure.Figure at 0x7fcd0d02dc90\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -748,17 +747,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec965519-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"ec965515-4362-11e8-91ec-c8d3ffb5fbe0\"]);\n",
- "//# sourceURL=js_893ad561f4"
+ "window[\"8b18d8e1-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"8b18d8dd-78a7-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_34b0509660"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b55c90\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e850\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -766,17 +765,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec96551a-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
- "//# sourceURL=js_2d99e0ac17"
+ "window[\"8b18d8e2-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
+ "//# sourceURL=js_518a0f26fe"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67fe50\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6ec90\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -784,17 +783,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec96551b-4362-11e8-91ec-c8d3ffb5fbe0\"] = document.querySelector(\"#id1_content_0\");\n",
- "//# sourceURL=js_5c19462e32"
+ "window[\"8b18d8e3-78a7-11e8-99f9-c8d3ffb5fbe0\"] = document.querySelector(\"#id3_content_0\");\n",
+ "//# sourceURL=js_17eb3ff612"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b55dd0\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6eb50\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -802,17 +801,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec96551c-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"ec96551b-4362-11e8-91ec-c8d3ffb5fbe0\"]);\n",
- "//# sourceURL=js_b9c8b7567b"
+ "window[\"8b18d8e4-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"8b18d8e3-78a7-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_99da807c8e"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b55a50\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6eb90\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -820,17 +819,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec96551d-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_fd05186348"
+ "window[\"8b18d8e5-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id3\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_dee01cb4b6"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b55810\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e610\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -838,16 +837,16 @@
{
"data": {
"text/html": [
- "\u003cdiv class=id_888646481 style=\"margin-right:10px; display:flex;align-items:center;\"\u003e\u003cspan style=\"margin-right: 3px;\"\u003e\u003c/span\u003e\u003c/div\u003e"
+ "\u003cdiv class=id_853612217 style=\"margin-right:10px; display:flex;align-items:center;\"\u003e\u003cspan style=\"margin-right: 3px;\"\u003e\u003c/span\u003e\u003c/div\u003e"
],
"text/plain": [
- "\u003cIPython.core.display.HTML at 0x7f3f32414810\u003e"
+ "\u003cIPython.core.display.HTML at 0x7fcd7222aa10\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -856,17 +855,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec96551e-4362-11e8-91ec-c8d3ffb5fbe0\"] = jQuery(\".id_888646481 span\");\n",
- "//# sourceURL=js_efef96e882"
+ "window[\"8b18d8e6-78a7-11e8-99f9-c8d3ffb5fbe0\"] = jQuery(\".id_853612217 span\");\n",
+ "//# sourceURL=js_8c378be329"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b55710\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e990\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -875,17 +874,17 @@
{
"data": {
"application/javascript": [
- "window[\"ec96551f-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"ec96551e-4362-11e8-91ec-c8d3ffb5fbe0\"].text(\"Give me a color name (or press 'enter' to exit): \");\n",
- "//# sourceURL=js_6eca889864"
+ "window[\"8b18d8e7-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"8b18d8e6-78a7-11e8-99f9-c8d3ffb5fbe0\"].text(\"Give me a color name (or press 'enter' to exit): \");\n",
+ "//# sourceURL=js_f0b946600c"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3eca67f990\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e310\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -894,17 +893,17 @@
{
"data": {
"application/javascript": [
- "window[\"ed8ea972-4362-11e8-91ec-c8d3ffb5fbe0\"] = jQuery(\".id_888646481 input\");\n",
- "//# sourceURL=js_f02070cc60"
+ "window[\"8b18d8e9-78a7-11e8-99f9-c8d3ffb5fbe0\"] = jQuery(\".id_853612217 input\");\n",
+ "//# sourceURL=js_9e21b1373a"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b553d0\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6ea90\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -913,17 +912,17 @@
{
"data": {
"application/javascript": [
- "window[\"ed8ea973-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"ed8ea972-4362-11e8-91ec-c8d3ffb5fbe0\"].remove();\n",
- "//# sourceURL=js_ed9faba660"
+ "window[\"8b18d8ea-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"8b18d8e9-78a7-11e8-99f9-c8d3ffb5fbe0\"].remove();\n",
+ "//# sourceURL=js_a7764968c6"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31a95450\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e5d0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -932,17 +931,17 @@
{
"data": {
"application/javascript": [
- "window[\"ed8ea974-4362-11e8-91ec-c8d3ffb5fbe0\"] = jQuery(\".id_888646481 span\");\n",
- "//# sourceURL=js_f3458d7074"
+ "window[\"8b18d8eb-78a7-11e8-99f9-c8d3ffb5fbe0\"] = jQuery(\".id_853612217 span\");\n",
+ "//# sourceURL=js_74279d3ff0"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31a95250\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e890\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -951,17 +950,17 @@
{
"data": {
"application/javascript": [
- "window[\"ed8ea975-4362-11e8-91ec-c8d3ffb5fbe0\"] = window[\"ed8ea974-4362-11e8-91ec-c8d3ffb5fbe0\"].text(\"Give me a color name (or press 'enter' to exit): \");\n",
- "//# sourceURL=js_3ffd97bd6f"
+ "window[\"8b18d8ec-78a7-11e8-99f9-c8d3ffb5fbe0\"] = window[\"8b18d8eb-78a7-11e8-99f9-c8d3ffb5fbe0\"].text(\"Give me a color name (or press 'enter' to exit): \");\n",
+ "//# sourceURL=js_82b6c34cdb"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31a953d0\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e8d0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1",
+ "id3_content_0",
+ "outputarea_id3",
"user_output"
]
},
@@ -970,17 +969,17 @@
{
"data": {
"application/javascript": [
- "window[\"ed8ea976-4362-11e8-91ec-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"ec96551a-4362-11e8-91ec-c8d3ffb5fbe0\"]);\n",
- "//# sourceURL=js_7f73e8bcca"
+ "window[\"8b18d8ed-78a7-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"8b18d8e2-78a7-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_ff6144734a"
],
"text/plain": [
- "\u003cIPython.core.display.Javascript at 0x7f3f31b55710\u003e"
+ "\u003cIPython.core.display.Javascript at 0x7fcd08e6e8d0\u003e"
]
},
"metadata": {
"tags": [
- "id1_content_0",
- "outputarea_id1"
+ "id3_content_0",
+ "outputarea_id3"
]
},
"output_type": "display_data"
@@ -1043,28 +1042,6 @@
"kind": "local"
},
"name": "RNN Colorbot using Keras and Estimators",
- "provenance": [
- {
- "file_id": "1CtzefX39ffFibX_BqE6cRbT0UW_DdVKl",
- "timestamp": 1523579810961
- },
- {
- "file_id": "1DcfimonWU11tmyivKBGVrbpAl3BIOaRG",
- "timestamp": 1523016192637
- },
- {
- "file_id": "1wCZUh73zTNs1jzzYjqoxMIdaBWCdKJ2K",
- "timestamp": 1522238054357
- },
- {
- "file_id": "1_HpC-RrmIv4lNaqeoslUeWaX8zH5IXaJ",
- "timestamp": 1521743157199
- },
- {
- "file_id": "1mjO2fQ2F9hxpAzw2mnrrUkcgfb7xSGW-",
- "timestamp": 1520522344607
- }
- ],
"version": "0.3.2",
"views": {}
},
diff --git a/tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb b/tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb
new file mode 100644
index 0000000000..e7dfb13e15
--- /dev/null
+++ b/tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb
@@ -0,0 +1,1129 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "u3B7Uh50lozN"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install -U -q tf-nightly"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "qWUV0FYjDSKj"
+ },
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf\n",
+ "from tensorflow.contrib import autograph\n",
+ "\n",
+ "import matplotlib.pyplot as plt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "kGXS3UWBBNoc"
+ },
+ "source": [
+ "# 1. AutoGraph writes graph code for you\n",
+ "\n",
+    "[AutoGraph](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/autograph/README.md) helps you write complicated graph code using just plain Python -- behind the scenes, AutoGraph automatically transforms your code into the equivalent TF graph code. We support a large and growing subset of the Python language. [Please see this document for what we currently support, and what we're working on](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/autograph/LIMITATIONS.md).\n",
+ "\n",
+ "Here's a quick example of how it works:\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "aA3gOodCBkOw"
+ },
+ "outputs": [],
+ "source": [
+ "# Autograph can convert functions like this...\n",
+ "def g(x):\n",
+ " if x \u003e 0:\n",
+ " x = x * x\n",
+ " else:\n",
+ " x = 0.0\n",
+ " return x\n",
+ "\n",
+ "# ...into graph-building functions like this:\n",
+ "def tf_g(x):\n",
+ " with tf.name_scope('g'):\n",
+ "\n",
+ " def if_true():\n",
+ " with tf.name_scope('if_true'):\n",
+ " x_1, = x,\n",
+ " x_1 = x_1 * x_1\n",
+ " return x_1,\n",
+ "\n",
+ " def if_false():\n",
+ " with tf.name_scope('if_false'):\n",
+ " x_1, = x,\n",
+ " x_1 = 0.0\n",
+ " return x_1,\n",
+ "\n",
+ " x = autograph_utils.run_cond(tf.greater(x, 0), if_true, if_false)\n",
+ " return x"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "I1RtBvoKBxq5"
+ },
+ "outputs": [],
+ "source": [
+ "# You can run your plain-Python code in graph mode,\n",
+    "# and get the same results out, but with all the benefits of graphs:\n",
+ "print('Original value: %2.2f' % g(9.0))\n",
+ "\n",
+ "# Generate a graph-version of g and call it:\n",
+ "tf_g = autograph.to_graph(g)\n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " # The result works like a regular op: takes tensors in, returns tensors.\n",
+ " # You can inspect the graph using tf.get_default_graph().as_graph_def()\n",
+ " g_ops = tf_g(tf.constant(9.0))\n",
+ " with tf.Session() as sess:\n",
+ " print('Autograph value: %2.2f\\n' % sess.run(g_ops))\n",
+ "\n",
+ "\n",
+ "# You can view, debug and tweak the generated code:\n",
+ "print(autograph.to_code(g))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "m-jWmsCmByyw"
+ },
+ "source": [
+ "#### Automatically converting complex control flow\n",
+ "\n",
+ "AutoGraph can convert a large chunk of the Python language into equivalent graph-construction code, and we're adding new supported language features all the time. In this section, we'll give you a taste of some of the functionality in AutoGraph.\n",
+ "AutoGraph will automatically convert most Python control flow statements into their correct graph equivalent. \n",
+ " \n",
+ "We support common statements like `while`, `for`, `if`, `break`, `return` and more. You can even nest them as much as you like. Imagine trying to write the graph version of this code by hand:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "toxKBOXbB1ro"
+ },
+ "outputs": [],
+ "source": [
+ "# Continue in a loop\n",
+ "def f(l):\n",
+ " s = 0\n",
+ " for c in l:\n",
+ " if c % 2 \u003e 0:\n",
+ " continue\n",
+ " s += c\n",
+ " return s\n",
+ "\n",
+ "print('Original value: %d' % f([10,12,15,20]))\n",
+ "\n",
+ "tf_f = autograph.to_graph(f)\n",
+ "with tf.Graph().as_default():\n",
+ " with tf.Session():\n",
+ " print('Graph value: %d\\n\\n' % tf_f(tf.constant([10,12,15,20])).eval())\n",
+ "\n",
+ "print(autograph.to_code(f))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "FUJJ-WTdCGeq"
+ },
+ "source": [
+ "Try replacing the `continue` in the above code with `break` -- AutoGraph supports that as well! \n",
+ " \n",
+ "Let's try some other useful Python constructs, like `print` and `assert`. We automatically convert Python `assert` statements into the equivalent `tf.Assert` code. "
+ ]
+ },
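For reference, the `break` variant mentioned above is the same function with one keyword swapped (a sketch mirroring the earlier cell):

```python
# Same loop as the `continue` example, but stop at the first odd element.
def f(l):
  s = 0
  for c in l:
    if c % 2 > 0:
      break
    s += c
  return s

# As before: tf_f = autograph.to_graph(f), then call tf_f on a tensor.
```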
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "IAOgh62zCPZ4"
+ },
+ "outputs": [],
+ "source": [
+ "def f(x):\n",
+ " assert x != 0, 'Do not pass zero!'\n",
+ " return x * x\n",
+ "\n",
+ "tf_f = autograph.to_graph(f)\n",
+ "with tf.Graph().as_default():\n",
+ " with tf.Session():\n",
+ " try:\n",
+ " print(tf_f(tf.constant(0)).eval())\n",
+ " except tf.errors.InvalidArgumentError as e:\n",
+ " print('Got error message:\\n%s' % e.message)"
+ ]
+ },
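For comparison, a hand-written graph version of that assert (a sketch of the pattern AutoGraph targets, not its literal generated code):

```python
def f_manual(x):
  # tf.Assert raises InvalidArgumentError at run time if the predicate is
  # false; the control dependency ties the check into the computation.
  check = tf.Assert(tf.not_equal(x, 0), ['Do not pass zero!'])
  with tf.control_dependencies([check]):
    return x * x
```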
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "KRu8iIPBCQr5"
+ },
+ "source": [
+    "You can also use plain Python `print` functions in graph mode."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "ySTsuxnqCTQi"
+ },
+ "outputs": [],
+ "source": [
+ "def f(n):\n",
+ " if n \u003e= 0:\n",
+ " while n \u003c 5:\n",
+ " n += 1\n",
+ " print(n)\n",
+ " return n\n",
+ "\n",
+ "tf_f = autograph.to_graph(f)\n",
+ "with tf.Graph().as_default():\n",
+ " with tf.Session():\n",
+ " tf_f(tf.constant(0)).eval()"
+ ]
+ },
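A rough hand-written counterpart of the in-graph print above (a sketch; in TF1 this style of printing maps onto `tf.Print`, an identity op that logs its data as a side effect):

```python
n = tf.constant(0)
# Passes n through unchanged, printing [n] with the message whenever it runs.
n = tf.Print(n, [n], message='n = ')
```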
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "NqF0GT-VCVFh"
+ },
+ "source": [
+    "Appending to lists in loops also works (we create tensor list ops behind the scenes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "ABX070KwCczR"
+ },
+ "outputs": [],
+ "source": [
+ "def f(n):\n",
+ " z = []\n",
+ " # We ask you to tell us the element dtype of the list\n",
+ " autograph.set_element_type(z, tf.int32)\n",
+ " for i in range(n):\n",
+ " z.append(i)\n",
+ " # when you're done with the list, stack it\n",
+ " # (this is just like np.stack)\n",
+ " return autograph.stack(z)\n",
+ "\n",
+ "tf_f = autograph.to_graph(f)\n",
+ "with tf.Graph().as_default():\n",
+ " with tf.Session():\n",
+ " print(tf_f(tf.constant(3)).eval())\n",
+ "\n",
+ "print('\\n\\n'+autograph.to_code(f))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "iu5IF7n2Df7C"
+ },
+ "outputs": [],
+ "source": [
+ "def fizzbuzz(num):\n",
+ " if num % 3 == 0 and num % 5 == 0:\n",
+ " print('FizzBuzz')\n",
+ " elif num % 3 == 0:\n",
+ " print('Fizz')\n",
+ " elif num % 5 == 0:\n",
+ " print('Buzz')\n",
+ " else:\n",
+ " print(num)\n",
+ " return num"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "EExAjWuwDPpR"
+ },
+ "outputs": [],
+ "source": [
+ "tf_g = autograph.to_graph(fizzbuzz)\n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " # The result works like a regular op: takes tensors in, returns tensors.\n",
+ " # You can inspect the graph using tf.get_default_graph().as_graph_def()\n",
+ " g_ops = tf_g(tf.constant(15))\n",
+ " with tf.Session() as sess:\n",
+ " sess.run(g_ops) \n",
+ " \n",
+ "# You can view, debug and tweak the generated code:\n",
+ "print('\\n')\n",
+ "print(autograph.to_code(fizzbuzz))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "SzpKGzVpBkph"
+ },
+ "source": [
+ "# De-graphify Exercises\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "8k23dxcSmmXq"
+ },
+ "source": [
+ "#### Easy print statements"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "dE1Vsmp-mlpK"
+ },
+ "outputs": [],
+ "source": [
+ "# See what happens when you turn AutoGraph off.\n",
+ "# Do you see the type or the value of x when you print it?\n",
+ "\n",
+ "# @autograph.convert()\n",
+ "def square_log(x):\n",
+ " x = x * x\n",
+ " print('Squared value of x =', x)\n",
+ " return x\n",
+ "\n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " with tf.Session() as sess:\n",
+ " print(sess.run(square_log(tf.constant(4))))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "_R-Q7BbxmkBF"
+ },
+ "source": [
+ "#### Convert the TensorFlow code into Python code for AutoGraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "SwA11tO-yCvg"
+ },
+ "outputs": [],
+ "source": [
+ "def square_if_positive(x):\n",
+ " x = tf.cond(tf.greater(x, 0), lambda: x * x, lambda: x)\n",
+ " return x\n",
+ "\n",
+ "with tf.Session() as sess:\n",
+ " print(sess.run(square_if_positive(tf.constant(4))))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "GPmx4CNhyPI_"
+ },
+ "outputs": [],
+ "source": [
+ "@autograph.convert()\n",
+ "def square_if_positive(x):\n",
+ "\n",
+ " pass # TODO: fill it in!\n",
+ "\n",
+ "\n",
+ "with tf.Session() as sess:\n",
+ " print(sess.run(square_if_positive(tf.constant(4))))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "qqsjik-QyA9R"
+ },
+ "source": [
+ "#### Uncollapse to see answer"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "DaSmaWUEvMRv"
+ },
+ "outputs": [],
+ "source": [
+ "# Simple cond\n",
+ "@autograph.convert()\n",
+ "def square_if_positive(x):\n",
+ " if x \u003e 0:\n",
+ " x = x * x\n",
+ " return x\n",
+ "\n",
+ "with tf.Graph().as_default(): \n",
+ " with tf.Session() as sess:\n",
+ " print(sess.run(square_if_positive(tf.constant(4))))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "qj7am2I_xvTJ"
+ },
+ "source": [
+ "#### Nested If statement"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "4yyNOf-Twr6s"
+ },
+ "outputs": [],
+ "source": [
+ "def nearest_odd_square(x):\n",
+ "\n",
+ " def if_positive():\n",
+ " x1 = x * x\n",
+ " x1 = tf.cond(tf.equal(x1 % 2, 0), lambda: x1 + 1, lambda: x1)\n",
+ " return x1,\n",
+ "\n",
+ " x = tf.cond(tf.greater(x, 0), if_positive, lambda: x)\n",
+ " return x\n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " with tf.Session() as sess:\n",
+ " print(sess.run(nearest_odd_square(tf.constant(4))))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "hqmh5b2VyU9w"
+ },
+ "outputs": [],
+ "source": [
+ "@autograph.convert()\n",
+ "def nearest_odd_square(x):\n",
+ "\n",
+ " pass # TODO: fill it in!\n",
+ "\n",
+ "\n",
+ "with tf.Session() as sess:\n",
+ " print(sess.run(nearest_odd_square(tf.constant(4))))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "b9AXIkNLxp6J"
+ },
+ "source": [
+ "#### Uncollapse to reveal answer"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "8RlCVEpNxD91"
+ },
+ "outputs": [],
+ "source": [
+ "@autograph.convert()\n",
+ "def nearest_odd_square(x):\n",
+ " if x \u003e 0:\n",
+ " x = x * x\n",
+ " if x % 2 == 0:\n",
+ " x = x + 1\n",
+ " return x\n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " with tf.Session() as sess:\n",
+ " print(sess.run(nearest_odd_square(tf.constant(4))))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "jXAxjeBr1qWK"
+ },
+ "source": [
+ "#### Convert a while loop"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "kWkv7anlxoee"
+ },
+ "outputs": [],
+ "source": [
+ "# Convert a while loop\n",
+ "def square_until_stop(x, y):\n",
+ " x = tf.while_loop(lambda x: tf.less(x, y), lambda x: x * x, [x])\n",
+ " return x\n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " with tf.Session() as sess:\n",
+ " print(sess.run(square_until_stop(tf.constant(4), tf.constant(100))))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "zVUsc1eA1u2K"
+ },
+ "outputs": [],
+ "source": [
+ "@autograph.convert()\n",
+ "def square_until_stop(x, y):\n",
+ "\n",
+ " pass # TODO: fill it in!\n",
+ "\n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " with tf.Session() as sess:\n",
+ " print(sess.run(square_until_stop(tf.constant(4), tf.constant(100))))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "L2psuzPI02S9"
+ },
+ "source": [
+ "#### Uncollapse for the answer\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "ucmZyQVL03bF"
+ },
+ "outputs": [],
+ "source": [
+ "@autograph.convert()\n",
+ "def square_until_stop(x, y):\n",
+ " while x \u003c y:\n",
+ " x = x * x\n",
+ " return x\n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " with tf.Session() as sess:\n",
+ " print(sess.run(square_until_stop(tf.constant(4), tf.constant(100))))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "FXB0Zbwl13PY"
+ },
+ "source": [
+ "#### Nested loop and conditional"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "clGymxdf15Ig"
+ },
+ "outputs": [],
+ "source": [
+ "@autograph.convert()\n",
+ "def argwhere_cumsum(x, threshold):\n",
+ " current_sum = 0.0\n",
+ " idx = 0\n",
+ "\n",
+ " for i in range(len(x)):\n",
+ " idx = i\n",
+ " if current_sum \u003e= threshold:\n",
+ " break\n",
+ " current_sum += x[i]\n",
+ " return idx\n",
+ "\n",
+ "n = 10\n",
+ "with tf.Graph().as_default():\n",
+ " with tf.Session() as sess:\n",
+ " idx = argwhere_cumsum(tf.ones(n), tf.constant(float(n / 2)))\n",
+ " print(sess.run(idx))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "i7PF-uId9lp5"
+ },
+ "outputs": [],
+ "source": [
+ "@autograph.convert()\n",
+ "def argwhere_cumsum(x, threshold):\n",
+ "\n",
+ " pass # TODO: fill it in!\n",
+ "\n",
+ "\n",
+ "n = 10\n",
+ "with tf.Graph().as_default():\n",
+ " with tf.Session() as sess:\n",
+ " idx = argwhere_cumsum(tf.ones(n), tf.constant(float(n / 2)))\n",
+ " print(sess.run(idx))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "weKFXAb615Vp"
+ },
+ "source": [
+ "#### Uncollapse to see answer"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "1sjaFcL717Ig"
+ },
+ "outputs": [],
+ "source": [
+ "@autograph.convert()\n",
+ "def argwhere_cumsum(x, threshold):\n",
+ " current_sum = 0.0\n",
+ " idx = 0\n",
+ " for i in range(len(x)):\n",
+ " idx = i\n",
+ " if current_sum \u003e= threshold:\n",
+ " break\n",
+ " current_sum += x[i]\n",
+ " return idx\n",
+ "\n",
+ "n = 10\n",
+ "with tf.Graph().as_default(): \n",
+ " with tf.Session() as sess:\n",
+ " idx = argwhere_cumsum(tf.ones(n), tf.constant(float(n / 2)))\n",
+ " print(sess.run(idx))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "4LfnJjm0Bm0B"
+ },
+ "source": [
+ "# 3. Training MNIST in-graph\n",
+ "\n",
+ "Writing control flow in AutoGraph is easy, so running a training loop in a TensorFlow graph should be easy as well! \n",
+ "\n",
+ "Here, we show an example of training a simple Keras model on MNIST, where the entire training process -- loading batches, calculating gradients, updating parameters, calculating validation accuracy, and repeating until convergence -- is done in-graph."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "Em5dzSUOtLRP"
+ },
+ "source": [
+ "#### Download data"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "xqoxumv0ssQW"
+ },
+ "outputs": [],
+ "source": [
+ "import gzip\n",
+ "import os\n",
+ "import shutil\n",
+ "\n",
+ "from six.moves import urllib\n",
+ "\n",
+ "\n",
+ "def download(directory, filename):\n",
+ " filepath = os.path.join(directory, filename)\n",
+ " if tf.gfile.Exists(filepath):\n",
+ " return filepath\n",
+ " if not tf.gfile.Exists(directory):\n",
+ " tf.gfile.MakeDirs(directory)\n",
+ " url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'\n",
+ " zipped_filepath = filepath + '.gz'\n",
+ " print('Downloading %s to %s' % (url, zipped_filepath))\n",
+ " urllib.request.urlretrieve(url, zipped_filepath)\n",
+ " with gzip.open(zipped_filepath, 'rb') as f_in, open(filepath, 'wb') as f_out:\n",
+ " shutil.copyfileobj(f_in, f_out)\n",
+ " os.remove(zipped_filepath)\n",
+ " return filepath\n",
+ "\n",
+ "\n",
+ "def dataset(directory, images_file, labels_file):\n",
+ " images_file = download(directory, images_file)\n",
+ " labels_file = download(directory, labels_file)\n",
+ "\n",
+ " def decode_image(image):\n",
+ " # Normalize from [0, 255] to [0.0, 1.0]\n",
+ " image = tf.decode_raw(image, tf.uint8)\n",
+ " image = tf.cast(image, tf.float32)\n",
+ " image = tf.reshape(image, [784])\n",
+ " return image / 255.0\n",
+ "\n",
+ " def decode_label(label):\n",
+ " label = tf.decode_raw(label, tf.uint8)\n",
+ " label = tf.reshape(label, [])\n",
+ " return tf.to_int32(label)\n",
+ "\n",
+ " images = tf.data.FixedLengthRecordDataset(\n",
+ " images_file, 28 * 28, header_bytes=16).map(decode_image)\n",
+ " labels = tf.data.FixedLengthRecordDataset(\n",
+ " labels_file, 1, header_bytes=8).map(decode_label)\n",
+ " return tf.data.Dataset.zip((images, labels))\n",
+ "\n",
+ "\n",
+ "def mnist_train(directory):\n",
+ " return dataset(directory, 'train-images-idx3-ubyte',\n",
+ " 'train-labels-idx1-ubyte')\n",
+ "\n",
+ "def mnist_test(directory):\n",
+ " return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "znmy4l8ntMvW"
+ },
+ "source": [
+ "#### Define the model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "Pe-erWQdBoC5"
+ },
+ "outputs": [],
+ "source": [
+ "def mlp_model(input_shape):\n",
+ " model = tf.keras.Sequential((\n",
+ " tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape),\n",
+ " tf.keras.layers.Dense(100, activation='relu'),\n",
+ " tf.keras.layers.Dense(10, activation='softmax')))\n",
+ " model.build()\n",
+ " return model\n",
+ "\n",
+ "\n",
+ "def predict(m, x, y):\n",
+ " y_p = m(x)\n",
+ " losses = tf.keras.losses.categorical_crossentropy(y, y_p)\n",
+ " l = tf.reduce_mean(losses)\n",
+ " accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
+ " accuracy = tf.reduce_mean(accuracies)\n",
+ " return l, accuracy\n",
+ "\n",
+ "\n",
+ "def fit(m, x, y, opt):\n",
+ " l, accuracy = predict(m, x, y)\n",
+ " opt.minimize(l)\n",
+ " return l, accuracy\n",
+ "\n",
+ "\n",
+ "def setup_mnist_data(is_training, hp, batch_size):\n",
+ " if is_training:\n",
+ " ds = mnist_train('/tmp/autograph_mnist_data')\n",
+ " ds = ds.shuffle(batch_size * 10)\n",
+ " else:\n",
+ " ds = mnist_test('/tmp/autograph_mnist_data')\n",
+ " ds = ds.repeat()\n",
+ " ds = ds.batch(batch_size)\n",
+ " return ds\n",
+ "\n",
+ "\n",
+ "def get_next_batch(ds):\n",
+ " itr = ds.make_one_shot_iterator()\n",
+ " image, label = itr.get_next()\n",
+ " x = tf.to_float(tf.reshape(image, (-1, 28 * 28)))\n",
+ " y = tf.one_hot(tf.squeeze(label), 10)\n",
+ " return x, y"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "oeYV6mKnJGMr"
+ },
+ "source": [
+ "#### Define the training loop"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "3xtg_MMhJETd"
+ },
+ "outputs": [],
+ "source": [
+ "def train(train_ds, test_ds, hp):\n",
+ " m = mlp_model((28 * 28,))\n",
+ " opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
+ "\n",
+ " # We'd like to save our losses to a list. In order for AutoGraph\n",
+ " # to convert these lists into their graph equivalent,\n",
+ " # we need to specify the element type of the lists.\n",
+ " train_losses = []\n",
+ " test_losses = []\n",
+ " train_accuracies = []\n",
+ " test_accuracies = []\n",
+ " autograph.set_element_type(train_losses, tf.float32)\n",
+ " autograph.set_element_type(test_losses, tf.float32)\n",
+ " autograph.set_element_type(train_accuracies, tf.float32)\n",
+ " autograph.set_element_type(test_accuracies, tf.float32)\n",
+ "\n",
+ " # This entire training loop will be run in-graph.\n",
+ " i = tf.constant(0)\n",
+ " while i \u003c hp.max_steps:\n",
+ " train_x, train_y = get_next_batch(train_ds)\n",
+ " test_x, test_y = get_next_batch(test_ds)\n",
+ "\n",
+ " step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt)\n",
+ " step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
+ "\n",
+ " if i % (hp.max_steps // 10) == 0:\n",
+ " print('Step', i, 'train loss:', step_train_loss, 'test loss:',\n",
+ " step_test_loss, 'train accuracy:', step_train_accuracy,\n",
+ " 'test accuracy:', step_test_accuracy)\n",
+ "\n",
+ " train_losses.append(step_train_loss)\n",
+ " test_losses.append(step_test_loss)\n",
+ " train_accuracies.append(step_train_accuracy)\n",
+ " test_accuracies.append(step_test_accuracy)\n",
+ "\n",
+ " i += 1\n",
+ "\n",
+ " # We've recorded our loss values and accuracies\n",
+ " # to a list in a graph with AutoGraph's help.\n",
+ " # In order to return the values as a Tensor,\n",
+ " # we need to stack them before returning them.\n",
+ " return (\n",
+ " autograph.stack(train_losses),\n",
+ " autograph.stack(test_losses),\n",
+ " autograph.stack(train_accuracies),\n",
+ " autograph.stack(test_accuracies),\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "HYh6MSZyJOag"
+ },
+ "outputs": [],
+ "source": [
+ "with tf.Graph().as_default():\n",
+ " hp = tf.contrib.training.HParams(\n",
+ " learning_rate=0.05,\n",
+ " max_steps=500,\n",
+ " )\n",
+ " train_ds = setup_mnist_data(True, hp, 50)\n",
+ " test_ds = setup_mnist_data(False, hp, 1000)\n",
+ " tf_train = autograph.to_graph(train)\n",
+ " loss_tensors = tf_train(train_ds, test_ds, hp)\n",
+ "\n",
+ " with tf.Session() as sess:\n",
+ " sess.run(tf.global_variables_initializer())\n",
+ " (\n",
+ " train_losses,\n",
+ " test_losses,\n",
+ " train_accuracies,\n",
+ " test_accuracies\n",
+ " ) = sess.run(loss_tensors)\n",
+ "\n",
+ " plt.title('MNIST train/test losses')\n",
+ " plt.plot(train_losses, label='train loss')\n",
+ " plt.plot(test_losses, label='test loss')\n",
+ " plt.legend()\n",
+ " plt.xlabel('Training step')\n",
+ " plt.ylabel('Loss')\n",
+ " plt.show()\n",
+ " plt.title('MNIST train/test accuracies')\n",
+ " plt.plot(train_accuracies, label='train accuracy')\n",
+ " plt.plot(test_accuracies, label='test accuracy')\n",
+ " plt.legend(loc='lower right')\n",
+ " plt.xlabel('Training step')\n",
+ " plt.ylabel('Accuracy')\n",
+ " plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [
+ "qqsjik-QyA9R",
+ "b9AXIkNLxp6J",
+ "L2psuzPI02S9",
+ "weKFXAb615Vp",
+ "Em5dzSUOtLRP"
+ ],
+ "default_view": {},
+ "name": "AutoGraph Workshop.ipynb",
+ "provenance": [
+ {
+ "file_id": "1kE2gz_zuwdYySL4K2HQSz13uLCYi-fYP",
+ "timestamp": 1530563781803
+ }
+ ],
+ "version": "0.3.2",
+ "views": {}
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/tensorflow/contrib/autograph/impl/api.py b/tensorflow/contrib/autograph/impl/api.py
index c7401c7df1..f7fe3de5da 100644
--- a/tensorflow/contrib/autograph/impl/api.py
+++ b/tensorflow/contrib/autograph/impl/api.py
@@ -99,6 +99,7 @@ def do_not_convert(run_as=RunMode.GRAPH, return_dtypes=None):
Returns:
A decorator that wraps the original function.
"""
+
def decorator(f):
"""Decorator implementation."""
@@ -109,8 +110,7 @@ def do_not_convert(run_as=RunMode.GRAPH, return_dtypes=None):
@wraps(f)
def py_func_wrapper(*args, **kwargs):
if kwargs:
- raise NotImplementedError(
- 'RunMode.PY_FUNC does not yet support kwargs')
+ raise NotImplementedError('RunMode.PY_FUNC does not yet support kwargs')
# TODO(mdan): Add support for kwargs.
return py_func.wrap_py_func(
f, return_dtypes, args, kwargs, use_dummy_return=not return_dtypes)
@@ -231,7 +231,10 @@ def to_graph(e,
Returns:
A function with a signature identical to `o`, but which when executed
- creates TF a graph that has the same functionality as the original entity.
+ creates a TF graph that has the same functionality as the original entity.
+ Raises:
+ ValueError: If the converted function defines or refers to symbol names that
+ are reserved for AutoGraph.
"""
program_ctx = converter.ProgramContext(
recursive=recursive,
@@ -256,6 +259,19 @@ def to_graph(e,
compiled_node.__dict__[key] = val
compiled_fn = getattr(compiled_node, name)
+ # Need this so the source_mapping attribute is available for the context
+ # manager to access for runtime errors.
+ #
+ # Note that compiler.ast_to_object attaches the source map 'ag_source_map__'
+ # symbol to the compiled module.
+ source_map_attribute_name = 'ag_source_map'
+ if getattr(compiled_fn, source_map_attribute_name, None) is not None:
+ raise ValueError('cannot convert %s because it has an attribute '
+ '"%s", which is reserved for AutoGraph.' %
+ (compiled_fn, source_map_attribute_name))
+ setattr(compiled_fn, source_map_attribute_name,
+ compiled_node.__dict__['ag_source_map__'])
+
if verbose:
logging.info('Compiled output of %s:\n\n%s\n', e, compiled_src)
@@ -292,7 +308,7 @@ def to_code(e,
conversion.entity_to_graph(e, program_ctx, arg_values, arg_types)
code = '\n'.join(
- compiler.ast_to_source(dep, indentation)
+ compiler.ast_to_source(dep, indentation)[0]
for dep in reversed(tuple(six.itervalues(program_ctx.dependency_cache))))
return program_ctx.required_imports + '\n\n' + code
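A minimal usage sketch of the new attribute (hedged: it assumes the contrib-era autograph.to_graph entry point and simply mirrors the test added to api_test.py below; the function square is illustrative):

    from tensorflow.contrib import autograph

    def square(x):
      return x * x

    converted = autograph.to_graph(square)
    # Per this change, the compiled function carries the source map that the
    # runtime uses to rewrite graph-construction errors back to the original.
    assert hasattr(converted, 'ag_source_map')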
diff --git a/tensorflow/contrib/autograph/impl/api_test.py b/tensorflow/contrib/autograph/impl/api_test.py
index 9943093332..4de7df6572 100644
--- a/tensorflow/contrib/autograph/impl/api_test.py
+++ b/tensorflow/contrib/autograph/impl/api_test.py
@@ -206,8 +206,8 @@ class ApiTest(test.TestCase):
return x
with self.test_session() as sess:
- x = api.converted_call(
- test_fn, False, False, {}, constant_op.constant(-1))
+ x = api.converted_call(test_fn, False, False, {},
+ constant_op.constant(-1))
self.assertEqual(1, sess.run(x))
def test_converted_call_method(self):
@@ -274,8 +274,8 @@ class ApiTest(test.TestCase):
return self.x
with self.test_session() as sess:
- tc = api.converted_call(
- TestClass, False, False, {}, constant_op.constant(-1))
+ tc = api.converted_call(TestClass, False, False, {},
+ constant_op.constant(-1))
# tc is now a converted object.
x = tc.test_method()
self.assertEqual(1, sess.run(x))
@@ -305,6 +305,13 @@ class ApiTest(test.TestCase):
# Just check that it is parseable Python code.
self.assertIsNotNone(parser.parse_str(compiled_code))
+ def test_source_map_attribute_present(self):
+
+ def test_fn(y):
+ return y**2
+
+ self.assertTrue(hasattr(api.to_graph(test_fn), 'ag_source_map'))
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/autograph/impl/conversion.py b/tensorflow/contrib/autograph/impl/conversion.py
index 776d19f672..7bd0ba3f2d 100644
--- a/tensorflow/contrib/autograph/impl/conversion.py
+++ b/tensorflow/contrib/autograph/impl/conversion.py
@@ -28,26 +28,27 @@ from tensorflow.contrib.autograph.converters import asserts
from tensorflow.contrib.autograph.converters import break_statements
from tensorflow.contrib.autograph.converters import builtin_functions
from tensorflow.contrib.autograph.converters import call_trees
+from tensorflow.contrib.autograph.converters import conditional_expressions
from tensorflow.contrib.autograph.converters import continue_statements
from tensorflow.contrib.autograph.converters import control_flow
from tensorflow.contrib.autograph.converters import decorators
-from tensorflow.contrib.autograph.converters import ifexp
+from tensorflow.contrib.autograph.converters import directives
+from tensorflow.contrib.autograph.converters import error_handlers
from tensorflow.contrib.autograph.converters import lists
from tensorflow.contrib.autograph.converters import logical_expressions
from tensorflow.contrib.autograph.converters import name_scopes
+from tensorflow.contrib.autograph.converters import return_statements
from tensorflow.contrib.autograph.converters import side_effect_guards
-from tensorflow.contrib.autograph.converters import single_return
from tensorflow.contrib.autograph.converters import slices
from tensorflow.contrib.autograph.core import config
from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.core import errors
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import inspect_utils
+from tensorflow.contrib.autograph.pyct import origin_info
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct import transformer
-from tensorflow.contrib.autograph.pyct.static_analysis import activity
-from tensorflow.contrib.autograph.pyct.static_analysis import live_values
-from tensorflow.contrib.autograph.pyct.static_analysis import type_info
from tensorflow.python.util import tf_inspect
@@ -157,7 +158,8 @@ def class_to_graph(c, program_ctx):
program_ctx=program_ctx,
arg_values={},
arg_types={'self': (c.__name__, c)},
- owner_type=c)
+ owner_type=c,
+ rewrite_errors=False)
if class_namespace is None:
class_namespace = namespace
else:
@@ -231,6 +233,8 @@ def _add_self_references(namespace, autograph_module):
ag_internal = imp.new_module('autograph')
ag_internal.converted_call = autograph_module.converted_call
ag_internal.utils = utils
+ ag_internal.rewrite_graph_construction_error = (
+ errors.rewrite_graph_construction_error)
# TODO(mdan): Add safeguards against name clashes.
# We don't want to create a submodule because we want the operators to be
# accessible as ag__.<operator>
@@ -239,11 +243,17 @@ def _add_self_references(namespace, autograph_module):
_add_reserved_symbol(namespace, 'ag__', ag_internal)
-def function_to_graph(f, program_ctx, arg_values, arg_types, owner_type=None):
+def function_to_graph(f,
+ program_ctx,
+ arg_values,
+ arg_types,
+ owner_type=None,
+ rewrite_errors=True):
"""Specialization of `entity_to_graph` for callable functions."""
+
node, source = parser.parse_entity(f)
node = node.body[0]
-
+ origin_info.resolve(node, source, f)
namespace = inspect_utils.getnamespace(f)
_add_self_references(namespace, program_ctx.autograph_module)
namer = program_ctx.new_namer(namespace)
@@ -256,7 +266,7 @@ def function_to_graph(f, program_ctx, arg_values, arg_types, owner_type=None):
arg_types=arg_types,
owner_type=owner_type)
context = converter.EntityContext(namer, entity_info, program_ctx)
- node = node_to_graph(node, context)
+ node = node_to_graph(node, context, rewrite_errors=rewrite_errors)
# TODO(mdan): This somewhat duplicates the call rename logic in call_trees.py
new_name, did_rename = namer.compiled_function_name(f.__name__, f, owner_type)
@@ -272,22 +282,13 @@ def function_to_graph(f, program_ctx, arg_values, arg_types, owner_type=None):
return node, new_name, namespace
-def _apply_transformer(node, context, converter_module):
- # TODO(mdan): Clear static analysis here.
- node = qual_names.resolve(node)
- node = activity.resolve(node, context.info, None)
- node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
- node = type_info.resolve(node, context.info)
- node = converter_module.transform(node, context)
- return node
-
-
-def node_to_graph(node, context):
+def node_to_graph(node, context, rewrite_errors=True):
"""Convert Python code to equivalent TF graph mode code.
Args:
node: AST, the code to convert.
context: converter.EntityContext
+ rewrite_errors: Boolean, whether or not to rewrite the error traceback.
Returns:
A tuple (node, deps):
@@ -295,28 +296,33 @@ def node_to_graph(node, context):
* deps: A set of strings, the fully qualified names of entity
dependencies that this node has.
"""
- # TODO(mdan): Verify arguments for correctness.
+ # TODO(mdan): Insert list_comprehensions somewhere.
- node = _apply_transformer(node, context, ifexp)
+ node = converter.standard_analysis(node, context, is_initial=True)
# Past this point, line numbers are no longer accurate so we ignore the
# source.
# TODO(mdan): Is it feasible to reconstruct intermediate source code?
context.info.source_code = None
- node = _apply_transformer(node, context, decorators)
- node = _apply_transformer(node, context, break_statements)
- node = _apply_transformer(node, context, asserts)
+
+ node = converter.apply_(node, context, decorators)
+ node = converter.apply_(node, context, directives)
+ node = converter.apply_(node, context, break_statements)
+ node = converter.apply_(node, context, asserts)
# Note: sequencing continue canonicalization before for loop one avoids
# dealing with the extra loop increment operation that the for
# canonicalization creates.
- node = _apply_transformer(node, context, continue_statements)
+ node = converter.apply_(node, context, continue_statements)
context.info.namespace['len'] = len
- node = _apply_transformer(node, context, single_return)
- node = _apply_transformer(node, context, lists)
- node = _apply_transformer(node, context, slices)
- node = _apply_transformer(node, context, builtin_functions)
- node = _apply_transformer(node, context, call_trees)
- node = _apply_transformer(node, context, control_flow)
- node = _apply_transformer(node, context, logical_expressions)
- node = _apply_transformer(node, context, side_effect_guards)
- node = _apply_transformer(node, context, name_scopes)
+ node = converter.apply_(node, context, return_statements)
+ node = converter.apply_(node, context, lists)
+ node = converter.apply_(node, context, slices)
+ node = converter.apply_(node, context, builtin_functions)
+ node = converter.apply_(node, context, call_trees)
+ node = converter.apply_(node, context, control_flow)
+ node = converter.apply_(node, context, conditional_expressions)
+ node = converter.apply_(node, context, logical_expressions)
+ node = converter.apply_(node, context, side_effect_guards)
+ node = converter.apply_(node, context, name_scopes)
+ if rewrite_errors:
+ node = converter.apply_(node, context, error_handlers)
return node
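To make the new error_handlers pass concrete, here is a hedged, pure-Python sketch of the wrapper shape it gives converted functions. The shape is reconstructed from the try/except comment in conversion_test.py below and from the ag__.rewrite_graph_construction_error hook registered above; tf__f, ag__ and ag_source_map__ are illustrative stand-ins, not verbatim generated code.

    from __future__ import print_function

    class _IllustrativeAgInternal(object):
      """Stand-in for the ag__ namespace injected into converted code."""

      @staticmethod
      def rewrite_graph_construction_error(source_map):
        # The real hook remaps the traceback onto the original source using
        # the attached source map before the error propagates.
        print('rewriting error via a source map with %d entries'
              % len(source_map))

    ag__ = _IllustrativeAgInternal()
    ag_source_map__ = {}

    def tf__f(x):  # illustrative converted name
      try:
        return x * 2  # the converted function body would appear here
      except:  # the generated handler rewrites the error, then re-raises
        ag__.rewrite_graph_construction_error(ag_source_map__)
        raise

    print(tf__f(3))  # 6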
diff --git a/tensorflow/contrib/autograph/impl/conversion_test.py b/tensorflow/contrib/autograph/impl/conversion_test.py
index f5279298af..207225a1ac 100644
--- a/tensorflow/contrib/autograph/impl/conversion_test.py
+++ b/tensorflow/contrib/autograph/impl/conversion_test.py
@@ -79,10 +79,12 @@ class ConversionTest(test.TestCase):
self.assertTrue(f in program_ctx.dependency_cache)
self.assertTrue(g in program_ctx.dependency_cache)
self.assertEqual('tf__f', program_ctx.dependency_cache[f].name)
- # need the extra .body[0] in order to step past the with tf.name_scope('f')
- # that is added automatically
+ # need one extra .body[0] in order to step past the try/except wrapper that
+ # is added automatically, the other for the with tf.name_scope('f') that is
+ # added automatically
self.assertEqual(
- 'tf__g', program_ctx.dependency_cache[f].body[0].body[0].value.func.id)
+ 'tf__g',
+ program_ctx.dependency_cache[f].body[0].body[0].body[0].value.func.id)
self.assertEqual('tf__g', program_ctx.dependency_cache[g].name)
def test_entity_to_graph_class_hierarchy(self):
diff --git a/tensorflow/contrib/autograph/operators/__init__.py b/tensorflow/contrib/autograph/operators/__init__.py
index c900fd6af2..392cb60bcc 100644
--- a/tensorflow/contrib/autograph/operators/__init__.py
+++ b/tensorflow/contrib/autograph/operators/__init__.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""This module implements operators that we overload.
+"""This module implements operators that AutoGraph overloads.
Note that "operator" is used loosely here, and includes control structures like
conditionals and loops, implemented in functional form, using for example
diff --git a/tensorflow/contrib/autograph/pyct/BUILD b/tensorflow/contrib/autograph/pyct/BUILD
index 8f09689fe9..f77a6ab392 100644
--- a/tensorflow/contrib/autograph/pyct/BUILD
+++ b/tensorflow/contrib/autograph/pyct/BUILD
@@ -22,8 +22,10 @@ py_library(
"__init__.py",
"anno.py",
"ast_util.py",
+ "cfg.py",
"compiler.py",
"inspect_utils.py",
+ "origin_info.py",
"parser.py",
"pretty_printer.py",
"qual_names.py",
@@ -64,6 +66,17 @@ py_test(
)
py_test(
+ name = "cfg_test",
+ srcs = ["cfg_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":pyct",
+ "//tensorflow/python:client_testlib",
+ "@gast_archive//:gast",
+ ],
+)
+
+py_test(
name = "compiler_test",
srcs = ["compiler_test.py"],
srcs_version = "PY2AND3",
diff --git a/tensorflow/contrib/autograph/pyct/anno.py b/tensorflow/contrib/autograph/pyct/anno.py
index ae861627fd..1a52110ef3 100644
--- a/tensorflow/contrib/autograph/pyct/anno.py
+++ b/tensorflow/contrib/autograph/pyct/anno.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Handling annotations on AST nodes.
+"""AST node annotation support.
Adapted from Tangent.
"""
@@ -21,37 +21,90 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from enum import Enum
+import enum
+# pylint:disable=g-bad-import-order
+import gast
+# pylint:enable=g-bad-import-order
-class NoValue(Enum):
+
+# TODO(mdan): Shorten the names.
+# These names are heavily used, and anno.blaa
+# TODO(mdan): Replace the attr-dict mechanism with a more typed solution.
+
+
+class NoValue(enum.Enum):
def __repr__(self):
return self.name
class Basic(NoValue):
- """Container for annotation keys.
+ """Container for basic annotation keys.
The enum values are used strictly for documentation purposes.
"""
- QN = 'Qualified name, as it appeared in the code.'
+ QN = 'Qualified name, as it appeared in the code. See qual_names.py.'
SKIP_PROCESSING = (
'This node should be preserved as is and not processed any further.')
INDENT_BLOCK_REMAINDER = (
- 'When a node is annotated with this, the remainder of the block should '
- 'be indented below it. The annotation contains a tuple '
- '(new_body, name_map), where `new_body` is the new indented block and '
- '`name_map` allows renaming symbols.')
+ 'When a node is annotated with this, the remainder of the block should'
+ ' be indented below it. The annotation contains a tuple'
+ ' (new_body, name_map), where `new_body` is the new indented block and'
+ ' `name_map` allows renaming symbols.')
+ ORIGIN = ('Information about the source code that converted code originated'
+ ' from. See origin_info.py.')
+
+
+class Static(NoValue):
+ """Container for static analysis annotation keys.
+
+ The enum values are used strictly for documentation purposes.
+ """
+
+ # Deprecated - use reaching definitions instead.
+ # Symbols
+ # These flags are boolean.
+ IS_LOCAL = 'Symbol is local to the function scope being analyzed.'
+ IS_PARAM = 'Symbol is a parameter to the function being analyzed.'
+
+ # Scopes
+ # Scopes are represented by objects of type activity.Scope.
+ SCOPE = 'The scope for the annotated node. See activity.py.'
+ # TODO(mdan): Drop these in favor of accessing the child's SCOPE.
+ ARGS_SCOPE = 'The scope for the argument list of a function call.'
+ COND_SCOPE = 'The scope for the test node of a conditional statement.'
+ BODY_SCOPE = (
+ 'The scope for the main body of a statement (True branch for if '
+ 'statements, main body for loops).')
+ ORELSE_SCOPE = (
+ 'The scope for the orelse body of a statement (False branch for if '
+ 'statements, orelse body for loops).')
+
+ # Static analysis annotations.
+ DEFINITIONS = (
+ 'Reaching definition information. See reaching_definitions.py.')
+ ORIG_DEFINITIONS = (
+ 'The value of DEFINITIONS that applied to the original code before any'
+ ' conversion.')
+ DEFINED_VARS_IN = (
+ 'Symbols defined when entering the node. See reaching_definitions.py.')
+ LIVE_VARS_OUT = ('Symbols live when exiting the node. See liveness.py.')
FAIL = object()
+def keys(node, field_name='___pyct_anno'):
+ if not hasattr(node, field_name):
+ return frozenset()
+ return frozenset(getattr(node, field_name).keys())
+
+
def getanno(node, key, default=FAIL, field_name='___pyct_anno'):
- if (default is FAIL or
- (hasattr(node, field_name) and (key in getattr(node, field_name)))):
+ if (default is FAIL or (hasattr(node, field_name) and
+ (key in getattr(node, field_name)))):
return getattr(node, field_name)[key]
else:
return default
@@ -86,3 +139,19 @@ def copyanno(from_node, to_node, key, field_name='___pyct_anno'):
key,
getanno(from_node, key, field_name=field_name),
field_name=field_name)
+
+
+def dup(node, copy_map, field_name='___pyct_anno'):
+ """Recursively copies annotations in an AST tree.
+
+ Args:
+ node: ast.AST
+ copy_map: Dict[Hashable, Hashable], maps a source anno key to a destination
+ key. All annotations with the source key will be copied to identical
+ annotations with the destination key.
+ field_name: str
+ """
+ for n in gast.walk(node):
+ for k in copy_map:
+ if hasanno(n, k, field_name):
+ setanno(n, copy_map[k], getanno(n, k, field_name), field_name)
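A short sketch tying together the annotation helpers touched here (setanno, keys, getanno with a default, and the new dup); it uses only functions whose bodies appear in this file, applied to a plain parsed node:

    from __future__ import print_function

    import ast

    from tensorflow.contrib.autograph.pyct import anno

    node = ast.parse('x + 1').body[0]
    anno.setanno(node, 'spam', 1)
    print(anno.keys(node))             # frozenset with the single key 'spam'
    anno.dup(node, {'spam': 'eggs'})   # copies 'spam' annotations to 'eggs'
    print(anno.getanno(node, 'eggs'))  # 1
    print(anno.getanno(node, 'missing', default=None))  # None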
diff --git a/tensorflow/contrib/autograph/pyct/anno_test.py b/tensorflow/contrib/autograph/pyct/anno_test.py
index f2c0c8cf05..5ef4da61a3 100644
--- a/tensorflow/contrib/autograph/pyct/anno_test.py
+++ b/tensorflow/contrib/autograph/pyct/anno_test.py
@@ -32,22 +32,27 @@ class AnnoTest(test.TestCase):
def test_basic(self):
node = ast.Name()
+ self.assertEqual(anno.keys(node), set())
self.assertFalse(anno.hasanno(node, 'foo'))
with self.assertRaises(AttributeError):
anno.getanno(node, 'foo')
anno.setanno(node, 'foo', 3)
+
+ self.assertEqual(anno.keys(node), {'foo'})
self.assertTrue(anno.hasanno(node, 'foo'))
self.assertEqual(anno.getanno(node, 'foo'), 3)
self.assertEqual(anno.getanno(node, 'bar', default=7), 7)
anno.delanno(node, 'foo')
+
+ self.assertEqual(anno.keys(node), set())
self.assertFalse(anno.hasanno(node, 'foo'))
with self.assertRaises(AttributeError):
anno.getanno(node, 'foo')
self.assertIsNone(anno.getanno(node, 'foo', default=None))
- def test_copyanno(self):
+ def test_copy(self):
node_1 = ast.Name()
anno.setanno(node_1, 'foo', 3)
@@ -58,6 +63,22 @@ class AnnoTest(test.TestCase):
self.assertTrue(anno.hasanno(node_2, 'foo'))
self.assertFalse(anno.hasanno(node_2, 'bar'))
+ def test_duplicate(self):
+ node = ast.If(
+ test=ast.Num(1),
+ body=[ast.Expr(ast.Name('bar', ast.Load()))],
+ orelse=[])
+ anno.setanno(node, 'spam', 1)
+ anno.setanno(node, 'ham', 1)
+ anno.setanno(node.body[0], 'ham', 1)
+
+ anno.dup(node, {'spam': 'eggs'})
+
+ self.assertTrue(anno.hasanno(node, 'spam'))
+ self.assertTrue(anno.hasanno(node, 'ham'))
+ self.assertTrue(anno.hasanno(node, 'eggs'))
+ self.assertFalse(anno.hasanno(node.body[0], 'eggs'))
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/autograph/pyct/ast_util.py b/tensorflow/contrib/autograph/pyct/ast_util.py
index c4f82d1170..86e3f56a64 100644
--- a/tensorflow/contrib/autograph/pyct/ast_util.py
+++ b/tensorflow/contrib/autograph/pyct/ast_util.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Copy an AST tree, discarding annotations."""
+"""AST manipulation utilities."""
from __future__ import absolute_import
from __future__ import division
@@ -20,53 +20,60 @@ from __future__ import print_function
import ast
+import collections
import gast
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import parser
-class CleanCopier(gast.NodeVisitor):
- """Copies AST nodes.
+class CleanCopier(object):
+ """NodeTransformer-like visitor that copies an AST."""
- The copied nodes will ignore almost all fields that are prefixed by '__'.
- Exceptions make some annotations.
- """
+ def __init__(self, preserve_annos):
+ super(CleanCopier, self).__init__()
+ self.preserve_annos = preserve_annos
- # TODO(mdan): Parametrize which annotations get carried over.
+ def copy(self, node):
+ """Returns a deep copy of node (excluding some fields, see copy_clean)."""
+
+ if isinstance(node, list):
+ return [self.copy(n) for n in node]
+ elif isinstance(node, tuple):
+ return tuple(self.copy(n) for n in node)
+ elif not isinstance(node, (gast.AST, ast.AST)):
+ # Assuming everything that's not an AST, list or tuple is a value type
+ # and may simply be assigned.
+ return node
+
+ assert isinstance(node, (gast.AST, ast.AST))
- def generic_visit(self, node):
new_fields = {}
for f in node._fields:
- if f.startswith('__'):
- continue
- if not hasattr(node, f):
- continue
- v = getattr(node, f)
- if isinstance(v, list):
- v = [self.generic_visit(n) for n in v]
- elif isinstance(v, tuple):
- v = tuple(self.generic_visit(n) for n in v)
- elif isinstance(v, (gast.AST, ast.AST)):
- v = self.generic_visit(v)
- else:
- # Assume everything else is a value type.
- pass
- new_fields[f] = v
+ if not f.startswith('__') and hasattr(node, f):
+ new_fields[f] = self.copy(getattr(node, f))
new_node = type(node)(**new_fields)
- if anno.hasanno(node, anno.Basic.SKIP_PROCESSING):
- anno.setanno(new_node, anno.Basic.SKIP_PROCESSING, True)
+
+ if self.preserve_annos:
+ for k in self.preserve_annos:
+ anno.copyanno(node, new_node, k)
return new_node
-def copy_clean(node):
- copier = CleanCopier()
- if isinstance(node, list):
- return [copier.visit(n) for n in node]
- elif isinstance(node, tuple):
- return tuple(copier.visit(n) for n in node)
- else:
- return copier.visit(node)
+def copy_clean(node, preserve_annos=None):
+ """Creates a deep copy of an AST.
+
+ The copy will not include fields that are prefixed by '__', with the
+ exception of user-specified annotations.
+
+ Args:
+ node: ast.AST
+ preserve_annos: Optional[Set[Hashable]], annotation keys to include in the
+ copy
+ Returns:
+ ast.AST
+ """
+ return CleanCopier(preserve_annos).copy(node)
class SymbolRenamer(gast.NodeTransformer):
@@ -78,7 +85,11 @@ class SymbolRenamer(gast.NodeTransformer):
def _process(self, node):
qn = anno.getanno(node, anno.Basic.QN)
if qn in self.name_map:
- return gast.Name(str(self.name_map[qn]), node.ctx, None)
+ new_node = gast.Name(str(self.name_map[qn]), node.ctx, None)
+ # All annotations get carried over.
+ for k in anno.keys(node):
+ anno.copyanno(node, new_node, k)
+ return new_node
return self.generic_visit(node)
def visit_Name(self, node):
@@ -92,6 +103,7 @@ class SymbolRenamer(gast.NodeTransformer):
def rename_symbols(node, name_map):
+ """Renames symbols in an AST. Requires qual_names annotations."""
renamer = SymbolRenamer(name_map)
if isinstance(node, list):
return [renamer.visit(n) for n in node]
@@ -101,6 +113,7 @@ def rename_symbols(node, name_map):
def keywords_to_dict(keywords):
+ """Converts a list of ast.keyword objects to a dict."""
keys = []
values = []
for kw in keywords:
@@ -110,10 +123,7 @@ def keywords_to_dict(keywords):
class PatternMatcher(gast.NodeVisitor):
- """Matches a node against a pattern represented by a node.
-
- The pattern may contain wildcards represented by the symbol '_'.
- """
+ """Matches a node against a pattern represented by a node."""
def __init__(self, pattern):
self.pattern = pattern
@@ -175,11 +185,98 @@ class PatternMatcher(gast.NodeVisitor):
if v != p:
return self.no_match()
-
def matches(node, pattern):
+ """Basic pattern matcher for AST.
+
+ The pattern may contain wildcards represented by the symbol '_'. A node
+ matches a pattern if, for every node in its tree, the pattern has either a
+ corresponding node of the same type or a wildcard Name node with id='_'.
+
+ Args:
+ node: ast.AST
+ pattern: ast.AST
+ Returns:
+ bool
+ """
if isinstance(pattern, str):
pattern = parser.parse_expression(pattern)
matcher = PatternMatcher(pattern)
matcher.visit(node)
return matcher.matches
+
+# TODO(mdan): Once we have error tracing, we may be able to just go to SSA.
+def apply_to_single_assignments(targets, values, apply_fn):
+ """Applies a function to each individual assignment.
+
+ This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.
+ It tries to break down the unpacking if possible. In effect, it is
+ equivalent to passing the assigned values in SSA form to apply_fn.
+
+ Examples:
+
+ The following will result in apply_fn(a, c), apply_fn(b, d):
+
+ a, b = c, d
+
+ The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):
+
+ a, b = c
+
+ The following will result in apply_fn(a, (b, c)):
+
+ a = b, c
+
+ The separate apply_fn callback allows callers to process each single
+ assignment individually.
+
+ Args:
+ targets: Union[List[ast.AST], Tuple[ast.AST, ...], ast.AST], should be
+ used with the targets field of an ast.Assign node
+ values: ast.AST
+ apply_fn: Callable[[ast.AST, ast.AST], None], called with the
+ respective nodes of each single assignment
+ """
+ if not isinstance(targets, (list, tuple)):
+ targets = (targets,)
+ for target in targets:
+ if isinstance(target, (gast.Tuple, gast.List)):
+ for i in range(len(target.elts)):
+ target_el = target.elts[i]
+ if isinstance(values, (gast.Tuple, gast.List)):
+ value_el = values.elts[i]
+ else:
+ idx = parser.parse_expression(str(i))
+ value_el = gast.Subscript(values, gast.Index(idx), ctx=gast.Load())
+ apply_to_single_assignments(target_el, value_el, apply_fn)
+ else:
+ apply_fn(target, values)
+
+
+def iter_fields(node):
+ for field in sorted(node._fields):
+ try:
+ yield getattr(node, field)
+ except AttributeError:
+ pass
+
+
+def iter_child_nodes(node):
+ for field in iter_fields(node):
+ if isinstance(field, gast.AST):
+ yield field
+ elif isinstance(field, list):
+ for item in field:
+ if isinstance(item, gast.AST):
+ yield item
+
+
+def parallel_walk(node_a, node_b):
+ todo_a = collections.deque([node_a])
+ todo_b = collections.deque([node_b])
+ while todo_a and todo_b:
+ node_a = todo_a.popleft()
+ node_b = todo_b.popleft()
+ todo_a.extend(iter_child_nodes(node_a))
+ todo_b.extend(iter_child_nodes(node_b))
+ yield node_a, node_b
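A brief usage sketch for three of the helpers documented above (rename_symbols, apply_to_single_assignments and matches). It mirrors the new tests rather than adding behavior; the printed forms rely on compiler.ast_to_source returning a (source, source_map) pair, as used elsewhere in this change:

    from __future__ import print_function

    from tensorflow.contrib.autograph.pyct import ast_util
    from tensorflow.contrib.autograph.pyct import compiler
    from tensorflow.contrib.autograph.pyct import parser
    from tensorflow.contrib.autograph.pyct import qual_names

    # rename_symbols requires qual_names annotations, per its docstring.
    node = qual_names.resolve(parser.parse_str('a + b'))
    node = ast_util.rename_symbols(
        node, {qual_names.QN('a'): qual_names.QN('renamed_a')})
    print(compiler.ast_to_source(node)[0].strip())  # renamed_a + b

    # apply_to_single_assignments breaks 'a, b = c' into per-target calls.
    assign = parser.parse_str('a, b = c').body[0]

    def report(target, value):
      # Invoked as report(a, c[0]), then report(b, c[1]).
      print(compiler.ast_to_source(target)[0].strip(), '<-',
            compiler.ast_to_source(value)[0].strip())

    ast_util.apply_to_single_assignments(assign.targets, assign.value, report)

    # matches supports '_' wildcards in the pattern.
    print(ast_util.matches(parser.parse_expression('f(1)'), 'f(_)'))  # True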
diff --git a/tensorflow/contrib/autograph/pyct/ast_util_test.py b/tensorflow/contrib/autograph/pyct/ast_util_test.py
index 3afa04a506..981e398b93 100644
--- a/tensorflow/contrib/autograph/pyct/ast_util_test.py
+++ b/tensorflow/contrib/autograph/pyct/ast_util_test.py
@@ -19,7 +19,10 @@ from __future__ import division
from __future__ import print_function
import ast
+import collections
+import textwrap
+from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import compiler
from tensorflow.contrib.autograph.pyct import parser
@@ -29,53 +32,66 @@ from tensorflow.python.platform import test
class AstUtilTest(test.TestCase):
- def test_rename_symbols(self):
- node = ast.Tuple([
- ast.Name('a', ast.Load()),
- ast.Name('b', ast.Load()),
- ast.Attribute(ast.Name('b', None), 'c', ast.Store()),
- ast.Attribute(
- ast.Attribute(ast.Name('b', None), 'c', ast.Load()), 'd', None)
- ], None)
+ def setUp(self):
+ super(AstUtilTest, self).setUp()
+ self._invocation_counts = collections.defaultdict(lambda: 0)
+
+ def test_rename_symbols_basic(self):
+ node = parser.parse_str('a + b')
+ node = qual_names.resolve(node)
+
+ node = ast_util.rename_symbols(
+ node, {qual_names.QN('a'): qual_names.QN('renamed_a')})
+
+ self.assertIsInstance(node.body[0].value.left.id, str)
+ source, _ = compiler.ast_to_source(node)
+ self.assertEqual(source.strip(), 'renamed_a + b')
+
+ def test_rename_symbols_attributes(self):
+ node = parser.parse_str('b.c = b.c.d')
node = qual_names.resolve(node)
+
node = ast_util.rename_symbols(
- node, {
- qual_names.QN('a'):
- qual_names.QN('renamed_a'),
- qual_names.QN(qual_names.QN('b'), attr='c'):
- qual_names.QN('renamed_b_c'),
- })
-
- self.assertEqual(node.elts[0].id, 'renamed_a')
- self.assertTrue(isinstance(node.elts[0].ctx, ast.Load))
- self.assertEqual(node.elts[1].id, 'b')
- self.assertEqual(node.elts[2].id, 'renamed_b_c')
- self.assertTrue(isinstance(node.elts[2].ctx, ast.Store))
- self.assertEqual(node.elts[3].value.id, 'renamed_b_c')
- self.assertTrue(isinstance(node.elts[3].value.ctx, ast.Load))
+ node, {qual_names.from_str('b.c'): qual_names.QN('renamed_b_c')})
+
+ source, _ = compiler.ast_to_source(node)
+ self.assertEqual(source.strip(), 'renamed_b_c = renamed_b_c.d')
+
+ def test_rename_symbols_annotations(self):
+ node = parser.parse_str('a[i]')
+ node = qual_names.resolve(node)
+ anno.setanno(node, 'foo', 'bar')
+ orig_anno = anno.getanno(node, 'foo')
+
+ node = ast_util.rename_symbols(node,
+ {qual_names.QN('a'): qual_names.QN('b')})
+
+ self.assertIs(anno.getanno(node, 'foo'), orig_anno)
def test_copy_clean(self):
- ret = ast.Return(
- ast.BinOp(
- op=ast.Add(),
- left=ast.Name(id='a', ctx=ast.Load()),
- right=ast.Num(1)))
- setattr(ret, '__foo', 'bar')
- node = ast.FunctionDef(
- name='f',
- args=ast.arguments(
- args=[ast.Name(id='a', ctx=ast.Param())],
- vararg=None,
- kwarg=None,
- defaults=[]),
- body=[ret],
- decorator_list=[],
- returns=None)
+ node = parser.parse_str(
+ textwrap.dedent("""
+ def f(a):
+ return a + 1
+ """))
+ setattr(node.body[0], '__foo', 'bar')
new_node = ast_util.copy_clean(node)
- self.assertFalse(node is new_node)
- self.assertFalse(ret is new_node.body[0])
+ self.assertIsNot(new_node, node)
+ self.assertIsNot(new_node.body[0], node.body[0])
self.assertFalse(hasattr(new_node.body[0], '__foo'))
+ def test_copy_clean_preserves_annotations(self):
+ node = parser.parse_str(
+ textwrap.dedent("""
+ def f(a):
+ return a + 1
+ """))
+ anno.setanno(node.body[0], 'foo', 'bar')
+ anno.setanno(node.body[0], 'baz', 1)
+ new_node = ast_util.copy_clean(node, preserve_annos={'foo'})
+ self.assertEqual(anno.getanno(new_node.body[0], 'foo'), 'bar')
+ self.assertFalse(anno.hasanno(new_node.body[0], 'baz'))
+
def test_keywords_to_dict(self):
keywords = parser.parse_expression('f(a=b, c=1, d=\'e\')').keywords
d = ast_util.keywords_to_dict(keywords)
@@ -113,6 +129,52 @@ class AstUtilTest(test.TestCase):
self.assertNoMatch('super(Foo, self).__init__()',
'super(Bar, _).__init__(_)')
+ def _mock_apply_fn(self, target, source):
+ target, _ = compiler.ast_to_source(target)
+ source, _ = compiler.ast_to_source(source)
+ self._invocation_counts[(target.strip(), source.strip())] += 1
+
+ def test_apply_to_single_assignments_dynamic_unpack(self):
+ node = parser.parse_str('a, b, c = d')
+ node = node.body[0]
+ ast_util.apply_to_single_assignments(node.targets, node.value,
+ self._mock_apply_fn)
+ self.assertDictEqual(self._invocation_counts, {
+ ('a', 'd[0]'): 1,
+ ('b', 'd[1]'): 1,
+ ('c', 'd[2]'): 1,
+ })
+
+ def test_apply_to_single_assignments_static_unpack(self):
+ node = parser.parse_str('a, b, c = d, e, f')
+ node = node.body[0]
+ ast_util.apply_to_single_assignments(node.targets, node.value,
+ self._mock_apply_fn)
+ self.assertDictEqual(self._invocation_counts, {
+ ('a', 'd'): 1,
+ ('b', 'e'): 1,
+ ('c', 'f'): 1,
+ })
+
+ def test_parallel_walk(self):
+ ret = ast.Return(
+ ast.BinOp(
+ op=ast.Add(),
+ left=ast.Name(id='a', ctx=ast.Load()),
+ right=ast.Num(1)))
+ node = ast.FunctionDef(
+ name='f',
+ args=ast.arguments(
+ args=[ast.Name(id='a', ctx=ast.Param())],
+ vararg=None,
+ kwarg=None,
+ defaults=[]),
+ body=[ret],
+ decorator_list=[],
+ returns=None)
+ for child_a, child_b in ast_util.parallel_walk(node, node):
+ self.assertEqual(child_a, child_b)
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/autograph/pyct/cfg.py b/tensorflow/contrib/autograph/pyct/cfg.py
new file mode 100644
index 0000000000..25fec7fd53
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/cfg.py
@@ -0,0 +1,817 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Control flow graph (CFG) structure for Python AST representation.
+
+The CFG is a digraph with edges representing valid control flow. Each
+ node is associated with exactly one AST node, but not all AST nodes have
+a corresponding CFG counterpart.
+
+Once built, the CFG itself is immutable, but the values it holds need not be;
+they are usually annotated with information extracted by walking the graph.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+from enum import Enum
+
+# pylint:disable=g-bad-import-order
+import gast
+# pylint:enable=g-bad-import-order
+
+from tensorflow.contrib.autograph.pyct import compiler
+
+
+class Node(object):
+ """A node in the CFG.
+
+ Although new instances of this class are mutable, the objects that a user
+ finds in the CFG are typically not.
+
+ Each node holds pointers to its neighbors, allowing efficient walking of
+ the graph in both forward and reverse order. The following property
+ holds for all nodes: "child in node.next" iff "node in child.prev".
+
+ Attributes:
+ next: FrozenSet[Node, ...], the nodes that follow this node, in control
+ flow order
+ prev: FrozenSet[Node, ...], the nodes that precede this node, in reverse
+ control flow order
+ ast_node: ast.AST, the AST node corresponding to this CFG node
+ """
+
+ def __init__(self, next_, prev, ast_node):
+ self.next = next_
+ self.prev = prev
+ self.ast_node = ast_node
+
+ def freeze(self):
+ self.next = frozenset(self.next)
+ self.prev = frozenset(self.prev)
+
+ def __repr__(self):
+ if isinstance(self.ast_node, gast.FunctionDef):
+ return 'def %s' % self.ast_node.name
+ elif isinstance(self.ast_node, gast.withitem):
+ source, _ = compiler.ast_to_source(self.ast_node.context_expr)
+ return source.strip()
+ source, _ = compiler.ast_to_source(self.ast_node)
+ return source.strip()
+
+
+class Graph(
+ collections.namedtuple(
+ 'Graph',
+ ['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])):
+ """A Control Flow Graph.
+
+ The CFG maintains an index to allow looking up a CFG node by the AST node to
+ which it is associated. The index can also be enumerated in top-down, depth
+ first order.
+
+ Walking the graph in forward or reverse order is supported by double
+ parent-child links.
+
+ Note: the error nodes are not wired to their corresponding finally guards,
+ because these are shared, and wiring them would create a reverse path from
+ normal control flow into the error nodes, which we want to avoid.
+
+ The graph also maintains edges corresponding to higher level statements
+ like for-else loops. A node is considered a successor of a statement if there
+ is an edge from a node that is lexically a child of that statement to a node
+ that is not. Statement predecessors are analogously defined.
+
+ Attributes:
+ entry: Node, the entry node
+ exit: FrozenSet[Node, ...], the exit nodes
+ error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised
+ error (errors propagated from function calls are not accounted)
+ index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG
+ node
+ stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
+ nodes to their predecessor CFG nodes
+ stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
+ nodes to their successor CFG nodes
+ """
+
+ def __repr__(self):
+ result = 'digraph CFG {\n'
+ for node in self.index.values():
+ result += ' %s [label="%s"];\n' % (id(node), node)
+ for node in self.index.values():
+ for next_ in node.next:
+ result += ' %s -> %s;\n' % (id(node), id(next_))
+ result += '}'
+ return result
+
+
+class _WalkMode(Enum):
+ FORWARD = 1
+ REVERSE = 2
+
+
+# TODO(mdan): Rename to DataFlowAnalyzer.
+# TODO(mdan): Consider specializations that use gen/kill/transfer abstractions.
+class GraphVisitor(object):
+ """Base class for a CFG visitors.
+
+ This implementation is not thread safe.
+
+ The visitor has some facilities to simplify dataflow analyses. In particular,
+ it allows revisiting the nodes at the decision of the subclass. This can be
+ used to visit the graph until the state reaches a fixed point.
+
+ For more details on dataflow analysis, see
+ https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf
+
+ Note: the literature generally suggests visiting successor nodes only when the
+ state of the current node changed, regardless of whether that successor has
+ ever been visited. This implementation visits every successor at least once.
+
+ Attributes:
+ graph: Graph
+ in_: Dict[Node, Any], stores node-keyed state during a visit
+ out: Dict[Node, Any], stores node-keyed state during a visit
+ """
+
+ def __init__(self, graph):
+ self.graph = graph
+ self.reset()
+
+ def init_state(self, node):
+ """State initialization function. Optional to overload.
+
+ An in/out state slot will be created for each node in the graph. Subclasses
+ must overload this to control what that is initialized to.
+
+ Args:
+ node: Node
+ """
+ raise NotImplementedError('Subclasses must implement this.')
+
+ # TODO(mdan): Rename to flow?
+ def visit_node(self, node):
+ """Visitor function.
+
+ Args:
+ node: Node
+ Returns:
+ bool, whether the node should be revisited; a subclass that always
+ returns False still visits every reachable node at least once
+ """
+ raise NotImplementedError('Subclasses must implement this.')
+
+ def reset(self):
+ self.in_ = {
+ node: self.init_state(node) for node in self.graph.index.values()
+ }
+ self.out = {
+ node: self.init_state(node) for node in self.graph.index.values()
+ }
+
+ def _visit_internal(self, mode):
+ """Visits the CFG, depth-first."""
+ assert mode in (_WalkMode.FORWARD, _WalkMode.REVERSE)
+ if mode == _WalkMode.FORWARD:
+ open_ = [self.graph.entry]
+ elif mode == _WalkMode.REVERSE:
+ open_ = list(self.graph.exit)
+ closed = set()
+
+ while open_:
+ node = open_.pop(0)
+ closed.add(node)
+
+ should_revisit = self.visit_node(node)
+
+ if mode == _WalkMode.FORWARD:
+ children = node.next
+ elif mode == _WalkMode.REVERSE:
+ children = node.prev
+
+ for next_ in children:
+ if should_revisit or next_ not in closed:
+ open_.append(next_)
+
+ def visit_forward(self):
+ self._visit_internal(_WalkMode.FORWARD)
+
+ def visit_reverse(self):
+ self._visit_internal(_WalkMode.REVERSE)
+
+
+class GraphBuilder(object):
+ """Builder that constructs a CFG from a given AST.
+
+ This GraphBuilder facilitates constructing the DAG that forms the CFG when
+ nodes are supplied in lexical order (i.e., top-down, depth first). Under these
+ conditions, it supports building patterns found in typical structured
+ programs.
+
+ This builder ignores the flow generated by exceptions, which are assumed to
+ always be catastrophic and present purely for diagnostic purposes (e.g. to
+ print debug information). Statements like raise and try/catch sections are
+ allowed and will generate control flow edges, but ordinaty statements are
+ assumed not to raise exceptions.
+
+ Finally sections are also correctly interleaved between break/continue/return
+ nodes and their subsequent statements.
+
+ Important concepts:
+ * nodes - nodes refer to CFG nodes; AST nodes are qualified explicitly
+ * leaf set - since the graph is constructed gradually, a leaf set maintains
+ the CFG nodes that will precede the node that the builder expects to
+ receive next; when an ordinary node is added, it is connected to the
+ existing leaves and it in turn becomes the new leaf
+ * jump nodes - nodes that should generate edges other than what
+ ordinary nodes would; these correspond to break, continue and return
+ statements
+ * sections - logical delimiters for subgraphs that require special
+ edges; there are various types of sections, each admitting various
+ types of jump nodes; sections are identified by their corresponding AST
+ node
+ """
+
+ # TODO(mdan): Perhaps detail this in a markdown doc.
+ # TODO(mdan): Add exception support.
+
+ def __init__(self, parent_ast_node):
+ self.reset()
+ self.parent = parent_ast_node
+
+ def reset(self):
+ """Resets the state of this factory."""
+ self.head = None
+ self.errors = set()
+ self.node_index = collections.OrderedDict()
+
+ # TODO(mdan): Too many primitives. Use classes.
+ self.leaves = set()
+
+ # Note: This mechanism requires that nodes are added in lexical order (top
+ # to bottom, depth first).
+ self.active_stmts = set()
+ self.owners = {} # type: Dict[Node, FrozenSet] # node -> enclosing stmts
+ self.forward_edges = set() # type: Set[Tuple[Node, Node]] # (from, to)
+
+ self.finally_sections = {}
+ # Dict values represent (entry, exits)
+ self.finally_section_subgraphs = {
+ } # type: Dict[ast.AST, Tuple[Node, Set[Node]]]
+ # Whether the guard section can be reached from the statement that precedes
+ # it.
+ self.finally_section_has_direct_flow = {}
+ # Finally sections that await their first node.
+ self.pending_finally_sections = set()
+
+ # Exit jumps keyed by the section they affect.
+ self.exits = {}
+
+ # The entry of loop sections, keyed by the section.
+ self.section_entry = {}
+ # Continue jumps keyed by the section they affect.
+ self.continues = {}
+
+ # The entry of conditional sections, keyed by the section.
+ self.cond_entry = {}
+ # Lists of leaf nodes corresponding to each branch in the section.
+ self.cond_leaves = {}
+
+ def _connect_nodes(self, first, second):
+ """Connects nodes to signify that control flows from first to second.
+
+ Args:
+ first: Union[Set[Node, ...], Node]
+ second: Node
+ """
+ if isinstance(first, Node):
+ first.next.add(second)
+ second.prev.add(first)
+ self.forward_edges.add((first, second))
+ else:
+ for node in first:
+ self._connect_nodes(node, second)
+
+ def _add_new_node(self, ast_node):
+ """Grows the graph by adding a CFG node following the current leaves."""
+ if ast_node in self.node_index:
+ raise ValueError('%s added twice' % ast_node)
+ node = Node(next_=set(), prev=set(), ast_node=ast_node)
+ self.node_index[ast_node] = node
+ self.owners[node] = frozenset(self.active_stmts)
+
+ if self.head is None:
+ self.head = node
+
+ for leaf in self.leaves:
+ self._connect_nodes(leaf, node)
+
+ # If any finally section awaits its first node, populate it.
+ for section_id in self.pending_finally_sections:
+ self.finally_section_subgraphs[section_id][0] = node
+ self.pending_finally_sections = set()
+
+ return node
+
+ def begin_statement(self, stmt):
+ """Marks the beginning of a statement.
+
+ Args:
+ stmt: Hashable, a key by which the statement can be identified in
+ the CFG's stmt_prev and stmt_next attributes
+ """
+ self.active_stmts.add(stmt)
+
+ def end_statement(self, stmt):
+ """Marks the end of a statement.
+
+ Args:
+ stmt: Hashable, a key by which the statement can be identified in
+ the CFG's stmt_prev and stmt_next attributes; must match a key
+ previously passed to begin_statement.
+ """
+ self.active_stmts.remove(stmt)
+
+ def add_ordinary_node(self, ast_node):
+ """Grows the graph by adding an ordinary CFG node.
+
+ Ordinary nodes are followed by the next node, in lexical order, that is,
+ they become the new leaf set.
+
+ Args:
+ ast_node: ast.AST
+ Returns:
+ Node
+ """
+ node = self._add_new_node(ast_node)
+ self.leaves = set((node,))
+ return node
+
+ def _add_jump_node(self, ast_node, guards):
+ """Grows the graph by adding a jump node.
+
+ Jump nodes are added to the current leaf set, and the leaf set becomes
+ empty. If the jump node is the last in a cond section, then it may be added
+ back to the leaf set by a separate mechanism.
+
+ Args:
+ ast_node: ast.AST
+ guards: Tuple[ast.AST, ...], the finally sections active for this node
+ Returns:
+ Node
+ """
+ node = self._add_new_node(ast_node)
+ self.leaves = set()
+ # The guards themselves may not yet be complete, and will be wired later.
+ self.finally_sections[node] = guards
+ return node
+
+ def _connect_jump_to_finally_sections(self, node):
+ """Connects a jump node to the finally sections protecting it."""
+ cursor = set((node,))
+ for guard_section_id in self.finally_sections[node]:
+ guard_begin, guard_ends = self.finally_section_subgraphs[guard_section_id]
+ self._connect_nodes(cursor, guard_begin)
+ cursor = guard_ends
+ del self.finally_sections[node]
+ # TODO(mdan): Should garbage-collect finally_section_subgraphs.
+ return cursor
+
+ def add_exit_node(self, ast_node, section_id, guards):
+ """Grows the graph by adding an exit node.
+
+ This node becomes an exit for the current section.
+
+ Args:
+ ast_node: ast.AST
+ section_id: Hashable, the node for which ast_node should be considered
+ to be an exit node
+ guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
+ """
+ node = self._add_jump_node(ast_node, guards)
+ self.exits[section_id].add(node)
+
+ def add_continue_node(self, ast_node, section_id, guards):
+ """Grows the graph by adding a reentry node.
+
+ This node causes control flow to go back to the loop section's entry.
+
+ Args:
+ ast_node: ast.AST
+ section_id: Hashable, the loop section for which ast_node should be
+ considered a continue (reentry) node
+ guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
+ """
+ node = self._add_jump_node(ast_node, guards)
+ self.continues[section_id].add(node)
+
+ def add_error_node(self, ast_node, guards):
+ """Grows the graph by adding an error node.
+
+ This node becomes an exit for the entire graph.
+
+ Args:
+ ast_node: ast.AST
+ guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
+ """
+ node = self._add_jump_node(ast_node, guards)
+ self.errors.add(node)
+ self.leaves = set()
+
+ def enter_section(self, section_id):
+ """Enters a regular section.
+
+ Regular sections admit exit jumps, which end the section.
+
+ Args:
+ section_id: Hashable, the same node that will be used in calls to the
+ ast_node arg passed to add_exit_node
+ """
+ assert section_id not in self.exits
+ self.exits[section_id] = set()
+
+ def exit_section(self, section_id):
+ """Exits a regular section."""
+
+ # Exits are jump nodes, which may be protected.
+ for exit_ in self.exits[section_id]:
+ self.leaves |= self._connect_jump_to_finally_sections(exit_)
+
+ del self.exits[section_id]
+
+ def enter_loop_section(self, section_id, entry_node):
+ """Enters a loop section.
+
+ Loop sections define an entry node. The end of the section always flows back
+ to the entry node. These admit continue jump nodes which also flow to the
+ entry node.
+
+ Args:
+ section_id: Hashable, the same node that will be used in calls to the
+ ast_node arg passed to add_continue_node
+ entry_node: ast.AST, the entry node into the loop (e.g. the test node
+ for while loops)
+ """
+ assert section_id not in self.section_entry
+ assert section_id not in self.continues
+ self.continues[section_id] = set()
+ node = self.add_ordinary_node(entry_node)
+ self.section_entry[section_id] = node
+
+ def exit_loop_section(self, section_id):
+ """Exits a loop section."""
+ self._connect_nodes(self.leaves, self.section_entry[section_id])
+
+ # continues are jump nodes, which may be protected.
+ for reentry in self.continues[section_id]:
+ guard_ends = self._connect_jump_to_finally_sections(reentry)
+ self._connect_nodes(guard_ends, self.section_entry[section_id])
+
+ # Loop nodes always loop back.
+ self.leaves = set((self.section_entry[section_id],))
+
+ del self.continues[section_id]
+ del self.section_entry[section_id]
+
+ def enter_cond_section(self, section_id):
+ """Enters a conditional section.
+
+ Conditional sections define an entry node, and one or more branches.
+
+ Args:
+ section_id: Hashable, the same node that will be used in calls to the
+ section_id arg passed to new_cond_branch
+ """
+
+ assert section_id not in self.cond_entry
+ assert section_id not in self.cond_leaves
+ self.cond_leaves[section_id] = []
+
+ def new_cond_branch(self, section_id):
+ """Begins a new branch in a cond section."""
+ assert section_id in self.cond_leaves
+
+ if section_id in self.cond_entry:
+ # Subsequent splits move back to the split point, and memorize the
+ # current leaves.
+ self.cond_leaves[section_id].append(self.leaves)
+ self.leaves = self.cond_entry[section_id]
+ else:
+ # If this is the first time we split a section, just remember the split
+ # point.
+ self.cond_entry[section_id] = self.leaves
+
+ def exit_cond_section(self, section_id):
+ """Exits a conditional section."""
+ for split in self.cond_leaves[section_id]:
+ self.leaves |= split
+ del self.cond_entry[section_id]
+ del self.cond_leaves[section_id]
+
+ def enter_finally_section(self, section_id):
+ """Enters a finally section."""
+ # TODO(mdan): This, not the caller, should track the active sections.
+ self.finally_section_subgraphs[section_id] = [None, None]
+ if self.leaves:
+ self.finally_section_has_direct_flow[section_id] = True
+ else:
+ self.finally_section_has_direct_flow[section_id] = False
+ self.pending_finally_sections.add(section_id)
+
+ def exit_finally_section(self, section_id):
+ """Exits a finally section."""
+ assert section_id not in self.pending_finally_sections, 'Empty finally?'
+ self.finally_section_subgraphs[section_id][1] = self.leaves
+ # If the guard can only be reached by a jump, then it will not flow
+ # into the statement that follows it.
+ if not self.finally_section_has_direct_flow[section_id]:
+ self.leaves = set()
+ del self.finally_section_has_direct_flow[section_id]
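+
+ # A sketch of the finally section protocol for `try: body finally: cleanup`,
+ # as driven by visit_Try below (hypothetical builder instance `b`):
+ #
+ #   ...visit body...               # may register guarded jump nodes
+ #   b.enter_finally_section(node)
+ #   ...visit cleanup...
+ #   b.exit_finally_section(node)   # guarded jumps route through cleanup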
+
+ def build(self):
+ """Returns the CFG accumulated so far and resets the builder.
+
+ Returns:
+ Graph
+ """
+ # Freeze the nodes.
+ for node in self.node_index.values():
+ node.freeze()
+
+ # Build the statement edges.
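+ # stmt_next/stmt_prev map whole statements (e.g. an entire If node) to the
+ # CFG nodes that immediately follow/precede them. An edge whose source is
+ # owned by a statement but whose target is not marks an exit from that
+ # statement; the reverse marks an entry.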
+ stmt_next = {}
+ stmt_prev = {}
+ for node, _ in self.forward_edges:
+ for stmt in self.owners[node]:
+ if stmt not in stmt_next:
+ stmt_next[stmt] = set()
+ if stmt not in stmt_prev:
+ stmt_prev[stmt] = set()
+ for first, second in self.forward_edges:
+ stmts_exited = self.owners[first] - self.owners[second]
+ for stmt in stmts_exited:
+ stmt_next[stmt].add(second)
+ stmts_entered = self.owners[second] - self.owners[first]
+ for stmt in stmts_entered:
+ stmt_prev[stmt].add(first)
+ for stmt in stmt_next:
+ stmt_next[stmt] = frozenset(stmt_next[stmt])
+ for stmt in stmt_prev:
+ stmt_prev[stmt] = frozenset(stmt_prev[stmt])
+
+ # Construct the final graph object.
+ result = Graph(
+ entry=self.head,
+ exit=self.leaves,
+ error=self.errors,
+ index=self.node_index,
+ stmt_prev=stmt_prev,
+ stmt_next=stmt_next)
+
+ # Reset the state.
+ self.reset()
+
+ return result
+
+
+class AstToCfg(gast.NodeVisitor):
+ """Converts an AST to CFGs.
+
+ A separate CFG will be constructed for each function.
+ """
+
+ def __init__(self):
+ super(AstToCfg, self).__init__()
+
+ self.builder_stack = []
+ self.builder = None
+ self.cfgs = {}
+
+ self.lexical_scopes = []
+
+ def _enter_lexical_scope(self, node):
+ self.lexical_scopes.append(node)
+
+ def _exit_lexical_scope(self, node):
+ leaving_node = self.lexical_scopes.pop()
+ assert node == leaving_node
+
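+ # Illustration of _get_enclosing_scopes below: for a `break` inside
+ # `while ...: try: ... break ...`, the enclosing lexical scopes are
+ # [FunctionDef, While, Try]; calling it with include=(gast.Try,) and
+ # stop_at=(gast.While, gast.For) returns the While node along with the
+ # Try nodes that guard the jump.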
+ def _get_enclosing_scopes(self, include, stop_at):
+ included = []
+ for node in reversed(self.lexical_scopes):
+ if isinstance(node, include):
+ included.append(node)
+ if isinstance(node, stop_at):
+ return node, included
+ return None, included
+
+ def _process_basic_statement(self, node):
+ self.generic_visit(node)
+ self.builder.add_ordinary_node(node)
+
+ def _process_exit_statement(self, node, *exits_nodes_of_type):
+ # Note: this is safe because we process functions separately.
+ try_node, guards = self._get_enclosing_scopes(
+ include=(gast.Try,),
+ stop_at=tuple(exits_nodes_of_type),
+ )
+ if try_node is None:
+ raise ValueError(
+ '%s is not enclosed by any of %s' % (node, exits_nodes_of_type))
+ self.builder.add_exit_node(node, try_node, guards)
+
+ def _process_continue_statement(self, node, *loops_to_nodes_of_type):
+ # Note: this is safe because we process functions separately.
+ try_node, guards = self._get_enclosing_scopes(
+ include=(gast.Try,),
+ stop_at=tuple(loops_to_nodes_of_type),
+ )
+ if try_node is None:
+ raise ValueError('%s is not enclosed by any of %s' %
+ (node, loops_to_nodes_of_type))
+ self.builder.add_continue_node(node, try_node, guards)
+
+ def visit_FunctionDef(self, node):
+ # We also keep the FunctionDef node in the CFG. This allows us to determine
+ # things like reaching definitions via closure. Note that the function body
+ # will be stored in a separate graph, because function definitions are not
+ # the same as function calls.
+ if self.builder is not None:
+ self.builder.add_ordinary_node(node)
+
+ self.builder_stack.append(self.builder)
+ self.builder = GraphBuilder(node)
+
+ self._enter_lexical_scope(node)
+ self.builder.enter_section(node)
+
+ self._process_basic_statement(node.args)
+ for stmt in node.body:
+ self.visit(stmt)
+
+ self.builder.exit_section(node)
+ self._exit_lexical_scope(node)
+
+ self.cfgs[node] = self.builder.build()
+ self.builder = self.builder_stack.pop()
+
+ def visit_Lambda(self, node):
+ # TODO(mdan): Treat like FunctionDef? That would be a separate CFG.
+ raise NotImplementedError()
+
+ def visit_Return(self, node):
+ self._process_exit_statement(node, gast.FunctionDef)
+
+ def visit_Expr(self, node):
+ self._process_basic_statement(node)
+
+ def visit_Assign(self, node):
+ self._process_basic_statement(node)
+
+ def visit_AnnAssign(self, node):
+ self._process_basic_statement(node)
+
+ def visit_AugAssign(self, node):
+ self._process_basic_statement(node)
+
+ def visit_Print(self, node):
+ self._process_basic_statement(node)
+
+ def visit_Raise(self, node):
+ try_node, guards = self._get_enclosing_scopes(
+ include=(gast.Try,),
+ stop_at=(gast.FunctionDef,),
+ )
+ if try_node is None:
+ raise ValueError('%s is not enclosed by any FunctionDef' % node)
+ self.builder.add_error_node(node, guards)
+
+ def visit_Assert(self, node):
+ # Ignoring the effect of exceptions.
+ self._process_basic_statement(node)
+
+ def visit_Delete(self, node):
+ self._process_basic_statement(node)
+
+ def visit_If(self, node):
+ # No need to track ifs as lexical scopes, for now.
+ # Lexical scopes are generally tracked in order to be able to resolve the
+ # targets of jump statements like break/continue/etc. Since there is no
+ # statement that can interrupt a conditional, we don't need to track their
+ # lexical scope. That may change in the future.
+ self.builder.begin_statement(node)
+
+ self.builder.enter_cond_section(node)
+ self._process_basic_statement(node.test)
+
+ self.builder.new_cond_branch(node)
+ for stmt in node.body:
+ self.visit(stmt)
+
+ self.builder.new_cond_branch(node)
+ for stmt in node.orelse:
+ self.visit(stmt)
+
+ self.builder.exit_cond_section(node)
+ self.builder.end_statement(node)
+
+ def visit_While(self, node):
+ self.builder.begin_statement(node)
+ self._enter_lexical_scope(node)
+
+ self.builder.enter_section(node)
+
+ self.builder.enter_loop_section(node, node.test)
+ for stmt in node.body:
+ self.visit(stmt)
+ self.builder.exit_loop_section(node)
+
+ # Note: although the orelse is technically part of the loop node,
+ # the statements inside it don't affect the loop itself. For example, a
+ # break in the loop's orelse will affect the parent loop, not this one.
+ self._exit_lexical_scope(node)
+
+ for stmt in node.orelse:
+ self.visit(stmt)
+
+ self.builder.exit_section(node)
+ self.builder.end_statement(node)
+
+ def visit_For(self, node):
+ self.builder.begin_statement(node)
+ self._enter_lexical_scope(node)
+
+ self.builder.enter_section(node)
+
+ # TODO(mdan): Strictly speaking, this should be node.target + node.iter.
+ # A blind dataflow analysis would have to process both node.target and
+ # node.iter to properly process read and write access.
+ self.builder.enter_loop_section(node, node.iter)
+ for stmt in node.body:
+ self.visit(stmt)
+ self.builder.exit_loop_section(node)
+
+ # Note: although the orelse is technically part of the loop node, its
+ # statements don't count as part of the loop body. For example, a break
+ # in the loop's orelse will affect the parent loop, not the current one.
+ self._exit_lexical_scope(node)
+
+ for stmt in node.orelse:
+ self.visit(stmt)
+
+ self.builder.exit_section(node)
+ self.builder.end_statement(node)
+
+ def visit_Break(self, node):
+ self._process_exit_statement(node, gast.While, gast.For)
+
+ def visit_Continue(self, node):
+ self._process_continue_statement(node, gast.While, gast.For)
+
+ def visit_Try(self, node):
+ self._enter_lexical_scope(node)
+
+ for stmt in node.body:
+ self.visit(stmt)
+ # Unlike loops, the orelse is a simple continuation of the body.
+ for stmt in node.orelse:
+ self.visit(stmt)
+
+ if node.handlers:
+ # TODO(mdan): Should we still support bare try/except? Might be confusing.
+ raise NotImplementedError('exceptions are not yet supported')
+
+ self._exit_lexical_scope(node)
+
+ self.builder.enter_finally_section(node)
+ for stmt in node.finalbody:
+ self.visit(stmt)
+ self.builder.exit_finally_section(node)
+
+ def visit_With(self, node):
+ # TODO(mdan): Mark the context manager's exit call as exit guard.
+ for item in node.items:
+ self._process_basic_statement(item)
+ for stmt in node.body:
+ self.visit(stmt)
+
+
+def build(node):
+ visitor = AstToCfg()
+ visitor.visit(node)
+ return visitor.cfgs
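+
+
+# Usage sketch, mirroring cfg_test.py below (`parser` is this package's
+# parser module):
+#
+#   node, _ = parser.parse_entity(some_function)
+#   cfgs = build(node)
+#   graph = cfgs[node.body[0]]  # one Graph per FunctionDef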
diff --git a/tensorflow/contrib/autograph/pyct/cfg_test.py b/tensorflow/contrib/autograph/pyct/cfg_test.py
new file mode 100644
index 0000000000..9d0a85d615
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/cfg_test.py
@@ -0,0 +1,969 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for cfg module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import parser
+from tensorflow.python.platform import test
+
+
+class CountingVisitor(cfg.GraphVisitor):
+
+ def __init__(self, graph):
+ super(CountingVisitor, self).__init__(graph)
+ self.counts = {}
+
+ def init_state(self, _):
+ return None
+
+ def visit_node(self, node):
+ self.counts[node.ast_node] = self.counts.get(node.ast_node, 0) + 1
+ return False # visit only once
+
+
+class GraphVisitorTest(test.TestCase):
+
+ def _build_cfg(self, fn):
+ node, _ = parser.parse_entity(fn)
+ cfgs = cfg.build(node)
+ return cfgs, node
+
+ def test_basic_coverage_forward(self):
+
+ def test_fn(a):
+ while a > 0:
+ a = 1
+ break
+ return a # pylint:disable=unreachable
+ a = 2
+
+ graphs, node = self._build_cfg(test_fn)
+ graph, = graphs.values()
+ visitor = CountingVisitor(graph)
+ visitor.visit_forward()
+ fn_node = node.body[0]
+
+ self.assertEqual(visitor.counts[fn_node.args], 1)
+ self.assertEqual(visitor.counts[fn_node.body[0].test], 1)
+ self.assertEqual(visitor.counts[fn_node.body[0].body[0]], 1)
+ self.assertEqual(visitor.counts[fn_node.body[0].body[1]], 1)
+ # The return node should be unreachable in forward direction.
+ self.assertTrue(fn_node.body[0].body[2] not in visitor.counts)
+ self.assertEqual(visitor.counts[fn_node.body[1]], 1)
+
+ def test_basic_coverage_reverse(self):
+
+ def test_fn(a):
+ while a > 0:
+ a = 1
+ break
+ return a # pylint:disable=unreachable
+ a = 2
+
+ graphs, node = self._build_cfg(test_fn)
+ graph, = graphs.values()
+ visitor = CountingVisitor(graph)
+ visitor.visit_reverse()
+ fn_node = node.body[0]
+
+ self.assertEqual(visitor.counts[fn_node.args], 1)
+ self.assertEqual(visitor.counts[fn_node.body[0].test], 1)
+ self.assertEqual(visitor.counts[fn_node.body[0].body[0]], 1)
+ self.assertEqual(visitor.counts[fn_node.body[0].body[1]], 1)
+ self.assertEqual(visitor.counts[fn_node.body[0].body[2]], 1)
+ self.assertEqual(visitor.counts[fn_node.body[1]], 1)
+
+
+class AstToCfgTest(test.TestCase):
+
+ def _build_cfg(self, fn):
+ node, _ = parser.parse_entity(fn)
+ cfgs = cfg.build(node)
+ return cfgs
+
+ def _repr_set(self, node_set):
+ return frozenset(repr(n) for n in node_set)
+
+ def _as_set(self, elements):
+ if elements is None:
+ return frozenset()
+ elif isinstance(elements, str):
+ return frozenset((elements,))
+ else:
+ return frozenset(elements)
+
+ def assertGraphMatches(self, graph, edges):
+ """Tests whether the CFG contains the specified edges."""
+ for prev, node_repr, next_ in edges:
+ matched = False
+ for cfg_node in graph.index.values():
+ if repr(cfg_node) == node_repr:
+ if (self._as_set(prev) == frozenset(map(repr, cfg_node.prev)) and
+ self._as_set(next_) == frozenset(map(repr, cfg_node.next))):
+ matched = True
+ break
+ if not matched:
+ self.fail(
+ 'match failed for node "%s" in graph:\n%s' % (node_repr, graph))
+
+ def assertStatementEdges(self, graph, edges):
+ """Tests whether the CFG contains the specified statement edges."""
+ for prev_node_reprs, node_repr, next_node_reprs in edges:
+ matched = False
+ partial_matches = []
+ self.assertSetEqual(
+ frozenset(graph.stmt_next.keys()), frozenset(graph.stmt_prev.keys()))
+ for stmt_ast_node in graph.stmt_next:
+ ast_repr = '%s:%s' % (stmt_ast_node.__class__.__name__,
+ stmt_ast_node.lineno)
+ if ast_repr == node_repr:
+ actual_next = frozenset(map(repr, graph.stmt_next[stmt_ast_node]))
+ actual_prev = frozenset(map(repr, graph.stmt_prev[stmt_ast_node]))
+ partial_matches.append((actual_prev, node_repr, actual_next))
+ if (self._as_set(prev_node_reprs) == actual_prev and
+ self._as_set(next_node_reprs) == actual_next):
+ matched = True
+ break
+ if not matched:
+ self.fail('edges mismatch for %s: %s' % (node_repr, partial_matches))
+
+ def test_straightline(self):
+
+ def test_fn(a):
+ a += 1
+ a = 2
+ a = 3
+ return
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (None, 'a', 'a += 1'),
+ ('a += 1', 'a = 2', 'a = 3'),
+ ('a = 2', 'a = 3', 'return'),
+ ('a = 3', 'return', None),
+ ),
+ )
+
+ def test_straightline_no_return(self):
+
+ def test_fn(a, b):
+ a = b + 1
+ a += max(a)
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (None, 'a, b', 'a = b + 1'),
+ ('a = b + 1', 'a += max(a)', None),
+ ),
+ )
+
+ def test_unreachable_code(self):
+
+ def test_fn(a):
+ return
+ a += 1 # pylint:disable=unreachable
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (None, 'a', 'return'),
+ ('a', 'return', None),
+ (None, 'a += 1', None),
+ ),
+ )
+
+ def test_if_straightline(self):
+
+ def test_fn(a):
+ if a > 0:
+ a = 1
+ else:
+ a += -1
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (None, 'a', '(a > 0)'),
+ ('(a > 0)', 'a = 1', None),
+ ('(a > 0)', 'a += -1', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (('a', 'If:2', None),),
+ )
+
+ def test_branch_nested(self):
+
+ def test_fn(a):
+ if a > 0:
+ if a > 1:
+ a = 1
+ else:
+ a = 2
+ else:
+ if a > 2:
+ a = 3
+ else:
+ a = 4
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (None, 'a', '(a > 0)'),
+ ('a', '(a > 0)', ('(a > 1)', '(a > 2)')),
+ ('(a > 0)', '(a > 1)', ('a = 1', 'a = 2')),
+ ('(a > 1)', 'a = 1', None),
+ ('(a > 1)', 'a = 2', None),
+ ('(a > 0)', '(a > 2)', ('a = 3', 'a = 4')),
+ ('(a > 2)', 'a = 3', None),
+ ('(a > 2)', 'a = 4', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'If:2', None),
+ ('(a > 0)', 'If:3', None),
+ ('(a > 0)', 'If:8', None),
+ ),
+ )
+
+ def test_branch_straightline_semi(self):
+
+ def test_fn(a):
+ if a > 0:
+ a = 1
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (None, 'a', '(a > 0)'),
+ ('a', '(a > 0)', 'a = 1'),
+ ('(a > 0)', 'a = 1', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (('a', 'If:2', None),),
+ )
+
+ def test_branch_return(self):
+
+ def test_fn(a):
+ if a > 0:
+ return
+ else:
+ a = 1
+ a = 2
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ ('a', '(a > 0)', ('return', 'a = 1')),
+ ('(a > 0)', 'a = 1', 'a = 2'),
+ ('(a > 0)', 'return', None),
+ ('a = 1', 'a = 2', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (('a', 'If:2', 'a = 2'),),
+ )
+
+ def test_branch_return_minimal(self):
+
+ def test_fn(a):
+ if a > 0:
+ return
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ ('a', '(a > 0)', 'return'),
+ ('(a > 0)', 'return', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (('a', 'If:2', None),),
+ )
+
+ def test_while_straightline(self):
+
+ def test_fn(a):
+ while a > 0:
+ a = 1
+ a = 2
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
+ ('(a > 0)', 'a = 1', '(a > 0)'),
+ ('(a > 0)', 'a = 2', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (('a', 'While:2', 'a = 2'),),
+ )
+
+ def test_while_else_straightline(self):
+
+ def test_fn(a):
+ while a > 0:
+ a = 1
+ else: # pylint:disable=useless-else-on-loop
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
+ ('(a > 0)', 'a = 1', '(a > 0)'),
+ ('(a > 0)', 'a = 2', 'a = 3'),
+ ('a = 2', 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (('a', 'While:2', 'a = 3'),),
+ )
+
+ def test_while_else_continue(self):
+
+ def test_fn(a):
+ while a > 0:
+ if a > 1:
+ continue
+ else:
+ a = 0
+ a = 1
+ else: # pylint:disable=useless-else-on-loop
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'continue', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
+ ('(a > 0)', '(a > 1)', ('continue', 'a = 0')),
+ ('(a > 1)', 'continue', '(a > 0)'),
+ ('a = 0', 'a = 1', '(a > 0)'),
+ ('(a > 0)', 'a = 2', 'a = 3'),
+ ('a = 2', 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'While:2', 'a = 3'),
+ ('(a > 0)', 'If:3', ('a = 1', '(a > 0)')),
+ ),
+ )
+
+ def test_while_else_break(self):
+
+ def test_fn(a):
+ while a > 0:
+ if a > 1:
+ break
+ a = 1
+ else:
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
+ ('(a > 0)', '(a > 1)', ('break', 'a = 1')),
+ ('(a > 1)', 'break', 'a = 3'),
+ ('(a > 1)', 'a = 1', '(a > 0)'),
+ ('(a > 0)', 'a = 2', 'a = 3'),
+ (('break', 'a = 2'), 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'While:2', 'a = 3'),
+ ('(a > 0)', 'If:3', ('a = 1', 'a = 3')),
+ ),
+ )
+
+ def test_while_else_return(self):
+
+ def test_fn(a):
+ while a > 0:
+ if a > 1:
+ return
+ a = 1
+ else: # pylint:disable=useless-else-on-loop
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
+ ('(a > 0)', '(a > 1)', ('return', 'a = 1')),
+ ('(a > 1)', 'return', None),
+ ('(a > 1)', 'a = 1', '(a > 0)'),
+ ('(a > 0)', 'a = 2', 'a = 3'),
+ ('a = 2', 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'While:2', 'a = 3'),
+ ('(a > 0)', 'If:3', 'a = 1'),
+ ),
+ )
+
+ def test_while_nested_straightline(self):
+
+ def test_fn(a):
+ while a > 0:
+ while a > 1:
+ a = 1
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
+ (('(a > 0)', 'a = 1'), '(a > 1)', ('a = 1', 'a = 2')),
+ ('(a > 1)', 'a = 1', '(a > 1)'),
+ ('(a > 1)', 'a = 2', '(a > 0)'),
+ ('(a > 0)', 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'While:2', 'a = 3'),
+ ('(a > 0)', 'While:3', 'a = 2'),
+ ),
+ )
+
+ def test_while_nested_continue(self):
+
+ def test_fn(a):
+ while a > 0:
+ while a > 1:
+ if a > 3:
+ continue
+ a = 1
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
+ (('(a > 0)', 'continue', 'a = 1'), '(a > 1)', ('(a > 3)', 'a = 2')),
+ ('(a > 1)', '(a > 3)', ('continue', 'a = 1')),
+ ('(a > 3)', 'continue', '(a > 1)'),
+ ('(a > 3)', 'a = 1', '(a > 1)'),
+ ('(a > 1)', 'a = 2', '(a > 0)'),
+ ('(a > 0)', 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'While:2', 'a = 3'),
+ ('(a > 0)', 'While:3', 'a = 2'),
+ ('(a > 1)', 'If:4', ('a = 1', '(a > 1)')),
+ ),
+ )
+
+ def test_while_nested_break(self):
+
+ def test_fn(a):
+ while a > 0:
+ while a > 1:
+ if a > 2:
+ break
+ a = 1
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(graph, (
+ (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
+ (('(a > 0)', 'a = 1'), '(a > 1)', ('(a > 2)', 'a = 2')),
+ ('(a > 1)', '(a > 2)', ('break', 'a = 1')),
+ ('(a > 2)', 'break', 'a = 2'),
+ ('(a > 2)', 'a = 1', '(a > 1)'),
+ (('(a > 1)', 'break'), 'a = 2', '(a > 0)'),
+ ('(a > 0)', 'a = 3', None),
+ ))
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'While:2', 'a = 3'),
+ ('(a > 0)', 'While:3', 'a = 2'),
+ ('(a > 1)', 'If:4', ('a = 1', 'a = 2')),
+ ),
+ )
+
+ def test_for_straightline(self):
+
+ def test_fn(a):
+ for a in range(0, a):
+ a = 1
+ a = 2
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
+ ('range(0, a)', 'a = 1', 'range(0, a)'),
+ ('range(0, a)', 'a = 2', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (('a', 'For:2', 'a = 2'),),
+ )
+
+ def test_for_else_straightline(self):
+
+ def test_fn(a):
+ for a in range(0, a):
+ a = 1
+ else: # pylint:disable=useless-else-on-loop
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
+ ('range(0, a)', 'a = 1', 'range(0, a)'),
+ ('range(0, a)', 'a = 2', 'a = 3'),
+ ('a = 2', 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (('a', 'For:2', 'a = 3'),),
+ )
+
+ def test_for_else_continue(self):
+
+ def test_fn(a):
+ for a in range(0, a):
+ if a > 1:
+ continue
+ else:
+ a = 0
+ a = 1
+ else: # pylint:disable=useless-else-on-loop
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'continue', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
+ ('range(0, a)', '(a > 1)', ('continue', 'a = 0')),
+ ('(a > 1)', 'continue', 'range(0, a)'),
+ ('(a > 1)', 'a = 0', 'a = 1'),
+ ('a = 0', 'a = 1', 'range(0, a)'),
+ ('range(0, a)', 'a = 2', 'a = 3'),
+ ('a = 2', 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'For:2', 'a = 3'),
+ ('range(0, a)', 'If:3', ('a = 1', 'range(0, a)')),
+ ),
+ )
+
+ def test_for_else_break(self):
+
+ def test_fn(a):
+ for a in range(0, a):
+ if a > 1:
+ break
+ a = 1
+ else:
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
+ ('range(0, a)', '(a > 1)', ('break', 'a = 1')),
+ ('(a > 1)', 'break', 'a = 3'),
+ ('(a > 1)', 'a = 1', 'range(0, a)'),
+ ('range(0, a)', 'a = 2', 'a = 3'),
+ (('break', 'a = 2'), 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'For:2', 'a = 3'),
+ ('range(0, a)', 'If:3', ('a = 1', 'a = 3')),
+ ),
+ )
+
+ def test_for_else_return(self):
+
+ def test_fn(a):
+ for a in range(0, a):
+ if a > 1:
+ return
+ a = 1
+ else: # pylint:disable=useless-else-on-loop
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
+ ('range(0, a)', '(a > 1)', ('return', 'a = 1')),
+ ('(a > 1)', 'return', None),
+ ('(a > 1)', 'a = 1', 'range(0, a)'),
+ ('range(0, a)', 'a = 2', 'a = 3'),
+ ('a = 2', 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'For:2', 'a = 3'),
+ ('range(0, a)', 'If:3', 'a = 1'),
+ ),
+ )
+
+ def test_for_nested_straightline(self):
+
+ def test_fn(a):
+ for a in range(0, a):
+ for b in range(1, a):
+ b += 1
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
+ (('range(0, a)', 'b += 1'), 'range(1, a)', ('b += 1', 'a = 2')),
+ ('range(1, a)', 'b += 1', 'range(1, a)'),
+ ('range(1, a)', 'a = 2', 'range(0, a)'),
+ ('range(0, a)', 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'For:2', 'a = 3'),
+ ('range(0, a)', 'For:3', 'a = 2'),
+ ),
+ )
+
+ def test_for_nested_continue(self):
+
+ def test_fn(a):
+ for a in range(0, a):
+ for b in range(1, a):
+ if a > 3:
+ continue
+ b += 1
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
+ (('range(0, a)', 'continue', 'b += 1'), 'range(1, a)',
+ ('(a > 3)', 'a = 2')),
+ ('range(1, a)', '(a > 3)', ('continue', 'b += 1')),
+ ('(a > 3)', 'continue', 'range(1, a)'),
+ ('(a > 3)', 'b += 1', 'range(1, a)'),
+ ('range(1, a)', 'a = 2', 'range(0, a)'),
+ ('range(0, a)', 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'For:2', 'a = 3'),
+ ('range(0, a)', 'For:3', 'a = 2'),
+ ('range(1, a)', 'If:4', ('b += 1', 'range(1, a)')),
+ ),
+ )
+
+ def test_for_nested_break(self):
+
+ def test_fn(a):
+ for a in range(0, a):
+ for b in range(1, a):
+ if a > 2:
+ break
+ b += 1
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
+ (('range(0, a)', 'b += 1'), 'range(1, a)', ('(a > 2)', 'a = 2')),
+ ('range(1, a)', '(a > 2)', ('break', 'b += 1')),
+ ('(a > 2)', 'break', 'a = 2'),
+ ('(a > 2)', 'b += 1', 'range(1, a)'),
+ (('range(1, a)', 'break'), 'a = 2', 'range(0, a)'),
+ ('range(0, a)', 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'For:2', 'a = 3'),
+ ('range(0, a)', 'For:3', 'a = 2'),
+ ('range(1, a)', 'If:4', ('b += 1', 'a = 2')),
+ ),
+ )
+
+ def test_complex(self):
+
+ def test_fn(a):
+ b = 0
+ while a > 0:
+ for b in range(0, a):
+ if a > 2:
+ break
+ if a > 3:
+ if a > 4:
+ continue
+ else:
+ max(a)
+ break
+ b += 1
+ else: # for b in range(0, a):
+ return a
+ a = 2
+ for a in range(1, a):
+ return b
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('b = 0', 'a = 2'), '(a > 0)', ('range(0, a)', 'range(1, a)')),
+ (
+ ('(a > 0)', 'continue', 'b += 1'),
+ 'range(0, a)',
+ ('(a > 2)', 'return a'),
+ ),
+ ('range(0, a)', '(a > 2)', ('(a > 3)', 'break')),
+ ('(a > 2)', 'break', 'a = 2'),
+ ('(a > 2)', '(a > 3)', ('(a > 4)', 'b += 1')),
+ ('(a > 3)', '(a > 4)', ('continue', 'max(a)')),
+ ('(a > 4)', 'max(a)', 'break'),
+ ('max(a)', 'break', 'a = 2'),
+ ('(a > 4)', 'continue', 'range(0, a)'),
+ ('(a > 3)', 'b += 1', 'range(0, a)'),
+ ('range(0, a)', 'return a', None),
+ ('break', 'a = 2', '(a > 0)'),
+ ('(a > 0)', 'range(1, a)', ('return b', 'a = 3')),
+ ('range(1, a)', 'return b', None),
+ ('range(1, a)', 'a = 3', None),
+ ),
+ )
+ self.assertStatementEdges(
+ graph,
+ (
+ ('b = 0', 'While:3', 'range(1, a)'),
+ ('(a > 0)', 'For:4', 'a = 2'),
+ ('range(0, a)', 'If:5', ('(a > 3)', 'a = 2')),
+ ('(a > 2)', 'If:7', ('b += 1', 'a = 2', 'range(0, a)')),
+ ('(a > 3)', 'If:8', ('a = 2', 'range(0, a)')),
+ ('(a > 0)', 'For:17', 'a = 3'),
+ ),
+ )
+
+ def test_finally_straightline(self):
+
+ def test_fn(a):
+ try:
+ a += 1
+ finally:
+ a = 2
+ a = 3
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ ('a', 'a += 1', 'a = 2'),
+ ('a += 1', 'a = 2', 'a = 3'),
+ ('a = 2', 'a = 3', None),
+ ),
+ )
+
+ def test_return_finally(self):
+
+ def test_fn(a):
+ try:
+ return a
+ finally:
+ a = 1
+ a = 2
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ ('a', 'return a', 'a = 1'),
+ ('return a', 'a = 1', None),
+ (None, 'a = 2', None),
+ ),
+ )
+
+ def test_break_finally(self):
+
+ def test_fn(a):
+ while a > 0:
+ try:
+ break
+ finally:
+ a = 1
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ ('a', '(a > 0)', 'break'),
+ ('(a > 0)', 'break', 'a = 1'),
+ ('break', 'a = 1', None),
+ ),
+ )
+
+ def test_continue_finally(self):
+
+ def test_fn(a):
+ while a > 0:
+ try:
+ continue
+ finally:
+ a = 1
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ (('a', 'a = 1'), '(a > 0)', 'continue'),
+ ('(a > 0)', 'continue', 'a = 1'),
+ ('continue', 'a = 1', '(a > 0)'),
+ ),
+ )
+
+ def test_with_straightline(self):
+
+ def test_fn(a):
+ with max(a) as b:
+ a = 0
+ return b
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ ('a', 'max(a)', 'a = 0'),
+ ('max(a)', 'a = 0', 'return b'),
+ ('a = 0', 'return b', None),
+ ),
+ )
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/autograph/pyct/compiler.py b/tensorflow/contrib/autograph/pyct/compiler.py
index 24c4517afa..c172ab21f6 100644
--- a/tensorflow/contrib/autograph/pyct/compiler.py
+++ b/tensorflow/contrib/autograph/pyct/compiler.py
@@ -30,9 +30,49 @@ import tempfile
import astor
import gast
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import ast_util
+from tensorflow.contrib.autograph.pyct import origin_info
+from tensorflow.contrib.autograph.pyct import parser
+
+
+def _build_source_map(node, code):
+ """Return the Python objects represented by given AST.
+
+ Compiling the AST code this way ensures that the source code is readable by
+ e.g. `pdb` or `inspect`.
+
+ Args:
+ node: An AST node of the original generated code, before the source code is
+ generated.
+ code: The string representation of the source code for the newly generated
+ code.
+
+ Returns:
+ Dict[CodeLocation, OriginInfo], a mapping between the user and AutoGraph
+ generated code.
+ """
+ # After we have the final generated code we reparse it to get the final line
+ # numbers. Then we walk through the generated and original ASTs in parallel
+ # to build the mapping between the user and generated code.
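+ # For example, if line 3 of the generated code originated from line 10 of
+ # the user's file, the resulting map will contain an entry
+ # {3: <OriginInfo for line 10>}.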
+ new_node = parser.parse_str(code)
+ origin_info.resolve(new_node, code)
+ source_mapping = {}
+ for before, after in ast_util.parallel_walk(node, new_node):
+ # Both checks are needed: if origin information is ever copied over to
+ # new nodes, we rely on the fact that only the original user code has
+ # the origin annotation.
+ if (anno.hasanno(before, anno.Basic.ORIGIN) and
+ anno.hasanno(after, anno.Basic.ORIGIN)):
+ source_info = anno.getanno(before, anno.Basic.ORIGIN)
+ new_line_number = anno.getanno(after, anno.Basic.ORIGIN).line_number
+ source_mapping[new_line_number] = source_info
+ return source_mapping
+
def ast_to_source(node, indentation=' '):
"""Return the source code of given AST."""
+ original_node = node
if isinstance(node, gast.AST):
node = gast.gast_to_ast(node)
generator = astor.codegen.SourceGenerator(indentation, False,
@@ -42,11 +82,16 @@ def ast_to_source(node, indentation=' '):
# In some versions of Python, literals may appear as actual values. This
# ensures everything is string.
code = map(str, generator.result)
- return astor.source_repr.pretty_source(code).lstrip()
+ code = astor.source_repr.pretty_source(code).lstrip()
+ source_mapping = _build_source_map(original_node, code)
+ return code, source_mapping
-def ast_to_object(
- node, indentation=' ', source_prefix=None, delete_on_exit=True):
+
+def ast_to_object(node,
+ indentation=' ',
+ source_prefix=None,
+ delete_on_exit=True):
"""Return the Python objects represented by given AST.
Compiling the AST code this way ensures that the source code is readable by
@@ -56,15 +101,30 @@ def ast_to_object(
node: The code to compile, as an AST object.
indentation: The string to use for indentation.
source_prefix: Optional string to print as-is into the source file.
- delete_on_exit: Whether to delete the temporary file used for compilation
- on exit.
+ delete_on_exit: Whether to delete the temporary file used for compilation on
+ exit.
Returns:
- A module object containing the compiled source code.
+ A tuple of the module object containing the compiled source code, and
+ the source code itself.
+ Raises:
+ ValueError: If ag_source_map__ is already in the namespace of the compiled
+ node.
"""
- source = ast_to_source(node, indentation)
+ # code_source_mapping does not yet include the offsets from import statements.
+ source, code_source_mapping = ast_to_source(node, indentation=indentation)
with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
+ # TODO(znado): move into an _offset_source_map() helper function.
+ # Need to offset the generated line numbers by the number of import lines.
+ if source_prefix:
+ num_import_lines = source_prefix.count('\n') + 1
+ else:
+ num_import_lines = 0
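+ # For example, a two-line prefix such as 'import a\nimport b' contains one
+ # newline, so num_import_lines is 2 and every mapped line number shifts
+ # down by 2.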
+ source_mapping = {}
+ for line_number, original_position in code_source_mapping.items():
+ source_map_key = origin_info.CodeLocation(
+ file_path=f.name, line_number=line_number + num_import_lines)
+ source_mapping[source_map_key] = original_position
module_name = os.path.basename(f.name[:-3])
if source_prefix:
f.write(source_prefix)
@@ -72,4 +132,27 @@ def ast_to_object(
f.write(source)
if delete_on_exit:
atexit.register(lambda: os.remove(f.name))
- return imp.load_source(module_name, f.name), source
+ compiled_node = imp.load_source(module_name, f.name)
+
+ # TODO(znado): Clean this up so we don't need to attach it to the namespace.
+ # TODO(znado): This does not work for classes because their methods share a
+ # namespace.
+ # This attaches the source map which is needed for error handling. Note that
+ # api.to_graph copies this source map into an attribute of the function.
+ #
+ # We need this so the ag_source_map__ variable is available to the call to
+ # rewrite_graph_construction_error in the except block inside each function
+ # that handles graph construction errors.
+ #
+ # We cannot get the rewritten function name until it is too late, so
+ # templating is hard. Attaching the map to the outermost function also
+ # cleanly handles the issues encountered with nested functions.
+ source_map_name = 'ag_source_map__'
+ if source_map_name in compiled_node.__dict__:
+ raise ValueError('cannot convert %s because it has namespace attribute '
+ '"%s", which is reserved for AutoGraph.' %
+ (compiled_node, source_map_name))
+ compiled_node.__dict__[source_map_name] = source_mapping
+
+ return compiled_node, source
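+
+
+# Usage sketch (`fn_ast` is a hypothetical AST): `module, source =
+# ast_to_object(fn_ast)` returns the loaded module; module.ag_source_map__
+# then maps CodeLocation keys in the generated file back to the OriginInfo
+# of the user code.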
diff --git a/tensorflow/contrib/autograph/pyct/compiler_test.py b/tensorflow/contrib/autograph/pyct/compiler_test.py
index 98cdc1506b..e29fa9324c 100644
--- a/tensorflow/contrib/autograph/pyct/compiler_test.py
+++ b/tensorflow/contrib/autograph/pyct/compiler_test.py
@@ -59,14 +59,14 @@ class CompilerTest(test.TestCase):
value=gast.Str('c'))
])
+ source, _ = compiler.ast_to_source(node, indentation=' ')
self.assertEqual(
textwrap.dedent("""
if 1:
a = b
else:
a = 'c'
- """).strip(),
- compiler.ast_to_source(node, indentation=' ').strip())
+ """).strip(), source.strip())
def test_ast_to_object(self):
node = gast.FunctionDef(
diff --git a/tensorflow/contrib/autograph/pyct/origin_info.py b/tensorflow/contrib/autograph/pyct/origin_info.py
new file mode 100644
index 0000000000..614e346634
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/origin_info.py
@@ -0,0 +1,100 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Container for origin source code information before AutoGraph compilation."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+
+import gast
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.python.util import tf_inspect
+
+
+class CodeLocation(
+ collections.namedtuple('CodeLocation', ('file_path', 'line_number'))):
+ """Location of a line of code.
+
+ Attributes:
+ file_path: text, the full path to the file containing the code.
+ line_number: Int, the 1-based line number of the code in its file.
+ """
+ pass
+
+
+class OriginInfo(
+ collections.namedtuple('OriginInfo',
+ ('file_path', 'function_name', 'line_number',
+ 'column_offset', 'source_code_line'))):
+ """Container for information about the source code before conversion.
+
+ Instances of this class contain information about the source code that
+ transformed code originated from. Examples include:
+ * line number
+ * file name
+ * original user code
+ """
+
+ def as_frame(self):
+ """Makes a traceback frame tuple.
+
+ Returns:
+ A tuple of (file_path, line_number, function_name, source_code_line).
+ """
+ return (self.file_path, self.line_number, self.function_name,
+ self.source_code_line)
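+
+ # As a sketch, an OriginInfo for line 10 of a hypothetical 'my_file.py'
+ # inside `my_fn` would yield ('my_file.py', 10, 'my_fn', 'x = 1'), the
+ # same shape the standard `traceback` module uses for stack frames.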
+
+
+# TODO(znado): Consider refactoring this into a Visitor.
+def resolve(node, source, function=None):
+ """Adds an origin information to all nodes inside the body of function.
+
+ Args:
+ node: The AST node for the function whose body nodes will be annotated.
+ source: Text, the source code string for the function whose body nodes will
+ be annotated.
+ function: Callable, the function whose nodes will all be annotated with an
+ OriginInfo annotation keyed by anno.Basic.ORIGIN. If it is None, only the
+ line numbers and column offsets will be set in the annotation, with the
+ rest of the information being None.
+ """
+ if function:
+ _, function_lineno = tf_inspect.getsourcelines(function)
+ function_filepath = tf_inspect.getsourcefile(function)
+ else:
+ function_lineno = None
+ function_filepath = None
+ source_lines = source.split('\n')
+ for n in gast.walk(node):
+ if hasattr(n, 'lineno'):
+ # n.lineno is relative to the start of the enclosing function, so we need
+ # to offset it by the starting line of the function.
+ source_code_line = source_lines[n.lineno - 1]
+ if function:
+ source_lineno = n.lineno + function_lineno - 1
+ function_name = function.__name__
+ else:
+ source_lineno = n.lineno
+ function_name = None
+ anno.setanno(
+ n, anno.Basic.ORIGIN,
+ OriginInfo(function_filepath, function_name, source_lineno,
+ n.col_offset, source_code_line))
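+
+
+# Usage sketch: after `resolve(node, source)`, every node that has a lineno
+# carries an anno.Basic.ORIGIN annotation, which can be read back with
+# anno.getanno(n, anno.Basic.ORIGIN).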
diff --git a/tensorflow/contrib/autograph/pyct/qual_names.py b/tensorflow/contrib/autograph/pyct/qual_names.py
index da07013cf4..fb81404edc 100644
--- a/tensorflow/contrib/autograph/pyct/qual_names.py
+++ b/tensorflow/contrib/autograph/pyct/qual_names.py
@@ -30,6 +30,7 @@ import collections
import gast
from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import parser
class Symbol(collections.namedtuple('Symbol', ['name'])):
@@ -89,7 +90,8 @@ class QN(object):
if not isinstance(base, (str, StringLiteral, NumberLiteral)):
# TODO(mdan): Require Symbol instead of string.
raise ValueError(
- 'For simple QNs, base must be a string or a Literal object.')
+ 'for simple QNs, base must be a string or a Literal object;'
+ ' instead got "%s"' % type(base))
assert '.' not in base and '[' not in base and ']' not in base
self._parent = None
self.qn = (base,)
@@ -113,6 +115,22 @@ class QN(object):
return self._parent
@property
+ def owner_set(self):
+ """Returns all the symbols (simple or composite) that own this QN.
+
+ In other words, if this symbol was modified, the symbols in the owner set
+ may also be affected.
+
+ Examples:
+ 'a.b[c.d]' has two owners, 'a' and 'a.b'
+ """
+ owners = set()
+ if self.has_attr() or self.has_subscript():
+ owners.add(self.parent)
+ owners.update(self.parent.owner_set)
+ return owners
+
+ @property
def support_set(self):
"""Returns the set of simple symbols that this QN relies on.
@@ -122,7 +140,7 @@ class QN(object):
Examples:
'a.b' has only one support symbol, 'a'
- 'a[i]' has two roots, 'a' and 'i'
+ 'a[i]' has two support symbols, 'a' and 'i'
"""
# TODO(mdan): This might be the set of Name nodes in the AST. Track those?
roots = set()
@@ -231,3 +249,9 @@ class QnResolver(gast.NodeTransformer):
def resolve(node):
return QnResolver().visit(node)
+
+
+def from_str(qn_str):
+ node = parser.parse_expression(qn_str)
+ node = resolve(node)
+ return anno.getanno(node, anno.Basic.QN)
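+
+
+# Usage sketch, mirroring qual_names_test.py: from_str('a.b') returns the
+# same QN as QN(QN('a'), attr='b'), and from_str('a[b]') matches
+# QN(QN('a'), subscript=QN('b')).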
diff --git a/tensorflow/contrib/autograph/pyct/qual_names_test.py b/tensorflow/contrib/autograph/pyct/qual_names_test.py
index 264afd508c..c793c2bb39 100644
--- a/tensorflow/contrib/autograph/pyct/qual_names_test.py
+++ b/tensorflow/contrib/autograph/pyct/qual_names_test.py
@@ -30,6 +30,15 @@ from tensorflow.python.platform import test
class QNTest(test.TestCase):
+ def test_from_str(self):
+ a = QN('a')
+ b = QN('b')
+ a_dot_b = QN(a, attr='b')
+ a_sub_b = QN(a, subscript=b)
+ self.assertEqual(qual_names.from_str('a.b'), a_dot_b)
+ self.assertEqual(qual_names.from_str('a'), a)
+ self.assertEqual(qual_names.from_str('a[b]'), a_sub_b)
+
def test_basic(self):
a = QN('a')
self.assertEqual(a.qn, ('a',))
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/BUILD b/tensorflow/contrib/autograph/pyct/static_analysis/BUILD
index bcf2dacec2..92eacba3fd 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/BUILD
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/BUILD
@@ -19,8 +19,9 @@ py_library(
srcs = [
"activity.py",
"annos.py",
- "cfg.py",
"live_values.py",
+ "liveness.py",
+ "reaching_definitions.py",
"type_info.py",
],
srcs_version = "PY2AND3",
@@ -28,6 +29,7 @@ py_library(
deps = [
"//tensorflow/contrib/autograph/pyct",
"//tensorflow/contrib/autograph/utils",
+ "//tensorflow/python:util",
"@gast_archive//:gast",
],
)
@@ -46,23 +48,32 @@ py_test(
)
py_test(
- name = "cfg_test",
- srcs = ["cfg_test.py"],
+ name = "live_values_test",
+ srcs = ["live_values_test.py"],
srcs_version = "PY2AND3",
tags = ["no_windows"],
deps = [
":static_analysis",
"//tensorflow/contrib/autograph/pyct",
"//tensorflow/python:client_testlib",
- "@gast_archive//:gast",
],
)
py_test(
- name = "live_values_test",
- srcs = ["live_values_test.py"],
+ name = "liveness_test",
+ srcs = ["liveness_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":static_analysis",
+ "//tensorflow/contrib/autograph/pyct",
+ "//tensorflow/python:client_testlib",
+ ],
+)
+
+py_test(
+ name = "reaching_definitions_test",
+ srcs = ["reaching_definitions_test.py"],
srcs_version = "PY2AND3",
- tags = ["no_windows"],
deps = [
":static_analysis",
"//tensorflow/contrib/autograph/pyct",
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/__init__.py b/tensorflow/contrib/autograph/pyct/static_analysis/__init__.py
index c325e19f28..9a82de735d 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/__init__.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/__init__.py
@@ -18,10 +18,14 @@ This module contains utilities to help annotate AST nodes with as much runtime
information as can possibly be extracted without actually executing the code,
under the assumption that the context in which the code will run is known.
-Note: It's a fair bet that this analysis cannot be reused across contexts
-without re-running it. In most cases, the context usually means referenced
-modules, which should be static enough to allow reuse, but that is not being
-reliably verified.
+Overall, the different analyses serve the purposes listed below:
+
+ * activity: inventories symbols read, written to, params, etc. at different
+ levels
+ * liveness, reaching_definitions: dataflow analyses based on the program's CFG
+ and using the symbol information gathered by activity analysis
+ * live_values, type_info: type and value inference based on dataflow
+ analysis and context information
"""
from __future__ import absolute_import
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/activity.py b/tensorflow/contrib/autograph/pyct/static_analysis/activity.py
index 4d7b0cbb7b..a0182da9d1 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/activity.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/activity.py
@@ -12,7 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Activity analysis."""
+"""Activity analysis.
+
+Requires qualified name annotations (see qual_names.py).
+"""
from __future__ import absolute_import
from __future__ import division
@@ -59,9 +62,10 @@ class Scope(object):
self.parent = parent
self.add_unknown_symbols = add_unknown_symbols
self.modified = set()
+ # TODO(mdan): Completely remove this.
self.created = set()
self.used = set()
- self.params = set()
+ self.params = {}
self.returned = set()
# TODO(mdan): Rename to `locals`
@@ -106,37 +110,23 @@ class Scope(object):
self.modified |= other.modified
self.created |= other.created
self.used |= other.used
- self.params |= other.params
+ self.params.update(other.params)
self.returned |= other.returned
def has(self, name):
- if name in self.modified or name in self.params:
+ if name in self.modified:
return True
elif self.parent is not None:
return self.parent.has(name)
return False
- def is_modified_since_entry(self, name):
- if name in self.modified:
- return True
- elif self.parent is not None and not self.isolated:
- return self.parent.is_modified_since_entry(name)
- return False
-
- def is_param(self, name):
- if name in self.params:
- return True
- elif self.parent is not None and not self.isolated:
- return self.parent.is_param(name)
- return False
-
def mark_read(self, name):
self.used.add(name)
if self.parent is not None and name not in self.created:
self.parent.mark_read(name)
- def mark_param(self, name):
- self.params.add(name)
+ def mark_param(self, name, owner):
+ self.params[name] = owner
def mark_creation(self, name, writes_create_symbol=False):
"""Mark a qualified name as created."""
@@ -226,37 +216,56 @@ class ActivityAnalyzer(transformer.Base):
elif isinstance(node.ctx, gast.Param):
# Param contexts appear in function defs, so they have the meaning of
# defining a variable.
- # TODO(mdan): This may be incorrect with nested functions.
- # For nested functions, we'll have to add the notion of hiding args from
- # the parent scope, not writing to them.
- self.scope.mark_creation(qn)
- self.scope.mark_param(qn)
+ self.scope.mark_write(qn)
+ self.scope.mark_param(qn, self.enclosing_entities[-1])
else:
raise ValueError('Unknown context %s for node %s.' % (type(node.ctx), qn))
anno.setanno(node, NodeAnno.IS_LOCAL, self.scope.has(qn))
- anno.setanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY,
- self.scope.is_modified_since_entry(qn))
- anno.setanno(node, NodeAnno.IS_PARAM, self.scope.is_param(qn))
if self._in_return_statement:
self.scope.mark_returned(qn)
+ def _enter_scope(self, isolated):
+ self.scope = Scope(self.scope, isolated=isolated)
+
+ def _exit_scope(self):
+ self.scope = self.scope.parent
+
+ def _process_statement(self, node):
+ self._enter_scope(False)
+ node = self.generic_visit(node)
+ anno.setanno(node, anno.Static.SCOPE, self.scope)
+ self._exit_scope()
+ return node
+
+ def visit_Expr(self, node):
+ return self._process_statement(node)
+
+ def visit_Return(self, node):
+ self._in_return_statement = True
+ node = self._process_statement(node)
+ self._in_return_statement = False
+ return node
+
+ def visit_Assign(self, node):
+ return self._process_statement(node)
+
def visit_AugAssign(self, node):
# Special rules for AugAssign. In Assign, the target is only written,
# but in AugAssign (e.g. a += b), the target is both read and written.
self._in_aug_assign = True
- self.generic_visit(node)
+ node = self._process_statement(node)
self._in_aug_assign = False
return node
def visit_Name(self, node):
- self.generic_visit(node)
+ node = self.generic_visit(node)
self._track_symbol(node)
return node
def visit_Attribute(self, node):
- self.generic_visit(node)
+ node = self.generic_visit(node)
if self._in_constructor and self._node_sets_self_attribute(node):
self._track_symbol(
node, composite_writes_alter_parent=True, writes_create_symbol=True)
@@ -265,44 +274,38 @@ class ActivityAnalyzer(transformer.Base):
return node
def visit_Subscript(self, node):
- self.generic_visit(node)
+ node = self.generic_visit(node)
# Subscript writes (e.g. a[b] = "value") are considered to modify
# both the element itself (a[b]) and its parent (a).
- self._track_symbol(node, composite_writes_alter_parent=True)
+ self._track_symbol(node)
return node
def visit_Print(self, node):
- current_scope = self.scope
- args_scope = Scope(current_scope)
- self.scope = args_scope
- for n in node.values:
- self.visit(n)
- anno.setanno(node, NodeAnno.ARGS_SCOPE, args_scope)
- self.scope = current_scope
+ self._enter_scope(False)
+ node.values = self.visit_block(node.values)
+ anno.setanno(node, anno.Static.SCOPE, self.scope)
+ anno.setanno(node, NodeAnno.ARGS_SCOPE, self.scope)
+ self._exit_scope()
return node
+ def visit_Assert(self, node):
+ return self._process_statement(node)
+
def visit_Call(self, node):
- current_scope = self.scope
- args_scope = Scope(current_scope, isolated=False)
- self.scope = args_scope
- for n in node.args:
- self.visit(n)
+ self._enter_scope(False)
+ node.args = self.visit_block(node.args)
+ node.keywords = self.visit_block(node.keywords)
# TODO(mdan): Account for starargs and kwargs
- for n in node.keywords:
- self.visit(n)
- anno.setanno(node, NodeAnno.ARGS_SCOPE, args_scope)
- self.scope = current_scope
- self.visit(node.func)
+ anno.setanno(node, NodeAnno.ARGS_SCOPE, self.scope)
+ self._exit_scope()
+ node.func = self.visit(node.func)
return node
def _process_block_node(self, node, block, scope_name):
- current_scope = self.scope
- block_scope = Scope(current_scope, isolated=False)
- self.scope = block_scope
- for n in block:
- self.visit(n)
- anno.setanno(node, scope_name, block_scope)
- self.scope = current_scope
+ self._enter_scope(False)
+ block = self.visit_block(block)
+ anno.setanno(node, scope_name, self.scope)
+ self._exit_scope()
return node
def _process_parallel_blocks(self, parent, children):
@@ -321,94 +324,75 @@ class ActivityAnalyzer(transformer.Base):
self.scope.merge_from(after_child)
return parent
+ def visit_arguments(self, node):
+ return self._process_statement(node)
+
def visit_FunctionDef(self, node):
- if self.scope:
- qn = qual_names.QN(node.name)
- self.scope.mark_write(qn)
- current_scope = self.scope
- body_scope = Scope(current_scope, isolated=True)
- self.scope = body_scope
- self.generic_visit(node)
- anno.setanno(node, NodeAnno.BODY_SCOPE, body_scope)
- self.scope = current_scope
+ # The FunctionDef node itself has a Scope object that tracks the creation
+ # of its name, along with the usage of any decorators accompanying it.
+ self._enter_scope(False)
+ node.decorator_list = self.visit_block(node.decorator_list)
+ self.scope.mark_write(qual_names.QN(node.name))
+ anno.setanno(node, anno.Static.SCOPE, self.scope)
+ self._exit_scope()
+
+ # A separate Scope tracks the actual function definition.
+ self._enter_scope(True)
+ node.args = self.visit(node.args)
+
+ # Track the body separately. This is for compatibility reasons; it may not
+ # be strictly needed.
+ self._enter_scope(False)
+ node.body = self.visit_block(node.body)
+ anno.setanno(node, NodeAnno.BODY_SCOPE, self.scope)
+ self._exit_scope()
+
+ self._exit_scope()
return node
def visit_With(self, node):
- current_scope = self.scope
- with_scope = Scope(current_scope, isolated=False)
- self.scope = with_scope
- self.generic_visit(node)
- anno.setanno(node, NodeAnno.BODY_SCOPE, with_scope)
- self.scope = current_scope
+ self._enter_scope(False)
+ node = self.generic_visit(node)
+ anno.setanno(node, NodeAnno.BODY_SCOPE, self.scope)
+ self._exit_scope()
return node
- def visit_If(self, node):
- current_scope = self.scope
- cond_scope = Scope(current_scope, isolated=False)
- self.scope = cond_scope
- self.visit(node.test)
- anno.setanno(node, NodeAnno.COND_SCOPE, cond_scope)
- self.scope = current_scope
+ def visit_withitem(self, node):
+ return self._process_statement(node)
+ def visit_If(self, node):
+ self._enter_scope(False)
+ node.test = self.visit(node.test)
+ anno.setanno(node, NodeAnno.COND_SCOPE, self.scope)
+ anno.setanno(node.test, anno.Static.SCOPE, self.scope)
+ self._exit_scope()
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
def visit_For(self, node):
- self.visit(node.target)
- self.visit(node.iter)
+ self._enter_scope(False)
+ node.target = self.visit(node.target)
+ node.iter = self.visit(node.iter)
+ anno.setanno(node.iter, anno.Static.SCOPE, self.scope)
+ self._exit_scope()
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
def visit_While(self, node):
- current_scope = self.scope
- cond_scope = Scope(current_scope, isolated=False)
- self.scope = cond_scope
- self.visit(node.test)
- anno.setanno(node, NodeAnno.COND_SCOPE, cond_scope)
- self.scope = current_scope
-
+ self._enter_scope(False)
+ node.test = self.visit(node.test)
+ anno.setanno(node, NodeAnno.COND_SCOPE, self.scope)
+ anno.setanno(node.test, anno.Static.SCOPE, self.scope)
+ self._exit_scope()
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
- def visit_Return(self, node):
- self._in_return_statement = True
- node = self.generic_visit(node)
- self._in_return_statement = False
- return node
-
-
-def get_read(node, context):
- """Return the variable names as QNs (qual_names.py) read by this statement."""
- analyzer = ActivityAnalyzer(context, None, True)
- analyzer.visit(node)
- return analyzer.scope.used
-
-
-def get_updated(node, context):
- """Return the variable names created or mutated by this statement.
-
- This function considers assign statements, augmented assign statements, and
- the targets of for loops, as well as function arguments.
- For example, `x[0] = 2` will return `x`, `x, y = 3, 4` will return `x` and
- `y`, `for i in range(x)` will return `i`, etc.
- Args:
- node: An AST node
- context: An EntityContext instance
-
- Returns:
- A set of variable names (QNs, see qual_names.py) of all the variables
- created or mutated.
- """
- analyzer = ActivityAnalyzer(context, None, True)
- analyzer.visit(node)
- return analyzer.scope.created | analyzer.scope.modified
-
def resolve(node, context, parent_scope=None):
return ActivityAnalyzer(context, parent_scope).visit(node)
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py
index bc22be0a27..e940516190 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py
@@ -52,18 +52,18 @@ class ScopeTest(test.TestCase):
other = activity.Scope(None)
other.copy_from(scope)
- self.assertTrue(QN('foo') in other.created)
+ self.assertTrue(QN('foo') in other.modified)
scope.mark_write(QN('bar'))
scope.copy_from(other)
- self.assertFalse(QN('bar') in scope.created)
+ self.assertFalse(QN('bar') in scope.modified)
scope.mark_write(QN('bar'))
scope.merge_from(other)
- self.assertTrue(QN('bar') in scope.created)
- self.assertFalse(QN('bar') in other.created)
+ self.assertTrue(QN('bar') in scope.modified)
+ self.assertFalse(QN('bar') in other.modified)
def test_copy_of(self):
scope = activity.Scope(None)
@@ -157,7 +157,8 @@ class ActivityAnalyzerTest(test.TestCase):
"""Assert the scope contains specific used, modified & created variables."""
self.assertSymbolSetsAre(used, scope.used, 'read')
self.assertSymbolSetsAre(modified, scope.modified, 'modified')
- self.assertSymbolSetsAre(created, scope.created, 'created')
+ # Created is deprecated; we no longer verify it.
+ # self.assertSymbolSetsAre(created, scope.created, 'created')
def test_print_statement(self):
@@ -215,12 +216,6 @@ class ActivityAnalyzerTest(test.TestCase):
(),
(),
)
- self.assertScopeIsRmc(
- anno.getanno(call_node, NodeAnno.ARGS_SCOPE).parent,
- ('a', 'a.b', 'a.c', 'a.d', 'foo'),
- ('a.c',),
- ('a',),
- )
def test_call_args_subscripts(self):
@@ -241,12 +236,6 @@ class ActivityAnalyzerTest(test.TestCase):
(),
(),
)
- self.assertScopeIsRmc(
- anno.getanno(call_node, NodeAnno.ARGS_SCOPE).parent,
- ('a', 'a[0]', 'a[b]', 'a[c]', 'b', 'c', 'foo'),
- ('b', 'c'),
- ('a', 'b', 'c'),
- )
def test_while(self):
@@ -362,20 +351,20 @@ class ActivityAnalyzerTest(test.TestCase):
self.assertScopeIsRmc(
anno.getanno(if_node, NodeAnno.BODY_SCOPE),
('a', 'b', 'c', 'a[c]'),
- ('a', 'a[b]', 'd'),
+ ('a[b]', 'd'),
('d',),
)
# TODO(mdan): Should subscript writes (a[0] = 1) be considered to read "a"?
self.assertScopeIsRmc(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE),
('a', 'e'),
- ('a', 'a[0]', 'd'),
+ ('a[0]', 'd'),
('d',),
)
self.assertScopeIsRmc(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE).parent,
('a', 'b', 'c', 'd', 'e', 'a[c]'),
- ('a', 'd', 'a[b]', 'a[0]'),
+ ('d', 'a[b]', 'a[0]'),
('a', 'b', 'c', 'd', 'e'),
)
@@ -416,10 +405,6 @@ class ActivityAnalyzerTest(test.TestCase):
fn_def_node = node.body[0].body[0]
self.assertScopeIsRmc(
- anno.getanno(fn_def_node,
- NodeAnno.BODY_SCOPE).parent, ('b', 'i', 'f', 'c', 'a'),
- ('f', 'b', 'c', 'i'), ('f', 'a', 'b', 'c', 'i'))
- self.assertScopeIsRmc(
anno.getanno(fn_def_node, NodeAnno.BODY_SCOPE), ('x', 'y'), ('y',), (
'x',
'y',
@@ -452,7 +437,7 @@ class ActivityAnalyzerTest(test.TestCase):
self.assertScopeIsRmc(
anno.getanno(fn_node, NodeAnno.BODY_SCOPE),
('a', 'a[0]'),
- ('a', 'a[0]'),
+ ('a[0]',),
('a',),
)
@@ -518,47 +503,6 @@ class ActivityAnalyzerTest(test.TestCase):
anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('b',), (('')),
(('a', 'b')))
- def test_get_read(self):
-
- def test_fn(x, y):
- z = test_fn(x, y)
- return z
-
- node, ctx = self._parse_and_analyze(test_fn)
- node = node.body[0].body[0]
- read_vars = activity.get_read(node, ctx)
- self.assertEqual(read_vars, set(map(qual_names.QN, ('test_fn', 'x', 'y'))))
-
- def test_fn2(x, y, z):
- z += test_fn2(x, y, z)
- return z
-
- node, ctx = self._parse_and_analyze(test_fn2)
- node = node.body[0].body[0]
- read_vars = activity.get_read(node, ctx)
- self.assertEqual(read_vars,
- set(map(qual_names.QN, ('test_fn2', 'x', 'y', 'z'))))
-
- def test_get_updated(self):
-
- def test_fn(x, y):
- z = test_fn(x, y)
- return z
-
- node, ctx = self._parse_and_analyze(test_fn)
- node = node.body[0].body[0]
- updated_vars = activity.get_updated(node, ctx)
- self.assertEqual(updated_vars, set(map(qual_names.QN, ('z'))))
-
- def test_fn2(x, y, z):
- z += test_fn2(x, y, z)
- return z
-
- node, ctx = self._parse_and_analyze(test_fn2)
- node = node.body[0].body[0]
- updated_vars = activity.get_updated(node, ctx)
- self.assertEqual(updated_vars, set(map(qual_names.QN, ('z'))))
-
if __name__ == '__main__':
test.main()
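
Note: the updated assertions reflect `created` being folded into `modified`.
A toy Scope consistent with the copy_from/merge_from behavior the test now
checks (the real class in activity.py tracks additional sets such as `used`
and `params`; this is only a sketch):

    class Scope(object):

      def __init__(self, parent):
        self.parent = parent
        self.modified = set()

      def mark_write(self, name):
        self.modified.add(name)

      def copy_from(self, other):
        # Replaces state wholesale; anything written since is discarded.
        self.modified = set(other.modified)

      def merge_from(self, other):
        # Unions the other scope's state into this one; `other` is unchanged.
        self.modified |= other.modified

    scope, other = Scope(None), Scope(None)
    scope.mark_write('foo')
    other.copy_from(scope)
    scope.mark_write('bar')
    scope.copy_from(other)
    assert 'bar' not in scope.modified   # copy_from dropped it
    scope.mark_write('bar')
    scope.merge_from(other)
    assert 'bar' in scope.modified and 'bar' not in other.modified
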
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/annos.py b/tensorflow/contrib/autograph/pyct/static_analysis/annos.py
index b929b35b79..5eefecf278 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/annos.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/annos.py
@@ -21,6 +21,9 @@ from __future__ import print_function
from enum import Enum
+# TODO(mdan): Remove.
+
+
class NoValue(Enum):
def __repr__(self):
@@ -50,10 +53,3 @@ class NodeAnno(NoValue):
ORELSE_SCOPE = (
'The scope for the orelse body of a statement (False branch for if '
'statements, orelse body for loops).')
-
- # Type and Value annotations
- # Type annotations are represented by objects of type type_info.Type.
- STATIC_INFO = (
- 'The type or value information that should be asserted about the entity '
- 'referenced by the symbol holding this annotation, irrespective of the '
- 'execution context.')
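
Note: NodeAnno above relies on the NoValue pattern, where each enum member's
value is its documentation but its repr is just the member name. A standalone
rendering (the BODY_SCOPE text is abbreviated here):

    from enum import Enum

    class NoValue(Enum):

      def __repr__(self):
        return self.name

    class NodeAnno(NoValue):
      BODY_SCOPE = 'The scope for the main body of a statement.'

    print(repr(NodeAnno.BODY_SCOPE))  # prints: BODY_SCOPE
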
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/cfg.py b/tensorflow/contrib/autograph/pyct/static_analysis/cfg.py
deleted file mode 100644
index 39eca6e444..0000000000
--- a/tensorflow/contrib/autograph/pyct/static_analysis/cfg.py
+++ /dev/null
@@ -1,446 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Control flow graph analysis.
-
-Given a Python AST we construct a control flow graph, with edges both to the
-next and previous statements (so it can easily walk the graph both ways). Its
-nodes contain the AST of the statements. It can then perform forward or backward
-analysis on this CFG.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from collections import namedtuple
-import functools
-import operator
-
-import gast
-
-from tensorflow.contrib.autograph.pyct import anno
-from tensorflow.contrib.autograph.pyct.static_analysis import activity
-
-
-class CfgNode(object):
- """A node in the CFG."""
- __slots__ = ['next', 'value', 'prev']
-
- def __init__(self, value):
- self.next = set()
- self.prev = set()
- self.value = value
-
-
-class Cfg(namedtuple('Cfg', ['entry', 'exit'])):
- """A Control Flow Graph.
-
- Each statement is represented as a node. For control flow statements such
- as conditionals and loops the conditional itself is a node which either
- branches or cycles, respectively.
- Attributes:
- entry: The entry node, which contains the `gast.arguments` node of the
- function definition.
- exit: The exit node. This node is special because it has no value (i.e. no
- corresponding AST node). This is because Python functions can have
- multiple return statements.
- """
- pass
-
-
-class CfgBuilder(gast.NodeVisitor):
- """Construct a control flow graph.
-
- Construct a CFG starting from a FunctionDef node.
- Usage:
- cfg_obj = CfgBuilder().build_cfg(fndef_node)
- """
-
- def __init__(self):
- # The current leaves of the CFG
- self.current_leaves = []
- # TODO(alexbw): generalize to break, return, continue, yield, etc.
- # A stack of lists, tracking continue statements
- self.continue_ = []
- # A stack of lists tracking break nodes
- self.break_ = []
-
- def set_current_leaves(self, cfg_node):
- """Link this cfg_node to the current leaves.
-
- This is the central function for building the CFG. It links the current
- head cfg_nodes to the passed cfg_node. It then resets the head to the
- passed cfg_node.
-
- Args:
- cfg_node: A CfgNode instance.
- """
- for head in self.current_leaves:
- head.next.add(cfg_node)
- # While we're linking the CFG forward, add backlinks
- cfg_node.prev.add(head)
- self.current_leaves = [cfg_node]
-
- def build_cfg(self, node):
- """Build a CFG for a function.
-
- Implementation of building a CFG for dataflow analysis. See, e.g.:
- https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf
-
- Args:
- node: A function definition the body of which to analyze.
- Returns:
- A CFG object.
- Raises:
- TypeError: If the input is not a function definition.
- """
- if not isinstance(node, gast.FunctionDef):
- raise TypeError('input must be a function definition')
- entry_cfg_node = CfgNode(node.args)
- self.current_leaves = [entry_cfg_node]
- self.visit_statements(node.body)
- exit_cfg_node = CfgNode(None)
- self.set_current_leaves(exit_cfg_node)
- return Cfg(entry_cfg_node, exit_cfg_node)
-
- def visit_statements(self, nodes):
- for node in nodes:
- # Check for control flow
- if isinstance(node, (gast.For, gast.While, gast.If, gast.Try, gast.Break,
- gast.Continue, gast.With)):
- self.visit(node)
- else:
- expr = CfgNode(node)
- self.set_current_leaves(expr)
-
- def generic_visit(self, node):
- raise ValueError('unknown control flow')
-
- def visit_If(self, node):
- # TODO(alexbw): change this to use immutable tuples instead of lists
- # The current head will hold the conditional
- test = CfgNode(node.test)
- self.set_current_leaves(test)
- # Handle the body
- self.visit_statements(node.body)
- body_exit = self.current_leaves
- self.current_leaves = [test]
- # Handle the orelse
- self.visit_statements(node.orelse)
- self.current_leaves.extend(body_exit)
-
- def visit_While(self, node):
- test = CfgNode(node.test)
- self.set_current_leaves(test)
- # Start a new level of nesting
- self.break_.append([])
- self.continue_.append([])
- # Handle the body
- self.visit_statements(node.body)
- body_exit = self.current_leaves
- self.current_leaves.extend(self.continue_.pop())
- self.set_current_leaves(test)
- # Handle the orelse
- self.visit_statements(node.orelse)
- # The break statements and the test go to the next node
- self.current_leaves.extend(self.break_.pop())
- # Body and orelse statements can reach out of the loop
- self.current_leaves.extend(body_exit)
-
- def visit_For(self, node):
- iter_ = CfgNode(node.iter)
- self.set_current_leaves(iter_)
- self.break_.append([])
- self.continue_.append([])
- self.visit_statements(node.body)
- body_exit = self.current_leaves
- self.current_leaves.extend(self.continue_.pop())
- self.set_current_leaves(iter_)
- # Handle the orelse
- self.visit_statements(node.orelse)
- # The break statements and the test go to the next node
- self.current_leaves.extend(self.break_.pop())
- # Body and orelse statements can reach out of the loop
- self.current_leaves.extend(body_exit)
-
- def visit_Break(self, node):
- self.break_[-1].extend(self.current_leaves)
- self.current_leaves[:] = []
-
- def visit_Continue(self, node):
- self.continue_[-1].extend(self.current_leaves)
- self.current_leaves[:] = []
-
- def visit_Try(self, node):
- self.visit_statements(node.body)
- body = self.current_leaves
- handlers = []
- for handler in node.handlers:
- self.current_leaves = body[:]
- self.visit_statements(handler.body)
- handlers.extend(self.current_leaves)
- self.current_leaves = body
- self.visit_statements(node.orelse)
- self.current_leaves = handlers + self.current_leaves
- self.visit_statements(node.finalbody)
-
- def visit_With(self, node):
- for item in node.items:
- self.set_current_leaves(CfgNode(item))
- self.visit_statements(node.body)
-
-
-# TODO(alexbw): once CFG analysis occurs at a block level,
-# this extra class will not be necessary
-class PropagateAnalysis(gast.NodeVisitor):
- """Port analysis annotations from statements to their enclosing blocks."""
-
- def __init__(self, analysis):
- self.transfer_fn = analysis.transfer_fn
- self.in_label = analysis.in_label
- self.out_label = analysis.out_label
- super(PropagateAnalysis, self).__init__()
-
- def visit_If(self, node):
- # Depth-first.
- self.generic_visit(node)
- incoming = anno.getanno(node.body[0], self.in_label)
- incoming |= anno.getanno(node.test, self.in_label)
- outgoing = anno.getanno(node.body[-1], self.out_label)
- outgoing |= anno.getanno(node.test, self.out_label)
- if node.orelse:
- orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
- outgoing = self.transfer_fn(outgoing, orelse_outgoing)
- anno.setanno(node, self.in_label, incoming)
- anno.setanno(node, self.out_label, outgoing)
-
- def visit_For(self, node):
- self.generic_visit(node)
- incoming = set(anno.getanno(node.body[0], self.in_label))
- incoming -= set((anno.getanno(node.target, anno.Basic.QN),))
- outgoing = anno.getanno(node.body[-1], self.out_label)
- if node.orelse:
- orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
- outgoing = self.transfer_fn(outgoing, orelse_outgoing)
- anno.setanno(node, self.in_label, frozenset(incoming))
- anno.setanno(node, self.out_label, outgoing)
-
- def visit_While(self, node):
- self.generic_visit(node)
- incoming = anno.getanno(node.body[0], self.in_label)
- incoming |= anno.getanno(node.test, self.in_label)
- outgoing = anno.getanno(node.body[-1], self.out_label)
- if node.orelse:
- orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
- outgoing = self.transfer_fn(outgoing, orelse_outgoing)
- anno.setanno(node, self.in_label, incoming)
- anno.setanno(node, self.out_label, outgoing)
-
- def visit_With(self, node):
- self.generic_visit(node)
- incoming = anno.getanno(node.body[0], self.in_label)
- for item in node.items:
- incoming |= anno.getanno(item, self.in_label)
- outgoing = anno.getanno(node.body[-1], self.out_label)
- anno.setanno(node, self.in_label, incoming)
- anno.setanno(node, self.out_label, outgoing)
-
-
-# TODO(alexbw): Abstract the CFG walking machinery into a superclass
-# which is parameterized on which fields it selects when walking.
-# TODO(alexbw): Abstract the application of dataflow analysis
-class Forward(object):
- """Forward analysis on CFG.
-
- Args:
- label: A name for this analysis e.g. 'active' for activity analysis. The AST
- nodes in the CFG will be given annotations 'name_in', 'name_out',
- 'name_gen' and 'name_kill' which contain the incoming values, outgoing
- values, values generated by the statement, and values deleted by the
- statement respectively.
- transfer_fn: Either the AND or OR operator. If the AND operator is used it
- turns into forward must analysis (i.e. a value will only be carried
- forward if it appears on all incoming paths). The OR operator means that
- forward may analysis is done (i.e. the union of incoming values will be
- taken).
- """
-
- def __init__(self, label, source_info, transfer_fn=operator.or_):
- self.transfer_fn = transfer_fn
- self.source_info = source_info
- self.out_label = label + '_out'
- self.in_label = label + '_in'
- self.gen_label = label + '_gen'
- self.kill_label = label + '_kill'
-
- # TODO(alexbw): see if we can simplify by visiting breadth-first
- def visit(self, node):
- """Depth-first walking the CFG, applying dataflow information propagation."""
- # node.value is None only for the exit CfgNode.
- if not node.value:
- return
-
- if anno.hasanno(node.value, self.out_label):
- before = hash(anno.getanno(node.value, self.out_label))
- else:
- before = None
- preds = [
- anno.getanno(pred.value, self.out_label)
- for pred in node.prev
- if anno.hasanno(pred.value, self.out_label)
- ]
- if preds:
- incoming = functools.reduce(self.transfer_fn, preds[1:], preds[0])
- else:
- incoming = frozenset()
- anno.setanno(node.value, self.in_label, incoming)
- gen, kill = self.get_gen_kill(node, incoming)
- anno.setanno(node.value, self.gen_label, gen)
- anno.setanno(node.value, self.kill_label, kill)
- anno.setanno(node.value, self.out_label, (incoming - kill) | gen)
-
- if hash(anno.getanno(node.value, self.out_label)) != before:
- for succ in node.next:
- self.visit(succ)
-
- def get_gen_kill(self, cfg_node, incoming):
- """Calculate Gen and Kill properties of a CFG node in dataflow analysis.
-
- A function which takes the CFG node as well as a set of incoming
- values. It must return a set of newly generated values by the statement as
- well as a set of deleted (killed) values.
-
- Args:
- cfg_node: A CfgNode instance.
- incoming:
- """
- raise NotImplementedError()
-
-
-class Backward(Forward):
- """Backward analysis on CFG."""
-
- def visit(self, cfg_node):
- # cfg_node.value is None for the exit node, which will be visited only once
- if not cfg_node.value:
- for pred in cfg_node.prev:
- self.visit(pred)
- return
-
- if anno.hasanno(cfg_node.value, self.in_label):
- before = hash(anno.getanno(cfg_node.value, self.in_label))
- else:
- before = None
- succs = [
- anno.getanno(succ.value, self.in_label)
- for succ in cfg_node.next
- if anno.hasanno(succ.value, self.in_label)
- ]
- if succs:
- incoming = functools.reduce(self.transfer_fn, succs[1:], succs[0])
- else:
- incoming = frozenset()
- anno.setanno(cfg_node.value, self.out_label, incoming)
- gen, kill = self.get_gen_kill(cfg_node, incoming)
- anno.setanno(cfg_node.value, self.gen_label, gen)
- anno.setanno(cfg_node.value, self.kill_label, kill)
- anno.setanno(cfg_node.value, self.in_label, (incoming - kill) | gen)
- if hash(anno.getanno(cfg_node.value, self.in_label)) != before:
- for pred in cfg_node.prev:
- self.visit(pred)
-
-
-def run_analyses(node, analyses):
- """Perform dataflow analysis on all functions within an AST.
-
- Args:
- node: An AST node on which to run dataflow analysis.
- analyses: Either an instance of the Forward or Backward dataflow analysis
- class, or a list or tuple of them.
-
- Returns:
- node: The node, but now with annotations on the AST nodes containing the
- results of the dataflow analyses.
- """
- if not isinstance(analyses, (tuple, list)):
- analyses = (analyses,)
- for analysis in analyses:
- if not isinstance(analysis, (Forward, Backward)):
- raise TypeError('not a valid forward analysis object')
-
- for child_node in gast.walk(node):
- if isinstance(child_node, gast.FunctionDef):
- cfg_obj = CfgBuilder().build_cfg(child_node)
- for analysis in analyses:
- if isinstance(analysis, Backward):
- analysis.visit(cfg_obj.exit)
- elif isinstance(analysis, Forward):
- analysis.visit(cfg_obj.entry)
- for analysis in analyses:
- PropagateAnalysis(analysis).visit(node)
- return node
-
-
-class Liveness(Backward):
- """Perform a liveness analysis.
-
- Each statement is annotated with a set of variables that may be used
- later in the program.
- """
-
- def __init__(self, source_info):
- super(Liveness, self).__init__('live', source_info)
-
- def get_gen_kill(self, node, _):
- # A variable's parents are live if it is live
- # e.g. x is live if x.y is live. This means gen needs to return
- # all parents of a variable (if it's an Attribute or Subscript).
- # This doesn't apply to kill (e.g. del x.y doesn't affect liveness of x)
- gen = activity.get_read(node.value, self.source_info)
- gen = functools.reduce(lambda left, right: left | right.support_set, gen,
- gen)
- kill = activity.get_updated(node.value, self.source_info)
- return gen, kill
-
-
-class ReachingDefinitions(Forward):
- """Perform reaching definition analysis.
-
- Each statement is annotated with a set of (variable, definition) pairs.
- """
-
- def __init__(self, source_info):
- super(ReachingDefinitions, self).__init__('definitions', source_info)
-
- def get_gen_kill(self, node, incoming):
- definitions = activity.get_updated(node.value, self.source_info)
- gen = frozenset((id_, node.value) for id_ in definitions)
- kill = frozenset(def_ for def_ in incoming if def_[0] in definitions)
- return gen, kill
-
-
-class Defined(Forward):
- """Perform defined variable analysis.
-
- Each statement is annotated with a set of variables which are guaranteed to
- be defined at that point.
- """
-
- def __init__(self, source_info):
- super(Defined, self).__init__(
- 'defined', source_info, transfer_fn=operator.and_)
-
- def get_gen_kill(self, node, _):
- gen = activity.get_updated(node.value, self.source_info)
- return gen, frozenset()
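
Note: the deleted module computed each analysis by recursively re-visiting
CFG nodes until annotations stopped changing. For reference, the same fixed
point in a generic worklist form; the graph encoding (node ids, preds, gen,
kill) is supplied by the caller here rather than derived from an AST:

    from collections import deque

    def forward_may_analysis(nodes, preds, gen, kill):
      """Iterates out[n] = gen[n] | (in[n] - kill[n]) to a fixed point."""
      out = {n: frozenset() for n in nodes}
      work = deque(nodes)
      while work:
        n = work.popleft()
        if preds[n]:
          in_n = frozenset().union(*[out[p] for p in preds[n]])
        else:
          in_n = frozenset()
        new_out = gen[n] | (in_n - kill[n])
        if new_out != out[n]:
          out[n] = new_out
          # Re-enqueue successors whose input just changed.
          work.extend(s for s in nodes if n in preds[s])
      return out

    # x = 1 followed by x = 2: the second definition kills the first.
    nodes = ['d1', 'd2']
    preds = {'d1': [], 'd2': ['d1']}
    gen = {'d1': frozenset([('x', 'd1')]), 'd2': frozenset([('x', 'd2')])}
    kill = {'d1': frozenset(), 'd2': frozenset([('x', 'd1')])}
    print(forward_may_analysis(nodes, preds, gen, kill)['d2'])
    # prints: frozenset({('x', 'd2')})
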
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/cfg_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/cfg_test.py
deleted file mode 100644
index 428ebbedca..0000000000
--- a/tensorflow/contrib/autograph/pyct/static_analysis/cfg_test.py
+++ /dev/null
@@ -1,303 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for cfg module."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import functools
-
-import gast
-
-from tensorflow.contrib.autograph.pyct import anno
-from tensorflow.contrib.autograph.pyct import parser
-from tensorflow.contrib.autograph.pyct import qual_names
-from tensorflow.contrib.autograph.pyct import transformer
-from tensorflow.contrib.autograph.pyct.static_analysis import cfg
-from tensorflow.python.platform import test
-
-
-class CFGTest(test.TestCase):
-
- def _parse_and_analyze(self, test_fn):
- node, source = parser.parse_entity(test_fn)
- entity_info = transformer.EntityInfo(
- source_code=source,
- source_file=None,
- namespace={},
- arg_values=None,
- arg_types=None,
- owner_type=None)
- node = qual_names.resolve(node)
- return node, entity_info
-
- def _check_anno_matches(self, node, anno_name, var_names):
- if isinstance(var_names, str):
- var_names = (var_names,)
- qual_vars = set()
- for var_name in var_names:
- if isinstance(var_name, str):
- if '[' in var_name or ']' in var_name:
- raise ValueError('Annotation matching not supported with subscript.')
- if '.' not in var_name:
- qual_vars.add(qual_names.QN(var_name))
- else:
- attrs = var_name.split('.')
- this_qn = functools.reduce(qual_names.QN, attrs[1:],
- qual_names.QN(attrs[0]))
- qual_vars.add(this_qn)
- self.assertEqual(anno.getanno(node, anno_name), qual_vars)
-
- def test_reaching(self):
-
- def f(x):
- print(x)
- while True:
- x = x
- x = x
- return x
-
- node, ctx = self._parse_and_analyze(f)
- cfg.run_analyses(node, cfg.ReachingDefinitions(ctx))
- body = node.body[0].body
- # Only the argument reaches the expression
- def_in = anno.getanno(body[0], 'definitions_in')
- # One element, x, from arguments
- self.assertEqual(set(type(d[1]) for d in def_in), set((gast.arguments,)))
-
- while_body = body[1].body
- def_in = anno.getanno(while_body[0], 'definitions_in')
- # One definition, two possible sources.
- # - One from an assignment (if the loop is entered)
- # - The other from the arguments (if loop is not entered)
- self.assertEqual(
- set(type(d[1]) for d in def_in), set((gast.arguments, gast.Assign)))
-
- def_in = anno.getanno(while_body[1], 'definitions_in')
- # If we've reached this line, the only reaching definition of x is the
- # Assign node in previous line
- self.assertEqual(set(type(d[1]) for d in def_in), set((gast.Assign,)))
-
- def_in = anno.getanno(body[2], 'definitions_in')
- # Same situation as while_body[0]
- self.assertEqual(
- set(type(d[1]) for d in def_in), set((gast.arguments, gast.Assign)))
-
- def test_defined(self):
-
- def f(x):
- if x:
- y = 2 # pylint: disable=unused-variable
- return x
-
- node, ctx = self._parse_and_analyze(f)
- cfg.run_analyses(node, cfg.Defined(ctx))
- body = node.body[0].body
- # only x is for sure defined at the end
- self._check_anno_matches(body[1], 'defined_in', 'x')
- # at the end of the if body both x and y are defined
- if_body = body[0].body
- self._check_anno_matches(if_body[0], 'defined_out', ('x', 'y'))
-
- def _get_live_annotated_fnbody(self, f):
- node, ctx = self._parse_and_analyze(f)
- cfg.run_analyses(node, cfg.Liveness(ctx))
- body = node.body[0].body
- return body
-
- def test_live_straightline(self):
-
- def f1(x):
- a = g(x) # pylint: disable=undefined-variable
- b = h(a) # pylint: disable=undefined-variable, unused-variable
- return x
-
- body = self._get_live_annotated_fnbody(f1)
- self._check_anno_matches(body[1], 'live_in', ('a', 'h', 'x'))
- self._check_anno_matches(body[2], 'live_in', ('x'))
- self._check_anno_matches(body[0], 'live_in', ('g', 'h', 'x'))
- self._check_anno_matches(body[2], 'live_out', ())
-
- def test_live_stacked_conds_with_else(self):
-
- def f2(x, a): # pylint: disable=unused-argument
- if a > 0: # x should not be live
- x = 0
- if a > 1:
- x = 1
- else:
- x = 2
-
- body = self._get_live_annotated_fnbody(f2)
- self._check_anno_matches(body[0], 'live_in', ('a'))
- self._check_anno_matches(body[1], 'live_in', ('a'))
-
- def test_live_stacked_conds(self):
-
- def f3(x, a):
- if a > 0: # x and a should be live
- x = 0
- if a > 1: # x and a should be live_in
- x = 1
- return x # x should be live
-
- body = self._get_live_annotated_fnbody(f3)
- self._check_anno_matches(body[0], 'live_in', ('a', 'x'))
- self._check_anno_matches(body[1], 'live_in', ('a', 'x'))
- self._check_anno_matches(body[2], 'live_in', ('x'))
-
- def test_live_possibly_unused_cond(self):
-
- def f4(x, a):
- if a > 0: # x should be live
- x = 0
- x += 1
-
- body = self._get_live_annotated_fnbody(f4)
- self._check_anno_matches(body[0], 'live_in', ('x', 'a'))
- self._check_anno_matches(body[1], 'live_in', ('x'))
-
- def test_live_attribute_in_cond(self):
-
- def f5(x, a):
- if a > 0: # x.y should be live
- x.y = 0
- return x.y
-
- body = self._get_live_annotated_fnbody(f5)
- self._check_anno_matches(body[0], 'live_in', ('x', 'x.y', 'a'))
-
- def test_live_noop(self):
-
- def f6(x):
- return x # should this cause x.* to be live?
-
- body = self._get_live_annotated_fnbody(f6)
- self._check_anno_matches(body[0], 'live_in', ('x'))
-
- def test_live_loop(self):
-
- def f7(x, n):
- for i in range(n):
- x += i
- return x
-
- body = self._get_live_annotated_fnbody(f7)
- self._check_anno_matches(body[0], 'live_in', ('x', 'n', 'range'))
- self._check_anno_matches(body[1], 'live_in', ('x'))
-
- def test_live_context_manager(self):
-
- def f8(x, f):
- with f:
- x += 1
-
- body = self._get_live_annotated_fnbody(f8)
- self._check_anno_matches(body[0], 'live_in', ('f', 'x'))
-
- def test_node_equality(self):
- node_a = gast.parse('y = x').body[0]
- node_b = gast.parse('y = x').body[0]
- self.assertNotEqual(node_a, node_b)
-
- def test_nested_functions_defined(self):
-
- def f(x):
- y = x * 2
-
- def g(z):
- return z + y
-
- return g(x)
-
- node, ctx = self._parse_and_analyze(f)
- cfg.run_analyses(node, cfg.Defined(ctx))
-
- body = node.body[0].body
- self.assertEqual(
- anno.getanno(body[2], 'defined_in'),
- frozenset(map(qual_names.QN, ('g', 'x', 'y'))))
-
- # TODO(alexbw): CFG analysis doesn't currently cross FunctionDef boundaries.
- # NOTE: 'z' is easy to find, but 'y' is not identified as
- # defined, because CFG analysis is applied with each function separately.
- # fndef_body = body[1].body
- # self.assertEqual(
- # anno.getanno(fndef_body[0], 'defined_in'),
- # frozenset(map(qual_names.QN, ('z', 'y'))))
-
- def test_nested_functions_dont_leak_definitions(self):
-
- def f(x):
- print(x)
-
- def g():
- y = 2
- return y
-
- return g() # y is not defined here
-
- node, ctx = self._parse_and_analyze(f)
- cfg.run_analyses(node, cfg.Defined(ctx))
- body = node.body[0].body
- self.assertEqual(
- anno.getanno(body[2], 'defined_in'),
- frozenset(map(qual_names.QN, ('x', 'g'))))
-
- def test_loop_else(self):
-
- # Disabling useless-else-on-loop error, because 'break' and 'continue'
- # canonicalization are a separate analysis pass, and here we test
- # the CFG analysis in isolation.
- def for_orelse(x):
- y = 0
- for i in range(len(x)):
- x += i
- else: # pylint: disable=useless-else-on-loop
- y = 1
- return x, y
-
- def while_orelse(x, i):
- y = 0
- while x < 10:
- x += i
- else: # pylint: disable=useless-else-on-loop
- y = 1
- return x, y
-
- for f in (for_orelse, while_orelse):
- node, ctx = self._parse_and_analyze(f)
- cfg.run_analyses(node, cfg.ReachingDefinitions(ctx))
- body = node.body[0].body
- return_node = body[-1]
- reaching_defs = anno.getanno(return_node, 'definitions_in')
-
- # Y could be defined by Assign(Num(0)) or Assign(Num(1))
- # X could be defined as an argument or an AugAssign.
- y_defs = [node for var, node in reaching_defs if str(var) == 'y']
- x_defs = [node for var, node in reaching_defs if str(var) == 'x']
-
- self.assertEqual(set((gast.Assign,)), set(type(def_) for def_ in y_defs))
- self.assertEqual(set((0, 1)), set(def_.value.n for def_ in y_defs))
- self.assertEqual(len(y_defs), 2)
- self.assertEqual(
- set((gast.arguments, gast.AugAssign)),
- set(type(def_) for def_ in x_defs))
- self.assertEqual(len(x_defs), 2)
-
-
-if __name__ == '__main__':
- test.main()
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/live_values.py b/tensorflow/contrib/autograph/pyct/static_analysis/live_values.py
index 9ccb98f79a..32802069ba 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/live_values.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/live_values.py
@@ -16,7 +16,7 @@
Live values are extracted from the known execution context.
-Requires activity analysis annotations.
+Requires activity and reaching definitions analyses.
"""
from __future__ import absolute_import
@@ -45,14 +45,12 @@ class LiveValueResolver(transformer.Base):
def visit_Name(self, node):
self.generic_visit(node)
if isinstance(node.ctx, gast.Load):
- assert anno.hasanno(node, NodeAnno.IS_LOCAL), node
- symbol_is_local = anno.getanno(node, NodeAnno.IS_LOCAL)
- assert anno.hasanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY), node
- symbol_is_modified = anno.getanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY)
- assert anno.hasanno(node, NodeAnno.IS_PARAM), node
- symbol_is_param = anno.getanno(node, NodeAnno.IS_PARAM)
-
- if not symbol_is_local and not symbol_is_param:
+ defs = anno.getanno(node, anno.Static.DEFINITIONS, ())
+
+ is_defined = bool(defs)
+ has_single_def = len(defs) == 1
+
+ if not is_defined:
if node.id in self.literals:
anno.setanno(node, 'live_val', self.literals[node.id])
elif node.id in self.entity_info.namespace:
@@ -79,11 +77,13 @@ class LiveValueResolver(transformer.Base):
# TODO(mdan): Attempt to trace its value through the local chain.
# TODO(mdan): Use type annotations as fallback.
- if not symbol_is_modified:
- if node.id in self.entity_info.arg_values:
- obj = self.entity_info.arg_values[node.id]
- anno.setanno(node, 'live_val', obj)
- anno.setanno(node, 'fqn', (obj.__class__.__name__,))
+ if has_single_def:
+ def_, = defs
+ if def_.param_of is self.enclosing_entities[0]:
+ if node.id in self.entity_info.arg_values:
+ obj = self.entity_info.arg_values[node.id]
+ anno.setanno(node, 'live_val', obj)
+ anno.setanno(node, 'fqn', (obj.__class__.__name__,))
return node
def visit_Attribute(self, node):
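
Note: this change replaces the boolean IS_LOCAL/IS_MODIFIED_SINCE_ENTRY/
IS_PARAM annotations with reaching-definition sets: a name may be resolved
from the namespace only when no local definition reaches it, and arg_values
may be trusted only when the name's single reaching definition is the
enclosing function's own parameter. A plain-dict sketch of that rule
(resolve_live_value and its arguments are hypothetical stand-ins for the
anno.Static.DEFINITIONS machinery):

    def resolve_live_value(name, defs, namespace, arg_values, enclosing_fn):
      # defs: tuple of Definition-like objects reaching this read of `name`.
      if not defs:
        # Never defined locally: must come from the outer namespace.
        return namespace.get(name)
      if len(defs) == 1 and getattr(defs[0], 'param_of', None) is enclosing_fn:
        # A single reaching definition, and it is the function's parameter:
        # the caller-supplied argument value still describes the symbol.
        return arg_values.get(name)
      # Ambiguous or locally rebound: no static value can be asserted.
      return None
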
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py
index 38af792777..fe3051179c 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py
@@ -21,11 +21,13 @@ from __future__ import print_function
import six
from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.static_analysis import activity
from tensorflow.contrib.autograph.pyct.static_analysis import live_values
+from tensorflow.contrib.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.contrib.autograph.pyct.static_analysis import type_info
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
@@ -48,7 +50,10 @@ class LiveValuesResolverTest(test.TestCase):
arg_types=arg_types,
owner_type=None)
node = qual_names.resolve(node)
+ graphs = cfg.build(node)
node = activity.resolve(node, entity_info)
+ node = reaching_definitions.resolve(node, entity_info, graphs,
+ reaching_definitions.Definition)
node = live_values.resolve(node, entity_info, literals)
node = type_info.resolve(node, entity_info)
node = live_values.resolve(node, entity_info, literals)
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/liveness.py b/tensorflow/contrib/autograph/pyct/static_analysis/liveness.py
new file mode 100644
index 0000000000..bf29d868a2
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/liveness.py
@@ -0,0 +1,200 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Live variable analysis.
+
+This analysis attaches to each control flow statement a set containing the
+symbols that are live at its exit.
+
+Requires activity analysis.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gast
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import annos
+
+
+class Analyzer(cfg.GraphVisitor):
+ """CFG visitor that performs liveness analysis at statement level."""
+
+ def __init__(self, graph):
+ super(Analyzer, self).__init__(graph)
+ # This allows communicating that nodes generate extra symbols,
+ # e.g. those that a function definition closes over.
+ self.extra_gen = {}
+
+ def init_state(self, _):
+ return set()
+
+ def visit_node(self, node):
+ prev_live_in = self.in_[node]
+
+ if anno.hasanno(node.ast_node, anno.Static.SCOPE):
+ node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
+
+ gen = node_scope.used | self.extra_gen.get(node.ast_node, frozenset())
+ # TODO(mdan): verify whether composites' parents need to be added.
+    # E.g. whether x needs to be added when x.y is live. Theoretically the
+    # activity analysis should track both, so that wouldn't be needed.
+ kill = node_scope.modified
+
+ live_out = set()
+ for n in node.next:
+ live_out |= self.in_[n]
+ live_in = gen | (live_out - kill)
+
+ else:
+ # Nodes that don't have a scope annotation are assumed not to touch any
+ # symbols.
+      # A Name node in this category can only be a literal name, e.g. False.
+ assert isinstance(node.ast_node,
+ (gast.Name, gast.Continue, gast.Break)), type(
+ node.ast_node)
+ live_in = prev_live_in
+ live_out = live_in
+
+ self.in_[node] = live_in
+ self.out[node] = live_out
+
+ # TODO(mdan): Move this to the superclass?
+ return prev_live_in != live_in
+
+
+class WholeTreeAnalyzer(transformer.Base):
+ """Runs liveness analysis on each of the functions defined in the AST.
+
+  If a function defines other local functions, those will have separate CFGs.
+ However, dataflow analysis needs to tie up these CFGs to properly emulate the
+ effect of closures. In the case of liveness, the parent function's live
+ variables must account for the variables that are live at the entry of each
+ subfunction. For example:
+
+ def foo():
+ # baz is live here
+ def bar():
+ print(baz)
+
+ This analyzer runs liveness analysis on each individual function, accounting
+ for the effect above.
+ """
+
+ def __init__(self, source_info, graphs):
+ super(WholeTreeAnalyzer, self).__init__(source_info)
+ self.graphs = graphs
+ self.current_analyzer = None
+ self.analyzers = {}
+
+ def visit_FunctionDef(self, node):
+ parent_analyzer = self.current_analyzer
+ subgraph = self.graphs[node]
+
+ # Postorder tree processing makes this a bit complicated:
+ # 1. construct an analyzer object and put it on stack
+ # 2. recursively walk the subtree; this will initialize the analyzer's
+ # in_ state properly (done in a block below)
+ # 3. run the final analysis
+ analyzer = Analyzer(subgraph)
+ self.current_analyzer = analyzer
+ node = self.generic_visit(node)
+ analyzer.visit_reverse()
+
+ if parent_analyzer is not None:
+ # Wire the state between the two subgraphs' analyzers.
+ child_in_state = analyzer.in_[subgraph.entry]
+ # Exception: symbols modified in the child function are local to it
+ body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
+ for qn in body_scope.modified:
+ # Note: a function modifying the symbol doesn't make that symbol
+ # live at the function's entry. In fact when that happens it is
+ # probably a case of undefined assignment, like this:
+ #
+ # bar = 0
+ # def foo():
+ # print(bar) # bar is undefined here!
+ # bar = 1
+ #
+ # Hence we use discard and not remove below.
+ child_in_state.discard(qn)
+      parent_analyzer.extra_gen[node] = frozenset(child_in_state)
+
+ self.analyzers[node] = analyzer
+ self.current_analyzer = parent_analyzer
+ return node
+
+ def visit_nonlocal(self, node):
+ raise NotImplementedError()
+
+ def visit_global(self, node):
+ raise NotImplementedError()
+
+
+class Annotator(transformer.Base):
+ """AST visitor that annotates each control flow block with live symbols."""
+
+ # Note: additional nodes may be added as needed.
+
+ def __init__(self, source_info, cross_function_analyzer):
+ super(Annotator, self).__init__(source_info)
+ self.cross_function_analyzer = cross_function_analyzer
+ self.current_analyzer = None
+
+ def visit_FunctionDef(self, node):
+ parent_analyzer = self.current_analyzer
+ self.current_analyzer = self.cross_function_analyzer.analyzers[node]
+
+ node = self.generic_visit(node)
+ self.current_analyzer = parent_analyzer
+ return node
+
+ def _aggregate_successors_live_in(self, node):
+ successors = self.current_analyzer.graph.stmt_next[node]
+ node_live_out = set()
+ for s in successors:
+ node_live_out.update(self.current_analyzer.in_[s])
+ anno.setanno(node, anno.Static.LIVE_VARS_OUT, frozenset(node_live_out))
+ node = self.generic_visit(node)
+ return node
+
+ def visit_If(self, node):
+ return self._aggregate_successors_live_in(node)
+
+ def visit_For(self, node):
+ return self._aggregate_successors_live_in(node)
+
+ def visit_While(self, node):
+ return self._aggregate_successors_live_in(node)
+
+
+def resolve(node, source_info, graphs):
+ """Resolves the live symbols at the exit of control flow statements.
+
+ Args:
+ node: ast.AST
+ source_info: transformer.SourceInfo
+ graphs: Dict[ast.FunctionDef, cfg.Graph]
+ Returns:
+ ast.AST
+ """
+ cross_function_analyzer = WholeTreeAnalyzer(source_info, graphs)
+ node = cross_function_analyzer.visit(node)
+ visitor = Annotator(source_info, cross_function_analyzer)
+ node = visitor.visit(node)
+ return node
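
Note: the core of Analyzer.visit_node above is the textbook backward liveness
transfer function. Shown standalone with plain sets (a sketch, not part of
the module's API):

    def liveness_step(used, modified, live_out):
      # A statement's live-in is what it reads, plus whatever is live after
      # it that the statement does not overwrite.
      return used | (live_out - modified)

    # `x = 0` followed by `return x`: the assignment kills the older x.
    print(liveness_step(set(), {'x'}, {'x'}))   # prints: set()
    # `if a > 0: ...` with x live afterwards: both a and x are live before,
    # cf. test_stacked_if below.
    print(liveness_step({'a'}, set(), {'x'}))   # prints: {'a', 'x'}
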
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/liveness_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/liveness_test.py
new file mode 100644
index 0000000000..d53adb28af
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/liveness_test.py
@@ -0,0 +1,149 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for liveness module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import parser
+from tensorflow.contrib.autograph.pyct import qual_names
+from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import activity
+from tensorflow.contrib.autograph.pyct.static_analysis import liveness
+from tensorflow.python.platform import test
+
+
+class LivenessTest(test.TestCase):
+
+ def _parse_and_analyze(self, test_fn):
+ node, source = parser.parse_entity(test_fn)
+ entity_info = transformer.EntityInfo(
+ source_code=source,
+ source_file=None,
+ namespace={},
+ arg_values=None,
+ arg_types=None,
+ owner_type=None)
+ node = qual_names.resolve(node)
+ node = activity.resolve(node, entity_info)
+ graphs = cfg.build(node)
+ liveness.resolve(node, entity_info, graphs)
+ return node
+
+ def assertHasLiveOut(self, node, expected):
+ live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
+ live_out_str = set(str(v) for v in live_out)
+ if not expected:
+ expected = ()
+ if not isinstance(expected, tuple):
+ expected = (expected,)
+ self.assertSetEqual(live_out_str, set(expected))
+
+ def test_stacked_if(self):
+
+ def test_fn(x, a):
+ if a > 0:
+ x = 0
+ if a > 1:
+ x = 1
+ return x
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasLiveOut(fn_body[0], ('a', 'x'))
+ self.assertHasLiveOut(fn_body[1], 'x')
+
+ def test_stacked_if_else(self):
+
+ def test_fn(x, a):
+ if a > 0:
+ x = 0
+ if a > 1:
+ x = 1
+ else:
+ x = 2
+ return x
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasLiveOut(fn_body[0], 'a')
+ self.assertHasLiveOut(fn_body[1], 'x')
+
+ def test_for_basic(self):
+
+ def test_fn(x, a):
+ for i in range(a):
+ x += i
+ return x
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasLiveOut(fn_body[0], 'x')
+
+ def test_attributes(self):
+
+ def test_fn(x, a):
+ if a > 0:
+ x.y = 0
+ return x.y
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasLiveOut(fn_body[0], ('x.y', 'x'))
+
+ def test_nested_functions(self):
+
+ def test_fn(a, b):
+ if b:
+ a = []
+
+ def foo():
+ return a
+
+ foo()
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasLiveOut(fn_body[0], 'a')
+
+ def test_nested_functions_isolation(self):
+
+ def test_fn(b):
+ if b:
+ a = 0 # pylint:disable=unused-variable
+
+ def child():
+ max(a) # pylint:disable=used-before-assignment
+ a = 1
+ return a
+
+ child()
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasLiveOut(fn_body[0], 'max')
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py
new file mode 100644
index 0000000000..9a84f1231c
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py
@@ -0,0 +1,301 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Reaching definition analysis.
+
+This analysis attaches a set of Definition objects to each symbol, one
+for each distinct definition that may reach it. The Definition objects are
+mutable and may be used by subsequent analyses to further annotate data like
+static type and value information.
+The analysis also attaches the set of symbols defined at the entry of
+control flow statements.
+
+Requires activity analysis.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gast
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import annos
+
+
+class Definition(object):
+ """Definition objects describe a unique definition of a variable.
+
+  Subclasses of this may be used by passing an appropriate factory function to
+ resolve.
+
+ Attributes:
+ param_of: Optional[ast.AST]
+ """
+
+ def __init__(self):
+ self.param_of = None
+
+ def __repr__(self):
+ return '%s[%d]' % (self.__class__.__name__, id(self))
+
+
+class _NodeState(object):
+ """Abstraction for the state of the CFG walk for reaching definition analysis.
+
+ This is a value type. Only implements the strictly necessary operators.
+
+ Attributes:
+ value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and
+ their possible definitions
+ """
+
+ def __init__(self, init_from=None):
+ if init_from:
+ if isinstance(init_from, _NodeState):
+ self.value = {
+ s: set(other_infos) for s, other_infos in init_from.value.items()
+ }
+ elif isinstance(init_from, dict):
+ self.value = {s: set((init_from[s],)) for s in init_from}
+ else:
+ assert False, init_from
+ else:
+ self.value = {}
+
+ def __eq__(self, other):
+ if frozenset(self.value.keys()) != frozenset(other.value.keys()):
+ return False
+ ret = all(self.value[s] == other.value[s] for s in self.value)
+ return ret
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __or__(self, other):
+ assert isinstance(other, _NodeState)
+ result = _NodeState(self)
+ for s, other_infos in other.value.items():
+ if s in result.value:
+ result.value[s].update(other_infos)
+ else:
+ result.value[s] = set(other_infos)
+ return result
+
+ def __sub__(self, other):
+ assert isinstance(other, set)
+ result = _NodeState(self)
+ for s in other:
+ result.value.pop(s, None)
+ return result
+
+ def __repr__(self):
+ return 'NodeState[%s]=%s' % (id(self), repr(self.value))
+
+
+class Analyzer(cfg.GraphVisitor):
+ """CFG visitor that determines reaching definitions at statement level."""
+
+ def __init__(self, graph, definition_factory):
+ self._definition_factory = definition_factory
+ super(Analyzer, self).__init__(graph)
+ # This allows communicating that nodes have extra reaching definitions,
+ # e.g. those that a function closes over.
+ self.extra_in = {}
+
+ self.gen_map = {}
+
+ def init_state(self, _):
+ return _NodeState()
+
+ def visit_node(self, node):
+ prev_defs_out = self.out[node]
+
+ defs_in = _NodeState(self.extra_in.get(node.ast_node, None))
+ for n in node.prev:
+ defs_in |= self.out[n]
+
+ if anno.hasanno(node.ast_node, anno.Static.SCOPE):
+ node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
+ # The definition objects created by each node must be singletons because
+ # their ids are used in equality checks.
+ if node not in self.gen_map:
+ node_symbols = {}
+ for s in node_scope.modified:
+ def_ = self._definition_factory()
+ if s in node_scope.params:
+ def_.param_of = node_scope.params[s]
+ node_symbols[s] = def_
+ self.gen_map[node] = _NodeState(node_symbols)
+
+ gen = self.gen_map[node]
+ kill = node_scope.modified
+ defs_out = gen | (defs_in - kill)
+
+ else:
+ # Nodes that don't have a scope annotation are assumed not to touch any
+ # symbols.
+      # A Name node in this category can only be a literal name, e.g. False.
+ # This can also happen if activity.py forgot to annotate the node with a
+ # scope object.
+ assert isinstance(
+ node.ast_node,
+ (gast.Name, gast.Break, gast.Continue, gast.Raise)), (node.ast_node,
+ node)
+ defs_out = defs_in
+
+ self.in_[node] = defs_in
+ self.out[node] = defs_out
+
+ # TODO(mdan): Move this to the superclass?
+ return prev_defs_out != defs_out
+
+
+class TreeAnnotator(transformer.Base):
+ """AST visitor that annotates each symbol name with its reaching definitions.
+
+ Simultaneously, the visitor runs the dataflow analysis on each function node,
+ accounting for the effect of closures. For example:
+
+ def foo():
+ bar = 1
+ def baz():
+ # bar = 1 reaches here
+ """
+
+ def __init__(self, source_info, graphs, definition_factory):
+ super(TreeAnnotator, self).__init__(source_info)
+ self.definition_factory = definition_factory
+ self.graphs = graphs
+ self.current_analyzer = None
+ self.current_cfg_node = None
+
+ def visit_FunctionDef(self, node):
+ parent_analyzer = self.current_analyzer
+ subgraph = self.graphs[node]
+
+ # Preorder tree processing:
+ # 1. if this is a child function, the parent was already analyzed and it
+ # has the proper state value for the subgraph's entry
+ # 2. analyze the current function body
+    # 3. recursively walk the subtree; child functions will be processed
+ analyzer = Analyzer(subgraph, self.definition_factory)
+ if parent_analyzer is not None:
+ # Wire the state between the two subgraphs' analyzers.
+ parent_out_state = parent_analyzer.out[parent_analyzer.graph.index[node]]
+ # Exception: symbols modified in the child function are local to it
+ body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
+ parent_out_state -= body_scope.modified
+ analyzer.extra_in[node.args] = parent_out_state
+
+ # Complete the analysis for the local function and annotate its body.
+ analyzer.visit_forward()
+
+ # Recursively process any remaining subfunctions.
+ self.current_analyzer = analyzer
+ # Note: not visiting name, decorator_list and returns because they don't
+    # apply to this analysis.
+ # TODO(mdan): Should we still process the function name?
+ node.args = self.visit(node.args)
+ node.body = self.visit_block(node.body)
+ self.current_analyzer = parent_analyzer
+
+ return node
+
+ def visit_nonlocal(self, node):
+ raise NotImplementedError()
+
+ def visit_global(self, node):
+ raise NotImplementedError()
+
+ def visit_Name(self, node):
+ if self.current_analyzer is None:
+      # Names may appear outside function defs, for example in class
+ # definitions.
+ return node
+
+ analyzer = self.current_analyzer
+ cfg_node = self.current_cfg_node
+
+ assert cfg_node is not None, 'name node outside of any statement?'
+
+ qn = anno.getanno(node, anno.Basic.QN)
+ if isinstance(node.ctx, gast.Load):
+ anno.setanno(node, anno.Static.DEFINITIONS,
+ tuple(analyzer.in_[cfg_node].value.get(qn, ())))
+ else:
+ anno.setanno(node, anno.Static.DEFINITIONS,
+ tuple(analyzer.out[cfg_node].value.get(qn, ())))
+
+ return node
+
+ def _aggregate_predecessors_defined_in(self, node):
+ preds = self.current_analyzer.graph.stmt_prev[node]
+ node_defined_in = set()
+ for p in preds:
+ node_defined_in |= set(self.current_analyzer.out[p].value.keys())
+ anno.setanno(node, anno.Static.DEFINED_VARS_IN, frozenset(node_defined_in))
+
+ def visit_If(self, node):
+ self._aggregate_predecessors_defined_in(node)
+ return self.generic_visit(node)
+
+ def visit_For(self, node):
+ self._aggregate_predecessors_defined_in(node)
+
+ # Manually accounting for the shortcoming described in
+ # cfg.AstToCfg.visit_For.
+ parent = self.current_cfg_node
+ self.current_cfg_node = self.current_analyzer.graph.index[node.iter]
+ node.target = self.visit(node.target)
+ self.current_cfg_node = parent
+
+ node.iter = self.visit(node.iter)
+ node.body = self.visit_block(node.body)
+ node.orelse = self.visit_block(node.orelse)
+
+ return node
+
+ def visit_While(self, node):
+ self._aggregate_predecessors_defined_in(node)
+ return self.generic_visit(node)
+
+ def visit(self, node):
+ parent = self.current_cfg_node
+
+ if (self.current_analyzer is not None and
+ node in self.current_analyzer.graph.index):
+ self.current_cfg_node = self.current_analyzer.graph.index[node]
+ node = super(TreeAnnotator, self).visit(node)
+
+ self.current_cfg_node = parent
+ return node
+
+
+def resolve(node, source_info, graphs, definition_factory):
+ """Resolves reaching definitions for each symbol.
+
+ Args:
+ node: ast.AST
+ source_info: transformer.SourceInfo
+ graphs: Dict[ast.FunctionDef, cfg.Graph]
+ definition_factory: Callable[[], Definition]
+ Returns:
+ ast.AST
+ """
+ visitor = TreeAnnotator(source_info, graphs, definition_factory)
+ node = visitor.visit(node)
+ return node
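
Note: Analyzer.visit_node above is a forward transfer function over
_NodeState, and definitions are compared by object identity, so every write
site must mint fresh Definition objects (hence the gen_map cache). A
standalone sketch:

    class Definition(object):
      pass

    def reaching_step(defs_in, written):
      # defs_in: dict of symbol -> set of Definition; written: symbols
      # assigned by this statement. A write kills all earlier definitions
      # of the same symbol.
      defs_out = {s: set(ds) for s, ds in defs_in.items() if s not in written}
      for s in written:
        defs_out[s] = {Definition()}  # a fresh, unique definition object
      return defs_out

    state = reaching_step({}, {'a'})       # a = []
    branch = reaching_step(state, {'a'})   # if b: a = []
    # At the join after the if, either definition of `a` may reach:
    joined = {'a': state['a'] | branch['a']}
    print(len(joined['a']))  # prints: 2, matching test_conditional below
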
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions_test.py
new file mode 100644
index 0000000000..243fe804b2
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions_test.py
@@ -0,0 +1,263 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for reaching_definitions module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import parser
+from tensorflow.contrib.autograph.pyct import qual_names
+from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import activity
+from tensorflow.contrib.autograph.pyct.static_analysis import reaching_definitions
+from tensorflow.python.platform import test
+
+
+class DefinitionInfoTest(test.TestCase):
+
+ def _parse_and_analyze(self, test_fn):
+ node, source = parser.parse_entity(test_fn)
+ entity_info = transformer.EntityInfo(
+ source_code=source,
+ source_file=None,
+ namespace={},
+ arg_values=None,
+ arg_types=None,
+ owner_type=None)
+ node = qual_names.resolve(node)
+ node = activity.resolve(node, entity_info)
+ graphs = cfg.build(node)
+ node = reaching_definitions.resolve(node, entity_info, graphs,
+ reaching_definitions.Definition)
+ return node
+
+ def assertHasDefs(self, node, num):
+ defs = anno.getanno(node, anno.Static.DEFINITIONS)
+ self.assertEqual(len(defs), num)
+ for r in defs:
+ self.assertIsInstance(r, reaching_definitions.Definition)
+
+ def assertHasDefinedIn(self, node, expected):
+ defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
+ defined_in_str = set(str(v) for v in defined_in)
+ if not expected:
+ expected = ()
+ if not isinstance(expected, tuple):
+ expected = (expected,)
+ self.assertSetEqual(defined_in_str, set(expected))
+
+ def assertSameDef(self, first, second):
+ self.assertHasDefs(first, 1)
+ self.assertHasDefs(second, 1)
+ self.assertIs(
+ anno.getanno(first, anno.Static.DEFINITIONS)[0],
+ anno.getanno(second, anno.Static.DEFINITIONS)[0])
+
+ def assertNotSameDef(self, first, second):
+ self.assertHasDefs(first, 1)
+ self.assertHasDefs(second, 1)
+ self.assertIsNot(
+ anno.getanno(first, anno.Static.DEFINITIONS)[0],
+ anno.getanno(second, anno.Static.DEFINITIONS)[0])
+
+ def test_conditional(self):
+
+ def test_fn(a, b):
+ a = []
+ if b:
+ a = []
+ return a
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasDefs(fn_body[0].targets[0], 1)
+ self.assertHasDefs(fn_body[1].test, 1)
+ self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
+ self.assertHasDefs(fn_body[2].value, 2)
+
+ self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
+
+ def test_while(self):
+
+ def test_fn(a):
+ max(a)
+ while True:
+ a = a
+ a = a
+ return a
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasDefs(fn_body[0].value.args[0], 1)
+ self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
+ self.assertHasDefs(fn_body[1].body[1].targets[0], 1)
+ self.assertHasDefs(fn_body[1].body[1].value, 1)
+ # The loop does have an invariant test, but the CFG doesn't know that.
+ self.assertHasDefs(fn_body[1].body[0].value, 2)
+ self.assertHasDefs(fn_body[2].value, 2)
+
+ def test_while_else(self):
+
+ def test_fn(x, i):
+ y = 0
+ while x:
+ x += i
+ if i:
+ break
+ else:
+ y = 1
+ return x, y
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasDefs(fn_body[0].targets[0], 1)
+ self.assertHasDefs(fn_body[1].test, 2)
+ self.assertHasDefs(fn_body[1].body[0].target, 1)
+ self.assertHasDefs(fn_body[1].body[1].test, 1)
+ self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
+ self.assertHasDefs(fn_body[2].value.elts[0], 2)
+ self.assertHasDefs(fn_body[2].value.elts[1], 2)
+
+ def test_for_else(self):
+
+ def test_fn(x, i):
+ y = 0
+ for i in x:
+ x += i
+ if i:
+ break
+ else:
+ continue
+ else:
+ y = 1
+ return x, y
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasDefs(fn_body[0].targets[0], 1)
+ self.assertHasDefs(fn_body[1].target, 1)
+ self.assertHasDefs(fn_body[1].body[0].target, 1)
+ self.assertHasDefs(fn_body[1].body[1].test, 1)
+ self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
+ self.assertHasDefs(fn_body[2].value.elts[0], 2)
+ self.assertHasDefs(fn_body[2].value.elts[1], 2)
+
+ def test_nested_functions(self):
+
+ def test_fn(a, b):
+ a = []
+ if b:
+ a = []
+
+ def foo():
+ return a
+
+ foo()
+
+ return a
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+ def_of_a_in_if = fn_body[1].body[0].targets[0]
+
+ self.assertHasDefs(fn_body[0].targets[0], 1)
+ self.assertHasDefs(fn_body[1].test, 1)
+ self.assertHasDefs(def_of_a_in_if, 1)
+ self.assertHasDefs(fn_body[2].value, 2)
+
+ inner_fn_body = fn_body[1].body[1].body
+ self.assertSameDef(inner_fn_body[0].value, def_of_a_in_if)
+
+ def test_nested_functions_isolation(self):
+
+ def test_fn(a):
+ a = 0
+
+ def child():
+ a = 1
+ return a
+
+ child()
+ return a
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ parent_return = fn_body[3]
+ child_return = fn_body[1].body[1]
+ # The assignment `a = 1` makes `a` local to `child`.
+ self.assertNotSameDef(parent_return.value, child_return.value)
+
+ def test_function_call_in_with(self):
+
+ def foo(_):
+ pass
+
+ def test_fn(a):
+ with foo(a):
+ return a
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasDefs(fn_body[0].items[0].context_expr.func, 0)
+ self.assertHasDefs(fn_body[0].items[0].context_expr.args[0], 1)
+
+ def test_mutation_subscript(self):
+
+ def test_fn(a):
+ l = []
+ l[0] = a
+ return l
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ creation = fn_body[0].targets[0]
+ mutation = fn_body[1].targets[0].value
+ use = fn_body[2].value
+ self.assertSameDef(creation, mutation)
+ self.assertSameDef(creation, use)
+
+ def test_replacement(self):
+
+ def foo(a):
+ return a
+
+ def test_fn(a):
+ a = foo(a)
+ return a
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ param = node.body[0].args.args[0]
+ source = fn_body[0].value.args[0]
+ target = fn_body[0].targets[0]
+ retval = fn_body[1].value
+ self.assertSameDef(param, source)
+ self.assertNotSameDef(source, target)
+ self.assertSameDef(target, retval)
+
+
+if __name__ == '__main__':
+ test.main()
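A minimal sketch of how the DEFINITIONS annotation exercised above can be
consumed downstream; the helper name is hypothetical, and name_node is assumed
to come from a tree processed as in _parse_and_analyze:

    from tensorflow.contrib.autograph.pyct import anno

    def may_be_undefined(name_node):
      # reaching_definitions.resolve attaches the set of Definition objects
      # that may reach each symbol read; an empty set means the read may
      # happen before any assignment.
      if not anno.hasanno(name_node, anno.Static.DEFINITIONS):
        return True
      return not anno.getanno(name_node, anno.Static.DEFINITIONS)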
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/type_info.py b/tensorflow/contrib/autograph/pyct/static_analysis/type_info.py
index a229c288a8..835d5199fa 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/type_info.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/type_info.py
@@ -43,9 +43,8 @@ from __future__ import print_function
import gast
-from tensorflow.contrib.autograph import utils
from tensorflow.contrib.autograph.pyct import anno
-from tensorflow.contrib.autograph.pyct import parser
+from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.python.util import tf_inspect
@@ -166,7 +165,6 @@ class TypeInfoResolver(transformer.Base):
definition = self.scope.getval(qn)
anno.copyanno(definition, node, 'type')
anno.copyanno(definition, node, 'type_fqn')
- anno.setanno(node, 'definition', definition)
# TODO(mdan): Remove this when the directives module is in.
anno.copyanno(definition, node, 'element_type')
@@ -198,52 +196,18 @@ class TypeInfoResolver(transformer.Base):
def visit_With(self, node):
for item in node.items:
if item.optional_vars is not None:
- self.apply_to_single_assignments((item.optional_vars,),
- item.context_expr,
- self._process_variable_assignment)
+ ast_util.apply_to_single_assignments((item.optional_vars,),
+ item.context_expr,
+ self._process_variable_assignment)
self.generic_visit(node)
return node
def visit_Assign(self, node):
self.generic_visit(node)
- self.apply_to_single_assignments(
- node.targets, node.value, self._process_variable_assignment)
+ ast_util.apply_to_single_assignments(node.targets, node.value,
+ self._process_variable_assignment)
return node
- # TODO(mdan): Remove as soon as the new directives module is ready.
- def visit_Call(self, node):
- if anno.hasanno(node.func, 'live_val'):
- # Symbols targeted by the "set_type" marker function are assigned the data
- # type that it specified.
- if anno.getanno(node.func, 'live_val') is utils.set_element_type:
-
- if len(node.args) < 2 or len(node.args) > 3:
- raise ValueError('"%s" must have either two or three parameters'
- % self.context.type_annotation_func)
- if len(node.args) == 2:
- target_arg, type_arg = node.args
- shape_arg = parser.parse_expression('None')
- else:
- target_arg, type_arg, shape_arg = node.args
- if not anno.hasanno(target_arg, anno.Basic.QN):
- raise ValueError('the first argument of "%s" must by a symbol' %
- utils.set_element_type)
- # TODO(mdan): This is vulnerable to symbol renaming.
- element_type = type_arg
- element_shape = shape_arg
-
- target_symbol = anno.getanno(target_arg, anno.Basic.QN)
- # Find the definition of this symbol and annotate it with the given
- # data type. That in turn will cause future uses of the symbol
- # to receive the same type annotation.
- definition = self.scope.getval(target_symbol)
- anno.setanno(node, 'element_type', element_type)
- anno.setanno(node, 'element_shape', element_shape)
- anno.setanno(definition, 'element_type', element_type)
- anno.setanno(definition, 'element_shape', element_shape)
- # TODO(mdan): Should we update references between definition and here?
- return self.generic_visit(node)
-
def resolve(node, context):
return TypeInfoResolver(context).visit(node)
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py
index 32b1148ab2..404311ba24 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py
@@ -19,11 +19,13 @@ from __future__ import division
from __future__ import print_function
from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.static_analysis import activity
from tensorflow.contrib.autograph.pyct.static_analysis import live_values
+from tensorflow.contrib.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.contrib.autograph.pyct.static_analysis import type_info
from tensorflow.python.client import session
from tensorflow.python.platform import test
@@ -69,7 +71,10 @@ class TypeInfoResolverTest(test.TestCase):
arg_types=arg_types,
owner_type=None)
node = qual_names.resolve(node)
+ graphs = cfg.build(node)
node = activity.resolve(node, entity_info)
+ node = reaching_definitions.resolve(node, entity_info, graphs,
+ reaching_definitions.Definition)
node = live_values.resolve(node, entity_info, {})
node = type_info.resolve(node, entity_info)
node = live_values.resolve(node, entity_info, {})
diff --git a/tensorflow/contrib/autograph/pyct/templates.py b/tensorflow/contrib/autograph/pyct/templates.py
index 9c479ebc2f..72d1d3b269 100644
--- a/tensorflow/contrib/autograph/pyct/templates.py
+++ b/tensorflow/contrib/autograph/pyct/templates.py
@@ -26,6 +26,7 @@ import textwrap
import gast
+from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import qual_names
@@ -43,39 +44,65 @@ class ReplaceTransformer(gast.NodeTransformer):
"""
self.replacements = replacements
self.in_replacements = False
+ self.preserved_annos = {
+ anno.Basic.ORIGIN,
+ anno.Basic.SKIP_PROCESSING,
+ anno.Static.ORIG_DEFINITIONS,
+ }
+
+ def _prepare_replacement(self, replaced, key):
+ """Prepares a replacement AST that's safe to swap in for a node.
+
+ Args:
+ replaced: ast.AST, the node being replaced
+ key: Hashable, the key of the replacement AST
+ Returns:
+      list of ast.AST, the replacement AST nodes
+ """
+ repl = self.replacements[key]
+
+ new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos)
+ if isinstance(new_nodes, gast.AST):
+ new_nodes = [new_nodes]
+
+ return new_nodes
def visit_Expr(self, node):
- if (isinstance(node.value, gast.Name) and
- node.value.id in self.replacements):
- return self.visit(node.value)
- self.generic_visit(node)
- return node
+ # When replacing a placeholder with an entire statement, the replacement
+ # must stand on its own and not be wrapped in an Expr.
+ new_value = self.visit(node.value)
+ if new_value is node.value:
+ return node
+ return new_value
def visit_keyword(self, node):
- if node.arg in self.replacements:
- repl = self.replacements[node.arg]
- if isinstance(repl, gast.keyword):
- return repl
- elif (isinstance(repl, (list, tuple)) and repl and
- all(isinstance(r, gast.keyword) for r in repl)):
- return repl
- # TODO(mdan): We may allow replacing with a string as well.
- # For example, if one wanted to replace foo with bar in foo=baz, then
- # we could allow changing just node arg, so that we end up with bar=baz.
- raise ValueError(
- 'a keyword argument may only be replaced by another keyword or a '
- 'non-empty list of keywords. Found: %s' % repl)
- return self.generic_visit(node)
+ if node.arg not in self.replacements:
+ return self.generic_visit(node)
+
+ repl = self._prepare_replacement(node, node.arg)
+ if isinstance(repl, gast.keyword):
+ return repl
+ elif (repl and isinstance(repl, (list, tuple)) and
+ all(isinstance(r, gast.keyword) for r in repl)):
+ return repl
+ # TODO(mdan): We may allow replacing with a string as well.
+ # For example, if one wanted to replace foo with bar in foo=baz, then
+ # we could allow changing just node arg, so that we end up with bar=baz.
+ raise ValueError(
+ 'a keyword argument may only be replaced by another keyword or a '
+ 'non-empty list of keywords. Found: %s' % repl)
def visit_FunctionDef(self, node):
node = self.generic_visit(node)
- if node.name in self.replacements:
- repl = self.replacements[node.name]
- if not isinstance(repl, (gast.Name, ast.Name)):
- raise ValueError(
- 'a function name can only be replaced by a Name node. Found: %s' %
- repl)
- node.name = repl.id
+ if node.name not in self.replacements:
+ return node
+
+ repl = self.replacements[node.name]
+ if not isinstance(repl, (gast.Name, ast.Name)):
+ raise ValueError(
+ 'a function name can only be replaced by a Name node. Found: %s' %
+ repl)
+ node.name = repl.id
return node
def _check_has_context(self, node):
@@ -148,6 +175,7 @@ class ReplaceTransformer(gast.NodeTransformer):
node = self.generic_visit(node)
if node.attr not in self.replacements:
return node
+
repl = self.replacements[node.attr]
if not isinstance(repl, gast.Name):
raise ValueError(
@@ -159,9 +187,7 @@ class ReplaceTransformer(gast.NodeTransformer):
if node.id not in self.replacements:
return node
- new_nodes = ast_util.copy_clean(self.replacements[node.id])
- if isinstance(new_nodes, gast.AST):
- new_nodes = [new_nodes]
+ new_nodes = self._prepare_replacement(node, node.id)
# Preserve the target context.
for n in new_nodes:
@@ -182,7 +208,7 @@ class ReplaceTransformer(gast.NodeTransformer):
def _convert_to_ast(n):
- """Convert from a known data type to AST."""
+ """Converts from a known data type to AST."""
if isinstance(n, str):
# Note: the node will receive the ctx value from the template, see
# ReplaceTransformer.visit_Name.
@@ -197,7 +223,7 @@ def _convert_to_ast(n):
def replace(template, **replacements):
- """Replace placeholders in a Python template.
+ """Replaces placeholders in a Python template.
  AST Name and Tuple nodes always receive the context inferred from
the template. However, when replacing more complex nodes (that can potentially
diff --git a/tensorflow/contrib/autograph/pyct/templates_test.py b/tensorflow/contrib/autograph/pyct/templates_test.py
index a01f8bf04c..a8bbc5a4de 100644
--- a/tensorflow/contrib/autograph/pyct/templates_test.py
+++ b/tensorflow/contrib/autograph/pyct/templates_test.py
@@ -151,17 +151,13 @@ class TemplatesTest(test.TestCase):
self.assertEqual(node.func.id, 'bar')
self.assertEqual(node.func.args[0].id, 'baz')
- def replace_as_expression_restrictions(self):
+ def test_replace_as_expression_restrictions(self):
template = """
foo(a)
bar(b)
"""
with self.assertRaises(ValueError):
templates.replace_as_expression(template)
- with self.assertRaises(ValueError):
- templates.replace('')
- with self.assertRaises(ValueError):
- templates.replace('a = b')
if __name__ == '__main__':
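A minimal usage sketch of the replace() contract exercised by these tests,
assuming string replacement values are converted to Name nodes as described in
_convert_to_ast; the template and names are illustrative:

    from tensorflow.contrib.autograph.pyct import templates

    template = """
      def fn_name(arg):
        return arg + extra
    """
    # Placeholders are matched by name; replace() returns a list of nodes.
    nodes = templates.replace(template, fn_name='wrapped', extra='offset')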
diff --git a/tensorflow/contrib/autograph/pyct/transformer.py b/tensorflow/contrib/autograph/pyct/transformer.py
index 7655811830..bbdfefc50a 100644
--- a/tensorflow/contrib/autograph/pyct/transformer.py
+++ b/tensorflow/contrib/autograph/pyct/transformer.py
@@ -59,6 +59,103 @@ class EntityInfo(object):
self.owner_type = owner_type
+class _StateStack(object):
+ """Typed stack abstraction.
+
+ This class provides syntactic sugar for a stack of objects of known
+ type. It allows accessing attributes of the object at the top of the stack
+ directly against this object, which allows for very terse syntax.
+
+ For example, this code:
+
+ stack = _StateStack(Foo)
+ stack.enter()
+ stack.bar
+
+ Is equivalent to:
+
+ stack = []
+ stack.append(Foo())
+ foo = stack[-1]
+ foo.bar
+
+ See _State for more on how this is used.
+
+ Attributes:
+ type: Any, the type of objects that this stack holds
+ level: int, the current stack depth
+ value: Any, the instance of the object at the top of the stack
+ """
+
+ def __init__(self, type_):
+ # Because we override __setattr__, we need to attach these attributes using
+ # the superclass' setattr.
+ object.__setattr__(self, 'type', type_)
+ object.__setattr__(self, '_stack', [])
+ self.enter()
+
+ def enter(self):
+ self._stack.append(self.type())
+
+ def exit(self):
+ return self._stack.pop()
+
+ @property
+ def level(self):
+ return len(self._stack)
+
+ @property
+ def value(self):
+ return self._stack[-1]
+
+ def __getattr__(self, key):
+ return getattr(self._stack[-1], key)
+
+ def __setattr__(self, key, value):
+ setattr(self._stack[-1], key, value)
+
+
+class _State(object):
+ """Supporting class for nested scope variable space for converter.Base.
+
+ This structure offers syntactic sugar over a dict of stacks of objects
+ of known type. These structures are useful to keep state during AST walks.
+ Multiple different scopes can be tracked in parallel. For example:
+
+ s = _State()
+
+ s[foo].enter()
+ s[bar].enter() # this will not affect s[foo]
+
+ Element access has special semantics:
+ * keys are a data type
+ * element values are _StateStack(type=key) objects
+ * missing elements are automatically added, similarly to defaultdict
+
+  For example, the following block:
+
+    s = _State()
+ s[Foo]
+
+ Is equivalent to:
+
+ s = {}
+ if Foo not in s:
+ s[Foo] = Foo()
+ s[Foo]
+
+ See Base for how it's used.
+ """
+
+ def __init__(self):
+ self._value = {}
+
+ def __getitem__(self, key):
+ if key not in self._value:
+ self._value[key] = _StateStack(key)
+ return self._value[key]
+
+
class Base(gast.NodeTransformer):
"""Base class for general-purpose code transformers transformers.
@@ -71,6 +168,27 @@ class Base(gast.NodeTransformer):
(possibly nested) scopes, use enter/exit_local_scope and set/get_local.
You must call enter/exit_local_scope manually, but the transformer detects
when they are not properly paired.
+
+  The transformer allows keeping state across calls to visit_* methods, local
+  to arbitrary nodes and their descendants, using the self.state attribute.
+  Multiple independent scopes are allowed and automatically constructed.
+
+ For example, to keep track of the If node that encloses any Name node, one can
+ write:
+
+ class FooType(object):
+
+ def __init__(self):
+ self.foo_property = None
+
+ class DummyTransformer(Base):
+
+      def visit_If(self, node):
+        self.state[FooType].enter()
+        self.state[FooType].foo_property = node
+        node = self.generic_visit(node)
+        self.state[FooType].exit()
+        return node
+
+      def visit_Name(self, node):
+        # Holds the innermost enclosing If node, or None outside any If.
+        self.state[FooType].foo_property
+        return node
"""
# TODO(mdan): Document all extra features.
@@ -92,6 +210,12 @@ class Base(gast.NodeTransformer):
self._local_scope_state = []
self.enter_local_scope()
+ # Allows scoping of local variables to keep state across calls to visit_*
+  # methods. Multiple scope hierarchies may exist and are keyed by tag. A scope
+ # is valid at one or more nodes and all its children. Scopes created in
+ # child nodes supersede their parent. Scopes are isolated from one another.
+ self.state = _State()
+
@property
def enclosing_entities(self):
return tuple(self._enclosing_entities)
@@ -101,7 +225,9 @@ class Base(gast.NodeTransformer):
return len(self._local_scope_state)
def enter_local_scope(self, inherit=None):
- """Marks entry into a new local scope.
+ """Deprecated. Use self.state instead.
+
+ Marks entry into a new local scope.
Args:
inherit: Optional enumerable of variable names to copy from the
@@ -116,7 +242,9 @@ class Base(gast.NodeTransformer):
self._local_scope_state.append(scope_entered)
def exit_local_scope(self, keep=None):
- """Marks exit from the current local scope.
+ """Deprecated. Use self.state instead.
+
+ Marks exit from the current local scope.
Args:
keep: Optional enumerable of variable names to copy into the
@@ -133,9 +261,11 @@ class Base(gast.NodeTransformer):
return scope_left
def set_local(self, name, value):
+ """Deprecated. Use self.state instead."""
self._local_scope_state[-1][name] = value
def get_local(self, name, default=None):
+ """Deprecated. Use self.state instead."""
return self._local_scope_state[-1].get(name, default)
def debug_print(self, node):
@@ -216,7 +346,7 @@ class Base(gast.NodeTransformer):
node_destination = new_destination
return results
- # TODO(mdan): Once we have error tracing, we may be able to just go to SSA.
+ # TODO(mdan): Remove.
def apply_to_single_assignments(self, targets, values, apply_fn):
"""Applies a function to each individual assignment.
@@ -266,7 +396,8 @@ class Base(gast.NodeTransformer):
def _get_source(self, node):
try:
- return compiler.ast_to_source(node)
+ source, _ = compiler.ast_to_source(node)
+ return source
except AssertionError:
return '<could not convert AST to source>'
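For transformer authors migrating off the deprecated local scope API, a minimal
sketch using self.state instead; _LoopInfo is a hypothetical state type:

    class _LoopInfo(object):
      def __init__(self):
        self.loop_node = None

    class MyTransformer(transformer.Base):

      def visit_While(self, node):
        # Replaces enter_local_scope()/set_local('loop', node): push a typed
        # scope that descendant visits read via self.state[_LoopInfo].
        self.state[_LoopInfo].enter()
        self.state[_LoopInfo].loop_node = node
        node = self.generic_visit(node)
        self.state[_LoopInfo].exit()
        return node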
diff --git a/tensorflow/contrib/autograph/pyct/transformer_test.py b/tensorflow/contrib/autograph/pyct/transformer_test.py
index baf04653ae..19b80b09ac 100644
--- a/tensorflow/contrib/autograph/pyct/transformer_test.py
+++ b/tensorflow/contrib/autograph/pyct/transformer_test.py
@@ -93,6 +93,83 @@ class TransformerTest(test.TestCase):
inner_function, lambda_node),
anno.getanno(lambda_expr, 'enclosing_entities'))
+ def assertSameAnno(self, first, second, key):
+ self.assertIs(anno.getanno(first, key), anno.getanno(second, key))
+
+ def assertDifferentAnno(self, first, second, key):
+ self.assertIsNot(anno.getanno(first, key), anno.getanno(second, key))
+
+ def test_state_tracking(self):
+
+ class LoopState(object):
+ pass
+
+ class CondState(object):
+ pass
+
+ class TestTransformer(transformer.Base):
+
+ def visit(self, node):
+ anno.setanno(node, 'loop_state', self.state[LoopState].value)
+ anno.setanno(node, 'cond_state', self.state[CondState].value)
+ return super(TestTransformer, self).visit(node)
+
+ def visit_While(self, node):
+ self.state[LoopState].enter()
+ node = self.generic_visit(node)
+ self.state[LoopState].exit()
+ return node
+
+ def visit_If(self, node):
+ self.state[CondState].enter()
+ node = self.generic_visit(node)
+ self.state[CondState].exit()
+ return node
+
+ tr = TestTransformer(self._simple_source_info())
+
+ def test_function(a):
+ a = 1
+ while a:
+ _ = 'a'
+ if a > 2:
+ _ = 'b'
+ while True:
+ raise '1'
+ if a > 3:
+ _ = 'c'
+ while True:
+ raise '1'
+
+ node, _ = parser.parse_entity(test_function)
+ node = tr.visit(node)
+
+ fn_body = node.body[0].body
+ outer_while_body = fn_body[1].body
+ self.assertSameAnno(fn_body[0], outer_while_body[0], 'cond_state')
+ self.assertDifferentAnno(fn_body[0], outer_while_body[0], 'loop_state')
+
+ first_if_body = outer_while_body[1].body
+ self.assertDifferentAnno(outer_while_body[0], first_if_body[0],
+ 'cond_state')
+ self.assertSameAnno(outer_while_body[0], first_if_body[0], 'loop_state')
+
+ first_inner_while_body = first_if_body[1].body
+ self.assertSameAnno(first_if_body[0], first_inner_while_body[0],
+ 'cond_state')
+ self.assertDifferentAnno(first_if_body[0], first_inner_while_body[0],
+ 'loop_state')
+
+ second_if_body = outer_while_body[2].body
+ self.assertDifferentAnno(first_if_body[0], second_if_body[0], 'cond_state')
+ self.assertSameAnno(first_if_body[0], second_if_body[0], 'loop_state')
+
+ second_inner_while_body = second_if_body[1].body
+ self.assertDifferentAnno(first_inner_while_body[0],
+ second_inner_while_body[0], 'cond_state')
+ self.assertDifferentAnno(first_inner_while_body[0],
+ second_inner_while_body[0], 'loop_state')
+
def test_local_scope_info_stack(self):
class TestTransformer(transformer.Base):
diff --git a/tensorflow/contrib/autograph/utils/BUILD b/tensorflow/contrib/autograph/utils/BUILD
index d82c17bf2a..d2b399f19b 100644
--- a/tensorflow/contrib/autograph/utils/BUILD
+++ b/tensorflow/contrib/autograph/utils/BUILD
@@ -28,7 +28,6 @@ py_library(
"tensor_list.py",
"testing.py",
"type_check.py",
- "type_hints.py",
],
srcs_version = "PY2AND3",
visibility = ["//tensorflow:__subpackages__"],
diff --git a/tensorflow/contrib/autograph/utils/__init__.py b/tensorflow/contrib/autograph/utils/__init__.py
index 817d4126d1..57b5f74741 100644
--- a/tensorflow/contrib/autograph/utils/__init__.py
+++ b/tensorflow/contrib/autograph/utils/__init__.py
@@ -30,4 +30,3 @@ from tensorflow.contrib.autograph.utils.py_func import wrap_py_func
from tensorflow.contrib.autograph.utils.tensor_list import dynamic_list_append
from tensorflow.contrib.autograph.utils.testing import fake_tf
from tensorflow.contrib.autograph.utils.type_check import is_tensor
-from tensorflow.contrib.autograph.utils.type_hints import set_element_type
diff --git a/tensorflow/contrib/batching/python/ops/batch_ops.py b/tensorflow/contrib/batching/python/ops/batch_ops.py
index 47b80bdf4a..55faad983f 100644
--- a/tensorflow/contrib/batching/python/ops/batch_ops.py
+++ b/tensorflow/contrib/batching/python/ops/batch_ops.py
@@ -58,8 +58,6 @@ def batch_function(num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes=None,
- grad_timeout_micros=60 * 1000 * 1000,
- unbatch_timeout_micros=60 * 1000 * 1000,
max_enqueued_batches=10):
"""Batches the computation done by the decorated function.
@@ -94,10 +92,6 @@ def batch_function(num_batch_threads,
does nothing. Otherwise, supplies a list of batch sizes, causing the op
to pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
- grad_timeout_micros: The timeout to use for the gradient. See the
- documentation of the unbatch op for more details. Defaults to 60s.
- unbatch_timeout_micros: The timeout to use for unbatching. See the
- documentation of the unbatch op for more details. Defaults to 60s.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.
Returns:
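A minimal usage sketch of the slimmed-down signature; the argument values and
the decorated function are illustrative only:

    from tensorflow.contrib.batching.python.ops import batch_ops

    @batch_ops.batch_function(num_batch_threads=1,
                              max_batch_size=32,
                              batch_timeout_micros=5000)
    def times_two(x):
      # Concurrent invocations are transparently batched up to max_batch_size
      # before this body runs, then unbatched on the way out.
      return x * 2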
diff --git a/tensorflow/contrib/bayesflow/python/ops/monte_carlo_impl.py b/tensorflow/contrib/bayesflow/python/ops/monte_carlo_impl.py
index 032b859d46..68ead2f760 100644
--- a/tensorflow/contrib/bayesflow/python/ops/monte_carlo_impl.py
+++ b/tensorflow/contrib/bayesflow/python/ops/monte_carlo_impl.py
@@ -192,7 +192,7 @@ def _logspace_mean(log_values):
def expectation(f, samples, log_prob=None, use_reparametrization=True,
axis=0, keep_dims=False, name=None):
- """Computes the Monte-Carlo approximation of \\(E_p[f(X)]\\).
+ r"""Computes the Monte-Carlo approximation of \\(E_p[f(X)]\\).
This function computes the Monte-Carlo approximation of an expectation, i.e.,
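A minimal usage sketch of expectation(); `p` is assumed to be a distribution
object with sample() and log_prob() methods, which is not part of this diff:

    samples = p.sample(1000)
    approx = expectation(f=lambda x: x ** 2.,
                         samples=samples,
                         log_prob=p.log_prob,
                         use_reparametrization=False)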
diff --git a/tensorflow/contrib/bigtable/BUILD b/tensorflow/contrib/bigtable/BUILD
new file mode 100644
index 0000000000..71538e0770
--- /dev/null
+++ b/tensorflow/contrib/bigtable/BUILD
@@ -0,0 +1,213 @@
+# Cloud Bigtable client for TensorFlow
+
+package(
+ default_visibility = ["//tensorflow:internal"],
+)
+
+licenses(["notice"]) # Apache 2.0
+
+load("//tensorflow:tensorflow.bzl", "tf_custom_op_py_library")
+load(
+ "//tensorflow:tensorflow.bzl",
+ "tf_copts",
+ "tf_custom_op_library",
+ "tf_gen_op_libs",
+ "tf_gen_op_wrapper_py",
+ "tf_kernel_library",
+ "tf_cc_test",
+ "tf_py_test",
+)
+
+tf_custom_op_py_library(
+ name = "bigtable",
+ srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
+ dso = [
+ ":python/ops/_bigtable.so",
+ ],
+ kernels = [
+ ":bigtable_kernels",
+ ":bigtable_ops_op_lib",
+ ],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":bigtable_ops",
+ "//tensorflow/contrib/data/python/ops:interleave_ops",
+ "//tensorflow/contrib/util:util_py",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:util",
+ "//tensorflow/python/data",
+ ],
+)
+
+KERNEL_FILES = [
+ "kernels/bigtable_kernels.cc",
+ "kernels/bigtable_lookup_dataset_op.cc",
+ "kernels/bigtable_prefix_key_dataset_op.cc",
+ "kernels/bigtable_range_key_dataset_op.cc",
+ "kernels/bigtable_sample_keys_dataset_op.cc",
+ "kernels/bigtable_sample_key_pairs_dataset_op.cc",
+ "kernels/bigtable_scan_dataset_op.cc",
+]
+
+tf_custom_op_library(
+ name = "python/ops/_bigtable.so",
+ srcs = KERNEL_FILES + [
+ "ops/bigtable_ops.cc",
+ ],
+ deps = [
+ ":bigtable_lib_cc",
+ ":bigtable_range_helpers",
+ "@com_github_googlecloudplatform_google_cloud_cpp//google/cloud/bigtable:bigtable_client",
+ ],
+)
+
+tf_gen_op_wrapper_py(
+ name = "bigtable_ops",
+ deps = [":bigtable_ops_op_lib"],
+)
+
+tf_gen_op_libs(
+ op_lib_names = [
+ "bigtable_ops",
+ "bigtable_test_ops",
+ ],
+)
+
+tf_kernel_library(
+ name = "bigtable_kernels",
+ srcs = KERNEL_FILES,
+ deps = [
+ ":bigtable_lib_cc",
+ ":bigtable_range_helpers",
+ "//tensorflow/core:framework_headers_lib",
+ "//third_party/eigen3",
+ "@com_github_googlecloudplatform_google_cloud_cpp//google/cloud/bigtable:bigtable_client",
+ ],
+)
+
+# A library for use in the bigtable kernels.
+cc_library(
+ name = "bigtable_lib_cc",
+ srcs = ["kernels/bigtable_lib.cc"],
+ hdrs = ["kernels/bigtable_lib.h"],
+ deps = [
+ "//tensorflow/core:framework_headers_lib",
+ "//third_party/eigen3",
+ "@com_github_googlecloudplatform_google_cloud_cpp//google/cloud/bigtable:bigtable_client",
+ ],
+)
+
+cc_library(
+ name = "bigtable_range_helpers",
+ srcs = ["kernels/bigtable_range_helpers.cc"],
+ hdrs = ["kernels/bigtable_range_helpers.h"],
+ deps = [
+ "//tensorflow/core:framework_headers_lib",
+ ],
+)
+
+cc_library(
+ name = "bigtable_test_client",
+ srcs = ["kernels/test_kernels/bigtable_test_client.cc"],
+ hdrs = ["kernels/test_kernels/bigtable_test_client.h"],
+ deps = [
+ "//tensorflow/core:framework_headers_lib",
+ "@com_github_googleapis_googleapis//:bigtable_protos",
+ "@com_github_googlecloudplatform_google_cloud_cpp//google/cloud/bigtable:bigtable_client",
+ "@com_googlesource_code_re2//:re2",
+ ],
+)
+
+tf_cc_test(
+ name = "bigtable_test_client_test",
+ srcs = ["kernels/test_kernels/bigtable_test_client_test.cc"],
+ tags = ["manual"],
+ deps = [
+ ":bigtable_test_client",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ "@com_github_googlecloudplatform_google_cloud_cpp//google/cloud/bigtable:bigtable_client",
+ ],
+)
+
+tf_cc_test(
+ name = "bigtable_range_helpers_test",
+ size = "small",
+ srcs = ["kernels/bigtable_range_helpers_test.cc"],
+ deps = [
+ ":bigtable_range_helpers",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ ],
+)
+
+tf_gen_op_wrapper_py(
+ name = "bigtable_test_ops",
+ deps = [":bigtable_test_ops_op_lib"],
+)
+
+tf_custom_op_library(
+ name = "python/kernel_tests/_bigtable_test.so",
+ srcs = [
+ "kernels/test_kernels/bigtable_test_client_op.cc",
+ "ops/bigtable_test_ops.cc",
+ ],
+ deps = [
+ ":bigtable_lib_cc",
+ ":bigtable_test_client",
+ "@com_googlesource_code_re2//:re2",
+ ],
+)
+
+# Don't use tf_kernel_library because it prevents access to strings/stringprintf.h
+cc_library(
+ name = "bigtable_test_kernels",
+ srcs = [
+ "kernels/test_kernels/bigtable_test_client_op.cc",
+ ],
+ copts = tf_copts(),
+ linkstatic = 1,
+ deps = [
+ ":bigtable_lib_cc",
+ ":bigtable_test_client",
+ "//tensorflow/core:framework_headers_lib",
+ "//third_party/eigen3",
+ "@com_googlesource_code_re2//:re2",
+ ],
+ alwayslink = 1,
+)
+
+tf_custom_op_py_library(
+ name = "bigtable_test_py",
+ dso = [
+ ":python/kernel_tests/_bigtable_test.so",
+ ],
+ kernels = [
+ ":bigtable_test_kernels",
+ ":bigtable_test_ops_op_lib",
+ ],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":bigtable_test_ops",
+ ],
+)
+
+tf_py_test(
+ name = "bigtable_ops_test",
+ size = "small",
+ srcs = ["python/kernel_tests/bigtable_ops_test.py"],
+ additional_deps = [
+ ":bigtable",
+ ":bigtable_test_py",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/contrib/util:util_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:dtypes",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:util",
+ ],
+ tags = ["manual"],
+)
diff --git a/tensorflow/contrib/bigtable/README.md b/tensorflow/contrib/bigtable/README.md
new file mode 100644
index 0000000000..ef3c60069e
--- /dev/null
+++ b/tensorflow/contrib/bigtable/README.md
@@ -0,0 +1,10 @@
+# Bigtable #
+
+[Google Cloud Bigtable](https://cloud.google.com/bigtable/) is a
+high-performance storage system that can store and serve training data. This
+contrib package contains an experimental integration with TensorFlow.
+
+> **Status: Highly experimental.** The current implementation is very much in
+> flux. Please use at your own risk! :-)
+
+<!-- TODO(saeta): Document usage / methods / etc. -->
diff --git a/tensorflow/contrib/proto/python/kernel_tests/test_case.py b/tensorflow/contrib/bigtable/__init__.py
index b95202c5df..7df054637c 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/test_case.py
+++ b/tensorflow/contrib/bigtable/__init__.py
@@ -1,4 +1,3 @@
-# =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,24 +11,29 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-# =============================================================================
-"""Test case base for testing proto operations."""
+# ==============================================================================
+"""Cloud Bigtable Client for TensorFlow.
+
+This contrib package allows TensorFlow to interface directly with Cloud Bigtable
+for high-speed data loading.
+
+@@BigtableClient
+@@BigTable
+
+"""
-# Python3 preparedness imports.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import ctypes as ct
-import os
-
-from tensorflow.python.platform import test
+from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigTable
+from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableClient
+from tensorflow.python.util.all_util import remove_undocumented
-class ProtoOpTestCase(test.TestCase):
+_allowed_symbols = [
+ 'BigTable',
+ 'BigtableClient',
+]
- def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
- super(ProtoOpTestCase, self).__init__(methodName)
- lib = os.path.join(os.path.dirname(__file__), 'libtestexample.so')
- if os.path.isfile(lib):
- ct.cdll.LoadLibrary(lib)
+remove_undocumented(__name__, _allowed_symbols)
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc b/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc
new file mode 100644
index 0000000000..70923e6287
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc
@@ -0,0 +1,355 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/bigtable/kernels/bigtable_lib.h"
+
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/lib/core/threadpool.h"
+
+namespace tensorflow {
+
+namespace {
+
+class BigtableClientOp : public OpKernel {
+ public:
+ explicit BigtableClientOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("project_id", &project_id_));
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("instance_id", &instance_id_));
+ OP_REQUIRES(ctx, !project_id_.empty(),
+ errors::InvalidArgument("project_id must be non-empty"));
+ OP_REQUIRES(ctx, !instance_id_.empty(),
+ errors::InvalidArgument("instance_id must be non-empty"));
+
+ OP_REQUIRES_OK(
+ ctx, ctx->GetAttr("connection_pool_size", &connection_pool_size_));
+ // If left unset by the client code, set it to a default of 100. Note: the
+ // cloud-cpp default of 4 concurrent connections is far too low for high
+ // performance streaming.
+ if (connection_pool_size_ == -1) {
+ connection_pool_size_ = 100;
+ }
+
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("max_receive_message_size",
+ &max_receive_message_size_));
+    // If left unset by the client code, set it to a default of 16 MBytes.
+    // Note: the default gRPC message size limit is too low for high
+    // performance streaming.
+ if (max_receive_message_size_ == -1) {
+ max_receive_message_size_ = 1 << 24; // 16 MBytes
+ }
+ OP_REQUIRES(ctx, max_receive_message_size_ > 0,
+ errors::InvalidArgument("connection_pool_size must be > 0"));
+ }
+
+ ~BigtableClientOp() override {
+ if (cinfo_.resource_is_private_to_kernel()) {
+ if (!cinfo_.resource_manager()
+ ->Delete<BigtableClientResource>(cinfo_.container(),
+ cinfo_.name())
+ .ok()) {
+        // Do nothing; the resource may have been deleted by session resets.
+ }
+ }
+ }
+
+ void Compute(OpKernelContext* ctx) override LOCKS_EXCLUDED(mu_) {
+ mutex_lock l(mu_);
+ if (!initialized_) {
+ ResourceMgr* mgr = ctx->resource_manager();
+ OP_REQUIRES_OK(ctx, cinfo_.Init(mgr, def()));
+ BigtableClientResource* resource;
+ OP_REQUIRES_OK(
+ ctx,
+ mgr->LookupOrCreate<BigtableClientResource>(
+ cinfo_.container(), cinfo_.name(), &resource,
+ [this, ctx](
+ BigtableClientResource** ret) EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ auto client_options =
+ google::cloud::bigtable::ClientOptions()
+ .set_connection_pool_size(connection_pool_size_)
+ .set_data_endpoint("batch-bigtable.googleapis.com");
+ auto channel_args = client_options.channel_arguments();
+ channel_args.SetMaxReceiveMessageSize(
+ max_receive_message_size_);
+ channel_args.SetUserAgentPrefix("tensorflow");
+ client_options.set_channel_arguments(channel_args);
+ std::shared_ptr<google::cloud::bigtable::DataClient> client =
+ google::cloud::bigtable::CreateDefaultDataClient(
+ project_id_, instance_id_, std::move(client_options));
+ *ret = new BigtableClientResource(project_id_, instance_id_,
+ std::move(client));
+ return Status::OK();
+ }));
+ core::ScopedUnref resource_cleanup(resource);
+ initialized_ = true;
+ }
+ OP_REQUIRES_OK(ctx, MakeResourceHandleToOutput(
+ ctx, 0, cinfo_.container(), cinfo_.name(),
+ MakeTypeIndex<BigtableClientResource>()));
+ }
+
+ private:
+ string project_id_;
+ string instance_id_;
+ int64 connection_pool_size_;
+ int32 max_receive_message_size_;
+
+ mutex mu_;
+ ContainerInfo cinfo_ GUARDED_BY(mu_);
+ bool initialized_ GUARDED_BY(mu_) = false;
+};
+
+REGISTER_KERNEL_BUILDER(Name("BigtableClient").Device(DEVICE_CPU),
+ BigtableClientOp);
+
+class BigtableTableOp : public OpKernel {
+ public:
+ explicit BigtableTableOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("table_name", &table_));
+ OP_REQUIRES(ctx, !table_.empty(),
+ errors::InvalidArgument("table_name must be non-empty"));
+ }
+
+ ~BigtableTableOp() override {
+ if (cinfo_.resource_is_private_to_kernel()) {
+ if (!cinfo_.resource_manager()
+ ->Delete<BigtableTableResource>(cinfo_.container(),
+ cinfo_.name())
+ .ok()) {
+        // Do nothing; the resource may have been deleted by session resets.
+ }
+ }
+ }
+
+ void Compute(OpKernelContext* ctx) override LOCKS_EXCLUDED(mu_) {
+ mutex_lock l(mu_);
+ if (!initialized_) {
+ ResourceMgr* mgr = ctx->resource_manager();
+ OP_REQUIRES_OK(ctx, cinfo_.Init(mgr, def()));
+
+ BigtableClientResource* client_resource;
+ OP_REQUIRES_OK(
+ ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &client_resource));
+ core::ScopedUnref unref_client(client_resource);
+
+ BigtableTableResource* resource;
+ OP_REQUIRES_OK(
+ ctx, mgr->LookupOrCreate<BigtableTableResource>(
+ cinfo_.container(), cinfo_.name(), &resource,
+ [this, client_resource](BigtableTableResource** ret) {
+ *ret = new BigtableTableResource(client_resource, table_);
+ return Status::OK();
+ }));
+ initialized_ = true;
+ }
+ OP_REQUIRES_OK(ctx, MakeResourceHandleToOutput(
+ ctx, 0, cinfo_.container(), cinfo_.name(),
+ MakeTypeIndex<BigtableTableResource>()));
+ }
+
+ private:
+ string table_; // Note: this is const after construction.
+
+ mutex mu_;
+ ContainerInfo cinfo_ GUARDED_BY(mu_);
+ bool initialized_ GUARDED_BY(mu_) = false;
+};
+
+REGISTER_KERNEL_BUILDER(Name("BigtableTable").Device(DEVICE_CPU),
+ BigtableTableOp);
+
+class ToBigtableOp : public AsyncOpKernel {
+ public:
+ explicit ToBigtableOp(OpKernelConstruction* ctx)
+ : AsyncOpKernel(ctx),
+ thread_pool_(new thread::ThreadPool(
+ ctx->env(), ThreadOptions(),
+ strings::StrCat("to_bigtable_op_", SanitizeThreadSuffix(name())),
+ /* num_threads = */ 1, /* low_latency_hint = */ false)) {}
+
+ void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {
+ // The call to `iterator->GetNext()` may block and depend on an
+ // inter-op thread pool thread, so we issue the call from the
+ // owned thread pool.
+ thread_pool_->Schedule([this, ctx, done]() {
+ const Tensor* column_families_tensor;
+ OP_REQUIRES_OK_ASYNC(
+ ctx, ctx->input("column_families", &column_families_tensor), done);
+ OP_REQUIRES_ASYNC(
+ ctx, column_families_tensor->dims() == 1,
+ errors::InvalidArgument("`column_families` must be a vector."), done);
+
+ const Tensor* columns_tensor;
+ OP_REQUIRES_OK_ASYNC(ctx, ctx->input("columns", &columns_tensor), done);
+ OP_REQUIRES_ASYNC(ctx, columns_tensor->dims() == 1,
+ errors::InvalidArgument("`columns` must be a vector."),
+ done);
+ OP_REQUIRES_ASYNC(
+ ctx,
+ columns_tensor->NumElements() ==
+ column_families_tensor->NumElements(),
+ errors::InvalidArgument("len(column_families) != len(columns)"),
+ done);
+
+ std::vector<string> column_families;
+ column_families.reserve(column_families_tensor->NumElements());
+ std::vector<string> columns;
+      columns.reserve(columns_tensor->NumElements());
+ for (uint64 i = 0; i < column_families_tensor->NumElements(); ++i) {
+ column_families.push_back(column_families_tensor->flat<string>()(i));
+ columns.push_back(columns_tensor->flat<string>()(i));
+ }
+
+ DatasetBase* dataset;
+ OP_REQUIRES_OK_ASYNC(
+ ctx, GetDatasetFromVariantTensor(ctx->input(1), &dataset), done);
+
+ IteratorContext iter_ctx = dataset::MakeIteratorContext(ctx);
+ std::unique_ptr<IteratorBase> iterator;
+ OP_REQUIRES_OK_ASYNC(
+ ctx,
+ dataset->MakeIterator(&iter_ctx, "ToBigtableOpIterator", &iterator),
+ done);
+
+ int64 timestamp_int;
+ OP_REQUIRES_OK_ASYNC(
+ ctx, ParseScalarArgument<int64>(ctx, "timestamp", &timestamp_int),
+ done);
+ OP_REQUIRES_ASYNC(ctx, timestamp_int >= -1,
+ errors::InvalidArgument("timestamp must be >= -1"),
+ done);
+
+ BigtableTableResource* resource;
+ OP_REQUIRES_OK_ASYNC(
+ ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &resource), done);
+ core::ScopedUnref resource_cleanup(resource);
+
+ std::vector<Tensor> components;
+ components.reserve(dataset->output_dtypes().size());
+ bool end_of_sequence = false;
+ do {
+ ::google::cloud::bigtable::BulkMutation mutation;
+ // TODO(saeta): Make # of mutations configurable.
+ for (uint64 i = 0; i < 100 && !end_of_sequence; ++i) {
+ OP_REQUIRES_OK_ASYNC(
+ ctx, iterator->GetNext(&iter_ctx, &components, &end_of_sequence),
+ done);
+ if (!end_of_sequence) {
+ OP_REQUIRES_OK_ASYNC(
+ ctx,
+ CreateMutation(std::move(components), column_families, columns,
+ timestamp_int, &mutation),
+ done);
+ }
+ components.clear();
+ }
+ grpc::Status mutation_status;
+ std::vector<::google::cloud::bigtable::FailedMutation> failures =
+ resource->table().BulkApply(std::move(mutation), mutation_status);
+ if (!mutation_status.ok()) {
+ LOG(ERROR) << "Failure applying mutation: "
+ << mutation_status.error_code() << " - "
+ << mutation_status.error_message() << " ("
+ << mutation_status.error_details() << ").";
+ }
+ if (!failures.empty()) {
+ for (const auto& failure : failures) {
+ LOG(ERROR) << "Failure applying mutation on row ("
+ << failure.original_index()
+ << "): " << failure.mutation().row_key()
+ << " - error: " << failure.status().error_message()
+ << " (Details: " << failure.status().error_details()
+ << ").";
+ }
+ }
+ OP_REQUIRES_ASYNC(
+ ctx, failures.empty() && mutation_status.ok(),
+ errors::Unknown("Failure while writing to BigTable: ",
+ mutation_status.error_code(), " - ",
+ mutation_status.error_message(), " (",
+ mutation_status.error_details(),
+ "), # of mutation failures: ", failures.size(),
+ ". See the log for the specific error details."),
+ done);
+ } while (!end_of_sequence);
+ done();
+ });
+ }
+
+ private:
+ static string SanitizeThreadSuffix(string suffix) {
+ string clean;
+ for (int i = 0; i < suffix.size(); ++i) {
+ const char ch = suffix[i];
+ if ((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') || ch == '_' || ch == '-') {
+ clean += ch;
+ } else {
+ clean += '_';
+ }
+ }
+ return clean;
+ }
+
+ Status CreateMutation(
+ std::vector<Tensor> tensors, const std::vector<string>& column_families,
+ const std::vector<string>& columns, int64 timestamp_int,
+ ::google::cloud::bigtable::BulkMutation* bulk_mutation) {
+ if (tensors.size() != column_families.size() + 1) {
+ return errors::InvalidArgument(
+ "Iterator produced a set of Tensors shorter than expected");
+ }
+ ::google::cloud::bigtable::SingleRowMutation mutation(
+ std::move(tensors[0].scalar<string>()()));
+ std::chrono::milliseconds timestamp(timestamp_int);
+ for (size_t i = 1; i < tensors.size(); ++i) {
+ if (!TensorShapeUtils::IsScalar(tensors[i].shape())) {
+ return errors::Internal("Output tensor ", i, " was not a scalar");
+ }
+ if (timestamp_int == -1) {
+ mutation.emplace_back(::google::cloud::bigtable::SetCell(
+ column_families[i - 1], columns[i - 1],
+ std::move(tensors[i].scalar<string>()())));
+ } else {
+ mutation.emplace_back(::google::cloud::bigtable::SetCell(
+ column_families[i - 1], columns[i - 1], timestamp,
+ std::move(tensors[i].scalar<string>()())));
+ }
+ }
+ bulk_mutation->emplace_back(std::move(mutation));
+ return Status::OK();
+ }
+
+ template <typename T>
+ Status ParseScalarArgument(OpKernelContext* ctx,
+ const StringPiece& argument_name, T* output) {
+ const Tensor* argument_t;
+ TF_RETURN_IF_ERROR(ctx->input(argument_name, &argument_t));
+ if (!TensorShapeUtils::IsScalar(argument_t->shape())) {
+ return errors::InvalidArgument(argument_name, " must be a scalar");
+ }
+ *output = argument_t->scalar<T>()();
+ return Status::OK();
+ }
+
+ std::unique_ptr<thread::ThreadPool> thread_pool_;
+};
+
+REGISTER_KERNEL_BUILDER(Name("DatasetToBigtable").Device(DEVICE_CPU),
+ ToBigtableOp);
+
+} // namespace
+
+} // namespace tensorflow
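A Python rendering (illustrative only) of the per-element layout that
ToBigtableOp::CreateMutation expects from the input dataset:

    def split_row(tensors, column_families, columns):
      # The iterator must yield len(columns) + 1 scalar strings: element 0 is
      # the row key, and element i (i >= 1) is written to the cell
      # column_families[i-1]:columns[i-1].
      if len(tensors) != len(column_families) + 1:
        raise ValueError('Iterator produced a set of Tensors of unexpected size')
      row_key, values = tensors[0], tensors[1:]
      return row_key, list(zip(column_families, columns, values))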
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_lib.cc b/tensorflow/contrib/bigtable/kernels/bigtable_lib.cc
new file mode 100644
index 0000000000..2514575f30
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_lib.cc
@@ -0,0 +1,45 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/bigtable/kernels/bigtable_lib.h"
+
+namespace tensorflow {
+
+Status GrpcStatusToTfStatus(const ::grpc::Status& status) {
+ if (status.ok()) {
+ return Status::OK();
+ }
+ auto grpc_code = status.error_code();
+ if (status.error_code() == ::grpc::StatusCode::ABORTED ||
+ status.error_code() == ::grpc::StatusCode::UNAVAILABLE ||
+ status.error_code() == ::grpc::StatusCode::OUT_OF_RANGE) {
+ grpc_code = ::grpc::StatusCode::INTERNAL;
+ }
+ return Status(
+ static_cast<::tensorflow::error::Code>(status.error_code()),
+ strings::StrCat("Error reading from BigTable: ", status.error_message(),
+ " (Details: ", status.error_details(), ")"));
+}
+
+string RegexFromStringSet(const std::vector<string>& strs) {
+ CHECK(!strs.empty()) << "The list of strings to turn into a regex was empty.";
+ std::unordered_set<string> uniq(strs.begin(), strs.end());
+ if (uniq.size() == 1) {
+ return *uniq.begin();
+ }
+ return str_util::Join(uniq, "|");
+}
+
+} // namespace tensorflow
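A Python rendering of RegexFromStringSet for readers of the filter code below;
the behavior mirrors the C++ above (deduplicate, then alternate with '|'):

    def regex_from_string_set(strs):
      # ['cf1', 'cf2', 'cf1'] yields 'cf1|cf2' (set order is unspecified).
      assert strs, 'The list of strings to turn into a regex was empty.'
      uniq = set(strs)
      if len(uniq) == 1:
        return next(iter(uniq))
      return '|'.join(uniq)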
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_lib.h b/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
new file mode 100644
index 0000000000..a2a5df1037
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
@@ -0,0 +1,143 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CONTRIB_BIGTABLE_KERNELS_BIGTABLE_LIB_H_
+#define TENSORFLOW_CONTRIB_BIGTABLE_KERNELS_BIGTABLE_LIB_H_
+
+// Note: we use bigtable/client/internal/table.h because it is the no-exception API.
+
+#include "google/cloud/bigtable/data_client.h"
+#include "google/cloud/bigtable/internal/table.h"
+#include "tensorflow/core/framework/dataset.h"
+#include "tensorflow/core/framework/resource_mgr.h"
+
+namespace tensorflow {
+
+Status GrpcStatusToTfStatus(const ::grpc::Status& status);
+
+string RegexFromStringSet(const std::vector<string>& strs);
+
+class BigtableClientResource : public ResourceBase {
+ public:
+ BigtableClientResource(
+ string project_id, string instance_id,
+ std::shared_ptr<google::cloud::bigtable::DataClient> client)
+ : project_id_(std::move(project_id)),
+ instance_id_(std::move(instance_id)),
+ client_(std::move(client)) {}
+
+ std::shared_ptr<google::cloud::bigtable::DataClient> get_client() {
+ return client_;
+ }
+
+ string DebugString() override {
+ return strings::StrCat("BigtableClientResource(project_id: ", project_id_,
+ ", instance_id: ", instance_id_, ")");
+ }
+
+ private:
+ const string project_id_;
+ const string instance_id_;
+ std::shared_ptr<google::cloud::bigtable::DataClient> client_;
+};
+
+class BigtableTableResource : public ResourceBase {
+ public:
+ BigtableTableResource(BigtableClientResource* client, string table_name)
+ : client_(client),
+ table_name_(std::move(table_name)),
+ table_(client->get_client(), table_name_,
+ google::cloud::bigtable::AlwaysRetryMutationPolicy()) {
+ client_->Ref();
+ }
+
+ ~BigtableTableResource() override { client_->Unref(); }
+
+ ::google::cloud::bigtable::noex::Table& table() { return table_; }
+
+ string DebugString() override {
+ return strings::StrCat(
+ "BigtableTableResource(client: ", client_->DebugString(),
+ ", table: ", table_name_, ")");
+ }
+
+ private:
+  BigtableClientResource* client_;  // Owns one ref.
+ const string table_name_;
+ ::google::cloud::bigtable::noex::Table table_;
+};
+
+// BigtableReaderDatasetIterator is an abstract base class for iterators over
+// "reader" datasets (source datasets, not transformation datasets) that read
+// from Bigtable.
+template <typename Dataset>
+class BigtableReaderDatasetIterator : public DatasetIterator<Dataset> {
+ public:
+ explicit BigtableReaderDatasetIterator(
+ const typename DatasetIterator<Dataset>::Params& params)
+ : DatasetIterator<Dataset>(params), iterator_(nullptr, false) {}
+
+ Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
+ bool* end_of_sequence) override {
+ mutex_lock l(mu_);
+ TF_RETURN_IF_ERROR(EnsureIteratorInitialized());
+ if (iterator_ == reader_->end()) {
+ grpc::Status status = reader_->Finish();
+ if (status.ok()) {
+ *end_of_sequence = true;
+ return Status::OK();
+ }
+ return GrpcStatusToTfStatus(status);
+ }
+ *end_of_sequence = false;
+ google::cloud::bigtable::Row& row = *iterator_;
+ Status s = ParseRow(ctx, row, out_tensors);
+ // Ensure we always advance.
+ ++iterator_;
+ return s;
+ }
+
+ protected:
+ virtual ::google::cloud::bigtable::RowRange MakeRowRange() = 0;
+ virtual ::google::cloud::bigtable::Filter MakeFilter() = 0;
+ virtual Status ParseRow(IteratorContext* ctx,
+ const ::google::cloud::bigtable::Row& row,
+ std::vector<Tensor>* out_tensors) = 0;
+
+ private:
+ Status EnsureIteratorInitialized() EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ if (reader_) {
+ return Status::OK();
+ }
+
+ auto rows = MakeRowRange();
+ auto filter = MakeFilter();
+
+    // Note: the `this->` qualification in `this->dataset()` below is
+    // necessary due to namespace name conflicts.
+ reader_.reset(new ::google::cloud::bigtable::RowReader(
+ this->dataset()->table()->table().ReadRows(rows, filter)));
+ iterator_ = reader_->begin();
+ return Status::OK();
+ }
+
+ mutex mu_;
+ std::unique_ptr<::google::cloud::bigtable::RowReader> reader_ GUARDED_BY(mu_);
+ ::google::cloud::bigtable::RowReader::iterator iterator_ GUARDED_BY(mu_);
+};
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_CONTRIB_BIGTABLE_KERNELS_BIGTABLE_LIB_H_
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc
new file mode 100644
index 0000000000..9e49fa35db
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc
@@ -0,0 +1,221 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/bigtable/kernels/bigtable_lib.h"
+#include "tensorflow/core/framework/op_kernel.h"
+
+namespace tensorflow {
+namespace {
+
+class BigtableLookupDatasetOp : public UnaryDatasetOpKernel {
+ public:
+ using UnaryDatasetOpKernel::UnaryDatasetOpKernel;
+
+ void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
+ DatasetBase** output) override {
+ BigtableTableResource* table;
+ OP_REQUIRES_OK(ctx, LookupResource(ctx, HandleFromInput(ctx, 1), &table));
+
+ std::vector<string> column_families;
+ std::vector<string> columns;
+ OP_REQUIRES_OK(ctx, ParseVectorArgument<string>(ctx, "column_families",
+ &column_families));
+ OP_REQUIRES_OK(ctx, ParseVectorArgument<string>(ctx, "columns", &columns));
+ OP_REQUIRES(
+ ctx, column_families.size() == columns.size(),
+ errors::InvalidArgument("len(columns) != len(column_families)"));
+
+ const uint64 num_outputs = columns.size() + 1;
+ std::vector<PartialTensorShape> output_shapes;
+ output_shapes.reserve(num_outputs);
+ DataTypeVector output_types;
+ output_types.reserve(num_outputs);
+ for (uint64 i = 0; i < num_outputs; ++i) {
+ output_shapes.push_back({});
+ output_types.push_back(DT_STRING);
+ }
+
+ *output =
+ new Dataset(ctx, input, table, std::move(column_families),
+ std::move(columns), output_types, std::move(output_shapes));
+ }
+
+ private:
+ class Dataset : public GraphDatasetBase {
+ public:
+ explicit Dataset(OpKernelContext* ctx, const DatasetBase* input,
+ BigtableTableResource* table,
+ std::vector<string> column_families,
+ std::vector<string> columns,
+ const DataTypeVector& output_types,
+ std::vector<PartialTensorShape> output_shapes)
+ : GraphDatasetBase(ctx),
+ input_(input),
+ table_(table),
+ column_families_(std::move(column_families)),
+ columns_(std::move(columns)),
+ output_types_(output_types),
+ output_shapes_(std::move(output_shapes)),
+ filter_(MakeFilter(column_families_, columns_)) {
+ table_->Ref();
+ input_->Ref();
+ }
+
+ ~Dataset() override {
+ table_->Unref();
+ input_->Unref();
+ }
+
+ std::unique_ptr<IteratorBase> MakeIteratorInternal(
+ const string& prefix) const override {
+ return std::unique_ptr<IteratorBase>(new Iterator(
+ {this, strings::StrCat(prefix, "::BigtableLookupDataset")}));
+ }
+
+ const DataTypeVector& output_dtypes() const override {
+ return output_types_;
+ }
+
+ const std::vector<PartialTensorShape>& output_shapes() const override {
+ return output_shapes_;
+ }
+
+ string DebugString() const override {
+ return "BigtableLookupDatasetOp::Dataset";
+ }
+
+ private:
+ static ::google::cloud::bigtable::Filter MakeFilter(
+ const std::vector<string>& column_families,
+ const std::vector<string>& columns) {
+ string column_family_regex = RegexFromStringSet(column_families);
+ string column_regex = RegexFromStringSet(columns);
+
+ return ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1),
+ ::google::cloud::bigtable::Filter::FamilyRegex(column_family_regex),
+ ::google::cloud::bigtable::Filter::ColumnRegex(column_regex));
+ }
+
+ class Iterator : public DatasetIterator<Dataset> {
+ public:
+ explicit Iterator(const Params& params)
+ : DatasetIterator<Dataset>(params) {}
+
+ Status Initialize(IteratorContext* ctx) override {
+ return dataset()->input_->MakeIterator(ctx, prefix(), &input_impl_);
+ }
+
+ Status GetNextInternal(IteratorContext* ctx,
+ std::vector<Tensor>* out_tensors,
+ bool* end_of_sequence) override {
+ mutex_lock l(mu_); // Sequence requests.
+ std::vector<Tensor> input_tensors;
+ TF_RETURN_IF_ERROR(
+ input_impl_->GetNext(ctx, &input_tensors, end_of_sequence));
+ if (*end_of_sequence) {
+ return Status::OK();
+ }
+ if (input_tensors.size() != 1) {
+ return errors::InvalidArgument(
+ "Upstream iterator (", dataset()->input_->DebugString(),
+ ") did not produce a single `tf.string` `tf.Tensor`. It "
+ "produced ",
+ input_tensors.size(), " tensors.");
+ }
+ if (input_tensors[0].NumElements() == 0) {
+ return errors::InvalidArgument("Upstream iterator (",
+ dataset()->input_->DebugString(),
+ ") return an empty set of keys.");
+ }
+ if (input_tensors[0].NumElements() == 1) {
+ // Single key lookup.
+ ::grpc::Status status;
+ auto pair = dataset()->table_->table().ReadRow(
+ input_tensors[0].scalar<string>()(), dataset()->filter_, status);
+ if (!status.ok()) {
+ return GrpcStatusToTfStatus(status);
+ }
+ if (!pair.first) {
+ return errors::DataLoss("Row key '",
+ input_tensors[0].scalar<string>()(),
+ "' not found.");
+ }
+ TF_RETURN_IF_ERROR(ParseRow(ctx, pair.second, out_tensors));
+ } else {
+ // Batched get.
+ return errors::Unimplemented(
+ "BigtableLookupDataset doesn't yet support batched retrieval.");
+ }
+ return Status::OK();
+ }
+
+ private:
+ Status ParseRow(IteratorContext* ctx,
+ const ::google::cloud::bigtable::Row& row,
+ std::vector<Tensor>* out_tensors) {
+ out_tensors->reserve(dataset()->columns_.size() + 1);
+ Tensor row_key_tensor(ctx->allocator({}), DT_STRING, {});
+ row_key_tensor.scalar<string>()() = string(row.row_key());
+ out_tensors->emplace_back(std::move(row_key_tensor));
+
+ if (row.cells().size() > 2 * dataset()->columns_.size()) {
+ LOG(WARNING) << "An excessive number of columns ("
+ << row.cells().size()
+ << ") were retrieved when reading row: "
+ << row.row_key();
+ }
+
+ for (uint64 i = 0; i < dataset()->columns_.size(); ++i) {
+ Tensor col_tensor(ctx->allocator({}), DT_STRING, {});
+ bool found_column = false;
+ for (auto cell_itr = row.cells().begin();
+ !found_column && cell_itr != row.cells().end(); ++cell_itr) {
+ if (cell_itr->family_name() == dataset()->column_families_[i] &&
+ string(cell_itr->column_qualifier()) ==
+ dataset()->columns_[i]) {
+ col_tensor.scalar<string>()() = string(cell_itr->value());
+ found_column = true;
+ }
+ }
+ if (!found_column) {
+ return errors::DataLoss("Column ", dataset()->column_families_[i],
+ ":", dataset()->columns_[i],
+ " not found in row: ", row.row_key());
+ }
+ out_tensors->emplace_back(std::move(col_tensor));
+ }
+ return Status::OK();
+ }
+
+ mutex mu_;
+ std::unique_ptr<IteratorBase> input_impl_ GUARDED_BY(mu_);
+ };
+
+ const DatasetBase* const input_;
+    BigtableTableResource* const table_;
+ const std::vector<string> column_families_;
+ const std::vector<string> columns_;
+ const DataTypeVector output_types_;
+ const std::vector<PartialTensorShape> output_shapes_;
+ const ::google::cloud::bigtable::Filter filter_;
+ };
+};
+
+REGISTER_KERNEL_BUILDER(Name("BigtableLookupDataset").Device(DEVICE_CPU),
+ BigtableLookupDatasetOp);
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc
new file mode 100644
index 0000000000..e960719614
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc
@@ -0,0 +1,104 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/bigtable/kernels/bigtable_lib.h"
+#include "tensorflow/core/framework/op_kernel.h"
+
+namespace tensorflow {
+namespace {
+
+class BigtablePrefixKeyDatasetOp : public DatasetOpKernel {
+ public:
+ using DatasetOpKernel::DatasetOpKernel;
+
+ void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
+ string prefix;
+ OP_REQUIRES_OK(ctx, ParseScalarArgument<string>(ctx, "prefix", &prefix));
+
+ BigtableTableResource* resource;
+ OP_REQUIRES_OK(ctx,
+ LookupResource(ctx, HandleFromInput(ctx, 0), &resource));
+
+ *output = new Dataset(ctx, resource, std::move(prefix));
+ }
+
+ private:
+ class Dataset : public GraphDatasetBase {
+ public:
+ explicit Dataset(OpKernelContext* ctx, BigtableTableResource* table,
+ string prefix)
+ : GraphDatasetBase(ctx), table_(table), prefix_(std::move(prefix)) {
+ table_->Ref();
+ }
+
+ ~Dataset() override { table_->Unref(); }
+
+ std::unique_ptr<IteratorBase> MakeIteratorInternal(
+ const string& prefix) const override {
+ return std::unique_ptr<IteratorBase>(new Iterator(
+ {this, strings::StrCat(prefix, "::BigtablePrefixKeyDataset")}));
+ }
+
+ const DataTypeVector& output_dtypes() const override {
+ static DataTypeVector* dtypes = new DataTypeVector({DT_STRING});
+ return *dtypes;
+ }
+
+ const std::vector<PartialTensorShape>& output_shapes() const override {
+ static std::vector<PartialTensorShape>* shapes =
+ new std::vector<PartialTensorShape>({{}});
+ return *shapes;
+ }
+
+ string DebugString() const override {
+ return "BigtablePrefixKeyDatasetOp::Dataset";
+ }
+
+ BigtableTableResource* table() const { return table_; }
+
+ private:
+ class Iterator : public BigtableReaderDatasetIterator<Dataset> {
+ public:
+ explicit Iterator(const Params& params)
+ : BigtableReaderDatasetIterator<Dataset>(params) {}
+
+ ::google::cloud::bigtable::RowRange MakeRowRange() override {
+ return ::google::cloud::bigtable::RowRange::Prefix(dataset()->prefix_);
+ }
+ ::google::cloud::bigtable::Filter MakeFilter() override {
+ return ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::CellsRowLimit(1),
+ ::google::cloud::bigtable::Filter::StripValueTransformer());
+ }
+ Status ParseRow(IteratorContext* ctx,
+ const ::google::cloud::bigtable::Row& row,
+ std::vector<Tensor>* out_tensors) override {
+ Tensor output_tensor(ctx->allocator({}), DT_STRING, {});
+ output_tensor.scalar<string>()() = string(row.row_key());
+ out_tensors->emplace_back(std::move(output_tensor));
+ return Status::OK();
+ }
+ };
+
+ BigtableTableResource* const table_;
+ const string prefix_;
+ };
+};
+
+REGISTER_KERNEL_BUILDER(Name("BigtablePrefixKeyDataset").Device(DEVICE_CPU),
+ BigtablePrefixKeyDatasetOp);
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_range_helpers.cc b/tensorflow/contrib/bigtable/kernels/bigtable_range_helpers.cc
new file mode 100644
index 0000000000..51965f6214
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_range_helpers.cc
@@ -0,0 +1,68 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/bigtable/kernels/bigtable_range_helpers.h"
+
+#include "tensorflow/core/platform/logging.h"
+
+namespace tensorflow {
+
+namespace {
+
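+// Returns the first key after all keys that begin with `prefix`, i.e. the
+// exclusive upper bound of the prefix's key range. For example, "abc\xff"
+// becomes "abd". An empty result denotes a range that is unbounded above
+// (e.g. for an empty prefix, or a prefix consisting entirely of '\xff's).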
+string MakePrefixEndKey(const string& prefix) {
+ string end = prefix;
+ while (true) {
+ if (end.empty()) {
+ return end;
+ }
+ ++end[end.size() - 1];
+ if (end[end.size() - 1] == 0) {
+ // Handle wraparound case.
+ end = end.substr(0, end.size() - 1);
+ } else {
+ return end;
+ }
+ }
+}
+
+} // namespace
+
+/* static */ MultiModeKeyRange MultiModeKeyRange::FromPrefix(string prefix) {
+ string end = MakePrefixEndKey(prefix);
+ VLOG(1) << "Creating MultiModeKeyRange from Prefix: " << prefix
+ << ", with end key: " << end;
+ return MultiModeKeyRange(std::move(prefix), std::move(end));
+}
+
+/* static */ MultiModeKeyRange MultiModeKeyRange::FromRange(string begin,
+ string end) {
+ return MultiModeKeyRange(std::move(begin), std::move(end));
+}
+
+const string& MultiModeKeyRange::begin_key() const { return begin_; }
+
+const string& MultiModeKeyRange::end_key() const { return end_; }
+
+bool MultiModeKeyRange::contains_key(StringPiece key) const {
+ if (StringPiece(begin_) > key) {
+ return false;
+ }
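+  // An empty end_ denotes a range that is unbounded above, so the upper
+  // bound is only checked when end_ is non-empty.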
+ if (StringPiece(end_) <= key && !end_.empty()) {
+ return false;
+ }
+ return true;
+}
+
+} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_range_helpers.h b/tensorflow/contrib/bigtable/kernels/bigtable_range_helpers.h
new file mode 100644
index 0000000000..44c628e366
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_range_helpers.h
@@ -0,0 +1,67 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CONTRIB_BIGTABLE_KERNELS_BIGTABLE_RANGE_HELPERS_H_
+#define TENSORFLOW_CONTRIB_BIGTABLE_KERNELS_BIGTABLE_RANGE_HELPERS_H_
+
+#include <string>
+
+#include "tensorflow/core/lib/core/stringpiece.h"
+#include "tensorflow/core/platform/types.h"
+
+namespace tensorflow {
+
+// Represents a continuous range of keys defined by either a prefix or a range.
+//
+// Ranges are represented as "half-open", where the beginning key is included
+// in the range, and the end_key is the first excluded key after the range.
+//
+// The range of keys can be specified either by a key prefix, or by an explicit
+// begin key and end key. All methods on this class are valid no matter which
+// way the range was specified.
+//
+// Example:
+// MultiModeKeyRange range = MultiModeKeyRange::FromPrefix("myPrefix");
+// if (range.contains_key("myPrefixedKey")) {
+// LOG(INFO) << "range from " << range.begin_key() << " to "
+//                << range.end_key() << " contains \"myPrefixedKey\"";
+// }
+// if (!range.contains_key("randomKey")) {
+// LOG(INFO) << "range does not contain \"randomKey\"";
+// }
+// range = MultiModeKeyRange::FromRange("a_start_key", "z_end_key");
+class MultiModeKeyRange {
+ public:
+ static MultiModeKeyRange FromPrefix(string prefix);
+ static MultiModeKeyRange FromRange(string begin, string end);
+
+ // The first valid key in the range.
+ const string& begin_key() const;
+ // The first invalid key after the valid range.
+ const string& end_key() const;
+ // Returns true if the provided key is a part of the range, false otherwise.
+ bool contains_key(StringPiece key) const;
+
+ private:
+ MultiModeKeyRange(string begin, string end)
+ : begin_(std::move(begin)), end_(std::move(end)) {}
+
+ const string begin_;
+ const string end_;
+};
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_CONTRIB_BIGTABLE_KERNELS_BIGTABLE_RANGE_HELPERS_H_
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_range_helpers_test.cc b/tensorflow/contrib/bigtable/kernels/bigtable_range_helpers_test.cc
new file mode 100644
index 0000000000..1bfc547271
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_range_helpers_test.cc
@@ -0,0 +1,107 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/bigtable/kernels/bigtable_range_helpers.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+namespace {
+
+TEST(MultiModeKeyRangeTest, SimplePrefix) {
+ MultiModeKeyRange r = MultiModeKeyRange::FromPrefix("prefix");
+ EXPECT_EQ("prefix", r.begin_key());
+ EXPECT_EQ("prefiy", r.end_key());
+ EXPECT_TRUE(r.contains_key("prefixed_key"));
+ EXPECT_FALSE(r.contains_key("not-prefixed-key"));
+ EXPECT_FALSE(r.contains_key("prefi"));
+ EXPECT_FALSE(r.contains_key("prefiy"));
+ EXPECT_FALSE(r.contains_key("early"));
+ EXPECT_FALSE(r.contains_key(""));
+}
+
+TEST(MultiModeKeyRangeTest, Range) {
+ MultiModeKeyRange r = MultiModeKeyRange::FromRange("a", "b");
+ EXPECT_EQ("a", r.begin_key());
+ EXPECT_EQ("b", r.end_key());
+ EXPECT_TRUE(r.contains_key("a"));
+ EXPECT_TRUE(r.contains_key("ab"));
+ EXPECT_FALSE(r.contains_key("b"));
+ EXPECT_FALSE(r.contains_key("bc"));
+ EXPECT_FALSE(r.contains_key("A"));
+ EXPECT_FALSE(r.contains_key("B"));
+ EXPECT_FALSE(r.contains_key(""));
+}
+
+TEST(MultiModeKeyRangeTest, InvertedRange) {
+ MultiModeKeyRange r = MultiModeKeyRange::FromRange("b", "a");
+ EXPECT_FALSE(r.contains_key("a"));
+ EXPECT_FALSE(r.contains_key("b"));
+ EXPECT_FALSE(r.contains_key(""));
+}
+
+TEST(MultiModeKeyRangeTest, EmptyPrefix) {
+ MultiModeKeyRange r = MultiModeKeyRange::FromPrefix("");
+ EXPECT_EQ("", r.begin_key());
+ EXPECT_EQ("", r.end_key());
+ EXPECT_TRUE(r.contains_key(""));
+ EXPECT_TRUE(r.contains_key("a"));
+ EXPECT_TRUE(r.contains_key("z"));
+ EXPECT_TRUE(r.contains_key("A"));
+ EXPECT_TRUE(r.contains_key("ZZZZZZ"));
+}
+
+TEST(MultiModeKeyRangeTest, HalfRange) {
+ MultiModeKeyRange r = MultiModeKeyRange::FromRange("start", "");
+ EXPECT_EQ("start", r.begin_key());
+ EXPECT_EQ("", r.end_key());
+ EXPECT_TRUE(r.contains_key("start"));
+ EXPECT_TRUE(r.contains_key("starting"));
+ EXPECT_TRUE(r.contains_key("z-end"));
+ EXPECT_FALSE(r.contains_key(""));
+ EXPECT_FALSE(r.contains_key("early"));
+}
+
+TEST(MultiModeKeyRangeTest, PrefixWrapAround) {
+ string prefix = "abc\xff";
+ MultiModeKeyRange r = MultiModeKeyRange::FromPrefix(prefix);
+ EXPECT_EQ(prefix, r.begin_key());
+ EXPECT_EQ("abd", r.end_key());
+
+ EXPECT_TRUE(r.contains_key("abc\xff\x07"));
+ EXPECT_TRUE(r.contains_key("abc\xff\x15"));
+ EXPECT_TRUE(r.contains_key("abc\xff\x61"));
+ EXPECT_TRUE(r.contains_key("abc\xff\xff"));
+ EXPECT_FALSE(r.contains_key("abc\0"));
+ EXPECT_FALSE(r.contains_key("abd"));
+}
+
+TEST(MultiModeKeyRangeTest, PrefixSignedWrapAround) {
+ string prefix = "abc\x7f";
+ MultiModeKeyRange r = MultiModeKeyRange::FromPrefix(prefix);
+ EXPECT_EQ(prefix, r.begin_key());
+ EXPECT_EQ("abc\x80", r.end_key());
+
+ EXPECT_TRUE(r.contains_key("abc\x7f\x07"));
+ EXPECT_TRUE(r.contains_key("abc\x7f\x15"));
+ EXPECT_TRUE(r.contains_key("abc\x7f\x61"));
+ EXPECT_TRUE(r.contains_key("abc\x7f\xff"));
+ EXPECT_FALSE(r.contains_key("abc\0"));
+ EXPECT_FALSE(r.contains_key("abc\x01"));
+ EXPECT_FALSE(r.contains_key("abd"));
+ EXPECT_FALSE(r.contains_key("ab\x80"));
+}
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc
new file mode 100644
index 0000000000..96d3565d9b
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc
@@ -0,0 +1,112 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/bigtable/kernels/bigtable_lib.h"
+#include "tensorflow/core/framework/op_kernel.h"
+
+namespace tensorflow {
+namespace {
+
+class BigtableRangeKeyDatasetOp : public DatasetOpKernel {
+ public:
+ using DatasetOpKernel::DatasetOpKernel;
+
+ void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
+ string start_key;
+ OP_REQUIRES_OK(ctx,
+ ParseScalarArgument<string>(ctx, "start_key", &start_key));
+ string end_key;
+ OP_REQUIRES_OK(ctx, ParseScalarArgument<string>(ctx, "end_key", &end_key));
+
+ BigtableTableResource* resource;
+ OP_REQUIRES_OK(ctx,
+ LookupResource(ctx, HandleFromInput(ctx, 0), &resource));
+
+ *output =
+ new Dataset(ctx, resource, std::move(start_key), std::move(end_key));
+ }
+
+ private:
+ class Dataset : public GraphDatasetBase {
+ public:
+ explicit Dataset(OpKernelContext* ctx, BigtableTableResource* table,
+ string start_key, string end_key)
+ : GraphDatasetBase(ctx),
+ table_(table),
+ start_key_(std::move(start_key)),
+ end_key_(std::move(end_key)) {
+ table_->Ref();
+ }
+
+ ~Dataset() override { table_->Unref(); }
+
+ std::unique_ptr<IteratorBase> MakeIteratorInternal(
+ const string& prefix) const override {
+ return std::unique_ptr<IteratorBase>(new Iterator(
+ {this, strings::StrCat(prefix, "::BigtableRangeKeyDataset")}));
+ }
+
+ const DataTypeVector& output_dtypes() const override {
+ static DataTypeVector* dtypes = new DataTypeVector({DT_STRING});
+ return *dtypes;
+ }
+
+ const std::vector<PartialTensorShape>& output_shapes() const override {
+ static std::vector<PartialTensorShape>* shapes =
+ new std::vector<PartialTensorShape>({{}});
+ return *shapes;
+ }
+
+ string DebugString() const override {
+ return "BigtableRangeKeyDatasetOp::Dataset";
+ }
+
+ BigtableTableResource* table() const { return table_; }
+
+ private:
+ class Iterator : public BigtableReaderDatasetIterator<Dataset> {
+ public:
+ explicit Iterator(const Params& params)
+ : BigtableReaderDatasetIterator<Dataset>(params) {}
+
+ ::google::cloud::bigtable::RowRange MakeRowRange() override {
+ return ::google::cloud::bigtable::RowRange::Range(dataset()->start_key_,
+ dataset()->end_key_);
+ }
+ ::google::cloud::bigtable::Filter MakeFilter() override {
+ return ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::CellsRowLimit(1),
+ ::google::cloud::bigtable::Filter::StripValueTransformer());
+ }
+ Status ParseRow(IteratorContext* ctx,
+ const ::google::cloud::bigtable::Row& row,
+ std::vector<Tensor>* out_tensors) override {
+ Tensor output_tensor(ctx->allocator({}), DT_STRING, {});
+ output_tensor.scalar<string>()() = string(row.row_key());
+ out_tensors->emplace_back(std::move(output_tensor));
+ return Status::OK();
+ }
+ };
+
+ BigtableTableResource* const table_;
+ const string start_key_;
+ const string end_key_;
+ };
+};
+
+REGISTER_KERNEL_BUILDER(Name("BigtableRangeKeyDataset").Device(DEVICE_CPU),
+ BigtableRangeKeyDatasetOp);
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_sample_key_pairs_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_sample_key_pairs_dataset_op.cc
new file mode 100644
index 0000000000..a1a63a975a
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_sample_key_pairs_dataset_op.cc
@@ -0,0 +1,200 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/bigtable/kernels/bigtable_lib.h"
+#include "tensorflow/contrib/bigtable/kernels/bigtable_range_helpers.h"
+#include "tensorflow/core/framework/op_kernel.h"
+
+namespace tensorflow {
+namespace {
+
+class BigtableSampleKeyPairsDatasetOp : public DatasetOpKernel {
+ public:
+ using DatasetOpKernel::DatasetOpKernel;
+
+ void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
+ string prefix;
+ OP_REQUIRES_OK(ctx, ParseScalarArgument<string>(ctx, "prefix", &prefix));
+
+ string start_key;
+ OP_REQUIRES_OK(ctx,
+ ParseScalarArgument<string>(ctx, "start_key", &start_key));
+ string end_key;
+ OP_REQUIRES_OK(ctx, ParseScalarArgument<string>(ctx, "end_key", &end_key));
+
+ BigtableTableResource* resource;
+ OP_REQUIRES_OK(ctx,
+ LookupResource(ctx, HandleFromInput(ctx, 0), &resource));
+
+ OP_REQUIRES(ctx, prefix.empty() || start_key.empty(),
+ errors::InvalidArgument(
+ "Only one of prefix and start_key can be provided"));
+ if (!prefix.empty()) {
+ OP_REQUIRES(ctx, end_key.empty(),
+ errors::InvalidArgument(
+ "If prefix is specified, end_key must be empty."));
+ }
+
+ *output = new Dataset(ctx, resource, std::move(prefix),
+ std::move(start_key), std::move(end_key));
+ }
+
+ private:
+ class Dataset : public GraphDatasetBase {
+ public:
+ explicit Dataset(OpKernelContext* ctx, BigtableTableResource* table,
+ string prefix, string start_key, string end_key)
+ : GraphDatasetBase(ctx),
+ table_(table),
+ key_range_(MakeMultiModeKeyRange(
+ std::move(prefix), std::move(start_key), std::move(end_key))) {
+ table_->Ref();
+ }
+
+ ~Dataset() override { table_->Unref(); }
+
+ std::unique_ptr<IteratorBase> MakeIteratorInternal(
+ const string& prefix) const override {
+ return std::unique_ptr<IteratorBase>(new Iterator(
+ {this, strings::StrCat(prefix, "::BigtableSampleKeyPairsDataset")}));
+ }
+
+ const DataTypeVector& output_dtypes() const override {
+ static DataTypeVector* dtypes =
+ new DataTypeVector({DT_STRING, DT_STRING});
+ return *dtypes;
+ }
+
+ const std::vector<PartialTensorShape>& output_shapes() const override {
+ static std::vector<PartialTensorShape>* shapes =
+ new std::vector<PartialTensorShape>({{}, {}});
+ return *shapes;
+ }
+
+ string DebugString() const override {
+ return "BigtableSampleKeyPairsDatasetOp::Dataset";
+ }
+
+ private:
+ static MultiModeKeyRange MakeMultiModeKeyRange(string prefix,
+ string start_key,
+ string end_key) {
+ if (!start_key.empty()) {
+ return MultiModeKeyRange::FromRange(std::move(start_key),
+ std::move(end_key));
+ }
+ return MultiModeKeyRange::FromPrefix(std::move(prefix));
+ }
+
+ BigtableTableResource& table() const { return *table_; }
+
+ class Iterator : public DatasetIterator<Dataset> {
+ public:
+ explicit Iterator(const Params& params)
+ : DatasetIterator<Dataset>(params) {}
+
+ // Computes split points (`keys_`) to use when scanning the table.
+ //
+ // Initialize first retrieves the sample keys from the table (`row_keys`),
+ // as these often form good split points within the table. We then iterate
+ // over them, and copy them to `keys_` if they fall within the requested
+ // range to scan (`dataset()->key_range_`). Because the requested range
+ // might start between elements of the sampled keys list, care is taken to
+ // ensure we don't accidentally miss any subsets of the requested range by
+ // including `begin_key()` and `end_key()` as appropriate.
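+      //
+      // For example, for a requested range of ["d", "j") and sampled keys
+      // {"b", "f", "h", "z"}, the split points become
+      // keys_ = {"d", "f", "h", "j"}, yielding the key pairs ("d", "f"),
+      // ("f", "h"), and ("h", "j").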
+ Status Initialize(IteratorContext* ctx) override {
+        ::grpc::Status status;
+        std::vector<::google::cloud::bigtable::RowKeySample> row_keys =
+            dataset()->table().table().SampleRows(status);
+ if (!status.ok()) {
+ return GrpcStatusToTfStatus(status);
+ }
+
+ for (size_t i = 0; i < row_keys.size(); ++i) {
+ string row_key(row_keys[i].row_key);
+ if (dataset()->key_range_.contains_key(row_key)) {
+ // First key: check to see if we need to add the begin_key.
+ if (keys_.empty() && dataset()->key_range_.begin_key() != row_key) {
+ keys_.push_back(dataset()->key_range_.begin_key());
+ }
+ keys_.push_back(std::move(row_key));
+ } else if (!keys_.empty()) {
+ // If !keys_.empty(), then we have found at least one element of
+ // `row_keys` that is within our requested range
+ // (`dataset()->key_range_`). Because `row_keys` is sorted, if we
+ // have found an element that's not within our key range, then we
+ // are after our requested range (ranges are contiguous) and can end
+ // iteration early.
+ break;
+ }
+ }
+
+ // Handle the case where we skip over the selected range entirely.
+ if (keys_.empty()) {
+ keys_.push_back(dataset()->key_range_.begin_key());
+ }
+
+ // Last key: check to see if we need to add the end_key.
+ if (keys_.back() != dataset()->key_range_.end_key()) {
+ keys_.push_back(dataset()->key_range_.end_key());
+ }
+ return Status::OK();
+ }
+
+ Status GetNextInternal(IteratorContext* ctx,
+ std::vector<Tensor>* out_tensors,
+ bool* end_of_sequence) override {
+ mutex_lock l(mu_);
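+        // Each element is the pair (keys_[index_], keys_[index_ + 1]), so
+        // iteration ends once fewer than two keys remain. The bound is
+        // written as an addition to avoid unsigned underflow when keys_
+        // holds fewer than two entries.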
+        if (index_ + 2 > keys_.size()) {
+ *end_of_sequence = true;
+ return Status::OK();
+ }
+
+ *end_of_sequence = false;
+ out_tensors->emplace_back(ctx->allocator({}), DT_STRING,
+ TensorShape({}));
+ out_tensors->back().scalar<string>()() = keys_[index_];
+
+ out_tensors->emplace_back(ctx->allocator({}), DT_STRING,
+ TensorShape({}));
+ out_tensors->back().scalar<string>()() = keys_[index_ + 1];
+ ++index_;
+
+ return Status::OK();
+ }
+
+ private:
+ mutex mu_;
+ size_t index_ GUARDED_BY(mu_) = 0;
+ // Note: we store the keys_ on the iterator instead of the dataset
+ // because we want to re-sample the row keys in case there have been
+ // tablet rebalancing operations since the dataset was created.
+ //
+ // Note: keys_ is readonly after Initialize, and thus does not need a
+ // guarding lock.
+ std::vector<string> keys_;
+ };
+
+ BigtableTableResource* const table_;
+ const MultiModeKeyRange key_range_;
+ };
+};
+
+REGISTER_KERNEL_BUILDER(
+ Name("BigtableSampleKeyPairsDataset").Device(DEVICE_CPU),
+ BigtableSampleKeyPairsDatasetOp);
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_sample_keys_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_sample_keys_dataset_op.cc
new file mode 100644
index 0000000000..a5a47cfe2d
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_sample_keys_dataset_op.cc
@@ -0,0 +1,113 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/bigtable/kernels/bigtable_lib.h"
+#include "tensorflow/core/framework/op_kernel.h"
+
+namespace tensorflow {
+namespace {
+
+class BigtableSampleKeysDatasetOp : public DatasetOpKernel {
+ public:
+ using DatasetOpKernel::DatasetOpKernel;
+
+ void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
+ BigtableTableResource* resource;
+ OP_REQUIRES_OK(ctx,
+ LookupResource(ctx, HandleFromInput(ctx, 0), &resource));
+ *output = new Dataset(ctx, resource);
+ }
+
+ private:
+ class Dataset : public GraphDatasetBase {
+ public:
+ explicit Dataset(OpKernelContext* ctx, BigtableTableResource* table)
+ : GraphDatasetBase(ctx), table_(table) {
+ table_->Ref();
+ }
+
+ ~Dataset() override { table_->Unref(); }
+
+ std::unique_ptr<IteratorBase> MakeIteratorInternal(
+ const string& prefix) const override {
+ return std::unique_ptr<IteratorBase>(new Iterator(
+ {this, strings::StrCat(prefix, "::BigtableSampleKeysDataset")}));
+ }
+
+ const DataTypeVector& output_dtypes() const override {
+ static DataTypeVector* dtypes = new DataTypeVector({DT_STRING});
+ return *dtypes;
+ }
+
+ const std::vector<PartialTensorShape>& output_shapes() const override {
+ static std::vector<PartialTensorShape>* shapes =
+ new std::vector<PartialTensorShape>({{}});
+ return *shapes;
+ }
+
+ string DebugString() const override {
+      return "BigtableSampleKeysDatasetOp::Dataset";
+ }
+
+ BigtableTableResource* table() const { return table_; }
+
+ private:
+ class Iterator : public DatasetIterator<Dataset> {
+ public:
+ explicit Iterator(const Params& params)
+ : DatasetIterator<Dataset>(params) {}
+
+ Status Initialize(IteratorContext* ctx) override {
+ ::grpc::Status status;
+ row_keys_ = dataset()->table()->table().SampleRows(status);
+ if (!status.ok()) {
+ row_keys_.clear();
+ return GrpcStatusToTfStatus(status);
+ }
+ return Status::OK();
+ }
+
+ Status GetNextInternal(IteratorContext* ctx,
+ std::vector<Tensor>* out_tensors,
+ bool* end_of_sequence) override {
+ mutex_lock l(mu_);
+ if (index_ < row_keys_.size()) {
+ out_tensors->emplace_back(ctx->allocator({}), DT_STRING,
+ TensorShape({}));
+ out_tensors->back().scalar<string>()() =
+ string(row_keys_[index_].row_key);
+ *end_of_sequence = false;
+ index_++;
+ } else {
+ *end_of_sequence = true;
+ }
+ return Status::OK();
+ }
+
+ private:
+ mutex mu_;
+      size_t index_ GUARDED_BY(mu_) = 0;
+ std::vector<::google::cloud::bigtable::RowKeySample> row_keys_;
+ };
+
+ BigtableTableResource* const table_;
+ };
+};
+
+REGISTER_KERNEL_BUILDER(Name("BigtableSampleKeysDataset").Device(DEVICE_CPU),
+ BigtableSampleKeysDatasetOp);
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc
new file mode 100644
index 0000000000..13cb868167
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc
@@ -0,0 +1,219 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/bigtable/kernels/bigtable_lib.h"
+#include "tensorflow/core/framework/op_kernel.h"
+
+namespace tensorflow {
+namespace {
+
+class BigtableScanDatasetOp : public DatasetOpKernel {
+ public:
+ using DatasetOpKernel::DatasetOpKernel;
+
+ void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
+ string prefix;
+ OP_REQUIRES_OK(ctx, ParseScalarArgument<string>(ctx, "prefix", &prefix));
+ string start_key;
+ OP_REQUIRES_OK(ctx,
+ ParseScalarArgument<string>(ctx, "start_key", &start_key));
+ string end_key;
+ OP_REQUIRES_OK(ctx, ParseScalarArgument<string>(ctx, "end_key", &end_key));
+
+ OP_REQUIRES(ctx, !(prefix.empty() && start_key.empty()),
+ errors::InvalidArgument(
+ "Either prefix or start_key must be specified"));
+ OP_REQUIRES(ctx, prefix.empty() || start_key.empty(),
+ errors::InvalidArgument(
+ "Only one of prefix and start_key can be provided"));
+ if (!prefix.empty()) {
+ OP_REQUIRES(ctx, end_key.empty(),
+ errors::InvalidArgument(
+ "If prefix is specified, end_key must be empty."));
+ }
+
+ std::vector<string> column_families;
+ std::vector<string> columns;
+ OP_REQUIRES_OK(ctx, ParseVectorArgument<string>(ctx, "column_families",
+ &column_families));
+ OP_REQUIRES_OK(ctx, ParseVectorArgument<string>(ctx, "columns", &columns));
+ OP_REQUIRES(
+ ctx, column_families.size() == columns.size(),
+ errors::InvalidArgument("len(columns) != len(column_families)"));
+ OP_REQUIRES(ctx, !column_families.empty(),
+ errors::InvalidArgument("`column_families` is empty"));
+
+ float probability = 0;
+ OP_REQUIRES_OK(
+ ctx, ParseScalarArgument<float>(ctx, "probability", &probability));
+ OP_REQUIRES(
+ ctx, probability > 0 && probability <= 1,
+ errors::InvalidArgument(
+ "Probability outside the range of (0, 1]. Got: ", probability));
+
+ BigtableTableResource* resource;
+ OP_REQUIRES_OK(ctx,
+ LookupResource(ctx, HandleFromInput(ctx, 0), &resource));
+
+ const uint64 num_outputs = columns.size() + 1;
+ std::vector<PartialTensorShape> output_shapes;
+ output_shapes.reserve(num_outputs);
+ DataTypeVector output_types;
+ output_types.reserve(num_outputs);
+ for (uint64 i = 0; i < num_outputs; ++i) {
+ output_shapes.push_back({});
+ output_types.push_back(DT_STRING);
+ }
+
+ *output = new Dataset(ctx, resource, std::move(prefix),
+ std::move(start_key), std::move(end_key),
+ std::move(column_families), std::move(columns),
+ probability, output_types, std::move(output_shapes));
+ }
+
+ private:
+ class Dataset : public GraphDatasetBase {
+ public:
+ explicit Dataset(OpKernelContext* ctx, BigtableTableResource* table,
+ string prefix, string start_key, string end_key,
+ std::vector<string> column_families,
+ std::vector<string> columns, float probability,
+ const DataTypeVector& output_types,
+ std::vector<PartialTensorShape> output_shapes)
+ : GraphDatasetBase(ctx),
+ table_(table),
+ prefix_(std::move(prefix)),
+ start_key_(std::move(start_key)),
+ end_key_(std::move(end_key)),
+ column_families_(std::move(column_families)),
+ columns_(std::move(columns)),
+ column_family_regex_(RegexFromStringSet(column_families_)),
+ column_regex_(RegexFromStringSet(columns_)),
+ probability_(probability),
+ output_types_(output_types),
+ output_shapes_(std::move(output_shapes)) {
+ table_->Ref();
+ }
+
+ ~Dataset() override { table_->Unref(); }
+
+ std::unique_ptr<IteratorBase> MakeIteratorInternal(
+ const string& prefix) const override {
+ return std::unique_ptr<IteratorBase>(new Iterator(
+ {this, strings::StrCat(prefix, "::BigtableScanDataset")}));
+ }
+
+ const DataTypeVector& output_dtypes() const override {
+ return output_types_;
+ }
+
+ const std::vector<PartialTensorShape>& output_shapes() const override {
+ return output_shapes_;
+ }
+
+ string DebugString() const override {
+ return "BigtableScanDatasetOp::Dataset";
+ }
+
+ BigtableTableResource* table() const { return table_; }
+
+ private:
+ class Iterator : public BigtableReaderDatasetIterator<Dataset> {
+ public:
+ explicit Iterator(const Params& params)
+ : BigtableReaderDatasetIterator<Dataset>(params) {}
+
+ ::google::cloud::bigtable::RowRange MakeRowRange() override {
+ if (!dataset()->prefix_.empty()) {
+ DCHECK(dataset()->start_key_.empty());
+ return ::google::cloud::bigtable::RowRange::Prefix(
+ dataset()->prefix_);
+ } else {
+ DCHECK(!dataset()->start_key_.empty())
+ << "Both prefix and start_key were empty!";
+ return ::google::cloud::bigtable::RowRange::Range(
+ dataset()->start_key_, dataset()->end_key_);
+ }
+ }
+ ::google::cloud::bigtable::Filter MakeFilter() override {
+ // TODO(saeta): Investigate optimal ordering here.
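+        // RowSample is added to the chain only when probability_ < 1.0;
+        // otherwise PassAllFilter leaves every row in place.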
+ return ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1),
+ ::google::cloud::bigtable::Filter::FamilyRegex(
+ dataset()->column_family_regex_),
+ ::google::cloud::bigtable::Filter::ColumnRegex(
+ dataset()->column_regex_),
+ dataset()->probability_ != 1.0
+ ? ::google::cloud::bigtable::Filter::RowSample(
+ dataset()->probability_)
+ : ::google::cloud::bigtable::Filter::PassAllFilter());
+ }
+ Status ParseRow(IteratorContext* ctx,
+ const ::google::cloud::bigtable::Row& row,
+ std::vector<Tensor>* out_tensors) override {
+ out_tensors->reserve(dataset()->columns_.size() + 1);
+ Tensor row_key_tensor(ctx->allocator({}), DT_STRING, {});
+ row_key_tensor.scalar<string>()() = string(row.row_key());
+ out_tensors->emplace_back(std::move(row_key_tensor));
+
+ if (row.cells().size() > 2 * dataset()->columns_.size()) {
+ LOG(WARNING) << "An excessive number of columns ("
+ << row.cells().size()
+ << ") were retrieved when reading row: "
+ << row.row_key();
+ }
+
+ for (uint64 i = 0; i < dataset()->columns_.size(); ++i) {
+ Tensor col_tensor(ctx->allocator({}), DT_STRING, {});
+ bool found_column = false;
+ for (auto cell_itr = row.cells().begin();
+ !found_column && cell_itr != row.cells().end(); ++cell_itr) {
+ if (cell_itr->family_name() == dataset()->column_families_[i] &&
+ string(cell_itr->column_qualifier()) ==
+ dataset()->columns_[i]) {
+ col_tensor.scalar<string>()() = string(cell_itr->value());
+ found_column = true;
+ }
+ }
+ if (!found_column) {
+ return errors::InvalidArgument(
+ "Column ", dataset()->column_families_[i], ":",
+ dataset()->columns_[i], " not found in row: ", row.row_key());
+ }
+ out_tensors->emplace_back(std::move(col_tensor));
+ }
+ return Status::OK();
+ }
+ };
+
+    BigtableTableResource* const table_;
+ const string prefix_;
+ const string start_key_;
+ const string end_key_;
+ const std::vector<string> column_families_;
+ const std::vector<string> columns_;
+ const string column_family_regex_;
+ const string column_regex_;
+ const float probability_;
+ const DataTypeVector output_types_;
+ const std::vector<PartialTensorShape> output_shapes_;
+ };
+};
+
+REGISTER_KERNEL_BUILDER(Name("BigtableScanDataset").Device(DEVICE_CPU),
+ BigtableScanDatasetOp);
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.cc b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.cc
new file mode 100644
index 0000000000..f083ce6f44
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.cc
@@ -0,0 +1,374 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.h"
+
+#include "google/bigtable/v2/data.pb.h"
+#include "google/protobuf/wrappers.pb.h"
+#include "re2/re2.h"
+#include "tensorflow/core/lib/strings/stringprintf.h"
+#include "tensorflow/core/util/ptr_util.h"
+
+namespace tensorflow {
+namespace {
+
+void UpdateRow(const ::google::bigtable::v2::Mutation& mut,
+ std::map<string, string>* row) {
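+  // Cells are stored in the row's map keyed by "family:qualifier" strings.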
+ if (mut.has_set_cell()) {
+ CHECK(mut.set_cell().timestamp_micros() >= -1)
+ << "Timestamp_micros: " << mut.set_cell().timestamp_micros();
+ auto col =
+ strings::Printf("%s:%s", mut.set_cell().family_name().c_str(),
+ string(mut.set_cell().column_qualifier()).c_str());
+ (*row)[col] = string(mut.set_cell().value());
+ } else if (mut.has_delete_from_column()) {
+ auto col = strings::Printf(
+ "%s:%s", mut.delete_from_column().family_name().c_str(),
+ string(mut.delete_from_column().column_qualifier()).c_str());
+ row->erase(col);
+ } else if (mut.has_delete_from_family()) {
+ auto itr = row->lower_bound(mut.delete_from_family().family_name());
+ auto prefix =
+ strings::Printf("%s:", mut.delete_from_family().family_name().c_str());
+ while (itr != row->end() && itr->first.substr(0, prefix.size()) == prefix) {
+ row->erase(itr);
+ }
+ } else if (mut.has_delete_from_row()) {
+ row->clear();
+ } else {
+ LOG(ERROR) << "Unknown mutation: " << mut.ShortDebugString();
+ }
+}
+
+} // namespace
+
+class SampleRowKeysResponse : public grpc::ClientReaderInterface<
+ google::bigtable::v2::SampleRowKeysResponse> {
+ public:
+ explicit SampleRowKeysResponse(BigtableTestClient* client)
+ : client_(client) {}
+
+ bool NextMessageSize(uint32_t* sz) override {
+ mutex_lock l(mu_);
+ mutex_lock l2(client_->mu_);
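+    // Every other row key is surfaced as a sample, so another message is
+    // available only while 2 * num_messages_sent_ is within the table size.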
+ if (num_messages_sent_ * 2 < client_->table_.rows.size()) {
+    *sz = 10000;  // A value large enough that we need not worry about it.
+ return true;
+ }
+ return false;
+ }
+
+ bool Read(google::bigtable::v2::SampleRowKeysResponse* resp) override {
+ // Send every other key from the table.
+ mutex_lock l(mu_);
+ mutex_lock l2(client_->mu_);
+ *resp = google::bigtable::v2::SampleRowKeysResponse();
+ auto itr = client_->table_.rows.begin();
+ for (uint64 i = 0; i < 2 * num_messages_sent_; ++i) {
+ ++itr;
+ if (itr == client_->table_.rows.end()) {
+ return false;
+ }
+ }
+ resp->set_row_key(itr->first);
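+    // The byte offset is fabricated; this test client does not track the
+    // actual sizes of its rows.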
+ resp->set_offset_bytes(100 * num_messages_sent_);
+ num_messages_sent_++;
+ return true;
+ }
+
+ grpc::Status Finish() override { return grpc::Status::OK; }
+
+ void WaitForInitialMetadata() override {} // Do nothing.
+
+ private:
+ mutex mu_;
+ int64 num_messages_sent_ GUARDED_BY(mu_) = 0;
+ BigtableTestClient* client_; // Not owned.
+};
+
+class ReadRowsResponse : public grpc::ClientReaderInterface<
+ google::bigtable::v2::ReadRowsResponse> {
+ public:
+ ReadRowsResponse(BigtableTestClient* client,
+ google::bigtable::v2::ReadRowsRequest const& request)
+ : client_(client), request_(request) {}
+
+ bool NextMessageSize(uint32_t* sz) override {
+ mutex_lock l(mu_);
+ if (sent_first_message_) {
+ return false;
+ }
+    *sz = 10000000;  // A value large enough that we need not worry about it.
+ return true;
+ }
+
+ bool Read(google::bigtable::v2::ReadRowsResponse* resp) override {
+ mutex_lock l(mu_);
+ if (sent_first_message_) {
+ return false;
+ }
+ sent_first_message_ = true;
+ RowFilter filter = MakeRowFilter();
+
+ mutex_lock l2(client_->mu_);
+ *resp = google::bigtable::v2::ReadRowsResponse();
+ // Send all contents in first response.
+ for (auto itr = client_->table_.rows.begin();
+ itr != client_->table_.rows.end(); ++itr) {
+ if (filter.AllowRow(itr->first)) {
+ ::google::bigtable::v2::ReadRowsResponse_CellChunk* chunk = nullptr;
+ bool sent_first = false;
+ for (auto col_itr = itr->second.columns.begin();
+ col_itr != itr->second.columns.end(); ++col_itr) {
+ if (filter.AllowColumn(col_itr->first)) {
+ chunk = resp->add_chunks();
+ if (!sent_first) {
+ sent_first = true;
+ chunk->set_row_key(itr->first);
+ }
+ auto colon_idx = col_itr->first.find(":");
+ CHECK(colon_idx != string::npos)
+ << "No ':' found in: " << col_itr->first;
+ chunk->mutable_family_name()->set_value(
+ string(col_itr->first, 0, colon_idx));
+ chunk->mutable_qualifier()->set_value(
+ string(col_itr->first, ++colon_idx));
+ if (!filter.strip_values) {
+ chunk->set_value(col_itr->second);
+ }
+ if (filter.only_one_column) {
+ break;
+ }
+ }
+ }
+ if (sent_first) {
+ // We are sending this row, so set the commit flag on the last chunk.
+ chunk->set_commit_row(true);
+ }
+ }
+ }
+ return true;
+ }
+
+ grpc::Status Finish() override { return grpc::Status::OK; }
+
+ void WaitForInitialMetadata() override {} // Do nothing.
+
+ private:
+ struct RowFilter {
+ std::set<string> row_set;
+ std::vector<std::pair<string, string>> row_ranges;
+ double row_sample = 0.0; // Note: currently ignored.
+ std::unique_ptr<RE2> col_filter;
+ bool strip_values = false;
+ bool only_one_column = false;
+
+ bool AllowRow(const string& row) {
+ if (row_set.find(row) != row_set.end()) {
+ return true;
+ }
+ for (const auto& range : row_ranges) {
+ if (range.first <= row && range.second > row) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool AllowColumn(const string& col) {
+ if (col_filter) {
+ return RE2::FullMatch(col, *col_filter);
+ } else {
+ return true;
+ }
+ }
+ };
+
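+  // Translates the filter chain of the ReadRowsRequest into the simplified
+  // RowFilter representation used by this test client.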
+ RowFilter MakeRowFilter() {
+ RowFilter filter;
+ for (auto i = request_.rows().row_keys().begin();
+ i != request_.rows().row_keys().end(); ++i) {
+ filter.row_set.insert(string(*i));
+ }
+ for (auto i = request_.rows().row_ranges().begin();
+ i != request_.rows().row_ranges().end(); ++i) {
+ if (i->start_key_case() !=
+ google::bigtable::v2::RowRange::kStartKeyClosed ||
+ i->end_key_case() != google::bigtable::v2::RowRange::kEndKeyOpen) {
+ LOG(WARNING) << "Skipping row range that cannot be processed: "
+ << i->ShortDebugString();
+ continue;
+ }
+ filter.row_ranges.emplace_back(std::make_pair(
+ string(i->start_key_closed()), string(i->end_key_open())));
+ }
+ if (request_.filter().has_chain()) {
+ string family_filter;
+ string qualifier_filter;
+ for (auto i = request_.filter().chain().filters().begin();
+ i != request_.filter().chain().filters().end(); ++i) {
+ switch (i->filter_case()) {
+ case google::bigtable::v2::RowFilter::kFamilyNameRegexFilter:
+ family_filter = i->family_name_regex_filter();
+ break;
+ case google::bigtable::v2::RowFilter::kColumnQualifierRegexFilter:
+ qualifier_filter = i->column_qualifier_regex_filter();
+ break;
+ case google::bigtable::v2::RowFilter::kCellsPerColumnLimitFilter:
+ if (i->cells_per_column_limit_filter() != 1) {
+ LOG(ERROR) << "Unexpected cells_per_column_limit_filter: "
+ << i->cells_per_column_limit_filter();
+ }
+ break;
+ case google::bigtable::v2::RowFilter::kStripValueTransformer:
+ filter.strip_values = i->strip_value_transformer();
+ break;
+ case google::bigtable::v2::RowFilter::kRowSampleFilter:
+ LOG(INFO) << "Ignoring row sample directive.";
+ break;
+ case google::bigtable::v2::RowFilter::kPassAllFilter:
+ break;
+ case google::bigtable::v2::RowFilter::kCellsPerRowLimitFilter:
+ filter.only_one_column = true;
+ break;
+ default:
+ LOG(WARNING) << "Ignoring unknown filter type: "
+ << i->ShortDebugString();
+ }
+ }
+ if (family_filter.empty() || qualifier_filter.empty()) {
+ LOG(WARNING) << "Missing regex!";
+ } else {
+ string regex = strings::Printf("%s:%s", family_filter.c_str(),
+ qualifier_filter.c_str());
+ filter.col_filter.reset(new RE2(regex));
+ }
+ } else {
+ LOG(WARNING) << "Read request did not have a filter chain specified: "
+ << request_.filter().DebugString();
+ }
+ return filter;
+ }
+
+ mutex mu_;
+ bool sent_first_message_ GUARDED_BY(mu_) = false;
+ BigtableTestClient* client_; // Not owned.
+ const google::bigtable::v2::ReadRowsRequest request_;
+};
+
+class MutateRowsResponse : public grpc::ClientReaderInterface<
+ google::bigtable::v2::MutateRowsResponse> {
+ public:
+ explicit MutateRowsResponse(size_t num_successes)
+ : num_successes_(num_successes) {}
+
+ bool NextMessageSize(uint32_t* sz) override {
+ mutex_lock l(mu_);
+ if (sent_first_message_) {
+ return false;
+ }
+    *sz = 10000000;  // A value large enough that we need not worry about it.
+ return true;
+ }
+
+ bool Read(google::bigtable::v2::MutateRowsResponse* resp) override {
+ mutex_lock l(mu_);
+ if (sent_first_message_) {
+ return false;
+ }
+ sent_first_message_ = true;
+ *resp = google::bigtable::v2::MutateRowsResponse();
+ for (size_t i = 0; i < num_successes_; ++i) {
+ auto entry = resp->add_entries();
+ entry->set_index(i);
+ }
+ return true;
+ }
+
+ grpc::Status Finish() override { return grpc::Status::OK; }
+
+ void WaitForInitialMetadata() override {} // Do nothing.
+
+ private:
+ const size_t num_successes_;
+
+ mutex mu_;
+  bool sent_first_message_ GUARDED_BY(mu_) = false;
+};
+
+grpc::Status BigtableTestClient::MutateRow(
+ grpc::ClientContext* context,
+ google::bigtable::v2::MutateRowRequest const& request,
+ google::bigtable::v2::MutateRowResponse* response) {
+ mutex_lock l(mu_);
+ auto* row = &table_.rows[string(request.row_key())];
+ for (int i = 0; i < request.mutations_size(); ++i) {
+ UpdateRow(request.mutations(i), &row->columns);
+ }
+ *response = google::bigtable::v2::MutateRowResponse();
+ return grpc::Status::OK;
+}
+grpc::Status BigtableTestClient::CheckAndMutateRow(
+ grpc::ClientContext* context,
+ google::bigtable::v2::CheckAndMutateRowRequest const& request,
+ google::bigtable::v2::CheckAndMutateRowResponse* response) {
+ return grpc::Status(grpc::StatusCode::UNIMPLEMENTED,
+ "CheckAndMutateRow not implemented.");
+}
+grpc::Status BigtableTestClient::ReadModifyWriteRow(
+ grpc::ClientContext* context,
+ google::bigtable::v2::ReadModifyWriteRowRequest const& request,
+ google::bigtable::v2::ReadModifyWriteRowResponse* response) {
+ return grpc::Status(grpc::StatusCode::UNIMPLEMENTED,
+ "ReadModifyWriteRow not implemented.");
+}
+std::unique_ptr<
+ grpc::ClientReaderInterface<google::bigtable::v2::ReadRowsResponse>>
+BigtableTestClient::ReadRows(
+ grpc::ClientContext* context,
+ google::bigtable::v2::ReadRowsRequest const& request) {
+ return MakeUnique<ReadRowsResponse>(this, request);
+}
+
+std::unique_ptr<
+ grpc::ClientReaderInterface<google::bigtable::v2::SampleRowKeysResponse>>
+BigtableTestClient::SampleRowKeys(
+ grpc::ClientContext* context,
+ google::bigtable::v2::SampleRowKeysRequest const& request) {
+ return MakeUnique<SampleRowKeysResponse>(this);
+}
+std::unique_ptr<
+ grpc::ClientReaderInterface<google::bigtable::v2::MutateRowsResponse>>
+BigtableTestClient::MutateRows(
+ grpc::ClientContext* context,
+ google::bigtable::v2::MutateRowsRequest const& request) {
+ mutex_lock l(mu_);
+ for (auto i = request.entries().begin(); i != request.entries().end(); ++i) {
+ auto* row = &table_.rows[string(i->row_key())];
+ for (auto mut = i->mutations().begin(); mut != i->mutations().end();
+ ++mut) {
+ UpdateRow(*mut, &row->columns);
+ }
+ }
+ return MakeUnique<MutateRowsResponse>(request.entries_size());
+}
+
+std::shared_ptr<grpc::Channel> BigtableTestClient::Channel() {
+  LOG(WARNING) << "Call to BigtableTestClient::Channel(); this will likely "
+ "cause a crash!";
+ return nullptr;
+}
+} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.h b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.h
new file mode 100644
index 0000000000..dac2b16a21
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.h
@@ -0,0 +1,87 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CONTRIB_BIGTABLE_KERNELS_TEST_KERNELS_BIGTABLE_TEST_CLIENT_H_
+#define TENSORFLOW_CONTRIB_BIGTABLE_KERNELS_TEST_KERNELS_BIGTABLE_TEST_CLIENT_H_
+
+#include "google/cloud/bigtable/data_client.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/mutex.h"
+
+namespace tensorflow {
+
+class BigtableTestClient : public ::google::cloud::bigtable::DataClient {
+ public:
+ std::string const& project_id() const override { return project_id_; }
+ std::string const& instance_id() const override { return instance_id_; }
+ void reset() override {
+ mutex_lock l(mu_);
+ table_ = Table();
+ }
+
+ grpc::Status MutateRow(
+ grpc::ClientContext* context,
+ google::bigtable::v2::MutateRowRequest const& request,
+ google::bigtable::v2::MutateRowResponse* response) override;
+
+ grpc::Status CheckAndMutateRow(
+ grpc::ClientContext* context,
+ google::bigtable::v2::CheckAndMutateRowRequest const& request,
+ google::bigtable::v2::CheckAndMutateRowResponse* response) override;
+
+ grpc::Status ReadModifyWriteRow(
+ grpc::ClientContext* context,
+ google::bigtable::v2::ReadModifyWriteRowRequest const& request,
+ google::bigtable::v2::ReadModifyWriteRowResponse* response) override;
+
+ std::unique_ptr<
+ grpc::ClientReaderInterface<google::bigtable::v2::ReadRowsResponse>>
+ ReadRows(grpc::ClientContext* context,
+ google::bigtable::v2::ReadRowsRequest const& request) override;
+ std::unique_ptr<
+ grpc::ClientReaderInterface<google::bigtable::v2::SampleRowKeysResponse>>
+ SampleRowKeys(
+ grpc::ClientContext* context,
+ google::bigtable::v2::SampleRowKeysRequest const& request) override;
+
+ std::unique_ptr<
+ grpc::ClientReaderInterface<google::bigtable::v2::MutateRowsResponse>>
+ MutateRows(grpc::ClientContext* context,
+ google::bigtable::v2::MutateRowsRequest const& request) override;
+
+ std::shared_ptr<grpc::Channel> Channel() override;
+
+ private:
+ friend class SampleRowKeysResponse;
+ friend class ReadRowsResponse;
+ friend class MutateRowsResponse;
+
+ struct Row {
+ string row_key;
+ std::map<string, string> columns;
+ };
+ struct Table {
+ std::map<string, Row> rows;
+ };
+
+ mutex mu_;
+ const std::string project_id_ = "testproject";
+ const std::string instance_id_ = "testinstance";
+ Table table_ GUARDED_BY(mu_);
+};
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_CONTRIB_BIGTABLE_KERNELS_TEST_KERNELS_BIGTABLE_TEST_CLIENT_H_
diff --git a/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_op.cc b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_op.cc
new file mode 100644
index 0000000000..fa3e587b90
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_op.cc
@@ -0,0 +1,78 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/bigtable/kernels/bigtable_lib.h"
+#include "tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.h"
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/lib/strings/stringprintf.h"
+
+namespace tensorflow {
+
+namespace {
+
+class BigtableTestClientOp : public OpKernel {
+ public:
+ explicit BigtableTestClientOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
+ ~BigtableTestClientOp() override {
+ if (cinfo_.resource_is_private_to_kernel()) {
+ if (!cinfo_.resource_manager()
+ ->Delete<BigtableClientResource>(cinfo_.container(),
+ cinfo_.name())
+ .ok()) {
+ // Do nothing; the resource can have been deleted by session resets.
+ }
+ }
+ }
+ void Compute(OpKernelContext* ctx) override LOCKS_EXCLUDED(mu_) {
+ mutex_lock l(mu_);
+ if (!initialized_) {
+ ResourceMgr* mgr = ctx->resource_manager();
+ OP_REQUIRES_OK(ctx, cinfo_.Init(mgr, def()));
+ BigtableClientResource* resource;
+ OP_REQUIRES_OK(
+ ctx,
+ mgr->LookupOrCreate<BigtableClientResource>(
+ cinfo_.container(), cinfo_.name(), &resource,
+ [this, ctx](BigtableClientResource** ret)
+ EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ std::shared_ptr<google::cloud::bigtable::DataClient> client(
+ new BigtableTestClient());
+ // Note: must make explicit copies to sequence
+ // them before the move of client.
+ string project_id = client->project_id();
+ string instance_id = client->instance_id();
+ *ret = new BigtableClientResource(std::move(project_id),
+ std::move(instance_id),
+ std::move(client));
+ return Status::OK();
+ }));
+ initialized_ = true;
+ }
+ OP_REQUIRES_OK(ctx, MakeResourceHandleToOutput(
+ ctx, 0, cinfo_.container(), cinfo_.name(),
+ MakeTypeIndex<BigtableClientResource>()));
+ }
+
+ private:
+ mutex mu_;
+ ContainerInfo cinfo_ GUARDED_BY(mu_);
+ bool initialized_ GUARDED_BY(mu_) = false;
+};
+
+REGISTER_KERNEL_BUILDER(Name("BigtableTestClient").Device(DEVICE_CPU),
+ BigtableTestClientOp);
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_test.cc b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_test.cc
new file mode 100644
index 0000000000..32611e2590
--- /dev/null
+++ b/tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client_test.cc
@@ -0,0 +1,345 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/bigtable/kernels/test_kernels/bigtable_test_client.h"
+#include "google/cloud/bigtable/internal/table.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+namespace {
+
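+// Test helper: applies a single SetCell mutation (row, family, column, value)
+// to the given table.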
+void WriteCell(const string& row, const string& family, const string& column,
+ const string& value,
+ ::google::cloud::bigtable::noex::Table* table) {
+ ::google::cloud::bigtable::SingleRowMutation mut(row);
+ mut.emplace_back(::google::cloud::bigtable::SetCell(family, column, value));
+ table->Apply(std::move(mut));
+}
+
+TEST(BigtableTestClientTest, EmptyRowRead) {
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
+ std::make_shared<BigtableTestClient>();
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
+
+ ::google::cloud::bigtable::RowSet rowset;
+ rowset.Append("r1");
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1));
+ auto rows = table.ReadRows(std::move(rowset), filter);
+ EXPECT_EQ(rows.begin(), rows.end()) << "Some rows were returned in response!";
+ EXPECT_TRUE(rows.Finish().ok()) << "Error reading rows.";
+}
+
+TEST(BigtableTestClientTest, SingleRowWriteAndRead) {
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
+ std::make_shared<BigtableTestClient>();
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
+
+ WriteCell("r1", "f1", "c1", "v1", &table);
+
+ ::google::cloud::bigtable::RowSet rowset("r1");
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1));
+ auto rows = table.ReadRows(std::move(rowset), filter);
+ auto itr = rows.begin();
+ EXPECT_NE(itr, rows.end()) << "No rows were returned in response!";
+ EXPECT_EQ(itr->row_key(), "r1");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "v1");
+
+ ++itr;
+ EXPECT_EQ(itr, rows.end());
+ EXPECT_TRUE(rows.Finish().ok());
+}
+
+TEST(BigtableTestClientTest, MultiRowWriteAndSingleRowRead) {
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
+ std::make_shared<BigtableTestClient>();
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
+
+ WriteCell("r1", "f1", "c1", "v1", &table);
+ WriteCell("r2", "f1", "c1", "v2", &table);
+ WriteCell("r3", "f1", "c1", "v3", &table);
+
+ ::google::cloud::bigtable::RowSet rowset("r1");
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1));
+ auto rows = table.ReadRows(std::move(rowset), filter);
+ auto itr = rows.begin();
+
+ EXPECT_NE(itr, rows.end()) << "Missing rows";
+ EXPECT_EQ(itr->row_key(), "r1");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "v1");
+
+ ++itr;
+ EXPECT_EQ(itr, rows.end()) << "Extra rows in the response.";
+ EXPECT_TRUE(rows.Finish().ok());
+}
+
+TEST(BigtableTestClientTest, MultiRowWriteAndRead) {
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
+ std::make_shared<BigtableTestClient>();
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
+
+ WriteCell("r1", "f1", "c1", "v1", &table);
+ WriteCell("r2", "f1", "c1", "v2", &table);
+ WriteCell("r3", "f1", "c1", "v3", &table);
+
+ ::google::cloud::bigtable::RowSet rowset("r1", "r2", "r3");
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1));
+ auto rows = table.ReadRows(std::move(rowset), filter);
+ auto itr = rows.begin();
+
+ EXPECT_NE(itr, rows.end()) << "Missing rows";
+ EXPECT_EQ(itr->row_key(), "r1");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "v1");
+
+ ++itr;
+
+ EXPECT_NE(itr, rows.end()) << "Missing rows";
+ EXPECT_EQ(itr->row_key(), "r2");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "v2");
+
+ ++itr;
+
+ EXPECT_NE(itr, rows.end()) << "Missing rows";
+ EXPECT_EQ(itr->row_key(), "r3");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "v3");
+
+ ++itr;
+ EXPECT_EQ(itr, rows.end()) << "Extra rows in the response.";
+ EXPECT_TRUE(rows.Finish().ok());
+}
+
+TEST(BigtableTestClientTest, MultiRowWriteAndPrefixRead) {
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
+ std::make_shared<BigtableTestClient>();
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
+
+ WriteCell("r1", "f1", "c1", "v1", &table);
+ WriteCell("r2", "f1", "c1", "v2", &table);
+ WriteCell("r3", "f1", "c1", "v3", &table);
+
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1));
+ auto rows =
+ table.ReadRows(::google::cloud::bigtable::RowRange::Prefix("r"), filter);
+ auto itr = rows.begin();
+
+ EXPECT_NE(itr, rows.end()) << "Missing rows";
+ EXPECT_EQ(itr->row_key(), "r1");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "v1");
+
+ ++itr;
+
+ EXPECT_NE(itr, rows.end()) << "Missing rows";
+ EXPECT_EQ(itr->row_key(), "r2");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "v2");
+
+ ++itr;
+
+ EXPECT_NE(itr, rows.end()) << "Missing rows";
+ EXPECT_EQ(itr->row_key(), "r3");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "v3");
+
+ ++itr;
+ EXPECT_EQ(itr, rows.end()) << "Extra rows in the response.";
+ EXPECT_TRUE(rows.Finish().ok());
+}
+
+TEST(BigtableTestClientTest, ColumnFiltering) {
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
+ std::make_shared<BigtableTestClient>();
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
+
+ WriteCell("r1", "f1", "c1", "v1", &table);
+ WriteCell("r2", "f1", "c1", "v2", &table);
+ WriteCell("r3", "f1", "c1", "v3", &table);
+
+ // Extra cells
+ WriteCell("r1", "f2", "c1", "v1", &table);
+ WriteCell("r2", "f2", "c1", "v2", &table);
+ WriteCell("r3", "f1", "c2", "v3", &table);
+
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1),
+ ::google::cloud::bigtable::Filter::FamilyRegex("f1"),
+ ::google::cloud::bigtable::Filter::ColumnRegex("c1"));
+ auto rows =
+ table.ReadRows(::google::cloud::bigtable::RowRange::Prefix("r"), filter);
+ auto itr = rows.begin();
+
+ EXPECT_NE(itr, rows.end()) << "Missing rows";
+ EXPECT_EQ(itr->row_key(), "r1");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "v1");
+
+ ++itr;
+
+ EXPECT_NE(itr, rows.end()) << "Missing rows";
+ EXPECT_EQ(itr->row_key(), "r2");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "v2");
+
+ ++itr;
+
+ EXPECT_NE(itr, rows.end()) << "Missing rows";
+ EXPECT_EQ(itr->row_key(), "r3");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "v3");
+
+ ++itr;
+ EXPECT_EQ(itr, rows.end()) << "Extra rows in the response.";
+ EXPECT_TRUE(rows.Finish().ok());
+}
+
+TEST(BigtableTestClientTest, RowKeys) {
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
+ std::make_shared<BigtableTestClient>();
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
+
+ WriteCell("r1", "f1", "c1", "v1", &table);
+ WriteCell("r2", "f1", "c1", "v2", &table);
+ WriteCell("r3", "f1", "c1", "v3", &table);
+
+ // Extra cells
+ WriteCell("r1", "f2", "c1", "v1", &table);
+ WriteCell("r2", "f2", "c1", "v2", &table);
+ WriteCell("r3", "f1", "c2", "v3", &table);
+
+ auto filter = ::google::cloud::bigtable::Filter::Chain(
+ ::google::cloud::bigtable::Filter::Latest(1),
+ ::google::cloud::bigtable::Filter::CellsRowLimit(1),
+ ::google::cloud::bigtable::Filter::StripValueTransformer());
+ auto rows =
+ table.ReadRows(::google::cloud::bigtable::RowRange::Prefix("r"), filter);
+ auto itr = rows.begin();
+ EXPECT_NE(itr, rows.end()) << "Missing rows";
+ EXPECT_EQ(itr->row_key(), "r1");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "");
+
+ ++itr;
+
+ EXPECT_NE(itr, rows.end()) << "Missing rows";
+ EXPECT_EQ(itr->row_key(), "r2");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "");
+
+ ++itr;
+
+ EXPECT_NE(itr, rows.end()) << "Missing rows";
+ EXPECT_EQ(itr->row_key(), "r3");
+ EXPECT_EQ(itr->cells().size(), 1);
+ EXPECT_EQ(itr->cells()[0].family_name(), "f1");
+ EXPECT_EQ(itr->cells()[0].column_qualifier(), "c1");
+ EXPECT_EQ(itr->cells()[0].value(), "");
+
+ ++itr;
+ EXPECT_EQ(itr, rows.end()) << "Extra rows in the response.";
+ EXPECT_TRUE(rows.Finish().ok());
+}
+
+TEST(BigtableTestClientTest, SampleKeys) {
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
+ std::make_shared<BigtableTestClient>();
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
+
+ WriteCell("r1", "f1", "c1", "v1", &table);
+ WriteCell("r2", "f1", "c1", "v2", &table);
+ WriteCell("r3", "f1", "c1", "v3", &table);
+ WriteCell("r4", "f1", "c1", "v4", &table);
+ WriteCell("r5", "f1", "c1", "v5", &table);
+
+ grpc::Status status;
+ auto resp = table.SampleRows(status);
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ(3, resp.size());
+ EXPECT_EQ("r1", string(resp[0].row_key));
+ EXPECT_EQ(0, resp[0].offset_bytes);
+ EXPECT_EQ("r3", string(resp[1].row_key));
+ EXPECT_EQ(100, resp[1].offset_bytes);
+ EXPECT_EQ("r5", string(resp[2].row_key));
+ EXPECT_EQ(200, resp[2].offset_bytes);
+}
+
+TEST(BigtableTestClientTest, SampleKeysShort) {
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
+ std::make_shared<BigtableTestClient>();
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
+
+ WriteCell("r1", "f1", "c1", "v1", &table);
+
+ grpc::Status status;
+ auto resp = table.SampleRows(status);
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ(1, resp.size());
+ EXPECT_EQ("r1", string(resp[0].row_key));
+}
+
+TEST(BigtableTestClientTest, SampleKeysEvenNumber) {
+ std::shared_ptr<::google::cloud::bigtable::DataClient> client_ptr =
+ std::make_shared<BigtableTestClient>();
+ ::google::cloud::bigtable::noex::Table table(client_ptr, "test_table");
+
+ WriteCell("r1", "f1", "c1", "v1", &table);
+ WriteCell("r2", "f1", "c1", "v2", &table);
+ WriteCell("r3", "f1", "c1", "v3", &table);
+ WriteCell("r4", "f1", "c1", "v4", &table);
+
+ grpc::Status status;
+ auto resp = table.SampleRows(status);
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ(2, resp.size());
+ EXPECT_EQ("r1", string(resp[0].row_key));
+ EXPECT_EQ("r3", string(resp[1].row_key));
+}
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/ops/bigtable_ops.cc b/tensorflow/contrib/bigtable/ops/bigtable_ops.cc
new file mode 100644
index 0000000000..416b719e30
--- /dev/null
+++ b/tensorflow/contrib/bigtable/ops/bigtable_ops.cc
@@ -0,0 +1,107 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/framework/common_shape_fns.h"
+#include "tensorflow/core/framework/op.h"
+
+namespace tensorflow {
+
+// TODO(saeta): Add support for setting ClientOptions values.
+REGISTER_OP("BigtableClient")
+ .Attr("project_id: string")
+ .Attr("instance_id: string")
+ .Attr("connection_pool_size: int")
+ .Attr("max_receive_message_size: int = -1")
+ .Attr("container: string = ''")
+ .Attr("shared_name: string = ''")
+ .Output("client: resource")
+ .SetShapeFn(shape_inference::ScalarShape);
+
+// TODO(saeta): Add support for Application Profiles.
+// See https://cloud.google.com/bigtable/docs/app-profiles for more info.
+REGISTER_OP("BigtableTable")
+ .Input("client: resource")
+ .Attr("table_name: string")
+ .Attr("container: string = ''")
+ .Attr("shared_name: string = ''")
+ .Output("table: resource")
+ .SetShapeFn(shape_inference::ScalarShape);
+
+REGISTER_OP("DatasetToBigtable")
+ .Input("table: resource")
+ .Input("input_dataset: variant")
+ .Input("column_families: string")
+ .Input("columns: string")
+ .Input("timestamp: int64")
+ .SetShapeFn(shape_inference::NoOutputs);
+
+REGISTER_OP("BigtableLookupDataset")
+ .Input("keys_dataset: variant")
+ .Input("table: resource")
+ .Input("column_families: string")
+ .Input("columns: string")
+ .Output("handle: variant")
+ .SetShapeFn(shape_inference::ScalarShape);
+
+REGISTER_OP("BigtablePrefixKeyDataset")
+ .Input("table: resource")
+ .Input("prefix: string")
+ .Output("handle: variant")
+ .SetIsStateful() // TODO(b/65524810): Source dataset ops must be marked
+ // stateful to inhibit constant folding.
+ .SetShapeFn(shape_inference::ScalarShape);
+
+REGISTER_OP("BigtableRangeKeyDataset")
+ .Input("table: resource")
+ .Input("start_key: string")
+ .Input("end_key: string")
+ .Output("handle: variant")
+ .SetIsStateful() // TODO(b/65524810): Source dataset ops must be marked
+ // stateful to inhibit constant folding.
+ .SetShapeFn(shape_inference::ScalarShape);
+
+REGISTER_OP("BigtableSampleKeysDataset")
+ .Input("table: resource")
+ .Output("handle: variant")
+ .SetIsStateful() // TODO(b/65524810): Source dataset ops must be marked
+ // stateful to inhibit constant folding.
+ .SetShapeFn(shape_inference::ScalarShape);
+
+REGISTER_OP("BigtableSampleKeyPairsDataset")
+ .Input("table: resource")
+ .Input("prefix: string")
+ .Input("start_key: string")
+ .Input("end_key: string")
+ .Output("handle: variant")
+ .SetIsStateful() // TODO(b/65524810): Source dataset ops must be marked
+ // stateful to inhibit constant folding.
+ .SetShapeFn(shape_inference::ScalarShape);
+
+// TODO(saeta): Support continuing despite bad data (e.g. empty string), or
+// skipping incomplete rows.
+REGISTER_OP("BigtableScanDataset")
+ .Input("table: resource")
+ .Input("prefix: string")
+ .Input("start_key: string")
+ .Input("end_key: string")
+ .Input("column_families: string")
+ .Input("columns: string")
+ .Input("probability: float")
+ .Output("handle: variant")
+ .SetIsStateful() // TODO(b/65524810): Source dataset ops must be marked
+ // stateful to inhibit constant folding.
+ .SetShapeFn(shape_inference::ScalarShape);
+
+} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/ops/bigtable_test_ops.cc b/tensorflow/contrib/bigtable/ops/bigtable_test_ops.cc
new file mode 100644
index 0000000000..f7d02458f6
--- /dev/null
+++ b/tensorflow/contrib/bigtable/ops/bigtable_test_ops.cc
@@ -0,0 +1,27 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/framework/common_shape_fns.h"
+#include "tensorflow/core/framework/op.h"
+
+namespace tensorflow {
+
+REGISTER_OP("BigtableTestClient")
+ .Attr("container: string = ''")
+ .Attr("shared_name: string = ''")
+ .Output("client: resource")
+ .SetShapeFn(shape_inference::ScalarShape);
+
+} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/python/kernel_tests/__init__.py b/tensorflow/contrib/bigtable/python/kernel_tests/__init__.py
new file mode 100644
index 0000000000..292d8f4e51
--- /dev/null
+++ b/tensorflow/contrib/bigtable/python/kernel_tests/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""This module contains tests for the bigtable integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
diff --git a/tensorflow/contrib/bigtable/python/kernel_tests/bigtable_ops_test.py b/tensorflow/contrib/bigtable/python/kernel_tests/bigtable_ops_test.py
new file mode 100644
index 0000000000..2f20064619
--- /dev/null
+++ b/tensorflow/contrib/bigtable/python/kernel_tests/bigtable_ops_test.py
@@ -0,0 +1,272 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for Bigtable Ops."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib import bigtable
+from tensorflow.contrib.bigtable.ops import gen_bigtable_ops
+from tensorflow.contrib.bigtable.ops import gen_bigtable_test_ops
+from tensorflow.contrib.bigtable.python.ops import bigtable_api
+from tensorflow.contrib.util import loader
+from tensorflow.python.data.ops import dataset_ops
+from tensorflow.python.framework import errors
+from tensorflow.python.platform import resource_loader
+from tensorflow.python.platform import test
+from tensorflow.python.util import compat
+
+_bigtable_so = loader.load_op_library(
+ resource_loader.get_path_to_datafile("_bigtable_test.so"))
+
+
+def _ListOfTuplesOfStringsToBytes(values):
+ return [(compat.as_bytes(i[0]), compat.as_bytes(i[1])) for i in values]
+
+
+class BigtableOpsTest(test.TestCase):
+ COMMON_ROW_KEYS = ["r1", "r2", "r3"]
+ COMMON_VALUES = ["v1", "v2", "v3"]
+
+ def setUp(self):
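+    # Wire the in-memory fake Bigtable client into a BigTable wrapper so the
+    # dataset ops under test run without a real Cloud Bigtable connection.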
+ self._client = gen_bigtable_test_ops.bigtable_test_client()
+ table = gen_bigtable_ops.bigtable_table(self._client, "testtable")
+ self._table = bigtable.BigTable("testtable", None, table)
+
+ def _makeSimpleDataset(self):
+ output_rows = dataset_ops.Dataset.from_tensor_slices(self.COMMON_ROW_KEYS)
+ output_values = dataset_ops.Dataset.from_tensor_slices(self.COMMON_VALUES)
+ return dataset_ops.Dataset.zip((output_rows, output_values))
+
+ def _writeCommonValues(self, sess):
+ output_ds = self._makeSimpleDataset()
+ write_op = self._table.write(output_ds, ["cf1"], ["c1"])
+ sess.run(write_op)
+
+ def runReadKeyTest(self, read_ds):
+ itr = read_ds.make_initializable_iterator()
+ n = itr.get_next()
+ expected = list(self.COMMON_ROW_KEYS)
+ expected.reverse()
+ with self.test_session() as sess:
+ self._writeCommonValues(sess)
+ sess.run(itr.initializer)
+ for i in range(3):
+ output = sess.run(n)
+ want = expected.pop()
+ self.assertEqual(
+ compat.as_bytes(want), compat.as_bytes(output),
+ "Unequal at step %d: want: %s, got: %s" % (i, want, output))
+
+ def testReadPrefixKeys(self):
+ self.runReadKeyTest(self._table.keys_by_prefix_dataset("r"))
+
+ def testReadRangeKeys(self):
+ self.runReadKeyTest(self._table.keys_by_range_dataset("r1", "r4"))
+
+ def runScanTest(self, read_ds):
+ itr = read_ds.make_initializable_iterator()
+ n = itr.get_next()
+ expected_keys = list(self.COMMON_ROW_KEYS)
+ expected_keys.reverse()
+ expected_values = list(self.COMMON_VALUES)
+ expected_values.reverse()
+ with self.test_session() as sess:
+ self._writeCommonValues(sess)
+ sess.run(itr.initializer)
+ for i in range(3):
+ output = sess.run(n)
+ want = expected_keys.pop()
+ self.assertEqual(
+ compat.as_bytes(want), compat.as_bytes(output[0]),
+ "Unequal keys at step %d: want: %s, got: %s" % (i, want, output[0]))
+ want = expected_values.pop()
+ self.assertEqual(
+ compat.as_bytes(want), compat.as_bytes(output[1]),
+ "Unequal values at step: %d: want: %s, got: %s" % (i, want,
+ output[1]))
+
+ def testScanPrefixStringCol(self):
+ self.runScanTest(self._table.scan_prefix("r", cf1="c1"))
+
+ def testScanPrefixListCol(self):
+ self.runScanTest(self._table.scan_prefix("r", cf1=["c1"]))
+
+ def testScanPrefixTupleCol(self):
+ self.runScanTest(self._table.scan_prefix("r", columns=("cf1", "c1")))
+
+ def testScanRangeStringCol(self):
+ self.runScanTest(self._table.scan_range("r1", "r4", cf1="c1"))
+
+ def testScanRangeListCol(self):
+ self.runScanTest(self._table.scan_range("r1", "r4", cf1=["c1"]))
+
+ def testScanRangeTupleCol(self):
+ self.runScanTest(self._table.scan_range("r1", "r4", columns=("cf1", "c1")))
+
+ def testLookup(self):
+ ds = self._table.keys_by_prefix_dataset("r")
+ ds = ds.apply(self._table.lookup_columns(cf1="c1"))
+ itr = ds.make_initializable_iterator()
+ n = itr.get_next()
+ expected_keys = list(self.COMMON_ROW_KEYS)
+ expected_values = list(self.COMMON_VALUES)
+ expected_tuples = zip(expected_keys, expected_values)
+ with self.test_session() as sess:
+ self._writeCommonValues(sess)
+ sess.run(itr.initializer)
+ for i, elem in enumerate(expected_tuples):
+ output = sess.run(n)
+ self.assertEqual(
+ compat.as_bytes(elem[0]), compat.as_bytes(output[0]),
+ "Unequal keys at step %d: want: %s, got: %s" %
+ (i, compat.as_bytes(elem[0]), compat.as_bytes(output[0])))
+ self.assertEqual(
+ compat.as_bytes(elem[1]), compat.as_bytes(output[1]),
+ "Unequal values at step %d: want: %s, got: %s" %
+ (i, compat.as_bytes(elem[1]), compat.as_bytes(output[1])))
+
+ def testSampleKeys(self):
+ ds = self._table.sample_keys()
+ itr = ds.make_initializable_iterator()
+ n = itr.get_next()
+ expected_key = self.COMMON_ROW_KEYS[0]
+ with self.test_session() as sess:
+ self._writeCommonValues(sess)
+ sess.run(itr.initializer)
+ output = sess.run(n)
+ self.assertEqual(
+ compat.as_bytes(self.COMMON_ROW_KEYS[0]), compat.as_bytes(output),
+ "Unequal keys: want: %s, got: %s" % (compat.as_bytes(
+ self.COMMON_ROW_KEYS[0]), compat.as_bytes(output)))
+ output = sess.run(n)
+ self.assertEqual(
+ compat.as_bytes(self.COMMON_ROW_KEYS[2]), compat.as_bytes(output),
+ "Unequal keys: want: %s, got: %s" % (compat.as_bytes(
+ self.COMMON_ROW_KEYS[2]), compat.as_bytes(output)))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(n)
+
+ def runSampleKeyPairsTest(self, ds, expected_key_pairs):
+ itr = ds.make_initializable_iterator()
+ n = itr.get_next()
+ with self.test_session() as sess:
+ self._writeCommonValues(sess)
+ sess.run(itr.initializer)
+ for i, elems in enumerate(expected_key_pairs):
+ output = sess.run(n)
+ self.assertEqual(
+ compat.as_bytes(elems[0]), compat.as_bytes(output[0]),
+ "Unequal key pair (first element) at step %d; want: %s, got %s" %
+ (i, compat.as_bytes(elems[0]), compat.as_bytes(output[0])))
+ self.assertEqual(
+ compat.as_bytes(elems[1]), compat.as_bytes(output[1]),
+ "Unequal key pair (second element) at step %d; want: %s, got %s" %
+ (i, compat.as_bytes(elems[1]), compat.as_bytes(output[1])))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(n)
+
+ def testSampleKeyPairsSimplePrefix(self):
+ ds = bigtable_api._BigtableSampleKeyPairsDataset(
+ self._table, prefix="r", start="", end="")
+ expected_key_pairs = [("r", "r1"), ("r1", "r3"), ("r3", "s")]
+ self.runSampleKeyPairsTest(ds, expected_key_pairs)
+
+ def testSampleKeyPairsSimpleRange(self):
+ ds = bigtable_api._BigtableSampleKeyPairsDataset(
+ self._table, prefix="", start="r1", end="r3")
+ expected_key_pairs = [("r1", "r3")]
+ self.runSampleKeyPairsTest(ds, expected_key_pairs)
+
+ def testSampleKeyPairsSkipRangePrefix(self):
+ ds = bigtable_api._BigtableSampleKeyPairsDataset(
+ self._table, prefix="r2", start="", end="")
+ expected_key_pairs = [("r2", "r3")]
+ self.runSampleKeyPairsTest(ds, expected_key_pairs)
+
+ def testSampleKeyPairsSkipRangeRange(self):
+ ds = bigtable_api._BigtableSampleKeyPairsDataset(
+ self._table, prefix="", start="r2", end="r3")
+ expected_key_pairs = [("r2", "r3")]
+ self.runSampleKeyPairsTest(ds, expected_key_pairs)
+
+ def testSampleKeyPairsOffsetRanges(self):
+ ds = bigtable_api._BigtableSampleKeyPairsDataset(
+ self._table, prefix="", start="r2", end="r4")
+ expected_key_pairs = [("r2", "r3"), ("r3", "r4")]
+ self.runSampleKeyPairsTest(ds, expected_key_pairs)
+
+ def testSampleKeyPairEverything(self):
+ ds = bigtable_api._BigtableSampleKeyPairsDataset(
+ self._table, prefix="", start="", end="")
+ expected_key_pairs = [("", "r1"), ("r1", "r3"), ("r3", "")]
+ self.runSampleKeyPairsTest(ds, expected_key_pairs)
+
+ def testSampleKeyPairsPrefixAndStartKey(self):
+ ds = bigtable_api._BigtableSampleKeyPairsDataset(
+ self._table, prefix="r", start="r1", end="")
+ itr = ds.make_initializable_iterator()
+ with self.test_session() as sess:
+ with self.assertRaises(errors.InvalidArgumentError):
+ sess.run(itr.initializer)
+
+ def testSampleKeyPairsPrefixAndEndKey(self):
+ ds = bigtable_api._BigtableSampleKeyPairsDataset(
+ self._table, prefix="r", start="", end="r3")
+ itr = ds.make_initializable_iterator()
+ with self.test_session() as sess:
+ with self.assertRaises(errors.InvalidArgumentError):
+ sess.run(itr.initializer)
+
+ def testParallelScanPrefix(self):
+ ds = self._table.parallel_scan_prefix(prefix="r", cf1="c1")
+ itr = ds.make_initializable_iterator()
+ n = itr.get_next()
+ with self.test_session() as sess:
+ self._writeCommonValues(sess)
+ sess.run(itr.initializer)
+ expected_values = list(zip(self.COMMON_ROW_KEYS, self.COMMON_VALUES))
+ actual_values = []
+ for _ in range(len(expected_values)):
+ output = sess.run(n)
+ actual_values.append(output)
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(n)
+ self.assertItemsEqual(
+ _ListOfTuplesOfStringsToBytes(expected_values),
+ _ListOfTuplesOfStringsToBytes(actual_values))
+
+ def testParallelScanRange(self):
+ ds = self._table.parallel_scan_range(start="r1", end="r4", cf1="c1")
+ itr = ds.make_initializable_iterator()
+ n = itr.get_next()
+ with self.test_session() as sess:
+ self._writeCommonValues(sess)
+ sess.run(itr.initializer)
+ expected_values = list(zip(self.COMMON_ROW_KEYS, self.COMMON_VALUES))
+ actual_values = []
+ for _ in range(len(expected_values)):
+ output = sess.run(n)
+ actual_values.append(output)
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(n)
+ self.assertItemsEqual(
+ _ListOfTuplesOfStringsToBytes(expected_values),
+ _ListOfTuplesOfStringsToBytes(actual_values))
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/bigtable/python/ops/__init__.py b/tensorflow/contrib/bigtable/python/ops/__init__.py
new file mode 100644
index 0000000000..36d75b0d70
--- /dev/null
+++ b/tensorflow/contrib/bigtable/python/ops/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""This module contains the Python API for the Cloud Bigtable integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
diff --git a/tensorflow/contrib/bigtable/python/ops/bigtable_api.py b/tensorflow/contrib/bigtable/python/ops/bigtable_api.py
new file mode 100644
index 0000000000..9f73b7223c
--- /dev/null
+++ b/tensorflow/contrib/bigtable/python/ops/bigtable_api.py
@@ -0,0 +1,741 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""The Python API for TensorFlow's Bigtable integration.
+
+TensorFlow has support for reading from and writing to Cloud Bigtable. To use
+the Bigtable TensorFlow integration, first create a BigtableClient (which
+configures your connection to Cloud Bigtable), and then open a Table. The Table
+object then allows you to create numerous @{tf.data.Dataset}s to read data, or
+write a @{tf.data.Dataset} object to the underlying Bigtable Table.
+
+For background on Google Cloud Bigtable, see: https://cloud.google.com/bigtable.
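+
+A minimal end-to-end sketch (the project, instance, and table names here are
+placeholders):
+
+```
+client = BigtableClient("my-project", "my-instance")
+table = client.table("my-table")
+keys = table.keys_by_prefix_dataset("train-")
+rows = keys.apply(table.lookup_columns(cf1="feature"))
+```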
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from six import iteritems
+from six import string_types
+
+from tensorflow.contrib.bigtable.ops import gen_bigtable_ops
+from tensorflow.contrib.data.python.ops import interleave_ops
+from tensorflow.contrib.util import loader
+from tensorflow.python.data.ops import dataset_ops
+from tensorflow.python.data.util import nest
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.platform import resource_loader
+
+_bigtable_so = loader.load_op_library(
+ resource_loader.get_path_to_datafile("_bigtable.so"))
+
+
+class BigtableClient(object):
+ """BigtableClient is the entrypoint for interacting with Cloud Bigtable in TF.
+
+ BigtableClient encapsulates a connection to Cloud Bigtable, and exposes the
+ `table` method to open a Bigtable Table.
+ """
+
+ def __init__(self,
+ project_id,
+ instance_id,
+ connection_pool_size=None,
+ max_receive_message_size=None):
+ """Creates a BigtableClient that can be used to open connections to tables.
+
+ Args:
+ project_id: A string representing the GCP project id to connect to.
+ instance_id: A string representing the Bigtable instance to connect to.
+ connection_pool_size: (Optional.) A number representing the number of
+ concurrent connections to the Cloud Bigtable service to make.
+ max_receive_message_size: (Optional.) The maximum bytes received in a
+ single gRPC response.
+
+ Raises:
+      ValueError: If the arguments are invalid (e.g. wrong type, or out of
+        expected ranges, such as negative values).
+ """
+ if not isinstance(project_id, str):
+ raise ValueError("`project_id` must be a string")
+ self._project_id = project_id
+
+ if not isinstance(instance_id, str):
+ raise ValueError("`instance_id` must be a string")
+ self._instance_id = instance_id
+
+ if connection_pool_size is None:
+ connection_pool_size = -1
+ elif connection_pool_size < 1:
+ raise ValueError("`connection_pool_size` must be positive")
+
+ if max_receive_message_size is None:
+ max_receive_message_size = -1
+ elif max_receive_message_size < 1:
+ raise ValueError("`max_receive_message_size` must be positive")
+
+ self._connection_pool_size = connection_pool_size
+
+ self._resource = gen_bigtable_ops.bigtable_client(
+ project_id, instance_id, connection_pool_size, max_receive_message_size)
+
+ def table(self, name, snapshot=None):
+ """Opens a table and returns a `BigTable` object.
+
+ Args:
+ name: A `tf.string` `tf.Tensor` name of the table to open.
+ snapshot: Either a `tf.string` `tf.Tensor` snapshot id, or `True` to
+ request the creation of a snapshot. (Note: currently unimplemented.)
+
+ Returns:
+ A `BigTable` python object representing the operations available on the
+ table.
+ """
+ # TODO(saeta): Implement snapshot functionality.
+ table = gen_bigtable_ops.bigtable_table(self._resource, name)
+ return BigTable(name, snapshot, table)
+
+
+class BigTable(object):
+ """BigTable is the entrypoint for reading and writing data in Cloud Bigtable.
+
+  This BigTable class is the Python representation of a Cloud Bigtable table
+  within TensorFlow. Methods on this class allow data to be read from and
+  written to the Cloud Bigtable service in a flexible and high-performance
+  manner.
+ """
+
+ # TODO(saeta): Investigate implementing tf.contrib.lookup.LookupInterface.
+ # TODO(saeta): Consider variant tensors instead of resources (while supporting
+ # connection pooling).
+
+ def __init__(self, name, snapshot, resource):
+ self._name = name
+ self._snapshot = snapshot
+ self._resource = resource
+
+ def lookup_columns(self, *args, **kwargs):
+ """Retrieves the values of columns for a dataset of keys.
+
+ Example usage:
+ ```
+ table = bigtable_client.table("my_table")
+ key_dataset = table.get_keys_prefix("imagenet")
+ images = key_dataset.apply(table.lookup_columns(("cf1", "image"),
+ ("cf2", "label"),
+ ("cf2", "boundingbox")))
+ training_data = images.map(parse_and_crop, num_parallel_calls=64).batch(128)
+ ```
+
+ Alternatively, you can use keyword arguments to specify the columns to
+ capture. Example (same as above, rewritten):
+ ```
+ table = bigtable_client.table("my_table")
+ key_dataset = table.get_keys_prefix("imagenet")
+ images = key_dataset.apply(table.lookup_columns(
+ cf1="image", cf2=("label", "boundingbox")))
+ training_data = images.map(parse_and_crop, num_parallel_calls=64).batch(128)
+ ```
+
+ Note: certain kwargs keys are reserved, and thus some column families cannot
+ be identified using the kwargs syntax. Instead, please use the args syntax.
+ This list includes:
+ - 'name'
+ This list can change at any time.
+
+ Args:
+ *args: A list of tuples containing (column family, column name) pairs.
+      **kwargs: Column families (keys) and column qualifiers (values) to
+        retrieve; each value may be a single string or an iterable of strings.
+
+ Returns:
+ A function that can be passed to `tf.data.Dataset.apply` to retrieve the
+ values of columns for the rows.
+ """
+ table = self # Capture self
+ normalized = args
+ if normalized is None:
+ normalized = []
+ if isinstance(normalized, tuple):
+ normalized = list(normalized)
+ for key, value in iteritems(kwargs):
+ if key == "name":
+ continue
+      if isinstance(value, string_types):
+ normalized.append((key, value))
+ continue
+ for col in value:
+ normalized.append((key, col))
+
+ def _apply_fn(dataset):
+ # TODO(saeta): Verify dataset's types are correct!
+ return _BigtableLookupDataset(dataset, table, normalized)
+
+ return _apply_fn
+
+ def keys_by_range_dataset(self, start, end):
+ """Retrieves all row keys between start and end.
+
+ Note: it does NOT retrieve the values of columns.
+
+ Args:
+      start: The start row key. All row keys from `start` (inclusive) onward
+        will be retrieved.
+ end: (Optional.) The end row key. Rows up to (but not including) end will
+ be retrieved. If end is None, all subsequent row keys will be retrieved.
+
+ Returns:
+ A @{tf.data.Dataset} containing `tf.string` Tensors corresponding to all
+ of the row keys between `start` and `end`.
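+
+    For example, `keys_by_range_dataset("r1", "r4")` yields the row keys `r1`,
+    `r2`, and `r3` (when those rows exist), but not `r4`, since `end` is
+    exclusive.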
+ """
+ # TODO(saeta): Make inclusive / exclusive configurable?
+ if end is None:
+ end = ""
+ return _BigtableRangeKeyDataset(self, start, end)
+
+ def keys_by_prefix_dataset(self, prefix):
+ """Retrieves the row keys matching a given prefix.
+
+ Args:
+ prefix: All row keys that begin with `prefix` in the table will be
+ retrieved.
+
+ Returns:
+      A @{tf.data.Dataset} containing `tf.string` Tensors corresponding to all
+ of the row keys matching that prefix.
+ """
+ return _BigtablePrefixKeyDataset(self, prefix)
+
+ def sample_keys(self):
+ """Retrieves a sampling of row keys from the Bigtable table.
+
+ This dataset is most often used in conjunction with
+ @{tf.contrib.data.parallel_interleave} to construct a set of ranges for
+ scanning in parallel.
+
+ Returns:
+ A @{tf.data.Dataset} returning string row keys.
+ """
+ return _BigtableSampleKeysDataset(self)
+
+ def scan_prefix(self, prefix, probability=None, columns=None, **kwargs):
+ """Retrieves row (including values) from the Bigtable service.
+
+ Rows with row-key prefixed by `prefix` will be retrieved.
+
+ Specifying the columns to retrieve for each row is done by either using
+ kwargs or in the columns parameter. To retrieve values of the columns "c1",
+ and "c2" from the column family "cfa", and the value of the column "c3"
+ from column family "cfb", the following datasets (`ds1`, and `ds2`) are
+ equivalent:
+
+ ```
+ table = # ...
+ ds1 = table.scan_prefix("row_prefix", columns=[("cfa", "c1"),
+ ("cfa", "c2"),
+ ("cfb", "c3")])
+ ds2 = table.scan_prefix("row_prefix", cfa=["c1", "c2"], cfb="c3")
+ ```
+
+ Note: only the latest value of a cell will be retrieved.
+
+ Args:
+      prefix: The prefix all row keys must match to be retrieved for
+        prefix-based scans.
+ probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
+ A non-1 value indicates to probabilistically sample rows with the
+ provided probability.
+      columns: The columns to read. Note: most commonly, they are expressed as
+        kwargs. Use the columns value if you are using column families that
+        are reserved. The values of columns and kwargs are merged. Columns is
+        a list of ("column_family", "column_qualifier") string tuples.
+ **kwargs: The column families and columns to read. Keys are treated as
+ column_families, and values can be either lists of strings, or strings
+ that are treated as the column qualifier (column name).
+
+ Returns:
+ A @{tf.data.Dataset} returning the row keys and the cell contents.
+
+ Raises:
+ ValueError: If the configured probability is unexpected.
+ """
+ probability = _normalize_probability(probability)
+ normalized = _normalize_columns(columns, kwargs)
+ return _BigtableScanDataset(self, prefix, "", "", normalized, probability)
+
+ def scan_range(self, start, end, probability=None, columns=None, **kwargs):
+ """Retrieves rows (including values) from the Bigtable service.
+
+ Rows with row-keys between `start` and `end` will be retrieved.
+
+ Specifying the columns to retrieve for each row is done by either using
+ kwargs or in the columns parameter. To retrieve values of the columns "c1",
+ and "c2" from the column family "cfa", and the value of the column "c3"
+ from column family "cfb", the following datasets (`ds1`, and `ds2`) are
+ equivalent:
+
+ ```
+ table = # ...
+ ds1 = table.scan_range("row_start", "row_end", columns=[("cfa", "c1"),
+ ("cfa", "c2"),
+ ("cfb", "c3")])
+ ds2 = table.scan_range("row_start", "row_end", cfa=["c1", "c2"], cfb="c3")
+ ```
+
+ Note: only the latest value of a cell will be retrieved.
+
+ Args:
+ start: The start of the range when scanning by range.
+ end: (Optional.) The end of the range when scanning by range.
+ probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
+ A non-1 value indicates to probabilistically sample rows with the
+ provided probability.
+      columns: The columns to read. Note: most commonly, they are expressed as
+        kwargs. Use the columns value if you are using column families that
+        are reserved. The values of columns and kwargs are merged. Columns is
+        a list of ("column_family", "column_qualifier") string tuples.
+ **kwargs: The column families and columns to read. Keys are treated as
+ column_families, and values can be either lists of strings, or strings
+ that are treated as the column qualifier (column name).
+
+ Returns:
+ A @{tf.data.Dataset} returning the row keys and the cell contents.
+
+ Raises:
+ ValueError: If the configured probability is unexpected.
+ """
+ probability = _normalize_probability(probability)
+ normalized = _normalize_columns(columns, kwargs)
+ return _BigtableScanDataset(self, "", start, end, normalized, probability)
+
+ def parallel_scan_prefix(self,
+ prefix,
+ num_parallel_scans=None,
+ probability=None,
+ columns=None,
+ **kwargs):
+ """Retrieves row (including values) from the Bigtable service at high speed.
+
+ Rows with row-key prefixed by `prefix` will be retrieved. This method is
+    similar to `scan_prefix`, but by contrast performs multiple sub-scans in
+ parallel in order to achieve higher performance.
+
+ Note: The dataset produced by this method is not deterministic!
+
+ Specifying the columns to retrieve for each row is done by either using
+ kwargs or in the columns parameter. To retrieve values of the columns "c1",
+ and "c2" from the column family "cfa", and the value of the column "c3"
+ from column family "cfb", the following datasets (`ds1`, and `ds2`) are
+ equivalent:
+
+ ```
+ table = # ...
+ ds1 = table.parallel_scan_prefix("row_prefix", columns=[("cfa", "c1"),
+ ("cfa", "c2"),
+ ("cfb", "c3")])
+ ds2 = table.parallel_scan_prefix("row_prefix", cfa=["c1", "c2"], cfb="c3")
+ ```
+
+ Note: only the latest value of a cell will be retrieved.
+
+ Args:
+      prefix: The prefix all row keys must match to be retrieved for
+        prefix-based scans.
+ num_parallel_scans: (Optional.) The number of concurrent scans against the
+ Cloud Bigtable instance.
+ probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
+ A non-1 value indicates to probabilistically sample rows with the
+ provided probability.
+      columns: The columns to read. Note: most commonly, they are expressed as
+        kwargs. Use the columns value if you are using column families that
+        are reserved. The values of columns and kwargs are merged. Columns is
+        a list of ("column_family", "column_qualifier") string tuples.
+ **kwargs: The column families and columns to read. Keys are treated as
+ column_families, and values can be either lists of strings, or strings
+ that are treated as the column qualifier (column name).
+
+ Returns:
+ A @{tf.data.Dataset} returning the row keys and the cell contents.
+
+ Raises:
+ ValueError: If the configured probability is unexpected.
+ """
+ probability = _normalize_probability(probability)
+ normalized = _normalize_columns(columns, kwargs)
+ ds = _BigtableSampleKeyPairsDataset(self, prefix, "", "")
+ return self._make_parallel_scan_dataset(ds, num_parallel_scans, probability,
+ normalized)
+
+ def parallel_scan_range(self,
+ start,
+ end,
+ num_parallel_scans=None,
+ probability=None,
+ columns=None,
+ **kwargs):
+ """Retrieves rows (including values) from the Bigtable service.
+
+ Rows with row-keys between `start` and `end` will be retrieved. This method
+    is similar to `scan_range`, but by contrast performs multiple sub-scans in
+ parallel in order to achieve higher performance.
+
+ Note: The dataset produced by this method is not deterministic!
+
+ Specifying the columns to retrieve for each row is done by either using
+ kwargs or in the columns parameter. To retrieve values of the columns "c1",
+ and "c2" from the column family "cfa", and the value of the column "c3"
+ from column family "cfb", the following datasets (`ds1`, and `ds2`) are
+ equivalent:
+
+ ```
+ table = # ...
+ ds1 = table.parallel_scan_range("row_start",
+ "row_end",
+ columns=[("cfa", "c1"),
+ ("cfa", "c2"),
+ ("cfb", "c3")])
+ ds2 = table.parallel_scan_range("row_start", "row_end",
+ cfa=["c1", "c2"], cfb="c3")
+ ```
+
+ Note: only the latest value of a cell will be retrieved.
+
+ Args:
+ start: The start of the range when scanning by range.
+ end: (Optional.) The end of the range when scanning by range.
+ num_parallel_scans: (Optional.) The number of concurrent scans against the
+ Cloud Bigtable instance.
+ probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
+ A non-1 value indicates to probabilistically sample rows with the
+ provided probability.
+      columns: The columns to read. Note: most commonly, they are expressed as
+        kwargs. Use the columns value if you are using column families that
+        are reserved. The values of columns and kwargs are merged. Columns is
+        a list of ("column_family", "column_qualifier") string tuples.
+ **kwargs: The column families and columns to read. Keys are treated as
+ column_families, and values can be either lists of strings, or strings
+ that are treated as the column qualifier (column name).
+
+ Returns:
+ A @{tf.data.Dataset} returning the row keys and the cell contents.
+
+ Raises:
+ ValueError: If the configured probability is unexpected.
+ """
+ probability = _normalize_probability(probability)
+ normalized = _normalize_columns(columns, kwargs)
+ ds = _BigtableSampleKeyPairsDataset(self, "", start, end)
+ return self._make_parallel_scan_dataset(ds, num_parallel_scans, probability,
+ normalized)
+
+ def write(self, dataset, column_families, columns, timestamp=None):
+ """Writes a dataset to the table.
+
+ Args:
+ dataset: A @{tf.data.Dataset} to be written to this table. It must produce
+ a list of number-of-columns+1 elements, all of which must be strings.
+ The first value will be used as the row key, and subsequent values will
+ be used as cell values for the corresponding columns from the
+ corresponding column_families and columns entries.
+ column_families: A @{tf.Tensor} of `tf.string`s corresponding to the
+ column names to store the dataset's elements into.
+ columns: A `tf.Tensor` of `tf.string`s corresponding to the column names
+ to store the dataset's elements into.
+ timestamp: (Optional.) An int64 timestamp to write all the values at.
+ Leave as None to use server-provided timestamps.
+
+ Returns:
+ A @{tf.Operation} that can be run to perform the write.
+
+ Raises:
+ ValueError: If there are unexpected or incompatible types, or if the
+ number of columns and column_families does not match the output of
+ `dataset`.
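+
+    For example, a sketch of writing one row with two columns (all names here
+    are illustrative):
+
+    ```
+    ds = tf.data.Dataset.from_tensor_slices((["r1"], ["v1"], ["v2"]))
+    write_op = table.write(ds, column_families=["cf1", "cf1"],
+                           columns=["c1", "c2"])
+    session.run(write_op)
+    ```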
+ """
+ if timestamp is None:
+ timestamp = -1 # Bigtable server provided timestamp.
+ for tensor_type in nest.flatten(dataset.output_types):
+ if tensor_type != dtypes.string:
+ raise ValueError("Not all elements of the dataset were `tf.string`")
+ for shape in nest.flatten(dataset.output_shapes):
+ if not shape.is_compatible_with(tensor_shape.scalar()):
+ raise ValueError("Not all elements of the dataset were scalars")
+ if len(column_families) != len(columns):
+ raise ValueError("len(column_families) != len(columns)")
+ if len(nest.flatten(dataset.output_types)) != len(columns) + 1:
+ raise ValueError("A column name must be specified for every component of "
+ "the dataset elements. (e.g.: len(columns) != "
+ "len(dataset.output_types))")
+ return gen_bigtable_ops.dataset_to_bigtable(
+ self._resource,
+ dataset._as_variant_tensor(), # pylint: disable=protected-access
+ column_families,
+ columns,
+ timestamp)
+
+ def _make_parallel_scan_dataset(self, ds, num_parallel_scans,
+ normalized_probability, normalized_columns):
+ """Builds a parallel dataset from a given range.
+
+ Args:
+ ds: A `_BigtableSampleKeyPairsDataset` returning ranges of keys to use.
+ num_parallel_scans: The number of concurrent parallel scans to use.
+ normalized_probability: A number between 0 and 1 for the keep probability.
+ normalized_columns: The column families and column qualifiers to retrieve.
+
+ Returns:
+ A @{tf.data.Dataset} representing the result of the parallel scan.
+ """
+ if num_parallel_scans is None:
+ num_parallel_scans = 50
+
+ ds = ds.shuffle(buffer_size=10000) # TODO(saeta): Make configurable.
+
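+    # Each sampled key range [start, end) becomes its own sub-scan dataset;
+    # parallel_interleave runs up to num_parallel_scans of them concurrently.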
+ def _interleave_fn(start, end):
+ return _BigtableScanDataset(
+ self,
+ prefix="",
+ start=start,
+ end=end,
+ normalized=normalized_columns,
+ probability=normalized_probability)
+
+    # Note: prefetch_input_elements must be set in order to avoid RPC timeouts.
+ ds = ds.apply(
+ interleave_ops.parallel_interleave(
+ _interleave_fn,
+ cycle_length=num_parallel_scans,
+ sloppy=True,
+ prefetch_input_elements=1))
+ return ds
+
+
+def _normalize_probability(probability):
+ if probability is None:
+ probability = 1.0
+ if isinstance(probability, float) and (probability <= 0.0 or
+ probability > 1.0):
+ raise ValueError("probability must be in the range (0, 1].")
+ return probability
+
+
+def _normalize_columns(columns, provided_kwargs):
+ """Converts arguments (columns, and kwargs dict) to C++ representation.
+
+ Args:
+    columns: a data structure containing the column families and qualifiers to
+      retrieve. Valid types include: (1) None, (2) a list of tuples, or (3) a
+      tuple of strings.
+ provided_kwargs: a dictionary containing the column families and qualifiers
+ to retrieve
+
+ Returns:
+ A list of pairs of column family+qualifier to retrieve.
+
+ Raises:
+ ValueError: If there are no cells to retrieve or the columns are in an
+ incorrect format.
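+
+  For example (illustrative):
+
+    _normalize_columns([("cfa", "c1")], {"cfb": ["c2", "c3"]})
+    # -> [("cfa", "c1"), ("cfb", "c2"), ("cfb", "c3")]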
+ """
+ normalized = columns
+ if normalized is None:
+ normalized = []
+ if isinstance(normalized, tuple):
+ if len(normalized) == 2:
+ normalized = [normalized]
+ else:
+ raise ValueError("columns was a tuple of inappropriate length")
+ for key, value in iteritems(provided_kwargs):
+ if key == "name":
+ continue
+ if isinstance(value, string_types):
+ normalized.append((key, value))
+ continue
+ for col in value:
+ normalized.append((key, col))
+ if not normalized:
+ raise ValueError("At least one column + column family must be specified.")
+ return normalized
+
+
+class _BigtableKeyDataset(dataset_ops.Dataset):
+ """_BigtableKeyDataset is an abstract class representing the keys of a table.
+ """
+
+ def __init__(self, table):
+ """Constructs a _BigtableKeyDataset.
+
+ Args:
+      table: a `BigTable` object.
+ """
+ super(_BigtableKeyDataset, self).__init__()
+ self._table = table
+
+ @property
+ def output_classes(self):
+ return ops.Tensor
+
+ @property
+ def output_shapes(self):
+ return tensor_shape.TensorShape([])
+
+ @property
+ def output_types(self):
+ return dtypes.string
+
+
+class _BigtablePrefixKeyDataset(_BigtableKeyDataset):
+ """_BigtablePrefixKeyDataset represents looking up keys by prefix.
+ """
+
+ def __init__(self, table, prefix):
+ super(_BigtablePrefixKeyDataset, self).__init__(table)
+ self._prefix = prefix
+
+ def _as_variant_tensor(self):
+ return gen_bigtable_ops.bigtable_prefix_key_dataset(
+ table=self._table._resource, # pylint: disable=protected-access
+ prefix=self._prefix)
+
+
+class _BigtableRangeKeyDataset(_BigtableKeyDataset):
+ """_BigtableRangeKeyDataset represents looking up keys by range.
+ """
+
+ def __init__(self, table, start, end):
+ super(_BigtableRangeKeyDataset, self).__init__(table)
+ self._start = start
+ self._end = end
+
+ def _as_variant_tensor(self):
+ return gen_bigtable_ops.bigtable_range_key_dataset(
+ table=self._table._resource, # pylint: disable=protected-access
+ start_key=self._start,
+ end_key=self._end)
+
+
+class _BigtableSampleKeysDataset(_BigtableKeyDataset):
+ """_BigtableSampleKeysDataset represents a sampling of row keys.
+ """
+
+ # TODO(saeta): Expose the data size offsets into the keys.
+
+ def __init__(self, table):
+ super(_BigtableSampleKeysDataset, self).__init__(table)
+
+ def _as_variant_tensor(self):
+ return gen_bigtable_ops.bigtable_sample_keys_dataset(
+ table=self._table._resource) # pylint: disable=protected-access
+
+
+class _BigtableLookupDataset(dataset_ops.Dataset):
+ """_BigtableLookupDataset represents a dataset that retrieves values for keys.
+ """
+
+ def __init__(self, dataset, table, normalized):
+ self._num_outputs = len(normalized) + 1 # 1 for row key
+ self._dataset = dataset
+ self._table = table
+ self._normalized = normalized
+ self._column_families = [i[0] for i in normalized]
+ self._columns = [i[1] for i in normalized]
+
+ @property
+ def output_classes(self):
+ return tuple([ops.Tensor] * self._num_outputs)
+
+ @property
+ def output_shapes(self):
+ return tuple([tensor_shape.TensorShape([])] * self._num_outputs)
+
+ @property
+ def output_types(self):
+ return tuple([dtypes.string] * self._num_outputs)
+
+ def _as_variant_tensor(self):
+ # pylint: disable=protected-access
+ return gen_bigtable_ops.bigtable_lookup_dataset(
+ keys_dataset=self._dataset._as_variant_tensor(),
+ table=self._table._resource,
+ column_families=self._column_families,
+ columns=self._columns)
+
+
+class _BigtableScanDataset(dataset_ops.Dataset):
+ """_BigtableScanDataset represents a dataset that retrieves keys and values.
+ """
+
+ def __init__(self, table, prefix, start, end, normalized, probability):
+ self._table = table
+ self._prefix = prefix
+ self._start = start
+ self._end = end
+ self._column_families = [i[0] for i in normalized]
+ self._columns = [i[1] for i in normalized]
+ self._probability = probability
+ self._num_outputs = len(normalized) + 1 # 1 for row key
+
+ @property
+ def output_classes(self):
+ return tuple([ops.Tensor] * self._num_outputs)
+
+ @property
+ def output_shapes(self):
+ return tuple([tensor_shape.TensorShape([])] * self._num_outputs)
+
+ @property
+ def output_types(self):
+ return tuple([dtypes.string] * self._num_outputs)
+
+ def _as_variant_tensor(self):
+ return gen_bigtable_ops.bigtable_scan_dataset(
+ table=self._table._resource, # pylint: disable=protected-access
+ prefix=self._prefix,
+ start_key=self._start,
+ end_key=self._end,
+ column_families=self._column_families,
+ columns=self._columns,
+ probability=self._probability)
+
+
+class _BigtableSampleKeyPairsDataset(dataset_ops.Dataset):
+ """_BigtableKeyRangeDataset returns key pairs from the Bigtable.
+ """
+
+ def __init__(self, table, prefix, start, end):
+ self._table = table
+ self._prefix = prefix
+ self._start = start
+ self._end = end
+
+ @property
+ def output_classes(self):
+ return (ops.Tensor, ops.Tensor)
+
+ @property
+ def output_shapes(self):
+ return (tensor_shape.TensorShape([]), tensor_shape.TensorShape([]))
+
+ @property
+ def output_types(self):
+ return (dtypes.string, dtypes.string)
+
+ def _as_variant_tensor(self):
+ # pylint: disable=protected-access
+ return gen_bigtable_ops.bigtable_sample_key_pairs_dataset(
+ table=self._table._resource,
+ prefix=self._prefix,
+ start_key=self._start,
+ end_key=self._end)
diff --git a/tensorflow/contrib/boosted_trees/estimator_batch/BUILD b/tensorflow/contrib/boosted_trees/estimator_batch/BUILD
index 8cff1a3bb1..ef0e80cd09 100644
--- a/tensorflow/contrib/boosted_trees/estimator_batch/BUILD
+++ b/tensorflow/contrib/boosted_trees/estimator_batch/BUILD
@@ -15,8 +15,9 @@ py_library(
srcs = ["__init__.py"],
srcs_version = "PY2AND3",
deps = [
- "custom_export_strategy",
+ ":custom_export_strategy",
":custom_loss_head",
+ ":distillation_loss",
":estimator",
":model",
":trainer_hooks",
@@ -144,6 +145,7 @@ py_library(
srcs = ["dnn_tree_combined_estimator.py"],
srcs_version = "PY2AND3",
deps = [
+ ":distillation_loss",
":estimator_utils",
":trainer_hooks",
"//tensorflow/contrib/boosted_trees:gbdt_batch",
@@ -156,6 +158,17 @@ py_library(
],
)
+py_library(
+ name = "distillation_loss",
+ srcs = ["distillation_loss.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/contrib/learn",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn",
+ ],
+)
+
py_test(
name = "dnn_tree_combined_estimator_test",
size = "medium",
diff --git a/tensorflow/contrib/boosted_trees/estimator_batch/distillation_loss.py b/tensorflow/contrib/boosted_trees/estimator_batch/distillation_loss.py
new file mode 100644
index 0000000000..9aacc55343
--- /dev/null
+++ b/tensorflow/contrib/boosted_trees/estimator_batch/distillation_loss.py
@@ -0,0 +1,75 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Utill functions for distillation loss.
+
+The distillation loss_fn will be called with the following:
+
+Args:
+ dnn_logits: Tensor of logits from the dnn, treated as the "target". This will
+ be the output of a call to tf.stop_gradient().
+ tree_logits: Tensor of logits from the tree, treated as the "predictions".
+ example_weights: Tensor of example weights, or a single scalar.
+
+Returns:
+ A scalar indicating the reduced loss for that batch of examples.
+
+Note: we call the loss_fn defined in the contrib head, which computes two
+losses, the first one for training and the second one for reporting. We only
+take the first one here.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn
+
+
+def _logits_to_label_for_tree(logits, n_classes):
+ if n_classes == 2:
+ return math_ops.sigmoid(logits)
+ else:
+ return nn.softmax(logits)
+
+
+def create_dnn_to_tree_squared_loss_fn(n_classes):
+ """Returns a squared loss function for dnn to tree distillation."""
+
+ def _dnn_to_tree_squared_loss(dnn_logits, tree_logits, example_weights):
+ return head_lib._mean_squared_loss( # pylint: disable=protected-access
+ labels=_logits_to_label_for_tree(dnn_logits, n_classes),
+ logits=_logits_to_label_for_tree(tree_logits, n_classes),
+ weights=example_weights)[0]
+
+ return _dnn_to_tree_squared_loss
+
+
+def create_dnn_to_tree_cross_entropy_loss_fn(n_classes):
+ """Returns a cross entropy loss function for dnn to tree distillation."""
+
+ def _dnn_to_tree_cross_entropy_loss(dnn_logits, tree_logits, example_weights):
+ if n_classes == 2:
+ return head_lib._log_loss_with_two_classes( # pylint: disable=protected-access
+ labels=_logits_to_label_for_tree(dnn_logits, n_classes),
+ logits=tree_logits,
+ weights=example_weights)[0]
+ else:
+ return head_lib._softmax_cross_entropy_loss( # pylint: disable=protected-access
+ labels=_logits_to_label_for_tree(dnn_logits, n_classes),
+ logits=tree_logits,
+ weights=example_weights)[0]
+
+ return _dnn_to_tree_cross_entropy_loss
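A small illustrative sketch of the cross-entropy factory above (TF 1.x graph mode; the logits values are made up). For the binary case, the returned loss_fn turns the fixed dnn logits into soft labels via a sigmoid and scores the raw tree logits against them:

    import tensorflow as tf

    loss_fn = create_dnn_to_tree_cross_entropy_loss_fn(n_classes=2)
    dnn_logits = tf.stop_gradient(tf.constant([[2.0], [-1.0]]))
    tree_logits = tf.constant([[0.5], [-0.5]])
    example_weights = tf.constant(1.0)
    distill_loss = loss_fn(dnn_logits, tree_logits, example_weights)
    with tf.Session() as sess:
      print(sess.run(distill_loss))  # reduced loss for this batch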
diff --git a/tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator.py b/tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator.py
index 911d87fa10..7eb429b636 100644
--- a/tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator.py
+++ b/tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator.py
@@ -24,7 +24,9 @@ from __future__ import division
from __future__ import print_function
import six
+
from tensorflow.contrib import layers
+from tensorflow.contrib.boosted_trees.estimator_batch import distillation_loss
from tensorflow.contrib.boosted_trees.estimator_batch import estimator_utils
from tensorflow.contrib.boosted_trees.estimator_batch import trainer_hooks
from tensorflow.contrib.boosted_trees.python.ops import model_ops
@@ -35,11 +37,13 @@ from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
+from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
@@ -77,6 +81,7 @@ def _dnn_tree_combined_model_fn(features,
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
+ dnn_to_tree_distillation_param=None,
use_core_versions=False):
"""DNN and GBDT combined model_fn.
@@ -117,6 +122,13 @@ def _dnn_tree_combined_model_fn(features,
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
+    dnn_to_tree_distillation_param: A tuple of (float, loss_fn), where the
+      float defines the weight of the distillation loss, and the loss_fn
+      computes the distillation loss from dnn_logits, tree_logits and a
+      weight tensor. If the entire tuple is None, no distillation is
+      applied. If only the loss_fn is None, the sigmoid/softmax cross
+      entropy loss is used by default. When distillation is applied,
+      `predict_with_tree_only` will be set to True.
use_core_versions: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
@@ -132,6 +144,12 @@ def _dnn_tree_combined_model_fn(features,
if not dnn_feature_columns:
raise ValueError("dnn_feature_columns must be specified")
+ if dnn_to_tree_distillation_param:
+ if not predict_with_tree_only:
+      logging.warning("Updating predict_with_tree_only to True since "
+                      "distillation is specified.")
+ predict_with_tree_only = True
+
# Build DNN Logits.
dnn_parent_scope = "dnn"
dnn_partitioner = dnn_input_layer_partitioner or (
@@ -225,6 +243,25 @@ def _dnn_tree_combined_model_fn(features,
def _tree_train_op_fn(loss):
"""Returns the op to optimize the loss."""
+ if dnn_to_tree_distillation_param:
+ loss_weight, loss_fn = dnn_to_tree_distillation_param
+ weight_tensor = head_lib._weight_tensor( # pylint: disable=protected-access
+ features, head.weight_column_name)
+ dnn_logits_fixed = array_ops.stop_gradient(dnn_logits)
+
+ if loss_fn is None:
+        # We create a loss_fn similar to the head loss_fn for
+        # multi_class_head, which was previously used as the default.
+ n_classes = 2 if head.logits_dimension == 1 else head.logits_dimension
+ loss_fn = distillation_loss.create_dnn_to_tree_cross_entropy_loss_fn(
+ n_classes)
+
+ dnn_to_tree_distillation_loss = loss_weight * loss_fn(
+ dnn_logits_fixed, tree_logits, weight_tensor)
+ summary.scalar("dnn_to_tree_distillation_loss",
+ dnn_to_tree_distillation_loss)
+ loss += dnn_to_tree_distillation_loss
+
update_op = gbdt_model.train(loss, predictions_dict, labels)
with ops.control_dependencies(
[update_op]), (ops.colocate_with(global_step)):
@@ -232,7 +269,7 @@ def _dnn_tree_combined_model_fn(features,
return update_op
if predict_with_tree_only:
- if mode == model_fn.ModeKeys.TRAIN or mode == model_fn.ModeKeys.PREDICT:
+ if mode == model_fn.ModeKeys.TRAIN or mode == model_fn.ModeKeys.INFER:
tree_train_logits = tree_logits
else:
tree_train_logits = control_flow_ops.cond(
@@ -331,6 +368,7 @@ class DNNBoostedTreeCombinedClassifier(estimator.Estimator):
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
+ dnn_to_tree_distillation_param=None,
use_core_versions=False):
"""Initializes a DNNBoostedTreeCombinedClassifier instance.
@@ -378,6 +416,13 @@ class DNNBoostedTreeCombinedClassifier(estimator.Estimator):
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
+      dnn_to_tree_distillation_param: A tuple of (float, loss_fn), where the
+        float defines the weight of the distillation loss, and the loss_fn
+        computes the distillation loss from dnn_logits, tree_logits and a
+        weight tensor. If the entire tuple is None, no distillation is
+        applied. If only the loss_fn is None, the sigmoid/softmax cross
+        entropy loss is used by default. When distillation is applied,
+        `predict_with_tree_only` will be set to True.
use_core_versions: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
"""
@@ -409,6 +454,7 @@ class DNNBoostedTreeCombinedClassifier(estimator.Estimator):
predict_with_tree_only=predict_with_tree_only,
tree_feature_columns=tree_feature_columns,
tree_center_bias=tree_center_bias,
+ dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
use_core_versions=use_core_versions)
super(DNNBoostedTreeCombinedClassifier, self).__init__(
@@ -442,6 +488,7 @@ class DNNBoostedTreeCombinedRegressor(estimator.Estimator):
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
+ dnn_to_tree_distillation_param=None,
use_core_versions=False):
"""Initializes a DNNBoostedTreeCombinedRegressor instance.
@@ -489,6 +536,13 @@ class DNNBoostedTreeCombinedRegressor(estimator.Estimator):
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
+      dnn_to_tree_distillation_param: A tuple of (float, loss_fn), where the
+        float defines the weight of the distillation loss, and the loss_fn
+        computes the distillation loss from dnn_logits, tree_logits and a
+        weight tensor. If the entire tuple is None, no distillation is
+        applied. If only the loss_fn is None, the sigmoid/softmax cross
+        entropy loss is used by default. When distillation is applied,
+        `predict_with_tree_only` will be set to True.
use_core_versions: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
"""
@@ -525,6 +579,7 @@ class DNNBoostedTreeCombinedRegressor(estimator.Estimator):
predict_with_tree_only=predict_with_tree_only,
tree_feature_columns=tree_feature_columns,
tree_center_bias=tree_center_bias,
+ dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
use_core_versions=use_core_versions)
super(DNNBoostedTreeCombinedRegressor, self).__init__(
@@ -559,6 +614,7 @@ class DNNBoostedTreeCombinedEstimator(estimator.Estimator):
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
+ dnn_to_tree_distillation_param=None,
use_core_versions=False):
"""Initializes a DNNBoostedTreeCombinedEstimator instance.
@@ -601,6 +657,13 @@ class DNNBoostedTreeCombinedEstimator(estimator.Estimator):
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
+      dnn_to_tree_distillation_param: A tuple of (float, loss_fn), where the
+        float defines the weight of the distillation loss, and the loss_fn
+        computes the distillation loss from dnn_logits, tree_logits and a
+        weight tensor. If the entire tuple is None, no distillation is
+        applied. If only the loss_fn is None, the sigmoid/softmax cross
+        entropy loss is used by default. When distillation is applied,
+        `predict_with_tree_only` will be set to True.
use_core_versions: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
"""
@@ -626,6 +689,7 @@ class DNNBoostedTreeCombinedEstimator(estimator.Estimator):
predict_with_tree_only=predict_with_tree_only,
tree_feature_columns=tree_feature_columns,
tree_center_bias=tree_center_bias,
+ dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
use_core_versions=use_core_versions)
super(DNNBoostedTreeCombinedEstimator, self).__init__(
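A construction sketch for the new parameter, mirroring the test below (`learner_config` is a placeholder for a learner_pb2.LearnerConfig). Passing (1.0, None) applies distillation with weight 1.0, falls back to the default sigmoid/softmax cross-entropy loss, and implicitly forces predict_with_tree_only=True:

    from tensorflow.contrib.layers.python.layers import feature_column

    classifier = DNNBoostedTreeCombinedClassifier(
        dnn_hidden_units=[8],
        dnn_feature_columns=[feature_column.real_valued_column("x")],
        tree_learner_config=learner_config,  # placeholder LearnerConfig
        num_trees=1,
        tree_examples_per_layer=3,
        n_classes=2,
        dnn_steps_to_train=10,
        dnn_to_tree_distillation_param=(1.0, None))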
diff --git a/tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator_test.py b/tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator_test.py
index f495edc62f..9b7acfa664 100644
--- a/tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator_test.py
+++ b/tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator_test.py
@@ -131,6 +131,30 @@ class DNNBoostedTreeCombinedTest(test_util.TensorFlowTestCase):
classifier.fit(input_fn=_train_input_fn, steps=15)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
+ def testFitAndEvaluateWithDistillation(self):
+ learner_config = learner_pb2.LearnerConfig()
+ learner_config.num_classes = 2
+ learner_config.constraints.max_tree_depth = 1
+ model_dir = tempfile.mkdtemp()
+ config = run_config.RunConfig()
+
+ classifier = estimator.DNNBoostedTreeCombinedClassifier(
+ dnn_hidden_units=[1],
+ dnn_feature_columns=[feature_column.real_valued_column("x")],
+ tree_learner_config=learner_config,
+ num_trees=1,
+ tree_examples_per_layer=3,
+ n_classes=2,
+ model_dir=model_dir,
+ config=config,
+ dnn_steps_to_train=10,
+ dnn_input_layer_to_tree=False,
+ tree_feature_columns=[feature_column.real_valued_column("x")],
+ dnn_to_tree_distillation_param=(1, None))
+
+ classifier.fit(input_fn=_train_input_fn, steps=15)
+ classifier.evaluate(input_fn=_eval_input_fn, steps=1)
+
if __name__ == "__main__":
googletest.main()
diff --git a/tensorflow/contrib/boosted_trees/estimator_batch/estimator.py b/tensorflow/contrib/boosted_trees/estimator_batch/estimator.py
index 9c36c30221..59a78515c6 100644
--- a/tensorflow/contrib/boosted_trees/estimator_batch/estimator.py
+++ b/tensorflow/contrib/boosted_trees/estimator_batch/estimator.py
@@ -269,3 +269,88 @@ class GradientBoostedDecisionTreeEstimator(estimator.Estimator):
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
+
+
+class GradientBoostedDecisionTreeRanker(estimator.Estimator):
+ """A ranking estimator using gradient boosted decision trees."""
+
+ def __init__(
+ self,
+ learner_config,
+ examples_per_layer,
+ head,
+ ranking_model_pair_keys,
+ num_trees=None,
+ feature_columns=None,
+ weight_column_name=None,
+ model_dir=None,
+ config=None,
+ label_keys=None,
+ feature_engineering_fn=None,
+ logits_modifier_function=None,
+ center_bias=False,
+ use_core_libs=False,
+ output_leaf_index=False,
+ ):
+ """Initializes a GradientBoostedDecisionTreeRanker instance.
+
+    This is an estimator that can be trained on pairwise data and used for
+    inference on non-paired data. This is essentially LambdaMART.
+ Args:
+ learner_config: A config for the learner.
+ examples_per_layer: Number of examples to accumulate before growing a
+ layer. It can also be a function that computes the number of examples
+ based on the depth of the layer that's being built.
+ head: `Head` instance.
+      ranking_model_pair_keys: Keys to distinguish between features for the
+        left and right parts of the training pairs for ranking. For example,
+ for an Example with features "a.f1" and "b.f1", the keys would be
+ ("a", "b").
+ num_trees: An int, number of trees to build.
+ feature_columns: A list of feature columns.
+ weight_column_name: Name of the column for weights, or None if not
+ weighted.
+ model_dir: Directory for model exports, etc.
+ config: `RunConfig` object to configure the runtime settings.
+ label_keys: Optional list of strings with size `[n_classes]` defining the
+ label vocabulary. Only supported for `n_classes` > 2.
+ feature_engineering_fn: Feature engineering function. Takes features and
+ labels which are the output of `input_fn` and returns features and
+ labels which will be fed into the model.
+ logits_modifier_function: A modifier function for the logits.
+ center_bias: Whether a separate tree should be created for first fitting
+ the bias.
+ use_core_libs: Whether feature columns and loss are from the core (as
+ opposed to contrib) version of tensorflow.
+      output_leaf_index: Whether to output leaf indices along with predictions
+        during inference. The leaf node indices are available in the
+        predictions dict under the key 'leaf_index'. It is a Tensor of rank 2
+        with shape [batch_size, num_trees].
+ For example,
+ result_iter = classifier.predict(...)
+ for result_dict in result_iter:
+ # access leaf index list by result_dict["leaf_index"]
+ # which contains one leaf index per tree
+
+ Raises:
+ ValueError: If learner_config is not valid.
+ """
+ super(GradientBoostedDecisionTreeRanker, self).__init__(
+ model_fn=model.ranking_model_builder,
+ params={
+ 'head': head,
+ 'n_classes': 2,
+ 'feature_columns': feature_columns,
+ 'learner_config': learner_config,
+ 'num_trees': num_trees,
+ 'weight_column_name': weight_column_name,
+ 'examples_per_layer': examples_per_layer,
+ 'center_bias': center_bias,
+ 'logits_modifier_function': logits_modifier_function,
+ 'use_core_libs': use_core_libs,
+ 'output_leaf_index': output_leaf_index,
+ 'ranking_model_pair_keys': ranking_model_pair_keys,
+ },
+ model_dir=model_dir,
+ config=config,
+ feature_engineering_fn=feature_engineering_fn)
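A usage sketch of the new ranker, mirroring the test below (`head_fn`, `learner_config`, and the input functions are placeholders). Training consumes paired features named "a.f1"/"b.f1"; inference consumes the unpaired "f1":

    ranker = GradientBoostedDecisionTreeRanker(
        head=head_fn,                    # e.g. a binary logistic Head
        learner_config=learner_config,   # placeholder LearnerConfig
        examples_per_layer=3,
        num_trees=1,
        ranking_model_pair_keys=("a", "b"),
        feature_columns=[core_feature_column.numeric_column("f1")],
        use_core_libs=True)
    ranker.fit(input_fn=pairwise_input_fn)      # features "a.f1", "b.f1"
    ranker.predict(input_fn=unpaired_input_fn)  # features "f1" only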
diff --git a/tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py b/tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py
index 75ef1b0500..2c2dcb039d 100644
--- a/tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py
+++ b/tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py
@@ -37,12 +37,31 @@ def _train_input_fn():
return features, label
+def _ranking_train_input_fn():
+ features = {
+ "a.f1": constant_op.constant([[3.], [0.3], [1.]]),
+ "a.f2": constant_op.constant([[0.1], [3.], [1.]]),
+ "b.f1": constant_op.constant([[13.], [0.4], [5.]]),
+ "b.f2": constant_op.constant([[1.], [3.], [0.01]]),
+ }
+ label = constant_op.constant([[0], [0], [1]], dtype=dtypes.int32)
+ return features, label
+
+
def _eval_input_fn():
features = {"x": constant_op.constant([[1.], [2.], [2.]])}
label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
return features, label
+def _infer_ranking_train_input_fn():
+ features = {
+ "f1": constant_op.constant([[3.], [2], [1.]]),
+ "f2": constant_op.constant([[0.1], [3.], [1.]])
+ }
+ return features, None
+
+
class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
def setUp(self):
@@ -155,6 +174,34 @@ class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
regressor.evaluate(input_fn=_eval_input_fn, steps=1)
regressor.export(self._export_dir_base)
+  def testRankingDontThrowExceptionForEstimator(self):
+ learner_config = learner_pb2.LearnerConfig()
+ learner_config.num_classes = 2
+ learner_config.constraints.max_tree_depth = 1
+ model_dir = tempfile.mkdtemp()
+ config = run_config.RunConfig()
+
+ head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
+ loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
+
+ model = estimator.GradientBoostedDecisionTreeRanker(
+ head=head_fn,
+ learner_config=learner_config,
+ num_trees=1,
+ examples_per_layer=3,
+ model_dir=model_dir,
+ config=config,
+ use_core_libs=True,
+ feature_columns=[
+ core_feature_column.numeric_column("f1"),
+ core_feature_column.numeric_column("f2")
+ ],
+ ranking_model_pair_keys=("a", "b"))
+
+ model.fit(input_fn=_ranking_train_input_fn, steps=1000)
+ model.evaluate(input_fn=_ranking_train_input_fn, steps=1)
+ model.predict(input_fn=_infer_ranking_train_input_fn)
+
if __name__ == "__main__":
googletest.main()
diff --git a/tensorflow/contrib/boosted_trees/estimator_batch/model.py b/tensorflow/contrib/boosted_trees/estimator_batch/model.py
index 1ee8911989..0e8a56e6e9 100644
--- a/tensorflow/contrib/boosted_trees/estimator_batch/model.py
+++ b/tensorflow/contrib/boosted_trees/estimator_batch/model.py
@@ -20,6 +20,7 @@ from __future__ import print_function
import copy
+from tensorflow.contrib import learn
from tensorflow.contrib.boosted_trees.estimator_batch import estimator_utils
from tensorflow.contrib.boosted_trees.estimator_batch import trainer_hooks
from tensorflow.contrib.boosted_trees.python.ops import model_ops
@@ -28,7 +29,6 @@ from tensorflow.python.framework import ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import training_util
-
def model_builder(features, labels, mode, params, config):
"""Multi-machine batch gradient descent tree model.
@@ -141,3 +141,184 @@ def model_builder(features, labels, mode, params, config):
trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
finalized_trees))
return model_fn_ops
+
+
+def ranking_model_builder(features, labels, mode, params, config):
+ """Multi-machine batch gradient descent tree model for ranking.
+
+ Args:
+ features: `Tensor` or `dict` of `Tensor` objects.
+ labels: Labels used to train on.
+ mode: Mode we are in. (TRAIN/EVAL/INFER)
+ params: A dict of hyperparameters.
+ The following hyperparameters are expected:
+ * head: A `Head` instance.
+ * learner_config: A config for the learner.
+ * feature_columns: An iterable containing all the feature columns used by
+ the model.
+ * examples_per_layer: Number of examples to accumulate before growing a
+ layer. It can also be a function that computes the number of examples
+ based on the depth of the layer that's being built.
+ * weight_column_name: The name of weight column.
+ * center_bias: Whether a separate tree should be created for first fitting
+ the bias.
+ * ranking_model_pair_keys (Optional): Keys to distinguish between features
+      for the left and right parts of the training pairs for ranking. For
+      example, for an Example with features "a.f1" and "b.f1", the keys
+      would be ("a", "b").
+ config: `RunConfig` of the estimator.
+
+ Returns:
+ A `ModelFnOps` object.
+ Raises:
+ ValueError: if inputs are not valid.
+ """
+ head = params["head"]
+ learner_config = params["learner_config"]
+ examples_per_layer = params["examples_per_layer"]
+ feature_columns = params["feature_columns"]
+ weight_column_name = params["weight_column_name"]
+ num_trees = params["num_trees"]
+ use_core_libs = params["use_core_libs"]
+ logits_modifier_function = params["logits_modifier_function"]
+ output_leaf_index = params["output_leaf_index"]
+ ranking_model_pair_keys = params["ranking_model_pair_keys"]
+
+ if features is None:
+ raise ValueError("At least one feature must be specified.")
+
+ if config is None:
+ raise ValueError("Missing estimator RunConfig.")
+
+ center_bias = params["center_bias"]
+
+ if isinstance(features, ops.Tensor):
+ features = {features.name: features}
+
+ # Make a shallow copy of features to ensure downstream usage
+ # is unaffected by modifications in the model function.
+ training_features = copy.copy(features)
+ training_features.pop(weight_column_name, None)
+ global_step = training_util.get_global_step()
+ with ops.device(global_step.device):
+ ensemble_handle = model_ops.tree_ensemble_variable(
+ stamp_token=0,
+ tree_ensemble_config="", # Initialize an empty ensemble.
+ name="ensemble_model")
+
+ # Extract the features.
+ if mode == learn.ModeKeys.TRAIN or mode == learn.ModeKeys.EVAL:
+ # For ranking pairwise training, we extract two sets of features.
+ if len(ranking_model_pair_keys) != 2:
+ raise ValueError("You must provide keys for ranking.")
+ left_pair_key = ranking_model_pair_keys[0]
+ right_pair_key = ranking_model_pair_keys[1]
+ if left_pair_key is None or right_pair_key is None:
+ raise ValueError("Both pair keys should be provided for ranking.")
+
+ features_1 = {}
+ features_2 = {}
+ for name in training_features:
+ feature = training_features[name]
+ new_name = name[2:]
+ if name.startswith(left_pair_key + "."):
+ features_1[new_name] = feature
+ else:
+ assert name.startswith(right_pair_key + ".")
+ features_2[new_name] = feature
+
+ main_features = features_1
+ supplementary_features = features_2
+ else:
+ # For non-ranking or inference ranking, we have only 1 set of features.
+ main_features = training_features
+
+ # Create GBDT model.
+ gbdt_model_main = gbdt_batch.GradientBoostedDecisionTreeModel(
+ is_chief=config.is_chief,
+ num_ps_replicas=config.num_ps_replicas,
+ ensemble_handle=ensemble_handle,
+ center_bias=center_bias,
+ examples_per_layer=examples_per_layer,
+ learner_config=learner_config,
+ feature_columns=feature_columns,
+ logits_dimension=head.logits_dimension,
+ features=main_features,
+ use_core_columns=use_core_libs,
+ output_leaf_index=output_leaf_index)
+
+ with ops.name_scope("gbdt", "gbdt_optimizer"):
+ # Logits for inference.
+ if mode == learn.ModeKeys.INFER:
+ predictions_dict = gbdt_model_main.predict(mode)
+ logits = predictions_dict[gbdt_batch.PREDICTIONS]
+ if logits_modifier_function:
+ logits = logits_modifier_function(logits, features, mode)
+ else:
+ gbdt_model_supplementary = gbdt_batch.GradientBoostedDecisionTreeModel(
+ is_chief=config.is_chief,
+ num_ps_replicas=config.num_ps_replicas,
+ ensemble_handle=ensemble_handle,
+ center_bias=center_bias,
+ examples_per_layer=examples_per_layer,
+ learner_config=learner_config,
+ feature_columns=feature_columns,
+ logits_dimension=head.logits_dimension,
+ features=supplementary_features,
+ use_core_columns=use_core_libs,
+ output_leaf_index=output_leaf_index)
+
+ # Logits for train and eval.
+ if not supplementary_features:
+ raise ValueError("Features for ranking must be specified.")
+
+ predictions_dict_1 = gbdt_model_main.predict(mode)
+ predictions_1 = predictions_dict_1[gbdt_batch.PREDICTIONS]
+
+ predictions_dict_2 = gbdt_model_supplementary.predict(mode)
+ predictions_2 = predictions_dict_2[gbdt_batch.PREDICTIONS]
+
+ logits = predictions_1 - predictions_2
+ if logits_modifier_function:
+ logits = logits_modifier_function(logits, features, mode)
+
+ predictions_dict = predictions_dict_1
+ predictions_dict[gbdt_batch.PREDICTIONS] = logits
+
+ def _train_op_fn(loss):
+ """Returns the op to optimize the loss."""
+ update_op = gbdt_model_main.train(loss, predictions_dict, labels)
+ with ops.control_dependencies(
+ [update_op]), (ops.colocate_with(global_step)):
+ update_op = state_ops.assign_add(global_step, 1).op
+ return update_op
+
+ create_estimator_spec_op = getattr(head, "create_estimator_spec", None)
+ if use_core_libs and callable(create_estimator_spec_op):
+ model_fn_ops = head.create_estimator_spec(
+ features=features,
+ mode=mode,
+ labels=labels,
+ train_op_fn=_train_op_fn,
+ logits=logits)
+ model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(model_fn_ops)
+ else:
+ model_fn_ops = head.create_model_fn_ops(
+ features=features,
+ mode=mode,
+ labels=labels,
+ train_op_fn=_train_op_fn,
+ logits=logits)
+
+ if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
+ model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
+ gbdt_batch.LEAF_INDEX]
+ if num_trees:
+ if center_bias:
+ num_trees += 1
+ finalized_trees, attempted_trees = (
+ gbdt_model_main.get_number_of_trees_tensor())
+ model_fn_ops.training_hooks.append(
+ trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
+ finalized_trees))
+ return model_fn_ops
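The left/right extraction above amounts to splitting a feature dict by key prefix. A standalone sketch of the same transformation, generalized to pair keys of any length (the code above assumes single-character keys via name[2:]):

    def split_pair_features(features, left_key, right_key):
      """Splits {"a.f1": t1, "b.f1": t2} into ({"f1": t1}, {"f1": t2})."""
      left, right = {}, {}
      for name, tensor in features.items():
        if name.startswith(left_key + "."):
          left[name[len(left_key) + 1:]] = tensor
        else:
          assert name.startswith(right_key + ".")
          right[name[len(right_key) + 1:]] = tensor
      return left, right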
diff --git a/tensorflow/contrib/boosted_trees/lib/learner/batch/base_split_handler.py b/tensorflow/contrib/boosted_trees/lib/learner/batch/base_split_handler.py
index 56ff00b390..1b7f59ea42 100644
--- a/tensorflow/contrib/boosted_trees/lib/learner/batch/base_split_handler.py
+++ b/tensorflow/contrib/boosted_trees/lib/learner/batch/base_split_handler.py
@@ -37,6 +37,7 @@ class BaseSplitHandler(object):
gradient_shape,
hessian_shape,
multiclass_strategy,
+ loss_uses_sum_reduction=False,
name=None):
"""Constructor for BaseSplitHandler.
@@ -51,6 +52,8 @@ class BaseSplitHandler(object):
gradient_shape: A TensorShape, containing shape of gradients.
hessian_shape: A TensorShape, containing shape of hessians.
multiclass_strategy: Strategy describing how to treat multiclass problems.
+ loss_uses_sum_reduction: A scalar boolean tensor that specifies whether
+ SUM or MEAN reduction was used for the loss.
name: An optional handler name.
"""
self._l1_regularization = l1_regularization
@@ -62,6 +65,7 @@ class BaseSplitHandler(object):
self._multiclass_strategy = multiclass_strategy
self._hessian_shape = hessian_shape
self._gradient_shape = gradient_shape
+ self._loss_uses_sum_reduction = loss_uses_sum_reduction
def scheduled_reads(self):
"""Returns the list of `ScheduledOp`s required for update_stats."""
diff --git a/tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler.py b/tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler.py
index 9f78ab2024..bf686237ff 100644
--- a/tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler.py
+++ b/tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler.py
@@ -23,6 +23,7 @@ from tensorflow.contrib.boosted_trees.python.ops import split_handler_ops
from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
@@ -44,6 +45,7 @@ class EqualitySplitHandler(base_split_handler.BaseSplitHandler):
hessian_shape,
multiclass_strategy,
init_stamp_token=0,
+ loss_uses_sum_reduction=False,
name=None):
"""Initialize the internal state for this split handler.
@@ -62,6 +64,8 @@ class EqualitySplitHandler(base_split_handler.BaseSplitHandler):
multiclass_strategy: Strategy describing how to treat multiclass problems.
init_stamp_token: A tensor containing an scalar for initial stamp of the
stamped objects.
+ loss_uses_sum_reduction: A scalar boolean tensor that specifies whether
+ SUM or MEAN reduction was used for the loss.
name: An optional handler name.
"""
super(EqualitySplitHandler, self).__init__(
@@ -73,6 +77,7 @@ class EqualitySplitHandler(base_split_handler.BaseSplitHandler):
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=multiclass_strategy,
+ loss_uses_sum_reduction=loss_uses_sum_reduction,
name=name)
self._stats_accumulator = stats_accumulator_ops.StatsAccumulator(
init_stamp_token,
@@ -173,6 +178,10 @@
     # pair.
     num_minibatches, partition_ids, feature_ids, gradients, hessians = (
         self._stats_accumulator.flush(stamp_token, next_stamp_token))
+    # For sum_reduction, we don't need to divide by the number of minibatches.
+    num_minibatches = control_flow_ops.cond(
+        ops.convert_to_tensor(self._loss_uses_sum_reduction),
+        lambda: math_ops.to_int64(1), lambda: num_minibatches)
partition_ids, gains, split_infos = (
split_handler_ops.build_categorical_equality_splits(
num_minibatches=num_minibatches,
@@ -187,7 +197,7 @@ class EqualitySplitHandler(base_split_handler.BaseSplitHandler):
tree_complexity_regularization=self._tree_complexity_regularization,
min_node_weight=self._min_node_weight,
bias_feature_id=_BIAS_FEATURE_ID,
- multiclass_strategy=self._multiclass_strategy,))
+ multiclass_strategy=self._multiclass_strategy))
# There are no warm-up rounds needed in the equality column handler. So we
# always return ready.
are_splits_ready = constant_op.constant(True)
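A minimal sketch of the num_minibatches override above (TF 1.x, made-up values). Under SUM reduction the accumulated statistics are used as-is, so the per-minibatch divisor collapses to 1:

    import tensorflow as tf

    loss_uses_sum_reduction = tf.constant(True)
    num_minibatches = tf.constant(3, dtype=tf.int64)
    effective = tf.cond(loss_uses_sum_reduction,
                        lambda: tf.to_int64(1),
                        lambda: num_minibatches)
    with tf.Session() as sess:
      print(sess.run(effective))  # prints 1; would be 3 under MEAN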
diff --git a/tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler_test.py b/tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler_test.py
index 0b65eba2a7..ef253e7cec 100644
--- a/tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler_test.py
+++ b/tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler_test.py
@@ -90,7 +90,17 @@ class EqualitySplitHandlerTest(test_util.TensorFlowTestCase):
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
- with ops.control_dependencies([update_1]):
+ update_2 = split_handler.update_stats_sync(
+ 0,
+ partition_ids,
+ gradients,
+ hessians,
+ empty_gradients,
+ empty_hessians,
+ example_weights,
+ is_active=array_ops.constant([True, True]))
+
+ with ops.control_dependencies([update_1, update_2]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
@@ -159,6 +169,129 @@ class EqualitySplitHandlerTest(test_util.TensorFlowTestCase):
self.assertEqual(1, split_node.feature_id)
+ def testGenerateFeatureSplitCandidatesSumReduction(self):
+ with self.test_session() as sess:
+ # The data looks like the following:
+ # Example | Gradients | Partition | Feature ID |
+ # i0 | (0.2, 0.12) | 0 | 1,2 |
+ # i1 | (-0.5, 0.07) | 0 | |
+ # i2 | (1.2, 0.2) | 0 | 2 |
+ # i3 | (4.0, 0.13) | 1 | 1 |
+ gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
+ hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
+ partition_ids = [0, 0, 0, 1]
+ indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
+ values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
+
+ gradient_shape = tensor_shape.scalar()
+ hessian_shape = tensor_shape.scalar()
+ class_id = -1
+
+ split_handler = categorical_split_handler.EqualitySplitHandler(
+ l1_regularization=0.1,
+ l2_regularization=1,
+ tree_complexity_regularization=0,
+ min_node_weight=0,
+ sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
+ feature_column_group_id=0,
+ gradient_shape=gradient_shape,
+ hessian_shape=hessian_shape,
+ multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
+ init_stamp_token=0,
+ loss_uses_sum_reduction=True)
+ resources.initialize_resources(resources.shared_resources()).run()
+
+ empty_gradients, empty_hessians = get_empty_tensors(
+ gradient_shape, hessian_shape)
+ example_weights = array_ops.ones([4, 1], dtypes.float32)
+
+ update_1 = split_handler.update_stats_sync(
+ 0,
+ partition_ids,
+ gradients,
+ hessians,
+ empty_gradients,
+ empty_hessians,
+ example_weights,
+ is_active=array_ops.constant([True, True]))
+ update_2 = split_handler.update_stats_sync(
+ 0,
+ partition_ids,
+ gradients,
+ hessians,
+ empty_gradients,
+ empty_hessians,
+ example_weights,
+ is_active=array_ops.constant([True, True]))
+ with ops.control_dependencies([update_1, update_2]):
+ are_splits_ready, partitions, gains, splits = (
+ split_handler.make_splits(0, 1, class_id))
+ are_splits_ready, partitions, gains, splits = (
+ sess.run([are_splits_ready, partitions, gains, splits]))
+ self.assertTrue(are_splits_ready)
+ self.assertAllEqual([0, 1], partitions)
+
+ # Check the split on partition 0.
+ # -(0.4 + 2.4 - 0.1) / (0.24 + 0.4 + 1)
+ expected_left_weight = -1.6463414634146338
+
+ # (0.4 + 2.4 - 0.1) ** 2 / (0.24 + 0.4 + 1)
+ expected_left_gain = 4.445121951219511
+
+ # -(-1 + 0.1) / (0.14 + 1)
+ expected_right_weight = 0.789473684211
+
+ # (-1 + 0.1) ** 2 / (0.14 + 1)
+ expected_right_gain = 0.710526315789
+
+ # (0.4 + -1 + 2.4 - 0.1) ** 2 / (0.24 + 0.14 + 0.4 + 1)
+ expected_bias_gain = 1.6235955056179772
+
+ split_info = split_info_pb2.SplitInfo()
+ split_info.ParseFromString(splits[0])
+ left_child = split_info.left_child.vector
+ right_child = split_info.right_child.vector
+ split_node = split_info.split_node.categorical_id_binary_split
+
+ self.assertEqual(0, split_node.feature_column)
+
+ self.assertEqual(2, split_node.feature_id)
+
+ self.assertAllClose(
+ expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
+ 0.00001)
+
+ self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
+
+ self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
+
+ # Check the split on partition 1.
+ # (-8 + 0.1) / (0.26 + 1)
+ expected_left_weight = -6.26984126984
+ # (-8 + 0.1) ** 2 / (0.26 + 1)
+ expected_left_gain = 49.5317460317
+ expected_right_weight = 0
+ expected_right_gain = 0
+ # (-8 + 0.1) ** 2 / (0.26 + 1)
+ expected_bias_gain = 49.5317460317
+
+ # Verify candidate for partition 1, there's only one active feature here
+ # so zero gain is expected.
+ split_info = split_info_pb2.SplitInfo()
+ split_info.ParseFromString(splits[1])
+ left_child = split_info.left_child.vector
+ right_child = split_info.right_child.vector
+ split_node = split_info.split_node.categorical_id_binary_split
+ self.assertAllClose(0.0, gains[1], 0.00001)
+
+ self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
+
+ self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
+
+ self.assertEqual(0, split_node.feature_column)
+
+ self.assertEqual(1, split_node.feature_id)
+
def testGenerateFeatureSplitCandidatesMulticlass(self):
with self.test_session() as sess:
# Batch size is 4, 2 gradients per each instance.
diff --git a/tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler.py b/tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler.py
index 409a2d8f46..df0bec1fe3 100644
--- a/tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler.py
+++ b/tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler.py
@@ -99,6 +99,7 @@ class InequalitySplitHandler(base_split_handler.BaseSplitHandler):
hessian_shape,
multiclass_strategy,
init_stamp_token=0,
+ loss_uses_sum_reduction=False,
name=None):
"""Initialize the internal state for this split handler.
@@ -117,6 +118,8 @@ class InequalitySplitHandler(base_split_handler.BaseSplitHandler):
multiclass_strategy: Strategy describing how to treat multiclass problems.
init_stamp_token: A tensor containing an scalar for initial stamp of the
stamped objects.
+ loss_uses_sum_reduction: A scalar boolean tensor that specifies whether
+ SUM or MEAN reduction was used for the loss.
name: An optional handler name.
"""
super(InequalitySplitHandler, self).__init__(
@@ -128,7 +131,8 @@ class InequalitySplitHandler(base_split_handler.BaseSplitHandler):
feature_column_group_id=feature_column_group_id,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
- multiclass_strategy=multiclass_strategy)
+ multiclass_strategy=multiclass_strategy,
+ loss_uses_sum_reduction=loss_uses_sum_reduction)
self._stats_accumulator = stats_accumulator_ops.StatsAccumulator(
init_stamp_token,
gradient_shape,
@@ -160,6 +164,7 @@ class DenseSplitHandler(InequalitySplitHandler):
hessian_shape,
multiclass_strategy,
init_stamp_token=0,
+ loss_uses_sum_reduction=False,
name=None):
"""Initialize the internal state for this split handler.
@@ -179,6 +184,8 @@ class DenseSplitHandler(InequalitySplitHandler):
multiclass_strategy: Strategy describing how to treat multiclass problems.
init_stamp_token: A tensor containing an scalar for initial stamp of the
stamped objects.
+ loss_uses_sum_reduction: A scalar boolean tensor that specifies whether
+ SUM or MEAN reduction was used for the loss.
name: An optional handler name.
"""
super(DenseSplitHandler, self).__init__(
@@ -193,7 +200,8 @@ class DenseSplitHandler(InequalitySplitHandler):
name=name,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
- multiclass_strategy=multiclass_strategy)
+ multiclass_strategy=multiclass_strategy,
+ loss_uses_sum_reduction=loss_uses_sum_reduction)
self._dense_float_column = dense_float_column
# Register dense_make_stats_update function as an Op to the graph.
g = ops.get_default_graph()
@@ -255,15 +263,15 @@ class DenseSplitHandler(InequalitySplitHandler):
next_stamp_token, self._multiclass_strategy, class_id,
self._feature_column_group_id, self._l1_regularization,
self._l2_regularization, self._tree_complexity_regularization,
- self._min_node_weight))
+ self._min_node_weight, self._loss_uses_sum_reduction))
return are_splits_ready, partition_ids, gains, split_infos
-def _make_dense_split(quantile_accumulator_handle, stats_accumulator_handle,
- stamp_token, next_stamp_token, multiclass_strategy,
- class_id, feature_column_id, l1_regularization,
- l2_regularization, tree_complexity_regularization,
- min_node_weight, is_multi_dimentional):
+def _make_dense_split(
+ quantile_accumulator_handle, stats_accumulator_handle, stamp_token,
+ next_stamp_token, multiclass_strategy, class_id, feature_column_id,
+ l1_regularization, l2_regularization, tree_complexity_regularization,
+ min_node_weight, is_multi_dimentional, loss_uses_sum_reduction):
"""Function that builds splits for a dense feature column."""
# Get the bucket boundaries
are_splits_ready, buckets = (
@@ -291,7 +299,10 @@ def _make_dense_split(quantile_accumulator_handle, stats_accumulator_handle,
num_minibatches, partition_ids, bucket_ids, gradients, hessians = (
gen_stats_accumulator_ops.stats_accumulator_scalar_flush(
stats_accumulator_handle, stamp_token, next_stamp_token))
-
+  # For sum_reduction, we don't need to divide by the number of minibatches.
+ num_minibatches = control_flow_ops.cond(loss_uses_sum_reduction,
+ lambda: math_ops.to_int64(1),
+ lambda: num_minibatches)
# Put quantile and stats accumulator flushing in the dependency path.
with ops.control_dependencies([flush_quantiles, partition_ids]):
are_splits_ready = array_ops.identity(are_splits_ready)
@@ -329,6 +340,7 @@ class SparseSplitHandler(InequalitySplitHandler):
hessian_shape,
multiclass_strategy,
init_stamp_token=0,
+ loss_uses_sum_reduction=False,
name=None):
"""Initialize the internal state for this split handler.
@@ -348,6 +360,8 @@ class SparseSplitHandler(InequalitySplitHandler):
multiclass_strategy: Strategy describing how to treat multiclass problems.
init_stamp_token: A tensor containing an scalar for initial stamp of the
stamped objects.
+ loss_uses_sum_reduction: A scalar boolean tensor that specifies whether
+ SUM or MEAN reduction was used for the loss.
name: An optional handler name.
"""
super(SparseSplitHandler, self).__init__(
@@ -362,6 +376,7 @@ class SparseSplitHandler(InequalitySplitHandler):
hessian_shape=hessian_shape,
multiclass_strategy=multiclass_strategy,
init_stamp_token=init_stamp_token,
+ loss_uses_sum_reduction=loss_uses_sum_reduction,
name=name)
self._sparse_float_column = sparse_float_column
@@ -424,15 +439,15 @@ class SparseSplitHandler(InequalitySplitHandler):
next_stamp_token, self._multiclass_strategy, class_id,
self._feature_column_group_id, self._l1_regularization,
self._l2_regularization, self._tree_complexity_regularization,
- self._min_node_weight))
+ self._min_node_weight, self._loss_uses_sum_reduction))
return are_splits_ready, partition_ids, gains, split_infos
-def _make_sparse_split(quantile_accumulator_handle, stats_accumulator_handle,
- stamp_token, next_stamp_token, multiclass_strategy,
- class_id, feature_column_id, l1_regularization,
- l2_regularization, tree_complexity_regularization,
- min_node_weight, is_multi_dimentional):
+def _make_sparse_split(
+ quantile_accumulator_handle, stats_accumulator_handle, stamp_token,
+ next_stamp_token, multiclass_strategy, class_id, feature_column_id,
+ l1_regularization, l2_regularization, tree_complexity_regularization,
+ min_node_weight, is_multi_dimentional, loss_uses_sum_reduction):
"""Function that builds splits for a sparse feature column."""
# Get the bucket boundaries
are_splits_ready, buckets = (
@@ -460,7 +475,9 @@ def _make_sparse_split(quantile_accumulator_handle, stats_accumulator_handle,
num_minibatches, partition_ids, bucket_ids, gradients, hessians = (
gen_stats_accumulator_ops.stats_accumulator_scalar_flush(
stats_accumulator_handle, stamp_token, next_stamp_token))
-
+ num_minibatches = control_flow_ops.cond(loss_uses_sum_reduction,
+ lambda: math_ops.to_int64(1),
+ lambda: num_minibatches)
# Put quantile and stats accumulator flushing in the dependency path.
with ops.control_dependencies([flush_quantiles, partition_ids]):
are_splits_ready = array_ops.identity(are_splits_ready)
@@ -498,17 +515,18 @@ def _specialize_make_split(func, is_multi_dimentional):
dtypes.float32,
dtypes.float32,
dtypes.float32,
+ dtypes.bool,
noinline=True)
def f(quantile_accumulator_handle, stats_accumulator_handle, stamp_token,
next_stamp_token, multiclass_strategy, class_id, feature_column_id,
l1_regularization, l2_regularization, tree_complexity_regularization,
- min_node_weight):
+ min_node_weight, loss_uses_sum_reduction):
"""Function that builds splits for a sparse feature column."""
- return func(
- quantile_accumulator_handle, stats_accumulator_handle, stamp_token,
- next_stamp_token, multiclass_strategy, class_id, feature_column_id,
- l1_regularization, l2_regularization, tree_complexity_regularization,
- min_node_weight, is_multi_dimentional)
+ return func(quantile_accumulator_handle, stats_accumulator_handle,
+ stamp_token, next_stamp_token, multiclass_strategy, class_id,
+ feature_column_id, l1_regularization, l2_regularization,
+ tree_complexity_regularization, min_node_weight,
+ is_multi_dimentional, loss_uses_sum_reduction)
return f
diff --git a/tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler_test.py b/tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler_test.py
index 2f2c230211..d59732cf92 100644
--- a/tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler_test.py
+++ b/tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler_test.py
@@ -182,6 +182,144 @@ class DenseSplitHandlerTest(test_util.TensorFlowTestCase):
self.assertAllClose(0.52, split_node.threshold, 0.00001)
+ def testGenerateFeatureSplitCandidatesLossUsesSumReduction(self):
+ with self.test_session() as sess:
+ # The data looks like the following:
+ # Example | Gradients | Partition | Dense Quantile |
+ # i0 | (0.2, 0.12) | 0 | 1 |
+ # i1 | (-0.5, 0.07) | 0 | 1 |
+ # i2 | (1.2, 0.2) | 0 | 0 |
+ # i3 | (4.0, 0.13) | 1 | 1 |
+ dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
+ gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
+ hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
+ partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
+ class_id = -1
+
+ gradient_shape = tensor_shape.scalar()
+ hessian_shape = tensor_shape.scalar()
+ split_handler = ordinal_split_handler.DenseSplitHandler(
+ l1_regularization=0.2,
+ l2_regularization=2.,
+ tree_complexity_regularization=0.,
+ min_node_weight=0.,
+ epsilon=0.001,
+ num_quantiles=10,
+ feature_column_group_id=0,
+ dense_float_column=dense_column,
+ init_stamp_token=0,
+ gradient_shape=gradient_shape,
+ hessian_shape=hessian_shape,
+ multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
+ loss_uses_sum_reduction=True)
+ resources.initialize_resources(resources.shared_resources()).run()
+
+ empty_gradients, empty_hessians = get_empty_tensors(
+ gradient_shape, hessian_shape)
+ example_weights = array_ops.ones([4, 1], dtypes.float32)
+
+ update_1 = split_handler.update_stats_sync(
+ 0,
+ partition_ids,
+ gradients,
+ hessians,
+ empty_gradients,
+ empty_hessians,
+ example_weights,
+ is_active=array_ops.constant([True, True]))
+ with ops.control_dependencies([update_1]):
+ are_splits_ready = split_handler.make_splits(
+ np.int64(0), np.int64(1), class_id)[0]
+
+ with ops.control_dependencies([are_splits_ready]):
+ update_2 = split_handler.update_stats_sync(
+ 1,
+ partition_ids,
+ gradients,
+ hessians,
+ empty_gradients,
+ empty_hessians,
+ example_weights,
+ is_active=array_ops.constant([True, True]))
+ update_3 = split_handler.update_stats_sync(
+ 1,
+ partition_ids,
+ gradients,
+ hessians,
+ empty_gradients,
+ empty_hessians,
+ example_weights,
+ is_active=array_ops.constant([True, True]))
+ with ops.control_dependencies([update_2, update_3]):
+ are_splits_ready2, partitions, gains, splits = (
+ split_handler.make_splits(np.int64(1), np.int64(2), class_id))
+ are_splits_ready, are_splits_ready2, partitions, gains, splits = (
+ sess.run([
+ are_splits_ready, are_splits_ready2, partitions, gains, splits
+ ]))
+
+ # During the first iteration, inequality split handlers are not going to
+ # have any splits. Make sure that we return not_ready in that case.
+ self.assertFalse(are_splits_ready)
+ self.assertTrue(are_splits_ready2)
+
+ self.assertAllEqual([0, 1], partitions)
+
+ # Check the split on partition 0.
+ # -(2.4 - 0.2) / (0.4 + 2)
+ expected_left_weight = -0.91666
+
+ # expected_left_weight * -(2.4 - 0.2)
+ expected_left_gain = 2.016666666666666
+
+ # -(-1 + 0.4 + 0.2) / (0.38 + 2)
+ expected_right_weight = 0.1680672
+
+ # expected_right_weight * -(-1 + 0.4 + 0.2)
+ expected_right_gain = 0.0672268907563025
+
+ # (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
+ expected_bias_gain = 0.9208633093525178
+
+ split_info = split_info_pb2.SplitInfo()
+ split_info.ParseFromString(splits[0])
+ left_child = split_info.left_child.vector
+ right_child = split_info.right_child.vector
+ split_node = split_info.split_node.dense_float_binary_split
+ self.assertAllClose(
+ expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
+ 0.00001)
+
+ self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
+
+ self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
+
+ self.assertEqual(0, split_node.feature_column)
+
+ self.assertAllClose(0.3, split_node.threshold, 0.00001)
+
+ # Check the split on partition 1.
+ # (-8 + 0.2) / (0.26 + 2)
+ expected_left_weight = -3.4513274336283186
+ expected_right_weight = 0
+
+ # Verify candidate for partition 1, there's only one active bucket here
+ # so zero gain is expected.
+ split_info = split_info_pb2.SplitInfo()
+ split_info.ParseFromString(splits[1])
+ left_child = split_info.left_child.vector
+ right_child = split_info.right_child.vector
+ split_node = split_info.split_node.dense_float_binary_split
+ self.assertAllClose(0.0, gains[1], 0.00001)
+
+ self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
+
+ self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
+
+ self.assertEqual(0, split_node.feature_column)
+
+ self.assertAllClose(0.52, split_node.threshold, 0.00001)
+
def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self):
with self.test_session() as sess:
dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52])
@@ -798,6 +936,139 @@ class SparseSplitHandlerTest(test_util.TensorFlowTestCase):
self.assertAllClose(0.52, split_node.split.threshold)
+ def testGenerateFeatureSplitCandidatesLossUsesSumReduction(self):
+ with self.test_session() as sess:
+ # The data looks like the following:
+ # Example | Gradients | Partition | Sparse Quantile |
+ # i0 | (0.2, 0.12) | 0 | 1 |
+ # i1 | (-0.5, 0.07) | 0 | N/A |
+ # i2 | (1.2, 0.2) | 0 | 0 |
+ # i3 | (4.0, 0.13) | 1 | 1 |
+ gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
+ hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
+ example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
+ indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
+ values = array_ops.constant([0.52, 0.3, 0.52])
+ sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1])
+
+ gradient_shape = tensor_shape.scalar()
+ hessian_shape = tensor_shape.scalar()
+ class_id = -1
+
+ split_handler = ordinal_split_handler.SparseSplitHandler(
+ l1_regularization=0.0,
+ l2_regularization=4.0,
+ tree_complexity_regularization=0.0,
+ min_node_weight=0.0,
+ epsilon=0.01,
+ num_quantiles=2,
+ feature_column_group_id=0,
+ sparse_float_column=sparse_column,
+ init_stamp_token=0,
+ gradient_shape=gradient_shape,
+ hessian_shape=hessian_shape,
+ multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
+ loss_uses_sum_reduction=True)
+ resources.initialize_resources(resources.shared_resources()).run()
+
+ empty_gradients, empty_hessians = get_empty_tensors(
+ gradient_shape, hessian_shape)
+ example_weights = array_ops.ones([4, 1], dtypes.float32)
+
+ update_1 = split_handler.update_stats_sync(
+ 0,
+ example_partitions,
+ gradients,
+ hessians,
+ empty_gradients,
+ empty_hessians,
+ example_weights,
+ is_active=array_ops.constant([True, True]))
+ with ops.control_dependencies([update_1]):
+ are_splits_ready = split_handler.make_splits(
+ np.int64(0), np.int64(1), class_id)[0]
+ with ops.control_dependencies([are_splits_ready]):
+ update_2 = split_handler.update_stats_sync(
+ 1,
+ example_partitions,
+ gradients,
+ hessians,
+ empty_gradients,
+ empty_hessians,
+ example_weights,
+ is_active=array_ops.constant([True, True]))
+ update_3 = split_handler.update_stats_sync(
+ 1,
+ example_partitions,
+ gradients,
+ hessians,
+ empty_gradients,
+ empty_hessians,
+ example_weights,
+ is_active=array_ops.constant([True, True]))
+ with ops.control_dependencies([update_2, update_3]):
+ are_splits_ready2, partitions, gains, splits = (
+ split_handler.make_splits(np.int64(1), np.int64(2), class_id))
+ are_splits_ready, are_splits_ready2, partitions, gains, splits = (
+ sess.run([
+ are_splits_ready, are_splits_ready2, partitions, gains, splits
+ ]))
+
+ # During the first iteration, inequality split handlers are not going to
+ # have any splits. Make sure that we return not_ready in that case.
+ self.assertFalse(are_splits_ready)
+ self.assertTrue(are_splits_ready2)
+
+ self.assertAllEqual([0, 1], partitions)
+ # Check the split on partition 0.
+ # -(0.4 + 2.4) / (0.24 + 0.4 + 4)
+ expected_left_weight = -0.603448275862069
+ # (0.4 + 2.4) ** 2 / (0.24 + 0.4 + 4)
+ expected_left_gain = 1.689655172413793
+ # 1 / (0.14 + 4)
+ expected_right_weight = 0.24154589371980678
+ # 1 ** 2 / (0.14 + 4)
+ expected_right_gain = 0.24154589371980678
+ # (0.4 + 2.4 - 1) ** 2 / (0.24 + 0.4 + 0.14 + 4)
+ expected_bias_gain = 0.6778242677824265
+
+ split_info = split_info_pb2.SplitInfo()
+ split_info.ParseFromString(splits[0])
+ left_child = split_info.left_child.vector
+ right_child = split_info.right_child.vector
+ split_node = split_info.split_node.sparse_float_binary_split_default_right
+ self.assertAllClose(
+ expected_left_gain + expected_right_gain - expected_bias_gain, gains[0])
+
+ self.assertAllClose([expected_left_weight], left_child.value)
+
+ self.assertAllClose([expected_right_weight], right_child.value)
+
+ self.assertEqual(0, split_node.split.feature_column)
+
+ self.assertAllClose(0.52, split_node.split.threshold)
+
+ # Check the split on partition 1.
+ expected_left_weight = -1.8779342723004695
+ expected_right_weight = 0
+
+ # Verify candidate for partition 1, there's only one active bucket here
+ # so zero gain is expected.
+ split_info.ParseFromString(splits[1])
+ left_child = split_info.left_child.vector
+ right_child = split_info.right_child.vector
+ split_node = split_info.split_node.sparse_float_binary_split_default_left
+
+ self.assertAllClose(0.0, gains[1])
+
+ self.assertAllClose([expected_left_weight], left_child.value)
+
+ self.assertAllClose([expected_right_weight], right_child.value)
+
+ self.assertEqual(0, split_node.split.feature_column)
+
+ self.assertAllClose(0.52, split_node.split.threshold)
+
def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self):
with self.test_session() as sess:
# Batch is 4, 2 classes
diff --git a/tensorflow/contrib/boosted_trees/lib/quantiles/weighted_quantiles_summary.h b/tensorflow/contrib/boosted_trees/lib/quantiles/weighted_quantiles_summary.h
index a7e7bfc13c..69bb8fd4ad 100644
--- a/tensorflow/contrib/boosted_trees/lib/quantiles/weighted_quantiles_summary.h
+++ b/tensorflow/contrib/boosted_trees/lib/quantiles/weighted_quantiles_summary.h
@@ -51,7 +51,7 @@ class WeightedQuantilesSummary {
SummaryEntry() {
memset(this, 0, sizeof(*this));
- value = 0;
+ value = ValueType();
weight = 0;
min_rank = 0;
max_rank = 0;
diff --git a/tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc b/tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc
index 35b059f349..4fab2b0b7d 100644
--- a/tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc
+++ b/tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc
@@ -16,6 +16,7 @@
#include "tensorflow/contrib/boosted_trees/lib/utils/batch_features.h"
#include "tensorflow/contrib/boosted_trees/lib/utils/macros.h"
#include "tensorflow/contrib/boosted_trees/lib/utils/tensor_utils.h"
+#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace boosted_trees {
@@ -96,9 +97,11 @@ Status BatchFeatures::Initialize(
"Sparse float feature shape incompatible with batch size."));
auto tensor_shape = TensorShape({shape_flat(0), shape_flat(1)});
auto order_dims = sparse::SparseTensor::VarDimArray({0, 1});
- sparse_float_feature_columns_.emplace_back(sparse_float_feature_indices,
- sparse_float_feature_values,
- tensor_shape, order_dims);
+ sparse::SparseTensor sparse_tensor;
+ TF_RETURN_IF_ERROR(sparse::SparseTensor::Create(
+ sparse_float_feature_indices, sparse_float_feature_values, tensor_shape,
+ order_dims, &sparse_tensor));
+ sparse_float_feature_columns_.push_back(std::move(sparse_tensor));
}
// Read sparse int features.
@@ -136,9 +139,11 @@ Status BatchFeatures::Initialize(
"Sparse int feature shape incompatible with batch size."));
auto tensor_shape = TensorShape({shape_flat(0), shape_flat(1)});
auto order_dims = sparse::SparseTensor::VarDimArray({0, 1});
- sparse_int_feature_columns_.emplace_back(sparse_int_feature_indices,
- sparse_int_feature_values,
- tensor_shape, order_dims);
+ sparse::SparseTensor sparse_tensor;
+ TF_RETURN_IF_ERROR(sparse::SparseTensor::Create(
+ sparse_int_feature_indices, sparse_int_feature_values, tensor_shape,
+ order_dims, &sparse_tensor));
+ sparse_int_feature_columns_.push_back(std::move(sparse_tensor));
}
return Status::OK();
}
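The two hunks above replace a validating constructor with a SparseTensor::Create factory that reports bad inputs through a Status instead of failing inside the constructor. The same shape of API, sketched in Python for illustration (the names here are hypothetical, not part of TensorFlow):

    # Factory-with-status pattern: validate before constructing and return
    # the error as a value, rather than crashing in the constructor.
    class SparseData(object):
      def __init__(self, indices, values):
        self.indices, self.values = indices, values

    def create_sparse_data(indices, values):
      if len(indices) != len(values):
        return None, "indices/values length mismatch"  # Status-like error
      return SparseData(indices, values), None

    data, err = create_sparse_data([0, 2], [1.0, 2.0])
    assert err is None and data.values == [1.0, 2.0]
    _, err = create_sparse_data([0], [1.0, 2.0])
    assert err is not None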
diff --git a/tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc b/tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc
index d8a6088648..30c37435fe 100644
--- a/tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc
+++ b/tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc
@@ -43,27 +43,35 @@ TEST_F(ExamplesIterableTest, Iterate) {
test::AsTensor<int64>({0, 0, 2, 0, 3, 0, 4, 0}, {4, 2});
auto sparse_float_values1 = test::AsTensor<float>({-3.0f, 0.0f, 5.0f, 0.0f});
auto sparse_float_shape1 = TensorShape({8, 1});
- sparse::SparseTensor sparse_float_tensor1(
- sparse_float_indices1, sparse_float_values1, sparse_float_shape1);
+ sparse::SparseTensor sparse_float_tensor1;
+ TF_ASSERT_OK(
+ sparse::SparseTensor::Create(sparse_float_indices1, sparse_float_values1,
+ sparse_float_shape1, &sparse_float_tensor1));
auto sparse_float_indices2 = test::AsTensor<int64>(
{0, 1, 1, 0, 2, 1, 3, 0, 4, 1, 5, 0, 5, 1, 7, 0}, {8, 2});
auto sparse_float_values2 =
test::AsTensor<float>({1.f, 4.0f, 3.f, 7.0f, 4.3f, 9.0f, 0.8f, -4.0f});
auto sparse_float_shape2 = TensorShape({8, 2});
- sparse::SparseTensor sparse_float_tensor2(
- sparse_float_indices2, sparse_float_values2, sparse_float_shape2);
+ sparse::SparseTensor sparse_float_tensor2;
+ TF_ASSERT_OK(
+ sparse::SparseTensor::Create(sparse_float_indices2, sparse_float_values2,
+ sparse_float_shape2, &sparse_float_tensor2));
auto sparse_int_indices1 =
test::AsTensor<int64>({0, 0, 0, 1, 1, 0, 3, 0, 3, 1, 7, 0}, {6, 2});
auto sparse_int_values1 = test::AsTensor<int64>({1, 8, 0, 2, 0, 5});
auto sparse_int_shape1 = TensorShape({8, 2});
- sparse::SparseTensor sparse_int_tensor1(
- sparse_int_indices1, sparse_int_values1, sparse_int_shape1);
+ sparse::SparseTensor sparse_int_tensor1;
+ TF_ASSERT_OK(
+ sparse::SparseTensor::Create(sparse_int_indices1, sparse_int_values1,
+ sparse_int_shape1, &sparse_int_tensor1));
auto sparse_int_indices2 =
test::AsTensor<int64>({1, 0, 2, 0, 3, 0, 4, 0}, {4, 2});
auto sparse_int_values2 = test::AsTensor<int64>({7, 13, 4, 0});
auto sparse_int_shape2 = TensorShape({8, 1});
- sparse::SparseTensor sparse_int_tensor2(
- sparse_int_indices2, sparse_int_values2, sparse_int_shape2);
+ sparse::SparseTensor sparse_int_tensor2;
+ TF_ASSERT_OK(
+ sparse::SparseTensor::Create(sparse_int_indices2, sparse_int_values2,
+ sparse_int_shape2, &sparse_int_tensor2));
auto validate_example_features = [](int64 example_idx,
const Example& example) {
diff --git a/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py b/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
index bc8651ba92..1ee7f2395e 100644
--- a/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
+++ b/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
@@ -46,6 +46,7 @@ from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
+from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import device_setter
@@ -62,15 +63,11 @@ LEAF_INDEX = "leaf_index"
_FEATURE_NAME_TEMPLATE = "%s_%d"
# Keys in Training state.
-_NUM_LAYER_EXAMPLES = "num_layer_examples"
-_NUM_LAYER_STEPS = "num_layer_steps"
-_NUM_LAYERS = "num_layers"
-_ACTIVE_TREE = "active_tree"
-_ACTIVE_LAYER = "active_layer"
-_CONTINUE_CENTERING = "continue_centering"
-_BIAS_STATS_ACCUMULATOR = "bias_stats_accumulator"
-_STEPS_ACCUMULATOR = "steps_accumulator"
-_HANDLERS = "handlers"
+GBDTTrainingState = collections.namedtuple("GBDTTrainingState", [
+ "num_layer_examples", "num_layer_steps", "num_layers", "active_tree",
+ "active_layer", "continue_centering", "bias_stats_accumulator",
+ "steps_accumulator", "handlers"
+])
def _get_column_by_index(tensor, indices):
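Moving from string-keyed dict constants to a namedtuple gives attribute access, immutability, and a construction-time error for missing fields. A minimal standalone illustration of the difference:

    import collections

    # Reduced two-field version of the GBDTTrainingState defined above.
    State = collections.namedtuple("State", ["num_layers", "active_tree"])
    state = State(num_layers=3, active_tree=1)
    assert state.num_layers == 3          # replaces state["num_layers"]
    state = state._replace(num_layers=4)  # immutable: updates return a copy
    # State(num_layers=3) would raise TypeError: active_tree is required.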
@@ -287,6 +284,7 @@ class GradientBoostedDecisionTreeModel(object):
learner_config,
features,
logits_dimension,
+ loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS,
feature_columns=None,
use_core_columns=False,
output_leaf_index=False):
@@ -303,7 +301,10 @@ class GradientBoostedDecisionTreeModel(object):
learner_config: A learner config.
features: `dict` of `Tensor` objects.
logits_dimension: An int, the dimension of logits.
+ loss_reduction: Either `SUM_OVER_NONZERO_WEIGHTS` (mean) or `SUM`.
feature_columns: A list of feature columns.
+ use_core_columns: A boolean specifying whether core feature columns are
+ used.
output_leaf_index: A boolean variable indicating whether to output leaf
index into predictions dictionary.
@@ -326,6 +327,13 @@ class GradientBoostedDecisionTreeModel(object):
self._center_bias = center_bias
self._examples_per_layer = examples_per_layer
+ # Check loss reduction value.
+ if (loss_reduction != losses.Reduction.SUM and
+ loss_reduction != losses.Reduction.SUM_OVER_NONZERO_WEIGHTS):
+ raise ValueError(
+ "Invalid loss reduction is provided: %s." % loss_reduction)
+ self._loss_reduction = loss_reduction
+
# Fill in the defaults.
if (learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.MULTI_CLASS_STRATEGY_UNSPECIFIED):
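The constructor now rejects any reduction other than SUM and SUM_OVER_NONZERO_WEIGHTS. A dependency-free sketch of the check (the Reduction string constants are inlined here with their TF 1.x values so the sketch runs without TensorFlow):

    SUM = "weighted_sum"
    SUM_OVER_NONZERO_WEIGHTS = "weighted_sum_by_nonzero_weights"

    def check_loss_reduction(loss_reduction):
      if loss_reduction not in (SUM, SUM_OVER_NONZERO_WEIGHTS):
        raise ValueError("Invalid loss reduction provided: %s." % loss_reduction)

    check_loss_reduction(SUM)  # accepted
    # check_loss_reduction("none") would raise ValueError.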
@@ -383,6 +391,7 @@ class GradientBoostedDecisionTreeModel(object):
sparse_int_values, sparse_int_shapes) = extract_features(
features, self._feature_columns, use_core_columns)
logging.info("Active Feature Columns: " + str(fc_names))
+ logging.info("Learner config: " + str(learner_config))
self._fc_names = fc_names
self._dense_floats = dense_floats
self._sparse_float_indices = sparse_float_indices
@@ -565,7 +574,11 @@ class GradientBoostedDecisionTreeModel(object):
about predictions per example.
Returns:
- An op that adds a new tree to the ensemble.
+ Three values:
+ - An op that adds a new tree to the ensemble,
+ - An op that increments the stamp but removes all the trees and resets
+ the handlers (this can be used to reset the state of the ensemble), and
+ - A GBDTTrainingState namedtuple containing the training state.
Raises:
ValueError: if inputs are not valid.
@@ -642,6 +655,8 @@ class GradientBoostedDecisionTreeModel(object):
self._learner_config.regularization.tree_complexity, dtypes.float32)
min_node_weight = constant_op.constant(
self._learner_config.constraints.min_node_weight, dtypes.float32)
+ loss_uses_sum_reduction = self._loss_reduction == losses.Reduction.SUM
+ loss_uses_sum_reduction = constant_op.constant(loss_uses_sum_reduction)
epsilon = 0.01
num_quantiles = 100
strategy_tensor = constant_op.constant(strategy)
@@ -655,7 +670,8 @@ class GradientBoostedDecisionTreeModel(object):
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
- feature_column_group_id=dense_float_column_idx,
+ feature_column_group_id=constant_op.constant(
+ dense_float_column_idx),
epsilon=epsilon,
num_quantiles=num_quantiles,
dense_float_column=self._dense_floats[dense_float_column_idx],
@@ -663,7 +679,9 @@ class GradientBoostedDecisionTreeModel(object):
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
- init_stamp_token=init_stamp_token))
+ init_stamp_token=init_stamp_token,
+ loss_uses_sum_reduction=loss_uses_sum_reduction,
+ ))
fc_name_idx += 1
# Create handlers for sparse float columns.
@@ -675,7 +693,8 @@ class GradientBoostedDecisionTreeModel(object):
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
- feature_column_group_id=sparse_float_column_idx,
+ feature_column_group_id=constant_op.constant(
+ sparse_float_column_idx),
epsilon=epsilon,
num_quantiles=num_quantiles,
sparse_float_column=sparse_tensor.SparseTensor(
@@ -686,7 +705,8 @@ class GradientBoostedDecisionTreeModel(object):
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
- init_stamp_token=init_stamp_token))
+ init_stamp_token=init_stamp_token,
+ loss_uses_sum_reduction=loss_uses_sum_reduction))
fc_name_idx += 1
# Create handlers for sparse int columns.
@@ -698,7 +718,8 @@ class GradientBoostedDecisionTreeModel(object):
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
- feature_column_group_id=sparse_int_column_idx,
+ feature_column_group_id=constant_op.constant(
+ sparse_int_column_idx),
sparse_int_column=sparse_tensor.SparseTensor(
self._sparse_int_indices[sparse_int_column_idx],
self._sparse_int_values[sparse_int_column_idx],
@@ -707,7 +728,8 @@ class GradientBoostedDecisionTreeModel(object):
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
- init_stamp_token=init_stamp_token))
+ init_stamp_token=init_stamp_token,
+ loss_uses_sum_reduction=loss_uses_sum_reduction))
fc_name_idx += 1
# Create ensemble stats variables.
@@ -843,21 +865,45 @@ class GradientBoostedDecisionTreeModel(object):
for update in update_results.values():
stats_update_ops += update
- training_state = {
- _NUM_LAYER_EXAMPLES: num_layer_examples,
- _NUM_LAYER_STEPS: num_layer_steps,
- _NUM_LAYERS: num_layers,
- _ACTIVE_TREE: active_tree,
- _ACTIVE_LAYER: active_layer,
- _CONTINUE_CENTERING: continue_centering,
- _BIAS_STATS_ACCUMULATOR: bias_stats_accumulator,
- _STEPS_ACCUMULATOR: steps_accumulator,
- _HANDLERS: handlers
- }
- return stats_update_ops, training_state
-
- def increment_step_counter_and_maybe_update_ensemble(
- self, predictions_dict, batch_size, training_state):
+ training_state = GBDTTrainingState(
+ num_layer_examples=num_layer_examples,
+ num_layer_steps=num_layer_steps,
+ num_layers=num_layers,
+ active_tree=active_tree,
+ active_layer=active_layer,
+ continue_centering=continue_centering,
+ bias_stats_accumulator=bias_stats_accumulator,
+ steps_accumulator=steps_accumulator,
+ handlers=handlers)
+
+ reset_op = control_flow_ops.no_op()
+ if self._is_chief:
+ # Advance the ensemble stamp to throw away staggered workers.
+ stamp_token, _ = model_ops.tree_ensemble_serialize(self._ensemble_handle)
+ next_stamp_token = stamp_token + 1
+
+ reset_ops = []
+ for handler in handlers:
+ reset_ops.append(handler.make_splits(stamp_token, next_stamp_token, 0))
+ if self._center_bias:
+ reset_ops.append(
+ bias_stats_accumulator.flush(stamp_token, next_stamp_token))
+ reset_ops.append(steps_accumulator.flush(stamp_token, next_stamp_token))
+ reset_ops.append(self._finalized_trees.assign(0).op)
+ reset_ops.append(self._attempted_trees.assign(0).op)
+ reset_ops.append(
+ model_ops.tree_ensemble_deserialize(
+ self._ensemble_handle,
+ stamp_token=next_stamp_token,
+ tree_ensemble_config="",
+ name="reset_gbdt"))
+
+ reset_op = control_flow_ops.group([reset_ops])
+
+ return stats_update_ops, reset_op, training_state
+
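The reset path relies on stamp tokens: accumulators and the ensemble only accept updates carrying the current stamp, so serializing at the old stamp and deserializing an empty config at stamp + 1 atomically drops trees, in-flight statistics, and any work from staggered workers. A toy model of the mechanism in plain Python:

    # Toy model of stamp-token invalidation: writes tagged with a stale stamp
    # are ignored, so bumping the stamp discards outstanding work.
    class StampedStore(object):
      def __init__(self):
        self.stamp, self.items = 0, []

      def add(self, stamp, item):
        if stamp == self.stamp:  # staggered workers fail this check
          self.items.append(item)

      def reset(self):
        self.stamp += 1   # bump the stamp ...
        self.items = []   # ... and drop accumulated state

    store = StampedStore()
    store.add(0, "tree_0")
    old_stamp = store.stamp
    store.reset()
    store.add(old_stamp, "tree_1")  # ignored: the stamp is stale
    assert store.items == []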
+ def increment_step_counter_and_maybe_update_ensemble(self, predictions_dict,
+ training_state):
"""Increments number of visited examples and grows the ensemble.
If the number of visited examples reaches the target examples_per_layer,
@@ -866,24 +912,20 @@ class GradientBoostedDecisionTreeModel(object):
Args:
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
- batch_size: Number of examples in the batch.
training_state: `GBDTTrainingState` namedtuple returned by update_stats.
Returns:
An op that updates the counters and potentially grows the ensemble.
"""
+ batch_size = math_ops.cast(
+ array_ops.shape(predictions_dict[PREDICTIONS])[0], dtypes.float32)
ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
# Accumulate a step after updating stats.
- num_layer_examples = training_state[_NUM_LAYER_EXAMPLES]
- num_layer_steps = training_state[_NUM_LAYER_STEPS]
- num_layers = training_state[_NUM_LAYERS]
- active_tree = training_state[_ACTIVE_TREE]
- active_layer = training_state[_ACTIVE_LAYER]
- continue_centering = training_state[_CONTINUE_CENTERING]
- bias_stats_accumulator = training_state[_BIAS_STATS_ACCUMULATOR]
- steps_accumulator = training_state[_STEPS_ACCUMULATOR]
- handlers = training_state[_HANDLERS]
+ steps_accumulator = training_state.steps_accumulator
+ num_layer_examples = training_state.num_layer_examples
+ num_layer_steps = training_state.num_layer_steps
+ active_layer = training_state.active_layer
add_step_op = steps_accumulator.add(
ensemble_stamp, [0], [[0, 0]], [batch_size], [1.0])
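Computing the batch size from the predictions tensor (instead of threading it through as an argument) uses the dynamic shape, so it stays correct when the last batch is short. An equivalent TF 1.x snippet in isolation:

    import tensorflow as tf

    # Dynamic batch size, cast to float32 as in the code above.
    predictions = tf.placeholder(tf.float32, shape=[None, 1])
    batch_size = tf.cast(tf.shape(predictions)[0], tf.float32)
    with tf.Session() as sess:
      print(sess.run(batch_size, {predictions: [[0.], [1.], [2.]]}))  # 3.0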
@@ -910,11 +952,8 @@ class GradientBoostedDecisionTreeModel(object):
ensemble_update_ops.append(
control_flow_ops.cond(
acc_examples >= examples_per_layer,
- self.make_update_ensemble_fn(
- ensemble_stamp, steps_accumulator,
- bias_stats_accumulator, continue_centering,
- handlers, num_layers, active_tree,
- active_layer, dropout_seed, class_id),
+ self.make_update_ensemble_fn(ensemble_stamp, training_state,
+ dropout_seed, class_id),
control_flow_ops.no_op))
# Note, the loss is calculated from the prediction considering dropouts, so
@@ -922,9 +961,7 @@ class GradientBoostedDecisionTreeModel(object):
# high. eval_loss might be referred instead in the aspect of convergence.
return control_flow_ops.group(*ensemble_update_ops)
- def make_update_ensemble_fn(self, ensemble_stamp, steps_accumulator,
- bias_stats_accumulator, continue_centering,
- handlers, num_layers, active_tree, active_layer,
+ def make_update_ensemble_fn(self, ensemble_stamp, training_state,
dropout_seed, class_id):
"""A method to create the function which updates the tree ensemble."""
# Determine learning rate.
@@ -943,8 +980,9 @@ class GradientBoostedDecisionTreeModel(object):
# Get next stamp token.
next_ensemble_stamp = ensemble_stamp + 1
# Finalize bias stats.
- _, _, _, bias_grads, bias_hess = bias_stats_accumulator.flush(
- ensemble_stamp, next_ensemble_stamp)
+ _, _, _, bias_grads, bias_hess = (
+ training_state.bias_stats_accumulator.flush(ensemble_stamp,
+ next_ensemble_stamp))
# Finalize handler splits.
are_splits_ready_list = []
@@ -952,7 +990,7 @@ class GradientBoostedDecisionTreeModel(object):
gains_list = []
split_info_list = []
- for handler in handlers:
+ for handler in training_state.handlers:
(are_splits_ready,
partition_ids, gains, split_info) = handler.make_splits(
ensemble_stamp, next_ensemble_stamp, class_id)
@@ -985,7 +1023,7 @@ class GradientBoostedDecisionTreeModel(object):
next_stamp_token=next_ensemble_stamp,
delta_updates=delta_updates,
learner_config=self._learner_config_serialized)
- return continue_centering.assign(center_bias)
+ return training_state.continue_centering.assign(center_bias)
# Define ensemble growing operations.
def _grow_ensemble_ready_fn():
@@ -1030,7 +1068,7 @@ class GradientBoostedDecisionTreeModel(object):
# Update ensemble.
update_ops = [are_all_splits_ready]
if self._center_bias:
- update_model = control_flow_ops.cond(continue_centering,
+ update_model = control_flow_ops.cond(training_state.continue_centering,
_center_bias_fn, _grow_ensemble_fn)
else:
update_model = _grow_ensemble_fn()
@@ -1042,13 +1080,15 @@ class GradientBoostedDecisionTreeModel(object):
self._ensemble_handle, stamp_token=next_ensemble_stamp)
update_ops.append(self._finalized_trees.assign(stats.num_trees))
update_ops.append(self._attempted_trees.assign(stats.attempted_trees))
- update_ops.append(num_layers.assign(stats.num_layers))
- update_ops.append(active_tree.assign(stats.active_tree))
- update_ops.append(active_layer.assign(stats.active_layer))
+ update_ops.append(training_state.num_layers.assign(stats.num_layers))
+ update_ops.append(training_state.active_tree.assign(stats.active_tree))
+ update_ops.append(
+ training_state.active_layer.assign(stats.active_layer))
# Flush step stats.
update_ops.extend(
- steps_accumulator.flush(ensemble_stamp, next_ensemble_stamp))
+ training_state.steps_accumulator.flush(ensemble_stamp,
+ next_ensemble_stamp))
return control_flow_ops.group(*update_ops, name="update_ensemble")
return _update_ensemble
@@ -1063,7 +1103,8 @@ class GradientBoostedDecisionTreeModel(object):
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
- labels: Rank 2 `Tensor` representing labels per example.
+ labels: Rank 2 `Tensor` representing labels per example. Has no effect
+ on the training and is only kept for backward compatibility.
Returns:
An op that adds a new tree to the ensemble.
@@ -1071,11 +1112,11 @@ class GradientBoostedDecisionTreeModel(object):
Raises:
ValueError: if inputs are not valid.
"""
- batch_size = math_ops.cast(array_ops.shape(labels)[0], dtypes.float32)
- update_op, handlers = self.update_stats(loss, predictions_dict)
+ del labels # unused; kept for backward compatibility.
+ update_op, _, training_state = self.update_stats(loss, predictions_dict)
with ops.control_dependencies(update_op):
return self.increment_step_counter_and_maybe_update_ensemble(
- predictions_dict, batch_size, handlers)
+ predictions_dict, training_state)
def _get_weights(self, hessian_shape, hessians):
"""Derives weights to be used based on hessians and multiclass strategy."""
diff --git a/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch_test.py b/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch_test.py
index e3d4397fad..f7867d882d 100644
--- a/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch_test.py
+++ b/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch_test.py
@@ -29,6 +29,7 @@ from tensorflow.contrib.layers.python.layers import feature_column as feature_co
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
@@ -1560,6 +1561,301 @@ class GbdtTest(test_util.TensorFlowTestCase):
self.assertEquals(output.growing_metadata.num_layers_attempted, 2)
+ def testResetModelBeforeAndAfterSplit(self):
+ """Tests whether resetting works."""
+ with self.test_session():
+ # First build a small tree and train it to verify training works.
+ ensemble_handle = model_ops.tree_ensemble_variable(
+ stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
+ learner_config = learner_pb2.LearnerConfig()
+ learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
+ learner_config.num_classes = 2
+ learner_config.constraints.max_tree_depth = 1
+ features = {}
+ features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
+
+ gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
+ is_chief=True,
+ num_ps_replicas=0,
+ center_bias=False,
+ ensemble_handle=ensemble_handle,
+ examples_per_layer=1,
+ learner_config=learner_config,
+ logits_dimension=1,
+ features=features)
+
+ predictions = array_ops.constant(
+ [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
+ partition_ids = array_ops.zeros([4], dtypes.int32)
+ ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle)
+
+ predictions_dict = {
+ "predictions": predictions,
+ "predictions_no_dropout": predictions,
+ "partition_ids": partition_ids,
+ "ensemble_stamp": ensemble_stamp,
+ "num_trees": 12,
+ "max_tree_depth": 4,
+ }
+
+ labels = array_ops.ones([4, 1], dtypes.float32)
+ weights = array_ops.ones([4, 1], dtypes.float32)
+ loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions))
+
+ # Create train op.
+ update_op, reset_op, training_state = gbdt_model.update_stats(
+ loss, predictions_dict)
+ with ops.control_dependencies(update_op):
+ train_op = gbdt_model.increment_step_counter_and_maybe_update_ensemble(
+ predictions_dict, training_state)
+
+ variables.global_variables_initializer().run()
+ resources.initialize_resources(resources.shared_resources()).run()
+
+ original_stamp = ensemble_stamp.eval()
+ expected_tree = """
+ nodes {
+ dense_float_binary_split {
+ threshold: 1.0
+ left_id: 1
+ right_id: 2
+ }
+ node_metadata {
+ gain: 0
+ }
+ }
+ nodes {
+ leaf {
+ vector {
+ value: 0.25
+ }
+ }
+ }
+ nodes {
+ leaf {
+ vector {
+ value: 0.0
+ }
+ }
+ }"""
+
+ def _train_once_and_check(expect_split):
+ stamp = ensemble_stamp.eval()
+ train_op.run()
+ stamp_token, serialized = model_ops.tree_ensemble_serialize(
+ ensemble_handle)
+ output = tree_config_pb2.DecisionTreeEnsembleConfig()
+ output.ParseFromString(serialized.eval())
+ self.assertEquals(stamp_token.eval(), stamp + 1)
+ if expect_split:
+ # State of the ensemble after a split occurs.
+ self.assertEquals(len(output.trees), 1)
+ self.assertProtoEquals(expected_tree, output.trees[0])
+ else:
+ # State of the ensemble after a single accumulation but before any
+ # splitting occurs.
+ self.assertEquals(len(output.trees), 0)
+ self.assertProtoEquals("""
+ growing_metadata {
+ num_trees_attempted: 1
+ num_layers_attempted: 1
+ }""", output)
+
+ def _run_reset():
+ stamp_before_reset = ensemble_stamp.eval()
+ reset_op.run()
+ stamp_after_reset = ensemble_stamp.eval()
+ self.assertNotEquals(stamp_after_reset, stamp_before_reset)
+
+ _, serialized = model_ops.tree_ensemble_serialize(
+ ensemble_handle)
+ output = tree_config_pb2.DecisionTreeEnsembleConfig()
+ output.ParseFromString(serialized.eval())
+ self.assertProtoEquals("", output)
+
+ return stamp_after_reset
+
+ # Exit after one train_op, so no new layers are created but the handlers
+ # contain enough information to split on the next call to train.
+ _train_once_and_check(expect_split=False)
+ self.assertEquals(ensemble_stamp.eval(), original_stamp + 1)
+
+ # Reset the handlers so it still requires two training calls to split.
+ stamp_after_reset = _run_reset()
+
+ _train_once_and_check(expect_split=False)
+ _train_once_and_check(expect_split=True)
+ self.assertEquals(ensemble_stamp.eval(), stamp_after_reset + 2)
+
+ # This time, test that the reset_op works right after splitting.
+ stamp_after_reset = _run_reset()
+
+ # Test that after resetting, the tree can be trained as normal.
+ _train_once_and_check(expect_split=False)
+ _train_once_and_check(expect_split=True)
+ self.assertEquals(ensemble_stamp.eval(), stamp_after_reset + 2)
+
+ def testResetModelNonChief(self):
+ """Tests the reset function on a non-chief worker."""
+ with self.test_session():
+ # Create ensemble with one bias node.
+ ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
+ text_format.Merge(
+ """
+ trees {
+ nodes {
+ leaf {
+ vector {
+ value: 0.25
+ }
+ }
+ }
+ }
+ tree_weights: 1.0
+ tree_metadata {
+ num_tree_weight_updates: 1
+ num_layers_grown: 1
+ is_finalized: false
+ }""", ensemble_config)
+ ensemble_handle = model_ops.tree_ensemble_variable(
+ stamp_token=0,
+ tree_ensemble_config=ensemble_config.SerializeToString(),
+ name="tree_ensemble")
+ learner_config = learner_pb2.LearnerConfig()
+ learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
+ learner_config.num_classes = 2
+ learner_config.constraints.max_tree_depth = 1
+ features = {}
+ features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
+
+ gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
+ is_chief=False,
+ num_ps_replicas=0,
+ center_bias=False,
+ ensemble_handle=ensemble_handle,
+ examples_per_layer=1,
+ learner_config=learner_config,
+ logits_dimension=1,
+ features=features)
+
+ predictions = array_ops.constant(
+ [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
+ partition_ids = array_ops.zeros([4], dtypes.int32)
+ ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle)
+
+ predictions_dict = {
+ "predictions": predictions,
+ "predictions_no_dropout": predictions,
+ "partition_ids": partition_ids,
+ "ensemble_stamp": ensemble_stamp
+ }
+
+ labels = array_ops.ones([4, 1], dtypes.float32)
+ weights = array_ops.ones([4, 1], dtypes.float32)
+ loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions))
+
+ # Create reset op.
+ _, reset_op, _ = gbdt_model.update_stats(
+ loss, predictions_dict)
+
+ variables.global_variables_initializer().run()
+ resources.initialize_resources(resources.shared_resources()).run()
+
+ # Reset op doesn't do anything because this is a non-chief worker.
+ reset_op.run()
+ stamp_token, serialized = model_ops.tree_ensemble_serialize(
+ ensemble_handle)
+ output = tree_config_pb2.DecisionTreeEnsembleConfig()
+ output.ParseFromString(serialized.eval())
+ self.assertEquals(len(output.trees), 1)
+ self.assertEquals(len(output.tree_weights), 1)
+ self.assertEquals(stamp_token.eval(), 0)
+
+ def testResetModelWithCenterBias(self):
+ """Tests the reset function running on chief with bias centering."""
+ with self.test_session():
+ ensemble_handle = model_ops.tree_ensemble_variable(
+ stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
+ learner_config = learner_pb2.LearnerConfig()
+ learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
+ learner_config.num_classes = 2
+ learner_config.regularization.l1 = 0
+ learner_config.regularization.l2 = 0
+ learner_config.constraints.max_tree_depth = 1
+ learner_config.constraints.min_node_weight = 0
+ features = {}
+ features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
+
+ gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
+ is_chief=True,
+ num_ps_replicas=0,
+ center_bias=True,
+ ensemble_handle=ensemble_handle,
+ examples_per_layer=1,
+ learner_config=learner_config,
+ logits_dimension=1,
+ features=features)
+
+ predictions = array_ops.constant(
+ [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
+ partition_ids = array_ops.zeros([4], dtypes.int32)
+ ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle)
+
+ predictions_dict = {
+ "predictions": predictions,
+ "predictions_no_dropout": predictions,
+ "partition_ids": partition_ids,
+ "ensemble_stamp": ensemble_stamp,
+ "num_trees": 12,
+ }
+
+ labels = array_ops.ones([4, 1], dtypes.float32)
+ weights = array_ops.ones([4, 1], dtypes.float32)
+ loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions))
+
+ # Create train op.
+ update_op, reset_op, training_state = gbdt_model.update_stats(
+ loss, predictions_dict)
+ with ops.control_dependencies(update_op):
+ train_op = gbdt_model.increment_step_counter_and_maybe_update_ensemble(
+ predictions_dict, training_state)
+
+ variables.global_variables_initializer().run()
+ resources.initialize_resources(resources.shared_resources()).run()
+
+ # On first run, expect bias to be centered.
+ def train_and_check():
+ train_op.run()
+ _, serialized = model_ops.tree_ensemble_serialize(ensemble_handle)
+ output = tree_config_pb2.DecisionTreeEnsembleConfig()
+ output.ParseFromString(serialized.eval())
+ expected_tree = """
+ nodes {
+ leaf {
+ vector {
+ value: 0.25
+ }
+ }
+ }"""
+ self.assertEquals(len(output.trees), 1)
+ self.assertAllEqual(output.tree_weights, [1.0])
+ self.assertProtoEquals(expected_tree, output.trees[0])
+
+ train_and_check()
+ self.assertEquals(ensemble_stamp.eval(), 1)
+
+ reset_op.run()
+ stamp_token, serialized = model_ops.tree_ensemble_serialize(
+ ensemble_handle)
+ output = tree_config_pb2.DecisionTreeEnsembleConfig()
+ output.ParseFromString(serialized.eval())
+ self.assertEquals(len(output.trees), 0)
+ self.assertEquals(len(output.tree_weights), 0)
+ self.assertEquals(stamp_token.eval(), 2)
+
+ train_and_check()
+ self.assertEquals(ensemble_stamp.eval(), 3)
+
if __name__ == "__main__":
googletest.main()
diff --git a/tensorflow/contrib/boosted_trees/python/utils/losses.py b/tensorflow/contrib/boosted_trees/python/utils/losses.py
index ab7ac2aba6..b5ebaf1999 100644
--- a/tensorflow/contrib/boosted_trees/python/utils/losses.py
+++ b/tensorflow/contrib/boosted_trees/python/utils/losses.py
@@ -23,6 +23,12 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
+from tensorflow.python.ops.losses import losses
+
+
+def per_example_squared_hinge_loss(labels, weights, predictions):
+ loss = losses.hinge_loss(labels=labels, logits=predictions, weights=weights)
+ return math_ops.square(loss), control_flow_ops.no_op()
def per_example_logistic_loss(labels, weights, predictions):
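The new squared hinge composes the elementwise hinge max(0, 1 - y * logit), with labels mapped to ±1, and a square, so margin violations are penalized quadratically (tf.losses.hinge_loss additionally applies weights and a reduction on top of this form). A pure-Python check of the underlying math:

    def squared_hinge(label, logit):
      y = 1.0 if label > 0 else -1.0       # map {0, 1} labels to {-1, +1}
      hinge = max(0.0, 1.0 - y * logit)
      return hinge * hinge

    assert squared_hinge(1, 2.0) == 0.0    # confidently correct: no loss
    assert squared_hinge(1, 0.5) == 0.25   # inside the margin: (1 - 0.5)**2
    assert squared_hinge(0, 1.0) == 4.0    # wrong side: (1 + 1)**2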
@@ -126,7 +132,7 @@ def per_example_squared_loss(labels, weights, predictions):
def per_example_exp_loss(labels, weights, predictions, name=None, eps=0.1):
- """Exponential loss given labels, example weights and predictions.
+ """Trimmed exponential loss given labels, example weights and predictions.
Note that this is only for binary classification.
If logistic loss tries to make sure that the classifier is certain of its
@@ -211,3 +217,62 @@ def per_example_exp_loss(labels, weights, predictions, name=None, eps=0.1):
unweighted_loss = exp_with_logits(
name=name, eps=eps, labels=labels, logits=predictions)
return unweighted_loss * weights, control_flow_ops.no_op()
+
+
+def per_example_full_exp_loss(labels, weights, predictions, name=None):
+ """Full exponential loss given labels, example weights and predictions.
+
+ Note that this is only for binary classification.
+ The loss returned is exp(-targets * logits), where targets are converted to -1
+ and 1.
+
+ Args:
+ labels: Rank 2 (N, D) tensor of per-example labels.
+ weights: Rank 2 (N, 1) tensor of per-example weights.
+ predictions: Rank 2 (N, D) tensor of per-example predictions.
+ name: A name for the operation (optional).
+
+ Returns:
+ loss: A Rank 2 (N, 1) tensor of per-example exp loss.
+ update_op: An update operation to update the loss's internal state.
+ """
+
+ def full_exp_with_logits(name, labels=None, logits=None):
+ """Computes exponential loss given `logits`.
+
+ Args:
+ name: A name for the operation (optional).
+ labels: A `Tensor` of the same type and shape as `logits`.
+ logits: A `Tensor` of type `float32` or `float64`.
+
+ Returns:
+ A `Tensor` of the same shape as `logits` with the componentwise
+ exponential losses.
+
+ Raises:
+ ValueError: If `logits` and `labels` do not have the same shape.
+ """
+ with ops.name_scope(name, "exp_loss", [logits, labels]) as name:
+ logits = ops.convert_to_tensor(logits, name="logits")
+ labels = ops.convert_to_tensor(labels, name="labels")
+ try:
+ labels.get_shape().merge_with(logits.get_shape())
+ except ValueError:
+ raise ValueError("logits and labels must have the same shape (%s vs %s)"
+ % (logits.get_shape(), labels.get_shape()))
+
+ # Default threshold of 0 to switch between classes
+ zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
+ ones = array_ops.ones_like(logits, dtype=logits.dtype)
+ neg_ones = -array_ops.ones_like(logits, dtype=logits.dtype)
+
+ # Convert labels to 1 and -1
+ cond_labels = (labels > zeros)
+ labels_converted = array_ops.where(cond_labels, ones, neg_ones)
+
+ return math_ops.exp(-1.0 * logits * labels_converted)
+
+ labels = math_ops.to_float(labels)
+ unweighted_loss = full_exp_with_logits(
+ name=name, labels=labels, logits=predictions)
+ return unweighted_loss * weights, control_flow_ops.no_op()
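Unlike the trimmed per_example_exp_loss above it, the full variant has no eps cap, so confidently wrong predictions are penalized exponentially. A quick numeric check of the conversion-plus-exp math:

    import math

    def full_exp_loss(label, logit):
      y = 1.0 if label > 0 else -1.0  # {0, 1} labels -> {-1, +1}
      return math.exp(-y * logit)

    assert abs(full_exp_loss(1, 2.0) - math.exp(-2.0)) < 1e-12  # correct: small
    assert abs(full_exp_loss(0, 2.0) - math.exp(2.0)) < 1e-12   # wrong: large
    assert full_exp_loss(1, 0.0) == 1.0                         # at the boundary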
diff --git a/tensorflow/contrib/checkpoint/__init__.py b/tensorflow/contrib/checkpoint/__init__.py
index 8c1ce5c2a2..2fbaa31d5e 100644
--- a/tensorflow/contrib/checkpoint/__init__.py
+++ b/tensorflow/contrib/checkpoint/__init__.py
@@ -44,8 +44,8 @@ from tensorflow.core.protobuf.checkpointable_object_graph_pb2 import Checkpointa
from tensorflow.python.training.checkpointable.base import CheckpointableBase
from tensorflow.python.training.checkpointable.data_structures import List
from tensorflow.python.training.checkpointable.data_structures import Mapping
+from tensorflow.python.training.checkpointable.data_structures import NoDependency
from tensorflow.python.training.checkpointable.tracking import Checkpointable
-from tensorflow.python.training.checkpointable.tracking import NoDependency
from tensorflow.python.training.checkpointable.util import capture_dependencies
from tensorflow.python.training.checkpointable.util import list_objects
from tensorflow.python.training.checkpointable.util import object_metadata
diff --git a/tensorflow/contrib/checkpoint/python/containers_test.py b/tensorflow/contrib/checkpoint/python/containers_test.py
index 64d056bd68..ac85c7be80 100644
--- a/tensorflow/contrib/checkpoint/python/containers_test.py
+++ b/tensorflow/contrib/checkpoint/python/containers_test.py
@@ -26,6 +26,7 @@ from tensorflow.python.keras import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
+from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util
@@ -79,7 +80,7 @@ class UniqueNameTrackerTests(test.TestCase):
resource_variable_ops.ResourceVariable(4.), "y"))
slots.append(slotdeps.track(
resource_variable_ops.ResourceVariable(5.), "x"))
- self.slots = slots
+ self.slots = data_structures.NoDependency(slots)
manager = SlotManager()
self.evaluate([v.initializer for v in manager.slots])
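NoDependency (now exported from data_structures, matching the import move above) stores an attribute without adding it to the object's checkpoint dependency graph. A hedged sketch of the pattern, using the same contrib-era internals as this test:

    from tensorflow.python.ops import resource_variable_ops
    from tensorflow.python.training.checkpointable import data_structures
    from tensorflow.python.training.checkpointable import tracking

    class SlotHolder(tracking.Checkpointable):
      def __init__(self):
        v = resource_variable_ops.ResourceVariable(1.0)
        self.tracked = v  # saved/restored as the dependency named "tracked"
        # Without NoDependency the list would be auto-tracked; with it, the
        # attribute is kept as-is and contributes nothing to the checkpoint.
        self.untracked = data_structures.NoDependency([v])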
diff --git a/tensorflow/contrib/cloud/BUILD b/tensorflow/contrib/cloud/BUILD
index 1a7a3759ba..523a9efcf0 100644
--- a/tensorflow/contrib/cloud/BUILD
+++ b/tensorflow/contrib/cloud/BUILD
@@ -50,6 +50,7 @@ py_library(
deps = [
":gen_bigquery_reader_ops",
":gen_gcs_config_ops",
+ "//tensorflow/contrib/bigtable",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:io_ops",
"//tensorflow/python:util",
diff --git a/tensorflow/contrib/cloud/README.md b/tensorflow/contrib/cloud/README.md
new file mode 100644
index 0000000000..134ce057f4
--- /dev/null
+++ b/tensorflow/contrib/cloud/README.md
@@ -0,0 +1,18 @@
+# Cloud #
+
+## BigTable ##
+
+[Google Cloud BigTable](https://cloud.google.com/bigtable/) is a
+high-performance storage system that can store and serve training data. This
+contrib package contains an experimental integration with TensorFlow.
+
+> **Status: Highly experimental.** The current implementation is very much in
+> flux. Please use at your own risk! :-)
+
+<!-- TODO(saeta): Document usage / methods / etc. -->
+
+## Cloud Storage (GCS) ##
+
+The Google Cloud Storage ops allow the user to configure the GCS File System.
+
+<!-- TODO(saeta): Document usage / methods / etc. -->
diff --git a/tensorflow/contrib/cloud/__init__.py b/tensorflow/contrib/cloud/__init__.py
index ef7aa7624c..af81106a68 100644
--- a/tensorflow/contrib/cloud/__init__.py
+++ b/tensorflow/contrib/cloud/__init__.py
@@ -18,15 +18,24 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-# pylint: disable=line-too-long,wildcard-import
+import os
+
+# pylint: disable=line-too-long,wildcard-import,g-import-not-at-top
from tensorflow.contrib.cloud.python.ops.bigquery_reader_ops import *
from tensorflow.contrib.cloud.python.ops.gcs_config_ops import *
-# pylint: enable=line-too-long,wildcard-import
+
+if os.name != 'nt':
+ from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigTable
+ from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableClient
+
+del os
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'BigQueryReader',
+ 'BigTable',
+ 'BigtableClient',
'BlockCacheParams',
'configure_colab_session',
'configure_gcs',
diff --git a/tensorflow/contrib/cluster_resolver/BUILD b/tensorflow/contrib/cluster_resolver/BUILD
index c239e6f8f9..707f621184 100644
--- a/tensorflow/contrib/cluster_resolver/BUILD
+++ b/tensorflow/contrib/cluster_resolver/BUILD
@@ -12,6 +12,15 @@ licenses(["notice"]) # Apache 2.0
py_library(
name = "cluster_resolver_pip",
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+ deps = [
+ ":cluster_resolver_py",
+ ],
+)
+
+py_library(
+ name = "cluster_resolver_py",
srcs = [
"__init__.py",
"python/training/__init__.py",
@@ -19,7 +28,7 @@ py_library(
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
deps = [
- ":cluster_resolver_py",
+ ":base_cluster_resolver_py",
":gce_cluster_resolver_py",
":tpu_cluster_resolver_py",
"//tensorflow/python:util",
@@ -27,7 +36,7 @@ py_library(
)
py_library(
- name = "cluster_resolver_py",
+ name = "base_cluster_resolver_py",
srcs = ["python/training/cluster_resolver.py"],
srcs_version = "PY2AND3",
deps = [
@@ -40,7 +49,7 @@ py_library(
srcs = ["python/training/gce_cluster_resolver.py"],
srcs_version = "PY2AND3",
deps = [
- ":cluster_resolver_py",
+ ":base_cluster_resolver_py",
"//tensorflow/python:training",
],
)
@@ -50,13 +59,13 @@ py_library(
srcs = ["python/training/tpu_cluster_resolver.py"],
srcs_version = "PY2AND3",
deps = [
- ":cluster_resolver_py",
+ ":base_cluster_resolver_py",
"//tensorflow/python:training",
],
)
tf_py_test(
- name = "cluster_resolver_py_test",
+ name = "base_cluster_resolver_py_test",
srcs = ["python/training/cluster_resolver_test.py"],
additional_deps = [
":cluster_resolver_py",
diff --git a/tensorflow/contrib/cmake/CMakeLists.txt b/tensorflow/contrib/cmake/CMakeLists.txt
index a0a5b0e00c..708618dcb0 100644
--- a/tensorflow/contrib/cmake/CMakeLists.txt
+++ b/tensorflow/contrib/cmake/CMakeLists.txt
@@ -145,26 +145,41 @@ if(WIN32)
# temporary fix for #18241
add_definitions(-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=std::int64_t)
endif()
- add_definitions(-DNOMINMAX -D_WIN32_WINNT=0x0A00 -DLANG_CXX11)
- add_definitions(-DWIN32 -DOS_WIN -D_MBCS -DWIN32_LEAN_AND_MEAN -DNOGDI -DPLATFORM_WINDOWS)
+ add_definitions(-DNOMINMAX -D_WIN32_WINNT=0x0A00)
+ add_definitions(-DWIN32_LEAN_AND_MEAN -DNOGDI -DPLATFORM_WINDOWS)
add_definitions(-DTENSORFLOW_USE_EIGEN_THREADPOOL -DEIGEN_HAS_C99_MATH)
add_definitions(-DTF_COMPILE_LIBRARY)
- add_definitions(/bigobj /nologo /EHsc /GF /MP /Gm-)
+ add_compile_options(/bigobj /GF /MP /Gm-)
# Suppress warnings to reduce build log size.
- add_definitions(/wd4267 /wd4244 /wd4800 /wd4503 /wd4554 /wd4996 /wd4348 /wd4018)
- add_definitions(/wd4099 /wd4146 /wd4267 /wd4305 /wd4307)
- add_definitions(/wd4715 /wd4722 /wd4723 /wd4838 /wd4309 /wd4334)
- add_definitions(/wd4003 /wd4244 /wd4267 /wd4503 /wd4506 /wd4800 /wd4996)
+ add_compile_options(/wd4267 /wd4244 /wd4800 /wd4503 /wd4554 /wd4996 /wd4348 /wd4018)
+ add_compile_options(/wd4099 /wd4146 /wd4267 /wd4305 /wd4307)
+ add_compile_options(/wd4715 /wd4722 /wd4723 /wd4838 /wd4309 /wd4334)
+ add_compile_options(/wd4003 /wd4244 /wd4267 /wd4503 /wd4506 /wd4800 /wd4996)
# Suppress linker warnings.
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /ignore:4049 /ignore:4197 /ignore:4217 /ignore:4221")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /ignore:4049 /ignore:4197 /ignore:4217 /ignore:4221")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /ignore:4049 /ignore:4197 /ignore:4217 /ignore:4221")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
set(CMAKE_CXX_FLAGS_DEBUG "/D_DEBUG /MDd /Ob2")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /D_ITERATOR_DEBUG_LEVEL=0")
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /D_ITERATOR_DEBUG_LEVEL=0")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /D_ITERATOR_DEBUG_LEVEL=0")
+ set(compiler_flags
+ CMAKE_CXX_FLAGS
+ CMAKE_CXX_FLAGS_DEBUG
+ CMAKE_CXX_FLAGS_RELEASE
+ CMAKE_C_FLAGS
+ CMAKE_C_FLAGS_DEBUG
+ CMAKE_C_FLAGS_RELEASE
+ )
+ # Disable C++ exceptions.
+ foreach(flag ${compiler_flags})
+ string(REPLACE "/EHsc" "/EHs-c-" ${flag} "${${flag}}")
+ endforeach()
+ add_definitions(/D_HAS_EXCEPTIONS=0)
+ # Suppress 'noexcept used with no exception handling mode specified' warning
+ add_compile_options(/wd4577)
+
# Try to avoid flaky failures due to failed generation of generate.stamp files.
set(CMAKE_SUPPRESS_REGENERATION ON)
endif()
diff --git a/tensorflow/contrib/cmake/external/nsync.cmake b/tensorflow/contrib/cmake/external/nsync.cmake
index 6d50a4956b..eba3bcfc79 100644
--- a/tensorflow/contrib/cmake/external/nsync.cmake
+++ b/tensorflow/contrib/cmake/external/nsync.cmake
@@ -16,7 +16,7 @@ include (ExternalProject)
set(nsync_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/external/nsync/public)
set(nsync_URL https://github.com/google/nsync)
-set(nsync_TAG 5e8b19a81e5729922629dd505daa651f6ffdf107)
+set(nsync_TAG 1.20.0)
set(nsync_BUILD ${CMAKE_CURRENT_BINARY_DIR}/nsync/src/nsync)
set(nsync_INSTALL ${CMAKE_CURRENT_BINARY_DIR}/nsync/install)
diff --git a/tensorflow/contrib/cmake/python_modules.txt b/tensorflow/contrib/cmake/python_modules.txt
index d530572e91..75e00f3267 100644
--- a/tensorflow/contrib/cmake/python_modules.txt
+++ b/tensorflow/contrib/cmake/python_modules.txt
@@ -14,6 +14,7 @@ tensorflow/examples/tutorials
tensorflow/examples/tutorials/mnist
tensorflow/python
tensorflow/python/client
+tensorflow/python/compat
tensorflow/python/data
tensorflow/python/data/ops
tensorflow/python/data/util
@@ -61,6 +62,8 @@ tensorflow/python/saved_model
tensorflow/python/summary
tensorflow/python/summary/writer
tensorflow/python/tools
+tensorflow/python/tools/api
+tensorflow/python/tools/api/generator
tensorflow/python/training
tensorflow/python/training/checkpointable
tensorflow/python/user_ops
@@ -68,7 +71,6 @@ tensorflow/python/util
tensorflow/python/util/protobuf
tensorflow/tools
tensorflow/tools/api
-tensorflow/tools/api/generator
tensorflow/tools/graph_transforms
tensorflow/contrib
tensorflow/contrib/all_reduce
@@ -86,6 +88,8 @@ tensorflow/contrib/batching/python/ops
tensorflow/contrib/bayesflow
tensorflow/contrib/bayesflow/python
tensorflow/contrib/bayesflow/python/ops
+# tensorflow/contrib/bigtable/python
+# tensorflow/contrib/bigtable/python/ops
tensorflow/contrib/boosted_trees
tensorflow/contrib/boosted_trees/estimator_batch
tensorflow/contrib/boosted_trees/kernels
@@ -238,6 +242,8 @@ tensorflow/contrib/keras/api/keras/wrappers/scikit_learn
tensorflow/contrib/kernel_methods
tensorflow/contrib/kernel_methods/python
tensorflow/contrib/kernel_methods/python/mappers
+tensorflow/contrib/kinesis/python
+tensorflow/contrib/kinesis/python/ops
tensorflow/contrib/kfac
tensorflow/contrib/kfac/examples
tensorflow/contrib/kfac/python
diff --git a/tensorflow/contrib/cmake/tf_c.cmake b/tensorflow/contrib/cmake/tf_c.cmake
index 2e0a2fcef4..7a30eb94f5 100644
--- a/tensorflow/contrib/cmake/tf_c.cmake
+++ b/tensorflow/contrib/cmake/tf_c.cmake
@@ -36,16 +36,3 @@ add_dependencies(
tf_cc_while_loop
tf_core_lib
tf_protos_cc)
-
-if(tensorflow_BUILD_PYTHON_BINDINGS)
- add_library(tf_c_python_api OBJECT
- "${tensorflow_source_dir}/tensorflow/c/python_api.cc"
- "${tensorflow_source_dir}/tensorflow/c/python_api.h"
- )
- add_dependencies(
- tf_c_python_api
- tf_c
- tf_core_lib
- tf_core_framework
- tf_protos_cc)
-endif()
diff --git a/tensorflow/contrib/cmake/tf_core_framework.cmake b/tensorflow/contrib/cmake/tf_core_framework.cmake
index d044ac75ae..067c299a71 100644
--- a/tensorflow/contrib/cmake/tf_core_framework.cmake
+++ b/tensorflow/contrib/cmake/tf_core_framework.cmake
@@ -125,6 +125,7 @@ endfunction()
file(GLOB_RECURSE tf_protos_cc_srcs RELATIVE ${tensorflow_source_dir}
"${tensorflow_source_dir}/tensorflow/core/*.proto"
+ "${tensorflow_source_dir}/tensorflow/compiler/xla/*.proto"
"${tensorflow_source_dir}/tensorflow/contrib/boosted_trees/proto/*.proto"
"${tensorflow_source_dir}/tensorflow/contrib/tpu/proto/*.proto"
)
diff --git a/tensorflow/contrib/cmake/tf_python.cmake b/tensorflow/contrib/cmake/tf_python.cmake
index 786ea05c74..32b185f07b 100755
--- a/tensorflow/contrib/cmake/tf_python.cmake
+++ b/tensorflow/contrib/cmake/tf_python.cmake
@@ -456,6 +456,18 @@ add_custom_command(
COMMENT "Running SWIG to generate Python wrappers"
VERBATIM )
+add_library(tf_c_python_api OBJECT
+ "${tensorflow_source_dir}/tensorflow/c/python_api.cc"
+ "${tensorflow_source_dir}/tensorflow/c/python_api.h"
+)
+add_dependencies(
+ tf_c_python_api
+ tf_c
+ tf_core_lib
+ tf_core_framework
+ tf_protos_cc
+ tf_python_protos_cc)
+
set (pywrap_tensorflow_internal_src
"${tensorflow_source_dir}/tensorflow/core/profiler/internal/print_model_analysis.h"
"${tensorflow_source_dir}/tensorflow/core/profiler/internal/print_model_analysis.cc"
@@ -724,8 +736,8 @@ endif()
# Generate API __init__.py files.
########################################################
-# Parse tensorflow/tools/api/generator/BUILD to get list of generated files.
-FILE(READ ${tensorflow_source_dir}/tensorflow/tools/api/generator/api_gen.bzl api_generator_BUILD_text)
+# Parse tensorflow/python/tools/api/generator/BUILD to get list of generated files.
+FILE(READ ${tensorflow_source_dir}/tensorflow/python/tools/api/generator/api_gen.bzl api_generator_BUILD_text)
STRING(REGEX MATCH "# BEGIN GENERATED FILES.*# END GENERATED FILES" api_init_files_text ${api_generator_BUILD_text})
string(REPLACE "# BEGIN GENERATED FILES" "" api_init_files_text ${api_init_files_text})
string(REPLACE "# END GENERATED FILES" "" api_init_files_text ${api_init_files_text})
@@ -769,7 +781,7 @@ if (tensorflow_ENABLE_MKL_SUPPORT)
# Run create_python_api.py to generate API init files.
COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}/tf_python PATH=${PY_RUNTIME_ENV} ${PYTHON_EXECUTABLE}
- "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/tools/api/generator/create_python_api.py"
+ "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/tools/api/generator/create_python_api.py"
"--root_init_template=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/api_template.__init__.py"
"--apidir=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow"
"--package=tensorflow.python"
@@ -791,7 +803,7 @@ else (tensorflow_ENABLE_MKL_SUPPORT)
# Run create_python_api.py to generate API init files.
COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}/tf_python ${PYTHON_EXECUTABLE}
- "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/tools/api/generator/create_python_api.py"
+ "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/tools/api/generator/create_python_api.py"
"--root_init_template=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/api_template.__init__.py"
"--apidir=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow"
"--package=tensorflow.python"
@@ -812,8 +824,8 @@ add_dependencies(tf_python_api tf_python_ops)
# Generate API __init__.py files for tf.estimator.
########################################################
-# Parse tensorflow/tools/api/generator/BUILD to get list of generated files.
-FILE(READ ${tensorflow_source_dir}/tensorflow/tools/api/generator/api_gen.bzl api_generator_BUILD_text)
+# Parse tensorflow/python/tools/api/generator/BUILD to get list of generated files.
+FILE(READ ${tensorflow_source_dir}/tensorflow/python/tools/api/generator/api_gen.bzl api_generator_BUILD_text)
STRING(REGEX MATCH "# BEGIN GENERATED ESTIMATOR FILES.*# END GENERATED ESTIMATOR FILES" api_init_files_text ${api_generator_BUILD_text})
string(REPLACE "# BEGIN GENERATED ESTIMATOR FILES" "" api_init_files_text ${api_init_files_text})
string(REPLACE "# END GENERATED ESTIMATOR FILES" "" api_init_files_text ${api_init_files_text})
@@ -837,10 +849,11 @@ add_custom_command(
# Run create_python_api.py to generate API init files.
COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}/tf_python ${PYTHON_EXECUTABLE}
- "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/tools/api/generator/create_python_api.py"
+ "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/tools/api/generator/create_python_api.py"
"--apidir=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/estimator/api"
"--package=tensorflow.python.estimator"
"--apiname=estimator"
+ "--output_package=tensorflow.python.estimator.api"
"${estimator_api_init_list_file}"
COMMENT "Generating __init__.py files for Python API."
diff --git a/tensorflow/contrib/cmake/tf_stream_executor.cmake b/tensorflow/contrib/cmake/tf_stream_executor.cmake
index 9a37b68119..6d634cb170 100644
--- a/tensorflow/contrib/cmake/tf_stream_executor.cmake
+++ b/tensorflow/contrib/cmake/tf_stream_executor.cmake
@@ -64,8 +64,6 @@ file(GLOB tf_stream_executor_srcs
if (tensorflow_ENABLE_GPU)
file(GLOB tf_stream_executor_gpu_srcs
"${tensorflow_source_dir}/tensorflow/stream_executor/cuda/*.cc"
- "${tensorflow_source_dir}/tensorflow/compiler/xla/statusor.h"
- "${tensorflow_source_dir}/tensorflow/compiler/xla/statusor.cc"
)
if (NOT tensorflow_BUILD_CC_TESTS)
file(GLOB tf_stream_executor_gpu_tests
@@ -76,11 +74,11 @@ if (tensorflow_ENABLE_GPU)
list(APPEND tf_stream_executor_srcs ${tf_stream_executor_gpu_srcs})
endif()
-#file(GLOB_RECURSE tf_stream_executor_test_srcs
-# "${tensorflow_source_dir}/tensorflow/stream_executor/*_test.cc"
-# "${tensorflow_source_dir}/tensorflow/stream_executor/*_test.h"
-#)
-#list(REMOVE_ITEM tf_stream_executor_srcs ${tf_stream_executor_test_srcs})
+file(GLOB_RECURSE tf_stream_executor_test_srcs
+ "${tensorflow_source_dir}/tensorflow/stream_executor/*test.cc"
+ "${tensorflow_source_dir}/tensorflow/stream_executor/lib/*test.h"
+)
+list(REMOVE_ITEM tf_stream_executor_srcs ${tf_stream_executor_test_srcs})
if (NOT WIN32)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -lgomp")
diff --git a/tensorflow/contrib/cmake/tf_tests.cmake b/tensorflow/contrib/cmake/tf_tests.cmake
index eb9482dc25..b2330c4e34 100644
--- a/tensorflow/contrib/cmake/tf_tests.cmake
+++ b/tensorflow/contrib/cmake/tf_tests.cmake
@@ -193,6 +193,7 @@ if (tensorflow_BUILD_PYTHON_TESTS)
# flaky test
"${tensorflow_source_dir}/tensorflow/python/profiler/internal/run_metadata_test.py"
"${tensorflow_source_dir}/tensorflow/python/profiler/model_analyzer_test.py"
+ "${tensorflow_source_dir}/tensorflow/python/data/kernel_tests/map_dataset_op_test.py"
# Fails because uses data dependencies with bazel
"${tensorflow_source_dir}/tensorflow/python/saved_model/saved_model_test.py"
"${tensorflow_source_dir}/tensorflow/contrib/image/python/kernel_tests/sparse_image_warp_test.py"
@@ -216,7 +217,8 @@ if (tensorflow_BUILD_PYTHON_TESTS)
${tensorflow_source_dir}/tensorflow/python/kernel_tests/duplicate_op_test.py
${tensorflow_source_dir}/tensorflow/python/kernel_tests/invalid_op_test.py
${tensorflow_source_dir}/tensorflow/python/kernel_tests/ackermann_test.py
-
+ # Tests too large to run.
+ ${tensorflow_source_dir}/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py
)
if (WIN32)
set(tf_test_src_py_exclude
diff --git a/tensorflow/contrib/copy_graph/python/util/copy_elements.py b/tensorflow/contrib/copy_graph/python/util/copy_elements.py
index a0dd3881a8..5931c8a279 100644
--- a/tensorflow/contrib/copy_graph/python/util/copy_elements.py
+++ b/tensorflow/contrib/copy_graph/python/util/copy_elements.py
@@ -18,7 +18,7 @@ These functions allow for recursive copying of elements (ops and variables)
from one graph to another. The copied elements are initialized inside a
user-specified scope in the other graph. There are separate functions to
copy ops and variables.
-There is also a function to retrive the copied version of an op from the
+There is also a function to retrieve the copied version of an op from the
first graph inside a scope in the second graph.
@@copy_op_to_graph
@@ -77,7 +77,7 @@ def copy_variable_to_graph(org_instance, to_graph, scope=''):
else:
collections.append(scope + '/' + name)
- #See if its trainable.
+ #See if it's trainable.
trainable = (
org_instance in org_instance.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES))
@@ -162,7 +162,7 @@ def copy_op_to_graph(org_instance, to_graph, variables, scope=''):
if isinstance(org_instance, ops.Tensor):
- #If its a Tensor, it is one of the outputs of the underlying
+ #If it's a Tensor, it is one of the outputs of the underlying
#op. Therefore, copy the op itself and return the appropriate
#output.
op = org_instance.op
diff --git a/tensorflow/contrib/data/__init__.py b/tensorflow/contrib/data/__init__.py
index 156538b4e0..675330716b 100644
--- a/tensorflow/contrib/data/__init__.py
+++ b/tensorflow/contrib/data/__init__.py
@@ -34,6 +34,7 @@ See @{$guide/datasets$Importing Data} for an overview.
@@batch_and_drop_remainder
@@bucket_by_sequence_length
@@choose_from_datasets
+@@copy_to_device
@@dense_to_sparse_batch
@@enumerate_dataset
@@ -86,6 +87,7 @@ from tensorflow.contrib.data.python.ops.interleave_ops import sample_from_datase
from tensorflow.contrib.data.python.ops.interleave_ops import sloppy_interleave
from tensorflow.contrib.data.python.ops.iterator_ops import CheckpointInputPipelineHook
from tensorflow.contrib.data.python.ops.iterator_ops import make_saveable_from_iterator
+from tensorflow.contrib.data.python.ops.prefetching_ops import copy_to_device
from tensorflow.contrib.data.python.ops.prefetching_ops import prefetch_to_device
from tensorflow.contrib.data.python.ops.random_ops import RandomDataset
from tensorflow.contrib.data.python.ops.readers import CsvDataset
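Like its sibling prefetch_to_device, the newly exported copy_to_device is a dataset transformation meant for Dataset.apply. A hedged usage sketch (TF 1.x contrib API; the target device string is illustrative):

    import tensorflow as tf

    dataset = tf.data.Dataset.range(10)
    dataset = dataset.apply(tf.contrib.data.copy_to_device("/cpu:0"))
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    with tf.Session() as sess:
      sess.run(iterator.initializer)
      print(sess.run(next_element))  # 0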
diff --git a/tensorflow/contrib/data/kernels/prefetching_kernels.cc b/tensorflow/contrib/data/kernels/prefetching_kernels.cc
index a2bfce0362..b3d464d716 100644
--- a/tensorflow/contrib/data/kernels/prefetching_kernels.cc
+++ b/tensorflow/contrib/data/kernels/prefetching_kernels.cc
@@ -40,7 +40,8 @@ class FunctionBufferingResource : public ResourceBase {
const NameAttrList& func, int64 buffer_size,
const string& source_device,
const string& target_device,
- const std::vector<Tensor>& func_args)
+ const std::vector<Tensor>& func_args,
+ const DataTypeVector& output_types)
: lib_(lib),
pflr_(std::move(pflr)),
func_(func),
@@ -48,6 +49,7 @@ class FunctionBufferingResource : public ResourceBase {
source_device_(source_device),
target_device_(target_device),
func_args_(func_args),
+ output_types_(output_types),
handle_(kInvalidHandle),
is_buffering_(false),
end_of_sequence_(false),
@@ -176,6 +178,13 @@ class FunctionBufferingResource : public ResourceBase {
AllocatorAttributes arg_alloc_attr;
arg_alloc_attr.set_on_host(true);
opts.args_alloc_attrs.push_back(arg_alloc_attr);
+ for (const auto& dtype : output_types_) {
+ AllocatorAttributes ret_alloc_attrs;
+ if (DataTypeAlwaysOnHost(dtype)) {
+ ret_alloc_attrs.set_on_host(true);
+ }
+ opts.rets_alloc_attrs.push_back(ret_alloc_attrs);
+ }
if (opts.source_device != target_device_) {
opts.remote_execution = true;
}
@@ -233,6 +242,7 @@ class FunctionBufferingResource : public ResourceBase {
const string source_device_;
const string target_device_;
const std::vector<Tensor> func_args_;
+ const DataTypeVector output_types_;
FunctionLibraryRuntime::Handle handle_ GUARDED_BY(mu_);
std::deque<BufferElement> buffer_ GUARDED_BY(mu_);
std::deque<FunctionBufferCallback> requests_ GUARDED_BY(mu_);
@@ -250,6 +260,7 @@ class FunctionBufferResourceHandleOp : public OpKernel {
OP_REQUIRES_OK(ctx, ctx->GetAttr("buffer_size", &buffer_size_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("container", &container_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("shared_name", &name_));
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("output_types", &output_types_));
}
~FunctionBufferResourceHandleOp() override {
@@ -269,18 +280,20 @@ class FunctionBufferResourceHandleOp : public OpKernel {
std::vector<Tensor> func_args;
func_args.push_back(*string_arg);
+ const string& source_device = ctx->device()->name();
+
// Obtain and canonicalize target_device.
const Tensor* target_arg;
OP_REQUIRES_OK(ctx, ctx->input("target_device", &target_arg));
- const string& target_device =
- DeviceNameUtils::CanonicalizeDeviceName(target_arg->scalar<string>()());
+ string target_device;
+ OP_REQUIRES_OK(ctx, DeviceNameUtils::CanonicalizeDeviceName(
+ target_arg->scalar<string>()(), source_device,
+ &target_device));
FunctionLibraryRuntime* lib = ctx->function_library();
OP_REQUIRES(ctx, lib != nullptr,
errors::Internal("No function library is provided."));
- const string& source_device = ctx->device()->name();
-
mutex_lock l(mu_);
if (!initialized_) {
OP_REQUIRES_OK(ctx, cinfo_.Init(ctx->resource_manager(), def()));
@@ -297,7 +310,7 @@ class FunctionBufferResourceHandleOp : public OpKernel {
this](FunctionBufferingResource** ptr) {
*ptr = new FunctionBufferingResource(
clone_lib, std::move(pflr), func_, buffer_size_,
- source_device, target_device, func_args);
+ source_device, target_device, func_args, output_types_);
return Status::OK();
}));
core::ScopedUnref s(buffer);
@@ -319,6 +332,7 @@ class FunctionBufferResourceHandleOp : public OpKernel {
int64 buffer_size_;
string container_;
string name_;
+ DataTypeVector output_types_;
};
REGISTER_KERNEL_BUILDER(Name("FunctionBufferingResource")
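Note: the kernel changes above thread the new output_types attr into FunctionBufferingResource so that return buffers for host-only dtypes (such as strings) are allocated in host memory. A minimal sketch of how the Python wrapper is invoked with the new argument, mirroring the testStringsGPU case added later in this patch; treat it as illustrative rather than canonical:

# Sketch only: buffering string elements produced on the CPU for consumption
# on the GPU; output_types is the attr made mandatory by this patch.
from tensorflow.contrib.data.python.ops import prefetching_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops

ds = dataset_ops.Dataset.from_tensor_slices(["a", "b", "c"])
ds_iterator_handle = ds.make_one_shot_iterator().string_handle()

@function.Defun(dtypes.string)
def _remote_fn(h):
  remote_iterator = iterator_ops.Iterator.from_string_handle(
      h, ds.output_types, ds.output_shapes)
  return remote_iterator.get_next()

with ops.device("/gpu:0"):
  buffer_resource_handle = prefetching_ops.function_buffering_resource(
      f=_remote_fn,
      output_types=[dtypes.string],  # string buffers must live on the host
      target_device=constant_op.constant("/cpu:0"),
      string_arg=ds_iterator_handle,
      buffer_size=3,
      shared_name="strings")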
diff --git a/tensorflow/contrib/data/ops/dataset_ops.cc b/tensorflow/contrib/data/ops/dataset_ops.cc
index f48e96509a..8413fcaf87 100644
--- a/tensorflow/contrib/data/ops/dataset_ops.cc
+++ b/tensorflow/contrib/data/ops/dataset_ops.cc
@@ -104,6 +104,7 @@ REGISTER_OP("FunctionBufferingResource")
.Attr("container: string")
.Attr("f: func")
.Attr("buffer_size: int")
+ .Attr("output_types: list(type)")
.SetShapeFn(shape_inference::UnknownShape)
.Doc(R"doc(
Creates a resource that fills up a buffer by making function calls.
@@ -117,6 +118,7 @@ container: If non-empty, this resource is placed in the given container.
Otherwise, a default container is used.
shared_name: If non-empty, this resource will be shared under the given name
across multiple sessions.
+output_types: The type list for the return values.
)doc");
REGISTER_OP("FunctionBufferingResourceGetNext")
diff --git a/tensorflow/contrib/data/python/kernel_tests/BUILD b/tensorflow/contrib/data/python/kernel_tests/BUILD
index d81654e039..18457320b9 100644
--- a/tensorflow/contrib/data/python/kernel_tests/BUILD
+++ b/tensorflow/contrib/data/python/kernel_tests/BUILD
@@ -188,6 +188,7 @@ py_test(
"optonly",
],
deps = [
+ "//tensorflow/contrib/data/python/ops:batching",
"//tensorflow/contrib/data/python/ops:error_ops",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
@@ -228,9 +229,11 @@ cuda_py_test(
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:function",
"//tensorflow/python:resource_variable_ops",
+ "//tensorflow/python/compat:compat",
"//tensorflow/python/data/ops:dataset_ops",
"//tensorflow/python/data/ops:iterator_ops",
],
+ tags = ["no_windows_gpu"],
)
py_test(
@@ -377,6 +380,7 @@ py_test(
"//tensorflow/python:sparse_tensor",
"//tensorflow/python/data/ops:dataset_ops",
"//third_party/py/numpy",
+ "@absl_py//absl/testing:parameterized",
],
)
@@ -466,6 +470,28 @@ py_test(
)
py_test(
+ name = "window_dataset_op_test",
+ size = "medium",
+ srcs = ["window_dataset_op_test.py"],
+ srcs_version = "PY2AND3",
+ tags = [
+ "no_pip",
+ ],
+ deps = [
+ "//tensorflow/contrib/data/python/ops:batching",
+ "//tensorflow/contrib/data/python/ops:grouping",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:dtypes",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:sparse_tensor",
+ "//tensorflow/python/data/ops:dataset_ops",
+ "//third_party/py/numpy",
+ "@absl_py//absl/testing:parameterized",
+ ],
+)
+
+py_test(
name = "writer_ops_test",
size = "small",
srcs = ["writer_ops_test.py"],
diff --git a/tensorflow/contrib/data/python/kernel_tests/batch_dataset_op_test.py b/tensorflow/contrib/data/python/kernel_tests/batch_dataset_op_test.py
index af97fbf87a..42adfd17f0 100644
--- a/tensorflow/contrib/data/python/kernel_tests/batch_dataset_op_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/batch_dataset_op_test.py
@@ -293,7 +293,7 @@ class BatchDatasetTest(test.TestCase, parameterized.TestCase):
ph2: np.arange(8).astype(np.int32)
})
with self.assertRaises(errors.InvalidArgumentError):
- print(sess.run(next_element))
+ sess.run(next_element)
# No 0th dimension (i.e. scalar value) for one component.
sess.run(
@@ -303,7 +303,7 @@ class BatchDatasetTest(test.TestCase, parameterized.TestCase):
ph2: 7
})
with self.assertRaises(errors.InvalidArgumentError):
- print(sess.run(next_element))
+ sess.run(next_element)
def testBatchAndDropRemainder(self):
components = (np.arange(7),
diff --git a/tensorflow/contrib/data/python/kernel_tests/bucketing_test.py b/tensorflow/contrib/data/python/kernel_tests/bucketing_test.py
index 5fc7e51d81..2022c1f2bd 100644
--- a/tensorflow/contrib/data/python/kernel_tests/bucketing_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/bucketing_test.py
@@ -616,7 +616,44 @@ class BucketBySequenceLength(test.TestCase):
batch_sizes = batch_sizes[:-1]
self.assertEqual(sum(batch_sizes_val), sum(batch_sizes))
self.assertEqual(sorted(batch_sizes), sorted(batch_sizes_val))
- self.assertEqual(sorted(boundaries), sorted(lengths_val))
+ self.assertEqual([boundary - 1 for boundary in sorted(boundaries)],
+ sorted(lengths_val))
+
+ def testPadToBoundaryNoExtraneousPadding(self):
+
+ boundaries = [3, 7, 11]
+ batch_sizes = [2, 2, 2, 2]
+ lengths = range(1, 11)
+
+ def element_gen():
+ for length in lengths:
+ yield ([1] * length,)
+
+ element_len = lambda element: array_ops.shape(element)[0]
+ dataset = dataset_ops.Dataset.from_generator(
+ element_gen, (dtypes.int64,), ([None],)).apply(
+ grouping.bucket_by_sequence_length(
+ element_len, boundaries, batch_sizes,
+ pad_to_bucket_boundary=True))
+ batch, = dataset.make_one_shot_iterator().get_next()
+
+ with self.test_session() as sess:
+ batches = []
+ for _ in range(5):
+ batches.append(sess.run(batch))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(batch)
+
+ self.assertAllEqual(batches[0], [[1, 0],
+ [1, 1]])
+ self.assertAllEqual(batches[1], [[1, 1, 1, 0, 0, 0],
+ [1, 1, 1, 1, 0, 0]])
+ self.assertAllEqual(batches[2], [[1, 1, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1, 1]])
+ self.assertAllEqual(batches[3], [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
+ self.assertAllEqual(batches[4], [[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
def testTupleElements(self):
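Note: the relaxed assertion above follows from the bucket arithmetic. With pad_to_bucket_boundary=True, a sequence assigned to the bucket below boundary b can be at most b - 1 elements long, so the batch is padded to width b - 1 rather than b. Spelled out for the boundaries used in the new test:

# Bucket widths for boundaries = [3, 7, 11] with pad_to_bucket_boundary=True:
#   lengths in [1, 3)  -> padded width 3 - 1 = 2
#   lengths in [3, 7)  -> padded width 7 - 1 = 6
#   lengths in [7, 11) -> padded width 11 - 1 = 10
# These match the row widths asserted in testPadToBoundaryNoExtraneousPadding
# (2, 6, 6, 10 and 10 for the five batches).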
diff --git a/tensorflow/contrib/data/python/kernel_tests/map_dataset_op_test.py b/tensorflow/contrib/data/python/kernel_tests/map_dataset_op_test.py
index 270a2297b4..b7025f3802 100644
--- a/tensorflow/contrib/data/python/kernel_tests/map_dataset_op_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/map_dataset_op_test.py
@@ -17,19 +17,28 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import hashlib
+import itertools
import os
+import time
import numpy as np
+from tensorflow.contrib.data.python.ops import batching
from tensorflow.contrib.data.python.ops import error_ops
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import io_ops
+from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
+_NUMPY_RANDOM_SEED = 42
+
class MapDatasetTest(test.TestCase):
@@ -135,5 +144,125 @@ class MapDatasetTest(test.TestCase):
sess.run(get_next)
+class MapDatasetBenchmark(test.Benchmark):
+
+ # The purpose of this benchmark is to compare the performance of chaining vs
+ # fusing of the map and batch transformations across various configurations.
+ #
+ # NOTE: It is recommended to build the benchmark with
+ # `-c opt --copt=-mavx --copt=-mavx2 --copt=-mfma --copt=-gmlt`
+ # and execute it on a machine with at least 32 CPU cores.
+ def benchmarkMapAndBatch(self):
+
+ # Sequential pipeline configurations.
+ seq_elem_size_series = itertools.product([1], [1], [1, 2, 4, 8], [16])
+ seq_batch_size_series = itertools.product([1], [1], [1], [8, 16, 32, 64])
+
+ # Parallel pipeline configuration.
+ par_elem_size_series = itertools.product([32], [32], [1, 2, 4, 8], [256])
+ par_batch_size_series = itertools.product([32], [32], [1],
+ [128, 256, 512, 1024])
+ par_num_calls_series = itertools.product([8, 16, 32, 64], [32], [1], [512])
+ par_inter_op_series = itertools.product([32], [8, 16, 32, 64], [1], [512])
+
+ def name(method, label, num_calls, inter_op, element_size, batch_size):
+ return ("%s_id_%s_num_calls_%d_inter_op_%d_elem_size_%d_batch_size_%d" % (
+ method,
+          hashlib.sha1(label.encode("utf-8")).hexdigest(),
+ num_calls,
+ inter_op,
+ element_size,
+ batch_size,
+ ))
+
+ def benchmark(label, series):
+
+ print("%s:" % label)
+ for num_calls, inter_op, element_size, batch_size in series:
+
+ num_iters = 1024 // (
+ (element_size * batch_size) // min(num_calls, inter_op))
+ k = 1024 * 1024
+ dataset = dataset_ops.Dataset.from_tensors((np.random.rand(
+ element_size, 4 * k), np.random.rand(4 * k, 1))).repeat()
+
+ chained_dataset = dataset.map(
+ math_ops.matmul,
+ num_parallel_calls=num_calls).batch(batch_size=batch_size)
+ chained_iterator = chained_dataset.make_one_shot_iterator()
+ chained_get_next = chained_iterator.get_next()
+
+ chained_deltas = []
+ with session.Session(
+ config=config_pb2.ConfigProto(
+ inter_op_parallelism_threads=inter_op,
+ use_per_session_threads=True)) as sess:
+ for _ in range(5):
+ sess.run(chained_get_next.op)
+ for _ in range(num_iters):
+ start = time.time()
+ sess.run(chained_get_next.op)
+ end = time.time()
+ chained_deltas.append(end - start)
+
+        fused_dataset = dataset.apply(
+ batching.map_and_batch(
+ math_ops.matmul,
+ num_parallel_calls=num_calls,
+ batch_size=batch_size))
+ fused_iterator = fused_dataset.make_one_shot_iterator()
+ fused_get_next = fused_iterator.get_next()
+
+ fused_deltas = []
+ with session.Session(
+ config=config_pb2.ConfigProto(
+ inter_op_parallelism_threads=inter_op,
+ use_per_session_threads=True)) as sess:
+
+ for _ in range(5):
+ sess.run(fused_get_next.op)
+ for _ in range(num_iters):
+ start = time.time()
+ sess.run(fused_get_next.op)
+ end = time.time()
+ fused_deltas.append(end - start)
+
+ print(
+ "batch size: %d, num parallel calls: %d, inter-op parallelism: %d, "
+ "element size: %d, num iters: %d\nchained wall time: %f (median), "
+ "%f (mean), %f (stddev), %f (min), %f (max)\n fused wall time: "
+ "%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n "
+ "chained/fused: %.2fx (median), %.2fx (mean)" %
+ (batch_size, num_calls, inter_op, element_size, num_iters,
+ np.median(chained_deltas), np.mean(chained_deltas),
+ np.std(chained_deltas), np.min(chained_deltas),
+ np.max(chained_deltas), np.median(fused_deltas),
+ np.mean(fused_deltas), np.std(fused_deltas), np.min(fused_deltas),
+ np.max(fused_deltas),
+ np.median(chained_deltas) / np.median(fused_deltas),
+ np.mean(chained_deltas) / np.mean(fused_deltas)))
+
+ self.report_benchmark(
+ iters=num_iters,
+ wall_time=np.median(chained_deltas),
+ name=name("chained", label, num_calls, inter_op, element_size,
+ batch_size))
+
+ self.report_benchmark(
+ iters=num_iters,
+ wall_time=np.median(fused_deltas),
+ name=name("fused", label, num_calls, inter_op, element_size,
+ batch_size))
+
+ print("")
+
+ np.random.seed(_NUMPY_RANDOM_SEED)
+ benchmark("Sequential element size evaluation", seq_elem_size_series)
+ benchmark("Sequential batch size evaluation", seq_batch_size_series)
+ benchmark("Parallel element size evaluation", par_elem_size_series)
+ benchmark("Parallel batch size evaluation", par_batch_size_series)
+ benchmark("Transformation parallelism evaluation", par_num_calls_series)
+ benchmark("Threadpool size evaluation", par_inter_op_series)
+
if __name__ == "__main__":
test.main()
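Note: the benchmark added above times the same pipeline in two forms. For reference, the two shapes being compared, as a minimal sketch using the contrib batching API the benchmark exercises:

from tensorflow.contrib.data.python.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import math_ops

dataset = dataset_ops.Dataset.range(1000)

# Chained: map and batch run as two separate pipeline stages.
chained = dataset.map(
    math_ops.square, num_parallel_calls=4).batch(batch_size=32)

# Fused: one map_and_batch stage with the same semantics, which the
# benchmark above measures against the chained form.
fused = dataset.apply(
    batching.map_and_batch(
        math_ops.square, num_parallel_calls=4, batch_size=32))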
diff --git a/tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py b/tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py
index e35be8a23f..21eebccd11 100644
--- a/tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py
@@ -35,8 +35,6 @@ class OptimizeDatasetTest(test.TestCase):
with self.test_session() as sess:
graph = graph_pb2.GraphDef().FromString(
sess.run(dataset._as_serialized_graph()))
- self.assertTrue(
- all([node.op != "MapAndBatchDatasetV2" for node in graph.node]))
self.assertAllEqual([x * x for x in range(10)], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@@ -50,8 +48,6 @@ class OptimizeDatasetTest(test.TestCase):
with self.test_session() as sess:
graph = graph_pb2.GraphDef().FromString(
sess.run(dataset._as_serialized_graph()))
- self.assertTrue(
- all([node.op != "MapAndBatchDatasetV2" for node in graph.node]))
self.assertAllEqual([x * x for x in range(10)], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@@ -65,12 +61,21 @@ class OptimizeDatasetTest(test.TestCase):
with self.test_session() as sess:
graph = graph_pb2.GraphDef().FromString(
sess.run(dataset._as_serialized_graph()))
- self.assertTrue(
- any([node.op == "MapAndBatchDatasetV2" for node in graph.node]))
self.assertAllEqual([x * x for x in range(10)], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
+ def testFunctionLibraryDefinitionModification(self):
+ dataset = dataset_ops.Dataset.from_tensors(0).map(lambda x: x).apply(
+ optimization.optimize(["_test_only_function_rename"]))
+ iterator = dataset.make_one_shot_iterator()
+ get_next = iterator.get_next()
+
+ with self.test_session() as sess:
+ with self.assertRaisesRegexp(errors.NotFoundError,
+ "Function .* is not defined."):
+ sess.run(get_next)
+
if __name__ == "__main__":
test.main()
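Note: the new test drives a rewrite that mutates the function library; _test_only_function_rename is a test-only pass. The general pattern for opting a pipeline into named optimizations is a one-line apply. The pass name below is illustrative (the map-and-batch fusion exercised by the surrounding tests); any registered optimization name works:

from tensorflow.contrib.data.python.ops import optimization
from tensorflow.python.data.ops import dataset_ops

# Request a specific rewrite by name when serializing the dataset graph.
dataset = dataset_ops.Dataset.range(10).map(
    lambda x: x * x).batch(10).apply(
        optimization.optimize(["map_and_batch_fusion"]))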
diff --git a/tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py b/tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py
index b08132cd72..82543b1039 100644
--- a/tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py
@@ -21,6 +21,7 @@ import threading
from tensorflow.contrib.data.python.ops import prefetching_ops
from tensorflow.core.protobuf import config_pb2
+from tensorflow.python.compat import compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
@@ -68,6 +69,7 @@ class PrefetchingKernelsOpsTest(test.TestCase):
with ops.device(device1):
buffer_resource_handle = prefetching_ops.function_buffering_resource(
f=_remote_fn,
+ output_types=[dtypes.float32],
target_device=target,
string_arg=ds_iterator_handle,
buffer_size=3,
@@ -85,8 +87,7 @@ class PrefetchingKernelsOpsTest(test.TestCase):
return (prefetch_op, reset_op, destroy_op)
def _prefetch_fn_helper_one_shot(self, buffer_name, device0, device1):
- worker_config = config_pb2.ConfigProto()
- worker_config.device_count["CPU"] = 2
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
ds, ds_iterator = self._create_ds_and_iterator(device0, initializable=False)
prefetch_op, _, destroy_op = self._create_ops(ds, ds_iterator, buffer_name,
@@ -125,8 +126,7 @@ class PrefetchingKernelsOpsTest(test.TestCase):
"/job:localhost/replica:0/task:0/gpu:0")
def testReinitialization(self):
- worker_config = config_pb2.ConfigProto()
- worker_config.device_count["CPU"] = 2
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
device0 = "/job:localhost/replica:0/task:0/cpu:0"
device1 = "/job:localhost/replica:0/task:0/cpu:1"
@@ -166,8 +166,7 @@ class PrefetchingKernelsOpsTest(test.TestCase):
sess.run(destroy_op)
def testReinitializationOutOfRange(self):
- worker_config = config_pb2.ConfigProto()
- worker_config.device_count["CPU"] = 2
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
device0 = "/job:localhost/replica:0/task:0/cpu:0"
device1 = "/job:localhost/replica:0/task:0/cpu:1"
@@ -201,6 +200,49 @@ class PrefetchingKernelsOpsTest(test.TestCase):
sess.run(destroy_op)
+ def testStringsGPU(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPU available")
+
+ device0 = "/job:localhost/replica:0/task:0/cpu:0"
+ device1 = "/job:localhost/replica:0/task:0/gpu:0"
+
+ ds = dataset_ops.Dataset.from_tensor_slices(["a", "b", "c"])
+ ds_iterator = ds.make_one_shot_iterator()
+ ds_iterator_handle = ds_iterator.string_handle()
+
+ @function.Defun(dtypes.string)
+ def _remote_fn(h):
+ remote_iterator = iterator_ops.Iterator.from_string_handle(
+ h, ds.output_types, ds.output_shapes)
+ return remote_iterator.get_next()
+
+ target = constant_op.constant(device0)
+ with ops.device(device1):
+ buffer_resource_handle = prefetching_ops.function_buffering_resource(
+ f=_remote_fn,
+ output_types=[dtypes.string],
+ target_device=target,
+ string_arg=ds_iterator_handle,
+ buffer_size=3,
+ shared_name="strings")
+
+ with ops.device(device1):
+ prefetch_op = prefetching_ops.function_buffering_resource_get_next(
+ function_buffer_resource=buffer_resource_handle,
+ output_types=[dtypes.string])
+ destroy_op = resource_variable_ops.destroy_resource_op(
+ buffer_resource_handle, ignore_lookup_error=True)
+
+ with self.test_session() as sess:
+ self.assertEqual([b"a"], sess.run(prefetch_op))
+ self.assertEqual([b"b"], sess.run(prefetch_op))
+ self.assertEqual([b"c"], sess.run(prefetch_op))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(prefetch_op)
+
+ sess.run(destroy_op)
+
class PrefetchToDeviceTest(test.TestCase):
@@ -227,14 +269,43 @@ class PrefetchToDeviceTest(test.TestCase):
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
- worker_config = config_pb2.ConfigProto()
- worker_config.device_count["CPU"] = 2
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
+ def testPrefetchToSameDevice(self):
+ host_dataset = dataset_ops.Dataset.range(10)
+ device_dataset = host_dataset.apply(
+ prefetching_ops.prefetch_to_device(
+ "/job:localhost/replica:0/task:0/device:CPU:0"))
+
+    # NOTE(mrry): This device block creates the "host" dataset and iterator on
+    # /cpu:0, which is also the prefetch target, so this test exercises
+    # prefetching to the same device. In typical use the target would be a
+    # different device, such as a GPU that does not support the dataset ops.
+ with ops.device("/cpu:0"):
+ iterator = device_dataset.make_one_shot_iterator()
+
+ self.assertEqual(host_dataset.output_types, device_dataset.output_types)
+ self.assertEqual(host_dataset.output_types, iterator.output_types)
+ self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
+ self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
+ self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
+ self.assertEqual(host_dataset.output_classes, iterator.output_classes)
+
+ next_element = iterator.get_next()
+ self.assertEqual(dtypes.int64, next_element.dtype)
+ self.assertEqual([], next_element.shape)
+
+ with self.test_session() as sess:
+ for i in range(10):
+ self.assertEqual(i, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
def testPrefetchDictToDevice(self):
host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
device_dataset = host_dataset.apply(
@@ -258,8 +329,7 @@ class PrefetchToDeviceTest(test.TestCase):
self.assertEqual(dtypes.int64, next_element["a"].dtype)
self.assertEqual([], next_element["a"].shape)
- worker_config = config_pb2.ConfigProto()
- worker_config.device_count["CPU"] = 2
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
for i in range(10):
self.assertEqual({"a": i}, sess.run(next_element))
@@ -292,8 +362,7 @@ class PrefetchToDeviceTest(test.TestCase):
next_element = iterator.get_next()
self.assertEqual(dtypes.int64, next_element.dtype)
- worker_config = config_pb2.ConfigProto()
- worker_config.device_count["CPU"] = 2
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
for i in range(10):
actual = sess.run(next_element)
@@ -343,8 +412,7 @@ class PrefetchToDeviceTest(test.TestCase):
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
- worker_config = config_pb2.ConfigProto()
- worker_config.device_count["CPU"] = 2
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
sess.run(iterator.initializer)
for i in range(5):
@@ -377,5 +445,467 @@ class PrefetchToDeviceTest(test.TestCase):
sess.run(next_element)
+class CopyToDeviceTest(test.TestCase):
+
+ def testCopyToDevice(self):
+ host_dataset = dataset_ops.Dataset.range(10)
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/cpu:1"))
+
+ with ops.device("/cpu:1"):
+ iterator = device_dataset.make_one_shot_iterator()
+ next_element = iterator.get_next()
+
+ self.assertEqual(host_dataset.output_types, device_dataset.output_types)
+ self.assertEqual(host_dataset.output_types, iterator.output_types)
+ self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
+ self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
+ self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
+ self.assertEqual(host_dataset.output_classes, iterator.output_classes)
+
+ self.assertEqual(dtypes.int64, next_element.dtype)
+ self.assertEqual([], next_element.shape)
+
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
+ with self.test_session(config=worker_config) as sess:
+ for i in range(10):
+ self.assertEqual(i, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToDeviceInt32(self):
+ host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/cpu:1"))
+
+ with ops.device("/cpu:1"):
+ iterator = device_dataset.make_one_shot_iterator()
+ next_element = iterator.get_next()
+
+ self.assertEqual(host_dataset.output_types, device_dataset.output_types)
+ self.assertEqual(host_dataset.output_types, iterator.output_types)
+ self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
+ self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
+ self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
+ self.assertEqual(host_dataset.output_classes, iterator.output_classes)
+
+ self.assertEqual(dtypes.int32, next_element.dtype)
+ self.assertEqual((4,), next_element.shape)
+
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
+ with self.test_session(config=worker_config) as sess:
+ self.assertAllEqual([0, 1, 2, 3], sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToSameDevice(self):
+ host_dataset = dataset_ops.Dataset.range(10)
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/cpu:0"))
+
+ with ops.device("/cpu:0"):
+ iterator = device_dataset.make_one_shot_iterator()
+ next_element = iterator.get_next()
+
+ self.assertEqual(host_dataset.output_types, device_dataset.output_types)
+ self.assertEqual(host_dataset.output_types, iterator.output_types)
+ self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
+ self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
+ self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
+ self.assertEqual(host_dataset.output_classes, iterator.output_classes)
+
+ self.assertEqual(dtypes.int64, next_element.dtype)
+ self.assertEqual([], next_element.shape)
+
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
+ with self.test_session(config=worker_config) as sess:
+ for i in range(10):
+ self.assertEqual(i, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToDeviceWithPrefetch(self):
+ host_dataset = dataset_ops.Dataset.range(10)
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
+
+ with ops.device("/cpu:1"):
+ iterator = device_dataset.make_one_shot_iterator()
+ next_element = iterator.get_next()
+
+ self.assertEqual(host_dataset.output_types, device_dataset.output_types)
+ self.assertEqual(host_dataset.output_types, iterator.output_types)
+ self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
+ self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
+ self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
+ self.assertEqual(host_dataset.output_classes, iterator.output_classes)
+
+ self.assertEqual(dtypes.int64, next_element.dtype)
+ self.assertEqual([], next_element.shape)
+
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
+ with self.test_session(config=worker_config) as sess:
+ for i in range(10):
+ self.assertEqual(i, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyDictToDevice(self):
+ host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/cpu:1"))
+
+ with ops.device("/cpu:1"):
+ iterator = device_dataset.make_one_shot_iterator()
+ next_element = iterator.get_next()
+
+ self.assertEqual(host_dataset.output_types, device_dataset.output_types)
+ self.assertEqual(host_dataset.output_types, iterator.output_types)
+ self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
+ self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
+ self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
+ self.assertEqual(host_dataset.output_classes, iterator.output_classes)
+
+ self.assertEqual(dtypes.int64, next_element["a"].dtype)
+ self.assertEqual([], next_element["a"].shape)
+
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
+ with self.test_session(config=worker_config) as sess:
+ for i in range(10):
+ self.assertEqual({"a": i}, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyDictToDeviceWithPrefetch(self):
+ host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
+
+ with ops.device("/cpu:1"):
+ iterator = device_dataset.make_one_shot_iterator()
+ next_element = iterator.get_next()
+
+ self.assertEqual(host_dataset.output_types, device_dataset.output_types)
+ self.assertEqual(host_dataset.output_types, iterator.output_types)
+ self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
+ self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
+ self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
+ self.assertEqual(host_dataset.output_classes, iterator.output_classes)
+
+ self.assertEqual(dtypes.int64, next_element["a"].dtype)
+ self.assertEqual([], next_element["a"].shape)
+
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
+ with self.test_session(config=worker_config) as sess:
+ for i in range(10):
+ self.assertEqual({"a": i}, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopySparseTensorsToDevice(self):
+
+ def make_tensor(i):
+ return sparse_tensor.SparseTensorValue(
+ indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2])
+
+ host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)
+
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/cpu:1"))
+
+ with ops.device("/cpu:1"):
+ iterator = device_dataset.make_one_shot_iterator()
+ next_element = iterator.get_next()
+
+ self.assertEqual(host_dataset.output_types, device_dataset.output_types)
+ self.assertEqual(host_dataset.output_types, iterator.output_types)
+ self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
+ self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
+ self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
+ self.assertEqual(host_dataset.output_classes, iterator.output_classes)
+
+ self.assertEqual(dtypes.int64, next_element.dtype)
+
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
+ with self.test_session(config=worker_config) as sess:
+ for i in range(10):
+ actual = sess.run(next_element)
+ self.assertAllEqual([i], actual.values)
+ self.assertAllEqual([[0, 0]], actual.indices)
+ self.assertAllEqual([2, 2], actual.dense_shape)
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopySparseTensorsToDeviceWithPrefetch(self):
+
+ def make_tensor(i):
+ return sparse_tensor.SparseTensorValue(
+ indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2])
+
+ host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)
+
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
+
+ with ops.device("/cpu:1"):
+ iterator = device_dataset.make_one_shot_iterator()
+ next_element = iterator.get_next()
+
+ self.assertEqual(host_dataset.output_types, device_dataset.output_types)
+ self.assertEqual(host_dataset.output_types, iterator.output_types)
+ self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
+ self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
+ self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
+ self.assertEqual(host_dataset.output_classes, iterator.output_classes)
+
+ self.assertEqual(dtypes.int64, next_element.dtype)
+
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
+ with self.test_session(config=worker_config) as sess:
+ for i in range(10):
+ actual = sess.run(next_element)
+ self.assertAllEqual([i], actual.values)
+ self.assertAllEqual([[0, 0]], actual.indices)
+ self.assertAllEqual([2, 2], actual.dense_shape)
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToDeviceGpu(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPU available")
+
+ host_dataset = dataset_ops.Dataset.range(10)
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/gpu:0"))
+
+ with ops.device("/gpu:0"):
+ iterator = device_dataset.make_initializable_iterator()
+ next_element = iterator.get_next()
+
+ with self.test_session() as sess:
+ sess.run(iterator.initializer)
+ for i in range(10):
+ self.assertEqual(i, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToDeviceGpuWithPrefetch(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPU available")
+
+ host_dataset = dataset_ops.Dataset.range(10)
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
+
+ with ops.device("/gpu:0"):
+ iterator = device_dataset.make_initializable_iterator()
+ next_element = iterator.get_next()
+
+ with self.test_session() as sess:
+ sess.run(iterator.initializer)
+ for i in range(10):
+ self.assertEqual(i, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToDeviceGpuInt32(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPU available")
+
+ host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/gpu:0"))
+
+ with ops.device("/gpu:0"):
+ iterator = device_dataset.make_initializable_iterator()
+ next_element = iterator.get_next()
+
+ with self.test_session() as sess:
+ sess.run(iterator.initializer)
+ self.assertAllEqual([0, 1, 2, 3], sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToDeviceGpuInt32AndPrefetch(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPU available")
+
+ host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
+
+ with ops.device("/gpu:0"):
+ iterator = device_dataset.make_initializable_iterator()
+ next_element = iterator.get_next()
+
+ with self.test_session() as sess:
+ sess.run(iterator.initializer)
+ self.assertAllEqual([0, 1, 2, 3], sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToDeviceGpuStrings(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPU available")
+
+ host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/gpu:0"))
+
+ with ops.device("/gpu:0"):
+ iterator = device_dataset.make_initializable_iterator()
+ next_element = iterator.get_next()
+
+ with self.test_session() as sess:
+ sess.run(iterator.initializer)
+ self.assertAllEqual([b"a", b"b", b"c"], sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToDeviceGpuStringsAndPrefetch(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPU available")
+
+ host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
+ device_dataset = host_dataset.apply(
+        prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
+
+ with ops.device("/gpu:0"):
+ iterator = device_dataset.make_initializable_iterator()
+ next_element = iterator.get_next()
+
+ with self.test_session() as sess:
+ sess.run(iterator.initializer)
+ self.assertAllEqual([b"a", b"b", b"c"], sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToDevicePingPongCPUGPU(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPU available")
+
+ with compat.forward_compatibility_horizon(2018, 8, 4):
+ host_dataset = dataset_ops.Dataset.range(10)
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/gpu:0", source_device="/cpu:0"))
+ back_to_cpu_dataset = device_dataset.apply(
+ prefetching_ops.copy_to_device("/cpu:0", source_device="/gpu:0"))
+
+ with ops.device("/cpu:0"):
+ iterator = back_to_cpu_dataset.make_initializable_iterator()
+ next_element = iterator.get_next()
+
+ with self.test_session() as sess:
+ sess.run(iterator.initializer)
+ for i in range(10):
+ self.assertEqual(i, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToDeviceWithReInit(self):
+ host_dataset = dataset_ops.Dataset.range(10)
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/cpu:1"))
+
+ with ops.device("/cpu:1"):
+ iterator = device_dataset.make_initializable_iterator()
+ next_element = iterator.get_next()
+
+ self.assertEqual(host_dataset.output_types, device_dataset.output_types)
+ self.assertEqual(host_dataset.output_types, iterator.output_types)
+ self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
+ self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
+ self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
+ self.assertEqual(host_dataset.output_classes, iterator.output_classes)
+
+ self.assertEqual(dtypes.int64, next_element.dtype)
+ self.assertEqual([], next_element.shape)
+
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
+ with self.test_session(config=worker_config) as sess:
+ sess.run(iterator.initializer)
+ for i in range(5):
+ self.assertEqual(i, sess.run(next_element))
+ sess.run(iterator.initializer)
+ for i in range(10):
+ self.assertEqual(i, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToDeviceWithReInitAndPrefetch(self):
+ host_dataset = dataset_ops.Dataset.range(10)
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
+
+ with ops.device("/cpu:1"):
+ iterator = device_dataset.make_initializable_iterator()
+ next_element = iterator.get_next()
+
+ self.assertEqual(host_dataset.output_types, device_dataset.output_types)
+ self.assertEqual(host_dataset.output_types, iterator.output_types)
+ self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
+ self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
+ self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
+ self.assertEqual(host_dataset.output_classes, iterator.output_classes)
+
+ self.assertEqual(dtypes.int64, next_element.dtype)
+ self.assertEqual([], next_element.shape)
+
+ worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
+ with self.test_session(config=worker_config) as sess:
+ sess.run(iterator.initializer)
+ for i in range(5):
+ self.assertEqual(i, sess.run(next_element))
+ sess.run(iterator.initializer)
+ for i in range(10):
+ self.assertEqual(i, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToDeviceGpuWithReInit(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPU available")
+
+ host_dataset = dataset_ops.Dataset.range(10)
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/gpu:0"))
+
+ with ops.device("/gpu:0"):
+ iterator = device_dataset.make_initializable_iterator()
+ next_element = iterator.get_next()
+
+ with self.test_session() as sess:
+ sess.run(iterator.initializer)
+ for i in range(5):
+ self.assertEqual(i, sess.run(next_element))
+ sess.run(iterator.initializer)
+ for i in range(10):
+ self.assertEqual(i, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+ def testCopyToDeviceGpuWithReInitAndPrefetch(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPU available")
+
+ host_dataset = dataset_ops.Dataset.range(10)
+ device_dataset = host_dataset.apply(
+ prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
+
+ with ops.device("/gpu:0"):
+ iterator = device_dataset.make_initializable_iterator()
+ next_element = iterator.get_next()
+
+ with self.test_session() as sess:
+ sess.run(iterator.initializer)
+ for i in range(5):
+ self.assertEqual(i, sess.run(next_element))
+ sess.run(iterator.initializer)
+ for i in range(10):
+ self.assertEqual(i, sess.run(next_element))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(next_element)
+
+
if __name__ == "__main__":
test.main()
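Note: the CopyToDeviceTest class added above exercises the copy_to_device transformation that this patch also exports from contrib.data. The basic pattern, as a minimal sketch: copy elements to the target device, optionally compose further transformations there, and create the iterator under that device scope.

from tensorflow.contrib.data.python.ops import prefetching_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops

host_dataset = dataset_ops.Dataset.range(10)

# Unlike prefetch_to_device, copy_to_device leaves a dataset that can be
# transformed further on the target device, e.g. with prefetch(1).
device_dataset = host_dataset.apply(
    prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)

with ops.device("/gpu:0"):
  iterator = device_dataset.make_initializable_iterator()
  next_element = iterator.get_next()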
diff --git a/tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py b/tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py
index 5590a4bf78..8b2f846494 100644
--- a/tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py
@@ -17,6 +17,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.data.python.ops import sliding
@@ -29,28 +30,45 @@ from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
-class SlideDatasetTest(test.TestCase):
-
- def testSlideDataset(self):
- """Test an dataset that maps a TF function across its input elements."""
+class SlideDatasetTest(test.TestCase, parameterized.TestCase):
+
+ @parameterized.parameters(
+ (20, 14, 7, 1),
+ (20, 17, 9, 1),
+ (20, 14, 14, 1),
+ (20, 10, 14, 1),
+ (20, 14, 19, 1),
+ (20, 4, 1, 2),
+ (20, 2, 1, 6),
+ (20, 4, 7, 2),
+ (20, 2, 7, 6),
+ (1, 10, 4, 1),
+ (0, 10, 4, 1),
+ )
+ def testSlideDataset(self, count, window_size, window_shift, window_stride):
+ """Tests a dataset that slides a window its input elements."""
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
- count = array_ops.placeholder(dtypes.int64, shape=[])
- window_size = array_ops.placeholder(dtypes.int64, shape=[])
- stride = array_ops.placeholder(dtypes.int64, shape=[])
+ count_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_shift_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
- # RepeatDataset(count) -> _SlideDataset(window_size, stride).
- iterator = (dataset_ops.Dataset.from_tensor_slices(components)
- .map(_map_fn)
- .repeat(count)
- .apply(sliding.sliding_window_batch(window_size, stride))
- .make_initializable_iterator())
+ # RepeatDataset(count) ->
+ # _SlideDataset(window_size, window_shift, window_stride).
+ iterator = (
+ dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
+ .repeat(count).apply(
+ sliding.sliding_window_batch(
+ window_size=window_size_t,
+ window_shift=window_shift_t,
+ window_stride=window_stride_t)).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
@@ -58,90 +76,126 @@ class SlideDatasetTest(test.TestCase):
[t.shape.as_list() for t in get_next])
with self.test_session() as sess:
- # stride < window_size.
- # Slide over a finite input, where the window_size divides the
- # total number of elements.
- sess.run(init_op, feed_dict={count: 20, window_size: 14, stride: 7})
- # Same formula with convolution layer.
- num_batches = (20 * 7 - 14) // 7 + 1
- for i in range(num_batches):
- result = sess.run(get_next)
- for component, result_component in zip(components, result):
- for j in range(14):
- self.assertAllEqual(component[(i*7 + j) % 7]**2,
- result_component[j])
- with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
- # Slide over a finite input, where the window_size does not
- # divide the total number of elements.
- sess.run(init_op, feed_dict={count: 20, window_size: 17, stride: 9})
- num_batches = (20 * 7 - 17) // 9 + 1
+ sess.run(
+ init_op,
+ feed_dict={
+ count_t: count,
+ window_size_t: window_size,
+ window_shift_t: window_shift,
+ window_stride_t: window_stride
+ })
+ num_batches = (count * 7 - (
+ (window_size - 1) * window_stride + 1)) // window_shift + 1
for i in range(num_batches):
result = sess.run(get_next)
for component, result_component in zip(components, result):
- for j in range(17):
- self.assertAllEqual(component[(i*9 + j) % 7]**2,
- result_component[j])
+ for j in range(window_size):
+ self.assertAllEqual(
+ component[(i * window_shift + j * window_stride) % 7]**2,
+ result_component[j])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
- # stride == window_size.
- sess.run(init_op, feed_dict={count: 20, window_size: 14, stride: 14})
- num_batches = 20 * 7 // 14
- for i in range(num_batches):
- result = sess.run(get_next)
- for component, result_component in zip(components, result):
- for j in range(14):
- self.assertAllEqual(component[(i*14 + j) % 7]**2,
- result_component[j])
- with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
+ @parameterized.parameters(
+ (20, 14, 7, 1),
+ (20, 17, 9, 1),
+ (20, 14, 14, 1),
+ (20, 10, 14, 1),
+ (20, 14, 19, 1),
+ (20, 4, 1, 2),
+ (20, 2, 1, 6),
+ (20, 4, 7, 2),
+ (20, 2, 7, 6),
+ (1, 10, 4, 1),
+ (0, 10, 4, 1),
+ )
+ def testSlideDatasetDeprecated(self, count, window_size, stride,
+ window_stride):
+ """Tests a dataset that slides a window its input elements."""
+ components = (np.arange(7),
+ np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
+ np.array(37.0) * np.arange(7))
- # stride > window_size.
- sess.run(init_op, feed_dict={count: 20, window_size: 10, stride: 14})
- num_batches = 20 * 7 // 14
- for i in range(num_batches):
- result = sess.run(get_next)
- for component, result_component in zip(components, result):
- for j in range(10):
- self.assertAllEqual(component[(i*14 + j) % 7]**2,
- result_component[j])
- with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
- # Drop the last batch which is smaller than window_size.
- sess.run(init_op, feed_dict={count: 20, window_size: 14, stride: 19})
- num_batches = (20 * 7 - 7) // 19 # = 19 * 7 // 19
- for i in range(num_batches):
- result = sess.run(get_next)
- for component, result_component in zip(components, result):
- for j in range(14):
- self.assertAllEqual(component[(i*19 + j) % 7]**2,
- result_component[j])
- with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
+ count_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
+ stride_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
- # Slide over a finite input, which is less than window_size,
- # should fail straight away.
- sess.run(init_op, feed_dict={count: 1, window_size: 10, stride: 4})
- with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
+ def _map_fn(x, y, z):
+ return math_ops.square(x), math_ops.square(y), math_ops.square(z)
- sess.run(init_op, feed_dict={count: 1, window_size: 10, stride: 8})
- with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
+ # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
+ # RepeatDataset(count) -> _SlideDataset(window_size, stride, window_stride).
+ iterator = (
+ dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
+ .repeat(count).apply(
+ sliding.sliding_window_batch(
+ window_size=window_size_t,
+ stride=stride_t,
+ window_stride=window_stride_t)).make_initializable_iterator())
+ init_op = iterator.initializer
+ get_next = iterator.get_next()
- # Slide over an empty input should fail straight away.
- sess.run(init_op, feed_dict={count: 0, window_size: 8, stride: 4})
+ self.assertEqual([[None] + list(c.shape[1:]) for c in components],
+ [t.shape.as_list() for t in get_next])
+
+ with self.test_session() as sess:
+ sess.run(
+ init_op,
+ feed_dict={
+ count_t: count,
+ window_size_t: window_size,
+ stride_t: stride,
+ window_stride_t: window_stride
+ })
+ num_batches = (count * 7 - (
+ (window_size - 1) * window_stride + 1)) // stride + 1
+ for i in range(num_batches):
+ result = sess.run(get_next)
+ for component, result_component in zip(components, result):
+ for j in range(window_size):
+ self.assertAllEqual(
+ component[(i * stride + j * window_stride) % 7]**2,
+ result_component[j])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
- # Empty window_size should be an initialization time error.
- with self.assertRaises(errors.InvalidArgumentError):
- sess.run(init_op, feed_dict={count: 14, window_size: 0, stride: 0})
+ @parameterized.parameters(
+ (14, 0, 3, 1),
+ (14, 3, 0, 1),
+ (14, 3, 3, 0),
+ )
+ def testSlideDatasetInvalid(self, count, window_size, window_shift,
+ window_stride):
+ count_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_shift_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
+
+ iterator = (
+ dataset_ops.Dataset.range(10).map(lambda x: x).repeat(count_t).apply(
+ sliding.sliding_window_batch(
+ window_size=window_size_t,
+ window_shift=window_shift_t,
+ window_stride=window_stride_t)).make_initializable_iterator())
+ init_op = iterator.initializer
- # Invalid stride should be an initialization time error.
+ with self.test_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
- sess.run(init_op, feed_dict={count: 14, window_size: 3, stride: 0})
+ sess.run(
+ init_op,
+ feed_dict={
+ count_t: count,
+ window_size_t: window_size,
+ window_shift_t: window_shift,
+ window_stride_t: window_stride
+ })
+
+ def testSlideDatasetValueError(self):
+ with self.assertRaises(ValueError):
+ dataset_ops.Dataset.range(10).map(lambda x: x).apply(
+ sliding.sliding_window_batch(
+ window_size=1, stride=1, window_shift=1, window_stride=1))
def assertSparseValuesEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
@@ -155,7 +209,8 @@ class SlideDatasetTest(test.TestCase):
indices=[[0]], values=(i * [1]), dense_shape=[1])
iterator = dataset_ops.Dataset.range(10).map(_sparse).apply(
- sliding.sliding_window_batch(5, 3)).make_initializable_iterator()
+ sliding.sliding_window_batch(
+ window_size=5, window_shift=3)).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
@@ -183,7 +238,8 @@ class SlideDatasetTest(test.TestCase):
dense_shape=[i])
iterator = dataset_ops.Dataset.range(10).map(_sparse).apply(
- sliding.sliding_window_batch(5, 3)).make_initializable_iterator()
+ sliding.sliding_window_batch(
+ window_size=5, window_shift=3)).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
@@ -213,11 +269,11 @@ class SlideDatasetTest(test.TestCase):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
- iterator = (dataset_ops.Dataset.range(10)
- .map(_sparse)
- .apply(sliding.sliding_window_batch(4, 2))
- .apply(sliding.sliding_window_batch(3, 1))
- .make_initializable_iterator())
+ iterator = (
+ dataset_ops.Dataset.range(10).map(_sparse).apply(
+ sliding.sliding_window_batch(window_size=4, window_shift=2)).apply(
+ sliding.sliding_window_batch(window_size=3, window_shift=1))
+ .make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
@@ -226,9 +282,9 @@ class SlideDatasetTest(test.TestCase):
# Slide: 1st batch.
actual = sess.run(get_next)
expected = sparse_tensor.SparseTensorValue(
- indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0],
- [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0],
- [2, 0, 0], [2, 1, 0], [2, 2, 0], [2, 3, 0]],
+ indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
+ [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
+ [2, 2, 0], [2, 3, 0]],
values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],
dense_shape=[3, 4, 1])
self.assertTrue(sparse_tensor.is_sparse(actual))
@@ -236,9 +292,9 @@ class SlideDatasetTest(test.TestCase):
# Slide: 2nd batch.
actual = sess.run(get_next)
expected = sparse_tensor.SparseTensorValue(
- indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0],
- [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0],
- [2, 0, 0], [2, 1, 0], [2, 2, 0], [2, 3, 0]],
+ indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
+ [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
+ [2, 2, 0], [2, 3, 0]],
values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9],
dense_shape=[3, 4, 1])
self.assertTrue(sparse_tensor.is_sparse(actual))
@@ -253,10 +309,11 @@ class SlideDatasetTest(test.TestCase):
yield [4.0, 5.0, 6.0]
yield [7.0, 8.0, 9.0, 10.0]
- iterator = (dataset_ops.Dataset.from_generator(generator, dtypes.float32,
- output_shapes=[None])
- .apply(sliding.sliding_window_batch(3, 1))
- .make_initializable_iterator())
+ iterator = (
+ dataset_ops.Dataset.from_generator(
+ generator, dtypes.float32, output_shapes=[None]).apply(
+ sliding.sliding_window_batch(window_size=3, window_shift=1))
+ .make_initializable_iterator())
next_element = iterator.get_next()
with self.test_session() as sess:
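Note: both parameterized tests above compute the expected number of windows from the same formula; a worked instance helps check it. One window spans (window_size - 1) * window_stride + 1 consecutive elements, and successive windows start window_shift (or, in the deprecated test, stride) elements apart:

# For count=20 over 7 base elements (140 total), window_size=14,
# window_shift=7, window_stride=1:
#   window span = (14 - 1) * 1 + 1 = 14
#   num_batches = (140 - 14) // 7 + 1 = 19
# For window_size=4, window_shift=1, window_stride=2:
#   window span = (4 - 1) * 2 + 1 = 7
#   num_batches = (140 - 7) // 1 + 1 = 134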
diff --git a/tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py b/tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py
new file mode 100644
index 0000000000..33d95d6754
--- /dev/null
+++ b/tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py
@@ -0,0 +1,523 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for the experimental input pipeline ops."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl.testing import parameterized
+import numpy as np
+
+from tensorflow.contrib.data.python.ops import batching
+from tensorflow.contrib.data.python.ops import grouping
+from tensorflow.python.data.ops import dataset_ops
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import sparse_ops
+from tensorflow.python.platform import test
+
+
+class WindowDatasetTest(test.TestCase, parameterized.TestCase):
+
+ def _structuredDataset(self, structure, shape, dtype):
+ if structure is None:
+ return dataset_ops.Dataset.from_tensors(
+ array_ops.zeros(shape, dtype=dtype))
+ else:
+ return dataset_ops.Dataset.zip(
+ tuple([
+ self._structuredDataset(substructure, shape, dtype)
+ for substructure in structure
+ ]))
+
+ def _structuredElement(self, structure, shape, dtype):
+ if structure is None:
+ return array_ops.zeros(shape, dtype=dtype)
+ else:
+ return tuple([
+ self._structuredElement(substructure, shape, dtype)
+ for substructure in structure
+ ])
+
+ def _assertEqual(self, xs, ys):
+ self.assertEqual(type(xs), type(ys))
+ if isinstance(xs, tuple) and isinstance(ys, tuple):
+ self.assertEqual(len(xs), len(ys))
+ for x, y in zip(xs, ys):
+ self._assertEqual(x, y)
+ elif isinstance(xs, np.ndarray) and isinstance(ys, np.ndarray):
+ self.assertAllEqual(xs, ys)
+ else:
+ self.assertEqual(xs, ys)
+
+ @parameterized.parameters(
+ (None, np.int32([]), dtypes.bool),
+ (None, np.int32([]), dtypes.int32),
+ (None, np.int32([]), dtypes.float32),
+ (None, np.int32([]), dtypes.string),
+ (None, np.int32([2]), dtypes.int32),
+ (None, np.int32([2, 2]), dtypes.int32),
+ ((None, None, None), np.int32([]), dtypes.int32),
+ ((None, (None, None)), np.int32([]), dtypes.int32),
+ )
+ def testWindowDatasetFlatMap(self, structure, shape, dtype):
+ """Tests windowing by chaining it with flat map.
+
+ Args:
+ structure: the input structure
+ shape: the input shape
+ dtype: the input data type
+ """
+
+ def fn(*args):
+ if len(args) == 1 and not isinstance(args[0], tuple):
+ return args[0]
+ return dataset_ops.Dataset.zip(
+ tuple([fn(*arg) if isinstance(arg, tuple) else arg for arg in args]))
+
+ dataset = self._structuredDataset(structure, shape, dtype).apply(
+ grouping.window_dataset(5)).flat_map(fn)
+ get_next = dataset.make_one_shot_iterator().get_next()
+ with self.test_session() as sess:
+ expected = sess.run(self._structuredElement(structure, shape, dtype))
+ actual = sess.run(get_next)
+ self._assertEqual(expected, actual)
+
+ @parameterized.parameters(
+ (None, np.int32([]), dtypes.bool),
+ (None, np.int32([]), dtypes.int32),
+ (None, np.int32([]), dtypes.float32),
+ (None, np.int32([]), dtypes.string),
+ (None, np.int32([2]), dtypes.int32),
+ (None, np.int32([2, 2]), dtypes.int32),
+ ((None, None, None), np.int32([]), dtypes.int32),
+ ((None, (None, None)), np.int32([]), dtypes.int32),
+ )
+ def testWindowDatasetBatchDense(self, structure, shape, dtype):
+ """Tests batching of dense tensor windows.
+
+ Args:
+ structure: the input structure
+ shape: the input shape
+ dtype: the input data type
+ """
+
+ def fn(*args):
+ if len(args) == 1 and not isinstance(args[0], tuple):
+ return batching.batch_window(args[0])
+
+ return tuple([
+ fn(*arg) if isinstance(arg, tuple) else batching.batch_window(arg)
+ for arg in args
+ ])
+
+ dataset = self._structuredDataset(structure, shape, dtype).repeat(5).apply(
+ grouping.window_dataset(5)).apply(grouping._map_x_dataset(fn))
+ get_next = dataset.make_one_shot_iterator().get_next()
+ with self.test_session() as sess:
+ expected = sess.run(
+ self._structuredElement(structure, np.concatenate(
+ ([5], shape), axis=0), dtype))
+ actual = sess.run(get_next)
+ self._assertEqual(expected, actual)
+
+ @parameterized.parameters(
+ (np.int32([]),),
+ (np.int32([1]),),
+ (np.int32([1, 2, 3]),),
+ )
+ def testWindowDatasetBatchDenseDynamicShape(self, shape):
+ """Tests batching of dynamically shaped dense tensor windows.
+
+ Args:
+ shape: the input shape
+ """
+
+ shape_t = array_ops.placeholder(dtypes.int32)
+ dataset = dataset_ops.Dataset.from_tensors(
+ array_ops.zeros(shape_t)).repeat(5).apply(
+ grouping.window_dataset(5)).apply(
+ grouping._map_x_dataset(batching.batch_window))
+ iterator = dataset.make_initializable_iterator()
+ init_op = iterator.initializer
+ get_next = iterator.get_next()
+ with self.test_session() as sess:
+ sess.run(init_op, {shape_t: shape})
+ expected = sess.run(
+ self._structuredElement(None, np.concatenate(([5], shape), axis=0),
+ dtypes.int32))
+ actual = sess.run(get_next)
+ self._assertEqual(expected, actual)
+
+ def _make_dense_to_sparse_fn(self, is_scalar):
+
+ def dense_to_sparse_scalar(tensor):
+ indices = [[]]
+ values = array_ops.expand_dims(tensor, 0)
+ shape = []
+ return sparse_tensor.SparseTensorValue(indices, values, shape)
+
+ def dense_to_sparse_non_scalar(tensor):
+ indices = array_ops.where(array_ops.ones_like(tensor, dtype=dtypes.bool))
+ values = array_ops.gather_nd(tensor, indices)
+ shape = array_ops.shape(tensor, out_type=dtypes.int64)
+ return sparse_tensor.SparseTensorValue(indices, values, shape)
+
+ if is_scalar:
+ return dense_to_sparse_scalar
+ return dense_to_sparse_non_scalar
+
+ def _structuredSparseDataset(self, structure, shape, dtype):
+ dense_to_sparse = self._make_dense_to_sparse_fn(len(shape) == 0) # pylint: disable=g-explicit-length-test
+ if structure is None:
+ return dataset_ops.Dataset.from_tensors(
+ dense_to_sparse(array_ops.zeros(shape, dtype=dtype)))
+ else:
+ return dataset_ops.Dataset.zip(
+ tuple([
+ self._structuredSparseDataset(substructure, shape, dtype)
+ for substructure in structure
+ ]))
+
+ def _structuredSparseElement(self, structure, shape, dtype):
+ dense_to_sparse = self._make_dense_to_sparse_fn(len(shape) == 0) # pylint: disable=g-explicit-length-test
+ if structure is None:
+ return dense_to_sparse(array_ops.zeros(shape, dtype=dtype))
+ else:
+ return tuple([
+ self._structuredSparseElement(substructure, shape, dtype)
+ for substructure in structure
+ ])
+
+ @parameterized.parameters(
+ (None, np.int32([]), dtypes.bool),
+ (None, np.int32([]), dtypes.int32),
+ (None, np.int32([]), dtypes.float32),
+ (None, np.int32([]), dtypes.string),
+ (None, np.int32([2]), dtypes.int32),
+ (None, np.int32([2, 2]), dtypes.int32),
+ ((None, None, None), np.int32([]), dtypes.int32),
+ ((None, (None, None)), np.int32([]), dtypes.int32),
+ )
+ def testWindowDatasetBatchSparse(self, structure, shape, dtype):
+ """Tests batching of sparse tensor windows.
+
+ Args:
+ structure: the input structure
+ shape: the input shape
+ dtype: the input data type
+ """
+
+ def fn(*args):
+ if len(args) == 1 and not isinstance(args[0], tuple):
+ return batching.batch_window(args[0])
+
+ return tuple([
+ fn(*arg) if isinstance(arg, tuple) else batching.batch_window(arg)
+ for arg in args
+ ])
+
+ dataset = self._structuredSparseDataset(
+ structure, shape, dtype).repeat(5).apply(
+ grouping.window_dataset(5)).apply(grouping._map_x_dataset(fn))
+ get_next = dataset.make_one_shot_iterator().get_next()
+ with self.test_session() as sess:
+ expected = sess.run(
+ self._structuredSparseElement(structure,
+ np.concatenate(([5], shape), axis=0),
+ dtype))
+ actual = sess.run(get_next)
+ self._assertEqual(expected, actual)
+
+ @parameterized.parameters(
+ (np.int32([]),),
+ (np.int32([1]),),
+ (np.int32([1, 2, 3]),),
+ )
+ def testWindowDatasetBatchSparseDynamicShape(self, shape):
+ """Tests batching of dynamically shaped sparse tensor windows.
+
+ Args:
+ shape: the input shape
+ """
+
+ shape_t = array_ops.placeholder(dtypes.int32)
+ dataset = dataset_ops.Dataset.from_tensors(array_ops.zeros(shape_t)).map(
+ self._make_dense_to_sparse_fn(len(shape) == 0)).repeat(5).apply( # pylint: disable=g-explicit-length-test
+ grouping.window_dataset(5)).apply(
+ grouping._map_x_dataset(batching.batch_window))
+ iterator = dataset.make_initializable_iterator()
+ init_op = iterator.initializer
+ get_next = iterator.get_next()
+ with self.test_session() as sess:
+ sess.run(init_op, {shape_t: shape})
+ expected = sess.run(
+ self._structuredSparseElement(None,
+ np.concatenate(([5], shape), axis=0),
+ dtypes.int32))
+ actual = sess.run(get_next)
+ self._assertEqual(expected, actual)
+
+ def _structuredRaggedDataset(self, structure, shapes, dtype):
+
+ if structure is None:
+ return dataset_ops.Dataset.from_tensor_slices(shapes).map(
+ lambda shape: array_ops.zeros(shape, dtype=dtype))
+ else:
+ return dataset_ops.Dataset.zip(
+ tuple([
+ self._structuredRaggedDataset(substructure, shapes, dtype)
+ for substructure in structure
+ ]))
+
+ @parameterized.parameters(
+ (None, np.int32([[1], [2], [3]]), dtypes.bool, [-1]),
+ (None, np.int32([[1], [2], [3]]), dtypes.int32, [-1]),
+ (None, np.int32([[1], [2], [3]]), dtypes.float32, [-1]),
+ (None, np.int32([[1], [2], [3]]), dtypes.string, [-1]),
+ (None, np.int32([[1, 3], [2, 2], [3, 1]]), dtypes.int32, [-1, -1]),
+ (None, np.int32([[3, 1, 3], [1, 3, 1]]), dtypes.int32, [-1, -1, -1]),
+ ((None, None, None), np.int32([[1], [2], [3]]), dtypes.int32, [-1]),
+ ((None, (None, None)), np.int32([[1], [2], [3]]), dtypes.int32, [-1]),
+ (None, np.int32([[1], [2], [3]]), dtypes.int32, [-1]),
+ (None, np.int32([[1], [2], [3]]), dtypes.int32, np.int32([10])),
+ )
+ def testWindowDatasetPaddedBatchDense(self, structure, shapes, dtype,
+ padded_shape):
+ """Tests padded batching of dense tensor windows.
+
+ Args:
+ structure: the input structure
+ shapes: the input shapes
+ dtype: the input data type
+ padded_shape: the shape to pad the output to
+ """
+
+ def fn(*args):
+ if len(args) == 1 and not isinstance(args[0], tuple):
+ return batching.padded_batch_window(args[0], padded_shape)
+
+ return tuple([
+ fn(*arg) if isinstance(arg, tuple) else batching.padded_batch_window(
+ arg, padded_shape) for arg in args
+ ])
+
+ dataset = self._structuredRaggedDataset(structure, shapes, dtype).apply(
+ grouping.window_dataset(len(shapes))).apply(
+ grouping._map_x_dataset(fn))
+ get_next = dataset.make_one_shot_iterator().get_next()
+ with self.test_session() as sess:
+ expected_shape = np.maximum(np.amax(shapes, axis=0), padded_shape)
+ expected = sess.run(
+ self._structuredElement(
+ structure,
+ np.concatenate((np.int32([len(shapes)]), expected_shape)), dtype))
+ actual = sess.run(get_next)
+ self._assertEqual(expected, actual)
+
+ @parameterized.parameters(
+ (np.int32([[1], [2], [3]]), [-1]),
+ (np.int32([[1, 3], [2, 2], [3, 1]]), [-1, -1]),
+ (np.int32([[3, 1, 3], [1, 3, 1]]), [-1, -1, -1]),
+ )
+ def testWindowDatasetPaddedBatchDenseDynamicShape(self, shapes, padded_shape):
+ """Tests padded batching of dynamically shaped dense tensor windows.
+
+ Args:
+ shapes: the input shapes
+ padded_shape: the shape to pad the output to
+ """
+
+ shapes_t = array_ops.placeholder(dtypes.int32)
+ dataset = dataset_ops.Dataset.from_tensor_slices(shapes_t).map(
+ lambda shape: array_ops.zeros(shape, dtype=dtypes.int32)).apply(
+ grouping.window_dataset(len(shapes))).apply(
+ grouping._map_x_dataset(
+ lambda x: batching.padded_batch_window(x, padded_shape)))
+ iterator = dataset.make_initializable_iterator()
+ init_op = iterator.initializer
+ get_next = iterator.get_next()
+ with self.test_session() as sess:
+ sess.run(init_op, {shapes_t: shapes})
+ expected_shape = np.maximum(np.amax(shapes, axis=0), padded_shape)
+ expected = sess.run(
+ self._structuredElement(
+ None, np.concatenate((np.int32([len(shapes)]), expected_shape)),
+ dtypes.int32))
+ actual = sess.run(get_next)
+ self._assertEqual(expected, actual)
+
+ @parameterized.parameters(
+ (np.int32([[1]]), np.int32([0])),
+ (np.int32([[10], [20]]), np.int32([15])),
+ )
+ def testWindowDatasetPaddedBatchDenseInvalid(self, shapes, padded_shape):
+ """Tests invalid padded batching of dense tensor windows.
+
+ Args:
+ shapes: the input shapes
+ padded_shape: the shape to pad the output to
+ """
+
+ dataset = dataset_ops.Dataset.from_tensor_slices(shapes).map(
+ lambda shape: array_ops.zeros(shape, dtype=dtypes.int32)).apply(
+ grouping.window_dataset(len(shapes))).apply(
+ grouping._map_x_dataset(
+ lambda x: batching.padded_batch_window(x, padded_shape)))
+ get_next = dataset.make_one_shot_iterator().get_next()
+ with self.test_session() as sess:
+ with self.assertRaises(errors.InvalidArgumentError):
+ sess.run(get_next)
+
+ def _structuredRaggedSparseDataset(self, structure, shapes, dtype):
+
+ def map_fn(shape):
+ dense_to_sparse = self._make_dense_to_sparse_fn(False)
+ return dense_to_sparse(array_ops.zeros(shape, dtype=dtype))
+
+ if structure is None:
+ return dataset_ops.Dataset.from_tensor_slices(shapes).map(map_fn)
+ else:
+ return dataset_ops.Dataset.zip(
+ tuple([
+ self._structuredRaggedSparseDataset(substructure, shapes, dtype)
+ for substructure in structure
+ ]))
+
+ def _structuredRaggedSparseElement(self, structure, shapes, dtype,
+ padded_shape):
+ if structure is None:
+ dense_shape = np.maximum(np.amax(shapes, axis=0), padded_shape)
+ values = []
+ for shape in shapes:
+ dense_to_sparse = self._make_dense_to_sparse_fn(len(shape) == 0) # pylint: disable=g-explicit-length-test
+ sparse = dense_to_sparse(array_ops.zeros(shape, dtype=dtype))
+ padded_sparse = sparse_tensor.SparseTensor(sparse.indices,
+ sparse.values, dense_shape)
+ reshaped_sparse = sparse_ops.sparse_reshape(
+ padded_sparse,
+ array_ops.concat([np.array([1], dtype=np.int64), dense_shape], 0))
+ values.append(reshaped_sparse)
+ return sparse_ops.sparse_concat(0, values)
+ else:
+ return tuple([
+ self._structuredRaggedSparseElement(substructure, shapes, dtype,
+ padded_shape)
+ for substructure in structure
+ ])
+
+ @parameterized.parameters(
+ (None, np.int64([[1], [2], [3]]), dtypes.bool, [-1]),
+ (None, np.int64([[1], [2], [3]]), dtypes.int32, [-1]),
+ (None, np.int64([[1], [2], [3]]), dtypes.float32, [-1]),
+ (None, np.int64([[1], [2], [3]]), dtypes.string, [-1]),
+ (None, np.int64([[1, 3], [2, 2], [3, 1]]), dtypes.int32, [-1, -1]),
+ (None, np.int64([[1, 3, 1], [3, 1, 3]]), dtypes.int32, [-1, -1, -1]),
+ ((None, None, None), np.int64([[1], [2], [3]]), dtypes.int32, [-1]),
+ ((None, (None, None)), np.int64([[1], [2], [3]]), dtypes.int32, [-1]),
+ (None, np.int64([[1], [2], [3]]), dtypes.int32, [-1]),
+ (None, np.int64([[1], [2], [3]]), dtypes.int32, np.int64([10])),
+ )
+ def testWindowDatasetPaddedBatchSparse(self, structure, shapes, dtype,
+ padded_shape):
+ """Tests padded batching of sparse tensor windows.
+
+ Args:
+ structure: the input structure
+ shapes: the input shapes
+ dtype: the input data type
+ padded_shape: the shape to pad the output to
+ """
+
+ def fn(*args):
+ if len(args) == 1 and not isinstance(args[0], tuple):
+ return batching.padded_batch_window(args[0], padded_shape)
+
+ return tuple([
+ fn(*arg) if isinstance(arg, tuple) else batching.padded_batch_window(
+ arg, padded_shape) for arg in args
+ ])
+
+ dataset = self._structuredRaggedSparseDataset(
+ structure, shapes, dtype).apply(grouping.window_dataset(
+ len(shapes))).apply(grouping._map_x_dataset(fn))
+ get_next = dataset.make_one_shot_iterator().get_next()
+ with self.test_session() as sess:
+ expected = sess.run(
+ self._structuredRaggedSparseElement(structure, shapes, dtype,
+ padded_shape))
+ actual = sess.run(get_next)
+ self._assertEqual(expected, actual)
+
+ @parameterized.parameters(
+ (np.int64([[1], [2], [3]]), [-1]),
+ (np.int64([[1, 3], [2, 2], [3, 1]]), [-1, -1]),
+ (np.int64([[3, 1, 3], [1, 3, 1]]), [-1, -1, -1]),
+ )
+ def testWindowDatasetPaddedBatchSparseDynamicShape(self, shapes,
+ padded_shape):
+ """Tests padded batching of dynamically shaped sparse tensor windows.
+
+ Args:
+ shapes: the input shapes
+ padded_shape: the shape to pad the output to
+ """
+
+ shapes_t = array_ops.placeholder(dtypes.int32)
+ dataset = dataset_ops.Dataset.from_tensor_slices(shapes_t).map(
+ lambda shape: array_ops.zeros(shape, dtype=dtypes.int32)).map(
+ self._make_dense_to_sparse_fn(False)
+ ).apply(grouping.window_dataset(len(shapes))).apply(
+ grouping._map_x_dataset(
+ lambda x: batching.padded_batch_window(x, padded_shape)))
+ iterator = dataset.make_initializable_iterator()
+ init_op = iterator.initializer
+ get_next = iterator.get_next()
+ with self.test_session() as sess:
+ sess.run(init_op, {shapes_t: shapes})
+ expected = sess.run(
+ self._structuredRaggedSparseElement(None, shapes, dtypes.int32,
+ padded_shape))
+ actual = sess.run(get_next)
+ self._assertEqual(expected, actual)
+
+ @parameterized.parameters(
+ (np.int64([[1]]), [0]),
+ (np.int64([[10], [20]]), [15]),
+ )
+ def testWindowDatasetPaddedBatchSparseInvalid(self, shapes, padded_shape):
+ """Tests invalid padded batching of sparse tensor windows.
+
+ Args:
+ shapes: the input shapes
+ padded_shape: the shape to pad the output to
+ """
+
+ dataset = dataset_ops.Dataset.from_tensor_slices(shapes).map(
+ lambda shape: array_ops.zeros(shape, dtype=dtypes.int32)).map(
+ self._make_dense_to_sparse_fn(False)
+ ).apply(grouping.window_dataset(len(shapes))).apply(
+ grouping._map_x_dataset(
+ lambda x: batching.padded_batch_window(x, padded_shape)))
+ get_next = dataset.make_one_shot_iterator().get_next()
+ with self.test_session() as sess:
+ with self.assertRaises(errors.InvalidArgumentError):
+ sess.run(get_next)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/data/python/ops/BUILD b/tensorflow/contrib/data/python/ops/BUILD
index 0240814562..160d7fe22a 100644
--- a/tensorflow/contrib/data/python/ops/BUILD
+++ b/tensorflow/contrib/data/python/ops/BUILD
@@ -115,6 +115,8 @@ py_library(
srcs = ["batching.py"],
srcs_version = "PY2AND3",
deps = [
+ ":get_single_element",
+ ":grouping",
"//tensorflow/contrib/framework:framework_py",
"//tensorflow/python:array_ops",
"//tensorflow/python:dataset_ops_gen",
diff --git a/tensorflow/contrib/data/python/ops/batching.py b/tensorflow/contrib/data/python/ops/batching.py
index 5708d47c20..a4914f4cde 100644
--- a/tensorflow/contrib/data/python/ops/batching.py
+++ b/tensorflow/contrib/data/python/ops/batching.py
@@ -17,22 +17,135 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import numpy as np
+
+from tensorflow.contrib.data.python.ops import get_single_element
+from tensorflow.contrib.data.python.ops import grouping
from tensorflow.contrib.framework import with_shape
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import convert
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import check_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import sparse_ops
from tensorflow.python.util import deprecation
+def batch_window(dataset):
+ """Batches a window of tensors.
+
+ Args:
+ dataset: the input dataset.
+
+ Returns:
+ A `Tensor` representing the batch of the entire input dataset.
+ """
+ if isinstance(dataset.output_classes, tuple):
+ raise TypeError("Input dataset expected to have a single component")
+ if dataset.output_classes is ops.Tensor:
+ return _batch_dense_window(dataset)
+ elif dataset.output_classes is sparse_tensor.SparseTensor:
+ return _batch_sparse_window(dataset)
+ else:
+ raise TypeError("Unsupported dataset type: %s" % dataset.output_classes)
+
+
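+# Example (a minimal sketch of the intended usage, mirroring the window tests
+# above): `batch_window` consumes one window produced by
+# `grouping.window_dataset` and is typically applied via
+# `grouping._map_x_dataset`:
+#
+#   dataset = dataset_ops.Dataset.range(10).apply(
+#       grouping.window_dataset(5)).apply(
+#           grouping._map_x_dataset(batch_window))
+#   # Each element of `dataset` is now a dense batch of shape [5].
+
+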
+def _batch_dense_window(dataset):
+ """Batches a window of dense tensors."""
+
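+  # Map every element to the same key so that `group_by_reducer` reduces the
+  # entire window in a single group.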
+ def key_fn(_):
+ return np.int64(0)
+
+ def shape_init_fn(_):
+ return array_ops.shape(first_element)
+
+  def shape_reduce_fn(state, value):
+    # The assertion only takes effect when it is a control dependency of the
+    # returned value (cf. max_reduce_fn in _padded_batch_dense_window).
+    assert_op = check_ops.assert_equal(state, array_ops.shape(value))
+    with ops.control_dependencies([assert_op]):
+      return array_ops.identity(state)
+
+ def finalize_fn(state):
+ return state
+
+ if dataset.output_shapes.is_fully_defined():
+ shape = dataset.output_shapes
+ else:
+ first_element = get_single_element.get_single_element(dataset.take(1))
+ shape_reducer = grouping.Reducer(shape_init_fn, shape_reduce_fn,
+ finalize_fn)
+ shape = get_single_element.get_single_element(
+ dataset.apply(grouping.group_by_reducer(key_fn, shape_reducer)))
+
+ def batch_init_fn(_):
+ batch_shape = array_ops.concat([[0], shape], 0)
+ return gen_array_ops.empty(batch_shape, dtype=dataset.output_types)
+
+ def batch_reduce_fn(state, value):
+ return array_ops.concat([state, [value]], 0)
+
+ batch_reducer = grouping.Reducer(batch_init_fn, batch_reduce_fn, finalize_fn)
+ return get_single_element.get_single_element(
+ dataset.apply(grouping.group_by_reducer(key_fn, batch_reducer)))
+
+
+def _batch_sparse_window(dataset):
+ """Batches a window of sparse tensors."""
+
+ def key_fn(_):
+ return np.int64(0)
+
+ def shape_init_fn(_):
+ return first_element.dense_shape
+
+  def shape_reduce_fn(state, value):
+    # Make the assertion a control dependency of the returned state so that
+    # it actually executes in graph mode.
+    assert_op = check_ops.assert_equal(state, value.dense_shape)
+    with ops.control_dependencies([assert_op]):
+      return array_ops.identity(state)
+
+ def finalize_fn(state):
+ return state
+
+ if dataset.output_shapes.is_fully_defined():
+ shape = dataset.output_shapes
+ else:
+ first_element = get_single_element.get_single_element(dataset.take(1))
+ shape_reducer = grouping.Reducer(shape_init_fn, shape_reduce_fn,
+ finalize_fn)
+ shape = get_single_element.get_single_element(
+ dataset.apply(grouping.group_by_reducer(key_fn, shape_reducer)))
+
+ def batch_init_fn(_):
+ indices_shape = array_ops.concat([[0], [array_ops.size(shape) + 1]], 0)
+ return sparse_tensor.SparseTensor(
+ indices=gen_array_ops.empty(indices_shape, dtype=dtypes.int64),
+ values=constant_op.constant([], shape=[0], dtype=dataset.output_types),
+ dense_shape=array_ops.concat(
+ [np.array([0], dtype=np.int64),
+ math_ops.cast(shape, dtypes.int64)], 0))
+
+ def batch_reduce_fn(state, value):
+ return sparse_ops.sparse_concat(0, [state, value])
+
+ def reshape_fn(value):
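+    # Prepend a unit batch dimension so that consecutive windows can be
+    # concatenated along axis 0 in batch_reduce_fn.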
+ return sparse_ops.sparse_reshape(
+ value,
+ array_ops.concat([np.array([1], dtype=np.int64), value.dense_shape], 0))
+
+ batch_reducer = grouping.Reducer(batch_init_fn, batch_reduce_fn, finalize_fn)
+ return get_single_element.get_single_element(
+ dataset.map(reshape_fn).apply(
+ grouping.group_by_reducer(key_fn, batch_reducer)))
+
+
def dense_to_sparse_batch(batch_size, row_shape):
"""A transformation that batches ragged elements into `tf.SparseTensor`s.
@@ -82,6 +195,157 @@ def dense_to_sparse_batch(batch_size, row_shape):
return _apply_fn
+def padded_batch_window(dataset, padded_shape, padding_value=None):
+ """Batches a window of tensors with padding.
+
+ Args:
+ dataset: the input dataset.
+    padded_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like object
+      representing the shape to which the input elements should be padded
+ prior to batching. Any unknown dimensions (e.g. `tf.Dimension(None)` in a
+ `tf.TensorShape` or `-1` in a tensor-like object) will be padded to the
+ maximum size of that dimension in each batch.
+    padding_value: (Optional.) A scalar-shaped `tf.Tensor`, representing the
+      padding value to use. Defaults to `0` for numeric types, `False` for
+      booleans, and the empty string for strings. Must not be specified if
+      `dataset` contains `tf.SparseTensor`s.
+
+ Returns:
+ A `Tensor` representing the batch of the entire input dataset.
+
+ Raises:
+ ValueError: if invalid arguments are provided.
+ """
+ if not issubclass(dataset.output_classes,
+ (ops.Tensor, sparse_tensor.SparseTensor)):
+ raise TypeError("Input dataset expected to have a single tensor component")
+  if issubclass(dataset.output_classes, ops.Tensor):
+    return _padded_batch_dense_window(dataset, padded_shape, padding_value)
+  elif issubclass(dataset.output_classes, sparse_tensor.SparseTensor):
+ if padding_value is not None:
+ raise ValueError("Padding value not allowed for sparse tensors")
+ return _padded_batch_sparse_window(dataset, padded_shape)
+ else:
+ raise TypeError("Unsupported dataset type: %s" % dataset.output_classes)
+
+
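+# Example (a minimal sketch, mirroring the padded-batch tests above): pad each
+# window of ragged rows to the maximum row length in that window:
+#
+#   dataset = dataset.apply(grouping.window_dataset(3)).apply(
+#       grouping._map_x_dataset(
+#           lambda x: padded_batch_window(x, [-1])))
+
+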
+def _padded_batch_dense_window(dataset, padded_shape, padding_value=None):
+ """Batches a window of dense tensors with padding."""
+
+ padded_shape = math_ops.cast(
+ convert.partial_shape_to_tensor(padded_shape), dtypes.int32)
+
+ def key_fn(_):
+ return np.int64(0)
+
+ def max_init_fn(_):
+ return padded_shape
+
+ def max_reduce_fn(state, value):
+ """Computes the maximum shape to pad to."""
+ condition = math_ops.reduce_all(
+ math_ops.logical_or(
+ math_ops.less_equal(array_ops.shape(value), padded_shape),
+ math_ops.equal(padded_shape, -1)))
+ assert_op = control_flow_ops.Assert(condition, [
+ "Actual shape greater than padded shape: ",
+ array_ops.shape(value), padded_shape
+ ])
+ with ops.control_dependencies([assert_op]):
+ return math_ops.maximum(state, array_ops.shape(value))
+
+ def finalize_fn(state):
+ return state
+
+ # Compute the padded shape.
+ max_reducer = grouping.Reducer(max_init_fn, max_reduce_fn, finalize_fn)
+ padded_shape = get_single_element.get_single_element(
+ dataset.apply(grouping.group_by_reducer(key_fn, max_reducer)))
+
+ if padding_value is None:
+ if dataset.output_types == dtypes.string:
+ padding_value = ""
+ elif dataset.output_types == dtypes.bool:
+ padding_value = False
+ elif dataset.output_types == dtypes.variant:
+ raise TypeError("Unable to create padding for field of type 'variant'")
+ else:
+ padding_value = 0
+
+ def batch_init_fn(_):
+ return array_ops.fill(
+ array_ops.concat([np.array([0], dtype=np.int32), padded_shape], 0),
+ constant_op.constant(padding_value, dtype=dataset.output_types))
+
+ def batch_reduce_fn(state, value):
+ return array_ops.concat([state, [value]], 0)
+
+ def pad_fn(value):
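+    # Pad every dimension of each element on the right, up to `padded_shape`,
+    # before the element is concatenated into the batch.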
+ shape = array_ops.shape(value)
+ left = array_ops.zeros_like(shape)
+ right = padded_shape - shape
+ return array_ops.pad(
+ value, array_ops.stack([left, right], 1), constant_values=padding_value)
+
+ batch_reducer = grouping.Reducer(batch_init_fn, batch_reduce_fn, finalize_fn)
+ return get_single_element.get_single_element(
+ dataset.map(pad_fn).apply(
+ grouping.group_by_reducer(key_fn, batch_reducer)))
+
+
+def _padded_batch_sparse_window(dataset, padded_shape):
+ """Batches a window of sparse tensors with padding."""
+
+ def key_fn(_):
+ return np.int64(0)
+
+ def max_init_fn(_):
+ return convert.partial_shape_to_tensor(padded_shape)
+
+ def max_reduce_fn(state, value):
+ """Computes the maximum shape to pad to."""
+ condition = math_ops.reduce_all(
+ math_ops.logical_or(
+ math_ops.less_equal(value.dense_shape, padded_shape),
+ math_ops.equal(padded_shape, -1)))
+ assert_op = control_flow_ops.Assert(condition, [
+ "Actual shape greater than padded shape: ", value.dense_shape,
+ padded_shape
+ ])
+ with ops.control_dependencies([assert_op]):
+ return math_ops.maximum(state, value.dense_shape)
+
+ def finalize_fn(state):
+ return state
+
+ # Compute the padded shape.
+ max_reducer = grouping.Reducer(max_init_fn, max_reduce_fn, finalize_fn)
+ padded_shape = get_single_element.get_single_element(
+ dataset.apply(grouping.group_by_reducer(key_fn, max_reducer)))
+
+ def batch_init_fn(_):
+ indices_shape = array_ops.concat([[0], [array_ops.size(padded_shape) + 1]],
+ 0)
+ return sparse_tensor.SparseTensor(
+ indices=gen_array_ops.empty(indices_shape, dtype=dtypes.int64),
+ values=constant_op.constant([], shape=[0], dtype=dataset.output_types),
+ dense_shape=array_ops.concat(
+ [np.array([0], dtype=np.int64), padded_shape], 0))
+
+ def batch_reduce_fn(state, value):
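+    # Expand each value's dense shape to `padded_shape`, prepend a unit batch
+    # dimension, and concatenate it onto the accumulated batch.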
+ padded_value = sparse_tensor.SparseTensor(
+ indices=value.indices, values=value.values, dense_shape=padded_shape)
+ reshaped_value = sparse_ops.sparse_reshape(
+ padded_value,
+ array_ops.concat(
+ [np.array([1], dtype=np.int64), padded_value.dense_shape], 0))
+ return sparse_ops.sparse_concat(0, [state, reshaped_value])
+
+ reducer = grouping.Reducer(batch_init_fn, batch_reduce_fn, finalize_fn)
+ return get_single_element.get_single_element(
+ dataset.apply(grouping.group_by_reducer(key_fn, reducer)))
+
+
class _UnbatchDataset(dataset_ops.Dataset):
"""A dataset that splits the elements of its input into multiple elements."""
@@ -175,7 +439,7 @@ def unbatch():
return _apply_fn
-def filter_irregular_batches(batch_size):
+def _filter_irregular_batches(batch_size):
"""Transformation that filters out batches that are not of size batch_size."""
def _apply_fn(dataset):
@@ -254,7 +518,7 @@ def batch_and_drop_remainder(batch_size):
# TODO(jsimsa): Switch to using `batch(..., drop_remainder=True)` any time
# after 6/30/2018.
batched = dataset.batch(batch_size)
- return filter_irregular_batches(batch_size)(batched)
+ return _filter_irregular_batches(batch_size)(batched)
return _apply_fn
@@ -293,7 +557,7 @@ def padded_batch_and_drop_remainder(batch_size,
# any time after 6/30/2018.
batched = dataset.padded_batch(
batch_size, padded_shapes=padded_shapes, padding_values=padding_values)
- return filter_irregular_batches(batch_size)(batched)
+ return _filter_irregular_batches(batch_size)(batched)
return _apply_fn
diff --git a/tensorflow/contrib/data/python/ops/grouping.py b/tensorflow/contrib/data/python/ops/grouping.py
index ca9540bf13..bd8d398c58 100644
--- a/tensorflow/contrib/data/python/ops/grouping.py
+++ b/tensorflow/contrib/data/python/ops/grouping.py
@@ -149,9 +149,9 @@ def bucket_by_sequence_length(element_length_func,
@{tf.data.Dataset.padded_batch}. Defaults to padding with 0.
pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown
size to maximum length in batch. If `True`, will pad dimensions with
- unknown size to bucket boundary, and caller must ensure that the source
- `Dataset` does not contain any elements with length longer than
- `max(bucket_boundaries)`.
+ unknown size to bucket boundary minus 1 (i.e., the maximum length in each
+ bucket), and caller must ensure that the source `Dataset` does not contain
+ any elements with length longer than `max(bucket_boundaries)`.
Returns:
A `Dataset` transformation function, which can be passed to
@@ -203,7 +203,7 @@ def bucket_by_sequence_length(element_length_func,
none_filler = None
if pad_to_bucket_boundary:
err_msg = ("When pad_to_bucket_boundary=True, elements must have "
- "length <= max(bucket_boundaries).")
+ "length < max(bucket_boundaries).")
check = check_ops.assert_less(
bucket_id,
constant_op.constant(len(bucket_batch_sizes) - 1,
@@ -213,7 +213,7 @@ def bucket_by_sequence_length(element_length_func,
boundaries = constant_op.constant(bucket_boundaries,
dtype=dtypes.int64)
bucket_boundary = boundaries[bucket_id]
- none_filler = bucket_boundary
+ none_filler = bucket_boundary - 1
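+        # A bucket bounded by `boundaries[bucket_id]` only admits elements of
+        # length at most `bucket_boundary - 1`, so pad unknown dimensions to
+        # that length.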
shapes = make_padded_shapes(
padded_shapes or grouped_dataset.output_shapes,
none_filler=none_filler)
@@ -227,6 +227,50 @@ def bucket_by_sequence_length(element_length_func,
return _apply_fn
+def _map_x_dataset(map_func):
+ """A transformation that maps `map_func` across its input.
+
+ This transformation is similar to `tf.data.Dataset.map`, but in addition to
+ supporting dense and sparse tensor inputs, it also supports dataset inputs.
+
+ Args:
+    map_func: A function mapping a nested structure of tensors and/or datasets
+      (having shapes and types defined by the input dataset's `output_shapes`
+      and `output_types`) to another nested structure of tensors and/or
+      datasets.
+
+ Returns:
+ Dataset: A `Dataset`.
+ """
+
+ def _apply_fn(dataset):
+ """Function from `Dataset` to `Dataset` that applies the transformation."""
+ return _MapXDataset(dataset, map_func)
+
+ return _apply_fn
+
+
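+# Example (a minimal sketch): unlike `tf.data.Dataset.map`, `_map_x_dataset`
+# accepts functions over the nested *dataset* windows produced by
+# `window_dataset`, e.g. `batching.batch_window` from batching.py:
+#
+#   dataset = dataset.apply(window_dataset(5)).apply(
+#       _map_x_dataset(batching.batch_window))
+
+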
+def window_dataset(window_size):
+ """A transformation that creates window datasets from the input dataset.
+
+ The resulting datasets will contain `window_size` elements (or
+ `N % window_size` for the last dataset if `window_size` does not divide the
+ number of input elements `N` evenly).
+
+ Args:
+ window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
+ consecutive elements of the input dataset to combine into a window.
+
+ Returns:
+ Dataset: A `Dataset`.
+ """
+
+ def _apply_fn(dataset):
+ return _WindowDataset(dataset, window_size)
+
+ return _apply_fn
+
+
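+# Example (a minimal sketch): with 7 input elements and `window_size=3`, the
+# resulting windows contain 3, 3, and 1 elements respectively:
+#
+#   ds = dataset_ops.Dataset.range(7).apply(window_dataset(3))
+#   # ds yields three nested datasets: {0, 1, 2}, {3, 4, 5}, and {6}.
+
+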
class _GroupByReducerDataset(dataset_ops.Dataset):
"""A `Dataset` that groups its input and performs a reduction."""
@@ -468,3 +512,85 @@ class Reducer(object):
@property
def finalize_func(self):
return self._finalize_func
+
+
+class _MapXDataset(dataset_ops.Dataset):
+ """A `Dataset` that maps a function over elements in its input."""
+
+ def __init__(self, input_dataset, map_func):
+ """See `map_x_dataset()` for details."""
+ super(_MapXDataset, self).__init__()
+ self._input_dataset = input_dataset
+
+ wrapped_func = dataset_ops.StructuredFunctionWrapper(
+ map_func,
+ "tf.contrib.data.map_x_dataset()",
+ input_dataset,
+ experimental_nested_dataset_support=True)
+ self._output_classes = wrapped_func.output_classes
+ self._output_shapes = wrapped_func.output_shapes
+ self._output_types = wrapped_func.output_types
+ self._map_func = wrapped_func.function
+
+ def _as_variant_tensor(self):
+ input_t = self._input_dataset._as_variant_tensor() # pylint: disable=protected-access
+ return gen_dataset_ops.map_dataset(
+ input_t,
+ self._map_func.captured_inputs,
+ f=self._map_func,
+ **dataset_ops.flat_structure(self))
+
+ @property
+ def output_classes(self):
+ return self._output_classes
+
+ @property
+ def output_shapes(self):
+ return self._output_shapes
+
+ @property
+ def output_types(self):
+ return self._output_types
+
+
+class _WindowDataset(dataset_ops.Dataset):
+ """A dataset that creates window datasets from the input elements."""
+
+ def __init__(self, input_dataset, window_size):
+ """See `window_dataset()` for more details."""
+ super(_WindowDataset, self).__init__()
+ self._input_dataset = input_dataset
+ self._window_size = ops.convert_to_tensor(
+ window_size, dtype=dtypes.int64, name="window_size")
+ self._output_classes = nest.pack_sequence_as(
+ input_dataset.output_classes,
+ [
+ dataset_ops._NestedDatasetComponent( # pylint: disable=protected-access
+ output_classes=output_class,
+ output_shapes=output_shape,
+ output_types=output_type)
+ for output_class, output_shape, output_type in zip(
+ nest.flatten(input_dataset.output_classes),
+ nest.flatten(input_dataset.output_shapes),
+ nest.flatten(input_dataset.output_types))
+ ])
+ self._output_shapes = self._output_classes
+ self._output_types = self._output_classes
+
+ def _as_variant_tensor(self):
+ return gen_dataset_ops.window_dataset(
+ self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
+ self._window_size,
+ **dataset_ops.flat_structure(self))
+
+ @property
+ def output_classes(self):
+ return self._output_classes
+
+ @property
+ def output_shapes(self):
+ return self._output_shapes
+
+ @property
+ def output_types(self):
+ return self._output_types
diff --git a/tensorflow/contrib/data/python/ops/prefetching_ops.py b/tensorflow/contrib/data/python/ops/prefetching_ops.py
index e4c9f8b58a..50212d3b52 100644
--- a/tensorflow/contrib/data/python/ops/prefetching_ops.py
+++ b/tensorflow/contrib/data/python/ops/prefetching_ops.py
@@ -26,21 +26,42 @@ from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.eager import context
+from tensorflow.python.framework import device as framework_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops as core_gen_dataset_ops
+from tensorflow.python.ops import resource_variable_ops
-# TODO(rohanj): Add a python class that constructs resource in the __init__
-# method and provides a get_next() that calls the prefetch op.
def function_buffering_resource(string_arg,
target_device,
f,
buffer_size,
+ output_types,
container="",
shared_name=None,
name=None):
+ """Creates a FunctionBufferingResource.
+
+ A FunctionBufferingResource fills up a buffer by calling a function `f` on
+ `target_device`. `f` should take in only a single string argument as input.
+
+ Args:
+ string_arg: The single string argument to the function.
+ target_device: The device to run `f` on.
+ f: The function to be executed.
+ buffer_size: Size of the buffer to be populated.
+ output_types: The output types generated by the function.
+ container: (Optional) string. Defaults to "".
+ shared_name: (Optional) string.
+ name: (Optional) string to name the op.
+
+ Returns:
+ Handle to a FunctionBufferingResource.
+ """
if shared_name is None:
shared_name = ""
return gen_dataset_ops.function_buffering_resource(
@@ -50,7 +71,8 @@ def function_buffering_resource(string_arg,
f=f,
buffer_size=buffer_size,
container=container,
- name=name)
+ name=name,
+ output_types=output_types)
def function_buffering_resource_get_next(function_buffer_resource,
@@ -123,7 +145,10 @@ class _PrefetchToDeviceIterator(object):
target_device=iterator_device,
string_arg=input_iterator_handle,
buffer_size=buffer_size,
- shared_name=shared_name)
+ shared_name=shared_name,
+ output_types=nest.flatten(
+ sparse.as_dense_types(self._input_dataset.output_types,
+ self._input_dataset.output_classes)))
if not self._one_shot:
reset_op = function_buffering_resource_reset(self._buffering_resource)
@@ -212,6 +237,7 @@ class _PrefetchToDeviceEagerIterator(iterator_ops.EagerIterator):
with ops.device(device):
self._buffering_resource = function_buffering_resource(
f=_prefetch_fn,
+ output_types=self._flat_output_types,
target_device=gen_dataset_ops.iterator_get_device(self._resource),
string_arg=input_iterator_handle,
buffer_size=buffer_size,
@@ -323,3 +349,172 @@ def prefetch_to_device(device, buffer_size=None):
return _PrefetchToDeviceDataset(dataset, device, buffer_size)
return _apply_fn
+
+
+def copy_to_device(target_device, source_device="/cpu:0"):
+ """A transformation that copies dataset elements to the given `target_device`.
+
+ Args:
+ target_device: The name of a device to which elements will be copied.
+ source_device: The original device on which `input_dataset` will be placed.
+
+ Returns:
+ A `Dataset` transformation function, which can be passed to
+ @{tf.data.Dataset.apply}.
+ """
+
+ def _apply_fn(dataset):
+ return _CopyToDeviceDataset(
+ dataset, target_device=target_device, source_device=source_device)
+
+ return _apply_fn
+
+
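+# Example (a minimal sketch): stage elements onto a GPU. Note that for GPU
+# targets only initializable iterators are supported; see
+# `_CopyToDeviceDataset.make_one_shot_iterator` below.
+#
+#   dataset = dataset.apply(copy_to_device("/gpu:0"))
+#   iterator = dataset.make_initializable_iterator()
+
+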
+# TODO(rohanj): Use the _input_hostmem attr on the RemoteCall ops to indicate
+# all inputs to the Op are in host memory, thereby avoiding some unnecessary
+# Sends and Recvs.
+class _CopyToDeviceDataset(dataset_ops.Dataset):
+ """A `Dataset` that copies elements to another device."""
+
+ def __init__(self, input_dataset, target_device, source_device="/cpu:0"):
+ """Constructs a _CopyToDeviceDataset.
+
+ Args:
+      input_dataset: The `Dataset` to be copied.
+      target_device: The name of the device to which elements will be copied.
+      source_device: The device on which `input_dataset` is placed.
+ """
+ self._input_dataset = input_dataset
+ self._target_device = target_device
+ spec = framework_device.DeviceSpec().from_string(self._target_device)
+ self._is_gpu_target = (spec.device_type == "GPU")
+ self._source_device_string = source_device
+ self._source_device = ops.convert_to_tensor(source_device)
+
+ self._flat_output_shapes = nest.flatten(
+ sparse.as_dense_shapes(self._input_dataset.output_shapes,
+ self._input_dataset.output_classes))
+ self._flat_output_types = nest.flatten(
+ sparse.as_dense_types(self._input_dataset.output_types,
+ self._input_dataset.output_classes))
+
+ @function.Defun()
+ def _init_func():
+ """Creates an iterator for the input dataset.
+
+ Returns:
+ A `string` tensor that encapsulates the iterator created.
+ """
+ # pylint: disable=protected-access
+ ds_variant = self._input_dataset._as_variant_tensor()
+ resource = core_gen_dataset_ops.anonymous_iterator(
+ output_types=self._flat_output_types,
+ output_shapes=self._flat_output_shapes)
+ with ops.control_dependencies(
+ [core_gen_dataset_ops.make_iterator(ds_variant, resource)]):
+ return core_gen_dataset_ops.iterator_to_string_handle(resource)
+
+ @function.Defun()
+ def _remote_init_func():
+ return functional_ops.remote_call(
+ target=self._source_device,
+ args=_init_func.captured_inputs,
+ Tout=[dtypes.string],
+ f=_init_func)
+
+ self._init_func = _remote_init_func
+ self._init_captured_args = _remote_init_func.captured_inputs
+
+ @function.Defun(dtypes.string)
+ def _next_func(string_handle):
+ """Calls get_next for created iterator.
+
+ Args:
+        string_handle: An iterator string handle created by _init_func.
+
+      Returns:
+        The elements generated from `input_dataset`.
+ """
+ with ops.device(self._source_device_string):
+ iterator = iterator_ops.Iterator.from_string_handle(
+ string_handle, self.output_types, self.output_shapes,
+ self.output_classes)
+ ret = iterator.get_next()
+ return nest.flatten(sparse.serialize_sparse_tensors(ret))
+
+ @function.Defun(dtypes.string)
+ def _remote_next_func(string_handle):
+ return functional_ops.remote_call(
+ target=self._source_device,
+ args=[string_handle] + _next_func.captured_inputs,
+ Tout=self._flat_output_types,
+ f=_next_func)
+
+ self._next_func = _remote_next_func
+ self._next_captured_args = _remote_next_func.captured_inputs
+
+ @function.Defun(dtypes.string)
+ def _finalize_func(string_handle):
+ """Destroys the iterator resource created.
+
+ Args:
+        string_handle: An iterator string handle created by _init_func.
+
+      Returns:
+        A `Tensor` constant 0.
+ """
+ iterator_resource = core_gen_dataset_ops.iterator_from_string_handle_v2(
+ string_handle,
+ output_types=self._flat_output_types,
+ output_shapes=self._flat_output_shapes)
+ with ops.control_dependencies([
+ resource_variable_ops.destroy_resource_op(
+ iterator_resource, ignore_lookup_error=True)]):
+ return array_ops.constant(0, dtypes.int64)
+
+ @function.Defun(dtypes.string)
+ def _remote_finalize_func(string_handle):
+ return functional_ops.remote_call(
+ target=self._source_device,
+ args=[string_handle] + _finalize_func.captured_inputs,
+ Tout=[dtypes.int64],
+ f=_finalize_func)
+
+ self._finalize_func = _remote_finalize_func
+ self._finalize_captured_args = _remote_finalize_func.captured_inputs
+    # pylint: enable=protected-access
+
+  # The one_shot_iterator implementation needs a zero-arg _make_dataset
+  # function that captures all the inputs required to create the dataset.
+  # Since some of the GeneratorDataset inputs are strings, which cannot be
+  # placed on a GPU, this fails for the GPU case; it is therefore disabled
+  # for GPU targets.
+ def make_one_shot_iterator(self):
+ if self._is_gpu_target:
+ raise ValueError("Cannot create a one shot iterator when using "
+ "`tf.contrib.data.copy_to_device()` on GPU. Please use "
+ "`Dataset.make_initializable_iterator()` instead.")
+ else:
+ return super(_CopyToDeviceDataset, self).make_one_shot_iterator()
+
+ def _as_variant_tensor(self):
+ with ops.device(self._target_device):
+ return core_gen_dataset_ops.generator_dataset(
+ self._init_captured_args,
+ self._next_captured_args,
+ self._finalize_captured_args,
+ init_func=self._init_func,
+ next_func=self._next_func,
+ finalize_func=self._finalize_func,
+ output_types=self._flat_output_types,
+ output_shapes=self._flat_output_shapes)
+
+ @property
+ def output_types(self):
+ return self._input_dataset.output_types
+
+ @property
+ def output_shapes(self):
+ return self._input_dataset.output_shapes
+
+ @property
+ def output_classes(self):
+ return self._input_dataset.output_classes
diff --git a/tensorflow/contrib/data/python/ops/readers.py b/tensorflow/contrib/data/python/ops/readers.py
index 83095c7ba1..9373e37f5f 100644
--- a/tensorflow/contrib/data/python/ops/readers.py
+++ b/tensorflow/contrib/data/python/ops/readers.py
@@ -540,11 +540,11 @@ class CsvDataset(dataset_ops.Dataset):
The expected output of its iterations is:
```python
- next = dataset.make_one_shot_iterator().get_next()
+ next_element = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
while True:
try:
- print(sess.run(nxt))
+ print(sess.run(next_element))
except tf.errors.OutOfRangeError:
break
diff --git a/tensorflow/contrib/data/python/ops/sliding.py b/tensorflow/contrib/data/python/ops/sliding.py
index 3f3c5ca17c..e9dd74530a 100644
--- a/tensorflow/contrib/data/python/ops/sliding.py
+++ b/tensorflow/contrib/data/python/ops/sliding.py
@@ -23,25 +23,29 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
+from tensorflow.python.util import deprecation
class _SlideDataset(dataset_ops.Dataset):
"""A `Dataset` that passes a sliding window over its input."""
- def __init__(self, input_dataset, window_size, stride=1):
+ def __init__(self, input_dataset, window_size, window_shift, window_stride):
"""See `sliding_window_batch` for details."""
super(_SlideDataset, self).__init__()
self._input_dataset = input_dataset
self._window_size = ops.convert_to_tensor(
- window_size, dtype=dtypes.int64, name="window_size")
- self._stride = ops.convert_to_tensor(
- stride, dtype=dtypes.int64, name="stride")
+        window_size, dtype=dtypes.int64, name="window_size")
+ self._window_stride = ops.convert_to_tensor(
+ window_stride, dtype=dtypes.int64, name="window_stride")
+ self._window_shift = ops.convert_to_tensor(
+ window_shift, dtype=dtypes.int64, name="window_shift")
def _as_variant_tensor(self):
return gen_dataset_ops.slide_dataset(
self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
window_size=self._window_size,
- stride=self._stride,
+ window_shift=self._window_shift,
+ window_stride=self._window_stride,
**dataset_ops.flat_structure(self))
@property
@@ -61,38 +65,63 @@ class _SlideDataset(dataset_ops.Dataset):
return self._input_dataset.output_types
-def sliding_window_batch(window_size, stride=1):
- """A sliding window with size of `window_size` and step of `stride`.
+@deprecation.deprecated_args(
+ None, "stride is deprecated, use window_shift instead", "stride")
+def sliding_window_batch(window_size,
+ stride=None,
+ window_shift=None,
+ window_stride=1):
+ """A sliding window over a dataset.
- This transformation passes a sliding window over this dataset. The
- window size is `window_size` and step size is `stride`. If the left
- elements cannot fill up the sliding window, this transformation will
- drop the final smaller element. For example:
+ This transformation passes a sliding window over this dataset. The window size
+ is `window_size`, the stride of the input elements is `window_stride`, and the
+  shift between consecutive windows is `window_shift`. If the remaining
+  elements cannot fill up the sliding window, this transformation will drop
+  the final, smaller window. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { [1], [2], [3], [4], [5], [6] }
- a.apply(tf.contrib.data.sliding_window_batch(window_size=3, stride=2)) ==
- {
- [[1], [2], [3]],
- [[3], [4], [5]],
- }
+ a.apply(sliding_window_batch(window_size=3)) ==
+ { [[1], [2], [3]], [[2], [3], [4]], [[3], [4], [5]], [[4], [5], [6]] }
+
+ a.apply(sliding_window_batch(window_size=3, window_shift=2)) ==
+ { [[1], [2], [3]], [[3], [4], [5]] }
+
+ a.apply(sliding_window_batch(window_size=3, window_stride=2)) ==
+ { [[1], [3], [5]], [[2], [4], [6]] }
```
Args:
window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
- elements in the sliding window.
+ elements in the sliding window. It must be positive.
stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
- steps moving the sliding window forward for one iteration. The default
- is `1`. It must be positive.
+ forward shift of the sliding window in each iteration. The default is `1`.
+ It must be positive. Deprecated alias for `window_shift`.
+ window_shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
+ forward shift of the sliding window in each iteration. The default is `1`.
+ It must be positive.
+ window_stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
+ stride of the input elements in the sliding window. The default is `1`.
+ It must be positive.
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
+
+ Raises:
+ ValueError: if invalid arguments are provided.
"""
+ if stride is None and window_shift is None:
+ window_shift = 1
+ elif stride is not None and window_shift is None:
+ window_shift = stride
+ elif stride is not None and window_shift is not None:
+ raise ValueError("Cannot specify both `stride` and `window_shift`")
+
def _apply_fn(dataset):
- return _SlideDataset(dataset, window_size, stride)
+ return _SlideDataset(dataset, window_size, window_shift, window_stride)
return _apply_fn
diff --git a/tensorflow/contrib/distribute/BUILD b/tensorflow/contrib/distribute/BUILD
index 74b2cd90a1..1126f76f58 100644
--- a/tensorflow/contrib/distribute/BUILD
+++ b/tensorflow/contrib/distribute/BUILD
@@ -30,6 +30,7 @@ py_library(
"//tensorflow/contrib/distribute/python:monitor",
"//tensorflow/contrib/distribute/python:one_device_strategy",
"//tensorflow/contrib/distribute/python:step_fn",
+ "//tensorflow/contrib/distribute/python:tpu_strategy",
"//tensorflow/python:training",
"//tensorflow/python:util",
],
diff --git a/tensorflow/contrib/distribute/__init__.py b/tensorflow/contrib/distribute/__init__.py
index 76711baf3a..2e2c3be853 100644
--- a/tensorflow/contrib/distribute/__init__.py
+++ b/tensorflow/contrib/distribute/__init__.py
@@ -24,6 +24,7 @@ from tensorflow.contrib.distribute.python.mirrored_strategy import MirroredStrat
from tensorflow.contrib.distribute.python.monitor import Monitor
from tensorflow.contrib.distribute.python.one_device_strategy import OneDeviceStrategy
from tensorflow.contrib.distribute.python.step_fn import *
+from tensorflow.contrib.distribute.python.tpu_strategy import TPUStrategy
from tensorflow.python.training.distribute import *
from tensorflow.python.util.all_util import remove_undocumented
@@ -41,6 +42,7 @@ _allowed_symbols = [
'StandardInputStep',
'StandardSingleLossStep',
'TowerContext',
+ 'TPUStrategy',
'get_cross_tower_context',
'get_distribution_strategy',
'get_loss_reduction',
diff --git a/tensorflow/contrib/distribute/python/BUILD b/tensorflow/contrib/distribute/python/BUILD
index eba0dd0ea3..40dbfa3dd2 100644
--- a/tensorflow/contrib/distribute/python/BUILD
+++ b/tensorflow/contrib/distribute/python/BUILD
@@ -587,6 +587,7 @@ cuda_py_test(
],
tags = [
"multi_and_single_gpu",
+ "no_windows_gpu",
"notsan",
],
)
diff --git a/tensorflow/contrib/distribute/python/cross_tower_ops.py b/tensorflow/contrib/distribute/python/cross_tower_ops.py
index 1009c3c012..b0baf0dad1 100644
--- a/tensorflow/contrib/distribute/python/cross_tower_ops.py
+++ b/tensorflow/contrib/distribute/python/cross_tower_ops.py
@@ -28,11 +28,12 @@ from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import device_util
-def _validate_destinations(destinations):
+def validate_destinations(destinations):
if not isinstance(destinations,
(value_lib.DistributedValues, six.string_types, list)):
raise ValueError("destinations must be one of a `DistributedValues` object,"
@@ -55,7 +56,7 @@ def _validate_value_destination_pairs(value_destination_pairs):
# TODO(yuefengz): consider calling this function in the caller of CrossTowerOps.
-def _get_devices_from(destinations):
+def get_devices_from(destinations):
if isinstance(destinations, value_lib.DistributedValues):
return list(destinations.devices)
elif isinstance(destinations, six.string_types):
@@ -65,7 +66,7 @@ def _get_devices_from(destinations):
def _devices_match(left, right):
- return set(_get_devices_from(left)) == set(_get_devices_from(right))
+ return set(get_devices_from(left)) == set(get_devices_from(right))
def _all_devices_match(value_destination_pairs):
@@ -80,7 +81,7 @@ def _all_devices_match(value_destination_pairs):
def _simple_broadcast(value, destinations):
index = {}
- devices = _get_devices_from(destinations)
+ devices = get_devices_from(destinations)
for d in devices:
index[d] = cross_tower_utils.copy_tensor_or_indexed_slices_to_device(
value, d)
@@ -88,7 +89,7 @@ def _simple_broadcast(value, destinations):
def _simple_reduce(per_device_value, reduce_to_device, accumulation_fn,
- method_string):
+ aggregation):
# pylint: disable=g-missing-docstring
all_values = []
count = 0
@@ -112,11 +113,12 @@ def _simple_reduce(per_device_value, reduce_to_device, accumulation_fn,
with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
reduced = cross_tower_utils.aggregate_tensors_or_indexed_slices(
all_values, accumulation_fn)
- if method_string == "mean":
+ if aggregation == vs.VariableAggregation.MEAN:
reduced = cross_tower_utils.divide_by_n_tensors_or_indexed_slices(
reduced, count)
- elif method_string != "sum":
- raise ValueError("`method_string` must be 'sum' or 'mean'")
+ elif aggregation != vs.VariableAggregation.SUM:
+ raise ValueError("`aggregation` must be VariableAggregation.SUM "
+ "or VariableAggregation.MEAN.")
return reduced
@@ -126,14 +128,15 @@ class CrossTowerOps(object):
def __init__(self):
pass
- def reduce(self, method_string, per_device_value, destinations=None):
+ def reduce(self, aggregation, per_device_value, destinations=None):
"""Reduce `per_device_value` to `destinations`.
- It runs the reduction operation defined by `method_string` and put the
+ It runs the reduction operation defined by `aggregation` and put the
result on `destinations`.
Args:
- method_string: either 'sum' or 'mean' specifying the reduction method.
+ aggregation: Indicates how a variable will be aggregated. Accepted values
+ are @{tf.VariableAggregation.SUM}, @{tf.VariableAggregation.MEAN}.
per_device_value: a PerDevice object.
destinations: the reduction destinations.
@@ -146,17 +149,18 @@ class CrossTowerOps(object):
if not isinstance(per_device_value, value_lib.PerDevice):
raise ValueError("`per_device_value` must be a `PerDevice` object.")
if destinations is not None:
- _validate_destinations(destinations)
- return self._reduce(method_string, per_device_value, destinations)
+ validate_destinations(destinations)
+ return self._reduce(aggregation, per_device_value, destinations)
- def batch_reduce(self, method_string, value_destination_pairs):
+ def batch_reduce(self, aggregation, value_destination_pairs):
"""Reduce PerDevice objects in a batch.
Reduce each first element in `value_destination_pairs` to each second
element which indicates the destinations.
Args:
- method_string: either 'sum' or 'mean' specifying the reduction method.
+ aggregation: Indicates how a variable will be aggregated. Accepted values
+ are @{tf.VariableAggregation.SUM}, @{tf.VariableAggregation.MEAN}.
value_destination_pairs: a list or a tuple of tuples of PerDevice objects
and destinations. If a destination is None, then the destinations
are set to match the devices of the input PerDevice object.
@@ -173,9 +177,9 @@ class CrossTowerOps(object):
"tuples of PerDevice objects and destinations")
for _, d in value_destination_pairs:
if d is not None:
- _validate_destinations(d)
+ validate_destinations(d)
- return self._batch_reduce(method_string, value_destination_pairs)
+ return self._batch_reduce(aggregation, value_destination_pairs)
def broadcast(self, tensor, destinations):
"""Broadcast the `tensor` to destinations.
@@ -187,14 +191,14 @@ class CrossTowerOps(object):
Returns:
a Mirrored object.
"""
- _validate_destinations(destinations)
+ validate_destinations(destinations)
return self._broadcast(tensor, destinations)
- def _reduce(self, method_string, per_device_value, destinations):
+ def _reduce(self, aggregation, per_device_value, destinations):
raise NotImplementedError(
"_reduce method must be implemented in descendants.")
- def _batch_reduce(self, method_string, value_destination_pairs):
+ def _batch_reduce(self, aggregation, value_destination_pairs):
raise NotImplementedError(
"_batch_reduce method must be implemented in descendants.")
@@ -220,16 +224,18 @@ class ReductionToOneDeviceCrossTowerOps(CrossTowerOps):
self.accumulation_fn = accumulation_fn
super(ReductionToOneDeviceCrossTowerOps, self).__init__()
- def _reduce(self, method_string, per_device_value, destinations):
- devices = _get_devices_from(destinations or per_device_value)
+ def _reduce(self, aggregation, per_device_value, destinations):
+ devices = get_devices_from(destinations or per_device_value)
reduce_to_device = self.reduce_to_device or devices[0]
reduced = _simple_reduce(per_device_value, reduce_to_device,
- self.accumulation_fn, method_string)
+ self.accumulation_fn, aggregation)
return self.broadcast(reduced, devices)
- def _batch_reduce(self, method_string, value_destination_pairs):
- return [self._reduce(method_string, t, destinations=v)
- for t, v in value_destination_pairs]
+ def _batch_reduce(self, aggregation, value_destination_pairs):
+ return [
+ self._reduce(aggregation, t, destinations=v)
+ for t, v in value_destination_pairs
+ ]
def _group_value_by_device(per_device_values):
@@ -260,18 +266,19 @@ def _group_value_by_device(per_device_values):
return grouped
-def _ungroup_and_make_mirrored(grouped_reduced, destinations, method_string):
+def _ungroup_and_make_mirrored(grouped_reduced, destinations, aggregation):
"""Ungroup results from all-reduce and make Mirrored objects.
Each all-reduce result will be divided by the number of destinations before
- Mirrored objects are created if method_string is "mean".
+  Mirrored objects are created if aggregation is VariableAggregation.MEAN.
Args:
grouped_reduced: a list of lists, each sublist has components for each
device, paired with a None. It is the result from
cross_tower_utils.aggregate_gradients_using*.
destinations: a list of device strings for returned Mirrored objects.
- method_string: "mean" or "sum".
+ aggregation: Indicates how a variable will be aggregated. Accepted values
+ are @{tf.VariableAggregation.SUM}, @{tf.VariableAggregation.MEAN}.
Returns:
a list of Mirrored objects.
@@ -279,7 +286,7 @@ def _ungroup_and_make_mirrored(grouped_reduced, destinations, method_string):
index = [{} for _ in range(len(grouped_reduced[0]))]
for d, per_device_reduced in enumerate(grouped_reduced):
for i, (v, _) in enumerate(per_device_reduced):
- if method_string == "mean":
+ if aggregation == vs.VariableAggregation.MEAN:
index[i][destinations[d]] = v / len(destinations)
else:
index[i][destinations[d]] = v
@@ -488,32 +495,32 @@ class AllReduceCrossTowerOps(CrossTowerOps):
self._agg_small_grads_max_group = agg_small_grads_max_group
super(AllReduceCrossTowerOps, self).__init__()
- def _reduce(self, method_string, per_device_value, destinations):
+ def _reduce(self, aggregation, per_device_value, destinations):
contains_indexed_slices = cross_tower_utils.contains_indexed_slices(
per_device_value)
if ((destinations is None or _devices_match(per_device_value, destinations))
and not context.executing_eagerly()
and not contains_indexed_slices):
- return self._batch_all_reduce(method_string, [per_device_value])[0]
+ return self._batch_all_reduce(aggregation, [per_device_value])[0]
else:
if contains_indexed_slices:
logging.log_first_n(
logging.WARN,
"Efficient allreduce is not supported for IndexedSlices.", 10)
- devices = _get_devices_from(destinations or per_device_value)
+ devices = get_devices_from(destinations or per_device_value)
reduce_to_device = devices[0]
reduced = _simple_reduce(per_device_value, reduce_to_device,
- math_ops.add_n, method_string)
+ math_ops.add_n, aggregation)
return self.broadcast(reduced, devices)
- def _batch_reduce(self, method_string, value_destination_pairs):
+ def _batch_reduce(self, aggregation, value_destination_pairs):
all_devices_match = _all_devices_match(value_destination_pairs)
contains_indexed_slices = cross_tower_utils.contains_indexed_slices(
value_destination_pairs)
if (all_devices_match and not context.executing_eagerly()
and not contains_indexed_slices):
- return self._batch_all_reduce(method_string,
+ return self._batch_all_reduce(aggregation,
[v[0] for v in value_destination_pairs])
else:
if not all_devices_match:
@@ -521,11 +528,11 @@ class AllReduceCrossTowerOps(CrossTowerOps):
"destinations are different.")
return [
- self._reduce(method_string, t, destinations=v)
+ self._reduce(aggregation, t, destinations=v)
for t, v in value_destination_pairs
]
- def _batch_all_reduce(self, method_string, per_device_values):
+ def _batch_all_reduce(self, aggregation, per_device_values):
"""All reduce algorithm in a batch."""
logging.info(
"batch_all_reduce invoked for batches size = %d with "
@@ -556,7 +563,7 @@ class AllReduceCrossTowerOps(CrossTowerOps):
reduced = _unpack_tensors(reduced, tensor_packer)
return _ungroup_and_make_mirrored(reduced, per_device_values[0].devices,
- method_string)
+ aggregation)
AllReduceSpecTuple = collections.namedtuple("AllReduceSpecTuple",
@@ -635,7 +642,7 @@ class MultiWorkerAllReduce(AllReduceCrossTowerOps):
validate_and_complete_spec(spec) for spec in all_reduce_spec
]
- def _batch_all_reduce(self, method_string, per_device_values):
+ def _batch_all_reduce(self, aggregation, per_device_values):
"""All reduce algorithm in a batch."""
logging.info(
"distributed batch_all_reduce invoked for batches size = %d with "
@@ -682,7 +689,7 @@ class MultiWorkerAllReduce(AllReduceCrossTowerOps):
assert not remaining_grads
return _ungroup_and_make_mirrored(aggregated_grads, destinations,
- method_string)
+ aggregation)
_dgx1_links = [[1, 2, 3, 4], [0, 2, 3, 5], [0, 1, 3, 6], [0, 1, 2, 7],
diff --git a/tensorflow/contrib/distribute/python/cross_tower_ops_test.py b/tensorflow/contrib/distribute/python/cross_tower_ops_test.py
index fed5505d92..6a780ff60f 100644
--- a/tensorflow/contrib/distribute/python/cross_tower_ops_test.py
+++ b/tensorflow/contrib/distribute/python/cross_tower_ops_test.py
@@ -32,11 +32,12 @@ from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import device_util
def _make_per_device(values, devices):
- devices = cross_tower_ops_lib._get_devices_from(devices)
+ devices = cross_tower_ops_lib.get_devices_from(devices)
assert len(values) == len(devices)
index = {}
for d, v in zip(devices, values):
@@ -53,7 +54,7 @@ def _fake_mirrored(value, devices):
All components of the returned Mirrored have the same objects, which is not
true in reality.
"""
- devices = cross_tower_ops_lib._get_devices_from(devices)
+ devices = cross_tower_ops_lib.get_devices_from(devices)
return value_lib.Mirrored(
{d: v for d, v in zip(devices, [value] * len(devices))})
@@ -93,7 +94,7 @@ class CrossTowerOpsTestBase(test.TestCase, parameterized.TestCase):
self._assert_values_equal(l, r)
else:
self.assertEqual(type(left), type(right))
- self.assertEqual(left.devices, right.devices)
+ self.assertEqual(set(left.devices), set(right.devices))
if isinstance(list(left._index.values())[0], ops.IndexedSlices):
for (d, v) in left._index.items():
self._assert_indexed_slices_equal(v, right._index[d])
@@ -129,32 +130,45 @@ class CrossTowerOpsTestBase(test.TestCase, parameterized.TestCase):
# test reduce()
for destinations in all_destinations:
self._assert_values_equal(
- cross_tower_ops.reduce("mean", per_device, destinations=destinations),
+ cross_tower_ops.reduce(
+ vs.VariableAggregation.MEAN,
+ per_device,
+ destinations=destinations),
_fake_mirrored(mean, destinations or per_device))
self._assert_values_equal(
cross_tower_ops.reduce(
- "mean", per_device_2, destinations=destinations),
+ vs.VariableAggregation.MEAN,
+ per_device_2,
+ destinations=destinations),
_fake_mirrored(mean_2, destinations or per_device))
self._assert_values_equal(
- cross_tower_ops.reduce("sum", per_device, destinations=destinations),
+ cross_tower_ops.reduce(
+ vs.VariableAggregation.SUM, per_device,
+ destinations=destinations),
_fake_mirrored(mean * len(devices), destinations or per_device))
self._assert_values_equal(
cross_tower_ops.reduce(
- "sum", per_device_2, destinations=destinations),
+ vs.VariableAggregation.SUM,
+ per_device_2,
+ destinations=destinations),
_fake_mirrored(mean_2 * len(devices), destinations or per_device))
# test batch_reduce()
for d1, d2 in itertools.product(all_destinations, all_destinations):
self._assert_values_equal(
- cross_tower_ops.batch_reduce(
- "mean", [(per_device, d1), (per_device_2, d2)]),
- [_fake_mirrored(mean, d1 or per_device),
- _fake_mirrored(mean_2, d2 or per_device_2)])
+ cross_tower_ops.batch_reduce(vs.VariableAggregation.MEAN,
+ [(per_device, d1), (per_device_2, d2)]),
+ [
+ _fake_mirrored(mean, d1 or per_device),
+ _fake_mirrored(mean_2, d2 or per_device_2)
+ ])
self._assert_values_equal(
- cross_tower_ops.batch_reduce(
- "sum", [(per_device, d1), (per_device_2, d2)]),
- [_fake_mirrored(mean * len(devices), d1 or per_device),
- _fake_mirrored(mean_2 * len(devices), d2 or per_device_2)])
+ cross_tower_ops.batch_reduce(vs.VariableAggregation.SUM,
+ [(per_device, d1), (per_device_2, d2)]),
+ [
+ _fake_mirrored(mean * len(devices), d1 or per_device),
+ _fake_mirrored(mean_2 * len(devices), d2 or per_device_2)
+ ])
# test broadcast()
for destinations in all_destinations:
@@ -255,8 +269,8 @@ class SingleWorkerCrossTowerOpsTest(CrossTowerOpsTestBase):
t0 = _make_indexed_slices([[1., 2.]], [1], [5, 2], devices[0])
t1 = _make_indexed_slices([[3., 4.], [5., 6.]], [1, 3], [5, 2], devices[1])
per_device = value_lib.PerDevice({devices[0]: t0, devices[1]: t1})
- result = cross_tower_ops_lib._simple_reduce(per_device, devices[0],
- math_ops.add_n, "sum")
+ result = cross_tower_ops_lib._simple_reduce(
+ per_device, devices[0], math_ops.add_n, vs.VariableAggregation.SUM)
# Test that the result is semantically equal to both the concatenated
# IndexedSlices with and without duplicate indices.
@@ -267,21 +281,22 @@ class SingleWorkerCrossTowerOpsTest(CrossTowerOpsTestBase):
self._assert_indexed_slices_equal(total_with_dups, result)
self._assert_indexed_slices_equal(total_without_dups, result)
- @combinations.generate(combinations.combine(
- cross_tower_ops_instance=[
- combinations.NamedObject(
- "ReductionToOneDeviceCrossTowerOps",
- cross_tower_ops_lib.ReductionToOneDeviceCrossTowerOps()),
- combinations.NamedObject(
- "AllReduceCrossTowerOps",
- cross_tower_ops_lib.AllReduceCrossTowerOps())
- ],
- method_string=["sum", "mean"],
- batch_reduce=[True, False],
- mode=["graph", "eager"],
- required_gpus=1))
- def testIndexedSlicesAllReduce(self, cross_tower_ops_instance,
- method_string, batch_reduce):
+ @combinations.generate(
+ combinations.combine(
+ cross_tower_ops_instance=[
+ combinations.NamedObject(
+ "ReductionToOneDeviceCrossTowerOps",
+ cross_tower_ops_lib.ReductionToOneDeviceCrossTowerOps()),
+ combinations.NamedObject(
+ "AllReduceCrossTowerOps",
+ cross_tower_ops_lib.AllReduceCrossTowerOps())
+ ],
+ aggregation=[vs.VariableAggregation.SUM, vs.VariableAggregation.MEAN],
+ batch_reduce=[True, False],
+ mode=["graph", "eager"],
+ required_gpus=1))
+ def testIndexedSlicesAllReduce(self, cross_tower_ops_instance, aggregation,
+ batch_reduce):
devices = ["/cpu:0", "/gpu:0"]
dense_shape = [5, 2]
t0 = _make_indexed_slices([[1., 2.]], [1], dense_shape, devices[0])
@@ -290,20 +305,19 @@ class SingleWorkerCrossTowerOpsTest(CrossTowerOpsTestBase):
per_device = value_lib.PerDevice({devices[0]: t0, devices[1]: t1})
if batch_reduce:
- result = cross_tower_ops_instance.batch_reduce(method_string,
+ result = cross_tower_ops_instance.batch_reduce(aggregation,
[(per_device, devices)])
else:
- result = cross_tower_ops_instance.reduce(method_string, per_device,
- devices)
+ result = cross_tower_ops_instance.reduce(aggregation, per_device, devices)
total_indices_with_dups = [1, 1, 3]
total_indices_without_dups = [1, 3]
- if method_string == "sum":
+ if aggregation == vs.VariableAggregation.SUM:
total_values_with_dups = [[1., 2.], [3., 4.], [5., 6.]]
total_values_without_dups = [[4., 6.], [5., 6.]]
else:
- assert method_string == "mean"
+ assert aggregation == vs.VariableAggregation.MEAN
total_values_with_dups = [[0.5, 1.], [1.5, 2.], [2.5, 3.]]
total_values_without_dups = [[2., 3.], [2.5, 3.]]
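The expected values above follow from simple arithmetic over the two IndexedSlices: device 0 contributes row [1., 2.] at index 1, and device 1 contributes [3., 4.] at index 1 and [5., 6.] at index 3, so summing the duplicate index 1 gives [4., 6.], and MEAN divides every row by the two devices. A quick pure-Python check:

    values = [[1., 2.], [3., 4.], [5., 6.]]      # rows at indices [1, 1, 3]
    mean_values = [[x / 2 for x in row] for row in values]
    assert mean_values == [[0.5, 1.], [1.5, 2.], [2.5, 3.]]
    merged = [a + b for a, b in zip(values[0], values[1])]
    assert merged == [4., 6.]                    # duplicate index 1 summed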
diff --git a/tensorflow/contrib/distribute/python/mirrored_strategy.py b/tensorflow/contrib/distribute/python/mirrored_strategy.py
index 98fea76b3d..dcbc6b0878 100644
--- a/tensorflow/contrib/distribute/python/mirrored_strategy.py
+++ b/tensorflow/contrib/distribute/python/mirrored_strategy.py
@@ -104,9 +104,36 @@ class MirroredStrategy(distribute_lib.DistributionStrategy):
colocate_with = kwargs.pop("colocate_with", None)
devices = self._get_devices_from(colocate_with)
- tower_local = kwargs.pop("tower_local_reduce_method", None)
- if tower_local is not None:
+ # Get synchronization value
+ synchronization = kwargs.get(
+ "synchronization", variable_scope.VariableSynchronization.ON_WRITE)
+ if synchronization == variable_scope.VariableSynchronization.NONE:
+ raise ValueError("`NONE` variable synchronization mode is not "
+ "supported with `Mirrored` distribution strategy. Please"
+ " change the `synchronization` for variable: " +
+ kwargs["name"])
+ elif synchronization == variable_scope.VariableSynchronization.ON_READ:
+ # Variables that are to be synced on read are tower local.
+ is_tower_local = True
kwargs["trainable"] = False
+ elif (synchronization == variable_scope.VariableSynchronization.ON_WRITE or
+ synchronization == variable_scope.VariableSynchronization.AUTO):
+ # `AUTO` synchronization for `MirroredStrategy` is `ON_WRITE`.
+ is_tower_local = False
+ else:
+ raise ValueError("Invalid variable synchronization mode: " +
+ synchronization + " for variable: " + kwargs["name"])
+
+ # Get aggregation value
+ aggregation = kwargs.pop("aggregation",
+ variable_scope.VariableAggregation.NONE)
+ if aggregation not in [
+ variable_scope.VariableAggregation.NONE,
+ variable_scope.VariableAggregation.SUM,
+ variable_scope.VariableAggregation.MEAN
+ ]:
+ raise ValueError("Invalid variable aggregation mode: " + aggregation +
+ " for variable: " + kwargs["name"])
# Ignore user-specified caching device, not needed for mirrored variables.
kwargs.pop("caching_device", None)
@@ -139,11 +166,11 @@ class MirroredStrategy(distribute_lib.DistributionStrategy):
assert not isinstance(v, values.DistributedVariable)
index[d] = v
- if tower_local is None:
- result = values.MirroredVariable(index, index[devices[0]])
+ if is_tower_local:
+ result = values.TowerLocalVariable(index, index[devices[0]],
+ aggregation)
else:
- result = values.TowerLocalVariable(
- index, index[devices[0]], tower_local)
+ result = values.MirroredVariable(index, index[devices[0]], aggregation)
if not context.executing_eagerly():
g = ops.get_default_graph()
@@ -308,16 +335,36 @@ class MirroredStrategy(distribute_lib.DistributionStrategy):
cross_tower_ops_lib.ReductionToOneDeviceCrossTowerOps())
return self._cross_tower_ops
- def _reduce(self, method_string, value, destinations):
- if len(self._devices) == 1 and not isinstance(value, values.PerDevice):
- value = values.PerDevice({self._devices[0]: value})
- assert isinstance(value, values.PerDevice)
+ def _reduce(self, aggregation, value, destinations):
+ assert not isinstance(value, values.Mirrored)
+ if not isinstance(value, values.PerDevice):
+ if value == 0:
+ return 0
+ if aggregation == variable_scope.VariableAggregation.MEAN:
+ return self._broadcast(value, destinations)
+
+ cross_tower_ops_lib.validate_destinations(destinations)
+ if len(self._devices) == 1:
+ if destinations:
+ # TODO(anjalisridhar): Move these methods to a device utility file?
+ devices = cross_tower_ops_lib.get_devices_from(destinations)
+ if len(devices) == 1:
+ with ops.device(devices[0]):
+ return array_ops.identity(value)
+ else:
+ value_updates = {}
+ for d in devices:
+ with ops.device(d):
+ value_updates[d] = array_ops.identity(value)
+ return values.Mirrored(value_updates)
+ raise ValueError("A non PerDevice value cannot be reduced with the given "
+ "aggregation.")
return self._get_cross_tower_ops().reduce(
- method_string, value, destinations=destinations)
+ aggregation, value, destinations=destinations)
- def _batch_reduce(self, method_string, value_destination_pairs):
- return self._get_cross_tower_ops().batch_reduce(method_string,
+ def _batch_reduce(self, aggregation, value_destination_pairs):
+ return self._get_cross_tower_ops().batch_reduce(aggregation,
value_destination_pairs)
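In short, the non-PerDevice paths added above are: a literal 0 short-circuits to 0, MEAN broadcasts the value as-is, SUM on a single-device strategy produces identity copies on the requested destinations, and anything else raises. A hedged sketch of the single-device SUM case (it mirrors testReduceToMultipleDestinations below):

    dist = mirrored_strategy.MirroredStrategy(["/device:GPU:0"])
    with dist.scope():
      reduced = dist.reduce(
          variable_scope.VariableAggregation.SUM, 1.0,
          destinations=["/device:CPU:0", "/device:GPU:0"])
      # reduced is a Mirrored value holding 1.0 on both destination devices.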
def _update(self, var, fn, *args, **kwargs):
diff --git a/tensorflow/contrib/distribute/python/mirrored_strategy_multigpu_test.py b/tensorflow/contrib/distribute/python/mirrored_strategy_multigpu_test.py
index 647cf953d7..6a14b833d2 100644
--- a/tensorflow/contrib/distribute/python/mirrored_strategy_multigpu_test.py
+++ b/tensorflow/contrib/distribute/python/mirrored_strategy_multigpu_test.py
@@ -32,12 +32,14 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribute as distribute_lib
+
GPU_TEST = "test_gpu" in sys.argv[0]
@@ -112,12 +114,35 @@ class MirroredTwoDeviceDistributionTest(strategy_test_lib.DistributionTestBase):
dist = self._get_distribution_strategy()
with dist.scope():
result = dist.call_for_each_tower(run_fn, dist.worker_device_index)
- reduced = dist.reduce("sum", result, destinations="/device:CPU:0")
+ reduced = dist.reduce(
+ variable_scope.VariableAggregation.SUM,
+ result,
+ destinations="/device:CPU:0")
unwrapped = dist.unwrap(reduced)
self.assertEqual(1, len(unwrapped))
expected = sum(range(len(dist.worker_devices)))
self.assertEqual(expected, self.evaluate(unwrapped[0]))
+ @test_util.run_in_graph_and_eager_modes()
+ def testReduceToMultipleDestinations(self):
+ if not GPU_TEST:
+ self.skipTest("Not GPU test")
+
+ devices = ["/device:GPU:0"]
+ if GPU_TEST:
+ self.assertGreater(context.num_gpus(), 0)
+ print(self.id().split(".")[-1], "devices:", ", ".join(devices))
+
+ dist = mirrored_strategy.MirroredStrategy(devices)
+ with dist.scope():
+ reduced = dist.reduce(
+ variable_scope.VariableAggregation.SUM,
+ 1.0,
+ destinations=["/device:CPU:0", "/device:GPU:0"])
+ unwrapped = dist.unwrap(reduced)
+ self.assertEqual(2, len(unwrapped))
+ self.assertEqual(1.0, self.evaluate(unwrapped[0]))
+
class MirroredStrategyVariableCreationTest(test.TestCase):
@@ -264,18 +289,68 @@ class MirroredStrategyVariableCreationTest(test.TestCase):
self.assertEquals("common/dense" + suffix + "/bias:0", bias.name)
@test_util.run_in_graph_and_eager_modes(config=config)
+ def testWithVariableAndVariableScope(self):
+ self._skip_eager_if_gpus_less_than(1)
+
+ def model_fn():
+ v0 = variable_scope.variable(1.0, name="var0", aggregation=None)
+ with variable_scope.variable_scope("common"):
+ v1 = variable_scope.variable(1.0, name="var1")
+ # This will pause the current thread, and execute the other thread.
+ distribute_lib.get_tower_context().merge_call(lambda _: _)
+ v2 = variable_scope.variable(
+ 1.0,
+ name="var2",
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ aggregation=variable_scope.VariableAggregation.SUM)
+ v3 = variable_scope.variable(
+ 1.0,
+ name="var3",
+ synchronization=variable_scope.VariableSynchronization.ON_WRITE,
+ aggregation=variable_scope.VariableAggregation.MEAN)
+
+ return v0, v1, v2, v3
+
+ devices = ["/device:CPU:0", "/device:GPU:0"]
+ dist = mirrored_strategy.MirroredStrategy(devices)
+ with dist.scope():
+ v = variable_scope.variable(1.0, name="var-main0")
+ self.assertEquals("var-main0:0", v.name)
+
+ result = dist.call_for_each_tower(model_fn, run_concurrently=False)
+ self.assertEquals(4, len(result))
+ v0, v1, v2, v3 = result
+ self.assertIsInstance(v0, values.MirroredVariable)
+ self.assertEquals("var0:0", v0.name)
+ self.assertIsInstance(v1, values.MirroredVariable)
+ self.assertEquals("common/var1:0", v1.name)
+ self.assertIsInstance(v2, values.TowerLocalVariable)
+ self.assertEquals("common/var2:0", v2.name)
+ self.assertEquals(variable_scope.VariableAggregation.SUM, v2.aggregation)
+ self.assertIsInstance(v3, values.MirroredVariable)
+ self.assertEquals("common/var3:0", v3.name)
+ self.assertEquals(variable_scope.VariableAggregation.MEAN, v3.aggregation)
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
def testWithGetVariableAndVariableScope(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
- v0 = variable_scope.get_variable("var-thread0", [1])
+ v0 = variable_scope.get_variable("var0", [1])
with variable_scope.variable_scope("common"):
- v1 = variable_scope.get_variable("var-thread1", [1])
+ v1 = variable_scope.get_variable("var1", [1])
# This will pause the current thread, and execute the other thread.
distribute_lib.get_tower_context().merge_call(lambda _: _)
- v2 = variable_scope.get_variable("var-thread2", [1])
+ v2 = variable_scope.get_variable(
+ "var2", [1],
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ aggregation=variable_scope.VariableAggregation.SUM)
+ v3 = variable_scope.get_variable(
+ "var3", [1],
+ synchronization=variable_scope.VariableSynchronization.ON_WRITE,
+ aggregation=variable_scope.VariableAggregation.MEAN)
- return v0, v1, v2
+ return v0, v1, v2, v3
devices = ["/device:CPU:0", "/device:GPU:0"]
dist = mirrored_strategy.MirroredStrategy(devices)
@@ -285,14 +360,89 @@ class MirroredStrategyVariableCreationTest(test.TestCase):
self.assertEquals("main/var-main0:0", v.name)
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
- self.assertEquals(3, len(result))
- v0, v1, v2 = result
+ self.assertEquals(4, len(result))
+ v0, v1, v2, v3 = result
self.assertIsInstance(v0, values.MirroredVariable)
- self.assertEquals("main/var-thread0:0", v0.name)
+ self.assertEquals("main/var0:0", v0.name)
self.assertIsInstance(v1, values.MirroredVariable)
- self.assertEquals("main/common/var-thread1:0", v1.name)
- self.assertIsInstance(v2, values.MirroredVariable)
- self.assertEquals("main/common/var-thread2:0", v2.name)
+ self.assertEquals("main/common/var1:0", v1.name)
+ self.assertIsInstance(v2, values.TowerLocalVariable)
+ self.assertEquals("main/common/var2:0", v2.name)
+ self.assertEquals(variable_scope.VariableAggregation.SUM,
+ v2.aggregation)
+ self.assertIsInstance(v3, values.MirroredVariable)
+ self.assertEquals("main/common/var3:0", v3.name)
+ self.assertEquals(variable_scope.VariableAggregation.MEAN,
+ v3.aggregation)
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testNoneSynchronizationWithGetVariable(self):
+ self._skip_eager_if_gpus_less_than(1)
+ devices = ["/device:CPU:0", "/device:GPU:0"]
+ dist = mirrored_strategy.MirroredStrategy(devices)
+ with dist.scope():
+ with self.assertRaisesRegexp(
+ ValueError, "`NONE` variable synchronization mode is not "
+ "supported with `Mirrored` distribution strategy. Please change "
+ "the `synchronization` for variable: v"):
+ variable_scope.get_variable(
+ "v", [1],
+ synchronization=variable_scope.VariableSynchronization.NONE)
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testNoneSynchronizationWithVariable(self):
+ self._skip_eager_if_gpus_less_than(1)
+ devices = ["/device:CPU:0", "/device:GPU:0"]
+ dist = mirrored_strategy.MirroredStrategy(devices)
+ with dist.scope():
+ with self.assertRaisesRegexp(
+ ValueError, "`NONE` variable synchronization mode is not "
+ "supported with `Mirrored` distribution strategy. Please change "
+ "the `synchronization` for variable: v"):
+ variable_scope.variable(
+ 1.0,
+ name="v",
+ synchronization=variable_scope.VariableSynchronization.NONE)
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testInvalidSynchronizationWithVariable(self):
+ self._skip_eager_if_gpus_less_than(1)
+ devices = ["/device:CPU:0", "/device:GPU:0"]
+ dist = mirrored_strategy.MirroredStrategy(devices)
+ with dist.scope():
+ with self.assertRaisesRegexp(
+ ValueError, "Invalid variable synchronization mode: Invalid for "
+ "variable: v"):
+ variable_scope.variable(1.0, name="v", synchronization="Invalid")
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testInvalidAggregationWithGetVariable(self):
+ self._skip_eager_if_gpus_less_than(1)
+ devices = ["/device:CPU:0", "/device:GPU:0"]
+ dist = mirrored_strategy.MirroredStrategy(devices)
+ with dist.scope():
+ with self.assertRaisesRegexp(
+ ValueError, "Invalid variable aggregation mode: invalid for "
+ "variable: v"):
+ variable_scope.get_variable(
+ "v", [1],
+ synchronization=variable_scope.VariableSynchronization.ON_WRITE,
+ aggregation="invalid")
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testInvalidAggregationWithVariable(self):
+ self._skip_eager_if_gpus_less_than(1)
+ devices = ["/device:CPU:0", "/device:GPU:0"]
+ dist = mirrored_strategy.MirroredStrategy(devices)
+ with dist.scope():
+ with self.assertRaisesRegexp(
+ ValueError, "Invalid variable aggregation mode: invalid for "
+ "variable: v"):
+ variable_scope.variable(
+ 1.0,
+ name="v",
+ synchronization=variable_scope.VariableSynchronization.ON_WRITE,
+ aggregation="invalid")
@test_util.run_in_graph_and_eager_modes(config=config)
def testThreeDevices(self):
@@ -341,11 +491,14 @@ class MirroredStrategyVariableCreationTest(test.TestCase):
components_mean = {}
def model_fn(device_id):
- tower_context = distribute_lib.get_tower_context()
- with tower_context.tower_local_var_scope("sum"):
- v_sum = variable_scope.variable(1.0)
- with tower_context.tower_local_var_scope("mean"):
- v_mean = variable_scope.variable(4.0)
+ v_sum = variable_scope.variable(
+ 1.0,
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ aggregation=variable_scope.VariableAggregation.SUM)
+ v_mean = variable_scope.variable(
+ 4.0,
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ aggregation=variable_scope.VariableAggregation.MEAN)
self.assertTrue(isinstance(v_sum, values.TowerLocalVariable))
self.assertTrue(isinstance(v_mean, values.TowerLocalVariable))
updates = [v_sum.assign_add(2.0 + device_id),
@@ -548,9 +701,10 @@ class MirroredStrategyVariableCreationTest(test.TestCase):
with context.graph_mode():
def model_fn():
- tower_context = distribute_lib.get_tower_context()
- with tower_context.tower_local_var_scope("sum"):
- v_sum = variable_scope.variable(1.0)
+ v_sum = variable_scope.variable(
+ 1.0,
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ aggregation=variable_scope.VariableAggregation.SUM)
self.assertTrue(isinstance(v_sum, values.TowerLocalVariable))
return v_sum
@@ -581,5 +735,237 @@ class MirroredStrategyVariableCreationTest(test.TestCase):
self.assertEquals(10.0, self.evaluate(ret_v_sum))
+class MirroredVariableUpdateTest(test.TestCase):
+ # The following tests check assign, assign_add and assign_sub on Mirrored
+ # variables in tower and cross tower context.
+ config = config_pb2.ConfigProto()
+ config.allow_soft_placement = True
+
+ def _skip_eager_if_gpus_less_than(self, num_gpus):
+ if context.num_gpus() < num_gpus and context.executing_eagerly():
+ self.skipTest("Enough GPUs not available for this test in eager mode.")
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testAssignMirroredVarTowerContextWithoutAggregationType(self):
+ # Test that assigning to a mirrored variable in tower context raises an
+ # error unless an aggregation type is set on the variable.
+ self._skip_eager_if_gpus_less_than(1)
+ def var_fn():
+ v = variable_scope.variable(1.0, name="foo")
+ return v
+
+ dist = mirrored_strategy.MirroredStrategy(
+ ["/device:GPU:0", "/device:CPU:0"])
+
+ with dist.scope():
+ mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
+ self.assertIsInstance(mirrored_var, values.MirroredVariable)
+ self.evaluate(variables.global_variables_initializer())
+
+ def model_fn():
+ return mirrored_var.assign(5.0)
+
+ with self.assertRaisesRegexp(
+ ValueError, "You must specify an aggregation method to update a "
+ "MirroredVariable in Tower Context."):
+ self.evaluate(dist.unwrap(dist.call_for_each_tower(model_fn)))
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testAssignMirroredVarTowerContextWithSum(self):
+ # Test that we don't reduce a non-per-device value with the "sum"
+ # aggregation type.
+ self._skip_eager_if_gpus_less_than(1)
+ def var_fn():
+ v = variable_scope.variable(
+ 1.0, name="foo", aggregation=variable_scope.VariableAggregation.SUM)
+ return v
+
+ dist = mirrored_strategy.MirroredStrategy(
+ ["/device:GPU:0", "/device:CPU:0"])
+
+ with dist.scope():
+ mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
+ self.assertIsInstance(mirrored_var, values.MirroredVariable)
+ self.evaluate(variables.global_variables_initializer())
+
+ def model_fn():
+ return mirrored_var.assign(5.0)
+
+ with self.assertRaisesRegexp(
+ ValueError, "A non PerDevice value cannot be reduced with the given "
+ "aggregation."):
+ self.evaluate(dist.unwrap(dist.call_for_each_tower(model_fn)))
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testAssignMirroredVarCrossTowerContext(self):
+ self._skip_eager_if_gpus_less_than(1)
+ def var_fn():
+ return variable_scope.variable(1.0, name="foo")
+
+ dist = mirrored_strategy.MirroredStrategy(
+ ["/device:GPU:0", "/device:CPU:0"])
+
+ with dist.scope():
+ mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
+ self.assertIsInstance(mirrored_var, values.MirroredVariable)
+ self.evaluate(variables.global_variables_initializer())
+ self.assertEquals(1.0, self.evaluate(mirrored_var))
+ mirrored_var_result = self.evaluate(mirrored_var.assign(6.0))
+ self.assertEquals(6.0, mirrored_var_result)
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testAssignMirroredVarTowerContext(self):
+ self._skip_eager_if_gpus_less_than(1)
+ def var_fn():
+ return variable_scope.variable(
+ 1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
+
+ dist = mirrored_strategy.MirroredStrategy(
+ ["/device:GPU:0", "/device:CPU:0"])
+
+ with dist.scope():
+ mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
+ self.assertIsInstance(mirrored_var, values.MirroredVariable)
+ self.evaluate(variables.global_variables_initializer())
+ self.assertEquals(1.0, self.evaluate(mirrored_var))
+
+ def model_fn():
+ value = math_ops.cast(distribute_lib.get_tower_context().tower_id,
+ mirrored_var.dtype)
+ return mirrored_var.assign(value)
+
+ self.evaluate(dist.unwrap(dist.call_for_each_tower(
+ model_fn, run_concurrently=False)))
+ self.assertEquals(0.5, self.evaluate(mirrored_var))
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testAssignAddMirroredVarCrossTowerContext(self):
+ self._skip_eager_if_gpus_less_than(1)
+ def var_fn():
+ return variable_scope.variable(1.0, name="foo")
+
+ dist = mirrored_strategy.MirroredStrategy(
+ ["/device:GPU:0", "/device:CPU:0"])
+
+ with dist.scope():
+ mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
+ self.assertIsInstance(mirrored_var, values.MirroredVariable)
+ self.evaluate(variables.global_variables_initializer())
+ self.assertEquals(1.0, self.evaluate(mirrored_var))
+ mirrored_var_result = self.evaluate(mirrored_var.assign_add(6.0))
+ self.assertEquals(7.0, mirrored_var_result)
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testAssignAddMirroredVarTowerContext(self):
+ self._skip_eager_if_gpus_less_than(1)
+ def var_fn():
+ return variable_scope.variable(
+ 1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
+
+ dist = mirrored_strategy.MirroredStrategy(
+ ["/device:GPU:0", "/device:CPU:0"])
+
+ with dist.scope():
+ mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
+ self.assertIsInstance(mirrored_var, values.MirroredVariable)
+ self.evaluate(variables.global_variables_initializer())
+ self.assertEquals(1.0, self.evaluate(mirrored_var))
+
+ def model_fn():
+ value = math_ops.cast(distribute_lib.get_tower_context().tower_id,
+ mirrored_var.dtype)
+ return mirrored_var.assign_add(value)
+
+ self.evaluate(dist.unwrap(dist.call_for_each_tower(
+ model_fn, run_concurrently=False)))
+ self.assertEquals(1.5, self.evaluate(mirrored_var))
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testAssignSubMirroredVarCrossTowerContext(self):
+ self._skip_eager_if_gpus_less_than(1)
+ def var_fn():
+ return variable_scope.variable(5.0, name="foo")
+
+ dist = mirrored_strategy.MirroredStrategy(
+ ["/device:GPU:0", "/device:CPU:0"])
+
+ with dist.scope():
+ mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
+ self.assertIsInstance(mirrored_var, values.MirroredVariable)
+ self.evaluate(variables.global_variables_initializer())
+ self.assertEquals(5.0, self.evaluate(mirrored_var))
+ mirrored_var_result = self.evaluate(mirrored_var.assign_sub(2.0))
+ self.assertEquals(3.0, mirrored_var_result)
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testAssignSubMirroredVarTowerContext(self):
+ self._skip_eager_if_gpus_less_than(1)
+ def var_fn():
+ return variable_scope.variable(
+ 5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
+
+ dist = mirrored_strategy.MirroredStrategy(
+ ["/device:GPU:0", "/device:CPU:0"])
+
+ with dist.scope():
+ mirrored_var = dist.call_for_each_tower(var_fn, run_concurrently=False)
+ self.assertIsInstance(mirrored_var, values.MirroredVariable)
+ self.evaluate(variables.global_variables_initializer())
+ self.assertEquals(5.0, self.evaluate(mirrored_var))
+
+ def model_fn():
+ value = math_ops.cast(distribute_lib.get_tower_context().tower_id,
+ mirrored_var.dtype)
+ return mirrored_var.assign_sub(value)
+
+ self.evaluate(dist.unwrap(dist.call_for_each_tower(
+ model_fn, run_concurrently=False)))
+ self.assertEquals(4.5, self.evaluate(mirrored_var))
+
+
+class MirroredAndTowerLocalVariableInitializerTest(test.TestCase):
+ config = config_pb2.ConfigProto()
+ config.allow_soft_placement = True
+
+ def testAssignMirroredVarInitializer(self):
+ # This test is not eager compatible since in eager mode variables are
+ # initialized upon construction rather than when the initialization op runs.
+ with context.graph_mode():
+ def var_fn():
+ v = variable_scope.variable(1.0, name="foo")
+ return v
+
+ dist = mirrored_strategy.MirroredStrategy(
+ ["/device:GPU:0", "/device:CPU:0"])
+
+ with dist.scope():
+ mirrored_var = dist.call_for_each_tower(var_fn)
+ self.assertIsInstance(mirrored_var, values.MirroredVariable)
+ self.assertFalse(self.evaluate(mirrored_var.is_initialized()))
+ self.evaluate(mirrored_var.initializer)
+ self.assertTrue(self.evaluate(mirrored_var.is_initialized()))
+
+ def testAssignTowerLocalVarInitializer(self):
+ # This test is not eager compatible since in eager mode variables are
+ # initialized upon construction rather than when the initialization op runs.
+ with context.graph_mode():
+ def model_fn():
+ v_sum = variable_scope.variable(
+ 1.0,
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ aggregation=variable_scope.VariableAggregation.SUM)
+ self.assertTrue(isinstance(v_sum, values.TowerLocalVariable))
+ return v_sum
+
+ dist = mirrored_strategy.MirroredStrategy(
+ ["/device:GPU:0", "/device:CPU:0"])
+
+ with dist.scope():
+ tower_local_var = dist.call_for_each_tower(model_fn)
+ self.assertTrue(isinstance(tower_local_var, values.TowerLocalVariable))
+ self.assertFalse(self.evaluate(tower_local_var.is_initialized()))
+ self.evaluate(tower_local_var.initializer)
+ self.assertTrue(self.evaluate(tower_local_var.is_initialized()))
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/contrib/distribute/python/multi_worker_strategy.py b/tensorflow/contrib/distribute/python/multi_worker_strategy.py
index 0f21a42732..cbfe5df61d 100644
--- a/tensorflow/contrib/distribute/python/multi_worker_strategy.py
+++ b/tensorflow/contrib/distribute/python/multi_worker_strategy.py
@@ -46,7 +46,7 @@ class MultiWorkerMirroredStrategy(MirroredStrategy):
* **In-graph replication**: the `client` creates a single `tf.Graph` that
specifies tasks for devices on all workers. The `client` then creates a
client session which will talk to the `master` service of a `worker`. Then
- the `master` will parition the graph and distribute the work to all
+ the `master` will partition the graph and distribute the work to all
participating workers.
* **Worker**: A `worker` is a TensorFlow `task` that usually maps to one
physical machine. We will have multiple `worker`s with different `task`
diff --git a/tensorflow/contrib/distribute/python/one_device_strategy.py b/tensorflow/contrib/distribute/python/one_device_strategy.py
index a580dac96c..dbd3514aec 100644
--- a/tensorflow/contrib/distribute/python/one_device_strategy.py
+++ b/tensorflow/contrib/distribute/python/one_device_strategy.py
@@ -24,6 +24,7 @@ from tensorflow.contrib.distribute.python import values
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import distribute as distribute_lib
@@ -43,11 +44,6 @@ class OneDeviceStrategy(distribute_lib.DistributionStrategy):
self._default_device = device
def _create_variable(self, next_creator, *args, **kwargs):
- # No need to distinguish tower-local variables when not mirroring,
- # we just enforce that they are not trainable.
- if kwargs.pop("tower_local_reduce_method", None) is not None:
- kwargs["trainable"] = False
-
colocate_with = kwargs.pop("colocate_with", None)
if colocate_with is None:
with ops.device(self._device):
@@ -80,15 +76,15 @@ class OneDeviceStrategy(distribute_lib.DistributionStrategy):
with ops.device(self._device):
return values.MapOutput([fn(m, *args, **kwargs) for m in map_over])
- def _reduce(self, method_string, value, destinations):
+ def _reduce(self, aggregation, value, destinations):
if not isinstance(value, values.MapOutput):
return value
l = value.get()
assert l
with ops.device(self._device):
- if method_string == "sum":
+ if aggregation == vs.VariableAggregation.SUM:
return math_ops.add_n(l)
- elif method_string == "mean":
+ elif aggregation == vs.VariableAggregation.MEAN:
return math_ops.add_n(l) / len(l)
else:
assert False
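The two branches above are a plain sum and an average over the MapOutput list; a trivial pure-Python analogue:

    l = [1.0, 2.0, 3.0]
    total = sum(l)            # SUM  -> math_ops.add_n(l) == 6.0
    mean = sum(l) / len(l)    # MEAN -> math_ops.add_n(l) / len(l) == 2.0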
diff --git a/tensorflow/contrib/distribute/python/prefetching_ops_v2.py b/tensorflow/contrib/distribute/python/prefetching_ops_v2.py
index 7b3670b45a..24cdc627a3 100644
--- a/tensorflow/contrib/distribute/python/prefetching_ops_v2.py
+++ b/tensorflow/contrib/distribute/python/prefetching_ops_v2.py
@@ -89,6 +89,9 @@ class _PrefetchToDeviceIterator(object):
with ops.device(device):
buffer_resource_handle = prefetching_ops.function_buffering_resource(
f=_prefetch_fn,
+ output_types=data_nest.flatten(
+ sparse.as_dense_types(self._input_dataset.output_types,
+ self._input_dataset.output_classes)),
target_device=target_device,
string_arg=input_iterator_handle,
buffer_size=buffer_size,
diff --git a/tensorflow/contrib/distribute/python/strategy_test_lib.py b/tensorflow/contrib/distribute/python/strategy_test_lib.py
index d2fe8b3b1e..baed0ebaae 100644
--- a/tensorflow/contrib/distribute/python/strategy_test_lib.py
+++ b/tensorflow/contrib/distribute/python/strategy_test_lib.py
@@ -26,6 +26,7 @@ from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import optimizer
@@ -110,7 +111,8 @@ class DistributionTestBase(test.TestCase):
before_list.append(fetched)
# control_dependencies irrelevant but harmless in eager execution
with ops.control_dependencies([fetched]):
- g = d.reduce("sum", g, destinations=v)
+ g = d.reduce(
+ variable_scope.VariableAggregation.SUM, g, destinations=v)
with ops.control_dependencies(d.unwrap(d.update(v, update, g))):
after_list.append(d.read_var(v))
return before_list, after_list
@@ -162,7 +164,8 @@ class DistributionTestBase(test.TestCase):
fetched = d.read_var(v)
before_list.append(fetched)
with ops.control_dependencies([fetched]):
- g = d.reduce("sum", g, destinations=v)
+ g = d.reduce(
+ variable_scope.VariableAggregation.SUM, g, destinations=v)
with ops.control_dependencies(d.unwrap(d.update(v, update, g))):
after_list.append(d.read_var(v))
return before_list, after_list
@@ -184,7 +187,7 @@ class DistributionTestBase(test.TestCase):
with d.scope():
map_in = [constant_op.constant(i) for i in range(10)]
map_out = d.map(map_in, lambda x, y: x * y, 2)
- observed = d.reduce("sum", map_out)
+ observed = d.reduce(variable_scope.VariableAggregation.SUM, map_out)
expected = 90 # 2 * (0 + 1 + ... + 9)
self.assertEqual(expected, observed.numpy())
diff --git a/tensorflow/contrib/distribute/python/tpu_strategy.py b/tensorflow/contrib/distribute/python/tpu_strategy.py
index b177e09adb..bc53898539 100644
--- a/tensorflow/contrib/distribute/python/tpu_strategy.py
+++ b/tensorflow/contrib/distribute/python/tpu_strategy.py
@@ -23,10 +23,13 @@ from __future__ import print_function
from tensorflow.contrib import tpu
from tensorflow.contrib.distribute.python import one_device_strategy
+from tensorflow.contrib.distribute.python import values
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
@@ -47,7 +50,10 @@ class TPUStrategy(one_device_strategy.OneDeviceStrategy):
return self._call_dataset_fn(dataset_fn)
# TODO(priyag): Deal with OutOfRange errors.
- def run_steps_on_dataset(self, fn, iterator, iterations):
+ # TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have
+ # a mechanism to infer the outputs of `fn`. Pending b/110550782.
+ def _run_steps_on_dataset(self, fn, iterator, iterations,
+ initial_loop_values=None):
# Enqueue ops
shapes = nest.flatten(iterator.output_shapes)
if any([not s.is_fully_defined() for s in shapes]):
@@ -93,26 +99,48 @@ class TPUStrategy(one_device_strategy.OneDeviceStrategy):
return nest.pack_sequence_as(iterator.output_shapes, dequeued)
# Wrap `fn` for repeat.
- run_fn = lambda: fn(dequeue_fn())
+ if initial_loop_values is None:
+ initial_loop_values = []
+ ctx = values.MultiStepContext(initial_loop_values)
+ def run_fn(*args, **kwargs):
+ del args, kwargs
+ fn_result = fn(ctx, dequeue_fn())
+ if ctx.last_step_outputs is None:
+ ctx.last_step_outputs = []
+ with ops.control_dependencies([fn_result]):
+ return array_ops.identity(ctx.last_step_outputs)
# Repeat
+ # TODO(sourabhbajaj): The input to the while loop should be based on the
+ # output type of the step_fn.
def iterate_on_tpu():
- return tpu.repeat(iterations, run_fn, [])
+ return tpu.repeat(iterations, run_fn, [initial_loop_values])
# Re-write and distribute computation.
- tpu_result = tpu.batch_parallel(
+ # TODO(sourabhbajaj): Convert the output to a PerDevice value and
+ # implement support for that in reduce.
+ last_step_tensor_outputs = tpu.batch_parallel(
iterate_on_tpu, [], num_shards=self._num_cores_per_host)
- return control_flow_ops.group(tpu_result, enqueue_ops)
+ # Take index [0] of last_step_tensor_outputs as we wrapped
+ # initial_loop_values in a list in the `repeat` call.
+ return (control_flow_ops.group(last_step_tensor_outputs, enqueue_ops),
+ last_step_tensor_outputs[0], ctx)
def _call_for_each_tower(self, fn, *args, **kwargs):
kwargs.pop('run_concurrently', None)
with one_device_strategy._OneDeviceTowerContext(self): # pylint: disable=protected-access
return fn(*args, **kwargs)
- def _reduce(self, method_string, value, destinations):
+ def get_initialization_ops(self):
+ return [tpu.initialize_system()]
+
+ def get_finalize_ops(self):
+ return [tpu.shutdown_system()]
+
+ def _reduce(self, aggregation, value, destinations):
del destinations # TPU is graph mode only. Rely on implicit Send/Recv.
- if method_string == 'mean':
+ if aggregation == vs.VariableAggregation.MEAN:
# TODO(jhseu): Revisit once we support model-parallelism.
value *= (1. / self._num_cores_per_host)
return tpu_ops.cross_replica_sum(value)
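The MEAN branch works because scaling commutes with the all-reduce: with N cores and per-core values v_i, cross_replica_sum(v_i / N) == (v_0 + ... + v_{N-1}) / N, which is exactly the cross-core mean. For example, with two cores holding 2.0 and 4.0, summing 1.0 and 2.0 gives 3.0, the mean of 2.0 and 4.0.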
diff --git a/tensorflow/contrib/distribute/python/values.py b/tensorflow/contrib/distribute/python/values.py
index 9a48928a95..1b5e00bc79 100644
--- a/tensorflow/contrib/distribute/python/values.py
+++ b/tensorflow/contrib/distribute/python/values.py
@@ -23,7 +23,6 @@ from __future__ import print_function
import collections
import weakref
-
import six
from tensorflow.contrib.distribute.python import input_ops
@@ -34,6 +33,8 @@ from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import device_util
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import saver
@@ -251,21 +252,6 @@ class DistributedVariable(DistributedDelegate):
ops.register_dense_tensor_like_type(DistributedVariable)
-class _MirroredSaveable(saver.BaseSaverBuilder.ResourceVariableSaveable):
- """Class for defining how to restore a MirroredVariable."""
-
- def __init__(self, mirrored_variable, primary_variable, name):
- self._mirrored_variable = mirrored_variable
- super(_MirroredSaveable, self).__init__(primary_variable, "", name)
-
- def restore(self, restored_tensors, restored_shapes):
- """Restore the same value into all variables."""
- tensor, = restored_tensors
- return control_flow_ops.group([
- _assign_on_device(d, v, tensor)
- for d, v in six.iteritems(self._mirrored_variable._index)]) # pylint: disable=protected-access
-
-
def _get_update_device():
"""Validate we are in update/update_non_slot() and return current device.
@@ -286,30 +272,113 @@ def _get_update_device():
return device
+class _MirroredSaveable(saver.BaseSaverBuilder.ResourceVariableSaveable):
+ """Class for defining how to restore a MirroredVariable."""
+
+ def __init__(self, mirrored_variable, primary_variable, name):
+ self._mirrored_variable = mirrored_variable
+ super(_MirroredSaveable, self).__init__(primary_variable, "", name)
+
+ def restore(self, restored_tensors, restored_shapes):
+ """Restore the same value into all variables."""
+ tensor, = restored_tensors
+ return control_flow_ops.group([
+ _assign_on_device(d, v, tensor)
+ for d, v in six.iteritems(self._mirrored_variable._index)]) # pylint: disable=protected-access
+
+
class MirroredVariable(DistributedVariable, Mirrored,
checkpointable.CheckpointableBase):
"""Holds a map from device to variables whose values are kept in sync."""
- def __init__(self, index, primary_var):
+ def __init__(self, index, primary_var, aggregation):
+ # Use a weakref to make it easy to map from the contained values
+ # to the container without introducing a reference cycle.
+ for v in six.itervalues(index):
+ v._mirrored_container = weakref.ref(self) # pylint: disable=protected-access
self._primary_var = primary_var
+ # tf.keras keeps track of variables initialized using this attribute. When
+ # tf.keras gets the default session, it initializes all uninitialized vars.
+ # We need to make _keras_initialized a member of MirroredVariable because
+ # without this it will use `__getattr__` which will delegate to a component
+ # variable.
+ self._keras_initialized = False
+ self._aggregation = aggregation
super(MirroredVariable, self).__init__(index)
- # We use _get_update_device() for the assign* methods to enforce
- # that we are in an update() function. The arguments to update() are
- # automatically unwrapped so the update() function would normally
- # see regular variables, not MirroredVariables. However, the update
- # function can still operate on wrapped MirroredVariables through
- # object members, captured arguments, etc. This is more likely in an
+ # The arguments to update() are automatically unwrapped so the update()
+ # function would normally see regular variables, not MirroredVariables.
+ # However, the update function can still operate on wrapped MirroredVariables
+ # through object members, captured arguments, etc. This is more likely in an
# update_non_slot() function (like OptimizerV2._finish), which can
# update several non-slot variables in one call.
+ def _assign_func(self, *args, **kwargs):
+ f = kwargs.pop("f")
+ if distribute_lib.get_cross_tower_context():
+ update_device = distribute_lib.get_update_device()
+ # We are calling update on the mirrored variable in cross tower context.
+ if update_device is not None:
+ # We are calling an assign function on the mirrored variable in cross
+ # tower context.
+ v = self.get(device=update_device)
+ return f(v, *args, **kwargs)
+
+ return distribute_lib.get_distribution_strategy().update(
+ self, f, *args, **kwargs)
+ else:
+ # We are calling an assign function on the mirrored variable in tower
+ # context.
+ # We reduce the value we want to assign/add/sub. More details about how we
+ # handle the different use cases can be found in the _reduce method.
+ # We call the function on each of the mirrored variables with the reduced
+ # value.
+ if self._aggregation == vs.VariableAggregation.NONE:
+ raise ValueError("You must specify an aggregation method to update a "
+ "MirroredVariable in Tower Context.")
+
+ def merge_fn(strategy, value):
+ return strategy.update(
+ self, f,
+ strategy.reduce(
+ aggregation=self._aggregation, value=value, destinations=self))
+
+ return distribute_lib.get_tower_context().merge_call(merge_fn, *args,
+ **kwargs)
+
def assign_sub(self, *args, **kwargs):
- return self.get(device=_get_update_device()).assign_sub(*args, **kwargs)
+ return self._assign_func(f=state_ops.assign_sub, *args, **kwargs)
def assign_add(self, *args, **kwargs):
- return self.get(device=_get_update_device()).assign_add(*args, **kwargs)
+ return self._assign_func(f=state_ops.assign_add, *args, **kwargs)
def assign(self, *args, **kwargs):
- return self.get(device=_get_update_device()).assign(*args, **kwargs)
+ return self._assign_func(f=state_ops.assign, *args, **kwargs)
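A hedged sketch of the tower-context path (mirroring the MirroredVariableUpdateTest cases added below): with MEAN aggregation, the per-tower values are reduced across towers and the merged result is written to every component, so two towers assigning their tower ids (0 and 1) leave 0.5 in the variable.

    def model_fn():
      # mirrored_var is assumed to be a MirroredVariable with MEAN aggregation.
      value = math_ops.cast(distribute_lib.get_tower_context().tower_id,
                            mirrored_var.dtype)
      return mirrored_var.assign(value)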
+
+ def is_initialized(self, name=None):
+ # We have to cast the self._index.values() to a `list` because when we
+ # use `model_to_estimator` to run tf.keras models, self._index.values() is
+ # of type `dict_values` and not `list`.
+ values_list = list(self._index.values())
+ result = values_list[0].is_initialized()
+ # We iterate through the list of values except the last one to allow us to
+ # name the final `logical_and` op the same name that is passed by the user
+ # to the `is_initialized` op. For mirrored variables, the `is_initialized`
+ # op is a `logical_and` op.
+ for v in values_list[1:-1]:
+ result = math_ops.logical_and(result, v.is_initialized())
+ result = math_ops.logical_and(result, values_list[-1].is_initialized(),
+ name=name)
+ return result
+
+ @property
+ def initializer(self):
+ # Return the grouped initializer ops for all component variables of the
+ # mirrored variable.
+ return control_flow_ops.group([v.initializer for v in self._index.values()])
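Hedged graph-mode usage of the two members above, as exercised by the new initializer tests below: is_initialized() ANDs the component states, and initializer groups the component initializer ops.

    # sess is an assumed tf.Session; mirrored_var an assumed MirroredVariable.
    sess.run(mirrored_var.is_initialized())   # False before initialization
    sess.run(mirrored_var.initializer)        # runs every component initializer
    sess.run(mirrored_var.is_initialized())   # True afterwards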
+
+ @property
+ def aggregation(self):
+ return self._aggregation
def _get_cross_tower(self):
device = device_util.canonicalize(device_util.current())
@@ -374,7 +443,7 @@ class _TowerLocalSaveable(saver.BaseSaverBuilder.SaveableObject):
# To preserve the sum across save and restore, we have to divide the
# total across all devices when restoring a variable that was summed
# when saving.
- if self._tower_local_variable.reduce_method == "sum":
+ if self._tower_local_variable.aggregation == vs.VariableAggregation.SUM:
tensor *= 1. / len(self._tower_local_variable.devices)
return control_flow_ops.group([
_assign_on_device(d, v, tensor)
@@ -391,9 +460,15 @@ class TowerLocalVariable(DistributedVariable, PerDevice,
checkpointable.CheckpointableBase):
"""Holds a map from device to variables whose values are reduced on save."""
- def __init__(self, index, primary_var, reduce_method):
+ def __init__(self, index, primary_var, aggregation):
self._primary_var = primary_var
- self._reduce_method = reduce_method
+ self._aggregation = aggregation
+ # tf.keras keeps track of variables initialized using this attribute. When
+ # tf.keras gets the default session, it initializes all uninitialized vars.
+ # We need to make _keras_initialized a member of TowerLocalVariable because
+ # without this it will use `__getattr__` which will delegate to a component
+ # variable.
+ self._keras_initialized = False
super(TowerLocalVariable, self).__init__(index)
def assign_sub(self, *args, **kwargs):
@@ -408,15 +483,37 @@ class TowerLocalVariable(DistributedVariable, PerDevice,
_assert_tower_context()
return self.get().assign(*args, **kwargs)
+ def is_initialized(self, name=None):
+ # We have to cast the self._index.values() to a `list` because when we
+ # use `model_to_estimator` to run tf.keras models, self._index.values() is
+ # of type `dict_values` and not `list`.
+ values_list = list(self._index.values())
+ result = values_list[0].is_initialized()
+ # We iterate through the list of values except the last one to allow us to
+ # name the final `logical_and` op the same name that is passed by the user
+ # to the `is_initialized` op. For tower local variables, the
+ # `is_initialized` op is a `logical_and` op.
+ for v in values_list[1:-1]:
+ result = math_ops.logical_and(result, v.is_initialized())
+ result = math_ops.logical_and(result, values_list[-1].is_initialized(),
+ name=name)
+ return result
+
@property
- def reduce_method(self):
- return self._reduce_method
+ def initializer(self):
+ # Return the grouped initializer ops for all component variables of the
+ # tower-local variable.
+ return control_flow_ops.group([v.initializer for v in self._index.values()])
+
+ @property
+ def aggregation(self):
+ return self._aggregation
def _get_cross_tower(self):
all_components = tuple(self._index.values())
# TODO(josh11b): Use a strategy-specific method.
total = math_ops.add_n(all_components)
- if self._reduce_method == "mean":
+ if self._aggregation == vs.VariableAggregation.MEAN:
return total * (1./ len(all_components))
return total
@@ -824,3 +921,71 @@ class MapOutput(object):
def get(self):
return self._l
+
+
+class MultiStepContext(object):
+ """A context object that can be used to capture things when running steps.
+
+ This context object is useful when running multiple steps at a time using the
+ `run_steps_on_dataset` API. For e.g. it allows the user's step function to
+ specify which outputs to emit at what frequency. Currently it only supports
+ capturing output from the last step, but will soon be augmented to support
+ other use cases such as output each N steps.
+ """
+
+ def __init__(self, initial_loop_values=None):
+ """Initializes an output context.
+
+ Args:
+ initial_loop_values: Initial values passed to the run steps
+ while loop. The only purpose is to verify the shapes and types
+ when the actual output is set. This will be removed once we
+ automatically infer the output shapes and types (and do not need to
+ check for user error in specifying them manually).
+ Returns:
+ A context object.
+ """
+ self._last_step_outputs = None
+ self._non_tensor_outputs = None
+ self._initial_loop_values = initial_loop_values
+
+ @property
+ def last_step_outputs(self):
+ """Return the last step's outputs."""
+ return self._last_step_outputs
+
+ @last_step_outputs.setter
+ def last_step_outputs(self, outputs):
+ """Set the last step's outputs."""
+ self._verify_structure_shapes_types(outputs, self._initial_loop_values)
+ self._last_step_outputs = outputs
+
+ @property
+ def non_tensor_outputs(self):
+ """Return the non tensor outputs."""
+ return self._non_tensor_outputs
+
+ @non_tensor_outputs.setter
+ def non_tensor_outputs(self, outputs):
+ """Set any non tensor outputs."""
+ self._non_tensor_outputs = outputs
+
+ def _verify_structure_shapes_types(self, left, right):
+ """Verify that the structure, shapes and types of left are same as right."""
+ nest.assert_same_structure(left, right)
+ flat_left = nest.flatten(left)
+ flat_right = nest.flatten(right)
+ assert len(flat_left) == len(flat_right), (
+ "Length of left {} and right {} should be same.".
+ format(len(flat_left), len(flat_right)))
+
+ for o, i in zip(flat_left, flat_right):
+ # TODO(priyag): Add checks for other types like IndexedSlices.
+ if isinstance(o, ops.Tensor):
+ assert isinstance(i, ops.Tensor)
+ assert o.shape == i.shape, (
+ "Shape {} of left {} doesn't match shape {} of right {}.".
+ format(o.shape, o, i.shape, i))
+ assert o.dtype == i.dtype, (
+ "Dtype {} of left {} doesn't match dtype {} of right {}.".
+ format(o.dtype, o, i.dtype, i))
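A hedged sketch of a step function driving this context (compute_loss is a placeholder, not part of the patch): the step_fn receives the context plus the dequeued inputs and sets last_step_outputs with the same structure as initial_loop_values.

    def step_fn(ctx, inputs):
      loss = compute_loss(inputs)         # assumed user-defined computation
      ctx.last_step_outputs = [loss]      # must match initial_loop_values
      return loss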
diff --git a/tensorflow/contrib/distribute/python/values_test.py b/tensorflow/contrib/distribute/python/values_test.py
index c5b246e804..8e44f2fea1 100644
--- a/tensorflow/contrib/distribute/python/values_test.py
+++ b/tensorflow/contrib/distribute/python/values_test.py
@@ -158,7 +158,8 @@ def _make_mirrored():
v.append(variable_scope.get_variable(
name=n, initializer=init, use_resource=True))
index[d] = v[-1]
- mirrored = values.MirroredVariable(index, v[0])
+ mirrored = values.MirroredVariable(index, v[0],
+ variable_scope.VariableAggregation.SUM)
return v, devices, mirrored
@@ -277,7 +278,8 @@ class RegroupAndSelectDeviceTest(test.TestCase):
v = variable_scope.get_variable(
name="v", initializer=1., use_resource=True)
index = {d: v}
- mirrored = values.MirroredVariable(index, v)
+ mirrored = values.MirroredVariable(index, v,
+ variable_scope.VariableAggregation.SUM)
result = values.regroup(index)
self.assertIs(mirrored, result)
@@ -581,7 +583,8 @@ class MirroredVariableTest(test.TestCase):
v = variable_scope.get_variable(
name="v", initializer=[1.], use_resource=True)
index = {"/job:foo/device:CPU:0": v}
- mirrored = values.MirroredVariable(index, v)
+ mirrored = values.MirroredVariable(index, v,
+ variable_scope.VariableAggregation.MEAN)
self.assertEquals(v.name, mirrored.name)
self.assertEquals(v.dtype, mirrored.dtype)
@@ -716,7 +719,9 @@ class MirroredVariableTest(test.TestCase):
with ops.device("/device:GPU:0"):
v = variable_scope.get_variable(
name="v", initializer=1., use_resource=True)
- mirrored = values.MirroredVariable({"/device:GPU:0": v}, v)
+ mirrored = values.MirroredVariable({
+ "/device:GPU:0": v
+ }, v, variable_scope.VariableAggregation.MEAN)
sess.run(variables_lib.global_variables_initializer())
sess.run({"complicated": mirrored})
@@ -746,24 +751,27 @@ class TowerLocalVariableTest(test.TestCase):
if context.num_gpus() < 1 and context.executing_eagerly():
self.skipTest("A GPU is not available for this test in eager mode.")
- v, tower_local = _make_tower_local("sum")
+ v, tower_local = _make_tower_local(variable_scope.VariableAggregation.SUM)
self.assertEquals(v[0].name, tower_local.name)
self.assertEquals(v[0].dtype, tower_local.dtype)
self.assertEquals(v[0].shape, tower_local.shape)
- self.assertEquals("sum", tower_local.reduce_method)
+ self.assertEquals(variable_scope.VariableAggregation.SUM,
+ tower_local.aggregation)
@test_util.run_in_graph_and_eager_modes(config=config)
def testVariableOnAnotherDevice(self):
v = variable_scope.get_variable(
name="v", initializer=[1.], use_resource=True)
index = {"/job:foo/device:CPU:0": v}
- tower_local = values.TowerLocalVariable(index, v, "mean")
+ tower_local = values.TowerLocalVariable(
+ index, v, variable_scope.VariableAggregation.MEAN)
self.assertEquals(v.name, tower_local.name)
self.assertEquals(v.dtype, tower_local.dtype)
self.assertEquals(v.shape, tower_local.shape)
- self.assertEquals("mean", tower_local.reduce_method)
+ self.assertEquals(variable_scope.VariableAggregation.MEAN,
+ tower_local.aggregation)
def _assign_tower_local(self, devices, v, new):
for d, var, n in zip(devices, v, new):
@@ -789,7 +797,7 @@ class TowerLocalVariableTest(test.TestCase):
self.skipTest("A GPU is not available for this test in eager mode.")
with self.test_session() as sess:
- v, tower_local = _make_tower_local("sum")
+ v, tower_local = _make_tower_local(variable_scope.VariableAggregation.SUM)
# Overwrite the initial values.
self._assign_tower_local(_devices, v, [3., 4.])
@@ -812,7 +820,8 @@ class TowerLocalVariableTest(test.TestCase):
self.skipTest("A GPU is not available for this test in eager mode.")
with self.test_session() as sess:
- v, tower_local = _make_tower_local("mean")
+ v, tower_local = _make_tower_local(
+ variable_scope.VariableAggregation.MEAN)
# Overwrite the initial values.
self._assign_tower_local(_devices, v, [3., 4.])
@@ -831,7 +840,8 @@ class TowerLocalVariableTest(test.TestCase):
def _save_tower_local_mean(self):
"""Save variables with mirroring, returns save_path."""
with self.test_session(graph=ops.Graph()) as sess:
- v, tower_local = _make_tower_local("mean")
+ v, tower_local = _make_tower_local(
+ variable_scope.VariableAggregation.MEAN)
# Overwrite the initial values.
self._assign_tower_local(_devices, v, [3., 4.])
@@ -893,7 +903,8 @@ class TowerLocalVariableTest(test.TestCase):
def _restore_tower_local_mean(self, save_path):
"""Restore to variables with mirroring in a fresh graph."""
with self.test_session(graph=ops.Graph()) as sess:
- v, tower_local = _make_tower_local("mean")
+ v, tower_local = _make_tower_local(
+ variable_scope.VariableAggregation.MEAN)
# Overwrite the initial values.
self._assign_tower_local(_devices, v, [7., 8.])
@@ -907,7 +918,7 @@ class TowerLocalVariableTest(test.TestCase):
def _restore_tower_local_sum(self, save_path):
"""Restore to variables with mirroring in a fresh graph."""
with self.test_session(graph=ops.Graph()) as sess:
- v, tower_local = _make_tower_local("sum")
+ v, tower_local = _make_tower_local(variable_scope.VariableAggregation.SUM)
# Overwrite the initial values.
self._assign_tower_local(_devices, v, [7., 8.])
@@ -968,7 +979,7 @@ class TowerLocalVariableTest(test.TestCase):
def testTensorConversion(self):
with context.graph_mode():
- _, tower_local = _make_tower_local("sum")
+ _, tower_local = _make_tower_local(variable_scope.VariableAggregation.SUM)
converted = ops.internal_convert_to_tensor(tower_local, as_ref=False)
self.assertIsInstance(converted, ops.Tensor)
self.assertEqual(converted.dtype, tower_local.dtype)
diff --git a/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py b/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
index b8f2a4b2c7..296e66f2b2 100644
--- a/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
+++ b/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
@@ -514,9 +514,8 @@ def masked_autoregressive_default_template(
Masked Autoencoder for Distribution Estimation. In _International
Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
"""
-
- with ops.name_scope(name, "masked_autoregressive_default_template",
- values=[log_scale_min_clip, log_scale_max_clip]):
+ name = name or "masked_autoregressive_default_template"
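+ # Resolve the name once so that the name_scope below and the template
+ # returned at the end of this function share it.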
+ with ops.name_scope(name, values=[log_scale_min_clip, log_scale_max_clip]):
def _fn(x):
"""MADE parameterized via `masked_autoregressive_default_template`."""
# TODO(b/67594795): Better support of dynamic shape.
@@ -552,8 +551,7 @@ def masked_autoregressive_default_template(
else _clip_by_value_preserve_grad)
log_scale = which_clip(log_scale, log_scale_min_clip, log_scale_max_clip)
return shift, log_scale
- return template_ops.make_template(
- "masked_autoregressive_default_template", _fn)
+ return template_ops.make_template(name, _fn)
@deprecation.deprecated(
diff --git a/tensorflow/contrib/eager/python/datasets.py b/tensorflow/contrib/eager/python/datasets.py
index adf92c27ea..58c548d798 100644
--- a/tensorflow/contrib/eager/python/datasets.py
+++ b/tensorflow/contrib/eager/python/datasets.py
@@ -102,6 +102,7 @@ class Iterator(iterator_ops.EagerIterator, checkpointable.CheckpointableBase):
with ops.device(self._device):
self._buffer_resource_handle = prefetching_ops.function_buffering_resource( # pylint: disable=line-too-long
string_arg=iter_string_handle,
+ output_types=self._flat_output_types,
f=remote_fn,
target_device=target,
buffer_size=10,
diff --git a/tensorflow/contrib/eager/python/examples/densenet/BUILD b/tensorflow/contrib/eager/python/examples/densenet/BUILD
new file mode 100644
index 0000000000..de2a817d17
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/densenet/BUILD
@@ -0,0 +1,29 @@
+licenses(["notice"]) # Apache 2.0
+
+package(default_visibility = ["//tensorflow:internal"])
+
+load("//tensorflow:tensorflow.bzl", "cuda_py_test")
+
+py_binary(
+ name = "densenet",
+ srcs = ["densenet.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/eager/python:tfe",
+ ],
+)
+
+cuda_py_test(
+ name = "densenet_test",
+ srcs = ["densenet_test.py"],
+ additional_deps = [
+ ":densenet",
+ "//tensorflow/contrib/eager/python:tfe",
+ "//tensorflow:tensorflow_py",
+ ],
+ tags = [
+ "no_pip",
+ "optonly",
+ ],
+)
diff --git a/tensorflow/contrib/eager/python/examples/densenet/densenet.py b/tensorflow/contrib/eager/python/examples/densenet/densenet.py
new file mode 100644
index 0000000000..3a2b2de250
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/densenet/densenet.py
@@ -0,0 +1,274 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Densely Connected Convolutional Networks.
+
+Reference [
+Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+l2 = tf.keras.regularizers.l2
+
+
+class ConvBlock(tf.keras.Model):
+ """Convolutional Block consisting of (batchnorm->relu->conv).
+
+ Arguments:
+ num_filters: number of filters passed to a convolutional layer.
+ bottleneck: if True, then a 1x1 Conv is performed followed by 3x3 Conv.
+ weight_decay: weight decay
+ dropout_rate: dropout rate.
+ """
+
+ def __init__(self, num_filters, bottleneck, weight_decay=1e-4,
+ dropout_rate=0):
+ super(ConvBlock, self).__init__()
+ self.bottleneck = bottleneck
+ inter_filter = num_filters * 4
+ # don't forget to set use_bias=False when using batchnorm
+ self.conv2 = tf.keras.layers.Conv2D(num_filters,
+ (3, 3),
+ padding="same",
+ use_bias=False,
+ kernel_initializer="he_normal",
+ kernel_regularizer=l2(weight_decay))
+ self.batchnorm1 = tf.keras.layers.BatchNormalization()
+ self.dropout = tf.keras.layers.Dropout(dropout_rate)
+
+ if self.bottleneck:
+ self.conv1 = tf.keras.layers.Conv2D(inter_filter,
+ (1, 1),
+ padding="same",
+ use_bias=False,
+ kernel_initializer="he_normal",
+ kernel_regularizer=l2(weight_decay))
+ self.batchnorm2 = tf.keras.layers.BatchNormalization()
+
+ def call(self, x, training=True):
+ output = self.batchnorm1(x, training=training)
+
+ if self.bottleneck:
+ output = self.conv1(tf.nn.relu(output))
+ output = self.batchnorm2(output, training=training)
+
+ output = self.conv2(tf.nn.relu(output))
+ output = self.dropout(output, training=training)
+
+ return output
+
+
+class TransitionBlock(tf.keras.Model):
+ """Transition Block to reduce the number of features.
+
+ Arguments:
+ num_filters: number of filters passed to a convolutional layer.
+ weight_decay: weight decay
+ dropout_rate: dropout rate.
+ """
+
+ def __init__(self, num_filters, weight_decay=1e-4, dropout_rate=0):
+ super(TransitionBlock, self).__init__()
+ self.batchnorm = tf.keras.layers.BatchNormalization()
+ self.conv = tf.keras.layers.Conv2D(num_filters,
+ (1, 1),
+ padding="same",
+ use_bias=False,
+ kernel_initializer="he_normal",
+ kernel_regularizer=l2(weight_decay))
+ self.avg_pool = tf.keras.layers.AveragePooling2D()
+
+ def call(self, x, training=True):
+ output = self.batchnorm(x, training=training)
+ output = self.conv(tf.nn.relu(output))
+ output = self.avg_pool(output)
+ return output
+
+
+class DenseBlock(tf.keras.Model):
+ """Dense Block consisting of ConvBlocks where each block's
+ output is concatenated with its input.
+
+ Arguments:
+ num_layers: Number of layers in each block.
+ growth_rate: number of filters to add per conv block.
+ bottleneck: boolean, that decides which part of ConvBlock to call.
+ weight_decay: weight decay
+ dropout_rate: dropout rate.
+ """
+
+ def __init__(self, num_layers, growth_rate, bottleneck,
+ weight_decay=1e-4, dropout_rate=0):
+ super(DenseBlock, self).__init__()
+ self.num_layers = num_layers
+
+ self.blocks = []
+ for _ in range(int(self.num_layers)):
+ self.blocks.append(ConvBlock(growth_rate,
+ bottleneck,
+ weight_decay,
+ dropout_rate))
+
+ def call(self, x, training=True):
+ for i in range(int(self.num_layers)):
+ output = self.blocks[i](x, training=training)
+ x = tf.concat([x, output], axis=-1)
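+ # each iteration appends growth_rate feature maps, so the channel count
+ # grows linearly with the number of layers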
+
+ return x
+
+
+class DenseNet(tf.keras.Model):
+ """Creating the Densenet Architecture.
+
+ Arguments:
+ depth_of_model: number of layers in the model.
+ growth_rate: number of filters to add per conv block.
+ num_of_blocks: number of dense blocks.
+ output_classes: number of output classes.
+ num_layers_in_each_block: number of layers in each block.
+ If -1, then we calculate this by (depth - 4) / 3.
+ If a positive integer, then it is used as the
+ number of layers per block.
+ If a list or tuple, then this list is used directly.
+ bottleneck: boolean, to decide which part of conv block to call.
+ compression: fraction by which to reduce the number of inputs (filters)
+ to the transition block.
+ weight_decay: weight decay
+ dropout_rate: dropout rate.
+ pool_initial: If True, add a 7x7 conv with stride 2 followed by a 3x3
+ maxpool; else, do a 3x3 conv with stride 1.
+ include_top: If True, a GlobalAveragePooling layer and a Dense layer are
+ included.
+ """
+
+ def __init__(self, depth_of_model, growth_rate, num_of_blocks,
+ output_classes, num_layers_in_each_block,
+ bottleneck=True, compression=0.5, weight_decay=1e-4,
+ dropout_rate=0, pool_initial=False, include_top=True):
+ super(DenseNet, self).__init__()
+ self.depth_of_model = depth_of_model
+ self.growth_rate = growth_rate
+ self.num_of_blocks = num_of_blocks
+ self.output_classes = output_classes
+ self.num_layers_in_each_block = num_layers_in_each_block
+ self.bottleneck = bottleneck
+ self.compression = compression
+ self.weight_decay = weight_decay
+ self.dropout_rate = dropout_rate
+ self.pool_initial = pool_initial
+ self.include_top = include_top
+
+ # deciding on number of layers in each block
+ if isinstance(self.num_layers_in_each_block, list) or isinstance(
+ self.num_layers_in_each_block, tuple):
+ self.num_layers_in_each_block = list(self.num_layers_in_each_block)
+ else:
+ if self.num_layers_in_each_block == -1:
+ if self.num_of_blocks != 3:
+ raise ValueError(
+ "Number of blocks must be 3 if num_layers_in_each_block is -1")
+ if (self.depth_of_model - 4) % 3 == 0:
+ num_layers = (self.depth_of_model - 4) / 3
+ if self.bottleneck:
+ num_layers //= 2
+ self.num_layers_in_each_block = [num_layers] * self.num_of_blocks
+ else:
+ raise ValueError("Depth must be 3N+4 if num_layer_in_each_block=-1")
+ else:
+ self.num_layers_in_each_block = [
+ self.num_layers_in_each_block] * self.num_of_blocks
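+ # Illustrative example: depth_of_model=13 with num_of_blocks=3 gives
+ # num_layers = (13 - 4) / 3 = 3 per block, halved to 1 when bottleneck=True.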
+
+ # setting the filters and stride of the initial conv layer.
+ if self.pool_initial:
+ init_filters = (7, 7)
+ stride = (2, 2)
+ else:
+ init_filters = (3, 3)
+ stride = (1, 1)
+
+ self.num_filters = 2 * self.growth_rate
+
+ # first conv and pool layer
+ self.conv1 = tf.keras.layers.Conv2D(self.num_filters,
+ init_filters,
+ strides=stride,
+ padding="same",
+ use_bias=False,
+ kernel_initializer="he_normal",
+ kernel_regularizer=l2(
+ self.weight_decay))
+ if self.pool_initial:
+ self.pool1 = tf.keras.layers.MaxPooling2D(pool_size=(3, 3),
+ strides=(2, 2),
+ padding="same")
+ self.batchnorm1 = tf.keras.layers.BatchNormalization()
+
+ self.batchnorm2 = tf.keras.layers.BatchNormalization()
+
+ # last pooling and fc layer
+ if self.include_top:
+ self.last_pool = tf.keras.layers.GlobalAveragePooling2D()
+ self.classifier = tf.keras.layers.Dense(self.output_classes)
+
+ # calculating the number of filters after each block
+ num_filters_after_each_block = [self.num_filters]
+ for i in range(1, self.num_of_blocks):
+ temp_num_filters = num_filters_after_each_block[i-1] + (
+ self.growth_rate * self.num_layers_in_each_block[i-1])
+ # using compression to reduce the number of inputs to the
+ # transition block
+ temp_num_filters = int(temp_num_filters * compression)
+ num_filters_after_each_block.append(temp_num_filters)
+
+ # dense block initialization
+ self.dense_blocks = []
+ self.transition_blocks = []
+ for i in range(self.num_of_blocks):
+ self.dense_blocks.append(DenseBlock(self.num_layers_in_each_block[i],
+ self.growth_rate,
+ self.bottleneck,
+ self.weight_decay,
+ self.dropout_rate))
+ if i+1 < self.num_of_blocks:
+ self.transition_blocks.append(
+ TransitionBlock(num_filters_after_each_block[i+1],
+ self.weight_decay,
+ self.dropout_rate))
+
+ def call(self, x, training=True):
+ output = self.conv1(x)
+
+ if self.pool_initial:
+ output = self.batchnorm1(output, training=training)
+ output = tf.nn.relu(output)
+ output = self.pool1(output)
+
+ for i in range(self.num_of_blocks - 1):
+ output = self.dense_blocks[i](output, training=training)
+ output = self.transition_blocks[i](output, training=training)
+
+ output = self.dense_blocks[
+ self.num_of_blocks - 1](output, training=training)
+ output = self.batchnorm2(output, training=training)
+ output = tf.nn.relu(output)
+
+ if self.include_top:
+ output = self.last_pool(output)
+ output = self.classifier(output)
+
+ return output
diff --git a/tensorflow/contrib/eager/python/examples/densenet/densenet_test.py b/tensorflow/contrib/eager/python/examples/densenet/densenet_test.py
new file mode 100644
index 0000000000..56d3362f3b
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/densenet/densenet_test.py
@@ -0,0 +1,83 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for various Densenet architectures."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+from tensorflow.contrib.eager.python.examples.densenet import densenet
+
+
+class DensenetTest(tf.test.TestCase):
+
+ def test_bottleneck_true(self):
+ depth = 7
+ growth_rate = 2
+ num_blocks = 3
+ output_classes = 10
+ num_layers_in_each_block = -1
+ batch_size = 1
+
+ model = densenet.DenseNet(depth, growth_rate, num_blocks,
+ output_classes, num_layers_in_each_block,
+ bottleneck=True, compression=0.5,
+ weight_decay=1e-4, dropout_rate=0,
+ pool_initial=False, include_top=True)
+
+ rand_input = tf.random_uniform((batch_size, 32, 32, 3))
+ output_shape = model(rand_input).shape
+ self.assertEqual(output_shape, (batch_size, output_classes))
+
+ def test_bottleneck_false(self):
+ depth = 7
+ growth_rate = 2
+ num_blocks = 3
+ output_classes = 10
+ num_layers_in_each_block = -1
+ batch_size = 1
+
+ model = densenet.DenseNet(depth, growth_rate, num_blocks,
+ output_classes, num_layers_in_each_block,
+ bottleneck=False, compression=0.5,
+ weight_decay=1e-4, dropout_rate=0,
+ pool_initial=False, include_top=True)
+
+ rand_input = tf.random_uniform((batch_size, 32, 32, 3))
+ output_shape = model(rand_input).shape
+ self.assertEqual(output_shape, (batch_size, output_classes))
+
+ def test_pool_initial_true(self):
+ depth = 7
+ growth_rate = 2
+ num_blocks = 4
+ output_classes = 10
+ num_layers_in_each_block = [1, 2, 2, 1]
+ batch_size = 1
+
+ model = densenet.DenseNet(depth, growth_rate, num_blocks,
+ output_classes, num_layers_in_each_block,
+ bottleneck=True, compression=0.5,
+ weight_decay=1e-4, dropout_rate=0,
+ pool_initial=True, include_top=True)
+
+ rand_input = tf.random_uniform((batch_size, 32, 32, 3))
+ output_shape = model(rand_input).shape
+ self.assertEqual(output_shape, (batch_size, output_classes))
+
+if __name__ == '__main__':
+ tf.enable_eager_execution()
+ tf.test.main()
diff --git a/tensorflow/contrib/eager/python/examples/gan/mnist.py b/tensorflow/contrib/eager/python/examples/gan/mnist.py
index cc9cf53410..b33243021b 100644
--- a/tensorflow/contrib/eager/python/examples/gan/mnist.py
+++ b/tensorflow/contrib/eager/python/examples/gan/mnist.py
@@ -214,7 +214,7 @@ def train_one_epoch(generator, discriminator, generator_optimizer,
total_generator_loss = 0.0
total_discriminator_loss = 0.0
- for (batch_index, images) in enumerate(tfe.Iterator(dataset)):
+ for (batch_index, images) in enumerate(dataset):
with tf.device('/cpu:0'):
tf.assign_add(step_counter, 1)
@@ -227,7 +227,10 @@ def train_one_epoch(generator, discriminator, generator_optimizer,
maxval=1.,
seed=batch_index)
- with tf.GradientTape(persistent=True) as g:
+ # We can use two tapes, or a single persistent tape.
+ # Using two tapes is memory efficient, since intermediate tensors can be
+ # released between the two .gradient() calls below.
+ with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise)
tf.contrib.summary.image(
'generated_images',
@@ -243,9 +246,10 @@ def train_one_epoch(generator, discriminator, generator_optimizer,
generator_loss_val = generator_loss(discriminator_gen_outputs)
total_generator_loss += generator_loss_val
- generator_grad = g.gradient(generator_loss_val, generator.variables)
- discriminator_grad = g.gradient(discriminator_loss_val,
- discriminator.variables)
+ generator_grad = gen_tape.gradient(generator_loss_val,
+ generator.variables)
+ discriminator_grad = disc_tape.gradient(discriminator_loss_val,
+ discriminator.variables)
generator_optimizer.apply_gradients(
zip(generator_grad, generator.variables))
diff --git a/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb b/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb
new file mode 100644
index 0000000000..1a5a186e7a
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb
@@ -0,0 +1,1184 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "name": "image_captioning_with_attention.ipynb",
+ "version": "0.3.2",
+ "views": {},
+ "default_view": {},
+ "provenance": [
+ {
+ "file_id": "1HI8OK2sMjcx9CTWVn0122QAHOuXaOaMg",
+ "timestamp": 1530222436922
+ }
+ ],
+ "private_outputs": true,
+ "collapsed_sections": [],
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "metadata": {
+ "id": "K2s1A9eLRPEj",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "##### Copyright 2018 The TensorFlow Authors.\n",
+ "\n",
+ "Licensed under the Apache License, Version 2.0 (the \"License\").\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "Cffg2i257iMS",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Image Captioning with Attention\n",
+ "\n",
+ "<table class=\"tfo-notebook-buttons\" align=\"left\"><td>\n",
+ "<a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb\">\n",
+ " <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a> \n",
+ "</td><td>\n",
+ "<a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a></td></table>"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "QASbY_HGo4Lq",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "Image captioning is the task of generating a caption for an image. Given an image like this:\n",
+ "\n",
+ "![Man Surfing](https://tensorflow.org/images/surf.jpg) \n",
+ "\n",
+ "[Image Source](https://commons.wikimedia.org/wiki/Surfing#/media/File:Surfing_in_Hawaii.jpg), License: Public Domain\n",
+ "\n",
+ "Our goal is generate a caption, such as \"a surfer riding on a wave\". Here, we'll use an attention based model. This enables us to see which parts of the image the model focuses on as it generates a caption.\n",
+ "\n",
+ "![Prediction](https://tensorflow.org/images/imcap_prediction.png)\n",
+ "\n",
+ "This model architecture below is similar to [Show, Attend and Tell: Neural Image Caption Generation with Visual Attention](https://arxiv.org/abs/1502.03044). \n",
+ "\n",
+ "The code uses [tf.keras](https://www.tensorflow.org/programmers_guide/keras) and [eager execution](https://www.tensorflow.org/programmers_guide/eager), which you can learn more about in the linked guides.\n",
+ "\n",
+ "This notebook is an end-to-end example. If you run it, it will download the [MS-COCO](http://cocodataset.org/#home) dataset, preprocess and cache a subset of the images using Inception V3, train an encoder-decoder model, and use it to generate captions on new images.\n",
+ "\n",
+ "The code requires TensorFlow version >=1.9. If you're running this in [Colab]()\n",
+ "\n",
+ "In this example, we're training on a relatively small amount of data as an example. On a single P100 GPU, this example will take about ~2 hours to train. We train on the first 30,000 captions (corresponding to about ~20,000 images depending on shuffling, as there are multiple captions per image in the dataset)\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "U8l4RJ0XRPEm",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# Import TensorFlow and enable eager execution\n",
+ "# This code requires TensorFlow version >=1.9\n",
+ "import tensorflow as tf\n",
+ "tf.enable_eager_execution()\n",
+ "\n",
+ "# We'll generate plots of attention in order to see which parts of an image\n",
+ "# our model focuses on during captioning\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "# Scikit-learn includes many helpful utilities\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "from sklearn.utils import shuffle\n",
+ "\n",
+ "import re\n",
+ "import numpy as np\n",
+ "import os\n",
+ "import time\n",
+ "import json\n",
+ "from glob import glob\n",
+ "from PIL import Image\n",
+ "import pickle"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "b6qbGw8MRPE5",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Download and prepare the MS-COCO dataset\n",
+ "\n",
+ "We will use the [MS-COCO dataset](http://cocodataset.org/#home) to train our model. This dataset contains >82,000 images, each of which has been annotated with at least 5 different captions. The code code below will download and extract the dataset automatically. \n",
+ "\n",
+ "**Caution: large download ahead**. We'll use the training set, it's a 13GB file."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "krQuPYTtRPE7",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "annotation_zip = tf.keras.utils.get_file('captions.zip', \n",
+ " cache_subdir=os.path.abspath('.'),\n",
+ " origin = 'http://images.cocodataset.org/annotations/annotations_trainval2014.zip',\n",
+ " extract = True)\n",
+ "annotation_file = os.path.dirname(annotation_zip)+'/annotations/captions_train2014.json'\n",
+ "\n",
+ "name_of_zip = 'train2014.zip'\n",
+ "if not os.path.exists(os.path.abspath('.') + '/' + name_of_zip):\n",
+ " image_zip = tf.keras.utils.get_file(name_of_zip, \n",
+ " cache_subdir=os.path.abspath('.'),\n",
+ " origin = 'http://images.cocodataset.org/zips/train2014.zip',\n",
+ " extract = True)\n",
+ " PATH = os.path.dirname(image_zip)+'/train2014/'\n",
+ "else:\n",
+ " PATH = os.path.abspath('.')+'/train2014/'"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "aANEzb5WwSzg",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Optionally, limit the size of the training set for faster training\n",
+ "For this example, we'll select a subset of 30,000 captions and use these and the corresponding images to train our model. As always, captioning quality will improve if you choose to use more data."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "4G3b8x8_RPFD",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# read the json file\n",
+ "with open(annotation_file, 'r') as f:\n",
+ " annotations = json.load(f)\n",
+ "\n",
+ "# storing the captions and the image name in vectors\n",
+ "all_captions = []\n",
+ "all_img_name_vector = []\n",
+ "\n",
+ "for annot in annotations['annotations']:\n",
+ " caption = '<start> ' + annot['caption'] + ' <end>'\n",
+ " image_id = annot['image_id']\n",
+ " full_coco_image_path = PATH + 'COCO_train2014_' + '%012d.jpg' % (image_id)\n",
+ " \n",
+ " all_img_name_vector.append(full_coco_image_path)\n",
+ " all_captions.append(caption)\n",
+ "\n",
+ "# shuffling the captions and image_names together\n",
+ "# setting a random state\n",
+ "train_captions, img_name_vector = shuffle(all_captions,\n",
+ " all_img_name_vector,\n",
+ " random_state=1)\n",
+ "\n",
+ "# selecting the first 30000 captions from the shuffled set\n",
+ "num_examples = 30000\n",
+ "train_captions = train_captions[:num_examples]\n",
+ "img_name_vector = img_name_vector[:num_examples]"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "mPBMgK34RPFL",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "len(train_captions), len(all_captions)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "8cSW4u-ORPFQ",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Preprocess the images using InceptionV3\n",
+ "Next, we will use InceptionV3 (pretrained on Imagenet) to classify each image. We will extract features from the last convolutional layer. \n",
+ "\n",
+ "First, we will need to convert the images into the format inceptionV3 expects by:\n",
+ "* Resizing the image to (299, 299)\n",
+ "* Using the [preprocess_input](https://www.tensorflow.org/api_docs/python/tf/keras/applications/inception_v3/preprocess_input) method to place the pixels in the range of -1 to 1 (to match the format of the images used to train InceptionV3)."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "zXR0217aRPFR",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "def load_image(image_path):\n",
+ " img = tf.read_file(image_path)\n",
+ " img = tf.image.decode_jpeg(img, channels=3)\n",
+ " img = tf.image.resize_images(img, (299, 299))\n",
+ " img = tf.keras.applications.inception_v3.preprocess_input(img)\n",
+ " return img, image_path"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "MDvIu4sXRPFV",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Initialize InceptionV3 and load the pretrained Imagenet weights\n",
+ "\n",
+ "To do so, we'll create a tf.keras model where the output layer is the last convolutional layer in the InceptionV3 architecture. \n",
+ "* Each image is forwarded through the network and the vector that we get at the end is stored in a dictionary (image_name --> feature_vector). \n",
+ "* We use the last convolutional layer because we are using attention in this example. The shape of the output of this layer is ```8x8x2048```. \n",
+ "* We avoid doing this during training so it does not become a bottleneck. \n",
+ "* After all the images are passed through the network, we pickle the dictionary and save it to disk."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "RD3vW4SsRPFW",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "image_model = tf.keras.applications.InceptionV3(include_top=False, \n",
+ " weights='imagenet')\n",
+ "new_input = image_model.input\n",
+ "hidden_layer = image_model.layers[-1].output\n",
+ "\n",
+ "image_features_extract_model = tf.keras.Model(new_input, hidden_layer)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "rERqlR3WRPGO",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Caching the features extracted from InceptionV3\n",
+ "\n",
+ "We will pre-process each image with InceptionV3 and cache the output to disk. Caching the output in RAM would be faster but memory intensive, requiring 8 \\* 8 \\* 2048 floats per image. At the time of writing, this would exceed the memory limitations of Colab (although these may change, an instance appears to have about 12GB of memory currently). \n",
+ "\n",
+ "Performance could be improved with a more sophisticated caching strategy (e.g., by sharding the images to reduce random access disk I/O) at the cost of more code.\n",
+ "\n",
+ "This will take about 10 minutes to run in Colab with a GPU. If you'd like to see a progress bar, you could: install [tqdm](https://github.com/tqdm/tqdm) (```!pip install tqdm```), then change this line: \n",
+ "\n",
+ "```for img, path in image_dataset:``` \n",
+ "\n",
+ "to:\n",
+ "\n",
+ "```for img, path in tqdm(image_dataset):```."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "Dx_fvbVgRPGQ",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# getting the unique images\n",
+ "encode_train = sorted(set(img_name_vector))\n",
+ "\n",
+ "# feel free to change the batch_size according to your system configuration\n",
+ "image_dataset = tf.data.Dataset.from_tensor_slices(\n",
+ " encode_train).map(load_image).batch(16)\n",
+ "\n",
+ "for img, path in image_dataset:\n",
+ " batch_features = image_features_extract_model(img)\n",
+ " batch_features = tf.reshape(batch_features, \n",
+ " (batch_features.shape[0], -1, batch_features.shape[3]))\n",
+ "\n",
+ " for bf, p in zip(batch_features, path):\n",
+ " path_of_feature = p.numpy().decode(\"utf-8\")\n",
+ " np.save(path_of_feature, bf.numpy())"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "nyqH3zFwRPFi",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Preprocess and tokenize the captions\n",
+ "\n",
+ "* First, we'll tokenize the captions (e.g., by splitting on spaces). This will give us a vocabulary of all the unique words in the data (e.g., \"surfing\", \"football\", etc).\n",
+ "* Next, we'll limit the vocabulary size to the top 5,000 words to save memory. We'll replace all other words with the token \"UNK\" (for unknown).\n",
+ "* Finally, we create a word --> index mapping and vice-versa.\n",
+ "* We will then pad all sequences to the be same length as the longest one. "
+ ]
+ },
+ {
+ "metadata": {
+ "id": "HZfK8RhQRPFj",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# This will find the maximum length of any caption in our dataset\n",
+ "def calc_max_length(tensor):\n",
+ " return max(len(t) for t in tensor)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "oJGE34aiRPFo",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# The steps above is a general process of dealing with text processing\n",
+ "\n",
+ "# choosing the top 5000 words from the vocabulary\n",
+ "top_k = 5000\n",
+ "tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=top_k, \n",
+ " oov_token=\"<unk>\", \n",
+ " filters='!\"#$%&()*+.,-/:;=?@[\\]^_`{|}~ ')\n",
+ "tokenizer.fit_on_texts(train_captions)\n",
+ "train_seqs = tokenizer.texts_to_sequences(train_captions)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "8Q44tNQVRPFt",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "tokenizer.word_index = {key:value for key, value in tokenizer.word_index.items() if value <= top_k}\n",
+ "# putting <unk> token in the word2idx dictionary\n",
+ "tokenizer.word_index[tokenizer.oov_token] = top_k + 1\n",
+ "tokenizer.word_index['<pad>'] = 0"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "0fpJb5ojRPFv",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# creating the tokenized vectors\n",
+ "train_seqs = tokenizer.texts_to_sequences(train_captions)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "olQArbgbRPF1",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# creating a reverse mapping (index -> word)\n",
+ "index_word = {value:key for key, value in tokenizer.word_index.items()}"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "AidglIZVRPF4",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# padding each vector to the max_length of the captions\n",
+ "# if the max_length parameter is not provided, pad_sequences calculates that automatically\n",
+ "cap_vector = tf.keras.preprocessing.sequence.pad_sequences(train_seqs, padding='post')"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "gL0wkttkRPGA",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# calculating the max_length \n",
+ "# used to store the attention weights\n",
+ "max_length = calc_max_length(train_seqs)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "M3CD75nDpvTI",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Split the data into training and testing"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "iS7DDMszRPGF",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# Create training and validation sets using 80-20 split\n",
+ "img_name_train, img_name_val, cap_train, cap_val = train_test_split(img_name_vector, \n",
+ " cap_vector, \n",
+ " test_size=0.2, \n",
+ " random_state=0)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "XmViPkRFRPGH",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "len(img_name_train), len(cap_train), len(img_name_val), len(cap_val)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "uEWM9xrYcg45",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Our images and captions are ready! Next, let's create a tf.data dataset to use for training our model.\n",
+ "\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "Q3TnZ1ToRPGV",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# feel free to change these parameters according to your system's configuration\n",
+ "\n",
+ "BATCH_SIZE = 64\n",
+ "BUFFER_SIZE = 1000\n",
+ "embedding_dim = 256\n",
+ "units = 512\n",
+ "vocab_size = len(tokenizer.word_index)\n",
+ "# shape of the vector extracted from InceptionV3 is (64, 2048)\n",
+ "# these two variables represent that\n",
+ "features_shape = 2048\n",
+ "attention_features_shape = 64"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "SmZS2N0bXG3T",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# loading the numpy files \n",
+ "def map_func(img_name, cap):\n",
+ " img_tensor = np.load(img_name.decode('utf-8')+'.npy')\n",
+ " return img_tensor, cap"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "FDF_Nm3tRPGZ",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "dataset = tf.data.Dataset.from_tensor_slices((img_name_train, cap_train))\n",
+ "\n",
+ "# using map to load the numpy files in parallel\n",
+ "# NOTE: Be sure to set num_parallel_calls to the number of CPU cores you have\n",
+ "# https://www.tensorflow.org/api_docs/python/tf/py_func\n",
+ "dataset = dataset.map(lambda item1, item2: tf.py_func(\n",
+ " map_func, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=8)\n",
+ "\n",
+ "# shuffling and batching\n",
+ "dataset = dataset.shuffle(BUFFER_SIZE)\n",
+ "# https://www.tensorflow.org/api_docs/python/tf/contrib/data/batch_and_drop_remainder\n",
+ "dataset = dataset.batch(BATCH_SIZE)\n",
+ "dataset = dataset.prefetch(1)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "nrvoDphgRPGd",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Model\n",
+ "\n",
+ "Fun fact, the decoder below is identical to the one in the example for [Neural Machine Translation with Attention]( https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb).\n",
+ "\n",
+ "The model architecture is inspired by the [Show, Attend and Tell](https://arxiv.org/pdf/1502.03044.pdf) paper.\n",
+ "\n",
+ "* In this example, we extract the features from the lower convolutional layer of InceptionV3 giving us a vector of shape (8, 8, 2048). \n",
+ "* We squash that to a shape of (64, 2048).\n",
+ "* This vector is then passed through the CNN Encoder(which consists of a single Fully connected layer).\n",
+ "* The RNN(here GRU) attends over the image to predict the next word."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "AAppCGLKRPGd",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "def gru(units):\n",
+ " # If you have a GPU, we recommend using the CuDNNGRU layer (it provides a \n",
+ " # significant speedup).\n",
+ " if tf.test.is_gpu_available():\n",
+ " return tf.keras.layers.CuDNNGRU(units, \n",
+ " return_sequences=True, \n",
+ " return_state=True, \n",
+ " recurrent_initializer='glorot_uniform')\n",
+ " else:\n",
+ " return tf.keras.layers.GRU(units, \n",
+ " return_sequences=True, \n",
+ " return_state=True, \n",
+ " recurrent_activation='sigmoid', \n",
+ " recurrent_initializer='glorot_uniform')"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "ja2LFTMSdeV3",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "class BahdanauAttention(tf.keras.Model):\n",
+ " def __init__(self, units):\n",
+ " super(BahdanauAttention, self).__init__()\n",
+ " self.W1 = tf.keras.layers.Dense(units)\n",
+ " self.W2 = tf.keras.layers.Dense(units)\n",
+ " self.V = tf.keras.layers.Dense(1)\n",
+ " \n",
+ " def call(self, features, hidden):\n",
+ " # features(CNN_encoder output) shape == (batch_size, 64, embedding_dim)\n",
+ " \n",
+ " # hidden shape == (batch_size, hidden_size)\n",
+ " # hidden_with_time_axis shape == (batch_size, 1, hidden_size)\n",
+ " hidden_with_time_axis = tf.expand_dims(hidden, 1)\n",
+ " \n",
+ " # score shape == (batch_size, 64, hidden_size)\n",
+ " score = tf.nn.tanh(self.W1(features) + self.W2(hidden_with_time_axis))\n",
+ " \n",
+ " # attention_weights shape == (batch_size, 64, 1)\n",
+ " # we get 1 at the last axis because we are applying score to self.V\n",
+ " attention_weights = tf.nn.softmax(self.V(score), axis=1)\n",
+ " \n",
+ " # context_vector shape after sum == (batch_size, hidden_size)\n",
+ " context_vector = attention_weights * features\n",
+ " context_vector = tf.reduce_sum(context_vector, axis=1)\n",
+ " \n",
+ " return context_vector, attention_weights"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "AZ7R1RxHRPGf",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "class CNN_Encoder(tf.keras.Model):\n",
+ " # Since we have already extracted the features and dumped it using pickle\n",
+ " # This encoder passes those features through a Fully connected layer\n",
+ " def __init__(self, embedding_dim):\n",
+ " super(CNN_Encoder, self).__init__()\n",
+ " # shape after fc == (batch_size, 64, embedding_dim)\n",
+ " self.fc = tf.keras.layers.Dense(embedding_dim)\n",
+ " \n",
+ " def call(self, x):\n",
+ " x = self.fc(x)\n",
+ " x = tf.nn.relu(x)\n",
+ " return x"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "V9UbGQmERPGi",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "class RNN_Decoder(tf.keras.Model):\n",
+ " def __init__(self, embedding_dim, units, vocab_size):\n",
+ " super(RNN_Decoder, self).__init__()\n",
+ " self.units = units\n",
+ "\n",
+ " self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n",
+ " self.gru = gru(self.units)\n",
+ " self.fc1 = tf.keras.layers.Dense(self.units)\n",
+ " self.fc2 = tf.keras.layers.Dense(vocab_size)\n",
+ " \n",
+ " self.attention = BahdanauAttention(self.units)\n",
+ " \n",
+ " def call(self, x, features, hidden):\n",
+ " # defining attention as a separate model\n",
+ " context_vector, attention_weights = self.attention(features, hidden)\n",
+ " \n",
+ " # x shape after passing through embedding == (batch_size, 1, embedding_dim)\n",
+ " x = self.embedding(x)\n",
+ " \n",
+ " # x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)\n",
+ " x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)\n",
+ " \n",
+ " # passing the concatenated vector to the GRU\n",
+ " output, state = self.gru(x)\n",
+ " \n",
+ " # shape == (batch_size, max_length, hidden_size)\n",
+ " x = self.fc1(output)\n",
+ " \n",
+ " # x shape == (batch_size * max_length, hidden_size)\n",
+ " x = tf.reshape(x, (-1, x.shape[2]))\n",
+ " \n",
+ " # output shape == (batch_size * max_length, vocab)\n",
+ " x = self.fc2(x)\n",
+ "\n",
+ " return x, state, attention_weights\n",
+ "\n",
+ " def reset_state(self, batch_size):\n",
+ " return tf.zeros((batch_size, self.units))"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "Qs_Sr03wRPGk",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "encoder = CNN_Encoder(embedding_dim)\n",
+ "decoder = RNN_Decoder(embedding_dim, units, vocab_size)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "-bYN7xA0RPGl",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "optimizer = tf.train.AdamOptimizer()\n",
+ "\n",
+ "# We are masking the loss calculated for padding\n",
+ "def loss_function(real, pred):\n",
+ " mask = 1 - np.equal(real, 0)\n",
+ " loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask\n",
+ " return tf.reduce_mean(loss_)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "PHod7t72RPGn",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Training\n",
+ "\n",
+ "* We extract the features stored in the respective `.npy` files and then pass those features through the encoder.\n",
+ "* The encoder output, hidden state(initialized to 0) and the decoder input (which is the start token) is passed to the decoder.\n",
+ "* The decoder returns the predictions and the decoder hidden state.\n",
+ "* The decoder hidden state is then passed back into the model and the predictions are used to calculate the loss.\n",
+ "* Use teacher forcing to decide the next input to the decoder.\n",
+ "* Teacher forcing is the technique where the target word is passed as the next input to the decoder.\n",
+ "* The final step is to calculate the gradients and apply it to the optimizer and backpropagate.\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "Vt4WZ5mhJE-E",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# adding this in a separate cell because if you run the training cell \n",
+ "# many times, the loss_plot array will be reset\n",
+ "loss_plot = []"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "UlA4VIQpRPGo",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "EPOCHS = 20\n",
+ "\n",
+ "for epoch in range(EPOCHS):\n",
+ " start = time.time()\n",
+ " total_loss = 0\n",
+ " \n",
+ " for (batch, (img_tensor, target)) in enumerate(dataset):\n",
+ " loss = 0\n",
+ " \n",
+ " # initializing the hidden state for each batch\n",
+ " # because the captions are not related from image to image\n",
+ " hidden = decoder.reset_state(batch_size=target.shape[0])\n",
+ "\n",
+ " dec_input = tf.expand_dims([tokenizer.word_index['<start>']] * BATCH_SIZE, 1)\n",
+ " \n",
+ " with tf.GradientTape() as tape:\n",
+ " features = encoder(img_tensor)\n",
+ " \n",
+ " for i in range(1, target.shape[1]):\n",
+ " # passing the features through the decoder\n",
+ " predictions, hidden, _ = decoder(dec_input, features, hidden)\n",
+ "\n",
+ " loss += loss_function(target[:, i], predictions)\n",
+ " \n",
+ " # using teacher forcing\n",
+ " dec_input = tf.expand_dims(target[:, i], 1)\n",
+ " \n",
+ " total_loss += (loss / int(target.shape[1]))\n",
+ " \n",
+ " variables = encoder.variables + decoder.variables\n",
+ " \n",
+ " gradients = tape.gradient(loss, variables) \n",
+ " \n",
+ " optimizer.apply_gradients(zip(gradients, variables), tf.train.get_or_create_global_step())\n",
+ " \n",
+ " if batch % 100 == 0:\n",
+ " print ('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1, \n",
+ " batch, \n",
+ " loss.numpy() / int(target.shape[1])))\n",
+ " # storing the epoch end loss value to plot later\n",
+ " loss_plot.append(total_loss / len(cap_vector))\n",
+ " \n",
+ " print ('Epoch {} Loss {:.6f}'.format(epoch + 1, \n",
+ " total_loss/len(cap_vector)))\n",
+ " print ('Time taken for 1 epoch {} sec\\n'.format(time.time() - start))"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "1Wm83G-ZBPcC",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "plt.plot(loss_plot)\n",
+ "plt.xlabel('Epochs')\n",
+ "plt.ylabel('Loss')\n",
+ "plt.title('Loss Plot')\n",
+ "plt.show()"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "xGvOcLQKghXN",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Caption!\n",
+ "\n",
+ "* The evaluate function is similar to the training loop, except we don't use teacher forcing here. The input to the decoder at each time step is its previous predictions along with the hidden state and the encoder output.\n",
+ "* Stop predicting when the model predicts the end token.\n",
+ "* And store the attention weights for every time step."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "RCWpDtyNRPGs",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "def evaluate(image):\n",
+ " attention_plot = np.zeros((max_length, attention_features_shape))\n",
+ "\n",
+ " hidden = decoder.reset_state(batch_size=1)\n",
+ "\n",
+ " temp_input = tf.expand_dims(load_image(image)[0], 0)\n",
+ " img_tensor_val = image_features_extract_model(temp_input)\n",
+ " img_tensor_val = tf.reshape(img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3]))\n",
+ "\n",
+ " features = encoder(img_tensor_val)\n",
+ "\n",
+ " dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)\n",
+ " result = []\n",
+ "\n",
+ " for i in range(max_length):\n",
+ " predictions, hidden, attention_weights = decoder(dec_input, features, hidden)\n",
+ "\n",
+ " attention_plot[i] = tf.reshape(attention_weights, (-1, )).numpy()\n",
+ "\n",
+ " predicted_id = tf.multinomial(tf.exp(predictions), num_samples=1)[0][0].numpy()\n",
+ " result.append(index_word[predicted_id])\n",
+ "\n",
+ " if index_word[predicted_id] == '<end>':\n",
+ " return result, attention_plot\n",
+ "\n",
+ " dec_input = tf.expand_dims([predicted_id], 0)\n",
+ "\n",
+ " attention_plot = attention_plot[:len(result), :]\n",
+ " return result, attention_plot"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "fD_y7PD6RPGt",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "def plot_attention(image, result, attention_plot):\n",
+ " temp_image = np.array(Image.open(image))\n",
+ "\n",
+ " fig = plt.figure(figsize=(10, 10))\n",
+ " \n",
+ " len_result = len(result)\n",
+ " for l in range(len_result):\n",
+ " temp_att = np.resize(attention_plot[l], (8, 8))\n",
+ " ax = fig.add_subplot(len_result//2, len_result//2, l+1)\n",
+ " ax.set_title(result[l])\n",
+ " img = ax.imshow(temp_image)\n",
+ " ax.imshow(temp_att, cmap='gray', alpha=0.6, extent=img.get_extent())\n",
+ "\n",
+ " plt.tight_layout()\n",
+ " plt.show()"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "io7ws3ReRPGv",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# captions on the validation set\n",
+ "rid = np.random.randint(0, len(img_name_val))\n",
+ "image = img_name_val[rid]\n",
+ "real_caption = ' '.join([index_word[i] for i in cap_val[rid] if i not in [0]])\n",
+ "result, attention_plot = evaluate(image)\n",
+ "\n",
+ "print ('Real Caption:', real_caption)\n",
+ "print ('Prediction Caption:', ' '.join(result))\n",
+ "plot_attention(image, result, attention_plot)\n",
+ "# opening the image\n",
+ "Image.open(img_name_val[rid])"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "Rprk3HEvZuxb",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Try it on your own images\n",
+ "For fun, below we've provided a method you can use to caption your own images with the model we've just trained. Keep in mind, it was trained on a relatively small amount of data, and your images may be different from the training data (so be prepared for weird results!)\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "9Psd1quzaAWg",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "image_url = 'https://tensorflow.org/images/surf.jpg'\n",
+ "image_extension = image_url[-4:]\n",
+ "image_path = tf.keras.utils.get_file('image'+image_extension, \n",
+ " origin=image_url)\n",
+ "\n",
+ "result, attention_plot = evaluate(image_path)\n",
+ "print ('Prediction Caption:', ' '.join(result))\n",
+ "plot_attention(image_path, result, attention_plot)\n",
+ "# opening the image\n",
+ "Image.open(image_path)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "VJZXyJco6uLO",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Next steps\n",
+ "\n",
+ "Congrats! You've just trained an image captioning model with attention. Next, we recommend taking a look at this example [Neural Machine Translation with Attention]( https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb). It uses a similar architecture to translate between Spanish and English sentences. You can also experiment with training the code in this notebook on a different dataset."
+ ]
+ }
+ ]
+}
diff --git a/tensorflow/contrib/eager/python/examples/generative_examples/text_generation.ipynb b/tensorflow/contrib/eager/python/examples/generative_examples/text_generation.ipynb
new file mode 100644
index 0000000000..6be09f98df
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/generative_examples/text_generation.ipynb
@@ -0,0 +1,689 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "name": "text_generation.ipynb",
+ "version": "0.3.2",
+ "views": {},
+ "default_view": {},
+ "provenance": [],
+ "private_outputs": true,
+ "collapsed_sections": [],
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "metadata": {
+ "id": "hcD2nPQvPOFM",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "##### Copyright 2018 The TensorFlow Authors.\n",
+ "\n",
+ "Licensed under the Apache License, Version 2.0 (the \"License\").\n",
+ "\n",
+ "# Text Generation using a RNN\n",
+ "\n",
+ "<table class=\"tfo-notebook-buttons\" align=\"left\"><td>\n",
+ "<a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/generative_examples/text_generation.ipynb\">\n",
+ " <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a> \n",
+ "</td><td>\n",
+ "<a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/generative_examples/text_generation.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on Github</a></td></table>"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "BwpJ5IffzRG6",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "This notebook demonstrates how to generate text using an RNN using [tf.keras](https://www.tensorflow.org/programmers_guide/keras) and [eager execution](https://www.tensorflow.org/programmers_guide/eager). If you like, you can write a similar [model](https://github.com/fchollet/deep-learning-with-python-notebooks/blob/master/8.1-text-generation-with-lstm.ipynb) using less code. Here, we show a lower-level impementation that's useful to understand as prework before diving in to deeper examples in a similar, like [Neural Machine Translation with Attention](https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb).\n",
+ "\n",
+ "This notebook is an end-to-end example. When you run it, it will download a dataset of Shakespeare's writing. We'll use a collection of plays, borrowed from Andrej Karpathy's excellent [The Unreasonable Effectiveness of Recurrent Neural Networks](http://karpathy.github.io/2015/05/21/rnn-effectiveness/). The notebook will train a model, and use it to generate sample output.\n",
+ " \n",
+ "Here is the output(with start string='w') after training a single layer GRU for 30 epochs with the default settings below:\n",
+ "\n",
+ "```\n",
+ "were to the death of him\n",
+ "And nothing of the field in the view of hell,\n",
+ "When I said, banish him, I will not burn thee that would live.\n",
+ "\n",
+ "HENRY BOLINGBROKE:\n",
+ "My gracious uncle--\n",
+ "\n",
+ "DUKE OF YORK:\n",
+ "As much disgraced to the court, the gods them speak,\n",
+ "And now in peace himself excuse thee in the world.\n",
+ "\n",
+ "HORTENSIO:\n",
+ "Madam, 'tis not the cause of the counterfeit of the earth,\n",
+ "And leave me to the sun that set them on the earth\n",
+ "And leave the world and are revenged for thee.\n",
+ "\n",
+ "GLOUCESTER:\n",
+ "I would they were talking with the very name of means\n",
+ "To make a puppet of a guest, and therefore, good Grumio,\n",
+ "Nor arm'd to prison, o' the clouds, of the whole field,\n",
+ "With the admire\n",
+ "With the feeding of thy chair, and we have heard it so,\n",
+ "I thank you, sir, he is a visor friendship with your silly your bed.\n",
+ "\n",
+ "SAMPSON:\n",
+ "I do desire to live, I pray: some stand of the minds, make thee remedies\n",
+ "With the enemies of my soul.\n",
+ "\n",
+ "MENENIUS:\n",
+ "I'll keep the cause of my mistress.\n",
+ "\n",
+ "POLIXENES:\n",
+ "My brother Marcius!\n",
+ "\n",
+ "Second Servant:\n",
+ "Will't ple\n",
+ "```\n",
+ "\n",
+ "Of course, while some of the sentences are grammatical, most do not make sense. But, consider:\n",
+ "\n",
+ "* Our model is character based (when we began training, it did not yet know how to spell a valid English word, or that words were even a unit of text).\n",
+ "\n",
+ "* The structure of the output resembles a play (blocks begin with a speaker name, in all caps similar to the original text). Sentences generally end with a period. If you look at the text from a distance (or don't read the invididual words too closely, it appears as if it's an excerpt from a play).\n",
+ "\n",
+ "As a next step, you can experiment training the model on a different dataset - any large text file(ASCII) will do, and you can modify a single line of code below to make that change. Have fun!\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "R3p22DBDsaCA",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Install unidecode library\n",
+ "A helpful library to convert unicode to ASCII."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "wZ6LOM12wKGH",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "!pip install unidecode"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "WGyKZj3bzf9p",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Import tensorflow and enable eager execution."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "yG_n40gFzf9s",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# Import TensorFlow >= 1.9 and enable eager execution\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "# Note: Once you enable eager execution, it cannot be disabled. \n",
+ "tf.enable_eager_execution()\n",
+ "\n",
+ "import numpy as np\n",
+ "import re\n",
+ "import random\n",
+ "import unidecode\n",
+ "import time"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "EHDoRoc5PKWz",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Download the dataset\n",
+ "\n",
+ "In this example, we will use the [shakespeare dataset](https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt). You can use any other dataset that you like.\n",
+ "\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "pD_55cOxLkAb",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/yashkatariya/shakespeare.txt')"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "UHjdCjDuSvX_",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Read the dataset\n",
+ "\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "-E5JvY3wzf94",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "text = unidecode.unidecode(open(path_to_file).read())\n",
+ "# length of text is the number of characters in it\n",
+ "print (len(text))"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "Il9ww98izf-D",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "Creating dictionaries to map from characters to their indices and vice-versa, which will be used to vectorize the inputs"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "IalZLbvOzf-F",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# unique contains all the unique characters in the file\n",
+ "unique = sorted(set(text))\n",
+ "\n",
+ "# creating a mapping from unique characters to indices\n",
+ "char2idx = {u:i for i, u in enumerate(unique)}\n",
+ "idx2char = {i:u for i, u in enumerate(unique)}"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "1v_qUYfAzf-I",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# setting the maximum length sentence we want for a single input in characters\n",
+ "max_length = 100\n",
+ "\n",
+ "# length of the vocabulary in chars\n",
+ "vocab_size = len(unique)\n",
+ "\n",
+ "# the embedding dimension \n",
+ "embedding_dim = 256\n",
+ "\n",
+ "# number of RNN (here GRU) units\n",
+ "units = 1024\n",
+ "\n",
+ "# batch size \n",
+ "BATCH_SIZE = 64\n",
+ "\n",
+ "# buffer size to shuffle our dataset\n",
+ "BUFFER_SIZE = 10000"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "LFjSVAlWzf-N",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Creating the input and output tensors\n",
+ "\n",
+ "Vectorizing the input and the target text because our model cannot understand strings only numbers.\n",
+ "\n",
+ "But first, we need to create the input and output vectors.\n",
+ "Remember the max_length we set above, we will use it here. We are creating **max_length** chunks of input, where each input vector is all the characters in that chunk except the last and the target vector is all the characters in that chunk except the first.\n",
+ "\n",
+ "For example, consider that the string = 'tensorflow' and the max_length is 9\n",
+ "\n",
+ "So, the `input = 'tensorflo'` and `output = 'ensorflow'`\n",
+ "\n",
+ "After creating the vectors, we convert each character into numbers using the **char2idx** dictionary we created above."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "0UHJDA39zf-O",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "input_text = []\n",
+ "target_text = []\n",
+ "\n",
+ "for f in range(0, len(text)-max_length, max_length):\n",
+ " inps = text[f:f+max_length]\n",
+ " targ = text[f+1:f+1+max_length]\n",
+ "\n",
+ " input_text.append([char2idx[i] for i in inps])\n",
+ " target_text.append([char2idx[t] for t in targ])\n",
+ " \n",
+ "print (np.array(input_text).shape)\n",
+ "print (np.array(target_text).shape)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "MJdfPmdqzf-R",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Creating batches and shuffling them using tf.data"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "p2pGotuNzf-S",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "dataset = tf.data.Dataset.from_tensor_slices((input_text, target_text)).shuffle(BUFFER_SIZE)\n",
+ "dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(BATCH_SIZE))"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "m8gPwEjRzf-Z",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Creating the model\n",
+ "\n",
+ "We use the Model Subclassing API which gives us full flexibility to create the model and change it however we like. We use 3 layers to define our model.\n",
+ "\n",
+ "* Embedding layer\n",
+ "* GRU layer (you can use an LSTM layer here)\n",
+ "* Fully connected layer"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "P3KTiiInzf-a",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "class Model(tf.keras.Model):\n",
+ " def __init__(self, vocab_size, embedding_dim, units, batch_size):\n",
+ " super(Model, self).__init__()\n",
+ " self.units = units\n",
+ " self.batch_sz = batch_size\n",
+ "\n",
+ " self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n",
+ "\n",
+ " if tf.test.is_gpu_available():\n",
+ " self.gru = tf.keras.layers.CuDNNGRU(self.units, \n",
+ " return_sequences=True, \n",
+ " return_state=True, \n",
+ " recurrent_initializer='glorot_uniform')\n",
+ " else:\n",
+ " self.gru = tf.keras.layers.GRU(self.units, \n",
+ " return_sequences=True, \n",
+ " return_state=True, \n",
+ " recurrent_activation='sigmoid', \n",
+ " recurrent_initializer='glorot_uniform')\n",
+ "\n",
+ " self.fc = tf.keras.layers.Dense(vocab_size)\n",
+ " \n",
+ " def call(self, x, hidden):\n",
+ " x = self.embedding(x)\n",
+ "\n",
+ " # output shape == (batch_size, max_length, hidden_size) \n",
+ " # states shape == (batch_size, hidden_size)\n",
+ "\n",
+ " # states variable to preserve the state of the model\n",
+ " # this will be used to pass at every step to the model while training\n",
+ " output, states = self.gru(x, initial_state=hidden)\n",
+ "\n",
+ "\n",
+ " # reshaping the output so that we can pass it to the Dense layer\n",
+ " # after reshaping the shape is (batch_size * max_length, hidden_size)\n",
+ " output = tf.reshape(output, (-1, output.shape[2]))\n",
+ "\n",
+ " # The dense layer will output predictions for every time_steps(max_length)\n",
+ " # output shape after the dense layer == (max_length * batch_size, vocab_size)\n",
+ " x = self.fc(output)\n",
+ "\n",
+ " return x, states"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "trpqTWyvk0nr",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Call the model and set the optimizer and the loss function"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "7t2XrzEOzf-e",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "model = Model(vocab_size, embedding_dim, units, BATCH_SIZE)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "dkjWIATszf-h",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "optimizer = tf.train.AdamOptimizer()\n",
+ "\n",
+ "# using sparse_softmax_cross_entropy so that we don't have to create one-hot vectors\n",
+ "def loss_function(real, preds):\n",
+ " return tf.losses.sparse_softmax_cross_entropy(labels=real, logits=preds)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "lPrP0XMUzf-p",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Train the model\n",
+ "\n",
+ "Here we will use a custom training loop with the help of GradientTape()\n",
+ "\n",
+ "* We initialize the hidden state of the model with zeros and shape == (batch_size, number of rnn units). We do this by calling the function defined while creating the model.\n",
+ "\n",
+ "* Next, we iterate over the dataset(batch by batch) and calculate the **predictions and the hidden states** associated with that input.\n",
+ "\n",
+ "* There are a lot of interesting things happening here.\n",
+ " * The model gets hidden state(initialized with 0), lets call that **H0** and the first batch of input, lets call that **I0**.\n",
+ " * The model then returns the predictions **P1** and **H1**.\n",
+ " * For the next batch of input, the model receives **I1** and **H1**.\n",
+ " * The interesting thing here is that we pass **H1** to the model with **I1** which is how the model learns. The context learned from batch to batch is contained in the **hidden state**.\n",
+ " * We continue doing this until the dataset is exhausted and then we start a new epoch and repeat this.\n",
+ "\n",
+ "* After calculating the predictions, we calculate the **loss** using the loss function defined above. Then we calculate the gradients of the loss with respect to the model variables(input)\n",
+ "\n",
+ "* Finally, we take a step in that direction with the help of the optimizer using the apply_gradients function.\n",
+ "\n",
+ "Note:- If you are running this notebook in Colab which has a **Tesla K80 GPU** it takes about 23 seconds per epoch.\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "d4tSNwymzf-q",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# Training step\n",
+ "\n",
+ "EPOCHS = 30\n",
+ "\n",
+ "for epoch in range(EPOCHS):\n",
+ " start = time.time()\n",
+ " \n",
+ " # initializing the hidden state at the start of every epoch\n",
+ " hidden = model.reset_states()\n",
+ " \n",
+ " for (batch, (inp, target)) in enumerate(dataset):\n",
+ " with tf.GradientTape() as tape:\n",
+ " # feeding the hidden state back into the model\n",
+ " # This is the interesting step\n",
+ " predictions, hidden = model(inp, hidden)\n",
+ " \n",
+ " # reshaping the target because that's how the \n",
+ " # loss function expects it\n",
+ " target = tf.reshape(target, (-1,))\n",
+ " loss = loss_function(target, predictions)\n",
+ " \n",
+ " grads = tape.gradient(loss, model.variables)\n",
+ " optimizer.apply_gradients(zip(grads, model.variables), global_step=tf.train.get_or_create_global_step())\n",
+ "\n",
+ " if batch % 100 == 0:\n",
+ " print ('Epoch {} Batch {} Loss {:.4f}'.format(epoch+1,\n",
+ " batch,\n",
+ " loss))\n",
+ " \n",
+ " print ('Epoch {} Loss {:.4f}'.format(epoch+1, loss))\n",
+ " print('Time taken for 1 epoch {} sec\\n'.format(time.time() - start))"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "DjGz1tDkzf-u",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Predicting using our trained model\n",
+ "\n",
+ "The below code block is used to generated the text\n",
+ "\n",
+ "* We start by choosing a start string and initializing the hidden state and setting the number of characters we want to generate.\n",
+ "\n",
+ "* We get predictions using the start_string and the hidden state\n",
+ "\n",
+ "* Then we use a multinomial distribution to calculate the index of the predicted word. **We use this predicted word as our next input to the model**\n",
+ "\n",
+ "* **The hidden state returned by the model is fed back into the model so that it now has more context rather than just one word.** After we predict the next word, the modified hidden states are again fed back into the model, which is how it learns as it gets more context from the previously predicted words.\n",
+ "\n",
+ "* If you see the predictions, the model knows when to capitalize, make paragraphs and the text follows a shakespeare style of writing which is pretty awesome!"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "WvuwZBX5Ogfd",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# Evaluation step(generating text using the model learned)\n",
+ "\n",
+ "# number of characters to generate\n",
+ "num_generate = 1000\n",
+ "\n",
+ "# You can change the start string to experiment\n",
+ "start_string = 'Q'\n",
+ "# converting our start string to numbers(vectorizing!) \n",
+ "input_eval = [char2idx[s] for s in start_string]\n",
+ "input_eval = tf.expand_dims(input_eval, 0)\n",
+ "\n",
+ "# empty string to store our results\n",
+ "text_generated = ''\n",
+ "\n",
+ "# low temperatures results in more predictable text.\n",
+ "# higher temperatures results in more surprising text\n",
+ "# experiment to find the best setting\n",
+ "temperature = 1.0\n",
+ "\n",
+ "# hidden state shape == (batch_size, number of rnn units); here batch size == 1\n",
+ "hidden = [tf.zeros((1, units))]\n",
+ "for i in range(num_generate):\n",
+ " predictions, hidden = model(input_eval, hidden)\n",
+ "\n",
+ " # using a multinomial distribution to predict the word returned by the model\n",
+ " predictions = predictions / temperature\n",
+ " predicted_id = tf.multinomial(tf.exp(predictions), num_samples=1)[0][0].numpy()\n",
+ " \n",
+ " # We pass the predicted word as the next input to the model\n",
+ " # along with the previous hidden state\n",
+ " input_eval = tf.expand_dims([predicted_id], 0)\n",
+ " \n",
+ " text_generated += idx2char[predicted_id]\n",
+ "\n",
+ "print (start_string + text_generated)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "AM2Uma_-yVIq",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Next steps\n",
+ "\n",
+ "* Change the start string to a different character, or the start of a sentence.\n",
+ "* Experiment with training on a different, or with different parameters. [Project Gutenberg](http://www.gutenberg.org/ebooks/100), for example, contains a large collection of books.\n",
+ "* Experiment with the temperature parameter.\n",
+ "* Add another RNN layer.\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "gtEd86sX5cB2",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ ""
+ ],
+ "execution_count": 0,
+ "outputs": []
+ }
+ ]
+}
diff --git a/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb b/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb
index 54ebcad8e9..1f66d7e752 100644
--- a/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb
+++ b/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb
@@ -41,11 +41,11 @@
"\n",
"# Neural Machine Translation with Attention\n",
"\n",
- "<table align=\"left\"><td>\n",
- "<a target=\"_blank\" href=\"https://colab.sandbox.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb\">\n",
+ "<table class=\"tfo-notebook-buttons\" align=\"left\"><td>\n",
+ "<a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb\">\n",
" <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a> \n",
"</td><td>\n",
- "<a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on Github</a></td></table>"
+ "<a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a></td></table>"
]
},
{
diff --git a/tensorflow/contrib/eager/python/examples/notebooks/2_gradients.ipynb b/tensorflow/contrib/eager/python/examples/notebooks/2_gradients.ipynb
deleted file mode 100644
index 9c1af9c208..0000000000
--- a/tensorflow/contrib/eager/python/examples/notebooks/2_gradients.ipynb
+++ /dev/null
@@ -1,323 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "vDJ4XzMqodTy"
- },
- "source": [
- "# Automatic Differentiation\n",
- "\n",
- "In the previous tutorial we introduced `Tensor`s and operations on them. In this tutorial we will cover [automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation), a key technique for optimizing machine learning models."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "GQJysDM__Qb0"
- },
- "source": [
- "## Setup\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "OiMPZStlibBv"
- },
- "outputs": [],
- "source": [
- "import tensorflow as tf\n",
- "tf.enable_eager_execution()\n",
- "\n",
- "tfe = tf.contrib.eager # Shorthand for some symbols"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "1CLWJl0QliB0"
- },
- "source": [
- "## Derivatives of a function\n",
- "\n",
- "TensorFlow provides APIs for automatic differentiation - computing the derivative of a function. The way that more closely mimics the math is to encapsulate the computation in a Python function, say `f`, and use `tfe.gradients_function` to create a function that computes the derivatives of `f` with respect to its arguments. If you're familiar with [autograd](https://github.com/HIPS/autograd) for differentiating numpy functions, this will be familiar. For example: "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "9FViq92UX7P8"
- },
- "outputs": [],
- "source": [
- "from math import pi\n",
- "\n",
- "def f(x):\n",
- " return tf.square(tf.sin(x))\n",
- "\n",
- "assert f(pi/2).numpy() == 1.0\n",
- "\n",
- "\n",
- "# grad_f will return a list of derivatives of f\n",
- "# with respect to its arguments. Since f() has a single argument,\n",
- "# grad_f will return a list with a single element.\n",
- "grad_f = tfe.gradients_function(f)\n",
- "assert tf.abs(grad_f(pi/2)[0]).numpy() \u003c 1e-7"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "v9fPs8RyopCf"
- },
- "source": [
- "### Higher-order gradients\n",
- "\n",
- "The same API can be used to differentiate as many times as you like:\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- },
- "height": 276
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 730,
- "status": "ok",
- "timestamp": 1527005655565,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
- },
- "id": "3D0ZvnGYo0rW",
- "outputId": "e23f8cc6-6813-4944-f20f-825b8a03c2ff"
- },
- "outputs": [
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXYAAAEDCAYAAAAhsS8XAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsnXd0HNX5sJ/ZXrTq3ZLV3IvcDdgGGwOm2WCbHhJa6C2B\nUBISQioBfoQPkjhACA4QCIQSDITQbGMbsHHvVbZ6s7q0vc18f4xmJVltJa0q+5zDOXhn9s7dqzvv\nfe/briBJkkSYMGHChBkxqAa7A2HChAkTJrSEBXuYMGHCjDDCgj1MmDBhRhhhwR4mTJgwI4ywYA8T\nJkyYEUZYsIcJEybMCCNkgl0URVasWMHtt98eqibDhAkTJkwvCJlgf+2118jJyQlVc2HChAkTppeE\nRLBXVlayceNGrrjiilA0FyZMmDBh+kBIBPvjjz/OQw89hCAIoWguTJgwYcL0gT4L9g0bNhAfH8/E\niRMJVycIEyZMmMFH6GutmGeeeYYPP/wQtVqN2+3Gbrdz3nnn8dRTT3X6HUmSwtp9CKittvH8UxsQ\nxZY/4aXXTGfa7PRB7NXAU1dj5y9PrIfmYUgeFcnya2aQmBI5uB0bYE5WNPHS/9uE6JcHYukVucw8\nPWOQezXw7NhcyCfvH0Bqfi+uumkO4ycnD3KvBpY+C/bWbNu2jdWrV/PCCy90e291tTVUj+03EhIs\nQ7qfWzfls2tzMTNPH01UrJEv/3eU5LRIVnx/5mB3rUP6azw3fnaMQ7vLOX1RNrVVNvIOVZGeFcPS\nq6YNmT6GmlP7KYoi/3ltF9WVNhacO4btXxfi9fi5+Mpc0jJjhkw/+5t9O0r5Zu1xDEYtpy/KZuOn\nR4mOM3HlTbNRqTo3UAynv3swhOPYhymSJJF3sAqtTs35l05mQm4K6VkxVJY2UVdtH+zuDRgOu4ej\n+yqIjDYwbW4a514yiYTkCMqKGnC7vIPdvQFjz9YSqittjJuSxNTZaVywcgoAX3xwCL9PHOTeDRyH\ndpej0ai47PqZTJyWwoTcFOprHBzdf3KwuzaghFSwz507NyhtPUzfOVnehLXRRdbYeLQ6DQATp6UC\ncGhv+WB2bUA5sLMMv19i2pz0gEaWNS4BUZQoOlE3yL0bGDxuHzu+LsRk1jH/nDEApI6OZtL0VFxO\nLyfLmwa5hwNDU4OT+loHozJiiIw2AjB7QSYajYrtXxXg9foHuYcDR1hjH6bkHawCYOzkxMBnmWPj\nMJq1HDtwEt93YBJ7PT4O7CrDYNQwPrfFhpo1Lh6AgmPVg9W1AaWyrBG/X2JCbjIGozbweXqWbIIp\nLawfrK4NKMX58kI+Oic28FmERc/UOWnYbR7yDn53tPawYB+GiKLI8SNVGEzaNvZTtVrFhKkpuF0+\n8o+OfKGWd7gKt8vHlJmj0GrVgc9j4kxExRopzq/7Tixw5cWNAKSkR7f5PHV0NIIApUXfEcHevEMb\nnR3b5vPxU5IAqChpHPA+DRZhwT4MKS2sx+XwMmZCYjuH0MRpKQAc3lsxGF0bUJQXNWdiYpvPBUEg\ne1w8Pq9IyXdAW60oaUAQ5Gig1uj0GhJTIqkqb8Lj9g1S7wYGn89PWXE90XGmgBlGITrWhN6gobIs\nLNjDDGE6MsMoRMUYSUiOoLKsacQ7zaoqrGh1amLiTO2uZY1LAKDgWM1Ad2tA8Xr9VFVYSUi2oNNr\n2l1Py4xBkqC8pGEQejdwVJQ04vOKZJyirYO80CeNiqSpwYXD5h6E3g08YcE+zJAkiZKCOswWHUmp\nHcdpJyRbEEWJupqRGx3jdvloqHWQmGLpMCciMcWCOUJH0fEaRHHkLnBV5U2IokRKelSH10dlyOaZ\nkW5nD5hhcuI6vJ48Sh6fyrLvhiM5LNiHGQ67B6fDS2JyZKdJXgnJcqxrdeXQj8vtLcpv6ywJSRAE\nMsfF43L6RvTLXF4sa+Kn2tcVkkdFodGoKCvqu8b+zjtv8f3vX8Fvf/ton9sKNUX5tWi0KlLSOl7g\nFDPVSJ4LrWm/dwszpKk5aQMgLimi03u+C4JdCeFLTOk8YSMlLYqDu8qpOWkjtRPBN9wpb/YzpHai\nsas1KlLSoygpqMdhc2OK0Pf6WWvWvMsf//hnkpNTet1Gf9BY76Sxzknm2DjUmo51VXlnBye/I3b2\nsMY+zKitkgV7fGLngj02wYxKLVBdaRuobg04VRWyYO/MHAUQlyCPkTJmIw2/X+RkeRNxCWb0Bm2n\n943KaA577IPW/vTTf6C8vIyHH76ft99+s9ft9AeKUzQto/MMW61OQ1xiBFWV1hHve4Kwxj7sUDT2\n+C40drVaRVyCmdpqG36/iFo9stZvSZKoKrditugwWzrXQKNijahUwojLxH17/XF25VXj8fhx+Hzo\nGh1s/+vmTu8X/SJ2RA59egTDxhMd3jNnQiJXLh7TaRsPPPAztm79lj//+UUiI4dWDZ76Zl9SXBfK\nDshmqZqTNqpPWgM295HKyHrjvwPUVNnQGzRERHa9pU5ItiD6pREn1ADsVjcOu6fbIl9qtYqYeBN1\nNfYRWXlU0Ty7W7hVzddFf181VYlApbUhhDLHYxPMXd6XnCbPl5PfATt7WGMfRng9PhrrnM2JJ11X\nx5Tt7BVUn7QGbO4jhZPliuO0+98VlxBBbZWdpgYnUTHtwyKHI1cuHsNdV83g1b9upuhELdf/cG63\ntvN/v7ydpgYnN99xxoirrFpX48Bk1rXJuu2IlsiYRqYxsiughjX2YURts2bSlRlGocWBOvLsy4p9\nPZiyvLGJshZXWzXydi71tXYMJm1QDtHYeBM+r4itaWTFcXs9PqyNLmLiu1+0IyL1mCN0VJY2jcgd\nXGvCgn0YEbCvd2NLBIiNN6NSCdSMwMiYqoqeaeww8hyoPq9fFmixwe1CouPkBa6+ti8L3NDT9Otq\nHED3ZhiQQ2ATUyNx2D3YbZ7+7tqgEhbsw4hgHKcKao2K2AQztVWyA3WkIIoS1ZVWYuJNHWZankpc\n8wtfO8J8DbLfAKI7yLrtiNhmjba+WRD2hnfe+YDIyKHldAzY1+O7F+xAIEu5sa734zAcCAv2YURt\nlQ2VWgj6ZU5ItuD3S4GogZFAU4MTr8dPQlJwfgNThA6DUTPinMi11fIi31E5hY5Q5kx97cgSaMrc\nDkZjB7luDEBDnbPf+jQUCAv2YYIoitRW24mNNwcdvjgS7eyN9fILGR1r7OZOGUEQiE2IoLFeXhBG\nCjXNpqXoYE0xMSYEoa+mmKGHUjYjJi44wR7VPG/CGnuYIUFDnRO/TwzKDKOg3DuS7MuNzZpWVJAC\nDVrMMSOpdk5AsAepsas1KiJjjNTXOEaU47Cuxo7ZokdvCC7AL6yxhxlS9MRxqqBotU0NI2cSN9bL\nmlZUTHAaO7Qkrijmi5GAYpazRBmC/k5snBm3y4fTMTKODHS7vNitnqDNMAAGoxaDUUNDWGMPMxRQ\nbMTdZde1Rm/QojdoaGxw9Ve3BhzFF
NMTwa68+HUjJORRkiRqquxExciZtcESHXCgjoxxCETEBBHq\n2JqoWBNNDc4RFVRwKn0W7B6PhyuuuILly5ezbNky/vKXv4SiX2FOQdG6eyLQlPubGpyI4sjYfjfW\nOzGatEFFxCgoERMjJTLGYfPgcfuCdpwqxI4wB2rAcRpkRIxCdIwRSQJr48hReE6lz4Jdp9Px2muv\nsWbNGtasWcOmTZvYt29fKPoWphVNDU7UGhWmCF2PvhcZbUT0S9itwz8xxe8XsTa6Ag6wYNHq1ETF\nGKkbIaYYRTAHa19XiGkWgL0NeWxdtvebb77ijTdeDfq7lZUVfPHFp0Hd+/jjv2bjxvXd3te6lMCa\nNe/x2Wf/C6r9qICdXR6HTz75L9XVLUdJPvnk7ykqKgyqraFKSEoKGI3yi+bxePD5RvYRXINFU4OL\nyChDj9PBFQ2/sd7ZI3vsUMTa6EKS6FVpgMgYIyX5TjxuX4+0/aGIIpCCTU5SUByHvY2MObVs7/z5\nZ7a7x+/3o1ar231eXl7GF198xnnnXdCrZ3eE4gyPjDawfPllQX9PGQfFEf+//33EzJlTSUrKAODh\nh38esj4OFiGZ4aIosnLlSoqLi7n22mvJzc0NRbNhmnG7vLhdvnZnWgZDZLQszGVTTudlTYcDgYiY\nHpqjACKjlHFw9SiyaCjS0EuNXatTY4nU98oU07ps78UXX4LFYuHIkUPcd99DPP74r7FYIsnLO8r4\n8ROZP/9MnnvuaQRBQKvV8OyzL/Dii6soKirkppuu5YILlnLllde0af+ZZ55k9+6dpKSktonaOXr0\nCH/+8zO4XC6ioqL5+c8fIzY2jnvuuQ3RFUt1XSGxH5Zht9sxmUycccYCfve7x3jpJXk3UVlZwcMP\n38+rr77JK6/8nW+++QqHw4lWSmTS9HvYsGEdR44c5sEHH0Sj0fL886t54IF7ufvu+zh8+ADl5eXc\neee9gKzZHz16hB//+AE+//wT3nnnLfx+H5MmTeEnP/npkKrBExLBrlKpWLNmDTabjTvvvJPjx48z\nZkznJUDD9IymZufnqYf0BoMiBEdCZExDLyJiFJQFztroHPaC/Vv315ROK+RP+ZsRCnomTJxjPfh8\nInnfbGgjiGYkTmXlmKWdfu/Usr2ffPLfNt8vLS3mT396AYCHH76Pn/zkp0yZkktEhIamJg+33343\nb731Ok8++f/atb1x45eUlpbwz3++TU1NDd///hUsXXopPp+PZ599iieeeIaoqGjWrfuCF19cxc9+\n9kskScLpsHPdlT9l6VXTWL36bwBkZGTi9/uoqCgnJSWVdes+55xzzgPgssuu4oYbbsbn9XPj9+9k\n566t3P/Idbz33tv88pe/ICGhbWGwRYvO5fbbbwwI9nXrPuf6639IUVEh69Z9zgsvrEatVvPHPz7J\n559/wvnnX9Sjv0V/EtI9aUREBHPnzuWrr77qVrAnJAyPioNDoZ/VzdUMU9OiO+1PZ58b9HLFO5fD\nNyR+S1/64HHKCUaZ2fE9bidttLxb8fukbr87FMapK9xuH6oIAU0v6uyrNSp8PhEkUKtbBLPJqOv2\nd6tUEBdnJjragsViwNj8HYNBy8KFSwPfP/30uTz//HMsW7aMJUuWkJSURHS0CZ1O0+Ezjh07wIoV\nl5KQYCEhwcK8eWcQGWnEZquhoCCfBx+8F0mSEEWRxMREEhIsCAhkpE4nMTmShAQLZrMes9lAQoKF\npUsvZuvWTdxyyy1s2rSeZ599loQEC7t2bebll1/G6XRSVV9FWXkaCQkWtFo1ktQyL7RaNTExJsaO\nTSczM4OKigJGjx5NeXkpixcv4I033uD48WPccceNSJKE2+0mLS15SM2bPgv2uro6tFotFosFl8vF\nli1buPXWW7v9XnX10C9OlZBgGRL9LC2WDyJWaYQO+9NVPyVJQqNVUV1pHfTf0tfxPFkhn5QjIva4\nHalZhlWUNnb53aHyN+8Mr9dPbN5YZo45gwvPn9Lj7x/aU87GT49x9sUTmDA1uc217n63KErU1trw\netVYrS6cTg/V1VZcLi8+X8vcXLHiGqZNm8uWLV9z5ZVX8swzq2hocODx+Dp8htPpwWZzB6653V6a\nmpzU1dnIysrm+edXt+uny+VFY9Gh0amorrZit7uRJDXV1VZOO+0sHn30p8yaNQ+/X8JojKGsrJZf\n/erXrF79OvHxCTx8/2+wNjgpL6vH6/W3+f1er5/6egfV1VYWLDibd99dQ0ZGJvPnL6S62orV6mTJ\nkou47ba7ejR+oSDYxaPPUTHV1dVcd911XHrppVxxxRUsWLCAhQsX9rXZMK1QzCi9McUIgkBktJHG\nBuewzzhsqHNiMut65fxsbYoZziip8PGJPQvxU1Ac6LZ+DPUrKyslOzuHa6+9nilTplBcXIjJZMZu\n79hpO23aTNau/RxRFKmpqWHXrp0AjB6dSX19AwcO7AfA5/NRUJAPtBwy0tE7MWpUGmq1ilde+TuL\nF8tmGI/HgyBAZGQUDoeD44W7geY5ZTJhs3UcMbVw4WK++mpDG5POrFlz2bBhHfX1ssLV1NREZWVl\nr8aqv+izxj5+/Hjef//9UPQlTCcoSTmW6N5FtcihfnacDi8mc8/CJYcKfr+IrcnV6yPN9AY59r1p\nmMcuK6nwSjninqIIdmtTb8YhOHv+O++8ya5dO1Cr1YwfP47TT58PgFqt4cYbv8eFFy5r4zxduPBs\ndu3azvXXX016egYzZswCQKPR8LvfPcmzz/4fNpsNUfRz5ZXXkJWVjd8vtfk9p7J48RKef/5P3HLL\nnYBsJl62bAXXXXcVKSmpZGeNw14vv1sXXbSMxx57DK1Wx/PPr27jO7BYLGRmZlNcXMiECZMAyMzM\n4pZb7uT+++9CFCW0Wi333/8QycnJHfZlMBCkQVLjhvJ2V2GobMtff/5b/H6R6++e1+H17vq5ef0J\n9m4rYcX3Z5CcNnhlV/synvW1dt56aTsTpiZz9sUTetXGO//YQUOtg5t/cmanEQxD5W/eGbu/Lebb\nDflcddOcwCEiPcHn8/PS018xKiOaS66Z3g89bEt/jeen/zlAwbEarr9nXq+UleL8Wj5+ez9zFmQy\ne0HmkP+7KwyYKSZM/6JoqpG91NahVSz7MI6MCZQS6GFyUmssUQZ8PhGnffgesqBkS0b38pg/jUaN\nyawb9lmX1kYXGo0Ko6nr4/A6I1AMrH5kZOGeSliwD3FsTW4kCSKjei/QomLkRUERjsORvsSwKyj2\n2OFsjrE1m1D6Mg4RUfrmeTV8fS7WRheWXiTsKUREGlCpBJrqh+9c6IqwYB/iBBynoRBoI0Fj78OB\n1C3JWsP3ZbY2udHp1d0e3NwVkVEGRFEatsfDuV0+3C5fnzKpVSoBc4QOm3X4zoWuCAv2IU5LclLv\nJ7GinQxrjT0g2Hs/DgHH4TBd4CRJwtroIiKyb6UhlO/3Z2RMf6LsWvpaIiMi0oDd6hmRVR7Dgn
2I\n05dQRwWVSsASbRjW205rkwuDSYtW1/tAroDGPkwFmsftw+vxY4nU96kdRSAO13FQ+t3bKDGFiCh5\nHEdCgbxTCQv2IU6LYO/bJI6KNuJyyjVnhhuSJGFvchNhCZFAG6amGGujLIAi+qipBmLZexXyOPhY\nlV1sCDR2kP1YI42wYB/iNDXI3v++xp8PZzu72+XD5xOJ6KOmqtGoMUcM34gQJfbc0kdTjPL94TYO\nu3fv5KGH7gv0uzNTzD333MbRo0e6bU/Z+diaXPzpT39i587tverX22+/idvdsjg89NCPsdsHt0R0\nWLAPYSRJoqnBiSW6995/BWXbaRuG207lRY6w9L3ssCXagK3JNSztqoqG3dcFztI8F4abYAcQBLoV\n7MGiaOyNDU7uvfdeZs2a06t23nnnTdzulrF86qlnMZsHt9Dc8C5MPcJxu3x43H5S0ntvX1dQzBjD\n0Z6oLEZ9FWggh41WljZht7r75LcYDBRTTF8FmlanwWDU9Eiwu1wufvnLn1JdXYUoilx//c0sXnxu\np2V1y8pK+b//exybrQlJEvjtb58gNXUUq1Y9x9atmxEEFddddxPnnHMeu3fvZPXqvxEVFU1BwQkm\nTJjIo4/+FoBvv93Mn//8DNHRMYwdOx6ApkYnGq0qEBnkdrt5/PFfU1RUSEZGBh5PS7TP9u3f8vLL\nf8Pr9TJqVBqPPPIYBoOBK664hLMXXcAXm7/Er1vGV9veZNas09HrDfzvfx/xm9/8AZB3Cf/+9xs8\n8cQzPP30Exw9egi3282iRedw00238u67b1FTU80999xOdHQ0zz33PFdccQkvv/xP3njjNZKTU1ix\n4nIAVq/+G2azmauuupZ//euffPnlF3i9Ps46axE33dR9fa2eEBbsQxjlxeurLRFaBPtwtCfam0In\n2C2tQh6Hm2BXNHb/hv+yY9WePu065tg8iH6J/IffBsAyew4JV1zd6f1bt24mPj6Bp556FgCHw95l\nWd1f//oXXHfdjaxYsZTy8jpEUWTjxvWcOJHHa6/9m/r6Om6++TpmzJgJQF7eMV5//R3i4uK4444f\nsn//XsaPn8hTT/2eP//5RUaNSuOXv/wZ0D6Gfc2adzEajbzyyr84ceI4N910LQCNjQ28+upqnnvu\nr+j1Bt5441Xeeut1brjhZvk3R5pZMu8uRqfHcrSkVB6XOafx9NN/wO12odcbWLfuCxYvXgLAbbfd\nhcViQRRFfvSjO8jPP87ll1/Nv//9ZqCcsYzcr3PPXcJzz/0xINjXr1/LM8/8me3bv6W0tJiXXnoN\nSZJ4+OH72bt3D9OmhS4TOCzYhzC2EAo087DW2BUTRN8XuMCBG43D7+ARa5MLlUpAq1XTVxe4oBKQ\n/CKSJJs3uiM7ewyrVj3HCy/8hTPOWMC0adPJzz9Bfv4J7rvvruayuhLx8Qk4HA5qaqpZsEAuBqjV\nypr1vn17OPfc8wGIiYllxoxZHD58CJPJxKRJk4mPjwdgzJhxVFRUYDAYSU0dxahRaQAsWXIhH3zw\nH3kXm9ayKO/Zs5srmhelnJwxjBkzDoCDBw9QWJjPHXf8EEmS8Pl8TJkyLfC9JUvO57//ymuj7KjV\nak477Qy+/vorFi1azJYtX3PXXT8CYN26z/jwwzX4/X7q6mopKCggO3sMIDX/pyD//9ix42loaKC2\ntob6+noiIyNJTEzinXfeYvv2bdx007VyXXmni9LS4rBg/66gCGFzH6NBWrcxHCMhrMoCF4JxaHEi\nD79xsDW6MVv0JF55NQl33dKn2ibfrDvOvu2lrLxuJkmp3Z/MlZ4+mpdffp0tW77hxRf/wty5p3PW\nWYvIzs5pV1bX4ei4iuOpma6t/60IfwC1WoXf3/HS5fPKu5RTzVGtfVBKu5IkMWfO6Tz22O86bMto\nNBIRaWj3TixefB7/+c/bREZamDhxMkajkYqKct566w1efvmfmM0RPP74r/F4uleSzj77HL78ci21\ntbWcc86SQL9+8IMbuOSSFd1+v7eEnadDmIBtOQQCTa2WD8Iejs5TW5MbQQCzpe+VKZXdj32YmaT8\nPhGH3ROyc2t7GvJYU1ODXq9nyZILuOaa73Ps2NFOy+qaTGYSE5P46qsNAHi9XtxuF9OmzWTdui8Q\nRZH6+nr27dvDpEmTO31mRkYmlZUVlJeXAbB27Wf4mmuntx6H6dNn8PnnnwCQn3+cEyfyAJg8eSr7\n9++lrEw2s7jdLkpKits8IyJSj8ftlw8faWbGjFkcO3aUDz9cEyjVa7fbMRqNmExm6upq+fbbzYH7\nuypJvHjxeaxb9zkbN67n7LPPAeC0007n448/xOl0No9tdaAEcKgIa+xDmFBq7CAvEDVVNiRJGlLn\nM3aHvcmFKUKHStV3PSSwcxlmC5xijuprcpKCEvIYbJJSfv5xVq16DpVKQKPR8sADP+uyrO4vfvFr\n/u//HueVV15CENT89rdPsHDh2Rw8uI8bbrgGQVBx5533EhMTS2FhQZtnKXNTp9Px4IOP8OCDPyI6\nOobc3OmcrKiT+99KsC9ffjmPP/5rbrjhe4wdO45Jk+QDSKKjo3nkkcf41a8ewePxIggCt9xyB+np\no1Hs4Ip5T1kwQD7qc968BXzyycf84he/BmDMmLGMHTueH/zgKlJTR5Gb22LSueSS5TzwwL3Exyfw\n3HPP07q8cVZWNg6Hg4SEJGJj4wCYM+d0iooKuf32GwEwmUw8+uhviYkJnWkwXLa3Cwa7lOcH/9pD\neXEDtz54FuoujkELtp99LXXaV3oznqIo8dLTm0hIsbDyBzND0o9X/vQNOr2G7912Wkj6OBCUFtbz\n0Vt7mTUvg7lnZfW5nzUnrbzzj51MmZnKmUvGhbCnbQn1eH79RR77d5Zx+Q2zSEju+1F0u7YUsXVj\nAVf/cC4xCb2vQzRQhMv2jgDsVjdGs7ZLod4ThmPIo8PuQRSlkJijFMwWPXbr8KpuGKr6KAqBujnD\nLJY9lKGvcjtKlNTwS9zrirBgH6JIkoTN2vc0+tZERA6/kMdQJeW0JsKix+cTh1V5hUCSVojGQT5R\nSh1wTA8X7FY3KrXQp+qWrVHGczifVdARYcE+RHG7fPh9Ysjs69CqNsYwKlVqDziQQ6OpwvAM/VQW\n41Bp7CDb2a2NrmG1c7Fb3Zgj9CHzEQV8DcO48mlH9FmwV1ZWct1113HRRRexbNkyXnvttVD06zuP\nLYQhfgrDWaCFUmMfjg5UpU5MSOdDpB6vx4/X4+/+5iGAKMqRQaGIjlIwRegQhJGnsfc5KkatVvOz\nn/2MiRMnYrfbWblyJfPnzycnJycU/fvOEuqIGBie2afWfjDFBBY42zAah0YXRpMWjVYdsjbNES0L\nvU4/9APkHHYvktTS71AghwHrh/VZBR3RZ409ISGBiRMnAmA2m8nJyaGqqqrPHfuuE8oYdgVThKzp\nDCfB3qKxh84EEXAiD5NxCPhbQjgGMPwWuP5QdkAOI
W1qdCGKw8ck1R0htbGXlpZy5MgRcnNzQ9ls\nv2I/sA9nfv5gd6Md/TGJ1WpV83Fg7V9kT2UFjsOHQvasUKE4y3p7aHFHKFv51uMgSRKi14vo9SL5\nhpZT1enwIvqlkO5aAMzNC73d2lI0S3S7se3Zjd/WtuyszWbj/fffDfxbKaHbEU8++XuKigq7fX5X\nbbRGKcMbeCeC0NhffvnFoMvwRkQakEQJR/MC9/bbb+JyOrHu2IansmJIlOHtKSHbf9ntdu69914e\neeQRzGZzt/cHG4/Z3xT+8xU8dfWkLL2IjB9ci1rfdtIMVj+V1OnRmXHExoduPKNiTVSWNRIfFwGS\nSPlHH1P15QYchUUApF9zFaOvvrL3HQ9RPxUcNg9R0UYSE7tPew+WSItcVsDr8ZOQYEH0ejn8uz/Q\nsGcvxwFUKjK+/z3SLuu/lO+eUOlpBCA+IaLN+PV1bqamRcv/I8lt+Z1ODj3zJE2HDiOo1URNyyV1\n6UXEzJqJ293IRx/9h1tvlZNqoqNN6PWaDvvw9NNPtPm3co8oim2SzLpqozVarZqYGBP2etlhmjIq\nqsvviKLIT3/6QPcD0ExisoXjh6vQqNUkJFh49+03mJF/HOn4CZIvWMI//vFy0G0NFUIi2H0+H/fe\ney+XXnoGQVm+AAAgAElEQVQp5557blDfGSpJIEm33U3l6r9R8dHH1Gzbwagf/wRdQiIwuMkqNVXy\nc90eb7d96Ek/DUYNol+iuKgW19frqHn3bVCrMU+bjqesjJI3/43D4SFu2aV9/g196SfIafQ2q5vU\n9KiQ/x10ejX1tQ6qq61UvfUGDXv2ohuVhikhDmt+AUX/fANfXDLmyVNC+tzeUFoip5urNEJgHEIx\nN33N1SGrKps4WVpD2XPP4Dx2FOOEiYgOBw27dtOwZy8Zj/2Gx//2V4qLi1m27BJmzz6NM86YT0ND\nE7fddme7Urv33HMbd999H+PHT2DJkrO46qpr2bbtW+6++8fY7fY2ZXg9Hl+733FqGV673Ul9vYP6\nCh8V1cf42aOrEVRSuzK8F198Cdu3b2XlyivZunUz8+efGVQZ3sYGG3GWCZQUTeTdF/8fVSdP8ugX\nnxEdHcOq85exaNHZg16GVyHYxTwkgv2RRx5hzJgxXH/99aFobkAxZmeT8cvfUPOfd2hY+wU1775N\n6h13D3a3sFvdGIyhdZZBS9hgQ1k19o8+QB1hIePXv0MTFYW3tpbS/3uC2g/eR9DpiD3/wpA+u6co\ntt9Q25ahJUnJumsnDWu/QJeSyuhHHiUpLZ6SbXspfuL3VP79RTIe+y2a6OiQP78nKONgajZBbF5/\ngsK8GsQ+HhaimJSP7KvEsXsHWXlHiZg9h5RbbkdQq7Ht3kX5qj9R9cY/uf32uykszGf16jcAWUB2\nVGp36tRpbZ7hdDrJyRnDD394Gx6Ph6uvXtGuDO+pdFaGt7qqhgN5a3nxpb+RkBTdrgyvTqdn1aqX\nALnMMARXhvfEkZM89PC9HDt8mNOLi3lPq+WPj/6G1IVnN4dVDn4Z3p7SZxv7zp07+eijj/j2229Z\nvnw5K1asYNOmTaHo24Ch0ulIuOp76DOzsO3cgfuUQkEDTX8kJykoNvvyz9Yjud3EX34lmqgoALRx\ncaQ9+DDqqGhqP3i/nZ11oOmPUEeFCIset8tH+auvIOh0pNx+F6pmM5whK5uEK67Cb7VS8dILSOLg\nnrak2MAVm3ioUELBRb+Ir64Oc+40Um6+DUEtKxMRM2ZinjET57Gj2Hbvavd9pdSuIAiBUrunotFo\nWLhwMQBFRYXtyvB2xJ49uwPXWpfhPX7iCI22kzz007u48cbv8emnH3Py5MnA95SCXa1pXYbX7/ez\nZcvXnHmmXE543brPuOmm7/PL395Do/Ukx3ftQLTZEIwmLDNntYqVb1+G9/jxvEAZ3m3btgbK8N50\n07UUFxdRWjq4MqTPGvusWbM4fPhwKPoyqAiCQPzyFZQ9+wy1H35A6l33DFpfPG4fPm9ok5MUApl2\nReUk5owhct78Nte1cfHELDmfmnf+TeNXm4i98KKQ9yFY+iPrVEFxwDk9kPW9a9GPGtXmevQ55+E4\nchj7nt3Y9+4hYkZo6tT0BsWpp8yHeYtzuPSq6SExT73+/Ld4GxsZW7uDhB8/jqBpKxISr7qGwoMH\nqPv4w3YLXDCldnU6Xa+SiToqw+tyekhLnsA//vFih98xGjs+OKW7MrySX8Odt/4Ya1UtKrMZVSft\nwOCV4e0p4czTVpgmT8WQnYNt905cxUWD1g8lWsPcLwJN1vpcmggSr/0BQgcVE6POPAtBr6dh/dpB\njRCx2xRNNfTjYDLJWqkvbhSR889sd10QBOIvXQlA49eDuwPtL40dwKgRcUtajJNz0aWktruujU8g\n9qKlaB0ObLW1PW6/dVZrR2V4O6KzMrwW4yiq6gq6LMPbEd2V4XW6rZRXH8EraIg9/0LM5oghV4a3\np4QFeysEQSDuUnnVrf1wzaD1w94PMewK2qZqAPyJ6RhGZ3R4j9pkJmr+Anz1dR1uwQeK/opbBlDX\nyMJFGJ/b4eIGoE9PR5+ZhX3/PnwNDSHvQ7DYbW40GlW/JBFprDVIggrDWZ0HPcScfwGRlkhydDqu\nu+5q/vrXP7W7p7WG3dn/63Q6Hnro5zz44I+4665bSOlgIQG5DK/D4eCGG77Hm2++zqRJU/B6fKgF\nI8vOv5lf/eoRrr/+Gm677SaKAwpY57sCpQzv1q1bmDdPXsRbl+F96onfkBSdgU+tJ3rxOYEyvD/6\n0R3t2u6sDO95553P7bffyPXXX82jjz6M0+notD8DQbhs7ylIkkTJE7/HdeI4s/72PFbVwJ+LeWhv\nORs/OcbZF09gwtTkbu/vSYTEybfe5D8FSSTGaLns9vaaqoLnZCWFP/8phpwxjP7ZL4Lue6j6CfDZ\n+wfJP1rNdXefEXKtffsfVrFDmMycuUnMXjyx0z42bFhP1euvEb/ycmIvWhrSPgTLK3/+Bp2ubZnh\nkETFNNTz6ZNvURI1kcuun0liSuchpZWvrKbp602kPfAwpgkTO73vVEIVWVZfY+etv29n4rQUFl04\nvs/ttWl7/Vo+3tSI0xTLzQ8uGtJnFYTL9vYSQRCInLcAgLqt2walD/Z+qBMDIIki9p3b0IsunGLX\n2p8uKRlz7jRcJ47jzD8R0n4Ei8Mun5xkNIXWBOEuL0dVIv8mp6/rqCPL3NMRtFoav/lqUIpl+f0i\nTrs3kDUcShq+XI/eKzvIFbNXZ0SedjoA1m1bQ96PYLDb+m/3Ztu1E53fgU8Uhk3dnO4IC/YOiJg+\nAwSB2i3fDsrzbf1kgnDmHcNXX49Rr8Jh93QrqKIXy9tza6tjwAYSu9WDyaxDpQqtBtX09Sb0Pnvg\nGV2hNpmImDUb78mTOPOOhbQfweC094+fQZIkmrZuwaCSfSjdFYYzjp+AOioK687tg+J3USKkQlkA\nDMBvteI8
dpSICNkRPFzKK3RHWLB3gCYqCuOYsTQdPoKvqWnAn99iYw/tJLZukxeqiPhI/H4Jj7vr\nF9Q0YSIqgwH7/n0Drq1KkoTD7gm5pir5fDRt+Qa9SYtaLQRV4TFqwVkANH61MaR9CYYWB3Jox8FT\nUY6vpoao0SmAnOHbFYJKhWX2XES7HfuhgyHtSzD0lyPdtncPiCIxaXJSYncL/XAhLNg7IWLGTJAk\n7Ht2D/izbVY3Or0arS50zjLJ58O6cwfqqCgik2SnT3eTWNBoME2egre6Gm9l+xjl/sTjluvRm0L8\nIjvzjuG3Womae7qcpBSEhmYcPwFNfDz23bsGXFvtLweyfd9eAGInjmnznK6wzJVt/IqCMJD0V0CB\nbfdOABInZAItoaXDnbBg74SIGbOAlj/8QOKweUKumdgPHUS02bDMnoup+eVw2LufxObmTEJbsyAY\nKPorxM9+8IDcbm4uZoseh82Dv5sMTkEQME/JRXS5cBUMbME4RZMO9c7Fvm8vCALxM+SSCcEscIbs\nHLTxCdh270Z0D6wA7I8FTnS5cBw8gG5UGjHpSfJzutm5DBfCgr0TtAkJmLMycRw+hN85cLWa/c1H\ntoX6Rbbtkhcoy9zTWqr6BTGJzVOnyvfu3xfS/nSHsuiEWmN3HDyAoNFgHDs+ICQUO3ZXmCdPBhhw\nM0TAaRjCcfA77DiP52HIysIQG41OrwlqLgiCgGXuaUhu14BXArXb3KjVAnpD6Hax9gP7kXw+ImbM\nDJykFLaxfweIPf00JJ8P+/6B01Zb6oKEVrA7jxxGZTJhyMoOtN2dXRVAExWNPjNLNmEM4ALXHxq7\nr6kJd0kxxrHjUOn1LQePBGGGMI6fCCoVjmaNf6DoD03VcfAgiGJgN2a26II+VcvUXBTNcWSABbvV\ng9kSuiPxoGU3HjFzVuDIwWDeieFAWLB3QdzpcwGwD2CSjqNZezSZQ/cie2uq8dZUYxw3HkGlajk5\nJ0jtxDw1F/x+HIcGTqgFxiGEgt1xWNa2TZNk4WQyB7/AqU0mDNk5uAry8XeSldgf2PvBFKPY1825\nzYI9Qq6b4/N2H+pnyM5B0GpxHDkSsv50h9/ffCReCHctkt+Pfd9eNHFx6NNHN5+jGjbFfCcwZWSg\njo7GcfTIgEWFOPohCkJ5CZXEkp4INICIZgFg3zdw5pieHKoQLIq2bWo2q/Rk5wLIJXwlaUC1VbtN\nPrZOG6Iqn5IoYj+wD3VUNPrmzOOWk5S6HweVVotxzFg8pSX4rAMTMRYI+QzhrsVdXITodGKeMhVB\nEFCpBExmXdh5+l1AEARM48bjb2rC26qKXH/SH84yx1G5SJsi2I1mbY+0E31GJmpLJPb9ewes0mGo\nw/wkScJ+8CBqiwV9Wnpz280CLQgnMoBpkrwgOAbQzu6whfbwZldhAX6rFfPU3IBZo+UkpeDGwdg8\nj5xHj4asX13RktcRwnfimNx347gJgc9MEXrstu7zO4YDYcHeDcaxcvqy89jATGJFyChadV+RJAnn\nkcOoLRZ0qXIFQ5VKhdEUvHYiqFSYp0zF39SEp6wsJP3qDiXr1BCirFNPeRn+xgZMkyYHasP0VGM3\nZGahMhqxHzwwIC+/z+vH7fKFdtfSvCgpTnHo+dmnioLgODIwVV0d/RDDrrzPxrHjAp+ZI3T4fWK3\n+R3DgbBg7wbjOFmwO/IGRrAHJnGItp3eqpNytun4CW2KXZkidEFlnyooL4DzeF5I+tUdoc46DZhh\nJrWciNRTk5SgVmOaOAlfTQ3eATiwvT+Sk5S/n6KwyO03C/Ygk3MMGZkIegPOgRLsIfa3SKKIM+8Y\n2oQEtLGxgc+VMOCRkKQUFuzdoEtJQRURMWAae8AUEyKNXdGqTi3cZI7Q4fOKeNzB1cYwjh0LDIxg\n74+sUyVMUQlbBNDpNWi0qh5FQgSiQgbAkRyIkArRIi+JIq4Tx9EmJaGJbCn4pZg4gtXY5XDRcXgq\nK/A19H952lC/E56yMkSHo83iBq1MUiPAzh4W7N0gqFQYx47DV1uLt7am35/nsHnQaENXotXZiWBX\n4sODSVIC0CY3L3An+l+whzrrVBJFXMfz0CYno4mOaXPNZNYFbWMHMI1vti/n9f84hNqR7ikvQ3Q6\nMeaMbfO5orH3xHFomiDbph1H+z865tSjAfuKsvtWduMKph7kdwx1woI9CEwBO3v/F4Gy290hsyVK\nkoTjyBHU0dFok9qW/+2xGUIQMOaMwVdT0+9aWqhj2D3lZYguF8bsMe2umSL0uBxeRDE4k5Q2KUle\n4PKPh6RvXRHqyKCAGWZMW8FuNMsFsHq0c5kwSf7OAJye5rSHVmMP2NfHnaqx93yBG6qEBXsQKBPA\n2c92dlFsLtEaqi1nRTl+axOm8RPbJXa0bL+Df5kVgdDf5phQZ50qZYcNOe0FuzlChySB09GDBS47\nR17gGvv38I1Ql6pV/m6GUwS77EzXYg8iA1dBP3o0KpMJ59H+F+x2m6f5oJG+h3xKkoTz2FFZ2UlI\naHOtJToorLED8MgjjzBv3jyWLVsWiuaGHPr0dFQGQyBEqr9w2r1A6JxErhOyVqnYx1ujJED1RDsZ\nKMEeao3ddUIW7MacnHbXerpzATlJB8DVz3XqQ+08dR0/jspsRpfc/vAWU4QuqNIKCoJKhTFnDN7q\n6n6vgKr4W0KRdeo9eRJ/UxOmcePbtWfqYeLeUCYkgn3lypW8/PLLoWhqSCKo1RjGjMVbWYmvsbHf\nnhNq779SsEoRRK3paagfgD4zE0GjwXm8f80QIR+H/BOoDIZAuGdrejMOxmbN33mifwW70idjCHZw\nvoYGOfs4Z0yHRwGazDo8bj/eILJPFQxZ2QD9WhhNFCWcdk/ozTCnOE4BjCYtKpUwIsoKhESwz549\nm8jIzo/VGgmYAuaY/rOzh7rgkzM/H0GnQz8qrd21nhQCU1BpdegzMuWsvX6s7hdK27LfbsdTUY4h\nK7tjgdbDJCUAQ1YWCEJgR9RfOOweDEYtanXfX9PO7OsKyjj0RGs3ZCuCvf8WOJfTiySFbpFX3l/j\nuHHtrgmCIIcBjwCNPfSn445QAtvvgnwss+cAYPPa2V65G5WgIjMyndSIFLSq4IZUkiRKqmwcKqzH\n6/Oj16pxVcs1SEKhnYhuN56yUoxjxiKo29smjQETRM8msXHMGFwnjuMqyA9E2tS7GjhQewS7186M\nxFySTAndtNKCy+OjqNJKQYUVm9NLXJSB6pPyGZmhMEEoQqejXUvrZ/RES1MZjOhSR+EqKkTy+RA0\nGlw+F2W2SmpddVg9NqbGTySxB+NQ3eCk+KSVmkYXjXYPybEmbFY3lsj+ta8rKHPObvMQGR3cOb+G\nTEWwFwQ+a/JYOVhzBLVKjU6tY5pxLALB/4ayGjvHShpweXx4vCKG5jyLUNVOchacQGU0ouvkIG1T\nhI6aShuSJA3ps0+7Y9AEe7CHsg42Sj995qmUCgL+smI0ESJrDn/G+
vxvcPtbBIJereOHs65mUdYZ\nnbbncvt4d30ea7cXU9voanMtFRiFig0HK7GkRzNtbPCC4dTxbDxYDJJEzKTxnY61KUKH2+Xr0d9C\nNWsa9Z99iqqiGPuUFJ7f/k8K6ksC1z/K/4zxcdlcPuVipiVP6rSftY1O3vz8KGu3FeM/JSJlAgIR\nCHyxr4JLzxpDQkzvDxR3VpYCkDRzKrEd/E7RKz9b8kuBvgUzHo1TJnLys1JM9joKLV6e3foyTW5b\n4PoHJ/7HOTkLuHzyxUQbOt7NSpLEoYI63t9wnG2HKmmdKyYAs1FRWu9k8+EqLpqXiVbTdoHuyd+t\nvCgfQaMhbfZU1Pr2QjIxSW5Lq1YF326ChbKUZNyFBcTGGvmycAtv7H0fu7elCqjmoIZrc5dz4biz\nUQkd7zx8fpEvthbxxbZi8kraOqQjgfGo2Ha8moRJSSyYntprgeuz2zlWWUlU7lQSk6La/5wECzGx\nZqrKrUSY9CEvGT2QDJpgD8XJ5f3NqSes65JTsB4/zs8+e4I6dwMx+miWZi3BrDVT2FTCjpO7+eu2\n1yisKueirPPaTEBJktiTV8O/1h6jtsmN2aDh9MlJ5GbHYTHpcHv9HPy2GFu5lX2FdWx9YTNn5qZw\n9TljMXYT097RSfB1u+WEHCk5vdOxNpq0NDW4evS38CXIduqSndt5Ub0Zl8/FxNhxTImbiElrZGvF\nTo7WHucPm1Zx69TrmBrfItwTEixUVDby4TcFfLatBK9PJCnWxPQxcWSlRBJl1lHX5GbfF3l4PH4+\n2JTPf78uYMVZ2Vxw2mhUvXiha/fLBbs8cakd/k63V3ZY19bYqa62djiWHZI6GoAvv3iP12MLUQkq\nFqbNJ9mUiFqlYm3RRj4/vomvCrfx4xm3k2ZpqyHaXV5Wf3yY3XlybkRWSiRzJiQSH2Ug0qyjsKSB\nE5sK8UgSf//gAO9/mcfV54xj1viEwFgG+3cT3W5s+QUYMjKpa/IA7XcnIvKqUlHeSHxK8AuGdnQW\nrq1b+MM7v2efUIlBrWdZ9gVEaE04vE6+LPuKV/e8y9aivVw/+WoidW3bLqux8/f/HqKo0oogQG5O\nHLPGJWAx6dBpVRzeW0HV4WqqrW6een0H//06hmvPG0dKnDnoPiooNeRVqe3fCWU8NVp58SkuriMu\nIaLHz+hvgl10QybYR0LhnO7QjE7HU1GOVFXDBdPO56LMc1GrZC3qtJRZLEybx1/3ruZ/hWupdzdy\n7YTLEQQBUZR4Y+0xvtxVhlolcPEZGSw9IxO9rq0GdnJfJTbgnqum8eaXJ/hqXwWHCuu5fflkclLb\naxhdoURsKHbQjjBF6KmtsuP1+II+hk9jiUSKjcZZkI97ZgLXTbqKuckzA9fnJs8krz6fv+59mb/v\n/ye35t7A5DjZP9Fk9/D/3t7L4aJ6Yix6li/IYt7UZNStbN+SJLH/02MkJ0bww9mjeG/jCd7dcIJj\nJQ3cvHQSEUZt0GMgiSKu/BNok5JQR3T8khqMvXOYGZtNO1VH9hC5KI2bp/6A7KjMwPXTk2ezsWwz\n7+V9xPP7/sFDs+8hSi9r7gUVTTy/5gA1jS7GpUez8qxsxqZFtVEEIlUCJyhk/oxR5Khh3c4yVr2/\nn0vmZ3LJgqwe9dVdUgx+f9dzQTHN9cDGDqDPysK6dQuewgJy58zmqvHLida3zNWLpy7iua//wcHa\nI/xt32vcN/P2wDuzbmcp/15/HJ9fZP6UZFYuzCHmlNBOZ7mVqsPVfO+C8aw7Ws3+/FoeW72dW5dN\nYvaExB711VUom4wMWZ2PnzIOTrsHgt8wDzlC4jz9yU9+wtVXX01BQQGLFi3ivffeC0WzQwqv38s2\nXSUAC8VMlmYtCUxQhWRzIg/MvovRllFsqdjOloodeLx+Vr2/ny93lZGWEMGvb5rLZQtz2gl1kF8q\ntVpgfGYsj14/m6XzMqizunj6zT0cKqzrUX9dBfmoLZFoYuM6vcds7rkDtdZZzwmLG6Nb5Oa0S9oI\ndYWxMdncnnsjgiDw0v5XKWgspqzGzk+e28jhonpmjI3ndzefxpnTUtsIdWjJOjVb9MyfmsKvbpzL\n5KxY9p2o5TevbKemIfjDPjyVFXKmZQeJSQqCIGDsRbnWfK0Nl05gVK3Iw3N+3EaoA6hVahann8ml\n2RfS4G7kxX2v4vF72Hm0isf/uZPaRheXzM/koWtmMC49up15QVlooqMMXLV4LI/dMJuEaAMfflPI\nX98/gKsHhapcRYUAGDK6EGi98DUA7NLLO46JNjO3TP1BG6EOEG2I5I7cG5mdNJ2CpiI+yP8ESZJ4\nb+MJ3vjiGEa9mrtXTuWHSye1E+rQ4sxNSbLw4ytyuXP5FNRqgefXHGDtjpJ293dFQLBndqXs9G4c\nhhohEex//OMf+frrrzlw4AAbNmzgsssuC0WzQ4qPC77ggFGO1811xXRq54vUWbhl6nUY1HrezfuQ\nJ975ht15NUzMiOGn184kNb7zLaTdJod1CYKARq1i5Vk53L1iKn5R5Nl39rEnL7iSBr6GBnx1dRiy\ns7u0R5osPZvEkiTx5tH3qIyRp022tXPn5vjYMdw85Qd4RR+vHnybJ/+1g8paB8vmZXLXyqmdmpdO\nTaOPNOu478ppLJ2XSU2ji6fe3E1NY3DCPbBr6SB+vTXmCB32HhREs3nsvHbk31TGa7FYvZjdnX/v\nvIxFnJ48myJrCX/Z9i9e+OAgGo2K+66axvIzszstcnZqyOeohAgevX4OE0ZHs+tYNb//xza8vuBC\nE92FhYBcfrkzeqOx76s+yIeu3fhVkNOk79SGLggC14xfSaIpnnXFm1i1di0fbykiMcbIo9fPZua4\nzlXj1rH8giAwe0IiP/3eTCLNOv61No/3NgYfkeMqKGhWdmI7vUcJKuhJstZQJJx5GgQn7VWsL/kK\nX1IcqFS4iwq6vD/WEMOKMctw+92UGzczd1Ii9105DVMX5zVKUnO87ikOmxnjEvjRFdNQqWDV+/vZ\nd6K22/4G4tezOtdMAMzmniVkbKvcxeG6YwGNx11U1OX9U+InMit+FtWuKlxRedxxWS4rzsru0lau\nCJbWsdsqQWDlWdmsODNLFu7/Ck64K9Ea3Y2DyaxD9Eu4Xd1rwZIk8caRd2n0WIkeI0cFKZpgRwiC\nwDUTVhKvTeaE8xCaqHruv3IaU7I630lBx4WvIoxa7r9qOtPHxLMnr5oXPjiIr5uDuEHW2AW9ocPE\nJIWeFkRz+py8ceRdVFodmrQ0vKWliN7Ov2vQGPjh5O+jktQckjaQkqziZ9fOJD6qa8d4R+WbM5It\n/PwHs0iKMfLxliI+3VrcbX99TU346moxZGV1qewoCoUzrLGPbCRJ4p28D/FLflZOvBR9Wjru4mIk\nX+dCQJQkDuw04a9PQB1Vx4QZTWi6iUV2OeV6JR3F607OjOX+K6ejUgk8/8EBik927TQLVrD3ZNvZ\n5LHybt6H6NU6zjvjGvk5
XQg0gEa7h6Nbk5G8OvTp+czO7d7x4+iiLsiy+Vksbxbu/+/tvTi6EcTu\n4iJQqzuM429NT8Zhb81B9tUcZGx0NhNzF7Y8pwsKym1U7pPNIElTCsgZ1X3OR2dJWhq1ijuWT2ba\n2Hh259Xwj/8d7nKnIbrdchz/6NEdxvG3xmTWBa2xf160AZvXzgWZ5xA1Zjz4/biLuxawhw77cBWN\nQ9B4mTCnhqggok4cNg9GU/vyzfHRRh64egbRETre/vI4Ww5UdtmOMle72rVA730NQ42wYO+GvTUH\nOVx3jImx45iWMAVDZhaSz4e7vPMDJ9798gTbDlUxyn0GBrWeTwq/aBMW2RHdnZw0Lj2aW5ZOwuPx\n8+w7e6lrcnV4H7QW7F072QICLYhJvOb4/3D4nFyacxEJcaloE5PkOO5OhIrXJ7Lq/f1U1/qZrJ+P\niI+Xd/272+d0V6L1kvlZLJmTTkWtg+c/OIC/kxOdJJ8Pd0kx+lFpCJquHcPBVroUJZGP8j9DQDYt\nKDZrxYbdETUNTv7yn/34bdGMi5hMtfsk31bs6PI50PU4aDVqfn7jaeSkRrLl4En+u7nz57uL5bBX\nfWb3DldThB6n3dNtQbQ6Vz1flnxFtD6KxekLMGQpOR6dL/Q7j1bx7/XHMTtyiNPHsa1qBycd1V0+\np7vyzXFRBu6/ajomvYbV/zvMwS78UO4gHKcARlNYsI94vH4v7+V9hFpQc8XYSxAEAUPzC9LZJN5y\nsJJPtxWTEmfivhWncXb6mdi8djaVbu7yWQFbYhfJSbMnJHLl4jE02Dw89+4+3B2kf0uShKuwAG1S\nMmpT1yFhwdZJqXLUsK1yF6nmZM4cdToAhsxMRLsdX017u78kSbzxxVGOlzYyd2Iid5x1PuNixrC7\n4gDHG7rW8oMpJ3Dl2WOYlhPHwYI63lrbcfanp6ICyefDkJnZ5fOgbXJOV+w4uYdK+0lOS5lFkjkR\nTXQ06sjITk1STreP597bh9Xh5drzxnL9tOXoVFo+PPEpTl/nCzO0ONI7K99s1Gu457Jc4iL1vP9V\nAbuPdSwkXUWKwzCzy+eBPA6SJO8eu+Kj/M/wij4uyb4AnVrXUlqgsOPSAkWVVv720SF0WjX3XT6D\n5WMvDCySXeH1+PF5xS7nQlpCBPdenosgwAtrDlDdiXM9GMcpgFqjQm/QhJ2nI5mvirZT56rnrLQz\nSN51YmQAACAASURBVDLLoVXKit/RJC6qtPLqJ0cw6tXcc1kuEUYti9PPxKgxsLZ4Iy5f5xqhI8ia\n00vmpLNoeiolVTZe+7T9IdvemmpEpxNDN1tOCH7b+VnReiQkLsg8J+AgU7a0rg78Det3lbFpbwUZ\nSRZuvGgiKpWKZdnny20Vru/yWcEcqqBSCdx6yWRGJZhZt6uUTXvL292jaNHKgc1dEczOxS/6+Tj/\nc9SCmosyzwVk+7l+dCa+ulr81rbmMUmS+Pt/D1FWbeecWWmcPTONaH0USzIWY/XaWF+8qcs+OZr9\nLV3ZgyPNOu65LBedVsXf/nuI0mpbu3sCAi2I+dCShdv5PC2xlrG9cjdpEanMSZ4BgDYxEUFv6NAU\nY3V4WPX+frw+kdsumUxGsoUZCVPJsKSzu2ofRU2dR7Z0ZZZrzbj0aK49bxx2l49V/9nfTuGRJAlX\nQQGa2Lg2B4x0hnK62HAmLNg7QZREPjwiv8jnjl4Y+FyXOgpBpwts7RRsTi+r3t+Pxydy89JJJMea\nADBpjUFp7cEWvhIEgWvOHUd28zb8y91tTUKK9qgfPbrb36jRqtHp1V1O4hpnHdsqd5FkSmRGYss5\nmYqgcDVHXCjklTbw5to8Ik1a7rlsKnqtHNaZHZXB5MRxHKo7SnFTaafPC/ZlNuo1/OiyXMwGDa9/\nfoyiyraC1V0s90s/OrPLdiC4sgJbKrZT46pjfuppxBlboioMGfLC4TrFzv7p1uJANNTV57SEWy4e\nfSZmjYmNZZvxdGKeCzjSgygtMTrJwg8vnoTb42fVf/bjPCUM0l1UhMpgQJuY1G1bxiAW+k8L5UV+\n+ZiLAou8oFJhGD0aT0V5mxpCoiTx9Bs7qWkO7Zw+Nl6+XxC4NOdCAD488Wmnz+rJwe4Lp49i4fRU\niqtsvHqKwuOrq8NvberWDKNgMssZ2X7fwBzc3h+EBXsn7Ks5RLn1JHOTZ7aJzRXUavTpo3GXlSF6\n5IknShIvfXQoMIFnnFIKYHH6AowaY7PW3vEWvCfHf2k1Ku5cPoUIo5Y31+ZxpJVtUXHkBaOhKc/r\n6kX+vOhLREnkgszFbcLZFE3Y3cq+3OTw8MIHB5GQuGP5FGIjDW3aWjHxAgA+K/qy0+c57B50ejUa\nbfe1t+Ojjdy8dBI+v8hf1+zH4WoxIbiKikClQp/eteMUujdJ+UU/nxauR6vSckHm4jbXlJ1L63E4\nWlzPuxtPEB2h47ZLJreJ1derdZyZdgZ2r6NTW3tXjvSOmDMhkQtPG83Jeif/+KRFqIkuJ57KCvSj\nM7p1nEL3C1yNs5a91QcYbRnFhJi2NWf0ozNAknCXtSzaH35dwK4jVUzJjm2XVDU+dgzjonM4Up9H\nqbX9jgtaFcULsk7M984dR05qJN8ePMmGVgpPixkmSMHeA9/TUCUs2DtAkiQ+L/oSAaGNtq5gyMgA\nUcRdKk/iT74tYn9+LZOz2k9gAKPGyDnpZ2L3Odhcsb3DZ/a0VG1spIHbL52MKEk8+c8d2Jrtoorm\nqE/vXmMHWai5HF78HYTN1bsa+LZiB4nGeGYlTmtzTW0yoU1KDjhQlcWt3upm5VnZjB8d0669qUkT\nyLCks7f6AJX2kx32x9HDEq3TxsSzdF4G1Q0u/v5fOUJEEkXcJcXoUkeh0nbfVncF0fbWHKTe3cAZ\nKbMD2aMKp2rsjTY3z39wEAGB2y+dQmQHv2Vh2jw0Kg3rSr5ClNqPe7C7ltasaM5e3XGkivW7ypr7\nJDtOgxVoxm58DV+WfI2ExOL0s9qZiJQdorJjPFhQx0ffFJIYY+TWZZM7DHFdPPpMud3Srzt8Xkeh\nr12h1ai4Q1F41uUFdnGKshOMWQ5GRmRMWLB3QF7DCYqaSpgzahrJ5vZpywFttaSIYyUNvL+pgBiL\nnluWTeo0RvvMUWegUWnYVLq5y5fZaAo+ZX5SZizLF2RR0+Dk7/89hF8UcRcVoYmL6zSF/lSUhcTl\naO8w21S2Bb/k57yMRe2ybEHeFYgOB97qaj7eXMjBgjpyc+K48PSOXyBBEDg/82wkJD4v2tDuut8v\n4nL0/ASp5QuymZgRw57jNXy2rQRPZQWSxxP0rkWtVmHo4gShDSXfALAwbX67a5rYOFQREbiLChFF\niRc/PEiT3cPli3IYlx7dYXuROgunJc9s1oAPtrvem8ObNWoVt186BYtJy1vr8sgvbwoqMak1gRju\nDsbB4ZWVkmh9FDMTc9tdNzSbvNwlRdRb3fzto4OoVAIPXzen0zIQk+MmkGiMZ0flb
qye9v6B3pz5\nGhtpaN7FSc27OF/LLjZowa4cQhMW7COKdc2OrUsnLunwuiLYrScKeOED+bT62y6ZTKSp8wkYoTMz\nO3E61c5aDte1P4HIYfc0F/rv2Z/k4jMymT4ugX0nalm34SB+a1PQmgl0blf1ij42l2/DrDExO2lG\nh99VIi3yt+9nzdcFxEbquXlp54sbwNT4SSQa49lZtReb197mmtOhnCDVs6p6ijM1yqzjvY0nKN4j\nH9emzwh+HMzmjk8QKrGWcaKxgImx4zpc5AVBwDA6A+//Z++9oyS560PfT3WOk3ty3JyjNiqsJAQS\nCiRjHgbDRRhjHDg8Xb/jc1+wr6/TxX6PCxiuMRgso4vBZIQQKGu1knalzTnvTs6xezqHqvdHdfX0\nzHRPV3XXzG6P+nMO54jpqq7f/vpX39/3942jo/zqlYtc7pli++oaHtzdsuDz3tVyDwICL/W8Ns8B\nnm+jkUq3lc8+thFRlPjGL87jV8JeVUTEwMLRQW8OHCWaiHJv850ZN3lLQ4Ncvri7i2/98gLTwRgf\nuX8VazKc3BQMgoF7W+4iLiV4vf/IvM+12NjT2bKymkf2yae4J399iXBvD6bKKoxudQW0SqaYZch4\naIIL41foKGtldXXmI6y1sQmMRgbOX2HKH+VDB1Zk1c7SOdCyH4BDfW/O+yzfLjEGg8Cffmwn5S4L\nxw+eBtRrJpDdvnxq5Cz+WIC9jXdgMWbWuJQN5PQbZzAIAn/4/k05i3QZBAN3Ne0lLsbn2ZgLaVpc\n7rTw2ffJpqmzb+QxD65kB6HobOfjweRvdW8GbV1BmYdTh85QU27j04/M7zE7lzpnLZtq1tPl66Fr\nTmRIPqYYhY0dVTx2ZzvjvjCjF6/JjlOPumJZNocFQZgv0BJigoN9b2I1WrizcU/GewWTCUtzC6He\nPq71TLBzjYcHdub2b+yp34ndZONQ/xFi4uy5L2QePnB3B2tbKrh0sYfE1JSqYAIFRw7TXDFQEuxz\nODxwFAmJu5Lx2pkQTCZC5R5c02NsX1HFQ3vULZpWdzMdZW1cGL/CaHCmNEAsliAaSeTdJabCbeVz\n79tIXVj+zkRt5iYCmZhJzpn9Mh/qO4KAwN2N2WvLm5plrbQiMMZv37eKlU3qKlDubbgDs8HE6/1v\nzTJL5auhKaxvq+T9d3VQ4RtBQsDctLDWnI5ycvGnNTKejvo5PnyaWnsNG6rnt1JTiNfKpYwbouP8\n4Qc24bSpM6cdaJI3+jcH3p7190Ln4X13drCp2YUjMEmgok6V4xRkJcHumF8Q7ezYRaYiXvY27MJh\nzl4CIFBei0FMsMYa5vGH16mqm24zWdnfuJvpqJ+Tw2dmfabFkT4Xo8HAH7x/Ix2CbGcPVOSOClIo\n2diXGQkxwZuDR7Gb7OyY4yxM5/zNca7HnJilBJ/YVampTviB5v1ISBzqnwl9DGl0EmVibWslO8rk\nF/IHF0JZMzLnkmkR90730+nrZn31GjyO7DVNfnFsiCmTi6b4FA/snN9PNBtOs4OdtdsYC41zZWIm\nwUirsywTj+xppSE2yZiljGeOZY62yIQjJdhnopYODxwlLsY50Hxn1gJXsbjIDy7K9+yqiNHRoL5F\n5NqqVVTbqjgxfJpQfCaxphBNFWQB/cntZRiQuBiyc6l7UvW9mWK4lY3nrizaOsDIZJBDI7IA/vB6\nKw6VmxvIG5yAwBsDb836e9BfWK/TCpeVRzrkMT3fk8AXVCeoS6aYZcaZsQtMR/3srd+Z1fwwPBHk\nn5++wIhdFniG4eylBTKxvXYzZRa3XNI3IduU83GWZaJ8eoSIxcGZ4Rg/Oaiu6l0mU8yhPtneqWiU\nmXjr4hDPvd2D112DNRpE1Nip/u5m+USUblstVKABJMZGMSViTLk8PHO4S3VFzJR9OdlvVZREDg8c\nxWIws6dhfmlihe+/dJXzkxA3WamYHtE0VoNg4M7G3UTFGMeGTqX+rkcTa9PYIAAjtiq59rvKcscO\np4V4TCSajIcfD01weeIaK8rbaHRlLiIWiSX4p5+fp9con9hck5kjnrJRba9iXdVqbnq7GUxGSyUS\nIuFQrOAuRmU+OSP3pljGN35+XlXRNKvNJNfoLwn25cGb/UnNpCmzZhIMx/jqT84SjMTZcfc2gJyF\nj+ZiMpjY23AHoXiI06Pn5O/N01mWTsLvJz4+TvmqFdRVO3n+aC+vn82tsc7VTkLxMMeHT1Ftq8xq\nfugemubffn0Zm8XI6js2AvMTdHLR5m6hxd3E2bGLTIbldmip7NsCBFqkV/491u/ZjNlk4F9+dYHB\n8UCOu2bmwZ8U7NcmbzIWnmB77Rbspszmh4On+3nt9ACtdW6cHe3ERoY1N/ne27ALg2DgjYG3U05U\nPZpYK+ty54Ht+EMxvp4hIzMTc9fD4cFjSEhZbeuiJPHtZy7SM+Jn3a4NcvXTXm3vBMD+xt3y8waO\nAoX5W9KJ9HZjcDhZtbGdK71TfO+FqznLM880tS4J9qJnJDjG5clrrKrooN453x4nihL//MsLDE0E\neXB3C3fcK0eKaBVoAPsa5GbYR5LOQz00VeVlcrS384UPyxmZTz13Jecx3GY3z3KYnRw5Q1SMsa9h\nd0bzw4QvzNd/dpZoXOSzj22kZu2qWc9XiyAI3N20FwmJI8nYfj02OGUc9RtW86n3riMUSfA/fniG\nqRyOsJQpxidfd3hQFjCKwJnL2RtjfO/5q7jsZrm+fFurnKDTp635Q7nVzZaajfT7B1NO1KA/e+Er\ntUR65cqW++/blsrI/PavLuYs8JV+gkuICY4MHMNusmUMcQT46cEbnLg6yrrWCn7noY1Y6hsI9/Qg\nqTQFKmyp2YDL7OTtoRPExLgu74QYDhEbGcHa2spnHt1Ia62LQ2cGePlE9sxnBSVxr1g7w5UEexJF\nuNzVON9pKkoS//aby5y/OcHmFdX89r2rMNrtmGvr5BK+Gn/8WkcNqyo6uDp5nbHQuC6mmHBaEkZ9\nlYM/+ZCc/v8/f3aOgbHsGqviMFM0pCMDxxEQ2Nuwc961/lCM//GjM4z7IvzWgRVsW12TSoTKVbo2\nEztrt2IxmHlr8ASiJBIMROXa2xra380lPUFr38Z6Pnh3B+O+MF/58Zl56fbppNvYg7Egp0fPU+fw\nsHJOZySQW9v90y/OYzQKfOHDW/BU2LG2JHMbNJ7gYMZ2/cbAW8RjCaKReEFrQUomz1kbGzGYzXz8\n3WtY21LBiSujPPX8lQXXa7rGfmH8Mt6oj11127EY54/ntdP9/ObtHuqqHPzRBzdjMhqwtrUhRcLE\nRrSZY0wGE3sadhKIBTk7ekGnTb5PTtBqacVqkes3lTkt/ODlaxy9tPD4lBr9UQ2dqm4nSoId2Z76\n9uAJ7CYbWz2bZn0mSRLfe+Eqb5wbpL3ezR+8b2OqNrS1pQUxGCA+kbv5xVz2N8ia4JHB4/os4jkZ\np2tbK/nUe9cRjMT5hx+con8B4a5oJ0OB
YTp93ayrWk2lbXb4Zjga58s/OsPAWID37Grh4WQSkqmq\nCoPTSaRXm6YKcvOFHbVbGQ9PcH3qplx72zm/9rYWIr09mKpmErQe3d/OPVsb6Rn28z9/fo5INLM5\nIt0Uc3T4FHExzr6GXfMiO/pH/Xz1x2eIxUU+976NqUggm5J5mYcZQnaiVnJy5CyTPjlRpxDBHh0a\nQopGU2vBZDTw+d/aQmudrLH+7FDmKozpzw0GoryZNItkMsO8fnaAp567gtNm4n//7S2pMFdbARuc\n8k4cHjiqi8Ye7p1dN6m63MYXPrwFq9nIvzxzMWtFTCj+FnklwQ5cmriKN+pjZ922WU5TUZT4wUvX\nOHiqn5Zal1z7Oa0LUioDNQ9tdXvtZmxGK28NHk/VAS/UFCPHLM/UqblzcwMff/cafIEo//D9k/SN\nzM/uA7C7LMSiCd7slU1DiqlIwReM8qUfnqZz0Medm+r5yP2rUgJPEASsLa3ERoZJhNT3I1XY23AH\nMLPBFTIHce8UCa93VsyyIAh84sE1bFtVw8WuSf6/H55KlV9Ix2I1YTAK+H0RDg8cxSAY2DPn1HKj\n38sX//0kvmCM333PWrantXSzNDSC0ZiXYDcIBvbU7ySaiHKm/zKgjzkqPVHNYTPxnz+yLdV16Iev\nXEPMoLkr8z/pnebixBVa3U00u2eHzx483c+Tv76Mw2bi//joduoqHanPlLkP5zEP9c5aVpZ3cHny\nGmNTXnk8eig7afPQ0VDGEx/Zislo4J9+cT6rcz1XeYXbHV0E+6FDh3jooYd48MEH+da3vqXHVy4p\niq17X1LIAATCMf76X9/mpRN9NNY4+dOPbpuXfKMkwITz0E4sRgs767YxFfEy4Z1esPZ2LhKRCNHB\nQawt87vkvGtnM598cC3TwRh///2TnL0xfyErNeBP9VzAaXKwxbMx9dnAWIC/+e5xbvT72Luxjk89\nvG5eeKcyD1GN9mWAVRUdeOzVnB68KNfeLmhzk58/t06O0WDgjz64ib0b67jR7+Pv//0k497ZxdgE\nQcDhtOD1Buj3D7K5ej1llplMxbM3xvh//+MUoUiC33tkPfdtnx3eKZhMWBubiPT1IiXU9SJNZ09y\n7V3ol7OSC5qHLPWCypwW/vSj22iodvD80V65xO2cE4wiSPvGhxElkb1pm3xCFPnF6zd56rkruB1m\n/uxjO2irn53NaU3mNuSzwQHsa5Sf1z0qO/4Lm4ceBLMZS33DrL+vbq7gCx/egtEg8LWfnuVXh7vm\n+R6cRR7yWLBgF0WRv/7rv+Y73/kOv/rVr3j22We5cUN9g9lbTSAW5NzoBeqddbS55UV5Y8DLX/3b\nMY5fGmZjRxX/5eM7MpYLSBU+ykNjB9ifXMTT06FUE+t8CPb0yl1yWjIn5Ny7vYnfe2Q9kViCr/z4\nLN9/8eqsRsj25CKOhBLsqt+O2WAiIYocPNXP3/6vmbKrv//ohlmVChUUAZKPI1kQBFlrj8jfq4um\nmqEAmslo4DOPbuCBO5rpHwvwF//6NgdP9c/SWh1Oi6yhSTMCJhiO893nLvOVH59FkuBPPrSZOzc3\nzPt+5blSLEZ0WJt9GaDGXsWaipWMTckRQos1DzXldv6vT+xkXWsFp66N8bf/6wRXe6dSnyuCdGzK\ni0kwckfdtuT/D/H3/36KX77ZRXWZjT/72A5aaufXIzK6XJiqqvMW7Ns9m7EYLYwm5yHfkE8pHic6\n0I+lqRnBOD/BaV1bJX/2sR1UuK387NBN/vt3j+JNc7Ar85CpzEQxkJ+KmMbZs2dpa2ujqUnWYB55\n5BFefvllVuboDH+7cGz4FHEpwd76ndwY8PGrw12phtEfeWAN79nRlNXmayqvwFhenpd9GeSQv3pH\nHVLEgLUy/58ikOzmtFBFxzs3N9BS6+Kbv7zASyf6OHVtjPt2NHHXlobUIjbFrGyr3s6JKyM8/UYn\nfaMBrBYjv//oBvZtyt4I2VqAfRnktPJXzsjOaz00VVuWeTAIAr/zrtU0e1z88JXrPPX8FY5cGOL+\nHc1sXVWN3WkGUaDcUEGdqY3nj/bw/NEepvxRmj1OHn94/YIJSNbWVjgsR6RYG9Vn/yrsbbiD586f\nBPKfB0mSiPT2YK7xYHQ4Ml7jtMlNsb//4lUOnh7gi/9+kl3rannPrhbaG9wYTQKJMGz2bGRiUuSn\np65w5PwQkViC3etr+eSDaxdMQLK2thI4fYq4dwo86uqzKNhMVnZ4tjB8TijIkR4dHJA7aC1QVmJF\nYxn/9VO7+Oenz/PW+SFOXh7hwLYmHtzdoqo2/e1MwYJ9eHiYhoYZDaauro5z584V+rVLxuHXbuK2\n1fLTX0SIhk4AsKa5nA/cvYK772hldHThxtHWllaC58+R8PtVV1RUEASBXVU76ZQgYgzm/W8I3OyS\nx5KjNkprnZu/+NQufn7oJgdP9/OTgzf4+aGbNDsEagFLqIIvfvs6kgQCcNeWBj50zwoqciSJWOrl\nAlD5OMwAKm0VtFrlscfN+dfnCPf2YLDbMdXUZL1GEATu2drI5hXVfO+FK5y6Nsa1Pi9mk4GV9ghu\nrIhDTfyXf5ZzGkxGgQ/e3cF797blbEg+43PpgT3ZSzFkY1vtZl6NyzZ2m4Yqn+nEp6ZITE9jX71m\nwetMRgOffGgd+zc38IOXrnHs8gjHLo9gtRhZb4hgilk5fzzB4SHZgVpdZuV337OG/Zvqc54srS2y\nYI/09sIq9WUdFPY27OTZ2GWwJPJ2pCvm0VzlqxXz1MkbE/zwxSu8eLyXF4/3Umkzsgq41jfIPopD\nSU2nYMGeb5ynR+NOvliU9zVisVThrK6mbUMZ79ndxuZVM4Ih1zgDa1cRPH8O2/QYFR2Zj+gLsT+4\nk05OMMl43nMyeLMTwWikactaDJbcmt7nP7qDx9+/mVeO9/DayT68kWvgr0OYqmJ9exWbV9Zw59ZG\nOhrV1X4BGGxvI9DVTXWFDYM5u1DK9m9cX76Wq/gYZgCPJ3Ps+EIkwmGuDg9TtnEDtbW50/o9Hjd/\n9bkauod8vHlmgDfODBCMD+CmkcRoLVtX13Dn1ib2b26gXGX2Y9yxnj5AGh7I+7f0mGqJAn7HOOs8\nC6+nTM+Y6L4KQNW61arG4PG42bOliWMXhzhxeYSzN4eJ+idwBMqxhl3csb6Sh/a2cceGeowqhaxh\n01omngHT+FDWcS5Edc0WXox3EbIFcFeYsZltuW+aw/SY/Oy6LesoU/H8h+vKeffuVl4+1suxi8Pc\nnOwkOi4QIXbbyCotFCzY6+vrGRiYyXAcHh6mtjZ3NblcmvBS4XY5KBMcfOKTM45TZWwejzvnOMUa\n+eUbOXeZWEO75uf7RuQ42QlxnDOd17KmbWdDEkUC3d2Y6xsY90YA9RrvvnW17F3r4YsHj8BwHfva\nV/LgYzPt77T8RoaGJqTrNxg4dzWrlrTQfNojZYCPs5PnGRrOXP99IUI3roMkYahv1DRuh1Hg3Tua\
n2L3RzZd+fhTGG/n9d+1gzUY5SS0aijIaUn8cN9d4mL5xk5ERX14+E1vcSVgI8XLnm7Q5s5/Ass3l\n+DlZ449X1WmahxV1Lvl/66Z58RdhhEAl//UTu1MmoYnxzBFVmYiVy9FCE5ev0Yz2dz0WjSMkjMRM\nYV64eDjl79DC1JVrIAiEnFVEVDzf43EzNRlk56pqdq6q5t8vXeTwwDH+eNunbxtZBeo3yYKdp5s3\nb6anp4f+/n6i0SjPPvss73rXuwr92iXD6ZKTc/I9eaQch3nalxUbXswS4a2hzK3SFiI2MoIYDmsq\nS5pOr7+fgbhc7yYRzj/LrpAIIYBIQN7gvMIklyauar9/AYehGo4NnyJqliNlCnGYWVtaSUxPk/BO\n5b44A2JIQLLEOTt2nmBMe/io1m5BczkyeCxlDss3httUXYPBbi/4nYibI6mINS2k/Ax1dRhs2rX9\nSCLKyZEzVNrKWVe1OvcNtyEFC3aj0cif//mf8+lPf5pHH32URx55pGgcpyB73RMFZJjJHdqteduX\nlZfHZIWjQydJiNpC5RSBls1hmIu3Bk8gGuIYjIU5itK7SuVD+sv81tAJzfcXItglSeLI4HEkc2zW\nWPIhFcedx3qQJEmO5XdZiIlxToyc1vwdkd4ejC43psrsDS6yMRme4vLENdxuuTZOvvOQym0YHiYR\nztzjdyGUd6LM7eCGt5ORYPZEokzEx8cQQ6G834nTI+cIJyLsadiZtarn7Y4uo77nnnt4/vnneeGF\nF/jsZz+rx1cuGWo61C+EYDBgbW6RO7THtH+H8vKsbmhjOurn4sQVTfcXItBiYpzjQ6dwW1w4XbaC\nsuysTc0gCPlvcIEoJrOB2rIazo1eIBDT5kyO9PSA0Sg3QdFIl6+HocAwq+rlzamgeSigxEIkHEcU\nJWoqKhAQNGuriWSbQmtLa15moLcGTyAhsaJWbpBR8AYnSQS7ta8H5bntHvm31DoPhZ7elAYwe+vv\nyHHl7Utxbkc6okdYk7W1FUSRaL/6+t8KyrH/jhbZtq2kcatFrfc/E+fGLhKIB9lVvx2nq7CiRwab\nDXNdHZFe7bVzYKb29r6GO4hLCY4Pq9dWpUSCSF8v1sYmBJN2t5FSUXBfm1yeVw+NPZ/QT+W55W4H\nG6rX0u3rZcA/pPr+mYxT7WtBlETeGjyGxWBmbf0KoHCTFID/Zqfme5WNdVVdG3aTnbcHj2s6yabe\niTzMUWOhCa5O3ZAT5xboRXC7844X7Hp0S0lpaXmYIZTnrqxrodXdxIXxy0xFvKrvj/T2YPXUaA61\nhJkyxXc27sbutCBJEM6Qbq8WW2sbYihEbEzb0VkUJULBKA6XlV11OzAIBt5MK2Obi+jQEFIslteL\nHI6HOT5yhipbJRtqV2O1mQpaC6bKKowud14ae3oxOKWsw9z2gQtRiGC/PtWZKlNcWe6aNZ58UHwu\ngc4uzfcq819WZmdX3Ta80WlNJ9lCNPa3FW29QbvD9naiJNiz9PzUQiGOQ7n9lwmTycj+xt2pgmRq\niHu9JLxTODsy92ZdiLHQOJcnr7GyvJ16Z50uRY9mzBDa5iEciiFJ8m9RbnWzuWYD/f5BeqZzl1eV\nn9clP19D82qFkyNniSai7G24A4NgwOW2FiTYBUHA2tpKbHSURDB3Hfh00ovBba5Zj9Ps4O2hE6q1\n1ZlSAtrnQaluuq9hly6nWKV2TiAfjT2tAJgSEXNk4Jjq+yM9PRjLyzGVqw/XBbmD2uHBY1iNw/az\nyAAAIABJREFUFrZ7Nue+4TamJNh1qAlhaWzKu8FAeu3tO+q2YTaYOTxwdFYv0GwoJwRnR7vm5x5O\nvihK5T5dTi55OlDnNthQxvRG/1tZ70lH2VBteQi0wwPHEBBSdYJcZTbCwRgJFZ12sjErUUkDMxq7\nFZPBxO76HfhjAc6MXVB1f7inB8FiwVKvLWQ2FA9xauQcHns1qyo6sCeTowra4JK1c4Ld3Zpr56QL\n9hZXE02uBs6NX8IXzR12mPD7iU+M56WtX5y4wlTEy676HdhMhXVuutWUBLsOGrvBYsFS30Ckt1dT\ng4FU+6/kGOReq1sYC09wbTJ7aVWFcHdSsK9coWm8CTHBW4PHsJvsbE82ULiVGvvcssXrq1ZTZavk\n+PBpQvHcURWRnm4QhKy1crIxmFamuMomR5G43PILHQ7mb5KaqSFU2DwovQFe7zuS9R4FMRYjOjiA\ntblZdfNqhbcGTxATY+xv2I0gCBiNBmx287ym1lqxtrUhRqNEhwY13Rf0y450s8WIIAjsb1B/kk1F\nieVhlns9qUjcnaEnQ7FREuw6VXGztrbKDQZG1duXQ0nh4XDOZGoq2qrSwWchlKO3a4U2wX5+/BLe\n6DS767enyhTrobGbysowVlRoPrnMbTSS3gv0+PCphW6diVmu1R6zrPgY0rskKYK9kHlImeY0nlzm\ntoOrd9aypmIlV6duMBRYuLBYdKAfEgnNZhhJkni9/wgmwTgrEShTU2utKPMQ6dY+D+lF8XbXb8ds\nMPN6/5GcJ9l87eujgXEujl+ho6x1XpniYuQdL9hNJiMWa2EOM8jPgTpjgpg59q0ob6PeUcupkXN4\nIwsfPSM93Rhdbiw12rz3mRooOJNp84U2FrC1thGfnCQ+rb65daZGI/uUXqD9CztR42NjiMFgqtGF\nWsLxMEcGj1NucbOlZkPq704dBLu5tg7BastbY7enbfR3N8s1Z17PYZbK13F6ZfI6w8FRttduxW2Z\nccA7nBaikQRxFX1Ss2FtawcgnPSBqCEVy59WBM1hdrC7fjvj4UkujF9e8P5wlpLFuXj55htyb9em\n4tfWoSTYAXRpXGtTFnFXl+p7UppqmkATBIEDzXeSkBK80Z/9CJ4IBOSY5bY2TTHLI8HRlGbS5Jqp\nRTLjMCvw+J2HGSJTa8ByaxmbazbQ5x9I9QLNhCI0tEbEvDV0gnAizN1N+zAZZkIkXW7brDHlg2Aw\nYG1J5jZE1X9PwB9JOdIVttZspMzi5u2hE0QS2b8rX8fpoeQaO9A8u2iZLj6X5hbZ96RBY1cc6XPL\n9R5ovhOAg71vLnh/pLtbDr1VUdZEISEmePnmYewmOzuz9HYtNkqCHXkRh0M6Ocw0LOJsLfH2NOzE\nbrJzqP8IsURmW2+mLjlqeLVX1kzua7l71t9TDrMCN7h8en9mm4d7mmRh82rv61nvjeQRsyxKIq/1\nvYlJMHLXHA3NVVa4xg7JVnnJ3qNqCQWiqYQ5BaNBjpYKxcOcWCC2P9zTI/sZmptVP28yPMXZ0Qu0\nuBppL5ut4ephojRYrdibGjU1t86k7AA0uRpYVSF3VxoKjGS8VwyHiQ4NYm1t0+RnOD16Dm/Yx976\nnRl7uxYjJcGOPkX1jQ4H5to6wt1dquOvszWxthot3NW4B38skDVRJ9zdBYBNQ4ifPxbgyOBxqmyV\nbJvT29VoNGBzmAno4GsArSappAliTqnatZWraHY1cnLk
LGOhiYz3ztRGUX/0vjRxjZHgGDvrts0y\nP0CaYC/UcagxQkh2pMczNpa4q3EPAgIH+97MuLYkUSTS24uloUFVdU+FN/rfQkLinub98059ejWa\ncK1ckWxunVkYz2WhXqeK1n4oy0k20tsjN5xJnp7VIEkSL/a8hoDAPc3aSy3frpQEO/o5UG1tbXJz\n67HMfRTnEsiiqQIcaN6PQTDwat8bGV/mGYHWrnp8b/S/TUyMcV/znRmrJzqdloJfZHONB4PDkYrY\nUUMwEMXuNGOYo2UJgsC7Wu9BQuKVLFp7uKcHU2UVJnfuUr0KB/veAODepKBIJ2WKKXiD09YPN7TA\nWqi0VbCzbiv9/kHOj1+a93lsZBgpEtZkhgnHw7ze/xYOkz3VJSkdvRpNOJOOfbV29oUE+9aajZRb\nynh78HjGaKl8lJ0rk9fpne5nT/N2ah2e3DcUCSXBjj72REhzFiUXWC5CSU3VmaHed6Wtgu2ezfT7\nB7k2Nb/VYKS7G4PdPqt59ULExDiv9b2JzWhjX2PmeucOl+wwixXgMBMEAVtbO7HhIRJBdfVeFmpi\nvbN2K5XWCo4MHMUfm53wIzevntKkrQ/4h7g4foUV5e20ls03WzidFgRBB5NUY5Pc3FqlSWohgQbw\nnrb7AHi+65V5G324S04CsmlIVDvUf4RAPMj9LXdnND/oEQYMssYO6k2UC82D0WDkQPN+wolIRlv7\njGBvVz2+F7pfBeD969+j+p5ioCTY0U+w2zQK9kAggsEgYLVlrm9yX8tdAPxmzssshsNEh4dkW6JK\nx+nx4dP4otNy+QBT5rBAvY7fyganRluNRePEogkcWZpZGA1G7mu5i6gY4/W+2ZEh+djXn+18AYD3\ntN2b8XPBIMz0Pi0AwWTC2tSsurl1LsHe5Gpgc80GOn098zZ6xWFva1Mn2COJKC/3HMJmtKXMG3PR\n6xSrJM+p3uCy2NgVDjTvx2ly8HLvoXlljSPd3QhWG+Y6dQla3b5erkxeZ23lKlZW5Vfm+HalJNjR\nJzkH0h2oXaquV7JOswnnjvI2NlSt5erkdS5PXEv9PdIrN69Wm4QRS8T4deeLmAQj97ZkfpFhZh4K\nFWqK5hjuzJ1OHgwosfzZbcPKZnSw7w3CaUfwlIamUmPv8fVxevQ87WWtbKpen/U6R4EF0RSsrW1y\nc+vB3MXhsvlb0nmw7X4Anu96ddbfw12dsuNU5Ty80f8W/liA+1ruxGG2Z7xGL43d5HTKvqcedb6n\nXPNgM9l4oPUAoXiIV5MmNQAxEiE6OICttVW14/TF7oPAzGloOVES7OinsRudTswejyoHaqZ43Uy8\nb+V7AXj6xq9TyRlhjbVRXus/zER4kgPNd6YyLDOhxNMXHPrZnhTs3SoE+5xyAhm/z2Tj/pa78ccC\nPN89I9RmTBDqErSeufk8AI+teHDBk47DaSURF4lG8jdJyePqmDXOhcgWGZROR3kraytXcXnyGlfH\n5MxkKZEg0tONpbEJgzV3Gnw0EePFnoNYjZZ5kVHpWG0mDEZBl2bO1tY2xECA+MR4zmuV9ZDJiaxw\nT/N+XGYnr/a+ntLatTpOu329nB49T6u7ibWVq1TdU0yUBDv6aewgmyHEQID4+MIO1Eg4jpiQcgr2\nFncjd9Rto9c/wMmRs/K93eodp/5YgOe6XsZhsvNQ+/0LXjtz/C4sIsRUVS1XOFQR069GoAE80HqA\nSmsFr/S+zlhoAkmSCHfexFhRgakid1OJ61OdXJy4wpqKlTm74ug1D6kNrjN3eYhcphiFhzveDcC/\nnvwhoiQSHRpEikZTz8rFq72vMx31c6D5TpxmR9brBEE2Sekh2BVnphqHeiAQxeYwY1ygcbjNZE1q\n7WFe6T2U/O6u5LPacz5DlER+dPVpJCQ+uOqRvGrX3+6UBDtgs5sRhMJty6Dezp7LlpjOYysexCgY\neebm88TFOOGebtXFnp7rfJlQPMx729+FY4EXGfQ7uQiCgLW9ndjYKAn/wr0y1ZggACxGCx9Y9TBx\nMc7Prz9LfHKShNerSlsXJZFfXP81AI+tfDDn9XqZIaxNzQgmkzqTlMr1sKqig931O7g52cOhviMz\np5b29pzPGA6O8uuul3CbXTzQeiDn9Ypg18MkBRBRc3LxR3HmWAsga+1ui4sXe15jKDCcMn+q0djf\nGjxOl6+HnbVbWbMMtXUoCXZgRjsp1LYMaY7DHNrJjKaa+/hcY6/m7qa9jIXGefbys0T7+7C1tee0\nJfb7BznUf4QaWxV3N+/P+Rw9Ty6KoMm5wanUVEGOkFlR3s7p0XN0npdjme0qBPvzXa/S6etmR+0W\nVpS357xeL1+DYDJhbWsn0tebMwM1FIgiCLKSkYsPrXoUp8XBMzefw3dD7g9rzeE4FSWRf7/0E+Ji\nnI+s/cCC2rqCw2VBTEhEwvm1jVRIKTs5NrhYNJF0pOdeC1ajhY+u+SBxMc5TF39EuKsLwWrNqewE\nY0GevvEbLEYLH1z1iOp/Q7FREuxJHAU2tVZIFYDKqbHnti2n89iKB6m113DhzKuy4zRH4a9ALMi3\nzn6XhJTgw2veh9mQu7OQXho7gK09Gb+cQ0tTa4oBeQP+8OrHEBA4d+ol+Tk5BHunt5tfd71IhbWc\nj679kJqhF9wuMR1be4ecgZqjMJrib1FjFnBbXHx8ywcJJyIMXz0jtwTMUdnyzYG3ueHtZKtnk+pa\n44rSESgwWcvocmGuqyfcdXPBDFTF9KVG2QHYVruZXXU76J/sITI4ILcEXEDZkSSJH1/7Jf5YgIfb\nH6DSVqHtH1JEFCTYn3vuOR599FHWr1/PhQvqakbfrjicFuJxkVi0MIeZ0eXCVFOT04G6UHJSJmwm\nG7+36XdpnJDHF2/OrpmIksi/XfgBY+EJHmq7n81pRa4WwmwxYjIb9NXYcwl2laYYhbayFt634iEq\nRmQTj9CcvRJfOB7m3y78AEmS+E8bPqpKS4UZwVKojR3SI4Sy29klSSLojy7oMJzL/Sv2s8rVimPE\nR6imbMGWgDemuvj59Wexm2z8b2s+oNqmrOcGZ1+xEjEUWrCEb0CDeVLhI2veR7vfgiBJRBqqFrz2\nmZvPc3ToJK3uplQo8XKlIMG+Zs0avv71r7NrV3G3kQL9Mu1A1lZFv3/BEr4zyUnqF3Gzu5GdIbmS\n43+E3s7YvT0hJvjZtV9xceIKG6rX8sgK9YkXejrMTBWVGMsrcjpQlSbWFqv6XqUPtNxDw6TERJmR\n73U9k7HD0Hhokq+c+iZj4Qne3XYvaypXqv5+XU8uyRPFQoI9GkkQj4ua1oJBMPCJqvswiXDdHeLZ\nzhczXnd54hpfP/0vxMQ4v7vutym3qs/Q1cskBaROmOGb2edB2UDU2NgVHGYHDxnWAfBi4irnxi5m\nvO5g75s83/0KHns1f7T192YVfluOFCTYV6xYQXt7e8Hmi9sBPe3L9lWyQyZ841rWawIabMsKkiTh\nGJwk6rJxnXH
++7Gv8ubA28QSMSRJotPbzd8f/0de7XsDj72axzf8DgZB20+smKREsfDf1NbeTnxy\ngrh3Kus1akI+5xIfGcYUjROsr+T06Dn+7uiXOTt6QY6UiYc5N3aRvz/+VXqn+9nXsItHO7RlFerl\nPAW5hK/B4Vjw5JIyy6k0QSiYBuSNPVBXwW+6XuKpiz+k0ys3E58IT/JKzyG+ceZfEZH47OZPsq1W\nW7s3p1OfujkAthXyxhq+OT+LWkFLQEE65UNyiejBWgvfPPtdXuh+lfHQJJIk0Tc9wJMXvs9Prv0S\nt8XFn2z7zLz6QMuR5b1taSC1iHXQ0uwrZcEeun6dsn2ZE4JSha80CLX4xAQJr5eqHTt5fOPd/MeV\nn/H9yz/l+5d/ikEwpOLc72zczftXPpwzCiYTDqc11dRaq8Cdi629g8CZ04S7unBtnV+PRBQlQoEo\ndU3qtUiYMe9s2v4uhhoDHB44xjfPfReb0Uo4IQsho2Dkd9Z+iDsb92gOZzOaDHJTax0EuyAI2No7\nCF68QMLvz9h0PJDH6Q1InYYevPPjdE78hreHTvD20AncZhfTMdlUZTGY+YMtn8oZ4pkJXcOAm5oR\nzGbCndkFeyCPDU6SJELXr2EsL+f37v5j/vncv/H0jd/w9I3f4DQ5CMTlshaNznr+04aPUmPX1rug\nWMkp2B9//HHGMhS1euKJJ7j//oXjohfC43Hnfe9iUN8oCxdBmj22fMYpVmygz2Ih1n0z6/3RcByH\n00J9vfqGu2NXzwFQvXkDWzfdza6Ojfzowq+YCE4RSUSxGM18eOPDrPdof4kVqmuc3LwyitVsKvg3\nMm3byPjTP8cw1IvnATkZJv07/dMRJAkqq5yanjU9JJfCbb1jO19Ys5rf8j3ED889Q59vkFpnNR5H\nNfd27GNVdXte4/Z43JRV2Jn2hnVZp8GN6whevIB1apjKjoZ5nw/2eAGoayjT9LxY900MFgsb9uzm\nHw17OTt8iYOdR7gwcpXtDRvZ0bCZXU1bqXLk5yS0W+UInXhMLGgelHuHV6/Cd/kKVS4TRvv8jFcx\nLp8SW1orqax2qvruyOgoiakpqvbuYf2qjaxs/L95vfsoNya6uTnZTXtVM4+tfTfbGzbm3OBvN5lU\nCDkF+5NPPrkoDx4dzd2YdimJJ731I8PTqbF5PO68x2ltayd4/RpDPSMZF7HPG8JVZtP0/aOnzgOQ\nqGtO3mfmtzs+OG+chcytYJQXf3/fJEZLYUFTiepGEATGz5zH8eD0vHGODcv/bTQZNI158uIVMBoJ\nuqoJj05jxcUn1/zO7IvE/OZBGaPVZmJ0KMbgwBQm8/xKmFoQa5sAGD59gXjzfFv/0IAs2EVJUj3m\nSrtAsKcX+9p1jE/K2ZdNplY+vroV0vb1RABGA/mtB1GUEASYnAjkvabSf3NjcxtcvETf8XM41s0v\n6TAxLhd5C0diqp83ffQMAIaW9uQ9RvbX7GN/zewSvGNjC+dTFPKuLyVqNx/dwh2L3c7u1Cm0S8G2\nchUksyPnEo8liEYSmk0doZs3wGDQVL1OK3ral40OB9bmFsKdNxFj8xuGaAl1VJDicSK9PVhbWjGY\nc8d858tSOlDzsS37Ll8BScK+Kv/TWS4MSkG06cLnANLs7FnmIdVBSsNGGrpxHZgxf5aQKUiwv/TS\nSxw4cIAzZ87wuc99js985jN6jWvJ0VOgAakXLpxceOnkLdB6urE2NauqCZIvelX1U7CvXo0Ui2Us\njKY11BFk+7oUj2PX2MBbK3rOg6miAlNVNaEb1zPGcSvKRKbyzdnwXZCjP+yr1xQ8voXQqyAazETG\nhLI4UIP++R2kchG6cT2ZCLa8qjMWSkHO0wceeIAHHnhAr7HcUowmAza7WZfQLgDbSlk7CV2fHxmT\nj0CL9PUixWIprWex0H2DW72WqVdeJnTtKuzbMeuzlNPQrX4eglfkZsb2Net0GV829HQcAtjXrmX6\nyGGiA/1yL9A0gn456zS9iXUufJcugyBgX7nI68FlZXTITzQSx2or7IRkqqzCWFFB+OYNJEmaZfNO\nxEUi4Tg1deojVsRIhEhPN7aOFRjMy6OlnV6UMk/TcLosuoR2AZjcZZjr6uRFPEdLyycRQ9FycmWc\nFopTx9hlkDV2QBbsc8hHUw1dvSJ/75q1OowuO8qY9BLsjrXyRhRMjj+dgD+C3WGZ10EqG2Isiv/a\ndaytbRhsmcvu6oWe60EQBOwdK0l4vfMqPeZzig13dYIolswwGSgJ9jQcbqvcQShaWG0MBfuKVXK2\n3eDsbDslo1GTQFM01UW0qQLYHMkOQjpkXYKcqGT2eAhdn2+GCE5re5mleJzQtatYGpswlWkLkdSK\ncnIJ6DQP9qRgV35HBSXrVJNA60yao1Yv7lqAxTjBJTf6K7M3uFSoo1P9O6GYOW2LfGopRkqCPQ29\ntVVbMlEpNCdRSUvhK5CbFQcvX8JUVY25tk6XsWXDYBCwOy26vcgA9lVrEIMBgr19s/4e8EcwGAVV\nha9Arr8jRaPY1y6utg76m2LMNR5MlVWErlyZZa/OJ+s0nDTvLbZ9HcDp1i9JCcCxXi5vEbw0O0M0\nmEcsf8lxmp2SYE9D7+O3suDC1+YIdo2mmEhPD2IggGPDhiWpHe10yZUu9Yp0UgSQ7+Lslzngj+J0\nWVX/mxRtVzFrLCZ6a6qCIGBfu5aEf5rowExHpXyyToNXZbOWfdXiC/bUyUWnebA0NWN0uwlcujBr\nfWk1xUiiSOjGdUzV1arq8b/TKAn2NGZqY+ijnVgam+RFfPH87EWs0XmqaDeKtrPYOF3WlDNLD5Tj\nt+/ijBlCFCWC/ogmDW2pHKdAMuzOoFt0EMxsSKErl1J/05p1Koki4RvXsDU2YCpXn9yWL3qfXASD\nAce69SSmpoilFQTT+k5EeroR/f4leyeKjZJgTyNlitEpblcwGHBs3ETC6yXa15v6e9AvF74yW9TF\n6wYvyZUzHeuWSLAnj9+BaX02OHN9A0aXG9/FGYEWDkaRJPWaqhSPE7p+DUtD46Lb1xWcLqu+Jqk1\n8x2oWjX2aH8fYihE2frsPVv1xKljpUsFx/qNAATSzDFaywkEzstZ2M5N2urfvFMoCfY0UuVaddLY\nYWbhKQsRwO+PqDZBiLGoLNCampdEQwP9fQ2CIGBfs4bo2BjRoaFZ36021DHc3YUUiaSckEuBw2kh\nFNSnIBqAubYWU2XlLDu7Vo1dOb2VbVwawa6EYOql7EBmO7tWG3vg/DkQhNQmUWI2JcGeht4CDcCx\ncRMIQkqwJ+Ii4WAspRXnInzjBlI0uqRHTr01dgDnlq0A+M+ckr9bY6ijEua4FPZ1BYfLgiRBKKjn\nBreOxLQvFSml1d/iP30KBIHKnTtyX6wDBoMBu9Osq0nK7PHIkVKXLyEl5JLLWk6xiUCA8I3r2Fas\nxOhUV1PmnUZJsKeRqsmuo8ZucpdhbWsndP0aYjg0I9BUaqpLbV8H/Trn
pOPcvFXe4M6clr97WqOm\nelk24yx2/Ho6etuXIS2e/bL8u2rZ4BJ+P6Hr17CtWImlYum6/zhdVgL+iK5lQxzrNyCGQqkG14FA\nRHUHqeClCyBJODdv0W08y42SYE/DaJS1Ez01dkiaYxIJgpcupR291WmqwUsXwGDAsQQhfgrKpqPn\nPJjKy3GvWU3o+jUSfr8mm2oiECB4+RLW1rYlM0dBWv0gHU8ujqRpzn/yhPzdGrJOA+fPgihmLIG8\nmDhcFuKxwruLzfrOpAkleOkCoigSCsRK9nUdKQn2OSyGdpJuZ1eEhBpTTCIYINzZKadML3KGYTqu\nRTDFAFTt3gWiSODc2Rmbqop58J8+CYkE7juWtlNXyiSl48nFXFWFbeUqQlcuE/f5CGrIOvWflk87\nzq3bdRuPGmYK5OnoSF6XPLlcukgoEEs+J/fpTZIkAufPYXS5sbaW6sNkoyTY5+BcBO3E1rECg8NB\n4MI5/Elh6VIj0E6evCVHTovVhNFk0NUkBVC1+w5AtrPPmCByv8z+48cAcO1cWsGu/EZ+nTc4985d\nIElMnzyhOutUiscJnj+L2ePB0pi9z+ti4FgkE6WtYwWhq1fwDcvlBdTMQ7S/j8TUFI6NmxZsXP1O\npzQzc1gM+7JgNOLYsJH42BjTQxOAOk3Vd+RNAMr27Mtxpb4IgiAnKekYCQFgb2nB7PEQPH+OgC+C\n2WLM2es0EQwQuHgBa0srlrrFzbqdy4wTWd95cN0hb3BTx0+qzjoNXrmMGA7j3Lp9SZLU0tGz92k6\n7n37QRQZPymH86oxTwbOlcwwaigJ9jnoHcuu4Eoen729Q7Oek43Y+BihK5exr1mL2ePRdSxqcLqt\nBANREon5ZWbzRRAEnFu3I4bDBLxBVRqa/9QpSCRwLbEZBtJ8DTpr7OaqamwrVjJ1U85tUGNbDiSj\niVzbltYMA/pnZCuU7doDRiMTV+X67K6yhedBkiR8bx0GoxHHpk26jmW5URLsc9C7NoaCa+cdGBxO\npsenEYTcx07fW0cAKNu7X9dxqEV5mUM6hrmBLJhEDISjkioNzX9CNsMstX0dwGQyYrObdDfFgLwe\nIkbZb5Jrk5dEEf/p0xgcjkUvApeJmdr0+s6D0e3GuXkLfp86v1P4+jWi/X24tu/E5F6aJLVipSTY\n57BYx06DxULZnXcRESzYzCzoLJMkCd+RNxFMpluiqcLiRMaAXJ0yXlkLgMO+cMxyIhggcOE81pYW\nLHX1uo5DLU63VXeNHeSNShHsuTT2wNkzxCfGcW3fiWBa+v7zi3WKBSjbt5+ISW66nsvvNHXwFQAq\n7r1P93EsN0qCfQ56t8hLp/yee4kYnViiC/dfjHR1EhsawrV9B0aHQ/dxqGExQv0ABJMJy54DAJgm\nBhe8dvrYMdkMs8RO03ScbiuxaIJoRJ+6OQrm6hrE2mYArGSfY0mSmPj1rwCofM9Duo5BLQ6XFUEA\n/3RY9+92btlGxCr38XQ4sod8xqd9+E8cx9LQuKTZx8VKSbDPYTGSUhSkihpEgxGzf4LIQH/W6xSn\nqXvfrTHDwOKE+ikIa2THl3TzEmI08zyLkQgTv3oawWymbP9duo9BLYsV+gkgJRtbx46+kfWa0LWr\nhG/ewLltO9amJt3HoAaDQcDhshLw6T8HBrOZmKMKczxE5NrlrNf53ngdKR6n/MB9S+48LkZKgn0O\n9mSjCb1NEEDKlmiNB/AefDXjNdGhQbyvH8JYXoFzw61zEC3m8Tuc/EqzfwLf4cxCbfKlF4hPTlL5\n7gcxV1XpPga1KCeXxbCzx9zVAMRPHSHS25PxmolfPwtA1Xsf0f35WnCVWQn49auboyBJEiEs2OIB\nxn72E6T4/JORJIp4XzuIYLFQtv/WKTvFREGC/R/+4R9473vfy/vf/34+//nP4/cvbGIoBpTO7Ho7\nT2FG+7WbJbxvHCLS2zvrc0kUGXryO0ixGLUf+/gtsacqLKbGrmyaNqJMPv+bVL0QhbjPx+RvnsXo\nclP50MO6P18Li1E3R8E/HUEQwBIPMfqTH837PNLbQ/D8Wexr1t7yZhIutxVRlHR3pkfCcRIJCWeZ\njUh3FxPP/XreNVMvv0hsbBT37r0YHaXaMGooSLDfddddPPvsszz99NO0tbXxzW9+U69x3VIcLquu\njSYUFOHg2b0NKRql/2tfJj41lfp88sXnCd+4jnvXbjmJ5RaSciIvgkBTNovqbZuIjY4y+sMfzGqb\nN/7M04jhMFXve/8t8zEoLKZgD/giuMpsODdsIHjh/KwKoHHvFEPffRK49do6LF6yljJpmH0fAAAa\nGklEQVSvVWtXYKyoYPyZp4mklbj2nznN6I/+A2N5OdXve7+uz17OFCTY9+/fn4ru2LZtG0PJkqzF\njtNlIREXCQVjun6vsohrNq+j5kMfJj4xQf/XvkLg/Dkmnv8N47/4GUa3m9qPfULX5+aDEuq3GCYp\nZR4aH3svloZGpl55iYGvfYXg1Sv0f+0reF99GXNdHRX33Kv7s7WSEmg6z0MiIRLwR3G5rdR8+CMg\nCAz809cY/fF/ELx0kZ6//SsiXZ2U7b8zVV/mVuJMxpj7dbazKxuFu8pJ3Sc/BYkEg//yTbyHXmP6\nxDEGv/UNBLOZpj/5Auaqal2fvZzR7az/k5/8hEceufWahR64ymwA+KZCGC36uSHSC4BVvPcRosPD\n+N58nf6vfEm+QBCo/cSnMLrduj2zEBwuK36f/pEQQX8Um92EraaKlv/z/2Hwm/9E4NxZAufOAnIr\nvdqPfeKWmqIUUmGfOgs0xTnvKrNia22j/vd+n7Gf/pjJ559j8vnnAKj+4G9R9fCjt4WzcLGcyOm1\nk1ybtlF+zwG8h15j+KknU9c0/OGfYOtYoetzlzs535zHH3+csbGxeX9/4oknuP/++wH4xje+gdls\n5rHHHlP9YI/n9hBemahvLOP8yX68UyHWbtQvfjoWkW3JbR3VWG1map74Y3qb5DR5Z1srrlUrsdXn\n97zFmM/KagcTowHKy+w5U//V4vG4CQailFfak2N2U/fXf0H3975P4GYnTR98P+Vbt9xSYZY+l5Ik\nYbYYiYRius5xKOmU9tSV4fG48Tz2IB0P3sfwiy8x+tobNH3wfVTv26t6nItNJCg7NRNxUfNzF7pe\nTMjmzqaWSjweNzX/+fP4H32IYE8Pwd4+XCtX4jlwd/4D12mcxUbOt/XJJ59c8POf//znvPbaazz1\n1FOaHjw6Oq3p+qVEMMpCxTcZ0nWck+MBzBYjvukwJGOCHe95FAAJmAam83iex+NelPlUmh50dY5T\nWV24rdvjcTPQP0UkHMdqM80as/PhD+AEYsDY2K1zwmeaS4fLwtSUvmuht2cSAKNZmPW9pt1307D7\nbkQWfkcW6zfPRizp4B4dntb03FzjHB2SP4snEjPXVTVgqGrAtW2PfM0S/DuXej7zRe3mU5Cd4dCh\nQ3z729/mG9/4BhaL+qbEtzv
KsdM7FdL1ewMamzffapyL0CpwOmnaUcxdxYDLbSUcjJGI61c3J6Ch\nyuftgMNpwWAQdDfF+DWUsS6hnoLO13/zN39DLBbj05/+NABbt27lL//yL/UY1y1FKUbkndRPsMfj\nCcKhONW1Lt2+c7FZjIgQxWbvLi8ewZ6ejVxWoU9dfH9qgysOgSYnKVkWJSrGZjdhNqtr7F5CHQUJ\n9hdeeEGvcdxWKCnUemrsWhpL3C4ojkM9X+Zpb1JTLRKBBmkRIdN6CnZlHopng3O5rQwP+BBFCYNB\nHx+IPKfFMwfFQinzNAMGg4DLbcWno8ZejEdOd1Lo6Bnipphi3MUk0Bahbo5/OoLJbMBqu/WRP2px\nlVmRJHRrbB2NxIlFE0VjjiomSoI9C64yG9O+sG71yFM2VZV9HW8HFG1y2qtfyGNRmmIWySTlcltv\ni1BGtSjzoFcIbDEqO8VCSbBnwVWe1E50SkyZidctHuep1WbCYjWltGw9mPZGVNWjv53Q2yQVi8n+\nlmIywwC43PJ49drgis2BXEyUBHsWUtqqTkJN0XqLSVMFKCu3Me0N61Zewe8L43RbMRqLZ+nNJOfo\nu8kXm0Bz6Zx9qrbBRgntFM/btcS4dV7ExSrYXeVW4jGRcKjw8gpiQiQwHSk6TdWuc6hfSqAVkQMZ\n0kwxemvsRTYPxUBJsGfBlXIc6qOx+7xhLFYjVlv2ZgK3I8pGpMcG5/OGkaSZTbNYUJp769Vowl+E\nDmSYEcC6bXAlG/uiURLsWdDz2ClJEtPeMGXl+oTKLSXuVN2cwoWaEj7qKrJTC8gbXGA6qkuS0kyo\nY3EJNLtDPrnodYpN+Z2KKKCgWCgJ9iwojiI9NPZwKEY8JhadGQbSNXYdBHsyfLTYNHYgFb+uh8/F\nX6Q2doNB55PLdASL1ahbHaISM5QEexasNhNWm4lpHbSTYrWvw8yY9Qh59E4GgeJKylFwV+h3cim2\nrNN0nGU2gv4ooljYyUWSJDnkswjXQjFQEuwLUF5h10VTTQn2Isyw01ewh2Z9ZzFRVj5TyrlQ/NMR\nrDYTZkvxaaoutz5hwJFwnGgkUco6XSRKgn0ByirtRCMJIuHCOtQrWl4xCjRZABl1MUEUsynGrZhi\nCtzgZE01UnRmGAW9fE/KWtCrREOJ2ZQE+wKUJxddoTZFRRiUFaFgFwQBV5lVN429WDXVMp1MMak0\n+iLc3CDNmV7gelBOPiWNfXEoCfYFKK9MCvYCtZNitrGDvCEVenKRJAnvVKho58DhtGA0GZj2FmaK\nmYlhL855KEu+E4XWUVI2yJLGvjiUBPsCpDT2As0QPm84lZ5fjLh0iIwJh2JFrakKgoC73Fawxp4K\ndSxSU0x5pbwWCq18OqOxlwT7YlAS7AugaCeFRMYoMezFqqmCPsdvRaAVW1JOOmXlNiLheEEnF8W2\nrJwGiw1XmQ1B0FFjL+L34namJNgXQA+NPZTsvFPMtsRULHsBgl0xRxVzeJvyGxZijil2wW40GnCX\n23TR2F1lVoymkghaDEqzugDu8qR2UsDxu9jt66BPyGOqDnt5cZogANzJzOFC1oMSy1+sgh3ksYcC\nMaKR/E4uibiI3xcpaeuLSEmwL4DRaKCswo53In/tRLElLgvBXsDJxZ/snFTM86BHZIx3MoTdaS5a\nfwvM2MXznQdlHZXs64tHSbDnoKLKTjgUy7u64XLQ2O0OczIiJH9fQzE2sZ7LzMklv40+kRCZ9oYp\nr3ToOawlRzlt5NsTuBTquPgUpDZ89atf5eWXX8ZgMFBdXc0Xv/hFPB6PXmO7LSivcsCNCbyTIWx2\n7ZUZZ2LYi1c7EQQBd4Gx7FMTQSxWE3ZHcVW3TCelqeY5D9PJ6pbFbIaBdI09T8E+mXwninwebmcK\n0tg/85nP8Mtf/pJf/OIX3HvvvXz961/Xa1y3DRVV8uKbGg/mdf+Mxl68tmWQtVUlZFEroijhnQxR\nU+sqqlZwc0nVD8rTBKGY9IpesCshjwVr7MU9D7czBQl2p9OZ+u9QKITBsPwsOxVV8rF5ajI/we7z\nhrHZzUWZbZmOklKfz8s87Q0jJiSqa525L77NcZfbknXltXeUUtaQoiwUKwVr7KnkpJIpZrEoWNp8\n+ctf5umnn8btdvPUU0/pMabbivKkYM/HgSpJEn5vmOpal97DWnIqq+V5mBwPUFOn7d+jnHZqlsE8\nlFXYGBv2EwxENdcR9y2T+ihmsxGny5J3LLtvKoTZYszLtFlCHTkF++OPP87Y2Ni8vz/xxBPcf//9\nPPHEEzzxxBN861vf4nvf+x6f//znVT3Y43FrH+0toL2jGrPFiN8X0TxmnzdEIiFRU+ta9H/vYn9/\n+4oa3uQ60VBC87OuXxgBWJJ50IOFxljXUM7NK2MYMWj+twT9sgN+5eparLbCT3C3ci6ra130dE5Q\nWenAZDIueG36OCVJwucNU1XjpLa2bLGHqYliWJtqybm6nnzySVVf9Oijj/IHf/AHqgX76Oi0qutu\nJR6Pm7ExP+UVdsZH/YyM+DTZiHs7JwCwuyyL+u/1eNyLPp8Gs/zv7uuZ1Pysvp5JAKprF3+chZJr\nLk0W2dzY0zWOzaVN4xwdnsbhtOCbDkGB07AUv/lCOJwWkODm9bHUaS4Tc8cZDESJRRM4Fvmd0Mqt\nnk+1qN18CjKKd3d3p/775ZdfZsWKFYV83W1LeZWdeEzU3OtxYjQAQLWn+G3LTpcFs8XI5HhA871T\n40EEAapqijvMD/KPZU8kRPy+cNE7ThXyLQZWcpwuDQWdB7/0pS/R2dmJwWCgsbGR//bf/pte47qt\nSDlQJ0Ka4rAnxmQhWFlT/IJdEAQqaxyMDfkRRVGTo3xyIoi73JbzyF4MKGtB6wbnm1oeoY4KqVh2\njQ7UkuN0aShIsP/jP/6jXuO4rSlXQh4ngjS3V6q+b2IsgMEgLJuXubLaycjANN7J8ILH73TCoRjh\nYIy6huVhv3SX2zBbjIyPaBPsqVICRR4Ro1Be0thva5ZffOIiUJFHZIwkSUyOBamodmA0Lo9pTkXG\njKkXalMTyRA/lRvB7Y4gCFTXOpmaCBKPq4/pXy4x7AqKxq1VY1fWTrGHfN7uLA+Js8ikkpQ0xLL7\nfRFi0QRVy8AMo1BZo5gh1M+DEuqobI7LgSqPC0mCyTH186AIwOUi2K02M1abSXNew9iwH4vVVNQl\nNoqBkmBXgdVmxuYwa9LYFcfpcnAYKlRWy5uUFvuysgksF40dZpzhym+shuWmsYN8gvNNhojH1J1c\nYtEEUxMhauqKOwO5GCgJdpVUVNnxTYVIJERV1yuO06plEBGj4C63YTQZNGmqisau1iZfDCgJZ+Oj\nftX3eCdDOFyWos9ATsdT70aSYFzlBqfM13JIVLvdKQl2lVRUOpAk9WFuyykiRsFgEKiosjM1HlSd\nUj81EcRqMy2rLEPFvKbWgRoJx5n2qnc4Fws19bJDfHRIXfz32LAs2Ks1Zi6X0E5JsKsk
PTJGDROj\nAYwmw7Lz/lfWOInHRVWVHhMJEd9UmIpqx7I6elttJtxlVtWmGEXw1TbcXpmWheKplwW0WsE+PlLS\n2JeKkmBXiWJSGVOxiEVRYmo8SGW1A4Nh+Qg0SK8Zk3uD802FEEWJymXkOFWoqnURDEQJBaM5rx0Z\n9AFQu0xCPhUqqx2YTAZNGrvBIKSc8CUWj5JgV0ldo6xtDfX7cl477Q0Rj4vLKiJGIeVAVWFnV65Z\nTo5TBcWBqsYcMzwgrxllDS0XDAYD1XUuJsdyh36Kosj4aICqGueyCf+9nSnNsErsDgsVVXaGB3yI\n4sL25YlRWaAtJ8epwkzIo3qBphzZlxNqHaiSJDEyMI3TbcHpLu6a/Jnw1LkRRSnnBuedCJGIiyX7\n+hJREuwaqG8qJxZN5EzQmXGcLj9NtbzSjsEgpOylCzHQO4XBIFDXWL4EI1taUiGPOQRaYDpCMBBd\ndvZ1BbV29jHFvl4S7EtCSbBroK5ZMcd4F7wuFeq4DE0xRqOB2sYyxob9RMLZ+8DGonHGhvx46t2Y\nLcVfI2Yu5VV2jEYhZ6jfyKDiOF1e9nUFj8rIGCUipuQ4XRpKgl0D9U2y5jnUl93OLkkSw33eZZ1d\n19xWgSTBQM9U1muG+mWTVUPL8tPWQbYvV9Y4mRgLLGiaW672dYXKGtmBOja08AkuFepYEuxLQkmw\na6Cy2oHFalpQY58cDzLti9C6onJZhfiloxRC6+uazHrNYK88R40tFUsypltBtcdJIi4u6G9QNHZF\ns11uGAwGqmtdTIwFsjpQJUlibMSPu9ymS4ORErkpCXYNCIJAfVMZvqkwwUDmMLeeG+MAtKyoXsqh\nLSm1jWWYzAb6urNr7AO98mf1zctTYwdobJM3uO7r4xk/F0WJ0aFpKmtkhWC54ql3IYpS1rj+oD9K\nOBgr2deXkJJg10h9k3ykHs4S9thzU+6a1LqiasnGtNQYjQYaWyuYGg/iz9B8JB5PMDLgo6bOtaw1\ntPZV1QgCdF6d3zoS5HIKsWhi2TpOFXLZ2ZV3Yrmao25HSoJdI3WKnT2DOSYaiTPY68VT75Jbhy1j\nmpPaan8Gc8zIwDSJxPK1ryvY7GYaWysYGZzOuMEp9vXl6jhV8CT/fdlMc9cuDgOwan3tko3pnU5J\nsGukrtGNIGROVOrrmkQUJVqXsRlGoSkp2Pu657/Mg0kzzHK2ryt0rKkBoCuD1t6f7PW63DXVqhon\nVR4nXdfG55kofd4Q/d1T1DeXL9tggtuRkmDXiNliorrWxeigj3BodrhfygyzcvmaYRSqa53YHGb6\nuybnFQQbSDpOl7vGDtCxWhbsN6+Ozvq7fzrCjUujVFTZl71tWRAENmxrQBQlrpwbmvXZhdMDAKze\nUNLWlxJdBPt3vvMd1q1bx9RUdmfacmLNxjoSCYmTR2aaeUuSRM/NcWx207K3qYL8Mje3VRDwR2cV\nRgsGogz1e6msdmB3LG9zFICrzEZtg5uBnqlZG/25432IosTWPS3LNjoqnTUb6zCaDFw6Mzhroz9/\nsh+DQWDlOs8tHN07j4IF+9DQEIcPH6axsVGP8RQFm3Y04S6zcu5Ef6rK4cRogMB0lJaOqmVX+Csb\nTcmwx7PH+1N/e/2Fa8RjIhu3v3PWQ8eaGiRpJjomEo5z8fQADqeFNRvrbvHolgarzcyqdR68k7Lp\nBeTQ38E+Ly0dle+ITf52omDB/nd/93f82Z/9mR5jKRqMJgO77ulATEgcfb0T72SIF56+CEB78mj+\nTmD1+lqqPE4unhrg3Ik+blwe4eaVUeqby9m0s+lWD2/J6Fgja6PnT8kb/cUzA0QjCTbf0YTJtPyy\nbrOxYZu8mV86M5A0ywwCsGrDO2Nzu50oKBbtlVdeoaGhgbVr1+o1nqJhzca6/7+9u4tpMkvjAP6v\ntIDDOKaK06DD6CwOG4gFRhPdgURtbeSjVlFRboymDUZvrCB+hKJGA8aAqJekxAjRZDTK2myI0Wym\nWiEIIsYFN6Q6bHAcjAVRMhSj9OvZC9dO2NJqzOgp5fndnSYn+acfT09P3/c56Or4DY/+PYBfe19g\n7I0H6Uu/mVI/OWXRUuQVKPH3c/fQ+nMvZNFSREmnQZX31ymx/fCOfPYXSPxOjt/6hvGT+Q6ipNMg\ni46aUr9aAEAx7yvI47/Af+zP8fiXFng8Psiio/Dd95F/MUG4eW9h1+v1GBoK/Me/uLgYZrMZZ8+e\n9T/2oafqRAKJRIK/rfwLrl56ALfLixU5yf4Vy1QyY2Yscjcq8Y+f/gXXmAc/qpIi6uDqD5W3KQ29\nPQPobP0Vvw+/RvrSRMTERs6pUR9CIpFg8Y/z0fLPX/DVzFjMmhOHH5Z+G1HHAU4WEvrIavzo0SPo\n9XrExsa+7Y8yMACFQoHLly9j9mz+hmaMMVE+urD/P7VaDYvFgpkzI/8SN8YYC2d/2nXsEolkSm3F\nMMZYuPrTVuyMMcbCA995yhhjEYYLO2OMRRgu7IwxFmGEFXa73Y7CwkLk5+ejoKAADx48EBXlvc6f\nP4+cnBzodDrU1NSIjhNUuPfsqa6uRm5uLtatW4ddu3ZhdPT9B2J/Ts3NzcjJyUF2djbq6upEx5mQ\nw+HA1q1bkZeXB51Oh3PnzomOFJTP58P69euxc+dO0VGCcjqdMBqNyM3NhVarRVdXl+hIE2poaMCa\nNWug0+lQWloKl2vig378SBCDwUAtLS1ERGSz2WjLli2iooTU3t5Oer2e3G43ERG9ePFCcKKJPXv2\njAwGA6lUKhoeHhYdZ0Ktra3k9XqJiOjEiRNUU1MjONEfvF4vaTQa6u/vJ5fLRWvXrqXe3l7RsQIM\nDg5ST08PERGNjo7S6tWrwzInEVF9fT2VlpbSjh07REcJ6sCBA9TY2EhERG63m5xOp+BEgRwOB6nV\nahobGyMiot27d5PFYgk5R9iKXSKRwOl8e+KK0+mEQhGe/SQuXLiA7du3Qyp9e/fcrFnh2ZJ3MvTs\nyczMxLRpb99yGRkZcDgc75nx+XR3d2P+/PmYN28eZDIZtFotrFar6FgB5syZg5SUFABAXFwckpKS\nMDg4KDhVIIfDgVu3bmHTpk2iowQ1OjqKzs5ObNy4EQAglUrx5Zfh2WLZ5/Ph9evX8Hg8ePPmDb7+\nOnQbZGH3+paVlaGoqAhVVVUgIly8eFFUlJAeP36Mzs5OnD59GjExMdi/fz+USqXoWONMxp49jY2N\n0Gq1omP4DQwMICEhwT9WKBRhvT0IAP39/bDb7UhLSxMdJcC7hca7xVs46u/vh1wuR1lZGex2OxYt\nWoTy8nLExobXgSAKhQJ6vR4rV67E9OnTkZWVhczMzJBzPmlhD9ZnpqSkBLdv30Z5eTk0Gg2uX78O\nk8mE+vr6TxknqFD9cLxeL0ZGRnDp0iV0d3ejuLhYyEpusvTsCfWaq9VqAEBtbS1kMhl0Ot3njheU\nyOfsY7x69QpGoxEmkwlxcXGi44xjs9kQHx+
PlJQU3LlzR3ScoDweD3p6enD48GEolUocO3YMdXV1\nMBqNoqONMzIyAqvVips3b2LGjBkwGo1oamoK/fn55BtEQSxZsmTcePHixYKShFZUVEQdHR3+sUaj\noZcvXwpMNN7Dhw8pMzOT1Go1qVQqSk1NJZVKRUNDQ6KjTejKlStUWFjo3y8MF/fv3yeDweAfm81m\nMpvNAhMF53a7yWAwUENDg+goEzp58iStWLGC1Go1ZWVlUUZGBu3bt090rADPnz8ntVrtH9+9ezcs\n/w+4du0alZeX+8cWi4WOHj0aco6wPXaFQoGOjg4AQFtbGxYsWCAqSkgajQZtbW0AgL6+Png8Hsjl\ncsGp/pCcnIzW1lZYrVbcuHEDCoUCFoslLBuxNTc348yZM6itrUV0dHgdvKBUKvHkyRM8ffoULpcL\nV69exapVq0THmpDJZMLChQuxbds20VEmtGfPHthsNlitVpw6dQrLli1DdXW16FgB4uPjkZCQgL6+\nPgBAe3s7kpKSBKcKNHfuXHR1dWFsbAxE9EE5he2xV1RUoLKyEj6fDzExMaioqBAVJaQNGzbAZDJB\np9NBJpOhqqpKdKSQwrlnT2VlJdxuNwwGAwAgPT0dR44cERvqf6KionDo0CEYDAYQEQoKCsLyQ37v\n3j00NTUhOTkZ+fn5kEgkKCkpwfLly0VHm5QOHjyIvXv3wuPxIDExEcePHxcdKUBaWhqys7ORn58P\nqVSK1NRUbN68OeQc7hXDGGMRhu88ZYyxCMOFnTHGIgwXdsYYizBc2BljLMJwYWeMsQjDhZ0xxiIM\nF3bGGIswXNgZYyzC/Be68EGj7hfMcwAAAABJRU5ErkJggg==\n",
- "text/plain": [
- "\u003cmatplotlib.figure.Figure at 0x7f385e198650\u003e"
- ]
- },
- "metadata": {
- "tags": []
- },
- "output_type": "display_data"
- }
- ],
- "source": [
- "def f(x):\n",
- " return tf.square(tf.sin(x))\n",
- "\n",
- "def grad(f):\n",
- " return lambda x: tfe.gradients_function(f)(x)[0]\n",
- "\n",
- "x = tf.lin_space(-2*pi, 2*pi, 100) # 100 points between -2π and +2π\n",
- "\n",
- "import matplotlib.pyplot as plt\n",
- "\n",
- "plt.plot(x, f(x), label=\"f\")\n",
- "plt.plot(x, grad(f)(x), label=\"first derivative\")\n",
- "plt.plot(x, grad(grad(f))(x), label=\"second derivative\")\n",
- "plt.plot(x, grad(grad(grad(f)))(x), label=\"third derivative\")\n",
- "plt.legend()\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "-39gouo7mtgu"
- },
- "source": [
- "## Gradient tapes\n",
- "\n",
- "Every differentiable TensorFlow operation has an associated gradient function. For example, the gradient function of `tf.square(x)` would be a function that returns `2.0 * x`. To compute the gradient of a user-defined function (like `f(x)` in the example above), TensorFlow first \"records\" all the operations applied to compute the output of the function. We call this record a \"tape\". It then uses that tape and the gradients functions associated with each primitive operation to compute the gradients of the user-defined function using [reverse mode differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation).\n",
- "\n",
- "Since operations are recorded as they are executed, Python control flow (using `if`s and `while`s for example) is naturally handled:\n",
- "\n"
- ]
- },
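- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {},
- "outputs": [],
- "source": [
- "# A minimal sanity check of the claim above (assumes `tfe` from the setup\n",
- "# cell): the gradient function of tf.square returns 2.0 * x, so its value\n",
- "# at x = 3.0 is 6.0.\n",
- "square_grad = tfe.gradients_function(tf.square)\n",
- "assert square_grad(3.0)[0].numpy() == 6.0"
- ]
- },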
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "MH0UfjympWf7"
- },
- "outputs": [],
- "source": [
- "def f(x, y):\n",
- " output = 1\n",
- " for i in range(y):\n",
- " output = tf.multiply(output, x)\n",
- " return output\n",
- "\n",
- "def g(x, y):\n",
- " # Return the gradient of `f` with respect to it's first parameter\n",
- " return tfe.gradients_function(f)(x, y)[0]\n",
- "\n",
- "assert f(3.0, 2).numpy() == 9.0 # f(x, 2) is essentially x * x\n",
- "assert g(3.0, 2).numpy() == 6.0 # And its gradient will be 2 * x\n",
- "assert f(4.0, 3).numpy() == 64.0 # f(x, 3) is essentially x * x * x\n",
- "assert g(4.0, 3).numpy() == 48.0 # And its gradient will be 3 * x * x"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "aNmR5-jhpX2t"
- },
- "source": [
- "At times it may be inconvenient to encapsulate computation of interest into a function. For example, if you want the gradient of the output with respect to intermediate values computed in the function. In such cases, the slightly more verbose but explicit [tf.GradientTape](https://www.tensorflow.org/api_docs/python/tf/GradientTape) context is useful. All computation inside the context of a `tf.GradientTape` is \"recorded\".\n",
- "\n",
- "For example:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "bAFeIE8EuVIq"
- },
- "outputs": [],
- "source": [
- "x = tf.ones((2, 2))\n",
- " \n",
- "# TODO(b/78880779): Remove the 'persistent=True' argument and use\n",
- "# a single t.gradient() call when the bug is resolved.\n",
- "with tf.GradientTape(persistent=True) as t:\n",
- " # TODO(ashankar): Explain with \"watch\" argument better?\n",
- " t.watch(x)\n",
- " y = tf.reduce_sum(x)\n",
- " z = tf.multiply(y, y)\n",
- "\n",
- "# Use the same tape to compute the derivative of z with respect to the\n",
- "# intermediate value y.\n",
- "dz_dy = t.gradient(z, y)\n",
- "assert dz_dy.numpy() == 8.0\n",
- "\n",
- "# Derivative of z with respect to the original input tensor x\n",
- "dz_dx = t.gradient(z, x)\n",
- "for i in [0, 1]:\n",
- " for j in [0, 1]:\n",
- " assert dz_dx[i][j].numpy() == 8.0"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "DK05KXrAAld3"
- },
- "source": [
- "### Higher-order gradients\n",
- "\n",
- "Operations inside of the `GradientTape` context manager are recorded for automatic differentiation. If gradients are computed in that context, then the gradient computation is recorded as well. As a result, the exact same API works for higher-order gradients as well. For example:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "cPQgthZ7ugRJ"
- },
- "outputs": [],
- "source": [
- "# TODO(ashankar): Should we use the persistent tape here instead? Follow up on Tom and Alex's discussion\n",
- "\n",
- "x = tf.constant(1.0) # Convert the Python 1.0 to a Tensor object\n",
- "\n",
- "with tf.GradientTape() as t:\n",
- " with tf.GradientTape() as t2:\n",
- " t2.watch(x)\n",
- " y = x * x * x\n",
- " # Compute the gradient inside the 't' context manager\n",
- " # which means the gradient computation is differentiable as well.\n",
- " dy_dx = t2.gradient(y, x)\n",
- "d2y_dx2 = t.gradient(dy_dx, x)\n",
- "\n",
- "assert dy_dx.numpy() == 3.0\n",
- "assert d2y_dx2.numpy() == 6.0"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "4U1KKzUpNl58"
- },
- "source": [
- "## Next Steps\n",
- "\n",
- "In this tutorial we covered gradient computation in TensorFlow. With that we have enough of the primitives required to build an train neural networks, which we will cover in the [next tutorial](https://github.com/tensorflow/models/tree/master/official/contrib/eager/python/examples/notebooks/3_neural_networks.ipynb)."
- ]
- }
- ],
- "metadata": {
- "colab": {
- "collapsed_sections": [],
- "default_view": {},
- "name": "Automatic Differentiation",
- "provenance": [],
- "version": "0.3.2",
- "views": {}
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/tensorflow/contrib/eager/python/examples/notebooks/3_datasets.ipynb b/tensorflow/contrib/eager/python/examples/notebooks/3_datasets.ipynb
deleted file mode 100644
index d268cbcd91..0000000000
--- a/tensorflow/contrib/eager/python/examples/notebooks/3_datasets.ipynb
+++ /dev/null
@@ -1,209 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "U9i2Dsh-ziXr"
- },
- "source": [
- "# Eager Execution Tutorial: Importing Data\n",
- "\n",
- "This notebook demonstrates the use of the [`tf.data.Dataset` API](https://www.tensorflow.org/guide/datasets) to build pipelines to feed data to your program. It covers:\n",
- "\n",
- "* Creating a `Dataset`.\n",
- "* Iteration over a `Dataset` with eager execution enabled.\n",
- "\n",
- "We recommend using the `Dataset`s API for building performant, complex input pipelines from simple, re-usable pieces that will feed your model's training or evaluation loops.\n",
- "\n",
- "If you're familiar with TensorFlow graphs, the API for constructing the `Dataset` object remains exactly the same when eager execution is enabled, but the process of iterating over elements of the dataset is slightly simpler.\n",
- "You can use Python iteration over the `tf.data.Dataset` object and do not need to explicitly create an `tf.data.Iterator` object.\n",
- "As a result, the discussion on iterators in the [TensorFlow Guide](https://www.tensorflow.org/guide/datasets) is not relevant when eager execution is enabled."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "z1JcS5iBXMRO"
- },
- "source": [
- "# Setup: Enable eager execution\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "cellView": "code",
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "RlIWhyeLoYnG"
- },
- "outputs": [],
- "source": [
- "# Import TensorFlow.\n",
- "import tensorflow as tf\n",
- "\n",
- "# Enable eager execution\n",
- "tf.enable_eager_execution()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "H9UySOPLXdaw"
- },
- "source": [
- "# Step 1: Create a source `Dataset`\n",
- "\n",
- "Create a _source_ dataset using one of the factory functions like [`Dataset.from_tensors`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensors), [`Dataset.from_tensor_slices`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensor_slices) or using objects that read from files like [`TextLineDataset`](https://www.tensorflow.org/api_docs/python/tf/data/TextLineDataset) or [`TFRecordDataset`](https://www.tensorflow.org/api_docs/python/tf/data/TFRecordDataset). See the [TensorFlow Guide](https://www.tensorflow.org/guide/datasets#reading_input_data) for more information."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "cellView": "code",
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "WPTUfGq6kJ5w"
- },
- "outputs": [],
- "source": [
- "ds_tensors = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6])\n",
- "\n",
- "# Create a CSV file\n",
- "import tempfile\n",
- "_, filename = tempfile.mkstemp()\n",
- "with open(filename, 'w') as f:\n",
- " f.write(\"\"\"Line 1\n",
- "Line 2\n",
- "Line 3\n",
- " \"\"\")\n",
- "ds_file = tf.data.TextLineDataset(filename)\n"
- ]
- },
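- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {},
- "outputs": [],
- "source": [
- "# A minimal sketch of the difference between the two tensor factories\n",
- "# mentioned above: from_tensors treats its argument as a single element,\n",
- "# while from_tensor_slices yields one element per slice along the first\n",
- "# dimension.\n",
- "ds_whole = tf.data.Dataset.from_tensors([1, 2, 3])  # one element: [1 2 3]\n",
- "ds_slices = tf.data.Dataset.from_tensor_slices([1, 2, 3])  # three elements: 1, 2, 3"
- ]
- },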
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "twBfWd5xyu_d"
- },
- "source": [
- "# Step 2: Apply transformations\n",
- "\n",
- "Use the transformations functions like [`map`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map), [`batch`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#batch), [`shuffle`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle) etc. to apply transformations to the records of the dataset. See the [API documentation for `tf.data.Dataset`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) for details."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "cellView": "code",
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "ngUe237Wt48W"
- },
- "outputs": [],
- "source": [
- "ds_tensors = ds_tensors.map(tf.square).shuffle(2).batch(2)\n",
- "ds_file = ds_file.batch(2)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "IDY4WsYRhP81"
- },
- "source": [
- "# Step 3: Iterate\n",
- "\n",
- "When eager execution is enabled `Dataset` objects support iteration.\n",
- "If you're familiar with the use of `Dataset`s in TensorFlow graphs, note that there is no need for calls to `Dataset.make_one_shot_iterator()` or `get_next()` calls."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- },
- "base_uri": "https://localhost:8080/",
- "height": 153
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 388,
- "status": "ok",
- "timestamp": 1525154629129,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
- },
- "id": "lCUWzso6mbqR",
- "outputId": "8e4b0298-d27d-4ac7-e26a-ef94af0594ec"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Elements of ds_tensors:\n",
- "tf.Tensor([1 9], shape=(2,), dtype=int32)\n",
- "tf.Tensor([16 25], shape=(2,), dtype=int32)\n",
- "tf.Tensor([ 4 36], shape=(2,), dtype=int32)\n",
- "\n",
- "Elements in ds_file:\n",
- "tf.Tensor(['Line 1' 'Line 2'], shape=(2,), dtype=string)\n",
- "tf.Tensor(['Line 3' ' '], shape=(2,), dtype=string)\n"
- ]
- }
- ],
- "source": [
- "print('Elements of ds_tensors:')\n",
- "for x in ds_tensors:\n",
- " print(x)\n",
- "\n",
- "print('\\nElements in ds_file:')\n",
- "for x in ds_file:\n",
- " print(x)"
- ]
- }
- ],
- "metadata": {
- "colab": {
- "collapsed_sections": [],
- "default_view": {},
- "name": "Eager Execution Tutorial: Importing Data",
- "provenance": [],
- "version": "0.3.2",
- "views": {}
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/tensorflow/contrib/eager/python/examples/notebooks/3_training_models.ipynb b/tensorflow/contrib/eager/python/examples/notebooks/3_training_models.ipynb
deleted file mode 100644
index 84f1d031d4..0000000000
--- a/tensorflow/contrib/eager/python/examples/notebooks/3_training_models.ipynb
+++ /dev/null
@@ -1,485 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "k2o3TTG4TFpt"
- },
- "source": [
- "# Training Models\n",
- "\n",
- "In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block for machine learning.\n",
- "In this tutorial we will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.\n",
- "\n",
- "TensorFlow also includes a higher-level neural networks API (`tf.keras`) which provides useful abstractions to reduce boilerplate. We strongly recommend those higher level APIs for people working with neural networks. However, in this short tutorial we cover neural network training from first principles to establish a strong foundation."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "3LXMVuV0VhDr"
- },
- "source": [
- "## Setup"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "PJ64L90aVir3"
- },
- "outputs": [],
- "source": [
- "import tensorflow as tf\n",
- "tf.enable_eager_execution()\n",
- "tfe = tf.contrib.eager # Shorthand for some symbols"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "eMAWbDJFVmMk"
- },
- "source": [
- "## Variables\n",
- "\n",
- "Tensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language:\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "VkJwtLS_Jbn8"
- },
- "outputs": [],
- "source": [
- "# Using python state\n",
- "x = tf.zeros([10, 10])\n",
- "x += 2 # This is equivalent to x = x + 2, which does not mutate the original\n",
- " # value of x\n",
- "print(x)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "wfneTXy7JcUz"
- },
- "source": [
- "TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state. To represent weights in a model, for example, it's often convenient and efficient to use TensorFlow variables.\n",
- "\n",
- "A Variable is an object which stores a value and, when used in a TensorFlow computation, will implicitly read from this stored value. There are operations (`tf.assign_sub`, `tf.scatter_update`, etc) which manipulate the value stored in a TensorFlow variable."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "itxmrMil6DQi"
- },
- "outputs": [],
- "source": [
- "v = tfe.Variable(1.0)\n",
- "assert v.numpy() == 1.0\n",
- "\n",
- "# Re-assign the value\n",
- "v.assign(3.0)\n",
- "assert v.numpy() == 3.0\n",
- "\n",
- "# Use `v` in a TensorFlow operation like tf.square() and reassign\n",
- "v.assign(tf.square(v))\n",
- "assert v.numpy() == 9.0"
- ]
- },
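- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {},
- "outputs": [],
- "source": [
- "# A minimal sketch of one of the mutating operations mentioned above\n",
- "# (assumes `v` still holds 9.0 from the previous cell).\n",
- "v.assign_sub(4.0)  # In-place subtraction: v -= 4.0\n",
- "assert v.numpy() == 5.0"
- ]
- },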
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "-paSaeq1JzwC"
- },
- "source": [
- "Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient.\n",
- "\n",
- "Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable."
- ]
- },
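- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {},
- "outputs": [],
- "source": [
- "# A minimal sketch of the automatic tracing described above: unlike a plain\n",
- "# tensor, a Variable is watched by a GradientTape without an explicit\n",
- "# t.watch() call.\n",
- "w = tfe.Variable(2.0)\n",
- "with tf.GradientTape() as t:\n",
- "  l = w * w\n",
- "assert t.gradient(l, w).numpy() == 4.0  # d(w^2)/dw = 2 * w = 4.0"
- ]
- },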
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "BMiFcDzE7Qu3"
- },
- "source": [
- "## Example: Fitting a linear model\n",
- "\n",
- "Let's now put the few concepts we have so far ---`Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:\n",
- "\n",
- "1. Define the model.\n",
- "2. Define a loss function.\n",
- "3. Obtain training data.\n",
- "4. Run through the training data and use an \"optimizer\" to adjust the variables to fit the data.\n",
- "\n",
- "In this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "gFzH64Jn9PIm"
- },
- "source": [
- "### Define the model\n",
- "\n",
- "Let's define a simple class to encapsulate the variables and the computation."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "_WRu7Pze7wk8"
- },
- "outputs": [],
- "source": [
- "class Model(object):\n",
- " def __init__(self):\n",
- " # Initialize variable to (5.0, 0.0)\n",
- " # In practice, these should be initialized to random values.\n",
- " self.W = tfe.Variable(5.0)\n",
- " self.b = tfe.Variable(0.0)\n",
- " \n",
- " def __call__(self, x):\n",
- " return self.W * x + self.b\n",
- " \n",
- "model = Model()\n",
- "\n",
- "assert model(3.0).numpy() == 15.0"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "xa6j_yXa-j79"
- },
- "source": [
- "### Define a loss function\n",
- "\n",
- "A loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "Y0ysUFGY924U"
- },
- "outputs": [],
- "source": [
- "def loss(predicted_y, desired_y):\n",
- " return tf.reduce_mean(tf.square(predicted_y - desired_y))"
- ]
- },
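- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {},
- "outputs": [],
- "source": [
- "# A quick sanity check of the loss on hand-picked values:\n",
- "# mean((0 - 1)^2, (2 - 4)^2) = (1 + 4) / 2 = 2.5\n",
- "assert loss(tf.constant([0.0, 2.0]), tf.constant([1.0, 4.0])).numpy() == 2.5"
- ]
- },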
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "qutT_fkl_CBc"
- },
- "source": [
- "### Obtain training data\n",
- "\n",
- "Let's synthesize the training data with some noise."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "gxPTb-kt_N5m"
- },
- "outputs": [],
- "source": [
- "TRUE_W = 3.0\n",
- "TRUE_b = 2.0\n",
- "NUM_EXAMPLES = 1000\n",
- "\n",
- "inputs = tf.random_normal(shape=[NUM_EXAMPLES])\n",
- "noise = tf.random_normal(shape=[NUM_EXAMPLES])\n",
- "outputs = inputs * TRUE_W + TRUE_b + noise"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "-50nq-wPBsAW"
- },
- "source": [
- "Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- },
- "height": 293
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 1210,
- "status": "ok",
- "timestamp": 1527005898290,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
- },
- "id": "_eb83LtrB4nt",
- "outputId": "3873f508-72fb-41e7-a7f5-3f513deefe38"
- },
- "outputs": [
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXwAAAEDCAYAAAA2k7/eAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJztnXlgU1X2xz/pAhRautCWUsCwWVlcUHHGBUFQcSg7uM8P\nFLUICo4VpygObihI3UdmUHBB0IGZQbEgFNGCqKgMolV2pKylCy1pukDp+n5/3LxmaUsDTUjSns8/\nbZKXd09C+b7zvvfccw2apmkIgiAITR4/TwcgCIIgnB9E8AVBEJoJIviCIAjNBBF8QRCEZoIIviAI\nQjNBBF8QBKGZENDYE+Tk5JCUlER+fj7+/v7cdtttTJgwgcLCQhITEzl27BidOnXijTfeICQkxBUx\nC4IgCOeAobF1+Hl5eeTn59OrVy9OnjzJ2LFj+ec//8mnn35KWFgYCQkJLFy4kKKiIh5//HFXxS0I\ngiCcJY22dKKioujVqxcAbdq0oXv37uTm5pKWlsaYMWMAGDNmDF999VVjhxIEQRAagUs9/MzMTPbs\n2cNll13GiRMniIyMBNRFoaCgwJVDCYIgCGeJywT/5MmTPPLII8ycOZM2bdpgMBhcdWpBEATBBbhE\n8CsrK3nkkUcYNWoUN910EwDt2rUjPz8fUD5/REREg+eRtj6CIAjuo9FVOgAzZ86kR48e3HPPPTXP\nDR48mE8//ZRJkyaxcuVKbrzxxgbPYzAYyMsrdkVIbiUqKkTidCESp2vxhTh9IUbwrTidodGCv23b\nNlavXk1cXByjR4/GYDCQmJhIQkICjz76KJ988gmxsbG8+eabjR1KEARBaASNFvwrr7yS3bt31/na\n4sWLG3t6QRAEwUXISltBEIRmggi+IAhCM0EEXxAEoZkggi8IgtBMEMEXBEFoJojgC4IgNBNE8AVB\nEJoJIviCIAjNBBF8QRCEZoIIviAIQjNBBF8QBKGZIIIvCILQTBDBFwRBaCaI4AuCIDQTRPAFQRCa\nCSL4giAIzQQRfEEQhLOk0GTi84R7+XbIDXyecA+FBSZPh+QULtnTVhAEoTnx7YzHuDflUwyAlv4z\nizEwfNFiT4fVIJLhC4IgnCWhhw9hsPxusDz2BVwi+DNnzuTaa69lxIgRNc/Nnz+fAQMGMGbMGMaM\nGcM333zjiqEEQRA8TqHRiGb5XQMKjV08GI3zuMTSGTt2LOPHjycpKcnu+YkTJzJx4kRXDCEIguA1\nXJ/8OosxEHr4EIXGLlyf/JqnQ3IKlwh+v379OHbsWK3nNU2r42hBEATfJjQ8wic8e0fc6uF//PHH\njBo1iqeeeori4mJ3DiUIgiA0gNsE/+677+arr74iJSWFyMhI5s6d666hBEEQXMLRjAwW9u3FWmN7\nFvbtxeGMDE+H5FLcVpYZERFR8/vtt9/O5MmTnXpfVFSIu0JyKRKna5E4XYsvxOmNMb53xQhmZh1T\n5Zalx5h3ww08cfSop8NyGS4TfEe/Pi8vj6ioKAC+/PJL4uLinDpPXp73Wz9RUSESpwuROF2LL8Tp\nTTEWmkx8O+MxQg8fIjory67cMtZk8po4z4SzF0+XCP706dPZsmULZrOZG264gWnTprFlyxZ2796N\nn58fHTt25Pnnn3fFUIIgCC7FdhHVXFSZpcHyM8vGqWgKuETwX3311VrPjRs3zhWnFgRBcCu2i6ju\nBp4JDKR7QACZ4RH839dfezAy1yMrbQVBaNbYLqK6AOgaP4L4w7lMSt+NsXt3T4bmcqSXjiAIzRpf\nXUR1LojgC4LQrPHVRVTnglg6giA0WXy1jbG7kAxfEIQmi6+2MXYXIviCIDQZbGvqC41Ggg9k+GQb\nY3chgi8IQpPBMaOfE9vRrq7eV9oYuwsRfEEQfB49s/dbn2qX0V8QEcHiq/7odAWOyWRmxoyNHD7c\nFqOxkPffHwX4uzv884YIviAIPk2hycS/B1/HJVnH2In9StnK7heelWc/Y8ZGUlLGAwbS0zWmTFnO\n/PnD3RK3JxDBFwTBJzmakUHquOFE5mTTpbqaAcD1wDygQ1AQ1UOGnnVN/eHDbcHmHuHgwWDXBu1h\nRPAFQfBJUscNt3a2BJYDdwG9gRNDhp5TNY7RWEh6uvUeoWvXEhdG7HlE8AVB8BkKTSY2PDqVgB+/\nI8ZstvPrg1HCvz22I3ec42rZ5OTBwFKLh1/EggUjqapyTezegAi+IAhejz4pq23aQBuzmWHAAuz9\n+t8CA8mPH8Edya8RGn5uXS7tu7w3vS1aRfAFQfBq9ElZR/vmbuBZwGgwkN0hlqEr19C5a7dGjdXU\nJ22ltYIgCF7NtzMe4xKL2IPVvrkAuAgwjBzDpPTdjRZ7aPqTtiL4giB4NaGHD1GC1WDR7ZvktqFs\nbH85r2eMJiHhUwoKzI0ey2gstBtJJm0FQRDciF5u2anARGZ4BC169eIBlI3TBsuk7MbNPJ60Sdkv\nuQa279CApSxaNOasx7NdbNWhw0mGDn2P7OxImbQVBEFwF/rEbNba1cysqKjZSPyF6mo+GzWW0MOH\nOGHsUjMp62i/HD7cttZK2eTkwYSHh51xXEffftSopaxffyMAERHes/euKxDBFwTBK9D74HwO9u0R\nCs3E11FT71gzbzQW1RJvZ7L+ui4cTRWXCP7MmTP5+uuvadeuHatXrwagsLCQxMREjh07RqdOnXjj\njTcICXFuZ3VBEJo+u7Zt48vRQ+ladpqDBgNRrVtjAIqxL7fMrKfE0rFmPjl5EHfcsY2zFe+6LhxN\nFZcI/tixYxk/fjxJSUk1zy1cuJBrrrmGhIQEFi5cyDvvvMPjjz/uiuEEQWgCfDkmntllp5XMahpP\nnzyJBsQDy4BC/MhoFcawxcvqfH94eFit7P1cxLuuC0dTxSWC369fP44dO2b3XFpaGh999BEAY8aM\nYfz48SL4giCwa9s20sbG0+10qZ110x14JSwMf8LZZO7HKt6G0+Hs/8dSFi3q69S5z0W867pwNMS5\nzBV4A27z8E0mE5GRkQBERUVRUFDgrqEEQfAQZyN8+qTs6VUruUjTOIS9dZMNxAwczN8Pjyc9fXTN\n+87GUz8X8T4XzmWuwBvwuknbqCjf8PklTtcicbqW8xXn1Kmf2wlfy5bL+fe/76p1nPnECVbc1J/e\nmZmUAEOBpajOltHAAYOBsBtvZMz7i1g3ZZ2dLRMXV4qfXxUPPZTKwYPBdO1azIIF8UREnJ+Muq7v\nMisrHNu5gqyscJ/423Cb4Ldr1478/HwiIyPJy8sjIsK53ha+UAIVFeUbpVoSp2uROGuzb18QtsK3\nb18QeXnFtTL/+PIVzMjMtGuN0BUYDsxqFcRfjuQCUFEFs2dfT1mZ1ZaZPXsQ99+/qubCsnWrRlnZ\nUubNG+R2W6W+7zI21oTt/UlsbIFH/zacvdi4TPA1+65DDB48mE8//ZRJkyaxcuVKbrzxRlcNJQiC\nl1DfJKlueRgwcUH6FPBbb+fXtwF+A
7a0CuLmVevszlmXLVNX6aQnbRVfneh1ieBPnz6dLVu2YDab\nueGGG5g2bRqTJk3iL3/5C5988gmxsbG8+eabrhhKEAQvoi7hKzSZCN34D17hUfIoYS4VLKu29+t3\nderEHWnfOd3Vsq4Liyfr58/XXIGrcYngv/rqq3U+v3jxYlecXhAEL8VW+ApNJj57YAKl337NDSgp\n7mD5GY+yccotO1FNfn8RuXknSUhY6ZQlU9eFJSlpQ7Opn3cVXjdpKwiCb/LtjMeI/fZr7sKayb9k\n+RkG3AkstuxEFRYRwr33fe60JaNfWPS5gTvu2Far742v2CqeRARfEASnqasMs9BUwOJxCVyanU4+\nUIgSeAOqffFLQPuwMAwDB3N98muYTGamTv2c9evB1pLJyPBvMOOvr++NyWQmKcn36uLPNyL4giA4\nja3g/pqeT+DqP3BJ9UHmY83ql6E2J9GAn4Hc9pezLCqRblRzHX4251iGrbNvMh1mx44nOVPGX59v\n76t18ecbEXxBEJxGF1wDJ5jM5fyjOrNWs7NC4G3gd/zZenUS3/74ol0LY6toK2c/KKiCIUPgwIE4\nsrLOPAnrOHl7/PguDhzowaZNucDnqE488U26AVpjEMEXBKEW9a2g7djhGMb0eG5mHa3R6mx2to6r\nWcUPwCrC9uRbXjEDqaxfD+HhO4GBNe8oKytl69Z8evUKtjuTPglr36++nPbtnyY39yrgJFlZUxg7\ndgFm85PY3mMYjZVn/BzNFRF8QRBqoSySEcA60tPD2bp1CY/cW0nv1GfpDeQBuWDX7Kwc2A2s4l+W\nV04C+ZbfU4E7KS01UFqqERT0DKWlLYGZVFcbyMrSqK5+gVGjate2O9o1YWGvACNrYi0o6ITtPUZY\n2GmSk2+u873N3eoRwReEZsDZZrrKElkH3Ik/WxmSNYaCOdX0B0qAB4BVwNNAJ9QFIB9Y1vZeKNoO\n/Aj8iWuuWU6LFktZvx5KS62iXFraDyXS1ucKC411irGjbw/tsL0TCA8/Smmp9fHAgQE1n6059bp3\nBhF8QWgG1JXpnqk1gdFYyK/pp4nnEv7ATiKBUGCA5edyIALoDOwliNfIJCzsM7744g/MmfOz5Zyr\nSU4eTnh4GAkJn5KSYmv8nLT8tBXuzDpjd/Ttr7mmmhYtrHcCM2eOYs6cule9Nqde984ggi8ITRDH\njP7AgTY01JrgxImX+emnYsrKuhKifcxf2EAw0BdqGp6lAnehWiMUA7tpxRtsB8Ixm1vx7LPf0aJF\na8s41nYrtgunjh/fRVbWFEs8y/DzKyYm5gQrV1ptGltqL7q6pdbdyaJFRiff27xr9UXwBaEJ4ijm\nsbFzcJwQdbQ7Nm8uAm0ioxjFzeykEHgCa06+HNCnVf8H/MKlrGUssMvyTDybNy+kqOhBHD1z2xW5\nBQVXMmvWOvbtC8JorCQ5Of6M9lJj2hj4agsEdyGCLwhNEEcxj4jowlVXnbk1gb92kkR6MM/yzCrs\nnfM2wK/Atxh4mXuB14A1qJ6X6hynToXSkGceHh7Gv/99l090Hm1qiOALQhNEedcFqInXlvz++06O\nHGmFn18nOnSoAuztjuh2+7k07Q36Y5XrEuzLLb8DXmYDMAhQ1TLV1aUUFS1BOfoltG5toqiocZ65\nlFK6DxF8QfBRMjIOM27cKgoKOhEefpSVK0fRtavyspOTB7N16wKyslR9elnZGMrKlgHxpKauZfPm\nLwgOziW8bTsuP/4SxvTddMZe5IcCs4COwC78mc8e4D+Wo4rp1CmW7t0rSUmZgC7w/fq9xZ49cy0x\nZTJzZm1fXm+toCyd2oIupZTuQwRfEFzI+cxOx41bVSPopaUaI0c+w9VX9yArK5zYWBMREUa7lasQ\nhFoDO4PiIhN9i/7ENVk/0Qb4G/AKcDvKq28DbAPKgHXczKqaupyLgRGAxv79s3jvvTuxnRQtLw+0\ni2nOnKW1JlQbEnQppXQfIviC4EKczU6dvTDUd5zJZCYnR8O2nUBeXsuasdUuTHOxN2X2AH3wYz/j\nuYgYNHoDpZYjwlBVOCGWM5qBv/Moyqu3LacEMHD6tCrBtP18Q4akoZorpALBbNqUQ0GB2e6z2Qt6\nIZs25TJkSFrN55NSSvchgi8ILsTZ7NTZC4PjcWVl7wGQlpZLdfVMbNsJaFqE3dgFBbG0ajWL06fb\no0Q4GH/SmcooWgNXo8wZ/Qy3AWuBTGB/y1DSus7B//dDVFUtAyqBY8Bky/mV+Dt+PiXWa8HSJNls\nHk5S0lK71saHDlUCHwPDgLWYzY+Tnm79HqSU0n2I4AuCC3E2O63vwmCb0cfE5PH99/arUX/80Q+z\neSLUallWTnCwieJi69iqdcFsYmJe4FTO10xhA3HAPuBFrEK/BJiLMmwOGAL5vPsLxPVpz6fJg3n0\n0dWkpgLkoMR+Hcrw2QU8SEzMJ3YtjWfOvJJNm/6H2Xzmjpb6pC+0q3WslFK6DxF8QXAhzmanHTpk\nk57+L5SBUkSHDrZ7waoeNtAeVd9uFfGiohzL744ty/IpKTlOy5azKC/vhqYFoaZdDbSqzOMeNjCX\nusstw1E9cF7yv4viqo9hv4Hd+zVSU5+ksjIEg8FAy5a5lJXNQ9PiwLLQKiZmPgZDJCkp92N7pzJw\noL/dqlr9oud4kevS5UKMxsI6jxXcgwi+ILgQ57PTQLDbG+o9TCazpc1viuX1AcD1wDwgBmhBdXWU\n5fjrUHl5NKqTzd1o2mbKyu5CtTK7k0BWkMjtdM2HFtRfbvk9MI+HoeoKm6MKKS9vA1wClHD69KXA\nBJt3LeP06WNkZ3fA8U7l3/++krouenXd/Yh9c35xu+APHjyY4OBg/Pz8CAgIYMWKFe4eUhC8nuzs\nSGyFMjs7khkzNmI2P255vgBVUdMH8ENJdjzqYvAekAHMwX4dbIjl8XX48xBTeJvLLM9uwb7c8kng\nAiCdIBaRALyB/YYka1G1O/r5P8T+viAEaFeniNd30bMV97i4UmbPHiT2zXnG7YJvMBhYunQpoaGh\n7h5KEHwG+4VRbTh+fCdVVRdhFdV1wAzL4+G0aJFEefkBVG/KAqAH9gIcDBThzxb+j6uJRTnt+j1E\nf+ApoCfKfd9LIPN4CUjEKub6VuOlqEla2/PnYX9fUMw111Rb2hA7l6HbintUVIistPUAbhd8TdOo\nrq529zCC4HHOpgbfcWFUVtYITKZZwDisjQysgtuyZSTl5UlYBVffHlx/vJcW/EoiH2EE2qLuC/Qz\nhANGlNjPYxgwGnXXsAw4hf1W48ss77I9fy7qziIAP78sbrklnDfeGC4Zuo9xXjL8+++/H4PBwB13\n3MHtt9/u7iEFwSMkJq4hNbUt4E96egDl5Z/z4Yf/V+ex4eFhREf3tlsYdfp0F5RfHw38jvLvwwGN\n4uJg7DPuzsALQCcCWMOdfEJHqJmYreuSsA94jf0og8d2/mA2yh5S7REgwTKOyvZbtjxAWdlUoA
ug\nMWKErHz1Vdwu+MuXLycqKgqTycTEiRPp1q0b/fr1q/f4qKgQd4fkEiRO1+LNcZ44Yeahh1I5eDCY\nrl2LWbAgnoiIsFrHfPVVNqA6RYLGjz++WvO5Tpww88ADKWzapAF5DBgQRocO2PnfaguRGTaP56E8\n/BIgG3v5Pgp0pxWf8hc+IRi4FPtLQk9Urm4GdhDCAn4BuqPyfNsjL0c1QHseqEB1vDcAd9Kp0zx+\n/fVxpkxJZd++X8jP38vhw0amTl1d5/dwNnjzv7ktvhKnM7hd8KOiogCIiIjg5ptvZvv27WcUfF/w\n9XzFf5Q4XUNCwqqaUsmtW4P57rt/sHHjBMLDw2r62eTktKO6ugu2QlpcHMK+fUctG4Cssus5k5Ky\nBPgNeBWIRAl4H+yFuDd6GwNYgLVBcQkBHGIqM7kQ5ei3pnb1zS6gCEjmJ9Qq226Wcxc5HKkvv7rC\n8rt1nLCwzlRV+TN//nASElaSnj6DzEx9Edi5Z/re/m+u40txOoNbBb+0tJTq6mratGnDqVOn+O67\n75g6dao7hxSEs8IZ3z0jwx94B1XXspOsrF4MGrSEjRsn2PWzUatHrUJaWdmKQYOWEh3d27K61FbM\nNVT3Guvyp8DAX6ioGIO9ZBuAdJSFcyd+7Gc0/ehOUU0bYw1l7tyLtQ/O98AxgviI7aisvrvlqF7A\nXtQU7oVAS9RkrS781cDdNWfu3n1pzfcgPW58H7cKfn5+PlOnTsVgMFBVVcWIESPo37+/O4cUhLPC\nmRYHJtNhVCHjcvQtQbKyNJKSllJQYFuHPgyVscehWo/dR1bWf8nK8kdNehage/JwANs+OFBJUFB7\n2rV7kZycSNS0613AZtQdwAECuZ1EVnARqqmZ7eWjI+oeIBz4BQMvk4Ta+1XP6kNRtf0VwHPAEWAp\nEAXMx8+vkN69+9K5cxHwHtnZkbJdYBPErYLfuXNnUlJS3DmEIDQKZ7LWdu3iLJOr9hOnq1ZVomm7\nsWb1eulxgeXnRqADavJ1OJCEqoTRd4PNRU3QLgBKKSp63tJL/hmU4P8XmI4BE4MZSD920hvV0cYP\ne1PGhDJqnuIOVOb+PKp/zjKgHFWRU2zzGb5HZfnqDDExc9mwwdrKWL/zueOObTV3PrJIyveRlbZC\ns8aZrLVbt5Ns365qz21lVrUviEOJahCqQUEgyjL5K9a+MwuAKajFSvYNz2AkyqefZxnNgLJdDgNt\n8eN1pjKDi6gkFHUPEYqa2l2GtbNlJvAmM1C1OXrzhDCUPbPaMsYCwsJ2YzYPx/Hi1a5dnN1nru/O\nR6pzfBsRfKFZo2etGRmtMZn2kZFhJCHhUzsv33qMH/v3P83p00bUQqRhwHpU24NiVLZ+P8qqWYeq\naTegxHYJyj5xXK2q/x6MkvA2qJLMSIJYwiNs4VpqbyLeFdiBWob1Bd1YxR2oOwioPX2rHgcGmtiy\nZQJJSUvZtCnHIvzqmG7dTtl9L+LXN01E8IVmjb5w6J57PmbHji5kZYWwY0cOP/zwHuXlFwD5XHNN\nMG+8MYJZs75jx47nsbY+eB3ohxL7oSj/Xq+jz0ZZKmGWn0eAVjiuVlVoKKPmYcBAC/L4Cw/QAlUh\nb9s8Qd9E/DCQg4G5bEU1MzuFaocQgrJwnkDdfRxH1c8vY8CAkJrPW1BgJimpfntG/PqmiQi+0OQ4\n212nTCYzX32VhbWG/l8cP/4Mutilpi6jRYuNZGWFY93cIwNV6a7L8XxUhYttHf0ylKWi96XRPfVi\nlOtegrJgwlGZfSEB/I9HeYCXUFOqtvcDbVDSvhmYxzxURq8Bn6CqbabYjP0CMBbdVoqN3cE//zm+\n5jM3tEJW/PqmiQi+0OQ42z1RZ8zYSEVFP6zyGoKj9ZKSchz4BiW5T6KyedvVqi+gJktt31cIvI+q\njLH11FehBD8Y/QLhzxbGE04HrF1yjlG7q2UJLfgHPwArLec5iTJ42tuN3bZtB6677hNLtY2Z5OTx\ntS56Z7owSsuEpokIvtDkcPSfMzL87TbpePLJK5k792ebTUayUTX2+i5MjguTTKiKmj4o774QVSrp\n2Opgj8P7TqKmWG1ragpQ9fVRwDEMHORmRtGXHXRB1ebonW3uRuX/eqOFDYRyqtfrxBauo23bzhQV\n7aBduzgOHy6nqGgnaq5AjT1oUIsGBVs2C29+iOALTQ6r/1wIrGXv3kPs2KGqY9LTNdasmUVl5eya\n15V4ZwMXofZvLUJZNsrDV4+fw75LDdiLeybKdHkS1aYsFHjA8vM9VK+aXqhFVOpcLXmTKXSnDfZe\n/RKsPStPAj8Bb/MhsbGZpG+6tdbn7dv3LYqKpqAvuwoK+onk5IRaxzkiE7PNDz9PByAIrsRkMlNe\nXkFY2AcEBLwCDKWiwr7LTGVlZ8vjVNRkaxFqknMsSozboiY6W6BE+yrss/k+KL9cL4FcjppwDUDZ\nQS1R+XmY5fho1H+1/6F8/+W04s88yqNcBfzB4ewRqPqeA0ApLXibn4AJhIZ2r/Mzq5LKcJTFNJKe\nPS8/45yFjtFYiLrEgEzMNg8kwxe8nrOZhH300S9Yt05tuWetbdFw3A5Q/QxGTWr2xl5y+6Hq4/X3\nV1PbqgkDLkbZKDp6q4IdDsf/BDwGrCKA9fyFJfRE4xDKvsHh6L3Ad0AyM7Dtf1lYmFHnZ7auE1DH\nXXjh6TN9nTXIxGzzQwRf8HrOxmv+8UfbLvB6bcsAVHVMIcq6Kbc8PoaycRzr1k/avL8Y5bnvRmX9\nB1CLqqC21/+75Zi7gVmoydTjwP34cZw7uI8LqKpZLfsAyux5DGsPnP8BvxDLWraj7gqWoyZ9Aykp\naUtBgbnWxc5RuBcsGElVVcPfq0zMNj9E8AWv5+y8Zj1710V4p817v0cJcg9U1h2E6lLZBiW90ZZj\nJlse630oo4CHULZJAaqRWm/LuZdg7SMfClwLfInK9A1AJa14iGmspSWq4YFt82Mj8E/LmX/AwFv8\nRHT0ajiul4BqqN2n/CkqaklS0sZaIu0o3BERvtHhUTj/iOALXo+zi4BMJjMtWxZjbTlcSfv2p+jQ\noYqYmFOsWxeNmjgNQVXbPIG1cuYN1H+HauADVNOx6Vjl+VUgFtXorA/KytmL/cbeT6IuAN2Bv+HP\nVhKYRDBVhKHW49ree8SiMv1iYBVhFF74MqN676CkJJy0tGVAlkMMS2RiVWgUIviC1+Ho2c+ceSWO\nXrPtMR06ZAOB/PCDH2ZzT/SOM4GBc7jiigt4440refTRNahM3LZ2XpffdcCzNs/PcXjdgLqABKP6\n4kRZXg/HWk+Ti6rq6QQYaM10pvI6ccAh1KXAcQeqXagcftfVf+XzVbNqPv+QIWmoLQhXO8QQjtFo\nbvwXLDRbRPAFr8Pesy9g69YFREf3tpuwTUhYaXPMv7AX8uXAXVRUXEpqan9++WW+peVwFUpiQdkx\noGrsq7AX1hhqL3tqgbXR2WzUHMCtKBtHb5u8kAC+Z
zjzuAhqvPo4y1nuxtp4YR/wHZEcaD+Jrz+c\nbPf5rXc09s3aYmN3kJw8HkE4V0TwBa/D3rNfR1bWk2RlWSds580bxKZNucBnqLr2AOBDy/GjUR0r\n56JWn75ETs5L2Fe5t8Bq59S1+2suKovXNwjMBfoC/0JZOlGoOv3lqHmAUYCBEN5iCjsJwbbxMDxt\n+WlEratNAl7hCoYOfYCvLRuB22Jt1uaPyTSXdu3i6NbtVJ2rZQXhbBDBF7wG3aZRu0Ppq17bYJt9\n792rcdllb1NW9kfURGknVL2LrdeeidqnNQJrWwMsP09TO6PvgrU12V7L43hUnX4R9nbPMlRWfxKV\nvz+PH/uJJ5o+VDAX1SvT9uzdUReAjqgan9dYQlBQFR9+OK7O70GqZwR3IYIveA22Vg5otG37EuXl\nJzl9Wm8ZUMC+fXuprn4Re4G3ldeLUNOhQ1HevGPVTjFqktb2Ob2RgV4FvxtlEd2Ftbe8fv5y1F3E\nt0B/gunNFPbQBWtdTrHD2fcAJ4C5PI1a2KURGvqi6744QXASEXzBa3AsvywpiaK6uhxYCORjMBRS\nXd0fewFuR+3e7yFY+9G/i/1WIUUoF/0pVO69H2XLrEb5+WGoCdqXUP89CrHtUaNkPRQD2VxPF66h\niDhUBX6X0UVuAAAgAElEQVRLyxHxWHtiHgR+IJhv+NYS0xLgd/r0EWtGOP9IawXB45w4YSYhYSUH\nDuzFdql/dfUhVOVLS+AhNK071kVSYF3s9CyqNn4Z1lYJuhV0G9bSS/189wDXAPcB/ijhH2455/2o\n7cCfQOXl01F2zyrURSKHAJ5kEg9yDUX0Rjn8D6LuJf6Gala8C7WI6oer/8qivbsIC/sSVc4ZCEzn\nxIm62yQIgjtxe4b/zTffMGfOHDRNY9y4cUyaNMndQwpegG3ZZExMHgZDJdnZHepsjfDQQ6kWK8ex\nX/x0rJt+L0fVzt+OypL1TUNOowTeH+XNL0CJ/SFUZh6GyvR160evrNmCEnRQUv0qtdsi2/aoAX/2\ncieP0Qkl246LqK6wRP078D3h7G8/hc/evIvw8DAGDowmJcW6w5T0rRE8gVsFv7q6mtmzZ7N48WKi\no6O59dZbufHGG+neXbKbpo6jH6+EfDTp6Rrl5e/QokXrmjr7I0d0K0fvF78ENXG6DjWRql8AilCZ\nfABqQrYUJdIXUbsscwLwIsryse1cqS+gMqIyeX3B1KWoUk1be2h/zWMD+dxOEp1Qa2mzqb2Iapcl\nor/zHPA05GrMmbOURYuM0rdG8ArcKvi//fYbRqORjh07AjBs2DDS0tJE8JsBGRn+WCtfirGVx82b\n8ygq6g74k54eQIcOv6AmQnWhPWY51rZ0ch5KmF8GrkZZO9NRG4E4ZubBKHHvbvndtsGZXrnTEmuZ\nZXeUXD+OtavNVmASBhYxhme4kZyada/hqBoix0VUR4BlrETdbahY9JWxUnkjeANuFfzc3Fw6dOhQ\n87h9+/Zs377dnUMKHka3cvbu3U99PeSLiqqwzchPnnyRsLBXMJs7oCpkOlN7pWs0ag9Z2wqd5aiL\nQ0vs5fc3lGUzHXVn8aHl+TxUM7OHgR9Qwr4AlZf/EVv7BvJoxTKmMZN5DiPehSoYnYOq9P8dSOav\nQDLWuxn1WcW6EbwJtwq+pmkNH+RAVFSIGyJxPRJn3Uyd+rnFyvkMW8E2GNqiaUtQAh2DdW/YYIqK\nqhk6tA2pqX6orQIN1M6hg1Btix07YZajBFvvn3MUlbFnoCyhLOy3F1mG6pXzrOW5EahGadZiSj/2\n8Sce4BKUfeM4Iqj7h2LgZ+C+las5tKyYgwdX07GjCU2rICtrNV27lrBgwUgiIs7/34ov/H36Qozg\nO3E6g1sFPyYmhqysrJrHubm5REdHn/E9vtDlLyrKN7oReiLOffuCUNKod3pUQqtpLVFTnX1Q2fda\nrFn+cNLSnqBt21YUFenyOgwl4hEosR9qeY/tRWAr1lYJlUCZ5fl01DKnO1HzAbaSHYK6IDjePagW\nyi1ZwZ9ZSRRK7B0bJ+9EXbIOA/P4KzCPqsX1t2uuqjr/f9O+8PfpCzGCb8XpDG4V/EsuuYQjR45w\n7NgxoqKiWLNmDa+99po7hxRsUOWOq5zaOMRVqD4wBajVrh+ibJTTqHLIO1HSeT3wH2xFt7z8QgwG\n6ySpyqFjUcuWdGtoKMoaCkNV1kSiBL81+mbg1sVYRcBbqAzfceGVfZ8cP78f8av+HxN5kWiUQXQZ\nSuyHYu/qlwDbCWAZe1AXDqSDpeAzuFXw/f39mTVrFvfddx+apnHrrbfKhO15xFrueP42qU5OHszW\nrQvIyrLtJvMSyh/XbZxAVAXMIpS9UwSUU1b2V1Stew+sE7ftUNXtF6JEvhR1Z6B78Ccs4ziutu1v\nGddoOacR5d/HoCqBVJ+cmBgThTmnmcrrNVO8rbGK/TqsG5OUAIb7JnHqxLWQ0s0ynvj0gu/g9jr8\nAQMGMGDAAHcPI9TBwYPBOL9xyJlxdpvB8PAwoqN7k5VlK8DtgV9RkqnbOONQojsCdVF4EXVR6Im6\nIPwN6wVjBipTvxh157Ac1YuyBEgE3qb2att1KMG3nW69HVXW+RtgwJ9D9Mx5mb5Qk9lXAL9YzqqL\n/fdArrErr//8ExVVgRQUmJESS8EXkdYKTZiuXYvZurXhjUOcwXGbQb2WPiOjNSbTXiIiutC9eyXJ\nyYOJicnDXoBboeri11HbT9d/jwIWo5oRnLa8pxRVNtkVtSh8JKoLpm255nLUBWW25Ryhlvd84zBW\nBaq0cwYQTkve5EFeJghVgW9bxT8Pa9u0X4G+H3zMjcNGEGbZSUpKLAVfRQS/CbNgQTxlZWfORPXM\nvS7hts3gHfvc/PBDMWbzg+gymZX1Pjt2BLFmzVpURfoLqJbCe1GLnlRFTm0/HcvvIVgbmC1Bib6+\n4YgJlYNrqDsAx7qZwyg//3eU7/8ZyhKy9sDx89tD9+4XcOD3KQzj31yE2tMqHzUlbHvGCNQ9QC6w\ns/f/MX2YbR2/IPguIvhNmIiIhjNRxxWxWVnL2bFjZK1NRxy3GSwpsb0AFKJE/lkqKx27wFdYfgaj\nJmuXo7L3LagFSgtQ3vpfLOcyoKpt9K0D9bJJtayp9sYkO1Fi74eaatVQ62BNGAwLMBgKCAgopLz8\nSTJ/f5XH+DctsC/UdOyGvxdY3fmv9L7iYj4Su0ZoQojgN3McM3clzKvsNh3ZsuUFIiO70bLlLMrK\nugIFVFaWohqSrUMJdBeH8/RC9YzvgzJJ/FENyu5CyeoW1MpWfd1qqOW9GsqnL0RV46iWxMHBwbRu\nncHJkyGcPDkLuBJ1FzAFa0Y/E1sZ17TOaFoorQK2MLG8KxEUcrHlXbaRhqIWUYUDu6KiefS7//FE\neIQLvl1B8C5E8Jsp1s1Gcqg94Wm/yjUnpx05OX7AH1AZ9RSsbvdc6l4odQRrqeQIm2MvRpVq9gJS\nULtP9Qdm
Wc5/EjVluhblxa8FWtO2bQGXXRZJaupk9L48+lixsVmUlMTY1PAb0Dcab8F7TDr1Fn1Q\nS7JKLKPbRhqGarV22YrV3DZgoAu+XUHwTkTwmxG2lTbHj+8kK+shlOwtIyTkFOXlBygr80ctWtJ3\nnApFudlTsIq33mCgN9YLg75QKhrlpV+OfR6t7yk7EiXYek2+vvq1o+U1nWLgH+hZe1aWRnb2U8BS\nlGe/gKCgIMLDs4mIMFJdfYCiIpvaevZzEy25iHIuR80QBAKnLL+/iJrizQUyW7Zk8jdb6Ny1G4LQ\nlBHBb0bY+/WjgPctrxRQXNwG5YPbNv3VO0teQG3bR29yZrtQqhglpzEoJ9w2j24DVGP1823PV4i6\nSNger0u09ThNuxp1UVCWTXi4ucZ6ggJiY+cSHd2bnN8/5s8nV9ADlc3bVuC8iqrSH44ylHq/9TZT\n7ri7MV+rIPgMIvhNGMeVthkZAdgLbQFK0O+zPLbfzi8oKJLQ0AxycsB+Zep2AgK+oby8M0pC26EE\nXpU8qvLKu7GuUf0NdRFohVoEFYSSXF2GQ1H5ti7HJSg7Zz72F4GdqBYIYfj5taekRP8cAOFEtA1j\n4P4JtDhZXNPw7ANq32f8AmwA/mgptxSE5oIIfhPmgQdSSElR1S7p6RrR0S9gK6ABAcFUVtq2Frbv\nHBMenkV09CXk5NyAEu8yoAXV1X+mvPxfqIlafU3qfJTYg8r030bZNDstj5+ynONFVEbvKO6foTJ6\n2wtBEQbDU5bM/iQwGVXeeSctWhykqKhnTbxBPMWf9syhB8qmsZ3yrVXT87fnmPlIoku+Y0HwJUTw\nmzCbNtlPvppMkSi/3AAcoqoqFNiBVWSHoiZXewO7aN26DXv2bANyUPZNN5QL/jFK7HeiRPtt7Gvs\ny7BfHDUHqxVkQElxLPbibsDffzdVVXrXSwNt215ARUUwpaW23n4psbFzadu2M3v2DMOfF3iAp4lE\nTfmWoNbTrkXdY4xCFYgagX1Az7feZqRYOEIzRQTfx6ivxUFdzzvWo1RXF6AmX5cBT6BpytYJCHgG\n6EBl5WGUtbINuICMjN/RtBmo0kt9kdW/UBuRLMde1Gdh3SzcfkMSgyHC0iq72CaeocTEvMjp07EY\nDCauvroNYCQ19YGacw4atJStW49SWmr9DLGxOaSnTyMh4VP279nCgzxNR2qvvT2NMpbyLaMa3nqb\nv4rQC80cEXwfo74WB5s2VWI2twRuID09FFjK1Ve3JDX1JZS1coyIiALy8x0nTcMJDu6C2TwRtcI1\nEHgMNUmql17G2hyvi7njxGtfVG/6LNRCKqtI33ijgV275pKV1cVyvjhiY/ewceM9hIeH1bSgLSgw\n06KF/cpgs7mQMWPmUlDQifDwTFauHMnRjAx6bnyEKyiqKcB0XHt7CDUzkN82lAlfbpIKHEFABN/n\naKjFgV4yefhwW7p00YBpNa/17fsOO3a8YKmpt9opRUU5qOy8LepPwlY+9SZluoAXYW2LYOuOH0Jd\nWKpQ3S5fom3bKAYNakFy8jAAkpI2cvhwT4uYj6/VfK2uHjXh4WGkp0+refzh669wdO7zRKNaIDiu\nHNCAHwEzEP3W20yXrF4QahDB9zEcWxyoCpnaJZNGYxHHjkXYvZafH0OfPhXk5LRCTZqGAK2orn4I\nlQ8/i6qksfXWT6ImVZfTqlUZoaEZ5OYuQS2YeslyjmKUp1+NukNQq2mvu+49OwFvTMOxXdu28Wn8\nYDprGiGWT51nGd22Z/1m4GhQax7/+nvJ6gXBARF8H0H36A8caENs7BxLk7MqysurSE21XgDCwvYw\ncGABTz55Bbfeuhpb8TYai9i06TQwFaugv4+qfAFlyVyCtY/8LtS+sGHAnUREzKWg4EJUnxtFQMBz\nVFY+jaqLWYtqoaA2B8/OjnTJZ1/98VL2JD7MGyhhn24T/VxUfVBHVGfLuLfe5nHJ6gWhTkTwfQTH\nJmeXXvoe0IKjR1sTGzuXdu3i6NbtFMnJdxIeHkZCwkoyMyej574xMb9SXh5JUVE7lH0TjxLyAlQd\n/nKs1TS6NdQH+CcBAZFERBwnK2sq6uKgoQt8dXVny/kqsDY8U6tnjcZKwPle+nXxzovPU/LmK/S0\njOLY2bIT6rK0u20od4lXLwhnRATfwzgrho7e/Y8/+mE2Wy8AV11l3c3KZDKzaVMlqi7+LgCOH99h\n6UNjK+h3Uv8kbAVwjKFDI/jww7sZMiSN48f15z9E7Vg1nerqcMv5PrR7f1jYaZKTbwZqTzQ7s/NW\nWspnbEmYQBuse8sOpfZWJzuB61es5o/SA0cQGkQE38M4K4a1vXt9az8A+92sZszYiNlchbJWQoAi\nqqvD7I4PCqrA3382JSX+qBW2O7H37gNRxY7v2Yy/FvssXu+pY8DP7xjV1db4Bg4MqLlwOV6sGtp5\n6z8LF3D0bzO4Cvu2CMtRl6VZqLqhA35+jFi3kd59Lz/j+QRBUIjgexhnxVDV1VtLFsvL29h590Zj\nUc3dwvr1oNab2u4rOwfb3HjIEPjii3KsneGvB55BbczdApVPG2p8+OTkwWza9CVms2MBJIDGLbdE\n1Cqp1HG8WNW389aWDRv45s7RdEXtcWVfza9G247aB6vlW28zQ7x6QTgr3Cb48+fP5z//+Q/t2rUD\nIDExUfa2rQNnxdCxZLGumvWkJFuf374vjiqvXEZY2GkGDgwgOXkQ69dX2RwTDlyFqrixdrLU4wkP\nD2PgQH9SUmwXQe0gOrqamJh8gHptKceLVV07b+kWzlUood9B7f2xvgcKWrfmwY1SgSMI54JbM/yJ\nEycyceJEdw7h8zgjhnWhXwD0rP6OO7ZZetvrXWRKsG5Q0gb4BX//Mq65xkhy8gjCw8MID8+yW8Wq\nO+V610nHeGrHOr5mgjgl5X7qs6XOtAfsrm3bWDlyCK0qKojCauH0x9pBPwzVmu1SaYsgCI3CrYKv\nVmoKZ+JMYujMhO6jj37BunVKbK37wd4DDMVgeBlNe9Hy2giqql4lNXUKLVooQV65cpRlFWsHNO0A\nXbv2IC5udZ2LovRY580bVBNTUtIGkpMHn7VHD8q++e6uMcRpGq1QbdG+xv5+oyewBwiZ+wp/u39S\ng+cUBOHMuFXwP/74Y1JSUrj44ot54oknCAkJcedwTQZd6Otql+B4cfjxRz9sxTYg4DQXX/yZpea+\nu4PnrpqS6YLctavRbhWrM9Q1yWw0ak7ZUjqrP17KvsSHa/bK0uvpY7G3cHYAwdNncKeIvSC4hEYJ\n/sSJE8nPz6/1fGJiInfffTcPP/wwBoOB119/nblz5zJnzpwGzxkV5RsXBXfFeeKEmZtu+pjMTH17\nQGs1TFZWeK1xDYYT2MpkSEgxv/zyIACjRy+289z1n3FxpWcV/4kTZh56KJWDB4PZv78a2wtMVlY4\n69Zdz5Qpyzl4MJiuXUtYsGAkERG1z3/49995+7rr0PLyuBb7GYYY1
KaFy1BtEQ4FBjLhhx+49Mor\nnY7zfNDc/z5diS/ECL4TpzM0SvA/+OADp467/fbbmTx5slPH5uUVNyak84Le7MsdJCSsIjPTdutA\na7uE2NiCWuNefXUbUlP1LpXFXHGFH6NHL7HYQBUMHfoemZlhnDixj4gII927L2X27EHk5RU7vQYg\nIWGVzWSw/d61sbEFVFX5M3/+8Jrjq6pq/ztu2bCBNXeOph2qyfJOVF2QXsV/AHVZy42MYsSaL7nN\nMinrTX8P7vx3dyW+EKcvxAi+FaczuM3SycvLIyoqCoAvv/ySuLg4dw3VpFB2i307ML1dgu0Eqi7W\nmZnRxMbutWm10NbOchk1ailpabcAt9Qay9k1APYe/TDCwl6hS5cLnZpkLjSZ+PD2MZz67Rcisd9A\nUe+8vxXVxrjrW2/zkEzKCoLbcJvgv/zyy+zevRs/Pz86duzI888/766hmhQxMXnArVhbIvzGpk33\n1Mq8HVst6CtthwxJw9kJVGcnW+1LR0MZOLA9ixbd2OBnKTSZePe6fgSeyKcDEIf9fUsUkI5aQnaD\nbDcoCG7HbYKfnJzsrlM3aQyGSlS/GmXRXH55O6daLehi7Wxd/9kce7alo4UmEx/eOoLAHdvpgloC\n1prabYx/B4au38TAmwf4xG2zIPg6stLWy8jO7oCavtQff1bncfWJta04x8WVMnt2/eLsrJCfqXTU\nkS0bNrDqztG0R7VAsF3nexfWNsbfA/1XrJa2CIJwHhHB9zIam3XbinNDE05nI+QNUWgy8cn/3U7Z\nT/8jGrVm19a+CQcWoBZRHbj8Sh5Y/gmh4REuGVsQBOcQwT+POFMV8+STV7J1q76l31FmzhxV57lc\nKdaNpdBk4tUr+hB56iRdgQyUjWNr32QBtGzFdau/kKxeEDyECP55xJmqmLlzfyYr60nAQGmpxpw5\nS1m0yOiJcJ1CX0TVDvsKnGewt286zn1FFlAJgodp8oJfV1ataZzzhhyNwZmqmHNpU+AJ0lI+Iz1h\nAj1QmyN2wN7CuQDV1fJXoK9U4AiCV9DkBb+urBo46w05XIEz/vzZVNl4gkKTifWJD3MkdY1da4SZ\n2Fs4+1FCP12EXhC8hiYv+PVnzOc/i3amKuZcu2eeD45mZLDwun6EVVfVqqmPRO2EG4US+/C/PSdZ\nvSB4GU1e8OvOmM+u2ZercGai1ZsmY3UKTSa+eHgSpWnrCUNtObgT1XxZb41QBJQBmZf2JfG/n0kF\njiB4IU1e8OvPmL0zi/Y2jmZk8J8Bf2RuRTnLgenozZZVa4RoVEYfcNUfeOCj/4jQC4IX0+QFv76M\n2duyaG9k17ZtpA4dRFfq3ua8N/CjwY/79hwQoRcEH6DJC75w9hzNyODzUX/C73guc4FXULZNMbW3\nHBz6xUYRe0HwEUTwBTv0rL43qtfNEdTq2GUooX8JaAvkRbfn9tVfyN6yguBDiOALgEXoRw7hgooK\nLgGGoerrXwKmAGtRk7SFLVpy7efrZbWsIPggIvjNHL0C50Taeru6erXHFrRHZfeHUZ0tbxOhFwSf\nRQTfDTi7k5SnSUv5jG8SJhAFGKlrjy3YB1RFRnHXmi/FvhEEH0cEvwHqEu+GthNzdicpT1JoMvFz\nwgQ6A0+gsnjbCdm9wGZUVi/2jSA0DUTwG6Au8f7sswlnfI8398PRWyPkfLWei4BAVKTxKBvnJJCN\n2nLwZulXLwhNCj9PB+DtnIt4G42FqDwZvKkfztGMDN699CJOpK7huYoKgoBjqEjDgDtRi6hCbhzC\ntL2H+OOAgZ4MVxAEFyMZfgOcSzMzb+yHU2gy8emga7m2vAwT1qx+CfA00BXYHxjI7d9tFa9eEJoo\njRL8devWMX/+fDIyMlixYgV9+vSpee2dd97hk08+wd/fn6eeeor+/fs3OlhPcC7i7S39cMwnTvDf\ne+6hcPO3VBYXM1vTMAAfY83qpwFzAgOpvOkW7ntjviyiEoQmTKMEPy4ujvnz5/P000/bPZ+RkUFq\naipr164lJyeHiRMnsn79egwGQz1n8l68RbzPlqMZGSy89goiNI0eqEVU24FLUTX2r6I6XO5v2Yp7\nf9sjQi8IzYBGCX63burWX9M0u+fT0tKIj48nICCATp06YTQa+e2337jssssaM5zgJLp9E6VpdrtQ\nPY0S/FDADJyIiua2z9eL2AtCM8EtHn5ubi59+/atedy+fXtyc3PdMZTgQKHJxL8HX0e306VUYF9b\n3xVYDByL7ci9GzeL0AtCM6NBwZ84cSL5+fm1nk9MTGTw4MF1vscx4wectnMaqnH3FrwtTvOJE6Q8\n8ACZa9Yws6LCzqvXM/x9QI9Ro3j4/fcJi/Ausfe277M+JE7X4Qsxgu/E6QwNCv4HH3xw1ieNiYkh\nOzu75nFOTg7R0dFOvTcvr/isxzvfREWFeE2cu7Zt48sx8XQ9Xcpx7FfMDgNeADqixF7fW7aiyru+\nZ2/6Ps+ExOk6fCFG8K04ncFldfi2Wf3gwYNZu3Yt5eXlHD16lCNHjnDppZe6aijBhi/HxDP7dCn3\no1bM7sW6AiAU8IvtyIC9h5h+vEi2HBSEZk6jPPyvvvqK2bNnU1BQwOTJk+nZsyfvvvsuPXr0YOjQ\noQwbNoyAgACeeeYZn6zQ8WaOZmSQOm443U6X2vn03VFtEsqBnE6duCPtO/HqBUEAwKDVZbh7EF+5\nffJUnIUmE9/OeIyDa1fzXEUFy1BdLXWffhYQGhZG62uu488fLaGiKtAjcZ4NvnTbLHG6Bl+IEXwr\nTmeQlbY+gi702qYNtDSb6YZ9D5xS4ECrIG5eta6m/01YhG/8sQqCcH4QwfcRvp3xGPemfGqXydv2\nwJkT25G/pO/2ZIiCIHg5IvhejJ7Vhx4+hHbogJ1X3xO1G1V7Pz+yYzowdOUazwUqCIJPIILvxdhm\n9Y419dlhYcQMHMz1ya/JpKwgCE4hgu9l7Nq2jS9G/wljWRm5wALgblRN/SthYXTv0o1CYxfGiNAL\ngnCWiOB7GV+OiefFsrKaTH4ZkIry6SMHDub6RYs9GZ4gCD6MCL6X0a3stJ1XHwKYgoJYPGQo1ye/\n5sHIBEHwdUTwPYztxGyh0cjuwBZo5dYMvxioHjKU4ZLZC4LQSETwPYxduWX6z/x94CCe+vF7jGVl\nHDcYaHX9QMZIZi8IggsQwfcwoYcP2Vk4nQsLuftonidDEgShiSKbmJ9HCk0mPk+4l2+H3MDnCfdQ\nWGCi0Gi02e4cCo1dPBihIAhNGcnwzyOO9s1iDFyf/DqLMVg8/C4yMSsIgtsQwT+PONo3oYcPERoe\nIROygiCcF8TSOY+IfSMIgieRDN8NOJZaXp/8OqHhEWLfCILgUUTw3UBdXv3wRYvFvhEEwaOIpeMG\n6vLqBUEQPI0IvhsQr14QBG9ELB03IF69IAjeSKMEf926dcyfP5+MjAxWrFhBnz59ADh27Bjx8fF0\n69YNgMsuu4xnn3220cH6
CuLVC4LgjTRK8OPi4pg/fz5PP/10rdcuuOACVq5c2ZjTC4IgCC6kUYKv\nZ/CapjVwpCAIguBp3DZpm5mZydixYxk/fjw//fSTu4YRBEEQnKTBDH/ixInk5+fXej4xMZHBgwfX\n+Z7o6Gi+/vprQkND2blzJw8//DBr1qyhTZs2DQYUFRXiRNjnD/OJE6Q+9BDBBw9S3LUr8QsWAN4X\nZ31InK5F4nQdvhAj+E6cztCg4H/wwQdnfdLAwEBCQ0MB6NOnD507d+bQoUM1k7pnIi+v+KzHcyef\nJ0yyLqLaupXFZZVM/OwTr4uzLqKiQiROFyJxug5fiBF8K05ncJmlY+vjm0wmqqurATh69ChHjhyh\nc+fOrhrqvCKLqARBaCo0atL2q6++Yvbs2RQUFDB58mR69uzJu+++y08//cTf//53AgIC8PPz4/nn\nn6dt27auivm8Umg0oqX/XLPloCyiEgTBV2mU4N90003cdNNNtZ4fMmQIQ4YMacypvQZZRCUIQlNB\nVto2gCyiEgShqSC9dARBEJoJzVLw69pbVhAEoanTLC2d+vrVC4IgNGWaZYYvpZaCIDRHmqXgS796\nQRCaI03e0qlrf1kptRQEoTnS5AW/Pr9ePHtBEJobTd7SEb9eEARB0eQFX/x6QRAERZO3dMSvFwRB\nUDR5wZfWCIIgCIomb+kIgiAIChF8QRCEZoIIviAIQjNBBF8QBKGZIIIvCILQTBDBFwRBaCY0SvCT\nk5MZOnQoo0aNYtq0aZSUlNS89s477zBkyBCGDh3Kd9991+hABUEQhMbRKMHv378/a9asISUlBaPR\nyDvvvAPA/v37SU1NZe3atSxatIjnnnsOTdMaOJsgCILgThol+Ndeey1+fuoUffv2JScnB4ANGzYQ\nHx9PQEAAnTp1wmg08ttvvzU+WkEQBOGccZmHv2LFCgYOHAhAbm4uHTp0qHmtffv25ObmumooQRAE\n4RxosLXCxIkTyc/Pr/V8YmIigwcPBmDBggUEBgYyfPhwgDrtG4PBUOs5QRAE4fzRoOB/8MEHZ3x9\n5cqVbNq0iSVLltQ8FxMTQ3Z2ds3jnJwcoqOjnQooKirEqeM8jcTpWiRO1+ILcfpCjOA7cTpDoyyd\nb775hnfffZcFCxbQokWLmucHDx7M2rVrKS8v5+jRoxw5coRLL7200cEKgiAI545Ba0T5zJAhQ6io\nqIMzjrUAAATvSURBVCAsLAyAyy67jGeffRZQZZkrVqwgICCAp556iv79+7skYEEQBOHcaJTgC4Ig\nCL6DrLQVBEFoJojgC4IgNBNE8AVBEJoJXiv47733Hj179sRsNns6lDp58803GTlyJKNHj+b+++8n\nLy/P0yHVyZn6HXkT69atY/jw4fTq1YudO3d6Ohw7vvnmG/70pz9xyy23sHDhQk+HUy8zZ87k2muv\nZcSIEZ4OpV5ycnKYMGEC8fHxjBgxwq6c25soLy/ntttuY/To0YwYMYL58+d7OqR6qa6uZsyYMUye\nPLnhgzUvJDs7W7vvvvu0QYMGaQUFBZ4Op05KSkpqfl+yZIn29NNPezCa+tm8ebNWVVWlaZqmvfzy\ny9orr7zi4YjqJiMjQzt48KA2fvx4bceOHZ4Op4aqqirtpptu0jIzM7Xy8nJt5MiR2v79+z0dVp1s\n3bpV27VrlzZ8+HBPh1Ivx48f13bt2qVpmvo/NGTIEK/9Pk+dOqVpmqZVVlZqt912m/brr796OKK6\n+eCDD7Tp06drDz74YIPHemWGP2fOHJKSkjwdxhlp06ZNze+lpaU1PYW8jfr6HXkb3bp1o0uXLl7X\nZO+3337DaDTSsWNHAgMDGTZsGGlpaZ4Oq0769etH27ZtPR3GGYmKiqJXr16A+j/UvXt3jh8/7uGo\n6iYoKAhQ2X5lZaWHo6mbnJwcNm3axG233ebU8Q2utD3fbNiwgQ4dOnDRRRd5OpQGef3110lJSSEk\nJMRrb01tWbFiBcOGDfN0GD5FXX2htm/f7sGImg6ZmZns2bPHaxdlVldXM3bsWI4cOcKf//xnr4xT\nT46Li4udOt4jgl9ff55HH32Ud955h/fff7/mOU9mfA31EUpMTCQxMZGFCxfy0UcfMW3aNA9EeXb9\njjzp7zoTp7fhbXccTYWTJ0/yyCOPMHPmTLu7ZW/Cz8+Pzz77jJKSEh566CH2799Pjx49PB1WDV9/\n/TWRkZH06tWLLVu2OPUejwh+ff159u3bx7Fjxxg1ahSappGbm8u4ceP473//S7t27c5zlA33EdIZ\nPnw4Dz74oMcE/1z6HXkCZ79PbyImJoasrKyax7m5uU73hRLqprKykkceeYRRo0Zx0003eTqcBgkO\nDuYPf/gD3377rVcJ/s8//8yGDRvYtGkTZWVlnDx5kqSkJJKTk+t9j1cZz3FxcWzevJm0tDQ2bNhA\n+/btWblypUfEviEOHz5c83taWhrdunXzYDT1U1+/I2/Gm7LqSy65hCNHjnDs2DHKy8tZs2YNN954\no6fDqhdv+u7qY+bMmfTo0YN77rnH06HUi8lkqrFJTp8+zQ8//OB1/8cfe+wxvv76a9LS0njttdf4\n4x//eEaxBy/08G0xGAxe+wf86quvcvDgQfz8/IiNjeW5557zdEh18sILL1BRUcF9990H2Pc78ia+\n+uorZs+eTUFBAZMnT6Znz568++67ng4Lf39/Zs2axX333Yemadx66610797d02HVyfTp09myZQtm\ns5kbbriBadOmMW7cOE+HZce2bdtYvXo1cXFxjB49GoPBQGJiIgMGDPB0aHbk5eXxxBNPUF1dTXV1\nNfHx8TX7ffgy0ktHEAShmeBVlo4gCILgPkTwBUEQmgki+IIgCM0EEXxBEIRmggi+IAhCM0EEXxAE\noZkggi8IgtBMEMEXBEFoJvw//5K32R/vBHAAAAAASUVORK5CYII=\n",
- "text/plain": [
- "\u003cmatplotlib.figure.Figure at 0x7f5be3c99f50\u003e"
- ]
- },
- "metadata": {
- "tags": []
- },
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Current loss: 9.48636\n"
- ]
- }
- ],
- "source": [
- "import matplotlib.pyplot as plt\n",
- "\n",
- "plt.scatter(inputs, outputs, c='b')\n",
- "plt.scatter(inputs, model(inputs), c='r')\n",
- "plt.show()\n",
- "\n",
- "print('Current loss: '),\n",
- "print(loss(model(inputs), outputs).numpy())"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "sSDP-yeq_4jE"
- },
- "source": [
- "### Define a training loop\n",
- "\n",
- "We now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "MBIACgdnA55X"
- },
- "outputs": [],
- "source": [
- "def train(model, inputs, outputs, learning_rate):\n",
- " with tf.GradientTape() as t:\n",
- " current_loss = loss(model(inputs), outputs)\n",
- " dW, db = t.gradient(current_loss, [model.W, model.b])\n",
- " model.W.assign_sub(learning_rate * dW)\n",
- " model.b.assign_sub(learning_rate * db)"
- ]
- },
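-    {
-      "cell_type": "markdown",
-      "metadata": {},
-      "source": [
-        "As an aside: a minimal, untested sketch of the same step written with one of those prebuilt optimizers could look like the following. `train_with_optimizer` is an illustrative name, and `model`, `loss`, `inputs`, and `outputs` are assumed to be defined as above.\n",
-        "\n",
-        "```python\n",
-        "optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\n",
-        "\n",
-        "def train_with_optimizer(model, inputs, outputs):\n",
-        "  with tf.GradientTape() as t:\n",
-        "    current_loss = loss(model(inputs), outputs)\n",
-        "  grads = t.gradient(current_loss, [model.W, model.b])\n",
-        "  # The optimizer applies the update, replacing the manual assign_sub calls\n",
-        "  optimizer.apply_gradients(zip(grads, [model.W, model.b]))\n",
-        "```"
-      ]
-    },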
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "RwWPaJryD2aN"
- },
- "source": [
- "Finally, let's repeatedly run through the training data and see how `W` and `b` evolve."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- },
- "height": 446
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 569,
- "status": "ok",
- "timestamp": 1527005915434,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
- },
- "id": "XdfkR223D9dW",
- "outputId": "c43591ae-d5ac-4f2b-a8e7-bfce607e0919"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Epoch 0: W=5.00 b=0.00, loss=9.48636\n",
- "Epoch 1: W=4.58 b=0.42, loss=6.28101\n",
- "Epoch 2: W=4.24 b=0.76, loss=4.29357\n",
- "Epoch 3: W=3.98 b=1.02, loss=3.06128\n",
- "Epoch 4: W=3.78 b=1.23, loss=2.29721\n",
- "Epoch 5: W=3.61 b=1.39, loss=1.82345\n",
- "Epoch 6: W=3.49 b=1.52, loss=1.52970\n",
- "Epoch 7: W=3.38 b=1.62, loss=1.34756\n",
- "Epoch 8: W=3.30 b=1.70, loss=1.23463\n",
- "Epoch 9: W=3.24 b=1.76, loss=1.16460\n"
- ]
- },
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAW0AAAEDCAYAAAD+/1UIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xl4VOXdPvD7zJZ9XwmELQkQIAELsiTsi6xiEBGXAiIW\nbV8WBY2K0tLa4lbsr283qxURtIoioAi8SpFNg6whi0FJKAoJBgLZt5k5c87vj5OZLIRkgEnOGXJ/\nritXJsmZyT0sN1+enPOMIMuyDCIicgs6tQMQEZHzWNpERG6EpU1E5EZY2kREboSlTUTkRljaRERu\nxODMQePGjYOvry90Oh0MBgM2b97c1rmIiKgZTpW2IAjYuHEjAgIC2joPERG1wKnlEVmWIUlSW2ch\nIqJWCM5cETl+/HgEBARAEATMmTMH9957b3tkIyKiJpxaHvnggw8QFhaG4uJiLFiwAD179sTgwYPb\nOhsRETXh1PJIWFgYACA4OBgTJ05EVlZWi8fL3t6AIADdugFvvglYrTeflIiIWl8eqampgSRJ8PHx\nQXV1NR5++GEsXrwYI0aMuPadCgtRvfoFeL2zDkJtLWxdu6NqRSrMs+8DDE4N9y4XFuaHoqIKVb73\ntTCTc7SYCdBmLmZyjlYzOaPVSfvy5ct44IEHkJKSgjlz5mDcuHEtFzYAREai6oWXUHwkA9WPPApd\n4QX4L/sVgpMGwWPTvwFRdCocERE15tQPIm9Ew3/FdBcK4P3ntfB89x0IVivEmFhUP/kMzCmzAL2+\nLb79VbT6LysztU6LmQBt5mIm52g1kzPa5YpIKaozKl9+DcWHT6Jm7gLof/wB/r98BEGjh8Fj28cA\nTyckInJKu17GLnWJRuXaP6P40AnUPDgP+jN58F+0AEFjhsO0fRvLm4ioFarsPSJ1647KP/0VxWnH\nUTvnAehPf4+AhfMQNG4ETDs/A/hiOkREzVJ1wyipR09U/OV1lHx9FLX3zIH+uxwEPPQAAieMgunz\nXSxvIqImNLHLny0mDhV/fxMlB4+g9u57YMjORMDcOQicNAamPV+wvImI6miitO1scb1Q8fo6lOz/\nBrUzZsJ4Mh0B99+DwKnjYdy7h+VNRNftL395DR999IHj4+XLl2DVqlWOj//61/+HDz/8txrRboim\nStvO1iceFf96B8V702CeNgPG48cQOGcmAu+cBOOBfSxvInJa//6JyM7OAKBsfldWVorc3FzH17Oz\nM5GQMECteNdNk6VtZ+vXH+Vvv4uSPQdhnjwVxiPfIPCeGQhImQpj2ldqxyMiN5CQMBBZWZkAgLNn\nz6Bnzxj4+PigsrISVqsVP/74A+Liequc0nnqXFN+ncSEASjf8AEMJ0/A+9UX4bH7c5hSpsIycjSq\nnloJcdhwtSMSkRN8Vj8Pj+3bXPqY5jtTULX699f8emhoKPR6Ay5duoisrEz075+I6uoyZGdnwsfH\nBzExsTCotL3GjdD0pN2UOPBnKH/vI5Ts2gPL2PEwHdyPoBmTEDD7LhiOHlY7HhFpVGJiIrKyMpCd\nrZT2gAEDkJWVgaws91oaAdxk0m5KHHQ7yjZtheHIYfi8sgam/Xth2r8X5vETUZ26EuJtg9SOSETN\nqFr9+xan4rbSr18isrIy8d//KssjHh4y/vnPf8HX1wfTpt3V7nluhltN2k2JQ4aibPMnKP1kFyzJ\nI+GxZzeCJo2F/8/vhSHzpNrxiEgjEhIGIC3tIPz9/SEIAgICAlBZWYHs7Cz075+gdrzr4talbWcd\nnoyyrTtQuuUzWIYlweOL/0PQhFHwn3c/9HU/gCCijismJhbl5WXo3z+x0ef8/Pzg7+9er33bLrv8\ntStZhvHAPvi8/AcYjx0BAJin3wWPXz+Hom69lRdn0Ait7jTGTM7RYi5mco5WMznjlpi0GxEEWEeP\nRemO3Sj9YAusPxsEj88+AYYMQeCEUfBc/xaEinK1UxIR3ZBbr7TtBAHWcRNQuutLlG7aCsycCUNO\nNvxSn0BIQm/4Ll8CQ/pxXqhDRG7l1i1tO0GAdex4YMsWFJ88hapnV0EKCYHXu+8gaNJYBI4fCc+3\n/wWhvEztpERErbr1S7sBKSIS1U88heKjmSj9YAvM02bAcOpb+D29HCGJveH7xGIYThzj9E1EmtWh\nSttBp4N13ASUv/2uMn2v/DWk0DB4vbcBQZPHIWjcCHiue5PTNxFpTscs7QakiEhUP/4kio9koHTT\nVpinzYD++1Pwe2aFMn0//j8wHD/K6ZuINKHDl7aDTgfr2PHK9J2eg8rnfgMpNBxe/96IoCnjETQ2\nmdM3kZsqLPwJ8+bNUTuGS7C0myFFRKJm2QoUHzmpTN/T74L+9HfK9J3QC77LfgXDsSOcvonciKCh\nazRuBku7Jfbpe91GXEk/hcrnV0MKj4DX++8iaOoEZfp+6w0IZaVqJyWiVoiiiD/8YTXmz78fy5Yt\ng9lsVjvSDbn1roi8BpddASVJMB7YB6+N62Ha9RkEUYTs5QXzXXejZu5DEAcPcfqqS61elcVMztFi\nLq1nWr3aA9u3u3afujvvFLF6dcsFXFj4E2bPnoF//GMd+vdPwJ/+9CI6dYrGfff93KVZbkbHvSKy\nrel0sI4Zh/K3NuDKye9Q+fxvIYVHwPOD9xA0bSKCxiTB861/cvom0piIiEjH5lAzZsxAZmaGyolu\njFtuzaoVcng4apY+gZrFy2A8uB+eG9fDY+d2+D37FHx/92uYZ8xEzbwF1zV9E93KVq82tzoVt5Wm\na9ru+leSk7Yr6HSwjh6Lin+9o0zfq34HKSISnpv+XTd9D4fnv16HUFqidlKiDquw8Cd8+202AGDH\njh1ITByocqIbw9J2MTk8HDVLHkfxN+ko3fwpau+6G/q8XPitTEVIYm/4LXkMhiOHeeYJUTvr3r0H\ndu36DPPn34+ysjKkpNyjdqQbwh9EtgOhqAiem/4Nz41vw3D2vwAAsU88DAsfxpVREyH16KlKruZo\n/QdZWqLFXMzkHK1mcgYn7XYgh4WhZvEylBw6gdKPt6M25W7oz+QBTz2FkKEDETR6OLxf/gMMWRmc\nwImoRfxBZHvS6WAdORrWkaNReeUKQr/eA/Omj2A6sA8+a1+Gz9qXYYvuCvOUabBMvRPWIcMAN3qV\naCJqe2wElcghIcDChSifcS+EygoYv/wPPHZ+BtPuz+H9xj/g/cY/IAUHwzxpKixTpsMyeizg5aV2\nbCJSGUtbA2RfP1hmzIRlxkzAYoHx64NKgf/fDni9/y683n8Xsrc3LGMnwDx1OiwTJ0EODFI7NhGp\ngKWtNSYTrGPHKy/c8PJaGE4cg8euHTDt3A6PHZ/CY8enkA0GWJNGKgU+ZRqkTlFqpyaidsLS1jKd\nDuLgIRAHD0HV86uhP/09PHZ9BtPO7TAd2AvTgb3AMytg/dkgmKdMh2XqnbDF9VI7NRG1IZ494i4E\nAbbefVD9+JMo/WI/rqTnoOLFV2EZO
QaGjJPw/cNvEZw8GEFJg+Dz+9XKHuCSpHZqItVVVlZi69bN\nbfb406dPQGVlJQDgypXLGDnydmRlZTT4+kSUl7vuxcSdLm1JkjBz5kw89thjLvvmdOOkzl1Qu/BR\nlH38Ka7knEH5X/8J89Q7oS/Ih/f/voagKeMRPDAevqlPwLjvS8BiUTsykSoqKsqxdetHzX5NcsFg\n07dvArKzMwEA2dmZ6NWrD7KylI/PnfsRgYFB8Pf3v+nvY+d0aW/YsAExMTEu+8bkOnJQMMz33o/y\n9e/h8qmzKHvnfdTOeQCCuRZe699C4L0pCOkbA79fPgLT9m1A3VRA1BG8/vpfceFCAR5++EH8/e//\ni/T045g3bx5++9vnMX/+fVe9QML777+Lt99+EwBQUJCPFSuW4pFH5mHx4kU4d+7Hqx4/ISHRUdpZ\nWZmYM+dBfPttfYknJCS69Pk4taZdWFiI/fv347HHHsPbb7/t0gDkYt7esEyZBsuUaYAowvhNGky7\nPoPHzs/g+fGH8Pz4Q8geHrCMGQfLlOkw3zEFcmio2qmpAwke1L/Zzxcfz3bJ8U398pdL8MMP/8W6\nde8BANLTjyMrKwsbNnyIyMhIFBb+dM0XSHjllTVITV2Jzp27ICcnG2vXvoQ///kfjY7p3z8R69e/\nBQA4depbPPLIY/joo38DUEo8IWGAUzmd5VRpr1mzBqmpqaio0NZln9QKgwHWEaNgHTEKVb9/GYas\nDOUslF074PH5Lnh8vgu+Oh2sQ4fDMnU6zFOmA2HN/wUhupUkJiYiMjKyxWNqamqQnZ2BVauehn23\nD1EUrzqub99+yM39HrW1tbDZbPD09ERUVGcUFOQjOzsD99/v2j27Wy3tffv2ITQ0FPHx8Th8+LDT\nD+zsdfTtqcNnGj9SeVv7CpCbC3zyCYStW2E6lAbToa/hu+pZICEBYWPGAGPGAKNGARqZwrX4ewdo\nM5fmMzWzxAAAYde68/Ue34TFUg69XufIEBjoDS8vL8fHklQNQajPaDQCOp0JwcHeCAgIwPbtn7by\nHfzQvXs37N//OQYMSEBYmB+GDBmMrKxjKC8vw6Br/E/hRrVa2idOnMCXX36J/fv3w2w2o6qqCqmp\nqXjllVdavJ8WN2NhpgYCI4H5jwLzH4Vw8SI8Pt8Jj53bYUr7CsjKAv7yFwCAGN8X1uHJsCSPhHVY\nMuQwZ/+quI4Wf+8AbeZipqvV1sqoqKh0ZCgtrQZQ31GSZMLly1dw5kwBPD09sXv3HgwbloSaGhkR\nEZ3w4YdbMXbsBABAXl4uYmPjrvoeffr0w7p1b2PhwkdRVFSBbt164YUXViE+vp/Tz93Zf2xbLe3l\ny5dj+fLlAIAjR45g3bp1rRY2uRc5IgK18xagdt4ChPmbUPrFPhi/Pghj2tcwHjsMw6kceK1TfjAj\n9u4D6/BkWJNHwjJ8BOTwcJXTE7XM3z8ACQkDMH/+fRg6NAnDhyc3+rrBYMCCBY9g0aL5iIrqjG7d\nuju+9utfv4A//vElvPPOOthsIsaPv6PZ0k5IGIDNmzehXz/llXF69+6DoqIizJgx0+XP57q2ZrWX\n9uuvv97qsfzXvnVukcligSH9BEyHvlKK/OhhCNXVji+Lcb1gHT4C1uQRsCaNgBTR8jqhSzJphBZz\nMZNztJrJGdxPW0VumclqheHkCRgPfQ3T1wdhOHIYuqr6UwjFmFhYk0Y43lxxib0Wf50AbeZiJudo\nNZMzeBk7XR+jEeLtQyHePhQ1S5crJZ55UllKSTsI4+Fv4LVxPbw2rgcA2Lr3UNbD65ZUpM5d1M1P\n5OZY2nRzjEaIg26HOOh21Cx5HBBFGLIylBI/9BWMh9Lg9d4GeL23AQBg69odluQR9SUe3VXlJ0Dk\nXlja5FoGA8TbBkG8bRBq/mcpYLPB8G0WjF9/VV/iddvNAoAtuiusSSNgsS+ndO3mvi+TTdQOWNrU\ntvR6iIkDISYORM0vFwM2G/Q538KUdtAxjXtu+jc8NylXkNk6d3Gsh1uSRkDq3kPlJ0CkLSxtal96\nPWwJiahJSETNo/8DSBL0p3Ial/hHH8Dzow8AALZOUcCY0fDqkwAxcQDEhETI/gEqPwki9bC0SV06\nHWz9+qOmX3/U/OKXSol//x2MaV/BlKYsqeD99+GL9x13EXv0VKb3hAF1RT5Aefk2og6ApU3aotPB\nFt8Xtvi+qF24CJBlhJUWonx/GgyZGcpb1kl4frIF+GSL4262LtH1JZ44AGLiwDY5Z5zcT2VlJXbv\n/j/MnHlPm32PNWt+i+TkkRg9elybfQ87ljZpmyAAvXrBHNQJ5pRZyudkGbr8844CN2RmwJhxEh67\nPoPHrs8cd7WFR9SXeMJAiIkDIHWJ5g86Oxj7ftpNS1uSJOh07vc6MCxtcj+CACm6KyzRXWGZdqfj\n07qLhTBknmwwkWfA4z9fwOM/XziOkYKCHAVuf7N17wm44V9edzVokE+znz9+vMolxzfVcD9tvV4P\nLy9vREVF4ttvc/Dqq39Gaurj2LBhEwBlL+3a2hosWPALFBTk47XXXkFZWSk8PT2Rmvocunbtds3v\nc/ToYXz44fsoKSnG4sVPIClphFP5rhdLm24ZUkQkLBMnwzJxsuNzwpUrMGTVl7gh82T962va7+fr\nBzEh0bE+LiYOhC02DjDwr8etoOF+2unpx5Ga+gTWrn0VRqPfTe+l3VBh4U/429/eRH7+eSxd+hg2\nbdoGo9Ho8ufDP5V0S5NDQmAdMw7WMfVrjUJ5GQzZWfVTeVYGjIcPwXTo6/r7eXlB7NvfsT4uJg6A\n2DseMJnUeBq3FGcn5Bs9vjV9+/ZDVFRUi5exO7uXdkPjxk0EAHTpEo2oqM748ccfmt1c6maxtKnD\nkf0DHOeCO1RVwZCT3WAiz4AhIx3G40fr72c0QozvpxR4/0Rg2CAIIZ2VnQ65Tu42PD09Hbf1ej1s\ntvrXibRYzAAAWZbg5+fveLUbZzSd2K81wd8sljYRAPj4OPZUcTCbYfgup9FZK4Zvs2HMPOk4JBSA\n5B8AW2wsbDFxsMX1glj33tajJ+Dh0f7PhRrx9vZGdd3OlE33xwsKCkZpaQnKy8vh6emJtLSvMGxY\nEry9fdCpUxT27v1Pq3tp2+3d+x9MnjwNFy4U4MKFghbXv28GS5voWjw8IA64DeKA2+o/Z7VCn3sa\nhqwM+F/4EeaMbOjP5MKQlQnjieON7i7rdJC6doMYG+codFtsHMTYXsqLSXA6bxcN99M2mTwQHBzs\n+Jor9tK2i47uhsWLF6GkpBhPPbWyTdazAW7Nqipmco4WMwFNcokidOd+hOFMLvS5udCfyVXKPS8X\nustFV93XMZ3H1he5LTbupqdzLf5aMZNzuDUrUXsyGCD1jIGlZwzQ4OwVABBKS6DPy4U+LxeGuvf6\nvNOtT+f2Iq9bcuF0TgBLm6jNyYFBEAcPgTh4CMwNvyCK0J/7oa7E86DPO11X7KeVc8sbnF8O1E
3n\nccpSixjXS1lyccF0Ts7bsGEd9u79DwRBgCzLEAQBY8dOwNy5C9otA5dHVMRMztFiJqBtc101neee\nVpZczv4XgtXa6FjHdB7XCx59eqEyJBK26GhIXaJh69IVcmioqhO6Fn//tJrJGZy0iTTIqem8bu3c\nUFfoHrs/B3Z/Dt+mj+XlBVvnLkqJR3eFFN0VtrpCl6KjIUV2AvT6dnx2dDNY2kTuxGCArWcsbD1j\ngTumNPqSUFKM0MorKMv8Dvr8c9Dln4f+/Pm69z/CkJfb7EPKBgOkqM6wdbFP59GOYpeio2HrHM3l\nFw1haRPdIuSgYKBXN1iir3FaWmUl9PnnlUI/fx76/PPQ5Z9zFLvx0NcQrrFaaguPUAo8uiukLg0K\nvW5al32d+6893TyWNlFH4esLW5942PrEN/91sxm6CwV1ZX4e+vPn6m+fOwdDxkkYjx9r9q5SYKBS\n4F2i69bT64sdiX0A2YNLMC7C0iYihYcHpB49IfXo2fzXbTboLhbWTen1yy/224b/5kHIzmz2rqF6\nPaTQMEjhEZAiIhq/D49s9DG8vdvwSbo/ljYROUevhxTVGVJUZ4hDh139dVmGUFzcYPlFKXPv4iKI\n5wuUrXPP5ELIymjx20h+/pDCwyFFRCrvHcVu/1wEpIhIyMHBHXJLXZY2EbmGIEAOCYEYEgI0uPTf\nO8wPpQ1OrxMqK6C7dBG6ixfr3hdCd+lS3fv6z+v/e+aaa+xA3Q9Qw8KbTO0RjlJvWPJosEmUu2Np\nE1G7kn39YPP1U86AaYnVCt2Vy1eVeePCvwjD96cgZKS3+FBSQGD91B4RAXTtAm9PX0jBIZCCgyEH\nBUMKCoYcEgIpKFjTJc/SJiJtMhohRXZSziNviSxDqChvMq03md7r3gy5px13a/71cOoe0ttbKfSg\nukIPaVDswcH1X6u7LQcHQ/bxbZeLmFjaROTeBAGyfwBs/gHKKw61xGKB7nIRQsQqlJ45D11JMYSS\nYuiuXGl0Wygpga6kGIYzeRCqnXsRBtlobDSty0H1hS4FBSsTfXDj4pcDAq97XZ6lTUQdh8kEKaoz\nEOYHa9dezt3HbFYKvbgYuuIrSrHbbxcX15e9/eOfLsBwKseph5Z1OsiBgZCCQ4AG/wtoCUubiKgl\nHh7KEk1kJ9icvY8oQigtVQq9boq/ZvHX3XYWS5uIyNUMBsihobCFhgJOvkxkmJMP3fFOciQicmMs\nbSIiN8LSJiJyIyxtIiI30uoPIi0WCx588EFYrVbYbDZMmjQJixcvbo9sRETURKulbTKZsGHDBnh5\necFms+H+++/HqFGjkJiY2B75iIioAaeWR7y8vAAoU7coim0aiIiIrs2p0pYkCSkpKUhOTkZycjKn\nbCIilTh1cY1Op8O2bdtQWVmJX/3qV8jLy0NsbAs7dHXvjmDp6i0Vi49nN3t48KD+zX7epcfrhKsy\nqZoHuCqT6nmaZNJEngaZNJPH7tyPmsrD42+N41tzXVdE+vr6YsiQITh48GDLpQ1Ar7t6t6trvkR8\nM8e2xfFNM6mdp2kmLeRpmEkreeyZtJSnxfuolMd+/FX3UznPVffVQJ5GH2skj7MEWW5hl3EAxcXF\nMBqN8PPzQ21tLRYuXIhFixZh9OjRLT5wUYNNz7UgLMyPmZzATM7TYi5mco5WMzmj1Um7qKgIzzzz\nDCRJgiRJmDp1aquFTUREbaPV0u7duze2bt3aHlmIiKgVvCKSiMiNsLSJiNwIS5uIyI2wtImI3AhL\nm4jIjbC0iYjcCEubiMiNsLSJiNwIS5uIyI2wtImI3AhLm4jIjbC0iYjcCEubiMiNsLSJiNwIS5uI\nyI2wtImI3AhLm4jIjbC0iYjcCEubiMiNsLSJiNwIS5uIyI2wtImI3AhLm4jIjbC0iYjcCEubiMiN\nsLSJiNwIS5uIyI2wtImI3AhLm4jIjbC0iYjcCEubiMiNsLSJiNwIS5uIyI2wtImI3AhLm4jIjbC0\niYjciKG1AwoLC5GamorLly9Dr9dj9uzZmDdvXntkIyKiJlotbb1ej2effRbx8fGoqqrC3XffjeTk\nZMTExLRHPiIiaqDV5ZGwsDDEx8cDAHx8fBATE4NLly61eTAiIrrada1p5+fn47vvvkNiYmJb5SEi\noha0ujxiV1VVhaVLl2LlypXw8fFp8dju3QFJuvqY48ermj1+0KDmH8+Vx+t0V2dSMw+AqzKpnadp\nJi3kaZhJK3nszp1r9tOq5eHxt8bxrXGqtEVRxNKlS3HXXXdhwoQJTj2wTnf1EB8W5neNY5t/DFcf\n3zST2nmaZtJCnoaZtJLHnklLeVq6j1p57Mc3vZ/aeZre1kKehh9rJY+zBFmW5dYOSk1NRVBQEJ59\n9lmnH7ioqOKGArWVsDA/ZnICMzlPi7mYyTlazeSMVte0jx8/ju3bt+Obb75BSkoKZs6ciQMHDtx0\nQCIiun6tLo8MGjQIp06dao8sRETUCl4RSUTkRljaRERuhKVNRORGWNpERG6EpU1E5EacviKSiIiu\nnyQBZWVASYmA4uL6t5ISwfG5khIBn37q3OOxtImInGSxoFHRNizgpkWsfAyUlgqQJMFlGVjaRNTh\nyDJQWYmrCvdaU7D981VVzpWvXi8jKEhGaKiMuDgJQUEygoOVt6Ag1L2XHe+DgmQAvk49NkubiG4Z\nNTVAUZGAixcFXLqkw6VLyu2iIuVj5fMCLl8GLBbnLhv39lZKtUcPqVHR1pdw4/INCZHh5wcIrhuu\nG2FpE5GmSZIyEV+6JDhK2F7IDd8uXtShvLzlpvTwkBERIWPgQMDfX2yxfO23vbza6Yk6iaVNRKqo\nqUGjwm1cwvVTcVGRAFFsuYxDQiR07izhtttkhIfLiIiQEB5uvy3X3Zbg769MwMqGUTXt9Exdi6VN\nRC4likBhoYD8fB3OnxdQUQGcPevRYEpWStn5qVhCeLjUqIAblnJYmAyjsZ2enAawtInoutTUABcu\nCDh/Xof8fB3y8+23laK+cEGAzda0kE2OW9eaiusnYuVzbbku7M5Y2kTUSFkZGpVw49sCLl9u/po8\nQZARGSnjZz+T0KWL/U1GfLwnPD2rEBGhnE3RkabitsDSJupAZFlZR25Ywsq0XH+7oqL58dZolNG5\ns4z4eBFdusjo0kVCdLTkuB0VJcNkuvp+YWGeKCqS2viZdRwsbaJbiNUKnDvXtJDrlzIKCgSYzc2X\nso+P3KiEu3SxfywhOlpZtmjppdeofbC0idyMLAM//SQgL0+H3FwdzpzRIS9PeV9QAEhS8xdphIZK\niI9X1pPrC7m+mAMDuYbsDljaRBpVXQ2cOaOUccNyzsvTobr66naNiJCQlARERFgbTczR0TI6d5bg\n7a3CkyCXY2kTqcg+Nefm1heyfWrOz796LcLTU0bPnhJiYxu/xcQoZ1so5x/XqvBMqL2wtInagX1q\nbljK9um5uak5MlLCyJEiYmIal3OXLlxX7uhY2kQuI
svK+csNJ2b7W0HBtafmuDjJUc72277O7R1E\nHRBLm+g6WSzA99/rcOkScOKEqdH03NzU3KmTMjU3XMqIi5PQuTOnZrp+LG2iFtTUADk5OmRm6pGV\npbw/dUoHq9Vezh4AAC+va681c2omV2JpE9WprASys/XIzKwv6dOndY0uyfbwkJGQIKF/fxsGDzYh\nIqIasbGcmqn9sLSpQyopAbKylIJW3utx5kzj1vX2ljF4sA2JiRISEpT3cXGS4zLssDATiopsKqSn\njoylTbe8S5cEx9KGvaTPnWtc0AEBMkaOFJGQICEx0YbERBt69uT0TNrD0qZbhv3sjYblnJmpQ2Fh\n4+YNDZUwbpyIxESbo6S7dpV5NSC5BZY2uSVZBn74QXAUs30N+sqVxgUdFSVh8mRrgwlaQmQkC5rc\nF0ubNM9mA06f1jUq56ws/VWb6HfrJiEpyepYg05IkBAWJquUmqhtsLRJc8xmID1dj6+/1iMtTY/j\nx4Hqah/H1wVBeYXrCRPqp+f+/W0IDFQxNFE7YWmT6mprgRMnlIJOS9Pj2DE9amvrp+h+/YCEBKtj\nDbpfPxvPfaYOi6VN7a6mBjh+vL6kjx/XO/Z4FgQZfftKSE62YfhwG4YPF9G7NzdBIrJjaVObq64G\njh2rL+lQSYIFAAANpklEQVQTJ/SwWOpLun9/CUlJNiQl2TBsmIigIJUDE2kYS5tcrqrq6pK2X/at\n09WXdHKyiKFDuRZNdD1Y2nTTKiuBo0ftJW1AeroOolhf0omJ9klaKemAAJUDE7kxljZdt8pK4MgR\n+9kdBmRk1Je0Xi9jwAAJw4crk/SQITb4+6scmOgW0mppr1y5Evv27UNISAi2b9/eHplIYyoqgMOH\n6yfpjIz6TZT0ehkDB0pIShKRnGzDkCE8s4OoLbVa2nfffTfmzp2L1NTU9shDGlBeDnzzjVLQaWnK\nFYeSpJS0wSDjttskJCeLGD6cJU3U3lot7cGDB6OgoKA9spBKZBk4eVKHXbsMOHgQSE/3dZS00ajs\ndGc/u+P2223w8WnlAYmozXBNu4OyWoFDh/TYtcuAXbsMuHBB2bPDaARuv92G5GSlpAcPtvFVvIk0\npM1KOyzMr60e+oZ19EzV1cAXXwBbtwLbtyt7SgNAYCAwdy6QkgJMmgT4+BigtX/Ptfh7B2gzFzM5\nR4uZnNFmfzOLiira6qFvSFiYX4fMVFICfPGFATt3GrBvnwE1NcqyR2SkhAULREydKiIpyebY2N/H\np2P+Ot0ILeZiJudoNZMznCptWeZOae7kwgUBu3YpRZ2Wpnec6REba8PUqUpRDxwocYN/IjfUammv\nWLEChw8fRmlpKcaMGYMlS5Zg1qxZ7ZGNrkNurg47dypFnZ6ud3z+ttuUop4yRUSvXpKKCYnIFVot\n7bVr17ZHDrpOkqSc8WEv6rw8paj1euVls+xFHRXF/yUR3Uq09dMmapHVCqSl1Z/x8dNPyvqGl5eM\nKVOsmDpVxB13cMMlolsZS1vjqquBvXuVaXr3bgNKS5X16cBAGffeqxT1mDEiT8sj6iBY2hpUUgJ8\n/rlS1Pv315/xERUlYdYspaiHDas/44OIOg6WtkYUFAiOZY+GZ3z06lX/g8SBAyW+IC1RB8fSVtGp\nU8C775qwc6cBJ0/Wn/Hxs5/ZT82zIjaWP0gkonos7XZWWgp89JER775rxKlTAOABg0HGqFH1Z3x0\n6sSiJqLmsbTbgSwDR4/qsGGDCZ9+akBtrQCjUcbMmcCECTWYOFHkq7cQkVNY2m2orEyZqjduNOLU\nKWX5o0cPCXPnmjFnjoi+fX1RVCSqnJKI3AlL28VkGTh2TIeNG0345BPlzA+jUcZdd1kxd64VI0bY\nePk4Ed0wlraLlJUBmzcbsWFD/VTdrZuEuXMtuP9+K8LCuE5NRDePpX0TZBk4cUJZq962TZmqDQYZ\nM2YoU/XIkZyqici1WNo3oLy8fqrOyWk8Vd93nxXh4ZyqiahtsLSdJMtAeroOGzYYsW2bEdXVylQ9\nfboV8+ZZMWoUp2oianss7VZUVChT9caNRmRnK1N11671U3VEBKdqImo/LO1m2F/oduNGI7ZsUaZq\nvV7GtGnKVD16NKdqIlIHS7uBysr6qTorq36q/vnPlTNAOFUTkdpY2gAyMpS16o8/rp+qp05Vpuox\nYzhVE5F2dNjSrqwEtmxRzgDJzFSm6i5dJCxdasEDD1gRGcmpmoi0p8OVdmamDu+8o6xVV1UpU/Xk\nyVbMn69M1Xp9649BRKSWDlHalZXAtm3A3//u7dgCtXNnCYsXK1M1d9UjIndxS5d2eTnwxhsmvP66\nCeXlgE6nw+TJylr12LGcqonI/dySpV1ZCbz1lgl/+5sJpaUCQkIkrF4tICWliq9OTkRu7ZYq7epq\nYN06I/72NxOuXNEhMFDGc8+ZsXChBT16+KGoiIVNRO7tlijt2lpgwwYj/vxnE4qKdPD3l5Gaasai\nRRb4+6udjojIddy6tM1m4N13lbIuLNTBx0fG8uVmPPaYha8EQ0S3JLcsbasVeP99I/70JxMKCnTw\n9paxZIkZv/qVFSEhXAIholuXW5W2KAIffWTA2rUeOHdOB09PGY89ZsGSJRa+yAARdQhuUdo2G7Bl\niwF//KMHzp7VwWSS8cgjFixbZuF+IETUoWi6tCUJ+PRTA1591YTcXD2MRhkPPWTB449beOoeEXVI\nmixtSQJ27lTK+tQpPfR6GT//uVLWXbuyrImo49JUacsy8MUXerz8sgeys/XQ6WTMmWPF8uVm9OjB\nsiYi0kRpyzKwd69S1unpegiCjLvvtuLJJ82IjWVZExHZqVrasgwcPKiU9dGjykYgM2ZY8eSTFvTp\nI6kZjYhIk1Qr7UOH9HjpJRMOHVIiTJlixVNPWdC/P8uaiOha2r20jx7V4aWXPHDwoPKtJ04UkZpq\nxoABLGsiotY49UJaBw4cwOTJkzFp0iS88cYbN/SNTpzQ4b77vDBtmg8OHjRgzBgRu3ZV4b33aljY\nREROanXSliQJL7zwAtavX4/w8HDcc889GD9+PGJiYpz6BllZOrzyigc+/1z5ViNGiEhNtWDYMNvN\nJSci6oBaLe3MzEx069YNnTt3BgBMmzYNe/bsabW0c3J0ePVVE3bsMAIAhg4V8fTTFowYwbImIrpR\nrZb2xYsX0alTJ8fHERERyMrKavE+990HfPihN2RZwKBBNjz9tBmjR9sgCDcfmIioI2u1tGX5+s+T\n3rQJGDBAwtNPmzF+PMuaiMhVWi3tyMhIXLhwwfHxxYsXER4e3uJ9lJ7XA/C+yXiuFRbmp3aEqzCT\nc7SYCdBmLmZyjhYzOaPVs0cSEhJw7tw5FBQUwGKxYMeOHRg/fnx7ZCMioiZanbT1ej1WrVqFhx9+\nGLIs45577nH6zBEiInItQb6RRWsiIlKFUxfXEBGRNrC0iYjcCEubiMiNuHTDqAMHDmDNmjWQZRmz\nZs3CokWL
XPnwN2TlypXYt28fQkJCsH37drXjAAAKCwuRmpqKy5cvQ6/XY/bs2Zg3b56qmSwWCx58\n8EFYrVbYbDZMmjQJixcvVjWTnSRJmDVrFiIiIvD666+rHQfjxo2Dr68vdDodDAYDNm/erHYkVFRU\n4LnnnkNubi50Oh3WrFmDAQMGqJrp7NmzeOKJJyAIAmRZxvnz57Fs2TLV/6yvX78emzdvhiAI6NWr\nF1588UWYTCZVM73zzjuOP0et9oHsIjabTZ4wYYKcn58vWywWecaMGXJeXp6rHv6GHT16VM7JyZGn\nT5+udhSHS5cuyTk5ObIsy3JlZaV8xx13aOLXqrq6WpZlWRZFUZ49e7ackZGhciLF22+/La9YsUJ+\n9NFH1Y4iy7Isjxs3Ti4tLVU7RiNPP/20vHnzZlmWZdlqtcoVFRUqJ2rMZrPJycnJ8oULF1TNUVhY\nKI8bN042m82yLMvysmXL5K1bt6qa6fTp0/L06dNls9ksi6IoP/TQQ/KPP/54zeNdtjzScI8So9Ho\n2KNEbYMHD4a/v7/aMRoJCwtDfHw8AMDHxwcxMTG4dOmSyqkALy8vAMrULYqiymkUhYWF2L9/P2bP\nnq12FAdZliFJ2tmZsrKyEseOHcOsWbMAAAaDAb6+viqnaiwtLQ1du3ZttCWGWiRJQk1NDURRRG1t\nbasXC7a1M2fOYODAgTCZTNDr9bj99tuxe/fuax7vstJubo8SLRSR1uXn5+O7775DYmKi2lEgSRJS\nUlKQnJyM5ORkTWRas2YNUlNTIWhoLwRBELBw4ULMmjULH374odpxkJ+fj6CgIDz77LOYOXMmVq1a\nhdraWrVjNbJz505MmzZN7RiIiIjAggULMGbMGIwaNQp+fn5ISkpSNVNcXByOHj2KsrIy1NTU4MCB\nA/jpp5+uebzLSlvm6d7XraqqCkuXLsXKlSvh4+OjdhzodDps27YNBw4cQEZGBvLy8lTNs2/fPoSG\nhiI+Pl5Tf74++OADbNmyBW+++Sbee+89HDt2TNU8oigiJycHDzzwALZu3QpPT88b3ve+LVitVnz5\n5ZeYMmWK2lFQXl6OPXv2YO/evTh48CCqq6tV/1lXTEwMfvGLX2DBggVYtGgR+vTpA4Ph2j9udFlp\n38geJR2ZKIpYunQp7rrrLkyYMEHtOI34+vpiyJAhOHjwoKo5Tpw4gS+//BLjx4/HihUrcPjwYaSm\npqqaCVCWtwAgODgYEydObHXXy7YWGRmJyMhIJCQkAAAmTZqEnJwcVTM1dODAAfTr1w/BwcFqR0Fa\nWhqio6MRGBgIvV6PiRMnIj09Xe1YmDVrFrZs2YKNGzciICAA3bp1u+axLittLe9RoqUpzW7lypWI\njY3F/Pnz1Y4CACguLkZFRQUAoLa2FocOHULPnj1VzbR8+XLs27cPe/bswWuvvYahQ4filVdeUTVT\nTU0NqqqqAADV1dX46quvEBcXp2qm0NBQdOrUCWfPngUAfPPNN5raamLHjh2YPn262jEAAFFRUcjI\nyIDZbIYsy5r5tSouLgYAXLhwAbt3727x18tlp/xpdY8S+4RWWlqKMWPGYMmSJY4f2Kjl+PHj2L59\nO3r16oWUlBQIgoAnnngCo0aNUi1TUVERnnnmGUiSBEmSMHXqVIwePVq1PFp1+fJlLF68GIIgwGaz\n4c4778SIESPUjoXnn38eTz75JERRRHR0NF588UW1IwFQBoC0tDT87ne/UzsKACAxMRGTJk1CSkoK\nDAYD+vbti3vvvVftWFiyZAnKyspgMBjwm9/8Bn5+196BkHuPEBG5EV4RSUTkRljaRERuhKVNRORG\nWNpERG6EpU1E5EZY2kREboSlTUTkRljaRERu5P8D+7Wym3BFpegAAAAASUVORK5CYII=\n",
- "text/plain": [
- "\u003cmatplotlib.figure.Figure at 0x7f5be4b8ec50\u003e"
- ]
- },
- "metadata": {
- "tags": []
- },
- "output_type": "display_data"
- }
- ],
- "source": [
- "model = Model()\n",
- "\n",
- "# Collect the history of W-values and b-values to plot later\n",
- "Ws, bs = [], []\n",
- "epochs = range(10)\n",
- "for epoch in epochs:\n",
- " Ws.append(model.W.numpy())\n",
- " bs.append(model.b.numpy())\n",
- " current_loss = loss(model(inputs), outputs)\n",
- "\n",
- " train(model, inputs, outputs, learning_rate=0.1)\n",
- " print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' %\n",
- " (epoch, Ws[-1], bs[-1], current_loss))\n",
- "\n",
- "# Let's plot it all\n",
- "plt.plot(epochs, Ws, 'r',\n",
- " epochs, bs, 'b')\n",
- "plt.plot([TRUE_W] * len(epochs), 'r--',\n",
- " [TRUE_b] * len(epochs), 'b--')\n",
- "plt.legend(['W', 'b', 'true W', 'true_b'])\n",
- "plt.show()\n",
- " "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "vPnIVuaSJwWz"
- },
- "source": [
- "## Next Steps\n",
- "\n",
- "In this tutorial we covered `Variable`s and built and trained a simple linear model using the TensorFlow primitives discussed so far.\n",
- "\n",
- "In theory, this is pretty much all you need to use TensorFlow for your machine learning research.\n",
- "In practice, particularly for neural networks, the higher level APIs like `tf.keras` will be much more convenient since it provides higher level building blocks (called \"layers\"), utilities to save and restore state, a suite of loss functions, a suite of optimization strategies etc. \n",
- "\n",
- "The [next tutorial](TODO) will cover these higher level APIs."
- ]
- }
- ],
- "metadata": {
- "colab": {
- "collapsed_sections": [],
- "default_view": {},
- "name": "Training Models",
- "provenance": [],
- "version": "0.3.2",
- "views": {}
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/tensorflow/contrib/eager/python/examples/notebooks/4_high_level.ipynb b/tensorflow/contrib/eager/python/examples/notebooks/4_high_level.ipynb
deleted file mode 100644
index 5749f22ac5..0000000000
--- a/tensorflow/contrib/eager/python/examples/notebooks/4_high_level.ipynb
+++ /dev/null
@@ -1,551 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "pwX7Fii1rwsJ"
- },
- "outputs": [],
- "source": [
- "import tensorflow as tf\n",
- "tf.enable_eager_execution()\n",
- "tfe = tf.contrib.eager\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "UEu3q4jmpKVT"
- },
- "source": [
- "# High level API\n",
- "\n",
- "We recommend using `tf.keras` as a high-level API for building neural networks. That said, most TensorFlow APIs are usable with eager execution.\n",
- "\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "zSFfVVjkrrsI"
- },
- "source": [
- "## Layers: common sets of useful operations\n",
- "\n",
- "Most of the time when writing code for machine learning models you want to operate at a higher level of abstraction than individual operations and manipulation of individual variables.\n",
- "\n",
- "Many machine learning models are expressible as the composition and stacking of relatively simple layers, and TensorFlow provides both a set of many common layers as a well as easy ways for you to write your own application-specific layers either from scratch or as the composition of existing layers.\n",
- "\n",
- "TensorFlow includes the full [Keras](https://keras.io) API in the tf.keras package, and the Keras layers are very useful when building your own models.\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "8PyXlPl-4TzQ"
- },
- "outputs": [],
- "source": [
- "# In the tf.keras.layers package, layers are objects. To construct a layer,\n",
- "# simply construct the object. Most layers take as a first argument the number\n",
- "# of output dimensions / channels.\n",
- "layer = tf.keras.layers.Dense(100)\n",
- "# The number of input dimensions is often unnecessary, as it can be inferred\n",
- "# the first time the layer is used, but it can be provided if you want to \n",
- "# specify it manually, which is useful in some complex models.\n",
- "layer = tf.keras.layers.Dense(10, input_shape=(None, 5))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "Fn69xxPO5Psr"
- },
- "source": [
- "The full list of pre-existing layers can be seen in [the documentation](https://www.tensorflow.org/api_docs/python/tf/keras/layers). It includes Dense (a fully-connected layer),\n",
- "Conv2D, LSTM, BatchNormalization, Dropout, and many others."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- },
- "height": 204
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 244,
- "status": "ok",
- "timestamp": 1527783641557,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
- },
- "id": "E3XKNknP5Mhb",
- "outputId": "c5d52434-d980-4488-efa7-5660819d0207"
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- "\u003ctf.Tensor: id=30, shape=(10, 10), dtype=float32, numpy=\n",
- "array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
- " [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
- " [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
- " [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
- " [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
- " [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
- " [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
- " [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
- " [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
- " [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]], dtype=float32)\u003e"
- ]
- },
- "execution_count": 3,
- "metadata": {
- "tags": []
- },
- "output_type": "execute_result"
- }
- ],
- "source": [
- "# To use a layer, simply call it.\n",
- "layer(tf.zeros([10, 5]))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- },
- "height": 221
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 320,
- "status": "ok",
- "timestamp": 1527783642457,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
- },
- "id": "Wt_Nsv-L5t2s",
- "outputId": "f0d96dce-0128-4080-bfe2-0ee6fbc0ad90"
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- "[\u003ctf.Variable 'dense_1/kernel:0' shape=(5, 10) dtype=float32, numpy=\n",
- " array([[ 0.43788117, -0.62099844, -0.30525017, -0.59352523, 0.1783089 ,\n",
- " 0.47078604, -0.23620895, -0.30482283, 0.01366901, -0.1288507 ],\n",
- " [ 0.18407935, -0.56550485, 0.54180616, -0.42254075, 0.3702994 ,\n",
- " 0.36705834, -0.29678228, 0.36660975, 0.36717761, 0.46269661],\n",
- " [ 0.1709305 , -0.11529458, 0.32710236, 0.46300393, -0.62802851,\n",
- " 0.51641601, 0.39624029, 0.26918125, -0.25196898, 0.21353298],\n",
- " [ 0.35752094, 0.44161648, 0.61500639, -0.12653333, 0.41629118,\n",
- " 0.36193585, 0.066082 , -0.59253877, 0.47318751, 0.17115968],\n",
- " [-0.22554061, -0.17727301, 0.5525015 , 0.3678053 , -0.00454676,\n",
- " 0.24066836, -0.53640735, 0.13792562, -0.10727292, 0.59708995]], dtype=float32)\u003e,\n",
- " \u003ctf.Variable 'dense_1/bias:0' shape=(10,) dtype=float32, numpy=array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)\u003e]"
- ]
- },
- "execution_count": 4,
- "metadata": {
- "tags": []
- },
- "output_type": "execute_result"
- }
- ],
- "source": [
- "# Layers have many useful methods. For example, you can inspect all variables\n",
- "# in a layer by calling layer.variables. In this case a fully-connected layer\n",
- "# will have variables for weights and biases.\n",
- "layer.variables"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- },
- "height": 221
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 226,
- "status": "ok",
- "timestamp": 1527783643252,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
- },
- "id": "6ilvKjz8_4MQ",
- "outputId": "f647fced-c2d7-41a3-c237-242036784665"
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- "(\u003ctf.Variable 'dense_1/kernel:0' shape=(5, 10) dtype=float32, numpy=\n",
- " array([[ 0.43788117, -0.62099844, -0.30525017, -0.59352523, 0.1783089 ,\n",
- " 0.47078604, -0.23620895, -0.30482283, 0.01366901, -0.1288507 ],\n",
- " [ 0.18407935, -0.56550485, 0.54180616, -0.42254075, 0.3702994 ,\n",
- " 0.36705834, -0.29678228, 0.36660975, 0.36717761, 0.46269661],\n",
- " [ 0.1709305 , -0.11529458, 0.32710236, 0.46300393, -0.62802851,\n",
- " 0.51641601, 0.39624029, 0.26918125, -0.25196898, 0.21353298],\n",
- " [ 0.35752094, 0.44161648, 0.61500639, -0.12653333, 0.41629118,\n",
- " 0.36193585, 0.066082 , -0.59253877, 0.47318751, 0.17115968],\n",
- " [-0.22554061, -0.17727301, 0.5525015 , 0.3678053 , -0.00454676,\n",
- " 0.24066836, -0.53640735, 0.13792562, -0.10727292, 0.59708995]], dtype=float32)\u003e,\n",
- " \u003ctf.Variable 'dense_1/bias:0' shape=(10,) dtype=float32, numpy=array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)\u003e)"
- ]
- },
- "execution_count": 5,
- "metadata": {
- "tags": []
- },
- "output_type": "execute_result"
- }
- ],
- "source": [
- "# The variables are also accessible through nice accessors\n",
- "layer.kernel, layer.bias"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "O0kDbE54-5VS"
- },
- "source": [
- "## Implementing custom layers\n",
- "The best way to implement your own layer is extending the tf.keras.Layer class and implementing:\n",
- " * `__init__` , where you can do all input-independent initialization\n",
- " * `build`, where you know the shapes of the input tensors and can do the rest of the initialization\n",
- " * `call`, where you do the forward computation\n",
- "\n",
- "Note that you don't have to wait until `build` is called to create your variables, you can also create them in `__init__`. However, the advantage of creating them in `build` is that it enables late variable creation based on the shape of the inputs the layer will operate on. On the other hand, creating variables in `__init__` would mean that shapes required to create the variables will need to be explicitly specified."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- },
- "height": 391
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 251,
- "status": "ok",
- "timestamp": 1527783661512,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
- },
- "id": "5Byl3n1k5kIy",
- "outputId": "6e7f9285-649a-4132-82ce-73ea92f15862"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "tf.Tensor(\n",
- "[[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
- " [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
- " [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
- " [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
- " [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
- " [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
- " [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
- " [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
- " [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
- " [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]], shape=(10, 10), dtype=float32)\n",
- "[\u003ctf.Variable 'my_dense_layer_1/kernel:0' shape=(5, 10) dtype=float32, numpy=\n",
- "array([[-0.4011991 , 0.22458655, -0.33237562, -0.25117266, 0.33528614,\n",
- " -0.01392961, 0.58580834, -0.16346583, 0.28465688, -0.47191954],\n",
- " [-0.52922136, 0.22416979, -0.58209574, -0.60914612, 0.05226624,\n",
- " -0.18325993, 0.5591442 , -0.24718609, 0.37148207, 0.40475875],\n",
- " [ 0.16912812, -0.47618777, -0.38989353, 0.30105609, -0.08085585,\n",
- " 0.44758242, 0.545829 , 0.51421839, 0.11063248, 0.20159996],\n",
- " [ 0.34073615, -0.59835428, 0.06498981, -0.44489855, -0.34302285,\n",
- " 0.20969599, 0.35527444, -0.03173476, -0.22227573, 0.09303057],\n",
- " [ 0.41764337, -0.06435019, -0.52509922, -0.39957345, 0.56811184,\n",
- " 0.23481232, -0.61666459, 0.31144124, -0.11532354, -0.42421889]], dtype=float32)\u003e]\n"
- ]
- }
- ],
- "source": [
- "class MyDenseLayer(tf.keras.layers.Layer):\n",
- " def __init__(self, num_outputs):\n",
- " super(MyDenseLayer, self).__init__()\n",
- " self.num_outputs = num_outputs\n",
- " \n",
- " def build(self, input_shape):\n",
- " self.kernel = self.add_variable(\"kernel\", \n",
- " shape=[input_shape[-1].value, \n",
- " self.num_outputs])\n",
- " \n",
- " def call(self, input):\n",
- " return tf.matmul(input, self.kernel)\n",
- " \n",
- "layer = MyDenseLayer(10)\n",
- "print(layer(tf.zeros([10, 5])))\n",
- "print(layer.variables)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "tk8E2vY0-z4Z"
- },
- "source": [
- "Note that you don't have to wait until `build` is called to create your variables, you can also create them in `__init__`.\n",
- "\n",
- "Overall code is easier to read and maintain if it uses standard layers whenever possible, as other readers will be familiar with the behavior of standard layers. If you want to use a layer which is not present in tf.keras.layers or tf.contrib.layers, consider filing a [github issue](http://github.com/tensorflow/tensorflow/issues/new) or, even better, sending us a pull request!"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "Qhg4KlbKrs3G"
- },
- "source": [
- "## Models: composing layers\n",
- "\n",
- "Many interesting layer-like things in machine learning models are implemented by composing existing layers. For example, each residual block in a resnet is a composition of convolutions, batch normalizations, and a shortcut.\n",
- "\n",
- "The main class used when creating a layer-like thing which contains other layers is tf.keras.Model. Implementing one is done by inheriting from tf.keras.Model."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- },
- "height": 190
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 420,
- "status": "ok",
- "timestamp": 1527783698512,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
- },
- "id": "N30DTXiRASlb",
- "outputId": "a8b23a8e-5cf9-4bbf-f93b-6c763d74e2b3"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "tf.Tensor(\n",
- "[[[[ 0. 0. 0.]\n",
- " [ 0. 0. 0.]\n",
- " [ 0. 0. 0.]]\n",
- "\n",
- " [[ 0. 0. 0.]\n",
- " [ 0. 0. 0.]\n",
- " [ 0. 0. 0.]]]], shape=(1, 2, 3, 3), dtype=float32)\n",
- "['resnet_identity_block_1/conv2d_3/kernel:0', 'resnet_identity_block_1/conv2d_3/bias:0', 'resnet_identity_block_1/batch_normalization_3/gamma:0', 'resnet_identity_block_1/batch_normalization_3/beta:0', 'resnet_identity_block_1/conv2d_4/kernel:0', 'resnet_identity_block_1/conv2d_4/bias:0', 'resnet_identity_block_1/batch_normalization_4/gamma:0', 'resnet_identity_block_1/batch_normalization_4/beta:0', 'resnet_identity_block_1/conv2d_5/kernel:0', 'resnet_identity_block_1/conv2d_5/bias:0', 'resnet_identity_block_1/batch_normalization_5/gamma:0', 'resnet_identity_block_1/batch_normalization_5/beta:0', 'resnet_identity_block_1/batch_normalization_3/moving_mean:0', 'resnet_identity_block_1/batch_normalization_3/moving_variance:0', 'resnet_identity_block_1/batch_normalization_4/moving_mean:0', 'resnet_identity_block_1/batch_normalization_4/moving_variance:0', 'resnet_identity_block_1/batch_normalization_5/moving_mean:0', 'resnet_identity_block_1/batch_normalization_5/moving_variance:0']\n"
- ]
- }
- ],
- "source": [
- "class ResnetIdentityBlock(tf.keras.Model):\n",
- " def __init__(self, kernel_size, filters):\n",
- " super(ResnetIdentityBlock, self).__init__(name='')\n",
- " filters1, filters2, filters3 = filters\n",
- "\n",
- " self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))\n",
- " self.bn2a = tf.keras.layers.BatchNormalization()\n",
- "\n",
- " self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same')\n",
- " self.bn2b = tf.keras.layers.BatchNormalization()\n",
- "\n",
- " self.conv2c = tf.keras.layers.Conv2D(filters3, (1, 1))\n",
- " self.bn2c = tf.keras.layers.BatchNormalization()\n",
- "\n",
- " def call(self, input_tensor, training=False):\n",
- " x = self.conv2a(input_tensor)\n",
- " x = self.bn2a(x, training=training)\n",
- " x = tf.nn.relu(x)\n",
- "\n",
- " x = self.conv2b(x)\n",
- " x = self.bn2b(x, training=training)\n",
- " x = tf.nn.relu(x)\n",
- "\n",
- " x = self.conv2c(x)\n",
- " x = self.bn2c(x, training=training)\n",
- "\n",
- " x += input_tensor\n",
- " return tf.nn.relu(x)\n",
- "\n",
- " \n",
- "block = ResnetIdentityBlock(1, [1, 2, 3])\n",
- "print(block(tf.zeros([1, 2, 3, 3])))\n",
- "print([x.name for x in block.variables])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "wYfucVw65PMj"
- },
- "source": [
- "Much of the time, however, models which compose many layers simply call one layer after the other. This can be done in very little code using tf.keras.Sequential"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- },
- "base_uri": "https://localhost:8080/",
- "height": 153
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 361,
- "status": "ok",
- "timestamp": 1526674830777,
- "user": {
- "displayName": "Alexandre Passos",
- "photoUrl": "//lh4.googleusercontent.com/-kmTTWXEgAPw/AAAAAAAAAAI/AAAAAAAAAC0/q_DoOzKGwds/s50-c-k-no/photo.jpg",
- "userId": "108023195365833072773"
- },
- "user_tz": 420
- },
- "id": "L9frk7Ur4uvJ",
- "outputId": "882e9076-b6d9-4380-bb1e-7c6b57d54c39"
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- "\u003ctf.Tensor: id=1423, shape=(1, 2, 3, 3), dtype=float32, numpy=\n",
- "array([[[[0., 0., 0.],\n",
- " [0., 0., 0.],\n",
- " [0., 0., 0.]],\n",
- "\n",
- " [[0., 0., 0.],\n",
- " [0., 0., 0.],\n",
- " [0., 0., 0.]]]], dtype=float32)\u003e"
- ]
- },
- "execution_count": 26,
- "metadata": {
- "tags": []
- },
- "output_type": "execute_result"
- }
- ],
- "source": [
- " my_seq = tf.keras.Sequential([tf.keras.layers.Conv2D(1, (1, 1)),\n",
- " tf.keras.layers.BatchNormalization(),\n",
- " tf.keras.layers.Conv2D(2, 1, \n",
- " padding='same'),\n",
- " tf.keras.layers.BatchNormalization(),\n",
- " tf.keras.layers.Conv2D(3, (1, 1)),\n",
- " tf.keras.layers.BatchNormalization()])\n",
- "my_seq(tf.zeros([1, 2, 3, 3]))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "c5YwYcnuK-wc"
- },
- "source": [
- "# Next steps\n",
- "\n",
- "Now you can go back to the previous notebook and adapt the linear regression example to use layers and models to be better structured."
- ]
- }
- ],
- "metadata": {
- "colab": {
- "collapsed_sections": [],
- "default_view": {},
- "name": "4 - High level API - TensorFlow Eager.ipynb",
- "provenance": [],
- "version": "0.3.2",
- "views": {}
- },
- "kernelspec": {
- "display_name": "Python 3",
- "name": "python3"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/tensorflow/contrib/eager/python/examples/notebooks/README.md b/tensorflow/contrib/eager/python/examples/notebooks/README.md
new file mode 100644
index 0000000000..0d5ed84894
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/notebooks/README.md
@@ -0,0 +1,11 @@
+## Research and experimentation
+
+Eager execution provides an imperative, define-by-run interface for advanced
+operations. Write custom layers, forward passes, and training loops with
+automatic differentiation. Start with these notebooks, then read the
+[eager execution guide](https://www.tensorflow.org/guide/eager).
+
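+All of the notebooks here build on the same two-line setup:
+
+```python
+import tensorflow as tf
+tf.enable_eager_execution()
+```
+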
+1. [Eager execution basics](./eager_basics.ipynb)
+2. [Automatic differentiation and gradient tapes](./automatic_differentiation.ipynb)
+3. [Custom training: basics](./custom_training.ipynb)
+4. [Custom layers](./custom_layers.ipynb)
diff --git a/tensorflow/contrib/eager/python/examples/notebooks/automatic_differentiation.ipynb b/tensorflow/contrib/eager/python/examples/notebooks/automatic_differentiation.ipynb
new file mode 100644
index 0000000000..7c0f9b5b81
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/notebooks/automatic_differentiation.ipynb
@@ -0,0 +1,364 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "name": "automatic_differentiation.ipynb",
+ "version": "0.3.2",
+ "views": {},
+ "default_view": {},
+ "provenance": [],
+ "private_outputs": true,
+ "collapsed_sections": [],
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ }
+ },
+ "cells": [
+ {
+ "metadata": {
+ "id": "t09eeeR5prIJ",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "##### Copyright 2018 The TensorFlow Authors."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "GCCk8_dHpuNf",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "cellView": "form"
+ },
+ "cell_type": "code",
+ "source": [
+ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "#\n",
+ "# https://www.apache.org/licenses/LICENSE-2.0\n",
+ "#\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License."
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "xh8WkEwWpnm7",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Automatic differentiation and gradient tape"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "idv0bPeCp325",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "<table class=\"tfo-notebook-buttons\" align=\"left\"><td>\n",
+ "<a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/automatic_differentiation.ipynb\">\n",
+ " <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n",
+ "</td><td>\n",
+ "<a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/automatic_differentiation.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a></td></table>"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "vDJ4XzMqodTy",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "In the previous tutorial we introduced `Tensor`s and operations on them. In this tutorial we will cover [automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation), a key technique for optimizing machine learning models."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "GQJysDM__Qb0",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Setup\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "OiMPZStlibBv",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "import tensorflow as tf\n",
+ "tf.enable_eager_execution()\n",
+ "\n",
+ "tfe = tf.contrib.eager # Shorthand for some symbols"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "1CLWJl0QliB0",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Derivatives of a function\n",
+ "\n",
+ "TensorFlow provides APIs for automatic differentiation - computing the derivative of a function. The way that more closely mimics the math is to encapsulate the computation in a Python function, say `f`, and use `tfe.gradients_function` to create a function that computes the derivatives of `f` with respect to its arguments. If you're familiar with [autograd](https://github.com/HIPS/autograd) for differentiating numpy functions, this will be familiar. For example: "
+ ]
+ },
+ {
+ "metadata": {
+ "id": "9FViq92UX7P8",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "from math import pi\n",
+ "\n",
+ "def f(x):\n",
+ " return tf.square(tf.sin(x))\n",
+ "\n",
+ "assert f(pi/2).numpy() == 1.0\n",
+ "\n",
+ "\n",
+ "# grad_f will return a list of derivatives of f\n",
+ "# with respect to its arguments. Since f() has a single argument,\n",
+ "# grad_f will return a list with a single element.\n",
+ "grad_f = tfe.gradients_function(f)\n",
+ "assert tf.abs(grad_f(pi/2)[0]).numpy() < 1e-7"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "v9fPs8RyopCf",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "### Higher-order gradients\n",
+ "\n",
+ "The same API can be used to differentiate as many times as you like:\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "3D0ZvnGYo0rW",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "def f(x):\n",
+ " return tf.square(tf.sin(x))\n",
+ "\n",
+ "def grad(f):\n",
+ " return lambda x: tfe.gradients_function(f)(x)[0]\n",
+ "\n",
+ "x = tf.lin_space(-2*pi, 2*pi, 100) # 100 points between -2π and +2π\n",
+ "\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "plt.plot(x, f(x), label=\"f\")\n",
+ "plt.plot(x, grad(f)(x), label=\"first derivative\")\n",
+ "plt.plot(x, grad(grad(f))(x), label=\"second derivative\")\n",
+ "plt.plot(x, grad(grad(grad(f)))(x), label=\"third derivative\")\n",
+ "plt.legend()\n",
+ "plt.show()"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "-39gouo7mtgu",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Gradient tapes\n",
+ "\n",
+ "Every differentiable TensorFlow operation has an associated gradient function. For example, the gradient function of `tf.square(x)` would be a function that returns `2.0 * x`. To compute the gradient of a user-defined function (like `f(x)` in the example above), TensorFlow first \"records\" all the operations applied to compute the output of the function. We call this record a \"tape\". It then uses that tape and the gradients functions associated with each primitive operation to compute the gradients of the user-defined function using [reverse mode differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation).\n",
+ "\n",
+ "Since operations are recorded as they are executed, Python control flow (using `if`s and `while`s for example) is naturally handled:\n",
+ "\n"
+ ]
+ },
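+    {
+      "metadata": {},
+      "cell_type": "markdown",
+      "source": [
+        "As a quick, illustrative check of the claim about `tf.square` (assuming the setup cell above has run):\n",
+        "\n",
+        "```python\n",
+        "# d/dx x^2 = 2x, so the gradient of tf.square at x = 3.0 should be 6.0\n",
+        "assert tfe.gradients_function(tf.square)(3.0)[0].numpy() == 6.0\n",
+        "```"
+      ]
+    },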
+ {
+ "metadata": {
+ "id": "MH0UfjympWf7",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "def f(x, y):\n",
+ " output = 1\n",
+ " for i in range(y):\n",
+ " output = tf.multiply(output, x)\n",
+ " return output\n",
+ "\n",
+ "def g(x, y):\n",
+ " # Return the gradient of `f` with respect to it's first parameter\n",
+ " return tfe.gradients_function(f)(x, y)[0]\n",
+ "\n",
+ "assert f(3.0, 2).numpy() == 9.0 # f(x, 2) is essentially x * x\n",
+ "assert g(3.0, 2).numpy() == 6.0 # And its gradient will be 2 * x\n",
+ "assert f(4.0, 3).numpy() == 64.0 # f(x, 3) is essentially x * x * x\n",
+ "assert g(4.0, 3).numpy() == 48.0 # And its gradient will be 3 * x * x"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "aNmR5-jhpX2t",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "At times it may be inconvenient to encapsulate computation of interest into a function. For example, if you want the gradient of the output with respect to intermediate values computed in the function. In such cases, the slightly more verbose but explicit [tf.GradientTape](https://www.tensorflow.org/api_docs/python/tf/GradientTape) context is useful. All computation inside the context of a `tf.GradientTape` is \"recorded\".\n",
+ "\n",
+ "For example:"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "bAFeIE8EuVIq",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "x = tf.ones((2, 2))\n",
+ " \n",
+ "# TODO(b/78880779): Remove the 'persistent=True' argument and use\n",
+ "# a single t.gradient() call when the bug is resolved.\n",
+ "with tf.GradientTape(persistent=True) as t:\n",
+ " # TODO(ashankar): Explain with \"watch\" argument better?\n",
+ " t.watch(x)\n",
+ " y = tf.reduce_sum(x)\n",
+ " z = tf.multiply(y, y)\n",
+ "\n",
+ "# Use the same tape to compute the derivative of z with respect to the\n",
+ "# intermediate value y.\n",
+ "dz_dy = t.gradient(z, y)\n",
+ "assert dz_dy.numpy() == 8.0\n",
+ "\n",
+ "# Derivative of z with respect to the original input tensor x\n",
+ "dz_dx = t.gradient(z, x)\n",
+ "for i in [0, 1]:\n",
+ " for j in [0, 1]:\n",
+ " assert dz_dx[i][j].numpy() == 8.0"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "DK05KXrAAld3",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "### Higher-order gradients\n",
+ "\n",
+ "Operations inside of the `GradientTape` context manager are recorded for automatic differentiation. If gradients are computed in that context, then the gradient computation is recorded as well. As a result, the exact same API works for higher-order gradients as well. For example:"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "cPQgthZ7ugRJ",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# TODO(ashankar): Should we use the persistent tape here instead? Follow up on Tom and Alex's discussion\n",
+ "\n",
+ "x = tf.constant(1.0) # Convert the Python 1.0 to a Tensor object\n",
+ "\n",
+ "with tf.GradientTape() as t:\n",
+ " with tf.GradientTape() as t2:\n",
+ " t2.watch(x)\n",
+ " y = x * x * x\n",
+ " # Compute the gradient inside the 't' context manager\n",
+ " # which means the gradient computation is differentiable as well.\n",
+ " dy_dx = t2.gradient(y, x)\n",
+ "d2y_dx2 = t.gradient(dy_dx, x)\n",
+ "\n",
+ "assert dy_dx.numpy() == 3.0\n",
+ "assert d2y_dx2.numpy() == 6.0"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "4U1KKzUpNl58",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Next Steps\n",
+ "\n",
+ "In this tutorial we covered gradient computation in TensorFlow. With that we have enough of the primitives required to build an train neural networks, which we will cover in the [next tutorial](https://github.com/tensorflow/models/tree/master/official/contrib/eager/python/examples/notebooks/3_neural_networks.ipynb)."
+ ]
+ }
+ ]
+} \ No newline at end of file
diff --git a/tensorflow/contrib/eager/python/examples/notebooks/custom_layers.ipynb b/tensorflow/contrib/eager/python/examples/notebooks/custom_layers.ipynb
new file mode 100644
index 0000000000..a0bbbb6123
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/notebooks/custom_layers.ipynb
@@ -0,0 +1,399 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "name": "custom_layers.ipynb",
+ "version": "0.3.2",
+ "views": {},
+ "default_view": {},
+ "provenance": [],
+ "private_outputs": true,
+ "collapsed_sections": [],
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ }
+ },
+ "cells": [
+ {
+ "metadata": {
+ "id": "tDnwEv8FtJm7",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "##### Copyright 2018 The TensorFlow Authors."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "JlknJBWQtKkI",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "cellView": "form"
+ },
+ "cell_type": "code",
+ "source": [
+ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "#\n",
+ "# https://www.apache.org/licenses/LICENSE-2.0\n",
+ "#\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License."
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "60RdWsg1tETW",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Custom layers"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "BcJg7Enms86w",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "<table class=\"tfo-notebook-buttons\" align=\"left\"><td>\n",
+ "<a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_layers.ipynb\">\n",
+ " <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n",
+ "</td><td>\n",
+ "<a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_layers.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a></td></table>"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "UEu3q4jmpKVT",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "We recommend using `tf.keras` as a high-level API for building neural networks. That said, most TensorFlow APIs are usable with eager execution.\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "pwX7Fii1rwsJ",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "import tensorflow as tf\n",
+ "tfe = tf.contrib.eager\n",
+ "\n",
+ "tf.enable_eager_execution()"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "zSFfVVjkrrsI",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Layers: common sets of useful operations\n",
+ "\n",
+ "Most of the time when writing code for machine learning models you want to operate at a higher level of abstraction than individual operations and manipulation of individual variables.\n",
+ "\n",
+ "Many machine learning models are expressible as the composition and stacking of relatively simple layers, and TensorFlow provides both a set of many common layers as a well as easy ways for you to write your own application-specific layers either from scratch or as the composition of existing layers.\n",
+ "\n",
+ "TensorFlow includes the full [Keras](https://keras.io) API in the tf.keras package, and the Keras layers are very useful when building your own models.\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "8PyXlPl-4TzQ",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# In the tf.keras.layers package, layers are objects. To construct a layer,\n",
+ "# simply construct the object. Most layers take as a first argument the number\n",
+ "# of output dimensions / channels.\n",
+ "layer = tf.keras.layers.Dense(100)\n",
+ "# The number of input dimensions is often unnecessary, as it can be inferred\n",
+ "# the first time the layer is used, but it can be provided if you want to \n",
+ "# specify it manually, which is useful in some complex models.\n",
+ "layer = tf.keras.layers.Dense(10, input_shape=(None, 5))"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "Fn69xxPO5Psr",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "The full list of pre-existing layers can be seen in [the documentation](https://www.tensorflow.org/api_docs/python/tf/keras/layers). It includes Dense (a fully-connected layer),\n",
+ "Conv2D, LSTM, BatchNormalization, Dropout, and many others."
+ ]
+ },
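+    {
+      "metadata": {
+        "id": "layersSketchMd",
+        "colab_type": "text"
+      },
+      "cell_type": "markdown",
+      "source": [
+        "As a quick sketch (the argument values below are illustrative, not tuned), a few of these layers are constructed the same way as `Dense`:"
+      ]
+    },
+    {
+      "metadata": {
+        "id": "layersSketchCode",
+        "colab_type": "code",
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        }
+      },
+      "cell_type": "code",
+      "source": [
+        "# Constructing a few other common layers; the arguments are illustrative.\n",
+        "conv = tf.keras.layers.Conv2D(filters=16, kernel_size=3, activation='relu')\n",
+        "lstm = tf.keras.layers.LSTM(units=32)\n",
+        "dropout = tf.keras.layers.Dropout(rate=0.5)"
+      ],
+      "execution_count": 0,
+      "outputs": []
+    },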
+ {
+ "metadata": {
+ "id": "E3XKNknP5Mhb",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# To use a layer, simply call it.\n",
+ "layer(tf.zeros([10, 5]))"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "Wt_Nsv-L5t2s",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# Layers have many useful methods. For example, you can inspect all variables\n",
+ "# in a layer by calling layer.variables. In this case a fully-connected layer\n",
+ "# will have variables for weights and biases.\n",
+ "layer.variables"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "6ilvKjz8_4MQ",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# The variables are also accessible through nice accessors\n",
+ "layer.kernel, layer.bias"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "O0kDbE54-5VS",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Implementing custom layers\n",
+ "The best way to implement your own layer is extending the tf.keras.Layer class and implementing:\n",
+ " * `__init__` , where you can do all input-independent initialization\n",
+ " * `build`, where you know the shapes of the input tensors and can do the rest of the initialization\n",
+ " * `call`, where you do the forward computation\n",
+ "\n",
+ "Note that you don't have to wait until `build` is called to create your variables, you can also create them in `__init__`. However, the advantage of creating them in `build` is that it enables late variable creation based on the shape of the inputs the layer will operate on. On the other hand, creating variables in `__init__` would mean that shapes required to create the variables will need to be explicitly specified."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "5Byl3n1k5kIy",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "class MyDenseLayer(tf.keras.layers.Layer):\n",
+ " def __init__(self, num_outputs):\n",
+ " super(MyDenseLayer, self).__init__()\n",
+ " self.num_outputs = num_outputs\n",
+ " \n",
+ " def build(self, input_shape):\n",
+ " self.kernel = self.add_variable(\"kernel\", \n",
+ " shape=[input_shape[-1].value, \n",
+ " self.num_outputs])\n",
+ " \n",
+ " def call(self, input):\n",
+ " return tf.matmul(input, self.kernel)\n",
+ " \n",
+ "layer = MyDenseLayer(10)\n",
+ "print(layer(tf.zeros([10, 5])))\n",
+ "print(layer.variables)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "tk8E2vY0-z4Z",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "Note that you don't have to wait until `build` is called to create your variables, you can also create them in `__init__`.\n",
+ "\n",
+ "Overall code is easier to read and maintain if it uses standard layers whenever possible, as other readers will be familiar with the behavior of standard layers. If you want to use a layer which is not present in tf.keras.layers or tf.contrib.layers, consider filing a [github issue](http://github.com/tensorflow/tensorflow/issues/new) or, even better, sending us a pull request!"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "Qhg4KlbKrs3G",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Models: composing layers\n",
+ "\n",
+ "Many interesting layer-like things in machine learning models are implemented by composing existing layers. For example, each residual block in a resnet is a composition of convolutions, batch normalizations, and a shortcut.\n",
+ "\n",
+ "The main class used when creating a layer-like thing which contains other layers is tf.keras.Model. Implementing one is done by inheriting from tf.keras.Model."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "N30DTXiRASlb",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "class ResnetIdentityBlock(tf.keras.Model):\n",
+ " def __init__(self, kernel_size, filters):\n",
+ " super(ResnetIdentityBlock, self).__init__(name='')\n",
+ " filters1, filters2, filters3 = filters\n",
+ "\n",
+ " self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))\n",
+ " self.bn2a = tf.keras.layers.BatchNormalization()\n",
+ "\n",
+ " self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same')\n",
+ " self.bn2b = tf.keras.layers.BatchNormalization()\n",
+ "\n",
+ " self.conv2c = tf.keras.layers.Conv2D(filters3, (1, 1))\n",
+ " self.bn2c = tf.keras.layers.BatchNormalization()\n",
+ "\n",
+ " def call(self, input_tensor, training=False):\n",
+ " x = self.conv2a(input_tensor)\n",
+ " x = self.bn2a(x, training=training)\n",
+ " x = tf.nn.relu(x)\n",
+ "\n",
+ " x = self.conv2b(x)\n",
+ " x = self.bn2b(x, training=training)\n",
+ " x = tf.nn.relu(x)\n",
+ "\n",
+ " x = self.conv2c(x)\n",
+ " x = self.bn2c(x, training=training)\n",
+ "\n",
+ " x += input_tensor\n",
+ " return tf.nn.relu(x)\n",
+ "\n",
+ " \n",
+ "block = ResnetIdentityBlock(1, [1, 2, 3])\n",
+ "print(block(tf.zeros([1, 2, 3, 3])))\n",
+ "print([x.name for x in block.variables])"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "wYfucVw65PMj",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "Much of the time, however, models which compose many layers simply call one layer after the other. This can be done in very little code using tf.keras.Sequential"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "L9frk7Ur4uvJ",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ " my_seq = tf.keras.Sequential([tf.keras.layers.Conv2D(1, (1, 1)),\n",
+ " tf.keras.layers.BatchNormalization(),\n",
+ " tf.keras.layers.Conv2D(2, 1, \n",
+ " padding='same'),\n",
+ " tf.keras.layers.BatchNormalization(),\n",
+ " tf.keras.layers.Conv2D(3, (1, 1)),\n",
+ " tf.keras.layers.BatchNormalization()])\n",
+ "my_seq(tf.zeros([1, 2, 3, 3]))"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "c5YwYcnuK-wc",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Next steps\n",
+ "\n",
+ "Now you can go back to the previous notebook and adapt the linear regression example to use layers and models to be better structured."
+ ]
+ }
+ ]
+} \ No newline at end of file
diff --git a/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb b/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb
new file mode 100644
index 0000000000..591e2d0c85
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb
@@ -0,0 +1,478 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "name": "Custom training: basics",
+ "version": "0.3.2",
+ "views": {},
+ "default_view": {},
+ "provenance": [],
+ "private_outputs": true,
+ "collapsed_sections": [],
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ }
+ },
+ "cells": [
+ {
+ "metadata": {
+ "id": "5rmpybwysXGV",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "##### Copyright 2018 The TensorFlow Authors."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "m8y3rGtQsYP2",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "cellView": "form"
+ },
+ "cell_type": "code",
+ "source": [
+ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "#\n",
+ "# https://www.apache.org/licenses/LICENSE-2.0\n",
+ "#\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License."
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "hrXv0rU9sIma",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Custom training: basics"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "7S0BwJ_8sLu7",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "<table class=\"tfo-notebook-buttons\" align=\"left\"><td>\n",
+ "<a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb\">\n",
+ " <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n",
+ "</td><td>\n",
+ "<a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a></td></table>"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "k2o3TTG4TFpt",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block for machine learning.\n",
+ "In this tutorial we will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.\n",
+ "\n",
+ "TensorFlow also includes a higher-level neural networks API (`tf.keras`) which provides useful abstractions to reduce boilerplate. We strongly recommend those higher level APIs for people working with neural networks. However, in this short tutorial we cover neural network training from first principles to establish a strong foundation."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "3LXMVuV0VhDr",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Setup"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "PJ64L90aVir3",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "import tensorflow as tf\n",
+ "tfe = tf.contrib.eager # Shorthand for some symbols\n",
+ "\n",
+ "tf.enable_eager_execution()"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "eMAWbDJFVmMk",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Variables\n",
+ "\n",
+ "Tensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language:\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "VkJwtLS_Jbn8",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "# Using python state\n",
+ "x = tf.zeros([10, 10])\n",
+ "x += 2 # This is equivalent to x = x + 2, which does not mutate the original\n",
+ " # value of x\n",
+ "print(x)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "wfneTXy7JcUz",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state. To represent weights in a model, for example, it's often convenient and efficient to use TensorFlow variables.\n",
+ "\n",
+ "A Variable is an object which stores a value and, when used in a TensorFlow computation, will implicitly read from this stored value. There are operations (`tf.assign_sub`, `tf.scatter_update`, etc) which manipulate the value stored in a TensorFlow variable."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "itxmrMil6DQi",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "v = tfe.Variable(1.0)\n",
+ "assert v.numpy() == 1.0\n",
+ "\n",
+ "# Re-assign the value\n",
+ "v.assign(3.0)\n",
+ "assert v.numpy() == 3.0\n",
+ "\n",
+ "# Use `v` in a TensorFlow operation like tf.square() and reassign\n",
+ "v.assign(tf.square(v))\n",
+ "assert v.numpy() == 9.0"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "-paSaeq1JzwC",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient.\n",
+ "\n",
+ "Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable."
+ ]
+ },
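+    {
+      "metadata": {
+        "id": "varGradSketchMd",
+        "colab_type": "text"
+      },
+      "cell_type": "markdown",
+      "source": [
+        "As a minimal sketch of this automatic tracing (the variable and values below are purely illustrative), gradients flow through a Variable without any explicit bookkeeping:"
+      ]
+    },
+    {
+      "metadata": {
+        "id": "varGradSketchCode",
+        "colab_type": "code",
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        }
+      },
+      "cell_type": "code",
+      "source": [
+        "w = tfe.Variable(3.0)\n",
+        "with tf.GradientTape() as tape:\n",
+        "  # `w` is watched automatically because it is a Variable.\n",
+        "  value = w * w\n",
+        "# d(w^2)/dw = 2w, so the gradient at w = 3.0 is 6.0\n",
+        "print(tape.gradient(value, [w])[0])"
+      ],
+      "execution_count": 0,
+      "outputs": []
+    },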
+ {
+ "metadata": {
+ "id": "BMiFcDzE7Qu3",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Example: Fitting a linear model\n",
+ "\n",
+ "Let's now put the few concepts we have so far ---`Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:\n",
+ "\n",
+ "1. Define the model.\n",
+ "2. Define a loss function.\n",
+ "3. Obtain training data.\n",
+ "4. Run through the training data and use an \"optimizer\" to adjust the variables to fit the data.\n",
+ "\n",
+ "In this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "gFzH64Jn9PIm",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "### Define the model\n",
+ "\n",
+ "Let's define a simple class to encapsulate the variables and the computation."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "_WRu7Pze7wk8",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "class Model(object):\n",
+ " def __init__(self):\n",
+ " # Initialize variable to (5.0, 0.0)\n",
+ " # In practice, these should be initialized to random values.\n",
+ " self.W = tfe.Variable(5.0)\n",
+ " self.b = tfe.Variable(0.0)\n",
+ " \n",
+ " def __call__(self, x):\n",
+ " return self.W * x + self.b\n",
+ " \n",
+ "model = Model()\n",
+ "\n",
+ "assert model(3.0).numpy() == 15.0"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "xa6j_yXa-j79",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "### Define a loss function\n",
+ "\n",
+ "A loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "Y0ysUFGY924U",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "def loss(predicted_y, desired_y):\n",
+ " return tf.reduce_mean(tf.square(predicted_y - desired_y))"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "qutT_fkl_CBc",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "### Obtain training data\n",
+ "\n",
+ "Let's synthesize the training data with some noise."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "gxPTb-kt_N5m",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "TRUE_W = 3.0\n",
+ "TRUE_b = 2.0\n",
+ "NUM_EXAMPLES = 1000\n",
+ "\n",
+ "inputs = tf.random_normal(shape=[NUM_EXAMPLES])\n",
+ "noise = tf.random_normal(shape=[NUM_EXAMPLES])\n",
+ "outputs = inputs * TRUE_W + TRUE_b + noise"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "-50nq-wPBsAW",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "_eb83LtrB4nt",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "plt.scatter(inputs, outputs, c='b')\n",
+ "plt.scatter(inputs, model(inputs), c='r')\n",
+ "plt.show()\n",
+ "\n",
+ "print('Current loss: '),\n",
+ "print(loss(model(inputs), outputs).numpy())"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "sSDP-yeq_4jE",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "### Define a training loop\n",
+ "\n",
+ "We now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "MBIACgdnA55X",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "def train(model, inputs, outputs, learning_rate):\n",
+ " with tf.GradientTape() as t:\n",
+ " current_loss = loss(model(inputs), outputs)\n",
+ " dW, db = t.gradient(current_loss, [model.W, model.b])\n",
+ " model.W.assign_sub(learning_rate * dW)\n",
+ " model.b.assign_sub(learning_rate * db)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
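+    {
+      "metadata": {
+        "id": "optimizerSketchMd",
+        "colab_type": "text"
+      },
+      "cell_type": "markdown",
+      "source": [
+        "For comparison, here is a sketch of the same update written with one of the built-in optimizers (`tf.train.GradientDescentOptimizer`); the helper name `train_with_optimizer` is just for illustration, and the rest of this tutorial keeps the manual version above:"
+      ]
+    },
+    {
+      "metadata": {
+        "id": "optimizerSketchCode",
+        "colab_type": "code",
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        }
+      },
+      "cell_type": "code",
+      "source": [
+        "def train_with_optimizer(model, inputs, outputs, learning_rate):\n",
+        "  optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
+        "  with tf.GradientTape() as t:\n",
+        "    current_loss = loss(model(inputs), outputs)\n",
+        "  grads = t.gradient(current_loss, [model.W, model.b])\n",
+        "  # apply_gradients expects (gradient, variable) pairs.\n",
+        "  optimizer.apply_gradients(zip(grads, [model.W, model.b]))"
+      ],
+      "execution_count": 0,
+      "outputs": []
+    },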
+ {
+ "metadata": {
+ "id": "RwWPaJryD2aN",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "Finally, let's repeatedly run through the training data and see how `W` and `b` evolve."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "XdfkR223D9dW",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "model = Model()\n",
+ "\n",
+ "# Collect the history of W-values and b-values to plot later\n",
+ "Ws, bs = [], []\n",
+ "epochs = range(10)\n",
+ "for epoch in epochs:\n",
+ " Ws.append(model.W.numpy())\n",
+ " bs.append(model.b.numpy())\n",
+ " current_loss = loss(model(inputs), outputs)\n",
+ "\n",
+ " train(model, inputs, outputs, learning_rate=0.1)\n",
+ " print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' %\n",
+ " (epoch, Ws[-1], bs[-1], current_loss))\n",
+ "\n",
+ "# Let's plot it all\n",
+ "plt.plot(epochs, Ws, 'r',\n",
+ " epochs, bs, 'b')\n",
+ "plt.plot([TRUE_W] * len(epochs), 'r--',\n",
+ " [TRUE_b] * len(epochs), 'b--')\n",
+ "plt.legend(['W', 'b', 'true W', 'true_b'])\n",
+ "plt.show()\n",
+ " "
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "vPnIVuaSJwWz",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Next Steps\n",
+ "\n",
+ "In this tutorial we covered `Variable`s and built and trained a simple linear model using the TensorFlow primitives discussed so far.\n",
+ "\n",
+ "In theory, this is pretty much all you need to use TensorFlow for your machine learning research.\n",
+ "In practice, particularly for neural networks, the higher level APIs like `tf.keras` will be much more convenient since it provides higher level building blocks (called \"layers\"), utilities to save and restore state, a suite of loss functions, a suite of optimization strategies etc. \n",
+ "\n",
+ "The [next tutorial](TODO) will cover these higher level APIs."
+ ]
+ }
+ ]
+} \ No newline at end of file
diff --git a/tensorflow/contrib/eager/python/examples/notebooks/1_basics.ipynb b/tensorflow/contrib/eager/python/examples/notebooks/eager_basics.ipynb
index 51d10a7784..f1e13de5de 100644
--- a/tensorflow/contrib/eager/python/examples/notebooks/1_basics.ipynb
+++ b/tensorflow/contrib/eager/python/examples/notebooks/eager_basics.ipynb
@@ -1,27 +1,107 @@
{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "name": "eager_basics.ipynb",
+ "version": "0.3.2",
+ "views": {},
+ "default_view": {},
+ "provenance": [],
+ "private_outputs": true,
+ "collapsed_sections": [],
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ }
+ },
"cells": [
{
+ "metadata": {
+ "id": "iPpI7RaYoZuE",
+ "colab_type": "text"
+ },
"cell_type": "markdown",
+ "source": [
+ "##### Copyright 2018 The TensorFlow Authors."
+ ]
+ },
+ {
"metadata": {
- "colab_type": "text",
- "id": "U9i2Dsh-ziXr"
+ "id": "hro2InpHobKk",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "cellView": "form"
},
+ "cell_type": "code",
+ "source": [
+ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "#\n",
+ "# https://www.apache.org/licenses/LICENSE-2.0\n",
+ "#\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License."
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "U9i2Dsh-ziXr",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Eager execution basics"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "Hndw-YcxoOJK",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "<table class=\"tfo-notebook-buttons\" align=\"left\"><td>\n",
+ "<a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/eager_basics.ipynb\">\n",
+ " <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n",
+ "</td><td>\n",
+ "<a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/eager_basics.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a></td></table>"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "6sILUVbHoSgH",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
"source": [
- "# An introduction to TensorFlow\n",
- "\n",
"This is an introductory tutorial for using TensorFlow. It will cover:\n",
"\n",
"* Importing required packages\n",
"* Creating and using Tensors\n",
- "* Using GPU acceleration\n"
+ "* Using GPU acceleration\n",
+ "* Datasets"
]
},
{
- "cell_type": "markdown",
"metadata": {
- "colab_type": "text",
- "id": "z1JcS5iBXMRO"
+ "id": "z1JcS5iBXMRO",
+ "colab_type": "text"
},
+ "cell_type": "markdown",
"source": [
"## Import TensorFlow\n",
"\n",
@@ -30,32 +110,32 @@
]
},
{
- "cell_type": "code",
- "execution_count": 0,
"metadata": {
- "cellView": "code",
+ "id": "RlIWhyeLoYnG",
+ "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
},
- "colab_type": "code",
- "id": "RlIWhyeLoYnG"
+ "cellView": "code"
},
- "outputs": [],
+ "cell_type": "code",
"source": [
"import tensorflow as tf\n",
"\n",
"tf.enable_eager_execution()"
- ]
+ ],
+ "execution_count": 0,
+ "outputs": []
},
{
- "cell_type": "markdown",
"metadata": {
- "colab_type": "text",
- "id": "H9UySOPLXdaw"
+ "id": "H9UySOPLXdaw",
+ "colab_type": "text"
},
+ "cell_type": "markdown",
"source": [
"## Tensors\n",
"\n",
@@ -63,46 +143,18 @@
]
},
{
- "cell_type": "code",
- "execution_count": 0,
"metadata": {
- "cellView": "code",
+ "id": "ngUe237Wt48W",
+ "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
- },
- "height": 125
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 320,
- "status": "ok",
- "timestamp": 1526420535530,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
+ }
},
- "id": "ngUe237Wt48W",
- "outputId": "b1a1cd60-4eb3-443d-cd6b-68406390784e"
+ "cellView": "code"
},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "tf.Tensor(3, shape=(), dtype=int32)\n",
- "tf.Tensor([4 6], shape=(2,), dtype=int32)\n",
- "tf.Tensor(25, shape=(), dtype=int32)\n",
- "tf.Tensor(6, shape=(), dtype=int32)\n",
- "tf.Tensor(aGVsbG8gd29ybGQ, shape=(), dtype=string)\n",
- "tf.Tensor(13, shape=(), dtype=int32)\n"
- ]
- }
- ],
+ "cell_type": "code",
"source": [
"print(tf.add(1, 2))\n",
"print(tf.add([1, 2], [3, 4]))\n",
@@ -112,66 +164,46 @@
"\n",
"# Operator overloading is also supported\n",
"print(tf.square(2) + tf.square(3))"
- ]
+ ],
+ "execution_count": 0,
+ "outputs": []
},
{
- "cell_type": "markdown",
"metadata": {
- "colab_type": "text",
- "id": "IDY4WsYRhP81"
+ "id": "IDY4WsYRhP81",
+ "colab_type": "text"
},
+ "cell_type": "markdown",
"source": [
"Each Tensor has a shape and a datatype"
]
},
{
- "cell_type": "code",
- "execution_count": 0,
"metadata": {
+ "id": "srYWH1MdJNG7",
+ "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
- },
- "height": 53
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 215,
- "status": "ok",
- "timestamp": 1526420538162,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
- },
- "id": "srYWH1MdJNG7",
- "outputId": "5e4ac41c-5115-4e50-eba0-42e249c16561"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(1, 2)\n",
- "\u003cdtype: 'int32'\u003e\n"
- ]
+ }
}
- ],
+ },
+ "cell_type": "code",
"source": [
"x = tf.matmul([[1]], [[2, 3]])\n",
"print(x.shape)\n",
"print(x.dtype)"
- ]
+ ],
+ "execution_count": 0,
+ "outputs": []
},
{
- "cell_type": "markdown",
"metadata": {
- "colab_type": "text",
- "id": "eBPw8e8vrsom"
+ "id": "eBPw8e8vrsom",
+ "colab_type": "text"
},
+ "cell_type": "markdown",
"source": [
"The most obvious differences between NumPy arrays and TensorFlow Tensors are:\n",
"\n",
@@ -180,11 +212,11 @@
]
},
{
- "cell_type": "markdown",
"metadata": {
- "colab_type": "text",
- "id": "Dwi1tdW3JBw6"
+ "id": "Dwi1tdW3JBw6",
+ "colab_type": "text"
},
+ "cell_type": "markdown",
"source": [
"### NumPy Compatibility\n",
"\n",
@@ -197,52 +229,17 @@
]
},
{
- "cell_type": "code",
- "execution_count": 0,
"metadata": {
+ "id": "lCUWzso6mbqR",
+ "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
- },
- "height": 251
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 238,
- "status": "ok",
- "timestamp": 1526420540562,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
- },
- "id": "lCUWzso6mbqR",
- "outputId": "fd0a22bc-8249-49dd-fcbd-63161cc47e46"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "TensorFlow operations convert numpy arrays to Tensors automatically\n",
- "tf.Tensor(\n",
- "[[ 42. 42. 42.]\n",
- " [ 42. 42. 42.]\n",
- " [ 42. 42. 42.]], shape=(3, 3), dtype=float64)\n",
- "And NumPy operations convert Tensors to numpy arrays automatically\n",
- "[[ 43. 43. 43.]\n",
- " [ 43. 43. 43.]\n",
- " [ 43. 43. 43.]]\n",
- "The .numpy() method explicitly converts a Tensor to a numpy array\n",
- "[[ 42. 42. 42.]\n",
- " [ 42. 42. 42.]\n",
- " [ 42. 42. 42.]]\n"
- ]
+ }
}
- ],
+ },
+ "cell_type": "code",
"source": [
"import numpy as np\n",
"\n",
@@ -258,14 +255,16 @@
"\n",
"print(\"The .numpy() method explicitly converts a Tensor to a numpy array\")\n",
"print(tensor.numpy())"
- ]
+ ],
+ "execution_count": 0,
+ "outputs": []
},
{
- "cell_type": "markdown",
"metadata": {
- "colab_type": "text",
- "id": "PBNP8yTRfu_X"
+ "id": "PBNP8yTRfu_X",
+ "colab_type": "text"
},
+ "cell_type": "markdown",
"source": [
"## GPU acceleration\n",
"\n",
@@ -273,42 +272,18 @@
]
},
{
- "cell_type": "code",
- "execution_count": 0,
"metadata": {
- "cellView": "code",
+ "id": "3Twf_Rw-gQFM",
+ "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
- },
- "height": 53
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 340,
- "status": "ok",
- "timestamp": 1526420543562,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
+ }
},
- "id": "3Twf_Rw-gQFM",
- "outputId": "2239ae2b-adf3-4895-b1f3-464cf5361d1b"
+ "cellView": "code"
},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Is there a GPU available: False\n",
- "Is the Tensor on GPU #0: False\n"
- ]
- }
- ],
+ "cell_type": "code",
"source": [
"x = tf.random_uniform([3, 3])\n",
"\n",
@@ -317,26 +292,28 @@
"\n",
"print(\"Is the Tensor on GPU #0: \"),\n",
"print(x.device.endswith('GPU:0'))"
- ]
+ ],
+ "execution_count": 0,
+ "outputs": []
},
{
- "cell_type": "markdown",
"metadata": {
- "colab_type": "text",
- "id": "vpgYzgVXW2Ud"
+ "id": "vpgYzgVXW2Ud",
+ "colab_type": "text"
},
+ "cell_type": "markdown",
"source": [
"### Device Names\n",
"\n",
- "The `Tensor.device` property provides a fully qualified string name of the device hosting the contents of the Tensor. This name encodes a bunch of details, such as an identifier of the network address of the host on which this program is executing and the device within that host. This is required for distributed execution of TensorFlow programs, but we'll skip that for now. The string will end with `GPU:\u003cN\u003e` if the tensor is placed on the `N`-th tensor on the host."
+ "The `Tensor.device` property provides a fully qualified string name of the device hosting the contents of the Tensor. This name encodes a bunch of details, such as an identifier of the network address of the host on which this program is executing and the device within that host. This is required for distributed execution of TensorFlow programs, but we'll skip that for now. The string will end with `GPU:<N>` if the tensor is placed on the `N`-th tensor on the host."
]
},
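+    {
+      "metadata": {
+        "id": "deviceSketchMd",
+        "colab_type": "text"
+      },
+      "cell_type": "markdown",
+      "source": [
+        "As a quick sketch, you can simply print the device string; the exact name below is illustrative and depends on the host this runs on:"
+      ]
+    },
+    {
+      "metadata": {
+        "id": "deviceSketchCode",
+        "colab_type": "code",
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        }
+      },
+      "cell_type": "code",
+      "source": [
+        "x = tf.random_uniform([3, 3])\n",
+        "# On a CPU-only host this prints something like:\n",
+        "#   /job:localhost/replica:0/task:0/device:CPU:0\n",
+        "print(x.device)"
+      ],
+      "execution_count": 0,
+      "outputs": []
+    },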
{
- "cell_type": "markdown",
"metadata": {
- "colab_type": "text",
- "id": "ZWZQCimzuqyP"
+ "id": "ZWZQCimzuqyP",
+ "colab_type": "text"
},
+ "cell_type": "markdown",
"source": [
"\n",
"\n",
@@ -346,41 +323,17 @@
]
},
{
- "cell_type": "code",
- "execution_count": 0,
"metadata": {
+ "id": "RjkNZTuauy-Q",
+ "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
- },
- "height": 53
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 1762,
- "status": "ok",
- "timestamp": 1526420547562,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 420
- },
- "id": "RjkNZTuauy-Q",
- "outputId": "2e613293-ccac-4db2-b793-8ceb5b5adcfd"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "On CPU:\n",
- "10 loops, best of 3: 35.8 ms per loop\n"
- ]
+ }
}
- ],
+ },
+ "cell_type": "code",
"source": [
"def time_matmul(x):\n",
" %timeit tf.matmul(x, x)\n",
@@ -398,32 +351,141 @@
" x = tf.random_uniform([1000, 1000])\n",
" assert x.device.endswith(\"GPU:0\")\n",
" time_matmul(x)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "o1K4dlhhHtQj",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Datasets\n",
+ "\n",
+ "This section demonstrates the use of the [`tf.data.Dataset` API](https://www.tensorflow.org/guide/datasets) to build pipelines to feed data to your model. It covers:\n",
+ "\n",
+ "* Creating a `Dataset`.\n",
+ "* Iteration over a `Dataset` with eager execution enabled.\n",
+ "\n",
+ "We recommend using the `Dataset`s API for building performant, complex input pipelines from simple, re-usable pieces that will feed your model's training or evaluation loops.\n",
+ "\n",
+ "If you're familiar with TensorFlow graphs, the API for constructing the `Dataset` object remains exactly the same when eager execution is enabled, but the process of iterating over elements of the dataset is slightly simpler.\n",
+ "You can use Python iteration over the `tf.data.Dataset` object and do not need to explicitly create an `tf.data.Iterator` object.\n",
+ "As a result, the discussion on iterators in the [TensorFlow Guide](https://www.tensorflow.org/guide/datasets) is not relevant when eager execution is enabled."
]
},
{
+ "metadata": {
+ "id": "zI0fmOynH-Ne",
+ "colab_type": "text"
+ },
"cell_type": "markdown",
+ "source": [
+ "### Create a source `Dataset`\n",
+ "\n",
+ "Create a _source_ dataset using one of the factory functions like [`Dataset.from_tensors`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensors), [`Dataset.from_tensor_slices`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensor_slices) or using objects that read from files like [`TextLineDataset`](https://www.tensorflow.org/api_docs/python/tf/data/TextLineDataset) or [`TFRecordDataset`](https://www.tensorflow.org/api_docs/python/tf/data/TFRecordDataset). See the [TensorFlow Guide](https://www.tensorflow.org/guide/datasets#reading_input_data) for more information."
+ ]
+ },
+ {
"metadata": {
- "colab_type": "text",
- "id": "YEOJTNiOvnpQ"
+ "id": "F04fVOHQIBiG",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
},
+ "cell_type": "code",
"source": [
- "## Next Steps\n",
+ "ds_tensors = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6])\n",
"\n",
- "In this tutorial we covered the most fundamental concepts in TensorFlow - `Tensor`s, operations, and devices.\n",
- "In [the next tutorial](https://github.com/tensorflow/models/tree/master/official/contrib/eager/python/examples/notebooks/2_gradients.ipynb) we will cover automatic differentiation - a building block required for training many machine learning models like neural networks."
+ "# Create a CSV file\n",
+ "import tempfile\n",
+ "_, filename = tempfile.mkstemp()\n",
+ "\n",
+ "with open(filename, 'w') as f:\n",
+ " f.write(\"\"\"Line 1\n",
+ "Line 2\n",
+ "Line 3\n",
+ " \"\"\")\n",
+ "\n",
+ "ds_file = tf.data.TextLineDataset(filename)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "vbxIhC-5IPdf",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "### Apply transformations\n",
+ "\n",
+ "Use the transformations functions like [`map`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map), [`batch`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#batch), [`shuffle`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle) etc. to apply transformations to the records of the dataset. See the [API documentation for `tf.data.Dataset`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) for details."
]
+ },
+ {
+ "metadata": {
+ "id": "uXSDZWE-ISsd",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "ds_tensors = ds_tensors.map(tf.square).shuffle(2).batch(2)\n",
+ "\n",
+ "ds_file = ds_file.batch(2)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "A8X1GNfoIZKJ",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "### Iterate\n",
+ "\n",
+ "When eager execution is enabled `Dataset` objects support iteration.\n",
+ "If you're familiar with the use of `Dataset`s in TensorFlow graphs, note that there is no need for calls to `Dataset.make_one_shot_iterator()` or `get_next()` calls."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "ws-WKRk5Ic6-",
+ "colab_type": "code",
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "print('Elements of ds_tensors:')\n",
+ "for x in ds_tensors:\n",
+ " print(x)\n",
+ "\n",
+ "print('\\nElements in ds_file:')\n",
+ "for x in ds_file:\n",
+ " print(x)"
+ ],
+ "execution_count": 0,
+ "outputs": []
}
- ],
- "metadata": {
- "colab": {
- "collapsed_sections": [],
- "default_view": {},
- "name": "TensorFlow: An introduction",
- "provenance": [],
- "version": "0.3.2",
- "views": {}
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
+ ]
+} \ No newline at end of file
diff --git a/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py b/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py
index b14ef1df8f..07d8788882 100644
--- a/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py
+++ b/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py
@@ -29,6 +29,7 @@ import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.resnet50 import resnet50
from tensorflow.contrib.summary import summary_test_util
from tensorflow.python.client import device_lib
+from tensorflow.python.eager import tape
def device_and_data_format():
@@ -49,13 +50,21 @@ def random_batch(batch_size, data_format):
return images, one_hot
-def compute_gradients(model, images, labels):
- with tf.GradientTape() as tape:
+def compute_gradients(model, images, labels, num_replicas=1):
+ with tf.GradientTape() as grad_tape:
logits = model(images, training=True)
loss = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
tf.contrib.summary.scalar(name='loss', tensor=loss)
- return tape.gradient(loss, model.variables)
+ if num_replicas != 1:
+ loss /= num_replicas
+
+ # TODO(b/110991947): We can mistakenly trace the gradient call in
+ # multi-threaded environment. Explicitly disable recording until
+ # this is fixed.
+ with tape.stop_recording():
+ grads = grad_tape.gradient(loss, model.variables)
+ return grads
def apply_gradients(model, optimizer, gradients):
@@ -188,11 +197,14 @@ class ResNet50Benchmarks(tf.test.Benchmark):
return (32,)
return (16, 32)
- def _report(self, label, start, num_iters, device, batch_size, data_format):
+ def _report(self, label, start, num_iters, device, batch_size, data_format,
+ num_replicas=1):
avg_time = (time.time() - start) / num_iters
dev = tf.DeviceSpec.from_string(device).device_type.lower()
- name = '%s_%s_batch_%d_%s' % (label, dev, batch_size, data_format)
- extras = {'examples_per_sec': batch_size / avg_time}
+ replica_str = '' if num_replicas == 1 else 'replicas_%d_' % num_replicas
+ name = '%s_%s_batch_%d_%s%s' % (label, dev, batch_size,
+ replica_str, data_format)
+ extras = {'examples_per_sec': (num_replicas * batch_size) / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
diff --git a/tensorflow/contrib/eager/python/examples/revnet/BUILD b/tensorflow/contrib/eager/python/examples/revnet/BUILD
index 432bb546f8..0c0e4c0eb9 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/BUILD
+++ b/tensorflow/contrib/eager/python/examples/revnet/BUILD
@@ -72,11 +72,13 @@ cuda_py_test(
size = "large",
srcs = ["revnet_test.py"],
additional_deps = [
+ ":blocks_test",
":config",
":revnet",
"//tensorflow:tensorflow_py",
],
tags = [
+ "no_pip", # depends on blocks_test, which is not available in pip package
"optonly",
],
)
@@ -87,7 +89,6 @@ py_library(
srcs = ["cifar_input.py"],
srcs_version = "PY2AND3",
deps = [
- ":revnet",
"//tensorflow:tensorflow_py",
],
)
diff --git a/tensorflow/contrib/eager/python/examples/revnet/README.md b/tensorflow/contrib/eager/python/examples/revnet/README.md
new file mode 100644
index 0000000000..21fc44febc
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/revnet/README.md
@@ -0,0 +1,45 @@
+# RevNet with TensorFlow eager execution
+
+This folder contains a TensorFlow eager implementation of the [Reversible Residual Network](https://arxiv.org/pdf/1707.04585.pdf) adapted from the implementation released by the authors. The presented implementation can be run in both eager and graph mode. The code is considerably simplified with `tf.GradientTape`, and we remove the step of reconstructing the outputs, which saves us from using `tf.stop_gradient` and makes the model run faster.
+
+## Content
+
+- `revnet.py`: The RevNet model.
+- `blocks.py`: The relevant reversible blocks.
+- `cifar_tfrecords.py`: Script to generate the TFRecords for both CIFAR-10 and CIFAR-100.
+- `cifar_input.py`: Script to read from TFRecords and generate dataset objects with the `tf.data` API.
+- `config.py`: Configuration file for network architectures and training hyperparameters.
+- `main.py`: Main training and evaluation script.
+- `ops.py`: Auxiliary downsampling operation.
+
+## To run
+- Make sure you have installed TensorFlow 1.9+ or the latest `tf-nightly`
+or `tf-nightly-gpu` pip package in order to access the eager execution feature.
+
+- First run
+
+```bash
+python cifar_tfrecords.py --data_dir ${PWD}/cifar
+```
+to download the CIFAR datasets and convert them
+to TFRecords. This produces TFRecord files for both CIFAR-10 and CIFAR-100.
+
+- To train a model, run
+
+```bash
+python main.py --data_dir ${PWD}/cifar
+```
+
+- Optional arguments for `main.py` include
+ - `train_dir`: Directory to store eventfiles and checkpoints.
+ - `restore`: Restore the latest checkpoint.
+ - `validate`: Use validation set for training monitoring.
+ - `manual_grad`: Use the manually defined gradient map given by the authors.
+  - `dataset`: Use either `cifar-10` or `cifar-100`.
+
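+For example, a run that combines several of these flags might look like the following; the paths and flag values here are illustrative:
+
+```bash
+python main.py --data_dir ${PWD}/cifar --dataset cifar-100 \
+  --train_dir /tmp/revnet_train --validate
+```
+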
+## Performance
+- With the current implementation, RevNet-38 achieves >92% on CIFAR-10 and >71% on CIFAR-100.
+
+## Reference
+The Reversible Residual Network: Backpropagation Without Storing Activations.
+Aidan N. Gomez, Mengye Ren, Raquel Urtasun, Roger B. Grosse. Neural Information Processing Systems (NIPS), 2017.
diff --git a/tensorflow/contrib/eager/python/examples/revnet/blocks.py b/tensorflow/contrib/eager/python/examples/revnet/blocks.py
index af41f64286..306096e9f8 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/blocks.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/blocks.py
@@ -43,7 +43,8 @@ class RevBlock(tf.keras.Model):
batch_norm_first=False,
data_format="channels_first",
bottleneck=False,
- fused=True):
+ fused=True,
+ dtype=tf.float32):
"""Initialize RevBlock.
Args:
@@ -55,6 +56,7 @@ class RevBlock(tf.keras.Model):
data_format: tensor data format, "NCHW"/"NHWC"
bottleneck: use bottleneck residual if True
fused: use fused batch normalization if True
+ dtype: float16, float32, or float64
"""
super(RevBlock, self).__init__()
self.blocks = tf.contrib.checkpoint.List()
@@ -68,7 +70,8 @@ class RevBlock(tf.keras.Model):
batch_norm_first=curr_batch_norm_first,
data_format=data_format,
bottleneck=bottleneck,
- fused=fused)
+ fused=fused,
+ dtype=dtype)
self.blocks.append(block)
if data_format == "channels_first":
@@ -93,11 +96,23 @@ class RevBlock(tf.keras.Model):
for i in reversed(range(len(self.blocks))):
block = self.blocks[i]
- y_inv = x if i == 0 else block.backward(y, training=training)
- dy, grads, vars_ = block.backward_grads_and_vars(
- y_inv, dy, training=training)
- grads_all += grads
- vars_all += vars_
+ if i == 0:
+ # First block usually contains downsampling that can't be reversed
+ with tf.GradientTape() as tape:
+ x = tf.identity(x)
+ tape.watch(x)
+ y = block(x, training=training)
+
+ grads_combined = tape.gradient(
+ y, [x] + block.trainable_variables, output_gradients=dy)
+ dy = grads_combined[0]
+ grads_all += grads_combined[1:]
+ vars_all += block.trainable_variables
+ else:
+ y, dy, grads, vars_ = block.backward_grads_and_vars(
+ y, dy, training=training)
+ grads_all += grads
+ vars_all += vars_
return dy, grads_all, vars_all
@@ -115,6 +130,7 @@ class _Residual(tf.keras.Model):
data_format: tensor data format, "NCHW"/"NHWC",
bottleneck: use bottleneck residual if True
fused: use fused batch normalization if True
+ dtype: float16, float32, or float64
"""
def __init__(self,
@@ -124,7 +140,8 @@ class _Residual(tf.keras.Model):
batch_norm_first=True,
data_format="channels_first",
bottleneck=False,
- fused=True):
+ fused=True,
+ dtype=tf.float32):
super(_Residual, self).__init__()
self.filters = filters
@@ -146,75 +163,68 @@ class _Residual(tf.keras.Model):
input_shape=f_input_shape,
batch_norm_first=batch_norm_first,
data_format=data_format,
- fused=fused)
+ fused=fused,
+ dtype=dtype)
self.g = factory(
filters=filters // 2,
strides=(1, 1),
input_shape=g_input_shape,
batch_norm_first=batch_norm_first,
data_format=data_format,
- fused=fused)
+ fused=fused,
+ dtype=dtype)
def call(self, x, training=True, concat=True):
"""Apply residual block to inputs."""
x1, x2 = tf.split(x, num_or_size_splits=2, axis=self.axis)
- f_x2 = self.f.call(x2, training=training)
- # TODO(lxuechen): Replace with simpler downsampling
+ f_x2 = self.f(x2, training=training)
x1_down = ops.downsample(
x1, self.filters // 2, self.strides, axis=self.axis)
x2_down = ops.downsample(
x2, self.filters // 2, self.strides, axis=self.axis)
y1 = f_x2 + x1_down
- g_y1 = self.g.call(y1, training=training) # self.g(y1) gives pylint error
+ g_y1 = self.g(y1, training=training)
y2 = g_y1 + x2_down
- if not concat: # Concat option needed for correct backward grads
+ if not concat: # For correct backward grads
return y1, y2
- return tf.concat([y1, y2], axis=self.axis)
-
- def backward(self, y, training=True):
- """Reconstruct inputs from outputs; only valid when stride 1."""
-
- assert self.strides == (1, 1)
-
- y1, y2 = tf.split(y, num_or_size_splits=2, axis=self.axis)
- g_y1 = self.g.call(y1, training=training)
- x2 = y2 - g_y1
- f_x2 = self.f.call(x2, training=training)
- x1 = y1 - f_x2
- return tf.concat([x1, x2], axis=self.axis)
+ return tf.concat([y1, y2], axis=self.axis)
- def backward_grads_and_vars(self, x, dy, training=True):
+ def backward_grads_and_vars(self, y, dy, training=True):
"""Manually compute backward gradients given input and output grads."""
+ dy1, dy2 = tf.split(dy, num_or_size_splits=2, axis=self.axis)
with tf.GradientTape(persistent=True) as tape:
- x = tf.identity(x) # TODO(lxuechen): Remove after b/110264016 is fixed
- x1, x2 = tf.split(x, num_or_size_splits=2, axis=self.axis)
- tape.watch([x1, x2])
- # Stitch back x for `call` so tape records correct grads
- x = tf.concat([x1, x2], axis=self.axis)
- dy1, dy2 = tf.split(dy, num_or_size_splits=2, axis=self.axis)
- y1, y2 = self.call(x, training=training, concat=False)
- x2_down = ops.downsample(
- x2, self.filters // 2, self.strides, axis=self.axis)
+ y = tf.identity(y)
+ tape.watch(y)
+ y1, y2 = tf.split(y, num_or_size_splits=2, axis=self.axis)
+ z1 = y1
+ gz1 = self.g(z1, training=training)
+ x2 = y2 - gz1
+ fx2 = self.f(x2, training=training)
+ x1 = z1 - fx2
grads_combined = tape.gradient(
- y2, [y1] + self.g.trainable_variables, output_gradients=[dy2])
- dy2_y1, dg = grads_combined[0], grads_combined[1:]
- dy1_plus = dy2_y1 + dy1
+ gz1, [z1] + self.g.trainable_variables, output_gradients=dy2)
+ dz1 = dy1 + grads_combined[0]
+ dg = grads_combined[1:]
+ dx1 = dz1
grads_combined = tape.gradient(
- y1, [x1, x2] + self.f.trainable_variables, output_gradients=[dy1_plus])
- dx1, dx2, df = grads_combined[0], grads_combined[1], grads_combined[2:]
- dx2 += tape.gradient(x2_down, [x2], output_gradients=[dy2])[0]
+ fx2, [x2] + self.f.trainable_variables, output_gradients=dz1)
+ dx2 = dy2 + grads_combined[0]
+ df = grads_combined[1:]
del tape
grads = df + dg
vars_ = self.f.trainable_variables + self.g.trainable_variables
- return tf.concat([dx1, dx2], axis=self.axis), grads, vars_
+ x = tf.concat([x1, x2], axis=self.axis)
+ dx = tf.concat([dx1, dx2], axis=self.axis)
+
+ return x, dx, grads, vars_
def _BottleneckResidualInner(filters,
@@ -222,7 +232,8 @@ def _BottleneckResidualInner(filters,
input_shape,
batch_norm_first=True,
data_format="channels_first",
- fused=True):
+ fused=True,
+ dtype=tf.float32):
"""Single bottleneck residual inner function contained in _Resdual.
Corresponds to the `F`/`G` functions in the paper.
@@ -235,6 +246,7 @@ def _BottleneckResidualInner(filters,
batch_norm_first: whether to apply activation and batch norm before conv
data_format: tensor data format, "NCHW"/"NHWC"
fused: use fused batch normalization if True
+ dtype: float16, float32, or float64
Returns:
A keras model
@@ -245,7 +257,7 @@ def _BottleneckResidualInner(filters,
if batch_norm_first:
model.add(
tf.keras.layers.BatchNormalization(
- axis=axis, input_shape=input_shape, fused=fused))
+ axis=axis, input_shape=input_shape, fused=fused, dtype=dtype))
model.add(tf.keras.layers.Activation("relu"))
model.add(
tf.keras.layers.Conv2D(
@@ -255,9 +267,11 @@ def _BottleneckResidualInner(filters,
input_shape=input_shape,
data_format=data_format,
use_bias=False,
- padding="SAME"))
+ padding="SAME",
+ dtype=dtype))
- model.add(tf.keras.layers.BatchNormalization(axis=axis, fused=fused))
+ model.add(
+ tf.keras.layers.BatchNormalization(axis=axis, fused=fused, dtype=dtype))
model.add(tf.keras.layers.Activation("relu"))
model.add(
tf.keras.layers.Conv2D(
@@ -266,9 +280,11 @@ def _BottleneckResidualInner(filters,
strides=(1, 1),
data_format=data_format,
use_bias=False,
- padding="SAME"))
+ padding="SAME",
+ dtype=dtype))
- model.add(tf.keras.layers.BatchNormalization(axis=axis, fused=fused))
+ model.add(
+ tf.keras.layers.BatchNormalization(axis=axis, fused=fused, dtype=dtype))
model.add(tf.keras.layers.Activation("relu"))
model.add(
tf.keras.layers.Conv2D(
@@ -277,7 +293,8 @@ def _BottleneckResidualInner(filters,
strides=(1, 1),
data_format=data_format,
use_bias=False,
- padding="SAME"))
+ padding="SAME",
+ dtype=dtype))
return model
@@ -287,7 +304,8 @@ def _ResidualInner(filters,
input_shape,
batch_norm_first=True,
data_format="channels_first",
- fused=True):
+ fused=True,
+ dtype=tf.float32):
"""Single residual inner function contained in _ResdualBlock.
Corresponds to the `F`/`G` functions in the paper.
@@ -299,6 +317,7 @@ def _ResidualInner(filters,
batch_norm_first: whether to apply activation and batch norm before conv
data_format: tensor data format, "NCHW"/"NHWC"
fused: use fused batch normalization if True
+ dtype: float16, float32, or float64
Returns:
A keras model
@@ -309,7 +328,7 @@ def _ResidualInner(filters,
if batch_norm_first:
model.add(
tf.keras.layers.BatchNormalization(
- axis=axis, input_shape=input_shape, fused=fused))
+ axis=axis, input_shape=input_shape, fused=fused, dtype=dtype))
model.add(tf.keras.layers.Activation("relu"))
model.add(
tf.keras.layers.Conv2D(
@@ -319,9 +338,11 @@ def _ResidualInner(filters,
input_shape=input_shape,
data_format=data_format,
use_bias=False,
- padding="SAME"))
+ padding="SAME",
+ dtype=dtype))
- model.add(tf.keras.layers.BatchNormalization(axis=axis, fused=fused))
+ model.add(
+ tf.keras.layers.BatchNormalization(axis=axis, fused=fused, dtype=dtype))
model.add(tf.keras.layers.Activation("relu"))
model.add(
tf.keras.layers.Conv2D(
@@ -330,6 +351,7 @@ def _ResidualInner(filters,
strides=(1, 1),
data_format=data_format,
use_bias=False,
- padding="SAME"))
+ padding="SAME",
+ dtype=dtype))
return model
diff --git a/tensorflow/contrib/eager/python/examples/revnet/blocks_test.py b/tensorflow/contrib/eager/python/examples/revnet/blocks_test.py
index f4436fd925..d74785c8fe 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/blocks_test.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/blocks_test.py
@@ -22,6 +22,27 @@ import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import blocks
+def compute_degree(g1, g2, eps=1e-7):
+ """Compute the degree between two vectors using their usual inner product."""
+
+ def _dot(u, v):
+ return tf.reduce_sum(u * v)
+
+ g1_norm = tf.sqrt(_dot(g1, g1))
+ g2_norm = tf.sqrt(_dot(g2, g2))
+ if g1_norm.numpy() == 0 and g2_norm.numpy() == 0:
+ cosine = 1. - eps
+ else:
+ g1_norm = 1. if g1_norm.numpy() == 0 else g1_norm
+ g2_norm = 1. if g2_norm.numpy() == 0 else g2_norm
+ cosine = _dot(g1, g2) / g1_norm / g2_norm
+ # Restrict to arccos range
+ cosine = tf.minimum(tf.maximum(cosine, eps - 1.), 1. - eps)
+ degree = tf.acos(cosine) * 180. / 3.141592653589793
+
+ return degree
+
+
def _validate_block_call_channels_last(block_factory, test):
"""Generic testing function for `channels_last` data format.
@@ -33,30 +54,30 @@ def _validate_block_call_channels_last(block_factory, test):
test: tf.test.TestCase object
"""
with tf.device("/cpu:0"): # NHWC format
- input_shape = (224, 224, 32)
+ input_shape = (8, 8, 128)
data_shape = (16,) + input_shape
x = tf.random_normal(shape=data_shape)
# Stride 1
block = block_factory(
- filters=64,
+ filters=128,
strides=(1, 1),
input_shape=input_shape,
data_format="channels_last")
y_tr, y_ev = block(x, training=True), block(x, training=False)
test.assertEqual(y_tr.shape, y_ev.shape)
- test.assertEqual(y_ev.shape, (16, 224, 224, 64))
+ test.assertEqual(y_ev.shape, (16, 8, 8, 128))
test.assertNotAllClose(y_tr, y_ev)
# Stride of 2
block = block_factory(
- filters=64,
+ filters=128,
strides=(2, 2),
input_shape=input_shape,
data_format="channels_last")
y_tr, y_ev = block(x, training=True), block(x, training=False)
test.assertEqual(y_tr.shape, y_ev.shape)
- test.assertEqual(y_ev.shape, (16, 112, 112, 64))
+ test.assertEqual(y_ev.shape, (16, 4, 4, 128))
test.assertNotAllClose(y_tr, y_ev)
@@ -74,22 +95,22 @@ def _validate_block_call_channels_first(block_factory, test):
test.skipTest("GPU not available")
with tf.device("/gpu:0"): # Default NCHW format
- input_shape = (32, 224, 224)
+ input_shape = (128, 8, 8)
data_shape = (16,) + input_shape
x = tf.random_normal(shape=data_shape)
# Stride of 1
- block = block_factory(filters=64, strides=(1, 1), input_shape=input_shape)
+ block = block_factory(filters=128, strides=(1, 1), input_shape=input_shape)
y_tr, y_ev = block(x, training=True), block(x, training=False)
test.assertEqual(y_tr.shape, y_ev.shape)
- test.assertEqual(y_ev.shape, (16, 64, 224, 224))
+ test.assertEqual(y_ev.shape, (16, 128, 8, 8))
test.assertNotAllClose(y_tr, y_ev)
# Stride of 2
- block = block_factory(filters=64, strides=(2, 2), input_shape=input_shape)
+ block = block_factory(filters=128, strides=(2, 2), input_shape=input_shape)
y_tr, y_ev = block(x, training=True), block(x, training=False)
test.assertEqual(y_tr.shape, y_ev.shape)
- test.assertEqual(y_ev.shape, (16, 64, 112, 112))
+ test.assertEqual(y_ev.shape, (16, 128, 4, 4))
test.assertNotAllClose(y_tr, y_ev)
@@ -101,121 +122,116 @@ class RevBlockTest(tf.test.TestCase):
self.skipTest("GPU not available")
with tf.device("/gpu:0"): # Default NCHW format
- input_shape = (32, 224, 224)
+ input_shape = (128, 8, 8)
data_shape = (16,) + input_shape
x = tf.random_normal(shape=data_shape)
# Stride of 1
block = blocks.RevBlock(
- n_res=3, filters=64, strides=(1, 1), input_shape=input_shape)
+ n_res=3, filters=128, strides=(1, 1), input_shape=input_shape)
y_tr, y_ev = block(x, training=True), block(x, training=False)
self.assertEqual(y_tr.shape, y_ev.shape)
- self.assertEqual(y_ev.shape, (16, 64, 224, 224))
+ self.assertEqual(y_ev.shape, (16, 128, 8, 8))
self.assertNotAllClose(y_tr, y_ev)
# Stride of 2
block = blocks.RevBlock(
- n_res=3, filters=64, strides=(2, 2), input_shape=input_shape)
+ n_res=3, filters=128, strides=(2, 2), input_shape=input_shape)
y_tr, y_ev = block(x, training=True), block(x, training=False)
self.assertEqual(y_tr.shape, y_ev.shape)
- self.assertEqual(y_ev.shape, [16, 64, 112, 112])
+ self.assertEqual(y_ev.shape, [16, 128, 4, 4])
self.assertNotAllClose(y_tr, y_ev)
def test_call_channels_last(self):
"""Test `call` function with `channels_last` data format."""
with tf.device("/cpu:0"): # NHWC format
- input_shape = (224, 224, 32)
+ input_shape = (8, 8, 128)
data_shape = (16,) + input_shape
x = tf.random_normal(shape=data_shape)
# Stride 1
block = blocks.RevBlock(
n_res=3,
- filters=64,
+ filters=128,
strides=(1, 1),
input_shape=input_shape,
data_format="channels_last")
y_tr, y_ev = block(x, training=True), block(x, training=False)
self.assertEqual(y_tr.shape, y_ev.shape)
- self.assertEqual(y_ev.shape, (16, 224, 224, 64))
+ self.assertEqual(y_ev.shape, (16, 8, 8, 128))
self.assertNotAllClose(y_tr, y_ev)
# Stride of 2
block = blocks.RevBlock(
n_res=3,
- filters=64,
+ filters=128,
strides=(2, 2),
input_shape=input_shape,
data_format="channels_last")
y_tr, y_ev = block(x, training=True), block(x, training=False)
self.assertEqual(y_tr.shape, y_ev.shape)
- self.assertEqual(y_ev.shape, (16, 112, 112, 64))
+ self.assertEqual(y_ev.shape, (16, 4, 4, 128))
self.assertNotAllClose(y_tr, y_ev)
+ def _check_grad_angle(self, grads, grads_true, atol=1e0):
+    """Check that the angle between each pair of gradients is within `atol` degrees."""
+ for g1, g2 in zip(grads, grads_true):
+ degree = compute_degree(g1, g2)
+ self.assertLessEqual(degree, atol)
+
def test_backward_grads_and_vars_channels_first(self):
"""Test `backward` function with `channels_first` data format."""
if not tf.test.is_gpu_available():
self.skipTest("GPU not available")
with tf.device("/gpu:0"): # Default NCHW format
- input_shape = (32, 224, 224)
- data_shape = (16,) + input_shape
- x = tf.random_normal(shape=data_shape)
-
# Stride 1
- y = tf.random_normal(shape=data_shape)
- dy = tf.random_normal(shape=data_shape)
- block = blocks.RevBlock(
- n_res=3, filters=32, strides=(1, 1), input_shape=input_shape)
- dy, grads, vars_ = block.backward_grads_and_vars(x, y, dy)
- self.assertEqual(dy.shape, x.shape)
- self.assertTrue(isinstance(grads, list))
- self.assertTrue(isinstance(vars_, list))
-
- # Stride 2
- y = tf.random_normal(shape=(16, 32, 112, 112))
- dy = tf.random_normal(shape=(16, 32, 112, 112))
- block = blocks.RevBlock(
- n_res=3, filters=32, strides=(2, 2), input_shape=input_shape)
- dy, grads, vars_ = block.backward_grads_and_vars(x, y, dy)
- self.assertEqual(dy.shape, x.shape)
- self.assertTrue(isinstance(grads, list))
- self.assertTrue(isinstance(vars_, list))
-
- def test_backward_grads_and_vars_channels_last(self):
- """Test `backward` function with `channels_last` data format."""
- with tf.device("/cpu:0"): # NHWC format
- input_shape = (224, 224, 32)
+ input_shape = (128, 8, 8)
data_shape = (16,) + input_shape
- x = tf.random_normal(shape=data_shape)
-
- # Stride 1
- y = tf.random_normal(shape=data_shape)
- dy = tf.random_normal(shape=data_shape)
+ x = tf.random_normal(shape=data_shape, dtype=tf.float64)
+ dy = tf.random_normal(shape=data_shape, dtype=tf.float64)
block = blocks.RevBlock(
n_res=3,
- filters=32,
+ filters=128,
strides=(1, 1),
input_shape=input_shape,
- data_format="channels_last")
- dy, grads, vars_ = block.backward_grads_and_vars(x, y, dy)
- self.assertEqual(dy.shape, x.shape)
- self.assertTrue(isinstance(grads, list))
- self.assertTrue(isinstance(vars_, list))
+ fused=False,
+ dtype=tf.float64)
+ with tf.GradientTape() as tape:
+ tape.watch(x)
+ y = block(x, training=True)
+ # Compute grads from reconstruction
+ dx, dw, vars_ = block.backward_grads_and_vars(x, y, dy, training=True)
+ # Compute true grads
+ grads = tape.gradient(y, [x] + vars_, output_gradients=dy)
+ dx_true, dw_true = grads[0], grads[1:]
+ self.assertAllClose(dx_true, dx)
+ self.assertAllClose(dw_true, dw)
+ self._check_grad_angle(dx_true, dx)
+ self._check_grad_angle(dw_true, dw)
# Stride 2
- y = tf.random_normal(shape=(16, 112, 112, 32))
- dy = tf.random_normal(shape=(16, 112, 112, 32))
+ x = tf.random_normal(shape=data_shape, dtype=tf.float64)
+ dy = tf.random_normal(shape=(16, 128, 4, 4), dtype=tf.float64)
block = blocks.RevBlock(
n_res=3,
- filters=32,
+ filters=128,
strides=(2, 2),
input_shape=input_shape,
- data_format="channels_last")
- dy, grads, vars_ = block.backward_grads_and_vars(x, y, dy)
- self.assertEqual(dy.shape, x.shape)
- self.assertTrue(isinstance(grads, list))
- self.assertTrue(isinstance(vars_, list))
+ fused=False,
+ dtype=tf.float64)
+ with tf.GradientTape() as tape:
+ tape.watch(x)
+ y = block(x, training=True)
+ # Compute grads from reconstruction
+ dx, dw, vars_ = block.backward_grads_and_vars(x, y, dy, training=True)
+ # Compute true grads
+ grads = tape.gradient(y, [x] + vars_, output_gradients=dy)
+ dx_true, dw_true = grads[0], grads[1:]
+ self.assertAllClose(dx_true, dx)
+ self.assertAllClose(dw_true, dw)
+ self._check_grad_angle(dx_true, dx)
+ self._check_grad_angle(dw_true, dw)
class _ResidualTest(tf.test.TestCase):
@@ -229,98 +245,40 @@ class _ResidualTest(tf.test.TestCase):
_validate_block_call_channels_first(blocks._Residual, self)
_validate_block_call_channels_last(blocks._Residual, self)
- def test_backward_channels_first(self):
- """Test `backward` function with `channels_first` data format."""
- if not tf.test.is_gpu_available():
- self.skipTest("GPU not available")
-
- with tf.device("/gpu:0"): # Default NCHW format
- input_shape = (16, 224, 224)
- data_shape = (16,) + input_shape
- x = tf.random_normal(shape=data_shape)
- residual = blocks._Residual(
- filters=16, strides=(1, 1), input_shape=input_shape)
- y_tr, y_ev = residual(x, training=True), residual(x, training=False)
- x_ = residual.backward(y_tr, training=True)
- # The numerical loss is alarming; reconstructed inputs could differ from
- # the original inputs often by more than 1e-3
- self.assertAllClose(x, x_, rtol=1e-01, atol=1e-01)
- x_ = residual.backward(y_ev, training=False)
- self.assertAllClose(x, x_, rtol=1e-01, atol=1e-01)
-
- def test_backward_channels_last(self):
- """Test `backward` function with `channels_last` data format."""
- with tf.device("/cpu:0"): # NHWC format
- input_shape = (224, 224, 16)
- data_shape = (16,) + input_shape
- x = tf.random_normal(shape=data_shape)
- residual = blocks._Residual(
- filters=16,
- strides=(1, 1),
- input_shape=input_shape,
- data_format="channels_last")
- y_tr, y_ev = residual(x, training=True), residual(x, training=False)
- x_ = residual.backward(y_tr, training=True)
- # Egregious numerical error
- self.assertAllClose(x, x_, rtol=1e-01, atol=1e-01)
- x_ = residual.backward(y_ev, training=False)
- self.assertAllClose(x, x_, rtol=1e-01, atol=1e-01)
-
def test_backward_grads_and_vars_channels_first(self):
"""Test `backward_grads` function with `channels_first` data format."""
if not tf.test.is_gpu_available():
self.skipTest("GPU not available")
with tf.device("/gpu:0"): # Default NCHW format
- input_shape = (16, 224, 224)
- data_shape = (16,) + input_shape
- x = tf.random_normal(shape=data_shape)
- dy = tf.random_normal(shape=data_shape)
- residual = blocks._Residual(
- filters=16, strides=(1, 1), input_shape=input_shape)
- dx_tr, grads_tr, vars_tr = residual.backward_grads_and_vars(
- x, dy=dy, training=True)
- dx_ev, grads_ev, vars_ev = residual.backward_grads_and_vars(
- x, dy=dy, training=False)
- self.assertNotAllClose(dx_tr, dx_ev)
- self.assertTrue(isinstance(grads_tr, list))
- self.assertTrue(isinstance(grads_ev, list))
- self.assertTrue(isinstance(vars_tr, list))
- self.assertTrue(isinstance(vars_ev, list))
- for grad_tr, var_tr, grad_ev, var_ev in zip(grads_tr, vars_tr, grads_ev,
- vars_ev):
- if grad_tr is not None: # Batch norm moving mean, var gives None grad
- self.assertEqual(grad_tr.shape, grad_ev.shape)
- self.assertEqual(var_tr.shape, var_ev.shape)
- self.assertEqual(grad_tr.shape, var_tr.shape)
-
- def test_backward_grads_and_vars_channels_last(self):
- """Test `backward_grads` function with `channels_last` data format."""
- with tf.device("/cpu:0"): # NHWC format
- input_shape = (224, 224, 16)
+ input_shape = (128, 8, 8)
data_shape = (16,) + input_shape
- x = tf.random_normal(shape=data_shape)
- dy = tf.random_normal(shape=data_shape)
+ # Use double precision for testing
+ x_true = tf.random_normal(shape=data_shape, dtype=tf.float64)
+ dy = tf.random_normal(shape=data_shape, dtype=tf.float64)
residual = blocks._Residual(
- filters=16,
+ filters=128,
strides=(1, 1),
input_shape=input_shape,
- data_format="channels_last")
- dx_tr, grads_tr, vars_tr = residual.backward_grads_and_vars(
- x, dy=dy, training=True)
- dx_ev, grads_ev, vars_ev = residual.backward_grads_and_vars(
- x, dy=dy, training=False)
- self.assertNotAllClose(dx_tr, dx_ev)
- self.assertTrue(isinstance(grads_tr, list))
- self.assertTrue(isinstance(grads_ev, list))
- self.assertTrue(isinstance(vars_tr, list))
- self.assertTrue(isinstance(vars_ev, list))
- for grad_tr, var_tr, grad_ev, var_ev in zip(grads_tr, vars_tr, grads_ev,
- vars_ev):
- if grad_tr is not None: # Batch norm moving mean, var gives None grad
- self.assertEqual(grad_tr.shape, grad_ev.shape)
- self.assertEqual(var_tr.shape, var_ev.shape)
- self.assertEqual(grad_tr.shape, var_tr.shape)
+ fused=False,
+ dtype=tf.float64)
+
+ with tf.GradientTape() as tape:
+ x_true = tf.identity(x_true)
+ tape.watch(x_true)
+ y = residual(x_true, training=True)
+
+ # Gradients computed due to reversibility
+ x, dx, dw, vars_ = residual.backward_grads_and_vars(
+ y, dy=dy, training=True)
+
+ # True gradients computed by the tape
+ grads = tape.gradient(y, [x_true] + vars_, output_gradients=dy)
+ dx_true, dw_true = grads[0], grads[1:]
+
+ self.assertAllClose(x_true, x)
+ self.assertAllClose(dx_true, dx)
+ self.assertAllClose(dw_true, dw)
class _ResidualInnerTest(tf.test.TestCase):
diff --git a/tensorflow/contrib/eager/python/examples/revnet/cifar_input.py b/tensorflow/contrib/eager/python/examples/revnet/cifar_input.py
index 3bc69da5ad..b6d4c35bfd 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/cifar_input.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/cifar_input.py
@@ -26,8 +26,6 @@ import tensorflow as tf
IMAGE_HEIGHT = 32
IMAGE_WIDTH = 32
NUM_CHANNEL = 3
-NUM_TRAIN_IMG = 50000
-NUM_TEST_IMG = 10000
def get_ds_from_tfrecords(data_dir,
@@ -37,8 +35,8 @@ def get_ds_from_tfrecords(data_dir,
epochs=None,
shuffle=True,
data_format="channels_first",
- num_parallel_calls=4,
- prefetch=True,
+ num_parallel_calls=12,
+ prefetch=0,
div255=True,
dtype=tf.float32):
"""Returns a tf.train.Dataset object from reading tfrecords.
@@ -48,11 +46,12 @@ def get_ds_from_tfrecords(data_dir,
split: "train", "validation", or "test"
data_aug: Apply data augmentation if True
batch_size: Batch size of dataset object
- epochs: Number of epochs to repeat the dataset
+ epochs: Number of epochs to repeat the dataset; default `None` means
+ repeating indefinitely
shuffle: Shuffle the dataset if True
data_format: `channels_first` or `channels_last`
num_parallel_calls: Number of threads for dataset preprocess
- prefetch: Apply prefetch for the dataset if True
+    prefetch: Buffer size, in elements, for the prefetch transformation
div255: Divide the images by 255 if True
dtype: Data type of images
Returns:
@@ -62,7 +61,7 @@ def get_ds_from_tfrecords(data_dir,
ValueError: Unknown split
"""
- if split not in ["train", "validation", "test"]:
+ if split not in ["train", "validation", "test", "train_all"]:
raise ValueError("Unknown split {}".format(split))
def _parser(serialized_example):
@@ -74,7 +73,11 @@ def get_ds_from_tfrecords(data_dir,
"label": tf.FixedLenFeature([], tf.int64),
})
image = tf.decode_raw(features["image"], tf.uint8)
- image = tf.reshape(image, [IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNEL])
+      # The raw CIFAR bytes are stored channel-major, so reshaping directly to
+      # [H, W, C] scrambles the pixels; reshape to [C, H, W] first
+      image = tf.reshape(image, [NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])
+      # then transpose to [H, W, C], as expected by
+      # `tf.image.resize_image_with_crop_or_pad`
+      image = tf.transpose(image, [1, 2, 0])
+
image = tf.cast(image, dtype)
label = tf.cast(features["label"], tf.int32)
@@ -93,13 +96,21 @@ def get_ds_from_tfrecords(data_dir,
return image, label
filename = os.path.join(data_dir, split + ".tfrecords")
- dataset = tf.data.TFRecordDataset(filename).repeat(epochs)
+ dataset = tf.data.TFRecordDataset(filename)
+ dataset = dataset.repeat(epochs)
dataset = dataset.map(_parser, num_parallel_calls=num_parallel_calls)
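+  # `prefetch` is a buffer size in elements (0 effectively disables
+  # prefetching), applied before shuffling and batching.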
+ dataset = dataset.prefetch(prefetch)
- if prefetch:
- dataset = dataset.prefetch(batch_size)
if shuffle:
- dataset = dataset.shuffle(NUM_TRAIN_IMG)
+ # Find the right size according to the split
+ size = {
+ "train": 40000,
+ "validation": 10000,
+ "test": 10000,
+ "train_all": 50000
+ }[split]
+ dataset = dataset.shuffle(size)
+
dataset = dataset.batch(batch_size)
return dataset
diff --git a/tensorflow/contrib/eager/python/examples/revnet/cifar_tfrecords.py b/tensorflow/contrib/eager/python/examples/revnet/cifar_tfrecords.py
index f79428b2a9..377844ad8f 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/cifar_tfrecords.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/cifar_tfrecords.py
@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Read CIFAR-10 data from pickled numpy arrays and writes TFRecords.
+"""Read CIFAR data from pickled numpy arrays and writes TFRecords.
Generates tf.train.Example protos and writes them to TFRecord files from the
-python version of the CIFAR-10 dataset downloaded from
+python version of the CIFAR dataset downloaded from
https://www.cs.toronto.edu/~kriz/cifar.html.
"""
@@ -32,20 +32,22 @@ from six.moves import cPickle as pickle
from six.moves import urllib
import tensorflow as tf
-CIFAR_FILENAME = 'cifar-10-python.tar.gz'
-CIFAR_DOWNLOAD_URL = 'https://www.cs.toronto.edu/~kriz/' + CIFAR_FILENAME
-CIFAR_LOCAL_FOLDER = 'cifar-10-batches-py'
+BASE_URL = 'https://www.cs.toronto.edu/~kriz/'
+CIFAR_FILE_NAMES = ['cifar-10-python.tar.gz', 'cifar-100-python.tar.gz']
+CIFAR_DOWNLOAD_URLS = [BASE_URL + name for name in CIFAR_FILE_NAMES]
+CIFAR_LOCAL_FOLDERS = ['cifar-10', 'cifar-100']
+EXTRACT_FOLDERS = ['cifar-10-batches-py', 'cifar-100-python']
-def download_and_extract(data_dir):
- """Download CIFAR-10 if not already downloaded."""
- filepath = os.path.join(data_dir, CIFAR_FILENAME)
+def download_and_extract(data_dir, file_name, url):
+ """Download CIFAR if not already downloaded."""
+ filepath = os.path.join(data_dir, file_name)
if tf.gfile.Exists(filepath):
return filepath
if not tf.gfile.Exists(data_dir):
tf.gfile.MakeDirs(data_dir)
- urllib.request.urlretrieve(CIFAR_DOWNLOAD_URL, filepath)
+ urllib.request.urlretrieve(url, filepath)
tarfile.open(os.path.join(filepath), 'r:gz').extractall(data_dir)
return filepath
@@ -58,12 +60,22 @@ def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
-def _get_file_names():
+def _get_file_names(folder):
"""Returns the file names expected to exist in the input_dir."""
+ assert folder in ['cifar-10', 'cifar-100']
+
file_names = {}
- file_names['train'] = ['data_batch_%d' % i for i in range(1, 5)]
- file_names['validation'] = ['data_batch_5']
- file_names['test'] = ['test_batch']
+ if folder == 'cifar-10':
+ file_names['train'] = ['data_batch_%d' % i for i in range(1, 5)]
+ file_names['validation'] = ['data_batch_5']
+ file_names['train_all'] = ['data_batch_%d' % i for i in range(1, 6)]
+ file_names['test'] = ['test_batch']
+ else:
+ file_names['train_all'] = ['train']
+ file_names['test'] = ['test']
+ # Split in `convert_to_tfrecord` function
+ file_names['train'] = ['train']
+ file_names['validation'] = ['train']
return file_names
@@ -76,14 +88,28 @@ def read_pickle_from_file(filename):
return data_dict
-def convert_to_tfrecord(input_files, output_file):
+def convert_to_tfrecord(input_files, output_file, folder):
"""Converts files with pickled data to TFRecords."""
+ assert folder in ['cifar-10', 'cifar-100']
+
print('Generating %s' % output_file)
with tf.python_io.TFRecordWriter(output_file) as record_writer:
for input_file in input_files:
data_dict = read_pickle_from_file(input_file)
data = data_dict[b'data']
- labels = data_dict[b'labels']
+ try:
+ labels = data_dict[b'labels']
+ except KeyError:
+ labels = data_dict[b'fine_labels']
+
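+      # CIFAR-100 ships a single 50k-example `train` pickle; both the `train`
+      # and `validation` modes read it, so the split is keyed on the output
+      # file: first 40k examples -> train, last 10k -> validation.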
+      if folder == 'cifar-100' and output_file.endswith('train.tfrecords'):
+        data = data[:40000]
+        labels = labels[:40000]
+      elif folder == 'cifar-100' and output_file.endswith(
+          'validation.tfrecords'):
+        data = data[40000:]
+        labels = labels[40000:]
+
num_entries_in_batch = len(labels)
for i in range(num_entries_in_batch):
@@ -97,19 +123,24 @@ def convert_to_tfrecord(input_files, output_file):
def main(_):
- print('Download from {} and extract.'.format(CIFAR_DOWNLOAD_URL))
- download_and_extract(FLAGS.data_dir)
- file_names = _get_file_names()
- input_dir = os.path.join(FLAGS.data_dir, CIFAR_LOCAL_FOLDER)
-
- for mode, files in file_names.items():
- input_files = [os.path.join(input_dir, f) for f in files]
- output_file = os.path.join(FLAGS.data_dir, mode + '.tfrecords')
- try:
- os.remove(output_file)
- except OSError:
- pass
- convert_to_tfrecord(input_files, output_file)
+ for file_name, url, folder, extract_folder in zip(
+ CIFAR_FILE_NAMES, CIFAR_DOWNLOAD_URLS, CIFAR_LOCAL_FOLDERS,
+ EXTRACT_FOLDERS):
+ print('Download from {} and extract.'.format(url))
+ data_dir = os.path.join(FLAGS.data_dir, folder)
+ download_and_extract(data_dir, file_name, url)
+ file_names = _get_file_names(folder)
+ input_dir = os.path.join(data_dir, extract_folder)
+
+ for mode, files in file_names.items():
+ input_files = [os.path.join(input_dir, f) for f in files]
+ output_file = os.path.join(data_dir, mode + '.tfrecords')
+ try:
+ os.remove(output_file)
+ except OSError:
+ pass
+ convert_to_tfrecord(input_files, output_file, folder)
+
print('Done!')
@@ -118,6 +149,6 @@ if __name__ == '__main__':
flags.DEFINE_string(
'data_dir',
default=None,
- help='Directory to download and extract CIFAR-10 to.')
+ help='Directory to download, extract and store TFRecords.')
tf.app.run(main)
diff --git a/tensorflow/contrib/eager/python/examples/revnet/config.py b/tensorflow/contrib/eager/python/examples/revnet/config.py
index 263a65dc76..3d93fa955a 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/config.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/config.py
@@ -61,18 +61,39 @@ def get_hparams_cifar_38():
config.add_hparam("max_train_iter", 80000)
config.add_hparam("seed", 1234)
config.add_hparam("shuffle", True)
- config.add_hparam("prefetch", True)
- config.add_hparam("log_every", 50)
- config.add_hparam("save_every", 50)
+ config.add_hparam("log_every", 500)
+ config.add_hparam("save_every", 500)
config.add_hparam("dtype", tf.float32)
- config.add_hparam("eval_batch_size", 500)
+ config.add_hparam("eval_batch_size", 1000)
config.add_hparam("div255", True)
+  # This is imprecise: when training with a validation split, only 40k
+  # images remain in the training data
config.add_hparam("iters_per_epoch", 50000 // config.batch_size)
config.add_hparam("epochs", config.max_train_iter // config.iters_per_epoch)
return config
+def get_hparams_cifar_110():
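+  """RevNet-110 configurations for CIFAR-10/CIFAR-100."""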
+ config = get_hparams_cifar_38()
+ config.filters = [32, 64, 128]
+ config.n_res = [9, 9, 9]
+
+ return config
+
+
+def get_hparams_cifar_164():
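+  """RevNet-164 configurations for CIFAR-10/CIFAR-100 with bottleneck blocks."""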
+ config = get_hparams_cifar_38()
+ config.filters = [32, 64, 128]
+ config.n_res = [9, 9, 9]
+  config.bottleneck = True
+  # Bottleneck residual blocks expand the channel count 4x,
+  # e.g. [32, 64, 128] -> [128, 256, 512]
+  filters = [f * 4 for f in config.filters]
+  config.filters = filters
+
+ return config
+
+
def get_hparams_imagenet_56():
"""RevNet-56 configurations for ImageNet."""
@@ -104,18 +125,16 @@ def get_hparams_imagenet_56():
config.add_hparam("max_train_iter", 600000)
config.add_hparam("seed", 1234)
config.add_hparam("shuffle", True)
- config.add_hparam("prefetch", True)
config.add_hparam("log_every", 50)
config.add_hparam("save_every", 50)
config.add_hparam("dtype", tf.float32)
- config.add_hparam("eval_batch_size", 500)
+ config.add_hparam("eval_batch_size", 1000)
config.add_hparam("div255", True)
# TODO(lxuechen): Update this according to ImageNet data
config.add_hparam("iters_per_epoch", 50000 // config.batch_size)
config.add_hparam("epochs", config.max_train_iter // config.iters_per_epoch)
-
- if config.bottleneck:
- filters = [f * 4 for f in config.filters]
- config.filters = filters
+  # Bottleneck residual blocks expand the channel count 4x
+ filters = [f * 4 for f in config.filters]
+ config.filters = filters
return config
diff --git a/tensorflow/contrib/eager/python/examples/revnet/main.py b/tensorflow/contrib/eager/python/examples/revnet/main.py
index 9ef11f8e9b..e2f43b03f9 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/main.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/main.py
@@ -19,6 +19,7 @@ from __future__ import division
from __future__ import print_function
import os
+import sys
from absl import flags
import tensorflow as tf
@@ -30,118 +31,226 @@ tfe = tf.contrib.eager
def main(_):
"""Eager execution workflow with RevNet trained on CIFAR-10."""
+ config = get_config()
+ ds_train, ds_train_one_shot, ds_validation, ds_test = get_datasets(config)
+ model = revnet.RevNet(config=config)
+  global_step = tf.train.get_or_create_global_step()  # Ensure summaries use the right step
+ global_step.assign(1)
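+  # `piecewise_constant` holds each rate in `config.lr_list` until the
+  # corresponding boundary in `config.lr_decay_steps` is passed.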
+ learning_rate = tf.train.piecewise_constant(
+ global_step, config.lr_decay_steps, config.lr_list)
+ optimizer = tf.train.MomentumOptimizer(
+ learning_rate, momentum=config.momentum)
+ checkpointer = tf.train.Checkpoint(
+ optimizer=optimizer, model=model, optimizer_step=global_step)
+
+ if FLAGS.train_dir:
+ summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
+ if FLAGS.restore:
+ latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
+ checkpointer.restore(latest_path)
+      print("Restored latest checkpoint at path: \"{}\" "
+ "with global_step: {}".format(latest_path, global_step.numpy()))
+ sys.stdout.flush()
+
+ if FLAGS.manual_grad:
+ print("Using manual gradients.")
+ else:
+ print("Not using manual gradients.")
+ sys.stdout.flush()
+
+ for x, y in ds_train:
+ train_one_iter(model, x, y, optimizer, global_step=global_step)
+
+ if global_step.numpy() % config.log_every == 0:
+ it_train = ds_train_one_shot.make_one_shot_iterator()
+ it_test = ds_test.make_one_shot_iterator()
+ acc_train, loss_train = evaluate(model, it_train)
+ acc_test, loss_test = evaluate(model, it_test)
+
+ if FLAGS.validate:
+ it_validation = ds_validation.make_one_shot_iterator()
+ acc_validation, loss_validation = evaluate(model, it_validation)
+ print("Iter {}, "
+ "training set accuracy {:.4f}, loss {:.4f}; "
+            "validation set accuracy {:.4f}, loss {:.4f}; "
+ "test accuracy {:.4f}, loss {:.4f}".format(
+ global_step.numpy(), acc_train, loss_train, acc_validation,
+ loss_validation, acc_test, loss_test))
+ else:
+ print("Iter {}, "
+ "training set accuracy {:.4f}, loss {:.4f}; "
+ "test accuracy {:.4f}, loss {:.4f}".format(
+ global_step.numpy(), acc_train, loss_train, acc_test,
+ loss_test))
+ sys.stdout.flush()
+
+ if FLAGS.train_dir:
+ with summary_writer.as_default():
+ with tf.contrib.summary.always_record_summaries():
+ tf.contrib.summary.scalar("Training accuracy", acc_train)
+ tf.contrib.summary.scalar("Test accuracy", acc_test)
+ tf.contrib.summary.scalar("Training loss", loss_train)
+ tf.contrib.summary.scalar("Test loss", loss_test)
+ if FLAGS.validate:
+ tf.contrib.summary.scalar("Validation accuracy", acc_validation)
+ tf.contrib.summary.scalar("Validation loss", loss_validation)
+
+ if global_step.numpy() % config.save_every == 0 and FLAGS.train_dir:
+ saved_path = checkpointer.save(
+ file_prefix=os.path.join(FLAGS.train_dir, "ckpt"))
+ print("Saved checkpoint at path: \"{}\" "
+ "with global_step: {}".format(saved_path, global_step.numpy()))
+ sys.stdout.flush()
+
+
+def get_config():
+ """Return configuration."""
+ print("Config: {}".format(FLAGS.config))
+ sys.stdout.flush()
+ config = {
+ "revnet-38": config_.get_hparams_cifar_38(),
+ "revnet-110": config_.get_hparams_cifar_110(),
+ "revnet-164": config_.get_hparams_cifar_164(),
+ }[FLAGS.config]
+
+ if FLAGS.dataset == "cifar-100":
+ config.n_classes = 100
+
+ return config
+
+
+def get_datasets(config):
+  """Return the training, one-shot training, validation, and test datasets."""
if FLAGS.data_dir is None:
raise ValueError("No supplied data directory")
-
if not os.path.exists(FLAGS.data_dir):
raise ValueError("Data directory {} does not exist".format(FLAGS.data_dir))
+ if FLAGS.dataset not in ["cifar-10", "cifar-100"]:
+ raise ValueError("Unknown dataset {}".format(FLAGS.dataset))
- tf.enable_eager_execution()
- config = config_.get_hparams_cifar_38()
- model = revnet.RevNet(config=config)
-
- ds_train = cifar_input.get_ds_from_tfrecords(
- data_dir=FLAGS.data_dir,
- split="train",
- data_aug=True,
- batch_size=config.batch_size,
- epochs=config.epochs,
- shuffle=config.shuffle,
- data_format=config.data_format,
- dtype=config.dtype,
- prefetch=config.prefetch)
+ print("Training on {} dataset.".format(FLAGS.dataset))
+ sys.stdout.flush()
+ data_dir = os.path.join(FLAGS.data_dir, FLAGS.dataset)
+ if FLAGS.validate:
+ # 40k Training set
+ ds_train = cifar_input.get_ds_from_tfrecords(
+ data_dir=data_dir,
+ split="train",
+ data_aug=True,
+ batch_size=config.batch_size,
+ epochs=config.epochs,
+ shuffle=config.shuffle,
+ data_format=config.data_format,
+ dtype=config.dtype,
+ prefetch=config.batch_size)
+    # 10k Validation set
+ ds_validation = cifar_input.get_ds_from_tfrecords(
+ data_dir=data_dir,
+ split="validation",
+ data_aug=False,
+ batch_size=config.eval_batch_size,
+ epochs=1,
+ shuffle=False,
+ data_format=config.data_format,
+ dtype=config.dtype,
+ prefetch=config.eval_batch_size)
+ else:
+ # 50k Training set
+ ds_train = cifar_input.get_ds_from_tfrecords(
+ data_dir=data_dir,
+ split="train_all",
+ data_aug=True,
+ batch_size=config.batch_size,
+ epochs=config.epochs,
+ shuffle=config.shuffle,
+ data_format=config.data_format,
+ dtype=config.dtype,
+ prefetch=config.batch_size)
+ ds_validation = None
- ds_validation = cifar_input.get_ds_from_tfrecords(
- data_dir=FLAGS.data_dir,
- split="validation",
+  # Always compute loss and accuracy on the full training and test sets
+ ds_train_one_shot = cifar_input.get_ds_from_tfrecords(
+ data_dir=data_dir,
+ split="train_all",
data_aug=False,
batch_size=config.eval_batch_size,
epochs=1,
+ shuffle=False,
data_format=config.data_format,
dtype=config.dtype,
- prefetch=config.prefetch)
+ prefetch=config.eval_batch_size)
ds_test = cifar_input.get_ds_from_tfrecords(
- data_dir=FLAGS.data_dir,
+ data_dir=data_dir,
split="test",
data_aug=False,
batch_size=config.eval_batch_size,
epochs=1,
+ shuffle=False,
data_format=config.data_format,
dtype=config.dtype,
- prefetch=config.prefetch)
-
- global_step = tfe.Variable(1, trainable=False)
+ prefetch=config.eval_batch_size)
- def learning_rate(): # TODO(lxuechen): Remove once cl/201089859 is in place
- return tf.train.piecewise_constant(global_step, config.lr_decay_steps,
- config.lr_list)
-
- optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
- checkpoint = tf.train.Checkpoint(
- optimizer=optimizer, model=model, optimizer_step=global_step)
-
- if FLAGS.train_dir:
- summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
- if FLAGS.restore:
- latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
- checkpoint.restore(latest_path)
-
- for x, y in ds_train:
- loss = train_one_iter(model, x, y, optimizer, global_step=global_step)
-
- if global_step % config.log_every == 0:
- it_validation = ds_validation.make_one_shot_iterator()
- it_test = ds_test.make_one_shot_iterator()
- acc_validation = evaluate(model, it_validation)
- acc_test = evaluate(model, it_test)
- print("Iter {}, "
- "train loss {}, "
- "validation accuracy {}, "
- "test accuracy {}".format(global_step.numpy(), loss, acc_validation,
- acc_test))
-
- if FLAGS.train_dir:
- with summary_writer.as_default():
- with tf.contrib.summary.always_record_summaries():
- tf.contrib.summary.scalar("Validation accuracy", acc_validation)
- tf.contrib.summary.scalar("Test accuracy", acc_test)
- tf.contrib.summary.scalar("Training loss", loss)
-
- if global_step.numpy() % config.save_every == 0 and FLAGS.train_dir:
- checkpoint.save(file_prefix=FLAGS.train_dir + "ckpt")
+ return ds_train, ds_train_one_shot, ds_validation, ds_test
def train_one_iter(model, inputs, labels, optimizer, global_step=None):
"""Train for one iteration."""
- grads, vars_, loss = model.compute_gradients(inputs, labels, training=True)
- optimizer.apply_gradients(zip(grads, vars_), global_step=global_step)
+ if FLAGS.manual_grad:
+ grads, vars_, loss = model.compute_gradients(inputs, labels, training=True)
+ optimizer.apply_gradients(zip(grads, vars_), global_step=global_step)
+ else: # For correctness validation
+ with tf.GradientTape() as tape:
+ logits, _ = model(inputs, training=True)
+ loss = model.compute_loss(logits=logits, labels=labels)
+ tf.logging.info("Logits are placed on device: {}".format(logits.device))
+ grads = tape.gradient(loss, model.trainable_variables)
+ optimizer.apply_gradients(
+ zip(grads, model.trainable_variables), global_step=global_step)
return loss.numpy()
def evaluate(model, iterator):
"""Compute accuracy with the given dataset iterator."""
+ mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
for x, y in iterator:
logits, _ = model(x, training=False)
+ loss = model.compute_loss(logits=logits, labels=y)
accuracy(
labels=tf.cast(y, tf.int64),
predictions=tf.argmax(logits, axis=1, output_type=tf.int64))
+ mean_loss(loss)
- return accuracy.result().numpy()
+ return accuracy.result().numpy(), mean_loss.result().numpy()
if __name__ == "__main__":
flags.DEFINE_string(
+ "data_dir", default=None, help="Directory to load tfrecords")
+ flags.DEFINE_string(
"train_dir",
default=None,
help="[Optional] Directory to store the training information")
- flags.DEFINE_string(
- "data_dir", default=None, help="Directory to load tfrecords.")
flags.DEFINE_boolean(
"restore",
- default=True,
+ default=False,
help="[Optional] Restore the latest checkpoint from `train_dir` if True")
+ flags.DEFINE_boolean(
+ "validate",
+ default=False,
+      help="[Optional] Hold out a validation set for hyperparameter search")
+ flags.DEFINE_boolean(
+ "manual_grad",
+ default=False,
+ help="[Optional] Use manual gradient graph to save memory")
+ flags.DEFINE_string(
+ "dataset",
+ default="cifar-10",
+ help="[Optional] The dataset used; either `cifar-10` or `cifar-100`")
+ flags.DEFINE_string(
+ "config", default="revnet-38", help="[Optional] Architecture of network.")
FLAGS = flags.FLAGS
+ tf.enable_eager_execution()
tf.app.run(main)
diff --git a/tensorflow/contrib/eager/python/examples/revnet/revnet.py b/tensorflow/contrib/eager/python/examples/revnet/revnet.py
index b3b8c262b1..af0d20fa72 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/revnet.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/revnet.py
@@ -27,6 +27,7 @@ from __future__ import print_function
import functools
import operator
+import six
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import blocks
@@ -58,9 +59,12 @@ class RevNet(tf.keras.Model):
data_format=self.config.data_format,
use_bias=False,
padding="SAME",
- input_shape=self.config.input_shape),
+ input_shape=self.config.input_shape,
+ dtype=self.config.dtype),
tf.keras.layers.BatchNormalization(
- axis=self.axis, fused=self.config.fused),
+ axis=self.axis,
+ fused=self.config.fused,
+ dtype=self.config.dtype),
tf.keras.layers.Activation("relu"),
],
name="init")
@@ -70,7 +74,8 @@ class RevNet(tf.keras.Model):
pool_size=(3, 3),
strides=(2, 2),
padding="SAME",
- data_format=self.config.data_format))
+ data_format=self.config.data_format,
+ dtype=self.config.dtype))
return init_block
def _construct_final_block(self):
@@ -95,11 +100,13 @@ class RevNet(tf.keras.Model):
tf.keras.layers.BatchNormalization(
axis=self.axis,
input_shape=input_shape,
- fused=self.config.fused),
+ fused=self.config.fused,
+ dtype=self.config.dtype),
tf.keras.layers.Activation("relu"),
tf.keras.layers.GlobalAveragePooling2D(
- data_format=self.config.data_format),
- tf.keras.layers.Dense(self.config.n_classes)
+ data_format=self.config.data_format, dtype=self.config.dtype),
+ tf.keras.layers.Dense(
+ self.config.n_classes, dtype=self.config.dtype)
],
name="final")
return final_block
@@ -137,7 +144,8 @@ class RevNet(tf.keras.Model):
batch_norm_first=(i != 0), # Only skip on first block
data_format=self.config.data_format,
bottleneck=self.config.bottleneck,
- fused=self.config.fused)
+ fused=self.config.fused,
+ dtype=self.config.dtype)
block_list.append(rev_block)
# Precompute input shape for the next block
@@ -153,7 +161,6 @@ class RevNet(tf.keras.Model):
def call(self, inputs, training=True):
"""Forward pass."""
- # Only store hidden states during training
if training:
saved_hidden = [inputs]
@@ -173,25 +180,39 @@ class RevNet(tf.keras.Model):
def compute_loss(self, logits, labels):
"""Compute cross entropy loss."""
- cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(
- logits=logits, labels=labels)
+ if self.config.dtype == tf.float32 or self.config.dtype == tf.float16:
+ cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(
+ logits=logits, labels=labels)
+ else:
+ # `sparse_softmax_cross_entropy_with_logits` does not have a GPU kernel
+ # for float64, int32 pairs
+ labels = tf.one_hot(
+ labels, depth=self.config.n_classes, axis=1, dtype=self.config.dtype)
+ cross_ent = tf.nn.softmax_cross_entropy_with_logits(
+ logits=logits, labels=labels)
return tf.reduce_mean(cross_ent)
- def compute_gradients(self, inputs, labels, training=True):
+ def compute_gradients(self, inputs, labels, training=True, l2_reg=True):
"""Manually computes gradients.
+ When eager execution is enabled, this method also SILENTLY updates the
+ running averages of batch normalization when `training` is set to True.
+
Args:
inputs: Image tensor, either NHWC or NCHW, conforming to `data_format`
labels: One-hot labels for classification
- training: for batch normalization
+ training: Use the mini-batch stats in batch norm if set to True
+ l2_reg: Apply l2 regularization
Returns:
- list of tuple each being (grad, var) for optimizer use
+      A tuple (grads, vars, loss): lists of gradients and variables ready for
+      `optimizer.apply_gradients(zip(grads, vars))`, plus the scalar loss
"""
- # Forward pass record hidden states before downsampling
+ # Run forward pass to record hidden states; avoid updating running averages
+ vars_and_vals = self.get_moving_stats()
_, saved_hidden = self.call(inputs, training=training)
+ self.restore_moving_stats(vars_and_vals)
grads_all = []
vars_all = []
@@ -199,8 +220,9 @@ class RevNet(tf.keras.Model):
# Manually backprop through last block
x = saved_hidden[-1]
with tf.GradientTape() as tape:
- x = tf.identity(x) # TODO(lxuechen): Remove after b/110264016 is fixed
+ x = tf.identity(x)
tape.watch(x)
+ # Running stats updated below
logits = self._final_block(x, training=training)
loss = self.compute_loss(logits, labels)
@@ -225,17 +247,55 @@ class RevNet(tf.keras.Model):
assert not saved_hidden # Cleared after backprop
with tf.GradientTape() as tape:
- x = tf.identity(x) # TODO(lxuechen): Remove after b/110264016 is fixed
+ x = tf.identity(x)
+ # Running stats updated below
y = self._init_block(x, training=training)
grads_all += tape.gradient(
- y, self._init_block.trainable_variables, output_gradients=[dy])
+ y, self._init_block.trainable_variables, output_gradients=dy)
vars_all += self._init_block.trainable_variables
- grads_all = self._apply_weight_decay(grads_all, vars_all)
+ # Apply weight decay
+ if l2_reg:
+ grads_all = self._apply_weight_decay(grads_all, vars_all)
return grads_all, vars_all, loss
def _apply_weight_decay(self, grads, vars_):
"""Update gradients to reflect weight decay."""
- return [g + self.config.weight_decay * v for g, v in zip(grads, vars_)]
+ # Don't decay bias
+ return [
+ g + self.config.weight_decay * v if v.name.endswith("kernel:0") else g
+ for g, v in zip(grads, vars_)
+ ]
+
+ def get_moving_stats(self):
+ """Get moving averages of batch normalization.
+
+ This is needed to avoid updating the running average twice in one iteration.
+
+ Returns:
+ A dictionary mapping variables for batch normalization moving averages
+ to their current values.
+ """
+ vars_and_vals = {}
+
+ def _is_moving_var(v):
+ n = v.name
+ return n.endswith("moving_mean:0") or n.endswith("moving_variance:0")
+
+ for v in filter(_is_moving_var, self.variables):
+ vars_and_vals[v] = v.read_value()
+
+ return vars_and_vals
+
+ def restore_moving_stats(self, vars_and_vals):
+ """Restore moving averages of batch normalization.
+
+ This is needed to avoid updating the running average twice in one iteration.
+
+ Args:
+ vars_and_vals: The dictionary mapping variables to their previous values.
+ """
+ for var_, val in six.iteritems(vars_and_vals):
+ var_.assign(val)
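+
+  # Usage sketch: snapshot the stats, run the extra forward pass, then
+  # restore, so the averages advance only once per training iteration:
+  #   vars_and_vals = model.get_moving_stats()
+  #   _, saved_hidden = model.call(inputs, training=True)
+  #   model.restore_moving_stats(vars_and_vals)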
diff --git a/tensorflow/contrib/eager/python/examples/revnet/revnet_test.py b/tensorflow/contrib/eager/python/examples/revnet/revnet_test.py
index cb3bac13f9..b2ac4b67c9 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/revnet_test.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/revnet_test.py
@@ -22,6 +22,7 @@ import gc
import time
import tensorflow as tf
+from tensorflow.contrib.eager.python.examples.revnet import blocks_test
from tensorflow.contrib.eager.python.examples.revnet import config as config_
from tensorflow.contrib.eager.python.examples.revnet import revnet
from tensorflow.python.client import device_lib
@@ -36,19 +37,22 @@ def train_one_iter(model, inputs, labels, optimizer, global_step=None):
return loss
-class RevnetTest(tf.test.TestCase):
+class RevNetTest(tf.test.TestCase):
def setUp(self):
- super(RevnetTest, self).setUp()
- config = config_.get_hparams_imagenet_56()
+ super(RevNetTest, self).setUp()
+ config = config_.get_hparams_cifar_38()
+    # Reconstruction could cause numerical error; use double precision for tests
+ config.dtype = tf.float64
+ config.fused = False # Fused batch norm does not support tf.float64
shape = (config.batch_size,) + config.input_shape
self.model = revnet.RevNet(config=config)
- self.x = tf.random_normal(shape=shape)
+ self.x = tf.random_normal(shape=shape, dtype=tf.float64)
self.t = tf.random_uniform(
shape=[config.batch_size],
minval=0,
maxval=config.n_classes,
- dtype=tf.int32)
+ dtype=tf.int64)
self.config = config
def tearDown(self):
@@ -56,7 +60,7 @@ class RevnetTest(tf.test.TestCase):
del self.x
del self.t
del self.config
- super(RevnetTest, self).tearDown()
+ super(RevNetTest, self).tearDown()
def test_call(self):
"""Test `call` function."""
@@ -64,27 +68,58 @@ class RevnetTest(tf.test.TestCase):
y, _ = self.model(self.x, training=False)
self.assertEqual(y.shape, [self.config.batch_size, self.config.n_classes])
+ def _check_grad_angle_combined(self, grads, grads_true):
+ """Verify that the reconstructed gradients has correct direction.
+
+    """Verify that the reconstructed gradients have the correct direction.
+ Yet according to the paper, the angle should be roughly the same.
+
+ Args:
+ grads: list of gradients from reconstruction
+ grads_true: list of true gradients
+ """
+
+ def _combine(gs):
+ return [tf.reshape(g, [-1]) for g in gs]
+
+ g1_all = tf.concat(_combine(grads), axis=0)
+ g2_all = tf.concat(_combine(grads_true), axis=0)
+
+ self.assertEqual(len(g1_all.shape), 1)
+ self.assertEqual(len(g2_all.shape), 1)
+
+ degree = blocks_test.compute_degree(g1_all, g2_all)
+ self.assertLessEqual(degree, 1e0)
+
def test_compute_gradients(self):
"""Test `compute_gradients` function."""
-
- grads, vars_, _ = self.model.compute_gradients(inputs=self.x, labels=self.t)
+ self.model(self.x, training=False) # Initialize model
+ grads, vars_, loss = self.model.compute_gradients(
+ inputs=self.x, labels=self.t, training=True, l2_reg=True)
self.assertTrue(isinstance(grads, list))
self.assertTrue(isinstance(vars_, list))
self.assertEqual(len(grads), len(vars_))
for grad, var in zip(grads, vars_):
- if grad is not None:
- self.assertEqual(grad.shape, var.shape)
+ self.assertEqual(grad.shape, var.shape)
+
+ # Compare against the true gradient computed by the tape
+ with tf.GradientTape() as tape:
+ logits, _ = self.model(self.x, training=True)
+ loss_true = self.model.compute_loss(logits=logits, labels=self.t)
+ grads_true = tape.gradient(loss_true, vars_)
+ self.assertAllClose(loss, loss_true)
+ self.assertAllClose(grads, grads_true, rtol=1e-4, atol=1e-4)
+ self._check_grad_angle_combined(grads, grads_true)
def test_call_defun(self):
"""Test `call` function with defun."""
-
y, _ = tfe.defun(self.model.call)(self.x, training=False)
self.assertEqual(y.shape, [self.config.batch_size, self.config.n_classes])
def test_compute_gradients_defun(self):
"""Test `compute_gradients` function with defun."""
compute_gradients = tfe.defun(self.model.compute_gradients)
- grads, vars_, _ = compute_gradients(self.x, self.t)
+ grads, vars_, _ = compute_gradients(self.x, self.t, training=True)
self.assertTrue(isinstance(grads, list))
self.assertTrue(isinstance(vars_, list))
self.assertEqual(len(grads), len(vars_))
@@ -94,8 +129,8 @@ class RevnetTest(tf.test.TestCase):
def test_training_graph(self):
"""Test model training in graph mode."""
-
with tf.Graph().as_default():
+ config = config_.get_hparams_cifar_38()
x = tf.random_normal(
shape=(self.config.batch_size,) + self.config.input_shape)
t = tf.random_uniform(
@@ -104,12 +139,14 @@ class RevnetTest(tf.test.TestCase):
maxval=self.config.n_classes,
dtype=tf.int32)
global_step = tfe.Variable(0., trainable=False)
- model = revnet.RevNet(config=self.config)
- grads_all, vars_all, _ = model.compute_gradients(x, t, training=True)
- optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
+ model = revnet.RevNet(config=config)
+ model(x)
updates = model.get_updates_for(x)
- self.assertEqual(len(updates), 192)
- with tf.control_dependencies(model.get_updates_for(x)):
+
+ x_ = tf.identity(x)
+ grads_all, vars_all, _ = model.compute_gradients(x_, t, training=True)
+ optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
+ with tf.control_dependencies(updates):
train_op = optimizer.apply_gradients(
zip(grads_all, vars_all), global_step=global_step)
@@ -144,7 +181,7 @@ class MockIterator(object):
return self._tensors
-class RevnetBenchmark(tf.test.Benchmark):
+class RevNetBenchmark(tf.test.Benchmark):
"""Eager and graph benchmarks for RevNet."""
def _train_batch_sizes(self):
diff --git a/tensorflow/contrib/eager/python/examples/workshop/1_basic.ipynb b/tensorflow/contrib/eager/python/examples/workshop/1_basic.ipynb
new file mode 100644
index 0000000000..75cb3f8227
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/workshop/1_basic.ipynb
@@ -0,0 +1,282 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "name": "TFE Workshop: control flow",
+ "version": "0.3.2",
+ "provenance": [],
+ "include_colab_link": true
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+ "[View in Colaboratory](https://colab.research.google.com/gist/alextp/664b2f8700485ff6801f4d26293bd567/tfe-workshop-control-flow.ipynb)"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "9BpQzh9BvJlj",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 37
+ },
+ "outputId": "0b336886-8204-4815-89fa-5291a49d5784"
+ },
+ "cell_type": "code",
+ "source": [
+ "import tensorflow as tf\n",
+ "import numpy as np\n",
+ "tf.enable_eager_execution()"
+ ],
+ "execution_count": 1,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "0roIB19GvOjI",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Eager execution basics\n",
+ "\n",
+ "When eager execution is enabled TensorFlow immediately executes operations, and Tensors are always available. "
+ ]
+ },
+ {
+ "metadata": {
+ "id": "jeO8F-V-vN24",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 68
+ },
+ "outputId": "aeb3bdec-50b7-440d-93d8-5a171f091081"
+ },
+ "cell_type": "code",
+ "source": [
+ "t = tf.constant([[1, 2], [3, 4]])\n",
+ "t"
+ ],
+ "execution_count": 2,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "<tf.Tensor: id=0, shape=(2, 2), dtype=int32, numpy=\n",
+ "array([[1, 2],\n",
+ " [3, 4]], dtype=int32)>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "execution_count": 2
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "Y17RwSFxvlDL",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 68
+ },
+ "outputId": "cfcc10c7-707b-4997-99b3-a5f382c5166b"
+ },
+ "cell_type": "code",
+ "source": [
+ "tf.matmul(t, t)"
+ ],
+ "execution_count": 3,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "<tf.Tensor: id=2, shape=(2, 2), dtype=int32, numpy=\n",
+ "array([[ 7, 10],\n",
+ " [15, 22]], dtype=int32)>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "execution_count": 3
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "Dab1bS3TvmRE",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 34
+ },
+ "outputId": "8a624f3d-a658-4359-c586-1c5f6bf4c8b7"
+ },
+ "cell_type": "code",
+ "source": [
+ "# It's also possible to have Python control flow which depends on the value of tensors.\n",
+ "if t[0, 0] > 0.5:\n",
+ " print(\"T is bigger\")\n",
+ "else:\n",
+ " print(\"T is smaller\")"
+ ],
+ "execution_count": 4,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "T is bigger\n"
+ ],
+ "name": "stdout"
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "dPgptJcGwIon",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 34
+ },
+ "outputId": "c4f27f2b-0848-4475-dde5-2534dac65a5c"
+ },
+ "cell_type": "code",
+ "source": [
+ "# Tensors are also usable as numpy arrays\n",
+ "np.prod(t)"
+ ],
+ "execution_count": 6,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "24"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "execution_count": 6
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "p3DTfQXnwXzj",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Exercise\n",
+ "\n",
+ "The algorithm for bisecting line search is a pretty simple way to find a zero of a continuous scalar function in an interval [a,b] where f(a) and f(b) have different signs. Simply evaluate f((a+b)/2), and narrow the interval by replacing either a or b with (a+b)/2 such that the function when applied on the boundary of the interval still has different signs.\n",
+ "\n",
+ "Implement a python function `bisecting_line_search(f, a, b, epsilon)` which returns a value such that `tf.abs(f(value)) < epsilon`.\n",
+ "\n",
+        "One thing to keep in mind: Python's `==` operator is not overloaded on Tensors, so you need to use `tf.equal` to compare for equality."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "6eq0YuI6ykm5",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ "# Example test harness to get you going\n",
+ "\n",
+ "def test_f(x):\n",
+ " return x - 0.1234\n",
+ "def bisecting_line_search(f, a, b, epsilon):\n",
+        "  # Return x such that tf.abs(f(x)) < epsilon.\n",
+ " pass\n",
+ "a = tf.constant(0.0)\n",
+ "b = tf.constant(1.0)\n",
+ "epsilon = tf.constant(0.001)\n",
+ "x = bisecting_line_search(test_f, a, b, epsilon)\n"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "LcMmEfd_xvej",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 170
+ },
+ "outputId": "f402aa50-8ce3-4416-f755-8bbcd1af7809"
+ },
+ "cell_type": "code",
+ "source": [
+ "#@title Double-click to see the solution\n",
+ "\n",
+ "def bisecting_line_search(f, a, b, epsilon):\n",
+ " f_a = f(a)\n",
+ " f_b = f(b)\n",
+ " probe = (a + b) / 2\n",
+ " f_probe = f(probe)\n",
+ " while tf.abs(f_probe) > epsilon:\n",
+ " if tf.equal(tf.sign(f_probe), tf.sign(f_a)):\n",
+ " a = probe\n",
+ " f_a = f_probe\n",
+ " else:\n",
+ " b = probe\n",
+ " f_b = f_probe\n",
+ " probe = (a + b) / 2\n",
+ " f_probe = f(probe)\n",
+ " print(\"new probe\", probe)\n",
+ " return probe\n",
+ "\n",
+ "bisecting_line_search(test_f, 0., 1., 0.001)"
+ ],
+ "execution_count": 8,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "('new probe', 0.25)\n",
+ "('new probe', 0.125)\n",
+ "('new probe', 0.0625)\n",
+ "('new probe', 0.09375)\n",
+ "('new probe', 0.109375)\n",
+ "('new probe', 0.1171875)\n",
+ "('new probe', 0.12109375)\n",
+ "('new probe', 0.123046875)\n"
+ ],
+ "name": "stdout"
+ },
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "0.123046875"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "execution_count": 8
+ }
+ ]
+ }
+ ]
+}
diff --git a/tensorflow/contrib/eager/python/examples/workshop/2_models.ipynb b/tensorflow/contrib/eager/python/examples/workshop/2_models.ipynb
new file mode 100644
index 0000000000..4f1410e00b
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/workshop/2_models.ipynb
@@ -0,0 +1,1018 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "name": "TFE Workshop: Models.ipynb",
+ "version": "0.3.2",
+ "provenance": [],
+ "collapsed_sections": [],
+ "include_colab_link": true
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+ "[View in Colaboratory](https://colab.research.google.com/gist/alextp/5cfcffd408bd5103f5ae747bc97ab0b5/tfe-workshop-models.ipynb)"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "BMxv1O6Q0SJL",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 17
+ },
+ "outputId": "8be9c556-ac7f-4142-e35e-19dc2b097121"
+ },
+ "cell_type": "code",
+ "source": [
+ "import tensorflow as tf\n",
+ "tf.enable_eager_execution()\n",
+ "tfe = tf.contrib.eager"
+ ],
+ "execution_count": 1,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "lE1vJhxp0WR9",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Variables\n",
+ "\n",
+ "TensorFlow variables are useful to store the state in your program. They are integrated with other parts of the API (taking gradients, checkpointing, graph functions)."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "C4ztQNgc0VpW",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 34
+ },
+ "outputId": "8b63ae1f-2670-49c0-a31b-8cf7fc4194a1"
+ },
+ "cell_type": "code",
+ "source": [
+ "# Creating variables\n",
+ "v = tfe.Variable(1.0)\n",
+ "v"
+ ],
+ "execution_count": 2,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "execution_count": 2
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "H0daItGg1IAp",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 34
+ },
+ "outputId": "e47d5aab-16a1-4e29-c27d-7fbc0b94b5d3"
+ },
+ "cell_type": "code",
+ "source": [
+ "v.assign_add(1.0)\n",
+ "v"
+ ],
+ "execution_count": 3,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=2.0>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "execution_count": 3
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "BJvBzcIG1hyK",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Layers: common sets of useful operations\n",
+ "\n",
+ "Most of the time when writing code for machine learning models you want to operate at a higher level of abstraction than individual operations and manipulation of individual variables.\n",
+ "\n",
+        "Many machine learning models are expressible as the composition and stacking of relatively simple layers, and TensorFlow provides both a set of many common layers and easy ways for you to write your own application-specific layers either from scratch or as the composition of existing layers.\n",
+ "\n",
+ "TensorFlow includes the full [Keras](https://keras.io) API in the tf.keras package, and the Keras layers are very useful when building your own models.\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "iSQTS3QW1YQQ",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 17
+ },
+ "outputId": "c5d8aa10-dcad-44f7-f0eb-0faf5249fd7e"
+ },
+ "cell_type": "code",
+ "source": [
+ "# In the tf.keras.layers package, layers are objects. To construct a layer,\n",
+ "# simply construct the object. Most layers take as a first argument the number\n",
+ "# of output dimensions / channels.\n",
+ "layer = tf.keras.layers.Dense(100)\n",
+ "\n",
+ "# The number of input dimensions is often unnecessary, as it can be inferred\n",
+ "# the first time the layer is used, but it can be provided if you want to \n",
+ "# specify it manually, which is useful in some complex models.\n",
+ "layer = tf.keras.layers.Dense(10, input_shape=(None, 5))\n"
+ ],
+ "execution_count": 4,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "nRuUogoS1liV",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 68
+ },
+ "outputId": "c352ce79-d519-45e4-a12e-1eaba76871a2"
+ },
+ "cell_type": "code",
+ "source": [
+ "layer(tf.zeros([2, 2]))"
+ ],
+ "execution_count": 5,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "<tf.Tensor: id=43, shape=(2, 10), dtype=float32, numpy=\n",
+ "array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
+ " [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]], dtype=float32)>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "execution_count": 5
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "JH4Kf4ka1mht",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 136
+ },
+ "outputId": "c34e2378-f83d-42c5-d30a-ebe55620368a"
+ },
+ "cell_type": "code",
+ "source": [
+ "layer.variables"
+ ],
+ "execution_count": 6,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "[<tf.Variable 'dense/kernel:0' shape=(2, 10) dtype=float32, numpy=\n",
+ " array([[-0.42494273, -0.2067694 , 0.4519381 , 0.6842533 , 0.04131705,\n",
+ " 0.70547956, 0.4021917 , -0.5939298 , -0.5671462 , 0.5586321 ],\n",
+ " [ 0.3709975 , -0.64126074, -0.5386696 , -0.42212513, 0.6550072 ,\n",
+ " 0.70081085, 0.08859557, -0.30801034, -0.31450653, 0.02522504]],\n",
+ " dtype=float32)>,\n",
+ " <tf.Variable 'dense/bias:0' shape=(10,) dtype=float32, numpy=array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)>]"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "execution_count": 6
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "DSI4NF0_1vn-",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "The full list of pre-existing layers can be seen in [the documentation](https://www.tensorflow.org/api_docs/python/tf/keras/layers). It includes Dense (a fully-connected layer),\n",
+ "Conv2D, LSTM, BatchNormalization, Dropout, and many others."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "hMgDBftJ12Bp",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Models: composing layers\n",
+ "\n",
+ "Many interesting layer-like things in machine learning models are implemented by composing existing layers. For example, each residual block in a resnet is a composition of convolutions, batch normalizations, and a shortcut.\n",
+ "\n",
+ "The main class used when creating a layer-like thing which contains other layers is tf.keras.Model. Implementing one is done by inheriting from tf.keras.Model.\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "K3gVY6gj1nbe",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 190
+ },
+ "outputId": "6e9be0c4-960e-46c2-cdd9-7e94ad09d46b"
+ },
+ "cell_type": "code",
+ "source": [
+ "class ResnetIdentityBlock(tf.keras.Model):\n",
+ " def __init__(self, kernel_size, filters):\n",
+ " super(ResnetIdentityBlock, self).__init__(name='')\n",
+ " filters1, filters2, filters3 = filters\n",
+ "\n",
+ " self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))\n",
+ " self.bn2a = tf.keras.layers.BatchNormalization()\n",
+ "\n",
+ " self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same')\n",
+ " self.bn2b = tf.keras.layers.BatchNormalization()\n",
+ "\n",
+ " self.conv2c = tf.keras.layers.Conv2D(filters3, (1, 1))\n",
+ " self.bn2c = tf.keras.layers.BatchNormalization()\n",
+ "\n",
+ " def call(self, input_tensor, training=False):\n",
+ " x = self.conv2a(input_tensor)\n",
+ " x = self.bn2a(x, training=training)\n",
+ " x = tf.nn.relu(x)\n",
+ "\n",
+ " x = self.conv2b(x)\n",
+ " x = self.bn2b(x, training=training)\n",
+ " x = tf.nn.relu(x)\n",
+ "\n",
+ " x = self.conv2c(x)\n",
+ " x = self.bn2c(x, training=training)\n",
+ "\n",
+ " x += input_tensor\n",
+ " return tf.nn.relu(x)\n",
+ " \n",
+ "block = ResnetIdentityBlock(1, [1, 2, 3])\n",
+ "print(block(tf.zeros([1, 2, 3, 3])))\n",
+ "print([x.name for x in block.variables])"
+ ],
+ "execution_count": 7,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "tf.Tensor(\n",
+ "[[[[0. 0. 0.]\n",
+ " [0. 0. 0.]\n",
+ " [0. 0. 0.]]\n",
+ "\n",
+ " [[0. 0. 0.]\n",
+ " [0. 0. 0.]\n",
+ " [0. 0. 0.]]]], shape=(1, 2, 3, 3), dtype=float32)\n",
+ "['resnet_identity_block/conv2d/kernel:0', 'resnet_identity_block/conv2d/bias:0', 'resnet_identity_block/batch_normalization/gamma:0', 'resnet_identity_block/batch_normalization/beta:0', 'resnet_identity_block/conv2d_1/kernel:0', 'resnet_identity_block/conv2d_1/bias:0', 'resnet_identity_block/batch_normalization_1/gamma:0', 'resnet_identity_block/batch_normalization_1/beta:0', 'resnet_identity_block/conv2d_2/kernel:0', 'resnet_identity_block/conv2d_2/bias:0', 'resnet_identity_block/batch_normalization_2/gamma:0', 'resnet_identity_block/batch_normalization_2/beta:0', 'resnet_identity_block/batch_normalization/moving_mean:0', 'resnet_identity_block/batch_normalization/moving_variance:0', 'resnet_identity_block/batch_normalization_1/moving_mean:0', 'resnet_identity_block/batch_normalization_1/moving_variance:0', 'resnet_identity_block/batch_normalization_2/moving_mean:0', 'resnet_identity_block/batch_normalization_2/moving_variance:0']\n"
+ ],
+ "name": "stdout"
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "LPXhHUIc1-sO",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "Much of the time, however, models which compose many layers simply call one layer after the other. This can be done in very little code using tf.keras.Sequential"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "5pXgzNAU17xk",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 173
+ },
+ "outputId": "03b7eaf8-9b35-482b-bcf0-a99af6c2c6a4"
+ },
+ "cell_type": "code",
+ "source": [
+ " my_seq = tf.keras.Sequential([tf.keras.layers.Conv2D(1, (1, 1)),\n",
+ " tf.keras.layers.BatchNormalization(),\n",
+ " tf.keras.layers.Conv2D(2, 1, \n",
+ " padding='same'),\n",
+ " tf.keras.layers.BatchNormalization(),\n",
+ " tf.keras.layers.Conv2D(3, (1, 1)),\n",
+ " tf.keras.layers.BatchNormalization()])\n",
+ "my_seq(tf.zeros([1, 2, 3, 3]))\n"
+ ],
+ "execution_count": 8,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "<tf.Tensor: id=493, shape=(1, 2, 3, 3), dtype=float32, numpy=\n",
+ "array([[[[0., 0., 0.],\n",
+ " [0., 0., 0.],\n",
+ " [0., 0., 0.]],\n",
+ "\n",
+ " [[0., 0., 0.],\n",
+ " [0., 0., 0.],\n",
+ " [0., 0., 0.]]]], dtype=float32)>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "execution_count": 8
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "MZrns6p22GEQ",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Exercise!\n",
+ "\n",
+ "Make a simple convolutional neural network model, useful for things such as MNIST which don't need too many parameters. A sequence of two or three convolutions with small output channels (say, 32 and 64) plus one or two fully connected layers is probably enough.\n",
+ "\n",
+ "The input shape should be [batch_size, 28, 28, 1]."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "8CAUa3KNN916",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 17
+ },
+ "outputId": "97c0ff3c-c962-4c13-eee8-406101465761"
+ },
+ "cell_type": "code",
+ "source": [
+ "# TODO: Implement a convolutional model as described above, and assign it to\n",
+ "# model.\n",
+ "model = tf.keras.Sequential([\n",
+ " \n",
+ "])"
+ ],
+ "execution_count": 9,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "vLDDduR32E82",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 34
+ },
+ "outputId": "09bb1d43-b4c6-44b5-916e-0d2903d10cf4"
+ },
+ "cell_type": "code",
+ "source": [
+ "#@title Click to see the answer\n",
+ "\n",
+ "max_pool = tf.keras.layers.MaxPooling2D(\n",
+ " (2, 2), (2, 2), padding='same')\n",
+ " # The model consists of a sequential chain of layers, so tf.keras.Sequential\n",
+ " # (a subclass of tf.keras.Model) makes for a compact description.\n",
+ "model = tf.keras.Sequential(\n",
+ " [\n",
+ " tf.keras.layers.Conv2D(\n",
+ " 32,\n",
+ " 5,\n",
+ " padding='same',\n",
+ " activation=tf.nn.relu),\n",
+ " max_pool,\n",
+ " tf.keras.layers.Conv2D(\n",
+ " 64,\n",
+ " 5,\n",
+ " padding='same',\n",
+ " activation=tf.nn.relu),\n",
+ " max_pool,\n",
+ " tf.keras.layers.Flatten(),\n",
+ " tf.keras.layers.Dense(1024, activation=tf.nn.relu),\n",
+ " tf.keras.layers.Dropout(0.4),\n",
+ " tf.keras.layers.Dense(10)\n",
+ " ])\n",
+ "\n",
+ "model(tf.zeros([1, 28, 28, 1]))"
+ ],
+ "execution_count": 10,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "<tf.Tensor: id=625, shape=(1, 10), dtype=float32, numpy=array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]], dtype=float32)>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "execution_count": 10
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "H_CKVBroik4M",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Stop here for now"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "_yRwuE6MMmzC",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Training\n",
+ "\n",
+ "When eager execution is enabled, you can write Pythonic training loops. Simply\n",
+ "\n",
+ "1. load your data into a `tf.data.Dataset`, which lets you construct functional pipelines for processing, shuffling, and batching your data,\n",
+ "2. iterate over the dataset using a Python `for` loop, and\n",
+ "3. perform an optimization step in the body of your `for` loop.\n",
+ "\n",
+ "This workflow is exemplified in the following exercise."
+ ]
+ },
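+ {
+ "metadata": {
+ "id": "dataset-sketch-added",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ "# A minimal sketch of steps 1 and 2 (added for illustration; the exercise\n",
+ "# below covers the real thing): build a tiny in-memory Dataset, then\n",
+ "# shuffle, batch, and iterate over it with a plain Python for loop.\n",
+ "toy_data = tf.data.Dataset.from_tensor_slices(tf.range(10))\n",
+ "for batch in toy_data.shuffle(buffer_size=10).batch(4):\n",
+ "  print(batch)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },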
+ {
+ "metadata": {
+ "id": "gj0-EkTc_Xt1",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "\n",
+ "\n",
+ "## Exercise!\n",
+ "\n",
+ "In this exercise, you'll train the convolutional model you implemented for the previous exericse on the MNIST dataset. "
+ ]
+ },
+ {
+ "metadata": {
+ "id": "WOGm9HHn_byR",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 17
+ },
+ "outputId": "bbccc7ad-33cd-446e-bcda-f358c7547e1b"
+ },
+ "cell_type": "code",
+ "source": [
+ "#@title Utilities for downloading MNIST data (double-click to show code)\n",
+ "import gzip\n",
+ "import os\n",
+ "import tempfile\n",
+ "from six.moves import urllib\n",
+ "import shutil\n",
+ "\n",
+ "import numpy as np\n",
+ "\n",
+ "def read32(bytestream):\n",
+ " \"\"\"Read 4 bytes from bytestream as an unsigned 32-bit integer.\"\"\"\n",
+ " dt = np.dtype(np.uint32).newbyteorder('>')\n",
+ " return np.frombuffer(bytestream.read(4), dtype=dt)[0]\n",
+ "\n",
+ "\n",
+ "def check_image_file_header(filename):\n",
+ " \"\"\"Validate that filename corresponds to images for the MNIST dataset.\"\"\"\n",
+ " with tf.gfile.Open(filename, 'rb') as f:\n",
+ " magic = read32(f)\n",
+ " read32(f) # num_images, unused\n",
+ " rows = read32(f)\n",
+ " cols = read32(f)\n",
+ " if magic != 2051:\n",
+ " raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,\n",
+ " f.name))\n",
+ " if rows != 28 or cols != 28:\n",
+ " raise ValueError(\n",
+ " 'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %\n",
+ " (f.name, rows, cols))\n",
+ "\n",
+ "\n",
+ "def check_labels_file_header(filename):\n",
+ " \"\"\"Validate that filename corresponds to labels for the MNIST dataset.\"\"\"\n",
+ " with tf.gfile.Open(filename, 'rb') as f:\n",
+ " magic = read32(f)\n",
+ " read32(f) # num_items, unused\n",
+ " if magic != 2049:\n",
+ " raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,\n",
+ " f.name))\n",
+ " \n",
+ "def download(directory, filename):\n",
+ " \"\"\"Download (and unzip) a file from the MNIST dataset if not already done.\"\"\"\n",
+ " filepath = os.path.join(directory, filename)\n",
+ " if tf.gfile.Exists(filepath):\n",
+ " return filepath\n",
+ " if not tf.gfile.Exists(directory):\n",
+ " tf.gfile.MakeDirs(directory)\n",
+ " # CVDF mirror of http://yann.lecun.com/exdb/mnist/\n",
+ " url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'\n",
+ " _, zipped_filepath = tempfile.mkstemp(suffix='.gz')\n",
+ " print('Downloading %s to %s' % (url, zipped_filepath))\n",
+ " urllib.request.urlretrieve(url, zipped_filepath)\n",
+ " with gzip.open(zipped_filepath, 'rb') as f_in, \\\n",
+ " tf.gfile.Open(filepath, 'wb') as f_out:\n",
+ " shutil.copyfileobj(f_in, f_out)\n",
+ " os.remove(zipped_filepath)\n",
+ " return filepath\n",
+ "\n",
+ "\n",
+ "def dataset(directory, images_file, labels_file):\n",
+ " \"\"\"Download and parse MNIST dataset.\"\"\"\n",
+ "\n",
+ " images_file = download(directory, images_file)\n",
+ " labels_file = download(directory, labels_file)\n",
+ "\n",
+ " check_image_file_header(images_file)\n",
+ " check_labels_file_header(labels_file)\n",
+ "\n",
+ " def decode_image(image):\n",
+ " # Normalize from [0, 255] to [0.0, 1.0]\n",
+ " image = tf.decode_raw(image, tf.uint8)\n",
+ " image = tf.cast(image, tf.float32)\n",
+ " image = tf.reshape(image, [28, 28, 1])\n",
+ " return image / 255.0\n",
+ "\n",
+ " def decode_label(label):\n",
+ " label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8]\n",
+ " label = tf.reshape(label, []) # label is a scalar\n",
+ " return tf.to_int32(label)\n",
+ "\n",
+ " images = tf.data.FixedLengthRecordDataset(\n",
+ " images_file, 28 * 28, header_bytes=16).map(decode_image)\n",
+ " labels = tf.data.FixedLengthRecordDataset(\n",
+ " labels_file, 1, header_bytes=8).map(decode_label)\n",
+ " return tf.data.Dataset.zip((images, labels))\n",
+ "\n",
+ "\n",
+ "def get_training_data(directory):\n",
+ " \"\"\"tf.data.Dataset object for MNIST training data.\"\"\"\n",
+ " return dataset(directory, 'train-images-idx3-ubyte',\n",
+ " 'train-labels-idx1-ubyte').take(1024)\n",
+ "\n",
+ "def get_test_data(directory):\n",
+ " \"\"\"tf.data.Dataset object for MNIST test data.\"\"\"\n",
+ " return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')"
+ ],
+ "execution_count": 11,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "4ejmJ2dv_f0R",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 85
+ },
+ "outputId": "274c0381-e505-4e69-f910-3def6f8572a7"
+ },
+ "cell_type": "code",
+ "source": [
+ "# Don't forget to run the cell above!\n",
+ "training_data = get_training_data(\"/tmp/mnist/train\")\n",
+ "test_data = get_test_data(\"/tmp/mnist/test\")"
+ ],
+ "execution_count": 12,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/train-images-idx3-ubyte.gz to /tmp/tmp4ull1xwa.gz\n",
+ "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/train-labels-idx1-ubyte.gz to /tmp/tmp1eikhj1v.gz\n",
+ "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/t10k-images-idx3-ubyte.gz to /tmp/tmpcp8xah9c.gz\n",
+ "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/t10k-labels-idx1-ubyte.gz to /tmp/tmpqww_1e74.gz\n"
+ ],
+ "name": "stdout"
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "TANpFS6GKLMC",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "Fill in the implementation of `train_one_epoch` below and run the cell to train your model. "
+ ]
+ },
+ {
+ "metadata": {
+ "id": "btKL0Ss9_rmC",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 102
+ },
+ "outputId": "56858516-86fc-424a-f00d-6f088f98bf9b"
+ },
+ "cell_type": "code",
+ "source": [
+ "EPOCHS = 5\n",
+ "optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.5)\n",
+ "\n",
+ "def loss_fn(logits, labels):\n",
+ " return tf.reduce_mean(\n",
+ " tf.nn.sparse_softmax_cross_entropy_with_logits(\n",
+ " logits=tf.squeeze(logits), labels=labels))\n",
+ "\n",
+ "def train_one_epoch(model, training_data, optimizer):\n",
+ " # TODO: Implement an optimization step and return the average loss.\n",
+ " #\n",
+ " # Hint: Use `tf.GradientTape` to compute the gradient of the loss, and use\n",
+ " # `optimizer.apply_gradients` to update the model's variables, which are\n",
+ " # accessible as `model.variables`\n",
+ " average_loss = tfe.metrics.Mean('loss')\n",
+ " for images, labels in training_data.shuffle(buffer_size=10000).batch(64):\n",
+ " pass\n",
+ " return average_loss.result()\n",
+ "\n",
+ "for epoch in range(EPOCHS):\n",
+ " loss = train_one_epoch(model, training_data, optimizer)\n",
+ " print(\"Average loss after epoch %d: %.4f\" % (epoch, loss))"
+ ],
+ "execution_count": 14,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "Average loss after epoch 0: 2.2847\n",
+ "Average loss after epoch 1: 2.2305\n",
+ "Average loss after epoch 2: 2.1334\n",
+ "Average loss after epoch 3: 1.9115\n",
+ "Average loss after epoch 4: 1.4285\n"
+ ],
+ "name": "stdout"
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "yAOFupJN_htg",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 102
+ },
+ "outputId": "67e711e4-76c9-4e3f-bb49-a14955dba03a"
+ },
+ "cell_type": "code",
+ "source": [
+ "#@title Double-click to see a solution.\n",
+ "EPOCHS = 5\n",
+ "optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.5)\n",
+ "\n",
+ "def _loss_fn(logits, labels):\n",
+ " return tf.reduce_mean(\n",
+ " tf.nn.sparse_softmax_cross_entropy_with_logits(\n",
+ " logits=tf.squeeze(logits), labels=labels))\n",
+ "\n",
+ "def _train_one_epoch(model, training_data):\n",
+ " average_loss = tfe.metrics.Mean(\"loss\")\n",
+ " for images, labels in training_data.shuffle(buffer_size=10000).batch(64):\n",
+ " with tf.GradientTape() as tape:\n",
+ " logits = model(images, training=True)\n",
+ " loss = _loss_fn(logits, labels)\n",
+ " average_loss(loss)\n",
+ " gradients = tape.gradient(loss, model.variables)\n",
+ " optimizer.apply_gradients(zip(gradients, model.variables))\n",
+ " return average_loss.result()\n",
+ " \n",
+ "for epoch in range(EPOCHS):\n",
+ " loss = _train_one_epoch(model, training_data)\n",
+ " print(\"Average loss after epoch %d: %.4f\" % (epoch, loss))"
+ ],
+ "execution_count": 15,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "Average loss after epoch 0: 1.0563\n",
+ "Average loss after epoch 1: 0.8013\n",
+ "Average loss after epoch 2: 0.6306\n",
+ "Average loss after epoch 3: 0.5543\n",
+ "Average loss after epoch 4: 0.5037\n"
+ ],
+ "name": "stdout"
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "uDy1DrYA_2Jz",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "Run the below cell to qualitatively evaluate your model. Note how eager execution interoperates seamlessly with `matplotlib`."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "vR7rMtpu_3nB",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1752
+ },
+ "outputId": "b212aefa-f4b3-425c-f34d-2491429fa521"
+ },
+ "cell_type": "code",
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "sampled_data = test_data.batch(1).shuffle(buffer_size=10000).take(5)\n",
+ "for image, label in sampled_data:\n",
+ " plt.figure()\n",
+ " plt.imshow(tf.reshape(image, (28, 28)))\n",
+ " plt.show()\n",
+ " logits = model(image, training=False)\n",
+ " prediction = tf.argmax(logits, axis=1, output_type=tf.int64)\n",
+ " print(\"Prediction: %d\" % prediction)"
+ ],
+ "execution_count": 16,
+ "outputs": [
+ {
+ "output_type": "display_data",
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUsAAAFKCAYAAACU6307AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAEwpJREFUeJzt3X1Ilff/x/HXmScxV2GZOmLVohXK\nKmLQjbUsy+pbI7rbaEm1IFhRSU1aE+kO3LqxCGrBMlsNkq0zZIM2Cu1mUTg1itXQbVnBQqKZNtcN\n2d3J3x9ffpLrNN/ndM65jn6fj7/m5cfrvI9XPHedc7zOcTU3NzcLAPCvXnJ6AABoD4glABgQSwAw\nIJYAYEAsAcCAWAKAAbEEAANiCQAG7kB/cOPGjbpw4YJcLpdyc3M1ZMiQYM4FABEloFieOXNGV69e\nlcfj0ZUrV5SbmyuPxxPs2QAgYgT0MLy8vFwZGRmSpP79++vWrVu6e/duUAcDgEgSUCwbGhrUvXv3\nlq979Oih+vr6oA0FAJEmKC/w8F4cADq6gGKZmJiohoaGlq9v3LihhISEoA0FAJEmoFiOHj1aJSUl\nkqTq6molJiaqS5cuQR0MACJJQK+Gv/nmm3rjjTf03nvvyeVyaf369cGeCwAiios3/wWAtnEFDwAY\nEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkA\nBsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgC\ngAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMHA7\nPQAQiAcPHpjX3rlzx+f2nj17qqGhodW2kydPmvb566+/mm//xx9/NK+13r4kjRgx4pltFRUVGjly\nZKttP/30k3mfL73E+dPz8JsBAIOAziwrKyu1YsUKDRgwQJI0cOBArV27NqiDAUAkCfhh+PDhw7Vz\n585gzgIAEYuH4QBgEHAsL1++rCVLlmju3LkqKysL5kwAEHFczc3Nzf7+UF1dnc6dO6cpU6aotrZW\nCxYsUGlpqaKjo0MxIwA4LqDnLJOSkjR16lRJUp8+fdSzZ0/V1dWpd+/eQR0OeB7+dIg/HQq3gH4z\nhw4d0hdffCFJqq+v182bN5WUlBTUwQAgkgR0Zjl+/HitWrVKx48f16NHj7RhwwYeggPo0AKKZZcu\nXbR79+5gzwIAESugF3gAf1RVVZnXfvfdd6Z1hw8fNu/zzJkzPrd7vV5FRUWZ99Me+LpPDx8+NP98\nR/t9BBPP5gKAAbEEAANiCQAGxBIADIglABgQSwAwIJYAYEAsAcCAWAKAAbEEAAM+3RGtPO/qV5fL\n1ep7BQUF5n1mZWWZ1z558sS8NhRcLpdpnT9vZebPJYT9+vUzry0pKfG5/Y8//mj1NW+7Fhz8FgHA\ngFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgCt40MrBgwd9bp87d26r7y1btsy8z1de\necW89q233jKte//99837/Dfff/99q68TExNNP/fqq6+ab8Of+x8MvXv3Duvt/a/gzBIADIglABgQ\nSwAwIJYAYEAsAcCAWAKAAbEEAANiCQAGxBIADIglABi4mp/3CVXoMB49emRe+/rrr/vcfvXqVfXt\n27fl68zMTPM+P/74Y/PauLg481ognDizBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGx\nBAADYgkABny6YztVX19vXjthwgTz2oEDB5q+l5eXZ96n223/Z/b48WPTuuvXr5v3efz4cZ/bFy5c\nqC+//NK8n0CNHTvWvLZfv34hnAQvwnRmWVNTo4yMDBUVFUn67z/U+fPnKzMzUytWrNDDhw9DOiQA\nOK3NWN67d095eXlKTU1t2bZz505lZmbqq6++Ut++fVVcXBzSIQHAaW3GMjo6WoWFha0+fL6ysrLl\noV16errKy8tDNyEARIA2n0xyu93PPOfU1NSk6OhoSVJ8fLxfz58BQHv0wi/w8HaYzkhISDCv/eWX\nX4Jym0ePHg3Kfv6N9cWg3r17m/e5cOHCgL4HPC2gWMbGxur+/fuKiYlRXV1dq4foCI9QvRqelJTk\nc/vRo0c1ceLElq+PHDli3ievhvNqeEcQ0N9Zjho1SiUlJZKk0tJSjRkzJqhDAUCkafN/+VVVVdqy\nZYuuXbsmt9utkpISbdu2TTk5OfJ4POrVq5dmzJgRjlkBwDFtxnLQoEE6cODAM9v3798fkoEAIBLx\ngWXt1A8//GBeO3v2bPPa572Ik5aWplOnTrV8ff78efM+J02aZF5rnfX333837/N5vF6voqKiAvrZ\nd99917x20KBB5rWrVq0yr42JiTGvxYvj2nAAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAY\nEEsAMCCWAGDA5Y7tlD+X23377bcvfHv/vDTQn7cS8+ft1NLS0kzr/Ln/o0aN8rk9OTn5mcsmO3Xq\nZNrn7du3zbc/YsQI89q9e/ea1y5YsMC8Fi+OM0sAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyI\nJQAYEEsAMCCWAGDQ5kfhIjItXrzYvHb06NHmtRcvXnzu9z744IOW//bnUruhQ4ea11ovN3S7g/NP\nNzk5OaCfe/qTLtvi9XrNa/351E4udwwvziwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBg\nQCwBwIAreNqpjIyMkKz9N59//nlQ9tMRPHjwwOkREGacWQKAAbEEAANiCQAGxBIADIglABgQSwAw\nIJYAYEAsAcCAWAKAAbEEAANiCQAGxBIADEyxrKmpUUZGhoqKiiRJOTk5mjZtmubPn6/58+fr5MmT\noZwRABzX5rsO3bt3T3l5eUpNTW21PTs7W+np6SEbDAAiSZtnltHR0SosLFRiYmI45gGAiNTmmaXb\n7Zbb/eyyoqIi7d+/X/Hx8Vq7dq169OgRkgGBSDRx4kTzWq/XG8JJEC4Bvfnv9OnTFRcXp5SUFO3Z\ns0e7du3SunXrgj0bELGOHj1qXvuf//zHvHb27Nnmtd988415LV5cQK+Gp6amKiUlRZI0fvx41dTU\nBHUoAIg0AcUyKytLtbW1kqTKykoNGDAgqEMBQKRp82F4VVWVtmzZomvXrsntdqukpETz5s3TypUr\n1blzZ8XGxmrTpk3hmBUAHNNmLAcNGqQDBw48s33y5MkhGQgAIhGf7ggEgAsx/vdwuSMAGBBLADAg\nlgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADDgckcgAKdPnw7JfqdNmxaS/eLFcWYJAAbE\nEgAMiCUAGBBLADAglgBgQCwBwIBYA
oABsQQAA2IJAAZcwQM85dSpU6Z1P//8s3mfL7/8snntuHHj\nzGsRXpxZAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAAy53RIf3999/+9we\nFxf3zPcyMjJM+/R6vebbP3jwoHlt7969zWsRXpxZAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBg\nQCwBwIBYAoABsQQAAy53DIMnT56Y1+bm5prWbdiwwbzPmJgY89r24u7du+a1b7/9ts/tZWVlz3zP\nehnjO++8Y7792bNnm9cicplimZ+fr3Pnzunx48davHixBg8erNWrV8vr9SohIUFbt25VdHR0qGcF\nAMe0GcuKigpdunRJHo9HjY2NmjlzplJTU5WZmakpU6Zo+/btKi4uVmZmZjjmBQBHtPmc5bBhw7Rj\nxw5JUrdu3dTU1KTKykpNmDBBkpSenq7y8vLQTgkADmszllFRUYqNjZUkFRcXKy0tTU1NTS0Pu+Pj\n41VfXx/aKQHAYeYXeI4dO6bi4mLt27dPkyZNatne3NwcksE6kpdesv/RwebNm0M4ScfRpUsX89qy\nsrKAvgc8zRTL06dPa/fu3dq7d6+6du2q2NhY3b9/XzExMaqrq1NiYmKo52zXeDU8+Px5NXzy5Mk+\nt5eVlWn06NGttlVUVJj26c+r4V9//bV5rT//Y0V4tXlk7ty5o/z8fBUUFCguLk6SNGrUKJWUlEiS\nSktLNWbMmNBOCQAOa/PM8vDhw2psbNTKlStbtm3evFlr1qyRx+NRr169NGPGjJAOCQBOazOWc+bM\n0Zw5c57Zvn///pAMBACRyNXMKzQh58+HW1n/uP/TTz817zM7Ozvotx8qv/32m2nd0qVLzfs8deqU\nz+1er1dRUVHm/TyturravDY5OTmg20Bk4dlkADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUA\nGBBLADAglgBgwOWOYeDP5Y4JCQmmdbdu3TLvc+LEiea148aN87k9Jycn4PfavH//vnntJ598Ylrn\nzz/bbt26+dze2Nio7t27t9p28eJF0z6tx0mSXC6XeS0iF2eWAGBALAHAgFgCgAGxBAADYgkABsQS\nAAyIJQAYEEsAMCCWAGBALAHAgMsdI0xxcbFp3bJly8z7bGhoCHScFi/ySYj++Oflh88zefJk8z4/\n+ugjn9uHDh2q8+fPP7MN8IUzSwAwIJYAYEAsAcCAWAKAAbEEAANiCQAGxBIADIglABgQSwAw4Aqe\ndqqmpsa8Njs727z2yJEjPre/yBU8q1evNq8dPHiwaV1mZmZAswCB4swSAAyIJQAYEEsAMCCWAGBA\nLAHAgFgCgAGxBAADYgkABsQSAAyIJQAYcLkjABi4LYvy8/N17tw5PX78WIsXL9aJEydUXV2tuLg4\nSdKiRYs0bty4UM4JAI5qM5YVFRW6dOmSPB6PGhsbNXPmTI0cOVLZ2dlKT08Px4wA4Lg2Yzls2DAN\nGTJEktStWzc1NTXJ6/WGfDAAiCR+PWfp8Xh09uxZRUVFqb6+Xo8ePVJ8fLzWrl2rHj16hHJOAHCU\nOZbHjh1TQUGB9u3bp6qqKsXFxSklJUV79uzRn3/+qXXr1oV6VgBwjOlPh06fPq3du3ersLBQXbt2\nVWpqqlJSUiRJ48eP9+uNaAGgPWozlnfu3FF+fr4KCgpaXv3OyspSbW2tJKmyslIDBgwI7ZQA4LA2\nX+A5fPiwGhsbtXLlypZts2bN0sqVK9W5c2fFxsZq06ZNIR0SAJzGH6UDgAGXOwKAAbEEAANiCQAG\nxBIADIglABgQSwAwIJYAYEAsAcCAWAKAAbEEAANiCQAGxBIADIglABgQSwAwIJYAYEAsAcCAWAKA\nAbEEAANiCQAGxBIADIglABgQSwAwIJYAYEAsAcCAWAKAAbEEAANiCQAGxBIADIglABi4nbjRjRs3\n6sKFC3K5XMrNzdWQIUOcGCOoKisrtWLFCg0YMECSNHDgQK1du9bhqQJXU1OjpUuXauHChZo3b56u\nX7+u1atXy+v1KiEhQVu3blV0dLTTY/rln/cpJydH1dXViouLkyQtWrRI48aNc3ZIP+Xn5+vcuXN6\n/PixFi9erMGDB7f74yQ9e79OnDjh+LEKeyzPnDmjq1evyuPx6MqVK8rNzZXH4wn3GCExfPhw7dy5\n0+kxXti9e/eUl5en1NTUlm07d+5UZmampkyZou3bt6u4uFiZmZkOTukfX/dJkrKzs5Wenu7QVC+m\noqJCly5dksfjUWNjo2bOnKnU1NR2fZwk3/dr5MiRjh+rsD8MLy8vV0ZGhiSpf//+unXrlu7evRvu\nMfAvoqOjVVhYqMTExJZtlZWVmjBhgiQpPT1d5eXlTo0XEF/3qb0bNmyYduzYIUnq1q2bmpqa2v1x\nknzfL6/X6/BUDsSyoaFB3bt3b/m6R48eqq+vD/cYIXH58mUtWbJEc+fOVVlZmdPjBMztdismJqbV\ntqamppaHc/Hx8e3umPm6T5JUVFSkBQsW6MMPP9Rff/3lwGSBi4qKUmxsrCSpuLhYaWlp7f44Sb7v\nV1RUlOPHypHnLJ/W3Nzs9AhB8dprr2n58uWaMmWKamtrtWDBApWWlrbL54va0lGO2fTp0xUXF6eU\nlBTt2bNHu3bt0rp165wey2/Hjh1TcXGx9u3bp0mTJrVsb+/H6en7VVVV5fixCvuZZWJiohoaGlq+\nvnHjhhISEsI9RtAlJSVp6tSpcrlc6tOnj3r27Km6ujqnxwqa2NhY3b9/X5JUV1fXIR7OpqamKiUl\nRZI0fvx41dTUODyR/06fPq3du3ersLBQXbt27TDH6Z/3KxKOVdhjOXr0aJWUlEiSqqurlZiYqC5d\nuoR7jKA7dOiQvvjiC0lSfX29bt68qaSkJIenCp5Ro0a1HLfS0lKNGTPG4YleXFZWlmprayX99znZ\n//9Lhvbizp07ys/PV0FBQcurxB3hOPm6X5FwrFzNDpyrb9u2TWfPnpXL5dL69euVnJwc7hGC7u7d\nu1q1apVu376tR48eafny5Ro7dqzTYwWkqqpKW7Zs0bVr1+R2u5WUlKRt27YpJydHDx48UK9evbRp\n0yZ16tTJ6VHNfN2nefPmac+ePercubNiY2O1adMmxcfHOz2qmcfj0WeffaZ+/fq1bNu8ebPWrFnT\nbo+T5Pt+zZo1S0VFRY4eK0diCQDtDVfwAIABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwOD/\nAKCzFeFbFn4BAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ "<matplotlib.figure.Figure at 0x7fd61cfd1e80>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ }
+ },
+ {
+ "output_type": "stream",
+ "text": [
+ "Prediction: 5\n"
+ ],
+ "name": "stdout"
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUsAAAFKCAYAAACU6307AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAEQ1JREFUeJzt3W9Ilff/x/HXSSd2VmKaRwiqjTBy\nq9gfap2iliaFQfRvsCXW1rpRRJGTCJG0MSHLIpbF8M9qN3L7cjZvNQiOVAQt7LQcBLqB1Y0QaXYs\naUa2mZ3fjS9ff7Vcvj2ec65jez7ueZ1P57wPlzy7Li8vjysUCoUEAHihcU4PAABjAbEEAANiCQAG\nxBIADIglABgQSwAwIJYAYEAsAcAgMdx/uH//fl27dk0ul0ulpaWaO3duJOcCgLgSViyvXLmiW7du\nyefz6ebNmyotLZXP54v0bAAQN8I6DW9ublZeXp4kacaMGbp//74ePHgQ0cEAIJ6EFcvu7m5NmjRp\n8Ou0tDQFg8GIDQUA8SYiF3j4WxwAXnZhxdLj8ai7u3vw6zt37igjIyNiQwFAvAkrlosWLZLf75ck\ntbW1yePxaMKECREdDADiSVhXw9955x29+eab+uijj+RyubRv375IzwUAccXFH/8FgOFxBw8AGBBL\nADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbE\nEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoAB\nsQQAA2IJAAbEEgAMEp0eAIgnP/30k2nd+vXrzc+Zl5dnXvvtt9+a1yK2OLIEAANiCQAGxBIADIgl\nABgQSwAwIJYAYEAsAcCAWAKAAbEEAAPu4AGecuzYMdO6YDBofk6XyxXuOIgjHFkCgEFYR5aBQEC7\ndu1SVlaWJGnmzJkqKyuL6GAAEE/CPg2fP3++qqurIzkLAMQtTsMBwCDsWN64cUPbtm3Thg0bdOnS\npUjOBABxxxUKhUIj/UddXV1qaWlRfn6+Ojo6tGnTJjU1NSkpKSkaMwKA48L6mWVmZqZWrlwpSZo2\nbZomT56srq4uTZ06NaLDAbH24Ycfmtb98MMP5ucsKCgwr21oaDCvRWyFdRp++vRpnThxQtJ/f9/s\n7t27yszMjOhgABBPwjqyzM3N1e7du3Xu3Dn19/fr888/5xQcwEstrFhOmDBBNTU1kZ4FAOIWtzsC\nT7lw4ULEn3PVqlURf07EHr9nCQAGxBIADIglABgQSwAwIJYAYEAsAcCAWAKAAbEEAANiCQAGxBIA\nDLjdES89v98/5PYVK1Y899hIPrXRqre3N+LPidjjyBIADIglABgQSwAwIJYAYEAsAcCAWAKAAbEE\nAANiCQAGxBIADLiDB2NSKBQyr21oaBhy+4oVK/7xsUh6++23o/4aiD6OLAHAgFgCgAGxBAADYgkA\nBsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgIErNJL7xoA40dnZaV47derUIbc/efJE48aFd7zw7rvv\nmtf+/PPPYb0G4gtHlgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBPd8SY\nVFlZ6ejrb9682dHXR+yZjizb29uVl5c3+LGht2/f1saNG1VQUKBdu3bpr7/+iuqQAOC0YWP58OFD\nVVRUyOv1Dm6rrq5WQUGBvvvuO02fPl2NjY1RHRIAnDZsLJOSklRfXy+PxzO4LRAIaNmyZZKknJwc\nNTc3R29CAIgDw/7MMjExUYmJzy7r6+tTUlKSJCk9PV3BYDA60wFAnBj1BR7+HCaccPz48YisffLk\nSSTGwb9AWLF0u9169OiRkpOT1dXV9cwpOhALO3bsMK/96quvhtw+mj/+O5JYb9++PazXQHwJ6ztl\n4cKF8vv9kqSmpiYtXrw4okMBQLwZ9siytbVVBw8eVGdnpxITE+X3+3X48GGVlJTI5/NpypQpWrNm\nTSxmBQDHDBvL2bNn69SpU89t/+abb6IyEADEI+7gQVyxXnCJ1oeAWX/+XlhYGJXXR/zi3nAAMCCW\nAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGDA7Y6IKxUVFaZ10brd8dVXXzWt6+3t\nNT9nSkpKuOMgjnBkCQAGxBIADIglABgQSwAwIJYAYEAsAcCAWAKAAbEEAANiCQAGxBIADLjdEXHl\nyy+/dPT1BwYGTOv8fr/5OT/99NNwx0Ec4cgSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkA\nBsQSAAy4gwdR99tvv5nXjuSDwKzcbrf5sV9++cX0nGlpaaOaCWMPR5YAYEAsAcCAWAKAAbEEAANi\nCQAGxBIADIglABgQSwAwIJYAYEAsAcCA2x0RFusHe0kj+xCyJ0+ehDPOC507d878GLcx4p9wZAkA\nBqZYtre3Ky8vTw0NDZKkkpISrVq1Shs3btTGjRt14cKFaM4IAI4b9jT84cOHqqiokNfrfWZ7cXGx\ncnJyojYYAMSTYY8sk5KSVF9fL4/HE4t5ACAuuUKhUMiy8NixY5o0aZIKCwtVUlKiYDCo/v5+paen\nq6ysjB+MA3iphXU1fPXq1UpNTVV2drbq6up0/PhxlZeXR3o2xLGRXA3fvn27eW19fX0447xQc3Pz\nkNvfe+89BQKB57YBQwnrarjX61V2drYkKTc3V+3t7REdCgDiTVix3Llzpzo6OiRJgUBAWVlZER0K\nAOLNsKfhra2tOnjwoDo7O5WYmCi/36/CwkIVFRVp/PjxcrvdqqysjMWsAOCYYWM5e/ZsnTp16rnt\nK1asiMpAABCPzFfDgafdu3fPvHby5MkRf/0PPvjAvPY///nPkNsTEhKeu1CVkJAwqrnw8uJ2RwAw\nIJYAYEAsAcCAWAKAAbEEAANiCQAGxBIADIglABgQSwAwIJYAYMCnO+IZ//TpiuPGjXvmsc2bN0fl\n9V0ul2ndF198YX7OF93CyO2NsOLIEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAM\nuIMHz/jf58H/3fTp05957Mcff4zK6xcWFprWzZo1KyqvD/wTjiwBwIBYAoABsQQAA2IJAAbEEgAM\niCUAGBBLADAglgBgQCwBwIBYAoABtzviGRcuXBhy+8cff/zMY6FQKCqvX15eHpXnBUaLI0sAMCCW\nAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGDgCkXrvjXEjV9//dW8ds6cOUNuHxgY\nUEJCwuDXI/m2Wb9+vXmtz+czrRs3jv/nEVume8OrqqrU0tKix48fa+vWrZozZ4727NmjgYEBZWRk\n6NChQ0pKSor2rADgmGFjefnyZV2/f
l0+n089PT1au3atvF6vCgoKlJ+fryNHjqixsVEFBQWxmBcA\nHDHsucy8efN09OhRSVJKSor6+voUCAS0bNkySVJOTo6am5ujOyUAOGzYWCYkJMjtdkuSGhsbtWTJ\nEvX19Q2edqenpysYDEZ3SgBwmPnvWZ49e1aNjY06efKkli9fPrid60Px74033jCvHRgYCOsx4GVn\niuXFixdVU1Ojr7/+WhMnTpTb7dajR4+UnJysrq4ueTyeaM+JUeBqODB6w37H9fb2qqqqSrW1tUpN\nTZUkLVy4UH6/X5LU1NSkxYsXR3dKAHDYsEeWZ86cUU9Pj4qKiga3HThwQHv37pXP59OUKVO0Zs2a\nqA4JAE7jl9L/BTgNB0aPDyz7F7AGSHpxBJ9+LCUlxfycJ06cMK8lgohXfGcCgAGxBAADYgkABsQS\nAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADbnf8F7hx44Z5rfV2x+TkZPNzjuTWSCBecWQJAAbE\nEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMuN3xX6C4uNi89vvvv//HxxIT///b\n5a233hrVTMBYw5ElABgQSwAwIJYAYEAsAcCAWAKAAbEEAANiCQAGxBIADIglABi4Qi/6hCoAgCSO\nLAHAhFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAAD06c7VlVVqaWlRY8f\nP9bWrVt1/vx5tbW1KTU1VZK0ZcsWLV26NJpzAoCjho3l5cuXdf36dfl8PvX09Gjt2rVasGCBiouL\nlZOTE4sZAcBxw8Zy3rx5mjt3riQpJSVFfX19GhgYiPpgABBPRvQn2nw+n65evaqEhAQFg0H19/cr\nPT1dZWVlSktLi+acAOAocyzPnj2r2tpanTx5Uq2trUpNTVV2drbq6ur0+++/q7y8PNqzAoBjTFfD\nL168qJqaGtXX12vixInyer3Kzs6WJOXm5qq9vT2qQwKA04aNZW9vr6qqqlRbWzt49Xvnzp3q6OiQ\nJAUCAWVlZUV3SgBw2LAXeM6cOaOenh4VFRUNblu3bp2Kioo0fvx4ud1uVVZWRnVIAHAan8EDAAbc\nwQMABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHA\ngFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsA\nMCCWAGCQ6MSL7t+/X9euXZPL5VJpaanmzp3rxBgRFQgEtGvXLmVlZUmSZs6cqbKyMoenCl97e7u2\nb9+uTz75RIWFhbp9+7b27NmjgYEBZWRk6NChQ0pKSnJ6zBH5+3sqKSlRW1ubUlNTJUlbtmzR0qVL\nnR1yhKqqqtTS0qLHjx9r69atmjNnzpjfT9Lz7+v8+fOO76uYx/LKlSu6deuWfD6fbt68qdLSUvl8\nvliPERXz589XdXW102OM2sOHD1VRUSGv1zu4rbq6WgUFBcrPz9eRI0fU2NiogoICB6ccmaHekyQV\nFxcrJyfHoalG5/Lly7p+/bp8Pp96enq0du1aeb3eMb2fpKHf14IFCxzfVzE/DW9ublZeXp4kacaM\nGbp//74ePHgQ6zHwAklJSaqvr5fH4xncFggEtGzZMklSTk6OmpubnRovLEO9p7Fu3rx5Onr0qCQp\nJSVFfX19Y34/SUO/r4GBAYenciCW3d3dmjRp0uDXaWlpCgaDsR4jKm7cuKFt27Zpw4YNunTpktPj\nhC0xMVHJycnPbOvr6xs8nUtPTx9z+2yo9yRJDQ0N2rRpkz777DPdu3fPgcnCl5CQILfbLUlqbGzU\nkiVLxvx+koZ+XwkJCY7vK0d+Zvm0UCjk9AgR8dprr2nHjh3Kz89XR0eHNm3apKampjH586LhvCz7\nbPXq1UpNTVV2drbq6up0/PhxlZeXOz3WiJ09e1aNjY06efKkli9fPrh9rO+np99Xa2ur4/sq5keW\nHo9H3d3dg1/fuXNHGRkZsR4j4jIzM7Vy5Uq5XC5NmzZNkydPVldXl9NjRYzb7dajR48kSV1dXS/F\n6azX61V2drYkKTc3V+3t7Q5PNHIXL15UTU2N6uvrNXHixJdmP/39fcXDvop5LBctWiS/3y9Jamtr\nk8fj0YQJE2I9RsSdPn1aJ06ckCQFg0HdvXtXmZmZDk8VOQsXLhzcb01NTVq8eLHDE43ezp071dHR\nIem/P5P9328yjBW9vb2qqqpSbW3t4FXil2E/DfW+4mFfuUIOHKsfPnxYV69elcvl0r59+zRr1qxY\njxBxDx480O7du/XHH3+ov79fO3bs0Pvvv+/0WGFpbW3VwYMH1dnZqcTERGVmZurw4cMqKSnRn3/+\nqSlTpqiyslKvvPKK06OaDfWeCgsLVVdXp/Hjx8vtdquyslLp6elOj2rm8/l07Ngxvf7664PbDhw4\noL17947Z/SQN/b7WrVunhoYGR/eVI7EEgLGGO3gAwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAg\nlgBg8H/nb4OLnfGqVAAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ "<matplotlib.figure.Figure at 0x7fd61bade5c0>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ }
+ },
+ {
+ "output_type": "stream",
+ "text": [
+ "Prediction: 1\n"
+ ],
+ "name": "stdout"
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUsAAAFKCAYAAACU6307AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAE1ZJREFUeJzt3X1olfX/x/HXccc1DyrLuY1GaRGL\nRqZSaE7zZmqKgnhDsVwqkYGRE29QW8tp4M102solNJ03fzSqgyPoBmFDIlg1Jw0xNsrZDbKGranD\nG5x3x33/+NF+rp153js751znrOfjv13n43Xex4NPrrPL61yujo6ODgEA7muA0wMAQCwglgBgQCwB\nwIBYAoABsQQAA2IJAAbEEgAMiCUAGLiD/YM7duzQ6dOn5XK5lJ+fr9GjR4dyLgCIKkHF8uTJkzp3\n7py8Xq9+++035efny+v1hno2AIgaQX0Mr6mp0cyZMyVJjz/+uC5fvqxr166FdDAAiCZBxfLChQt6\n8MEHO38eNmyYWltbQzYUAESbkJzg4bs4APR3QcUyJSVFFy5c6Pz577//VnJycsiGAoBoE1QsJ02a\npMrKSklSQ0ODUlJSNHjw4JAOBgDRJKiz4c8884yeeuopvfzyy3K5XNqyZUuo5wKAqOLiy38BIDCu\n4AEAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBg\nQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUA\nGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJ\nAAbEEgAMiCUAGLiD+UO1tbVavXq10tPTJUlPPPGECgoKQjoYAESToGIpSePHj1dJSUkoZwGAqMXH\ncAAwCDqWv/76q9544w0tXrxY33//fShnAoCo4+ro6Ojo7R9qaWlRXV2d5syZo6amJi1btkxVVVWK\nj48Px4wA4LigjixTU1M1d+5cuVwujRgxQsOHD1dLS0uoZwOAqBFULL/88ksdOnRIktTa2qqLFy8q\nNTU1pIMBQDQJ6mP4tWvXtH79el25ckW3b99Wbm6upk6dGo75ACAqBBVLAPivCfr/WQL90alTp0zr\nSktLzfssKysLdpz78nec09HRIZfL1WVbbm6ueZ+9+b/T/36e/o7/ZwkABsQSAAyIJQAYEEsAMCCW\nAGBALAHAgFgCgAGxBAADYgkABsQSAAy4Nhz93tmzZ/1uT09P7/bY4sWLTfu0XhYZaT6fT3FxcUH/\n+Vu3bpnX9uV5YhFHlgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgwA3LEHa9uUjs\nzJkzpnXz588377Opqcnv9uvXr2vMmDFdtt28edO8Xyu32/7PrKCgwLw2Pj7e7/bCwsIuPz/77LPm\nfQ4YwPFTT/ibAQADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABtywDEG5ffu2\nee1bb71lXrt3795gxgmKv5t7PfTQQ6Y/u3r1avPzLF++3Lz2yJEj5rW5ubndtj3wwAPdLtl84IEH\nzPtEzziyBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABtzdEV3cvXvX7/YB\nAwZ0eSwvL8+8z0hewujPokWLzI999NFHpn16PB7z8y9evNi89uuvvzavbW5u7ratuLhYb7/9drdt\n6DvTkWVjY6Nmzpyp8vJySdL58+e1dOlS5eTkaPXq1bp161ZYhwQApwWM5fXr17V161ZlZmZ2bisp\nKVFOTo4++eQTjRw5UhUVFWEdEgCcFjCW8fHxKisrU0pKSue22tpazZgxQ5KUlZWlmpqa8E0IAFEg\n4O8s3W633O6uy9rb2xUfHy9JSkpKUmtra3imA4Ao0ecTPHwdZv8yYEDPHzbufey9994z77M3ayPt\n6NGjYX+OL774IuzPcS9O6IRHULH0eDy6ceOGEhIS1NLS0uUjOmKb9Wz4hg0bzPv84IMP+jxXX/R0\nNvzo0aN66aWXumyLpbPh/r6AuLi4WOvWreu2DX0X1P+znDhxoiorKyVJVVVVmjx5ckiHAoBoE/DI\nsr6+Xrt27VJzc7PcbrcqKyu1Z88e5eXlyev1Ki0tTQsWLIjErADgmICxHDVqlD7++ONu23tzrxAA\niHVcwfMf8Ndff5nXzpo1y+/2n376SWPHju38uaGhoc9z+TN06FDTutLSUvM+X3zxxR4f++yzz7r8\nfL8TXPf69NNPzc/fm99D9kZaWlqvtqNvuDYcAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkA\nBsQSAAyIJQAYuDr4QsqYdPXqVfPaUaNGmdf++eeffrf7fD7FxcWZ93Ovf75V3+LQoUOmdY888khQ\nswRivUVKdnZ2WJ7/ny/Vtjh16lS3bU8++aR++eWXbtvQdxxZAoABsQQAA2IJAAbEEgAMiCUAGBBL\nADAglgBgQCwBwIBYAoABsQQAA+7uGKPKy8vNa3u6hLEvlixZYl67Z88e89rk5GTTupaWFvM+X3/9\ndb/bv/rqK82bN6/LtsrKSvN+w6E3d43s6TJGLm8MD44sAcCAWAKAAbEEAANiCQAGxBIADIglABgQ\nSwAwIJYAYEAsAcCAG5ZFmbt375rWvfDCC+Z9fvvtt+a1Pd0wq729XYMGDer8ubGx0bzPtLQ089qf\nf/7ZtG7Dhg3mfVZVVfnd3pebsIXLjRs3zGsHDhwYxknwbxxZAoABsQQAA2IJAAbEEgAMiCUAGBBL\nADAglgBgQCwBwIBYAoABsQQAA25YFmWsV5/25hLG3vD5fKbHiouLzfv8448/zGu/+uor89pYsWDB\nAvPaaLv8Ev+PI0sAMDDFsrGxUTNnzuy8/WpeXp7mzZunpUuXaunSpWE7ygGAaBHwY/j169e1detW\nZWZmdtm+bt06ZWVlhW0wAIgmAY8s4+PjVVZWppSUlEjMAwBRKeCRpdvtltvdfVl5ebmOHDmipKQk\nFRQUaNiwYWEZ8L/G+gv++52ICZdbt25F/DnDzYm/R8SmoM6Gz58/X4mJicrIyNCBAwe0b98+bd68\nOdSz/SdZ//H29CW9fdVTrG/dutXlOVeuXGneZ7SeDY/Ul//25mz40aNHzWsHDOD8bCQF9bedmZmp\njIwMSdL06dN79a3ZABCLgorlqlWr1NTUJEmqra1Venp6SIcCgGgT8GN4fX29du3apebmZrndblVW\nVmrJkiVas2aNBg0aJI/Ho8LCwkjMCgCOCRjLUaNG6eOPP+62ffbs2WEZCACiEZc7ogvr5Y4lJSWR\nGKdf6M0JHk7aRC/eGQAwIJYAYEAsA
cCAWAKAAbEEAANiCQAGxBIADIglABgQSwAwIJYAYMDljlHG\nernbsWPHzPvszeV24fiC3958MfT69etN6/Lz84MdJyS2bdtmXvvKK6+EcRJECkeWAGBALAHAgFgC\ngAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGDAFTxRxuVymdb15u6ap06dMq+9dOlSj49VV1eb93Ov\nsWPHmtfW1dUF9RyhMmbMGNO6lStXmvfJTcj6B95FADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAM\niCUAGBBLADAglgBg4Oro6Ohwegj0b21tbea1kyZNMq07c+ZMsON08vl8iouL67Lthx9+MP3Z5557\nrs/Pj9jCkSUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADDg7o4Iu5MnT5rX\nhuIyxn/Ly8szPzZ+/PiQPz/6B1Msi4qKVFdXpzt37mjFihV6+umntXHjRvl8PiUnJ2v37t2Kj48P\n96wA4JiAsTxx4oTOnj0rr9ertrY2LVy4UJmZmcrJydGcOXNUXFysiooK5eTkRGJeAHBEwN9Zjhs3\nTnv37pUkDR06VO3t7aqtrdWMGTMkSVlZWaqpqQnvlADgsICxjIuLk8fjkSRVVFRoypQpam9v7/zY\nnZSUpNbW1vBOCQAOM5/gOX78uCoqKnT48GHNmjWrcztfh4lAZs+ebV7r8/nCOEl327dvj+jzIXaZ\nYlldXa3S0lIdPHhQQ4YMkcfj0Y0bN5SQkKCWlhalpKSEe07EsMrKSvPauXPnhvz5ezobvn37dr3z\nzjtdtm3bts20T5fL1ee5EFsCfgy/evWqioqKtH//fiUmJkqSJk6c2PkPoKqqSpMnTw7vlADgsIBH\nlseOHVNbW5vWrFnTuW3nzp3atGmTvF6v0tLStGDBgrAOCQBOCxjL7OxsZWdnd9t+5MiRsAwEANGI\nG5YhKL25CVlGRoZ5bTj+Z8Xvv//ud/vIkSN17ty5btsAf7g2HAAMiCUAGBBLADAglgBgQCwBwIBY\nAoABsQQAA2IJAAbEEgAMiCUAGHDDMgSlrKzMvDYclzDm5uaa16alpQX1GHAvjiwBwIBYAoABsQQA\nA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABlzuiizt37vjd7na7uzz2+eefh+X5V61aZVr3\n/vvvm/fpcrl6fGzgwIHm/eC/jSNLADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADBw\ndXR0dDg9BKLHd99953f7888/3+WxqVOnmvf58MMPm9eeOXPGtC4hIcG8TyAUOLIEAANiCQAGxBIA\nDIglABgQSwAwIJYAYEAsAcCAWAKAAbEEAANiCQAG3LAMXQwZMiSox+5ny5Yt5rVcxohoZYplUVGR\n6urqdOfOHa1YsULffPONGhoalJiYKElavny5pk2bFs45AcBRAWN54sQJnT17Vl6vV21tbVq4cKEm\nTJigdevWKSsrKxIzAoDjAsZy3LhxGj16tCRp6NCham9vl8/nC/tgABBNAp7giYuLk8fjkSRVVFRo\nypQpiouLU3l5uZYtW6a1a9fq0qVLYR8UAJxk/j7L48ePa//+/Tp8+LDq6+uVmJiojIwMHThwQH/9\n9Zc2b94c7lkBwDGmEzzV1dUqLS3VwYMHNWTIEGVmZnY+Nn36dL377rvhmg8Rdvr0ab/bx4wZ0+Wx\nZ555xrzPsrIy89rXXnvNvBaIpIAfw69evaqioiLt37+/8+z3qlWr1NTUJEmqra1Venp6eKcEAIcF\nPLI8duyY2tratGbNms5tixYt0po1azRo0CB5PB4VFhaGdUgAcFrAWGZnZys7O7vb9oULF4ZlIACI\nRlzuCAAG3N0RAAw4sgQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAM\niCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQA\nA2IJAAbEEgAM3E486Y4dO3T69Gm5XC7l5+dr9OjRTowRUrW1tVq9erXS09MlSU888YQKCgocnip4\njY2NevPNN/Xqq69qyZIlOn/+vDZu3Cifz6fk5GTt3r1b8fHxTo/ZK/9+TXl5eWpoaFBiYqIkafny\n5Zo2bZqzQ/ZSUVGR6urqdOfOHa1YsUJPP/10zL9PUvfX9c033zj+XkU8lidPntS5c+fk9Xr122+/\nKT8/X16vN9JjhMX48eNVUlLi9Bh9dv36dW3dulWZmZmd20pKSpSTk6M5c+aouLhYFRUVysnJcXDK\n3vH3miRp3bp1ysrKcmiqvjlx4oTOnj0rr9ertrY2LVy4UJmZmTH9Pkn+X9eECRMcf68i/jG8pqZG\nM2fOlCQ9/vjjunz5sq5duxbpMXAf8fHxKisrU0pKSue22tpazZgxQ5KUlZWlmpoap8YLir/XFOvG\njRunvXv3SpKGDh2q9vb2mH+fJP+vy+fzOTyVA7G8cOGCHnzwwc6fhw0bptbW1kiPERa//vqr3njj\nDS1evFjff/+90+MEze12KyEhocu29vb2zo9zSUlJMfee+XtNklReXq5ly5Zp7dq1unTpkgOTBS8u\nLk4ej0eSVFFRoSlTpsT8+yT5f11xcXGOv1eO/M7yXh0dHU6PEBKPPvqocnNzNWfOHDU1NWnZsmWq\nqqqKyd8XBdJf3rP58+crMTFRGRkZOnDggPbt26fNmzc7PVavHT9+XBUVFTp8+LBmzZrVuT3W36d7\nX1d9fb3j71XEjyxTUlJ04cKFzp///vtvJScnR3qMkEtNTdXcuXPlcrk0YsQIDR8+XC0tLU6PFTIe\nj0c3btyQJLW0tPSLj7OZmZnKyMiQJE2fPl2NjY0OT9R71dXVKi0tVVlZmYYMGdJv3qd/v65oeK8i\nHstJkyapsrJSktTQ0KCUlBQNHjw40mOE3JdffqlDhw5JklpbW3Xx4kWlpqY6PFXoTJw4sfN9q6qq\n0uTJkx2eqO9WrVqlpqYmSf/3O9l//idDrLh69aqKioq0f//+zrPE/eF98ve6ouG9cnU4cKy+Z88e\n/fjjj3K5XNqyZYuefPLJSI8QcteuXdP69et15coV3b59W7m5uZo6darTYwWlvr5eu3btUnNzs9xu\nt1JTU7Vnzx7l5eXp5s2bSktLU2FhoQYOHOj0qGb+XtOSJUt04MABDRo0SB6PR4WFhUpKSnJ6VDOv\n16sPP/xQjz32WOe2nTt3atOmTTH7Pkn+X9eiRYtUXl7u6HvlSCwBINZwBQ8AGBBLADAglgBgQCwB\nwIBYAoABsQQAA2IJAAbEEgAM/gepgR0uaefKmwAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ "<matplotlib.figure.Figure at 0x7fd6199ef278>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ }
+ },
+ {
+ "output_type": "stream",
+ "text": [
+ "Prediction: 4\n"
+ ],
+ "name": "stdout"
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUsAAAFKCAYAAACU6307AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAEelJREFUeJzt3W9MlfX/x/HXEWJyhg5BIG1ZfR0u\nKr3hhopOE2Q23FxiN0xCdNmGa5pG6hhTtNn8g85NtI0/aS1Z29moG96wILM2dYDKDRu0hrpyzCkC\nkUocDeH8brQfk8R4czyH64DPx624+Hid99nFnl2H61wHl8/n8wkA8J/GOD0AAIwExBIADIglABgQ\nSwAwIJYAYEAsAcCAWAKAAbEEAINwf//h7t27denSJblcLhUUFGjGjBmBnAsAQopfsTx//ryuXbsm\nj8ejq1evqqCgQB6PJ9CzAUDI8OtleE1NjdLT0yVJU6dO1e3bt9XZ2RnQwQAglPgVy7a2Nk2YMKHv\n65iYGLW2tgZsKAAINQG5wMNncQAY7fyKZXx8vNra2vq+vnXrluLi4gI2FACEGr9iOW/ePFVVVUmS\nGhsbFR8fr6ioqIAOBgChxK+r4TNnztSrr76qt99+Wy6XSzt27Aj0XAAQUlx8+C8ADI47eADAgFgC\ngAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCW\nAGBALAHAgFgCgAGxBAADYgkABsQSAAz8+lO4AJz3yy+/PLLtlVdeeWT777//bt7ne++9Z147f/58\n0zqPx2PeZyjjzBIADIglABgQSwAwIJYAYEAsAcCAWAKAAbEEAANiCQAGxBIADIglABi4fD6fz+kh\ngNHsr7/+Mq+tr683r33rrbce2dba2qq4uLh+29rb2837XL16tXntp59+alrndrvN+wxlnFkCgAGx\nBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAF/sAzww/37981rMzMzzWtPnTplXvu4O2O8\nXm+/rysrK837XLJkiXnt2LFjzWtHA84sAcDArzPLuro6bdy4UYmJiZKkadOmafv27QEdDABCid8v\nw2fNmqXi4uJAzgIAIYuX4QBg4Hcsr1y5onXr1mnlypU6d+5cIGcCgJDj1+dZtrS0qL6+XhkZGWpu\nblZOTo6qq6sVERERjBkBwHF+/c4yISGh7y0GU6ZM0cSJE9XS0qLnn38+oMMBoWoobx1aunSpee2T\nvnWos7NTUVFR/bZ9+eWX5n3y1qHH8+tl+IkTJ3T06FFJ/3wyc3t7uxISEgI6GACEEr/OLNPS0rR5\n82b98MMP6u7u1s6dO3kJDmBU8yuWUVFRKikpCfQsABCyuN0ReIj1vcNbtmwx77O7u9u8dii/9//x\nxx8H3P7zzz/3+/p///ufeZ94PN5nCQAGxBIADIglABgQSwAwIJYAYEAsAcCAWAKAAbEEAANiCQAG\nxBIADPz6PEvAaT09Pea1x48fH3D7mjVr9MUXX/Tblpuba9pnb2+v+fE/+eQT89qcnBzz2kmTJpnX\n4slxZgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABtzBgxHpcXflDGT16tUDbu/t\n7dWYMf6dL+zcudO8trCw0K/HQGjhzBIADIglABgQSwAwIJYAYEAsAcCAWAKAAbEEAANiCQAGxBIA\nDIglABhwuyNCSnFxsWndRx99ZN7n4/642UC3O77zzjumff77D539l7CwMPNahC7OLAHAgFgCgAGx\nBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAG3OyLovF6vee2kSZNM6+7cuePvOH0Gut2x\npqbG9G9nz579xI+PkcV0ZtnU1KT09HRVVFRIkm7cuKFVq1YpKytLGzdu1N9//x3UIQHAaYPGsqur\nS7t27VJKSkrftuLiYmVlZemrr77SCy+8oMrKyqAOCQBOGzSWERERKi8vV3x8fN+2uro6LVq0SJKU\nmppqfukCACNV+KALwsMVHt5/mdfrVUREhCQpNjZWra2twZkOAELEoLEcDNeHMJjIyEjz2j///DOI\nkzyqt7d3WB8PI5dfsXS73bp3757Gjh2rlpaWfi/RgX/jajhGA7/eZzl37lxVVVVJkqqrqzV//vyA\nDgUAoWbQM8uGhgbt27dP169fV3h4uKqqqnTgwAHl5+fL4/Fo8uTJWrZs2XDMCgCO4U3pCDpehmM0\neOILPHg6ffvtt+a1hw4dMq8NRASfRElJiWkdsXz6cG84ABgQSwAwIJYAYEAsAcCAWAKAAbEEAANi\nCQAGxBIADIglABgQSwAw4HZH+MV6W6D0zydTWU2ZMsW07v79++Z9trS0mNcCj8OZJQAYEEsAMCCW\nAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMOB2R/Rz4cKFAbcnJyf3+15tbW1QHv/77783\nrRvKX4FMTk72dxygD2eWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGDAHTzoZ8GC\nBQNu93q9/b43lD8YNhTWP1jm9XqD8vjA43BmCQAGxBIADIglABgQSwAwIJYAYEAsAcCAWAKAAbEE\nAANiCQAGxBIADLjd8Slw5MgR89r/uo3R31scZ8yYYV7rcrn8eoxAuXnzpmldV1eXeZ9ut9vfcRBC\nOLMEAANTLJuampSenq6KigpJUn5+vpYuXapVq1Zp1apV+umnn4I5IwA4btCX4V1dXdq1a5dSUlL6\nbc/Ly1NqamrQBgOAUDLomWVERITKy8sVHx8/HPMAQEhy+Xw+n2Xh4cOHNWHCBGVnZys/P1+tra3q\n7u5WbGystm/frpiYmGDPCgCO8etq+Jtvvqno6GglJSWprKxMR44cUWFhYaBnQ4AM5Wr4Bx98MOD2\n3t5ejRnj3/XAoVwNP3/+vGndUK5GP+5/5AM9pzfeeMO0z6+//tr8+FwNHx38+ulPSUlRUlKSJCkt\nLU1NTU0BHQoAQo1fsdywYYOam5slSXV1dUpMTAzoUAAQagZ9Gd7Q0KB9+/bp+vXrCg8PV1VVlbKz\ns7Vp0yZFRkbK7XZrz549wzErADhm0Fi+9tprOn78+CPbrb/bAYDRgNsdnwLt7e2OPv6WLVvMayMi\nIkzrhnKBZyiqqqpM63799VfzPmfOnOnvOAgh3O4IAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoAB\nsQQAA2IJAAbEEgAMuN0RfomNjTWvTU5ODvjjnz17NuD7lNT30YODee6554Ly+AhdnFkCgAGxBAAD\nYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAF38MAv48ePN6999tlnA/74FRUVAd+nJM2aNcu0\nLiEhISiPj9DFmSUAGBBLADAglgBgQ
CwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADDgdkf45bff\nfjOv/eabb8xrs7OzTet6e3vN+/T5fH59D3gYZ5YAYEAsAcCAWAKAAbEEAANiCQAGxBIADIglABgQ\nSwAwIJYAYEAsAcCA2x0RdO+++25Q1lq5XC6/vgc8zBTLoqIi1dfX68GDB8rNzdX06dO1detW9fT0\nKC4uTvv371dERESwZwUAxwway9raWl2+fFkej0cdHR3KzMxUSkqKsrKylJGRoYMHD6qyslJZWVnD\nMS8AOGLQ31kmJyfr0KFDkqTx48fL6/Wqrq5OixYtkiSlpqaqpqYmuFMCgMMGjWVYWJjcbrckqbKy\nUgsWLJDX6+172R0bG6vW1tbgTgkADjNf4Dl16pQqKyt17NgxLV68uG87nwcY+nbs2BGQtUP5DMmR\nYjQ+JwSHKZZnzpxRSUmJPvvsM40bN05ut1v37t3T2LFj1dLSovj4+GDPiSfw8ccfP/Ha3t5ejRkz\nut5pNtBzWr16tenffv7558EYCSFs0J/+u3fvqqioSKWlpYqOjpYkzZ07V1VVVZKk6upqzZ8/P7hT\nAoDDBj2zPHnypDo6OrRp06a+bXv37tW2bdvk8Xg0efJkLVu2LKhDAoDTBo3lihUrtGLFike28zIE\nwNOEO3ieAnl5eea1Fy5ceOz3lixZ0vffZ8+eNe/zzp075rVAqBpdv7EHgCAhlgBgQCwBwIBYAoAB\nsQQAA2IJAAbEEgAMiCUAGBBLADAglgBg4PLxgZTww3fffWde+/Btkk543I+4z+d75A+W1dbWmvY5\ne/bsJ54LIwtnlgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIDbHQHAgDNL\nADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbE\nEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAg3DLoqKiItXX1+vBgwfKzc3V6dOn1djYqOjoaEnS\n2rVrtXDhwmDOCQCOGjSWtbW1unz5sjwejzo6OpSZmak5c+YoLy9PqampwzEjADhu0FgmJydrxowZ\nkqTx48fL6/Wqp6cn6IMBQChx+Xw+n3Wxx+PRxYsXFRYWptbWVnV3dys2Nlbbt29XTExMMOcEAEeZ\nY3nq1CmVlpbq2LFjamhoUHR0tJKSklRWVqabN2+qsLAw2LMCgGNMV8PPnDmjkpISlZeXa9y4cUpJ\nSVFSUpIkKS0tTU1NTUEdEgCcNmgs7969q6KiIpWWlvZd/d6wYYOam5slSXV1dUpMTAzulADgsEEv\n8Jw8eVIdHR3atGlT37bly5dr06ZNioyMlNvt1p49e4I6JAA4bUgXeADgacUdPABgQCwBwIBYAoAB\nsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBg\nQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbhTjzo7t27\ndenSJblcLhUUFGjGjBlOjBFQdXV12rhxoxITEyVJ06ZN0/bt2x2eyn9NTU16//33tWbNGmVnZ+vG\njRvaunWrenp6FBcXp/379ysiIsLpMYfk388pPz9fjY2Nio6OliStXbtWCxcudHbIISoqKlJ9fb0e\nPHig3NxcTZ8+fcQfJ+nR53X69GnHj9Wwx/L8+fO6du2aPB6Prl69qoKCAnk8nuEeIyhmzZql4uJi\np8d4Yl1dXdq1a5dSUlL6thUXFysrK0sZGRk6ePCgKisrlZWV5eCUQzPQc5KkvLw8paamOjTVk6mt\nrdXly5fl8XjU0dGhzMxMpaSkjOjjJA38vObMmeP4sRr2l+E1NTVKT0+XJE2dOlW3b99WZ2fncI+B\n/xAREaHy8nLFx8f3baurq9OiRYskSampqaqpqXFqPL8M9JxGuuTkZB06dEiSNH78eHm93hF/nKSB\nn1dPT4/DUzkQy7a2Nk2YMKHv65iYGLW2tg73GEFx5coVrVu3TitXrtS5c+ecHsdv4eHhGjt2bL9t\nXq+37+VcbGzsiDtmAz0nSaqoqFBOTo4+/PBD/fHHHw5M5r+wsDC53W5JUmVlpRYsWDDij5M08PMK\nCwtz/Fg58jvLh/l8PqdHCIgXX3xR69evV0ZGhpqbm5WTk6Pq6uoR+fuiwYyWY/bmm28qOjpaSUlJ\nKisr05EjR1RYWOj0WEN26tQpVVZW6tixY1q8eHHf9pF+nB5+Xg0NDY4fq2E/s4yPj1dbW1vf17du\n3VJcXNxwjxFwCQkJWrJkiVwul6ZMmaKJEyeqpaXF6bECxu126969e5KklpaWUfFyNiUlRUlJSZKk\ntLQ0NTU1OTzR0J05c0YlJSUqLy/XuHHjRs1x+vfzCoVjNeyxnDdvnqqqqiRJjY2Nio+PV1RU1HCP\nEXAnTpzQ0aNHJUmtra1qb29XQkKCw1MFzty5c/uOW3V1tebPn+/wRE9uw4YNam5ulvTP72T//50M\nI8Xdu3dVVFSk0tLSvqvEo+E4DfS8QuFYuXwOnKsfOHBAFy9elMvl0o4dO/Tyyy8P9wgB19nZqc2b\nN+vOnTvq7u7W+vXr9frrrzs9ll8aGhq0b98+Xb9+XeHh4UpISNCBAweUn5+v+/fva/LkydqzZ4+e\neeYZp0c1G+g5ZWdnq6ysTJGRkXK73dqzZ49iY2OdHtXM4/Ho8OHDeumll/q27d27V9u2bRuxx0ka\n+HktX75cFRUVjh4rR2IJACMNd/AAgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHA4P8ALqDX\nN3rmU3AAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ "<matplotlib.figure.Figure at 0x7fd62944c6d8>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ }
+ },
+ {
+ "output_type": "stream",
+ "text": [
+ "Prediction: 1\n"
+ ],
+ "name": "stdout"
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUsAAAFKCAYAAACU6307AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAEqVJREFUeJzt3W9Ilff/x/HX+eWkpMQ0dQRrZdgm\nq24Miiz6Y0nrFKPVjZqiMgiW/SMX0ZxlDYJMiyALZrnqRlKc4a1u5B9cjIWZUbDA7ljWQqJMm1iR\nbSbne2P8/H7NY77P8Ryvoz0f97y8us777BpPrnMuP+e4vF6vVwCAd/o/pwcAgNGAWAKAAbEEAANi\nCQAGxBIADIglABgQSwAwIJYAYBAR6D88dOiQbt++LZfLpYKCAs2dOzeYcwFAWAkoljdu3NDDhw/l\n8XjU0tKigoICeTyeYM8GAGEjoJfhDQ0NSk9PlyTNnDlTXV1devnyZVAHA4BwElAsOzo6NHny5L6f\nY2Nj1d7eHrShACDcBOUGD5/FAWCsCyiWCQkJ6ujo6Pv56dOnio+PD9pQABBuAorlokWLVFNTI0m6\nc+eOEhISNHHixKAOBgDhJKC74Z9//rk+++wzff3113K5XDpw4ECw5wKAsOLiw38BYGis4AEAA2IJ\nAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBY\nAoABsQQAA2IJAAbEEgAMiCUAGBBLADAI6KtwgVC5ePGiab+9e/eaj/ngwQOf271er1wul/k4gWpp\naTHvm5SUFMJJMBxcWQKAAbEEAANiCQAGxBIADIglABgQSwAwIJYAYEAsAcCAWAKAAbEEAAOWOyIg\n9+/fD8lxMzMzTfutWrXKfMzBljv6MmPGjKAf88mTJ+Z9We4YvriyBAADYgkABsQSAAyIJQAYEEsA\nMCCWAGBALAHAgFgCgAGxBAADVvAgIOnp6eZ9/VntYrV06VLzvh6PZ9DfdXV19fs5OjradMwtW7aY\nH3/27NnmfRG+uLIEAIOAriwbGxu1c+dOJScnS5JmzZqlwsLCoA4GAOEk4Jfh8+fPV2lpaTBnAYCw\nxctwADAIOJb37t1Tbm6uMjIyVF9fH8yZACDsuLxer9fff9TW1qZbt27J7XartbVVOTk5qq2tVWRk\nZChmBADHBfSeZWJiolavXi1JmjZtmqZMmaK2tjZ99NFHQR0O4cufD6kNxZ8OFRUVmffdunWrz+3R\n0dF6/vz5gG0W/vzpUHFxsXlf6+Nj5AX0MvzSpUs6c+aMJKm9vV3Pnj1TYmJiUAcDgHAS0JXl8uXL\ntXv3bv3666/q6enRjz/+yEtwAGNaQLGcOHGiysrKgj0LAIStgG7wYHR5+325d9m4caPP7VVVVXK7\n3X0/V1dXD3suX6zvRebn54fk8YHB8HeWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCW\nAGBALAHAgOWO7wF/Pk5ssDX/Xq9XLpcroMf35+PUWMaIcMWVJQAYEEsAMCCWAGBALAHAgFgCgAGx\nBAADYgkABsQSAAyIJQAYsIJnlLp27Zp530WLFg378d5ewXPhwgXzv83IyBj24wNO48oSAAyIJQAY\nEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYRDg9APp7/vy5ab9gLGH0JTc31/Q7ljDi\nfcOVJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMODbHcOM2+027VddXW0+\n5qpVq8z7ejwen9ujo6P7LcWMjo42HxMYC0xXls3NzUpPT1dFRYUk6fHjx8rOzlZmZqZ27typf/75\nJ6RDAoDThozlq1evdPDgQaWmpvZtKy0tVWZmpi5cuKCPP/5YlZWVIR0SAJw2ZCwjIyNVXl6uhISE\nvm2NjY1asWKFJCktLU0NDQ2hmxAAwsCQH9EWERGhiIj+u3V3dysyMlKSFBcXp/b29tBMBwBhYtif\nZ8n9oeCqqqpyeoRBcVMH77OAYhkVFaXXr19r/Pjxamtr6/cSHcPD3XAgPAX0d5YLFy5UTU2NJKm2\ntlaLFy8O6lAAEG6GvLJsampScXGxHj16pIiICNXU1Ojo0aPKz8+Xx+PR1KlT9dVXX43ErADgmCFj\nOXv2bJ0/f37A9nPnzoVkIAAIR6zgGQH379837ztz5sygP35LS4t536SkpKA/PjAWsDYcAAyIJQAY\nEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYDPvzLDG0I0eOBP2Yubm55n1ZwggMH1eW\nAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgOWOI6Cmpibox8zOzg76Mceq\nwb5dMykpacDvrEtT//zzT/PjT58+3byvP/+vfPLJJwO2VVVVye1299uWk5NjPuaaNWvM+0ZHR5v3\nHQu4sgQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA5fX6/U6PcRY588Xhj148MC0\nX0tLS0ge32kXL1407bd3717zMQf7b+r1euVyuczHGQ2G+5xWrVpl3tfj8Zj2GysrfbiyBAADYgkA\nBsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABix3HAFbtmwx71tWVmbabzSdtlAs9wyG\n4SwN9GdZYHV1dUCPEYiRXMJpXXI7mpbbvgtXlgBgYIplc3Oz0tPTVVFRIUnKz8/Xl19+qezsbGVn\nZ+u3334L5YwA4Lghvzf81atXOnjwoFJTU/tt37Vrl9LS0kI2GACEkyGvLCMjI1VeXq6EhISRmAcA\nwpL5Bs+JEyc0efJkZWVlKT8/X+3t7erp6VFcXJwKCwsVGxsb6lkBwDFDvgz3Ze3atYqJiVFKSopO\nnz6tkydPav/+/cGebczgbjh3w0cKd8NDJ6C74ampqUpJSZEkLV++XM3NzUEdCgDCTUCx3LFjh1pb\nWyVJjY2NSk5ODupQABBuhnwZ3tTUpOLiYj169EgRERGqqalRVlaW8vLyNGHCBEVFRamoqGgkZgUA\nxwwZy9mzZ+v8+fMDtn/xxRchGQgAwlFAN3gAt9tt3jcUN238eTWzYcOGQX/39k2KKVOmBDzTYEL1\n7YbPnz/3ub2rq6vfz99//735mNYbjJK0bds2035VVVXmY4YzljsCgAGxBAADYgkABsQSAAyIJQAY\nEEsAMCCWAGBALAHAgFgCgAGxBAADljuOUteuXTPvu3DhwmEfd+HChf1+F6rPaKyvrzft589zepfR\n/FmLgy2jfHv7Tz/9ZD6mP8sd3zdcWQKAAbEEAANiCQAGxBIADIglABgQSwAwIJYAYEAsAcCAWAKA\nASt4RkBxcbF535qaGtN+WVlZ5mP+8
ccf5n19fZOn9O+KmcF+NxR/vlwsWCtz8F/+rPbyR2FhYUiO\nG664sgQAA2IJAAbEEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAYur9frdXoI/Jd1adqi\nRYtCPEl/Xq9XLpcroH/b1dVl3newL+HCQBcvXhywLSMjY8D2zMxM8zEvXLhg3nfNmjWm/cbKOeXK\nEgAMiCUAGBBLADAglgBgQCwBwIBYAoABsQQAA2IJAAbEEgAMiCUAGLDccZTy5xv7grE0cjjLHf35\ndseHDx+a9svOzjYf88MPP/S5PSkpSffv3++37ZdffjEdc8mSJebH98fBgwfN+1ZXVw/YNpzzJEn1\n9fXmfd+3b+I0fRVuSUmJbt26pTdv3mjz5s2aM2eO9uzZo97eXsXHx+vIkSOKjIwM9awA4JghY3n9\n+nXdvXtXHo9HnZ2dWrdunVJTU5WZmSm3261jx46psrLSr8X6ADDaDPme5bx583T8+HFJ/356SHd3\ntxobG7VixQpJUlpamhoaGkI7JQA4bMhYjhs3TlFRUZKkyspKLVmyRN3d3X0vu+Pi4tTe3h7aKQHA\nYab3LCWprq5OlZWVOnv2rFauXNm3nftDzvDnzfVgnaOxeK6TkpL6/Zyfn+/QJP+qqqoa9jHG4nkK\nB6ZYXr16VWVlZfr55581adIkRUVF6fXr1xo/frza2tqUkJAQ6jnxFu6Gczecu+Eja8iX4S9evFBJ\nSYlOnTqlmJgYSf/+R6qpqZEk1dbWavHixaGdEgAcNuSV5eXLl9XZ2am8vLy+bYcPH9a+ffvk8Xg0\ndepUffXVVyEdEgCcNmQsN27cqI0bNw7Yfu7cuZAMBADhiBU874G335d7l23btvncXlVVJbfb3fez\nr/fLRpvhvr/ntBkzZgzYdv/+/QE3rerq6szHnDJlinnfsfJFZFasDQcAA2IJAAbEEgAMiCUAGBBL\nADAglgBgQCwBwIBYAoABsQQAA2IJAAYsd0RA/PmIuPPnz5v3tX702u+//24+5g8//OBzu6/ljr6W\nEPry7bffmh9/w4YN5n398fayRoQWV5YAYEAsAcCAWAKAAbEEAANiCQAGxBIADIglABgQSwAwIJYA\nYEAsAcCA5Y4AYMCVJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBA\nLAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgEGEZaeSkhLdunVLb9680ebN\nm3XlyhXduXNHMTExkqRNmzZp2bJloZwTABw1ZCyvX7+uu3fvyuPxqLOzU+vWrdOCBQu0a9cupaWl\njcSMAOC4IWM5b948zZ07V5IUHR2t7u5u9fb2hnwwAAgnLq/X67Xu7PF4dPPmTY0bN07t7e3q6elR\nXFycCgsLFRsbG8o5AcBR5ljW1dXp1KlTOnv2rJqamhQTE6OUlBSdPn1aT5480f79+0M9KwA4xnQ3\n/OrVqyorK1N5ebkmTZqk1NRUpaSkSJKWL1+u5ubmkA4JAE4bMpYvXrxQSUmJTp061Xf3e8eOHWpt\nbZUkNTY2Kjk5ObRTAoDDhrzBc/nyZXV2diovL69v2/r165WXl6cJEyYoKipKRUVFIR0SAJzm1w0e\nAHhfsYIHAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHA\ngFgCgAGxBAADYgkABsQSAAyIJQAYEEsAMCCWAGBALAHAgFgCgAGxBAADYgkABsQSAAyIJQAYEEsA\nMCCWAGBALAHAIMKJBz106JBu374tl8ulgoICzZ0714kxgqqxsVE7d+5UcnKyJGnWrFkqLCx0eKrA\nNTc3a+vWrfrmm2+UlZWlx48fa8+ePert7VV8fLyOHDmiyMhIp8f0y9vPKT8/X3fu3FFMTIwkadOm\nTVq2bJmzQ/qppKREt27d0ps3b7R582bNmTNn1J8naeDzunLliuPnasRjeePGDT18+FAej0ctLS0q\nKCiQx+MZ6TFCYv78+SotLXV6jGF79eqVDh48qNTU1L5tpaWlyszMlNvt1rFjx1RZWanMzEwHp/SP\nr+ckSbt27VJaWppDUw3P9evXdffuXXk8HnV2dmrdunVKTU0d1edJ8v28FixY4Pi5GvGX4Q0NDUpP\nT5ckzZw5U11dXXr58uVIj4F3iIyMVHl5uRISEvq2NTY2asWKFZKktLQ0NTQ0ODVeQHw9p9Fu3rx5\nOn78uCQpOjpa3d3do/48Sb6fV29vr8NTORDLjo4OTZ48ue/n2NhYtbe3j/QYIXHv3j3l5uYqIyND\n9fX1To8TsIiICI0fP77ftu7u7r6Xc3FxcaPunPl6TpJUUVGhnJwcfffdd/rrr78cmCxw48aNU1RU\nlCSpsrJSS5YsGfXnSfL9vMaNG+f4uXLkPcv/5fV6nR4hKKZPn67t27fL7XartbVVOTk5qq2tHZXv\nFw1lrJyztWvXKiYmRikpKTp9+rROnjyp/fv3Oz2W3+rq6lRZWamzZ89q5cqVfdtH+3n63+fV1NTk\n+Lka8SvLhIQEdXR09P389OlTxcfHj/QYQZeYmKjVq1fL5XJp2rRpmjJlitra2pweK2iioqL0+vVr\nSVJbW9uYeDmbmpqqlJQUSdLy5cvV3Nzs8ET+u3r1qsrKylReXq5JkyaNmfP09vMKh3M14rFctGiR\nampqJEl37txRQkKCJk6cONJjBN2lS5d05swZSVJ7e7uePXumxMREh6cKnoULF/adt9raWi1evNjh\niYZvx44dam1tlfTve7L//5cMo8WLFy9UUlKiU6dO9d0lHgvnydfzCodz5fI6cK1+9OhR3bx5Uy6X\nSwcOHNCnn3460iME3cuXL7V79249f/5cPT092r59u5YuXer0WAFpampScXGxHj16pIiICCUmJuro\n0aPKz8/X33//ralTp6qoqEgffPCB06Oa+XpOWVlZOn36tCZMmKCoqCgVFRUpLi7O6VHNPB6PTpw4\noRkzZvRtO3z4sPbt2zdqz5Pk+3mtX79eFRUVjp4rR2IJAKMNK3gAwIBYAoABsQQAA2IJAAbEEgAM\niCUAGBBLADAglgBg8B9OkjtgR8VvdgAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ "<matplotlib.figure.Figure at 0x7fd619a40b00>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ }
+ },
+ {
+ "output_type": "stream",
+ "text": [
+ "Prediction: 6\n"
+ ],
+ "name": "stdout"
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "4SJizeJtNaAs",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "# Profiling\n",
+ "\n",
+ "If you want to drill down into the performance characteristics of your code, you can use native Python profilers like [`cProfile`](https://docs.python.org/3/library/profile.html). In the next exercise, you'll do just that."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "_2v0QnG8__PJ",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "## Exercise!\n",
+ "\n",
+ "This exercise does not require coding. If you have not completed the training exercise, replace `train_one_epoch` below with `_train_one_epoch`.\n",
+ "\n",
+ "Run the below cell and inspect the printed profiles. What parts of the code appear to be hotspots or\n",
+ "bottlenecks? How does sorting the profile by total time compare to sorting it\n",
+ "by cumulative time?\n",
+ "\n"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "IFypaYbG_9fB",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 714
+ },
+ "outputId": "d9c3596b-a165-4edd-fc6b-53ccd0d01d19"
+ },
+ "cell_type": "code",
+ "source": [
+ "import cProfile\n",
+ "import pstats\n",
+ "\n",
+ "cProfile.run(\"train_one_epoch(model, training_data, optimizer)\", \"training_profile\")\n",
+ "\n",
+ "stats = pstats.Stats(\"training_profile\").strip_dirs().sort_stats(\"tottime\")\n",
+ "stats.print_stats(10)\n",
+ "\n",
+ "stats.sort_stats(\"cumtime\").print_stats(10)"
+ ],
+ "execution_count": 17,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "Thu Jun 7 12:25:04 2018 training_profile\n",
+ "\n",
+ " 92209 function calls (91817 primitive calls) in 3.446 seconds\n",
+ "\n",
+ " Ordered by: internal time\n",
+ " List reduced from 672 to 10 due to restriction <10>\n",
+ "\n",
+ " ncalls tottime percall cumtime percall filename:lineno(function)\n",
+ " 1080 2.552 0.002 2.552 0.002 {built-in method _pywrap_tensorflow_internal.TFE_Py_FastPathExecute}\n",
+ " 83 0.753 0.009 0.753 0.009 {built-in method _pywrap_tensorflow_internal.TFE_Py_Execute}\n",
+ " 16 0.006 0.000 1.019 0.064 network.py:736(_run_internal_graph)\n",
+ " 16 0.005 0.000 2.253 0.141 {built-in method _pywrap_tensorflow_internal.TFE_Py_TapeGradient}\n",
+ " 2321 0.004 0.000 0.007 0.000 abc.py:178(__instancecheck__)\n",
+ " 288 0.004 0.000 0.009 0.000 inspect.py:2092(_signature_from_function)\n",
+ " 878 0.004 0.000 0.005 0.000 ops.py:5936(__enter__)\n",
+ " 288 0.004 0.000 0.016 0.000 inspect.py:1079(getfullargspec)\n",
+ " 11006 0.003 0.000 0.005 0.000 {built-in method builtins.isinstance}\n",
+ " 768 0.003 0.000 0.008 0.000 {built-in method _pywrap_tensorflow_internal.Flatten}\n",
+ "\n",
+ "\n",
+ "Thu Jun 7 12:25:04 2018 training_profile\n",
+ "\n",
+ " 92209 function calls (91817 primitive calls) in 3.446 seconds\n",
+ "\n",
+ " Ordered by: cumulative time\n",
+ " List reduced from 672 to 10 due to restriction <10>\n",
+ "\n",
+ " ncalls tottime percall cumtime percall filename:lineno(function)\n",
+ " 1 0.000 0.000 3.446 3.446 {built-in method builtins.exec}\n",
+ " 1 0.000 0.000 3.446 3.446 <string>:1(<module>)\n",
+ " 1 0.001 0.001 3.446 3.446 <ipython-input-14-bcffed60b545>:9(train_one_epoch)\n",
+ " 1080 2.552 0.002 2.552 0.002 {built-in method _pywrap_tensorflow_internal.TFE_Py_FastPathExecute}\n",
+ " 16 0.000 0.000 2.255 0.141 backprop.py:739(gradient)\n",
+ " 16 0.000 0.000 2.253 0.141 imperative_grad.py:31(imperative_grad)\n",
+ " 16 0.005 0.000 2.253 0.141 {built-in method _pywrap_tensorflow_internal.TFE_Py_TapeGradient}\n",
+ " 400 0.002 0.000 2.246 0.006 backprop.py:145(grad_fn)\n",
+ " 400 0.002 0.000 2.239 0.006 backprop.py:95(_magic_gradient_function)\n",
+ " 32 0.001 0.000 1.601 0.050 nn_grad.py:497(_Conv2DGrad)\n",
+ "\n",
+ "\n"
+ ],
+ "name": "stdout"
+ },
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "<pstats.Stats at 0x7fd61f841710>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "execution_count": 17
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "8ixpnyCNNTI4",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ ""
+ ],
+ "execution_count": 0,
+ "outputs": []
+ }
+ ]
+} \ No newline at end of file
diff --git a/tensorflow/contrib/eager/python/examples/workshop/3_inspecting.ipynb b/tensorflow/contrib/eager/python/examples/workshop/3_inspecting.ipynb
new file mode 100644
index 0000000000..64d19ec5c9
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/workshop/3_inspecting.ipynb
@@ -0,0 +1,443 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "name": "Debugging \"graph-first\" models with eager execution",
+ "version": "0.3.2",
+ "provenance": [],
+ "include_colab_link": true
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+ "[View in Colaboratory](https://colab.research.google.com/gist/alextp/9568ab40f6ed6f9a3ba4736f6aef6127/debugging-graph-first-models-with-eager-execution.ipynb)"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "mm-t0GuIu1Dt",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "This colab uses eager execution and the Python debugger to modify the execution of a translation model. This combination lets you quickly explore counterfactuals when researching and designing modifications to a model.\n",
+ "\n",
+ "The model, Transformer from [Tensor2Tensor](https://github.com/tensorflow/tensor2tensor), was originally written with graph building in mind. Executing it eagerly can still be helpful!"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "gxb1DvIDg4sv",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ "#@title License (double click to show)\n",
+ "# Copyright 2018 The TensorFlow Authors.\n",
+ "\n",
+ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "\n",
+ "# https://www.apache.org/licenses/LICENSE-2.0\n",
+ "\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License."
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "Gx3HA9N1ui64",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 37
+ },
+ "outputId": "f6986f34-f3e1-44e1-c902-2eb33081acad"
+ },
+ "cell_type": "code",
+ "source": [
+ "import tensorflow as tf\n",
+ "import pdb\n",
+ "tfe = tf.contrib.eager\n",
+ "\n",
+ "tf.enable_eager_execution()"
+ ],
+ "execution_count": 1,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "3LkOm2ct-Lmc",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 37
+ },
+ "outputId": "2edc74d9-6bc0-4e78-ab4e-83bf96099ef4"
+ },
+ "cell_type": "code",
+ "source": [
+ "!pip install -q -U tensor2tensor\n",
+ "from tensor2tensor.models import transformer"
+ ],
+ "execution_count": 2,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "1Z3oMsqV0zB6",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 170
+ },
+ "outputId": "0a8186ee-c688-457f-c9f6-9a6c1477a93b"
+ },
+ "cell_type": "code",
+ "source": [
+ "#@title Create a tensor2tensor translation model, fetch a checkpoint (double click to show)\n",
+ "from tensor2tensor import problems\n",
+ "from tensor2tensor.utils import trainer_lib\n",
+ "from tensor2tensor.utils import registry\n",
+ "\n",
+ "import numpy as np\n",
+ "import os\n",
+ "\n",
+ "# Setup some directories\n",
+ "data_dir = os.path.expanduser(\"~/t2t/data\")\n",
+ "tmp_dir = os.path.expanduser(\"~/t2t/tmp\")\n",
+ "train_dir = os.path.expanduser(\"~/t2t/train\")\n",
+ "checkpoint_dir = os.path.expanduser(\"~/t2t/checkpoints\")\n",
+ "tf.gfile.MakeDirs(data_dir)\n",
+ "tf.gfile.MakeDirs(tmp_dir)\n",
+ "tf.gfile.MakeDirs(train_dir)\n",
+ "tf.gfile.MakeDirs(checkpoint_dir)\n",
+ "gs_data_dir = \"gs://tensor2tensor-data\"\n",
+ "gs_ckpt_dir = \"gs://tensor2tensor-checkpoints/\"\n",
+ "\n",
+ "# Fetch the problem\n",
+ "ende_problem = problems.problem(\"translate_ende_wmt32k\")\n",
+ "\n",
+ "# Copy the vocab file locally so we can encode inputs and decode model outputs\n",
+ "# All vocabs are stored on GCS\n",
+ "vocab_name = \"vocab.ende.32768\"\n",
+ "vocab_file = os.path.join(gs_data_dir, vocab_name)\n",
+ "!gsutil cp {vocab_file} {data_dir}\n",
+ "\n",
+ "# Get the encoders from the problem\n",
+ "encoders = ende_problem.feature_encoders(data_dir)\n",
+ "\n",
+ "# Setup helper functions for encoding and decoding\n",
+ "def encode(input_str, output_str=None):\n",
+ " \"\"\"Input str to features dict, ready for inference\"\"\"\n",
+ " inputs = encoders[\"inputs\"].encode(input_str) + [1] # add EOS id\n",
+ " batch_inputs = tf.reshape(inputs, [1, -1, 1]) # Make it 3D.\n",
+ " return {\"inputs\": batch_inputs}\n",
+ "\n",
+ "def decode(integers):\n",
+ " \"\"\"List of ints to str\"\"\"\n",
+ " integers = list(np.squeeze(integers))\n",
+ " if 1 in integers:\n",
+ " integers = integers[:integers.index(1)]\n",
+ " return encoders[\"inputs\"].decode(np.squeeze(integers))\n",
+ "\n",
+ "# Copy the pretrained checkpoint locally\n",
+ "ckpt_name = \"transformer_ende_test\"\n",
+ "gs_ckpt = os.path.join(gs_ckpt_dir, ckpt_name)\n",
+ "!gsutil -q cp -R {gs_ckpt} {checkpoint_dir}\n",
+ "checkpoint_path = tf.train.latest_checkpoint(\n",
+ " os.path.join(checkpoint_dir, ckpt_name))\n",
+ "\n",
+ "# Create hparams and the model\n",
+ "model_name = \"transformer\"\n",
+ "hparams_set = \"transformer_base\"\n",
+ "\n",
+ "hparams = trainer_lib.create_hparams(hparams_set, data_dir=data_dir, problem_name=\"translate_ende_wmt32k\")\n",
+ "\n",
+ "# NOTE: Only create the model once when restoring from a checkpoint; it's a\n",
+ "# Layer and so subsequent instantiations will have different variable scopes\n",
+ "# that will not match the checkpoint.\n",
+ "translate_model = registry.model(model_name)(hparams, tf.estimator.ModeKeys.EVAL)"
+ ],
+ "execution_count": 3,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "Copying gs://tensor2tensor-data/vocab.ende.32768...\n",
+ "/ [1 files][316.4 KiB/316.4 KiB] \n",
+ "Operation completed over 1 objects/316.4 KiB. \n",
+ "INFO:tensorflow:Setting T2TModel mode to 'eval'\n",
+ "INFO:tensorflow:Setting hparams.layer_prepostprocess_dropout to 0.0\n",
+ "INFO:tensorflow:Setting hparams.symbol_dropout to 0.0\n",
+ "INFO:tensorflow:Setting hparams.attention_dropout to 0.0\n",
+ "INFO:tensorflow:Setting hparams.dropout to 0.0\n",
+ "INFO:tensorflow:Setting hparams.relu_dropout to 0.0\n"
+ ],
+ "name": "stdout"
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "4IblPXLGjuCl",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+        "We've created a Transformer model and fetched an existing training checkpoint. The model hasn't created its variables yet, and we want to load them from the checkpoint before they're used (restore-on-create) so that the first run of the model produces the correct output. The `tfe.restore_variables_on_create` API looks up variables by name on creation and restores their values."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "o3MWxcAqJoqG",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 51
+ },
+ "outputId": "fbc1b1bf-ffbe-4621-b3cb-5eb855fec3a8"
+ },
+ "cell_type": "code",
+ "source": [
+ "with tfe.restore_variables_on_create(checkpoint_path):\n",
+ " model_output = translate_model.infer(encode(\"Eager execution\"))\n",
+ "print(decode(model_output[\"outputs\"]))"
+ ],
+ "execution_count": 4,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "INFO:tensorflow:Greedy Decoding\n",
+ "Hinrichtung\n"
+ ],
+ "name": "stdout"
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "xk5HV9Hhu9zO",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+        "Relying on global variable names can be fragile, so for new code we recommend the object-based `tf.keras.Model.save_weights` or `tf.train.Checkpoint`. However, these require some small code changes to work with existing graph-building code.\n",
+ "\n",
+ "The Transformer model translates \"Eager execution\" in English to \"Hinrichtung\" in German, which refers to capital punishment rather than getting things done. Transformer first encodes the English, then decodes to German. We'll add a debugging hook at the start of the decode phase (once the encodings have been finalized) and see if we can correct the translation."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "GUGwbYvXZ9-7",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ "previous_fast_decode = transformer.fast_decode\n",
+ "def debug_fn(*args, **kwargs):\n",
+ " pdb.set_trace()\n",
+ " return previous_fast_decode(*args, **kwargs) # \"step\" in pdb to step in\n",
+ "transformer.fast_decode = debug_fn # Add our debugging hook to Transformer"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "f61HlvECxJn0",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "Now that we've \"monkey patched\" the model, we'll drop into a debugger just before decoding starts. In most cases it'd be simpler to add the `pdb.set_trace()` call to the code directly, but in this case we're working with prepackaged library code.\n",
+ "\n",
+ "First, let's find an encoding which represents the correct sense of \"execution\". Then we'll patch part of that encoding into the encoding of \"Eager execution\" to fix the translation. Feel free to poke around with the debugger (e.g. print a Tensor's value), but your main task is to save the encodings by assigning them to an attribute of the function:\n",
+ "\n",
+ "```\n",
+ "(running the next cell drops you into a pdb shell)\n",
+ "step\n",
+ "fast_decode.previous_encoding = encoder_output\n",
+ "continue\n",
+ "\n",
+ "```\n",
+ "\n",
+ "You can type `next` (or `n`) a few times before `continue` to watch the decoding ops run."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "dX4CPOGSpZrb",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 179
+ },
+ "outputId": "6de38c31-836f-40ef-b701-e42908172619"
+ },
+ "cell_type": "code",
+ "source": [
+ "model_output = translate_model.infer(encode(\"Immediate running\"))\n",
+ "print(decode(model_output[\"outputs\"]))"
+ ],
+ "execution_count": 7,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "> <ipython-input-6-ee9b4225ba2a>(4)debug_fn()\n",
+ "-> return previous_fast_decode(*args, **kwargs) # \"step\" in pdb to step in\n",
+ "(Pdb) step\n",
+ "--Call--\n",
+ "> /usr/local/lib/python2.7/dist-packages/tensor2tensor/models/transformer.py(427)fast_decode()\n",
+ "-> def fast_decode(encoder_output,\n",
+ "(Pdb) fast_decode.previous_encoding = encoder_output\n",
+ "(Pdb) continue\n",
+ "Sofortige Durchführung\n"
+ ],
+ "name": "stdout"
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "-ZEZciV4FpLo",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+        "Now we have a saved encoding that captures the correct sense of \"execution\"."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "QeC_oDVqHD_v",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 179
+ },
+ "outputId": "253c9af1-003e-46bd-8bf5-db968cf6a8cf"
+ },
+ "cell_type": "code",
+ "source": [
+ "# Assumes you followed the pdb instructions above!\n",
+ "transformer.fast_decode.previous_encoding"
+ ],
+ "execution_count": 8,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "<tf.Tensor: id=9528, shape=(1, 4, 512), dtype=float32, numpy=\n",
+ "array([[[-0.15239455, 0.12273102, -0.11209048, ..., -0.12478986,\n",
+ " 0.37216735, -0.40987235],\n",
+ " [-0.2686283 , 0.51448774, 0.03650613, ..., 0.08731575,\n",
+ " 0.51110077, -0.6646815 ],\n",
+ " [-0.24441548, 0.36622533, 0.11685672, ..., 0.21941349,\n",
+ " -0.03304008, -0.579611 ],\n",
+ " [-0.03339856, -0.01185844, 0.00579634, ..., 0.00294734,\n",
+ " 0.00136655, -0.01362935]]], dtype=float32)>"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "execution_count": 8
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "bC9JjeDcHEav",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+ "Let's replace part of the encoding for \"Eager execution\" with the encoding of \"Immediate running\".\n",
+ "\n",
+ "Again we'll drop into a pdb shell. This time we'll run some TensorFlow operations to patch the encodings while the model is running.\n",
+ "\n",
+ "```\n",
+ "(running the next cell again drops you into a pdb shell)\n",
+ "step\n",
+ "encoder_output = tf.concat([fast_decode.previous_encoding[:, :3], encoder_output[:, 3:]], axis=1)\n",
+ "continue\n",
+ "```"
+ ]
+ },
+ {
+ "metadata": {
+ "id": "t2as_Kn1h65G",
+ "colab_type": "code",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 179
+ },
+ "outputId": "5b4e546e-3bb4-4761-c545-467b631e3ffe"
+ },
+ "cell_type": "code",
+ "source": [
+ "model_output = translate_model.infer(encode(\"Eager execution\"))\n",
+ "print(decode(model_output[\"outputs\"]))"
+ ],
+ "execution_count": 9,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "> <ipython-input-6-ee9b4225ba2a>(4)debug_fn()\n",
+ "-> return previous_fast_decode(*args, **kwargs) # \"step\" in pdb to step in\n",
+ "(Pdb) step\n",
+ "--Call--\n",
+ "> /usr/local/lib/python2.7/dist-packages/tensor2tensor/models/transformer.py(427)fast_decode()\n",
+ "-> def fast_decode(encoder_output,\n",
+ "(Pdb) encoder_output = tf.concat([fast_decode.previous_encoding[:, :3], encoder_output[:, 3:]], axis=1)\n",
+ "(Pdb) continue\n",
+ "sofortige Ausführung\n"
+ ],
+ "name": "stdout"
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "rK6tYZ23I2cm",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+        "We get a different decoding, this time with the correct sense of \"execution\". Most likely we're keeping just the encoding of \"tion\" from \"Eager execution\", so this is no great breakthrough in translation modeling.\n",
+ "\n",
+ "Similarly it's possible to modify attention vectors, or change words during decoding to help debug a beam search."
+ ]
+ },
+ {
+ "metadata": {
+ "id": "Nb-4ipYNRWxA",
+ "colab_type": "text"
+ },
+ "cell_type": "markdown",
+ "source": [
+        "This colab was adapted from the [Tensor2Tensor colab](https://colab.research.google.com/github/tensorflow/tensor2tensor/blob/master/tensor2tensor/notebooks/hello_t2t.ipynb). Credit to Ankur Taly for the concept."
+ ]
+ }
+ ]
+} \ No newline at end of file
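[Editor's note] The debugging hook used in this notebook is a general pattern: temporarily wrap a library function so each call drops into `pdb`, then restore the original. A minimal sketch, with a hypothetical module `somelib` and function `compute` standing in for tensor2tensor's `transformer.fast_decode`:

```python
import pdb

import somelib  # hypothetical module whose function we want to intercept

_original_compute = somelib.compute  # keep a reference to the real function

def _debug_compute(*args, **kwargs):
    pdb.set_trace()  # "step" into the original call, or inspect/patch args here
    return _original_compute(*args, **kwargs)

somelib.compute = _debug_compute     # install the hook
# ... run code that calls somelib.compute, interact with pdb ...
somelib.compute = _original_compute  # restore when done
```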
diff --git a/tensorflow/contrib/estimator/BUILD b/tensorflow/contrib/estimator/BUILD
index 30d297a5fb..11d40f5982 100644
--- a/tensorflow/contrib/estimator/BUILD
+++ b/tensorflow/contrib/estimator/BUILD
@@ -18,6 +18,7 @@ py_library(
":boosted_trees",
":dnn",
":dnn_linear_combined",
+ ":early_stopping",
":export",
":extenders",
":head",
@@ -590,3 +591,31 @@ py_test(
"@six_archive//:six",
],
)
+
+py_library(
+ name = "early_stopping",
+ srcs = ["python/estimator/early_stopping.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:dtypes",
+ "//tensorflow/python:framework_ops",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:summary",
+ "//tensorflow/python:training",
+ "//tensorflow/python/estimator",
+ ],
+)
+
+py_test(
+ name = "early_stopping_test",
+ srcs = ["python/estimator/early_stopping_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":early_stopping",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python/estimator",
+ "@absl_py//absl/testing:parameterized",
+ ],
+)
diff --git a/tensorflow/contrib/estimator/__init__.py b/tensorflow/contrib/estimator/__init__.py
index 788ac5ca70..09fcfd66a1 100644
--- a/tensorflow/contrib/estimator/__init__.py
+++ b/tensorflow/contrib/estimator/__init__.py
@@ -23,6 +23,7 @@ from tensorflow.contrib.estimator.python.estimator.baseline import *
from tensorflow.contrib.estimator.python.estimator.boosted_trees import *
from tensorflow.contrib.estimator.python.estimator.dnn import *
from tensorflow.contrib.estimator.python.estimator.dnn_linear_combined import *
+from tensorflow.contrib.estimator.python.estimator.early_stopping import *
from tensorflow.contrib.estimator.python.estimator.export import *
from tensorflow.contrib.estimator.python.estimator.extenders import *
from tensorflow.contrib.estimator.python.estimator.head import *
@@ -63,6 +64,12 @@ _allowed_symbols = [
'RNNEstimator',
'export_saved_model_for_mode',
'export_all_saved_models',
+ 'make_early_stopping_hook',
+ 'read_eval_metrics',
+ 'stop_if_lower_hook',
+ 'stop_if_higher_hook',
+ 'stop_if_no_increase_hook',
+ 'stop_if_no_decrease_hook',
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
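[Editor's note] The newly exported early-stopping symbols plug into `tf.estimator.train_and_evaluate`. A minimal sketch, assuming `estimator`, `train_input_fn`, and `eval_input_fn` are defined elsewhere:

```python
import tensorflow as tf

# Stop training once eval loss has not decreased for 100000 steps.
hook = tf.contrib.estimator.stop_if_no_decrease_hook(
    estimator, metric_name='loss', max_steps_without_decrease=100000)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, hooks=[hook])
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
```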
diff --git a/tensorflow/contrib/estimator/python/estimator/baseline_test.py b/tensorflow/contrib/estimator/python/estimator/baseline_test.py
index d0e3e670f7..505c94e971 100644
--- a/tensorflow/contrib/estimator/python/estimator/baseline_test.py
+++ b/tensorflow/contrib/estimator/python/estimator/baseline_test.py
@@ -113,6 +113,8 @@ class BaselineEstimatorEvaluationTest(test.TestCase):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -141,6 +143,8 @@ class BaselineEstimatorEvaluationTest(test.TestCase):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -166,7 +170,9 @@ class BaselineEstimatorEvaluationTest(test.TestCase):
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
- ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
+ metric_keys.MetricKeys.PREDICTION_MEAN,
+ metric_keys.MetricKeys.LABEL_MEAN, ops.GraphKeys.GLOBAL_STEP),
+ eval_metrics.keys())
# Logit is bias which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
diff --git a/tensorflow/contrib/estimator/python/estimator/boosted_trees.py b/tensorflow/contrib/estimator/python/estimator/boosted_trees.py
index bd641014e9..43bfcffd79 100644
--- a/tensorflow/contrib/estimator/python/estimator/boosted_trees.py
+++ b/tensorflow/contrib/estimator/python/estimator/boosted_trees.py
@@ -49,7 +49,8 @@ class _BoostedTreesEstimator(estimator.Estimator):
l2_regularization=0.,
tree_complexity=0.,
min_node_weight=0.,
- config=None):
+ config=None,
+ center_bias=False):
"""Initializes a `BoostedTreesEstimator` instance.
Args:
@@ -82,17 +83,30 @@ class _BoostedTreesEstimator(estimator.Estimator):
considered. The value will be compared with sum(leaf_hessian)/
(batch_size * n_batches_per_layer).
config: `RunConfig` object to configure the runtime settings.
+ center_bias: Whether bias centering needs to occur. Bias centering refers
+ to the first node in the very first tree returning the prediction that
+ is aligned with the original labels distribution. For example, for
+ regression problems, the first node will return the mean of the labels.
+ For binary classification problems, it will return a logit for a prior
+ probability of label 1.
+
"""
# pylint:disable=protected-access
# HParams for the model.
tree_hparams = canned_boosted_trees._TreeHParams(
n_trees, max_depth, learning_rate, l1_regularization, l2_regularization,
- tree_complexity, min_node_weight)
+ tree_complexity, min_node_weight, center_bias)
def _model_fn(features, labels, mode, config):
return canned_boosted_trees._bt_model_fn(
- features, labels, mode, head, feature_columns, tree_hparams,
- n_batches_per_layer, config)
+ features,
+ labels,
+ mode,
+ head,
+ feature_columns,
+ tree_hparams,
+ n_batches_per_layer,
+ config=config)
super(_BoostedTreesEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
@@ -114,7 +128,8 @@ def boosted_trees_classifier_train_in_memory(
tree_complexity=0.,
min_node_weight=0.,
config=None,
- train_hooks=None):
+ train_hooks=None,
+ center_bias=False):
"""Trains a boosted tree classifier with in memory dataset.
Example:
@@ -186,7 +201,13 @@ def boosted_trees_classifier_train_in_memory(
considered. The value will be compared with sum(leaf_hessian)/
(batch_size * n_batches_per_layer).
config: `RunConfig` object to configure the runtime settings.
- train_hooks: a list of Hook instances to be passed to estimator.train().
+    train_hooks: a list of Hook instances to be passed to estimator.train().
+ center_bias: Whether bias centering needs to occur. Bias centering refers
+ to the first node in the very first tree returning the prediction that
+ is aligned with the original labels distribution. For example, for
+ regression problems, the first node will return the mean of the labels.
+ For binary classification problems, it will return a logit for a prior
+ probability of label 1.
Returns:
a `BoostedTreesClassifier` instance created with the given arguments and
@@ -207,7 +228,7 @@ def boosted_trees_classifier_train_in_memory(
# HParams for the model.
tree_hparams = canned_boosted_trees._TreeHParams(
n_trees, max_depth, learning_rate, l1_regularization, l2_regularization,
- tree_complexity, min_node_weight)
+ tree_complexity, min_node_weight, center_bias)
def _model_fn(features, labels, mode, config):
return canned_boosted_trees._bt_model_fn(
@@ -247,7 +268,8 @@ def boosted_trees_regressor_train_in_memory(
tree_complexity=0.,
min_node_weight=0.,
config=None,
- train_hooks=None):
+ train_hooks=None,
+ center_bias=False):
"""Trains a boosted tree regressor with in memory dataset.
Example:
@@ -313,6 +335,12 @@ def boosted_trees_regressor_train_in_memory(
(batch_size * n_batches_per_layer).
config: `RunConfig` object to configure the runtime settings.
train_hooks: a list of Hook instances to be passed to estimator.train().
+ center_bias: Whether bias centering needs to occur. Bias centering refers
+ to the first node in the very first tree returning the prediction that
+ is aligned with the original labels distribution. For example, for
+ regression problems, the first node will return the mean of the labels.
+ For binary classification problems, it will return a logit for a prior
+ probability of label 1.
Returns:
a `BoostedTreesClassifier` instance created with the given arguments and
@@ -332,7 +360,7 @@ def boosted_trees_regressor_train_in_memory(
# HParams for the model.
tree_hparams = canned_boosted_trees._TreeHParams(
n_trees, max_depth, learning_rate, l1_regularization, l2_regularization,
- tree_complexity, min_node_weight)
+ tree_complexity, min_node_weight, center_bias)
def _model_fn(features, labels, mode, config):
return canned_boosted_trees._bt_model_fn(
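[Editor's note] A minimal sketch of the new `center_bias` flag on the in-memory trainer, assuming `train_input_fn` and `feature_columns` are defined elsewhere:

```python
from tensorflow.contrib.estimator.python.estimator import boosted_trees

est = boosted_trees.boosted_trees_regressor_train_in_memory(
    train_input_fn=train_input_fn,
    feature_columns=feature_columns,
    n_trees=1,
    max_depth=5,
    # With bias centering, the first node predicts the label mean before any
    # trees are built, at the cost of a few extra training steps.
    center_bias=True)
```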
diff --git a/tensorflow/contrib/estimator/python/estimator/boosted_trees_test.py b/tensorflow/contrib/estimator/python/estimator/boosted_trees_test.py
index 76cbefe5e9..999c2aa5e2 100644
--- a/tensorflow/contrib/estimator/python/estimator/boosted_trees_test.py
+++ b/tensorflow/contrib/estimator/python/estimator/boosted_trees_test.py
@@ -115,6 +115,27 @@ class BoostedTreesEstimatorTest(test_util.TensorFlowTestCase):
eval_res = est.evaluate(input_fn=input_fn, steps=1)
self.assertAllClose(eval_res['average_loss'], 1.008551)
+ def testTrainAndEvaluateEstimatorWithCenterBias(self):
+ input_fn = _make_train_input_fn(is_classification=False)
+
+ est = boosted_trees._BoostedTreesEstimator(
+ feature_columns=self._feature_columns,
+ n_batches_per_layer=1,
+ n_trees=2,
+ head=self._head,
+ max_depth=5,
+ center_bias=True)
+
+    # It will stop after 12 steps because of the max depth and num trees.
+ num_steps = 100
+ # Train for a few steps, and validate final checkpoint.
+ est.train(input_fn, steps=num_steps)
+    # 10 steps for training and 2 steps for bias centering.
+ self._assert_checkpoint(
+ est.model_dir, global_step=12, finalized_trees=2, attempted_layers=10)
+ eval_res = est.evaluate(input_fn=input_fn, steps=1)
+ self.assertAllClose(eval_res['average_loss'], 0.614642)
+
def testInferEstimator(self):
train_input_fn = _make_train_input_fn(is_classification=False)
predict_input_fn = numpy_io.numpy_input_fn(
@@ -139,6 +160,33 @@ class BoostedTreesEstimatorTest(test_util.TensorFlowTestCase):
[[0.571619], [0.262821], [0.124549], [0.956801], [1.769801]],
[pred['predictions'] for pred in predictions])
+ def testInferEstimatorWithCenterBias(self):
+ train_input_fn = _make_train_input_fn(is_classification=False)
+ predict_input_fn = numpy_io.numpy_input_fn(
+ x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)
+
+ est = boosted_trees._BoostedTreesEstimator(
+ feature_columns=self._feature_columns,
+ n_batches_per_layer=1,
+ n_trees=1,
+ max_depth=5,
+ center_bias=True,
+ head=self._head)
+
+    # It will stop after 7 steps because of the max depth and num trees (5 for
+    # training and 2 for bias centering).
+ num_steps = 100
+ # Train for a few steps, and validate final checkpoint.
+ est.train(train_input_fn, steps=num_steps)
+ self._assert_checkpoint(
+ est.model_dir, global_step=7, finalized_trees=1, attempted_layers=5)
+ # Validate predictions.
+ predictions = list(est.predict(input_fn=predict_input_fn))
+
+ self.assertAllClose(
+ [[1.634501], [1.325703], [1.187431], [2.019683], [2.832683]],
+ [pred['predictions'] for pred in predictions])
+
def testBinaryClassifierTrainInMemoryAndEvalAndInfer(self):
train_input_fn = _make_train_input_fn(is_classification=True)
predict_input_fn = numpy_io.numpy_input_fn(
@@ -159,14 +207,40 @@ class BoostedTreesEstimatorTest(test_util.TensorFlowTestCase):
self.assertAllClose([[0], [1], [1], [0], [0]],
[pred['class_ids'] for pred in predictions])
+ def testBinaryClassifierTrainInMemoryAndEvalAndInferWithCenterBias(self):
+ train_input_fn = _make_train_input_fn(is_classification=True)
+ predict_input_fn = numpy_io.numpy_input_fn(
+ x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)
+
+ est = boosted_trees.boosted_trees_classifier_train_in_memory(
+ train_input_fn=train_input_fn,
+ feature_columns=self._feature_columns,
+ n_trees=1,
+ max_depth=5,
+ center_bias=True)
+    # It will stop after 8 steps (5 for training and 3 for bias centering),
+    # because of the max depth and num trees.
+ self._assert_checkpoint(
+ est.model_dir, global_step=8, finalized_trees=1, attempted_layers=5)
+
+ # Check evaluate and predict.
+ eval_res = est.evaluate(input_fn=train_input_fn, steps=1)
+ self.assertAllClose(eval_res['accuracy'], 1.0)
+ # Validate predictions.
+ predictions = list(est.predict(input_fn=predict_input_fn))
+ self.assertAllClose([[0], [1], [1], [0], [0]],
+ [pred['class_ids'] for pred in predictions])
+
def testBinaryClassifierTrainInMemoryWithDataset(self):
train_input_fn = _make_train_input_fn_dataset(is_classification=True)
predict_input_fn = numpy_io.numpy_input_fn(
x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)
est = boosted_trees.boosted_trees_classifier_train_in_memory(
- train_input_fn=train_input_fn, feature_columns=self._feature_columns,
- n_trees=1, max_depth=5)
+ train_input_fn=train_input_fn,
+ feature_columns=self._feature_columns,
+ n_trees=1,
+ max_depth=5)
# It will stop after 5 steps because of the max depth and num trees.
self._assert_checkpoint(
est.model_dir, global_step=5, finalized_trees=1, attempted_layers=5)
diff --git a/tensorflow/contrib/estimator/python/estimator/dnn.py b/tensorflow/contrib/estimator/python/estimator/dnn.py
index f1c60a912c..9efa8f474d 100644
--- a/tensorflow/contrib/estimator/python/estimator/dnn.py
+++ b/tensorflow/contrib/estimator/python/estimator/dnn.py
@@ -53,6 +53,18 @@ class DNNEstimator(estimator.Estimator):
l1_regularization_strength=0.001
))
+ # Or estimator using an optimizer with a learning rate decay.
+ estimator = DNNEstimator(
+ head=tf.contrib.estimator.multi_label_head(n_classes=3),
+ feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
+ hidden_units=[1024, 512, 256],
+      optimizer=lambda: tf.train.AdamOptimizer(
+          learning_rate=tf.train.exponential_decay(
+              learning_rate=0.1,
+              global_step=tf.train.get_global_step(),
+              decay_steps=10000,
+              decay_rate=0.96)))
+
# Or estimator with warm-starting from a previous checkpoint.
estimator = DNNEstimator(
head=tf.contrib.estimator.multi_label_head(n_classes=3),
@@ -100,7 +112,8 @@ class DNNEstimator(estimator.Estimator):
dropout=None,
input_layer_partitioner=None,
config=None,
- warm_start_from=None):
+ warm_start_from=None,
+ batch_norm=False):
"""Initializes a `DNNEstimator` instance.
Args:
@@ -115,8 +128,9 @@ class DNNEstimator(estimator.Estimator):
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
- optimizer: An instance of `tf.Optimizer` used to train the model. Defaults
- to Adagrad optimizer.
+ optimizer: An instance of `tf.Optimizer` used to train the model. Can also
+ be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or
+ callable. Defaults to Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
@@ -129,6 +143,7 @@ class DNNEstimator(estimator.Estimator):
string filepath is provided instead of a `WarmStartSettings`, then all
weights are warm-started, and it is assumed that vocabularies and Tensor
names are unchanged.
+ batch_norm: Whether to use batch normalization after each hidden layer.
"""
def _model_fn(features, labels, mode, config):
return dnn_lib._dnn_model_fn( # pylint: disable=protected-access
@@ -142,7 +157,8 @@ class DNNEstimator(estimator.Estimator):
activation_fn=activation_fn,
dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
- config=config)
+ config=config,
+ batch_norm=batch_norm)
super(DNNEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
warm_start_from=warm_start_from)
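[Editor's note] Besides the callable shown in the docstring above, `optimizer` now also accepts a string, and `batch_norm` toggles batch normalization. A minimal sketch, assuming `feature_columns` is defined elsewhere:

```python
import tensorflow as tf

estimator = tf.contrib.estimator.DNNEstimator(
    head=tf.contrib.estimator.multi_label_head(n_classes=3),
    feature_columns=feature_columns,
    hidden_units=[1024, 512, 256],
    optimizer='Adagrad',  # one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'
    batch_norm=True)      # batch normalization after each hidden layer
```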
diff --git a/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py b/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py
index ccaf1128bf..2eef60c39f 100644
--- a/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py
+++ b/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py
@@ -53,12 +53,19 @@ class DNNLinearCombinedEstimator(estimator.Estimator):
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...))
- # To apply L1 and L2 regularization, you can set optimizers as follows:
+ # To apply L1 and L2 regularization, you can set dnn_optimizer to:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
- # It is same for FtrlOptimizer.
+ # To apply learning rate decay, you can set dnn_optimizer to a callable:
+  lambda: tf.train.AdamOptimizer(
+      learning_rate=tf.train.exponential_decay(
+          learning_rate=0.1,
+          global_step=tf.train.get_global_step(),
+          decay_steps=10000,
+          decay_rate=0.96))
+ # It is the same for linear_optimizer.
# Input builders
def input_fn_train: # returns x, y
@@ -103,7 +110,8 @@ class DNNLinearCombinedEstimator(estimator.Estimator):
dnn_activation_fn=nn.relu,
dnn_dropout=None,
input_layer_partitioner=None,
- config=None):
+ config=None,
+ linear_sparse_combiner='sum'):
"""Initializes a DNNLinearCombinedEstimator instance.
Args:
@@ -116,12 +124,16 @@ class DNNLinearCombinedEstimator(estimator.Estimator):
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
- the linear part of the model. Defaults to FTRL optimizer.
+ the linear part of the model. Can also be a string (one of 'Adagrad',
+ 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to FTRL
+ optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
- the deep part of the model. Defaults to Adagrad optimizer.
+ the deep part of the model. Can also be a string (one of 'Adagrad',
+ 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to Adagrad
+ optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
@@ -131,6 +143,11 @@ class DNNLinearCombinedEstimator(estimator.Estimator):
input_layer_partitioner: Partitioner for input layer. Defaults to
`min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: RunConfig object to configure the runtime settings.
+ linear_sparse_combiner: A string specifying how to reduce the linear model
+ if a categorical column is multivalent. One of "mean", "sqrtn", and
+ "sum" -- these are effectively different ways to do example-level
+ normalization, which can be useful for bag-of-words features. For more
+ details, see @{tf.feature_column.linear_model$linear_model}.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
@@ -158,7 +175,8 @@ class DNNLinearCombinedEstimator(estimator.Estimator):
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner,
- config=config)
+ config=config,
+ linear_sparse_combiner=linear_sparse_combiner)
super(DNNLinearCombinedEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
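[Editor's note] A minimal sketch of the new `linear_sparse_combiner` argument, assuming `linear_columns` and `dnn_columns` (including multivalent categorical columns) are defined elsewhere:

```python
import tensorflow as tf

estimator = tf.contrib.estimator.DNNLinearCombinedEstimator(
    head=tf.contrib.estimator.regression_head(),
    linear_feature_columns=linear_columns,
    dnn_feature_columns=dnn_columns,
    dnn_hidden_units=[1000, 500, 100],
    # 'mean' normalizes each example's multivalent categorical features,
    # which can help with bag-of-words inputs; the default is 'sum'.
    linear_sparse_combiner='mean')
```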
diff --git a/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined_test.py b/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined_test.py
index dd009a6753..51b9ce7005 100644
--- a/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined_test.py
+++ b/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined_test.py
@@ -100,7 +100,8 @@ def _linear_only_estimator_fn(
weight_column=None,
optimizer='Ftrl',
config=None,
- partitioner=None):
+ partitioner=None,
+ sparse_combiner='sum'):
return dnn_linear_combined.DNNLinearCombinedEstimator(
head=head_lib.regression_head(
weight_column=weight_column, label_dimension=label_dimension,
@@ -110,7 +111,8 @@ def _linear_only_estimator_fn(
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
input_layer_partitioner=partitioner,
- config=config)
+ config=config,
+ linear_sparse_combiner=sparse_combiner)
class LinearOnlyEstimatorEvaluateTest(
diff --git a/tensorflow/contrib/estimator/python/estimator/early_stopping.py b/tensorflow/contrib/estimator/python/estimator/early_stopping.py
new file mode 100644
index 0000000000..af4855e91e
--- /dev/null
+++ b/tensorflow/contrib/estimator/python/estimator/early_stopping.py
@@ -0,0 +1,468 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Utilities for early stopping."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import operator
+import os
+
+from tensorflow.python.estimator import estimator as estimator_lib
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.platform import gfile
+from tensorflow.python.platform import tf_logging
+from tensorflow.python.summary import summary_iterator
+from tensorflow.python.training import basic_session_run_hooks
+from tensorflow.python.training import session_run_hook
+from tensorflow.python.training import training_util
+
+_EVENT_FILE_GLOB_PATTERN = 'events.out.tfevents.*'
+
+
+def make_early_stopping_hook(estimator,
+ should_stop_fn,
+ run_every_secs=60,
+ run_every_steps=None):
+ """Creates early-stopping hook.
+
+ Returns a `SessionRunHook` that stops training when `should_stop_fn` returns
+ `True`.
+
+ Usage example:
+
+ ```python
+ estimator = ...
+ hook = early_stopping.make_early_stopping_hook(
+ estimator, should_stop_fn=make_stop_fn(...))
+ train_spec = tf.estimator.TrainSpec(..., hooks=[hook])
+ tf.estimator.train_and_evaluate(estimator, train_spec, ...)
+ ```
+
+ Args:
+ estimator: A `tf.estimator.Estimator` instance.
+ should_stop_fn: `callable`, function that takes no arguments and returns a
+ `bool`. If the function returns `True`, stopping will be initiated by the
+ chief.
+ run_every_secs: If specified, calls `should_stop_fn` at an interval of
+ `run_every_secs` seconds. Defaults to 60 seconds. Either this or
+ `run_every_steps` must be set.
+ run_every_steps: If specified, calls `should_stop_fn` every
+ `run_every_steps` steps. Either this or `run_every_secs` must be set.
+
+ Returns:
+ A `SessionRunHook` that periodically executes `should_stop_fn` and initiates
+ early stopping if the function returns `True`.
+
+ Raises:
+ TypeError: If `estimator` is not of type `tf.estimator.Estimator`.
+ ValueError: If both `run_every_secs` and `run_every_steps` are set.
+ """
+ if not isinstance(estimator, estimator_lib.Estimator):
+ raise TypeError('`estimator` must have type `tf.estimator.Estimator`. '
+ 'Got: {}'.format(type(estimator)))
+
+ if run_every_secs is not None and run_every_steps is not None:
+    raise ValueError('Exactly one of `run_every_secs` and `run_every_steps` '
+                     'must be set.')
+
+ if estimator.config.is_chief:
+ return _StopOnPredicateHook(should_stop_fn, run_every_secs, run_every_steps)
+ else:
+ return _CheckForStoppingHook()
+
+
+def stop_if_higher_hook(estimator,
+ metric_name,
+ threshold,
+ eval_dir=None,
+ min_steps=0,
+ run_every_secs=60,
+ run_every_steps=None):
+ """Creates hook to stop if the given metric is higher than the threshold.
+
+ Usage example:
+
+ ```python
+ estimator = ...
+ # Hook to stop training if accuracy becomes higher than 0.9.
+ hook = early_stopping.stop_if_higher_hook(estimator, "accuracy", 0.9)
+ train_spec = tf.estimator.TrainSpec(..., hooks=[hook])
+ tf.estimator.train_and_evaluate(estimator, train_spec, ...)
+ ```
+
+ Args:
+ estimator: A `tf.estimator.Estimator` instance.
+ metric_name: `str`, metric to track. "loss", "accuracy", etc.
+ threshold: Numeric threshold for the given metric.
+ eval_dir: If set, directory containing summary files with eval metrics. By
+ default, `estimator.eval_dir()` will be used.
+ min_steps: `int`, stop is never requested if global step is less than this
+ value. Defaults to 0.
+ run_every_secs: If specified, calls `should_stop_fn` at an interval of
+ `run_every_secs` seconds. Defaults to 60 seconds. Either this or
+ `run_every_steps` must be set.
+ run_every_steps: If specified, calls `should_stop_fn` every
+ `run_every_steps` steps. Either this or `run_every_secs` must be set.
+
+ Returns:
+ An early-stopping hook of type `SessionRunHook` that periodically checks
+ if the given metric is higher than specified threshold and initiates
+ early stopping if true.
+ """
+ return _stop_if_threshold_crossed_hook(
+ estimator=estimator,
+ metric_name=metric_name,
+ threshold=threshold,
+ higher_is_better=True,
+ eval_dir=eval_dir,
+ min_steps=min_steps,
+ run_every_secs=run_every_secs,
+ run_every_steps=run_every_steps)
+
+
+def stop_if_lower_hook(estimator,
+ metric_name,
+ threshold,
+ eval_dir=None,
+ min_steps=0,
+ run_every_secs=60,
+ run_every_steps=None):
+ """Creates hook to stop if the given metric is lower than the threshold.
+
+ Usage example:
+
+ ```python
+ estimator = ...
+ # Hook to stop training if loss becomes lower than 100.
+ hook = early_stopping.stop_if_lower_hook(estimator, "loss", 100)
+ train_spec = tf.estimator.TrainSpec(..., hooks=[hook])
+ tf.estimator.train_and_evaluate(estimator, train_spec, ...)
+ ```
+
+ Args:
+ estimator: A `tf.estimator.Estimator` instance.
+ metric_name: `str`, metric to track. "loss", "accuracy", etc.
+ threshold: Numeric threshold for the given metric.
+ eval_dir: If set, directory containing summary files with eval metrics. By
+ default, `estimator.eval_dir()` will be used.
+ min_steps: `int`, stop is never requested if global step is less than this
+ value. Defaults to 0.
+ run_every_secs: If specified, calls `should_stop_fn` at an interval of
+ `run_every_secs` seconds. Defaults to 60 seconds. Either this or
+ `run_every_steps` must be set.
+ run_every_steps: If specified, calls `should_stop_fn` every
+ `run_every_steps` steps. Either this or `run_every_secs` must be set.
+
+ Returns:
+ An early-stopping hook of type `SessionRunHook` that periodically checks
+ if the given metric is lower than specified threshold and initiates
+ early stopping if true.
+ """
+ return _stop_if_threshold_crossed_hook(
+ estimator=estimator,
+ metric_name=metric_name,
+ threshold=threshold,
+ higher_is_better=False,
+ eval_dir=eval_dir,
+ min_steps=min_steps,
+ run_every_secs=run_every_secs,
+ run_every_steps=run_every_steps)
+
+
+def stop_if_no_increase_hook(estimator,
+ metric_name,
+ max_steps_without_increase,
+ eval_dir=None,
+ min_steps=0,
+ run_every_secs=60,
+ run_every_steps=None):
+ """Creates hook to stop if metric does not increase within given max steps.
+
+ Usage example:
+
+ ```python
+ estimator = ...
+  # Hook to stop training if accuracy does not increase for over 100000 steps.
+ hook = early_stopping.stop_if_no_increase_hook(estimator, "accuracy", 100000)
+ train_spec = tf.estimator.TrainSpec(..., hooks=[hook])
+ tf.estimator.train_and_evaluate(estimator, train_spec, ...)
+ ```
+
+ Args:
+ estimator: A `tf.estimator.Estimator` instance.
+ metric_name: `str`, metric to track. "loss", "accuracy", etc.
+ max_steps_without_increase: `int`, maximum number of training steps with no
+ increase in the given metric.
+ eval_dir: If set, directory containing summary files with eval metrics. By
+ default, `estimator.eval_dir()` will be used.
+ min_steps: `int`, stop is never requested if global step is less than this
+ value. Defaults to 0.
+ run_every_secs: If specified, calls `should_stop_fn` at an interval of
+ `run_every_secs` seconds. Defaults to 60 seconds. Either this or
+ `run_every_steps` must be set.
+ run_every_steps: If specified, calls `should_stop_fn` every
+ `run_every_steps` steps. Either this or `run_every_secs` must be set.
+
+ Returns:
+ An early-stopping hook of type `SessionRunHook` that periodically checks
+ if the given metric shows no increase over given maximum number of
+ training steps, and initiates early stopping if true.
+ """
+ return _stop_if_no_metric_improvement_hook(
+ estimator=estimator,
+ metric_name=metric_name,
+ max_steps_without_improvement=max_steps_without_increase,
+ higher_is_better=True,
+ eval_dir=eval_dir,
+ min_steps=min_steps,
+ run_every_secs=run_every_secs,
+ run_every_steps=run_every_steps)
+
+
+def stop_if_no_decrease_hook(estimator,
+ metric_name,
+ max_steps_without_decrease,
+ eval_dir=None,
+ min_steps=0,
+ run_every_secs=60,
+ run_every_steps=None):
+ """Creates hook to stop if metric does not decrease within given max steps.
+
+ Usage example:
+
+ ```python
+ estimator = ...
+  # Hook to stop training if loss does not decrease for over 100000 steps.
+ hook = early_stopping.stop_if_no_decrease_hook(estimator, "loss", 100000)
+ train_spec = tf.estimator.TrainSpec(..., hooks=[hook])
+ tf.estimator.train_and_evaluate(estimator, train_spec, ...)
+ ```
+
+ Args:
+ estimator: A `tf.estimator.Estimator` instance.
+ metric_name: `str`, metric to track. "loss", "accuracy", etc.
+ max_steps_without_decrease: `int`, maximum number of training steps with no
+ decrease in the given metric.
+ eval_dir: If set, directory containing summary files with eval metrics. By
+ default, `estimator.eval_dir()` will be used.
+ min_steps: `int`, stop is never requested if global step is less than this
+ value. Defaults to 0.
+ run_every_secs: If specified, calls `should_stop_fn` at an interval of
+ `run_every_secs` seconds. Defaults to 60 seconds. Either this or
+ `run_every_steps` must be set.
+ run_every_steps: If specified, calls `should_stop_fn` every
+ `run_every_steps` steps. Either this or `run_every_secs` must be set.
+
+ Returns:
+ An early-stopping hook of type `SessionRunHook` that periodically checks
+ if the given metric shows no decrease over given maximum number of
+ training steps, and initiates early stopping if true.
+ """
+ return _stop_if_no_metric_improvement_hook(
+ estimator=estimator,
+ metric_name=metric_name,
+ max_steps_without_improvement=max_steps_without_decrease,
+ higher_is_better=False,
+ eval_dir=eval_dir,
+ min_steps=min_steps,
+ run_every_secs=run_every_secs,
+ run_every_steps=run_every_steps)
+
+
+def read_eval_metrics(eval_dir):
+ """Helper to read eval metrics from eval summary files.
+
+ Args:
+ eval_dir: Directory containing summary files with eval metrics.
+
+ Returns:
+ A `dict` with global steps mapping to `dict` of metric names and values.
+ """
+ eval_metrics_dict = {}
+ for event in _summaries(eval_dir):
+ if not event.HasField('summary'):
+ continue
+ metrics = {}
+ for value in event.summary.value:
+ if value.HasField('simple_value'):
+ metrics[value.tag] = value.simple_value
+ if metrics:
+ eval_metrics_dict[event.step] = metrics
+ return eval_metrics_dict
+
+
+def _stop_if_threshold_crossed_hook(estimator, metric_name, threshold,
+ higher_is_better, eval_dir, min_steps,
+ run_every_secs, run_every_steps):
+ """Creates early-stopping hook to stop training if threshold is crossed."""
+
+ if eval_dir is None:
+ eval_dir = estimator.eval_dir()
+
+ is_lhs_better = operator.gt if higher_is_better else operator.lt
+ greater_or_lesser = 'greater than' if higher_is_better else 'less than'
+
+ def stop_if_threshold_crossed_fn():
+ """Returns `True` if the given metric crosses specified threshold."""
+
+ eval_results = read_eval_metrics(eval_dir)
+
+ for step, metrics in eval_results.items():
+ if step < min_steps:
+ continue
+ val = metrics[metric_name]
+ if is_lhs_better(val, threshold):
+ tf_logging.info(
+ 'At step %s, metric "%s" has value %s which is %s the configured '
+ 'threshold (%s) for early stopping.', step, metric_name, val,
+ greater_or_lesser, threshold)
+ return True
+ return False
+
+ return make_early_stopping_hook(
+ estimator=estimator,
+ should_stop_fn=stop_if_threshold_crossed_fn,
+ run_every_secs=run_every_secs,
+ run_every_steps=run_every_steps)
+
+
+def _stop_if_no_metric_improvement_hook(
+ estimator, metric_name, max_steps_without_improvement, higher_is_better,
+ eval_dir, min_steps, run_every_secs, run_every_steps):
+ """Returns hook to stop training if given metric shows no improvement."""
+
+ if eval_dir is None:
+ eval_dir = estimator.eval_dir()
+
+ is_lhs_better = operator.gt if higher_is_better else operator.lt
+ increase_or_decrease = 'increase' if higher_is_better else 'decrease'
+
+ def stop_if_no_metric_improvement_fn():
+ """Returns `True` if metric does not improve within max steps."""
+
+ eval_results = read_eval_metrics(eval_dir)
+
+ best_val = None
+ best_val_step = None
+ for step, metrics in eval_results.items():
+ if step < min_steps:
+ continue
+ val = metrics[metric_name]
+ if best_val is None or is_lhs_better(val, best_val):
+ best_val = val
+ best_val_step = step
+ if step - best_val_step >= max_steps_without_improvement:
+ tf_logging.info(
+ 'No %s in metric "%s" for %s steps, which is greater than or equal '
+ 'to max steps (%s) configured for early stopping.',
+ increase_or_decrease, metric_name, step - best_val_step,
+ max_steps_without_improvement)
+ return True
+ return False
+
+ return make_early_stopping_hook(
+ estimator=estimator,
+ should_stop_fn=stop_if_no_metric_improvement_fn,
+ run_every_secs=run_every_secs,
+ run_every_steps=run_every_steps)
+
+
+def _summaries(eval_dir):
+ """Yields `tensorflow.Event` protos from event files in the eval dir.
+
+ Args:
+ eval_dir: Directory containing summary files with eval metrics.
+
+ Yields:
+ `tensorflow.Event` object read from the event files.
+ """
+ for event_file in gfile.Glob(
+ os.path.join(eval_dir, _EVENT_FILE_GLOB_PATTERN)):
+ for event in summary_iterator.summary_iterator(event_file):
+ yield event
+
+
+def _get_or_create_stop_var():
+ with variable_scope.variable_scope(
+ name_or_scope='signal_early_stopping',
+ values=[],
+ reuse=variable_scope.AUTO_REUSE):
+ return variable_scope.get_variable(
+ name='STOP',
+ shape=[],
+ dtype=dtypes.bool,
+ initializer=init_ops.constant_initializer(False),
+ collections=[ops.GraphKeys.GLOBAL_VARIABLES],
+ trainable=False)
+
+
+class _StopOnPredicateHook(session_run_hook.SessionRunHook):
+ """Hook that requests stop when `should_stop_fn` returns `True`."""
+
+ def __init__(self, should_stop_fn, run_every_secs=60, run_every_steps=None):
+ if not callable(should_stop_fn):
+ raise TypeError('`should_stop_fn` must be callable.')
+
+ self._should_stop_fn = should_stop_fn
+ self._timer = basic_session_run_hooks.SecondOrStepTimer(
+ every_secs=run_every_secs, every_steps=run_every_steps)
+ self._global_step_tensor = None
+ self._stop_var = None
+ self._stop_op = None
+
+ def begin(self):
+ self._global_step_tensor = training_util.get_global_step()
+ self._stop_var = _get_or_create_stop_var()
+ self._stop_op = state_ops.assign(self._stop_var, True)
+
+ def before_run(self, run_context):
+ del run_context
+ return session_run_hook.SessionRunArgs(self._global_step_tensor)
+
+ def after_run(self, run_context, run_values):
+ global_step = run_values.results
+ if self._timer.should_trigger_for_step(global_step):
+ self._timer.update_last_triggered_step(global_step)
+ if self._should_stop_fn():
+ tf_logging.info('Requesting early stopping at global step %d',
+ global_step)
+ run_context.session.run(self._stop_op)
+ run_context.request_stop()
+
+
+class _CheckForStoppingHook(session_run_hook.SessionRunHook):
+ """Hook that requests stop if stop is requested by `_StopOnPredicateHook`."""
+
+ def __init__(self):
+ self._stop_var = None
+
+ def begin(self):
+ self._stop_var = _get_or_create_stop_var()
+
+ def before_run(self, run_context):
+ del run_context
+ return session_run_hook.SessionRunArgs(self._stop_var)
+
+ def after_run(self, run_context, run_values):
+ should_early_stop = run_values.results
+ if should_early_stop:
+ tf_logging.info('Early stopping requested, suspending run.')
+ run_context.request_stop()
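[Editor's note] A minimal sketch of inspecting the metrics these hooks read, assuming an estimator has already written eval summaries (the path stands in for `estimator.eval_dir()`):

```python
from tensorflow.contrib.estimator.python.estimator import early_stopping

# read_eval_metrics returns {global_step: {metric_name: value}}.
metrics_by_step = early_stopping.read_eval_metrics('/tmp/model/eval')
for step in sorted(metrics_by_step):
    print(step, metrics_by_step[step].get('loss'))
```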
diff --git a/tensorflow/contrib/estimator/python/estimator/early_stopping_test.py b/tensorflow/contrib/estimator/python/estimator/early_stopping_test.py
new file mode 100644
index 0000000000..b5eee818fa
--- /dev/null
+++ b/tensorflow/contrib/estimator/python/estimator/early_stopping_test.py
@@ -0,0 +1,233 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for early_stopping."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import tempfile
+
+from absl.testing import parameterized
+from tensorflow.contrib.estimator.python.estimator import early_stopping
+from tensorflow.python.estimator import estimator
+from tensorflow.python.estimator import run_config
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import state_ops
+from tensorflow.python.platform import test
+from tensorflow.python.training import monitored_session
+from tensorflow.python.training import training_util
+
+
+class _FakeRunConfig(run_config.RunConfig):
+
+ def __init__(self, is_chief):
+ super(_FakeRunConfig, self).__init__()
+ self._is_chief = is_chief
+
+ @property
+ def is_chief(self):
+ return self._is_chief
+
+
+def _dummy_model_fn(features, labels, params):
+ _, _, _ = features, labels, params
+
+
+class _FakeEstimator(estimator.Estimator):
+ """Fake estimator for testing."""
+
+ def __init__(self, config):
+ super(_FakeEstimator, self).__init__(
+ model_fn=_dummy_model_fn, config=config)
+
+
+def _write_events(eval_dir, params):
+ """Test helper to write events to summary files."""
+ for steps, loss, accuracy in params:
+ estimator._write_dict_to_summary(eval_dir, {
+ 'loss': loss,
+ 'accuracy': accuracy,
+ }, steps)
+
+
+class ReadEvalMetricsTest(test.TestCase):
+
+ def test_read_eval_metrics(self):
+ eval_dir = tempfile.mkdtemp()
+ _write_events(
+ eval_dir,
+ [
+ # steps, loss, accuracy
+ (1000, 1, 2),
+ (2000, 3, 4),
+ (3000, 5, 6),
+ ])
+ self.assertEqual({
+ 1000: {
+ 'loss': 1,
+ 'accuracy': 2
+ },
+ 2000: {
+ 'loss': 3,
+ 'accuracy': 4
+ },
+ 3000: {
+ 'loss': 5,
+ 'accuracy': 6
+ },
+ }, early_stopping.read_eval_metrics(eval_dir))
+
+
+class EarlyStoppingHooksTest(test.TestCase, parameterized.TestCase):
+
+ def setUp(self):
+ config = _FakeRunConfig(is_chief=True)
+ self._estimator = _FakeEstimator(config=config)
+ eval_dir = self._estimator.eval_dir()
+ os.makedirs(eval_dir)
+ _write_events(
+ eval_dir,
+ [
+ # steps, loss, accuracy
+ (1000, 0.8, 0.5),
+ (2000, 0.7, 0.6),
+ (3000, 0.4, 0.7),
+ (3500, 0.41, 0.68),
+ ])
+
+ def run_session(self, hooks, should_stop):
+ hooks = hooks if isinstance(hooks, list) else [hooks]
+ with ops.Graph().as_default():
+ training_util.create_global_step()
+ no_op = control_flow_ops.no_op()
+ with monitored_session.SingularMonitoredSession(hooks=hooks) as mon_sess:
+ mon_sess.run(no_op)
+ self.assertEqual(mon_sess.should_stop(), should_stop)
+
+ @parameterized.parameters((0.8, 0, False), (0.6, 4000, False), (0.6, 0, True))
+ def test_stop_if_higher_hook(self, threshold, min_steps, should_stop):
+ self.run_session(
+ early_stopping.stop_if_higher_hook(
+ self._estimator,
+ metric_name='accuracy',
+ threshold=threshold,
+ min_steps=min_steps), should_stop)
+
+ @parameterized.parameters((0.3, 0, False), (0.5, 4000, False), (0.5, 0, True))
+ def test_stop_if_lower_hook(self, threshold, min_steps, should_stop):
+ self.run_session(
+ early_stopping.stop_if_lower_hook(
+ self._estimator,
+ metric_name='loss',
+ threshold=threshold,
+ min_steps=min_steps), should_stop)
+
+ @parameterized.parameters((1500, 0, False), (500, 4000, False),
+ (500, 0, True))
+ def test_stop_if_no_increase_hook(self, max_steps, min_steps, should_stop):
+ self.run_session(
+ early_stopping.stop_if_no_increase_hook(
+ self._estimator,
+ metric_name='accuracy',
+ max_steps_without_increase=max_steps,
+ min_steps=min_steps), should_stop)
+
+ @parameterized.parameters((1500, 0, False), (500, 4000, False),
+ (500, 0, True))
+ def test_stop_if_no_decrease_hook(self, max_steps, min_steps, should_stop):
+ self.run_session(
+ early_stopping.stop_if_no_decrease_hook(
+ self._estimator,
+ metric_name='loss',
+ max_steps_without_decrease=max_steps,
+ min_steps=min_steps), should_stop)
+
+ @parameterized.parameters((1500, 0.3, False), (1500, 0.5, True),
+ (500, 0.3, True))
+ def test_multiple_hooks(self, max_steps, loss_threshold, should_stop):
+ self.run_session([
+ early_stopping.stop_if_no_decrease_hook(
+ self._estimator,
+ metric_name='loss',
+ max_steps_without_decrease=max_steps),
+ early_stopping.stop_if_lower_hook(
+ self._estimator, metric_name='loss', threshold=loss_threshold)
+ ], should_stop)
+
+ @parameterized.parameters(False, True)
+ def test_make_early_stopping_hook(self, should_stop):
+ self.run_session([
+ early_stopping.make_early_stopping_hook(
+ self._estimator, should_stop_fn=lambda: should_stop)
+ ], should_stop)
+
+ def test_make_early_stopping_hook_typeerror(self):
+ with self.assertRaises(TypeError):
+ early_stopping.make_early_stopping_hook(
+ estimator=object(), should_stop_fn=lambda: True)
+
+ def test_make_early_stopping_hook_valueerror(self):
+ with self.assertRaises(ValueError):
+ early_stopping.make_early_stopping_hook(
+ self._estimator,
+ should_stop_fn=lambda: True,
+ run_every_secs=60,
+ run_every_steps=100)
+
+
+class StopOnPredicateHookTest(test.TestCase):
+
+ def test_stop(self):
+ hook = early_stopping._StopOnPredicateHook(
+ should_stop_fn=lambda: False, run_every_secs=0)
+ with ops.Graph().as_default():
+ training_util.create_global_step()
+ no_op = control_flow_ops.no_op()
+ with monitored_session.SingularMonitoredSession(hooks=[hook]) as mon_sess:
+ mon_sess.run(no_op)
+ self.assertFalse(mon_sess.should_stop())
+ self.assertFalse(mon_sess.raw_session().run(hook._stop_var))
+
+ hook = early_stopping._StopOnPredicateHook(
+ should_stop_fn=lambda: True, run_every_secs=0)
+ with ops.Graph().as_default():
+ training_util.create_global_step()
+ no_op = control_flow_ops.no_op()
+ with monitored_session.SingularMonitoredSession(hooks=[hook]) as mon_sess:
+ mon_sess.run(no_op)
+ self.assertTrue(mon_sess.should_stop())
+ self.assertTrue(mon_sess.raw_session().run(hook._stop_var))
+
+
+class CheckForStoppingHookTest(test.TestCase):
+
+ def test_stop(self):
+ hook = early_stopping._CheckForStoppingHook()
+ with ops.Graph().as_default():
+ no_op = control_flow_ops.no_op()
+ assign_op = state_ops.assign(early_stopping._get_or_create_stop_var(),
+ True)
+ with monitored_session.SingularMonitoredSession(hooks=[hook]) as mon_sess:
+ mon_sess.run(no_op)
+ self.assertFalse(mon_sess.should_stop())
+ mon_sess.run(assign_op)
+ self.assertTrue(mon_sess.should_stop())
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/estimator/python/estimator/head.py b/tensorflow/contrib/estimator/python/estimator/head.py
index 9594e5132f..c9d86ef4ab 100644
--- a/tensorflow/contrib/estimator/python/estimator/head.py
+++ b/tensorflow/contrib/estimator/python/estimator/head.py
@@ -534,7 +534,8 @@ def multi_label_head(n_classes,
* An integer `SparseTensor` of class indices. The `dense_shape` must be
`[D0, D1, ... DN, ?]` and the values within `[0, n_classes)`.
* If `label_vocabulary` is given, a string `SparseTensor`. The `dense_shape`
- must be `[D0, D1, ... DN, ?]` and the values within `label_vocabulary`.
+ must be `[D0, D1, ... DN, ?]` and the values within `label_vocabulary`.
+ * A multi-hot tensor of shape `[D0, D1, ... DN, n_classes]`.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, or `[D0, D1, ... DN, 1]`.
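[Editor's note] A small sketch of the newly documented multi-hot label format (it mirrors the test added in head_test.py below):

    import numpy as np

    head = tf.contrib.estimator.multi_label_head(
        n_classes=2, label_vocabulary=['class0', 'class1'])
    # Multi-hot labels of shape [batch_size, n_classes]: example 0 carries
    # only class0; example 1 carries both classes.
    labels_multi_hot = np.array([[1, 0], [1, 1]], dtype=np.int64)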
diff --git a/tensorflow/contrib/estimator/python/estimator/head_test.py b/tensorflow/contrib/estimator/python/estimator/head_test.py
index b2b57fa06b..7b884402d4 100644
--- a/tensorflow/contrib/estimator/python/estimator/head_test.py
+++ b/tensorflow/contrib/estimator/python/estimator/head_test.py
@@ -568,6 +568,33 @@ class MultiLabelHead(test.TestCase):
expected_loss=expected_loss,
expected_metrics=expected_metrics)
+ def test_eval_with_label_vocabulary_with_multi_hot_input(self):
+ n_classes = 2
+ head = head_lib.multi_label_head(
+ n_classes, label_vocabulary=['class0', 'class1'])
+ logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
+ labels_multi_hot = np.array([[1, 0], [1, 1]], dtype=np.int64)
+ # loss = labels * -log(sigmoid(logits)) +
+ # (1 - labels) * -log(1 - sigmoid(logits))
+ # Sum over examples, divide by batch_size.
+ expected_loss = 0.5 * np.sum(
+ _sigmoid_cross_entropy(labels=labels_multi_hot, logits=logits))
+ keys = metric_keys.MetricKeys
+ expected_metrics = {
+ # Average loss over examples.
+ keys.LOSS_MEAN: expected_loss,
+ # auc and auc_pr cannot be reliably calculated for only 4 samples, but
+ # this assert tests that the algorithm remains consistent.
+ keys.AUC: 0.3333,
+ keys.AUC_PR: 0.7639,
+ }
+ self._test_eval(
+ head=head,
+ logits=logits,
+ labels=labels_multi_hot,
+ expected_loss=expected_loss,
+ expected_metrics=expected_metrics)
+
def test_eval_with_thresholds(self):
n_classes = 2
thresholds = [0.25, 0.5, 0.75]
diff --git a/tensorflow/contrib/estimator/python/estimator/linear.py b/tensorflow/contrib/estimator/python/estimator/linear.py
index 3bf4abe83d..62a37abefb 100644
--- a/tensorflow/contrib/estimator/python/estimator/linear.py
+++ b/tensorflow/contrib/estimator/python/estimator/linear.py
@@ -39,6 +39,18 @@ class LinearEstimator(estimator.Estimator):
feature_columns=[categorical_column_a,
categorical_feature_a_x_categorical_feature_b])
+ # Or estimator using an optimizer with a learning rate decay.
+ estimator = LinearEstimator(
+ head=tf.contrib.estimator.multi_label_head(n_classes=3),
+ feature_columns=[categorical_column_a,
+ categorical_feature_a_x_categorical_feature_b],
+ optimizer=lambda: tf.train.FtrlOptimizer(
+ learning_rate=tf.train.exponential_decay(
+ learning_rate=0.1,
+ global_step=tf.train.get_global_step(),
+ decay_steps=10000,
+ decay_rate=0.96)))
+
# Or estimator using the FTRL optimizer with regularization.
estimator = LinearEstimator(
head=tf.contrib.estimator.multi_label_head(n_classes=3),
@@ -87,7 +99,8 @@ class LinearEstimator(estimator.Estimator):
model_dir=None,
optimizer='Ftrl',
config=None,
- partitioner=None):
+ partitioner=None,
+ sparse_combiner='sum'):
"""Initializes a `LinearEstimator` instance.
Args:
@@ -99,10 +112,16 @@ class LinearEstimator(estimator.Estimator):
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
- optimizer: An instance of `tf.Optimizer` used to train the model. Defaults
- to FTRL optimizer.
+ optimizer: An instance of `tf.Optimizer` used to train the model. Can also
+ be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD') or a
+ callable. Defaults to the FTRL optimizer.
config: `RunConfig` object to configure the runtime settings.
partitioner: Optional. Partitioner for input layer.
+ sparse_combiner: A string specifying how to reduce if a categorical column
+ is multivalent. One of "mean", "sqrtn", and "sum" -- these are
+ effectively different ways to do example-level normalization, which can
+ be useful for bag-of-words features. For more details, see
+ @{tf.feature_column.linear_model$linear_model}.
"""
def _model_fn(features, labels, mode, config):
return linear_lib._linear_model_fn( # pylint: disable=protected-access
@@ -113,6 +132,7 @@ class LinearEstimator(estimator.Estimator):
feature_columns=tuple(feature_columns or []),
optimizer=optimizer,
partitioner=partitioner,
- config=config)
+ config=config,
+ sparse_combiner=sparse_combiner)
super(LinearEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
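[Editor's note] A minimal sketch of the new `sparse_combiner` argument (the feature column is assumed to be defined as in the docstring above):

    estimator = LinearEstimator(
        head=tf.contrib.estimator.multi_label_head(n_classes=3),
        feature_columns=[categorical_column_a],
        # 'mean' divides by the number of non-zero entries per example,
        # a form of example-level normalization for bag-of-words features.
        sparse_combiner='mean')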
diff --git a/tensorflow/contrib/factorization/kernels/wals_solver_ops.cc b/tensorflow/contrib/factorization/kernels/wals_solver_ops.cc
index bb9b835889..7fcae5ad8e 100644
--- a/tensorflow/contrib/factorization/kernels/wals_solver_ops.cc
+++ b/tensorflow/contrib/factorization/kernels/wals_solver_ops.cc
@@ -62,10 +62,11 @@ class WALSComputePartialLhsAndRhsOp : public OpKernel {
public:
explicit WALSComputePartialLhsAndRhsOp(OpKernelConstruction* context)
: OpKernel(context) {
- OP_REQUIRES_OK(context, context->MatchSignature(
- {DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT,
- DT_INT64, DT_FLOAT, DT_INT64, DT_BOOL},
- {DT_FLOAT, DT_FLOAT}));
+ OP_REQUIRES_OK(context,
+ context->MatchSignature(
+ {DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_INT64,
+ DT_FLOAT, DT_FLOAT, DT_INT64, DT_BOOL},
+ {DT_FLOAT, DT_FLOAT}));
}
void Compute(OpKernelContext* context) override {
@@ -75,8 +76,9 @@ class WALSComputePartialLhsAndRhsOp : public OpKernel {
const Tensor& input_weights = context->input(3);
const Tensor& input_indices = context->input(4);
const Tensor& input_values = context->input(5);
- const Tensor& input_block_size = context->input(6);
- const Tensor& input_is_transpose = context->input(7);
+ const Tensor& entry_weights = context->input(6);
+ const Tensor& input_block_size = context->input(7);
+ const Tensor& input_is_transpose = context->input(8);
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(factors.shape()),
InvalidArgument("Input factors should be a matrix."));
@@ -89,13 +91,33 @@ class WALSComputePartialLhsAndRhsOp : public OpKernel {
InvalidArgument("Input input_weights should be a vector."));
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices.shape()),
InvalidArgument("Input input_indices should be a matrix."));
+ OP_REQUIRES(
+ context, input_indices.dim_size(1) == 2,
+ InvalidArgument("Input input_indices should have shape (?, 2)."));
OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values.shape()),
InvalidArgument("Input input_values should be a vector"));
+ OP_REQUIRES(context, TensorShapeUtils::IsVector(entry_weights.shape()),
+ InvalidArgument("Input entry_weights should be a vector"));
+ OP_REQUIRES(context, input_indices.dim_size(0) == input_values.dim_size(0),
+ InvalidArgument("Input input_values' length should match the "
+ "first dimension of Input input_indices "));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(input_block_size.shape()),
InvalidArgument("Input input_block_size should be a scalar."));
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(input_is_transpose.shape()),
InvalidArgument("Input input_is_transpose should be a scalar."));
+ OP_REQUIRES(
+ context,
+ ((input_weights.dim_size(0) > 0 &&
+ factor_weights.dim_size(0) == factors.dim_size(0) &&
+ entry_weights.dim_size(0) == 0) ||
+ (input_weights.dim_size(0) == 0 && factor_weights.dim_size(0) == 0 &&
+ entry_weights.dim_size(0) == input_indices.dim_size(0))),
+ InvalidArgument("To specify the weights for observed entries, either "
+ "(1) entry_weights must be set or (2) input_weights "
+ "and factor_weights must be set, but not both."));
+ // TODO(yifanchen): Deprecate the support of input_weights and
+ // factor_weights.
const int64 factor_dim = factors.dim_size(1);
const int64 factors_size = factors.dim_size(0);
@@ -105,6 +127,7 @@ class WALSComputePartialLhsAndRhsOp : public OpKernel {
const auto& input_weights_vec = input_weights.vec<float>();
const float w_0 = unobserved_weights.scalar<float>()();
const auto& input_values_vec = input_values.vec<float>();
+ const auto& entry_weights_vec = entry_weights.vec<float>();
ConstEigenMatrixFloatMap factors_mat(factors.matrix<float>().data(),
factor_dim, factors_size);
@@ -134,6 +157,8 @@ class WALSComputePartialLhsAndRhsOp : public OpKernel {
return is_transpose ? indices_mat(0, i) : indices_mat(1, i);
};
+ const bool use_entry_weights = entry_weights_vec.size() > 0;
+
// TODO(rmlarsen): In principle, we should be using the SparseTensor class
// and machinery for iterating over groups, but the fact that class
// SparseTensor makes a complete copy of the matrix makes me reluctant to
@@ -195,6 +220,8 @@ class WALSComputePartialLhsAndRhsOp : public OpKernel {
// map using the hash of the thread id as the key.
//
// TODO(jpoulson): Switch to try_emplace once C++17 is supported
+ // TODO(b/72952120): Check whether the 3 lock-unlock pairs can be
+ // consolidated into just one.
map_mutex.lock();
const auto key_count = factor_batch_map.count(id_hash);
map_mutex.unlock();
@@ -213,6 +240,8 @@ class WALSComputePartialLhsAndRhsOp : public OpKernel {
CHECK_LE(shard.second, perm.size());
CHECK_LE(shard.first, shard.second);
const int64 input_index = get_input_index(perm[shard.first]);
+ const float input_weight =
+ use_entry_weights ? 1.0 : input_weights_vec(input_index);
// Accumulate the rhs and lhs terms in the normal equations
// for the non-zero elements in the row or column of the sparse matrix
// corresponding to input_index.
@@ -228,7 +257,8 @@ class WALSComputePartialLhsAndRhsOp : public OpKernel {
const int64 factor_index = get_factor_index(i);
const float input_value = input_values_vec(i);
const float weight =
- input_weights_vec(input_index) * factor_weights_vec(factor_index);
+ use_entry_weights ? entry_weights_vec(i)
+ : input_weight * factor_weights_vec(factor_index);
CHECK_GE(weight, 0);
factor_batch.col(num_batched) =
factors_mat.col(factor_index) * std::sqrt(weight);
diff --git a/tensorflow/contrib/factorization/ops/factorization_ops.cc b/tensorflow/contrib/factorization/ops/factorization_ops.cc
index 11ea36946e..1d31bd38c8 100644
--- a/tensorflow/contrib/factorization/ops/factorization_ops.cc
+++ b/tensorflow/contrib/factorization/ops/factorization_ops.cc
@@ -25,20 +25,33 @@ REGISTER_OP("WALSComputePartialLhsAndRhs")
.Input("input_weights: float32")
.Input("input_indices: int64")
.Input("input_values: float32")
+ .Input("entry_weights: float32")
.Input("input_block_size: int64")
.Input("input_is_transpose: bool")
.Output("partial_lhs: float32")
.Output("partial_rhs: float32")
.SetShapeFn(shape_inference::UnknownShape)
.Doc(R"(
-Computes the partial left-hand side and right-hand side of WALS update.
+Computes the partial left-hand side and right-hand side of the WALS update. For
+an observed entry input_indices[i]=[m, n] with value input_values[i]=v, the
+weight should be specified either through (1) entry_weights[i] or through
+(2) input_weights[m] * factor_weights[n] (if input_is_transpose is false) or
+input_weights[n] * factor_weights[m] (if input_is_transpose is true). Note that
+(1) and (2) may not both be specified at the same time: when one approach is
+used, the input tensors related to the other approach must be kept completely
+empty.
factors: Matrix of size m * k.
-factor_weights: Vector of size m. Corresponds to column weights
+factor_weights: Vector of size m. Corresponds to column weights. Should be empty
+ if entry_weights is used.
unobserved_weights: Scalar. Weight for unobserved input entries.
-input_weights: Vector of size n. Corresponds to row weights.
+input_weights: Vector of size n. Corresponds to row weights. Should be empty if
+ entry_weights is used.
input_indices: Indices for the input SparseTensor.
input_values: Values for the input SparseTensor.
+entry_weights: If not empty, this must be the same length as input_values and
+ is used as the per-entry non-zero weight. If this is used, input_weights and
+ factor_weights must be empty.
input_block_size: Scalar. Number of rows spanned by input.
input_is_transpose: If true, logically transposes the input for processing.
partial_lhs: 3-D tensor with size input_block_size x k x k.
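[Editor's note] In other words, exactly one of the two weighting schemes is in effect for a given call. A short Python sketch of the rule the kernel applies to an observed entry (names are illustrative; input_is_transpose is taken as false):

    def observed_entry_weight(i, m, n, entry_weights, input_weights,
                              factor_weights):
      # Scheme (1): explicit per-entry weights.
      if len(entry_weights) > 0:
        return entry_weights[i]
      # Scheme (2): row weight times column weight.
      return input_weights[m] * factor_weights[n]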
diff --git a/tensorflow/contrib/factorization/python/kernel_tests/wals_solver_ops_test.py b/tensorflow/contrib/factorization/python/kernel_tests/wals_solver_ops_test.py
index ba30fd9977..6c2f1d4608 100644
--- a/tensorflow/contrib/factorization/python/kernel_tests/wals_solver_ops_test.py
+++ b/tensorflow/contrib/factorization/python/kernel_tests/wals_solver_ops_test.py
@@ -55,7 +55,41 @@ class WalsSolverOpsTest(test.TestCase):
rhs_matrix] = gen_factorization_ops.wals_compute_partial_lhs_and_rhs(
self._column_factors, self._column_weights, self._unobserved_weights,
self._row_weights, sparse_block.indices, sparse_block.values,
- sparse_block.dense_shape[0], False)
+ [],
+ input_block_size=sparse_block.dense_shape[0],
+ input_is_transpose=False)
+ self.assertAllClose(lhs_tensor.eval(), [[
+ [0.014800, 0.017000, 0.019200],
+ [0.017000, 0.019600, 0.022200],
+ [0.019200, 0.022200, 0.025200],
+ ], [
+ [0.0064000, 0.0080000, 0.0096000],
+ [0.0080000, 0.0100000, 0.0120000],
+ [0.0096000, 0.0120000, 0.0144000],
+ ], [
+ [0.0099000, 0.0126000, 0.0153000],
+ [0.0126000, 0.0162000, 0.0198000],
+ [0.0153000, 0.0198000, 0.0243000],
+ ], [
+ [0.058800, 0.067200, 0.075600],
+ [0.067200, 0.076800, 0.086400],
+ [0.075600, 0.086400, 0.097200],
+ ]])
+ self.assertAllClose(rhs_matrix.eval(), [[0.019300, 0.023000, 0.026700],
+ [0.061600, 0.077000, 0.092400],
+ [0.160400, 0.220000, 0.279600],
+ [0.492800, 0.563200, 0.633600]])
+
+ def testWalsSolverLhsEntryWeights(self):
+ sparse_block = SparseBlock3x3()
+ with self.test_session():
+ [lhs_tensor,
+ rhs_matrix] = gen_factorization_ops.wals_compute_partial_lhs_and_rhs(
+ self._column_factors, [], self._unobserved_weights,
+ [], sparse_block.indices, sparse_block.values,
+ [0.01, 0.03, 0.04, 0.03, 0.06, 0.12],
+ input_block_size=sparse_block.dense_shape[0],
+ input_is_transpose=False)
self.assertAllClose(lhs_tensor.eval(), [[
[0.014800, 0.017000, 0.019200],
[0.017000, 0.019600, 0.022200],
diff --git a/tensorflow/contrib/factorization/python/ops/factorization_ops.py b/tensorflow/contrib/factorization/python/ops/factorization_ops.py
index 8f73274c2a..7ab70fbcfd 100644
--- a/tensorflow/contrib/factorization/python/ops/factorization_ops.py
+++ b/tensorflow/contrib/factorization/python/ops/factorization_ops.py
@@ -943,6 +943,7 @@ class WALSModel(object):
row_weights_slice,
new_sp_input.indices,
new_sp_input.values,
+ [],
num_rows,
transpose_input,
name="wals_compute_partial_lhs_rhs"))
diff --git a/tensorflow/contrib/framework/python/ops/variables_test.py b/tensorflow/contrib/framework/python/ops/variables_test.py
index 7e0c7dbec1..3c44630a51 100644
--- a/tensorflow/contrib/framework/python/ops/variables_test.py
+++ b/tensorflow/contrib/framework/python/ops/variables_test.py
@@ -106,8 +106,9 @@ class LocalVariableTest(test.TestCase):
def testResourceVariable(self):
a = variables_lib2.local_variable(0)
b = variables_lib2.local_variable(0, use_resource=True)
- self.assertEqual(type(a), variables_lib.Variable)
- self.assertEqual(type(b), resource_variable_ops.ResourceVariable)
+ self.assertTrue(isinstance(a, variables_lib.Variable))
+ self.assertFalse(isinstance(a, resource_variable_ops.ResourceVariable))
+ self.assertTrue(isinstance(b, resource_variable_ops.ResourceVariable))
class GlobalVariableTest(test.TestCase):
@@ -176,8 +177,9 @@ class GlobalVariableTest(test.TestCase):
def testResourceVariable(self):
a = variables_lib2.global_variable(0)
b = variables_lib2.global_variable(0, use_resource=True)
- self.assertEqual(type(a), variables_lib.Variable)
- self.assertEqual(type(b), resource_variable_ops.ResourceVariable)
+ self.assertTrue(isinstance(a, variables_lib.Variable))
+ self.assertFalse(isinstance(a, resource_variable_ops.ResourceVariable))
+ self.assertTrue(isinstance(b, resource_variable_ops.ResourceVariable))
class GlobalStepTest(test.TestCase):
diff --git a/tensorflow/contrib/gan/BUILD b/tensorflow/contrib/gan/BUILD
index b305f37791..10a8796bcb 100644
--- a/tensorflow/contrib/gan/BUILD
+++ b/tensorflow/contrib/gan/BUILD
@@ -45,6 +45,7 @@ py_library(
"//tensorflow/python:framework_ops",
"//tensorflow/python:init_ops",
"//tensorflow/python:training",
+ "//tensorflow/python:training_util",
"//tensorflow/python:variable_scope",
"//tensorflow/python/ops/distributions",
"//tensorflow/python/ops/losses",
@@ -59,6 +60,7 @@ py_test(
deps = [
":features",
":namedtuples",
+ ":random_tensor_pool",
":train",
"//tensorflow/contrib/framework:framework_py",
"//tensorflow/contrib/slim:learning",
@@ -70,6 +72,7 @@ py_test(
"//tensorflow/python:random_ops",
"//tensorflow/python:random_seed",
"//tensorflow/python:training",
+ "//tensorflow/python:training_util",
"//tensorflow/python:variable_scope",
"//tensorflow/python:variables",
"//tensorflow/python/ops/distributions",
@@ -188,6 +191,7 @@ py_test(
srcs = ["python/losses/python/tuple_losses_test.py"],
srcs_version = "PY2AND3",
deps = [
+ ":namedtuples",
":tuple_losses",
"//tensorflow/python:client_testlib",
"//tensorflow/python:constant_op",
@@ -344,9 +348,11 @@ py_library(
"//tensorflow/python:image_ops",
"//tensorflow/python:linalg_ops",
"//tensorflow/python:math_ops",
+ "//tensorflow/python:nn",
"//tensorflow/python:nn_ops",
"//tensorflow/python:platform",
"//tensorflow/python:util",
+ "@six_archive//:six",
],
)
@@ -470,12 +476,12 @@ py_library(
],
srcs_version = "PY2AND3",
deps = [
- ":head",
":namedtuples",
":summaries",
":train",
"//tensorflow/contrib/framework:framework_py",
"//tensorflow/python:framework_ops",
+ "//tensorflow/python:metrics",
"//tensorflow/python:util",
"//tensorflow/python:variable_scope",
"//tensorflow/python/estimator",
@@ -498,16 +504,19 @@ py_test(
"//tensorflow/core:protos_all_py",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
- "//tensorflow/python:control_flow_ops",
"//tensorflow/python:dtypes",
"//tensorflow/python:framework_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:metrics",
"//tensorflow/python:parsing_ops",
"//tensorflow/python:summary",
"//tensorflow/python:training",
- "//tensorflow/python/estimator:head",
+ "//tensorflow/python:training_util",
+ "//tensorflow/python:variable_scope",
"//tensorflow/python/estimator:model_fn",
"//tensorflow/python/estimator:numpy_io",
"//third_party/py/numpy",
+ "@absl_py//absl/testing:parameterized",
"@six_archive//:six",
],
)
diff --git a/tensorflow/contrib/gan/python/estimator/python/gan_estimator_impl.py b/tensorflow/contrib/gan/python/estimator/python/gan_estimator_impl.py
index 4092b32004..8e4affb9b4 100644
--- a/tensorflow/contrib/gan/python/estimator/python/gan_estimator_impl.py
+++ b/tensorflow/contrib/gan/python/estimator/python/gan_estimator_impl.py
@@ -24,11 +24,11 @@ import enum
from tensorflow.contrib.framework.python.ops import variables as variable_lib
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python import train as tfgan_train
-from tensorflow.contrib.gan.python.estimator.python import head as head_lib
from tensorflow.contrib.gan.python.eval.python import summaries as tfgan_summaries
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import ops
+from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import tf_inspect as inspect
@@ -154,94 +154,93 @@ class GANEstimator(estimator.Estimator):
use_loss_summaries: If `True`, add loss summaries. If `False`, does not.
If `None`, uses defaults.
config: `RunConfig` object to configure the runtime settings.
+
+ Raises:
+ ValueError: If loss functions aren't callable.
+ ValueError: If `use_loss_summaries` isn't boolean or `None`.
+ TypeError: If `get_hooks_fn` isn't callable or `None`.
"""
- # TODO(joelshor): Explicitly validate inputs.
+ if not callable(generator_loss_fn):
+ raise ValueError('generator_loss_fn must be callable.')
+ if not callable(discriminator_loss_fn):
+ raise ValueError('discriminator_loss_fn must be callable.')
+ if use_loss_summaries not in [True, False, None]:
+ raise ValueError('use_loss_summaries must be True, False or None.')
+ if get_hooks_fn is not None and not callable(get_hooks_fn):
+ raise TypeError('get_hooks_fn must be callable.')
def _model_fn(features, labels, mode):
- gopt = (generator_optimizer() if callable(generator_optimizer) else
- generator_optimizer)
- dopt = (discriminator_optimizer() if callable(discriminator_optimizer)
- else discriminator_optimizer)
- gan_head = head_lib.gan_head(
- generator_loss_fn, discriminator_loss_fn, gopt, dopt,
- use_loss_summaries, get_hooks_fn=get_hooks_fn,
- get_eval_metric_ops_fn=get_eval_metric_ops_fn)
- return _gan_model_fn(
- features, labels, mode, generator_fn, discriminator_fn, gan_head,
+ """GANEstimator model function."""
+ if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL,
+ model_fn_lib.ModeKeys.PREDICT]:
+ raise ValueError('Mode not recognized: %s' % mode)
+ real_data = labels # rename inputs for clarity
+ generator_inputs = features # rename inputs for clarity
+
+ # Make GANModel, which encapsulates the GAN model architectures.
+ gan_model = _get_gan_model(
+ mode, generator_fn, discriminator_fn, real_data, generator_inputs,
add_summaries)
+ # Make the EstimatorSpec, which incorporates the GANModel, losses, eval
+ # metrics, and optimizers (if required).
+ return _get_estimator_spec(
+ mode, gan_model, generator_loss_fn, discriminator_loss_fn,
+ get_eval_metric_ops_fn, generator_optimizer, discriminator_optimizer,
+ get_hooks_fn)
+
super(GANEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
-def _gan_model_fn(
- features,
- labels,
- mode,
- generator_fn,
- discriminator_fn,
- head,
- add_summaries=None,
- generator_scope_name='Generator'):
- """The `model_fn` for the GAN estimator.
-
- We make the following convention:
- features -> TFGAN's `generator_inputs`
- labels -> TFGAN's `real_data`
-
- Args:
- features: A dictionary to feed to generator. In the unconditional case,
- this might be just `noise`. In the conditional GAN case, this
- might be the generator's conditioning. The `generator_fn` determines
- what the required keys are.
- labels: Real data. Can be any structure, as long as `discriminator_fn`
- can accept it for the first argument.
- mode: Defines whether this is training, evaluation or prediction.
- See `ModeKeys`.
- generator_fn: A python lambda that takes `generator_inputs` as inputs and
- returns the outputs of the GAN generator.
- discriminator_fn: A python lambda that takes `real_data`/`generated data`
- and `generator_inputs`. Outputs a Tensor in the range [-inf, inf].
- head: A `Head` instance suitable for GANs.
- add_summaries: `None`, a single `SummaryType`, or a list of `SummaryType`.
- generator_scope_name: The name of the generator scope. We need this to be
- the same for GANModels produced by TFGAN's `train.gan_model` and the
- manually constructed ones for predictions.
-
- Returns:
- `ModelFnOps`
-
- Raises:
- ValueError: If `labels` isn't `None` during prediction.
- """
- real_data = labels
- generator_inputs = features
-
- if mode == model_fn_lib.ModeKeys.TRAIN:
- gan_model = _make_train_gan_model(
- generator_fn, discriminator_fn, real_data, generator_inputs,
- generator_scope_name, add_summaries)
- elif mode == model_fn_lib.ModeKeys.EVAL:
- gan_model = _make_eval_gan_model(
- generator_fn, discriminator_fn, real_data, generator_inputs,
- generator_scope_name, add_summaries)
- else:
+def _get_gan_model(
+ mode, generator_fn, discriminator_fn, real_data, generator_inputs,
+ add_summaries, generator_scope='Generator'):
+ """Makes the GANModel tuple, which encapsulates the GAN model architecture."""
+ if mode == model_fn_lib.ModeKeys.PREDICT:
if real_data is not None:
raise ValueError('`labels` must be `None` when mode is `predict`. '
'Instead, found %s' % real_data)
gan_model = _make_prediction_gan_model(
- generator_inputs, generator_fn, generator_scope_name)
+ generator_inputs, generator_fn, generator_scope)
+ else: # model_fn_lib.ModeKeys.TRAIN or model_fn_lib.ModeKeys.EVAL
+ gan_model = _make_gan_model(
+ generator_fn, discriminator_fn, real_data, generator_inputs,
+ generator_scope, add_summaries, mode)
- return head.create_estimator_spec(
- features=None,
- mode=mode,
- logits=gan_model,
- labels=None)
+ return gan_model
+
+
+def _get_estimator_spec(
+ mode, gan_model, generator_loss_fn, discriminator_loss_fn,
+ get_eval_metric_ops_fn, generator_optimizer, discriminator_optimizer,
+ get_hooks_fn=None):
+ """Get the EstimatorSpec for the current mode."""
+ if mode == model_fn_lib.ModeKeys.PREDICT:
+ estimator_spec = model_fn_lib.EstimatorSpec(
+ mode=mode, predictions=gan_model.generated_data)
+ else:
+ gan_loss = tfgan_tuples.GANLoss(
+ generator_loss=generator_loss_fn(gan_model),
+ discriminator_loss=discriminator_loss_fn(gan_model))
+ if mode == model_fn_lib.ModeKeys.EVAL:
+ estimator_spec = _get_eval_estimator_spec(
+ gan_model, gan_loss, get_eval_metric_ops_fn)
+ else: # model_fn_lib.ModeKeys.TRAIN:
+ gopt = (generator_optimizer() if callable(generator_optimizer) else
+ generator_optimizer)
+ dopt = (discriminator_optimizer() if callable(discriminator_optimizer)
+ else discriminator_optimizer)
+ get_hooks_fn = get_hooks_fn or tfgan_train.get_sequential_train_hooks()
+ estimator_spec = _get_train_estimator_spec(
+ gan_model, gan_loss, gopt, dopt, get_hooks_fn)
+
+ return estimator_spec
def _make_gan_model(generator_fn, discriminator_fn, real_data,
generator_inputs, generator_scope, add_summaries, mode):
- """Make a `GANModel`, and optionally pass in `mode`."""
+ """Construct a `GANModel`, and optionally pass in `mode`."""
# If network functions have an argument `mode`, pass mode to it.
if 'mode' in inspect.getargspec(generator_fn).args:
generator_fn = functools.partial(generator_fn, mode=mode)
@@ -264,22 +263,6 @@ def _make_gan_model(generator_fn, discriminator_fn, real_data,
return gan_model
-def _make_train_gan_model(generator_fn, discriminator_fn, real_data,
- generator_inputs, generator_scope, add_summaries):
- """Make a `GANModel` for training."""
- return _make_gan_model(generator_fn, discriminator_fn, real_data,
- generator_inputs, generator_scope, add_summaries,
- model_fn_lib.ModeKeys.TRAIN)
-
-
-def _make_eval_gan_model(generator_fn, discriminator_fn, real_data,
- generator_inputs, generator_scope, add_summaries):
- """Make a `GANModel` for evaluation."""
- return _make_gan_model(generator_fn, discriminator_fn, real_data,
- generator_inputs, generator_scope, add_summaries,
- model_fn_lib.ModeKeys.EVAL)
-
-
def _make_prediction_gan_model(generator_inputs, generator_fn, generator_scope):
"""Make a `GANModel` from just the generator."""
# If `generator_fn` has an argument `mode`, pass mode to it.
@@ -303,3 +286,46 @@ def _make_prediction_gan_model(generator_inputs, generator_fn, generator_scope):
discriminator_variables=None,
discriminator_scope=None,
discriminator_fn=None)
+
+
+def _get_eval_estimator_spec(gan_model, gan_loss, get_eval_metric_ops_fn=None,
+ name=None):
+ """Return an EstimatorSpec for the eval case."""
+ scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
+ with ops.name_scope(None, 'metrics',
+ [gan_loss.generator_loss,
+ gan_loss.discriminator_loss]):
+ def _summary_key(head_name, val):
+ return '%s/%s' % (val, head_name) if head_name else val
+ eval_metric_ops = {
+ _summary_key(name, 'generator_loss'):
+ metrics_lib.mean(gan_loss.generator_loss),
+ _summary_key(name, 'discriminator_loss'):
+ metrics_lib.mean(gan_loss.discriminator_loss)
+ }
+ if get_eval_metric_ops_fn is not None:
+ custom_eval_metric_ops = get_eval_metric_ops_fn(gan_model)
+ if not isinstance(custom_eval_metric_ops, dict):
+ raise TypeError('get_eval_metric_ops_fn must return a dict, '
+ 'received: {}'.format(custom_eval_metric_ops))
+ eval_metric_ops.update(custom_eval_metric_ops)
+ return model_fn_lib.EstimatorSpec(
+ mode=model_fn_lib.ModeKeys.EVAL,
+ predictions=gan_model.generated_data,
+ loss=scalar_loss,
+ eval_metric_ops=eval_metric_ops)
+
+
+def _get_train_estimator_spec(
+ gan_model, gan_loss, generator_optimizer, discriminator_optimizer,
+ get_hooks_fn, train_op_fn=tfgan_train.gan_train_ops):
+ """Return an EstimatorSpec for the train case."""
+ scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
+ train_ops = train_op_fn(gan_model, gan_loss, generator_optimizer,
+ discriminator_optimizer)
+ training_hooks = get_hooks_fn(train_ops)
+ return model_fn_lib.EstimatorSpec(
+ loss=scalar_loss,
+ mode=model_fn_lib.ModeKeys.TRAIN,
+ train_op=train_ops.global_step_inc_op,
+ training_hooks=training_hooks)
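[Editor's note] With this refactoring, `GANEstimator` assembles the `EstimatorSpec` itself instead of delegating to a `GANHead`. A rough usage sketch, with network functions and TFGAN tuple losses named as in the test file that follows (loss functions must now be callables, or `__init__` raises):

    gan_estimator = GANEstimator(
        generator_fn=generator_fn,
        discriminator_fn=discriminator_fn,
        generator_loss_fn=losses.wasserstein_generator_loss,
        discriminator_loss_fn=losses.wasserstein_discriminator_loss,
        generator_optimizer=training.GradientDescentOptimizer(0.1),
        discriminator_optimizer=training.GradientDescentOptimizer(0.1))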
diff --git a/tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py b/tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py
index 955482599b..9ac9c6ca9c 100644
--- a/tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py
+++ b/tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py
@@ -21,30 +21,30 @@ from __future__ import print_function
import shutil
import tempfile
+from absl.testing import parameterized
import numpy as np
import six
from tensorflow.contrib import layers
-from tensorflow.contrib.gan.python import namedtuples
+from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import gan_estimator_impl as estimator
from tensorflow.contrib.gan.python.losses.python import tuple_losses as losses
from tensorflow.contrib.learn.python.learn.learn_io import graph_io
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator import model_fn as model_fn_lib
-from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
-from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import parsing_ops
+from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
-from tensorflow.python.training import monitored_session
from tensorflow.python.training import training
from tensorflow.python.training import training_util
@@ -60,120 +60,109 @@ def discriminator_fn(data, unused_conditioning, mode):
return layers.fully_connected(data, 1)
-def mock_head(testcase, expected_generator_inputs, expected_real_data,
- generator_scope_name):
- """Returns a mock head that validates logits values and variable names."""
- discriminator_scope_name = 'Discriminator' # comes from TFGAN defaults
- generator_var_names = set([
- '%s/fully_connected/weights:0' % generator_scope_name,
- '%s/fully_connected/biases:0' % generator_scope_name])
- discriminator_var_names = set([
- '%s/fully_connected/weights:0' % discriminator_scope_name,
- '%s/fully_connected/biases:0' % discriminator_scope_name])
-
- def _create_estimator_spec(features, mode, logits, labels):
- gan_model = logits # renaming for clarity
- is_predict = mode == model_fn_lib.ModeKeys.PREDICT
- testcase.assertIsNone(features)
- testcase.assertIsNone(labels)
- testcase.assertIsInstance(gan_model, namedtuples.GANModel)
-
- trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
- expected_var_names = (generator_var_names if is_predict else
- generator_var_names | discriminator_var_names)
- testcase.assertItemsEqual(expected_var_names,
- [var.name for var in trainable_vars])
-
- assertions = []
- def _or_none(x):
- return None if is_predict else x
- testcase.assertEqual(expected_generator_inputs, gan_model.generator_inputs)
- # TODO(joelshor): Add check on `generated_data`.
- testcase.assertItemsEqual(
- generator_var_names,
- set([x.name for x in gan_model.generator_variables]))
- testcase.assertEqual(generator_scope_name, gan_model.generator_scope.name)
- testcase.assertEqual(_or_none(expected_real_data), gan_model.real_data)
- # TODO(joelshor): Add check on `discriminator_real_outputs`.
- # TODO(joelshor): Add check on `discriminator_gen_outputs`.
- if is_predict:
- testcase.assertIsNone(gan_model.discriminator_scope)
- else:
- testcase.assertEqual(discriminator_scope_name,
- gan_model.discriminator_scope.name)
-
- with ops.control_dependencies(assertions):
- if mode == model_fn_lib.ModeKeys.TRAIN:
- return model_fn_lib.EstimatorSpec(
- mode=mode, loss=array_ops.zeros([]),
- train_op=control_flow_ops.no_op(), training_hooks=[])
- elif mode == model_fn_lib.ModeKeys.EVAL:
- return model_fn_lib.EstimatorSpec(
- mode=mode, predictions=gan_model.generated_data,
- loss=array_ops.zeros([]))
- elif mode == model_fn_lib.ModeKeys.PREDICT:
- return model_fn_lib.EstimatorSpec(
- mode=mode, predictions=gan_model.generated_data)
- else:
- testcase.fail('Invalid mode: {}'.format(mode))
-
- head = test.mock.NonCallableMagicMock(spec=head_lib._Head)
- head.create_estimator_spec = test.mock.MagicMock(
- wraps=_create_estimator_spec)
-
- return head
-
-
-class GANModelFnTest(test.TestCase):
- """Tests that _gan_model_fn passes expected logits to mock head."""
-
- def setUp(self):
- self._model_dir = tempfile.mkdtemp()
-
- def tearDown(self):
- if self._model_dir:
- writer_cache.FileWriterCache.clear()
- shutil.rmtree(self._model_dir)
+class GetGANModelTest(test.TestCase, parameterized.TestCase):
+ """Tests that `GetGANModel` produces the correct model."""
- def _test_logits_helper(self, mode):
- """Tests that the expected logits are passed to mock head."""
+ @parameterized.named_parameters(
+ ('train', model_fn_lib.ModeKeys.TRAIN),
+ ('eval', model_fn_lib.ModeKeys.EVAL),
+ ('predict', model_fn_lib.ModeKeys.PREDICT))
+ def test_get_gan_model(self, mode):
with ops.Graph().as_default():
- training_util.get_or_create_global_step()
- generator_inputs = {'x': array_ops.zeros([5, 4])}
- real_data = (None if mode == model_fn_lib.ModeKeys.PREDICT else
- array_ops.zeros([5, 4]))
- generator_scope_name = 'generator'
- head = mock_head(self,
- expected_generator_inputs=generator_inputs,
- expected_real_data=real_data,
- generator_scope_name=generator_scope_name)
- estimator_spec = estimator._gan_model_fn(
- features=generator_inputs,
- labels=real_data,
- mode=mode,
- generator_fn=generator_fn,
- discriminator_fn=discriminator_fn,
- generator_scope_name=generator_scope_name,
- head=head)
- with monitored_session.MonitoredTrainingSession(
- checkpoint_dir=self._model_dir) as sess:
- if mode == model_fn_lib.ModeKeys.TRAIN:
- sess.run(estimator_spec.train_op)
- elif mode == model_fn_lib.ModeKeys.EVAL:
- sess.run(estimator_spec.loss)
- elif mode == model_fn_lib.ModeKeys.PREDICT:
- sess.run(estimator_spec.predictions)
- else:
- self.fail('Invalid mode: {}'.format(mode))
-
- def test_logits_predict(self):
- self._test_logits_helper(model_fn_lib.ModeKeys.PREDICT)
-
- def test_logits_eval(self):
- self._test_logits_helper(model_fn_lib.ModeKeys.EVAL)
-
- def test_logits_train(self):
- self._test_logits_helper(model_fn_lib.ModeKeys.TRAIN)
+ generator_inputs = {'x': array_ops.ones([3, 4])}
+ real_data = (array_ops.zeros([3, 4]) if
+ mode != model_fn_lib.ModeKeys.PREDICT else None)
+ gan_model = estimator._get_gan_model(
+ mode, generator_fn, discriminator_fn, real_data, generator_inputs,
+ add_summaries=False)
+
+ self.assertEqual(generator_inputs, gan_model.generator_inputs)
+ self.assertIsNotNone(gan_model.generated_data)
+ self.assertEqual(2, len(gan_model.generator_variables)) # 1 FC layer
+ self.assertIsNotNone(gan_model.generator_fn)
+ if mode == model_fn_lib.ModeKeys.PREDICT:
+ self.assertIsNone(gan_model.real_data)
+ self.assertIsNone(gan_model.discriminator_real_outputs)
+ self.assertIsNone(gan_model.discriminator_gen_outputs)
+ self.assertIsNone(gan_model.discriminator_variables)
+ self.assertIsNone(gan_model.discriminator_scope)
+ self.assertIsNone(gan_model.discriminator_fn)
+ else:
+ self.assertIsNotNone(gan_model.real_data)
+ self.assertIsNotNone(gan_model.discriminator_real_outputs)
+ self.assertIsNotNone(gan_model.discriminator_gen_outputs)
+ self.assertEqual(2, len(gan_model.discriminator_variables)) # 1 FC layer
+ self.assertIsNotNone(gan_model.discriminator_scope)
+ self.assertIsNotNone(gan_model.discriminator_fn)
+
+
+def get_dummy_gan_model():
+ # TODO(joelshor): Find a better way of creating a variable scope.
+ with variable_scope.variable_scope('generator') as gen_scope:
+ gen_var = variable_scope.get_variable('dummy_var', initializer=0.0)
+ with variable_scope.variable_scope('discriminator') as dis_scope:
+ dis_var = variable_scope.get_variable('dummy_var', initializer=0.0)
+ return tfgan_tuples.GANModel(
+ generator_inputs=None,
+ generated_data=array_ops.ones([3, 4]),
+ generator_variables=[gen_var],
+ generator_scope=gen_scope,
+ generator_fn=None,
+ real_data=array_ops.zeros([3, 4]),
+ discriminator_real_outputs=array_ops.ones([1, 2, 3]) * dis_var,
+ discriminator_gen_outputs=array_ops.ones([1, 2, 3]) * gen_var * dis_var,
+ discriminator_variables=[dis_var],
+ discriminator_scope=dis_scope,
+ discriminator_fn=None)
+
+
+def dummy_loss_fn(gan_model):
+ return math_ops.reduce_sum(gan_model.discriminator_real_outputs -
+ gan_model.discriminator_gen_outputs)
+
+
+def get_metrics(gan_model):
+ return {
+ 'mse_custom_metric': metrics_lib.mean_squared_error(
+ gan_model.real_data, gan_model.generated_data)
+ }
+
+
+class GetEstimatorSpecTest(test.TestCase, parameterized.TestCase):
+ """Tests that the EstimatorSpec is constructed appropriately."""
+
+ @classmethod
+ def setUpClass(cls):
+ cls._generator_optimizer = training.GradientDescentOptimizer(1.0)
+ cls._discriminator_optimizer = training.GradientDescentOptimizer(1.0)
+
+ @parameterized.named_parameters(
+ ('train', model_fn_lib.ModeKeys.TRAIN),
+ ('eval', model_fn_lib.ModeKeys.EVAL),
+ ('predict', model_fn_lib.ModeKeys.PREDICT))
+ def test_get_estimator_spec(self, mode):
+ with ops.Graph().as_default():
+ self._gan_model = get_dummy_gan_model()
+ spec = estimator._get_estimator_spec(
+ mode,
+ self._gan_model,
+ generator_loss_fn=dummy_loss_fn,
+ discriminator_loss_fn=dummy_loss_fn,
+ get_eval_metric_ops_fn=get_metrics,
+ generator_optimizer=self._generator_optimizer,
+ discriminator_optimizer=self._discriminator_optimizer)
+
+ self.assertEqual(mode, spec.mode)
+ if mode == model_fn_lib.ModeKeys.PREDICT:
+ self.assertEqual(self._gan_model.generated_data, spec.predictions)
+ elif mode == model_fn_lib.ModeKeys.TRAIN:
+ self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
+ self.assertIsNotNone(spec.train_op)
+ self.assertIsNotNone(spec.training_hooks)
+ elif mode == model_fn_lib.ModeKeys.EVAL:
+ self.assertEqual(self._gan_model.generated_data, spec.predictions)
+ self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
+ self.assertIsNotNone(spec.eval_metric_ops)
# TODO(joelshor): Add pandas test.
@@ -195,12 +184,6 @@ class GANEstimatorIntegrationTest(test.TestCase):
lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
return training.GradientDescentOptimizer(lr)
- def get_metrics(gan_model):
- return {
- 'mse_custom_metric': metrics_lib.mean_squared_error(
- gan_model.real_data, gan_model.generated_data)
- }
-
gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
est = estimator.GANEstimator(
diff --git a/tensorflow/contrib/gan/python/estimator/python/head_impl.py b/tensorflow/contrib/gan/python/estimator/python/head_impl.py
index 5b5557bd8f..1a0ee6dfc4 100644
--- a/tensorflow/contrib/gan/python/estimator/python/head_impl.py
+++ b/tensorflow/contrib/gan/python/estimator/python/head_impl.py
@@ -27,16 +27,21 @@ from tensorflow.python.estimator.canned import head
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import ops
from tensorflow.python.ops import metrics as metrics_lib
+from tensorflow.python.util import deprecation
__all__ = [
'GANHead',
'gan_head',
]
+
def _summary_key(head_name, val):
return '%s/%s' % (val, head_name) if head_name else val
+@deprecation.deprecated(
+ None, 'Please use tf.contrib.gan.GANEstimator without explicitly making a '
+ 'GANHead.')
def gan_head(generator_loss_fn, discriminator_loss_fn, generator_optimizer,
discriminator_optimizer, use_loss_summaries=True,
get_hooks_fn=tfgan_train.get_sequential_train_hooks(),
@@ -77,6 +82,9 @@ def gan_head(generator_loss_fn, discriminator_loss_fn, generator_optimizer,
class GANHead(head._Head): # pylint: disable=protected-access
"""`Head` for a GAN."""
+ @deprecation.deprecated(
+ None, 'Please use tf.contrib.gan.GANEstimator without explicitly making '
+ 'a GANHead.')
def __init__(self, generator_loss_fn, discriminator_loss_fn,
generator_optimizer, discriminator_optimizer,
use_loss_summaries=True,
@@ -103,9 +111,20 @@ class GANHead(head._Head): # pylint: disable=protected-access
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`.
"""
+
+ if not callable(generator_loss_fn):
+ raise TypeError('generator_loss_fn must be callable.')
+ if not callable(discriminator_loss_fn):
+ raise TypeError('discriminator_loss_fn must be callable.')
+ if use_loss_summaries not in [True, False, None]:
+ raise ValueError('use_loss_summaries must be True, False or None.')
+ if get_hooks_fn is not None and not callable(get_hooks_fn):
+ raise TypeError('get_hooks_fn must be callable.')
+ if name is not None and not isinstance(name, str):
+ raise TypeError('name must be a string.')
+
if get_hooks_fn is None:
get_hooks_fn = tfgan_train.get_sequential_train_hooks()
- # TODO(joelshor): Validate inputs.
if use_loss_summaries in [True, False]:
generator_loss_fn = functools.partial(
diff --git a/tensorflow/contrib/gan/python/estimator/python/head_test.py b/tensorflow/contrib/gan/python/estimator/python/head_test.py
index 5309d87765..8205bc889d 100644
--- a/tensorflow/contrib/gan/python/estimator/python/head_test.py
+++ b/tensorflow/contrib/gan/python/estimator/python/head_test.py
@@ -67,7 +67,7 @@ class GANHeadTest(test.TestCase):
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0),
get_eval_metric_ops_fn=self.get_metrics)
- self.assertTrue(isinstance(self.gan_head, head.GANHead))
+ self.assertIsInstance(self.gan_head, head.GANHead)
def get_metrics(self, gan_model):
self.assertTrue(isinstance(gan_model, tfgan_tuples.GANModel))
diff --git a/tensorflow/contrib/gdr/gdr_memory_manager.cc b/tensorflow/contrib/gdr/gdr_memory_manager.cc
index 81e70ae30a..1435e19109 100644
--- a/tensorflow/contrib/gdr/gdr_memory_manager.cc
+++ b/tensorflow/contrib/gdr/gdr_memory_manager.cc
@@ -34,8 +34,9 @@ limitations under the License.
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#if GOOGLE_CUDA
+#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/common_runtime/gpu/gpu_util.h"
-#include "tensorflow/core/common_runtime/gpu/process_state.h"
+#include "tensorflow/core/common_runtime/process_state.h"
#endif // GOOGLE_CUDA
#include "tensorflow/core/framework/allocator_registry.h"
#include "tensorflow/core/lib/core/status.h"
@@ -274,7 +275,7 @@ Status GdrMemoryManager::Init() {
Allocator* allocators[] = {
#if GOOGLE_CUDA
- ProcessState::singleton()->GetCUDAHostAllocator(0),
+ GPUProcessState::singleton()->GetCUDAHostAllocator(0),
ProcessState::singleton()->GetCPUAllocator(0),
#endif // GOOGLE_CUDA
cpu_allocator(),
@@ -308,7 +309,8 @@ Status GdrMemoryManager::Init() {
if (IsGDRAvailable()) {
// Note we don't free allocated GPU memory so there is no free visitor
int32_t bus_id = TryToReadNumaNode(listening_->verbs->device) + 1;
- ProcessState::singleton()->AddGPUAllocVisitor(bus_id, cuda_alloc_visitor);
+ GPUProcessState::singleton()->AddGPUAllocVisitor(bus_id,
+ cuda_alloc_visitor);
LOG(INFO) << "Instrumenting GPU allocator with bus_id " << bus_id;
}
#endif // GOOGLE_CUDA
@@ -430,7 +432,7 @@ void GdrMemoryManager::TransportOptionsFromTensor(
#if GOOGLE_CUDA
if (!on_host) {
- Allocator* alloc = ProcessState::singleton()->GetCUDAHostAllocator(0);
+ Allocator* alloc = GPUProcessState::singleton()->GetCUDAHostAllocator(0);
Tensor* host_copy = new Tensor(alloc, tensor.dtype(), tensor.shape());
GPUUtil::CopyGPUTensorToCPU(
device, device_context, &tensor, host_copy,
@@ -532,7 +534,7 @@ void GdrMemoryManager::TensorFromTransportOptions(
Tensor host_copy;
#if GOOGLE_CUDA
if (mr == nullptr && !on_host) {
- Allocator* alloc = ProcessState::singleton()->GetCUDAHostAllocator(0);
+ Allocator* alloc = GPUProcessState::singleton()->GetCUDAHostAllocator(0);
host_copy = Tensor(alloc, tensor->dtype(), tensor->shape());
buffer = DMAHelper::buffer(&host_copy);
addr = buffer->data();
diff --git a/tensorflow/contrib/image/kernels/image_ops.cc b/tensorflow/contrib/image/kernels/image_ops.cc
index c2e32da133..022e17d139 100644
--- a/tensorflow/contrib/image/kernels/image_ops.cc
+++ b/tensorflow/contrib/image/kernels/image_ops.cc
@@ -35,6 +35,7 @@ typedef Eigen::ThreadPoolDevice CPUDevice;
template struct FillProjectiveTransform<CPUDevice, uint8>;
template struct FillProjectiveTransform<CPUDevice, int32>;
template struct FillProjectiveTransform<CPUDevice, int64>;
+template struct FillProjectiveTransform<CPUDevice, Eigen::half>;
template struct FillProjectiveTransform<CPUDevice, float>;
template struct FillProjectiveTransform<CPUDevice, double>;
@@ -99,6 +100,7 @@ class ImageProjectiveTransform : public OpKernel {
TF_CALL_uint8(REGISTER);
TF_CALL_int32(REGISTER);
TF_CALL_int64(REGISTER);
+TF_CALL_half(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
diff --git a/tensorflow/contrib/image/kernels/image_ops.h b/tensorflow/contrib/image/kernels/image_ops.h
index ad50133061..209aa24548 100644
--- a/tensorflow/contrib/image/kernels/image_ops.h
+++ b/tensorflow/contrib/image/kernels/image_ops.h
@@ -21,6 +21,7 @@ limitations under the License.
#define EIGEN_USE_THREADS
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
+
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/types.h"
@@ -58,6 +59,11 @@ class ProjectiveGenerator {
? transforms_.data()
: &transforms_.data()[transforms_.dimension(1) * coords[0]];
float projection = transform[6] * output_x + transform[7] * output_y + 1.f;
+ if (projection == 0) {
+ // Return the fill value (0) for infinite coordinates,
+ // which are outside the input image.
+ return T(0);
+ }
const float input_x =
(transform[0] * output_x + transform[1] * output_y + transform[2]) /
projection;
@@ -105,21 +111,21 @@ class ProjectiveGenerator {
// f(x, y_floor) = (x_ceil - x) / (x_ceil - x_floor) * f(x_floor, y_floor)
// + (x - x_floor) / (x_ceil - x_floor) * f(x_ceil, y_floor)
const float value_yfloor =
- (x_ceil - x) * read_with_fill_value(batch, DenseIndex(y_floor),
- DenseIndex(x_floor), channel,
- fill_value) +
- (x - x_floor) * read_with_fill_value(batch, DenseIndex(y_floor),
- DenseIndex(x_ceil), channel,
- fill_value);
+ (x_ceil - x) * static_cast<float>(read_with_fill_value(
+ batch, DenseIndex(y_floor), DenseIndex(x_floor),
+ channel, fill_value)) +
+ (x - x_floor) * static_cast<float>(read_with_fill_value(
+ batch, DenseIndex(y_floor), DenseIndex(x_ceil),
+ channel, fill_value));
// f(x, y_ceil) = (x_ceil - x) / (x_ceil - x_floor) * f(x_floor, y_ceil)
// + (x - x_floor) / (x_ceil - x_floor) * f(x_ceil, y_ceil)
const float value_yceil =
- (x_ceil - x) * read_with_fill_value(batch, DenseIndex(y_ceil),
- DenseIndex(x_floor), channel,
- fill_value) +
- (x - x_floor) * read_with_fill_value(batch, DenseIndex(y_ceil),
- DenseIndex(x_ceil), channel,
- fill_value);
+ (x_ceil - x) * static_cast<float>(read_with_fill_value(
+ batch, DenseIndex(y_ceil), DenseIndex(x_floor),
+ channel, fill_value)) +
+ (x - x_floor) * static_cast<float>(read_with_fill_value(
+ batch, DenseIndex(y_ceil), DenseIndex(x_ceil),
+ channel, fill_value));
// f(x, y) = (y_ceil - y) / (y_ceil - y_floor) * f(x, y_floor)
// + (y - y_floor) / (y_ceil - y_floor) * f(x, y_ceil)
return T((y_ceil - y) * value_yfloor + (y - y_floor) * value_yceil);
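[Editor's note] The hunk above only wraps the reads in float casts (needed now that T can be Eigen::half); the math is the standard bilinear blend spelled out in the comments. A line-for-line sketch in Python, assuming `read(y, x)` returns the pixel or fill value:

    import math

    def bilinear(read, x, y):
      x_floor, y_floor = math.floor(x), math.floor(y)
      x_ceil, y_ceil = x_floor + 1, y_floor + 1
      # Blend along x on the two bracketing rows...
      value_yfloor = ((x_ceil - x) * read(y_floor, x_floor) +
                      (x - x_floor) * read(y_floor, x_ceil))
      value_yceil = ((x_ceil - x) * read(y_ceil, x_floor) +
                     (x - x_floor) * read(y_ceil, x_ceil))
      # ...then blend the two rows along y.
      return (y_ceil - y) * value_yfloor + (y - y_floor) * value_yceil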
diff --git a/tensorflow/contrib/image/ops/image_ops.cc b/tensorflow/contrib/image/ops/image_ops.cc
index ebdcaea7ab..e59f1bf844 100644
--- a/tensorflow/contrib/image/ops/image_ops.cc
+++ b/tensorflow/contrib/image/ops/image_ops.cc
@@ -29,7 +29,7 @@ using shape_inference::ShapeHandle;
REGISTER_OP("ImageProjectiveTransform")
.Input("images: dtype")
.Input("transforms: float32")
- .Attr("dtype: {uint8, int32, int64, float32, float64}")
+ .Attr("dtype: {uint8, int32, int64, float16, float32, float64}")
.Attr("interpolation: string")
.Output("transformed_images: dtype")
.SetShapeFn([](InferenceContext* c) {
diff --git a/tensorflow/contrib/image/python/kernel_tests/image_ops_test.py b/tensorflow/contrib/image/python/kernel_tests/image_ops_test.py
index b50177ae56..62a22dcf34 100644
--- a/tensorflow/contrib/image/python/kernel_tests/image_ops_test.py
+++ b/tensorflow/contrib/image/python/kernel_tests/image_ops_test.py
@@ -30,7 +30,8 @@ from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
_DTYPES = set(
- [dtypes.uint8, dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])
+ [dtypes.uint8, dtypes.int32, dtypes.int64,
+ dtypes.float16, dtypes.float32, dtypes.float64])
class ImageOpsTest(test_util.TensorFlowTestCase):
@@ -127,6 +128,23 @@ class ImageOpsTest(test_util.TensorFlowTestCase):
[0, 1, 0, 1],
[0, 1, 1, 1]])
+ def test_extreme_projective_transform(self):
+ for dtype in _DTYPES:
+ with self.test_session():
+ image = constant_op.constant(
+ [[1, 0, 1, 0],
+ [0, 1, 0, 1],
+ [1, 0, 1, 0],
+ [0, 1, 0, 1]], dtype=dtype)
+ transformation = constant_op.constant([1, 0, 0, 0, 1, 0, -1, 0],
+ dtypes.float32)
+ image_transformed = image_ops.transform(image, transformation)
+ self.assertAllEqual(image_transformed.eval(),
+ [[1, 0, 0, 0],
+ [0, 0, 0, 0],
+ [1, 0, 0, 0],
+ [0, 0, 0, 0]])
+
def test_bilinear(self):
with self.test_session():
image = constant_op.constant(
diff --git a/tensorflow/contrib/image/python/ops/image_ops.py b/tensorflow/contrib/image/python/ops/image_ops.py
index cd984c8054..86b0ffe9a0 100644
--- a/tensorflow/contrib/image/python/ops/image_ops.py
+++ b/tensorflow/contrib/image/python/ops/image_ops.py
@@ -33,7 +33,8 @@ _image_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_image_ops.so"))
_IMAGE_DTYPES = set(
- [dtypes.uint8, dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])
+ [dtypes.uint8, dtypes.int32, dtypes.int64,
+ dtypes.float16, dtypes.float32, dtypes.float64])
ops.RegisterShape("ImageConnectedComponents")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ImageProjectiveTransform")(common_shapes.call_cpp_shape_fn)
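[Editor's note] With `float16` in `_IMAGE_DTYPES`, half-precision images now pass the dtype check in `tf.contrib.image.transform`. A short sketch, borrowing the transform from `test_extreme_projective_transform` above:

    import tensorflow as tf

    image = tf.constant(
        [[1, 0, 1, 0],
         [0, 1, 0, 1],
         [1, 0, 1, 0],
         [0, 1, 0, 1]], dtype=tf.float16)
    # 8-parameter projective transform [a0, a1, a2, b0, b1, b2, c0, c1].
    transform = tf.constant([1, 0, 0, 0, 1, 0, -1, 0], dtype=tf.float32)
    transformed = tf.contrib.image.transform(image, transform)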
diff --git a/tensorflow/contrib/kafka/ops/kafka_ops.cc b/tensorflow/contrib/kafka/ops/kafka_ops.cc
new file mode 100644
index 0000000000..8cdf16103b
--- /dev/null
+++ b/tensorflow/contrib/kafka/ops/kafka_ops.cc
@@ -0,0 +1,44 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/framework/common_shape_fns.h"
+#include "tensorflow/core/framework/op.h"
+#include "tensorflow/core/framework/shape_inference.h"
+
+namespace tensorflow {
+
+REGISTER_OP("KafkaDataset")
+ .Input("topics: string")
+ .Input("servers: string")
+ .Input("group: string")
+ .Input("eof: bool")
+ .Input("timeout: int64")
+ .Output("handle: variant")
+ .SetIsStateful()
+ .SetShapeFn(shape_inference::ScalarShape)
+ .Doc(R"doc(
+Creates a dataset that emits the messages of one or more Kafka topics.
+
+topics: A `tf.string` tensor containing one or more subscriptions,
+ in the format of [topic:partition:offset:length];
+ by default, length is -1 for unlimited.
+servers: A list of bootstrap servers.
+group: The consumer group id.
+eof: If True, the kafka reader will stop on EOF.
+timeout: The timeout value for the Kafka Consumer to wait
+ (in milliseconds).
+)doc");
+
+} // namespace tensorflow
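[Editor's note] The registration above only declares the op; a hedged usage sketch of the Python wrapper in `tensorflow/contrib/kafka` (argument values are assumptions mirroring the op inputs):

    from tensorflow.contrib.kafka.python.ops import kafka_dataset_ops

    # Subscription format per the op doc: topic:partition:offset:length,
    # where length -1 means unlimited.
    dataset = kafka_dataset_ops.KafkaDataset(
        topics=["my_topic:0:0:-1"],   # hypothetical topic
        servers="localhost:9092",
        group="my_consumer_group",
        eof=True,       # stop on end of stream
        timeout=1000)   # poll timeout in milliseconds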
diff --git a/tensorflow/contrib/kinesis/BUILD b/tensorflow/contrib/kinesis/BUILD
new file mode 100644
index 0000000000..25443d0ad4
--- /dev/null
+++ b/tensorflow/contrib/kinesis/BUILD
@@ -0,0 +1,113 @@
+package(default_visibility = ["//tensorflow:internal"])
+
+licenses(["notice"]) # Apache 2.0
+
+exports_files(["LICENSE"])
+
+load(
+ "//tensorflow:tensorflow.bzl",
+ "tf_custom_op_library",
+ "tf_custom_op_py_library",
+ "tf_gen_op_libs",
+ "tf_gen_op_wrapper_py",
+ "tf_kernel_library",
+ "tf_py_test",
+)
+
+py_library(
+ name = "kinesis",
+ srcs = ["__init__.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":dataset_ops",
+ ],
+)
+
+tf_custom_op_library(
+ name = "_dataset_ops.so",
+ srcs = ["ops/dataset_ops.cc"],
+ deps = [":dataset_kernels"],
+)
+
+tf_gen_op_libs(
+ op_lib_names = ["dataset_ops"],
+)
+
+cc_library(
+ name = "dataset_kernels",
+ srcs = [
+ "kernels/kinesis_dataset_ops.cc",
+ ],
+ deps = [
+ "//tensorflow/core:framework_headers_lib",
+ "//tensorflow/core/platform/s3:aws_crypto",
+ "//third_party/eigen3",
+ "@aws",
+ "@protobuf_archive//:protobuf_headers",
+ ],
+ alwayslink = 1,
+)
+
+py_library(
+ name = "dataset_ops",
+ srcs = [
+ "python/ops/kinesis_dataset_ops.py",
+ ],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":kinesis_op_loader",
+ "//tensorflow/python:dataset_ops_gen",
+ "//tensorflow/python:util",
+ "//tensorflow/python/data/ops:dataset_ops",
+ "//tensorflow/python/data/util:nest",
+ ],
+)
+
+tf_gen_op_wrapper_py(
+ name = "gen_dataset_ops",
+ out = "python/ops/gen_dataset_ops.py",
+ deps = ["//tensorflow/contrib/kinesis:dataset_ops_op_lib"],
+)
+
+tf_kernel_library(
+ name = "dataset_ops_kernels",
+ deps = [
+ ":dataset_kernels",
+ "//tensorflow/core:framework",
+ ],
+ alwayslink = 1,
+)
+
+tf_custom_op_py_library(
+ name = "kinesis_op_loader",
+ srcs = ["python/ops/kinesis_op_loader.py"],
+ dso = ["//tensorflow/contrib/kinesis:_dataset_ops.so"],
+ kernels = [
+ ":dataset_ops_kernels",
+ "//tensorflow/contrib/kinesis:dataset_ops_op_lib",
+ ],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":gen_dataset_ops",
+ "//tensorflow/contrib/util:util_py",
+ "//tensorflow/python:platform",
+ ],
+)
+
+tf_py_test(
+ name = "kinesis_test",
+ srcs = ["python/kernel_tests/kinesis_test.py"],
+ additional_deps = [
+ ":kinesis",
+ "//third_party/py/numpy",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform_test",
+ ],
+ tags = [
+ "manual",
+ "no_windows",
+ "notap",
+ ],
+)
diff --git a/tensorflow/contrib/kinesis/__init__.py b/tensorflow/contrib/kinesis/__init__.py
new file mode 100644
index 0000000000..3824b8ae75
--- /dev/null
+++ b/tensorflow/contrib/kinesis/__init__.py
@@ -0,0 +1,32 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Kinesis Dataset.
+
+@@KinesisDataset
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.kinesis.python.ops.kinesis_dataset_ops import KinesisDataset
+
+from tensorflow.python.util.all_util import remove_undocumented
+
+_allowed_symbols = [
+ "KinesisDataset",
+]
+
+remove_undocumented(__name__)
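A quick orientation note before the kernel implementation: the module above exposes `KinesisDataset` under the contrib namespace. A minimal sketch of consuming it from user code, assuming a TensorFlow build that bundles this contrib package; the stream name is a placeholder:

```python
# Minimal sketch, assuming a TensorFlow build that includes contrib/kinesis.
# "my_stream" is a placeholder stream name, not a real resource.
import tensorflow as tf

dataset = tf.contrib.kinesis.KinesisDataset("my_stream", read_indefinitely=False)
```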
diff --git a/tensorflow/contrib/kinesis/kernels/kinesis_dataset_ops.cc b/tensorflow/contrib/kinesis/kernels/kinesis_dataset_ops.cc
new file mode 100644
index 0000000000..3212279c4c
--- /dev/null
+++ b/tensorflow/contrib/kinesis/kernels/kinesis_dataset_ops.cc
@@ -0,0 +1,359 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <aws/core/Aws.h>
+#include <aws/core/config/AWSProfileConfigLoader.h>
+#include <aws/core/utils/Outcome.h>
+#include <aws/kinesis/KinesisClient.h>
+#include <aws/kinesis/model/DescribeStreamRequest.h>
+#include <aws/kinesis/model/GetRecordsRequest.h>
+#include <aws/kinesis/model/GetShardIteratorRequest.h>
+#include <aws/kinesis/model/PutRecordsRequest.h>
+#include <aws/kinesis/model/ShardIteratorType.h>
+#include "tensorflow/core/framework/dataset.h"
+#include "tensorflow/core/platform/s3/aws_crypto.h"
+
+namespace tensorflow {
+namespace {
+
+Aws::Client::ClientConfiguration* InitializeDefaultClientConfig() {
+ static Aws::Client::ClientConfiguration config;
+ const char* endpoint = getenv("KINESIS_ENDPOINT");
+ if (endpoint) {
+ config.endpointOverride = Aws::String(endpoint);
+ }
+ const char* region = getenv("AWS_REGION");
+ if (region) {
+ config.region = Aws::String(region);
+ } else {
+ // Load config file (e.g., ~/.aws/config) only if AWS_SDK_LOAD_CONFIG
+ // is set with a truthy value.
+ const char* load_config_env = getenv("AWS_SDK_LOAD_CONFIG");
+ string load_config =
+ load_config_env ? str_util::Lowercase(load_config_env) : "";
+ if (load_config == "true" || load_config == "1") {
+ Aws::String config_file;
+ // If AWS_CONFIG_FILE is set then use it, otherwise use ~/.aws/config.
+ const char* config_file_env = getenv("AWS_CONFIG_FILE");
+ if (config_file_env) {
+ config_file = config_file_env;
+ } else {
+ const char* home_env = getenv("HOME");
+ if (home_env) {
+ config_file = home_env;
+ config_file += "/.aws/config";
+ }
+ }
+ Aws::Config::AWSConfigFileProfileConfigLoader loader(config_file);
+ // Load the configuration. If successful, get the region.
+ // If the load is not successful, then generate a warning.
+ if (loader.Load()) {
+ auto profiles = loader.GetProfiles();
+ if (!profiles["default"].GetRegion().empty()) {
+ config.region = profiles["default"].GetRegion();
+ }
+ } else {
+ LOG(WARNING) << "Failed to load the profile in " << config_file << ".";
+ }
+ }
+ }
+ const char* use_https = getenv("KINESIS_USE_HTTPS");
+ if (use_https) {
+ if (use_https[0] == '0') {
+ config.scheme = Aws::Http::Scheme::HTTP;
+ } else {
+ config.scheme = Aws::Http::Scheme::HTTPS;
+ }
+ }
+ const char* verify_ssl = getenv("KINESIS_VERIFY_SSL");
+ if (verify_ssl) {
+ if (verify_ssl[0] == '0') {
+ config.verifySSL = false;
+ } else {
+ config.verifySSL = true;
+ }
+ }
+ const char* connect_timeout = getenv("KINESIS_CONNECT_TIMEOUT_MSEC");
+ if (connect_timeout) {
+ int64 timeout;
+
+ if (strings::safe_strto64(connect_timeout, &timeout)) {
+ config.connectTimeoutMs = timeout;
+ }
+ }
+ const char* request_timeout = getenv("KINESIS_REQUEST_TIMEOUT_MSEC");
+ if (request_timeout) {
+ int64 timeout;
+
+ if (strings::safe_strto64(request_timeout, &timeout)) {
+ config.requestTimeoutMs = timeout;
+ }
+ }
+
+ return &config;
+}
+
+Aws::Client::ClientConfiguration& GetDefaultClientConfig() {
+ static Aws::Client::ClientConfiguration* config =
+ InitializeDefaultClientConfig();
+ return *config;
+}
+
+static mutex mu(LINKER_INITIALIZED);
+static unsigned count(0);
+void AwsInitAPI() {
+ mutex_lock lock(mu);
+ count++;
+ if (count == 1) {
+ Aws::SDKOptions options;
+ options.cryptoOptions.sha256Factory_create_fn = []() {
+ return Aws::MakeShared<AWSSHA256Factory>(AWSCryptoAllocationTag);
+ };
+ options.cryptoOptions.sha256HMACFactory_create_fn = []() {
+ return Aws::MakeShared<AWSSHA256HmacFactory>(AWSCryptoAllocationTag);
+ };
+ Aws::InitAPI(options);
+ }
+}
+void AwsShutdownAPI() {
+ mutex_lock lock(mu);
+ count--;
+ if (count == 0) {
+ Aws::SDKOptions options;
+ Aws::ShutdownAPI(options);
+ }
+}
+void ShutdownClient(Aws::Kinesis::KinesisClient* client) {
+ if (client != nullptr) {
+ delete client;
+ AwsShutdownAPI();
+ }
+}
+}  // namespace
+class KinesisDatasetOp : public DatasetOpKernel {
+ public:
+ using DatasetOpKernel::DatasetOpKernel;
+
+ void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
+ std::string stream = "";
+ OP_REQUIRES_OK(ctx,
+ ParseScalarArgument<std::string>(ctx, "stream", &stream));
+ std::string shard = "";
+ OP_REQUIRES_OK(ctx, ParseScalarArgument<std::string>(ctx, "shard", &shard));
+ bool read_indefinitely = true;
+ OP_REQUIRES_OK(ctx, ParseScalarArgument<bool>(ctx, "read_indefinitely",
+ &read_indefinitely));
+ int64 interval = -1;
+ OP_REQUIRES_OK(ctx, ParseScalarArgument<int64>(ctx, "interval", &interval));
+ OP_REQUIRES(ctx, (interval > 0),
+ errors::InvalidArgument(
+                    "Interval value should be larger than 0, got ", interval));
+ *output = new Dataset(ctx, stream, shard, read_indefinitely, interval);
+ }
+
+ private:
+ class Dataset : public GraphDatasetBase {
+ public:
+ Dataset(OpKernelContext* ctx, const string& stream, const string& shard,
+ const bool read_indefinitely, const int64 interval)
+ : GraphDatasetBase(ctx),
+ stream_(stream),
+ shard_(shard),
+ read_indefinitely_(read_indefinitely),
+ interval_(interval) {}
+
+ std::unique_ptr<IteratorBase> MakeIteratorInternal(
+ const string& prefix) const override {
+ return std::unique_ptr<IteratorBase>(
+ new Iterator({this, strings::StrCat(prefix, "::Kinesis")}));
+ }
+
+ const DataTypeVector& output_dtypes() const override {
+ static DataTypeVector* dtypes = new DataTypeVector({DT_STRING});
+ return *dtypes;
+ }
+
+ const std::vector<PartialTensorShape>& output_shapes() const override {
+ static std::vector<PartialTensorShape>* shapes =
+ new std::vector<PartialTensorShape>({{}});
+ return *shapes;
+ }
+
+ string DebugString() const override { return "KinesisDatasetOp::Dataset"; }
+
+ protected:
+ Status AsGraphDefInternal(DatasetGraphDefBuilder* b,
+ Node** output) const override {
+ Node* stream = nullptr;
+ TF_RETURN_IF_ERROR(b->AddScalar(stream_, &stream));
+ Node* shard = nullptr;
+ TF_RETURN_IF_ERROR(b->AddScalar(shard_, &shard));
+ Node* read_indefinitely = nullptr;
+ TF_RETURN_IF_ERROR(b->AddScalar(read_indefinitely_, &read_indefinitely));
+ Node* interval = nullptr;
+ TF_RETURN_IF_ERROR(b->AddScalar(interval_, &interval));
+ TF_RETURN_IF_ERROR(b->AddDataset(
+ this, {stream, shard, read_indefinitely, interval}, output));
+ return Status::OK();
+ }
+
+ private:
+ class Iterator : public DatasetIterator<Dataset> {
+ public:
+ explicit Iterator(const Params& params)
+ : DatasetIterator<Dataset>(params),
+ client_(nullptr, ShutdownClient) {}
+
+ Status GetNextInternal(IteratorContext* ctx,
+ std::vector<Tensor>* out_tensors,
+ bool* end_of_sequence) override {
+ mutex_lock l(mu_);
+ if (iterator_ == "") {
+ TF_RETURN_IF_ERROR(SetupStreamsLocked());
+ }
+ do {
+ Aws::Kinesis::Model::GetRecordsRequest request;
+ auto outcome = client_->GetRecords(
+ request.WithShardIterator(iterator_).WithLimit(1));
+ if (!outcome.IsSuccess()) {
+ return errors::Unknown(outcome.GetError().GetExceptionName(), ": ",
+ outcome.GetError().GetMessage());
+ }
+ if (outcome.GetResult().GetRecords().size() == 0) {
+ // If no records were returned then nothing is available at the
+ // moment.
+ if (!dataset()->read_indefinitely_) {
+ *end_of_sequence = true;
+ return Status::OK();
+ }
+ // Continue the loop after a period of time.
+ ctx->env()->SleepForMicroseconds(dataset()->interval_);
+ continue;
+ }
+ if (outcome.GetResult().GetRecords().size() != 1) {
+ return errors::Unknown("invalid number of records ",
+ outcome.GetResult().GetRecords().size(),
+ " returned");
+ }
+
+ iterator_ = outcome.GetResult().GetNextShardIterator();
+
+ const auto& data = outcome.GetResult().GetRecords()[0].GetData();
+ StringPiece value(
+ reinterpret_cast<const char*>(data.GetUnderlyingData()),
+ data.GetLength());
+ Tensor value_tensor(ctx->allocator({}), DT_STRING, {});
+ value_tensor.scalar<std::string>()() = std::string(value);
+ out_tensors->emplace_back(std::move(value_tensor));
+
+ *end_of_sequence = false;
+ return Status::OK();
+ } while (true);
+ }
+
+ protected:
+ Status SaveInternal(IteratorStateWriter* writer) override {
+ return errors::Unimplemented("SaveInternal is currently not supported");
+ }
+
+ Status RestoreInternal(IteratorContext* ctx,
+ IteratorStateReader* reader) override {
+ return errors::Unimplemented(
+ "RestoreInternal is currently not supported");
+ }
+
+ private:
+ // Sets up Kinesis streams to read from.
+ Status SetupStreamsLocked() EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ AwsInitAPI();
+ client_.reset(
+ new Aws::Kinesis::KinesisClient(GetDefaultClientConfig()));
+
+ Aws::Kinesis::Model::DescribeStreamRequest request;
+ auto outcome = client_->DescribeStream(
+ request.WithStreamName(dataset()->stream_.c_str()));
+ if (!outcome.IsSuccess()) {
+ return errors::Unknown(outcome.GetError().GetExceptionName(), ": ",
+ outcome.GetError().GetMessage());
+ }
+ Aws::String shard;
+ Aws::String sequence;
+ if (dataset()->shard_ == "") {
+ if (outcome.GetResult().GetStreamDescription().GetShards().size() !=
+ 1) {
+ return errors::InvalidArgument(
+                "shard has to be provided unless the stream only has one "
+ "shard, there are ",
+ outcome.GetResult().GetStreamDescription().GetShards().size(),
+ " shards in stream ", dataset()->stream_);
+ }
+ shard = outcome.GetResult()
+ .GetStreamDescription()
+ .GetShards()[0]
+ .GetShardId();
+ sequence = outcome.GetResult()
+ .GetStreamDescription()
+ .GetShards()[0]
+ .GetSequenceNumberRange()
+ .GetStartingSequenceNumber();
+ } else {
+ for (const auto& entry :
+ outcome.GetResult().GetStreamDescription().GetShards()) {
+ if (entry.GetShardId() == dataset()->shard_.c_str()) {
+ shard = entry.GetShardId();
+ sequence =
+ entry.GetSequenceNumberRange().GetStartingSequenceNumber();
+ break;
+ }
+ }
+ if (shard == "") {
+ return errors::InvalidArgument("no shard ", dataset()->shard_,
+ " in stream ", dataset()->stream_);
+ }
+ }
+
+ Aws::Kinesis::Model::GetShardIteratorRequest iterator_request;
+ auto iterator_outcome = client_->GetShardIterator(
+ iterator_request.WithStreamName(dataset()->stream_.c_str())
+ .WithShardId(shard)
+ .WithShardIteratorType(
+ Aws::Kinesis::Model::ShardIteratorType::AT_SEQUENCE_NUMBER)
+ .WithStartingSequenceNumber(sequence));
+ if (!iterator_outcome.IsSuccess()) {
+ return errors::Unknown(iterator_outcome.GetError().GetExceptionName(),
+ ": ",
+ iterator_outcome.GetError().GetMessage());
+ }
+ iterator_ = iterator_outcome.GetResult().GetShardIterator();
+ return Status::OK();
+ }
+
+ mutex mu_;
+ Aws::String iterator_ GUARDED_BY(mu_);
+ std::unique_ptr<Aws::Kinesis::KinesisClient, decltype(&ShutdownClient)>
+ client_ GUARDED_BY(mu_);
+ };
+
+ const std::string stream_;
+ const std::string shard_;
+ const bool read_indefinitely_;
+ const int64 interval_;
+ };
+};
+
+REGISTER_KERNEL_BUILDER(Name("KinesisDataset").Device(DEVICE_CPU),
+ KinesisDatasetOp);
+
+} // namespace tensorflow
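The kernel above builds its AWS client configuration once, caches it in a static, and reads every knob from the environment (`KINESIS_ENDPOINT`, `AWS_REGION`, `KINESIS_USE_HTTPS`, `KINESIS_VERIFY_SSL`, and the two timeout variables). A hedged sketch of pointing it at a local Kinesis emulator; the endpoint and port are assumptions for illustration only:

```python
# Sketch: configure the client via the environment variables the kernel reads.
# The kinesalite-style endpoint below is a hypothetical local emulator.
import os

os.environ["KINESIS_ENDPOINT"] = "localhost:4567"  # hypothetical endpoint
os.environ["KINESIS_USE_HTTPS"] = "0"              # plain HTTP for local tests
os.environ["KINESIS_VERIFY_SSL"] = "0"             # no TLS verification locally
os.environ["AWS_REGION"] = "us-east-1"
# Set these before the first dataset op runs: the configuration is
# initialized once and cached in a static by GetDefaultClientConfig().
```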
diff --git a/tensorflow/contrib/kinesis/ops/dataset_ops.cc b/tensorflow/contrib/kinesis/ops/dataset_ops.cc
new file mode 100644
index 0000000000..54204513cf
--- /dev/null
+++ b/tensorflow/contrib/kinesis/ops/dataset_ops.cc
@@ -0,0 +1,42 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/framework/common_shape_fns.h"
+#include "tensorflow/core/framework/op.h"
+#include "tensorflow/core/framework/shape_inference.h"
+
+namespace tensorflow {
+
+REGISTER_OP("KinesisDataset")
+ .Input("stream: string")
+ .Input("shard: string")
+ .Input("read_indefinitely: bool")
+ .Input("interval: int64")
+ .Output("handle: variant")
+ .SetIsStateful()
+ .SetShapeFn(shape_inference::ScalarShape)
+ .Doc(R"doc(
+Creates a dataset that emits the records of a Kinesis stream.
+
+stream: A `tf.string` tensor containing the name of the stream.
+shard: A `tf.string` tensor containing the id of the shard.
+read_indefinitely: If `True`, the Kinesis dataset will keep retrying
+  on `EOF` after the `interval` period. If `False`, then
+  the dataset will stop on `EOF`. The default value is `True`.
+interval: The interval for the Kinesis Client to wait before
+  it tries to get records again (in milliseconds).
+)doc");
+
+} // namespace tensorflow
diff --git a/tensorflow/contrib/kinesis/python/kernel_tests/kinesis_test.py b/tensorflow/contrib/kinesis/python/kernel_tests/kinesis_test.py
new file mode 100644
index 0000000000..7289b45c50
--- /dev/null
+++ b/tensorflow/contrib/kinesis/python/kernel_tests/kinesis_test.py
@@ -0,0 +1,139 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+# ==============================================================================
+"""Tests for KinesisDataset.
+NOTE: boto3 is needed and the test has to be invoked manually:
+```
+$ bazel test -s --verbose_failures --config=opt \
+ --action_env=AWS_ACCESS_KEY_ID=XXXXXX \
+ --action_env=AWS_SECRET_ACCESS_KEY=XXXXXX \
+ //tensorflow/contrib/kinesis:kinesis_test
+```
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import boto3
+
+from tensorflow.contrib.kinesis.python.ops import kinesis_dataset_ops
+from tensorflow.python.data.ops import iterator_ops
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
+from tensorflow.python.ops import array_ops
+from tensorflow.python.platform import test
+
+
+class KinesisDatasetTest(test.TestCase):
+
+ def testKinesisDatasetOneShard(self):
+ client = boto3.client('kinesis', region_name='us-east-1')
+
+    # Set up the Kinesis stream with 1 shard.
+ stream_name = "tf_kinesis_test_1"
+ client.create_stream(StreamName=stream_name, ShardCount=1)
+ # Wait until stream exists, default is 10 * 18 seconds.
+ client.get_waiter('stream_exists').wait(StreamName=stream_name)
+ for i in range(10):
+ data = "D" + str(i)
+ client.put_record(
+ StreamName=stream_name, Data=data, PartitionKey="TensorFlow" + str(i))
+
+ stream = array_ops.placeholder(dtypes.string, shape=[])
+ num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
+ batch_size = array_ops.placeholder(dtypes.int64, shape=[])
+
+ repeat_dataset = kinesis_dataset_ops.KinesisDataset(
+ stream, read_indefinitely=False).repeat(num_epochs)
+ batch_dataset = repeat_dataset.batch(batch_size)
+
+ iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
+ init_op = iterator.make_initializer(repeat_dataset)
+ init_batch_op = iterator.make_initializer(batch_dataset)
+ get_next = iterator.get_next()
+
+ with self.test_session() as sess:
+ # Basic test: read from shard 0 of stream 1.
+ sess.run(init_op, feed_dict={stream: stream_name, num_epochs: 1})
+ for i in range(10):
+ self.assertEqual("D" + str(i), sess.run(get_next))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(get_next)
+
+ client.delete_stream(StreamName=stream_name)
+ # Wait until stream deleted, default is 10 * 18 seconds.
+ client.get_waiter('stream_not_exists').wait(StreamName=stream_name)
+
+ def testKinesisDatasetTwoShards(self):
+ client = boto3.client('kinesis', region_name='us-east-1')
+
+    # Set up the Kinesis stream with 2 shards.
+ stream_name = "tf_kinesis_test_2"
+ client.create_stream(StreamName=stream_name, ShardCount=2)
+ # Wait until stream exists, default is 10 * 18 seconds.
+ client.get_waiter('stream_exists').wait(StreamName=stream_name)
+
+ for i in range(10):
+ data = "D" + str(i)
+ client.put_record(
+ StreamName=stream_name, Data=data, PartitionKey="TensorFlow" + str(i))
+ response = client.describe_stream(StreamName=stream_name)
+ shard_id_0 = response["StreamDescription"]["Shards"][0]["ShardId"]
+ shard_id_1 = response["StreamDescription"]["Shards"][1]["ShardId"]
+
+ stream = array_ops.placeholder(dtypes.string, shape=[])
+ shard = array_ops.placeholder(dtypes.string, shape=[])
+ num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
+ batch_size = array_ops.placeholder(dtypes.int64, shape=[])
+
+ repeat_dataset = kinesis_dataset_ops.KinesisDataset(
+ stream, shard, read_indefinitely=False).repeat(num_epochs)
+ batch_dataset = repeat_dataset.batch(batch_size)
+
+ iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
+ init_op = iterator.make_initializer(repeat_dataset)
+ init_batch_op = iterator.make_initializer(batch_dataset)
+ get_next = iterator.get_next()
+
+ data = list()
+ with self.test_session() as sess:
+ # Basic test: read from shard 0 of stream 2.
+ sess.run(
+ init_op, feed_dict={
+ stream: stream_name, shard: shard_id_0, num_epochs: 1})
+ with self.assertRaises(errors.OutOfRangeError):
+ # Use range(11) to guarantee the OutOfRangeError.
+ for i in range(11):
+ data.append(sess.run(get_next))
+
+ # Basic test: read from shard 1 of stream 2.
+ sess.run(
+ init_op, feed_dict={
+ stream: stream_name, shard: shard_id_1, num_epochs: 1})
+ with self.assertRaises(errors.OutOfRangeError):
+ # Use range(11) to guarantee the OutOfRangeError.
+ for i in range(11):
+ data.append(sess.run(get_next))
+
+ data.sort()
+ self.assertEqual(data, ["D" + str(i) for i in range(10)])
+
+ client.delete_stream(StreamName=stream_name)
+ # Wait until stream deleted, default is 10 * 18 seconds.
+ client.get_waiter('stream_not_exists').wait(StreamName=stream_name)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/kinesis/python/ops/kinesis_dataset_ops.py b/tensorflow/contrib/kinesis/python/ops/kinesis_dataset_ops.py
new file mode 100644
index 0000000000..ca2df95ba4
--- /dev/null
+++ b/tensorflow/contrib/kinesis/python/ops/kinesis_dataset_ops.py
@@ -0,0 +1,96 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Kinesis Dataset."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.kinesis.python.ops import kinesis_op_loader # pylint: disable=unused-import
+from tensorflow.contrib.kinesis.python.ops import gen_dataset_ops
+from tensorflow.python.data.ops.dataset_ops import Dataset
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+
+
+class KinesisDataset(Dataset):
+  """A Kinesis Dataset that consumes messages from a stream.
+
+ Kinesis is a managed service provided by AWS for data streaming.
+ This dataset reads messages from Kinesis with each message presented
+ as a `tf.string`.
+
+ For example, we can construct and use the KinesisDataset as follows:
+ ```python
+ dataset = tf.contrib.kinesis.KinesisDataset(
+ "kinesis_stream_name", read_indefinitely=False)
+  next_element = dataset.make_one_shot_iterator().get_next()
+ with tf.Session() as sess:
+ while True:
+ try:
+        print(sess.run(next_element))
+ except tf.errors.OutOfRangeError:
+ break
+ ```
+
+ Since Kinesis is a data streaming service, data may not be available
+ at the time it is being read. The argument `read_indefinitely` is
+ used to control the behavior in this situation. If `read_indefinitely`
+ is `True`, then `KinesisDataset` will keep retrying to retrieve data
+ from the stream. If `read_indefinitely` is `False`, an `OutOfRangeError`
+  is raised immediately instead.
+ """
+
+ def __init__(self,
+ stream,
+ shard="",
+ read_indefinitely=True,
+ interval=100000):
+ """Create a KinesisDataset.
+
+ Args:
+ stream: A `tf.string` tensor containing the name of the stream.
+ shard: A `tf.string` tensor containing the id of the shard.
+      read_indefinitely: If `True`, the Kinesis dataset will keep retrying
+        on `EOF` after the `interval` period. If `False`, then
+        the dataset will stop on `EOF`. The default value is `True`.
+      interval: The interval for the Kinesis Client to wait before
+        it tries to get records again (in milliseconds).
+ """
+ super(KinesisDataset, self).__init__()
+ self._stream = ops.convert_to_tensor(
+ stream, dtype=dtypes.string, name="stream")
+ self._shard = ops.convert_to_tensor(
+ shard, dtype=dtypes.string, name="shard")
+ self._read_indefinitely = ops.convert_to_tensor(
+ read_indefinitely, dtype=dtypes.bool, name="read_indefinitely")
+ self._interval = ops.convert_to_tensor(
+ interval, dtype=dtypes.int64, name="interval")
+
+ def _as_variant_tensor(self):
+ return gen_dataset_ops.kinesis_dataset(
+ self._stream, self._shard, self._read_indefinitely, self._interval)
+
+ @property
+ def output_classes(self):
+ return ops.Tensor
+
+ @property
+ def output_shapes(self):
+ return tensor_shape.scalar()
+
+ @property
+ def output_types(self):
+ return dtypes.string
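For a multi-shard stream, a specific shard id can be passed through the `shard` argument. A sketch under the assumption that the shard id was obtained out of band (e.g., with boto3's `describe_stream`, as in the test above); the stream and shard names are placeholders:

```python
# Sketch: read a single shard of a multi-shard stream. Stream and shard
# names are placeholders, not real resources.
import tensorflow as tf
from tensorflow.contrib.kinesis.python.ops import kinesis_dataset_ops

dataset = kinesis_dataset_ops.KinesisDataset(
    "my_stream", shard="shardId-000000000000", read_indefinitely=False)
next_element = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
  try:
    while True:
      print(sess.run(next_element))
  except tf.errors.OutOfRangeError:
    pass
```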
diff --git a/tensorflow/contrib/kinesis/python/ops/kinesis_op_loader.py b/tensorflow/contrib/kinesis/python/ops/kinesis_op_loader.py
new file mode 100644
index 0000000000..c9ce9f3646
--- /dev/null
+++ b/tensorflow/contrib/kinesis/python/ops/kinesis_op_loader.py
@@ -0,0 +1,24 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Python helper for loading Kinesis ops and kernels."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.util import loader
+from tensorflow.python.platform import resource_loader
+
+_dataset_ops = loader.load_op_library(
+ resource_loader.get_path_to_datafile("../../_dataset_ops.so"))
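The loader above resolves `_dataset_ops.so` relative to the installed package. For an out-of-tree build, the same shared object can also be loaded directly; a sketch, where the path is purely illustrative:

```python
# Sketch: load the custom op library without the contrib loader helper.
# The .so path is a hypothetical example.
import tensorflow as tf

_lib = tf.load_op_library("/path/to/_dataset_ops.so")
```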
diff --git a/tensorflow/contrib/layers/python/layers/embedding_ops_test.py b/tensorflow/contrib/layers/python/layers/embedding_ops_test.py
index dd2395f8c9..7ede193029 100644
--- a/tensorflow/contrib/layers/python/layers/embedding_ops_test.py
+++ b/tensorflow/contrib/layers/python/layers/embedding_ops_test.py
@@ -21,7 +21,6 @@ from __future__ import print_function
import itertools
import math
-import sys
import numpy as np
diff --git a/tensorflow/contrib/layers/python/layers/rev_block_lib.py b/tensorflow/contrib/layers/python/layers/rev_block_lib.py
index 0e35b1aa8b..dad3da3748 100644
--- a/tensorflow/contrib/layers/python/layers/rev_block_lib.py
+++ b/tensorflow/contrib/layers/python/layers/rev_block_lib.py
@@ -514,15 +514,15 @@ def _recompute_grad(fn, args, use_data_dep=_USE_DEFAULT, tupleize_grads=False):
original_vars = set(tape.watched_variables())
# Backward pass
- def grad_fn(*output_grads, **kwargs):
+ def _grad_fn(output_grads, variables=None):
"""Recompute outputs for gradient computation."""
- variables = []
+ variables = variables or []
if original_vars:
- variables = kwargs["variables"]
- if set(variables) != original_vars:
- raise ValueError(_WRONG_VARS_ERR)
- del kwargs
- inputs = list(args)
+ assert variables, ("Fn created variables but the variables were not "
+ "passed to the gradient fn.")
+ if set(variables) != original_vars:
+ raise ValueError(_WRONG_VARS_ERR)
+ inputs = [array_ops.identity(x) for x in list(args)]
# Recompute outputs
with framework_ops.control_dependencies(output_grads):
if use_data_dep_:
@@ -538,7 +538,7 @@ def _recompute_grad(fn, args, use_data_dep=_USE_DEFAULT, tupleize_grads=False):
if original_vars != recompute_vars:
raise ValueError(_WRONG_VARS_ERR)
- if not (isinstance(outputs, list) or isinstance(outputs, tuple)):
+ if not isinstance(outputs, (list, tuple)):
outputs = [outputs]
outputs = list(outputs)
grads = gradients_impl.gradients(outputs, inputs + variables,
@@ -554,6 +554,16 @@ def _recompute_grad(fn, args, use_data_dep=_USE_DEFAULT, tupleize_grads=False):
grad_vars = grads[len(inputs):]
return grad_inputs, grad_vars
+ # custom_gradient inspects the signature of the function to determine
+ # whether the user expects variables passed in the grad_fn. If the function
+ # created variables, the grad_fn should accept the "variables" kwarg.
+ if original_vars:
+ def grad_fn(*output_grads, **kwargs):
+ return _grad_fn(output_grads, kwargs["variables"])
+ else:
+ def grad_fn(*output_grads):
+ return _grad_fn(output_grads)
+
return outputs, grad_fn
return fn_with_recompute(*args)
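The comment in this hunk leans on a `tf.custom_gradient` convention: captured variables are forwarded to the gradient function only when its signature accepts a `variables` keyword argument. A standalone sketch of the two shapes, kept separate from the recompute machinery above:

```python
# Sketch of the signature convention custom_gradient inspects.
import tensorflow as tf

@tf.custom_gradient
def square(x):
  y = x * x
  def grad_fn(dy):  # fn created no variables: plain signature
    return 2.0 * dy * x
  return y, grad_fn

# Had the wrapped fn created variables, the gradient function would instead
# accept them via kwargs, mirroring the patch above:
#   def grad_fn(*output_grads, **kwargs):
#     variables = kwargs["variables"]
#     ...
#     return grad_inputs, grad_vars
```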
diff --git a/tensorflow/contrib/layers/python/layers/rev_block_lib_test.py b/tensorflow/contrib/layers/python/layers/rev_block_lib_test.py
index bc09ba8d43..d5971fb9d8 100644
--- a/tensorflow/contrib/layers/python/layers/rev_block_lib_test.py
+++ b/tensorflow/contrib/layers/python/layers/rev_block_lib_test.py
@@ -372,6 +372,26 @@ class RecomputeTest(test.TestCase):
self.assertEqual(2, len(update_ops))
self.assertEqual([False, True], kwarg_values)
+ def testWithoutVariables(self):
+
+ def concat_n(layer_list, num_inputs):
+ return math_ops.reduce_sum(
+ array_ops.concat([x for x in layer_list[-num_inputs:]], axis=-1),
+ axis=1, keepdims=True)
+
+ @rev_block_lib.recompute_grad
+ def concat_n_wrap(*args):
+ return concat_n(args, 3)
+
+ # DenseNet-style layers
+ layer_list = [random_ops.random_uniform((4, 8))]
+ for _ in range(5):
+ layer_list.append(math_ops.sqrt(concat_n_wrap(*layer_list)))
+
+ grads = gradients_impl.gradients(layer_list[-1], layer_list[0])
+ with self.test_session() as sess:
+ sess.run(grads)
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/contrib/learn/python/learn/estimators/run_config.py b/tensorflow/contrib/learn/python/learn/estimators/run_config.py
index 14ee2ba609..7cb87619d9 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/run_config.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/run_config.py
@@ -240,6 +240,7 @@ class RunConfig(ClusterConfig, core_run_config.RunConfig):
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=10000,
log_step_count_steps=100,
+ protocol=None,
evaluation_master='',
model_dir=None,
session_config=None):
@@ -289,6 +290,8 @@ class RunConfig(ClusterConfig, core_run_config.RunConfig):
session_config: a ConfigProto used to set session parameters, or None.
Note - using this argument, it is easy to provide settings which break
otherwise perfectly good models. Use with care.
+      protocol: An optional argument which specifies the protocol used when
+        starting the server. If None, it defaults to grpc.
"""
# Neither parent class calls super().__init__(), so here we have to
# manually call their __init__() methods.
@@ -313,6 +316,7 @@ class RunConfig(ClusterConfig, core_run_config.RunConfig):
self._save_summary_steps = save_summary_steps
self._save_checkpoints_secs = save_checkpoints_secs
self._log_step_count_steps = log_step_count_steps
+ self._protocol = protocol
self._session_config = session_config
if save_checkpoints_secs == RunConfig._USE_DEFAULT:
if save_checkpoints_steps is None:
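The new constructor argument is threaded through to server startup; a short hedged usage sketch ("grpc" mirrors the documented default, other values depend on the serving stack in use):

```python
# Sketch: opting into an explicit protocol on the contrib RunConfig.
import tensorflow as tf

config = tf.contrib.learn.RunConfig(protocol="grpc")
```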
diff --git a/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py b/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
index 5e7b422e3c..e742447208 100644
--- a/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
+++ b/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
@@ -625,11 +625,13 @@ def attention_decoder(decoder_inputs,
v = []
attention_vec_size = attn_size # Size of query vectors for attention.
for a in xrange(num_heads):
- k = variable_scope.get_variable("AttnW_%d" % a,
- [1, 1, attn_size, attention_vec_size])
+ k = variable_scope.get_variable(
+ "AttnW_%d" % a, [1, 1, attn_size, attention_vec_size],
+ dtype=dtype)
hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
v.append(
- variable_scope.get_variable("AttnV_%d" % a, [attention_vec_size]))
+ variable_scope.get_variable(
+ "AttnV_%d" % a, [attention_vec_size], dtype=dtype))
state = initial_state
@@ -647,11 +649,13 @@ def attention_decoder(decoder_inputs,
with variable_scope.variable_scope("Attention_%d" % a):
y = Linear(query, attention_vec_size, True)(query)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
+ y = math_ops.cast(y, dtype)
# Attention mask is a softmax of v^T * tanh(...).
s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y),
[2, 3])
- a = nn_ops.softmax(s)
+ a = nn_ops.softmax(math_ops.cast(s, dtype=dtypes.float32))
# Now calculate the attention-weighted vector d.
+ a = math_ops.cast(a, dtype)
d = math_ops.reduce_sum(
array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
@@ -681,6 +685,7 @@ def attention_decoder(decoder_inputs,
raise ValueError("Could not infer input size from input: %s" % inp.name)
inputs = [inp] + attns
+ inputs = [math_ops.cast(e, dtype) for e in inputs]
x = Linear(inputs, input_size, True)(inputs)
# Run the RNN.
cell_output, state = cell(x, state)
@@ -693,6 +698,7 @@ def attention_decoder(decoder_inputs,
attns = attention(state)
with variable_scope.variable_scope("AttnOutputProjection"):
+ cell_output = math_ops.cast(cell_output, dtype)
inputs = [cell_output] + attns
output = Linear(inputs, output_size, True)(inputs)
if loop_function is not None:
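The casts added in this hunk let the attention path run in a non-default dtype: the softmax is computed in float32 for numerical stability and the result is cast back. A sketch of exercising that path with float16 inputs; the shapes and cell choice are illustrative assumptions:

```python
# Sketch: drive attention_decoder with a non-float32 dtype. Shapes are
# illustrative; whether a given cell supports float16 end to end may vary.
import tensorflow as tf
from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq

batch, units, attn_len = 4, 8, 5
cell = tf.nn.rnn_cell.GRUCell(units)
decoder_inputs = [tf.zeros([batch, units], tf.float16) for _ in range(3)]
attention_states = tf.zeros([batch, attn_len, units], tf.float16)
initial_state = cell.zero_state(batch, tf.float16)
outputs, state = seq2seq.attention_decoder(
    decoder_inputs, initial_state, attention_states, cell, dtype=tf.float16)
```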
diff --git a/tensorflow/contrib/linear_optimizer/BUILD b/tensorflow/contrib/linear_optimizer/BUILD
index 5b89c6cef9..fe0ba19fcb 100644
--- a/tensorflow/contrib/linear_optimizer/BUILD
+++ b/tensorflow/contrib/linear_optimizer/BUILD
@@ -41,6 +41,7 @@ py_test(
size = "medium",
srcs = ["python/kernel_tests/sdca_ops_test.py"],
srcs_version = "PY2AND3",
+ tags = ["no_windows_gpu"],
deps = [
":sdca_ops_py",
":sparse_feature_column_py",
diff --git a/tensorflow/contrib/lite/BUILD b/tensorflow/contrib/lite/BUILD
index 8c17c65fcc..b95d4d0fce 100644
--- a/tensorflow/contrib/lite/BUILD
+++ b/tensorflow/contrib/lite/BUILD
@@ -128,6 +128,7 @@ cc_library(
hdrs = [
"allocation.h",
"context.h",
+ "context_util.h",
"error_reporter.h",
"graph_info.h",
"interpreter.h",
@@ -145,6 +146,7 @@ cc_library(
":memory_planner",
":schema_fbs_version",
":simple_memory_arena",
+ ":string",
":util",
"//tensorflow/contrib/lite/kernels:eigen_support",
"//tensorflow/contrib/lite/kernels:gemm_support",
diff --git a/tensorflow/contrib/lite/Makefile b/tensorflow/contrib/lite/Makefile
index 2b6997146e..a616138d33 100644
--- a/tensorflow/contrib/lite/Makefile
+++ b/tensorflow/contrib/lite/Makefile
@@ -17,7 +17,29 @@ else
endif
endif
-ARCH := $(shell if [[ $(shell uname -m) =~ i[345678]86 ]]; then echo x86_32; else echo $(shell uname -m); fi)
+HOST_ARCH := $(shell if [[ $(shell uname -m) =~ i[345678]86 ]]; then echo x86_32; else echo $(shell uname -m); fi)
+
+# Self-hosting
+TARGET_ARCH := ${HOST_ARCH}
+
+# Cross compiling
+ifeq ($(CROSS),rpi)
+ TARGET_ARCH := armv7l
+ TARGET_TOOLCHAIN_PREFIX := arm-linux-gnueabihf-
+endif
+
+ifeq ($(CROSS),riscv)
+ TARGET_ARCH := riscv
+ TARGET_TOOLCHAIN_PREFIX := riscv32-unknown-elf-
+endif
+ifeq ($(CROSS),stm32f7)
+ TARGET_ARCH := armf7
+ TARGET_TOOLCHAIN_PREFIX := arm-none-eabi-
+endif
+ifeq ($(CROSS),stm32f1)
+ TARGET_ARCH := armm1
+ TARGET_TOOLCHAIN_PREFIX := arm-none-eabi-
+endif
# Where compiled objects are stored.
OBJDIR := $(MAKEFILE_DIR)/gen/obj/
@@ -25,11 +47,46 @@ BINDIR := $(MAKEFILE_DIR)/gen/bin/
LIBDIR := $(MAKEFILE_DIR)/gen/lib/
GENDIR := $(MAKEFILE_DIR)/gen/obj/
+LIBS :=
+ifeq ($(TARGET_ARCH),x86_64)
+ CXXFLAGS += -fPIC -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -pthread # -msse4.2
+endif
+
+ifeq ($(TARGET_ARCH),armv7l)
+ CXXFLAGS += -mfpu=neon -pthread -fPIC
+ LIBS += -ldl
+endif
+
+ifeq ($(TARGET_ARCH),riscv)
+# CXXFLAGS += -march=gap8
+ CXXFLAGS += -DTFLITE_MCU
+ LIBS += -ldl
+ BUILD_TYPE := micro
+endif
+
+ifeq ($(TARGET_ARCH),armf7)
+ CXXFLAGS += -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -DTFLITE_MCU
+ CXXFLAGS += -fno-rtti -fmessage-length=0 -fno-exceptions -fno-builtin -ffunction-sections -fdata-sections
+ CXXFLAGS += -funsigned-char -MMD
+ CXXFLAGS += -mcpu=cortex-m7 -mthumb -mfpu=fpv5-sp-d16 -mfloat-abi=softfp
+ CXXFLAGS += '-std=gnu++11' '-fno-rtti' '-Wvla' '-c' '-Wall' '-Wextra' '-Wno-unused-parameter' '-Wno-missing-field-initializers' '-fmessage-length=0' '-fno-exceptions' '-fno-builtin' '-ffunction-sections' '-fdata-sections' '-funsigned-char' '-MMD' '-fno-delete-null-pointer-checks' '-fomit-frame-pointer' '-Os'
+ LIBS += -ldl
+ BUILD_TYPE := micro
+endif
+ifeq ($(TARGET_ARCH),armm1)
+ CXXFLAGS += -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -mcpu=cortex-m1 -mthumb -DTFLITE_MCU
+ CXXFLAGS += -fno-rtti -fmessage-length=0 -fno-exceptions -fno-builtin -ffunction-sections -fdata-sections
+ CXXFLAGS += -funsigned-char -MMD
+ LIBS += -ldl
+endif
+
# Settings for the host compiler.
-CXX := $(CC_PREFIX)gcc
-CXXFLAGS := --std=c++11 -O3 -DNDEBUG
-CC := $(CC_PREFIX)gcc
-CCFLAGS := -O3 -DNDEBUG
+CXX := $(CC_PREFIX) ${TARGET_TOOLCHAIN_PREFIX}g++
+CXXFLAGS += --std=c++11 -O3 -DNDEBUG
+CCFLAGS := ${CXXFLAGS}
+CC := $(CC_PREFIX) ${TARGET_TOOLCHAIN_PREFIX}gcc
+AR := $(CC_PREFIX) ${TARGET_TOOLCHAIN_PREFIX}ar
+CFLAGS :=
LDOPTS :=
LDOPTS += -L/usr/local/lib
ARFLAGS := -r
@@ -48,7 +105,7 @@ INCLUDES := \
# override local versions in the source tree.
INCLUDES += -I/usr/local/include
-LIBS := \
+LIBS += \
-lstdc++ \
-lpthread \
-lm \
@@ -92,18 +149,21 @@ PROFILE_SUMMARIZER_SRCS := \
CORE_CC_ALL_SRCS := \
$(wildcard tensorflow/contrib/lite/*.cc) \
+$(wildcard tensorflow/contrib/lite/*.c)
+ifneq ($(BUILD_TYPE),micro)
+CORE_CC_ALL_SRCS += \
$(wildcard tensorflow/contrib/lite/kernels/*.cc) \
$(wildcard tensorflow/contrib/lite/kernels/internal/*.cc) \
$(wildcard tensorflow/contrib/lite/kernels/internal/optimized/*.cc) \
$(wildcard tensorflow/contrib/lite/kernels/internal/reference/*.cc) \
$(PROFILER_SRCS) \
-$(wildcard tensorflow/contrib/lite/*.c) \
$(wildcard tensorflow/contrib/lite/kernels/*.c) \
$(wildcard tensorflow/contrib/lite/kernels/internal/*.c) \
$(wildcard tensorflow/contrib/lite/kernels/internal/optimized/*.c) \
$(wildcard tensorflow/contrib/lite/kernels/internal/reference/*.c) \
$(wildcard tensorflow/contrib/lite/downloads/farmhash/src/farmhash.cc) \
$(wildcard tensorflow/contrib/lite/downloads/fft2d/fftsg.c)
+endif
# Remove any duplicates.
CORE_CC_ALL_SRCS := $(sort $(CORE_CC_ALL_SRCS))
CORE_CC_EXCLUDE_SRCS := \
@@ -113,6 +173,11 @@ $(wildcard tensorflow/contrib/lite/*/*/*test.cc) \
$(wildcard tensorflow/contrib/lite/*/*/*/*test.cc) \
$(wildcard tensorflow/contrib/lite/kernels/test_util.cc) \
$(MINIMAL_SRCS)
+ifeq ($(BUILD_TYPE),micro)
+CORE_CC_EXCLUDE_SRCS += \
+tensorflow/contrib/lite/model.cc \
+tensorflow/contrib/lite/nnapi_delegate.cc
+endif
# Filter out all the excluded files.
TF_LITE_CC_SRCS := $(filter-out $(CORE_CC_EXCLUDE_SRCS), $(CORE_CC_ALL_SRCS))
# File names of the intermediate files target compilation generates.
@@ -120,7 +185,6 @@ TF_LITE_CC_OBJS := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(TF_LITE_CC_SRCS))))
LIB_OBJS := $(TF_LITE_CC_OBJS)
-
# Benchmark sources
BENCHMARK_SRCS_DIR := tensorflow/contrib/lite/tools/benchmark
BENCHMARK_ALL_SRCS := $(TFLITE_CC_SRCS) \
@@ -146,6 +210,9 @@ $(OBJDIR)%.o: %.c
# The target that's compiled if there's no command-line arguments.
all: $(LIB_PATH) $(MINIMAL_PATH) $(BENCHMARK_BINARY)
+# The target that's compiled for micro-controllers
+micro: $(LIB_PATH)
+
# Gathers together all the objects we've compiled into a single '.a' archive.
$(LIB_PATH): $(LIB_OBJS)
@mkdir -p $(dir $@)
diff --git a/tensorflow/contrib/lite/allocation.cc b/tensorflow/contrib/lite/allocation.cc
index a4772731ec..c42622ff02 100644
--- a/tensorflow/contrib/lite/allocation.cc
+++ b/tensorflow/contrib/lite/allocation.cc
@@ -14,7 +14,9 @@ limitations under the License.
==============================================================================*/
#include <fcntl.h>
+#ifndef TFLITE_MCU
#include <sys/mman.h>
+#endif
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
@@ -27,10 +29,13 @@ limitations under the License.
#include "tensorflow/contrib/lite/allocation.h"
#include "tensorflow/contrib/lite/context.h"
#include "tensorflow/contrib/lite/error_reporter.h"
+#ifndef TFLITE_MCU
#include "tensorflow/contrib/lite/nnapi_delegate.h"
+#endif
namespace tflite {
+#ifndef TFLITE_MCU
MMAPAllocation::MMAPAllocation(const char* filename,
ErrorReporter* error_reporter)
: Allocation(error_reporter), mmapped_buffer_(MAP_FAILED) {
@@ -111,6 +116,7 @@ MemoryAllocation::MemoryAllocation(const void* ptr, size_t num_bytes,
buffer_ = ptr;
buffer_size_bytes_ = num_bytes;
}
+#endif
MemoryAllocation::~MemoryAllocation() {}
diff --git a/tensorflow/contrib/lite/allocation.h b/tensorflow/contrib/lite/allocation.h
index 68aee2e644..827ea86503 100644
--- a/tensorflow/contrib/lite/allocation.h
+++ b/tensorflow/contrib/lite/allocation.h
@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/contrib/lite/context.h"
#include "tensorflow/contrib/lite/error_reporter.h"
#include "tensorflow/contrib/lite/simple_memory_arena.h"
+#include "tensorflow/contrib/lite/string.h"
namespace tflite {
diff --git a/tensorflow/contrib/lite/arena_planner.cc b/tensorflow/contrib/lite/arena_planner.cc
index 22be64d6ff..16a0e71624 100644
--- a/tensorflow/contrib/lite/arena_planner.cc
+++ b/tensorflow/contrib/lite/arena_planner.cc
@@ -35,12 +35,14 @@ struct AllocationInfo {
};
ArenaPlanner::ArenaPlanner(TfLiteContext* context,
- std::unique_ptr<GraphInfo> graph_info)
+ std::unique_ptr<GraphInfo> graph_info,
+ bool preserve_inputs, bool preserve_intermediates)
: context_(context),
graph_info_(std::move(graph_info)),
arena_(kDefaultArenaAlignment),
- persistent_arena_(kDefaultArenaAlignment) {}
-
+ persistent_arena_(kDefaultArenaAlignment),
+ preserve_inputs_(preserve_inputs),
+ preserve_intermediates_(preserve_intermediates) {}
ArenaPlanner::~ArenaPlanner() {}
int64_t ArenaPlanner::BasePointer(TfLiteAllocationType type) {
@@ -112,9 +114,13 @@ TfLiteStatus ArenaPlanner::PlanAllocations() {
refcounts[tensor_index]++;
}
- // Queue all graph inputs for allocation.
+  // Queue all graph inputs for allocation. If preserve_inputs_ is true, make
+  // sure they are never overwritten.
for (int tensor_index : graph_info_->inputs()) {
if (tensor_index != kOptionalTensor) {
+ if (preserve_inputs_) {
+ refcounts[tensor_index]++;
+ }
TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
}
}
@@ -159,13 +165,15 @@ TfLiteStatus ArenaPlanner::PlanAllocations() {
// Then update the ref-counts of the node's inputs, and if necessary queue
// them for deallocation.
- TfLiteIntArray* node_inputs = node.inputs;
- for (int j = 0; j < node_inputs->size; ++j) {
- int tensor_index = node_inputs->data[j];
- if (tensor_index != kOptionalTensor) {
- refcounts[tensor_index]--;
- if (refcounts[tensor_index] == 0) {
- TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
+ if (!preserve_intermediates_) {
+ TfLiteIntArray* node_inputs = node.inputs;
+ for (int j = 0; j < node_inputs->size; ++j) {
+ int tensor_index = node_inputs->data[j];
+ if (tensor_index != kOptionalTensor) {
+ refcounts[tensor_index]--;
+ if (refcounts[tensor_index] == 0) {
+ TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
+ }
}
}
}
diff --git a/tensorflow/contrib/lite/arena_planner.h b/tensorflow/contrib/lite/arena_planner.h
index e9d0fbc5a9..82c866734f 100644
--- a/tensorflow/contrib/lite/arena_planner.h
+++ b/tensorflow/contrib/lite/arena_planner.h
@@ -43,8 +43,11 @@ struct AllocationInfo;
class ArenaPlanner : public MemoryPlanner {
public:
   // Ownership of 'context' is not taken and it must remain until the
- // ArenaPlanner is destroyed.
- ArenaPlanner(TfLiteContext* context, std::unique_ptr<GraphInfo> graph_info);
+  // ArenaPlanner is destroyed. If 'preserve_inputs' is true, the inputs to the
+ // graph will not share memory with any other tensor, effectively preserving
+ // them until the end of inference.
+ ArenaPlanner(TfLiteContext* context, std::unique_ptr<GraphInfo> graph_info,
+ bool preserve_inputs, bool preserve_intermediates);
~ArenaPlanner() override;
ArenaPlanner(const ArenaPlanner&) = delete;
ArenaPlanner& operator=(const ArenaPlanner&) = delete;
@@ -100,6 +103,15 @@ class ArenaPlanner : public MemoryPlanner {
// Raw memory buffer that is allocated for persistent tensors that are
// declared as kTfLiteArenaRwPersistent.
SimpleMemoryArena persistent_arena_;
+
+ // Ensure that the memory self-allocated for inputs is never reused by the
+  // allocator. This allows, for example, multiple runs without getting
+ // unpredictable results.
+ bool preserve_inputs_;
+
+  // If true, then no overlapping of memory areas is done, meaning intermediate
+  // results can be queried after running (modulo running delegates).
+ bool preserve_intermediates_;
};
} // namespace tflite
diff --git a/tensorflow/contrib/lite/arena_planner_test.cc b/tensorflow/contrib/lite/arena_planner_test.cc
index f0fd35216f..1adb426d58 100644
--- a/tensorflow/contrib/lite/arena_planner_test.cc
+++ b/tensorflow/contrib/lite/arena_planner_test.cc
@@ -151,11 +151,12 @@ void ReportError(TfLiteContext* context, const char* format, ...) {
class ArenaPlannerTest : public ::testing::Test {
protected:
- void SetGraph(TestGraph* graph) {
+ void SetGraph(TestGraph* graph, bool preserve_inputs = false) {
graph_ = graph;
context_.ReportError = ReportError;
planner_.reset(new ArenaPlanner(
- &context_, std::unique_ptr<GraphInfo>(new TestGraphInfo(graph))));
+ &context_, std::unique_ptr<GraphInfo>(new TestGraphInfo(graph)),
+        preserve_inputs, /*preserve_intermediates=*/false));
CHECK(planner_->ResetAllocations() == kTfLiteOk);
CHECK(planner_->PlanAllocations() == kTfLiteOk);
}
@@ -243,6 +244,30 @@ TEST_F(ArenaPlannerTest, SimpleGraph) {
EXPECT_EQ(GetOffset(3), 0);
}
+TEST_F(ArenaPlannerTest, SimpleGraphInputsPreserved) {
+ TestGraph graph({0, 1},
+ {
+ /* in, out, tmp */
+ {{0, 1}, {2}, {}}, // First op
+ {{2, 0}, {4, 5}, {}}, // Second op
+ {{4, 5}, {3}, {}} // Third op
+ },
+ {3});
+ SetGraph(&graph, /*preserve_inputs=*/true);
+ Execute(0, 10);
+
+ // Alloc(+) and dealloc(-) order: +0 +1 +2 +4 +5 -2 +3 -4 -5
+ EXPECT_EQ(GetOffset(0), 0);
+ EXPECT_EQ(GetOffset(1), GetOffsetAfter(0));
+ EXPECT_EQ(GetOffset(2), GetOffsetAfter(1));
+ EXPECT_EQ(GetOffset(4), GetOffsetAfter(2));
+ EXPECT_EQ(GetOffset(5), GetOffsetAfter(4));
+ // Because we are keeping the inputs alive until the end (due to
+ // preserve_inputs=true), the output tensor will not be able to use that
+  // space. It will end up using the same area as tensor #2.
+ EXPECT_EQ(GetOffset(3), GetOffsetAfter(1));
+}
+
TEST_F(ArenaPlannerTest, SimpleGraphWithTemporary) {
TestGraph graph({0, 1},
{
diff --git a/tensorflow/contrib/lite/build_def.bzl b/tensorflow/contrib/lite/build_def.bzl
index 81883ba1fd..bed862454e 100644
--- a/tensorflow/contrib/lite/build_def.bzl
+++ b/tensorflow/contrib/lite/build_def.bzl
@@ -195,7 +195,7 @@ def json_to_tflite(name, src, out):
def generated_test_models():
return [
"add",
- "arg_max",
+ "arg_min_max",
"avg_pool",
"batch_to_space_nd",
"concat",
@@ -232,7 +232,10 @@ def generated_test_models():
"not_equal",
"pad",
"padv2",
- # "prelu",
+ "prelu",
+ "pow",
+ "reduce_max",
+ "reduce_prod",
"relu",
"relu1",
"relu6",
@@ -256,7 +259,7 @@ def generated_test_models():
"tile",
"topk",
"transpose",
- "transpose_conv",
+ #"transpose_conv", # disabled due to b/111213074
"where",
]
diff --git a/tensorflow/contrib/lite/builtin_op_data.h b/tensorflow/contrib/lite/builtin_op_data.h
index 1b1b8b2985..a24aaad7dd 100644
--- a/tensorflow/contrib/lite/builtin_op_data.h
+++ b/tensorflow/contrib/lite/builtin_op_data.h
@@ -92,8 +92,17 @@ typedef struct {
TfLiteFusedActivation activation;
} TfLiteSequenceRNNParams;
+typedef enum {
+ kTfLiteFullyConnectedWeightsFormatDefault = 0,
+ kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8 = 1,
+} TfLiteFullyConnectedWeightsFormat;
+
typedef struct {
+ // Parameters for FullyConnected version 1 or above.
TfLiteFusedActivation activation;
+
+ // Parameters for FullyConnected version 2 or above.
+ TfLiteFullyConnectedWeightsFormat weights_format;
} TfLiteFullyConnectedParams;
typedef enum {
@@ -241,6 +250,10 @@ typedef struct {
} TfLiteArgMaxParams;
typedef struct {
+ TfLiteType output_type;
+} TfLiteArgMinParams;
+
+typedef struct {
TfLitePadding padding;
int stride_width;
int stride_height;
@@ -254,6 +267,16 @@ typedef struct {
TfLiteType out_type;
} TfLiteShapeParams;
+typedef struct {
+ // Parameters supported by version 1:
+ float min;
+ float max;
+ int num_bits;
+
+ // Parameters supported by version 2:
+ bool narrow_range;
+} TfLiteFakeQuantParams;
+
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
diff --git a/tensorflow/contrib/lite/builtin_ops.h b/tensorflow/contrib/lite/builtin_ops.h
index 7a78206ebf..4c7b27c4e0 100644
--- a/tensorflow/contrib/lite/builtin_ops.h
+++ b/tensorflow/contrib/lite/builtin_ops.h
@@ -103,6 +103,11 @@ typedef enum {
kTfLiteBuiltinSqrt = 75,
kTfLiteBuiltinRsqrt = 76,
kTfLiteBuiltinShape = 77,
+ kTfLiteBuiltinPow = 78,
+ kTfLiteBuiltinArgMin = 79,
+ kTfLiteBuiltinFakeQuant = 80,
+ kTfLiteBuiltinReduceProd = 81,
+ kTfLiteBuiltinReduceMax = 82,
} TfLiteBuiltinOperator;
#ifdef __cplusplus
diff --git a/tensorflow/contrib/lite/context.h b/tensorflow/contrib/lite/context.h
index 6434e265b1..1ff8843fa7 100644
--- a/tensorflow/contrib/lite/context.h
+++ b/tensorflow/contrib/lite/context.h
@@ -39,6 +39,26 @@ extern "C" {
typedef enum { kTfLiteOk = 0, kTfLiteError = 1 } TfLiteStatus;
+// The list of external context types known to TF Lite. This list exists solely
+// to avoid conflicts and to ensure ops can share the external contexts they
+// need. Access to the external contexts is controlled by one of the
+// corresponding support files.
+typedef enum {
+ kTfLiteEigenContext = 0, // include eigen_support.h to use.
+ kTfLiteGemmLowpContext = 1, // include gemm_support.h to use.
+ kTfLiteMaxExternalContexts = 2
+} TfLiteExternalContextType;
+
+// An external context is a collection of information unrelated to the TF Lite
+// framework, but useful to a subset of the ops. TF Lite knows very little
+// about the actual contexts, but it keeps a list of them, and is able to
+// refresh them if configurations like the number of recommended threads
+// change.
+typedef struct {
+ TfLiteExternalContextType type;
+ TfLiteStatus (*Refresh)(struct TfLiteContext* context);
+} TfLiteExternalContext;
+
 // Forward declare so GetNode can use this in Context.
typedef struct _TfLiteRegistration TfLiteRegistration;
typedef struct _TfLiteDelegate TfLiteDelegate;
@@ -139,6 +159,7 @@ typedef enum {
kTfLiteString = 5,
kTfLiteBool = 6,
kTfLiteInt16 = 7,
+ kTfLiteComplex64 = 8,
} TfLiteType;
// Parameters for asymmetric quantization. Quantized values can be converted
@@ -159,6 +180,7 @@ typedef union {
uint8_t* uint8;
bool* b;
int16_t* i16;
+ _Complex float* c64;
} TfLitePtrUnion;
// Memory allocation strategies. kTfLiteMmapRo is for read-only memory-mapped
@@ -243,7 +265,8 @@ void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
const void* allocation, bool is_variable,
TfLiteTensor* tensor);
-// Resize the allocated data of a (dynamic) tensor.
+// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
+// types other than kTfLiteDynamic will be ignored.
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
// A structure representing an instance of a node.
@@ -336,10 +359,15 @@ typedef struct TfLiteContext {
// eigen.
int recommended_num_threads;
- // TODO(ahentz): we should create a more general mechanism for this sort of
- // library-global objects.
- void* gemm_context;
- void* eigen_context;
+ // Access external contexts by type.
+ // WARNING: This is an experimental interface that is subject to change.
+ TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*,
+ TfLiteExternalContextType);
+  // Set the value of an external context. Does not take ownership of the
+ // pointer.
+ // WARNING: This is an experimental interface that is subject to change.
+ void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType,
+ TfLiteExternalContext*);
} TfLiteContext;
typedef struct _TfLiteRegistration {
diff --git a/tensorflow/contrib/lite/delegates/eager/BUILD b/tensorflow/contrib/lite/delegates/eager/BUILD
new file mode 100644
index 0000000000..066b106215
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/BUILD
@@ -0,0 +1,35 @@
+#
+# This is a TF Lite delegate that is powered by TensorFlow's Eager.
+#
+package(default_visibility = [
+ "//visibility:public",
+])
+
+licenses(["notice"]) # Apache 2.0
+
+cc_library(
+ name = "util",
+ srcs = ["util.cc"],
+ hdrs = ["util.h"],
+ deps = [
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite:kernel_api",
+ "//tensorflow/core:framework",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_test(
+ name = "util_test",
+ size = "small",
+ srcs = ["util_test.cc"],
+ tags = [
+ "tflite_not_portable",
+ ],
+ deps = [
+ ":util",
+ "//tensorflow/contrib/lite/testing:util",
+ "//tensorflow/core:lib",
+ "@com_google_googletest//:gtest",
+ ],
+)
diff --git a/tensorflow/contrib/lite/delegates/eager/util.cc b/tensorflow/contrib/lite/delegates/eager/util.cc
new file mode 100644
index 0000000000..04a852e515
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/util.cc
@@ -0,0 +1,47 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/delegates/eager/util.h"
+
+namespace tflite {
+
+TfLiteStatus ConvertStatus(TfLiteContext* context,
+ const tensorflow::Status& status) {
+ if (!status.ok()) {
+ context->ReportError(context, "%s", status.error_message().c_str());
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
+TfLiteStatus CopyShape(TfLiteContext* context, const tensorflow::Tensor& src,
+ TfLiteTensor* tensor) {
+ int num_dims = src.dims();
+ TfLiteIntArray* shape = TfLiteIntArrayCreate(num_dims);
+ for (int j = 0; j < num_dims; ++j) {
+ // We need to cast from TensorFlow's int64 to TF Lite's int32. Let's
+ // make sure there's no overflow.
+ if (src.dim_size(j) >= std::numeric_limits<int>::max()) {
+ context->ReportError(context,
+ "Dimension value in TensorFlow shape is larger than "
+ "supported by TF Lite");
+ TfLiteIntArrayFree(shape);
+ return kTfLiteError;
+ }
+ shape->data[j] = static_cast<int>(src.dim_size(j));
+ }
+ return context->ResizeTensor(context, tensor, shape);
+}
+
+} // namespace tflite
diff --git a/tensorflow/contrib/lite/delegates/eager/util.h b/tensorflow/contrib/lite/delegates/eager/util.h
new file mode 100644
index 0000000000..2696ca8d0d
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/util.h
@@ -0,0 +1,35 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_UTIL_H_
+#define TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_UTIL_H_
+
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/lib/core/status.h"
+
+namespace tflite {
+
+// Converts a tensorflow::Status into a TfLiteStatus. If the original status
+// represented an error, reports it using the given 'context'.
+TfLiteStatus ConvertStatus(TfLiteContext* context,
+ const tensorflow::Status& status);
+
+// Copies the shape of the given 'src' tensor into a TF Lite 'tensor'. Logs an
+// error and returns kTfLiteError if the shape can't be converted.
+TfLiteStatus CopyShape(TfLiteContext* context, const tensorflow::Tensor& src,
+ TfLiteTensor* tensor);
+} // namespace tflite
+
+#endif // TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_UTIL_H_
diff --git a/tensorflow/contrib/lite/delegates/eager/util_test.cc b/tensorflow/contrib/lite/delegates/eager/util_test.cc
new file mode 100644
index 0000000000..563f82dec3
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/util_test.cc
@@ -0,0 +1,100 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/delegates/eager/util.h"
+
+#include <cstdarg>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/testing/util.h"
+
+namespace tflite {
+namespace {
+
+using ::testing::ElementsAre;
+
+struct TestContext : public TfLiteContext {
+ string error;
+ std::vector<int> new_size;
+};
+
+void ReportError(TfLiteContext* context, const char* format, ...) {
+ TestContext* c = static_cast<TestContext*>(context);
+ const size_t kBufferSize = 1024;
+ char temp_buffer[kBufferSize];
+
+ va_list args;
+ va_start(args, format);
+ vsnprintf(temp_buffer, kBufferSize, format, args);
+ va_end(args);
+
+ c->error = temp_buffer;
+}
+
+TfLiteStatus ResizeTensor(TfLiteContext* context, TfLiteTensor* tensor,
+ TfLiteIntArray* new_size) {
+ TestContext* c = static_cast<TestContext*>(context);
+ c->new_size.clear();
+ for (int i = 0; i < new_size->size; ++i) {
+ c->new_size.push_back(new_size->data[i]);
+ }
+ TfLiteIntArrayFree(new_size);
+ return kTfLiteOk;
+}
+
+TEST(UtilTest, ConvertStatus) {
+ TestContext context;
+ context.ReportError = ReportError;
+
+ EXPECT_EQ(ConvertStatus(&context, tensorflow::errors::Internal("Some Error")),
+ kTfLiteError);
+ EXPECT_EQ(context.error, "Some Error");
+
+ context.error.clear();
+ EXPECT_EQ(ConvertStatus(&context, tensorflow::Status()), kTfLiteOk);
+ EXPECT_TRUE(context.error.empty());
+}
+
+TEST(UtilTest, CopyShape) {
+ TestContext context;
+ context.ReportError = ReportError;
+ context.ResizeTensor = ResizeTensor;
+
+ using tensorflow::DT_FLOAT;
+ using tensorflow::Tensor;
+
+ TfLiteTensor dst;
+
+ EXPECT_EQ(CopyShape(&context, Tensor(), &dst), kTfLiteOk);
+ EXPECT_THAT(context.new_size, ElementsAre(0));
+
+ EXPECT_EQ(CopyShape(&context, Tensor(DT_FLOAT, {1, 2}), &dst), kTfLiteOk);
+ EXPECT_THAT(context.new_size, ElementsAre(1, 2));
+
+ EXPECT_EQ(CopyShape(&context, Tensor(DT_FLOAT, {1LL << 44, 2}), &dst),
+ kTfLiteError);
+ EXPECT_EQ(context.error,
+ "Dimension value in TensorFlow shape is larger than supported by "
+ "TF Lite");
+}
+
+} // namespace
+} // namespace tflite
+
+int main(int argc, char** argv) {
+ ::tflite::LogToStderr();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate.cc b/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate.cc
index e96ee92376..f0d16575ec 100644
--- a/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate.cc
+++ b/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate.cc
@@ -61,7 +61,10 @@ int32_t GetAndroidSdkVersion() {
return 0;
}
+constexpr int32_t kMinSdkVersionForNNAPI = 27;
+constexpr int32_t kMinSdkVersionForNNAPI11 = 28;
static const int32_t kAndroidSdkVersion = GetAndroidSdkVersion();
+
} // namespace
// RAII NN API Model Destructor for use with std::unique_ptr
@@ -133,6 +136,12 @@ class NNAPIOpBuilder {
return AddScalarOperand<float>(value, ANEURALNETWORKS_FLOAT32);
}
+ TfLiteStatus AddVectorInt32Operand(const int32_t* values,
+ uint32_t num_values) {
+ return AddVectorOperand<int32_t>(values, num_values,
+ ANEURALNETWORKS_TENSOR_INT32);
+ }
+
TfLiteStatus AddPoolingParams(void* data) {
auto builtin = reinterpret_cast<TfLitePoolParams*>(data);
AddScalarInt32Operand(builtin->padding);
@@ -244,6 +253,21 @@ class NNAPIOpBuilder {
return kTfLiteOk;
}
+ template <typename T>
+ TfLiteStatus AddVectorOperand(const T* values, uint32_t num_values,
+ int32_t nn_type) {
+ ANeuralNetworksOperandType operand_type{
+ .type = nn_type, .dimensionCount = 1, .dimensions = &num_values};
+ CHECK_NN(context_,
+ ANeuralNetworksModel_addOperand(nn_model_, &operand_type));
+ int ann_operand = operand_mapping_->add_new_non_tensor_operand();
+ CHECK_NN(context_,
+ ANeuralNetworksModel_setOperandValue(
+ nn_model_, ann_operand, values, sizeof(T) * num_values));
+ augmented_inputs_.push_back(ann_operand);
+ return kTfLiteOk;
+ }
+
// TfLiteContext for error handling. Must be named context for macros to
// work.
TfLiteContext* context_;
@@ -411,6 +435,40 @@ class NNAPIDelegateKernel {
return nullptr;
}
break;
+ case kTfLiteBuiltinSqueeze:
+ // Squeeze requires NNAPI 1.1.
+ if (version == 1 && kAndroidSdkVersion >= kMinSdkVersionForNNAPI11) {
+ return [](TfLiteContext* context, NNAPIOpBuilder* builder,
+ TfLiteNode* node) -> ANeuralNetworksOperationType {
+ auto builtin =
+ reinterpret_cast<TfLiteSqueezeParams*>(node->builtin_data);
+ // Note that we add the squeeze dimensions even if the dimensions
+ // were unspecified (empty), as NNAPI requires the operand.
+ builder->AddVectorInt32Operand(
+ builtin->squeeze_dims,
+ static_cast<uint32_t>(builtin->num_squeeze_dims));
+ return ANEURALNETWORKS_SQUEEZE;
+ };
+ } else {
+ return nullptr;
+ }
+ case kTfLiteBuiltinTranspose:
+ // Transpose requires NNAPI 1.1. Note that the value of the permutation
+ // input tensor dictates the output dimensions.
+ // TODO(b/110888333): Support dynamically-sized tensors in delegates.
+ if ((version == 1) &&
+ (kAndroidSdkVersion >= kMinSdkVersionForNNAPI11) &&
+ (node->inputs->size > 1) &&
+ (context->tensors[node->inputs->data[1]].allocation_type ==
+ kTfLiteMmapRo)) {
+ return [](TfLiteContext* context, NNAPIOpBuilder* builder,
+ TfLiteNode* node) -> ANeuralNetworksOperationType {
+ return ANEURALNETWORKS_TRANSPOSE;
+ };
+ } else {
+ return nullptr;
+ }
+ break;
default:
return nullptr;
}
@@ -560,8 +618,9 @@ TfLiteDelegate* NnApiDelegate() {
.Prepare = [](TfLiteContext* context,
TfLiteDelegate* delegate) -> TfLiteStatus {
// Do not check nodes_ if NN API is unavailable.
- // NN API is only available since Android O-MR1 (API 27).
- if (kAndroidSdkVersion < 27 || !NNAPIExists()) return kTfLiteOk;
+ if (kAndroidSdkVersion < kMinSdkVersionForNNAPI || !NNAPIExists()) {
+ return kTfLiteOk;
+ }
std::vector<int> supported_nodes(1);
// We don't care about all nodes_, we only care about ones in the
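The new Squeeze and Transpose cases above only delegate when the device runs at least `kMinSdkVersionForNNAPI11` (API 28) and, for Transpose, when the permutation tensor is a compile-time constant (`kTfLiteMmapRo`). From the application side the opt-in is unchanged; a minimal sketch, mirroring the test refactor below:

```c++
#include "tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/contrib/lite/interpreter.h"

// Sketch: route a built interpreter through NNAPI. Nodes the delegate
// declines (e.g. a Transpose whose permutation is not constant) simply
// stay on the default CPU path.
TfLiteStatus ApplyNnApi(tflite::Interpreter* interpreter) {
  return interpreter->ModifyGraphWithDelegate(tflite::NnApiDelegate(),
                                              /*allow_dynamic_tensors=*/false);
}
```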
diff --git a/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate_test.cc b/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate_test.cc
index 799e3efe0b..ab2181e8ff 100644
--- a/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate_test.cc
+++ b/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate_test.cc
@@ -27,14 +27,20 @@ using ::testing::ElementsAreArray;
// TODO(b/110368244): figure out how to share the existing tests in kernels/ but
// with the delegation on. Also, add more unit tests to improve code coverage.
-class FloatAddOpModel : public SingleOpModel {
+class SingleOpModelWithNNAPI : public SingleOpModel {
+ public:
+ SingleOpModelWithNNAPI() {
+ this->SetApplyDelegate([](Interpreter* interpreter) {
+ interpreter->ModifyGraphWithDelegate(NnApiDelegate(), false);
+ });
+ }
+};
+
+class FloatAddOpModel : public SingleOpModelWithNNAPI {
public:
FloatAddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type) {
- this->SetApplyDelegate([](Interpreter* interpreter) {
- interpreter->ModifyGraphWithDelegate(NnApiDelegate());
- });
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
@@ -81,9 +87,6 @@ class FloatMulOpModel : public SingleOpModel {
FloatMulOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type) {
- this->SetApplyDelegate([](Interpreter* interpreter) {
- interpreter->ModifyGraphWithDelegate(NnApiDelegate());
- });
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
@@ -114,15 +117,11 @@ TEST(NNAPIDelegate, MulWithNoActivation) {
ElementsAreArray(ArrayFloatNear({-0.2, 0.04, 0.21, 0.4})));
}
-class FloatPoolingOpModel : public SingleOpModel {
+class FloatPoolingOpModel : public SingleOpModelWithNNAPI {
public:
FloatPoolingOpModel(BuiltinOperator type, const TensorData& input,
int filter_width, int filter_height,
const TensorData& output) {
- this->SetApplyDelegate([](Interpreter* interpreter) {
- interpreter->ModifyGraphWithDelegate(NnApiDelegate());
- });
-
input_ = AddInput(input);
output_ = AddOutput(output);
@@ -193,10 +192,6 @@ class BaseConvolutionOpModel : public SingleOpModel {
enum Padding padding = Padding_VALID,
enum ActivationFunctionType activation = ActivationFunctionType_NONE,
int dilation_width_factor = 1, int dilation_height_factor = 1) {
- this->SetApplyDelegate([](Interpreter* interpreter) {
- interpreter->ModifyGraphWithDelegate(NnApiDelegate());
- });
-
input_ = AddInput(input);
filter_ = AddInput(filter);
@@ -344,14 +339,10 @@ TEST(NNAPIDelegate, Conv2DWithNoActivation) {
}));
}
-class DepthwiseConvolutionOpModel : public SingleOpModel {
+class DepthwiseConvolutionOpModel : public SingleOpModelWithNNAPI {
public:
DepthwiseConvolutionOpModel(const TensorData& input, const TensorData& filter,
const TensorData& output) {
- this->SetApplyDelegate([](Interpreter* interpreter) {
- interpreter->ModifyGraphWithDelegate(NnApiDelegate());
- });
-
input_ = AddInput(input);
filter_ = AddInput(filter);
@@ -426,15 +417,11 @@ TEST(NNAPIDelegate, DepthwiseConv2DWithNoActivation) {
}));
}
-class FloatFullyConnectedOpModel : public SingleOpModel {
+class FloatFullyConnectedOpModel : public SingleOpModelWithNNAPI {
public:
FloatFullyConnectedOpModel(int units, int batches, const TensorData& input,
const TensorData& output = {TensorType_FLOAT32})
: batches_(batches), units_(units) {
- this->SetApplyDelegate([](Interpreter* interpreter) {
- interpreter->ModifyGraphWithDelegate(NnApiDelegate());
- });
-
int total_input_size = 1;
for (int i = 0; i < input.shape.size(); ++i) {
total_input_size *= input.shape[i];
@@ -515,14 +502,10 @@ TEST(NNAPIDelegate, FullyConnectedSimpleTest) {
EXPECT_THAT(m.GetOutput(), ElementsAre(24, 25, 26, 58, 59, 60));
}
-class SoftmaxOpModel : public SingleOpModel {
+class SoftmaxOpModel : public SingleOpModelWithNNAPI {
public:
SoftmaxOpModel(int batches, int size, float beta)
: batches_(batches), input_size_(size), beta_(beta) {
- this->SetApplyDelegate([](Interpreter* interpreter) {
- interpreter->ModifyGraphWithDelegate(NnApiDelegate());
- });
-
input_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_SOFTMAX, BuiltinOptions_SoftmaxOptions,
@@ -566,14 +549,10 @@ TEST(NNAPIDelegate, SoftmaxSimpleTest) {
1e-6)));
}
-class ReshapeOpModel : public SingleOpModel {
+class ReshapeOpModel : public SingleOpModelWithNNAPI {
public:
ReshapeOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> new_shape) {
- this->SetApplyDelegate([](Interpreter* interpreter) {
- interpreter->ModifyGraphWithDelegate(NnApiDelegate());
- });
-
input_ = AddInput(TensorType_FLOAT32);
new_shape_ = AddInput(TensorType_INT32);
output_ = AddOutput(TensorType_FLOAT32);
@@ -605,6 +584,100 @@ TEST(NNAPIDelegate, ReshapeSimpleTest) {
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
+class SqueezeOpModel : public SingleOpModelWithNNAPI {
+ public:
+ SqueezeOpModel(const TensorData& input, const TensorData& output,
+ std::initializer_list<int> axis) {
+ input_ = AddInput(input);
+ output_ = AddOutput(output);
+ SetBuiltinOp(
+ BuiltinOperator_SQUEEZE, BuiltinOptions_SqueezeOptions,
+ CreateSqueezeOptions(builder_, builder_.CreateVector<int>(axis))
+ .Union());
+ BuildInterpreter({GetShape(input_)});
+ }
+
+ void SetInput(std::initializer_list<float> data) {
+ PopulateTensor<float>(input_, data);
+ }
+ std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
+ std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
+
+ private:
+ int input_;
+ int output_;
+};
+
+TEST(NNAPIDelegate, SqueezeSimpleTest) {
+ std::initializer_list<float> data = {
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ SqueezeOpModel m({TensorType_FLOAT32, {1, 24, 1}}, {TensorType_FLOAT32, {24}},
+ {});
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({24}));
+ EXPECT_THAT(
+ m.GetOutput(),
+ ElementsAreArray({1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}));
+}
+
+TEST(NNAPIDelegate, SqueezeWithAxisTest) {
+ std::initializer_list<float> data = {
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ SqueezeOpModel m({TensorType_FLOAT32, {1, 24, 1}}, {TensorType_FLOAT32, {24}},
+ {2});
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 24}));
+ EXPECT_THAT(
+ m.GetOutput(),
+ ElementsAreArray({1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}));
+}
+
+class TransposeSimpleModel : public SingleOpModelWithNNAPI {
+ public:
+ TransposeSimpleModel(std::initializer_list<int> input_shape,
+ std::initializer_list<int> perm_shape,
+ std::initializer_list<int> perm) {
+ input_ = AddInput(TensorType_FLOAT32);
+ perm_ = AddConstInput(TensorType_INT32, perm, perm_shape);
+ output_ = AddOutput(TensorType_FLOAT32);
+ SetBuiltinOp(BuiltinOperator_TRANSPOSE, BuiltinOptions_TransposeOptions,
+ CreateTransposeOptions(builder_).Union());
+ BuildInterpreter({input_shape, perm_shape});
+ }
+
+ void SetInput(std::initializer_list<float> data) {
+ PopulateTensor<float>(input_, data);
+ }
+
+ std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
+ std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
+
+ private:
+ int input_;
+ int perm_;
+ int output_;
+};
+
+TEST(NNAPIDelegate, TransposeSimpleTest) {
+ TransposeSimpleModel m({2, 3, 4}, {3}, {2, 0, 1});
+ m.SetInput({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 3}));
+ EXPECT_THAT(m.GetOutput(),
+ ElementsAreArray({0, 4, 8, 12, 16, 20, 1, 5, 9, 13, 17, 21,
+ 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23}));
+}
+
} // namespace
} // namespace tflite
diff --git a/tensorflow/contrib/lite/examples/android/BUILD b/tensorflow/contrib/lite/examples/android/BUILD
index dd2cd17324..4d2437e7d3 100644
--- a/tensorflow/contrib/lite/examples/android/BUILD
+++ b/tensorflow/contrib/lite/examples/android/BUILD
@@ -37,6 +37,7 @@ android_binary(
"@tflite_conv_actions_frozen//:conv_actions_frozen.tflite",
"//tensorflow/contrib/lite/examples/android/app/src/main/assets:conv_actions_labels.txt",
"@tflite_mobilenet_ssd//:mobilenet_ssd.tflite",
+ "@tflite_mobilenet_ssd_quant//:detect.tflite",
"//tensorflow/contrib/lite/examples/android/app/src/main/assets:box_priors.txt",
"//tensorflow/contrib/lite/examples/android/app/src/main/assets:coco_labels_list.txt",
],
diff --git a/tensorflow/contrib/lite/examples/android/app/README.md b/tensorflow/contrib/lite/examples/android/app/README.md
new file mode 100644
index 0000000000..8e12bd04dd
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/android/app/README.md
@@ -0,0 +1,19 @@
+# TF Lite Android App Example
+
+## Building from Source with Bazel
+
+1. Follow the [Bazel steps for the TF Demo App](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android#bazel).
+
+2. Build the app with Bazel. The demo needs C++11, and the `--fat_apk_cpu` flag packages support for four hardware variants. To target a single architecture, replace that flag with `--config=android_arm64` for a 64-bit device or `--config=android_arm` for a 32-bit device:
+
+ ```shell
+ bazel build -c opt --cxxopt='--std=c++11' --fat_apk_cpu=x86,x86_64,arm64-v8a,armeabi-v7a \
+ //tensorflow/contrib/lite/examples/android:tflite_demo
+ ```
+
+3. Install the demo on a
+ [debug-enabled device](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android#install):
+
+ ```shell
+ adb install bazel-bin/tensorflow/contrib/lite/examples/android/tflite_demo.apk
+ ```
diff --git a/tensorflow/contrib/lite/examples/android/app/build.gradle b/tensorflow/contrib/lite/examples/android/app/build.gradle
index 8e0a98ed63..eb7fd705e1 100644
--- a/tensorflow/contrib/lite/examples/android/app/build.gradle
+++ b/tensorflow/contrib/lite/examples/android/app/build.gradle
@@ -9,7 +9,7 @@ android {
targetSdkVersion 26
versionCode 1
versionName "1.0"
- testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
+ testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
// Remove this block.
jackOptions {
@@ -51,7 +51,7 @@ apply from: "download-models.gradle"
dependencies {
compile fileTree(dir: 'libs', include: ['*.jar'])
- androidTestCompile('com.android.support.test.espresso:espresso-core:2.2.2', {
+ androidTestCompile('androidx.test.espresso:espresso-core:3.1.0-alpha3', {
exclude group: 'com.android.support', module: 'support-annotations'
})
compile 'org.tensorflow:tensorflow-lite:0.0.0-nightly'
diff --git a/tensorflow/contrib/lite/examples/android/app/download-models.gradle b/tensorflow/contrib/lite/examples/android/app/download-models.gradle
index 8e65dc076f..c100e37c16 100644
--- a/tensorflow/contrib/lite/examples/android/app/download-models.gradle
+++ b/tensorflow/contrib/lite/examples/android/app/download-models.gradle
@@ -12,8 +12,9 @@
def models = ['conv_actions_tflite.zip',
'mobilenet_ssd_tflite_v1.zip',
- 'mobilenet_v1_224_android_quant_2017_11_08.zip']
-// LINT.ThenChange(//tensorflow/examples/android/BUILD)
+ 'mobilenet_v1_224_android_quant_2017_11_08.zip',
+ 'coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip']
+// LINT.ThenChange(//tensorflow/contrib/lite/examples/android/BUILD)
// Root URL for model archives
def MODEL_URL = 'https://storage.googleapis.com/download.tensorflow.org/models/tflite'
diff --git a/tensorflow/contrib/lite/examples/android/app/src/main/assets/pets_labels_list.txt b/tensorflow/contrib/lite/examples/android/app/src/main/assets/pets_labels_list.txt
new file mode 100644
index 0000000000..d581f733e4
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/android/app/src/main/assets/pets_labels_list.txt
@@ -0,0 +1,38 @@
+???
+Abyssinian
+american_bulldog
+american_pit_bull_terrier
+basset_hound
+beagle
+Bengal
+Birman
+Bombay
+boxer
+British_Shorthair
+chihuahua
+Egyptian_Mau
+english_cocker_spaniel
+english_setter
+german_shorthaired
+great_pyrenees
+havanese
+japanese_chin
+keeshond
+leonberger
+Maine_Coon
+miniature_pinscher
+newfoundland
+Persian
+pomeranian
+pug
+Ragdoll
+Russian_Blue
+saint_bernard
+samoyed
+scottish_terrier
+shiba_inu
+Siamese
+Sphynx
+staffordshire_bull_terrier
+wheaten_terrier
+yorkshire_terrier
diff --git a/tensorflow/contrib/lite/examples/android/app/src/main/java/org/tensorflow/demo/DetectorActivity.java b/tensorflow/contrib/lite/examples/android/app/src/main/java/org/tensorflow/demo/DetectorActivity.java
index de997e454a..87160f6b3f 100644
--- a/tensorflow/contrib/lite/examples/android/app/src/main/java/org/tensorflow/demo/DetectorActivity.java
+++ b/tensorflow/contrib/lite/examples/android/app/src/main/java/org/tensorflow/demo/DetectorActivity.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+ * Copyright 2018 The TensorFlow Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -50,9 +50,10 @@ public class DetectorActivity extends CameraActivity implements OnImageAvailable
// Configuration values for the prepackaged SSD model.
private static final int TF_OD_API_INPUT_SIZE = 300;
- private static final String TF_OD_API_MODEL_FILE = "mobilenet_ssd.tflite";
+ private static final boolean TF_OD_API_IS_QUANTIZED = true;
+ private static final String TF_OD_API_MODEL_FILE = "detect.tflite";
private static final String TF_OD_API_LABELS_FILE = "file:///android_asset/coco_labels_list.txt";
-
+
// Which detection model to use: by default uses Tensorflow Object Detection API frozen
// checkpoints.
private enum DetectorMode {
@@ -107,7 +108,11 @@ public class DetectorActivity extends CameraActivity implements OnImageAvailable
try {
detector =
TFLiteObjectDetectionAPIModel.create(
- getAssets(), TF_OD_API_MODEL_FILE, TF_OD_API_LABELS_FILE, TF_OD_API_INPUT_SIZE);
+ getAssets(),
+ TF_OD_API_MODEL_FILE,
+ TF_OD_API_LABELS_FILE,
+ TF_OD_API_INPUT_SIZE,
+ TF_OD_API_IS_QUANTIZED);
cropSize = TF_OD_API_INPUT_SIZE;
} catch (final IOException e) {
LOGGER.e("Exception initializing classifier!", e);
diff --git a/tensorflow/contrib/lite/examples/android/app/src/main/java/org/tensorflow/demo/TFLiteObjectDetectionAPIModel.java b/tensorflow/contrib/lite/examples/android/app/src/main/java/org/tensorflow/demo/TFLiteObjectDetectionAPIModel.java
index bfb4a0a04b..9eb21de9d0 100644
--- a/tensorflow/contrib/lite/examples/android/app/src/main/java/org/tensorflow/demo/TFLiteObjectDetectionAPIModel.java
+++ b/tensorflow/contrib/lite/examples/android/app/src/main/java/org/tensorflow/demo/TFLiteObjectDetectionAPIModel.java
@@ -25,15 +25,14 @@ import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
-import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.PriorityQueue;
-import java.util.StringTokenizer;
import java.util.Vector;
import org.tensorflow.demo.env.Logger;
import org.tensorflow.lite.Interpreter;
@@ -46,32 +45,35 @@ public class TFLiteObjectDetectionAPIModel implements Classifier {
private static final Logger LOGGER = new Logger();
// Only return this many results.
- private static final int NUM_RESULTS = 1917;
- private static final int NUM_CLASSES = 91;
-
- private static final float Y_SCALE = 10.0f;
- private static final float X_SCALE = 10.0f;
- private static final float H_SCALE = 5.0f;
- private static final float W_SCALE = 5.0f;
-
+ private static final int NUM_DETECTIONS = 10;
+ private boolean isModelQuantized;
+ // Float model
+ private static final float IMAGE_MEAN = 128.0f;
+ private static final float IMAGE_STD = 128.0f;
+ // Number of threads in the Java app.
+ private static final int NUM_THREADS = 4;
// Config values.
private int inputSize;
-
- private final float[][] boxPriors = new float[4][NUM_RESULTS];
-
// Pre-allocated buffers.
private Vector<String> labels = new Vector<String>();
private int[] intValues;
+ // outputLocations: array of shape [Batchsize, NUM_DETECTIONS, 4]
+ // contains the location of detected boxes
private float[][][] outputLocations;
- private float[][][] outputClasses;
-
- float[][][][] img;
+ // outputClasses: array of shape [Batchsize, NUM_DETECTIONS]
+ // contains the classes of detected boxes
+ private float[][] outputClasses;
+ // outputScores: array of shape [Batchsize, NUM_DETECTIONS]
+ // contains the scores of detected boxes
+ private float[][] outputScores;
+ // numDetections: array of shape [Batchsize]
+ // contains the number of detected boxes
+ private float[] numDetections;
+
+ private ByteBuffer imgData;
private Interpreter tfLite;
- private float expit(final float x) {
- return (float) (1. / (1. + Math.exp(-x)));
- }
/** Memory-map the model file in Assets. */
private static MappedByteBuffer loadModelFile(AssetManager assets, String modelFilename)
@@ -84,77 +86,24 @@ public class TFLiteObjectDetectionAPIModel implements Classifier {
return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength);
}
- private void loadCoderOptions(
- final AssetManager assetManager, final String locationFilename, final float[][] boxPriors)
- throws IOException {
- // Try to be intelligent about opening from assets or sdcard depending on prefix.
- final String assetPrefix = "file:///android_asset/";
- InputStream is;
- if (locationFilename.startsWith(assetPrefix)) {
- is = assetManager.open(locationFilename.split(assetPrefix, -1)[1]);
- } else {
- is = new FileInputStream(locationFilename);
- }
-
- final BufferedReader reader = new BufferedReader(new InputStreamReader(is));
-
- for (int lineNum = 0; lineNum < 4; ++lineNum) {
- String line = reader.readLine();
- final StringTokenizer st = new StringTokenizer(line, ", ");
- int priorIndex = 0;
- while (st.hasMoreTokens()) {
- final String token = st.nextToken();
- try {
- final float number = Float.parseFloat(token);
- boxPriors[lineNum][priorIndex++] = number;
- } catch (final NumberFormatException e) {
- // Silently ignore.
- }
- }
- if (priorIndex != NUM_RESULTS) {
- throw new RuntimeException(
- "BoxPrior length mismatch: " + priorIndex + " vs " + NUM_RESULTS);
- }
- }
-
- LOGGER.i("Loaded box priors!");
- }
-
- void decodeCenterSizeBoxes(float[][][] predictions) {
- for (int i = 0; i < NUM_RESULTS; ++i) {
- float ycenter = predictions[0][i][0] / Y_SCALE * boxPriors[2][i] + boxPriors[0][i];
- float xcenter = predictions[0][i][1] / X_SCALE * boxPriors[3][i] + boxPriors[1][i];
- float h = (float) Math.exp(predictions[0][i][2] / H_SCALE) * boxPriors[2][i];
- float w = (float) Math.exp(predictions[0][i][3] / W_SCALE) * boxPriors[3][i];
-
- float ymin = ycenter - h / 2.f;
- float xmin = xcenter - w / 2.f;
- float ymax = ycenter + h / 2.f;
- float xmax = xcenter + w / 2.f;
-
- predictions[0][i][0] = ymin;
- predictions[0][i][1] = xmin;
- predictions[0][i][2] = ymax;
- predictions[0][i][3] = xmax;
- }
- }
-
/**
* Initializes a native TensorFlow Lite interpreter for classifying images.
*
* @param assetManager The asset manager to be used to load assets.
* @param modelFilename The filepath of the model flatbuffer (.tflite) file.
* @param labelFilename The filepath of the label file for classes.
+ * @param inputSize The size of the image input.
+ * @param isQuantized Whether the model is quantized.
*/
public static Classifier create(
final AssetManager assetManager,
final String modelFilename,
final String labelFilename,
- final int inputSize) throws IOException {
+ final int inputSize,
+ final boolean isQuantized)
+ throws IOException {
final TFLiteObjectDetectionAPIModel d = new TFLiteObjectDetectionAPIModel();
- d.loadCoderOptions(assetManager, "file:///android_asset/box_priors.txt", d.boxPriors);
-
InputStream labelsInput = null;
String actualFilename = labelFilename.split("file:///android_asset/")[1];
labelsInput = assetManager.open(actualFilename);
@@ -175,12 +124,23 @@ public class TFLiteObjectDetectionAPIModel implements Classifier {
throw new RuntimeException(e);
}
+ d.isModelQuantized = isQuantized;
// Pre-allocate buffers.
- d.img = new float[1][inputSize][inputSize][3];
-
+ int numBytesPerChannel;
+ if (isQuantized) {
+ numBytesPerChannel = 1; // Quantized
+ } else {
+ numBytesPerChannel = 4; // Floating point
+ }
+ d.imgData = ByteBuffer.allocateDirect(1 * d.inputSize * d.inputSize * 3 * numBytesPerChannel);
+ d.imgData.order(ByteOrder.nativeOrder());
d.intValues = new int[d.inputSize * d.inputSize];
- d.outputLocations = new float[1][NUM_RESULTS][4];
- d.outputClasses = new float[1][NUM_RESULTS][NUM_CLASSES];
+
+ d.tfLite.setNumThreads(NUM_THREADS);
+ d.outputLocations = new float[1][NUM_DETECTIONS][4];
+ d.outputClasses = new float[1][NUM_DETECTIONS];
+ d.outputScores = new float[1][NUM_DETECTIONS];
+ d.numDetections = new float[1];
return d;
}
@@ -196,25 +156,37 @@ public class TFLiteObjectDetectionAPIModel implements Classifier {
// on the provided parameters.
bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
+ imgData.rewind();
for (int i = 0; i < inputSize; ++i) {
for (int j = 0; j < inputSize; ++j) {
- int pixel = intValues[j * inputSize + i];
- img[0][j][i][2] = (float) (pixel & 0xFF) / 128.0f - 1.0f;
- img[0][j][i][1] = (float) ((pixel >> 8) & 0xFF) / 128.0f - 1.0f;
- img[0][j][i][0] = (float) ((pixel >> 16) & 0xFF) / 128.0f - 1.0f;
+ int pixelValue = intValues[i * inputSize + j];
+ if (isModelQuantized) {
+ // Quantized model
+ imgData.put((byte) ((pixelValue >> 16) & 0xFF));
+ imgData.put((byte) ((pixelValue >> 8) & 0xFF));
+ imgData.put((byte) (pixelValue & 0xFF));
+ } else { // Float model
+ imgData.putFloat((((pixelValue >> 16) & 0xFF) - IMAGE_MEAN) / IMAGE_STD);
+ imgData.putFloat((((pixelValue >> 8) & 0xFF) - IMAGE_MEAN) / IMAGE_STD);
+ imgData.putFloat(((pixelValue & 0xFF) - IMAGE_MEAN) / IMAGE_STD);
+ }
}
}
Trace.endSection(); // preprocessBitmap
// Copy the input data into TensorFlow.
Trace.beginSection("feed");
- outputLocations = new float[1][NUM_RESULTS][4];
- outputClasses = new float[1][NUM_RESULTS][NUM_CLASSES];
+ outputLocations = new float[1][NUM_DETECTIONS][4];
+ outputClasses = new float[1][NUM_DETECTIONS];
+ outputScores = new float[1][NUM_DETECTIONS];
+ numDetections = new float[1];
- Object[] inputArray = {img};
+ Object[] inputArray = {imgData};
Map<Integer, Object> outputMap = new HashMap<>();
outputMap.put(0, outputLocations);
outputMap.put(1, outputClasses);
+ outputMap.put(2, outputScores);
+ outputMap.put(3, numDetections);
Trace.endSection();
// Run the inference call.
@@ -222,56 +194,26 @@ public class TFLiteObjectDetectionAPIModel implements Classifier {
tfLite.runForMultipleInputsOutputs(inputArray, outputMap);
Trace.endSection();
- decodeCenterSizeBoxes(outputLocations);
-
- // Find the best detections.
- final PriorityQueue<Recognition> pq =
- new PriorityQueue<Recognition>(
- 1,
- new Comparator<Recognition>() {
- @Override
- public int compare(final Recognition lhs, final Recognition rhs) {
- // Intentionally reversed to put high confidence at the head of the queue.
- return Float.compare(rhs.getConfidence(), lhs.getConfidence());
- }
- });
-
- // Scale them back to the input size.
- for (int i = 0; i < NUM_RESULTS; ++i) {
- float topClassScore = -1000f;
- int topClassScoreIndex = -1;
-
- // Skip the first catch-all class.
- for (int j = 1; j < NUM_CLASSES; ++j) {
- float score = expit(outputClasses[0][i][j]);
-
- if (score > topClassScore) {
- topClassScoreIndex = j;
- topClassScore = score;
- }
- }
-
- if (topClassScore > 0.001f) {
- final RectF detection =
- new RectF(
- outputLocations[0][i][1] * inputSize,
- outputLocations[0][i][0] * inputSize,
- outputLocations[0][i][3] * inputSize,
- outputLocations[0][i][2] * inputSize);
-
- pq.add(
- new Recognition(
- "" + i,
- labels.get(topClassScoreIndex),
- outputClasses[0][i][topClassScoreIndex],
- detection));
- }
- }
-
- final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
- for (int i = 0; i < Math.min(pq.size(), 10); ++i) {
- Recognition recog = pq.poll();
- recognitions.add(recog);
+ // Show the best detections, after scaling them back to the input size.
+ final ArrayList<Recognition> recognitions = new ArrayList<>(NUM_DETECTIONS);
+ for (int i = 0; i < NUM_DETECTIONS; ++i) {
+ final RectF detection =
+ new RectF(
+ outputLocations[0][i][1] * inputSize,
+ outputLocations[0][i][0] * inputSize,
+ outputLocations[0][i][3] * inputSize,
+ outputLocations[0][i][2] * inputSize);
+ // The SSD MobileNet V1 model treats class 0 as the background class:
+ // labels in the label file run from 1 to number_of_classes, while
+ // outputClasses indexes real classes from 0, hence the offset of 1.
+ int labelOffset = 1;
+ recognitions.add(
+ new Recognition(
+ "" + i,
+ labels.get((int) outputClasses[0][i] + labelOffset),
+ outputScores[0][i],
+ detection));
}
Trace.endSection(); // "recognizeImage"
return recognitions;
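The rewritten preprocessing loop above packs each pixel either as three raw `uint8` channels (quantized model) or as three `(v - IMAGE_MEAN) / IMAGE_STD` floats. The logic is language-agnostic; a self-contained C++ sketch of the same per-pixel conversion, assuming Android-style ARGB ints:

```c++
#include <cstdint>
#include <vector>

// Sketch: pack one ARGB pixel into a model input buffer. For a quantized
// model each channel is the raw 8-bit value; for a float model each
// channel is normalized with mean/std of 128, matching the Java code.
void PackPixel(uint32_t pixel, bool quantized,
               std::vector<uint8_t>* quant_buf,
               std::vector<float>* float_buf) {
  const uint8_t r = (pixel >> 16) & 0xFF;
  const uint8_t g = (pixel >> 8) & 0xFF;
  const uint8_t b = pixel & 0xFF;
  if (quantized) {
    quant_buf->insert(quant_buf->end(), {r, g, b});
  } else {
    constexpr float kMean = 128.0f;
    constexpr float kStd = 128.0f;
    float_buf->push_back((r - kMean) / kStd);
    float_buf->push_back((g - kMean) / kStd);
    float_buf->push_back((b - kMean) / kStd);
  }
}
```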
diff --git a/tensorflow/contrib/lite/g3doc/benchmarks.md b/tensorflow/contrib/lite/g3doc/benchmarks.md
new file mode 100644
index 0000000000..96536cba27
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/benchmarks.md
@@ -0,0 +1,178 @@
+# Performance benchmark numbers
+
+This document lists performance benchmark numbers for running a few well-known
+models on some Android and iOS devices.
+
+The benchmark numbers were generated by running the [TFLite benchmark
+binary](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark)
+on Android and running the [iOS benchmark
+app](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark/ios)
+on iOS.
+
+# Android benchmarks
+
+When running Android benchmarks, the CPU affinity is set to use big cores on the
+device to reduce variance (see
+[details](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark#reducing-variance-between-runs-on-android)).
+
+Models are assumed to have been downloaded from the links in the tables below,
+unzipped, and pushed to the `/data/local/tmp/tflite_models` folder. The
+benchmark binary is built according to the instructions listed
+[here](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark#on-android)
+and is assumed to have been pushed to `/data/local/tmp`.
+
+The following command was used to run the benchmark:
+
+```
+adb shell taskset ${CPU_MASK} /data/local/tmp/benchmark_model \
+ --num_threads=1 \
+ --graph=/data/local/tmp/tflite_models/${GRAPH} \
+ --warmup_runs=1 \
+ --num_runs=50 \
+ --use_nnapi=false
+```
+
+where `${GRAPH}` is the name of the model and `${CPU_MASK}` is the CPU affinity
+mask chosen according to the following table. The mask is the hexadecimal
+bitmask of allowed CPUs passed to `taskset`, so `f0` pins the run to cores 4-7
+and `0c` to cores 2-3:
+
+Device   | CPU_MASK
+-------- | --------
+Pixel 2  | f0
+Pixel XL | 0c
+
+
+<table>
+ <thead>
+ <tr>
+ <th>Model Name</th>
+ <th>Device </th>
+ <th>Mean inference time (std dev)</th>
+ </tr>
+ </thead>
+ <tr>
+ <td rowspan = 2>
+ <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz">Mobilenet_1.0_224(float)</a>
+ </td>
+ <td>Pixel 2 </td>
+ <td>166.5 ms (2.6 ms)</td>
+ </tr>
+ <tr>
+ <td>Pixel XL</td>
+ <td>122.9 ms (1.8 ms) </td>
+ </tr>
+ <tr>
+ <td rowspan = 2>
+ <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz">Mobilenet_1.0_224 (quant)</a>
+ </td>
+ <td>Pixel 2 </td>
+ <td>69.5 ms (0.9 ms)</td>
+ </tr>
+ <tr>
+ <td>Pixel XL</td>
+ <td>78.9 ms (2.2 ms) </td>
+ </tr>
+ <tr>
+ <td rowspan = 2>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz">NASNet mobile</a>
+ </td>
+ <td>Pixel 2 </td>
+ <td>273.8 ms (3.5 ms)</td>
+ </tr>
+ <tr>
+ <td>Pixel XL</td>
+ <td>210.8 ms (4.2 ms)</td>
+ </tr>
+ <tr>
+ <td rowspan = 2>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz">SqueezeNet</a>
+ </td>
+ <td>Pixel 2 </td>
+ <td>234.0 ms (2.1 ms)</td>
+ </tr>
+ <tr>
+ <td>Pixel XL</td>
+ <td>158.0 ms (2.1 ms)</td>
+ </tr>
+ <tr>
+ <td rowspan = 2>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz">Inception_ResNet_V2</a>
+ </td>
+ <td>Pixel 2 </td>
+ <td>2846.0 ms (15.0 ms)</td>
+ </tr>
+ <tr>
+ <td>Pixel XL</td>
+ <td>1973.0 ms (15.0 ms) </td>
+ </tr>
+ <tr>
+ <td rowspan = 2>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz">Inception_V4</a>
+ </td>
+ <td>Pixel 2 </td>
+ <td>3180.0 ms (11.7 ms)</td>
+ </tr>
+ <tr>
+ <td>Pixel XL</td>
+ <td>2262.0 ms (21.0 ms) </td>
+ </tr>
+
+ </table>
+
+# iOS benchmarks
+
+For the iOS benchmarks, the [benchmark
+app](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark/ios)
+was modified to include the appropriate model, and `benchmark_params.json` was
+updated to set `num_threads` to 1.
+
+<table>
+ <thead>
+ <tr>
+ <th>Model Name</th>
+ <th>Device </th>
+ <th>Mean inference time (std dev)</th>
+ </tr>
+ </thead>
+ <tr>
+ <td>
+ <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz">Mobilenet_1.0_224(float)</a>
+ </td>
+ <td>iPhone 8 </td>
+ <td>32.2 ms (0.8 ms)</td>
+ </tr>
+ <tr>
+ <td>
+ <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz">Mobilenet_1.0_224 (quant)</a>
+ </td>
+ <td>iPhone 8 </td>
+ <td>24.4 ms (0.8 ms)</td>
+ </tr>
+ <tr>
+ <td>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz">NASNet mobile</a>
+ </td>
+ <td>iPhone 8 </td>
+ <td>60.3 ms (0.6 ms)</td>
+ </tr>
+ <tr>
+ <td>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz">SqueezeNet</a>
+ </td>
+ <td>iPhone 8 </td>
+ <td>44.3 ms (0.7 ms)</td>
+ </tr>
+ <tr>
+ <td>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz">Inception_ResNet_V2</a>
+ </td>
+ <td>iPhone 8</td>
+ <td>562.4 ms (18.2 ms)</td>
+ </tr>
+ <tr>
+ <td>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz">Inception_V4</a>
+ </td>
+ <td>iPhone 8 </td>
+ <td>661.0 ms (29.2 ms)</td>
+ </tr>
+ </table>
diff --git a/tensorflow/contrib/lite/g3doc/models.md b/tensorflow/contrib/lite/g3doc/models.md
index c1c8ef049f..4e7d33a1b6 100644
--- a/tensorflow/contrib/lite/g3doc/models.md
+++ b/tensorflow/contrib/lite/g3doc/models.md
@@ -39,22 +39,22 @@ single thread large core.
Model Name | Paper_Model_Files | Model_Size | Top-1 Accuracy | Top-5 Accuracy | TF Lite Performance
------------------------ | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | ---------: | -------------: | -------------: | ------------------:
-Mobilenet_0.25_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128_quant.tgz) | 0.5 Mb | 39.9% | 65.8% | 3.7 ms
-Mobilenet_0.25_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_160_quant.tgz) | 0.5 Mb | 43.5% | 69.1% | 5.5 ms
-Mobilenet_0.25_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_192_quant.tgz) | 0.5 Mb | 45.8% | 71.9% | 7.9 ms
-Mobilenet_0.25_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_224_quant.tgz) | 0.5 Mb | 48.2% | 73.8% | 10.4 ms
-Mobilenet_0.50_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_128_quant.tgz) | 1.4 Mb | 54.9% | 78.9% | 8.8 ms
-Mobilenet_0.50_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_160_quant.tgz) | 1.4 Mb | 57.7% | 81.3% | 13.0 ms
-Mobilenet_0.50_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_192_quant.tgz) | 1.4 Mb | 60.4% | 83.2% | 18.3 ms
-Mobilenet_0.50_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_224_quant.tgz) | 1.4 Mb | 62.2% | 84.5% | 24.7 ms
-Mobilenet_0.75_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_128_quant.tgz) | 2.6 Mb | 59.8% | 82.8% | 16.2 ms
-Mobilenet_0.75_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_160_quant.tgz) | 2.6 Mb | 63.9% | 85.5% | 24.3 ms
-Mobilenet_0.75_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_192_quant.tgz) | 2.6 Mb | 66.2% | 87.1% | 33.8 ms
-Mobilenet_0.75_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_224_quant.tgz) | 2.6 Mb | 67.9% | 88.1% | 45.4 ms
-Mobilenet_1.0_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_128_quant.tgz) | 4.3 Mb | 64.0% | 85.5% | 24.9 ms
-Mobilenet_1.0_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_160_quant.tgz) | 4.3 Mb | 67.3% | 87.7% | 37.4 ms
-Mobilenet_1.0_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_192_quant.tgz) | 4.3 Mb | 69.0% | 88.9% | 51.9 ms
-Mobilenet_1.0_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz) | 4.3 Mb | 69.7% | 89.5% | 70.2 ms
+Mobilenet_0.25_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.25_128_quant.tgz) | 0.5 Mb | 39.7% | 65.8% | 3.7 ms
+Mobilenet_0.25_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.25_160_quant.tgz) | 0.5 Mb | 41.9% | 69.1% | 5.5 ms
+Mobilenet_0.25_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.25_192_quant.tgz) | 0.5 Mb | 45.3% | 71.9% | 7.9 ms
+Mobilenet_0.25_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.25_224_quant.tgz) | 0.5 Mb | 46.4% | 73.8% | 10.4 ms
+Mobilenet_0.50_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.5_128_quant.tgz) | 1.4 Mb | 54.1% | 78.9% | 8.8 ms
+Mobilenet_0.50_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.5_160_quant.tgz) | 1.4 Mb | 57.6% | 81.3% | 13.0 ms
+Mobilenet_0.50_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.5_192_quant.tgz) | 1.4 Mb | 59.1% | 83.2% | 18.3 ms
+Mobilenet_0.50_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.5_224_quant.tgz) | 1.4 Mb | 61.0% | 84.5% | 24.7 ms
+Mobilenet_0.75_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.75_128_quant.tgz) | 2.6 Mb | 52.5% | 82.8% | 16.2 ms
+Mobilenet_0.75_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.75_160_quant.tgz) | 2.6 Mb | 63.6% | 85.5% | 24.3 ms
+Mobilenet_0.75_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.75_192_quant.tgz) | 2.6 Mb | 61.1% | 87.1% | 33.8 ms
+Mobilenet_0.75_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.75_224_quant.tgz) | 2.6 Mb | 66.7% | 88.1% | 45.4 ms
+Mobilenet_1.0_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_1.0_128_quant.tgz) | 4.3 Mb | 62.7% | 85.5% | 24.9 ms
+Mobilenet_1.0_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_1.0_160_quant.tgz) | 4.3 Mb | 66.6% | 87.7% | 37.4 ms
+Mobilenet_1.0_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_1.0_192_quant.tgz) | 4.3 Mb | 69.2% | 88.9% | 51.9 ms
+Mobilenet_1.0_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_1.0_224_quant.tgz) | 4.3 Mb | 69.3% | 89.5% | 70.2 ms
## Other models
diff --git a/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md b/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
index 45104c1419..49d00a66ba 100644
--- a/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
+++ b/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
@@ -42,6 +42,7 @@ counterparts:
*as long as the input tensor is 4D (1 batch + 2 spatial + 1 other) and the
crops attribute is not used*
* [tf.exp](https://www.tensorflow.org/api_docs/python/tf/exp)
+* [tf.fake_quant*](https://www.tensorflow.org/api_docs/python/tf/fake_quant_with_min_max_args)
* [tf.matmul](https://www.tensorflow.org/api_docs/python/tf/matmul) - *as long
as the second argument is constant and transposition is not used*
* [tf.nn.avg_pool](https://www.tensorflow.org/api_docs/python/tf/nn/avg_pool)
@@ -778,6 +779,42 @@ Outputs {
}
```
+**POW**
+
+```
+Inputs {
+ 0: a tensor
+ 1: a tensor
+}
+Outputs {
+ 0: elementwise pow of the input tensors
+}
+```
+
+**ARG_MAX**
+
+```
+Inputs {
+ 0: a tensor
+ 1: a tensor specifying the axis
+}
+Outputs {
+ 0: A tensor of indices of maximum values.
+}
+```
+
+**ARG_MIN**
+
+```
+Inputs {
+ 0: a tensor
+ 1: a tensor specifying the axis
+}
+Outputs {
+ 0: A tensor of indices of minimum values.
+}
+```
+
And these are TensorFlow Lite operations that are present but not ready for
custom models yet:
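For readers less familiar with the spec format above, a tiny reference sketch of the semantics the new ARG_MAX entry describes (plain C++ over a 1-D input; the actual TF Lite kernel generalizes along the axis given by input 1):

```c++
#include <cstddef>
#include <vector>

// Reference semantics for ARG_MAX on a 1-D tensor: output the index of
// the maximum value. ARG_MIN is identical with '<' in place of '>'.
size_t ArgMax1D(const std::vector<float>& values) {
  size_t best = 0;
  for (size_t i = 1; i < values.size(); ++i) {
    if (values[i] > values[best]) best = i;
  }
  return best;
}
```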
diff --git a/tensorflow/contrib/lite/interpreter.cc b/tensorflow/contrib/lite/interpreter.cc
index 57b2c0f32b..d103786694 100644
--- a/tensorflow/contrib/lite/interpreter.cc
+++ b/tensorflow/contrib/lite/interpreter.cc
@@ -22,17 +22,21 @@ limitations under the License.
#include "tensorflow/contrib/lite/arena_planner.h"
#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/contrib/lite/context_util.h"
#include "tensorflow/contrib/lite/error_reporter.h"
#include "tensorflow/contrib/lite/graph_info.h"
-#include "tensorflow/contrib/lite/kernels/eigen_support.h"
-#include "tensorflow/contrib/lite/kernels/gemm_support.h"
#include "tensorflow/contrib/lite/memory_planner.h"
+#ifndef TFLITE_MCU
#include "tensorflow/contrib/lite/nnapi_delegate.h"
+#endif
#include "tensorflow/contrib/lite/profiling/profiler.h"
#include "tensorflow/contrib/lite/schema/schema_generated.h"
#include "tensorflow/contrib/lite/util.h"
namespace tflite {
+#ifdef TFLITE_MCU
+class NNAPIDelegate {};
+#endif
namespace {
@@ -53,6 +57,19 @@ void SetForbiddenContextFunction(FunctionType* func) {
*func = reinterpret_cast<FunctionType>(ForbiddenContextFunction);
}
+// Returns true if at least one tensor in the given list is kTfLiteDynamic.
+template <typename TensorIntArray>
+bool HasDynamicTensorImpl(const TfLiteContext& context,
+ const TensorIntArray& int_array) {
+ for (int i : int_array) {
+ const TfLiteTensor& tensor = context.tensors[i];
+ if (tensor.allocation_type == kTfLiteDynamic) {
+ return true;
+ }
+ }
+ return false;
+}
+
} // namespace
// A trivial implementation of GraphInfo around the Interpreter.
@@ -99,9 +116,9 @@ Interpreter::Interpreter(ErrorReporter* error_reporter)
context_.AddTensors = AddTensors;
context_.tensors = nullptr;
context_.tensors_size = 0;
- context_.eigen_context = nullptr;
- context_.gemm_context = nullptr;
context_.recommended_num_threads = -1;
+ context_.GetExternalContext = GetExternalContext;
+ context_.SetExternalContext = SetExternalContext;
// Invalid to call these except from a TfLiteDelegate
SetForbiddenContextFunction(&context_.GetNodeAndRegistration);
@@ -112,6 +129,11 @@ Interpreter::Interpreter(ErrorReporter* error_reporter)
tensors_.reserve(kTensorsReservedCapacity);
nodes_and_registration_.reserve(kTensorsReservedCapacity);
next_execution_plan_index_to_prepare_ = 0;
+
+ for (int i = 0; i < kTfLiteMaxExternalContexts; ++i) {
+ external_contexts_[i] = nullptr;
+ }
+
UseNNAPI(false);
}
@@ -269,6 +291,33 @@ TfLiteStatus Interpreter::ReplaceSubgraphsWithDelegateKernels(
return kTfLiteOk;
}
+TfLiteExternalContext* Interpreter::GetExternalContext(
+ TfLiteExternalContextType type) {
+ if (type >= 0 && type < kTfLiteMaxExternalContexts) {
+ return external_contexts_[type];
+ }
+ return nullptr;
+}
+
+TfLiteExternalContext* Interpreter::GetExternalContext(
+ struct TfLiteContext* context, TfLiteExternalContextType type) {
+ return static_cast<Interpreter*>(context->impl_)->GetExternalContext(type);
+}
+
+void Interpreter::SetExternalContext(TfLiteExternalContextType type,
+ TfLiteExternalContext* ctx) {
+ if (type >= 0 && type < kTfLiteMaxExternalContexts) {
+ external_contexts_[type] = ctx;
+ }
+}
+
+void Interpreter::SetExternalContext(struct TfLiteContext* context,
+ TfLiteExternalContextType type,
+ TfLiteExternalContext* ctx) {
+ return static_cast<Interpreter*>(context->impl_)
+ ->SetExternalContext(type, ctx);
+}
+
// Gets a TfLiteIntArray* representing the execution plan. The interpreter owns
// this memory and it is only guaranteed to exist during the invocation of the
// delegate prepare.
@@ -359,33 +408,46 @@ TfLiteStatus Interpreter::BytesRequired(TfLiteType type, const int* dims,
case kTfLiteBool:
*bytes = sizeof(bool) * count;
break;
+ case kTfLiteComplex64:
+ *bytes = sizeof(std::complex<float>) * count;
+ break;
default:
ReportError(&context_,
- "Only float32, int16, int32, int64, uint8, bool supported "
- "currently.");
+ "Only float32, int16, int32, int64, uint8, bool, complex64 "
+ "supported currently.");
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus Interpreter::AllocateTensors() {
- next_execution_plan_index_to_prepare_ = 0;
- if (memory_planner_) {
- TF_LITE_ENSURE_STATUS(memory_planner_->ResetAllocations());
- }
-
if (!consistent_) {
ReportError(&context_, "AllocateTensors() called on inconsistent model.");
return kTfLiteError;
}
- TF_LITE_ENSURE_STATUS(PrepareOpsAndTensors());
+ // Explicit (re)allocation is necessary if nodes have been changed or tensors
+ // have been resized. For inputs marked as dynamic, we can't short-circuit the
+ // allocation as the client may have done the resize manually.
+ if (state_ != kStateUninvokable && !HasDynamicTensorImpl(context_, inputs_)) {
+ return kTfLiteOk;
+ }
- if (state_ == kStateUninvokable) {
- state_ = kStateInvokable;
+ next_execution_plan_index_to_prepare_ = 0;
+ if (memory_planner_) {
+ TF_LITE_ENSURE_STATUS(memory_planner_->ResetAllocations());
}
- TF_LITE_ENSURE(&context_, state_ == kStateInvokable ||
- state_ == kStateInvokableAndImmutable);
+
+ TF_LITE_ENSURE_STATUS(PrepareOpsAndTensors());
+
+ state_ = kStateInvokable;
+
+ // Reset the variable tensors to zero after (re)allocating the tensors.
+ // Developers shouldn't rely on the side effect of this function to reset
+ // variable tensors. They should call `ResetVariableTensorsToZero` directly
+ // instead.
+ ResetVariableTensorsToZero();
+
return kTfLiteOk;
}
@@ -478,26 +540,26 @@ TfLiteStatus Interpreter::ResizeInputTensor(int tensor_index,
"ResizeInputTensor is disallowed when graph is immutable.");
return kTfLiteError;
}
- state_ = kStateUninvokable;
// TODO(aselle): All bounds checks can be implemented as one-sided bounds
// checks by casting to unsigned for efficiency. Profile before doing this.
TF_LITE_ENSURE(&context_,
tensor_index < context_.tensors_size && tensor_index >= 0);
- TfLiteIntArray* dims_lite = ConvertVectorToTfLiteIntArray(dims);
- return ResizeTensorImpl(&context_.tensors[tensor_index], dims_lite);
+ TfLiteTensor* tensor = &context_.tensors[tensor_index];
+
+ // Short-circuit the state change if the dimensions don't change, avoiding
+ // unnecessary (re)allocations.
+ if (EqualArrayAndTfLiteIntArray(tensor->dims, dims.size(), dims.data())) {
+ return kTfLiteOk;
+ }
+
+ state_ = kStateUninvokable;
+ return ResizeTensorImpl(tensor, ConvertVectorToTfLiteIntArray(dims));
}
-// Returns true if at least one tensor in the given list is kTfLiteDynamic.
bool HasDynamicTensor(const TfLiteContext& context,
- const TfLiteIntArray* tensors) {
- for (int i = 0; i < tensors->size; ++i) {
- const TfLiteTensor& tensor = context.tensors[tensors->data[i]];
- if (tensor.allocation_type == kTfLiteDynamic) {
- return true;
- }
- }
- return false;
+ const TfLiteIntArray* int_array) {
+ return HasDynamicTensorImpl(context, TfLiteIntArrayView{int_array});
}
TfLiteStatus Interpreter::PrepareOpsStartingAt(
@@ -510,6 +572,8 @@ TfLiteStatus Interpreter::PrepareOpsStartingAt(
nodes_and_registration_[node_index].second;
EnsureTensorsVectorCapacity();
if (OpPrepare(registration, &node) == kTfLiteError) {
+ context_.ReportError(&context_, "Node %d failed to prepare.\n",
+ node_index);
return kTfLiteError;
}
@@ -528,7 +592,8 @@ TfLiteStatus Interpreter::PrepareOpsStartingAt(
TfLiteStatus Interpreter::PrepareOpsAndTensors() {
if (!memory_planner_) {
memory_planner_.reset(new ArenaPlanner(
- &context_, std::unique_ptr<GraphInfo>(new InterpreterInfo(this))));
+ &context_, std::unique_ptr<GraphInfo>(new InterpreterInfo(this)),
+ /*preserve_inputs=*/true, /*preserve_intermediates*/ false));
memory_planner_->PlanAllocations();
}
@@ -554,6 +619,7 @@ TfLiteStatus Interpreter::Invoke() {
}
TfLiteStatus status = kTfLiteOk;
+#ifndef TFLITE_MCU
if (nnapi_delegate_) {
if (next_execution_plan_index_to_prepare_ == execution_plan_.size()) {
TF_LITE_ENSURE_OK(&context_, nnapi_delegate_->Invoke(this));
@@ -567,6 +633,7 @@ TfLiteStatus Interpreter::Invoke() {
return kTfLiteError;
}
}
+#endif
// Invocations are always done in node order.
// Note that calling Invoke repeatedly will cause the original memory plan to
@@ -607,6 +674,8 @@ TfLiteStatus Interpreter::Invoke() {
EnsureTensorsVectorCapacity();
tensor_resized_since_op_invoke_ = false;
if (OpInvoke(registration, &node) == kTfLiteError) {
+ context_.ReportError(&context_, "Node %d failed to invoke.\n",
+ node_index);
status = kTfLiteError;
}
@@ -823,6 +892,7 @@ TfLiteStatus Interpreter::ResizeTensorImpl(TfLiteTensor* tensor,
}
void Interpreter::UseNNAPI(bool enable) {
+#ifndef TFLITE_MCU
// TODO(aselle): This is a workaround for finding if NNAPI exists.
// We also need to make sure getLibraryHandle() is renamed to be NNAPI
// prefixed.
@@ -832,15 +902,18 @@ void Interpreter::UseNNAPI(bool enable) {
} else if (!nnapi_delegate_) {
nnapi_delegate_.reset(new NNAPIDelegate);
}
+#endif
}
void Interpreter::SetNumThreads(int num_threads) {
context_.recommended_num_threads = num_threads;
- // TODO(ahentz): find a way to avoid this. It causes gemmlowp and eigen to
- // be required in order to compile the framework.
- gemm_support::SetNumThreads(&context_, num_threads);
- eigen_support::SetNumThreads(&context_, num_threads);
+ for (int i = 0; i < kTfLiteMaxExternalContexts; ++i) {
+ auto* c = external_contexts_[i];
+ if (c && c->Refresh) {
+ c->Refresh(&context_);
+ }
+ }
}
TfLiteStatus Interpreter::ModifyGraphWithDelegate(TfLiteDelegate* delegate,
@@ -884,9 +957,10 @@ TfLiteStatus Interpreter::ModifyGraphWithDelegate(TfLiteDelegate* delegate,
TF_LITE_ENSURE_OK(&context_, status);
if (!allow_dynamic_tensors) {
+ // Reset the state to force tensor/op reallocation.
+ state_ = kStateUninvokable;
TF_LITE_ENSURE_OK(&context_, AllocateTensors());
- TF_LITE_ENSURE(&context_, state_ == kStateInvokable ||
- state_ == kStateInvokableAndImmutable);
+ TF_LITE_ENSURE_EQ(&context_, state_, kStateInvokable);
// After using a delegate which doesn't support dynamic tensors, make the
// entire graph immutable.
state_ = kStateInvokableAndImmutable;
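Two behavioral changes in this file are easy to miss: `ResizeInputTensor` now short-circuits when the requested dimensions match the current ones, and `AllocateTensors` returns early when nothing was invalidated, zeroing variable tensors only on a real (re)allocation. A minimal client-side sketch with hypothetical shapes:

```c++
#include "tensorflow/contrib/lite/interpreter.h"

// Sketch: the second resize/allocate pair below is now effectively free,
// since the dims are unchanged and the interpreter stays invokable.
TfLiteStatus InvokeTwice(tflite::Interpreter* interpreter) {
  TF_LITE_ENSURE_STATUS(interpreter->ResizeInputTensor(
      interpreter->inputs()[0], {1, 224, 224, 3}));
  TF_LITE_ENSURE_STATUS(interpreter->AllocateTensors());
  TF_LITE_ENSURE_STATUS(interpreter->Invoke());
  // Same dims again: no state change, no reallocation.
  TF_LITE_ENSURE_STATUS(interpreter->ResizeInputTensor(
      interpreter->inputs()[0], {1, 224, 224, 3}));
  TF_LITE_ENSURE_STATUS(interpreter->AllocateTensors());
  return interpreter->Invoke();
}
```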
diff --git a/tensorflow/contrib/lite/interpreter.h b/tensorflow/contrib/lite/interpreter.h
index e67543671b..1a1c3e272b 100644
--- a/tensorflow/contrib/lite/interpreter.h
+++ b/tensorflow/contrib/lite/interpreter.h
@@ -17,6 +17,7 @@ limitations under the License.
#ifndef TENSORFLOW_CONTRIB_LITE_INTERPRETER_H_
#define TENSORFLOW_CONTRIB_LITE_INTERPRETER_H_
+#include <complex>
#include <cstdio>
#include <cstdlib>
#include <vector>
@@ -58,6 +59,14 @@ template <>
constexpr TfLiteType typeToTfLiteType<bool>() {
return kTfLiteBool;
}
+template <>
+constexpr TfLiteType typeToTfLiteType<std::complex<float>>() {
+ return kTfLiteComplex64;
+}
+template <>
+constexpr TfLiteType typeToTfLiteType<string>() {
+ return kTfLiteString;
+}
// Forward declare since NNAPIDelegate uses Interpreter.
class NNAPIDelegate;
@@ -405,6 +414,8 @@ class Interpreter {
}
private:
+ friend class InterpreterTest;
+
// Give 'op_reg' a chance to initialize itself using the contents of
// 'buffer'.
void* OpInit(const TfLiteRegistration& op_reg, const char* buffer,
@@ -517,6 +528,18 @@ class Interpreter {
static TfLiteStatus GetExecutionPlan(struct TfLiteContext* context,
TfLiteIntArray** execution_plan);
+ // Retrieve an existing external context by type.
+ TfLiteExternalContext* GetExternalContext(TfLiteExternalContextType type);
+ static TfLiteExternalContext* GetExternalContext(
+ struct TfLiteContext* context, TfLiteExternalContextType type);
+
+ // Set the value of an external context.
+ void SetExternalContext(TfLiteExternalContextType type,
+ TfLiteExternalContext* ctx);
+ static void SetExternalContext(struct TfLiteContext* context,
+ TfLiteExternalContextType type,
+ TfLiteExternalContext* ctx);
+
// Ensures that `tensors_` has at least `kTensorsCapacityHeadroom` extra
// capacity. Calling this function may invalidate existing pointers to
// tensors. After calling this function, adding `kTensorsCapacityHeadroom`
@@ -607,6 +630,9 @@ class Interpreter {
// Profiler for this interpreter instance.
profiling::Profiler* profiler_;
+
+ // List of active external contexts.
+ TfLiteExternalContext* external_contexts_[kTfLiteMaxExternalContexts];
};
} // namespace tflite
diff --git a/tensorflow/contrib/lite/interpreter_test.cc b/tensorflow/contrib/lite/interpreter_test.cc
index 21cdf87d1e..10119903fe 100644
--- a/tensorflow/contrib/lite/interpreter_test.cc
+++ b/tensorflow/contrib/lite/interpreter_test.cc
@@ -23,6 +23,15 @@ limitations under the License.
#include "tensorflow/contrib/lite/testing/util.h"
namespace tflite {
+
+// InterpreterTest is a friend of Interpreter, so it can access context_.
+class InterpreterTest : public ::testing::Test {
+ protected:
+ TfLiteContext* GetInterpreterContext() { return &interpreter_.context_; }
+
+ Interpreter interpreter_;
+};
+
namespace ops {
namespace builtin {
TfLiteRegistration* Register_PADV2();
@@ -48,6 +57,22 @@ TEST(BasicInterpreter, InvokeInvalidModel) {
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}
+TEST(BasicInterpreter, TestAllocateTensorsResetVariableTensors) {
+ Interpreter interpreter;
+ int tensor_index;
+ ASSERT_EQ(interpreter.AddTensors(1, &tensor_index), kTfLiteOk);
+ constexpr int kTensorSize = 16;
+ interpreter.SetTensorParametersReadWrite(tensor_index, kTfLiteFloat32, "",
+ {kTensorSize}, {}, true);
+ interpreter.SetVariables({tensor_index});
+ ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
+ TfLiteTensor* tensor = interpreter.tensor(tensor_index);
+ // Ensure that variable tensors are reset to zero.
+ for (int i = 0; i < kTensorSize; ++i) {
+ ASSERT_EQ(tensor->data.f[i], 0.0f);
+ }
+}
+
// Test size accessor functions.
TEST(BasicInterpreter, TestSizeFunctions) {
Interpreter interpreter;
@@ -231,32 +256,16 @@ TEST(BasicInterpreter, CheckArenaAllocation) {
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
- ASSERT_EQ(interpreter.tensor(0)->data.raw, interpreter.tensor(4)->data.raw);
- ASSERT_EQ(interpreter.tensor(1)->data.raw, interpreter.tensor(7)->data.raw);
- ASSERT_EQ(interpreter.tensor(8)->data.raw, nullptr);
-
- ASSERT_LT(interpreter.tensor(4)->data.raw, interpreter.tensor(1)->data.raw);
- ASSERT_LT(interpreter.tensor(6)->data.raw, interpreter.tensor(1)->data.raw);
ASSERT_LT(interpreter.tensor(0)->data.raw, interpreter.tensor(1)->data.raw);
-
- ASSERT_LT(interpreter.tensor(0)->data.raw, interpreter.tensor(3)->data.raw);
- ASSERT_LT(interpreter.tensor(1)->data.raw, interpreter.tensor(3)->data.raw);
+ ASSERT_LT(interpreter.tensor(1)->data.raw, interpreter.tensor(2)->data.raw);
ASSERT_LT(interpreter.tensor(2)->data.raw, interpreter.tensor(3)->data.raw);
- ASSERT_LT(interpreter.tensor(4)->data.raw, interpreter.tensor(3)->data.raw);
- ASSERT_LT(interpreter.tensor(6)->data.raw, interpreter.tensor(3)->data.raw);
- ASSERT_LT(interpreter.tensor(7)->data.raw, interpreter.tensor(3)->data.raw);
- ASSERT_LT(interpreter.tensor(8)->data.raw, interpreter.tensor(3)->data.raw);
- ASSERT_LT(interpreter.tensor(9)->data.raw, interpreter.tensor(3)->data.raw);
-
- ASSERT_LT(interpreter.tensor(0)->data.raw, interpreter.tensor(5)->data.raw);
- ASSERT_LT(interpreter.tensor(1)->data.raw, interpreter.tensor(5)->data.raw);
- ASSERT_LT(interpreter.tensor(2)->data.raw, interpreter.tensor(5)->data.raw);
- ASSERT_LT(interpreter.tensor(3)->data.raw, interpreter.tensor(5)->data.raw);
+ ASSERT_LT(interpreter.tensor(3)->data.raw, interpreter.tensor(4)->data.raw);
ASSERT_LT(interpreter.tensor(4)->data.raw, interpreter.tensor(5)->data.raw);
- ASSERT_LT(interpreter.tensor(6)->data.raw, interpreter.tensor(5)->data.raw);
- ASSERT_LT(interpreter.tensor(7)->data.raw, interpreter.tensor(5)->data.raw);
- ASSERT_LT(interpreter.tensor(8)->data.raw, interpreter.tensor(5)->data.raw);
- ASSERT_LT(interpreter.tensor(9)->data.raw, interpreter.tensor(5)->data.raw);
+ ASSERT_LT(interpreter.tensor(5)->data.raw, interpreter.tensor(7)->data.raw);
+ ASSERT_EQ(interpreter.tensor(6)->data.raw, interpreter.tensor(2)->data.raw);
+ // #7 is the one with the largest pointer.
+ ASSERT_EQ(interpreter.tensor(8)->data.raw, nullptr);
+ ASSERT_EQ(interpreter.tensor(9)->data.raw, interpreter.tensor(5)->data.raw);
}
TEST(BasicInterpreter, BufferAccess) {
@@ -292,6 +301,57 @@ TEST(BasicInterpreter, NoOpInterpreter) {
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}
+TEST(BasicInterpreter, RedundantAllocateTensors) {
+ Interpreter interpreter;
+ ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
+ ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
+
+ ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
+ 0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
+ kTfLiteOk);
+
+ ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
+ const auto data_raw = interpreter.tensor(0)->data.raw;
+ ASSERT_NE(data_raw, nullptr);
+
+ // A redundant allocation request should have no impact.
+ ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
+ ASSERT_EQ(interpreter.tensor(0)->data.raw, data_raw);
+}
+
+TEST(BasicInterpreter, RedundantAllocateTensorsWithDynamicInputs) {
+ Interpreter interpreter;
+ TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
+ ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
+ interpreter.SetInputs({0});
+ interpreter.SetOutputs({1});
+ interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg);
+
+ ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
+ 0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
+ kTfLiteOk);
+ ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
+ 1, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
+ kTfLiteOk);
+
+ // Configure the input tensor as dynamic.
+ interpreter.tensor(0)->data.raw = nullptr;
+ interpreter.tensor(0)->allocation_type = kTfLiteDynamic;
+
+ ASSERT_EQ(interpreter.ResizeInputTensor(interpreter.inputs()[0], {1, 2, 3}),
+ kTfLiteOk);
+ ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
+ ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);
+
+ // Reset the output tensor's buffer.
+ interpreter.tensor(1)->data.raw = nullptr;
+
+ // A redundant allocation request should be honored, as the input tensor
+ // was marked dynamic.
+ ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
+ ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);
+}
+
TEST(BasicInterpreter, ResizingTensors) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
@@ -349,6 +409,37 @@ TEST(BasicInterpreter, ResizingTensors) {
tensor->data.f[15] = 0.123f;
}
+TEST(BasicInterpreter, NoopResizingTensors) {
+ Interpreter interpreter;
+ ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
+ ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
+ ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
+
+ ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
+ 0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
+ kTfLiteOk);
+
+ int t = interpreter.inputs()[0];
+ TfLiteTensor* tensor = interpreter.tensor(t);
+
+ ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
+ EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
+ ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
+ tensor->data.f[5] = 0.123f;
+
+ // Resizing to the same size should not trigger re-allocation.
+ ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
+ EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
+ ASSERT_NE(tensor->data.raw, nullptr);
+ ASSERT_EQ(tensor->data.f[5], 0.123f);
+
+ // Explicitly allocating should be a no-op, as no resize was performed.
+ ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
+ EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
+ ASSERT_NE(tensor->data.raw, nullptr);
+ ASSERT_EQ(tensor->data.f[5], 0.123f);
+}
+
TEST(BasicInterpreter, OneOpInterpreter) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
@@ -714,6 +805,47 @@ TEST(InterpreterTensorsCapacityTest, TestExceedHeadroom) {
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}
+struct TestExternalContext : public TfLiteExternalContext {
+ static const TfLiteExternalContextType kType = kTfLiteGemmLowpContext;
+
+ static TestExternalContext* Get(TfLiteContext* context) {
+ return reinterpret_cast<TestExternalContext*>(
+ context->GetExternalContext(context, kType));
+ }
+
+ static void Set(TfLiteContext* context, TestExternalContext* value) {
+ context->SetExternalContext(context, kType, value);
+ }
+
+ int num_refreshes = 0;
+};
+
+TEST_F(InterpreterTest, GetSetResetExternalContexts) {
+ auto* context = GetInterpreterContext();
+
+ TestExternalContext external_context;
+ external_context.Refresh = [](TfLiteContext* context) {
+ auto* ptr = TestExternalContext::Get(context);
+ if (ptr != nullptr) {
+ ++ptr->num_refreshes;
+ }
+ return kTfLiteOk;
+ };
+
+ EXPECT_EQ(TestExternalContext::Get(context), nullptr);
+ interpreter_.SetNumThreads(4);
+
+ TestExternalContext::Set(context, &external_context);
+ EXPECT_EQ(TestExternalContext::Get(context), &external_context);
+ interpreter_.SetNumThreads(4);
+ interpreter_.SetNumThreads(5);
+ EXPECT_EQ(external_context.num_refreshes, 2);
+
+ TestExternalContext::Set(context, nullptr);
+ EXPECT_EQ(TestExternalContext::Get(context), nullptr);
+ interpreter_.SetNumThreads(4);
+}
+
// Test fixture that allows playing with execution plans. It creates a two
// node graph that can be executed in either [0,1] order or [1,0] order.
// The CopyOp records when it is invoked in the class member run_order_
diff --git a/tensorflow/contrib/lite/java/demo/app/build.gradle b/tensorflow/contrib/lite/java/demo/app/build.gradle
index 44ea2dcd90..92f04c651c 100644
--- a/tensorflow/contrib/lite/java/demo/app/build.gradle
+++ b/tensorflow/contrib/lite/java/demo/app/build.gradle
@@ -5,11 +5,12 @@ android {
buildToolsVersion "26.0.1"
defaultConfig {
applicationId "android.example.com.tflitecamerademo"
- minSdkVersion 15
+ // Required by Camera2 API.
+ minSdkVersion 21
targetSdkVersion 26
versionCode 1
versionName "1.0"
- testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
+ testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
// Remove this block.
jackOptions {
@@ -43,7 +44,7 @@ repositories {
dependencies {
compile fileTree(dir: 'libs', include: ['*.jar'])
- androidTestCompile('com.android.support.test.espresso:espresso-core:2.2.2', {
+ androidTestCompile('androidx.test.espresso:espresso-core:3.1.0-alpha3', {
exclude group: 'com.android.support', module: 'support-annotations'
})
compile 'com.android.support:appcompat-v7:25.2.0'
@@ -91,4 +92,4 @@ class DownloadUrlTask extends DefaultTask {
void download() {
ant.get(src: sourceUrl, dest: target)
}
-} \ No newline at end of file
+}
diff --git a/tensorflow/contrib/lite/java/ovic/demo/app/build.gradle b/tensorflow/contrib/lite/java/ovic/demo/app/build.gradle
index c5d19bad89..2a08608bbb 100644
--- a/tensorflow/contrib/lite/java/ovic/demo/app/build.gradle
+++ b/tensorflow/contrib/lite/java/ovic/demo/app/build.gradle
@@ -9,7 +9,7 @@ android {
targetSdkVersion 26
versionCode 1
versionName "1.0"
- testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
+ testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
// Remove this block.
jackOptions {
@@ -43,7 +43,7 @@ repositories {
dependencies {
compile fileTree(dir: 'libs', include: ['*.jar'])
- androidTestCompile('com.android.support.test.espresso:espresso-core:2.2.2', {
+ androidTestCompile('androidx.test.espresso:espresso-core:3.1.0-alpha3', {
exclude group: 'com.android.support', module: 'support-annotations'
})
compile 'com.android.support:appcompat-v7:25.2.0'
diff --git a/tensorflow/contrib/lite/java/ovic/src/test/java/org/tensorflow/ovic/OvicClassifierTest.java b/tensorflow/contrib/lite/java/ovic/src/test/java/org/tensorflow/ovic/OvicClassifierTest.java
index 56f3e7604a..1587c3c56f 100644
--- a/tensorflow/contrib/lite/java/ovic/src/test/java/org/tensorflow/ovic/OvicClassifierTest.java
+++ b/tensorflow/contrib/lite/java/ovic/src/test/java/org/tensorflow/ovic/OvicClassifierTest.java
@@ -127,12 +127,8 @@ public final class OvicClassifierTest {
try {
testResult = classifier.classifyByteBuffer(testImage);
fail();
- } catch (RuntimeException e) {
- assertThat(e)
- .hasMessageThat()
- .contains(
- "Failed to get input dimensions. 0-th input should have 49152 bytes, "
- + "but found 150528 bytes.");
+ } catch (IllegalArgumentException e) {
+ // Success.
}
}
diff --git a/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/DataType.java b/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/DataType.java
index 75334cd96e..94a1ec65d6 100644
--- a/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/DataType.java
+++ b/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/DataType.java
@@ -27,10 +27,7 @@ enum DataType {
UINT8(3),
/** 64-bit signed integer. */
- INT64(4),
-
- /** A {@link ByteBuffer}. */
- BYTEBUFFER(999);
+ INT64(4);
private final int value;
@@ -69,8 +66,6 @@ enum DataType {
return 1;
case INT64:
return 8;
- case BYTEBUFFER:
- return 1;
}
throw new IllegalArgumentException(
"DataType error: DataType " + this + " is not supported yet");
@@ -87,8 +82,6 @@ enum DataType {
return "byte";
case INT64:
return "long";
- case BYTEBUFFER:
- return "ByteBuffer";
}
throw new IllegalArgumentException(
"DataType error: DataType " + this + " is not supported yet");
diff --git a/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Interpreter.java b/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Interpreter.java
index fd1f0ffa68..7002f82677 100644
--- a/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Interpreter.java
+++ b/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Interpreter.java
@@ -135,7 +135,8 @@ public final class Interpreter implements AutoCloseable {
* including int, float, long, and byte. {@link ByteBuffer} is the preferred way to pass large
* input data. When {@link ByteBuffer} is used, its content should remain unchanged until
* model inference is done.
- * @param output a multidimensional array of output data.
+ * @param output a multidimensional array of output data, or a {@link ByteBuffer} of primitive
+ * types including int, float, long, and byte.
*/
public void run(@NonNull Object input, @NonNull Object output) {
Object[] inputs = {input};
@@ -155,28 +156,16 @@ public final class Interpreter implements AutoCloseable {
* primitive types including int, float, long, and byte. {@link ByteBuffer} is the preferred
* way to pass large input data. When {@link ByteBuffer} is used, its content should remain
* unchanged until model inference is done.
- * @param outputs a map mapping output indices to multidimensional arrays of output data. It only
- * needs to keep entries for the outputs to be used.
+ * @param outputs a map mapping output indices to multidimensional arrays of output data or {@link
+ * ByteBuffer}s of primitive types including int, float, long, and byte. It only needs to keep
+ * entries for the outputs to be used.
*/
public void runForMultipleInputsOutputs(
@NonNull Object[] inputs, @NonNull Map<Integer, Object> outputs) {
if (wrapper == null) {
throw new IllegalStateException("Internal error: The Interpreter has already been closed.");
}
- Tensor[] tensors = wrapper.run(inputs);
- if (outputs == null || tensors == null || outputs.size() > tensors.length) {
- throw new IllegalArgumentException("Output error: Outputs do not match with model outputs.");
- }
- final int size = tensors.length;
- for (Integer idx : outputs.keySet()) {
- if (idx == null || idx < 0 || idx >= size) {
- throw new IllegalArgumentException(
- String.format(
- "Output error: Invalid index of output %d (should be in range [0, %d))",
- idx, size));
- }
- tensors[idx].copyTo(outputs.get(idx));
- }
+ wrapper.run(inputs, outputs);
}
/**
@@ -249,8 +238,10 @@ public final class Interpreter implements AutoCloseable {
/** Release resources associated with the {@code Interpreter}. */
@Override
public void close() {
- wrapper.close();
- wrapper = null;
+ if (wrapper != null) {
+ wrapper.close();
+ wrapper = null;
+ }
}
@Override
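For reference, a minimal usage sketch of the updated Java API, where outputs may be ByteBuffers as well as multi-dimensional arrays. The model path, tensor count, shapes, and float element type below are illustrative assumptions, not part of this change; the File-based constructor is assumed from the existing Interpreter API.

    import java.io.File;
    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;
    import java.util.HashMap;
    import java.util.Map;
    import org.tensorflow.lite.Interpreter;

    public final class RunExample {
      public static void main(String[] args) {
        // Hypothetical model with one 1x4 float input and one 1x4 float output.
        try (Interpreter interpreter = new Interpreter(new File("model.tflite"))) {
          // Direct, native-ordered ByteBuffers take the zero-copy input path.
          ByteBuffer input = ByteBuffer.allocateDirect(4 * 4).order(ByteOrder.nativeOrder());
          for (int i = 0; i < 4; ++i) {
            input.putFloat(i);
          }
          // Outputs may now also be ByteBuffers, sized to match the output tensor.
          ByteBuffer output = ByteBuffer.allocateDirect(4 * 4).order(ByteOrder.nativeOrder());
          Map<Integer, Object> outputs = new HashMap<>();
          outputs.put(0, output);
          interpreter.runForMultipleInputsOutputs(new Object[] {input}, outputs);
        }
      }
    }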
diff --git a/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java b/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java
index 80de88b6a1..767a220f8c 100644
--- a/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java
+++ b/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/NativeInterpreterWrapper.java
@@ -15,10 +15,10 @@ limitations under the License.
package org.tensorflow.lite;
-import java.lang.reflect.Array;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.MappedByteBuffer;
+import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
@@ -40,6 +40,8 @@ final class NativeInterpreterWrapper implements AutoCloseable {
modelHandle = createModel(modelPath, errorHandle);
interpreterHandle = createInterpreter(modelHandle, errorHandle, numThreads);
isMemoryAllocated = true;
+ inputTensors = new Tensor[getInputCount(interpreterHandle)];
+ outputTensors = new Tensor[getOutputCount(interpreterHandle)];
}
/**
@@ -72,6 +74,8 @@ final class NativeInterpreterWrapper implements AutoCloseable {
modelHandle = createModelWithBuffer(modelByteBuffer, errorHandle);
interpreterHandle = createInterpreter(modelHandle, errorHandle, numThreads);
isMemoryAllocated = true;
+ inputTensors = new Tensor[getInputCount(interpreterHandle)];
+ outputTensors = new Tensor[getOutputCount(interpreterHandle)];
}
/** Releases resources associated with this {@code NativeInterpreterWrapper}. */
@@ -85,75 +89,63 @@ final class NativeInterpreterWrapper implements AutoCloseable {
inputsIndexes = null;
outputsIndexes = null;
isMemoryAllocated = false;
+ Arrays.fill(inputTensors, null);
+ Arrays.fill(outputTensors, null);
}
/** Sets inputs, runs model inference and returns outputs. */
- Tensor[] run(Object[] inputs) {
+ void run(Object[] inputs, Map<Integer, Object> outputs) {
+ inferenceDurationNanoseconds = -1;
if (inputs == null || inputs.length == 0) {
throw new IllegalArgumentException("Input error: Inputs should not be null or empty.");
}
- int[] dataTypes = new int[inputs.length];
- Object[] sizes = new Object[inputs.length];
- int[] numsOfBytes = new int[inputs.length];
+ if (outputs == null || outputs.isEmpty()) {
+ throw new IllegalArgumentException("Input error: Outputs should not be null or empty.");
+ }
+
+ // TODO(b/80431971): Remove implicit resize after deprecating multi-dimensional array inputs.
+ // Rather than forcing an immediate resize + allocation if an input's shape differs, we first
+ // flush all resizes, avoiding redundant allocations.
for (int i = 0; i < inputs.length; ++i) {
- DataType dataType = dataTypeOf(inputs[i]);
- dataTypes[i] = dataType.getNumber();
- if (dataType == DataType.BYTEBUFFER) {
- ByteBuffer buffer = (ByteBuffer) inputs[i];
- if (buffer == null || !buffer.isDirect() || buffer.order() != ByteOrder.nativeOrder()) {
- throw new IllegalArgumentException(
- "Input error: ByteBuffer should be a direct ByteBuffer that uses "
- + "ByteOrder.nativeOrder().");
- }
- numsOfBytes[i] = buffer.limit();
- sizes[i] = getInputDims(interpreterHandle, i, numsOfBytes[i]);
- } else if (isNonEmptyArray(inputs[i])) {
- int[] dims = shapeOf(inputs[i]);
- sizes[i] = dims;
- numsOfBytes[i] = dataType.elemByteSize() * numElements(dims);
- } else {
- throw new IllegalArgumentException(
- String.format(
- "Input error: %d-th element of the %d inputs is not an array or a ByteBuffer.",
- i, inputs.length));
+ Tensor tensor = getInputTensor(i);
+ int[] newShape = tensor.getInputShapeIfDifferent(inputs[i]);
+ if (newShape != null) {
+ resizeInput(i, newShape);
}
}
- inferenceDurationNanoseconds = -1;
- long[] outputsHandles =
- run(
- interpreterHandle,
- errorHandle,
- sizes,
- dataTypes,
- numsOfBytes,
- inputs,
- this,
- isMemoryAllocated);
- if (outputsHandles == null || outputsHandles.length == 0) {
- throw new IllegalStateException("Internal error: Interpreter has no outputs.");
+
+ if (!isMemoryAllocated) {
+ allocateTensors(interpreterHandle, errorHandle);
+ isMemoryAllocated = true;
+ // Allocation can trigger dynamic resizing of output tensors, so clear the
+ // output tensor cache.
+ Arrays.fill(outputTensors, null);
}
- isMemoryAllocated = true;
- Tensor[] outputs = new Tensor[outputsHandles.length];
- for (int i = 0; i < outputsHandles.length; ++i) {
- outputs[i] = Tensor.fromHandle(outputsHandles[i]);
+
+ for (int i = 0; i < inputs.length; ++i) {
+ getInputTensor(i).setTo(inputs[i]);
+ }
+
+ long inferenceStartNanos = System.nanoTime();
+ run(interpreterHandle, errorHandle);
+ long inferenceDurationNanoseconds = System.nanoTime() - inferenceStartNanos;
+
+ for (Map.Entry<Integer, Object> output : outputs.entrySet()) {
+ getOutputTensor(output.getKey()).copyTo(output.getValue());
}
- return outputs;
+
+ // Only set if the entire operation succeeds.
+ this.inferenceDurationNanoseconds = inferenceDurationNanoseconds;
}
- private static native long[] run(
- long interpreterHandle,
- long errorHandle,
- Object[] sizes,
- int[] dtypes,
- int[] numsOfBytes,
- Object[] values,
- NativeInterpreterWrapper wrapper,
- boolean memoryAllocated);
+ private static native boolean run(long interpreterHandle, long errorHandle);
/** Resizes dimensions of a specific input. */
void resizeInput(int idx, int[] dims) {
if (resizeInput(interpreterHandle, errorHandle, idx, dims)) {
isMemoryAllocated = false;
+ // Resizing invalidates the cached Tensor's shape, so drop the cached Tensor wrapper.
+ inputTensors[idx] = null;
}
}
@@ -212,78 +204,6 @@ final class NativeInterpreterWrapper implements AutoCloseable {
}
}
- static int numElements(int[] shape) {
- if (shape == null) {
- return 0;
- }
- int n = 1;
- for (int i = 0; i < shape.length; i++) {
- n *= shape[i];
- }
- return n;
- }
-
- static boolean isNonEmptyArray(Object o) {
- return (o != null && o.getClass().isArray() && Array.getLength(o) != 0);
- }
-
- /** Returns the type of the data. */
- static DataType dataTypeOf(Object o) {
- if (o != null) {
- Class<?> c = o.getClass();
- while (c.isArray()) {
- c = c.getComponentType();
- }
- if (float.class.equals(c)) {
- return DataType.FLOAT32;
- } else if (int.class.equals(c)) {
- return DataType.INT32;
- } else if (byte.class.equals(c)) {
- return DataType.UINT8;
- } else if (long.class.equals(c)) {
- return DataType.INT64;
- } else if (ByteBuffer.class.isInstance(o)) {
- return DataType.BYTEBUFFER;
- }
- }
- throw new IllegalArgumentException(
- "DataType error: cannot resolve DataType of " + o.getClass().getName());
- }
-
- /** Returns the shape of an object as an int array. */
- static int[] shapeOf(Object o) {
- int size = numDimensions(o);
- int[] dimensions = new int[size];
- fillShape(o, 0, dimensions);
- return dimensions;
- }
-
- static int numDimensions(Object o) {
- if (o == null || !o.getClass().isArray()) {
- return 0;
- }
- if (Array.getLength(o) == 0) {
- throw new IllegalArgumentException("Array lengths cannot be 0.");
- }
- return 1 + numDimensions(Array.get(o, 0));
- }
-
- static void fillShape(Object o, int dim, int[] shape) {
- if (shape == null || dim == shape.length) {
- return;
- }
- final int len = Array.getLength(o);
- if (shape[dim] == 0) {
- shape[dim] = len;
- } else if (shape[dim] != len) {
- throw new IllegalArgumentException(
- String.format("Mismatched lengths (%d and %d) in dimension %d", shape[dim], len, dim));
- }
- for (int i = 0; i < len; ++i) {
- fillShape(Array.get(o, i), dim + 1, shape);
- }
- }
-
/**
* Gets the last inference duration in nanoseconds. It returns null if there is no previous
* inference run or the last inference run failed.
@@ -293,40 +213,55 @@ final class NativeInterpreterWrapper implements AutoCloseable {
}
/**
- * Gets the dimensions of an input. It throws IllegalArgumentException if input index is invalid.
+ * Gets the quantization zero point of an output.
+ *
+ * @throws IllegalArgumentException if the output index is invalid.
*/
- int[] getInputDims(int index) {
- return getInputDims(interpreterHandle, index, -1);
+ int getOutputQuantizationZeroPoint(int index) {
+ return getOutputQuantizationZeroPoint(interpreterHandle, index);
}
/**
- * Gets the dimensions of an input. If numBytes >= 0, it will check whether num of bytes match the
- * input.
+ * Gets the quantization scale of an output.
+ *
+ * @throws IllegalArgumentException if the output index is invalid.
*/
- private static native int[] getInputDims(long interpreterHandle, int inputIdx, int numBytes);
-
- /** Gets the type of an output. It throws IllegalArgumentException if output index is invalid. */
- String getOutputDataType(int index) {
- int type = getOutputDataType(interpreterHandle, index);
- return DataType.fromNumber(type).toStringName();
+ float getOutputQuantizationScale(int index) {
+ return getOutputQuantizationScale(interpreterHandle, index);
}
/**
- * Gets the quantization zero point of an output.
+ * Gets the input {@link Tensor} for the provided input index.
*
- * @throws IllegalArgumentExeption if the output index is invalid.
+ * @throws IllegalArgumentException if the input index is invalid.
*/
- int getOutputQuantizationZeroPoint(int index) {
- return getOutputQuantizationZeroPoint(interpreterHandle, index);
+ Tensor getInputTensor(int index) {
+ if (index < 0 || index >= inputTensors.length) {
+ throw new IllegalArgumentException("Invalid input Tensor index: " + index);
+ }
+ Tensor inputTensor = inputTensors[index];
+ if (inputTensor == null) {
+ inputTensor =
+ inputTensors[index] = Tensor.fromHandle(getInputTensor(interpreterHandle, index));
+ }
+ return inputTensor;
}
/**
- * Gets the quantization scale of an output.
+ * Gets the output {@link Tensor} for the provided output index.
*
- * @throws IllegalArgumentExeption if the output index is invalid.
+ * @throws IllegalArgumentException if the output index is invalid.
*/
- float getOutputQuantizationScale(int index) {
- return getOutputQuantizationScale(interpreterHandle, index);
+ Tensor getOutputTensor(int index) {
+ if (index < 0 || index >= outputTensors.length) {
+ throw new IllegalArgumentException("Invalid output Tensor index: " + index);
+ }
+ Tensor outputTensor = outputTensors[index];
+ if (outputTensor == null) {
+ outputTensor =
+ outputTensors[index] = Tensor.fromHandle(getOutputTensor(interpreterHandle, index));
+ }
+ return outputTensor;
}
private static native int getOutputDataType(long interpreterHandle, int outputIdx);
@@ -343,18 +278,30 @@ final class NativeInterpreterWrapper implements AutoCloseable {
private long modelHandle;
- private int inputSize;
-
private long inferenceDurationNanoseconds = -1;
private ByteBuffer modelByteBuffer;
+ // Lazily constructed maps of input and output names to input and output Tensor indexes.
private Map<String, Integer> inputsIndexes;
-
private Map<String, Integer> outputsIndexes;
+ // Lazily constructed and populated arrays of input and output Tensor wrappers.
+ private final Tensor[] inputTensors;
+ private final Tensor[] outputTensors;
+
private boolean isMemoryAllocated = false;
+ private static native long allocateTensors(long interpreterHandle, long errorHandle);
+
+ private static native long getInputTensor(long interpreterHandle, int inputIdx);
+
+ private static native long getOutputTensor(long interpreterHandle, int outputIdx);
+
+ private static native int getInputCount(long interpreterHandle);
+
+ private static native int getOutputCount(long interpreterHandle);
+
private static native String[] getInputNames(long interpreterHandle);
private static native String[] getOutputNames(long interpreterHandle);
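As a side note on the caching scheme above: input/output Tensor wrappers are built lazily on first access and dropped whenever a resize or (re)allocation may move or reshape the underlying buffers. A minimal, self-contained sketch of that idiom (the class, field names, and placeholder handle values are illustrative only, not part of the wrapper):

    import java.util.Arrays;

    final class LazyCache {
      // Stand-ins for native tensor handles; wrappers are created on demand.
      private final long[] handles = {101L, 102L, 103L};
      private final Long[] wrappers = new Long[handles.length];

      Long get(int index) {
        if (index < 0 || index >= wrappers.length) {
          throw new IllegalArgumentException("Invalid tensor index: " + index);
        }
        Long w = wrappers[index];
        if (w == null) {
          // First access: wrap the handle and cache the wrapper.
          w = wrappers[index] = handles[index];
        }
        return w;
      }

      // Called after a resize, which can invalidate one tensor's shape.
      void invalidate(int index) {
        wrappers[index] = null;
      }

      // Called after (re)allocation, which can move any tensor's buffer.
      void invalidateAll() {
        Arrays.fill(wrappers, null);
      }
    }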
diff --git a/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Tensor.java b/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Tensor.java
index 09e887aae3..2403570c52 100644
--- a/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Tensor.java
+++ b/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/Tensor.java
@@ -15,6 +15,9 @@ limitations under the License.
package org.tensorflow.lite;
+import java.lang.reflect.Array;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
import java.util.Arrays;
/**
@@ -29,30 +32,179 @@ final class Tensor {
return new Tensor(nativeHandle);
}
- /** Reads Tensor content into an array. */
- <T> T copyTo(T dst) {
- if (NativeInterpreterWrapper.dataTypeOf(dst) != dtype) {
+ /** Returns the {@link DataType} of elements stored in the Tensor. */
+ public DataType dataType() {
+ return dtype;
+ }
+
+ /** Returns the size, in bytes, of the tensor data. */
+ public int numBytes() {
+ return numBytes(nativeHandle);
+ }
+
+ /**
+ * Returns the <a href="https://www.tensorflow.org/resources/dims_types.html#shape">shape</a> of
+ * the Tensor, i.e., the sizes of each dimension.
+ *
+ * @return an array where the i-th element is the size of the i-th dimension of the tensor.
+ */
+ public int[] shape() {
+ return shapeCopy;
+ }
+
+ /**
+ * Copies the contents of the provided {@code src} object to the Tensor.
+ *
+ * <p>The {@code src} should either be a (multi-dimensional) array with a shape matching that of
+ * this tensor, or a {@link ByteBuffer} of compatible primitive type with a matching flat size.
+ *
+ * @throws IllegalArgumentException if the tensor is a scalar or if {@code src} is not compatible
+ * with the tensor (for example, mismatched data types or shapes).
+ */
+ void setTo(Object src) {
+ throwExceptionIfTypeIsIncompatible(src);
+ if (isByteBuffer(src)) {
+ ByteBuffer srcBuffer = (ByteBuffer) src;
+ // For direct ByteBuffer instances we support zero-copy. Note that this assumes the caller
+ // retains ownership of the source buffer until inference has completed.
+ if (srcBuffer.isDirect() && srcBuffer.order() == ByteOrder.nativeOrder()) {
+ writeDirectBuffer(nativeHandle, srcBuffer);
+ } else {
+ buffer().put(srcBuffer);
+ }
+ return;
+ }
+ writeMultiDimensionalArray(nativeHandle, src);
+ }
+
+ /**
+ * Copies the contents of the tensor to {@code dst} and returns {@code dst}.
+ *
+ * @param dst the destination buffer, either an explicitly-typed array or a {@link ByteBuffer}.
+ * @throws IllegalArgumentException if {@code dst} is not compatible with the tensor (for example,
+ * mismatched data types or shapes).
+ */
+ Object copyTo(Object dst) {
+ throwExceptionIfTypeIsIncompatible(dst);
+ if (dst instanceof ByteBuffer) {
+ ByteBuffer dstByteBuffer = (ByteBuffer) dst;
+ dstByteBuffer.put(buffer());
+ return dst;
+ }
+ readMultiDimensionalArray(nativeHandle, dst);
+ return dst;
+ }
+
+ /** Returns the provided buffer's shape if specified and different from this Tensor's shape. */
+ // TODO(b/80431971): Remove this method after deprecating multi-dimensional array inputs.
+ int[] getInputShapeIfDifferent(Object input) {
+ // Implicit resizes based on ByteBuffer capacity aren't supported, so short-circuit that path.
+ // The ByteBuffer's size will be validated against this Tensor's size in {@link #setTo(Object)}.
+ if (isByteBuffer(input)) {
+ return null;
+ }
+ int[] inputShape = shapeOf(input);
+ if (Arrays.equals(shapeCopy, inputShape)) {
+ return null;
+ }
+ return inputShape;
+ }
+
+ /** Returns the type of the data. */
+ static DataType dataTypeOf(Object o) {
+ if (o != null) {
+ Class<?> c = o.getClass();
+ while (c.isArray()) {
+ c = c.getComponentType();
+ }
+ if (float.class.equals(c)) {
+ return DataType.FLOAT32;
+ } else if (int.class.equals(c)) {
+ return DataType.INT32;
+ } else if (byte.class.equals(c)) {
+ return DataType.UINT8;
+ } else if (long.class.equals(c)) {
+ return DataType.INT64;
+ }
+ }
+ throw new IllegalArgumentException(
+ "DataType error: cannot resolve DataType of " + o.getClass().getName());
+ }
+
+ /** Returns the shape of an object as an int array. */
+ static int[] shapeOf(Object o) {
+ int size = numDimensions(o);
+ int[] dimensions = new int[size];
+ fillShape(o, 0, dimensions);
+ return dimensions;
+ }
+
+ /** Returns the number of dimensions of a multi-dimensional array, or 0 if the object is not an array. */
+ static int numDimensions(Object o) {
+ if (o == null || !o.getClass().isArray()) {
+ return 0;
+ }
+ if (Array.getLength(o) == 0) {
+ throw new IllegalArgumentException("Array lengths cannot be 0.");
+ }
+ return 1 + numDimensions(Array.get(o, 0));
+ }
+
+ /** Recursively populates the shape dimensions for a given (multi-dimensional) array. */
+ static void fillShape(Object o, int dim, int[] shape) {
+ if (shape == null || dim == shape.length) {
+ return;
+ }
+ final int len = Array.getLength(o);
+ if (shape[dim] == 0) {
+ shape[dim] = len;
+ } else if (shape[dim] != len) {
+ throw new IllegalArgumentException(
+ String.format("Mismatched lengths (%d and %d) in dimension %d", shape[dim], len, dim));
+ }
+ for (int i = 0; i < len; ++i) {
+ fillShape(Array.get(o, i), dim + 1, shape);
+ }
+ }
+
+ private void throwExceptionIfTypeIsIncompatible(Object o) {
+ if (isByteBuffer(o)) {
+ ByteBuffer oBuffer = (ByteBuffer) o;
+ if (oBuffer.capacity() != numBytes()) {
+ throw new IllegalArgumentException(
+ String.format(
+ "Cannot convert between a TensorFlowLite buffer with %d bytes and a "
+ + "ByteBuffer with %d bytes.",
+ numBytes(), oBuffer.capacity()));
+ }
+ return;
+ }
+ DataType oType = dataTypeOf(o);
+ if (oType != dtype) {
throw new IllegalArgumentException(
String.format(
- "Output error: Cannot convert an TensorFlowLite tensor with type %s to a Java "
- + "object of type %s (which is compatible with the TensorFlowLite type %s)",
- dtype, dst.getClass().getName(), NativeInterpreterWrapper.dataTypeOf(dst)));
+ "Cannot convert between a TensorFlowLite tensor with type %s and a Java "
+ + "object of type %s (which is compatible with the TensorFlowLite type %s).",
+ dtype, o.getClass().getName(), oType));
}
- int[] dstShape = NativeInterpreterWrapper.shapeOf(dst);
- if (!Arrays.equals(dstShape, shapeCopy)) {
+
+ int[] oShape = shapeOf(o);
+ if (!Arrays.equals(oShape, shapeCopy)) {
throw new IllegalArgumentException(
String.format(
- "Output error: Shape of output target %s does not match with the shape of the "
- + "Tensor %s.",
- Arrays.toString(dstShape), Arrays.toString(shapeCopy)));
+ "Cannot copy between a TensorFlowLite tensor with shape %s and a Java object "
+ + "with shape %s.",
+ Arrays.toString(shapeCopy), Arrays.toString(oShape)));
}
- readMultiDimensionalArray(nativeHandle, dst);
- return dst;
}
- final long nativeHandle;
- final DataType dtype;
- final int[] shapeCopy;
+ private static boolean isByteBuffer(Object o) {
+ return o instanceof ByteBuffer;
+ }
+
+ private final long nativeHandle;
+ private final DataType dtype;
+ private final int[] shapeCopy;
private Tensor(long nativeHandle) {
this.nativeHandle = nativeHandle;
@@ -60,11 +212,23 @@ final class Tensor {
this.shapeCopy = shape(nativeHandle);
}
+ private ByteBuffer buffer() {
+ return buffer(nativeHandle).order(ByteOrder.nativeOrder());
+ }
+
+ private static native ByteBuffer buffer(long handle);
+
+ private static native void writeDirectBuffer(long handle, ByteBuffer src);
+
private static native int dtype(long handle);
private static native int[] shape(long handle);
- private static native void readMultiDimensionalArray(long handle, Object value);
+ private static native int numBytes(long handle);
+
+ private static native void readMultiDimensionalArray(long handle, Object dst);
+
+ private static native void writeMultiDimensionalArray(long handle, Object src);
static {
TensorFlowLite.init();
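The reflection-based shape helpers moved into Tensor above can be exercised in isolation; a standalone sketch mirroring their logic (the class name and main method are illustrative, not part of this change):

    import java.lang.reflect.Array;
    import java.util.Arrays;

    final class ShapeDemo {
      // Depth of array nesting; 0 for non-arrays.
      static int numDimensions(Object o) {
        if (o == null || !o.getClass().isArray()) {
          return 0;
        }
        if (Array.getLength(o) == 0) {
          throw new IllegalArgumentException("Array lengths cannot be 0.");
        }
        return 1 + numDimensions(Array.get(o, 0));
      }

      // Recursively records each dimension's length, rejecting ragged arrays.
      static void fillShape(Object o, int dim, int[] shape) {
        if (shape == null || dim == shape.length) {
          return;
        }
        final int len = Array.getLength(o);
        if (shape[dim] == 0) {
          shape[dim] = len;
        } else if (shape[dim] != len) {
          throw new IllegalArgumentException("Mismatched lengths in dimension " + dim);
        }
        for (int i = 0; i < len; ++i) {
          fillShape(Array.get(o, i), dim + 1, shape);
        }
      }

      static int[] shapeOf(Object o) {
        int[] shape = new int[numDimensions(o)];
        fillShape(o, 0, shape);
        return shape;
      }

      public static void main(String[] args) {
        System.out.println(Arrays.toString(shapeOf(new float[2][3][4])));  // [2, 3, 4]
      }
    }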
diff --git a/tensorflow/contrib/lite/java/src/main/native/BUILD b/tensorflow/contrib/lite/java/src/main/native/BUILD
index 4399ed2025..4b4e1c21d8 100644
--- a/tensorflow/contrib/lite/java/src/main/native/BUILD
+++ b/tensorflow/contrib/lite/java/src/main/native/BUILD
@@ -11,7 +11,6 @@ licenses(["notice"]) # Apache 2.0
cc_library(
name = "native_framework_only",
srcs = [
- "duration_utils_jni.cc",
"exception_jni.cc",
"nativeinterpreterwrapper_jni.cc",
"tensor_jni.cc",
diff --git a/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.cc b/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.cc
index 31f7b58fbc..e2c1edd9af 100644
--- a/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.cc
+++ b/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.cc
@@ -16,9 +16,6 @@ limitations under the License.
#include "tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.h"
namespace {
-const int kByteBufferValue = 999;
-const int kBufferSize = 256;
-
tflite::Interpreter* convertLongToInterpreter(JNIEnv* env, jlong handle) {
if (handle == 0) {
throwException(env, kIllegalArgumentException,
@@ -62,22 +59,6 @@ std::vector<int> convertJIntArrayToVector(JNIEnv* env, jintArray inputs) {
return outputs;
}
-bool isByteBuffer(jint data_type) { return data_type == kByteBufferValue; }
-
-TfLiteType resolveDataType(jint data_type) {
- switch (data_type) {
- case 1:
- return kTfLiteFloat32;
- case 2:
- return kTfLiteInt32;
- case 3:
- return kTfLiteUInt8;
- case 4:
- return kTfLiteInt64;
- default:
- return kTfLiteNoType;
- }
-}
int getDataType(TfLiteType data_type) {
switch (data_type) {
@@ -108,64 +89,6 @@ void printDims(char* buffer, int max_size, int* dims, int num_dims) {
}
}
-TfLiteStatus checkInputs(JNIEnv* env, tflite::Interpreter* interpreter,
- const int input_size, jintArray data_types,
- jintArray nums_of_bytes, jobjectArray values,
- jobjectArray sizes) {
- if (input_size != interpreter->inputs().size()) {
- throwException(env, kIllegalArgumentException,
- "Input error: Expected num of inputs is %d but got %d",
- interpreter->inputs().size(), input_size);
- return kTfLiteError;
- }
- if (input_size != env->GetArrayLength(data_types) ||
- input_size != env->GetArrayLength(nums_of_bytes) ||
- input_size != env->GetArrayLength(values)) {
- throwException(env, kIllegalArgumentException,
- "Internal error: Arrays in arguments should be of the same "
- "length, but got %d sizes, %d data_types, %d nums_of_bytes, "
- "and %d values",
- input_size, env->GetArrayLength(data_types),
- env->GetArrayLength(nums_of_bytes),
- env->GetArrayLength(values));
- return kTfLiteError;
- }
- for (int i = 0; i < input_size; ++i) {
- int input_idx = interpreter->inputs()[i];
- TfLiteTensor* target = interpreter->tensor(input_idx);
- jintArray dims =
- static_cast<jintArray>(env->GetObjectArrayElement(sizes, i));
- int num_dims = static_cast<int>(env->GetArrayLength(dims));
- if (target->dims->size != num_dims) {
- throwException(env, kIllegalArgumentException,
- "Input error: %d-th input should have %d dimensions, but "
- "found %d dimensions",
- i, target->dims->size, num_dims);
- return kTfLiteError;
- }
- jint* ptr = env->GetIntArrayElements(dims, nullptr);
- for (int j = 1; j < num_dims; ++j) {
- if (target->dims->data[j] != ptr[j]) {
- std::unique_ptr<char[]> expected_dims(new char[kBufferSize]);
- std::unique_ptr<char[]> obtained_dims(new char[kBufferSize]);
- printDims(expected_dims.get(), kBufferSize, target->dims->data,
- num_dims);
- printDims(obtained_dims.get(), kBufferSize, ptr, num_dims);
- throwException(env, kIllegalArgumentException,
- "Input error: %d-th input dimension should be [%s], but "
- "found [%s]",
- i, expected_dims.get(), obtained_dims.get());
- env->ReleaseIntArrayElements(dims, ptr, JNI_ABORT);
- return kTfLiteError;
- }
- }
- env->ReleaseIntArrayElements(dims, ptr, JNI_ABORT);
- env->DeleteLocalRef(dims);
- if (env->ExceptionCheck()) return kTfLiteError;
- }
- return kTfLiteOk;
-}
-
// Checks whether there is any difference between dimensions of a tensor and a
// given dimensions. Returns true if there is difference, else false.
bool areDimsDifferent(JNIEnv* env, TfLiteTensor* tensor, jintArray dims) {
@@ -188,74 +111,6 @@ bool areDimsDifferent(JNIEnv* env, TfLiteTensor* tensor, jintArray dims) {
return false;
}
-bool areInputDimensionsTheSame(JNIEnv* env, tflite::Interpreter* interpreter,
- int input_size, jobjectArray sizes) {
- if (interpreter->inputs().size() != input_size) {
- return false;
- }
- for (int i = 0; i < input_size; ++i) {
- int input_idx = interpreter->inputs()[i];
- jintArray dims =
- static_cast<jintArray>(env->GetObjectArrayElement(sizes, i));
- TfLiteTensor* target = interpreter->tensor(input_idx);
- if (areDimsDifferent(env, target, dims)) return false;
- env->DeleteLocalRef(dims);
- if (env->ExceptionCheck()) return false;
- }
- return true;
-}
-
-TfLiteStatus resizeInputs(JNIEnv* env, tflite::Interpreter* interpreter,
- int input_size, jobjectArray sizes) {
- for (int i = 0; i < input_size; ++i) {
- int input_idx = interpreter->inputs()[i];
- jintArray dims =
- static_cast<jintArray>(env->GetObjectArrayElement(sizes, i));
- TfLiteStatus status = interpreter->ResizeInputTensor(
- input_idx, convertJIntArrayToVector(env, dims));
- if (status != kTfLiteOk) {
- return status;
- }
- env->DeleteLocalRef(dims);
- if (env->ExceptionCheck()) return kTfLiteError;
- }
- return kTfLiteOk;
-}
-
-TfLiteStatus setInputs(JNIEnv* env, tflite::Interpreter* interpreter,
- int input_size, jintArray data_types,
- jintArray nums_of_bytes, jobjectArray values) {
- jint* data_type = env->GetIntArrayElements(data_types, nullptr);
- jint* num_bytes = env->GetIntArrayElements(nums_of_bytes, nullptr);
- for (int i = 0; i < input_size; ++i) {
- int input_idx = interpreter->inputs()[i];
- TfLiteTensor* target = interpreter->tensor(input_idx);
- jobject value = env->GetObjectArrayElement(values, i);
- bool is_byte_buffer = isByteBuffer(data_type[i]);
- if (is_byte_buffer) {
- writeByteBuffer(env, value, &(target->data.raw),
- static_cast<int>(num_bytes[i]));
- } else {
- TfLiteType type = resolveDataType(data_type[i]);
- if (type != target->type) {
- throwException(env, kIllegalArgumentException,
- "Input error: DataType (%d) of input data does not "
- "match with the DataType (%d) of model inputs.",
- type, target->type);
- return kTfLiteError;
- }
- writeMultiDimensionalArray(env, value, target->type, target->dims->size,
- &(target->data.raw),
- static_cast<int>(num_bytes[i]));
- }
- env->DeleteLocalRef(value);
- if (env->ExceptionCheck()) return kTfLiteError;
- }
- env->ReleaseIntArrayElements(data_types, data_type, JNI_ABORT);
- env->ReleaseIntArrayElements(nums_of_bytes, num_bytes, JNI_ABORT);
- return kTfLiteOk;
-}
-
// TODO(yichengfan): evaluate the benefit to use tflite verifier.
bool VerifyModel(const void* buf, size_t len) {
flatbuffers::Verifier verifier(static_cast<const uint8_t*>(buf), len);
@@ -287,6 +142,63 @@ Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputNames(JNIEnv* env,
return names;
}
+JNIEXPORT void JNICALL
+Java_org_tensorflow_lite_NativeInterpreterWrapper_allocateTensors(
+ JNIEnv* env, jclass clazz, jlong handle, jlong error_handle) {
+ tflite::Interpreter* interpreter = convertLongToInterpreter(env, handle);
+ if (interpreter == nullptr) return;
+ BufferErrorReporter* error_reporter =
+ convertLongToErrorReporter(env, error_handle);
+ if (error_reporter == nullptr) return;
+
+ if (interpreter->AllocateTensors() != kTfLiteOk) {
+ throwException(env, kNullPointerException,
+ "Internal error: Cannot allocate memory for the interpreter:"
+ " %s",
+ error_reporter->CachedErrorMessage());
+ }
+}
+
+JNIEXPORT jlong JNICALL
+Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputTensor(JNIEnv* env,
+ jclass clazz,
+ jlong handle,
+ jint index) {
+ tflite::Interpreter* interpreter = convertLongToInterpreter(env, handle);
+ if (interpreter == nullptr) return 0;
+ return reinterpret_cast<jlong>(
+ interpreter->tensor(interpreter->inputs()[index]));
+}
+
+JNIEXPORT jlong JNICALL
+Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputTensor(JNIEnv* env,
+ jclass clazz,
+ jlong handle,
+ jint index) {
+ tflite::Interpreter* interpreter = convertLongToInterpreter(env, handle);
+ if (interpreter == nullptr) return 0;
+ return reinterpret_cast<jlong>(
+ interpreter->tensor(interpreter->outputs()[index]));
+}
+
+JNIEXPORT jint JNICALL
+Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputCount(JNIEnv* env,
+ jclass clazz,
+ jlong handle) {
+ tflite::Interpreter* interpreter = convertLongToInterpreter(env, handle);
+ if (interpreter == nullptr) return 0;
+ return static_cast<jint>(interpreter->inputs().size());
+}
+
+JNIEXPORT jint JNICALL
+Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputCount(JNIEnv* env,
+ jclass clazz,
+ jlong handle) {
+ tflite::Interpreter* interpreter = convertLongToInterpreter(env, handle);
+ if (interpreter == nullptr) return 0;
+ return static_cast<jint>(interpreter->outputs().size());
+}
+
JNIEXPORT jobjectArray JNICALL
Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputNames(JNIEnv* env,
jclass clazz,
@@ -434,114 +346,21 @@ Java_org_tensorflow_lite_NativeInterpreterWrapper_createInterpreter(
}
// Sets inputs, runs inference, and returns outputs as long handles.
-JNIEXPORT jlongArray JNICALL
-Java_org_tensorflow_lite_NativeInterpreterWrapper_run(
- JNIEnv* env, jclass clazz, jlong interpreter_handle, jlong error_handle,
- jobjectArray sizes, jintArray data_types, jintArray nums_of_bytes,
- jobjectArray values, jobject wrapper, jboolean memory_allocated) {
+JNIEXPORT void JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_run(
+ JNIEnv* env, jclass clazz, jlong interpreter_handle, jlong error_handle) {
tflite::Interpreter* interpreter =
convertLongToInterpreter(env, interpreter_handle);
- if (interpreter == nullptr) return nullptr;
+ if (interpreter == nullptr) return;
BufferErrorReporter* error_reporter =
convertLongToErrorReporter(env, error_handle);
- if (error_reporter == nullptr) return nullptr;
- const int input_size = env->GetArrayLength(sizes);
- // validates inputs
- TfLiteStatus status = checkInputs(env, interpreter, input_size, data_types,
- nums_of_bytes, values, sizes);
- if (status != kTfLiteOk) return nullptr;
- if (!memory_allocated ||
- !areInputDimensionsTheSame(env, interpreter, input_size, sizes)) {
- // resizes inputs
- status = resizeInputs(env, interpreter, input_size, sizes);
- if (status != kTfLiteOk) {
- throwException(env, kNullPointerException,
- "Internal error: Can not resize the input: %s",
- error_reporter->CachedErrorMessage());
- return nullptr;
- }
- // allocates memory
- status = interpreter->AllocateTensors();
- if (status != kTfLiteOk) {
- throwException(env, kNullPointerException,
- "Internal error: Can not allocate memory for the given "
- "inputs: %s",
- error_reporter->CachedErrorMessage());
- return nullptr;
- }
- }
- // sets inputs
- status = setInputs(env, interpreter, input_size, data_types, nums_of_bytes,
- values);
- if (status != kTfLiteOk) return nullptr;
- timespec beforeInference = ::tflite::getCurrentTime();
- // runs inference
+ if (error_reporter == nullptr) return;
+
if (interpreter->Invoke() != kTfLiteOk) {
throwException(env, kIllegalArgumentException,
"Internal error: Failed to run on the given Interpreter: %s",
error_reporter->CachedErrorMessage());
- return nullptr;
- }
- timespec afterInference = ::tflite::getCurrentTime();
- jclass wrapper_clazz = env->GetObjectClass(wrapper);
- jfieldID fid =
- env->GetFieldID(wrapper_clazz, "inferenceDurationNanoseconds", "J");
- if (env->ExceptionCheck()) {
- env->ExceptionClear();
- } else if (fid != nullptr) {
- env->SetLongField(
- wrapper, fid,
- ::tflite::timespec_diff_nanoseconds(&beforeInference, &afterInference));
- }
- // returns outputs
- const std::vector<int>& results = interpreter->outputs();
- if (results.empty()) {
- throwException(
- env, kIllegalArgumentException,
- "Internal error: The Interpreter does not have any outputs.");
- return nullptr;
- }
- jlongArray outputs = env->NewLongArray(results.size());
- size_t size = results.size();
- for (int i = 0; i < size; ++i) {
- TfLiteTensor* source = interpreter->tensor(results[i]);
- jlong output = reinterpret_cast<jlong>(source);
- env->SetLongArrayRegion(outputs, i, 1, &output);
- }
- return outputs;
-}
-
-JNIEXPORT jintArray JNICALL
-Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputDims(
- JNIEnv* env, jclass clazz, jlong handle, jint input_idx, jint num_bytes) {
- tflite::Interpreter* interpreter = convertLongToInterpreter(env, handle);
- if (interpreter == nullptr) return nullptr;
- const int idx = static_cast<int>(input_idx);
- if (input_idx < 0 || input_idx >= interpreter->inputs().size()) {
- throwException(env, kIllegalArgumentException,
- "Input error: Out of range: Failed to get %d-th input out of"
- " %d inputs",
- input_idx, interpreter->inputs().size());
- return nullptr;
- }
- TfLiteTensor* target = interpreter->tensor(interpreter->inputs()[idx]);
- int size = target->dims->size;
- if (num_bytes >= 0) { // verifies num of bytes matches if num_bytes if valid.
- int expected_num_bytes = elementByteSize(target->type);
- for (int i = 0; i < size; ++i) {
- expected_num_bytes *= target->dims->data[i];
- }
- if (num_bytes != expected_num_bytes) {
- throwException(env, kIllegalArgumentException,
- "Input error: Failed to get input dimensions. %d-th input "
- "should have %d bytes, but found %d bytes.",
- idx, expected_num_bytes, num_bytes);
- return nullptr;
- }
+ return;
}
- jintArray outputs = env->NewIntArray(size);
- env->SetIntArrayRegion(outputs, 0, size, &(target->dims->data[0]));
- return outputs;
}
JNIEXPORT jint JNICALL
diff --git a/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.h b/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.h
index 128ece4981..618fba480e 100644
--- a/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.h
+++ b/tensorflow/contrib/lite/java/src/main/native/nativeinterpreterwrapper_jni.h
@@ -29,9 +29,6 @@ limitations under the License.
namespace tflite {
// This is to be provided at link-time by a library.
extern std::unique_ptr<OpResolver> CreateOpResolver();
-extern timespec getCurrentTime();
-extern jlong timespec_diff_nanoseconds(struct timespec* start,
- struct timespec* stop);
} // namespace tflite
#ifdef __cplusplus
@@ -40,6 +37,57 @@ extern "C" {
/*
* Class: org_tensorflow_lite_NativeInterpreterWrapper
+ * Method: allocateTensors
+ * Signature: (JJ)V
+ */
+JNIEXPORT void JNICALL
+Java_org_tensorflow_lite_NativeInterpreterWrapper_allocateTensors(
+ JNIEnv* env, jclass clazz, jlong handle, jlong error_handle);
+
+/*
+ * Class: org_tensorflow_lite_NativeInterpreterWrapper
+ * Method: getInputTensor
+ * Signature: (JI)J
+ */
+JNIEXPORT jlong JNICALL
+Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputTensor(JNIEnv* env,
+ jclass clazz,
+ jlong handle,
+ jint index);
+
+/*
+ * Class: org_tensorflow_lite_NativeInterpreterWrapper
+ * Method: getOutputTensor
+ * Signature: (JI)J
+ */
+JNIEXPORT jlong JNICALL
+Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputTensor(JNIEnv* env,
+ jclass clazz,
+ jlong handle,
+ jint index);
+
+/*
+ * Class: org_tensorflow_lite_NativeInterpreterWrapper
+ * Method: getInputCount
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL
+Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputCount(JNIEnv* env,
+ jclass clazz,
+ jlong handle);
+
+/*
+ * Class: org_tensorflow_lite_NativeInterpreterWrapper
+ * Method: getOutputCount
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL
+Java_org_tensorflow_lite_NativeInterpreterWrapper_getOutputCount(JNIEnv* env,
+ jclass clazz,
+ jlong handle);
+
+/*
+ * Class: org_tensorflow_lite_NativeInterpreterWrapper
* Method:
* Signature: (J)[Ljava/lang/Object;
*/
@@ -118,28 +166,11 @@ Java_org_tensorflow_lite_NativeInterpreterWrapper_createInterpreter(
/*
* Class: org_tensorflow_lite_NativeInterpreterWrapper
- * Method:
- * Signature:
- * (JJ[Ljava/lang/Object;[I[I[Ljava/lang/Object;Ljava/lang/Object;Z)[J
- */
-JNIEXPORT jlongArray JNICALL
-Java_org_tensorflow_lite_NativeInterpreterWrapper_run(
- JNIEnv* env, jclass clazz, jlong interpreter_handle, jlong error_handle,
- jobjectArray sizes, jintArray data_types, jintArray nums_of_bytes,
- jobjectArray values, jobject wrapper, jboolean memory_allocated);
-
-/*
- * Class: org_tensorflow_lite_NativeInterpreterWrapper
- * Method:
- * Signature: (JII)[I
- *
- * Gets input dimensions. If num_bytes is non-negative, it will check whether
- * num_bytes matches num of bytes required by the input, and return null and
- * throw IllegalArgumentException if not.
+ * Method: run
+ * Signature: (JJ)V
*/
-JNIEXPORT jintArray JNICALL
-Java_org_tensorflow_lite_NativeInterpreterWrapper_getInputDims(
- JNIEnv* env, jclass clazz, jlong handle, jint input_idx, jint num_bytes);
+JNIEXPORT void JNICALL Java_org_tensorflow_lite_NativeInterpreterWrapper_run(
+ JNIEnv* env, jclass clazz, jlong interpreter_handle, jlong error_handle);
/*
* Class: org_tensorflow_lite_NativeInterpreterWrapper
diff --git a/tensorflow/contrib/lite/java/src/main/native/tensor_jni.cc b/tensorflow/contrib/lite/java/src/main/native/tensor_jni.cc
index 9e9387da86..7ff96a3172 100644
--- a/tensorflow/contrib/lite/java/src/main/native/tensor_jni.cc
+++ b/tensorflow/contrib/lite/java/src/main/native/tensor_jni.cc
@@ -29,6 +29,35 @@ TfLiteTensor* convertLongToTensor(JNIEnv* env, jlong handle) {
return reinterpret_cast<TfLiteTensor*>(handle);
}
+size_t elementByteSize(TfLiteType data_type) {
+ // The code in this file makes the assumption that the
+ // TensorFlow TF_DataTypes and the Java primitive types
+ // have the same byte sizes. Validate that:
+ switch (data_type) {
+ case kTfLiteFloat32:
+ static_assert(sizeof(jfloat) == 4,
+ "Internal error: Java float not compatible with "
+ "kTfLiteFloat");
+ return 4;
+ case kTfLiteInt32:
+ static_assert(sizeof(jint) == 4,
+ "Internal error: Java int not compatible with kTfLiteInt");
+ return 4;
+ case kTfLiteUInt8:
+ static_assert(sizeof(jbyte) == 1,
+ "Internal error: Java byte not compatible with "
+ "kTfLiteUInt8");
+ return 1;
+ case kTfLiteInt64:
+ static_assert(sizeof(jlong) == 8,
+ "Internal error: Java long not compatible with "
+ "kTfLiteInt64");
+ return 8;
+ default:
+ return 0;
+ }
+}
+
size_t writeOneDimensionalArray(JNIEnv* env, jobject object, TfLiteType type,
void* dst, size_t dst_size) {
jarray array = static_cast<jarray>(object);
@@ -141,48 +170,6 @@ size_t readMultiDimensionalArray(JNIEnv* env, TfLiteType data_type, char* src,
}
}
-} // namespace
-
-size_t elementByteSize(TfLiteType data_type) {
- // The code in this file makes the assumption that the
- // TensorFlow TF_DataTypes and the Java primitive types
- // have the same byte sizes. Validate that:
- switch (data_type) {
- case kTfLiteFloat32:
- static_assert(sizeof(jfloat) == 4,
- "Interal error: Java float not compatible with "
- "kTfLiteFloat");
- return 4;
- case kTfLiteInt32:
- static_assert(sizeof(jint) == 4,
- "Interal error: Java int not compatible with kTfLiteInt");
- return 4;
- case kTfLiteUInt8:
- static_assert(sizeof(jbyte) == 1,
- "Interal error: Java byte not compatible with "
- "kTfLiteUInt8");
- return 1;
- case kTfLiteInt64:
- static_assert(sizeof(jlong) == 8,
- "Interal error: Java long not compatible with "
- "kTfLiteInt64");
- return 8;
- default:
- return 0;
- }
-}
-
-size_t writeByteBuffer(JNIEnv* env, jobject object, char** dst, int dst_size) {
- char* buf = static_cast<char*>(env->GetDirectBufferAddress(object));
- if (!buf) {
- throwException(env, kIllegalArgumentException,
- "Input ByteBuffer is not a direct buffer");
- return 0;
- }
- *dst = buf;
- return dst_size;
-}
-
size_t writeMultiDimensionalArray(JNIEnv* env, jobject src, TfLiteType type,
int dims_left, char** dst, int dst_size) {
if (dims_left <= 1) {
@@ -203,6 +190,37 @@ size_t writeMultiDimensionalArray(JNIEnv* env, jobject src, TfLiteType type,
}
}
+} // namespace
+
+JNIEXPORT jobject JNICALL Java_org_tensorflow_lite_Tensor_buffer(JNIEnv* env,
+ jclass clazz,
+ jlong handle) {
+ TfLiteTensor* tensor = convertLongToTensor(env, handle);
+ if (tensor == nullptr) return nullptr;
+ if (tensor->data.raw == nullptr) {
+ throwException(env, kIllegalArgumentException,
+ "Internal error: Tensor hasn't been allocated.");
+ return nullptr;
+ }
+ return env->NewDirectByteBuffer(static_cast<void*>(tensor->data.raw),
+ static_cast<jlong>(tensor->bytes));
+}
+
+JNIEXPORT void JNICALL Java_org_tensorflow_lite_Tensor_writeDirectBuffer(
+ JNIEnv* env, jclass clazz, jlong handle, jobject src) {
+ TfLiteTensor* tensor = convertLongToTensor(env, handle);
+ if (tensor == nullptr) return;
+
+ char* src_data_raw = static_cast<char*>(env->GetDirectBufferAddress(src));
+ if (!src_data_raw) {
+ throwException(env, kIllegalArgumentException,
+ "Input ByteBuffer is not a direct buffer");
+ return;
+ }
+
+ tensor->data.raw = src_data_raw;
+}
+
JNIEXPORT void JNICALL
Java_org_tensorflow_lite_Tensor_readMultiDimensionalArray(JNIEnv* env,
jclass clazz,
@@ -220,6 +238,27 @@ Java_org_tensorflow_lite_Tensor_readMultiDimensionalArray(JNIEnv* env,
num_dims, static_cast<jarray>(value));
}
+JNIEXPORT void JNICALL
+Java_org_tensorflow_lite_Tensor_writeMultiDimensionalArray(JNIEnv* env,
+ jclass clazz,
+ jlong handle,
+ jobject src) {
+ TfLiteTensor* tensor = convertLongToTensor(env, handle);
+ if (tensor == nullptr) return;
+ if (tensor->data.raw == nullptr) {
+ throwException(env, kIllegalArgumentException,
+ "Internal error: Target Tensor hasn't been allocated.");
+ return;
+ }
+ if (tensor->dims->size == 0) {
+ throwException(env, kIllegalArgumentException,
+ "Internal error: Cannot copy empty/scalar Tensors.");
+ return;
+ }
+ writeMultiDimensionalArray(env, src, tensor->type, tensor->dims->size,
+ &tensor->data.raw, tensor->bytes);
+}
+
JNIEXPORT jint JNICALL Java_org_tensorflow_lite_Tensor_dtype(JNIEnv* env,
jclass clazz,
jlong handle) {
@@ -237,3 +276,11 @@ Java_org_tensorflow_lite_Tensor_shape(JNIEnv* env, jclass clazz, jlong handle) {
env->SetIntArrayRegion(result, 0, num_dims, tensor->dims->data);
return result;
}
+
+JNIEXPORT jint JNICALL Java_org_tensorflow_lite_Tensor_numBytes(JNIEnv* env,
+ jclass clazz,
+ jlong handle) {
+ const TfLiteTensor* tensor = convertLongToTensor(env, handle);
+ if (tensor == nullptr) return 0;
+ return static_cast<jint>(tensor->bytes);
+}
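
The moved elementByteSize() helper pins down the invariant that the static_asserts check: each supported TfLiteType has the same width as its Java counterpart. A minimal Java restatement of that mapping (an illustrative sketch, not code from this change):

    final class ElementSizeDemo {
      // Byte width per element for the types the JNI layer supports; mirrors
      // elementByteSize() in tensor_jni.cc. Unknown types map to 0, just as
      // the C++ default case does.
      static int elementByteSize(String tfLiteType) {
        switch (tfLiteType) {
          case "FLOAT32": return 4; // sizeof(jfloat)
          case "INT32":   return 4; // sizeof(jint)
          case "UINT8":   return 1; // sizeof(jbyte)
          case "INT64":   return 8; // sizeof(jlong)
          default:        return 0; // unsupported
        }
      }

      public static void main(String[] args) {
        System.out.println(elementByteSize("FLOAT32")); // 4
      }
    }
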
diff --git a/tensorflow/contrib/lite/java/src/main/native/tensor_jni.h b/tensorflow/contrib/lite/java/src/main/native/tensor_jni.h
index 3a4910dcc3..06e2546af8 100644
--- a/tensorflow/contrib/lite/java/src/main/native/tensor_jni.h
+++ b/tensorflow/contrib/lite/java/src/main/native/tensor_jni.h
@@ -24,8 +24,25 @@ extern "C" {
#endif // __cplusplus
/*
- * Class: org_tensorflow_lite_TfLiteTensor
- * Method:
+ * Class: org_tensorflow_lite_Tensor
+ * Method: buffer
+ * Signature: (J)Ljava/nio/ByteBuffer;
+ */
+JNIEXPORT jobject JNICALL Java_org_tensorflow_lite_Tensor_buffer(JNIEnv* env,
+ jclass clazz,
+ jlong handle);
+
+/*
+ * Class: org_tensorflow_lite_Tensor
+ * Method: writeDirectBuffer
+ * Signature: (JLjava/nio/ByteBuffer;)V
+ */
+JNIEXPORT void JNICALL Java_org_tensorflow_lite_Tensor_writeDirectBuffer(
+ JNIEnv* env, jclass clazz, jlong handle, jobject src);
+
+/*
+ * Class: org_tensorflow_lite_Tensor
+ * Method: dtype
* Signature: (J)I
*/
JNIEXPORT jint JNICALL Java_org_tensorflow_lite_Tensor_dtype(JNIEnv* env,
@@ -33,8 +50,8 @@ JNIEXPORT jint JNICALL Java_org_tensorflow_lite_Tensor_dtype(JNIEnv* env,
jlong handle);
/*
- * Class: org_tensorflow_lite_TfLiteTensor
- * Method:
+ * Class: org_tensorflow_lite_Tensor
+ * Method: shape
* Signature: (J)[I
*/
JNIEXPORT jintArray JNICALL Java_org_tensorflow_lite_Tensor_shape(JNIEnv* env,
@@ -42,31 +59,35 @@ JNIEXPORT jintArray JNICALL Java_org_tensorflow_lite_Tensor_shape(JNIEnv* env,
jlong handle);
/*
- * Class: org_tensorflow_lite_TfLiteTensor
- * Method:
+ * Class: org_tensorflow_lite_Tensor
+ * Method: numBytes
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_tensorflow_lite_Tensor_numBytes(JNIEnv* env,
+ jclass clazz,
+ jlong handle);
+
+/*
+ * Class: org_tensorflow_lite_Tensor
+ * Method: readMultiDimensionalArray
* Signature: (JLjava/lang/Object;)
*/
JNIEXPORT void JNICALL
Java_org_tensorflow_lite_Tensor_readMultiDimensionalArray(JNIEnv* env,
jclass clazz,
jlong handle,
- jobject value);
+ jobject dst);
/*
- * Finds the size of each data type.
- */
-size_t elementByteSize(TfLiteType data_type);
-
-/*
- * Writes data of a ByteBuffer into dest.
- */
-size_t writeByteBuffer(JNIEnv* env, jobject object, char** dst, int dst_size);
-
-/*
- * Writes a multi-dimensional array into dest.
+ * Class: org_tensorflow_lite_Tensor
+ * Method: writeMultiDimensionalArray
+ * Signature: (JLjava/lang/Object;)V
*/
-size_t writeMultiDimensionalArray(JNIEnv* env, jobject src, TfLiteType type,
- int dims_left, char** dst, int dst_size);
+JNIEXPORT void JNICALL
+Java_org_tensorflow_lite_Tensor_writeMultiDimensionalArray(JNIEnv* env,
+ jclass clazz,
+ jlong handle,
+ jobject src);
#ifdef __cplusplus
} // extern "C"
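
By the JNI naming convention, each exported symbol Java_org_tensorflow_lite_Tensor_<name> binds to a static native method <name> on org.tensorflow.lite.Tensor (the jclass parameter confirms the methods are static). A hedged sketch of the matching Java-side declarations, reconstructed from the signatures above rather than taken from the Java sources:

    package org.tensorflow.lite;

    import java.nio.ByteBuffer;

    final class Tensor {
      // (J)Ljava/nio/ByteBuffer; -> long in, ByteBuffer out.
      private static native ByteBuffer buffer(long handle);
      // (JLjava/nio/ByteBuffer;)V -> long + ByteBuffer in, void out.
      private static native void writeDirectBuffer(long handle, ByteBuffer src);
      private static native int dtype(long handle);
      private static native int[] shape(long handle);
      private static native int numBytes(long handle);
      private static native void readMultiDimensionalArray(long handle, Object dst);
      private static native void writeMultiDimensionalArray(long handle, Object src);
    }
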
diff --git a/tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/InterpreterTest.java b/tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/InterpreterTest.java
index 82007a6ab5..d66a73db94 100644
--- a/tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/InterpreterTest.java
+++ b/tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/InterpreterTest.java
@@ -165,6 +165,24 @@ public final class InterpreterTest {
}
@Test
+ public void testRunWithByteBufferOutput() {
+ float[] oneD = {1.23f, 6.54f, 7.81f};
+ float[][] twoD = {oneD, oneD, oneD, oneD, oneD, oneD, oneD, oneD};
+ float[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
+ float[][][][] fourD = {threeD, threeD};
+ ByteBuffer parsedOutput =
+ ByteBuffer.allocateDirect(2 * 8 * 8 * 3 * 4).order(ByteOrder.nativeOrder());
+ try (Interpreter interpreter = new Interpreter(MODEL_FILE)) {
+ interpreter.run(fourD, parsedOutput);
+ }
+ float[] outputOneD = {
+ parsedOutput.getFloat(0), parsedOutput.getFloat(4), parsedOutput.getFloat(8)
+ };
+ float[] expected = {3.69f, 19.62f, 23.43f};
+ assertThat(outputOneD).usingTolerance(0.1f).containsExactly(expected).inOrder();
+ }
+
+ @Test
public void testMobilenetRun() {
// Create a gray image.
float[][][][] img = new float[1][224][224][3];
@@ -203,7 +221,9 @@ public final class InterpreterTest {
assertThat(e)
.hasMessageThat()
.contains(
- "DataType (2) of input data does not match with the DataType (1) of model inputs.");
+ "Cannot convert between a TensorFlowLite tensor with type "
+ + "FLOAT32 and a Java object of type [[[[I (which is compatible with the"
+ + " TensorFlowLite type INT32)");
}
interpreter.close();
}
@@ -223,8 +243,8 @@ public final class InterpreterTest {
assertThat(e)
.hasMessageThat()
.contains(
- "Cannot convert an TensorFlowLite tensor with type "
- + "FLOAT32 to a Java object of type [[[[I (which is compatible with the"
+ "Cannot convert between a TensorFlowLite tensor with type "
+ + "FLOAT32 and a Java object of type [[[[I (which is compatible with the"
+ " TensorFlowLite type INT32)");
}
interpreter.close();
@@ -311,4 +331,11 @@ public final class InterpreterTest {
interpreter.close();
fileChannel.close();
}
+
+ @Test
+ public void testRedundantClose() throws Exception {
+ Interpreter interpreter = new Interpreter(MODEL_FILE);
+ interpreter.close();
+ interpreter.close();
+ }
}
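
testRunWithByteBufferOutput relies on the output buffer being direct, native-ordered, and sized exactly to the tensor's byte count: 2 * 8 * 8 * 3 FLOAT32 elements at 4 bytes each is 1536 bytes. A self-contained sketch of that sizing arithmetic (the helper name is illustrative):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    final class OutputBufferDemo {
      // Total byte count for a tensor: product of the dims times element width.
      static int byteCount(int[] shape, int elementByteSize) {
        int n = elementByteSize;
        for (int d : shape) n *= d;
        return n;
      }

      public static void main(String[] args) {
        int capacity = byteCount(new int[] {2, 8, 8, 3}, 4); // 1536, as in the test
        ByteBuffer out = ByteBuffer.allocateDirect(capacity).order(ByteOrder.nativeOrder());
        // After run(), consecutive floats sit 4 bytes apart:
        // out.getFloat(0), out.getFloat(4), out.getFloat(8), ...
        System.out.println(out.capacity()); // 1536
      }
    }
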
diff --git a/tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/NativeInterpreterWrapperTest.java b/tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/NativeInterpreterWrapperTest.java
index 9e41cb132d..9c4a5acd79 100644
--- a/tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/NativeInterpreterWrapperTest.java
+++ b/tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/NativeInterpreterWrapperTest.java
@@ -20,6 +20,8 @@ import static org.junit.Assert.fail;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
+import java.util.HashMap;
+import java.util.Map;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@@ -101,10 +103,10 @@ public final class NativeInterpreterWrapperTest {
float[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
float[][][][] fourD = {threeD, threeD};
Object[] inputs = {fourD};
- Tensor[] outputs = wrapper.run(inputs);
- assertThat(outputs.length).isEqualTo(1);
float[][][][] parsedOutputs = new float[2][8][8][3];
- outputs[0].copyTo(parsedOutputs);
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
+ wrapper.run(inputs, outputs);
float[] outputOneD = parsedOutputs[0][0][0];
float[] expected = {3.69f, -19.62f, 23.43f};
assertThat(outputOneD).usingTolerance(0.1f).containsExactly(expected).inOrder();
@@ -112,6 +114,27 @@ public final class NativeInterpreterWrapperTest {
}
@Test
+ public void testRunWithBufferOutput() {
+ try (NativeInterpreterWrapper wrapper = new NativeInterpreterWrapper(FLOAT_MODEL_PATH)) {
+ float[] oneD = {1.23f, -6.54f, 7.81f};
+ float[][] twoD = {oneD, oneD, oneD, oneD, oneD, oneD, oneD, oneD};
+ float[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
+ float[][][][] fourD = {threeD, threeD};
+ Object[] inputs = {fourD};
+ ByteBuffer parsedOutput =
+ ByteBuffer.allocateDirect(2 * 8 * 8 * 3 * 4).order(ByteOrder.nativeOrder());
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutput);
+ wrapper.run(inputs, outputs);
+ float[] outputOneD = {
+ parsedOutput.getFloat(0), parsedOutput.getFloat(4), parsedOutput.getFloat(8)
+ };
+ float[] expected = {3.69f, -19.62f, 23.43f};
+ assertThat(outputOneD).usingTolerance(0.1f).containsExactly(expected).inOrder();
+ }
+ }
+
+ @Test
public void testRunWithInputsOfSameDims() {
NativeInterpreterWrapper wrapper = new NativeInterpreterWrapper(FLOAT_MODEL_PATH);
float[] oneD = {1.23f, -6.54f, 7.81f};
@@ -119,17 +142,16 @@ public final class NativeInterpreterWrapperTest {
float[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
float[][][][] fourD = {threeD, threeD};
Object[] inputs = {fourD};
- Tensor[] outputs = wrapper.run(inputs);
- assertThat(outputs.length).isEqualTo(1);
float[][][][] parsedOutputs = new float[2][8][8][3];
- outputs[0].copyTo(parsedOutputs);
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
+ wrapper.run(inputs, outputs);
float[] outputOneD = parsedOutputs[0][0][0];
float[] expected = {3.69f, -19.62f, 23.43f};
assertThat(outputOneD).usingTolerance(0.1f).containsExactly(expected).inOrder();
- outputs = wrapper.run(inputs);
- assertThat(outputs.length).isEqualTo(1);
parsedOutputs = new float[2][8][8][3];
- outputs[0].copyTo(parsedOutputs);
+ outputs.put(0, parsedOutputs);
+ wrapper.run(inputs, outputs);
outputOneD = parsedOutputs[0][0][0];
assertThat(outputOneD).usingTolerance(0.1f).containsExactly(expected).inOrder();
wrapper.close();
@@ -143,10 +165,10 @@ public final class NativeInterpreterWrapperTest {
int[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
int[][][][] fourD = {threeD, threeD};
Object[] inputs = {fourD};
- Tensor[] outputs = wrapper.run(inputs);
- assertThat(outputs.length).isEqualTo(1);
int[][][][] parsedOutputs = new int[2][4][4][12];
- outputs[0].copyTo(parsedOutputs);
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
+ wrapper.run(inputs, outputs);
int[] outputOneD = parsedOutputs[0][0][0];
int[] expected = {3, 7, -4, 3, 7, -4, 3, 7, -4, 3, 7, -4};
assertThat(outputOneD).isEqualTo(expected);
@@ -161,10 +183,10 @@ public final class NativeInterpreterWrapperTest {
long[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
long[][][][] fourD = {threeD, threeD};
Object[] inputs = {fourD};
- Tensor[] outputs = wrapper.run(inputs);
- assertThat(outputs.length).isEqualTo(1);
long[][][][] parsedOutputs = new long[2][4][4][12];
- outputs[0].copyTo(parsedOutputs);
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
+ wrapper.run(inputs, outputs);
long[] outputOneD = parsedOutputs[0][0][0];
long[] expected = {-892834092L, 923423L, 2123918239018L, -892834092L, 923423L, 2123918239018L,
-892834092L, 923423L, 2123918239018L, -892834092L, 923423L, 2123918239018L};
@@ -182,10 +204,10 @@ public final class NativeInterpreterWrapperTest {
Object[] inputs = {fourD};
int[] inputDims = {2, 8, 8, 3};
wrapper.resizeInput(0, inputDims);
- Tensor[] outputs = wrapper.run(inputs);
- assertThat(outputs.length).isEqualTo(1);
byte[][][][] parsedOutputs = new byte[2][4][4][12];
- outputs[0].copyTo(parsedOutputs);
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
+ wrapper.run(inputs, outputs);
byte[] outputOneD = parsedOutputs[0][0][0];
byte[] expected = {(byte) 0xe0, 0x4f, (byte) 0xd0, (byte) 0xe0, 0x4f, (byte) 0xd0,
(byte) 0xe0, 0x4f, (byte) 0xd0, (byte) 0xe0, 0x4f, (byte) 0xd0};
@@ -208,13 +230,14 @@ public final class NativeInterpreterWrapperTest {
}
}
}
+ bbuf.rewind();
Object[] inputs = {bbuf};
int[] inputDims = {2, 8, 8, 3};
wrapper.resizeInput(0, inputDims);
- Tensor[] outputs = wrapper.run(inputs);
- assertThat(outputs.length).isEqualTo(1);
byte[][][][] parsedOutputs = new byte[2][4][4][12];
- outputs[0].copyTo(parsedOutputs);
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
+ wrapper.run(inputs, outputs);
byte[] outputOneD = parsedOutputs[0][0][0];
byte[] expected = {
(byte) 0xe0, 0x4f, (byte) 0xd0, (byte) 0xe0, 0x4f, (byte) 0xd0,
@@ -240,21 +263,22 @@ public final class NativeInterpreterWrapperTest {
}
}
Object[] inputs = {bbuf};
+ float[][][][] parsedOutputs = new float[4][8][8][3];
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
try {
- wrapper.run(inputs);
+ wrapper.run(inputs, outputs);
fail();
} catch (IllegalArgumentException e) {
assertThat(e)
.hasMessageThat()
.contains(
- "Failed to get input dimensions. 0-th input should have 768 bytes, but found 3072 bytes");
+ "Cannot convert between a TensorFlowLite buffer with 768 bytes and a "
+ + "ByteBuffer with 3072 bytes.");
}
int[] inputDims = {4, 8, 8, 3};
wrapper.resizeInput(0, inputDims);
- Tensor[] outputs = wrapper.run(inputs);
- assertThat(outputs.length).isEqualTo(1);
- float[][][][] parsedOutputs = new float[4][8][8][3];
- outputs[0].copyTo(parsedOutputs);
+ wrapper.run(inputs, outputs);
float[] outputOneD = parsedOutputs[0][0][0];
float[] expected = {3.69f, -19.62f, 23.43f};
assertThat(outputOneD).usingTolerance(0.1f).containsExactly(expected).inOrder();
@@ -267,14 +291,18 @@ public final class NativeInterpreterWrapperTest {
ByteBuffer bbuf = ByteBuffer.allocateDirect(2 * 7 * 8 * 3);
bbuf.order(ByteOrder.nativeOrder());
Object[] inputs = {bbuf};
+ Map<Integer, Object> outputs = new HashMap<>();
+ ByteBuffer parsedOutput = ByteBuffer.allocateDirect(2 * 7 * 8 * 3);
+ outputs.put(0, parsedOutput);
try {
- wrapper.run(inputs);
+ wrapper.run(inputs, outputs);
fail();
} catch (IllegalArgumentException e) {
assertThat(e)
.hasMessageThat()
.contains(
- "Failed to get input dimensions. 0-th input should have 192 bytes, but found 336 bytes.");
+ "Cannot convert between a TensorFlowLite buffer with 192 bytes and a "
+ + "ByteBuffer with 336 bytes.");
}
wrapper.close();
}
@@ -287,14 +315,18 @@ public final class NativeInterpreterWrapperTest {
int[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
int[][][][] fourD = {threeD, threeD};
Object[] inputs = {fourD};
+ int[][][][] parsedOutputs = new int[2][8][8][3];
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
try {
- wrapper.run(inputs);
+ wrapper.run(inputs, outputs);
fail();
} catch (IllegalArgumentException e) {
assertThat(e)
.hasMessageThat()
.contains(
- "DataType (2) of input data does not match with the DataType (1) of model inputs.");
+ "Cannot convert between a TensorFlowLite tensor with type FLOAT32 and a Java object "
+ + "of type [[[[I (which is compatible with the TensorFlowLite type INT32)");
}
wrapper.close();
}
@@ -308,8 +340,11 @@ public final class NativeInterpreterWrapperTest {
float[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
float[][][][] fourD = {threeD, threeD};
Object[] inputs = {fourD};
+ float[][][][] parsedOutputs = new float[2][8][8][3];
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
try {
- wrapper.run(inputs);
+ wrapper.run(inputs, outputs);
fail();
} catch (IllegalArgumentException e) {
assertThat(e).hasMessageThat().contains("Invalid handle to Interpreter.");
@@ -321,7 +356,7 @@ public final class NativeInterpreterWrapperTest {
NativeInterpreterWrapper wrapper = new NativeInterpreterWrapper(FLOAT_MODEL_PATH);
try {
Object[] inputs = {};
- wrapper.run(inputs);
+ wrapper.run(inputs, null);
fail();
} catch (IllegalArgumentException e) {
assertThat(e).hasMessageThat().contains("Inputs should not be null or empty.");
@@ -337,11 +372,14 @@ public final class NativeInterpreterWrapperTest {
float[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
float[][][][] fourD = {threeD, threeD};
Object[] inputs = {fourD, fourD};
+ float[][][][] parsedOutputs = new float[2][8][8][3];
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
try {
- wrapper.run(inputs);
+ wrapper.run(inputs, outputs);
fail();
} catch (IllegalArgumentException e) {
- assertThat(e).hasMessageThat().contains("Expected num of inputs is 1 but got 2");
+ assertThat(e).hasMessageThat().contains("Invalid input Tensor index: 1");
}
wrapper.close();
}
@@ -353,13 +391,18 @@ public final class NativeInterpreterWrapperTest {
float[][] twoD = {oneD, oneD, oneD, oneD, oneD, oneD, oneD};
float[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
Object[] inputs = {threeD};
+ float[][][][] parsedOutputs = new float[2][8][8][3];
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
try {
- wrapper.run(inputs);
+ wrapper.run(inputs, outputs);
fail();
} catch (IllegalArgumentException e) {
assertThat(e)
.hasMessageThat()
- .contains("0-th input should have 4 dimensions, but found 3 dimensions");
+ .contains(
+ "Cannot copy between a TensorFlowLite tensor with shape [8, 7, 3] and a "
+ + "Java object with shape [2, 8, 8, 3].");
}
wrapper.close();
}
@@ -372,92 +415,23 @@ public final class NativeInterpreterWrapperTest {
float[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
float[][][][] fourD = {threeD, threeD};
Object[] inputs = {fourD};
+ float[][][][] parsedOutputs = new float[2][8][8][3];
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
try {
- wrapper.run(inputs);
+ wrapper.run(inputs, outputs);
fail();
} catch (IllegalArgumentException e) {
assertThat(e)
.hasMessageThat()
- .contains("0-th input dimension should be [?,8,8,3], but found [?,8,7,3]");
+ .contains(
+ "Cannot copy between a TensorFlowLite tensor with shape [2, 8, 7, 3] and a "
+ + "Java object with shape [2, 8, 8, 3].");
}
wrapper.close();
}
@Test
- public void testNumElements() {
- int[] shape = {2, 3, 4};
- int num = NativeInterpreterWrapper.numElements(shape);
- assertThat(num).isEqualTo(24);
- shape = null;
- num = NativeInterpreterWrapper.numElements(shape);
- assertThat(num).isEqualTo(0);
- }
-
- @Test
- public void testIsNonEmtpyArray() {
- assertThat(NativeInterpreterWrapper.isNonEmptyArray(null)).isFalse();
- assertThat(NativeInterpreterWrapper.isNonEmptyArray(3.2)).isFalse();
- int[] emptyArray = {};
- assertThat(NativeInterpreterWrapper.isNonEmptyArray(emptyArray)).isFalse();
- int[] validArray = {9, 5, 2, 1};
- assertThat(NativeInterpreterWrapper.isNonEmptyArray(validArray)).isTrue();
- }
-
- @Test
- public void testDataTypeOf() {
- float[] testEmtpyArray = {};
- DataType dataType = NativeInterpreterWrapper.dataTypeOf(testEmtpyArray);
- assertThat(dataType).isEqualTo(DataType.FLOAT32);
- float[] testFloatArray = {0.783f, 0.251f};
- dataType = NativeInterpreterWrapper.dataTypeOf(testFloatArray);
- assertThat(dataType).isEqualTo(DataType.FLOAT32);
- float[][] testMultiDimArray = {testFloatArray, testFloatArray, testFloatArray};
- dataType = NativeInterpreterWrapper.dataTypeOf(testFloatArray);
- assertThat(dataType).isEqualTo(DataType.FLOAT32);
- try {
- double[] testDoubleArray = {0.783, 0.251};
- NativeInterpreterWrapper.dataTypeOf(testDoubleArray);
- fail();
- } catch (IllegalArgumentException e) {
- assertThat(e).hasMessageThat().contains("cannot resolve DataType of");
- }
- try {
- Float[] testBoxedArray = {0.783f, 0.251f};
- NativeInterpreterWrapper.dataTypeOf(testBoxedArray);
- fail();
- } catch (IllegalArgumentException e) {
- assertThat(e).hasMessageThat().contains("cannot resolve DataType of [Ljava.lang.Float;");
- }
- }
-
- @Test
- public void testNumDimensions() {
- int scalar = 1;
- assertThat(NativeInterpreterWrapper.numDimensions(scalar)).isEqualTo(0);
- int[][] array = {{2, 4}, {1, 9}};
- assertThat(NativeInterpreterWrapper.numDimensions(array)).isEqualTo(2);
- try {
- int[] emptyArray = {};
- NativeInterpreterWrapper.numDimensions(emptyArray);
- fail();
- } catch (IllegalArgumentException e) {
- assertThat(e).hasMessageThat().contains("Array lengths cannot be 0.");
- }
- }
-
- @Test
- public void testFillShape() {
- int[][][] array = {{{23}, {14}, {87}}, {{12}, {42}, {31}}};
- int num = NativeInterpreterWrapper.numDimensions(array);
- int[] shape = new int[num];
- NativeInterpreterWrapper.fillShape(array, 0, shape);
- assertThat(num).isEqualTo(3);
- assertThat(shape[0]).isEqualTo(2);
- assertThat(shape[1]).isEqualTo(3);
- assertThat(shape[2]).isEqualTo(1);
- }
-
- @Test
public void testGetInferenceLatency() {
NativeInterpreterWrapper wrapper = new NativeInterpreterWrapper(FLOAT_MODEL_PATH);
float[] oneD = {1.23f, 6.54f, 7.81f};
@@ -465,8 +439,10 @@ public final class NativeInterpreterWrapperTest {
float[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
float[][][][] fourD = {threeD, threeD};
Object[] inputs = {fourD};
- Tensor[] outputs = wrapper.run(inputs);
- assertThat(outputs.length).isEqualTo(1);
+ float[][][][] parsedOutputs = new float[2][8][8][3];
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
+ wrapper.run(inputs, outputs);
assertThat(wrapper.getLastNativeInferenceDurationNanoseconds()).isGreaterThan(0L);
wrapper.close();
}
@@ -486,13 +462,14 @@ public final class NativeInterpreterWrapperTest {
float[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
float[][][][] fourD = {threeD, threeD};
Object[] inputs = {fourD};
+ float[][][][] parsedOutputs = new float[2][8][8][3];
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, parsedOutputs);
try {
- wrapper.run(inputs);
+ wrapper.run(inputs, outputs);
fail();
} catch (IllegalArgumentException e) {
- assertThat(e)
- .hasMessageThat()
- .contains("0-th input dimension should be [?,8,8,3], but found [?,8,7,3]");
+ // Expected.
}
assertThat(wrapper.getLastNativeInferenceDurationNanoseconds()).isNull();
wrapper.close();
@@ -502,41 +479,7 @@ public final class NativeInterpreterWrapperTest {
public void testGetInputDims() {
NativeInterpreterWrapper wrapper = new NativeInterpreterWrapper(FLOAT_MODEL_PATH);
int[] expectedDims = {1, 8, 8, 3};
- assertThat(wrapper.getInputDims(0)).isEqualTo(expectedDims);
- wrapper.close();
- }
-
- @Test
- public void testGetInputDimsOutOfRange() {
- NativeInterpreterWrapper wrapper = new NativeInterpreterWrapper(FLOAT_MODEL_PATH);
- try {
- wrapper.getInputDims(-1);
- fail();
- } catch (IllegalArgumentException e) {
- assertThat(e).hasMessageThat().contains("Out of range");
- }
- try {
- wrapper.getInputDims(1);
- fail();
- } catch (IllegalArgumentException e) {
- assertThat(e).hasMessageThat().contains("Out of range");
- }
- wrapper.close();
- }
-
- @Test
- public void testGetOutputDataType() {
- NativeInterpreterWrapper wrapper = new NativeInterpreterWrapper(FLOAT_MODEL_PATH);
- assertThat(wrapper.getOutputDataType(0)).contains("float");
- wrapper.close();
- wrapper = new NativeInterpreterWrapper(LONG_MODEL_PATH);
- assertThat(wrapper.getOutputDataType(0)).contains("long");
- wrapper.close();
- wrapper = new NativeInterpreterWrapper(INT_MODEL_PATH);
- assertThat(wrapper.getOutputDataType(0)).contains("int");
- wrapper.close();
- wrapper = new NativeInterpreterWrapper(BYTE_MODEL_PATH);
- assertThat(wrapper.getOutputDataType(0)).contains("byte");
+ assertThat(wrapper.getInputTensor(0).shape()).isEqualTo(expectedDims);
wrapper.close();
}
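
These tests all migrate from the old Tensor[] run(inputs) API to the new convention: callers pre-allocate output targets and pass them keyed by output index, and the wrapper fills them in place. A hedged usage sketch (the model path is a placeholder; assumes the same one-input/one-output float model the tests use, and runs inside the org.tensorflow.lite package like the tests):

    // Requires java.util.HashMap and java.util.Map.
    float[][][][] input = new float[2][8][8][3];
    float[][][][] result = new float[2][8][8][3];
    Map<Integer, Object> outputs = new HashMap<>();
    outputs.put(0, result); // output index -> pre-allocated target
    try (NativeInterpreterWrapper wrapper = new NativeInterpreterWrapper("path/to/model.bin")) {
      wrapper.run(new Object[] {input}, outputs);
    }
    // result now holds output 0; the same map can be reused across runs.
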
diff --git a/tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/TensorTest.java b/tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/TensorTest.java
index 94b6632bb8..71ef044943 100644
--- a/tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/TensorTest.java
+++ b/tensorflow/contrib/lite/java/src/test/java/org/tensorflow/lite/TensorTest.java
@@ -18,6 +18,10 @@ package org.tensorflow.lite;
import static com.google.common.truth.Truth.assertThat;
import static org.junit.Assert.fail;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.HashMap;
+import java.util.Map;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -32,7 +36,7 @@ public final class TensorTest {
"tensorflow/contrib/lite/java/src/testdata/add.bin";
private NativeInterpreterWrapper wrapper;
- private long nativeHandle;
+ private Tensor tensor;
@Before
public void setUp() {
@@ -42,8 +46,10 @@ public final class TensorTest {
float[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD};
float[][][][] fourD = {threeD, threeD};
Object[] inputs = {fourD};
- Tensor[] outputs = wrapper.run(inputs);
- nativeHandle = outputs[0].nativeHandle;
+ Map<Integer, Object> outputs = new HashMap<>();
+ outputs.put(0, new float[2][8][8][3]);
+ wrapper.run(inputs, outputs);
+ tensor = wrapper.getOutputTensor(0);
}
@After
@@ -52,17 +58,16 @@ public final class TensorTest {
}
@Test
- public void testFromHandle() throws Exception {
- Tensor tensor = Tensor.fromHandle(nativeHandle);
+ public void testBasic() throws Exception {
assertThat(tensor).isNotNull();
int[] expectedShape = {2, 8, 8, 3};
- assertThat(tensor.shapeCopy).isEqualTo(expectedShape);
- assertThat(tensor.dtype).isEqualTo(DataType.FLOAT32);
+ assertThat(tensor.shape()).isEqualTo(expectedShape);
+ assertThat(tensor.dataType()).isEqualTo(DataType.FLOAT32);
+ assertThat(tensor.numBytes()).isEqualTo(2 * 8 * 8 * 3 * 4);
}
@Test
public void testCopyTo() {
- Tensor tensor = Tensor.fromHandle(nativeHandle);
float[][][][] parsedOutputs = new float[2][8][8][3];
tensor.copyTo(parsedOutputs);
float[] outputOneD = parsedOutputs[0][0][0];
@@ -71,8 +76,31 @@ public final class TensorTest {
}
@Test
+ public void testCopyToByteBuffer() {
+ ByteBuffer parsedOutput =
+ ByteBuffer.allocateDirect(2 * 8 * 8 * 3 * 4).order(ByteOrder.nativeOrder());
+ tensor.copyTo(parsedOutput);
+ assertThat(parsedOutput.position()).isEqualTo(2 * 8 * 8 * 3 * 4);
+ float[] outputOneD = {
+ parsedOutput.getFloat(0), parsedOutput.getFloat(4), parsedOutput.getFloat(8)
+ };
+ float[] expected = {3.69f, 19.62f, 23.43f};
+ assertThat(outputOneD).usingTolerance(0.1f).containsExactly(expected).inOrder();
+ }
+
+ @Test
+ public void testCopyToInvalidByteBuffer() {
+ ByteBuffer parsedOutput = ByteBuffer.allocateDirect(3 * 4).order(ByteOrder.nativeOrder());
+ try {
+ tensor.copyTo(parsedOutput);
+ fail();
+ } catch (IllegalArgumentException e) {
+ // Expected.
+ }
+ }
+
+ @Test
public void testCopyToWrongType() {
- Tensor tensor = Tensor.fromHandle(nativeHandle);
int[][][][] parsedOutputs = new int[2][8][8][3];
try {
tensor.copyTo(parsedOutputs);
@@ -81,15 +109,13 @@ public final class TensorTest {
assertThat(e)
.hasMessageThat()
.contains(
- "Cannot convert an TensorFlowLite tensor with type "
- + "FLOAT32 to a Java object of type [[[[I (which is compatible with the TensorFlowLite "
- + "type INT32)");
+ "Cannot convert between a TensorFlowLite tensor with type FLOAT32 and a Java object "
+ + "of type [[[[I (which is compatible with the TensorFlowLite type INT32)");
}
}
@Test
public void testCopyToWrongShape() {
- Tensor tensor = Tensor.fromHandle(nativeHandle);
float[][][][] parsedOutputs = new float[1][8][8][3];
try {
tensor.copyTo(parsedOutputs);
@@ -98,8 +124,104 @@ public final class TensorTest {
assertThat(e)
.hasMessageThat()
.contains(
- "Shape of output target [1, 8, 8, 3] does not match "
- + "with the shape of the Tensor [2, 8, 8, 3].");
+ "Cannot copy between a TensorFlowLite tensor with shape [2, 8, 8, 3] "
+ + "and a Java object with shape [1, 8, 8, 3].");
+ }
+ }
+
+ @Test
+ public void testSetTo() {
+ float[][][][] input = new float[2][8][8][3];
+ float[][][][] output = new float[2][8][8][3];
+ ByteBuffer inputByteBuffer =
+ ByteBuffer.allocateDirect(2 * 8 * 8 * 3 * 4).order(ByteOrder.nativeOrder());
+
+ input[0][0][0][0] = 2.0f;
+ tensor.setTo(input);
+ tensor.copyTo(output);
+ assertThat(output[0][0][0][0]).isEqualTo(2.0f);
+
+ inputByteBuffer.putFloat(0, 3.0f);
+ tensor.setTo(inputByteBuffer);
+ tensor.copyTo(output);
+ assertThat(output[0][0][0][0]).isEqualTo(3.0f);
+ }
+
+ @Test
+ public void testSetToInvalidByteBuffer() {
+ ByteBuffer input = ByteBuffer.allocateDirect(3 * 4).order(ByteOrder.nativeOrder());
+ try {
+ tensor.setTo(input);
+ fail();
+ } catch (IllegalArgumentException e) {
+ // Success.
+ }
+ }
+
+ @Test
+ public void testGetInputShapeIfDifferent() {
+ ByteBuffer byteBufferInput = ByteBuffer.allocateDirect(3 * 4).order(ByteOrder.nativeOrder());
+ assertThat(tensor.getInputShapeIfDifferent(byteBufferInput)).isNull();
+
+ float[][][][] sameShapeInput = new float[2][8][8][3];
+ assertThat(tensor.getInputShapeIfDifferent(sameShapeInput)).isNull();
+
+ float[][][][] differentShapeInput = new float[1][8][8][3];
+ assertThat(tensor.getInputShapeIfDifferent(differentShapeInput))
+ .isEqualTo(new int[] {1, 8, 8, 3});
+ }
+
+ @Test
+ public void testDataTypeOf() {
+ float[] testEmptyArray = {};
+ DataType dataType = Tensor.dataTypeOf(testEmptyArray);
+ assertThat(dataType).isEqualTo(DataType.FLOAT32);
+ float[] testFloatArray = {0.783f, 0.251f};
+ dataType = Tensor.dataTypeOf(testFloatArray);
+ assertThat(dataType).isEqualTo(DataType.FLOAT32);
+ float[][] testMultiDimArray = {testFloatArray, testFloatArray, testFloatArray};
+ dataType = Tensor.dataTypeOf(testMultiDimArray);
+ assertThat(dataType).isEqualTo(DataType.FLOAT32);
+ try {
+ double[] testDoubleArray = {0.783, 0.251};
+ Tensor.dataTypeOf(testDoubleArray);
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertThat(e).hasMessageThat().contains("cannot resolve DataType of");
}
+ try {
+ Float[] testBoxedArray = {0.783f, 0.251f};
+ Tensor.dataTypeOf(testBoxedArray);
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertThat(e).hasMessageThat().contains("cannot resolve DataType of [Ljava.lang.Float;");
+ }
+ }
+
+ @Test
+ public void testNumDimensions() {
+ int scalar = 1;
+ assertThat(Tensor.numDimensions(scalar)).isEqualTo(0);
+ int[][] array = {{2, 4}, {1, 9}};
+ assertThat(Tensor.numDimensions(array)).isEqualTo(2);
+ try {
+ int[] emptyArray = {};
+ Tensor.numDimensions(emptyArray);
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertThat(e).hasMessageThat().contains("Array lengths cannot be 0.");
+ }
+ }
+
+ @Test
+ public void testFillShape() {
+ int[][][] array = {{{23}, {14}, {87}}, {{12}, {42}, {31}}};
+ int num = Tensor.numDimensions(array);
+ int[] shape = new int[num];
+ Tensor.fillShape(array, 0, shape);
+ assertThat(num).isEqualTo(3);
+ assertThat(shape[0]).isEqualTo(2);
+ assertThat(shape[1]).isEqualTo(3);
+ assertThat(shape[2]).isEqualTo(1);
}
}
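
testNumDimensions and testFillShape pin down how shapes are inferred from nested Java arrays: scalars have zero dimensions, each nesting level adds one, and zero-length arrays are rejected. A self-contained re-derivation of that rule via reflection (a sketch, not the Tensor implementation itself):

    import java.lang.reflect.Array;

    final class ShapeDemo {
      static int numDimensions(Object o) {
        if (o == null || !o.getClass().isArray()) return 0; // scalar
        if (Array.getLength(o) == 0) {
          throw new IllegalArgumentException("Array lengths cannot be 0.");
        }
        return 1 + numDimensions(Array.get(o, 0)); // recurse into first element
      }

      public static void main(String[] args) {
        System.out.println(numDimensions(1));                            // 0
        System.out.println(numDimensions(new int[][] {{2, 4}, {1, 9}})); // 2
      }
    }
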
diff --git a/tensorflow/contrib/lite/java/src/testhelper/java/org/tensorflow/lite/TestHelper.java b/tensorflow/contrib/lite/java/src/testhelper/java/org/tensorflow/lite/TestHelper.java
index 3aef0c3bb6..c23521c077 100644
--- a/tensorflow/contrib/lite/java/src/testhelper/java/org/tensorflow/lite/TestHelper.java
+++ b/tensorflow/contrib/lite/java/src/testhelper/java/org/tensorflow/lite/TestHelper.java
@@ -58,7 +58,7 @@ public class TestHelper {
*/
public static int[] getInputDims(Interpreter interpreter, int index) {
if (interpreter != null && interpreter.wrapper != null) {
- return interpreter.wrapper.getInputDims(index);
+ return interpreter.wrapper.getInputTensor(index).shape();
} else {
throw new IllegalArgumentException(
"Interpreter has not initialized;" + " Failed to get input dimensions.");
@@ -77,7 +77,7 @@ public class TestHelper {
*/
public static String getOutputDataType(Interpreter interpreter, int index) {
if (interpreter != null && interpreter.wrapper != null) {
- return interpreter.wrapper.getOutputDataType(index);
+ return interpreter.wrapper.getOutputTensor(index).dataType().toStringName();
} else {
throw new IllegalArgumentException(
"Interpreter has not initialized;" + " Failed to get output data type.");
diff --git a/tensorflow/contrib/lite/kernels/BUILD b/tensorflow/contrib/lite/kernels/BUILD
index a77897a173..33594c138b 100644
--- a/tensorflow/contrib/lite/kernels/BUILD
+++ b/tensorflow/contrib/lite/kernels/BUILD
@@ -46,11 +46,17 @@ cc_library(
hdrs = [
"eigen_support.h",
],
- copts = tflite_copts(),
+ copts = tflite_copts() + [
+ "-Wno-error=reorder",
+ ] + select({
+ "//tensorflow:ios": ["-Wno-error=invalid-partial-specialization"],
+ "//conditions:default": [
+ ],
+ }),
deps = [
":op_macros",
"//tensorflow/contrib/lite:context",
- "//third_party/eigen3",
+ "//tensorflow/contrib/lite/kernels/internal:optimized",
],
)
@@ -130,7 +136,7 @@ cc_library(
srcs = [
"activations.cc",
"add.cc",
- "arg_max.cc",
+ "arg_min_max.cc",
"audio_spectrogram.cc",
"basic_rnn.cc",
"batch_to_space_nd.cc",
@@ -149,6 +155,7 @@ cc_library(
"embedding_lookup_sparse.cc",
"exp.cc",
"expand_dims.cc",
+ "fake_quant.cc",
"floor.cc",
"fully_connected.cc",
"gather.cc",
@@ -163,6 +170,7 @@ cc_library(
"neg.cc",
"pad.cc",
"pooling.cc",
+ "pow.cc",
"reduce.cc",
"register.cc",
"reshape.cc",
@@ -289,9 +297,9 @@ tf_cc_test(
)
tf_cc_test(
- name = "arg_max_test",
+ name = "arg_min_max_test",
size = "small",
- srcs = ["arg_max_test.cc"],
+ srcs = ["arg_min_max_test.cc"],
tags = [
"tflite_not_portable_ios",
],
@@ -557,6 +565,19 @@ tf_cc_test(
)
tf_cc_test(
+ name = "fake_quant_test",
+ size = "small",
+ srcs = ["fake_quant_test.cc"],
+ tags = ["tflite_not_portable_ios"],
+ deps = [
+ ":builtin_ops",
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite/kernels:test_util",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+tf_cc_test(
name = "maximum_minimum_test",
size = "small",
srcs = ["maximum_minimum_test.cc"],
@@ -1009,6 +1030,20 @@ tf_cc_test(
],
)
+tf_cc_test(
+ name = "pow_test",
+ size = "small",
+ srcs = ["pow_test.cc"],
+ tags = ["tflite_not_portable_ios"],
+ deps = [
+ ":builtin_ops",
+ "//tensorflow/contrib/lite:builtin_op_data",
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite/kernels:test_util",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
filegroup(
name = "all_files",
srcs = glob(
diff --git a/tensorflow/contrib/lite/kernels/add.cc b/tensorflow/contrib/lite/kernels/add.cc
index ccb957ebc5..f44d531cbf 100644
--- a/tensorflow/contrib/lite/kernels/add.cc
+++ b/tensorflow/contrib/lite/kernels/add.cc
@@ -170,29 +170,44 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}
template <KernelType kernel_type>
-void EvalAddFloat(TfLiteContext* context, TfLiteNode* node,
- TfLiteAddParams* params, const OpData* data,
- const TfLiteTensor* input1, const TfLiteTensor* input2,
- TfLiteTensor* output) {
- float output_activation_min, output_activation_max;
- CalculateActivationRangeFloat(params->activation, &output_activation_min,
- &output_activation_max);
-#define TF_LITE_ADD(type, opname) \
- type::opname(GetTensorData<float>(input1), GetTensorDims(input1), \
- GetTensorData<float>(input2), GetTensorDims(input2), \
- output_activation_min, output_activation_max, \
- GetTensorData<float>(output), GetTensorDims(output))
- if (kernel_type == kReference) {
- if (data->requires_broadcast) {
- TF_LITE_ADD(reference_ops, BroadcastAdd);
+void EvalAdd(TfLiteContext* context, TfLiteNode* node, TfLiteAddParams* params,
+ const OpData* data, const TfLiteTensor* input1,
+ const TfLiteTensor* input2, TfLiteTensor* output) {
+#define TF_LITE_ADD(type, opname, data_type) \
+ data_type output_activation_min, output_activation_max; \
+ CalculateActivationRange(params->activation, &output_activation_min, \
+ &output_activation_max); \
+ type::opname(GetTensorData<data_type>(input1), GetTensorDims(input1), \
+ GetTensorData<data_type>(input2), GetTensorDims(input2), \
+ output_activation_min, output_activation_max, \
+ GetTensorData<data_type>(output), GetTensorDims(output))
+ if (output->type == kTfLiteInt32) {
+ if (kernel_type == kReference) {
+ if (data->requires_broadcast) {
+ TF_LITE_ADD(reference_ops, BroadcastAdd, int32_t);
+ } else {
+ TF_LITE_ADD(reference_ops, Add, int32_t);
+ }
} else {
- TF_LITE_ADD(reference_ops, Add);
+ if (data->requires_broadcast) {
+ TF_LITE_ADD(optimized_ops, BroadcastAdd, int32_t);
+ } else {
+ TF_LITE_ADD(optimized_ops, Add, int32_t);
+ }
}
- } else {
- if (data->requires_broadcast) {
- TF_LITE_ADD(optimized_ops, BroadcastAdd);
+ } else if (output->type == kTfLiteFloat32) {
+ if (kernel_type == kReference) {
+ if (data->requires_broadcast) {
+ TF_LITE_ADD(reference_ops, BroadcastAdd, float);
+ } else {
+ TF_LITE_ADD(reference_ops, Add, float);
+ }
} else {
- TF_LITE_ADD(optimized_ops, Add);
+ if (data->requires_broadcast) {
+ TF_LITE_ADD(optimized_ops, BroadcastAdd, float);
+ } else {
+ TF_LITE_ADD(optimized_ops, Add, float);
+ }
}
}
#undef TF_LITE_ADD
@@ -251,9 +266,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
- if (output->type == kTfLiteFloat32) {
- EvalAddFloat<kernel_type>(context, node, params, data, input1, input2,
- output);
+ if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
+ EvalAdd<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt16) {
TF_LITE_ENSURE_OK(context,
EvalAddQuantized<kernel_type>(context, node, params, data,
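
The rewritten EvalAdd folds the activation range calculation into the macro so the same path serves float and int32 tensors: add elementwise, then clamp to the activation range. A sketch of those semantics for the int32 path (the values mirror the RELU_N1_TO_1 test added in add_test.cc below):

    final class AddWithActivationDemo {
      // Elementwise add followed by the activation clamp; RELU_N1_TO_1
      // corresponds to the range [-1, 1].
      static int[] add(int[] a, int[] b, int actMin, int actMax) {
        int[] out = new int[a.length];
        for (int i = 0; i < a.length; i++) {
          out[i] = Math.min(Math.max(a[i] + b[i], actMin), actMax);
        }
        return out;
      }

      public static void main(String[] args) {
        int[] r = add(new int[] {-20, 2, 7, 8}, new int[] {1, 2, 3, 5}, -1, 1);
        System.out.println(java.util.Arrays.toString(r)); // [-1, 1, 1, 1]
      }
    }
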
diff --git a/tensorflow/contrib/lite/kernels/add_test.cc b/tensorflow/contrib/lite/kernels/add_test.cc
index 456a754e7e..0b58443211 100644
--- a/tensorflow/contrib/lite/kernels/add_test.cc
+++ b/tensorflow/contrib/lite/kernels/add_test.cc
@@ -52,6 +52,13 @@ class FloatAddOpModel : public BaseAddOpModel {
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
+class IntegerAddOpModel : public BaseAddOpModel {
+ public:
+ using BaseAddOpModel::BaseAddOpModel;
+
+ std::vector<int32_t> GetOutput() { return ExtractVector<int32_t>(output_); }
+};
+
class QuantizedAddOpModel : public BaseAddOpModel {
public:
using BaseAddOpModel::BaseAddOpModel;
@@ -133,6 +140,57 @@ TEST(FloatAddOpModel, WithBroadcast) {
}
}
+TEST(IntegerAddOpModel, NoActivation) {
+ IntegerAddOpModel m({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
+ ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-19, 4, 10, 13}));
+}
+
+TEST(IntegerAddOpModel, ActivationRELU_N1_TO_1) {
+ IntegerAddOpModel m({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
+ ActivationFunctionType_RELU_N1_TO_1);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1, 1, 1, 1}));
+}
+
+TEST(IntegerAddOpModel, VariousInputShapes) {
+ std::vector<std::initializer_list<int>> test_shapes = {
+ {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
+ for (int i = 0; i < test_shapes.size(); ++i) {
+ IntegerAddOpModel m({TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, {}}, ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5, 11, 1});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-19, 4, 10, 13, 22, 21}))
+ << "With shape number " << i;
+ }
+}
+
+TEST(IntegerAddOpModel, WithBroadcast) {
+ std::vector<std::initializer_list<int>> test_shapes = {
+ {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
+ for (int i = 0; i < test_shapes.size(); ++i) {
+ IntegerAddOpModel m({TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, {}}, // always a scalar
+ {TensorType_INT32, {}}, ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
+ m.PopulateTensor<int32_t>(m.input2(), {1});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-19, 3, 8, 9, 12, 21}))
+ << "With shape number " << i;
+ }
+}
+
TEST(QuantizedAddOpModel, QuantizedTestsNoActivation) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
std::vector<std::initializer_list<float>> inputs1 = {
diff --git a/tensorflow/contrib/lite/kernels/arg_max.cc b/tensorflow/contrib/lite/kernels/arg_min_max.cc
index 26f57e8896..4f30d09030 100644
--- a/tensorflow/contrib/lite/kernels/arg_max.cc
+++ b/tensorflow/contrib/lite/kernels/arg_min_max.cc
@@ -23,7 +23,7 @@ limitations under the License.
namespace tflite {
namespace ops {
namespace builtin {
-namespace arg_max {
+namespace arg_min_max {
constexpr int kInputTensor = 0;
constexpr int kAxis = 1;
@@ -80,30 +80,39 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
return context->ResizeTensor(context, output, output_size);
}
+template <typename T>
+std::function<bool(T, T)> GetCompareFunction(bool is_arg_max) {
+ if (is_arg_max) {
+ return std::greater<T>();
+ } else {
+ return std::less<T>();
+ }
+}
+
// The current impl actually ignores the axis argument.
// Only determine the index of the maximum value in the last dimension.
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* axis = GetInput(context, node, kAxis);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
-#define TF_LITE_ARG_MAX(data_type, axis_type, output_type) \
- optimized_ops::ArgMax(GetTensorData<axis_type>(axis), \
- GetTensorData<data_type>(input), GetTensorDims(input), \
- GetTensorData<output_type>(output), \
- GetTensorDims(output))
+#define TF_LITE_ARG_MIN_MAX(data_type, axis_type, output_type) \
+ optimized_ops::ArgMinMax( \
+ GetTensorData<axis_type>(axis), GetTensorData<data_type>(input), \
+ GetTensorDims(input), GetTensorData<output_type>(output), \
+ GetTensorDims(output), GetCompareFunction<data_type>(is_arg_max))
if (axis->type == kTfLiteInt32) {
switch (output->type) {
case kTfLiteInt32: {
switch (input->type) {
case kTfLiteFloat32:
- TF_LITE_ARG_MAX(float, int32_t, int32_t);
+ TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t);
break;
case kTfLiteUInt8:
- TF_LITE_ARG_MAX(uint8_t, int32_t, int32_t);
+ TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t);
break;
case kTfLiteInt32:
- TF_LITE_ARG_MAX(int32_t, int32_t, int32_t);
+ TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t);
break;
default:
return kTfLiteError;
@@ -112,13 +121,13 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
case kTfLiteInt64: {
switch (input->type) {
case kTfLiteFloat32:
- TF_LITE_ARG_MAX(float, int32_t, int64_t);
+ TF_LITE_ARG_MIN_MAX(float, int32_t, int64_t);
break;
case kTfLiteUInt8:
- TF_LITE_ARG_MAX(uint8_t, int32_t, int64_t);
+ TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int64_t);
break;
case kTfLiteInt32:
- TF_LITE_ARG_MAX(int32_t, int32_t, int64_t);
+ TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t);
break;
default:
return kTfLiteError;
@@ -132,13 +141,13 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
case kTfLiteInt32: {
switch (input->type) {
case kTfLiteFloat32:
- TF_LITE_ARG_MAX(float, int64_t, int32_t);
+ TF_LITE_ARG_MIN_MAX(float, int64_t, int32_t);
break;
case kTfLiteUInt8:
- TF_LITE_ARG_MAX(uint8_t, int64_t, int32_t);
+ TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int32_t);
break;
case kTfLiteInt32:
- TF_LITE_ARG_MAX(int32_t, int64_t, int32_t);
+ TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int32_t);
break;
default:
return kTfLiteError;
@@ -147,13 +156,13 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
case kTfLiteInt64: {
switch (input->type) {
case kTfLiteFloat32:
- TF_LITE_ARG_MAX(float, int64_t, int64_t);
+ TF_LITE_ARG_MIN_MAX(float, int64_t, int64_t);
break;
case kTfLiteUInt8:
- TF_LITE_ARG_MAX(uint8_t, int64_t, int64_t);
+ TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int64_t);
break;
case kTfLiteInt32:
- TF_LITE_ARG_MAX(int32_t, int64_t, int64_t);
+ TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int64_t);
break;
default:
return kTfLiteError;
@@ -163,16 +172,30 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteError;
}
}
-#undef TF_LITE_ARG_MAX
+#undef TF_LITE_ARG_MIN_MAX
return kTfLiteOk;
}
-} // namespace arg_max
+TfLiteStatus ArgMinEval(TfLiteContext* context, TfLiteNode* node) {
+ return Eval(context, node, false);
+}
+
+TfLiteStatus ArgMaxEval(TfLiteContext* context, TfLiteNode* node) {
+ return Eval(context, node, true);
+}
+
+} // namespace arg_min_max
TfLiteRegistration* Register_ARG_MAX() {
- static TfLiteRegistration r = {nullptr, nullptr, arg_max::Prepare,
- arg_max::Eval};
+ static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare,
+ arg_min_max::ArgMaxEval};
+ return &r;
+}
+
+TfLiteRegistration* Register_ARG_MIN() {
+ static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare,
+ arg_min_max::ArgMinEval};
return &r;
}
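
The refactor shares one Eval between ARG_MAX and ARG_MIN by threading a comparator through the reduction: std::greater<T> selects the maximum's index, std::less<T> the minimum's. The same idea over a single row, sketched in Java:

    import java.util.function.BiPredicate;

    final class ArgMinMaxDemo {
      // Index of the "best" element under the given comparator; the reduction
      // runs over the last dimension only, like the current kernel.
      static int argReduce(float[] row, BiPredicate<Float, Float> better) {
        int best = 0;
        for (int i = 1; i < row.length; i++) {
          if (better.test(row[i], row[best])) best = i;
        }
        return best;
      }

      public static void main(String[] args) {
        float[] row = {0.1f, 0.9f, 0.7f, 0.3f};
        System.out.println(argReduce(row, (a, b) -> a > b)); // 1 (argmax)
        System.out.println(argReduce(row, (a, b) -> a < b)); // 0 (argmin)
      }
    }
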
diff --git a/tensorflow/contrib/lite/kernels/arg_max_test.cc b/tensorflow/contrib/lite/kernels/arg_min_max_test.cc
index 31b15fe19a..90e5fdc532 100644
--- a/tensorflow/contrib/lite/kernels/arg_max_test.cc
+++ b/tensorflow/contrib/lite/kernels/arg_min_max_test.cc
@@ -24,16 +24,13 @@ namespace {
using ::testing::ElementsAreArray;
template <typename T>
-class ArgMaxOpModel : public SingleOpModel {
+class ArgBaseOpModel : public SingleOpModel {
public:
- ArgMaxOpModel(std::initializer_list<int> input_shape, TensorType input_type,
- TensorType output_type, TensorType index_output_type) {
+ ArgBaseOpModel(std::initializer_list<int> input_shape, TensorType input_type,
+ TensorType output_type, TensorType index_output_type) {
input_ = AddInput(input_type);
axis_ = AddInput(TensorType_INT32);
output_ = AddOutput(output_type);
- SetBuiltinOp(BuiltinOperator_ARG_MAX, BuiltinOptions_ArgMaxOptions,
- CreateArgMaxOptions(builder_, index_output_type).Union());
- BuildInterpreter({input_shape, {1, 1, 1, 1}});
}
int input() { return input_; }
@@ -42,12 +39,42 @@ class ArgMaxOpModel : public SingleOpModel {
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
- private:
+ protected:
int input_;
int axis_;
int output_;
};
+template <typename T>
+class ArgMaxOpModel : public ArgBaseOpModel<T> {
+ public:
+ ArgMaxOpModel(std::initializer_list<int> input_shape, TensorType input_type,
+ TensorType output_type, TensorType index_output_type)
+ : ArgBaseOpModel<T>(input_shape, input_type, output_type,
+ index_output_type) {
+ ArgBaseOpModel<T>::SetBuiltinOp(
+ BuiltinOperator_ARG_MAX, BuiltinOptions_ArgMaxOptions,
+ CreateArgMaxOptions(ArgBaseOpModel<T>::builder_, index_output_type)
+ .Union());
+ ArgBaseOpModel<T>::BuildInterpreter({input_shape, {1, 1, 1, 1}});
+ }
+};
+
+template <typename T>
+class ArgMinOpModel : public ArgBaseOpModel<T> {
+ public:
+ ArgMinOpModel(std::initializer_list<int> input_shape, TensorType input_type,
+ TensorType output_type, TensorType index_output_type)
+ : ArgBaseOpModel<T>(input_shape, input_type, output_type,
+ index_output_type) {
+ ArgBaseOpModel<T>::SetBuiltinOp(
+ BuiltinOperator_ARG_MIN, BuiltinOptions_ArgMinOptions,
+ CreateArgMinOptions(ArgBaseOpModel<T>::builder_, index_output_type)
+ .Union());
+ ArgBaseOpModel<T>::BuildInterpreter({input_shape, {1, 1, 1, 1}});
+ }
+};
+
TEST(ArgMaxOpTest, GetMaxArgFloat) {
ArgMaxOpModel<int32_t> model({1, 1, 1, 4}, TensorType_FLOAT32,
TensorType_INT32, TensorType_INT32);
@@ -96,6 +123,54 @@ TEST(ArgMaxOpTest, GetMaxArgOutput64) {
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 2, 1}));
}
+TEST(ArgMinOpTest, GetMinArgFloat) {
+ ArgMinOpModel<int32_t> model({1, 1, 1, 4}, TensorType_FLOAT32,
+ TensorType_INT32, TensorType_INT32);
+ model.PopulateTensor<float>(model.input(), {0.1, 0.9, 0.7, 0.3});
+ // Currently only support the last dimension.
+ model.PopulateTensor<int>(model.axis(), {3});
+ model.Invoke();
+
+ EXPECT_THAT(model.GetOutput(), ElementsAreArray({0}));
+ EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 1}));
+}
+
+TEST(ArgMinOpTest, GetMinArgInt) {
+ ArgMinOpModel<int32_t> model({1, 1, 1, 4}, TensorType_INT32, TensorType_INT32,
+ TensorType_INT32);
+ model.PopulateTensor<int>(model.input(), {1, 9, 7, 3});
+ // Currently only support the last dimension.
+ model.PopulateTensor<int>(model.axis(), {3});
+ model.Invoke();
+
+ EXPECT_THAT(model.GetOutput(), ElementsAreArray({0}));
+ EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 1}));
+}
+
+TEST(ArgMinOpTest, GetMinArgMulDimensions) {
+ ArgMinOpModel<int32_t> model({1, 1, 2, 4}, TensorType_INT32, TensorType_INT32,
+ TensorType_INT32);
+ model.PopulateTensor<int>(model.input(), {1, 2, 7, 8, 1, 9, 7, 3});
+ // Currently only support the last dimension.
+ model.PopulateTensor<int>(model.axis(), {3});
+ model.Invoke();
+
+ EXPECT_THAT(model.GetOutput(), ElementsAreArray({0, 0}));
+ EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 2, 1}));
+}
+
+TEST(ArgMinOpTest, GetMinArgOutput64) {
+ ArgMinOpModel<int64_t> model({1, 1, 2, 4}, TensorType_INT32, TensorType_INT64,
+ TensorType_INT64);
+ model.PopulateTensor<int>(model.input(), {10, 2, 7, 8, 1, 9, 7, 3});
+ // Currently only support the last dimension.
+ model.PopulateTensor<int>(model.axis(), {3});
+ model.Invoke();
+
+ EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0}));
+ EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 2, 1}));
+}
+
} // namespace
} // namespace tflite
diff --git a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
index 3425288f02..14a19aeef3 100644
--- a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
+++ b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
@@ -276,27 +276,33 @@ TfLiteStatus CheckLstmTensorDimensions(
TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
TfLiteNode* node, int n_input,
int n_output, int n_cell) {
- CheckLstmTensorDimensions(
- context, node, n_input, n_output, n_cell, kFwInputToInputWeightsTensor,
- kFwInputToForgetWeightsTensor, kFwInputToCellWeightsTensor,
- kFwInputToOutputWeightsTensor, kFwRecurrentToInputWeightsTensor,
- kFwRecurrentToForgetWeightsTensor, kFwRecurrentToCellWeightsTensor,
- kFwRecurrentToOutputWeightsTensor, kFwCellToInputWeightsTensor,
- kFwCellToForgetWeightsTensor, kFwCellToOutputWeightsTensor,
- kFwInputGateBiasTensor, kFwForgetGateBiasTensor, kFwCellGateBiasTensor,
- kFwOutputGateBiasTensor, kFwProjectionWeightsTensor,
- kFwProjectionBiasTensor);
-
- CheckLstmTensorDimensions(
- context, node, n_input, n_output, n_cell, kBwInputToInputWeightsTensor,
- kBwInputToForgetWeightsTensor, kBwInputToCellWeightsTensor,
- kBwInputToOutputWeightsTensor, kBwRecurrentToInputWeightsTensor,
- kBwRecurrentToForgetWeightsTensor, kBwRecurrentToCellWeightsTensor,
- kBwRecurrentToOutputWeightsTensor, kBwCellToInputWeightsTensor,
- kBwCellToForgetWeightsTensor, kBwCellToOutputWeightsTensor,
- kBwInputGateBiasTensor, kBwForgetGateBiasTensor, kBwCellGateBiasTensor,
- kBwOutputGateBiasTensor, kBwProjectionWeightsTensor,
- kBwProjectionBiasTensor);
+ TF_LITE_ENSURE_OK(
+ context,
+ CheckLstmTensorDimensions(
+ context, node, n_input, n_output, n_cell,
+ kFwInputToInputWeightsTensor, kFwInputToForgetWeightsTensor,
+ kFwInputToCellWeightsTensor, kFwInputToOutputWeightsTensor,
+ kFwRecurrentToInputWeightsTensor, kFwRecurrentToForgetWeightsTensor,
+ kFwRecurrentToCellWeightsTensor, kFwRecurrentToOutputWeightsTensor,
+ kFwCellToInputWeightsTensor, kFwCellToForgetWeightsTensor,
+ kFwCellToOutputWeightsTensor, kFwInputGateBiasTensor,
+ kFwForgetGateBiasTensor, kFwCellGateBiasTensor,
+ kFwOutputGateBiasTensor, kFwProjectionWeightsTensor,
+ kFwProjectionBiasTensor));
+
+ TF_LITE_ENSURE_OK(
+ context,
+ CheckLstmTensorDimensions(
+ context, node, n_input, n_output, n_cell,
+ kBwInputToInputWeightsTensor, kBwInputToForgetWeightsTensor,
+ kBwInputToCellWeightsTensor, kBwInputToOutputWeightsTensor,
+ kBwRecurrentToInputWeightsTensor, kBwRecurrentToForgetWeightsTensor,
+ kBwRecurrentToCellWeightsTensor, kBwRecurrentToOutputWeightsTensor,
+ kBwCellToInputWeightsTensor, kBwCellToForgetWeightsTensor,
+ kBwCellToOutputWeightsTensor, kBwInputGateBiasTensor,
+ kBwForgetGateBiasTensor, kBwCellGateBiasTensor,
+ kBwOutputGateBiasTensor, kBwProjectionWeightsTensor,
+ kBwProjectionBiasTensor));
// Check if Forward and Backward tensors match along required dimensions.
return kTfLiteOk;
@@ -334,7 +340,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const int n_fw_output = fw_recurrent_to_output_weights->dims->data[1];
// Check that input tensor dimensions matches with each other.
- CheckInputTensorDimensions(context, node, n_input, n_fw_output, n_fw_cell);
+ TF_LITE_ENSURE_OK(
+ context, CheckInputTensorDimensions(context, node, n_input, n_fw_output,
+ n_fw_cell));
// Get the pointer to output, state and scratch buffer tensors.
TfLiteTensor* fw_output = GetOutput(context, node, kFwOutputTensor);
@@ -404,7 +412,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const int n_bw_output = bw_recurrent_to_output_weights->dims->data[1];
// Check that input tensor dimensions matches with each other.
- CheckInputTensorDimensions(context, node, n_input, n_bw_output, n_bw_cell);
+ TF_LITE_ENSURE_OK(
+ context, CheckInputTensorDimensions(context, node, n_input, n_bw_output,
+ n_bw_cell));
// Get the pointer to output, output_state and cell_state buffer tensors.
TfLiteTensor* bw_output = GetOutput(context, node, kBwOutputTensor);
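
Before this change the CheckLstmTensorDimensions return values were silently discarded, so a failed dimension check did not stop Prepare. Wrapping each call in TF_LITE_ENSURE_OK turns every check into an early return. The control flow, sketched in Java with a status enum (the names are illustrative):

    final class EnsureOkDemo {
      enum Status { OK, ERROR }

      // Each check's status is propagated, as TF_LITE_ENSURE_OK now does,
      // instead of being dropped.
      static Status prepare(Status fwCheck, Status bwCheck) {
        if (fwCheck != Status.OK) return fwCheck;
        if (bwCheck != Status.OK) return bwCheck;
        return Status.OK;
      }

      public static void main(String[] args) {
        System.out.println(prepare(Status.OK, Status.ERROR)); // ERROR
      }
    }
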
diff --git a/tensorflow/contrib/lite/kernels/cast.cc b/tensorflow/contrib/lite/kernels/cast.cc
index 60770ca0aa..8dd48af57f 100644
--- a/tensorflow/contrib/lite/kernels/cast.cc
+++ b/tensorflow/contrib/lite/kernels/cast.cc
@@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include <string.h>
#include <algorithm>
+#include <complex>
#include "tensorflow/contrib/lite/builtin_op_data.h"
#include "tensorflow/contrib/lite/context.h"
#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
@@ -53,6 +54,20 @@ void copyCast(const FromT* in, ToT* out, int num_elements) {
[](FromT a) { return static_cast<ToT>(a); });
}
+template <typename ToT>
+void copyCast(const std::complex<float>* in, ToT* out, int num_elements) {
+ std::transform(in, in + num_elements, out, [](std::complex<float> a) {
+ return static_cast<ToT>(std::real(a));
+ });
+}
+
+template <>
+void copyCast(const std::complex<float>* in, std::complex<float>* out,
+ int num_elements) {
+ std::transform(in, in + num_elements, out,
+ [](std::complex<float> a) { return a; });
+}
+
template <typename FromT>
TfLiteStatus copyToTensor(const FromT* in, TfLiteTensor* out,
int num_elements) {
@@ -72,6 +87,10 @@ TfLiteStatus copyToTensor(const FromT* in, TfLiteTensor* out,
case kTfLiteBool:
copyCast(in, out->data.b, num_elements);
break;
+ case kTfLiteComplex64:
+ copyCast(in, reinterpret_cast<std::complex<float>*>(out->data.c64),
+ num_elements);
+ break;
default:
// Unsupported type.
return kTfLiteError;
@@ -95,6 +114,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return copyToTensor(input->data.f, output, num_elements);
case kTfLiteBool:
return copyToTensor(input->data.b, output, num_elements);
+ case kTfLiteComplex64:
+ return copyToTensor(
+ reinterpret_cast<std::complex<float>*>(input->data.c64), output,
+ num_elements);
default:
// Unsupported type.
return kTfLiteError;
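
The new copyCast overloads define the COMPLEX64 casting rule: casting to a real type keeps the real part and drops the imaginary component, while COMPLEX64-to-COMPLEX64 copies values through unchanged. Java has no complex primitive, so the sketch below models COMPLEX64 as interleaved (re, im) float pairs (an illustration of the rule, not the kernel code):

    import java.util.Arrays;

    final class ComplexCastDemo {
      // COMPLEX64 -> FLOAT32: keep the real part of each (re, im) pair.
      static float[] complexToReal(float[] interleaved) {
        float[] out = new float[interleaved.length / 2];
        for (int i = 0; i < out.length; i++) {
          out[i] = interleaved[2 * i]; // imaginary part at 2*i+1 is dropped
        }
        return out;
      }

      public static void main(String[] args) {
        float[] c = {1f, 11f, 2f, 12f, 3f, 13f}; // (1+11i), (2+12i), (3+13i)
        System.out.println(Arrays.toString(complexToReal(c))); // [1.0, 2.0, 3.0]
      }
    }
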
diff --git a/tensorflow/contrib/lite/kernels/cast_test.cc b/tensorflow/contrib/lite/kernels/cast_test.cc
index 53e2000737..954f998206 100644
--- a/tensorflow/contrib/lite/kernels/cast_test.cc
+++ b/tensorflow/contrib/lite/kernels/cast_test.cc
@@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+#include <complex>
+
#include <gtest/gtest.h>
#include "tensorflow/contrib/lite/interpreter.h"
#include "tensorflow/contrib/lite/kernels/register.h"
@@ -73,6 +75,71 @@ TEST(CastOpModel, CastBoolToFloat) {
ElementsAreArray({1.f, 1.0f, 0.f, 1.0f, 0.0f, 1.0f}));
}
+TEST(CastOpModel, CastComplex64ToFloat) {
+ CastOpModel m({TensorType_COMPLEX64, {2, 3}}, {TensorType_FLOAT32, {2, 3}});
+ m.PopulateTensor<std::complex<float>>(
+ m.input(),
+ {std::complex<float>(1.0f, 11.0f), std::complex<float>(2.0f, 12.0f),
+ std::complex<float>(3.0f, 13.0f), std::complex<float>(4.0f, 14.0f),
+ std::complex<float>(5.0f, 15.0f), std::complex<float>(6.0f, 16.0f)});
+ m.Invoke();
+ EXPECT_THAT(m.ExtractVector<float>(m.output()),
+ ElementsAreArray({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
+}
+
+TEST(CastOpModel, CastFloatToComplex64) {
+ CastOpModel m({TensorType_FLOAT32, {2, 3}}, {TensorType_COMPLEX64, {2, 3}});
+ m.PopulateTensor<float>(m.input(), {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f});
+ m.Invoke();
+ EXPECT_THAT(
+ m.ExtractVector<std::complex<float>>(m.output()),
+ ElementsAreArray(
+ {std::complex<float>(1.0f, 0.0f), std::complex<float>(2.0f, 0.0f),
+ std::complex<float>(3.0f, 0.0f), std::complex<float>(4.0f, 0.0f),
+ std::complex<float>(5.0f, 0.0f), std::complex<float>(6.0f, 0.0f)}));
+}
+
+TEST(CastOpModel, CastComplex64ToInt) {
+ CastOpModel m({TensorType_COMPLEX64, {2, 3}}, {TensorType_INT32, {2, 3}});
+ m.PopulateTensor<std::complex<float>>(
+ m.input(),
+ {std::complex<float>(1.0f, 11.0f), std::complex<float>(2.0f, 12.0f),
+ std::complex<float>(3.0f, 13.0f), std::complex<float>(4.0f, 14.0f),
+ std::complex<float>(5.0f, 15.0f), std::complex<float>(6.0f, 16.0f)});
+ m.Invoke();
+ EXPECT_THAT(m.ExtractVector<int>(m.output()),
+ ElementsAreArray({1, 2, 3, 4, 5, 6}));
+}
+
+TEST(CastOpModel, CastIntToComplex64) {
+ CastOpModel m({TensorType_INT32, {2, 3}}, {TensorType_COMPLEX64, {2, 3}});
+ m.PopulateTensor<int>(m.input(), {1, 2, 3, 4, 5, 6});
+ m.Invoke();
+ EXPECT_THAT(
+ m.ExtractVector<std::complex<float>>(m.output()),
+ ElementsAreArray(
+ {std::complex<float>(1.0f, 0.0f), std::complex<float>(2.0f, 0.0f),
+ std::complex<float>(3.0f, 0.0f), std::complex<float>(4.0f, 0.0f),
+ std::complex<float>(5.0f, 0.0f), std::complex<float>(6.0f, 0.0f)}));
+}
+
+TEST(CastOpModel, CastComplex64ToComplex64) {
+ CastOpModel m({TensorType_COMPLEX64, {2, 3}}, {TensorType_COMPLEX64, {2, 3}});
+ m.PopulateTensor<std::complex<float>>(
+ m.input(),
+ {std::complex<float>(1.0f, 11.0f), std::complex<float>(2.0f, 12.0f),
+ std::complex<float>(3.0f, 13.0f), std::complex<float>(4.0f, 14.0f),
+ std::complex<float>(5.0f, 15.0f), std::complex<float>(6.0f, 16.0f)});
+ m.Invoke();
+ EXPECT_THAT(
+ m.ExtractVector<std::complex<float>>(m.output()),
+ ElementsAreArray(
+ {std::complex<float>(1.0f, 11.0f), std::complex<float>(2.0f, 12.0f),
+ std::complex<float>(3.0f, 13.0f), std::complex<float>(4.0f, 14.0f),
+ std::complex<float>(5.0f, 15.0f),
+ std::complex<float>(6.0f, 16.0f)}));
+}
+
} // namespace
} // namespace tflite
int main(int argc, char** argv) {
diff --git a/tensorflow/contrib/lite/kernels/conv.cc b/tensorflow/contrib/lite/kernels/conv.cc
index 93267f9a4f..a4fe9e5550 100644
--- a/tensorflow/contrib/lite/kernels/conv.cc
+++ b/tensorflow/contrib/lite/kernels/conv.cc
@@ -309,18 +309,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* hwcn_weights =
&context->tensors[node->temporaries->data[data->hwcn_weights_index]];
hwcn_weights->type = data_type;
- hwcn_weights->allocation_type = kTfLiteDynamic;
- // Make sure we release any previous allocations before we reallocate.
- // TODO(petewarden): Persistent arenas would be a better fit for this, but
- // they aren't fully implemented yet.
- if (hwcn_weights->data.raw) {
- free(hwcn_weights->data.raw);
- hwcn_weights->data.raw = nullptr;
- }
+ hwcn_weights->allocation_type = kTfLiteArenaRwPersistent;
- // Note that hwcn_weights_status is a kTfLiteDynamic tensor, and
- // ResizeTensor will actually allocate space for it. The would be more
- // efficient if we placed hwcn_weights_status in the persistent arena.
auto hwcn_weights_status =
context->ResizeTensor(context, hwcn_weights, hwcn_weights_size);
if (hwcn_weights_status != kTfLiteOk) return hwcn_weights_status;
@@ -382,8 +372,8 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
TfLiteTensor* filter, TfLiteTensor* bias, TfLiteTensor* im2col,
TfLiteTensor* hwcn_weights, TfLiteTensor* output) {
float output_activation_min, output_activation_max;
- CalculateActivationRangeFloat(params->activation, &output_activation_min,
- &output_activation_max);
+ CalculateActivationRange(params->activation, &output_activation_min,
+ &output_activation_max);
KernelType effective_kernel_type;
if (((kernel_type == kMultithreadOptimized) ||
(kernel_type == kCblasOptimized)) &&
@@ -428,6 +418,7 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
filter_data = GetTensorData<float>(filter);
}
multithreaded_ops::Conv(
+ *eigen_support::GetThreadPoolDevice(context),
GetTensorData<float>(input), GetTensorDims(input), filter_data,
GetTensorDims(filter), GetTensorData<float>(bias),
GetTensorDims(bias), params->stride_width, params->stride_height,
diff --git a/tensorflow/contrib/lite/kernels/depthwise_conv.cc b/tensorflow/contrib/lite/kernels/depthwise_conv.cc
index a308de055f..16e5f1d065 100644
--- a/tensorflow/contrib/lite/kernels/depthwise_conv.cc
+++ b/tensorflow/contrib/lite/kernels/depthwise_conv.cc
@@ -173,8 +173,8 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* input, const TfLiteTensor* filter,
const TfLiteTensor* bias, TfLiteTensor* output) {
float output_activation_min, output_activation_max;
- CalculateActivationRangeFloat(params->activation, &output_activation_min,
- &output_activation_max);
+ CalculateActivationRange(params->activation, &output_activation_min,
+ &output_activation_max);
void (*depthwise_conv)(const float*, const Dims<4>&, const float*,
const Dims<4>&, const float*, const Dims<4>&, int, int,
diff --git a/tensorflow/contrib/lite/kernels/div.cc b/tensorflow/contrib/lite/kernels/div.cc
index d264821e30..bc5c3783fd 100644
--- a/tensorflow/contrib/lite/kernels/div.cc
+++ b/tensorflow/contrib/lite/kernels/div.cc
@@ -83,8 +83,8 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* input1, const TfLiteTensor* input2,
TfLiteTensor* output) {
float output_activation_min, output_activation_max;
- CalculateActivationRangeFloat(params->activation, &output_activation_min,
- &output_activation_max);
+ CalculateActivationRange(params->activation, &output_activation_min,
+ &output_activation_max);
#define TF_LITE_DIV(type, opname) \
type::opname(GetTensorData<float>(input1), GetTensorDims(input1), \
GetTensorData<float>(input2), GetTensorDims(input2), \
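
The CalculateActivationRangeFloat() -> CalculateActivationRange() rename above also appears in conv.cc, depthwise_conv.cc, and fully_connected.cc in this patch: the float-only helper gives way to one whose output type is deduced from the pointer arguments. Below is a minimal standalone sketch of such a helper; the real one lives in kernels/kernel_util.h, and the enum here is trimmed to three values for illustration.

#include <limits>

enum TfLiteFusedActivation { kTfLiteActNone, kTfLiteActRelu, kTfLiteActRelu6 };

// Output type T is deduced from the min/max pointers, so one helper serves
// both float and integer kernels.
template <typename T>
void CalculateActivationRange(TfLiteFusedActivation activation,
                              T* activation_min, T* activation_max) {
  if (activation == kTfLiteActRelu) {
    *activation_min = 0;
    *activation_max = std::numeric_limits<T>::max();
  } else if (activation == kTfLiteActRelu6) {
    *activation_min = 0;
    *activation_max = 6;
  } else {  // no fused activation: leave the full range
    *activation_min = std::numeric_limits<T>::lowest();
    *activation_max = std::numeric_limits<T>::max();
  }
}

int main() {
  float output_activation_min, output_activation_max;
  CalculateActivationRange(kTfLiteActRelu6, &output_activation_min,
                           &output_activation_max);  // yields [0, 6]
}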
diff --git a/tensorflow/contrib/lite/kernels/eigen_support.cc b/tensorflow/contrib/lite/kernels/eigen_support.cc
index f1fdb42624..4f0d020793 100644
--- a/tensorflow/contrib/lite/kernels/eigen_support.cc
+++ b/tensorflow/contrib/lite/kernels/eigen_support.cc
@@ -14,31 +14,89 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/contrib/lite/kernels/eigen_support.h"
-#include "third_party/eigen3/Eigen/Core"
+#include <utility>
+
+#include "tensorflow/contrib/lite/kernels/internal/optimized/eigen_spatial_convolutions.h"
#include "tensorflow/contrib/lite/kernels/op_macros.h"
namespace tflite {
namespace eigen_support {
+namespace {
+
+// We keep a single threadpool per TfLiteContext, shared by all convolution
+// operations in that context. This means that inferences started from
+// different threads may block each other, but since the underlying resource
+// of CPU cores should be consumed by the operations anyway, it shouldn't
+// affect overall performance.
+class EigenThreadPoolWrapper : public Eigen::ThreadPoolInterface {
+ public:
+ // Takes ownership of 'pool'
+ explicit EigenThreadPoolWrapper(Eigen::ThreadPool* pool) : pool_(pool) {}
+ ~EigenThreadPoolWrapper() override {}
+
+ void Schedule(std::function<void()> fn) override {
+ pool_->Schedule(std::move(fn));
+ }
+ int NumThreads() const override { return pool_->NumThreads(); }
+ int CurrentThreadId() const override { return pool_->CurrentThreadId(); }
+
+ private:
+ std::unique_ptr<Eigen::ThreadPool> pool_;
+};
-struct RefCountedEigenContext {
+struct RefCountedEigenContext : public TfLiteExternalContext {
+ std::unique_ptr<Eigen::ThreadPoolInterface> thread_pool_wrapper;
+ std::unique_ptr<Eigen::ThreadPoolDevice> device;
int num_references = 0;
};
+RefCountedEigenContext* GetEigenContext(TfLiteContext* context) {
+ return reinterpret_cast<RefCountedEigenContext*>(
+ context->GetExternalContext(context, kTfLiteEigenContext));
+}
+
+void InitDevice(TfLiteContext* context, RefCountedEigenContext* ptr) {
+ int num_threads = 4;
+ if (context->recommended_num_threads != -1) {
+ num_threads = context->recommended_num_threads;
+ }
+ ptr->device.reset(); // destroy before we invalidate the thread pool
+ ptr->thread_pool_wrapper.reset(
+ new EigenThreadPoolWrapper(new Eigen::ThreadPool(num_threads)));
+ ptr->device.reset(
+ new Eigen::ThreadPoolDevice(ptr->thread_pool_wrapper.get(), num_threads));
+}
+
+TfLiteStatus Refresh(TfLiteContext* context) {
+ Eigen::setNbThreads(context->recommended_num_threads);
+
+ auto* ptr = GetEigenContext(context);
+ if (ptr != nullptr) {
+ InitDevice(context, ptr);
+ }
+
+ return kTfLiteOk;
+}
+
+} // namespace
+
void IncrementUsageCounter(TfLiteContext* context) {
- auto* ptr = reinterpret_cast<RefCountedEigenContext*>(context->eigen_context);
+ auto* ptr = GetEigenContext(context);
if (ptr == nullptr) {
if (context->recommended_num_threads != -1) {
Eigen::setNbThreads(context->recommended_num_threads);
}
ptr = new RefCountedEigenContext;
+ ptr->type = kTfLiteEigenContext;
+ ptr->Refresh = Refresh;
ptr->num_references = 0;
- context->eigen_context = ptr;
+ InitDevice(context, ptr);
+ context->SetExternalContext(context, kTfLiteEigenContext, ptr);
}
ptr->num_references++;
}
void DecrementUsageCounter(TfLiteContext* context) {
- auto* ptr = reinterpret_cast<RefCountedEigenContext*>(context->eigen_context);
+ auto* ptr = GetEigenContext(context);
if (ptr == nullptr) {
TF_LITE_FATAL(
"Call to DecrementUsageCounter() not preceded by "
@@ -46,14 +104,17 @@ void DecrementUsageCounter(TfLiteContext* context) {
}
if (--ptr->num_references == 0) {
delete ptr;
- context->eigen_context = nullptr;
+ context->SetExternalContext(context, kTfLiteEigenContext, nullptr);
}
}
-void SetNumThreads(TfLiteContext* context, int num_threads) {
- IncrementUsageCounter(context);
- Eigen::setNbThreads(num_threads);
- DecrementUsageCounter(context);
+const Eigen::ThreadPoolDevice* GetThreadPoolDevice(TfLiteContext* context) {
+ auto* ptr = GetEigenContext(context);
+ if (ptr == nullptr) {
+ TF_LITE_FATAL(
+ "Call to GetFromContext() not preceded by IncrementUsageCounter()");
+ }
+ return ptr->device.get();
}
} // namespace eigen_support
diff --git a/tensorflow/contrib/lite/kernels/eigen_support.h b/tensorflow/contrib/lite/kernels/eigen_support.h
index aa8c351fd8..ec77856b10 100644
--- a/tensorflow/contrib/lite/kernels/eigen_support.h
+++ b/tensorflow/contrib/lite/kernels/eigen_support.h
@@ -17,6 +17,10 @@ limitations under the License.
#include "tensorflow/contrib/lite/context.h"
+namespace EigenForTFLite {
+class ThreadPoolDevice;
+}
+
namespace tflite {
namespace eigen_support {
@@ -28,8 +32,8 @@ void IncrementUsageCounter(TfLiteContext* context);
// usages all temporary Eigen objects will be deleted.
void DecrementUsageCounter(TfLiteContext* context);
-// Set the number of threads that can be used by Eigen.
-void SetNumThreads(TfLiteContext* context, int num_threads);
+const EigenForTFLite::ThreadPoolDevice* GetThreadPoolDevice(
+ TfLiteContext* context);
} // namespace eigen_support
} // namespace tflite
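
Taken together with the conv.cc hunk above, the reworked eigen_support API implies the kernel lifecycle sketched below; this is modeled on that wiring and is not a complete kernel.

// Init/Free/Eval touch points for a kernel that needs the Eigen device.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  eigen_support::IncrementUsageCounter(context);  // creates or refs the pool
  return nullptr;  // a real kernel would allocate its op data here
}

void Free(TfLiteContext* context, void* buffer) {
  eigen_support::DecrementUsageCounter(context);  // pool deleted at zero refs
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  // Only valid between Increment/DecrementUsageCounter(); fatal otherwise.
  const Eigen::ThreadPoolDevice* device =
      eigen_support::GetThreadPoolDevice(context);
  // ... pass *device into multithreaded_ops::Conv(...), as conv.cc now does.
  return kTfLiteOk;
}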
diff --git a/tensorflow/contrib/lite/kernels/embedding_lookup.cc b/tensorflow/contrib/lite/kernels/embedding_lookup.cc
index 9410bead5e..f550339d03 100644
--- a/tensorflow/contrib/lite/kernels/embedding_lookup.cc
+++ b/tensorflow/contrib/lite/kernels/embedding_lookup.cc
@@ -94,7 +94,7 @@ TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* lookup, const TfLiteTensor* value,
TfLiteTensor* output) {
const int row_size = SizeOfDimension(value, 0);
- const double scaling_factor = 1.0 / value->params.scale;
+ const double scaling_factor = value->params.scale;
// col_size after we flatten tensor into 2D.
int col_size = 1;
@@ -112,8 +112,9 @@ TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
// TODO(alanchiao): refactor scalar multiply into separate function
// for ease of adding a neon equivalent if ever necessary.
for (int j = 0; j < col_size; j++) {
+ const int8_t* value_ptr = reinterpret_cast<int8_t*>(value->data.uint8);
output->data.f[j + i * col_size] =
- value->data.uint8[j + idx * col_size] * scaling_factor;
+ value_ptr[j + idx * col_size] * scaling_factor;
}
}
}
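
The two embedding_lookup.cc changes above belong together: hybrid weights are symmetric int8 values stored in a uint8-typed buffer, so the kernel reinterprets the bytes and multiplies by the scale directly, which is the standard dequantization (the old 1.0 / value->params.scale appears to have inverted it). A standalone demo of that arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  // Pretend this is value->data.uint8 for a tensor with params.scale = 0.5.
  uint8_t raw[4] = {0x02, 0xFE, 0x7F, 0x80};  // as int8: 2, -2, 127, -128
  const double scaling_factor = 0.5;          // value->params.scale
  const int8_t* value_ptr = reinterpret_cast<int8_t*>(raw);
  for (int j = 0; j < 4; ++j) {
    std::printf("%g\n", value_ptr[j] * scaling_factor);  // 1 -1 63.5 -64
  }
}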
diff --git a/tensorflow/contrib/lite/kernels/embedding_lookup_test.cc b/tensorflow/contrib/lite/kernels/embedding_lookup_test.cc
index 04657fd863..4a88d168c6 100644
--- a/tensorflow/contrib/lite/kernels/embedding_lookup_test.cc
+++ b/tensorflow/contrib/lite/kernels/embedding_lookup_test.cc
@@ -107,9 +107,9 @@ TEST(HybridEmbeddingLookupHybridOpTest, Simple2DTest) {
HybridEmbeddingLookupOpModel m({3}, {3, 8});
m.SetInput({1, 0, 2});
m.SetWeight({
- 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
- 1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
- 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
+ 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
+ 1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
+ 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
});
m.Invoke();
@@ -117,9 +117,9 @@ TEST(HybridEmbeddingLookupHybridOpTest, Simple2DTest) {
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
- 1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
- 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
- 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
+ 1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
+ 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
+ 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
},
7.41e-03)));
}
@@ -128,9 +128,9 @@ TEST(HybridEmbeddingLookupHybridOpTest, Simple3DTest) {
HybridEmbeddingLookupOpModel m({3}, {3, 2, 4});
m.SetInput({1, 0, 2});
m.SetWeight({
- 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
- 1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
- 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
+ 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
+ 1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
+ 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
});
m.Invoke();
@@ -138,9 +138,9 @@ TEST(HybridEmbeddingLookupHybridOpTest, Simple3DTest) {
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
- 1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
- 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
- 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
+ 1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
+ 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
+ 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
},
7.41e-03)));
}
@@ -149,9 +149,9 @@ TEST(HybridEmbeddingLookupHybridOpTest, Simple4DTest) {
HybridEmbeddingLookupOpModel m({3}, {3, 2, 2, 2});
m.SetInput({1, 0, 2});
m.SetWeight({
- 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
- 1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
- 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
+ 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
+ 1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
+ 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
});
m.Invoke();
@@ -159,9 +159,9 @@ TEST(HybridEmbeddingLookupHybridOpTest, Simple4DTest) {
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
- 1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
- 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
- 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
+ 1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
+ 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
+ 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
},
7.41e-03)));
}
diff --git a/tensorflow/contrib/lite/kernels/fake_quant.cc b/tensorflow/contrib/lite/kernels/fake_quant.cc
new file mode 100644
index 0000000000..0ef1a50b30
--- /dev/null
+++ b/tensorflow/contrib/lite/kernels/fake_quant.cc
@@ -0,0 +1,92 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <string.h>
+#include <vector>
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
+#include "tensorflow/contrib/lite/kernels/internal/tensor.h"
+#include "tensorflow/contrib/lite/kernels/kernel_util.h"
+#include "tensorflow/contrib/lite/kernels/op_macros.h"
+
+namespace tflite {
+namespace ops {
+namespace builtin {
+namespace fake_quant {
+
+// This file has the reference implementation of FakeQuant.
+enum KernelType {
+ kReference,
+};
+
+struct OpContext {
+ OpContext(TfLiteContext* context, TfLiteNode* node) {
+ input = GetInput(context, node, 0);
+ output = GetOutput(context, node, 0);
+ }
+ const TfLiteTensor* input;
+ TfLiteTensor* output;
+};
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+ TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
+ TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+ const auto* params =
+ reinterpret_cast<TfLiteFakeQuantParams*>(node->builtin_data);
+
+ if (params->narrow_range) {
+ context->ReportError(
+ context,
+ "narrow_range FakeQuant is not currently supported at runtime. "
+ "narrow_range is only meant to be applied to weights, not activations");
+ return kTfLiteError;
+ }
+
+ OpContext op_context(context, node);
+ TfLiteIntArray* output_dims = TfLiteIntArrayCopy(op_context.input->dims);
+ op_context.output->type = op_context.input->type;
+ return context->ResizeTensor(context, op_context.output, output_dims);
+}
+
+template <KernelType kernel_type>
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+ OpContext op_context(context, node);
+
+ const auto* params =
+ reinterpret_cast<TfLiteFakeQuantParams*>(node->builtin_data);
+
+ reference_ops::FakeQuant(GetTensorData<float>(op_context.input),
+ GetTensorDims(op_context.input), params->min,
+ params->max, params->num_bits,
+ GetTensorData<float>(op_context.output),
+ GetTensorDims(op_context.output));
+
+ return kTfLiteOk;
+}
+
+} // namespace fake_quant
+
+TfLiteRegistration* Register_FAKE_QUANT_REF() {
+ static TfLiteRegistration r = {nullptr, nullptr, fake_quant::Prepare,
+ fake_quant::Eval<fake_quant::kReference>};
+ return &r;
+}
+
+TfLiteRegistration* Register_FAKE_QUANT() { return Register_FAKE_QUANT_REF(); }
+
+} // namespace builtin
+} // namespace ops
+} // namespace tflite
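
The kernel above delegates to reference_ops::FakeQuant. Below is a standalone sketch of the quantize-then-dequantize math involved; the real implementation also nudges min/max so that zero lands exactly on the integer grid, which is a no-op for the [0, 1] range used here. The sketch reproduces the expectations in the new test below, e.g. 0.25 -> ~0.250980 and 0.4444444 -> ~0.443137 for 8 bits.

#include <cmath>
#include <cstdio>

float FakeQuantSketch(float x, float min, float max, int num_bits) {
  const int quant_max = (1 << num_bits) - 1;       // 255 for 8 bits
  const float scale = (max - min) / quant_max;     // 1/255 here
  const float clamped = std::fmin(std::fmax(x, min), max);
  const float q = std::round((clamped - min) / scale);  // snap to the grid
  return min + q * scale;                          // back to float
}

int main() {
  std::printf("%f %f\n", FakeQuantSketch(0.25f, 0.f, 1.f, 8),
              FakeQuantSketch(0.4444444f, 0.f, 1.f, 8));
  // prints: 0.250980 0.443137
}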
diff --git a/tensorflow/contrib/lite/kernels/fake_quant_test.cc b/tensorflow/contrib/lite/kernels/fake_quant_test.cc
new file mode 100644
index 0000000000..11a02f7ed7
--- /dev/null
+++ b/tensorflow/contrib/lite/kernels/fake_quant_test.cc
@@ -0,0 +1,112 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/interpreter.h"
+#include "tensorflow/contrib/lite/kernels/register.h"
+#include "tensorflow/contrib/lite/kernels/test_util.h"
+#include "tensorflow/contrib/lite/model.h"
+
+namespace tflite {
+namespace {
+
+using ::testing::ElementsAreArray;
+
+class FakeQuantOpModel : public SingleOpModel {
+ public:
+ FakeQuantOpModel(const TensorData& input, const TensorType& output, float min,
+ float max, int num_bits) {
+ input_ = AddInput(input);
+ output_ = AddOutput(output);
+ SetBuiltinOp(BuiltinOperator_FAKE_QUANT, BuiltinOptions_FakeQuantOptions,
+ CreateFakeQuantOptions(builder_, min, max, num_bits).Union());
+ BuildInterpreter({GetShape(input_)});
+ }
+
+ template <class T>
+ void SetInput(std::initializer_list<T> data) {
+ PopulateTensor(input_, data);
+ }
+
+ template <class T>
+ std::vector<T> GetOutput() {
+ return ExtractVector<T>(output_);
+ }
+ std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
+
+ protected:
+ int input_;
+ int output_;
+};
+
+TEST(FakeQuantOpTest, FloatPositiveRange8Test) {
+ std::initializer_list<float> data = {0.0, 1.0, 0.25,
+ 0.50, 0.4444444, 0.00001};
+ FakeQuantOpModel m({TensorType_FLOAT32, {3, 1, 2}}, TensorType_FLOAT32, 0.0f,
+ 1.0f, 8);
+ m.SetInput<float>(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 2}));
+ EXPECT_THAT(
+ m.GetOutput<float>(),
+ ElementsAreArray(ArrayFloatNear({0, 1, 0.25098, 0.498039, 0.443137, 0})));
+}
+
+TEST(FakeQuantOpTest, FloatNegativeRange8Test) {
+ std::initializer_list<float> data = {0.0, -0.9, 0.25,
+ 0.50, 0.4444444, -0.00001};
+ FakeQuantOpModel m({TensorType_FLOAT32, {3, 1, 2}}, TensorType_FLOAT32, -0.9f,
+ 0.9f, 8);
+ m.SetInput<float>(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 2}));
+ EXPECT_THAT(m.GetOutput<float>(),
+ ElementsAreArray(ArrayFloatNear(
+ {0, -0.896471, 0.247059, 0.501176, 0.444706, 0})));
+}
+
+TEST(FakeQuantOpTest, FloatPositiveRange16Test) {
+ std::initializer_list<float> data = {0.0, 1.0, 0.25,
+ 0.50, 0.4444444, 0.00001};
+ FakeQuantOpModel m({TensorType_FLOAT32, {3, 1, 2}}, TensorType_FLOAT32, 0.0f,
+ 1.0f, 16);
+ m.SetInput<float>(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 2}));
+ EXPECT_THAT(m.GetOutput<float>(),
+ ElementsAreArray(ArrayFloatNear(
+ {0, 1, 0.250004, 0.500008, 0.44445, 1.5259e-05})));
+}
+
+TEST(FakeQuantOpTest, FloatNegativeRange16Test) {
+ std::initializer_list<float> data = {0.0, -0.9, 0.25,
+ 0.50, 0.4444444, -0.00001};
+ FakeQuantOpModel m({TensorType_FLOAT32, {3, 1, 2}}, TensorType_FLOAT32, -0.9f,
+ 0.9f, 16);
+ m.SetInput<float>(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 2}));
+ EXPECT_THAT(m.GetOutput<float>(),
+ ElementsAreArray(ArrayFloatNear(
+ {0, -0.900014, 0.249998, 0.499995, 0.444431, 0})));
+}
+
+} // namespace
+} // namespace tflite
+
+int main(int argc, char** argv) {
+ ::tflite::LogToStderr();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/kernels/fully_connected.cc b/tensorflow/contrib/lite/kernels/fully_connected.cc
index f6fc0f5b6a..3b203dd480 100644
--- a/tensorflow/contrib/lite/kernels/fully_connected.cc
+++ b/tensorflow/contrib/lite/kernels/fully_connected.cc
@@ -63,6 +63,7 @@ constexpr int kInputTensor = 0;
constexpr int kWeightsTensor = 1;
constexpr int kBiasTensor = 2;
constexpr int kOutputTensor = 0;
+constexpr int kShuffledInputWorkspaceTensor = 1;
constexpr int kScratchBufferTensor = 1;
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
@@ -87,7 +88,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// Check we have all the inputs and outputs we need.
TF_LITE_ENSURE_EQ(context, node->inputs->size, 3);
- TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
+ // Shuffled formats need a workspace to store the shuffled input activations.
+ const int expected_outputs_count =
+ params->weights_format == kTfLiteFullyConnectedWeightsFormatDefault ? 1
+ : 2;
+ TF_LITE_ENSURE_EQ(context, node->outputs->size, expected_outputs_count);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor);
@@ -121,9 +126,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
QuantizeMultiplierSmallerThanOneExp(
real_multiplier, &data->output_multiplier, &data->output_shift);
data->output_shift *= -1;
- CalculateActivationRangeUint8(params->activation, output,
- &data->output_activation_min,
- &data->output_activation_max);
+ TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+ context, params->activation, output, &data->output_activation_min,
+ &data->output_activation_max));
}
// If we have to perform on-the-fly quantization (with quantized weights and
@@ -278,30 +283,49 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
int32_t input_offset = -input->params.zero_point;
int32_t filter_offset = -filter->params.zero_point;
int32_t output_offset = output->params.zero_point;
-#define TF_LITE_FULLY_CONNECTED(type) \
+#define TF_LITE_FULLY_CONNECTED(type, output_data_type) \
type::FullyConnected( \
GetTensorData<uint8_t>(input), GetTensorDims(input), input_offset, \
GetTensorData<uint8_t>(filter), GetTensorDims(filter), filter_offset, \
GetTensorData<int32_t>(bias), GetTensorDims(bias), output_offset, \
data->output_multiplier, data->output_shift, \
data->output_activation_min, data->output_activation_max, \
- GetTensorData<uint8_t>(output), GetTensorDims(output), gemm_context)
+ GetTensorData<output_data_type>(output), GetTensorDims(output), \
+ gemm_context)
if (kernel_type == kReference) {
- TF_LITE_FULLY_CONNECTED(reference_ops);
- } else if (kernel_type == kPie) {
- if (input->type == kTfLiteFloat32) {
- // Pie currently only supports quantized models and float inputs/outputs.
- TfLiteTensor* input_quantized =
- &context->tensors[node->temporaries->data[0]];
- return EvalPieQuantized(context, node, params, data, input, filter, bias,
- input_quantized, output);
- } else {
- // TODO(ahentz): we don't have a quantized version of the PIE kernels, so
- // we just defer to the MINI ones.
- TF_LITE_FULLY_CONNECTED(optimized_ops);
+ switch (output->type) {
+ case kTfLiteUInt8:
+ TF_LITE_FULLY_CONNECTED(reference_ops, uint8_t);
+ break;
+ case kTfLiteInt16:
+ TF_LITE_FULLY_CONNECTED(reference_ops, int16_t);
+ break;
+ default:
+ context->ReportError(
+ context,
+ "Quantized FullyConnected expects output data type uint8 or int16");
+ return kTfLiteError;
}
+ } else if (kernel_type == kPie && input->type == kTfLiteFloat32) {
+ // Pie currently only supports quantized models and float inputs/outputs.
+ TfLiteTensor* input_quantized =
+ &context->tensors[node->temporaries->data[0]];
+ return EvalPieQuantized(context, node, params, data, input, filter, bias,
+ input_quantized, output);
} else {
- TF_LITE_FULLY_CONNECTED(optimized_ops);
+ switch (output->type) {
+ case kTfLiteUInt8:
+ TF_LITE_FULLY_CONNECTED(optimized_ops, uint8_t);
+ break;
+ case kTfLiteInt16:
+ TF_LITE_FULLY_CONNECTED(optimized_ops, int16_t);
+ break;
+ default:
+ context->ReportError(
+ context,
+ "Quantized FullyConnected expects output data type uint8 or int16");
+ return kTfLiteError;
+ }
}
#undef TF_LITE_FULLY_CONNECTED
@@ -309,13 +333,51 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
}
template <KernelType kernel_type>
+TfLiteStatus EvalShuffledQuantized(TfLiteContext* context, TfLiteNode* node,
+ TfLiteFullyConnectedParams* params,
+ OpData* data, const TfLiteTensor* input,
+ const TfLiteTensor* filter,
+ const TfLiteTensor* bias,
+ TfLiteTensor* output,
+ TfLiteTensor* shuffled_input_workspace) {
+ gemmlowp::GemmContext* gemm_context = gemm_support::GetFromContext(context);
+
+ // TODO(b/110697972) decide more consistently if / how / where we want
+  // to perform this kind of runtime data type check.
+ if (input->type != kTfLiteUInt8 || filter->type != kTfLiteUInt8 ||
+ bias->type != kTfLiteInt32 || output->type != kTfLiteInt16 ||
+ shuffled_input_workspace->type != kTfLiteUInt8) {
+ context->ReportError(context, "Unexpected data type");
+ return kTfLiteError;
+ }
+
+#define TF_LITE_SHUFFLED_FULLY_CONNECTED(type) \
+ type::ShuffledFullyConnected( \
+ GetTensorData<uint8_t>(input), GetTensorDims(input), \
+ GetTensorData<uint8_t>(filter), GetTensorDims(filter), \
+ GetTensorData<int32_t>(bias), GetTensorDims(bias), \
+ data->output_multiplier, data->output_shift, \
+ data->output_activation_min, data->output_activation_max, \
+ GetTensorData<int16_t>(output), GetTensorDims(output), \
+ GetTensorData<uint8_t>(shuffled_input_workspace), gemm_context)
+ if (kernel_type == kReference) {
+ TF_LITE_SHUFFLED_FULLY_CONNECTED(reference_ops);
+ } else {
+ TF_LITE_SHUFFLED_FULLY_CONNECTED(optimized_ops);
+ }
+#undef TF_LITE_SHUFFLED_FULLY_CONNECTED
+
+ return kTfLiteOk;
+}
+
+template <KernelType kernel_type>
TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node,
TfLiteFullyConnectedParams* params, OpData* data,
const TfLiteTensor* input, const TfLiteTensor* filter,
const TfLiteTensor* bias, TfLiteTensor* output) {
float output_activation_min, output_activation_max;
- CalculateActivationRangeFloat(params->activation, &output_activation_min,
- &output_activation_max);
+ CalculateActivationRange(params->activation, &output_activation_min,
+ &output_activation_max);
#define TF_LITE_FULLY_CONNECTED(type) \
type::FullyConnected(GetTensorData<float>(input), GetTensorDims(input), \
GetTensorData<float>(filter), GetTensorDims(filter), \
@@ -352,8 +414,22 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return EvalFloat<kernel_type>(context, node, params, data, input, filter,
bias, output);
case kTfLiteUInt8:
- return EvalQuantized<kernel_type>(context, node, params, data, input,
- filter, bias, output);
+ if (params->weights_format ==
+ kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8) {
+ TfLiteTensor* shuffled_input_workspace =
+ GetOutput(context, node, kShuffledInputWorkspaceTensor);
+ return EvalShuffledQuantized<kernel_type>(context, node, params, data,
+ input, filter, bias, output,
+ shuffled_input_workspace);
+ } else if (params->weights_format ==
+ kTfLiteFullyConnectedWeightsFormatDefault) {
+ return EvalQuantized<kernel_type>(context, node, params, data, input,
+ filter, bias, output);
+ } else {
+ context->ReportError(context,
+ "Unhandled fully-connected weights format");
+ return kTfLiteError;
+ }
default:
context->ReportError(context, "Type %d not currently supported.",
filter->type);
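
Context for the new EvalShuffledQuantized() path above: the shuffled format stores the row-major weights matrix (rows = output_depth, cols = input_depth) as a sequence of 4x16 blocks so the runtime kernel reads contiguous memory. A standalone demo of the layout follows; the same loop nest appears in ShuffleAndSetWeights() in the test diff below.

#include <cstdio>
#include <vector>

int main() {
  const int output_depth = 8, input_depth = 32;  // four 4x16 blocks
  std::vector<int> w(output_depth * input_depth);
  for (int i = 0; i < output_depth * input_depth; ++i) w[i] = i;

  std::vector<int> shuffled;
  shuffled.reserve(w.size());
  for (int block_o = 0; block_o < output_depth; block_o += 4) {
    for (int block_i = 0; block_i < input_depth; block_i += 16) {
      for (int o = 0; o < 4; ++o) {
        for (int i = 0; i < 16; ++i) {
          shuffled.push_back(w[(block_o + o) * input_depth + block_i + i]);
        }
      }
    }
  }

  // Start of each 16-wide run: rows 0-3 cols 0-15, rows 0-3 cols 16-31, then
  // the same for rows 4-7 -- i.e. blocks are interleaved across rows.
  for (int run = 0; run < 16; ++run) std::printf("%d ", shuffled[run * 16]);
  std::printf("\n");  // 0 32 64 96 16 48 80 112 128 160 192 224 144 176 208 240
}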
diff --git a/tensorflow/contrib/lite/kernels/fully_connected_test.cc b/tensorflow/contrib/lite/kernels/fully_connected_test.cc
index 05dd028b48..ec94905697 100644
--- a/tensorflow/contrib/lite/kernels/fully_connected_test.cc
+++ b/tensorflow/contrib/lite/kernels/fully_connected_test.cc
@@ -15,6 +15,7 @@ limitations under the License.
// Unit test for TFLite FULLY_CONNECTED op.
#include <iomanip>
+#include <random>
#include <vector>
#include <gmock/gmock.h>
@@ -133,9 +134,12 @@ static float fully_connected_golden_output[] = {
class BaseFullyConnectedOpModel : public SingleOpModel {
public:
// TODO(ahentz): test different activation types too.
- BaseFullyConnectedOpModel(TfLiteRegistration* registration, int units,
- int batches, const TensorData& input,
- const TensorData& output = {TensorType_FLOAT32})
+ BaseFullyConnectedOpModel(
+ TfLiteRegistration* registration, int units, int batches,
+ const TensorData& input, const TensorData& output = {TensorType_FLOAT32},
+ ActivationFunctionType activation_func = ActivationFunctionType_RELU,
+ FullyConnectedOptionsWeightsFormat weights_format =
+ FullyConnectedOptionsWeightsFormat_DEFAULT)
: batches_(batches), units_(units) {
int total_input_size = 1;
for (int i = 0; i < input.shape.size(); ++i) {
@@ -159,10 +163,13 @@ class BaseFullyConnectedOpModel : public SingleOpModel {
}
output_ = AddOutput(output);
+ if (weights_format != FullyConnectedOptionsWeightsFormat_DEFAULT) {
+ AddOutput({TensorType_UINT8, input.shape});
+ }
SetBuiltinOp(
BuiltinOperator_FULLY_CONNECTED, BuiltinOptions_FullyConnectedOptions,
- CreateFullyConnectedOptions(builder_, ActivationFunctionType_RELU)
+ CreateFullyConnectedOptions(builder_, activation_func, weights_format)
.Union());
resolver_ = absl::make_unique<SingleOpResolver>(
BuiltinOperator_FULLY_CONNECTED, registration);
@@ -188,13 +195,11 @@ class FloatFullyConnectedOpModel : public BaseFullyConnectedOpModel {
public:
using BaseFullyConnectedOpModel::BaseFullyConnectedOpModel;
- void SetBias(std::initializer_list<float> f) { PopulateTensor(bias_, f); }
+ void SetBias(const std::vector<float>& f) { PopulateTensor(bias_, f); }
- void SetWeights(std::initializer_list<float> f) {
- PopulateTensor(weights_, f);
- }
+ void SetWeights(const std::vector<float>& f) { PopulateTensor(weights_, f); }
- void SetInput(std::initializer_list<float> data) {
+ void SetInput(const std::vector<float>& data) {
PopulateTensor(input_, data);
}
void SetInput(int offset, float* begin, float* end) {
@@ -208,20 +213,50 @@ class QuantizedFullyConnectedOpModel : public BaseFullyConnectedOpModel {
public:
using BaseFullyConnectedOpModel::BaseFullyConnectedOpModel;
- void SetBias(std::initializer_list<float> data) {
+ void SetBias(const std::vector<float>& data) {
QuantizeAndPopulate<int32_t>(bias_, data);
}
- void SetWeights(std::initializer_list<float> data) {
+ void SetWeights(const std::vector<float>& data) {
QuantizeAndPopulate<uint8_t>(weights_, data);
}
- void SetInput(std::initializer_list<float> data) {
+ void ShuffleAndSetWeights(const std::vector<float>& data, int input_depth,
+ int output_depth) {
+ std::vector<float> shuffled_data(data.size());
+ CHECK_EQ(input_depth % 16, 0);
+ CHECK_EQ(output_depth % 4, 0);
+ float* shuffled_data_ptr = shuffled_data.data();
+ for (int block_o = 0; block_o < output_depth; block_o += 4) {
+ for (int block_i = 0; block_i < input_depth; block_i += 16) {
+ for (int o = 0; o < 4; o++) {
+ for (int i = 0; i < 16; i++) {
+ *shuffled_data_ptr++ =
+ data[(block_o + o) * input_depth + block_i + i];
+ }
+ }
+ }
+ }
+ TfLiteTensor* t = interpreter_->tensor(weights_);
+ auto quantized_data =
+ Quantize<uint8_t>(shuffled_data, t->params.scale, t->params.zero_point);
+ for (uint8_t& q : quantized_data) {
+ q ^= 0x80;
+ }
+ PopulateTensor(weights_, 0, quantized_data.data(),
+ quantized_data.data() + quantized_data.size());
+ }
+ void SetInput(const std::vector<float>& data) {
QuantizeAndPopulate<uint8_t>(input_, data);
}
- std::vector<uint8_t> GetOutput() { return ExtractVector<uint8_t>(output_); }
+ template <typename T>
+ std::vector<T> GetOutput() {
+ return ExtractVector<T>(output_);
+ }
+
+ template <typename T>
std::vector<float> GetDequantizedOutput() {
- return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
- GetScale(output_), GetZeroPoint(output_));
+ return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
+ GetZeroPoint(output_));
}
};
@@ -256,12 +291,12 @@ class HybridFullyConnectedOpModel : public SingleOpModel {
ops::builtin::Register_FULLY_CONNECTED_PIE());
BuildInterpreter({GetShape(input_), GetShape(weights_), GetShape(bias_)});
}
- void SetBias(std::initializer_list<float> f) { PopulateTensor(bias_, f); }
- void SetWeights(std::initializer_list<float> data) {
+ void SetBias(const std::vector<float>& f) { PopulateTensor(bias_, f); }
+ void SetWeights(const std::vector<float>& data) {
SymmetricQuantizeAndPopulate(weights_, data);
}
- void SetInput(std::initializer_list<float> f) { PopulateTensor(input_, f); }
+ void SetInput(const std::vector<float>& f) { PopulateTensor(input_, f); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
int input_size() { return input_size_; }
@@ -340,6 +375,24 @@ TEST_P(FloatFullyConnectedOpTest, SimpleTest) {
EXPECT_THAT(m.GetOutput(), ElementsAre(24, 25, 26, 58, 59, 60));
}
+TEST_P(FloatFullyConnectedOpTest, SimpleTest2) {
+ FloatFullyConnectedOpModel m(GetRegistration(), /*units=*/1, /*batches=*/2,
+ /*input=*/{TensorType_FLOAT32, {2, 2}});
+ m.SetWeights({
+ 2, 4, // u = 0
+ });
+ m.SetBias({1});
+
+ m.SetInput({
+ 1, 2, // b = 0
+ 2, 1, // b = 1
+ });
+
+ m.Invoke();
+
+ EXPECT_THAT(m.GetOutput(), ElementsAre(11, 9));
+}
+
TEST_P(QuantizedFullyConnectedOpTest, SimpleTestQuantized) {
QuantizedFullyConnectedOpModel m(
GetRegistration(), /*units=*/3, /*batches*/ 2,
@@ -350,7 +403,7 @@ TEST_P(QuantizedFullyConnectedOpTest, SimpleTestQuantized) {
m.SetWeights({
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // u = 0
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // u = 1
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // u = 1
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // u = 2
});
m.SetBias({1, 2, 3});
@@ -361,11 +414,136 @@ TEST_P(QuantizedFullyConnectedOpTest, SimpleTestQuantized) {
m.Invoke();
- EXPECT_THAT(m.GetDequantizedOutput(), ElementsAreArray(ArrayFloatNear({
- 24, 25, 26, //
- 58, 59, 60, //
- })));
- EXPECT_THAT(m.GetOutput(), ElementsAre(151, 152, 153, 185, 186, 187));
+ EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
+ ElementsAreArray(ArrayFloatNear({
+ 24, 25, 26, //
+ 58, 59, 60, //
+ })));
+ EXPECT_THAT(m.GetOutput<uint8_t>(),
+ ElementsAre(151, 152, 153, 185, 186, 187));
+}
+
+void SimpleTestQuantizedInt16OutputCase(
+ TfLiteRegistration* registration, int input_depth, int output_depth,
+ int batches, FullyConnectedOptionsWeightsFormat weights_format) {
+ const uint8_t kWeightsZeroPoint = 128;
+ const float kWeightsScale = 1.f / 128.f;
+ const uint8_t kInputZeroPoint = 128;
+ const float kInputScale = 1.f / 128.f;
+ const float kInputMin = (0 - kInputZeroPoint) * kInputScale;
+ const float kInputMax = (255 - kInputZeroPoint) * kInputScale;
+ // Output ranges in [-8..8] encoded as int16
+ const float kOutputScale = 8.f / 32768.f;
+ const float kOutputMin = -32768 * kOutputScale;
+ const float kOutputMax = 32767 * kOutputScale;
+
+ QuantizedFullyConnectedOpModel m(
+ registration, output_depth, batches,
+ /*input=*/
+ {TensorType_UINT8, {batches, input_depth}, kInputMin, kInputMax},
+ /*output=*/{TensorType_INT16, {}, kOutputMin, kOutputMax},
+ /*activation_func=*/ActivationFunctionType_NONE, weights_format);
+
+ std::mt19937 random_engine;
+ std::uniform_int_distribution<uint8_t> weights_dist;
+
+ std::vector<float> weights_data(input_depth * output_depth);
+ for (auto& w : weights_data) {
+ uint8_t q = weights_dist(random_engine);
+ w = (q - kWeightsZeroPoint) * kWeightsScale;
+ }
+
+ // Based on weights_format, enforce any shape requirement for that format/path
+ // and set the (possibly shuffled) weights.
+ switch (weights_format) {
+ case FullyConnectedOptionsWeightsFormat_DEFAULT:
+ m.SetWeights(weights_data);
+ break;
+ case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
+ // The shuffled path currently supports only a restrictive subset of
+ // shapes, described by the following assertions:
+ CHECK_EQ(input_depth % 16, 0);
+ CHECK_EQ(output_depth % 4, 0);
+ CHECK(batches == 1 || batches == 4);
+ m.ShuffleAndSetWeights(weights_data, input_depth, output_depth);
+ break;
+ default:
+ LOG(FATAL) << "Unhandled weights format";
+ }
+
+ std::uniform_int_distribution<uint8_t> input_dist;
+ std::vector<float> input_data(input_depth * batches);
+ for (auto& i : input_data) {
+ uint8_t q = input_dist(random_engine);
+ i = (q - kInputZeroPoint) * kInputScale;
+ }
+
+ std::vector<float> bias_data(output_depth);
+ // As the output ranges in [-8, 8], it's reasonable to have bias values
+  // in [-1, 1]; this won't result in too much saturation.
+ std::uniform_real_distribution<float> bias_dist(-1.f, 1.f);
+ for (auto& b : bias_data) {
+ b = bias_dist(random_engine);
+ }
+
+ m.SetBias(bias_data);
+ m.SetInput(input_data);
+
+ m.Invoke();
+
+ std::vector<float> expected_output_data(output_depth * batches);
+ for (int b = 0; b < batches; b++) {
+ for (int o = 0; o < output_depth; o++) {
+ float accum = bias_data[o];
+ for (int i = 0; i < input_depth; i++) {
+ accum +=
+ input_data[b * input_depth + i] * weights_data[o * input_depth + i];
+ }
+ accum = std::min(accum, kOutputMax);
+ accum = std::max(accum, kOutputMin);
+ expected_output_data[b * output_depth + o] = accum;
+ }
+ }
+
+ EXPECT_THAT(m.GetDequantizedOutput<int16_t>(),
+ ElementsAreArray(ArrayFloatNear(expected_output_data, 3e-4f)));
+}
+
+TEST_P(QuantizedFullyConnectedOpTest,
+ SimpleTestQuantizedInt16OutputDefaultWeights) {
+ for (int input_depth : {1, 3, 10, 100}) {
+ for (int output_depth : {1, 3, 10, 100}) {
+ for (int batch : {1, 3, 10, 100}) {
+ SimpleTestQuantizedInt16OutputCase(
+ GetRegistration(), input_depth, output_depth, batch,
+ FullyConnectedOptionsWeightsFormat_DEFAULT);
+ }
+ }
+ }
+}
+
+TEST_P(QuantizedFullyConnectedOpTest,
+ SimpleTestQuantizedInt16OutputShuffled4x16Int8Weights) {
+ // The shuffled weights block shape is 4x16. The shape of the weights matrix
+ // is: rows = output_depth, cols = input_depth. It must be a multiple of 4x16.
+  // This means that output_depth must be a multiple of 4, and input_depth must
+ // be a multiple of 16.
+ for (int input_depth_numblocks : {1, 3}) {
+ for (int output_depth_numblocks : {1, 3}) {
+ int input_depth = 16 * input_depth_numblocks;
+ int output_depth = 4 * output_depth_numblocks;
+      // The fast shuffled path currently supports only batch sizes of 1
+      // and 4. The whole point of that path is to go as fast as possible
+      // for small batch sizes, which requires fully specializing it for
+      // each batch size; for larger batch sizes the generic gemmlowp-based
+      // implementation is fast enough.
+ for (int batch : {1, 4}) {
+ SimpleTestQuantizedInt16OutputCase(
+ GetRegistration(), input_depth, output_depth, batch,
+ FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8);
+ }
+ }
+ }
}
TEST(HybridFullyConnectedOpTest, SimpleTestQuantized) {
@@ -396,11 +574,11 @@ TEST(HybridFullyConnectedOpTest, SimpleTestQuantized) {
/*max_abs_error=*/1.3f)));
}
-TEST(FloatFullyConnectedOpTest, SimpleTest4DInput) {
+TEST_P(FloatFullyConnectedOpTest, SimpleTest4DInput) {
// Note that it is not required that the first dimension be the number of
// batches. All we care is that the input can be evenly distributed in
// batches. In this case, we need the input to have multiples of '2'.
- FloatFullyConnectedOpModel m(ops::builtin::Register_FULLY_CONNECTED_PIE(),
+ FloatFullyConnectedOpModel m(GetRegistration(),
/*units=*/3, /*batches=*/2,
/*input=*/{TensorType_FLOAT32, {4, 1, 5, 1}});
m.SetWeights({
@@ -444,11 +622,13 @@ TEST_P(QuantizedFullyConnectedOpTest, SimpleTest4dInputQuantized) {
m.Invoke();
- EXPECT_THAT(m.GetDequantizedOutput(), ElementsAreArray(ArrayFloatNear({
- 24, 25, 26, //
- 58, 59, 60, //
- })));
- EXPECT_THAT(m.GetOutput(), ElementsAre(151, 152, 153, 185, 186, 187));
+ EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
+ ElementsAreArray(ArrayFloatNear({
+ 24, 25, 26, //
+ 58, 59, 60, //
+ })));
+ EXPECT_THAT(m.GetOutput<uint8_t>(),
+ ElementsAre(151, 152, 153, 185, 186, 187));
}
INSTANTIATE_TEST_CASE_P(
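
One detail worth calling out in ShuffleAndSetWeights() above: q ^= 0x80 flips the top bit, which converts a uint8 quantized value with zero point 128 into the two's-complement int8 value with zero point 0 that the shuffled kernel consumes. Standalone demo:

#include <cstdint>
#include <cstdio>

int main() {
  for (int v : {0, 127, 128, 255}) {
    uint8_t q = static_cast<uint8_t>(v);
    q ^= 0x80;  // same trick as in the test above
    std::printf("uint8 %3d -> int8 %4d\n", v, static_cast<int8_t>(q));
  }
  // uint8   0 -> int8 -128
  // uint8 127 -> int8   -1
  // uint8 128 -> int8    0
  // uint8 255 -> int8  127
}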
diff --git a/tensorflow/contrib/lite/kernels/gather.cc b/tensorflow/contrib/lite/kernels/gather.cc
index 6a2341461f..2b2a9e6620 100644
--- a/tensorflow/contrib/lite/kernels/gather.cc
+++ b/tensorflow/contrib/lite/kernels/gather.cc
@@ -40,10 +40,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
// Only INT32 positions are supported.
TF_LITE_ENSURE_EQ(context, positions->type, kTfLiteInt32);
- // Check that input and output types match.
- TF_LITE_ENSURE_EQ(context, input->type, output->type);
- // TODO(mgubin): only 0D or 1D positions are currently supported.
- TF_LITE_ENSURE(context, NumDimensions(positions) <= 1);
+  // Propagate the input type to the output.
+ output->type = input->type;
// TODO(mgubin): Only default axis == 0 is supported.
TF_LITE_ENSURE_EQ(context, params->axis, 0);
// Check conditions for different types.
@@ -102,6 +100,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_GATHER(int32_t, int32_t);
break;
case kTfLiteString: {
+      // TODO(mgubin): Currently supports only 1D output tensors.
DynamicBuffer buffer;
const int32* indexes = positions->data.i32;
const int num_strings = GetStringCount(input);
diff --git a/tensorflow/contrib/lite/kernels/gather_test.cc b/tensorflow/contrib/lite/kernels/gather_test.cc
index cdadbeda18..1d4292955c 100644
--- a/tensorflow/contrib/lite/kernels/gather_test.cc
+++ b/tensorflow/contrib/lite/kernels/gather_test.cc
@@ -96,6 +96,15 @@ TEST(GatherOpTest, Test0DIndexWith0DResult) {
EXPECT_TRUE(m.GetOutputShape().empty());
}
+TEST(GatherOpTest, Test2DIndexWith2DResult) {
+ GatherOpModel m({3}, TensorType_FLOAT32, {1, 2});
+ m.SetInputFloat({1.0, 2.0, 3.0});
+ m.SetPositions({1, 0});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputFloat(), ElementsAreArray(ArrayFloatNear({2.0, 1.0})));
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
+}
+
TEST(FloatGatherOpTest, Duplicate) {
GatherOpModel m({1, 2, 2}, TensorType_FLOAT32, {2});
m.SetInputFloat({-2.0, 0.2, 0.7, 0.8});
diff --git a/tensorflow/contrib/lite/kernels/gemm_support.cc b/tensorflow/contrib/lite/kernels/gemm_support.cc
index 95f45ea768..ed334af2da 100644
--- a/tensorflow/contrib/lite/kernels/gemm_support.cc
+++ b/tensorflow/contrib/lite/kernels/gemm_support.cc
@@ -14,57 +14,70 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/contrib/lite/kernels/gemm_support.h"
+#include <memory>
+
#include "tensorflow/contrib/lite/kernels/op_macros.h"
namespace tflite {
namespace gemm_support {
+namespace {
-struct RefCountedGemmContext {
- gemmlowp::GemmContext* gemm_context_ = nullptr;
- int num_references_ = 0;
+struct RefCountedGemmContext : public TfLiteExternalContext {
+ std::unique_ptr<gemmlowp::GemmContext> gemm_context;
+ int num_references = 0;
};
+RefCountedGemmContext* GetGemmLowpContext(TfLiteContext* context) {
+ return reinterpret_cast<RefCountedGemmContext*>(
+ context->GetExternalContext(context, kTfLiteGemmLowpContext));
+}
+
+TfLiteStatus Refresh(TfLiteContext* context) {
+ auto* ptr = GetGemmLowpContext(context);
+ if (ptr != nullptr) {
+ ptr->gemm_context->set_max_num_threads(context->recommended_num_threads);
+ }
+ return kTfLiteOk;
+}
+
+} // namespace
+
void IncrementUsageCounter(TfLiteContext* context) {
- auto* ptr = reinterpret_cast<RefCountedGemmContext*>(context->gemm_context);
+ auto* ptr = GetGemmLowpContext(context);
if (ptr == nullptr) {
ptr = new RefCountedGemmContext;
- ptr->gemm_context_ = new gemmlowp::GemmContext();
+ ptr->type = kTfLiteGemmLowpContext;
+ ptr->Refresh = Refresh;
+ ptr->gemm_context.reset(new gemmlowp::GemmContext());
if (context->recommended_num_threads != -1) {
- ptr->gemm_context_->set_max_num_threads(context->recommended_num_threads);
+ ptr->gemm_context->set_max_num_threads(context->recommended_num_threads);
}
- ptr->num_references_ = 0;
- context->gemm_context = ptr;
+ ptr->num_references = 0;
+ context->SetExternalContext(context, kTfLiteGemmLowpContext, ptr);
}
- ptr->num_references_++;
+ ptr->num_references++;
}
void DecrementUsageCounter(TfLiteContext* context) {
- auto* ptr = reinterpret_cast<RefCountedGemmContext*>(context->gemm_context);
+ auto* ptr = GetGemmLowpContext(context);
if (ptr == nullptr) {
TF_LITE_FATAL(
"Call to DecrementUsageCounter() not preceded by "
"IncrementUsageCounter()");
}
- if (--ptr->num_references_ == 0) {
- delete ptr->gemm_context_;
+ if (--ptr->num_references == 0) {
delete ptr;
- context->gemm_context = nullptr;
+ context->SetExternalContext(context, kTfLiteGemmLowpContext, nullptr);
}
}
gemmlowp::GemmContext* GetFromContext(TfLiteContext* context) {
- auto* ptr = reinterpret_cast<RefCountedGemmContext*>(context->gemm_context);
+ auto* ptr = GetGemmLowpContext(context);
if (ptr == nullptr) {
TF_LITE_FATAL(
"Call to GetFromContext() not preceded by IncrementUsageCounter()");
}
- return ptr->gemm_context_;
-}
-
-void SetNumThreads(TfLiteContext* context, int num_threads) {
- IncrementUsageCounter(context);
- GetFromContext(context)->set_max_num_threads(num_threads);
- DecrementUsageCounter(context);
+ return ptr->gemm_context.get();
}
} // namespace gemm_support
diff --git a/tensorflow/contrib/lite/kernels/gemm_support.h b/tensorflow/contrib/lite/kernels/gemm_support.h
index f033501cb6..37af772c68 100644
--- a/tensorflow/contrib/lite/kernels/gemm_support.h
+++ b/tensorflow/contrib/lite/kernels/gemm_support.h
@@ -45,9 +45,6 @@ void IncrementUsageCounter(TfLiteContext* context);
// 'context'. If there are no more usages the GemmContext will be deleted.
void DecrementUsageCounter(TfLiteContext* context);
-// Set the number of threads that can be used by gemmlowp.
-void SetNumThreads(TfLiteContext* context, int num_threads);
-
} // namespace gemm_support
} // namespace tflite
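
Both gemm_support and eigen_support now follow the same pattern: the removed SetNumThreads() helpers are replaced by a Refresh callback registered on a TfLiteExternalContext, so thread-count changes flow through the context rather than through ad-hoc setters. Below is a hedged sketch of the propagation side; NotifyThreadCountChanged is hypothetical (in TFLite proper the interpreter performs this walk), and only the type/Refresh fields and context-type constants are taken from the hunks above.

// Assumes context.h's TfLiteExternalContextType enum and the
// GetExternalContext callback used throughout this diff.
void NotifyThreadCountChanged(TfLiteContext* context, int num_threads) {
  context->recommended_num_threads = num_threads;
  for (TfLiteExternalContextType type :
       {kTfLiteEigenContext, kTfLiteGemmLowpContext}) {
    TfLiteExternalContext* ctx = context->GetExternalContext(context, type);
    if (ctx != nullptr && ctx->Refresh != nullptr) {
      ctx->Refresh(context);  // gemm_support re-applies max threads here
    }
  }
}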
diff --git a/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc b/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
index 36c25388e8..200f2f1515 100644
--- a/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
+++ b/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
@@ -255,14 +255,6 @@ void LstmStep(
output_state_ptr);
}
-// TODO(alanchiao): move this to tensor_utils.
-void VectorMultiply(const int8_t* vector, const int v_size, const float scale,
- float* result) {
- for (int i = 0; i < v_size; ++i) {
- *result++ = scale * *vector++;
- }
-}
-
void LstmStep(
const float* input_ptr_batch, const int8_t* input_to_input_weights_ptr,
float input_to_input_weights_scale,
@@ -415,8 +407,9 @@ void LstmStep(
// For each batch and cell: update input gate.
if (!use_cifg) {
if (use_peephole && !is_cell_state_all_zeros) {
- VectorMultiply(cell_to_input_weights_ptr, n_cell,
- 1. / cell_to_input_weights_scale, recovered_cell_weights);
+ tensor_utils::VectorScalarMultiply(cell_to_input_weights_ptr, n_cell,
+ cell_to_input_weights_scale,
+ recovered_cell_weights);
tensor_utils::VectorBatchVectorCwiseProductAccumulate(
recovered_cell_weights, n_cell, cell_state_ptr, n_batch,
input_gate_scratch);
@@ -427,8 +420,9 @@ void LstmStep(
// For each batch and cell: update forget gate.
if (use_peephole && !is_cell_state_all_zeros) {
- VectorMultiply(cell_to_forget_weights_ptr, n_cell,
- 1. / cell_to_forget_weights_scale, recovered_cell_weights);
+ tensor_utils::VectorScalarMultiply(cell_to_forget_weights_ptr, n_cell,
+ cell_to_forget_weights_scale,
+ recovered_cell_weights);
tensor_utils::VectorBatchVectorCwiseProductAccumulate(
recovered_cell_weights, n_cell, cell_state_ptr, n_batch,
forget_gate_scratch);
@@ -459,8 +453,9 @@ void LstmStep(
tensor_utils::IsZeroVector(cell_state_ptr, n_batch * n_cell);
// For each batch and cell: update the output gate.
if (use_peephole && !is_cell_state_all_zeros) {
- VectorMultiply(cell_to_output_weights_ptr, n_cell,
- 1. / cell_to_output_weights_scale, recovered_cell_weights);
+ tensor_utils::VectorScalarMultiply(cell_to_output_weights_ptr, n_cell,
+ cell_to_output_weights_scale,
+ recovered_cell_weights);
tensor_utils::VectorBatchVectorCwiseProductAccumulate(
recovered_cell_weights, n_cell, cell_state_ptr, n_batch,
output_gate_scratch);
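
The hunk above deletes the local VectorMultiply() in favor of tensor_utils::VectorScalarMultiply(). Two things changed: the helper moved (the real version can carry an accelerated path), and the call sites now pass cell_to_*_weights_scale itself rather than its reciprocal, so recovered_cell_weights now holds the dequantized weights q * scale. A portable sketch matching the removed helper's body:

#include <cstdint>

// Assumed portable form of tensor_utils::VectorScalarMultiply().
void VectorScalarMultiply(const int8_t* vector, int v_size, float scale,
                          float* result) {
  for (int v = 0; v < v_size; ++v) {
    *result++ = scale * *vector++;
  }
}

int main() {
  const int8_t q[3] = {-128, 0, 127};
  float recovered[3];
  VectorScalarMultiply(q, 3, 0.01f, recovered);  // {-1.28, 0, 1.27}
}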
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/depthwiseconv_uint8_3x3_filter.h b/tensorflow/contrib/lite/kernels/internal/optimized/depthwiseconv_uint8_3x3_filter.h
index 4cfaa0f36d..0ce64f8c70 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/depthwiseconv_uint8_3x3_filter.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/depthwiseconv_uint8_3x3_filter.h
@@ -3242,6 +3242,7 @@ inline void DepthwiseConv3x3Filter(
int32 output_shift, int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
+ gemmlowp::ScopedProfilingLabel label(__PRETTY_FUNCTION__);
DepthwiseConvParams params;
params.input_depth = ArraySize(input_dims, 0);
params.input_width = ArraySize(input_dims, 1);
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/legacy_optimized_ops.h b/tensorflow/contrib/lite/kernels/internal/optimized/legacy_optimized_ops.h
index 7816752132..6db41d7961 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/legacy_optimized_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/legacy_optimized_ops.h
@@ -61,9 +61,17 @@ inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
float output_activation_min,
float output_activation_max, float* output_data,
const Dims<4>& output_dims) {
- AveragePool(input_data, DimsToShape(input_dims), stride_width, stride_height,
- pad_width, pad_height, kwidth, kheight, output_activation_min,
- output_activation_max, output_data, DimsToShape(output_dims));
+ tflite::PoolParams params;
+ params.stride_height = stride_height;
+ params.stride_width = stride_width;
+ params.filter_height = kheight;
+ params.filter_width = kwidth;
+ params.padding_values.height = pad_height;
+ params.padding_values.width = pad_width;
+ params.float_activation_min = output_activation_min;
+ params.float_activation_max = output_activation_max;
+ AveragePool(params, DimsToShape(input_dims), input_data,
+ DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code
@@ -96,10 +104,17 @@ inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
- AveragePool(input_data, DimsToShape(input_dims), stride_width, stride_height,
- pad_width, pad_height, filter_width, filter_height,
- output_activation_min, output_activation_max, output_data,
- DimsToShape(output_dims));
+ tflite::PoolParams params;
+ params.stride_height = stride_height;
+ params.stride_width = stride_width;
+ params.filter_height = filter_height;
+ params.filter_width = filter_width;
+ params.padding_values.height = pad_height;
+ params.padding_values.width = pad_width;
+ params.quantized_activation_min = output_activation_min;
+ params.quantized_activation_max = output_activation_max;
+ AveragePool(params, DimsToShape(input_dims), input_data,
+ DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code
@@ -140,9 +155,17 @@ inline void MaxPool(const float* input_data, const Dims<4>& input_dims,
int pad_height, int kwidth, int kheight,
float output_activation_min, float output_activation_max,
float* output_data, const Dims<4>& output_dims) {
- MaxPool(input_data, DimsToShape(input_dims), stride_width, stride_height,
- pad_width, pad_height, kwidth, kheight, output_activation_min,
- output_activation_max, output_data, DimsToShape(output_dims));
+ tflite::PoolParams params;
+ params.stride_height = stride_height;
+ params.stride_width = stride_width;
+ params.filter_height = kheight;
+ params.filter_width = kwidth;
+ params.padding_values.height = pad_height;
+ params.padding_values.width = pad_width;
+ params.float_activation_min = output_activation_min;
+ params.float_activation_max = output_activation_max;
+ MaxPool(params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
+ output_data);
}
// legacy, for compatibility with old checked-in code
@@ -172,10 +195,17 @@ inline void MaxPool(const uint8* input_data, const Dims<4>& input_dims,
int pad_height, int filter_width, int filter_height,
int32 output_activation_min, int32 output_activation_max,
uint8* output_data, const Dims<4>& output_dims) {
- MaxPool(input_data, DimsToShape(input_dims), stride_width, stride_height,
- pad_width, pad_height, filter_width, filter_height,
- output_activation_min, output_activation_max, output_data,
- DimsToShape(output_dims));
+ PoolParams params;
+ params.stride_height = stride_height;
+ params.stride_width = stride_width;
+ params.filter_height = filter_height;
+ params.filter_width = filter_width;
+ params.padding_values.height = pad_height;
+ params.padding_values.width = pad_width;
+ params.quantized_activation_min = output_activation_min;
+ params.quantized_activation_max = output_activation_max;
+ MaxPool(params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
+ output_data);
}
// legacy, for compatibility with old checked-in code
@@ -215,10 +245,17 @@ inline void L2Pool(const float* input_data, const Dims<4>& input_dims,
int pad_height, int filter_width, int filter_height,
float output_activation_min, float output_activation_max,
float* output_data, const Dims<4>& output_dims) {
- L2Pool(input_data, DimsToShape(input_dims), stride_width, stride_height,
- pad_width, pad_height, filter_width, filter_height,
- output_activation_min, output_activation_max, output_data,
- DimsToShape(output_dims));
+ PoolParams params;
+ params.stride_height = stride_height;
+ params.stride_width = stride_width;
+ params.filter_height = filter_height;
+ params.filter_width = filter_width;
+ params.padding_values.height = pad_height;
+ params.padding_values.width = pad_width;
+ params.float_activation_min = output_activation_min;
+ params.float_activation_max = output_activation_max;
+ L2Pool(params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
+ output_data);
}
// legacy, for compatibility with old checked-in code
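Note on the legacy shims above: every one of them follows the same mechanical pattern, packing the flat argument list into tflite::PoolParams before dispatching to the new-style signature. A caller migrating off the legacy entry points can build the struct directly; the sketch below is illustrative only (the include, the fixed 2x2/stride-2 configuration, and the use of the reference kernel are assumptions, not part of this change; the optimized variant has the identical signature):

#include <limits>
#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"

// Hypothetical helper: run a VALID-padded 2x2 max pool with stride 2 and no
// fused activation, using the new PoolParams-based signature.
void RunMaxPool2x2(const tflite::RuntimeShape& input_shape,
                   const float* input_data,
                   const tflite::RuntimeShape& output_shape,
                   float* output_data) {
  tflite::PoolParams params;
  params.stride_height = 2;
  params.stride_width = 2;
  params.filter_height = 2;
  params.filter_width = 2;
  params.padding_values.height = 0;  // VALID padding
  params.padding_values.width = 0;
  params.float_activation_min = std::numeric_limits<float>::lowest();
  params.float_activation_max = std::numeric_limits<float>::max();
  tflite::reference_ops::MaxPool(params, input_shape, input_data,
                                 output_shape, output_data);
}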
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/multithreaded_conv.h b/tensorflow/contrib/lite/kernels/internal/optimized/multithreaded_conv.h
index 27d9224512..4a3545d47a 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/multithreaded_conv.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/multithreaded_conv.h
@@ -35,35 +35,6 @@ limitations under the License.
namespace tflite {
namespace multithreaded_ops {
-class EigenThreadPoolWrapper : public Eigen::ThreadPoolInterface {
- public:
- explicit EigenThreadPoolWrapper(Eigen::ThreadPool* pool) : pool_(pool) {}
- ~EigenThreadPoolWrapper() override {}
-
- void Schedule(std::function<void()> fn) override {
- pool_->Schedule(std::move(fn));
- }
- int NumThreads() const override { return pool_->NumThreads(); }
- int CurrentThreadId() const override { return pool_->CurrentThreadId(); }
-
- private:
- Eigen::ThreadPool* pool_ = nullptr;
-};
-
-// We have a single global threadpool for all convolution operations. This means
-// that inferences started from different threads may block each other, but
-// since the underlying resource of CPU cores should be consumed by the
-// operations anyway, it shouldn't affect overall performance.
-const Eigen::ThreadPoolDevice& GetThreadPoolDevice() {
- const int thread_count = 4;
- static Eigen::ThreadPool* tp = new Eigen::ThreadPool(thread_count);
- static EigenThreadPoolWrapper* thread_pool_wrapper =
- new EigenThreadPoolWrapper(tp);
- static Eigen::ThreadPoolDevice* device =
- new Eigen::ThreadPoolDevice(thread_pool_wrapper, thread_count);
- return *device;
-}
-
// Shorthands for the types we need when interfacing with the EigenTensor
// library.
typedef Eigen::TensorMap<
@@ -113,14 +84,13 @@ class EigenTensorConvFunctor {
}
public:
- void operator()(const T* input_data, T* im2col_buffer, int input_batches,
- int input_height, int input_width, int input_depth,
- const T* filter_data, int filter_height, int filter_width,
- int filter_count, int stride_rows, int stride_cols,
- int pad_width, int pad_height, TfLitePadding padding,
- T* output_data, int output_height, int output_width) {
- const Eigen::ThreadPoolDevice& device = GetThreadPoolDevice();
-
+ void operator()(const Eigen::ThreadPoolDevice& device, const T* input_data,
+ T* im2col_buffer, int input_batches, int input_height,
+ int input_width, int input_depth, const T* filter_data,
+ int filter_height, int filter_width, int filter_count,
+ int stride_rows, int stride_cols, int pad_width,
+ int pad_height, TfLitePadding padding, T* output_data,
+ int output_height, int output_width) {
const bool is_1x1_kernel = (filter_height == 1 && filter_width == 1 &&
stride_rows == 1 && stride_cols == 1);
if (is_1x1_kernel) {
@@ -162,11 +132,11 @@ class EigenTensorConvFunctor {
}
};
-inline void Conv(const float* input_data, const Dims<4>& input_dims,
- const float* filter_data, const Dims<4>& filter_dims,
- const float* bias_data, const Dims<4>& bias_dims,
- int stride_width, int stride_height, int pad_width,
- int pad_height, TfLitePadding padding,
+inline void Conv(const Eigen::ThreadPoolDevice& device, const float* input_data,
+ const Dims<4>& input_dims, const float* filter_data,
+ const Dims<4>& filter_dims, const float* bias_data,
+ const Dims<4>& bias_dims, int stride_width, int stride_height,
+ int pad_width, int pad_height, TfLitePadding padding,
float output_activation_min, float output_activation_max,
float* output_data, const Dims<4>& output_dims,
float* im2col_data, const Dims<4>& im2col_dims) {
@@ -180,10 +150,11 @@ inline void Conv(const float* input_data, const Dims<4>& input_dims,
const int output_height = ArraySize(output_dims, 2);
const int output_width = ArraySize(output_dims, 1);
EigenTensorConvFunctor<float> conv_functor;
- conv_functor(input_data, im2col_data, batches, input_height, input_width,
- input_depth, filter_data, filter_height, filter_width,
- output_depth, stride_height, stride_width, pad_height, pad_width,
- padding, output_data, output_height, output_width);
+ conv_functor(device, input_data, im2col_data, batches, input_height,
+ input_width, input_depth, filter_data, filter_height,
+ filter_width, output_depth, stride_height, stride_width,
+ pad_height, pad_width, padding, output_data, output_height,
+ output_width);
optimized_ops::AddBiasAndEvalActivationFunction(
bias_data, bias_dims, output_data, output_dims, output_activation_min,
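Note: with the file-local global threadpool deleted, the Eigen device becomes an explicit parameter all the way down to the functor, so the caller decides how many threads convolutions share. A minimal sketch of what a caller could now own and pass in (the thread count of 4 and the helper name are illustrative assumptions):

#define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor"

// Hypothetical caller-owned device, mirroring what this file used to build
// internally; leaked statics keep the pool alive for the process lifetime.
const Eigen::ThreadPoolDevice& GetCallerOwnedDevice() {
  static Eigen::ThreadPool* pool = new Eigen::ThreadPool(4);
  static Eigen::ThreadPoolDevice* device =
      new Eigen::ThreadPoolDevice(pool, pool->NumThreads());
  return *device;
}
// The device is then threaded through, e.g. Conv(GetCallerOwnedDevice(), ...).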
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc b/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc
index 38ad32c734..420bc68b43 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc
@@ -55,83 +55,33 @@ void NeonMatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,
const int postamble_start =
m_cols - (m_cols & (kFloatWeightsPerNeonLane - 1));
- // The arrays used to cache the vector.
- void* aligned_vector_cache_free = nullptr;
- float32x4_t* vector_cache_float32x4 =
- reinterpret_cast<float32x4_t*>(aligned_alloc(
- sizeof(float32x4_t), (postamble_start >> 2) * sizeof(float32x4_t),
- &aligned_vector_cache_free));
-
- const int kUnrollSize = 2;
for (int b = 0; b < n_batch; b++) {
float* result_in_batch = result + b * m_rows * result_stride;
const float* vector_in_batch = vector + b * m_cols;
+ const float* matrix_row = matrix;
- const float* matrix_ptr0 = matrix;
- // If there is only 1 row, we don't want to assign an illegal pointer.
- const float* matrix_ptr1 = nullptr;
- if (m_rows > 1) {
- matrix_ptr1 = matrix + m_cols;
- }
-
- // Cache the vector.
- for (int c = 0; c < postamble_start; c += kFloatWeightsPerNeonLane) {
- vector_cache_float32x4[c >> 2] = vld1q_f32(vector_in_batch + c);
- }
-
- // Main matrix by vector multiplication loop, which handles two rows of
- // matrix by vector multiplication.
- for (int r = 0; r < (m_rows & ~(kUnrollSize - 1)); r += kUnrollSize) {
- float32x4_t acc0_32x4 = vmovq_n_f32(0.0);
- float32x4_t acc1_32x4 = vmovq_n_f32(0.0);
+ // Main matrix-by-vector multiplication loop.
+ for (int r = 0; r < m_rows; r++) {
+ float32x4_t acc_32x4 = vmovq_n_f32(0.0);
for (int c = 0; c < postamble_start; c += kFloatWeightsPerNeonLane) {
- float32x4_t temp = vector_cache_float32x4[c >> 2];
- // Load 4 float values from vector1 and vector2 and accumulator.
- float32x4_t v0_f32x4 = vld1q_f32(matrix_ptr0 + c);
- float32x4_t v1_f32x4 = vld1q_f32(matrix_ptr1 + c);
- // Vector multiply-accumulate 4 float
- acc0_32x4 = vmlaq_f32(acc0_32x4, v0_f32x4, temp);
- acc1_32x4 = vmlaq_f32(acc1_32x4, v1_f32x4, temp);
+ // Load 4 float values from vector and matrix row.
+ float32x4_t vector_f32x4 = vld1q_f32(vector_in_batch + c);
+ float32x4_t matrix_f32x4 = vld1q_f32(matrix_row + c);
+ // Multiply the vector and matrix row and add to accumulator.
+ acc_32x4 = vmlaq_f32(acc_32x4, matrix_f32x4, vector_f32x4);
}
// Add the 4 intermediate sum values to get the final dot-prod value for
// this column.
*result_in_batch +=
- (vgetq_lane_f32(acc0_32x4, 0) + vgetq_lane_f32(acc0_32x4, 1) +
- vgetq_lane_f32(acc0_32x4, 2) + vgetq_lane_f32(acc0_32x4, 3));
- *(result_in_batch + result_stride) +=
- (vgetq_lane_f32(acc1_32x4, 0) + vgetq_lane_f32(acc1_32x4, 1) +
- vgetq_lane_f32(acc1_32x4, 2) + vgetq_lane_f32(acc1_32x4, 3));
+ (vgetq_lane_f32(acc_32x4, 0) + vgetq_lane_f32(acc_32x4, 1) +
+ vgetq_lane_f32(acc_32x4, 2) + vgetq_lane_f32(acc_32x4, 3));
for (int c = postamble_start; c < m_cols; c++) {
- *result_in_batch += matrix_ptr0[c] * vector_in_batch[c];
- *(result_in_batch + result_stride) +=
- matrix_ptr1[c] * vector_in_batch[c];
+ *result_in_batch += matrix_row[c] * vector_in_batch[c];
}
- matrix_ptr0 += kUnrollSize * m_cols;
- matrix_ptr1 += kUnrollSize * m_cols;
- result_in_batch += kUnrollSize * result_stride;
- }
- for (int r = (m_rows & ~(kUnrollSize - 1)); r < m_rows; r++) {
- float32x4_t acc0_32x4 = vmovq_n_f32(0.0);
- for (int c = 0; c < postamble_start; c += kFloatWeightsPerNeonLane) {
- float32x4_t temp = vector_cache_float32x4[c >> 2];
- // Load 4 float values from vector1 and vector2 and accumulator.
- float32x4_t v0_f32x4 = vld1q_f32(matrix_ptr0 + c);
- // Vector multiply-accumulate 4 float
- acc0_32x4 = vmlaq_f32(acc0_32x4, v0_f32x4, temp);
- }
- // Add the 4 intermediate sum values to get the final dot-prod value for
- // this column.
- *result_in_batch +=
- (vgetq_lane_f32(acc0_32x4, 0) + vgetq_lane_f32(acc0_32x4, 1) +
- vgetq_lane_f32(acc0_32x4, 2) + vgetq_lane_f32(acc0_32x4, 3));
- for (int c = postamble_start; c < m_cols; c++) {
- *result_in_batch += matrix_ptr0[c] * vector_in_batch[c];
- }
- matrix_ptr0 += m_cols;
+ matrix_row += m_cols;
result_in_batch += result_stride;
}
}
- free(aligned_vector_cache_free);
}
void NeonMatrixBatchVectorMultiplyAccumulate(
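Note: the rewrite above drops the two-row unrolling, the separate remainder-row loop, and the cached aligned copy of the vector, leaving one accumulator per row. For reference, the scalar equivalent of what the simplified NEON loop computes per batch (a sketch, not part of the patch):

// Scalar reference for the simplified loop: one dot product per matrix row,
// accumulated into a strided result buffer.
void MatVecAccumulateSketch(const float* matrix, int m_rows, int m_cols,
                            const float* vector_in_batch,
                            float* result_in_batch, int result_stride) {
  const float* matrix_row = matrix;
  for (int r = 0; r < m_rows; ++r) {
    for (int c = 0; c < m_cols; ++c) {
      *result_in_batch += matrix_row[c] * vector_in_batch[c];
    }
    matrix_row += m_cols;
    result_in_batch += result_stride;
  }
}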
@@ -162,7 +112,7 @@ void NeonMatrixBatchVectorMultiplyAccumulate(
int batch, row, col;
for (batch = 0; batch < n_batch; ++batch) {
- const float batch_scaling_factor_inv = 1.0 / scaling_factors[batch];
+ const float batch_scaling_factor = scaling_factors[batch];
// Copy the vector data to an aligned vector.
memcpy(aligned_vec, vectors + batch * m_cols, sizeof(int8) * m_cols);
// Compute dot-product for every column.
@@ -232,7 +182,7 @@ void NeonMatrixBatchVectorMultiplyAccumulate(
int32 neon_sum =
vgetq_lane_s64(pairwiseAdded, 0) + vgetq_lane_s64(pairwiseAdded, 1);
- *result += ((neon_sum + postable_sum) * batch_scaling_factor_inv);
+ *result += ((neon_sum + postable_sum) * batch_scaling_factor);
} // for row
} // for batch
@@ -296,17 +246,6 @@ void NeonVectorBatchVectorCwiseProductAccumulate(const float* vector,
const int postamble_start =
v_size - (v_size & (kFloatWeightsPerNeonLane - 1));
- // The arrays used to cache the vector.
- void* aligned_vector_cache_free = nullptr;
- float32x4_t* vector_cache_float32x4 =
- reinterpret_cast<float32x4_t*>(aligned_alloc(
- sizeof(float32x4_t), (postamble_start >> 2) * sizeof(float32x4_t),
- &aligned_vector_cache_free));
-
- for (int v = 0; v < postamble_start; v += kFloatWeightsPerNeonLane) {
- vector_cache_float32x4[v >> 2] = vld1q_f32(vector + v);
- }
-
float* result_ptr = result;
const float* batch_vector_ptr = batch_vector;
for (int b = 0; b < n_batch; b++) {
@@ -314,9 +253,9 @@ void NeonVectorBatchVectorCwiseProductAccumulate(const float* vector,
// Load from memory to vectors.
float32x4_t result_f32x4 = vld1q_f32(result_ptr + v);
float32x4_t batch_vector_f32x4 = vld1q_f32(batch_vector_ptr + v);
+ float32x4_t vector_f32x4 = vld1q_f32(vector + v);
// Multiply-accumulate.
- result_f32x4 = vmlaq_f32(result_f32x4, batch_vector_f32x4,
- vector_cache_float32x4[v >> 2]);
+ result_f32x4 = vmlaq_f32(result_f32x4, batch_vector_f32x4, vector_f32x4);
// Store.
vst1q_f32(result_ptr + v, result_f32x4);
}
@@ -328,7 +267,6 @@ void NeonVectorBatchVectorCwiseProductAccumulate(const float* vector,
result_ptr += v_size;
batch_vector_ptr += v_size;
}
- free(aligned_vector_cache_free);
}
void NeonSub1Vector(const float* vector, int v_size, float* result) {
@@ -404,6 +342,77 @@ void NeonClipVector(const float* vector, int v_size, float abs_limit,
}
}
+void NeonVectorScalarMultiply(const int8_t* vector, const int v_size,
+ const float scale, float* result) {
+ // Here the assumption is that each buffer is 4-byte aligned.
+ const int kWeightsPerUint32 = 4;
+ TFLITE_CHECK_EQ((intptr_t)(&vector[0]) & (kWeightsPerUint32 - 1), 0);
+ // If v_size is not divisible by kWeightsPerNeonLane, we cannot use the main
+ // vectorized loop and must handle the tail sequentially; postamble_start
+ // is the index where that scalar postamble begins.
+ const int kWeightsPerNeonLane = 16;
+ const int postamble_start = v_size - (v_size & (kWeightsPerNeonLane - 1));
+
+ // Create a vector of 4 floats with the scale value.
+ const float32x4_t scale_f32x4 = vdupq_n_f32(scale);
+ int v = 0;
+ for (; v < postamble_start; v += kWeightsPerNeonLane) {
+ // Load int8 values, sixteen at a time.
+ const int8x16_t v_i8x16 = vld1q_s8(vector + v);
+ // Split it into two components of size eight.
+ const int8x8_t v0_i8x8 = vget_low_s8(v_i8x16);
+ const int8x8_t v1_i8x8 = vget_high_s8(v_i8x16);
+ // Convert both components to int16 first.
+ const int16x8_t v0_i16x8 = vmovl_s8(v0_i8x8);
+ const int16x8_t v1_i16x8 = vmovl_s8(v1_i8x8);
+ // Split each of them into two components.
+ const int16x4_t v0_i16x4 = vget_low_s16(v0_i16x8);
+ const int16x4_t v1_i16x4 = vget_high_s16(v0_i16x8);
+ const int16x4_t v2_i16x4 = vget_low_s16(v1_i16x8);
+ const int16x4_t v3_i16x4 = vget_high_s16(v1_i16x8);
+ // Convert these to int32 and then to float.
+ float32x4_t v0_f32x4 = vcvtq_f32_s32(vmovl_s16(v0_i16x4));
+ float32x4_t v1_f32x4 = vcvtq_f32_s32(vmovl_s16(v1_i16x4));
+ float32x4_t v2_f32x4 = vcvtq_f32_s32(vmovl_s16(v2_i16x4));
+ float32x4_t v3_f32x4 = vcvtq_f32_s32(vmovl_s16(v3_i16x4));
+ // Vector multiply four floats at a time.
+ v0_f32x4 = vmulq_f32(v0_f32x4, scale_f32x4);
+ v1_f32x4 = vmulq_f32(v1_f32x4, scale_f32x4);
+ v2_f32x4 = vmulq_f32(v2_f32x4, scale_f32x4);
+ v3_f32x4 = vmulq_f32(v3_f32x4, scale_f32x4);
+ // Store the results.
+ vst1q_f32(result + v, v0_f32x4);
+ vst1q_f32(result + v + 4, v1_f32x4);
+ vst1q_f32(result + v + 8, v2_f32x4);
+ vst1q_f32(result + v + 12, v3_f32x4);
+ }
+
+ if (v_size - postamble_start >= (kWeightsPerNeonLane >> 1)) {
+ // Load eight int8 values, if there are at least eight remaining.
+ const int8x8_t v_i8x8 = vld1_s8(vector + v);
+ // Convert them to int16 first.
+ const int16x8_t v_i16x8 = vmovl_s8(v_i8x8);
+ // Split it into two components.
+ const int16x4_t v0_i16x4 = vget_low_s16(v_i16x8);
+ const int16x4_t v1_i16x4 = vget_high_s16(v_i16x8);
+ // Convert the components to floats.
+ float32x4_t v0_f32x4 = vcvtq_f32_s32(vmovl_s16(v0_i16x4));
+ float32x4_t v1_f32x4 = vcvtq_f32_s32(vmovl_s16(v1_i16x4));
+ // Vector multiply four floats at a time.
+ v0_f32x4 = vmulq_f32(v0_f32x4, scale_f32x4);
+ v1_f32x4 = vmulq_f32(v1_f32x4, scale_f32x4);
+ // Store the results.
+ vst1q_f32(result + v, v0_f32x4);
+ vst1q_f32(result + v + 4, v1_f32x4);
+ v += (kWeightsPerNeonLane >> 1);
+ }
+
+ // Postamble loop.
+ for (; v < v_size; v++) {
+ result[v] = scale * vector[v];
+ }
+}
+
void NeonSymmetricQuantizeFloats(const float* values, const int size,
int8_t* quantized_values, float* min,
float* max, float* scaling_factor) {
@@ -418,13 +427,14 @@ void NeonSymmetricQuantizeFloats(const float* values, const int size,
*scaling_factor = 1;
return;
}
- *scaling_factor = kScale / range;
+ *scaling_factor = range / kScale;
+ const float scaling_factor_inv = 1.0f / *scaling_factor;
const int postamble_start =
size - (size & (2 * kFloatWeightsPerNeonLane - 1));
// Vectorized constants.
- const float32x4_t q_factor_f32x4 = vmovq_n_f32(*scaling_factor);
+ const float32x4_t q_factor_f32x4 = vmovq_n_f32(scaling_factor_inv);
const float32x4_t point5_f32x4 = vmovq_n_f32(0.5);
const float32x4_t zero_f32x4 = vmovq_n_f32(0.0);
const int32x4_t scale_i32x4 = vmovq_n_s32(kScale);
@@ -476,7 +486,7 @@ void NeonSymmetricQuantizeFloats(const float* values, const int size,
for (int i = postamble_start; i < size; ++i) {
const int32 quantized_value =
- static_cast<int32>(TfLiteRound(*scaling_factor * values[i]));
+ static_cast<int32>(TfLiteRound(scaling_factor_inv * values[i]));
quantized_values[i] = std::min(kScale, std::max(-kScale, quantized_value));
}
}
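Note: the scaling-factor convention flips in this hunk. *scaling_factor used to be kScale / range (the quantize multiplier) and is now range / kScale (the dequantize multiplier), with quantization using its inverse; that is why the hybrid matmul hunks above now multiply accumulated sums by scaling_factors[batch] directly instead of by a precomputed inverse. A self-contained round-trip sketch of the new convention (kScale = 127 as in this file; the sample values are illustrative):

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  const float kScale = 127.0f;
  const float values[4] = {-0.5f, 0.0f, 0.25f, 1.0f};
  const float range = 1.0f;                        // max(|values|) over buffer
  const float scaling_factor = range / kScale;     // dequantize multiplier
  const float scaling_factor_inv = 1.0f / scaling_factor;
  for (float v : values) {
    int q = static_cast<int>(std::round(v * scaling_factor_inv));  // quantize
    q = std::min(127, std::max(-127, q));                          // clamp
    std::printf("%+.3f -> %+4d -> %+.5f\n", v, q, q * scaling_factor);
  }
  return 0;
}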
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h b/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h
index 7a5a8fc541..45c9f65b64 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h
@@ -105,6 +105,10 @@ bool IsZeroVector(const float* vector, int v_size) {
return NEON_OR_PORTABLE(IsZeroVector, vector, v_size);
}
+void VectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+ float* result) {
+ NEON_OR_PORTABLE(VectorScalarMultiply, vector, v_size, scale, result);
+}
void ClipVector(const float* vector, int v_size, float abs_limit,
float* result) {
NEON_OR_PORTABLE(ClipVector, vector, v_size, abs_limit, result);
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h b/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
index 6b5d35f21e..2f73036e03 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
@@ -41,6 +41,7 @@ namespace optimized_ops {
// Unoptimized reference ops:
using reference_ops::ArgMax;
+using reference_ops::ArgMinMax;
using reference_ops::BroadcastGreater;
using reference_ops::BroadcastGreaterEqual;
using reference_ops::BroadcastLess;
@@ -59,6 +60,7 @@ using reference_ops::Mean;
using reference_ops::RankOneSelect;
using reference_ops::Relu1;
using reference_ops::Relu6;
+using reference_ops::ReluX;
using reference_ops::Select;
using reference_ops::SpaceToBatchND;
using reference_ops::StridedSlice;
@@ -170,16 +172,9 @@ template <typename Scalar, int N>
MatrixMap<Scalar> MapAsMatrixWithGivenNumberOfRows(Scalar* data,
const Dims<N>& dims,
int rows) {
- int cols = 1;
- bool matched_rows = false;
- for (int d = 0; d < N; d++) {
- cols *= dims.sizes[d];
- if (cols == rows) {
- matched_rows = true;
- cols = 1;
- }
- }
- TFLITE_DCHECK(matched_rows);
+ const int flatsize = FlatSize(dims);
+ TFLITE_DCHECK((flatsize % rows) == 0);
+ const int cols = flatsize / rows;
return MatrixMap<Scalar>(data, rows, cols);
}
@@ -2714,6 +2709,20 @@ inline void Add(const int16* input1_data, const Dims<4>& input1_dims,
}
}
+inline void Add(const int32* input1_data, const Dims<4>& input1_dims,
+ const int32* input2_data, const Dims<4>& input2_dims,
+ int32 output_activation_min, int32 output_activation_max,
+ int32* output_data, const Dims<4>& output_dims) {
+ gemmlowp::ScopedProfilingLabel label("Add/int32");
+
+ const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
+ for (int i = 0; i < flat_size; ++i) {
+ output_data[i] = ActivationFunctionWithMinMax(
+ input1_data[i] + input2_data[i], output_activation_min,
+ output_activation_max);
+ }
+}
+
template <FusedActivationFunctionType Ac>
inline void Add(const int16* input1_data, const Dims<4>& input1_dims,
int input1_shift, const int16* input2_data,
@@ -3045,6 +3054,20 @@ void Mul(const float* input1_data, const Dims<4>& input1_dims,
output_activation_max, output_data, output_dims);
}
+inline void Mul(const int32* input1_data, const Dims<4>& input1_dims,
+ const int32* input2_data, const Dims<4>& input2_dims,
+ int32 output_activation_min, int32 output_activation_max,
+ int32* output_data, const Dims<4>& output_dims) {
+ gemmlowp::ScopedProfilingLabel label("Mul/int32");
+
+ const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
+ for (int i = 0; i < flat_size; ++i) {
+ output_data[i] = ActivationFunctionWithMinMax(
+ input1_data[i] * input2_data[i], output_activation_min,
+ output_activation_max);
+ }
+}
+
template <FusedActivationFunctionType Ac>
void Mul(const int32* input1_data, const Dims<4>& input1_dims,
const int32* input2_data, const Dims<4>& input2_dims,
@@ -3295,6 +3318,19 @@ inline void Sub(const float* input1_data, const Dims<4>& input1_dims,
}
}
+inline void Sub(const int32* input1_data, const Dims<4>& input1_dims,
+ const int32* input2_data, const Dims<4>& input2_dims,
+ int32 output_activation_min, int32 output_activation_max,
+ int32* output_data, const Dims<4>& output_dims) {
+ gemmlowp::ScopedProfilingLabel label("Sub/int32");
+ const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
+ for (int i = 0; i < flat_size; ++i) {
+ output_data[i] = ActivationFunctionWithMinMax(
+ input1_data[i] - input2_data[i], output_activation_min,
+ output_activation_max);
+ }
+}
+
// TODO(jiawen): We can implement BroadcastSub on buffers of arbitrary
// dimensionality if the runtime code does a single loop over one dimension
// that handles broadcasting as the base case. The code generator would then
@@ -3763,21 +3799,20 @@ inline int NodeOffset(int b, int h, int w, int height, int width) {
return (b * height + h) * width + w;
}
-inline void AveragePool(const float* input_data,
- const RuntimeShape& input_shape, int stride_width,
- int stride_height, int pad_width, int pad_height,
- int kwidth, int kheight, float output_activation_min,
- float output_activation_max, float* output_data,
- const RuntimeShape& output_shape) {
+inline void AveragePool(const PoolParams& params,
+ const RuntimeShape& input_shape,
+ const float* input_data,
+ const RuntimeShape& output_shape, float* output_data) {
gemmlowp::ScopedProfilingLabel label("AveragePool");
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
- const int depth = MatchingDim(input_shape, 3, output_shape, 3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
+ const int stride_height = params.stride_height;
+ const int stride_width = params.stride_width;
// TODO(benoitjacob) make this a proper reference impl without Eigen!
const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
@@ -3792,12 +3827,15 @@ inline void AveragePool(const float* input_data,
for (int w = 0; w < input_width; ++w) {
// (h_start, h_end) * (w_start, w_end) is the range that the input
// vector projects to.
- int hpad = h + pad_height;
- int wpad = w + pad_width;
- int h_start =
- (hpad < kheight) ? 0 : (hpad - kheight) / stride_height + 1;
+ int hpad = h + params.padding_values.height;
+ int wpad = w + params.padding_values.width;
+ int h_start = (hpad < params.filter_height)
+ ? 0
+ : (hpad - params.filter_height) / stride_height + 1;
int h_end = std::min(hpad / stride_height + 1, output_height);
- int w_start = (wpad < kwidth) ? 0 : (wpad - kwidth) / stride_width + 1;
+ int w_start = (wpad < params.filter_width)
+ ? 0
+ : (wpad - params.filter_width) / stride_width + 1;
int w_end = std::min(wpad / stride_width + 1, output_width);
// compute elementwise sum
for (int ph = h_start; ph < h_end; ++ph) {
@@ -3815,29 +3853,21 @@ inline void AveragePool(const float* input_data,
TFLITE_DCHECK_GT(out_count.minCoeff(), 0);
out_mat.array().rowwise() /= out_count.transpose().array();
- for (int b = 0; b < batches; ++b) {
- for (int y = 0; y < output_height; ++y) {
- for (int x = 0; x < output_width; ++x) {
- for (int c = 0; c < depth; ++c) {
- output_data[Offset(output_shape, b, y, x, c)] =
- ActivationFunctionWithMinMax(
- output_data[Offset(output_shape, b, y, x, c)],
- output_activation_min, output_activation_max);
- }
- }
- }
+ const int flat_size = output_shape.FlatSize();
+ for (int i = 0; i < flat_size; ++i) {
+ output_data[i] = ActivationFunctionWithMinMax(output_data[i],
+ params.float_activation_min,
+ params.float_activation_max);
}
}
-inline void AveragePool(const uint8* input_data,
- const RuntimeShape& input_shape, int stride_width,
- int stride_height, int pad_width, int pad_height,
- int filter_width, int filter_height,
- int32 output_activation_min,
- int32 output_activation_max, uint8* output_data,
- const RuntimeShape& output_shape) {
+inline void AveragePool(const PoolParams& params,
+ const RuntimeShape& input_shape,
+ const uint8* input_data,
+ const RuntimeShape& output_shape, uint8* output_data) {
gemmlowp::ScopedProfilingLabel label("AveragePool/8bit");
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+ TFLITE_DCHECK_LE(params.quantized_activation_min,
+ params.quantized_activation_max);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
@@ -3846,17 +3876,21 @@ inline void AveragePool(const uint8* input_data,
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
+ const int stride_height = params.stride_height;
+ const int stride_width = params.stride_width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
- const int in_x_origin = (out_x * stride_width) - pad_width;
- const int in_y_origin = (out_y * stride_height) - pad_height;
+ const int in_x_origin =
+ (out_x * stride_width) - params.padding_values.width;
+ const int in_y_origin =
+ (out_y * stride_height) - params.padding_values.height;
const int filter_x_start = std::max(0, -in_x_origin);
const int filter_x_end =
- std::min(filter_width, input_width - in_x_origin);
+ std::min(params.filter_width, input_width - in_x_origin);
const int filter_y_start = std::max(0, -in_y_origin);
const int filter_y_end =
- std::min(filter_height, input_height - in_y_origin);
+ std::min(params.filter_height, input_height - in_y_origin);
const int filter_count =
(filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
// 1280 required by Inception v3
@@ -3904,18 +3938,18 @@ inline void AveragePool(const uint8* input_data,
output_data + Offset(output_shape, batch, out_y, out_x, 0);
int channel = 0;
#ifdef USE_NEON
-#define AVGPOOL_DIVIDING_BY(FILTER_COUNT) \
- if (filter_count == FILTER_COUNT) { \
- for (; channel <= depth - 8; channel += 8) { \
- uint16 buf[8]; \
- for (int i = 0; i < 8; i++) { \
- buf[i] = (acc[channel + i] + FILTER_COUNT / 2) / FILTER_COUNT; \
- } \
- uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf)); \
- buf8 = vmin_u8(buf8, vdup_n_u8(output_activation_max)); \
- buf8 = vmax_u8(buf8, vdup_n_u8(output_activation_min)); \
- vst1_u8(output_ptr + channel, buf8); \
- } \
+#define AVGPOOL_DIVIDING_BY(FILTER_COUNT) \
+ if (filter_count == FILTER_COUNT) { \
+ for (; channel <= depth - 8; channel += 8) { \
+ uint16 buf[8]; \
+ for (int i = 0; i < 8; i++) { \
+ buf[i] = (acc[channel + i] + FILTER_COUNT / 2) / FILTER_COUNT; \
+ } \
+ uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf)); \
+ buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max)); \
+ buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min)); \
+ vst1_u8(output_ptr + channel, buf8); \
+ } \
}
AVGPOOL_DIVIDING_BY(9)
AVGPOOL_DIVIDING_BY(15)
@@ -3926,15 +3960,15 @@ inline void AveragePool(const uint8* input_data,
buf[i] = (acc[channel + i] + filter_count / 2) / filter_count;
}
uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf));
- buf8 = vmin_u8(buf8, vdup_n_u8(output_activation_max));
- buf8 = vmax_u8(buf8, vdup_n_u8(output_activation_min));
+ buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max));
+ buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min));
vst1_u8(output_ptr + channel, buf8);
}
#endif
for (; channel < depth; ++channel) {
uint16 a = (acc[channel] + filter_count / 2) / filter_count;
- a = std::max<uint16>(a, output_activation_min);
- a = std::min<uint16>(a, output_activation_max);
+ a = std::max<uint16>(a, params.quantized_activation_min);
+ a = std::min<uint16>(a, params.quantized_activation_max);
output_ptr[channel] = static_cast<uint8>(a);
}
}
@@ -3942,20 +3976,19 @@ inline void AveragePool(const uint8* input_data,
}
}
-inline void MaxPool(const float* input_data, const RuntimeShape& input_shape,
- int stride_width, int stride_height, int pad_width,
- int pad_height, int kwidth, int kheight,
- float output_activation_min, float output_activation_max,
- float* output_data, const RuntimeShape& output_shape) {
+inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
+ const float* input_data, const RuntimeShape& output_shape,
+ float* output_data) {
gemmlowp::ScopedProfilingLabel label("MaxPool");
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
- const int depth = MatchingDim(input_shape, 3, output_shape, 3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
+ const int stride_height = params.stride_height;
+ const int stride_width = params.stride_width;
const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
@@ -3966,12 +3999,15 @@ inline void MaxPool(const float* input_data, const RuntimeShape& input_shape,
for (int w = 0; w < input_width; ++w) {
// (h_start, h_end) * (w_start, w_end) is the range that the input
// vector projects to.
- int hpad = h + pad_height;
- int wpad = w + pad_width;
- int h_start =
- (hpad < kheight) ? 0 : (hpad - kheight) / stride_height + 1;
+ int hpad = h + params.padding_values.height;
+ int wpad = w + params.padding_values.width;
+ int h_start = (hpad < params.filter_height)
+ ? 0
+ : (hpad - params.filter_height) / stride_height + 1;
int h_end = std::min(hpad / stride_height + 1, output_height);
- int w_start = (wpad < kwidth) ? 0 : (wpad - kwidth) / stride_width + 1;
+ int w_start = (wpad < params.filter_width)
+ ? 0
+ : (wpad - params.filter_width) / stride_width + 1;
int w_end = std::min(wpad / stride_width + 1, output_width);
// compute elementwise sum
for (int ph = h_start; ph < h_end; ++ph) {
@@ -3986,28 +4022,20 @@ inline void MaxPool(const float* input_data, const RuntimeShape& input_shape,
}
}
}
-
- for (int b = 0; b < batches; ++b) {
- for (int y = 0; y < output_height; ++y) {
- for (int x = 0; x < output_width; ++x) {
- for (int c = 0; c < depth; ++c) {
- output_data[Offset(output_shape, b, y, x, c)] =
- ActivationFunctionWithMinMax(
- output_data[Offset(output_shape, b, y, x, c)],
- output_activation_min, output_activation_max);
- }
- }
- }
+ const int flat_size = output_shape.FlatSize();
+ for (int i = 0; i < flat_size; ++i) {
+ output_data[i] = ActivationFunctionWithMinMax(output_data[i],
+ params.float_activation_min,
+ params.float_activation_max);
}
}
-inline void MaxPool(const uint8* input_data, const RuntimeShape& input_shape,
- int stride_width, int stride_height, int pad_width,
- int pad_height, int filter_width, int filter_height,
- int32 output_activation_min, int32 output_activation_max,
- uint8* output_data, const RuntimeShape& output_shape) {
+inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
+ const uint8* input_data, const RuntimeShape& output_shape,
+ uint8* output_data) {
gemmlowp::ScopedProfilingLabel label("MaxPool/8bit");
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+ TFLITE_DCHECK_LE(params.quantized_activation_min,
+ params.quantized_activation_max);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
@@ -4016,17 +4044,21 @@ inline void MaxPool(const uint8* input_data, const RuntimeShape& input_shape,
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
+ const int stride_height = params.stride_height;
+ const int stride_width = params.stride_width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
- const int in_x_origin = (out_x * stride_width) - pad_width;
- const int in_y_origin = (out_y * stride_height) - pad_height;
+ const int in_x_origin =
+ (out_x * stride_width) - params.padding_values.width;
+ const int in_y_origin =
+ (out_y * stride_height) - params.padding_values.height;
const int filter_x_start = std::max(0, -in_x_origin);
const int filter_x_end =
- std::min(filter_width, input_width - in_x_origin);
+ std::min(params.filter_width, input_width - in_x_origin);
const int filter_y_start = std::max(0, -in_y_origin);
const int filter_y_end =
- std::min(filter_height, input_height - in_y_origin);
+ std::min(params.filter_height, input_height - in_y_origin);
// 2048 required by Inception v3
static constexpr int kAccBufferMaxSize = 2048;
TFLITE_DCHECK_LE(depth, kAccBufferMaxSize);
@@ -4069,21 +4101,21 @@ inline void MaxPool(const uint8* input_data, const RuntimeShape& input_shape,
#ifdef USE_NEON
for (; channel <= depth - 16; channel += 16) {
uint8x16_t a = vld1q_u8(acc + channel);
- a = vminq_u8(a, vdupq_n_u8(output_activation_max));
- a = vmaxq_u8(a, vdupq_n_u8(output_activation_min));
+ a = vminq_u8(a, vdupq_n_u8(params.quantized_activation_max));
+ a = vmaxq_u8(a, vdupq_n_u8(params.quantized_activation_min));
vst1q_u8(output_ptr + channel, a);
}
for (; channel <= depth - 8; channel += 8) {
uint8x8_t a = vld1_u8(acc + channel);
- a = vmin_u8(a, vdup_n_u8(output_activation_max));
- a = vmax_u8(a, vdup_n_u8(output_activation_min));
+ a = vmin_u8(a, vdup_n_u8(params.quantized_activation_max));
+ a = vmax_u8(a, vdup_n_u8(params.quantized_activation_min));
vst1_u8(output_ptr + channel, a);
}
#endif
for (; channel < depth; ++channel) {
uint8 a = acc[channel];
- a = std::max<uint8>(a, output_activation_min);
- a = std::min<uint8>(a, output_activation_max);
+ a = std::max<uint8>(a, params.quantized_activation_min);
+ a = std::min<uint8>(a, params.quantized_activation_max);
output_ptr[channel] = static_cast<uint8>(a);
}
}
@@ -4091,11 +4123,9 @@ inline void MaxPool(const uint8* input_data, const RuntimeShape& input_shape,
}
}
-inline void L2Pool(const float* input_data, const RuntimeShape& input_shape,
- int stride_width, int stride_height, int pad_width,
- int pad_height, int filter_width, int filter_height,
- float output_activation_min, float output_activation_max,
- float* output_data, const RuntimeShape& output_shape) {
+inline void L2Pool(const PoolParams& params, const RuntimeShape& input_shape,
+ const float* input_data, const RuntimeShape& output_shape,
+ float* output_data) {
gemmlowp::ScopedProfilingLabel label("L2Pool");
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
@@ -4104,6 +4134,8 @@ inline void L2Pool(const float* input_data, const RuntimeShape& input_shape,
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
+ const int stride_height = params.stride_height;
+ const int stride_width = params.stride_width;
// Actually carry out L2 Pool. Code is written in forward mode: we go through
// the input values once, and write to all the pooled regions that it maps to.
const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
@@ -4118,15 +4150,17 @@ inline void L2Pool(const float* input_data, const RuntimeShape& input_shape,
for (int w = 0; w < input_width; ++w) {
// (h_start, h_end) * (w_start, w_end) is the range that the input
// vector projects to.
- const int hpad = h + pad_height;
- const int wpad = w + pad_width;
- const int h_start = (hpad < filter_height)
- ? 0
- : (hpad - filter_height) / stride_height + 1;
+ const int hpad = h + params.padding_values.height;
+ const int wpad = w + params.padding_values.width;
+ const int h_start =
+ (hpad < params.filter_height)
+ ? 0
+ : (hpad - params.filter_height) / stride_height + 1;
const int h_end = std::min(hpad / stride_height + 1, output_height);
- const int w_start = (wpad < filter_width)
- ? 0
- : (wpad - filter_width) / stride_width + 1;
+ const int w_start =
+ (wpad < params.filter_width)
+ ? 0
+ : (wpad - params.filter_width) / stride_width + 1;
const int w_end = std::min(wpad / stride_width + 1, output_width);
// pre-compute square
const int in_offset = w + input_width * (h + input_height * b);
@@ -4147,6 +4181,13 @@ inline void L2Pool(const float* input_data, const RuntimeShape& input_shape,
out_count = out_count.array().inverse();
out_mat =
(out_mat.array().rowwise() * out_count.transpose().array()).cwiseSqrt();
+
+ const int flat_size = output_shape.FlatSize();
+ for (int i = 0; i < flat_size; ++i) {
+ output_data[i] = ActivationFunctionWithMinMax(output_data[i],
+ params.float_activation_min,
+ params.float_activation_max);
+ }
}
inline void LocalResponseNormalization(const float* input_data,
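Note: the three new int32 overloads in this file (Add/int32, Mul/int32, Sub/int32) share one shape: a flat elementwise loop followed by ActivationFunctionWithMinMax, which is simply a min/max clamp of the result. A standalone sketch of that clamping pattern (function name and sample ranges are illustrative):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Elementwise add with fused activation clamp, as one scalar step.
int32_t AddClampedSketch(int32_t a, int32_t b, int32_t activation_min,
                         int32_t activation_max) {
  return std::min(activation_max, std::max(activation_min, a + b));
}

int main() {
  std::printf("%d\n", AddClampedSketch(100, 50, 0, 127));  // prints 127
  std::printf("%d\n", AddClampedSketch(-10, 5, 0, 127));   // prints 0
  return 0;
}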
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h b/tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h
index f14667090f..db7926df9a 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h
@@ -124,6 +124,12 @@ void PortableCopyVector(const float* vector, int v_size, float* result);
// Fill vector with 0.f.
void PortableZeroVector(float* vector, int v_size);
+// Multiply all elements of the vector by a scalar.
+void PortableVectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+ float* result);
+void NeonVectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+ float* result);
+
// Limit a float input f between +abs_limit and -abs_limit.
float PortableClip(float f, float abs_limit);
diff --git a/tensorflow/contrib/lite/kernels/internal/quantization_util.h b/tensorflow/contrib/lite/kernels/internal/quantization_util.h
index 525857a2e6..9b3f1823dc 100644
--- a/tensorflow/contrib/lite/kernels/internal/quantization_util.h
+++ b/tensorflow/contrib/lite/kernels/internal/quantization_util.h
@@ -28,8 +28,9 @@ namespace tflite {
// Given the min and max values of a float array, return
// reasonable quantization parameters to use for this array.
template <typename T>
-QuantizationParams ChooseQuantizationParams(double rmin, double rmax) {
- const T qmin = std::numeric_limits<T>::min();
+QuantizationParams ChooseQuantizationParams(double rmin, double rmax,
+ bool narrow_range) {
+ const T qmin = std::numeric_limits<T>::min() + (narrow_range ? 1 : 0);
const T qmax = std::numeric_limits<T>::max();
const double qmin_double = qmin;
const double qmax_double = qmax;
@@ -97,6 +98,11 @@ QuantizationParams ChooseQuantizationParams(double rmin, double rmax) {
return quantization_params;
}
+template <typename T>
+QuantizationParams ChooseQuantizationParams(double rmin, double rmax) {
+ return ChooseQuantizationParams<T>(rmin, rmax, false);
+}
+
// Converts a floating-point number to an integer. For all inputs x where
// static_cast<IntOut>(x) is legal according to the C++ standard, the result
// is identical to that cast (i.e. the result is x with its fractional part
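Note: narrow_range only shifts qmin up by one code point, dropping the lowest quantized value; for signed types this makes the representable range symmetric, which symmetric weight-quantization schemes expect. The zero-argument overload added below preserves the old behavior for existing callers. A sketch of the effect (standalone, types chosen for illustration):

#include <cstdint>
#include <cstdio>
#include <limits>

template <typename T>
void PrintQuantizedRange(bool narrow_range) {
  // Mirrors the qmin/qmax computation in ChooseQuantizationParams above.
  const int qmin = std::numeric_limits<T>::min() + (narrow_range ? 1 : 0);
  const int qmax = std::numeric_limits<T>::max();
  std::printf("narrow_range=%d -> [%d, %d]\n",
              static_cast<int>(narrow_range), qmin, qmax);
}

int main() {
  PrintQuantizedRange<uint8_t>(false);  // [0, 255]
  PrintQuantizedRange<uint8_t>(true);   // [1, 255]
  PrintQuantizedRange<int8_t>(true);    // [-127, 127], the symmetric range
  return 0;
}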
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/legacy_reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/legacy_reference_ops.h
index 878b2441b4..f715d34bc1 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/legacy_reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/legacy_reference_ops.h
@@ -69,9 +69,17 @@ inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
float output_activation_min,
float output_activation_max, float* output_data,
const Dims<4>& output_dims) {
- AveragePool(input_data, DimsToShape(input_dims), stride_width, stride_height,
- pad_width, pad_height, kwidth, kheight, output_activation_min,
- output_activation_max, output_data, DimsToShape(output_dims));
+ tflite::PoolParams params;
+ params.stride_height = stride_height;
+ params.stride_width = stride_width;
+ params.filter_height = kheight;
+ params.filter_width = kwidth;
+ params.padding_values.height = pad_height;
+ params.padding_values.width = pad_width;
+ params.float_activation_min = output_activation_min;
+ params.float_activation_max = output_activation_max;
+ AveragePool(params, DimsToShape(input_dims), input_data,
+ DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code
@@ -104,10 +112,17 @@ inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
- AveragePool(input_data, DimsToShape(input_dims), stride_width, stride_height,
- pad_width, pad_height, filter_width, filter_height,
- output_activation_min, output_activation_max, output_data,
- DimsToShape(output_dims));
+ tflite::PoolParams params;
+ params.stride_height = stride_height;
+ params.stride_width = stride_width;
+ params.filter_height = filter_height;
+ params.filter_width = filter_width;
+ params.padding_values.height = pad_height;
+ params.padding_values.width = pad_width;
+ params.quantized_activation_min = output_activation_min;
+ params.quantized_activation_max = output_activation_max;
+ AveragePool(params, DimsToShape(input_dims), input_data,
+ DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code
@@ -148,9 +163,17 @@ inline void MaxPool(const float* input_data, const Dims<4>& input_dims,
int pad_height, int kwidth, int kheight,
float output_activation_min, float output_activation_max,
float* output_data, const Dims<4>& output_dims) {
- MaxPool(input_data, DimsToShape(input_dims), stride_width, stride_height,
- pad_width, pad_height, kwidth, kheight, output_activation_min,
- output_activation_max, output_data, DimsToShape(output_dims));
+ tflite::PoolParams params;
+ params.stride_height = stride_height;
+ params.stride_width = stride_width;
+ params.filter_height = kheight;
+ params.filter_width = kwidth;
+ params.padding_values.height = pad_height;
+ params.padding_values.width = pad_width;
+ params.float_activation_min = output_activation_min;
+ params.float_activation_max = output_activation_max;
+ MaxPool(params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
+ output_data);
}
// legacy, for compatibility with old checked-in code
@@ -180,10 +203,17 @@ inline void MaxPool(const uint8* input_data, const Dims<4>& input_dims,
int pad_height, int filter_width, int filter_height,
int32 output_activation_min, int32 output_activation_max,
uint8* output_data, const Dims<4>& output_dims) {
- MaxPool(input_data, DimsToShape(input_dims), stride_width, stride_height,
- pad_width, pad_height, filter_width, filter_height,
- output_activation_min, output_activation_max, output_data,
- DimsToShape(output_dims));
+ PoolParams params;
+ params.stride_height = stride_height;
+ params.stride_width = stride_width;
+ params.filter_height = filter_height;
+ params.filter_width = filter_width;
+ params.padding_values.height = pad_height;
+ params.padding_values.width = pad_width;
+ params.quantized_activation_min = output_activation_min;
+ params.quantized_activation_max = output_activation_max;
+ MaxPool(params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
+ output_data);
}
// legacy, for compatibility with old checked-in code
@@ -223,10 +253,17 @@ inline void L2Pool(const float* input_data, const Dims<4>& input_dims,
int pad_height, int filter_width, int filter_height,
float output_activation_min, float output_activation_max,
float* output_data, const Dims<4>& output_dims) {
- L2Pool(input_data, DimsToShape(input_dims), stride_width, stride_height,
- pad_width, pad_height, filter_width, filter_height,
- output_activation_min, output_activation_max, output_data,
- DimsToShape(output_dims));
+ PoolParams params;
+ params.stride_height = stride_height;
+ params.stride_width = stride_width;
+ params.filter_height = filter_height;
+ params.filter_width = filter_width;
+ params.padding_values.height = pad_height;
+ params.padding_values.width = pad_width;
+ params.float_activation_min = output_activation_min;
+ params.float_activation_max = output_activation_max;
+ L2Pool(params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
+ output_data);
}
// legacy, for compatibility with old checked-in code
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc b/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc
index f8c6f341f7..7ead449ca8 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc
+++ b/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc
@@ -51,10 +51,11 @@ void PortableSymmetricQuantizeFloats(const float* values, const int size,
*scaling_factor = 1;
return;
}
- *scaling_factor = kScale / range;
+ *scaling_factor = range / kScale;
+ const float scaling_factor_inv = 1.0f / *scaling_factor;
for (int i = 0; i < size; ++i) {
const int32_t quantized_value =
- static_cast<int32_t>(TfLiteRound(*scaling_factor * values[i]));
+ static_cast<int32_t>(TfLiteRound(values[i] * scaling_factor_inv));
// Clamp: just in case some odd numeric offset.
quantized_values[i] = std::min(kScale, std::max(-kScale, quantized_value));
}
@@ -85,7 +86,7 @@ void PortableMatrixBatchVectorMultiplyAccumulate(
float* __restrict__ result, int result_stride) {
int batch, row, col;
for (batch = 0; batch < n_batch; ++batch, vectors += m_cols) {
- const float batch_scaling_factor_inv = 1.0 / scaling_factors[batch];
+ const float batch_scaling_factor = scaling_factors[batch];
// Get the address of the first row.
const int8_t* row_ptr = matrix;
for (row = 0; row < m_rows; ++row, result += result_stride) {
@@ -98,7 +99,7 @@ void PortableMatrixBatchVectorMultiplyAccumulate(
for (col = 0; col < m_cols; ++col, ++row_ptr) {
dotprod += (*row_ptr) * (vectors[col]);
} // for col
- *result += (dotprod * batch_scaling_factor_inv);
+ *result += (dotprod * batch_scaling_factor);
} // for row
} // for batch
}
@@ -194,6 +195,13 @@ void PortableZeroVector(float* vector, int v_size) {
memset(vector, 0, v_size * sizeof(float));
}
+void PortableVectorScalarMultiply(const int8_t* vector, const int v_size,
+ const float scale, float* result) {
+ for (int v = 0; v < v_size; ++v) {
+ *result++ = scale * *vector++;
+ }
+}
+
void PortableClipVector(const float* vector, int v_size, float abs_limit,
float* result) {
for (int v = 0; v < v_size; v++) {
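Note: VectorScalarMultiply (the NEON and portable versions added in this change) is the dequantize step for buffers that share a single scale: result[i] = scale * int8_input[i]. A standalone usage sketch (the weights and scale are illustrative, and the local helper simply mirrors the portable implementation above):

#include <cstdint>
#include <cstdio>

// Same logic as PortableVectorScalarMultiply, inlined here so the sketch
// compiles on its own.
void VectorScalarMultiplySketch(const int8_t* vector, int v_size, float scale,
                                float* result) {
  for (int v = 0; v < v_size; ++v) {
    result[v] = scale * vector[v];
  }
}

int main() {
  const int8_t quantized[4] = {-127, -1, 1, 127};
  float dequantized[4];
  // scale = range / 127 for a symmetric int8 quantization with range 1.0.
  VectorScalarMultiplySketch(quantized, 4, 1.0f / 127.0f, dequantized);
  for (float f : dequantized) std::printf("%f\n", f);
  return 0;
}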
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h b/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h
index d2e1fecd25..d3a4fa8507 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h
@@ -96,6 +96,10 @@ void PortableSub1Vector(const float* vector, int v_size, float* result);
// Fill vector with 0.f.
void PortableZeroVector(float* vector, int v_size);
+// Multiply all elements of the vector by a scalar.
+void PortableVectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+ float* result);
+
// Clip elements of a vector using a abs_limit value.
void PortableClipVector(const float* vector, int v_size, float abs_limit,
float* result);
@@ -199,6 +203,12 @@ void ZeroVector(float* vector, int v_size) {
PortableZeroVector(vector, v_size);
}
+// Multiply all elements of the vector by a scalar.
+void VectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+ float* result) {
+ PortableVectorScalarMultiply(vector, v_size, scale, result);
+}
+
void ClipVector(const float* vector, int v_size, float abs_limit,
float* result) {
PortableClipVector(vector, v_size, abs_limit, result);
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
index 7b8a56a524..6fabb9c268 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
@@ -951,6 +951,19 @@ inline void Relu6(const float* input_data, const RuntimeShape& input_shape,
}
}
+inline void ReluX(uint8 min_value, uint8 max_value, const uint8* input_data,
+ const RuntimeShape& input_shape, uint8* output_data,
+ const RuntimeShape& output_shape) {
+ gemmlowp::ScopedProfilingLabel label("Quantized ReluX (not fused)");
+ const int flat_size = MatchingFlatSize(input_shape, output_shape);
+ for (int i = 0; i < flat_size; ++i) {
+ const uint8 val = input_data[i];
+ const uint8 clamped =
+ val > max_value ? max_value : val < min_value ? min_value : val;
+ output_data[i] = clamped;
+ }
+}
+
template <FusedActivationFunctionType Ac>
void L2Normalization(const float* input_data, const RuntimeShape& input_shape,
float* output_data, const RuntimeShape& output_shape) {
@@ -1051,10 +1064,11 @@ inline void L2Normalization(const uint8* input_data,
}
}
-inline void Add(const float* input1_data, const Dims<4>& input1_dims,
- const float* input2_data, const Dims<4>& input2_dims,
- float output_activation_min, float output_activation_max,
- float* output_data, const Dims<4>& output_dims) {
+template <typename T>
+inline void Add(const T* input1_data, const Dims<4>& input1_dims,
+ const T* input2_data, const Dims<4>& input2_dims,
+ T output_activation_min, T output_activation_max,
+ T* output_data, const Dims<4>& output_dims) {
const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(
@@ -1415,10 +1429,11 @@ inline void BroadcastAddFivefold(
output_activation_max, output_data, output_dims);
}
-inline void Mul(const float* input1_data, const Dims<4>& input1_dims,
- const float* input2_data, const Dims<4>& input2_dims,
- float output_activation_min, float output_activation_max,
- float* output_data, const Dims<4>& output_dims) {
+template <typename T>
+inline void Mul(const T* input1_data, const Dims<4>& input1_dims,
+ const T* input2_data, const Dims<4>& input2_dims,
+ T output_activation_min, T output_activation_max,
+ T* output_data, const Dims<4>& output_dims) {
const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(
@@ -1651,10 +1666,11 @@ inline void Div(const float* input1_data, const Dims<4>& input1_dims,
}
}
-inline void Sub(const float* input1_data, const Dims<4>& input1_dims,
- const float* input2_data, const Dims<4>& input2_dims,
- float output_activation_min, float output_activation_max,
- float* output_data, const Dims<4>& output_dims) {
+template <typename T>
+inline void Sub(const T* input1_data, const Dims<4>& input1_dims,
+ const T* input2_data, const Dims<4>& input2_dims,
+ T output_activation_min, T output_activation_max,
+ T* output_data, const Dims<4>& output_dims) {
const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(
@@ -2259,13 +2275,10 @@ inline int NodeOffset(int b, int h, int w, int height, int width) {
return (b * height + h) * width + w;
}
-inline void AveragePool(const float* input_data,
- const RuntimeShape& input_shape, int stride_width,
- int stride_height, int pad_width, int pad_height,
- int filter_width, int filter_height,
- float output_activation_min,
- float output_activation_max, float* output_data,
- const RuntimeShape& output_shape) {
+inline void AveragePool(const PoolParams& params,
+ const RuntimeShape& input_shape,
+ const float* input_data,
+ const RuntimeShape& output_shape, float* output_data) {
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
@@ -2274,20 +2287,24 @@ inline void AveragePool(const float* input_data,
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
+ const int stride_height = params.stride_height;
+ const int stride_width = params.stride_width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
for (int channel = 0; channel < depth; ++channel) {
- const int in_x_origin = (out_x * stride_width) - pad_width;
- const int in_y_origin = (out_y * stride_height) - pad_height;
+ const int in_x_origin =
+ (out_x * stride_width) - params.padding_values.width;
+ const int in_y_origin =
+ (out_y * stride_height) - params.padding_values.height;
// Compute the boundaries of the filter region clamped so as to
// ensure that the filter window fits in the input array.
const int filter_x_start = std::max(0, -in_x_origin);
const int filter_x_end =
- std::min(filter_width, input_width - in_x_origin);
+ std::min(params.filter_width, input_width - in_x_origin);
const int filter_y_start = std::max(0, -in_y_origin);
const int filter_y_end =
- std::min(filter_height, input_height - in_y_origin);
+ std::min(params.filter_height, input_height - in_y_origin);
float total = 0.f;
float filter_count = 0;
for (int filter_y = filter_y_start; filter_y < filter_y_end;
@@ -2303,22 +2320,20 @@ inline void AveragePool(const float* input_data,
}
const float average = total / filter_count;
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
- ActivationFunctionWithMinMax(average, output_activation_min,
- output_activation_max);
+ ActivationFunctionWithMinMax(average, params.float_activation_min,
+ params.float_activation_max);
}
}
}
}
}
-inline void AveragePool(const uint8* input_data,
- const RuntimeShape& input_shape, int stride_width,
- int stride_height, int pad_width, int pad_height,
- int filter_width, int filter_height,
- int32 output_activation_min,
- int32 output_activation_max, uint8* output_data,
- const RuntimeShape& output_shape) {
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+inline void AveragePool(const PoolParams& params,
+ const RuntimeShape& input_shape,
+ const uint8* input_data,
+ const RuntimeShape& output_shape, uint8* output_data) {
+ TFLITE_DCHECK_LE(params.quantized_activation_min,
+ params.quantized_activation_max);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
@@ -2327,20 +2342,24 @@ inline void AveragePool(const uint8* input_data,
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
+ const int stride_height = params.stride_height;
+ const int stride_width = params.stride_width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
for (int channel = 0; channel < depth; ++channel) {
- const int in_x_origin = (out_x * stride_width) - pad_width;
- const int in_y_origin = (out_y * stride_height) - pad_height;
+ const int in_x_origin =
+ (out_x * stride_width) - params.padding_values.width;
+ const int in_y_origin =
+ (out_y * stride_height) - params.padding_values.height;
// Compute the boundaries of the filter region clamped so as to
// ensure that the filter window fits in the input array.
const int filter_x_start = std::max(0, -in_x_origin);
const int filter_x_end =
- std::min(filter_width, input_width - in_x_origin);
+ std::min(params.filter_width, input_width - in_x_origin);
const int filter_y_start = std::max(0, -in_y_origin);
const int filter_y_end =
- std::min(filter_height, input_height - in_y_origin);
+ std::min(params.filter_height, input_height - in_y_origin);
int32 acc = 0;
int filter_count = 0;
for (int filter_y = filter_y_start; filter_y < filter_y_end;
@@ -2355,8 +2374,8 @@ inline void AveragePool(const uint8* input_data,
}
}
acc = (acc + filter_count / 2) / filter_count;
- acc = std::max(acc, output_activation_min);
- acc = std::min(acc, output_activation_max);
+ acc = std::max(acc, params.quantized_activation_min);
+ acc = std::min(acc, params.quantized_activation_max);
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
static_cast<uint8>(acc);
}
@@ -2365,11 +2384,9 @@ inline void AveragePool(const uint8* input_data,
}
}
-inline void L2Pool(const float* input_data, const RuntimeShape& input_shape,
- int stride_width, int stride_height, int pad_width,
- int pad_height, int filter_width, int filter_height,
- float output_activation_min, float output_activation_max,
- float* output_data, const RuntimeShape& output_shape) {
+inline void L2Pool(const PoolParams& params, const RuntimeShape& input_shape,
+ const float* input_data, const RuntimeShape& output_shape,
+ float* output_data) {
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
@@ -2378,20 +2395,24 @@ inline void L2Pool(const float* input_data, const RuntimeShape& input_shape,
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
+ const int stride_height = params.stride_height;
+ const int stride_width = params.stride_width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
for (int channel = 0; channel < depth; ++channel) {
- const int in_x_origin = (out_x * stride_width) - pad_width;
- const int in_y_origin = (out_y * stride_height) - pad_height;
+ const int in_x_origin =
+ (out_x * stride_width) - params.padding_values.width;
+ const int in_y_origin =
+ (out_y * stride_height) - params.padding_values.height;
// Compute the boundaries of the filter region clamped so as to
// ensure that the filter window fits in the input array.
const int filter_x_start = std::max(0, -in_x_origin);
const int filter_x_end =
- std::min(filter_width, input_width - in_x_origin);
+ std::min(params.filter_width, input_width - in_x_origin);
const int filter_y_start = std::max(0, -in_y_origin);
const int filter_y_end =
- std::min(filter_height, input_height - in_y_origin);
+ std::min(params.filter_height, input_height - in_y_origin);
float sum_squares = 0.f;
int filter_count = 0;
for (int filter_y = filter_y_start; filter_y < filter_y_end;
@@ -2408,19 +2429,18 @@ inline void L2Pool(const float* input_data, const RuntimeShape& input_shape,
}
const float l2pool_result = std::sqrt(sum_squares / filter_count);
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
- ActivationFunctionWithMinMax(l2pool_result, output_activation_min,
- output_activation_max);
+ ActivationFunctionWithMinMax(l2pool_result,
+ params.float_activation_min,
+ params.float_activation_max);
}
}
}
}
}
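The L2 pool above accumulates squared inputs over each clamped filter window, then takes the square root of their mean. A minimal standalone sketch of that arithmetic for a single hypothetical 2x2 window (values invented, not from the diff):

#include <cmath>
#include <cstdio>

int main() {
  const float window[4] = {1.f, 2.f, 3.f, 4.f};  // one pooled window
  float sum_squares = 0.f;
  for (float v : window) sum_squares += v * v;   // 1 + 4 + 9 + 16 = 30
  const float l2pool_result = std::sqrt(sum_squares / 4);  // sqrt(7.5) ~ 2.739
  std::printf("%f\n", l2pool_result);
  return 0;
}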
-inline void MaxPool(const float* input_data, const RuntimeShape& input_shape,
- int stride_width, int stride_height, int pad_width,
- int pad_height, int filter_width, int filter_height,
- float output_activation_min, float output_activation_max,
- float* output_data, const RuntimeShape& output_shape) {
+inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
+ const float* input_data, const RuntimeShape& output_shape,
+ float* output_data) {
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
@@ -2429,20 +2449,24 @@ inline void MaxPool(const float* input_data, const RuntimeShape& input_shape,
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
+ const int stride_height = params.stride_height;
+ const int stride_width = params.stride_width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
for (int channel = 0; channel < depth; ++channel) {
- const int in_x_origin = (out_x * stride_width) - pad_width;
- const int in_y_origin = (out_y * stride_height) - pad_height;
+ const int in_x_origin =
+ (out_x * stride_width) - params.padding_values.width;
+ const int in_y_origin =
+ (out_y * stride_height) - params.padding_values.height;
// Compute the boundaries of the filter region clamped so as to
// ensure that the filter window fits in the input array.
const int filter_x_start = std::max(0, -in_x_origin);
const int filter_x_end =
- std::min(filter_width, input_width - in_x_origin);
+ std::min(params.filter_width, input_width - in_x_origin);
const int filter_y_start = std::max(0, -in_y_origin);
const int filter_y_end =
- std::min(filter_height, input_height - in_y_origin);
+ std::min(params.filter_height, input_height - in_y_origin);
float max = std::numeric_limits<float>::lowest();
for (int filter_y = filter_y_start; filter_y < filter_y_end;
++filter_y) {
@@ -2456,22 +2480,21 @@ inline void MaxPool(const float* input_data, const RuntimeShape& input_shape,
}
}
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
- ActivationFunctionWithMinMax(max, output_activation_min,
- output_activation_max);
+ ActivationFunctionWithMinMax(max, params.float_activation_min,
+ params.float_activation_max);
}
}
}
}
}
-inline void MaxPool(const uint8* input_data, const RuntimeShape& input_shape,
- int stride_width, int stride_height, int pad_width,
- int pad_height, int filter_width, int filter_height,
- int32 output_activation_min, int32 output_activation_max,
- uint8* output_data, const RuntimeShape& output_shape) {
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
- TFLITE_DCHECK_GE(output_activation_min, 0);
- TFLITE_DCHECK_LE(output_activation_max, 255);
+inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
+ const uint8* input_data, const RuntimeShape& output_shape,
+ uint8* output_data) {
+ TFLITE_DCHECK_LE(params.quantized_activation_min,
+ params.quantized_activation_max);
+ TFLITE_DCHECK_GE(params.quantized_activation_min, 0);
+ TFLITE_DCHECK_LE(params.quantized_activation_max, 255);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
@@ -2480,20 +2503,24 @@ inline void MaxPool(const uint8* input_data, const RuntimeShape& input_shape,
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
+ const int stride_height = params.stride_height;
+ const int stride_width = params.stride_width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
for (int channel = 0; channel < depth; ++channel) {
- const int in_x_origin = (out_x * stride_width) - pad_width;
- const int in_y_origin = (out_y * stride_height) - pad_height;
+ const int in_x_origin =
+ (out_x * stride_width) - params.padding_values.width;
+ const int in_y_origin =
+ (out_y * stride_height) - params.padding_values.height;
// Compute the boundaries of the filter region clamped so as to
// ensure that the filter window fits in the input array.
const int filter_x_start = std::max(0, -in_x_origin);
const int filter_x_end =
- std::min(filter_width, input_width - in_x_origin);
+ std::min(params.filter_width, input_width - in_x_origin);
const int filter_y_start = std::max(0, -in_y_origin);
const int filter_y_end =
- std::min(filter_height, input_height - in_y_origin);
+ std::min(params.filter_height, input_height - in_y_origin);
uint8 max = 0;
for (int filter_y = filter_y_start; filter_y < filter_y_end;
++filter_y) {
@@ -2506,8 +2533,8 @@ inline void MaxPool(const uint8* input_data, const RuntimeShape& input_shape,
input_data[Offset(input_shape, batch, in_y, in_x, channel)]);
}
}
- max = std::max<uint8>(max, output_activation_min);
- max = std::min<uint8>(max, output_activation_max);
+ max = std::max<uint8>(max, params.quantized_activation_min);
+ max = std::min<uint8>(max, params.quantized_activation_max);
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
static_cast<uint8>(max);
}
@@ -3342,7 +3369,7 @@ inline void Pad(const T* input_data, const Dims<4>& input_dims,
template <typename T>
inline void StridedSlice(const T* input_data, const Dims<4>& input_dims,
- int begin_mask, int end_mask,
+ int begin_mask, int end_mask, int shrink_axis_mask,
const std::vector<int>& start_indices,
const std::vector<int>& stop_indices,
const std::vector<int>& strides, T* output_data,
@@ -3354,20 +3381,24 @@ inline void StridedSlice(const T* input_data, const Dims<4>& input_dims,
TFLITE_DCHECK_EQ(strides.size(), 4);
const int start_b = strided_slice::StartForAxis(begin_mask, start_indices,
strides, input_dims.sizes, 3);
- const int stop_b = strided_slice::StopForAxis(end_mask, stop_indices, strides,
- input_dims.sizes, 3);
+ const int stop_b =
+ strided_slice::StopForAxis(end_mask, shrink_axis_mask, stop_indices,
+ strides, input_dims.sizes, 3, start_b);
const int start_h = strided_slice::StartForAxis(begin_mask, start_indices,
strides, input_dims.sizes, 2);
- const int stop_h = strided_slice::StopForAxis(end_mask, stop_indices, strides,
- input_dims.sizes, 2);
+ const int stop_h =
+ strided_slice::StopForAxis(end_mask, shrink_axis_mask, stop_indices,
+ strides, input_dims.sizes, 2, start_h);
const int start_w = strided_slice::StartForAxis(begin_mask, start_indices,
strides, input_dims.sizes, 1);
- const int stop_w = strided_slice::StopForAxis(end_mask, stop_indices, strides,
- input_dims.sizes, 1);
+ const int stop_w =
+ strided_slice::StopForAxis(end_mask, shrink_axis_mask, stop_indices,
+ strides, input_dims.sizes, 1, start_w);
const int start_d = strided_slice::StartForAxis(begin_mask, start_indices,
strides, input_dims.sizes, 0);
- const int stop_d = strided_slice::StopForAxis(end_mask, stop_indices, strides,
- input_dims.sizes, 0);
+ const int stop_d =
+ strided_slice::StopForAxis(end_mask, shrink_axis_mask, stop_indices,
+ strides, input_dims.sizes, 0, start_d);
T* out_ptr = output_data;
for (int in_b = start_b;
@@ -3437,7 +3468,8 @@ inline bool Reduce(const In* input_data, const int* input_dims,
const int* output_dims, const int input_num_dims,
const int output_num_dims, const int* axis,
const int num_axis, int* input_iter,
- Out reducer(Out current, const In in), Out* output_data) {
+ Out reducer(const Out current, const In in),
+ Out* output_data) {
// Reset input iterator.
TFLITE_DCHECK(input_num_dims > 0);
for (int idx = 0; idx < input_num_dims; ++idx) {
@@ -3455,11 +3487,12 @@ inline bool Reduce(const In* input_data, const int* input_dims,
return true;
}
-inline bool ResolveAxis(const int num_dims, const int* axis, const int num_axis,
- int* out_axis, int* out_num_axis) {
+inline bool ResolveAxis(const int num_dims, const int* axis,
+ const int64_t num_axis, int* out_axis,
+ int* out_num_axis) {
*out_num_axis = 0; // Just in case.
  // O(n^2) is fine since out_num_axis should be really small, mostly <= 4
- for (int idx = 0; idx < num_axis; ++idx) {
+ for (int64_t idx = 0; idx < num_axis; ++idx) {
// Handle negative index.
int current = axis[idx] < 0 ? (axis[idx] + num_dims) : axis[idx];
TFLITE_DCHECK(current >= 0 && current < num_dims);
@@ -3485,7 +3518,7 @@ inline bool ReduceSumImpl(const In* input_data, const int* input_dims,
const int output_num_dims, const int* axis,
const int num_axis, int* input_iter,
Out* output_data) {
- auto reducer = [](Out current, const In in) -> Out {
+ auto reducer = [](const Out current, const In in) -> Out {
const Out actual_in = static_cast<Out>(in);
return current + actual_in;
};
@@ -3494,6 +3527,24 @@ inline bool ReduceSumImpl(const In* input_data, const int* input_dims,
output_data);
}
+template <typename T>
+inline bool InitTensorDataForReduce(const int* dims, const int num_dims,
+ const T init_value, T* data) {
+ size_t num_elements = 1;
+ for (int idx = 0; idx < num_dims; ++idx) {
+ size_t current = static_cast<size_t>(dims[idx]);
+ // Overflow prevention.
+ if (num_elements > std::numeric_limits<size_t>::max() / current) {
+ return false;
+ }
+ num_elements *= current;
+ }
+ for (size_t idx = 0; idx < num_elements; ++idx) {
+ data[idx] = init_value;
+ }
+ return true;
+}
+
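The guard inside the loop rejects any multiply that would wrap around size_t instead of silently overflowing. The same pattern in isolation (a hedged sketch; the explicit zero-dimension check is an extra safety not present above):

#include <cstddef>
#include <limits>

bool SafeElementCount(const int* dims, int num_dims, size_t* out) {
  size_t n = 1;
  for (int i = 0; i < num_dims; ++i) {
    const size_t d = static_cast<size_t>(dims[i]);
    // Refuse to multiply if n * d would exceed the size_t range.
    if (d != 0 && n > std::numeric_limits<size_t>::max() / d) return false;
    n *= d;
  }
  *out = n;
  return true;
}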
// Computes the sum of elements across dimensions given in axis.
template <typename T>
inline bool Sum(const T* input_data, const int* input_dims,
@@ -3502,17 +3553,9 @@ inline bool Sum(const T* input_data, const int* input_dims,
const int* axis, const int num_axis_dimensions, bool keep_dims,
int* temp_index, int* resolved_axis) {
// Reset output data.
- size_t num_outputs = 1;
- for (int idx = 0; idx < output_num_dims; ++idx) {
- size_t current = static_cast<size_t>(output_dims[idx]);
- // Overflow prevention.
- if (num_outputs > std::numeric_limits<size_t>::max() / current) {
- return false;
- }
- num_outputs *= current;
- }
- for (size_t idx = 0; idx < num_outputs; ++idx) {
- output_data[idx] = T();
+ if (!InitTensorDataForReduce(output_dims, output_num_dims, static_cast<T>(0),
+ output_data)) {
+ return false;
}
// Resolve axis.
@@ -3527,6 +3570,61 @@ inline bool Sum(const T* input_data, const int* input_dims,
num_resolved_axis, temp_index, output_data);
}
+// Computes the max of elements across dimensions given in axis.
+template <typename T>
+inline bool ReduceMax(const T* input_data, const int* input_dims,
+ const int input_num_dims, T* output_data,
+ const int* output_dims, const int output_num_dims,
+ const int* axis, const int64_t num_axis_dimensions,
+ bool keep_dims, int* temp_index, int* resolved_axis) {
+ T init_value = std::numeric_limits<T>::lowest();
+ // Reset output data.
+ if (!InitTensorDataForReduce(output_dims, output_num_dims, init_value,
+ output_data)) {
+ return false;
+ }
+
+ // Resolve axis.
+ int num_resolved_axis = 0;
+ if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
+ &num_resolved_axis)) {
+ return false;
+ }
+
+ auto reducer = [](const T current, const T in) -> T {
+ return (in > current) ? in : current;
+ };
+ return Reduce<T, T>(input_data, input_dims, output_dims, input_num_dims,
+ output_num_dims, resolved_axis, num_resolved_axis,
+ temp_index, reducer, output_data);
+}
+
+// Computes the prod of elements across dimensions given in axis.
+template <typename T>
+inline bool ReduceProd(const T* input_data, const int* input_dims,
+ const int input_num_dims, T* output_data,
+ const int* output_dims, const int output_num_dims,
+ const int* axis, const int64_t num_axis_dimensions,
+ bool keep_dims, int* temp_index, int* resolved_axis) {
+ // Reset output data.
+ if (!InitTensorDataForReduce(output_dims, output_num_dims, static_cast<T>(1),
+ output_data)) {
+ return false;
+ }
+
+ // Resolve axis.
+ int num_resolved_axis = 0;
+ if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
+ &num_resolved_axis)) {
+ return false;
+ }
+
+ auto reducer = [](const T current, const T in) -> T { return in * current; };
+ return Reduce<T, T>(input_data, input_dims, output_dims, input_num_dims,
+ output_num_dims, resolved_axis, num_resolved_axis,
+ temp_index, reducer, output_data);
+}
+
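A hedged usage sketch of the new helpers (tensor contents and scratch sizes invented; assumes the reference_ops header is included): reducing a 2x3 input over axis 1 with ReduceMax should leave the per-row maxima.

const float input[] = {1, 2, 3, 4, 5, 6};  // 2x3: rows {1,2,3} and {4,5,6}
const int input_dims[] = {2, 3};
const int output_dims[] = {2};
const int axis[] = {1};
float output[2];
int temp_index[2];     // scratch: one slot per input dimension
int resolved_axis[1];  // scratch: one slot per reduction axis
tflite::reference_ops::ReduceMax(input, input_dims, 2, output, output_dims, 1,
                                 axis, 1, /*keep_dims=*/false, temp_index,
                                 resolved_axis);
// output == {3.f, 6.f}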
// Computes the mean of elements across dimensions given in axis.
  // It does so in two stages: first it computes the sum of elements along the
  // axis, then divides that sum by the number of elements in the axis.
@@ -3699,9 +3797,9 @@ void TensorFlowMaximumMinimum(const T* input1_data, const Dims<4>& input1_dims,
}
}
-template <typename T1, typename T2, typename T3>
-void ArgMax(const T3* axis, const T1* input_data, const Dims<4>& input_dims,
- T2* output_data, const Dims<4>& output_dims) {
+template <typename T1, typename T2, typename T3, typename Cmp>
+void ArgMinMax(const T3* axis, const T1* input_data, const Dims<4>& input_dims,
+ T2* output_data, const Dims<4>& output_dims, const Cmp& cmp) {
  // The current ArgMinMax implementation can only determine the index of the
  // minimum/maximum value in the last dimension. So the axis argument is
  // ignored.
@@ -3714,19 +3812,28 @@ void ArgMax(const T3* axis, const T1* input_data, const Dims<4>& input_dims,
const int depth = ArraySize(input_dims, 0);
for (int i = 0; i < outer_size; ++i) {
- auto max_value = input_data[i * depth];
- int max_index = 0;
+ auto min_max_value = input_data[i * depth];
+ int min_max_index = 0;
for (int d = 1; d < depth; ++d) {
const auto& curr_value = input_data[i * depth + d];
- if (curr_value > max_value) {
- max_value = curr_value;
- max_index = d;
+ if (cmp(curr_value, min_max_value)) {
+ min_max_value = curr_value;
+ min_max_index = d;
}
}
- output_data[i] = max_index;
+ output_data[i] = min_max_index;
}
}
+// TODO(renjieliu): Remove this ArgMax wrapper once callers use ArgMinMax
+// directly.
+template <typename T1, typename T2, typename T3>
+void ArgMax(const T3* axis, const T1* input_data,
+ const tflite::Dims<4>& input_dims, T2* output_data,
+ const tflite::Dims<4>& output_dims) {
+ ArgMinMax(axis, input_data, input_dims, output_data, output_dims,
+ std::greater<T1>());
+}
+
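Because ArgMinMax is parameterized on the comparator, an ArgMin over the last dimension is the same traversal with std::less. A hedged sketch (this wrapper is not part of the diff; <functional> is assumed to be included):

template <typename T1, typename T2, typename T3>
void ArgMin(const T3* axis, const T1* input_data,
            const tflite::Dims<4>& input_dims, T2* output_data,
            const tflite::Dims<4>& output_dims) {
  // Keeps the index of the smallest value instead of the largest.
  ArgMinMax(axis, input_data, input_dims, output_data, output_dims,
            std::less<T1>());
}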
template <typename T>
void Transpose(const T* input, const Dims<4>& input_dims, T* output,
const Dims<4>& output_dims, const int* permuted_axes) {
@@ -4069,6 +4176,36 @@ inline void SparseToDense(const std::vector<std::vector<I>>& indices,
}
}
+template <typename T>
+inline void Pow(const T* input1_data, const Dims<4>& input1_dims,
+ const T* input2_data, const Dims<4>& input2_dims,
+ T* output_data, const Dims<4>& output_dims) {
+ const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
+ for (int i = 0; i < flat_size; ++i) {
+ output_data[i] = std::pow(input1_data[i], input2_data[i]);
+ }
+}
+
+template <typename T>
+inline void BroadcastPow(const T* input1_data, const Dims<4>& input1_dims,
+ const T* input2_data, const Dims<4>& input2_dims,
+ T* output_data, const Dims<4>& output_dims) {
+ NdArrayDesc<4> desc1;
+ NdArrayDesc<4> desc2;
+ NdArrayDescsForElementwiseBroadcast(input1_dims, input2_dims, &desc1, &desc2);
+ for (int b = 0; b < ArraySize(output_dims, 3); ++b) {
+ for (int y = 0; y < ArraySize(output_dims, 2); ++y) {
+ for (int x = 0; x < ArraySize(output_dims, 1); ++x) {
+ for (int c = 0; c < ArraySize(output_dims, 0); ++c) {
+ output_data[Offset(output_dims, c, x, y, b)] =
+ std::pow(input1_data[SubscriptToIndex(desc1, c, x, y, b)],
+ input2_data[SubscriptToIndex(desc2, c, x, y, b)]);
+ }
+ }
+ }
+ }
+}
+
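BroadcastPow maps each output coordinate back through the NdArrayDesc subscripts, so a scalar second operand is applied to every element of the first. The arithmetic it should reproduce for the scalar case (same numbers as the BroadcastTest in pow_test.cc further down; integer multiply used here to avoid std::pow rounding):

int main() {
  const int base[4] = {12, 2, 7, 8};
  const int exponent = 4;  // scalar operand, broadcast across all elements
  int out[4];
  for (int i = 0; i < 4; ++i) {
    int p = 1;
    for (int e = 0; e < exponent; ++e) p *= base[i];  // exact integer power
    out[i] = p;
  }
  // out == {20736, 16, 2401, 4096}
  return 0;
}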
} // namespace reference_ops
} // namespace tflite
diff --git a/tensorflow/contrib/lite/kernels/internal/strided_slice_logic.h b/tensorflow/contrib/lite/kernels/internal/strided_slice_logic.h
index ef77371bf6..5994fad5c7 100644
--- a/tensorflow/contrib/lite/kernels/internal/strided_slice_logic.h
+++ b/tensorflow/contrib/lite/kernels/internal/strided_slice_logic.h
@@ -74,12 +74,22 @@ inline int StartForAxis(int begin_mask,
// size 4, this function would return 4 as the stop, because it is one past the
// "real" indices of 0, 1, 2 & 3.
template <typename IntType>
-inline int StopForAxis(int end_mask, std::vector<IntType> const& stop_indices,
+inline int StopForAxis(int end_mask, int shrink_axis_mask,
+ std::vector<IntType> const& stop_indices,
std::vector<IntType> const& strides,
- int const* input_shape, int axis) {
+ int const* input_shape, int axis, int start_for_axis) {
// Begin with the specified index
+ const bool shrink_axis = shrink_axis_mask & (1 << axis);
int stop = stop_indices[axis];
+ // When shrinking an axis, the end position does not matter (and can be
+ // incorrect when negative indexing is used, see Issue #19260). Always use
+ // start_for_axis + 1 to generate a length 1 slice, since start_for_axis has
+ // already been adjusted for negative indices.
+ if (shrink_axis) {
+ stop = start_for_axis + 1;
+ }
+
// end_mask override
if (end_mask & (1 << axis)) {
if (strides[axis] > 0) {
@@ -93,7 +103,7 @@ inline int StopForAxis(int end_mask, std::vector<IntType> const& stop_indices,
}
// Handle negative indices
- int axis_size = input_shape[axis];
+ const int axis_size = input_shape[axis];
if (stop < 0) {
stop += axis_size;
}
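With the change above, a shrunk axis derives its stop from the already-normalized start instead of from stop_indices, which is what fixes Issue #19260. A worked trace with invented numbers:

// axis size = 4, recorded start index = -1, shrink bit set for this axis
//   StartForAxis normalizes -1   ->  start_for_axis = 3
//   shrink_axis branch forces    ->  stop = start_for_axis + 1 = 4
// The slice is [3, 4): exactly one element, regardless of the stale stop.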
diff --git a/tensorflow/contrib/lite/kernels/internal/tensor.h b/tensorflow/contrib/lite/kernels/internal/tensor.h
index 518bee1c63..ee2af5b460 100644
--- a/tensorflow/contrib/lite/kernels/internal/tensor.h
+++ b/tensorflow/contrib/lite/kernels/internal/tensor.h
@@ -15,6 +15,7 @@ limitations under the License.
#ifndef TENSORFLOW_CONTRIB_LITE_KERNELS_INTERNAL_TENSOR_H_
#define TENSORFLOW_CONTRIB_LITE_KERNELS_INTERNAL_TENSOR_H_
+#include <complex>
#include <vector>
#include "tensorflow/contrib/lite/context.h"
#include "tensorflow/contrib/lite/kernels/internal/types.h"
@@ -54,6 +55,13 @@ inline bool* GetTensorData(TfLiteTensor* tensor) {
return tensor != nullptr ? tensor->data.b : nullptr;
}
+template <>
+inline std::complex<float>* GetTensorData(TfLiteTensor* tensor) {
+ return tensor != nullptr
+ ? reinterpret_cast<std::complex<float>*>(tensor->data.c64)
+ : nullptr;
+}
+
template <typename T>
inline const T* GetTensorData(const TfLiteTensor* tensor);
@@ -87,6 +95,13 @@ inline const bool* GetTensorData(const TfLiteTensor* tensor) {
return tensor != nullptr ? tensor->data.b : nullptr;
}
+template <>
+inline const std::complex<float>* GetTensorData(const TfLiteTensor* tensor) {
+ return tensor != nullptr
+ ? reinterpret_cast<const std::complex<float>*>(tensor->data.c64)
+ : nullptr;
+}
+
inline int RemapDim(int max_dimensions, int d) {
return max_dimensions - d - 1;
}
diff --git a/tensorflow/contrib/lite/kernels/internal/tensor_utils.h b/tensorflow/contrib/lite/kernels/internal/tensor_utils.h
index 5160e22307..82f4503127 100644
--- a/tensorflow/contrib/lite/kernels/internal/tensor_utils.h
+++ b/tensorflow/contrib/lite/kernels/internal/tensor_utils.h
@@ -124,6 +124,10 @@ void Sub1Vector(const float* vector, int v_size, float* result);
// Fill vector with 0.f.
void ZeroVector(float* vector, int v_size);
+// Multiply all elements of vector by a scalar.
+void VectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+ float* result);
+
// Clip elements of a vector using an abs_limit value.
void ClipVector(const float* vector, int v_size, float abs_limit,
float* result);
diff --git a/tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc b/tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc
index 14ee528394..372a6efec5 100644
--- a/tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc
+++ b/tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc
@@ -32,6 +32,22 @@ TEST(uKernels, ClipTest) {
{0.0, -0.5, 1.0, -1.5, 2.0, -2.0, 2.0, -2.0, 2.0, -2.0})));
}
+TEST(uKernels, VectorScalarMultiply) {
+ constexpr int kVectorSize = 29;
+ static int8_t input[kVectorSize];
+  for (int i = 0; i < kVectorSize; ++i) {
+ input[i] = static_cast<int8_t>(i - 14);
+ }
+ const float scale = 0.1f;
+ std::vector<float> output(kVectorSize, 0.0f);
+ VectorScalarMultiply(input, kVectorSize, scale, output.data());
+ EXPECT_THAT(output,
+ ElementsAreArray(ArrayFloatNear(
+ {-1.4, -1.3, -1.2, -1.1, -1.0, -0.9, -0.8, -0.7, -0.6, -0.5,
+ -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5,
+ 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4})));
+}
+
TEST(uKernels, IsZeroTest) {
constexpr int kVectorSize = 21;
static float zeros[kVectorSize] = {0.0};
@@ -63,7 +79,8 @@ TEST(uKernels, SymmetricQuantizeFloatsTest) {
EXPECT_EQ(min, -640);
EXPECT_EQ(max, 1000);
- EXPECT_NEAR(scaling_factor, 0.127, 1e-6); // EQ won't work due to fpoint.
+  // EXPECT_EQ won't work due to floating point rounding.
+ EXPECT_NEAR(scaling_factor, 1000 / 127.0, 1e-6);
EXPECT_THAT(output,
testing::ElementsAreArray({-81, -81, -80, 1, 0, -1, -1, 0, 127}));
}
@@ -95,7 +112,7 @@ TEST(uKernels, SymmetricQuantizeFloatsAllAlmostZeroTest) {
EXPECT_NEAR(min, -9e-05, 1e-6);
EXPECT_NEAR(max, 0.0002, 1e-6);
- EXPECT_EQ(scaling_factor, 635000);
+ EXPECT_NEAR(scaling_factor, 1.57e-6, 1e-6);
EXPECT_THAT(output,
testing::ElementsAreArray({-6, 19, -4, -57, 1, 25, 6, 127, 0}));
}
diff --git a/tensorflow/contrib/lite/kernels/internal/types.h b/tensorflow/contrib/lite/kernels/internal/types.h
index fa2420713f..737cfb69c9 100644
--- a/tensorflow/contrib/lite/kernels/internal/types.h
+++ b/tensorflow/contrib/lite/kernels/internal/types.h
@@ -23,7 +23,12 @@ limitations under the License.
namespace tflite {
enum class FusedActivationFunctionType : uint8 { kNone, kRelu6, kRelu1, kRelu };
-enum class PaddingType { kNone, kSame, kValid };
+enum class PaddingType : uint8 { kNone, kSame, kValid };
+
+struct PaddingValues {
+ int8 width;
+ int8 height;
+};
// This enumeration allows for non-default formats for the weights array
// of a fully-connected operator, allowing the use of special optimized
@@ -588,6 +593,22 @@ void ComputeStrides(Dims<N>* dims) {
}
}
+struct PoolParams {
+ FusedActivationFunctionType activation;
+ PaddingType padding_type;
+ PaddingValues padding_values;
+ int stride_height;
+ int stride_width;
+ int filter_height;
+ int filter_width;
+  // uint8, etc., inference params.
+ int32 quantized_activation_min;
+ int32 quantized_activation_max;
+ // float inference params.
+ float float_activation_min;
+ float float_activation_max;
+};
+
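A hedged sketch of the new calling convention for the float overloads (a 2x2, stride-2 window with no padding; input_shape, input_data, output_shape, and output_data are assumed to be in scope, and <limits> included):

tflite::PoolParams op_params;
op_params.stride_height = 2;
op_params.stride_width = 2;
op_params.filter_height = 2;
op_params.filter_width = 2;
op_params.padding_values.height = 0;
op_params.padding_values.width = 0;
op_params.float_activation_min = std::numeric_limits<float>::lowest();
op_params.float_activation_max = std::numeric_limits<float>::max();
reference_ops::MaxPool(op_params, input_shape, input_data, output_shape,
                       output_data);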
} // namespace tflite
#endif // TENSORFLOW_CONTRIB_LITE_KERNELS_INTERNAL_TYPES_H_
diff --git a/tensorflow/contrib/lite/kernels/kernel_util.cc b/tensorflow/contrib/lite/kernels/kernel_util.cc
index fdf9856912..08f942c933 100644
--- a/tensorflow/contrib/lite/kernels/kernel_util.cc
+++ b/tensorflow/contrib/lite/kernels/kernel_util.cc
@@ -103,24 +103,6 @@ void CalculateActivationRangeUint8(TfLiteFusedActivation activation,
act_max);
}
-void CalculateActivationRangeFloat(TfLiteFusedActivation activation,
- float* activation_min,
- float* activation_max) {
- if (activation == kTfLiteActRelu) {
- *activation_min = 0.f;
- *activation_max = std::numeric_limits<float>::max();
- } else if (activation == kTfLiteActRelu6) {
- *activation_min = 0.f;
- *activation_max = 6.f;
- } else if (activation == kTfLiteActRelu1) {
- *activation_min = -1.f;
- *activation_max = 1.f;
- } else {
- *activation_min = std::numeric_limits<float>::lowest();
- *activation_max = std::numeric_limits<float>::max();
- }
-}
-
bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) {
return TfLiteIntArrayEqual(input1->dims, input2->dims);
}
diff --git a/tensorflow/contrib/lite/kernels/kernel_util.h b/tensorflow/contrib/lite/kernels/kernel_util.h
index 20058a5f69..c8ce3c917d 100644
--- a/tensorflow/contrib/lite/kernels/kernel_util.h
+++ b/tensorflow/contrib/lite/kernels/kernel_util.h
@@ -15,6 +15,8 @@ limitations under the License.
#ifndef TENSORFLOW_CONTRIB_LITE_KERNELS_KERNEL_UTIL_H_
#define TENSORFLOW_CONTRIB_LITE_KERNELS_KERNEL_UTIL_H_
+#include <algorithm>
+
#include "tensorflow/contrib/lite/builtin_op_data.h"
#include "tensorflow/contrib/lite/context.h"
@@ -86,8 +88,8 @@ TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
TfLiteTensor* output,
double* multiplier);
-// Calculates the useful range of an activation layer given its activation
-// tensor.
+// Calculates the useful quantized range of an activation layer given its
+// activation tensor.
TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
TfLiteFusedActivation activation,
TfLiteTensor* output,
@@ -96,9 +98,25 @@ TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
void CalculateActivationRangeUint8(TfLiteFusedActivation activation,
TfLiteTensor* output, int32_t* act_min,
int32_t* act_max);
-void CalculateActivationRangeFloat(TfLiteFusedActivation activation,
- float* activation_min,
- float* activation_max);
+// Calculates the useful range of an activation layer given its activation
+// tensor.
+template <typename T>
+void CalculateActivationRange(TfLiteFusedActivation activation,
+ T* activation_min, T* activation_max) {
+ if (activation == kTfLiteActRelu) {
+ *activation_min = 0;
+ *activation_max = std::numeric_limits<T>::max();
+ } else if (activation == kTfLiteActRelu6) {
+ *activation_min = 0;
+ *activation_max = 6;
+ } else if (activation == kTfLiteActRelu1) {
+ *activation_min = -1;
+ *activation_max = 1;
+ } else {
+ *activation_min = std::numeric_limits<T>::lowest();
+ *activation_max = std::numeric_limits<T>::max();
+ }
+}
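One instantiation of this template per arithmetic type replaces the deleted float-only helper. A minimal usage sketch:

float float_min, float_max;
CalculateActivationRange(kTfLiteActRelu6, &float_min, &float_max);
// float_min == 0.f, float_max == 6.f

int32_t int_min, int_max;
CalculateActivationRange(kTfLiteActNone, &int_min, &int_max);
// full int32 range: lowest() .. max()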
// Return true if the given tensors have the same shape.
bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2);
diff --git a/tensorflow/contrib/lite/kernels/lstm.cc b/tensorflow/contrib/lite/kernels/lstm.cc
index 3577ae6caa..4dfc891548 100644
--- a/tensorflow/contrib/lite/kernels/lstm.cc
+++ b/tensorflow/contrib/lite/kernels/lstm.cc
@@ -306,7 +306,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const int n_output = recurrent_to_output_weights->dims->data[1];
// Check that input tensor dimensions matches with each other.
- CheckInputTensorDimensions(context, node, n_input, n_output, n_cell);
+ TF_LITE_ENSURE_OK(context, CheckInputTensorDimensions(context, node, n_input,
+ n_output, n_cell));
// Get the pointer to output, activation_state and cell_state tensors.
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
diff --git a/tensorflow/contrib/lite/kernels/lstm_test.cc b/tensorflow/contrib/lite/kernels/lstm_test.cc
index 3f5c44a63e..0266f5fe57 100644
--- a/tensorflow/contrib/lite/kernels/lstm_test.cc
+++ b/tensorflow/contrib/lite/kernels/lstm_test.cc
@@ -13,6 +13,9 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Unit test for TFLite LSTM op.
+//
+// TODO(alanchiao): add unit test with invalid input dimensions for this and its
+// variants.
#include <memory>
#include <vector>
@@ -360,14 +363,6 @@ class BaseLstmTest : public ::testing::Test {
}
EXPECT_THAT(lstm->GetOutput(),
ElementsAreArray(ArrayFloatNear(expected, tolerance)));
- for (int i = 0; i < num_outputs; ++i) {
- std::cout << lstm->GetOutput()[i] << ", ";
- }
- std::cout << std::endl;
- for (int i = 0; i < num_outputs; ++i) {
- std::cout << expected[i] << ", ";
- }
- std::cout << std::endl;
}
}
};
diff --git a/tensorflow/contrib/lite/kernels/mul.cc b/tensorflow/contrib/lite/kernels/mul.cc
index 9e01b73c49..349f3e6726 100644
--- a/tensorflow/contrib/lite/kernels/mul.cc
+++ b/tensorflow/contrib/lite/kernels/mul.cc
@@ -100,29 +100,44 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}
template <KernelType kernel_type>
-void EvalFloat(TfLiteContext* context, TfLiteNode* node,
- TfLiteMulParams* params, const OpData* data,
- const TfLiteTensor* input1, const TfLiteTensor* input2,
- TfLiteTensor* output) {
- float output_activation_min, output_activation_max;
- CalculateActivationRangeFloat(params->activation, &output_activation_min,
- &output_activation_max);
-#define TF_LITE_MUL(type, opname) \
- type::opname(GetTensorData<float>(input1), GetTensorDims(input1), \
- GetTensorData<float>(input2), GetTensorDims(input2), \
- output_activation_min, output_activation_max, \
- GetTensorData<float>(output), GetTensorDims(output))
- if (kernel_type == kReference) {
- if (data->requires_broadcast) {
- TF_LITE_MUL(reference_ops, BroadcastMul);
+void EvalMul(TfLiteContext* context, TfLiteNode* node, TfLiteMulParams* params,
+ const OpData* data, const TfLiteTensor* input1,
+ const TfLiteTensor* input2, TfLiteTensor* output) {
+#define TF_LITE_MUL(type, opname, data_type) \
+ data_type output_activation_min, output_activation_max; \
+ CalculateActivationRange(params->activation, &output_activation_min, \
+ &output_activation_max); \
+ type::opname(GetTensorData<data_type>(input1), GetTensorDims(input1), \
+ GetTensorData<data_type>(input2), GetTensorDims(input2), \
+ output_activation_min, output_activation_max, \
+ GetTensorData<data_type>(output), GetTensorDims(output))
+ if (output->type == kTfLiteInt32) {
+ if (kernel_type == kReference) {
+ if (data->requires_broadcast) {
+ TF_LITE_MUL(reference_ops, BroadcastMul, int32_t);
+ } else {
+ TF_LITE_MUL(reference_ops, Mul, int32_t);
+ }
} else {
- TF_LITE_MUL(reference_ops, Mul);
+ if (data->requires_broadcast) {
+ TF_LITE_MUL(optimized_ops, BroadcastMul, int32_t);
+ } else {
+ TF_LITE_MUL(optimized_ops, Mul, int32_t);
+ }
}
- } else {
- if (data->requires_broadcast) {
- TF_LITE_MUL(optimized_ops, BroadcastMul);
+ } else if (output->type == kTfLiteFloat32) {
+ if (kernel_type == kReference) {
+ if (data->requires_broadcast) {
+ TF_LITE_MUL(reference_ops, BroadcastMul, float);
+ } else {
+ TF_LITE_MUL(reference_ops, Mul, float);
+ }
} else {
- TF_LITE_MUL(optimized_ops, Mul);
+ if (data->requires_broadcast) {
+ TF_LITE_MUL(optimized_ops, BroadcastMul, float);
+ } else {
+ TF_LITE_MUL(optimized_ops, Mul, float);
+ }
}
}
#undef TF_LITE_MUL
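For output->type == kTfLiteInt32 with the reference kernel and no broadcast, a rough hand-expansion of TF_LITE_MUL(reference_ops, Mul, int32_t) reads (illustrative only, after macro substitution):

int32_t output_activation_min, output_activation_max;
CalculateActivationRange(params->activation, &output_activation_min,
                         &output_activation_max);
reference_ops::Mul(GetTensorData<int32_t>(input1), GetTensorDims(input1),
                   GetTensorData<int32_t>(input2), GetTensorDims(input2),
                   output_activation_min, output_activation_max,
                   GetTensorData<int32_t>(output), GetTensorDims(output));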
@@ -194,17 +209,17 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
- if (output->type == kTfLiteFloat32) {
- EvalFloat<kernel_type>(context, node, params, data, input1, input2, output);
+ if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
+ EvalMul<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt16) {
TF_LITE_ENSURE_OK(
context, EvalQuantized<kernel_type>(context, node, params, data, input1,
input2, output));
} else {
- context->ReportError(
- context,
- "Mul only supports FLOAT32 and quantized UINT8 and INT16 now, got %d.",
- output->type);
+ context->ReportError(context,
+ "Mul only supports FLOAT32, INT32 and quantized UINT8 "
+ "and INT16 now, got %d.",
+ output->type);
return kTfLiteError;
}
diff --git a/tensorflow/contrib/lite/kernels/mul_test.cc b/tensorflow/contrib/lite/kernels/mul_test.cc
index 43d56e50d2..2807550a6b 100644
--- a/tensorflow/contrib/lite/kernels/mul_test.cc
+++ b/tensorflow/contrib/lite/kernels/mul_test.cc
@@ -52,6 +52,13 @@ class FloatMulOpModel : public BaseMulOpModel {
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
+class IntegerMulOpModel : public BaseMulOpModel {
+ public:
+ using BaseMulOpModel::BaseMulOpModel;
+
+ std::vector<int32_t> GetOutput() { return ExtractVector<int32_t>(output_); }
+};
+
// For quantized Mul, the error shouldn't exceed (2*step + step^2).
// The params min=-1.0 & max=1.0 are used in the following tests.
// The tolerance value is ~0.0157.
@@ -133,6 +140,57 @@ TEST(FloatMulOpTest, WithBroadcast) {
}
}
+TEST(IntegerMulOpTest, NoActivation) {
+ IntegerMulOpModel m({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
+ ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-20, 4, 21, 40}));
+}
+
+TEST(IntegerMulOpTest, ActivationRELU_N1_TO_1) {
+ IntegerMulOpModel m({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
+ ActivationFunctionType_RELU_N1_TO_1);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1, 1, 1, 1}));
+}
+
+TEST(IntegerMulOpTest, VariousInputShapes) {
+ std::vector<std::initializer_list<int>> test_shapes = {
+ {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
+ for (int i = 0; i < test_shapes.size(); ++i) {
+ IntegerMulOpModel m({TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, {}}, ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5, 11, 1});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-20, 4, 21, 40, 121, 20}))
+ << "With shape number " << i;
+ }
+}
+
+TEST(IntegerMulOpTest, WithBroadcast) {
+ std::vector<std::initializer_list<int>> test_shapes = {
+ {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
+ for (int i = 0; i < test_shapes.size(); ++i) {
+ IntegerMulOpModel m({TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, {}}, // always a scalar
+ {TensorType_INT32, {}}, ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
+ m.PopulateTensor<int32_t>(m.input2(), {1});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(),
+ ElementsAreArray(ArrayFloatNear({-20, 2, 7, 8, 11, 20})))
+ << "With shape number " << i;
+ }
+}
+
TEST(QuantizedMulOpTest, NoActivation) {
QuantizedMulOpModel m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
{TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
diff --git a/tensorflow/contrib/lite/kernels/pooling.cc b/tensorflow/contrib/lite/kernels/pooling.cc
index 58d74c97a7..9b0487ae16 100644
--- a/tensorflow/contrib/lite/kernels/pooling.cc
+++ b/tensorflow/contrib/lite/kernels/pooling.cc
@@ -124,15 +124,21 @@ void AverageEvalFloat(TfLiteContext* context, TfLiteNode* node,
TfLitePoolParams* params, OpData* data,
const TfLiteTensor* input, TfLiteTensor* output) {
float activation_min, activation_max;
- CalculateActivationRangeFloat(params->activation, &activation_min,
- &activation_max);
-#define TF_LITE_AVERAGE_POOL(type) \
- type::AveragePool(GetTensorData<float>(input), GetTensorShape(input), \
- params->stride_width, params->stride_height, \
- data->padding.width, data->padding.height, \
- params->filter_width, params->filter_height, \
- activation_min, activation_max, \
- GetTensorData<float>(output), GetTensorShape(output))
+ CalculateActivationRange(params->activation, &activation_min,
+ &activation_max);
+#define TF_LITE_AVERAGE_POOL(type) \
+ tflite::PoolParams op_params; \
+ op_params.stride_height = params->stride_height; \
+ op_params.stride_width = params->stride_width; \
+ op_params.filter_height = params->filter_height; \
+ op_params.filter_width = params->filter_width; \
+ op_params.padding_values.height = data->padding.height; \
+ op_params.padding_values.width = data->padding.width; \
+ op_params.float_activation_min = activation_min; \
+ op_params.float_activation_max = activation_max; \
+ type::AveragePool(op_params, GetTensorShape(input), \
+ GetTensorData<float>(input), GetTensorShape(output), \
+ GetTensorData<float>(output))
if (kernel_type == kReference) {
TF_LITE_AVERAGE_POOL(reference_ops);
} else {
@@ -149,13 +155,19 @@ void AverageEvalQuantized(TfLiteContext* context, TfLiteNode* node,
int32_t activation_max;
CalculateActivationRangeUint8(params->activation, output, &activation_min,
&activation_max);
-#define TF_LITE_AVERAGE_POOL(type) \
- type::AveragePool(GetTensorData<uint8_t>(input), GetTensorShape(input), \
- params->stride_width, params->stride_height, \
- data->padding.width, data->padding.height, \
- params->filter_width, params->filter_height, \
- activation_min, activation_max, \
- GetTensorData<uint8_t>(output), GetTensorShape(output))
+#define TF_LITE_AVERAGE_POOL(type) \
+ tflite::PoolParams op_params; \
+ op_params.stride_height = params->stride_height; \
+ op_params.stride_width = params->stride_width; \
+ op_params.filter_height = params->filter_height; \
+ op_params.filter_width = params->filter_width; \
+ op_params.padding_values.height = data->padding.height; \
+ op_params.padding_values.width = data->padding.width; \
+ op_params.quantized_activation_min = activation_min; \
+ op_params.quantized_activation_max = activation_max; \
+ type::AveragePool(op_params, GetTensorShape(input), \
+ GetTensorData<uint8_t>(input), GetTensorShape(output), \
+ GetTensorData<uint8_t>(output))
if (kernel_type == kReference) {
TF_LITE_AVERAGE_POOL(reference_ops);
} else {
@@ -169,15 +181,20 @@ void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node,
TfLitePoolParams* params, OpData* data,
const TfLiteTensor* input, TfLiteTensor* output) {
float activation_min, activation_max;
- CalculateActivationRangeFloat(params->activation, &activation_min,
- &activation_max);
-#define TF_LITE_MAX_POOL(type) \
- type::MaxPool(GetTensorData<float>(input), GetTensorShape(input), \
- params->stride_width, params->stride_height, \
- data->padding.width, data->padding.height, \
- params->filter_width, params->filter_height, activation_min, \
- activation_max, GetTensorData<float>(output), \
- GetTensorShape(output))
+ CalculateActivationRange(params->activation, &activation_min,
+ &activation_max);
+#define TF_LITE_MAX_POOL(type) \
+ tflite::PoolParams op_params; \
+ op_params.stride_height = params->stride_height; \
+ op_params.stride_width = params->stride_width; \
+ op_params.filter_height = params->filter_height; \
+ op_params.filter_width = params->filter_width; \
+ op_params.padding_values.height = data->padding.height; \
+ op_params.padding_values.width = data->padding.width; \
+ op_params.float_activation_min = activation_min; \
+ op_params.float_activation_max = activation_max; \
+ type::MaxPool(op_params, GetTensorShape(input), GetTensorData<float>(input), \
+ GetTensorShape(output), GetTensorData<float>(output))
if (kernel_type == kReference) {
TF_LITE_MAX_POOL(reference_ops);
} else {
@@ -194,13 +211,19 @@ void MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node,
int32_t activation_max;
CalculateActivationRangeUint8(params->activation, output, &activation_min,
&activation_max);
-#define TF_LITE_MAX_POOL(type) \
- type::MaxPool(GetTensorData<uint8_t>(input), GetTensorShape(input), \
- params->stride_width, params->stride_height, \
- data->padding.width, data->padding.height, \
- params->filter_width, params->filter_height, activation_min, \
- activation_max, GetTensorData<uint8_t>(output), \
- GetTensorShape(output))
+#define TF_LITE_MAX_POOL(type) \
+ tflite::PoolParams op_params; \
+ op_params.stride_height = params->stride_height; \
+ op_params.stride_width = params->stride_width; \
+ op_params.filter_height = params->filter_height; \
+ op_params.filter_width = params->filter_width; \
+ op_params.padding_values.height = data->padding.height; \
+ op_params.padding_values.width = data->padding.width; \
+ op_params.quantized_activation_min = activation_min; \
+ op_params.quantized_activation_max = activation_max; \
+ type::MaxPool(op_params, GetTensorShape(input), \
+ GetTensorData<uint8_t>(input), GetTensorShape(output), \
+ GetTensorData<uint8_t>(output))
if (kernel_type == kReference) {
TF_LITE_MAX_POOL(reference_ops);
} else {
@@ -214,15 +237,20 @@ void L2EvalFloat(TfLiteContext* context, TfLiteNode* node,
TfLitePoolParams* params, OpData* data,
const TfLiteTensor* input, TfLiteTensor* output) {
float activation_min, activation_max;
- CalculateActivationRangeFloat(params->activation, &activation_min,
- &activation_max);
-#define TF_LITE_L2_POOL(type) \
- type::L2Pool(GetTensorData<float>(input), GetTensorShape(input), \
- params->stride_width, params->stride_height, \
- data->padding.width, data->padding.height, \
- params->filter_width, params->filter_height, activation_min, \
- activation_max, GetTensorData<float>(output), \
- GetTensorShape(output))
+ CalculateActivationRange(params->activation, &activation_min,
+ &activation_max);
+#define TF_LITE_L2_POOL(type) \
+ tflite::PoolParams op_params; \
+ op_params.stride_height = params->stride_height; \
+ op_params.stride_width = params->stride_width; \
+ op_params.filter_height = params->filter_height; \
+ op_params.filter_width = params->filter_width; \
+ op_params.padding_values.height = data->padding.height; \
+ op_params.padding_values.width = data->padding.width; \
+ op_params.float_activation_min = activation_min; \
+ op_params.float_activation_max = activation_max; \
+ type::L2Pool(op_params, GetTensorShape(input), GetTensorData<float>(input), \
+ GetTensorShape(output), GetTensorData<float>(output))
if (kernel_type == kReference) {
TF_LITE_L2_POOL(reference_ops);
} else {
diff --git a/tensorflow/contrib/lite/kernels/pow.cc b/tensorflow/contrib/lite/kernels/pow.cc
new file mode 100644
index 0000000000..4a539c47a8
--- /dev/null
+++ b/tensorflow/contrib/lite/kernels/pow.cc
@@ -0,0 +1,143 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
+#include "tensorflow/contrib/lite/kernels/internal/tensor.h"
+#include "tensorflow/contrib/lite/kernels/kernel_util.h"
+#include "tensorflow/contrib/lite/kernels/op_macros.h"
+
+namespace tflite {
+namespace ops {
+namespace builtin {
+namespace pow {
+namespace {
+
+// Input/output tensor index.
+constexpr int kInputTensor1 = 0;
+constexpr int kInputTensor2 = 1;
+constexpr int kOutputTensor = 0;
+
+// Op data for pow op.
+struct OpData {
+ bool requires_broadcast;
+};
+
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+ auto* data = new OpData;
+ data->requires_broadcast = false;
+ return data;
+}
+
+void Free(TfLiteContext* context, void* buffer) {
+ delete reinterpret_cast<OpData*>(buffer);
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+ TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
+ TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+ OpData* data = reinterpret_cast<OpData*>(node->user_data);
+
+ const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+ const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+ TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+
+ TF_LITE_ENSURE_EQ(context, input1->type, input2->type);
+
+ const TfLiteType type = input1->type;
+ if (type != kTfLiteInt32 && type != kTfLiteFloat32) {
+ context->ReportError(context, "Unsupported data type %d.", type);
+ return kTfLiteError;
+ }
+ output->type = type;
+
+ data->requires_broadcast = !HaveSameShapes(input1, input2);
+
+ TfLiteIntArray* output_size = nullptr;
+ if (data->requires_broadcast) {
+ TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
+ context, input1, input2, &output_size));
+ } else {
+ output_size = TfLiteIntArrayCopy(input1->dims);
+ }
+
+ return context->ResizeTensor(context, output, output_size);
+}
+
+template <typename T>
+void PowImpl(const TfLiteTensor* input1, const TfLiteTensor* input2,
+ TfLiteTensor* output, bool requires_broadcast) {
+ if (requires_broadcast) {
+ reference_ops::BroadcastPow(GetTensorData<T>(input1), GetTensorDims(input1),
+ GetTensorData<T>(input2), GetTensorDims(input2),
+ GetTensorData<T>(output),
+ GetTensorDims(output));
+ } else {
+ reference_ops::Pow(GetTensorData<T>(input1), GetTensorDims(input1),
+ GetTensorData<T>(input2), GetTensorDims(input2),
+ GetTensorData<T>(output), GetTensorDims(output));
+ }
+}
+
+TfLiteStatus CheckValue(TfLiteContext* context, const TfLiteTensor* input) {
+ const int64_t num_elements = NumElements(input);
+ const int32_t* data = GetTensorData<int32_t>(input);
+ for (int i = 0; i < num_elements; ++i) {
+ if (data[i] < 0) {
+ context->ReportError(context,
+                             "POW does not support negative values for int32.");
+ return kTfLiteError;
+ }
+ }
+ return kTfLiteOk;
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+ OpData* data = reinterpret_cast<OpData*>(node->user_data);
+
+ const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+ const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+ TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+
+ switch (output->type) {
+ case kTfLiteInt32: {
+      // TensorFlow does not support negative exponents for int32.
+ TF_LITE_ENSURE_OK(context, CheckValue(context, input2));
+ PowImpl<int32_t>(input1, input2, output, data->requires_broadcast);
+ break;
+ }
+ case kTfLiteFloat32: {
+ PowImpl<float>(input1, input2, output, data->requires_broadcast);
+ break;
+ }
+ default: {
+ context->ReportError(context, "Unsupported data type: %d", output->type);
+ return kTfLiteError;
+ }
+ }
+ return kTfLiteOk;
+}
+
+} // namespace
+} // namespace pow
+
+TfLiteRegistration* Register_POW() {
+ static TfLiteRegistration r = {pow::Init, pow::Free, pow::Prepare, pow::Eval};
+ return &r;
+}
+
+} // namespace builtin
+} // namespace ops
+} // namespace tflite
diff --git a/tensorflow/contrib/lite/kernels/pow_test.cc b/tensorflow/contrib/lite/kernels/pow_test.cc
new file mode 100644
index 0000000000..474d323bc3
--- /dev/null
+++ b/tensorflow/contrib/lite/kernels/pow_test.cc
@@ -0,0 +1,117 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/interpreter.h"
+#include "tensorflow/contrib/lite/kernels/register.h"
+#include "tensorflow/contrib/lite/kernels/test_util.h"
+#include "tensorflow/contrib/lite/model.h"
+
+namespace tflite {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+
+template <typename T>
+class PowOpModel : public SingleOpModel {
+ public:
+ PowOpModel(const TensorData& input1, const TensorData& input2,
+ const TensorData& output) {
+ input1_ = AddInput(input1);
+ input2_ = AddInput(input2);
+ output_ = AddOutput(output);
+ SetBuiltinOp(BuiltinOperator_POW, BuiltinOptions_PowOptions,
+ CreatePowOptions(builder_).Union());
+ BuildInterpreter({GetShape(input1_), GetShape(input2_)});
+ }
+
+ int input1() { return input1_; }
+ int input2() { return input2_; }
+
+ std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
+ std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
+
+ private:
+ int input1_;
+ int input2_;
+ int output_;
+};
+
+TEST(PowOpModel, Simple) {
+ PowOpModel<int32> model({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {}});
+ model.PopulateTensor<int32>(model.input1(), {12, 2, 7, 8});
+ model.PopulateTensor<int32>(model.input2(), {1, 2, 3, 1});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
+ EXPECT_THAT(model.GetOutput(), ElementsAre(12, 4, 343, 8));
+}
+
+TEST(PowOpModel, NegativeAndZeroValue) {
+ PowOpModel<int32> model({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {}});
+ model.PopulateTensor<int32>(model.input1(), {0, 2, -7, 8});
+ model.PopulateTensor<int32>(model.input2(), {1, 2, 3, 0});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
+ EXPECT_THAT(model.GetOutput(), ElementsAre(0, 4, -343, 1));
+}
+
+TEST(PowOpModel, Float) {
+ PowOpModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
+ {TensorType_FLOAT32, {1, 2, 2, 1}},
+ {TensorType_FLOAT32, {}});
+ model.PopulateTensor<float>(model.input1(), {0.3, 0.4, 0.7, 5.8});
+ model.PopulateTensor<float>(model.input2(), {0.5, 2.7, 3.1, 3.2});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
+ EXPECT_THAT(model.GetOutput(),
+ ElementsAreArray(ArrayFloatNear(
+ {0.5477226, 0.08424846, 0.33098164, 277.313}, 1e-3)));
+}
+
+TEST(PowOpModel, NegativeFloatTest) {
+ PowOpModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
+ {TensorType_FLOAT32, {1, 2, 2, 1}},
+ {TensorType_FLOAT32, {}});
+ model.PopulateTensor<float>(model.input1(), {0.3, 0.4, 0.7, 5.8});
+ model.PopulateTensor<float>(model.input2(), {0.5, -2.7, 3.1, -3.2});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
+ EXPECT_THAT(model.GetOutput(),
+ ElementsAreArray(ArrayFloatNear(
+ {0.5477226, 11.869653, 0.33098164, 0.003606}, 1e-3)));
+}
+
+TEST(PowOpModel, BroadcastTest) {
+ PowOpModel<int32> model({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1}}, {TensorType_INT32, {}});
+ model.PopulateTensor<int32>(model.input1(), {12, 2, 7, 8});
+ model.PopulateTensor<int32>(model.input2(), {4});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
+ EXPECT_THAT(model.GetOutput(), ElementsAre(20736, 16, 2401, 4096));
+}
+
+} // namespace
+} // namespace tflite
+
+int main(int argc, char** argv) {
+ ::tflite::LogToStderr();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/kernels/reduce.cc b/tensorflow/contrib/lite/kernels/reduce.cc
index 31c331a8c6..52e4084ff8 100644
--- a/tensorflow/contrib/lite/kernels/reduce.cc
+++ b/tensorflow/contrib/lite/kernels/reduce.cc
@@ -315,6 +315,99 @@ TfLiteStatus EvalSum(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
+template <KernelType kernel_type>
+TfLiteStatus EvalProd(TfLiteContext* context, TfLiteNode* node) {
+ OpContext op_context(context, node);
+ int64_t num_axis = NumElements(op_context.axis);
+ TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
+ TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
+ // Resize the output tensor if the output tensor is dynamic.
+ if (IsDynamicTensor(op_context.output)) {
+ TF_LITE_ENSURE_OK(context,
+ ResizeTempAxis(context, &op_context, resolved_axis));
+ TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
+ }
+
+#define TF_LITE_PROD(kernel_type, data_type) \
+ kernel_type::ReduceProd<>( \
+ GetTensorData<data_type>(op_context.input), \
+ op_context.input->dims->data, op_context.input->dims->size, \
+ GetTensorData<data_type>(op_context.output), \
+ op_context.output->dims->data, op_context.output->dims->size, \
+ GetTensorData<int>(op_context.axis), num_axis, \
+ op_context.params->keep_dims, GetTensorData<int>(temp_index), \
+ GetTensorData<int>(resolved_axis))
+
+ if (kernel_type == kReference) {
+ switch (op_context.input->type) {
+ case kTfLiteFloat32:
+ TF_LITE_ENSURE(context, TF_LITE_PROD(reference_ops, float));
+ break;
+ case kTfLiteInt32:
+ TF_LITE_ENSURE(context, TF_LITE_PROD(reference_ops, int));
+ break;
+ case kTfLiteInt64:
+ TF_LITE_ENSURE(context, TF_LITE_PROD(reference_ops, int64_t));
+ break;
+ case kTfLiteUInt8:
+ // TODO(wangtz): uint8 reduce_prod is not yet supported.
+ default:
+ return kTfLiteError;
+ }
+ }
+#undef TF_LITE_PROD
+ return kTfLiteOk;
+}
+
+template <KernelType kernel_type>
+TfLiteStatus EvalMax(TfLiteContext* context, TfLiteNode* node) {
+ OpContext op_context(context, node);
+ int64_t num_axis = NumElements(op_context.axis);
+ TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
+ TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
+ // Resize the output tensor if the output tensor is dynamic.
+ if (IsDynamicTensor(op_context.output)) {
+ TF_LITE_ENSURE_OK(context,
+ ResizeTempAxis(context, &op_context, resolved_axis));
+ TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
+ }
+
+#define TF_LITE_MAX(kernel_type, data_type) \
+ kernel_type::ReduceMax<>( \
+ GetTensorData<data_type>(op_context.input), \
+ op_context.input->dims->data, op_context.input->dims->size, \
+ GetTensorData<data_type>(op_context.output), \
+ op_context.output->dims->data, op_context.output->dims->size, \
+ GetTensorData<int>(op_context.axis), num_axis, \
+ op_context.params->keep_dims, GetTensorData<int>(temp_index), \
+ GetTensorData<int>(resolved_axis))
+
+ if (kernel_type == kReference) {
+ switch (op_context.input->type) {
+ case kTfLiteFloat32:
+ TF_LITE_ENSURE(context, TF_LITE_MAX(reference_ops, float));
+ break;
+ case kTfLiteInt32:
+ TF_LITE_ENSURE(context, TF_LITE_MAX(reference_ops, int));
+ break;
+ case kTfLiteInt64:
+ TF_LITE_ENSURE(context, TF_LITE_MAX(reference_ops, int64_t));
+ break;
+ case kTfLiteUInt8:
+ TF_LITE_ENSURE_EQ(context, op_context.input->params.scale,
+ op_context.output->params.scale);
+ TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point,
+ op_context.output->params.zero_point);
+ TF_LITE_ENSURE(context, TF_LITE_MAX(reference_ops, uint8_t));
+ break;
+ default:
+ return kTfLiteError;
+ }
+ }
+#undef TF_LITE_MAX
+ return kTfLiteOk;
+}
+
} // namespace reduce
TfLiteRegistration* Register_MEAN_REF() {
@@ -331,9 +424,27 @@ TfLiteRegistration* Register_SUM_REF() {
return &r;
}
+TfLiteRegistration* Register_REDUCE_PROD_REF() {
+ static TfLiteRegistration r = {reduce::Init, reduce::Free,
+ reduce::PrepareSimple,
+ reduce::EvalProd<reduce::kReference>};
+ return &r;
+}
+
+TfLiteRegistration* Register_REDUCE_MAX_REF() {
+ static TfLiteRegistration r = {reduce::Init, reduce::Free,
+ reduce::PrepareSimple,
+ reduce::EvalMax<reduce::kReference>};
+ return &r;
+}
+
// TODO(kanlig): add optimized implementation of Mean.
TfLiteRegistration* Register_MEAN() { return Register_MEAN_REF(); }
TfLiteRegistration* Register_SUM() { return Register_SUM_REF(); }
+TfLiteRegistration* Register_REDUCE_PROD() {
+ return Register_REDUCE_PROD_REF();
+}
+TfLiteRegistration* Register_REDUCE_MAX() { return Register_REDUCE_MAX_REF(); }
} // namespace builtin
} // namespace ops
diff --git a/tensorflow/contrib/lite/kernels/reduce_test.cc b/tensorflow/contrib/lite/kernels/reduce_test.cc
index 9e946822c6..7d28931ecd 100644
--- a/tensorflow/contrib/lite/kernels/reduce_test.cc
+++ b/tensorflow/contrib/lite/kernels/reduce_test.cc
@@ -25,10 +25,10 @@ using ::testing::ElementsAreArray;
class BaseOpModel : public SingleOpModel {
public:
- void SetAxis(std::initializer_list<int> data) { PopulateTensor(axis_, data); }
+ void SetAxis(const std::vector<int>& data) { PopulateTensor(axis_, data); }
template <class T>
- void SetInput(std::initializer_list<T> data) {
+ void SetInput(std::vector<T> data) {
PopulateTensor(input_, data);
}
@@ -110,14 +110,72 @@ class SumOpDynamicModel : public BaseOpModel {
}
};
+// Model for the test case where axis is a const tensor.
+class ProdOpConstModel : public BaseOpModel {
+ public:
+ ProdOpConstModel(const TensorData& input, const TensorData& output,
+ std::initializer_list<int> axis_shape,
+ std::initializer_list<int> axis, bool keep_dims) {
+ input_ = AddInput(input);
+ axis_ = AddConstInput(TensorType_INT32, axis, axis_shape);
+ output_ = AddOutput(output);
+ SetBuiltinOp(BuiltinOperator_REDUCE_PROD, BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder_, keep_dims).Union());
+ BuildInterpreter({GetShape(input_)});
+ }
+};
+
+// Model for the test case where axis is a dynamic tensor.
+class ProdOpDynamicModel : public BaseOpModel {
+ public:
+ ProdOpDynamicModel(const TensorData& input, const TensorData& output,
+ const TensorData& axis, bool keep_dims) {
+ input_ = AddInput(input);
+ axis_ = AddInput(axis);
+ output_ = AddOutput(output);
+ SetBuiltinOp(BuiltinOperator_REDUCE_PROD, BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder_, keep_dims).Union());
+ BuildInterpreter({GetShape(input_)});
+ }
+};
+
+// Model for the test case where axis is a const tensor.
+class MaxOpConstModel : public BaseOpModel {
+ public:
+ MaxOpConstModel(const TensorData& input, const TensorData& output,
+ std::initializer_list<int> axis_shape,
+ std::initializer_list<int> axis, bool keep_dims) {
+ input_ = AddInput(input);
+ axis_ = AddConstInput(TensorType_INT32, axis, axis_shape);
+ output_ = AddOutput(output);
+ SetBuiltinOp(BuiltinOperator_REDUCE_MAX, BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder_, keep_dims).Union());
+ BuildInterpreter({GetShape(input_)});
+ }
+};
+
+// Model for the test case where axis is a dynamic tensor.
+class MaxOpDynamicModel : public BaseOpModel {
+ public:
+ MaxOpDynamicModel(const TensorData& input, const TensorData& output,
+ const TensorData& axis, bool keep_dims) {
+ input_ = AddInput(input);
+ axis_ = AddInput(axis);
+ output_ = AddOutput(output);
+ SetBuiltinOp(BuiltinOperator_REDUCE_MAX, BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder_, keep_dims).Union());
+ BuildInterpreter({GetShape(input_)});
+ }
+};
+
// For quantized ops, the error shouldn't exceed one quantization step.
float GetTolerance(int min, int max) { return (max - min) / 255.0; }
// Tests for reduce_mean
TEST(ConstFloatMeanOpTest, NotKeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
{4}, {1, 0, -3, -3}, false);
m.SetInput(data);
@@ -127,9 +185,9 @@ TEST(ConstFloatMeanOpTest, NotKeepDims) {
}
TEST(ConstFloatMeanOpTest, KeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
{2}, {0, 2}, true);
m.SetInput(data);
@@ -140,13 +198,13 @@ TEST(ConstFloatMeanOpTest, KeepDims) {
}
TEST(DynamicFloatMeanOpTest, NotKeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
false);
- std::initializer_list<int> axis = {1, 0, -3, -3};
+ std::vector<int> axis = {1, 0, -3, -3};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
@@ -155,13 +213,13 @@ TEST(DynamicFloatMeanOpTest, NotKeepDims) {
}
TEST(DynamicFloatMeanOpTest, KeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}},
true);
- std::initializer_list<int> axis = {0, 2};
+ std::vector<int> axis = {0, 2};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
@@ -171,10 +229,10 @@ TEST(DynamicFloatMeanOpTest, KeepDims) {
}
TEST(DynamicFloatMeanOpTest, Scale) {
- std::initializer_list<float> data = {9.527};
+ std::vector<float> data = {9.527};
MeanOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}},
{TensorType_INT32, {1}}, true);
- std::initializer_list<int> axis = {0};
+ std::vector<int> axis = {0};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
@@ -185,7 +243,7 @@ TEST(DynamicFloatMeanOpTest, Scale) {
TEST(ConstUint8MeanOpTest, NotKeepDims) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
- std::initializer_list<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+ std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
MeanOpConstModel m({TensorType_UINT8, {1, 3, 2}, -1.0, 1.0},
{TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {1}, false);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
@@ -197,7 +255,7 @@ TEST(ConstUint8MeanOpTest, NotKeepDims) {
TEST(ConstUint8MeanOpTest, KeepDims) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
- std::initializer_list<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+ std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
MeanOpConstModel m({TensorType_UINT8, {3, 2}, -1.0, 1.0},
{TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {1}, true);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
@@ -210,11 +268,11 @@ TEST(ConstUint8MeanOpTest, KeepDims) {
TEST(DynamicUint8MeanOpTest, NotKeepDims) {
float kQuantizedTolerance = GetTolerance(-5.0, 2.0);
- std::initializer_list<float> data = {1.3, -4.8, -3.6, 0.24};
+ std::vector<float> data = {1.3, -4.8, -3.6, 0.24};
MeanOpDynamicModel m({TensorType_UINT8, {2, 2}, -5.0, 2.0},
{TensorType_UINT8, {2}, -5.0, 2.0},
{TensorType_INT32, {1}}, false);
- std::initializer_list<int> axis = {1};
+ std::vector<int> axis = {1};
m.SetAxis(axis);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.Invoke();
@@ -226,11 +284,11 @@ TEST(DynamicUint8MeanOpTest, NotKeepDims) {
TEST(DynamicUint8MeanOpTest, KeepDims) {
float kQuantizedTolerance = GetTolerance(-10.0, 12.0);
- std::initializer_list<float> data = {11.14, -0.14, 7.423, 0.879};
+ std::vector<float> data = {11.14, -0.14, 7.423, 0.879};
MeanOpDynamicModel m({TensorType_UINT8, {2, 2}, -10.0, 12.0},
{TensorType_UINT8, {2}, -10.0, 12.0},
{TensorType_INT32, {1}}, true);
- std::initializer_list<int> axis = {0};
+ std::vector<int> axis = {0};
m.SetAxis(axis);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.Invoke();
@@ -243,9 +301,9 @@ TEST(DynamicUint8MeanOpTest, KeepDims) {
// Tests for reduce_sum
TEST(ConstFloatSumOpTest, NotKeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
SumOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
{4}, {1, 0, -3, -3}, false);
m.SetInput(data);
@@ -256,9 +314,9 @@ TEST(ConstFloatSumOpTest, NotKeepDims) {
}
TEST(ConstFloatSumOpTest, KeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
SumOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
{2}, {0, 2}, true);
m.SetInput(data);
@@ -269,13 +327,13 @@ TEST(ConstFloatSumOpTest, KeepDims) {
}
TEST(DynamicFloatSumOpTest, NotKeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
SumOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
false);
- std::initializer_list<int> axis = {1, 0, -3, -3};
+ std::vector<int> axis = {1, 0, -3, -3};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
@@ -285,12 +343,12 @@ TEST(DynamicFloatSumOpTest, NotKeepDims) {
}
TEST(DynamicFloatSumOpTest, KeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
SumOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}}, true);
- std::initializer_list<int> axis = {0, 2};
+ std::vector<int> axis = {0, 2};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
@@ -300,10 +358,10 @@ TEST(DynamicFloatSumOpTest, KeepDims) {
}
TEST(DynamicFloatSumOpTest, Scale) {
- std::initializer_list<float> data = {9.527};
+ std::vector<float> data = {9.527};
SumOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}},
{TensorType_INT32, {1}}, true);
- std::initializer_list<int> axis = {0};
+ std::vector<int> axis = {0};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
@@ -313,7 +371,7 @@ TEST(DynamicFloatSumOpTest, Scale) {
TEST(ConstUint8SumOpTest, NotKeepDims) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
- std::initializer_list<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+ std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
SumOpConstModel m({TensorType_UINT8, {1, 3, 2}, -1.0, 1.0},
{TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {1}, false);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
@@ -326,7 +384,7 @@ TEST(ConstUint8SumOpTest, NotKeepDims) {
TEST(ConstUint8SumOpTest, KeepDims) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
- std::initializer_list<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+ std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
SumOpConstModel m({TensorType_UINT8, {3, 2}, -1.0, 1.0},
{TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {1}, true);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
@@ -339,11 +397,11 @@ TEST(ConstUint8SumOpTest, KeepDims) {
TEST(DynamicUint8SumOpTest, NotKeepDims) {
float kQuantizedTolerance = GetTolerance(-5.0, 2.0);
- std::initializer_list<float> data = {1.3, -4.8, -3.6, 0.24};
+ std::vector<float> data = {1.3, -4.8, -3.6, 0.24};
SumOpDynamicModel m({TensorType_UINT8, {2, 2}, -5.0, 2.0},
{TensorType_UINT8, {2}, -5.0, 2.0},
{TensorType_INT32, {1}}, false);
- std::initializer_list<int> axis = {1};
+ std::vector<int> axis = {1};
m.SetAxis(axis);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.Invoke();
@@ -355,11 +413,11 @@ TEST(DynamicUint8SumOpTest, NotKeepDims) {
TEST(DynamicUint8SumOpTest, KeepDims) {
float kQuantizedTolerance = GetTolerance(-10.0, 12.0);
- std::initializer_list<float> data = {11.14, -0.14, 7.423, 0.879};
+ std::vector<float> data = {11.14, -0.14, 7.423, 0.879};
SumOpDynamicModel m({TensorType_UINT8, {2, 2}, -10.0, 12.0},
{TensorType_UINT8, {2}, -10.0, 12.0},
{TensorType_INT32, {1}}, true);
- std::initializer_list<int> axis = {0};
+ std::vector<int> axis = {0};
m.SetAxis(axis);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.Invoke();
@@ -369,6 +427,209 @@ TEST(DynamicUint8SumOpTest, KeepDims) {
ElementsAreArray(ArrayFloatNear({6.47059, 10.698}, kQuantizedTolerance)));
}
+// Tests for reduce_prod
+
+TEST(ConstFloatProdOpTest, NotKeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ ProdOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
+ {4}, {1, 0, -3, -3}, false);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+ EXPECT_THAT(
+ m.GetOutput<float>(),
+ ElementsAreArray(ArrayFloatNear({3.162341376e+11, 1.9619905536e+12})));
+}
+
+TEST(ConstFloatProdOpTest, KeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ ProdOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
+ {2}, {0, 2}, true);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
+ EXPECT_THAT(m.GetOutput<float>(),
+ ElementsAreArray(
+ ArrayFloatNear({7.74592e+06, 1.197504e+08, 6.6889152e+08})));
+}
+
+TEST(DynamicFloatProdOpTest, NotKeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ ProdOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
+ {TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
+ false);
+ std::vector<int> axis = {1, 0, -3, -3};
+ m.SetAxis(axis);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+ EXPECT_THAT(
+ m.GetOutput<float>(),
+ ElementsAreArray(ArrayFloatNear({3.16234143225e+11, 1.9619905536e+12})));
+}
+
+TEST(DynamicFloatProdOpTest, KeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ ProdOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
+ {TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}},
+ true);
+ std::vector<int> axis = {0, 2};
+ m.SetAxis(axis);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
+ EXPECT_THAT(m.GetOutput<float>(),
+ ElementsAreArray(
+ ArrayFloatNear({7.74592e+06, 1.197504e+08, 6.6889152e+08})));
+}
+
+TEST(DynamicFloatProdOpTest, Scale) {
+ std::vector<float> data = {9.527};
+ ProdOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}},
+ {TensorType_INT32, {1}}, true);
+ std::vector<int> axis = {0};
+ m.SetAxis(axis);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1}));
+ EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({9.527})));
+}
+
+// Tests for reduce_max
+
+TEST(ConstFloatMaxOpTest, NotKeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ MaxOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
+ {4}, {1, 0, -3, -3}, false);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+ EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({23, 24})));
+}
+
+TEST(ConstFloatMaxOpTest, KeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ MaxOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
+ {2}, {0, 2}, true);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
+ EXPECT_THAT(m.GetOutput<float>(),
+ ElementsAreArray(ArrayFloatNear({20, 22, 24})));
+}
+
+TEST(DynamicFloatMaxOpTest, NotKeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ MaxOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
+ {TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
+ false);
+ std::vector<int> axis = {1, 0, -3, -3};
+ m.SetAxis(axis);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+ EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({23, 24})));
+}
+
+TEST(DynamicFloatMaxOpTest, KeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ MaxOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
+ {TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}}, true);
+ std::vector<int> axis = {0, 2};
+ m.SetAxis(axis);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
+ EXPECT_THAT(m.GetOutput<float>(),
+ ElementsAreArray(ArrayFloatNear({20, 22, 24})));
+}
+
+TEST(DynamicFloatMaxOpTest, Scale) {
+ std::vector<float> data = {9.527};
+ MaxOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}},
+ {TensorType_INT32, {1}}, true);
+ std::vector<int> axis = {0};
+ m.SetAxis(axis);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1}));
+ EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({9.527})));
+}
+
+TEST(ConstUint8MaxOpTest, NotKeepDims) {
+ float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
+ std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+ MaxOpConstModel m({TensorType_UINT8, {1, 3, 2}, -1.0, 1.0},
+ {TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {1}, false);
+ m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
+ EXPECT_THAT(m.GetDequantizedOutput(),
+ ElementsAreArray(
+ ArrayFloatNear({0.501961, 0.603922}, kQuantizedTolerance)));
+}
+
+TEST(ConstUint8MaxOpTest, KeepDims) {
+ float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
+ std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+ MaxOpConstModel m({TensorType_UINT8, {3, 2}, -1.0, 1.0},
+ {TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {1}, true);
+ m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1}));
+ EXPECT_THAT(m.GetDequantizedOutput(),
+ ElementsAreArray(
+ ArrayFloatNear({0.4, 0.4, 0.603922}, kQuantizedTolerance)));
+}
+
+TEST(DynamicUint8MaxOpTest, NotKeepDims) {
+ float kQuantizedTolerance = GetTolerance(-5.0, 2.0);
+ std::vector<float> data = {1.3, -4.8, -3.6, 0.24};
+ MaxOpDynamicModel m({TensorType_UINT8, {2, 2}, -5.0, 2.0},
+ {TensorType_UINT8, {2}, -5.0, 2.0},
+ {TensorType_INT32, {1}}, false);
+ std::vector<int> axis = {1};
+ m.SetAxis(axis);
+ m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+ EXPECT_THAT(m.GetDequantizedOutput(),
+ ElementsAreArray(
+ ArrayFloatNear({1.2902, 0.247059}, kQuantizedTolerance)));
+}
+
+TEST(DynamicUint8MaxOpTest, KeepDims) {
+ float kQuantizedTolerance = GetTolerance(-10.0, 12.0);
+ std::vector<float> data = {11.14, -0.14, 7.423, 0.879};
+ MaxOpDynamicModel m({TensorType_UINT8, {2, 2}, -10.0, 12.0},
+ {TensorType_UINT8, {2}, -10.0, 12.0},
+ {TensorType_INT32, {1}}, true);
+ std::vector<int> axis = {0};
+ m.SetAxis(axis);
+ m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
+ EXPECT_THAT(m.GetDequantizedOutput(),
+ ElementsAreArray(
+ ArrayFloatNear({11.1294, 0.862745}, kQuantizedTolerance)));
+}
+
} // namespace
} // namespace tflite
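
Every quantized expectation above is bounded by GetTolerance(min, max), which is one uint8 quantization step: for a [-1, 1] range that is 2 / 255 ≈ 0.00784. The sketch below works through why the uint8 max tests can reduce in the integer domain and still land within one step of the float answer, provided input and output share scale and zero point, which is exactly the invariant EvalMax enforces with TF_LITE_ENSURE_EQ. The zero point of 128 is an assumption for illustration, not a value read out of TFLite.

#include <algorithm>
#include <cmath>
#include <vector>

int main() {
  const float scale = 2.0f / 255.0f;  // one step of a [-1, 1] uint8 range
  const int zero_point = 128;         // assumed for illustration

  std::vector<float> data = {0.4f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f};
  int qmax = 0;
  for (float v : data) {
    // Quantize with clamping to [0, 255] and track the max in integers.
    int q = static_cast<int>(std::round(v / scale)) + zero_point;
    qmax = std::max(qmax, std::min(std::max(q, 0), 255));
  }
  // Dequantizing the integer max recovers the float max to within one step:
  // 0.6 quantizes to 205 and dequantizes to ~0.603922, the exact value the
  // ConstUint8MaxOpTest expectations above use.
  const float max_f = scale * static_cast<float>(qmax - zero_point);
  return max_f > 0.0f ? 0 : 1;
}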
diff --git a/tensorflow/contrib/lite/kernels/register.cc b/tensorflow/contrib/lite/kernels/register.cc
index 67f6caea67..f0f2757277 100644
--- a/tensorflow/contrib/lite/kernels/register.cc
+++ b/tensorflow/contrib/lite/kernels/register.cc
@@ -82,6 +82,7 @@ TfLiteRegistration* Register_PRELU();
TfLiteRegistration* Register_MAXIMUM();
TfLiteRegistration* Register_MINIMUM();
TfLiteRegistration* Register_ARG_MAX();
+TfLiteRegistration* Register_ARG_MIN();
TfLiteRegistration* Register_GREATER();
TfLiteRegistration* Register_GREATER_EQUAL();
TfLiteRegistration* Register_LESS();
@@ -90,6 +91,8 @@ TfLiteRegistration* Register_FLOOR();
TfLiteRegistration* Register_TILE();
TfLiteRegistration* Register_NEG();
TfLiteRegistration* Register_SUM();
+TfLiteRegistration* Register_REDUCE_PROD();
+TfLiteRegistration* Register_REDUCE_MAX();
TfLiteRegistration* Register_SELECT();
TfLiteRegistration* Register_SLICE();
TfLiteRegistration* Register_SIN();
@@ -101,6 +104,8 @@ TfLiteRegistration* Register_NOT_EQUAL();
TfLiteRegistration* Register_SQRT();
TfLiteRegistration* Register_RSQRT();
TfLiteRegistration* Register_SHAPE();
+TfLiteRegistration* Register_POW();
+TfLiteRegistration* Register_FAKE_QUANT();
BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_RELU, Register_RELU());
@@ -122,7 +127,9 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP, Register_EMBEDDING_LOOKUP());
AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
Register_EMBEDDING_LOOKUP_SPARSE());
- AddBuiltin(BuiltinOperator_FULLY_CONNECTED, Register_FULLY_CONNECTED());
+ AddBuiltin(BuiltinOperator_FULLY_CONNECTED, Register_FULLY_CONNECTED(),
+ /* min_version */ 1,
+ /* max_version */ 2);
AddBuiltin(BuiltinOperator_LSH_PROJECTION, Register_LSH_PROJECTION());
AddBuiltin(BuiltinOperator_HASHTABLE_LOOKUP, Register_HASHTABLE_LOOKUP());
AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX());
@@ -164,6 +171,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM());
AddBuiltin(BuiltinOperator_MINIMUM, Register_MINIMUM());
AddBuiltin(BuiltinOperator_ARG_MAX, Register_ARG_MAX());
+ AddBuiltin(BuiltinOperator_ARG_MIN, Register_ARG_MIN());
AddBuiltin(BuiltinOperator_GREATER, Register_GREATER());
AddBuiltin(BuiltinOperator_GREATER_EQUAL, Register_GREATER_EQUAL());
AddBuiltin(BuiltinOperator_LESS, Register_LESS());
@@ -176,6 +184,8 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, Register_TRANSPOSE_CONV());
AddBuiltin(BuiltinOperator_TILE, Register_TILE());
AddBuiltin(BuiltinOperator_SUM, Register_SUM());
+ AddBuiltin(BuiltinOperator_REDUCE_PROD, Register_REDUCE_PROD());
+ AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX());
AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS());
AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE());
AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL());
@@ -183,6 +193,8 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_SQRT, Register_SQRT());
AddBuiltin(BuiltinOperator_RSQRT, Register_RSQRT());
AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE());
+ AddBuiltin(BuiltinOperator_POW, Register_POW());
+ AddBuiltin(BuiltinOperator_FAKE_QUANT, Register_FAKE_QUANT(), 1, 2);
// TODO(andrewharp, ahentz): Move these somewhere more appropriate so that
// custom ops aren't always included by default.
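
Two registrations above now pass an operator version range: FULLY_CONNECTED and FAKE_QUANT register versions 1 through 2. Below is a minimal sketch of the version-aware lookup this implies, using a simplified stand-in for TfLiteRegistration; whether BuiltinOpResolver really stores one table entry per version is an implementation detail this hunk does not show.

#include <map>
#include <utility>

struct Registration { int version = 1; };  // stand-in for TfLiteRegistration

class VersionedResolver {
 public:
  // Register one lookup entry per version in [min_version, max_version],
  // mirroring AddBuiltin(..., /* min_version */ 1, /* max_version */ 2).
  void AddBuiltin(int op, Registration r, int min_version = 1,
                  int max_version = 1) {
    for (int v = min_version; v <= max_version; ++v) {
      r.version = v;
      table_[{op, v}] = r;
    }
  }
  // A model built against version 2 of an op only resolves if that exact
  // (op, version) pair was registered.
  const Registration* Find(int op, int version) const {
    auto it = table_.find({op, version});
    return it == table_.end() ? nullptr : &it->second;
  }

 private:
  std::map<std::pair<int, int>, Registration> table_;
};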
diff --git a/tensorflow/contrib/lite/kernels/select.cc b/tensorflow/contrib/lite/kernels/select.cc
index 9b6cee3cb5..3cdb5db209 100644
--- a/tensorflow/contrib/lite/kernels/select.cc
+++ b/tensorflow/contrib/lite/kernels/select.cc
@@ -89,6 +89,9 @@ TfLiteStatus SelectEval(TfLiteContext* context, TfLiteNode* node) {
case kTfLiteUInt8: \
TF_LITE_SELECT(uint8_t, op); \
break; \
+ case kTfLiteInt16: \
+ TF_LITE_SELECT(int16_t, op); \
+ break; \
case kTfLiteInt32: \
TF_LITE_SELECT(int32_t, op); \
break; \
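
The new kTfLiteInt16 case simply routes one more type through the existing TF_LITE_SELECT macro. For reference, the element-wise semantics the macro instantiates per type reduce to the sketch below (broadcasting of a lower-rank condition tensor is omitted):

#include <cstddef>
#include <cstdint>
#include <vector>

// output[i] = condition[i] ? x[i] : y[i]. For example, Select<int16_t>(
// {false, true, false, false}, {1, 2, 3, 4}, {5, 6, 7, 8}) == {5, 2, 7, 8},
// matching the SelectInt16 test added in the next file.
template <typename T>
std::vector<T> Select(const std::vector<bool>& condition,
                      const std::vector<T>& x, const std::vector<T>& y) {
  std::vector<T> out(condition.size());
  for (size_t i = 0; i < condition.size(); ++i)
    out[i] = condition[i] ? x[i] : y[i];
  return out;
}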
diff --git a/tensorflow/contrib/lite/kernels/select_test.cc b/tensorflow/contrib/lite/kernels/select_test.cc
index 4664b9acb4..5b2e61cd29 100644
--- a/tensorflow/contrib/lite/kernels/select_test.cc
+++ b/tensorflow/contrib/lite/kernels/select_test.cc
@@ -96,6 +96,19 @@ TEST(SelectOpTest, SelectUInt8) {
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
+TEST(SelectOpTest, SelectInt16) {
+ SelectOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, {1, 1, 1, 4},
+ TensorType_INT16);
+
+ model.PopulateTensor<bool>(model.input1(), {false, true, false, false});
+ model.PopulateTensor<int16_t>(model.input2(), {1, 2, 3, 4});
+ model.PopulateTensor<int16_t>(model.input3(), {5, 6, 7, 8});
+ model.Invoke();
+
+ EXPECT_THAT(model.GetOutput<int16_t>(), ElementsAreArray({5, 2, 7, 8}));
+ EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
+}
+
TEST(SelectOpTest, SelectInt32) {
SelectOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, {1, 1, 1, 4},
TensorType_INT32);
diff --git a/tensorflow/contrib/lite/kernels/strided_slice.cc b/tensorflow/contrib/lite/kernels/strided_slice.cc
index 725dd8105a..bed2117f9a 100644
--- a/tensorflow/contrib/lite/kernels/strided_slice.cc
+++ b/tensorflow/contrib/lite/kernels/strided_slice.cc
@@ -121,10 +121,19 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
int32_t begin = GetBeginValueAtIndex(op_context, idx);
int32_t end = GetEndValueAtIndex(op_context, idx);
+ // When shrinking an axis, the end position does not matter (and can be
+ // incorrect when negative indexing is used, see Issue #19260). Always use
+  // begin + 1 to generate a length-1 slice, since begin has
+ // already been adjusted for negative indices by GetBeginValueAtIndex.
+ const bool shrink_axis = op_context->params->shrink_axis_mask & (1 << idx);
+ if (shrink_axis) {
+ end = begin + 1;
+ }
+
// This is valid for both positive and negative strides
int32_t dim_shape = ceil((end - begin) / static_cast<float>(stride));
dim_shape = dim_shape < 0 ? 0 : dim_shape;
- if (!(op_context->params->shrink_axis_mask & (1 << idx))) {
+ if (!shrink_axis) {
output_shape_vector.push_back(dim_shape);
}
}
@@ -204,13 +213,15 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
int begin_mask =
ReverseMaskBits(op_context.params->begin_mask, op_context.dims);
int end_mask = ReverseMaskBits(op_context.params->end_mask, op_context.dims);
-
-#define TF_LITE_STRIDED_SLICE(kernel_type, data_type) \
- kernel_type::StridedSlice(GetTensorData<data_type>(op_context.input), \
- GetTensorDims(op_context.input), begin_mask, \
- end_mask, starts, stops, strides, \
- GetTensorData<data_type>(op_context.output), \
- GetTensorDims(op_context.output))
+ int shrink_axis_mask =
+ ReverseMaskBits(op_context.params->shrink_axis_mask, op_context.dims);
+
+#define TF_LITE_STRIDED_SLICE(kernel_type, data_type) \
+ kernel_type::StridedSlice( \
+ GetTensorData<data_type>(op_context.input), \
+ GetTensorDims(op_context.input), begin_mask, end_mask, shrink_axis_mask, \
+ starts, stops, strides, GetTensorData<data_type>(op_context.output), \
+ GetTensorDims(op_context.output))
switch (op_context.input->type) {
case kTfLiteFloat32:
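
The shrink-axis change above exists because the model-supplied end index can wrap differently from begin once negative indexing is involved (Issue #19260). Here is a worked sketch of the fix, assuming begin has already been wrapped to a non-negative index as GetBeginValueAtIndex does; SliceLength is an illustrative name, not a TFLite function.

#include <cmath>

int SliceLength(int begin, int end, int stride, bool shrink_axis) {
  // When shrinking, ignore the supplied end entirely: begin + 1 always
  // produces the intended length-1 slice.
  if (shrink_axis) end = begin + 1;
  int len = static_cast<int>(
      std::ceil((end - begin) / static_cast<float>(stride)));
  return len < 0 ? 0 : len;  // clamp, as ResizeOutputTensor does
}

// tf.range(4)[-1]: begin = -1 wraps to 3, but end arrives as 0.
// Without the fix: ceil((0 - 3) / 1.0) = -3, clamped to 0 -> empty slice.
// With the fix:    end = 4, so ceil((4 - 3) / 1.0) = 1 -> one element, as
// the new In1D_ShrinkAxisMask1_NegativeSlice test verifies.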
diff --git a/tensorflow/contrib/lite/kernels/strided_slice_test.cc b/tensorflow/contrib/lite/kernels/strided_slice_test.cc
index e2be41d958..c5d4f9affb 100644
--- a/tensorflow/contrib/lite/kernels/strided_slice_test.cc
+++ b/tensorflow/contrib/lite/kernels/strided_slice_test.cc
@@ -383,6 +383,45 @@ TEST(StridedSliceOpTest, In1D_ShrinkAxisMask1) {
EXPECT_THAT(m.GetOutput(), ElementsAreArray({2}));
}
+TEST(StridedSliceOpTest, In1D_ShrinkAxisMask1_NegativeSlice) {
+ // This is equivalent to tf.range(4)[-1].
+ StridedSliceOpModel<> m({4}, {1}, {1}, {1}, 0, 0, 0, 0, 1);
+ m.SetInput({0, 1, 2, 3});
+ m.SetBegin({-1});
+ m.SetEnd({0});
+ m.SetStrides({1});
+
+ m.Invoke();
+ EXPECT_TRUE(m.GetOutputShape().empty());
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({3}));
+}
+
+TEST(StridedSliceOpTest, In2D_ShrinkAxis3_NegativeSlice) {
+ // This is equivalent to tf.range(4)[:, tf.newaxis][-2, -1].
+ StridedSliceOpModel<> m({4, 1}, {2}, {2}, {2}, 0, 0, 0, 0, 3);
+ m.SetInput({0, 1, 2, 3});
+ m.SetBegin({-2, -1});
+ m.SetEnd({-1, 0});
+ m.SetStrides({1, 1});
+
+ m.Invoke();
+ EXPECT_TRUE(m.GetOutputShape().empty());
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({2}));
+}
+
+TEST(StridedSliceOpTest, In2D_ShrinkAxis2_BeginEndAxis1_NegativeSlice) {
+ // This is equivalent to tf.range(4)[:, tf.newaxis][:, -1].
+ StridedSliceOpModel<> m({4, 1}, {2}, {2}, {2}, 1, 1, 0, 0, 2);
+ m.SetInput({0, 1, 2, 3});
+ m.SetBegin({0, -1});
+ m.SetEnd({0, 0});
+ m.SetStrides({1, 1});
+
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4}));
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 1, 2, 3}));
+}
+
TEST(StridedSliceOpTest, In1D_BeginMaskShrinkAxisMask1) {
StridedSliceOpModel<> m({4}, {1}, {1}, {1}, 1, 0, 0, 0, 1);
m.SetInput({1, 2, 3, 4});
@@ -394,17 +433,6 @@ TEST(StridedSliceOpTest, In1D_BeginMaskShrinkAxisMask1) {
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1}));
}
-TEST(StridedSliceOpTest, In1D_NegativeBeginNegativeStrideShrinkAxisMask1) {
- StridedSliceOpModel<> m({4}, {1}, {1}, {1}, 0, 0, 0, 0, 1);
- m.SetInput({1, 2, 3, 4});
- m.SetBegin({-2});
- m.SetEnd({-3});
- m.SetStrides({-1});
- m.Invoke();
- EXPECT_TRUE(m.GetOutputShape().empty());
- EXPECT_THAT(m.GetOutput(), ElementsAreArray({3}));
-}
-
TEST(StridedSliceOpTest, In2D_ShrinkAxisMask1) {
StridedSliceOpModel<> m({2, 3}, {2}, {2}, {2}, 0, 0, 0, 0, 1);
m.SetInput({1, 2, 3, 4, 5, 6});
diff --git a/tensorflow/contrib/lite/kernels/sub.cc b/tensorflow/contrib/lite/kernels/sub.cc
index a8b8035899..541c85f756 100644
--- a/tensorflow/contrib/lite/kernels/sub.cc
+++ b/tensorflow/contrib/lite/kernels/sub.cc
@@ -78,29 +78,44 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}
template <KernelType kernel_type>
-void EvalFloat(TfLiteContext* context, TfLiteNode* node,
- TfLiteSubParams* params, const OpData* data,
- const TfLiteTensor* input1, const TfLiteTensor* input2,
- TfLiteTensor* output) {
- float output_activation_min, output_activation_max;
- CalculateActivationRangeFloat(params->activation, &output_activation_min,
- &output_activation_max);
-#define TF_LITE_SUB(type, opname) \
- type::opname(GetTensorData<float>(input1), GetTensorDims(input1), \
- GetTensorData<float>(input2), GetTensorDims(input2), \
- output_activation_min, output_activation_max, \
- GetTensorData<float>(output), GetTensorDims(output))
- if (kernel_type == kReference) {
- if (data->requires_broadcast) {
- TF_LITE_SUB(reference_ops, BroadcastSub);
+void EvalSub(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params,
+ const OpData* data, const TfLiteTensor* input1,
+ const TfLiteTensor* input2, TfLiteTensor* output) {
+#define TF_LITE_SUB(type, opname, data_type) \
+ data_type output_activation_min, output_activation_max; \
+ CalculateActivationRange(params->activation, &output_activation_min, \
+ &output_activation_max); \
+ type::opname(GetTensorData<data_type>(input1), GetTensorDims(input1), \
+ GetTensorData<data_type>(input2), GetTensorDims(input2), \
+ output_activation_min, output_activation_max, \
+ GetTensorData<data_type>(output), GetTensorDims(output))
+ if (output->type == kTfLiteInt32) {
+ if (kernel_type == kReference) {
+ if (data->requires_broadcast) {
+ TF_LITE_SUB(reference_ops, BroadcastSub, int32_t);
+ } else {
+ TF_LITE_SUB(reference_ops, Sub, int32_t);
+ }
} else {
- TF_LITE_SUB(reference_ops, Sub);
+ if (data->requires_broadcast) {
+ TF_LITE_SUB(optimized_ops, BroadcastSub, int32_t);
+ } else {
+ TF_LITE_SUB(optimized_ops, Sub, int32_t);
+ }
}
- } else {
- if (data->requires_broadcast) {
- TF_LITE_SUB(optimized_ops, BroadcastSub);
+ } else if (output->type == kTfLiteFloat32) {
+ if (kernel_type == kReference) {
+ if (data->requires_broadcast) {
+ TF_LITE_SUB(reference_ops, BroadcastSub, float);
+ } else {
+ TF_LITE_SUB(reference_ops, Sub, float);
+ }
} else {
- TF_LITE_SUB(optimized_ops, Sub);
+ if (data->requires_broadcast) {
+ TF_LITE_SUB(optimized_ops, BroadcastSub, float);
+ } else {
+ TF_LITE_SUB(optimized_ops, Sub, float);
+ }
}
}
#undef TF_LITE_SUB
@@ -171,14 +186,15 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
- if (output->type == kTfLiteFloat32) {
- EvalFloat<kernel_type>(context, node, params, data, input1, input2, output);
+ if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
+ EvalSub<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8) {
EvalQuantized<kernel_type>(context, node, params, data, input1, input2,
output);
} else {
context->ReportError(
- context, "output type %d is not supported, requires float|uint8 types.",
+ context,
+ "output type %d is not supported, requires float|uint8|int32 types.",
output->type);
return kTfLiteError;
}
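
The reworked TF_LITE_SUB macro derives a per-type clamp range from the fused activation via CalculateActivationRange, which is what lets one code path serve both float and the newly supported int32. A sketch of that idea under simplified, illustrative names (only NONE and RELU_N1_TO_1 are shown):

#include <algorithm>
#include <cstdint>
#include <limits>

enum class Activation { kNone, kReluN1To1 };

// Fold the activation into an [act_min, act_max] range for any value type.
template <typename T>
void ActivationRange(Activation a, T* act_min, T* act_max) {
  if (a == Activation::kReluN1To1) {
    *act_min = static_cast<T>(-1);  // RELU_N1_TO_1 clamps to [-1, 1]
    *act_max = static_cast<T>(1);
  } else {
    *act_min = std::numeric_limits<T>::lowest();
    *act_max = std::numeric_limits<T>::max();
  }
}

// Element-wise subtraction followed by the activation clamp.
template <typename T>
T SubWithActivation(T a, T b, T act_min, T act_max) {
  return std::min(std::max(static_cast<T>(a - b), act_min), act_max);
}

// SubWithActivation<int32_t>(-20, 1, -1, 1) == -1, which is the first value
// the IntegerSubOpModel RELU_N1_TO_1 test in the next file expects.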
diff --git a/tensorflow/contrib/lite/kernels/sub_test.cc b/tensorflow/contrib/lite/kernels/sub_test.cc
index ff07aeec49..5978c574d3 100644
--- a/tensorflow/contrib/lite/kernels/sub_test.cc
+++ b/tensorflow/contrib/lite/kernels/sub_test.cc
@@ -52,6 +52,13 @@ class FloatSubOpModel : public BaseSubOpModel {
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
+class IntegerSubOpModel : public BaseSubOpModel {
+ public:
+ using BaseSubOpModel::BaseSubOpModel;
+
+ std::vector<int32_t> GetOutput() { return ExtractVector<int32_t>(output_); }
+};
+
class QuantizedSubOpModel : public BaseSubOpModel {
public:
using BaseSubOpModel::BaseSubOpModel;
@@ -125,6 +132,57 @@ TEST(FloatSubOpModel, WithBroadcast) {
}
}
+TEST(IntegerSubOpModel, NoActivation) {
+ IntegerSubOpModel m({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
+ ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-21, 0, 4, 3}));
+}
+
+TEST(IntegerSubOpModel, ActivationRELU_N1_TO_1) {
+ IntegerSubOpModel m({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
+ ActivationFunctionType_RELU_N1_TO_1);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1, 0, 1, 1}));
+}
+
+TEST(IntegerSubOpModel, VariousInputShapes) {
+ std::vector<std::initializer_list<int>> test_shapes = {
+ {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
+ for (int i = 0; i < test_shapes.size(); ++i) {
+ IntegerSubOpModel m({TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, {}}, ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5, 11, 1});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-21, 0, 4, 3, 0, 19}))
+ << "With shape number " << i;
+ }
+}
+
+TEST(IntegerSubOpModel, WithBroadcast) {
+ std::vector<std::initializer_list<int>> test_shapes = {
+ {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
+ for (int i = 0; i < test_shapes.size(); ++i) {
+ IntegerSubOpModel m({TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, {}}, // always a scalar
+ {TensorType_INT32, {}}, ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
+ m.PopulateTensor<int32_t>(m.input2(), {1});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(),
+ ElementsAreArray(ArrayFloatNear({-21, 1, 6, 7, 10, 19})))
+ << "With shape number " << i;
+ }
+}
+
TEST(QuantizedSubOpModel, QuantizedTestsNoActivation) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
std::vector<std::initializer_list<float>> inputs1 = {
diff --git a/tensorflow/contrib/lite/kernels/svdf.cc b/tensorflow/contrib/lite/kernels/svdf.cc
index 308860c299..22eebdd4ce 100644
--- a/tensorflow/contrib/lite/kernels/svdf.cc
+++ b/tensorflow/contrib/lite/kernels/svdf.cc
@@ -12,6 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+
+// SVDF op that compresses a fully connected op via low-rank matrix
+// factorization. See https://research.google.com/pubs/archive/43813.pdf for
+// details.
#include <unistd.h>
#include <cassert>
#include <cmath>
@@ -32,6 +36,67 @@ namespace ops {
namespace builtin {
namespace svdf {
+namespace {
+
+struct OpData {
+ int scratch_tensor_index;
+ bool float_weights_time_initialized;
+};
+
+static inline void ApplyTimeWeightsBiasAndActivation(
+ int batch_size, int memory_size, int num_filters, int num_units, int rank,
+ const TfLiteTensor* weights_time, const TfLiteTensor* bias,
+ TfLiteFusedActivation activation, TfLiteTensor* state,
+ TfLiteTensor* scratch, TfLiteTensor* output) {
+ // Compute matmul(state, weights_time).
+  // The rightmost column is used to save temporary output (with the size of
+ // num_filters). This is achieved by starting at state->data.f and having the
+ // stride equal to memory_size.
+ for (int b = 0; b < batch_size; ++b) {
+ float* state_ptr_batch = state->data.f + b * memory_size * num_filters;
+ float* scratch_ptr_batch = scratch->data.f + b * num_filters;
+ tensor_utils::BatchVectorBatchVectorDotProduct(
+ weights_time->data.f, state_ptr_batch, memory_size, num_filters,
+ scratch_ptr_batch, /*result_stride=*/1);
+ }
+
+ // Initialize output with bias if provided.
+ if (bias) {
+ tensor_utils::VectorBatchVectorAssign(bias->data.f, num_units, batch_size,
+ output->data.f);
+ } else {
+ tensor_utils::ZeroVector(output->data.f, batch_size * num_units);
+ }
+
+ // Reduction sum.
+ for (int b = 0; b < batch_size; ++b) {
+ float* output_ptr_batch = output->data.f + b * num_units;
+ float* scratch_ptr_batch = scratch->data.f + b * num_filters;
+ tensor_utils::ReductionSumVector(scratch_ptr_batch, output_ptr_batch,
+ num_units, rank);
+ }
+
+ // Apply activation.
+ for (int b = 0; b < batch_size; ++b) {
+ float* output_ptr_batch = output->data.f + b * num_units;
+ tensor_utils::ApplyActivationToVector(output_ptr_batch, num_units,
+ activation, output_ptr_batch);
+ }
+
+  // Left shift the state to make room for the next cycle's activation.
+ // TODO(alanchiao): explore collapsing this into a single loop.
+ for (int b = 0; b < batch_size; ++b) {
+ float* state_ptr_batch = state->data.f + b * memory_size * num_filters;
+ for (int f = 0; f < num_filters; ++f) {
+ tensor_utils::VectorShiftLeft(state_ptr_batch, memory_size,
+ /*shift_value=*/0.0);
+ state_ptr_batch += memory_size;
+ }
+ }
+}
+
+} // namespace
+
constexpr int kInputTensor = 0;
constexpr int kWeightsFeatureTensor = 1;
constexpr int kWeightsTimeTensor = 2;
@@ -40,29 +105,34 @@ constexpr int kStateTensor = 0;
constexpr int kOutputTensor = 1;
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
- auto* scratch_tensor_index = new int;
- context->AddTensors(context, 1, scratch_tensor_index);
- return scratch_tensor_index;
+ auto* op_data = new OpData;
+ op_data->float_weights_time_initialized = false;
+ context->AddTensors(context, /*tensors_to_add=*/4,
+ &op_data->scratch_tensor_index);
+ return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
- delete reinterpret_cast<int*>(buffer);
+ delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
- auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
- int* scratch_tensor_index = reinterpret_cast<int*>(node->user_data);
+ const auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
+ OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
+ int scratch_tensor_index = op_data->scratch_tensor_index;
// Check we have all the inputs and outputs we need.
TF_LITE_ENSURE_EQ(context, node->inputs->size, 4);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 2);
- TfLiteTensor* input = &context->tensors[node->inputs->data[kInputTensor]];
+ const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* weights_feature =
GetInput(context, node, kWeightsFeatureTensor);
const TfLiteTensor* weights_time =
GetInput(context, node, kWeightsTimeTensor);
+ TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32);
+
// Check all the parameters of tensor match within themselves and match the
// input configuration.
const int rank = params->rank;
@@ -103,10 +173,18 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size_array));
+ // The weights are of consistent type, so it suffices to check one.
+ const bool is_hybrid_op =
+ (input->type == kTfLiteFloat32 && weights_feature->type == kTfLiteUInt8);
+
// Resize scratch.
TfLiteIntArrayFree(node->temporaries);
- node->temporaries = TfLiteIntArrayCreate(1);
- node->temporaries->data[0] = *scratch_tensor_index;
+ if (is_hybrid_op) {
+ node->temporaries = TfLiteIntArrayCreate(4);
+ } else {
+ node->temporaries = TfLiteIntArrayCreate(1);
+ }
+ node->temporaries->data[0] = scratch_tensor_index;
TfLiteIntArray* scratch_size_array = TfLiteIntArrayCreate(2);
scratch_size_array->data[0] = batch_size;
@@ -118,24 +196,56 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor,
scratch_size_array));
- return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
- auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
-
- const TfLiteTensor* input = GetInput(context, node, kInputTensor);
- const TfLiteTensor* weights_feature =
- GetInput(context, node, kWeightsFeatureTensor);
- const TfLiteTensor* weights_time =
- GetInput(context, node, kWeightsTimeTensor);
+ if (is_hybrid_op) {
+    // Tell the interpreter to allocate temporary tensors to store quantized
+    // values of the input tensors.
+ node->temporaries->data[1] = scratch_tensor_index + 1;
+ TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/1);
+ input_quantized->type = kTfLiteUInt8;
+ input_quantized->allocation_type = kTfLiteArenaRw;
+ if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
+ TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
+ TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
+ input_quantized_size));
+ }
- TfLiteTensor* state = GetOutput(context, node, kStateTensor);
- TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
- TfLiteTensor* scratch = GetTemporary(context, node, /*index=*/0);
+    // Tell the interpreter to allocate temporary tensors to store scaling factors.
+ node->temporaries->data[2] = scratch_tensor_index + 2;
+ TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/2);
+ scaling_factors->type = kTfLiteFloat32;
+ scaling_factors->allocation_type = kTfLiteArenaRw;
+ TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
+ scaling_factors_size->data[0] = batch_size;
+ if (!TfLiteIntArrayEqual(scaling_factors->dims, scaling_factors_size)) {
+ TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
+ scaling_factors_size));
+ }
- const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
+ // Used to store dequantized weights_time matrix for hybrid computation
+ // of matmul(state, weights_time), which occurs in floating point.
+ node->temporaries->data[3] = scratch_tensor_index + 3;
+ TfLiteTensor* float_weights_time = GetTemporary(context, node, /*index=*/3);
+ float_weights_time->type = kTfLiteFloat32;
+ // Persistent so that we can compute the dequantized weights only once.
+ float_weights_time->allocation_type = kTfLiteArenaRwPersistent;
+ if (!TfLiteIntArrayEqual(float_weights_time->dims, weights_time->dims)) {
+ TfLiteIntArray* float_weights_time_size =
+ TfLiteIntArrayCopy(weights_time->dims);
+ TF_LITE_ENSURE_OK(context,
+ context->ResizeTensor(context, float_weights_time,
+ float_weights_time_size));
+ }
+ }
+ return kTfLiteOk;
+}
+TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node,
+ const TfLiteTensor* input,
+ const TfLiteTensor* weights_feature,
+ const TfLiteTensor* weights_time,
+ const TfLiteTensor* bias, const TfLiteSVDFParams* params,
+ TfLiteTensor* scratch, TfLiteTensor* state,
+ TfLiteTensor* output) {
const int rank = params->rank;
const int batch_size = input->dims->data[0];
const int input_size = input->dims->data[1];
@@ -146,67 +256,151 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
// Clear the activation (state leftmost column).
// TODO(ghodrat): Add a test which initializes state with invalid values in
// the leftmost column and make sure it passes.
- for (int b = 0; b < batch_size; b++) {
+ for (int b = 0; b < batch_size; ++b) {
float* state_ptr_batch = state->data.f + b * memory_size * num_filters;
- for (int c = 0; c < num_filters; c++) {
+ for (int c = 0; c < num_filters; ++c) {
float* state_ptr = state_ptr_batch + c * memory_size;
state_ptr[memory_size - 1] = 0.0;
}
}
// Compute conv1d(inputs, weights_feature).
- // The state left most column is used to save current cycle activation. This
+  // The state's rightmost column is used to save the current cycle activation. This
// is achieved by starting at state->data.f[memory_size - 1] and having the
// stride equal to memory_size.
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
weights_feature->data.f, num_filters, input_size, input->data.f,
batch_size, &state->data.f[memory_size - 1], memory_size);
- // Compute matmul(state, weights_time).
- // The right most column is used to save temporary output (with the size of
- // num_filters). This is achieved by starting at state->data.f and having the
- // stride equal to memory_size.
- for (int b = 0; b < batch_size; b++) {
+ ApplyTimeWeightsBiasAndActivation(batch_size, memory_size, num_filters,
+ num_units, rank, weights_time, bias,
+ params->activation, state, scratch, output);
+ return kTfLiteOk;
+}
+
+TfLiteStatus EvalHybrid(
+ TfLiteContext* context, TfLiteNode* node, const TfLiteTensor* input,
+ const TfLiteTensor* weights_feature, const TfLiteTensor* weights_time,
+ const TfLiteTensor* bias, const TfLiteSVDFParams* params,
+ TfLiteTensor* scratch, TfLiteTensor* scaling_factors,
+ TfLiteTensor* input_quantized, TfLiteTensor* state, TfLiteTensor* output) {
+ const int rank = params->rank;
+ const int batch_size = input->dims->data[0];
+ const int input_size = input->dims->data[1];
+ const int num_filters = weights_feature->dims->data[0];
+ const int num_units = num_filters / rank;
+ const int memory_size = weights_time->dims->data[1];
+
+ // Initialize the pointer to input.
+ const float* input_ptr_batch = input->data.f;
+
+ // Initialize the pointer to storage for quantized values and
+ // scaling factors.
+ int8_t* quantized_input_ptr_batch =
+ reinterpret_cast<int8_t*>(input_quantized->data.uint8);
+
+ float* scaling_factors_ptr = scaling_factors->data.f;
+
+ // Other initializations.
+ const int8_t* weights_feature_ptr =
+ reinterpret_cast<int8_t*>(weights_feature->data.uint8);
+ const float weights_feature_scale = weights_feature->params.scale;
+
+  // Clear the activation (state leftmost column).
+  // TODO(ghodrat): Add a test which initializes state with invalid values in
+  // the leftmost column and make sure it passes.
+ for (int b = 0; b < batch_size; ++b) {
float* state_ptr_batch = state->data.f + b * memory_size * num_filters;
- float* scratch_ptr_batch = scratch->data.f + b * num_filters;
- tensor_utils::BatchVectorBatchVectorDotProduct(
- weights_time->data.f, state_ptr_batch, memory_size, num_filters,
- scratch_ptr_batch, /*result_stride=*/1);
+ for (int c = 0; c < num_filters; ++c) {
+ float* state_ptr = state_ptr_batch + c * memory_size;
+ state_ptr[memory_size - 1] = 0.0;
+ }
}
- // Initialize output with bias if provided.
- if (bias) {
- tensor_utils::VectorBatchVectorAssign(bias->data.f, num_units, batch_size,
- output->data.f);
- } else {
- tensor_utils::ZeroVector(output->data.f, batch_size * num_units);
- }
+ if (!tensor_utils::IsZeroVector(input_ptr_batch, batch_size * input_size)) {
+ // Quantize input from float to int8.
+ float unused_min, unused_max;
+ for (int b = 0; b < batch_size; ++b) {
+ const int offset = b * input_size;
+ tensor_utils::SymmetricQuantizeFloats(
+ input_ptr_batch + offset, input_size,
+ quantized_input_ptr_batch + offset, &unused_min, &unused_max,
+ &scaling_factors_ptr[b]);
+ scaling_factors_ptr[b] *= weights_feature_scale;
+ }
- // Reduction sum
- for (int b = 0; b < batch_size; b++) {
- float* output_ptr_batch = output->data.f + b * num_units;
- float* scratch_ptr_batch = scratch->data.f + b * num_filters;
- tensor_utils::ReductionSumVector(scratch_ptr_batch, output_ptr_batch,
- num_units, rank);
+ // Compute conv1d(inputs, weights_feature).
+    // The state's rightmost column is used to save the current cycle activation.
+ // This is achieved by starting at state->data.f[memory_size - 1] and having
+ // the stride equal to memory_size.
+ tensor_utils::MatrixBatchVectorMultiplyAccumulate(
+ weights_feature_ptr, num_filters, input_size, quantized_input_ptr_batch,
+ scaling_factors_ptr, batch_size, &state->data.f[memory_size - 1],
+ memory_size);
}
- // Apply activation.
- for (int b = 0; b < batch_size; b++) {
- float* output_ptr_batch = output->data.f + b * num_units;
- tensor_utils::ApplyActivationToVector(output_ptr_batch, num_units,
- params->activation, output_ptr_batch);
- }
+  // TODO(alanchiao): the hybrid case can be optimized by ~5% by unrolling the
+  // loop that applies the time weights so the inner loop multiplies eight
+  // elements at a time.
+ ApplyTimeWeightsBiasAndActivation(batch_size, memory_size, num_filters,
+ num_units, rank, weights_time, bias,
+ params->activation, state, scratch, output);
+ return kTfLiteOk;
+}
- // Right shift the state.
- for (int b = 0; b < batch_size; b++) {
- float* state_ptr_batch = state->data.f + b * memory_size * num_filters;
- for (int f = 0; f < num_filters; f++) {
- tensor_utils::VectorShiftLeft(state_ptr_batch, memory_size,
- /*shift_value=*/0.0);
- state_ptr_batch += memory_size;
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+ auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
+ OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
+
+ const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+ const TfLiteTensor* weights_feature =
+ GetInput(context, node, kWeightsFeatureTensor);
+ const TfLiteTensor* weights_time =
+ GetInput(context, node, kWeightsTimeTensor);
+ const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
+
+ TfLiteTensor* scratch = GetTemporary(context, node, /*index=*/0);
+
+ TfLiteTensor* state = GetOutput(context, node, kStateTensor);
+ TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+
+ switch (weights_feature->type) {
+ case kTfLiteFloat32: {
+ return EvalFloat(context, node, input, weights_feature, weights_time,
+ bias, params, scratch, state, output);
+ break;
}
+ case kTfLiteUInt8: {
+ TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/1);
+ TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/2);
+ TfLiteTensor* float_weights_time =
+ GetTemporary(context, node, /*index=*/3);
+
+ // Dequantize weights time.
+ // TODO(alanchiao): this dequantization initialization only needs to
+ // happen once per model and should theoretically be placed in either Init
+ // or Prepare. However, TFLite doesn't allocate float_weights_time until
+ // the Eval function.
+ // TODO(alanchiao): refactor logic out into dequantize function.
+ if (!op_data->float_weights_time_initialized) {
+ const float dequantization_scale = weights_time->params.scale;
+ const int8_t* weights_time_ptr =
+ reinterpret_cast<int8_t*>(weights_time->data.uint8);
+ for (int i = 0; i < NumElements(float_weights_time); ++i) {
+ float_weights_time->data.f[i] =
+ weights_time_ptr[i] * dequantization_scale;
+ }
+ op_data->float_weights_time_initialized = true;
+ }
+ return EvalHybrid(context, node, input, weights_feature,
+ float_weights_time, bias, params, scratch,
+ scaling_factors, input_quantized, state, output);
+ break;
+ }
+ default:
+ context->ReportError(context, "Type %d not currently supported.",
+ weights_feature->type);
+ return kTfLiteError;
}
- return kTfLiteOk;
}
} // namespace svdf
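
The hybrid path added above hinges on per-batch symmetric quantization: each input row is quantized to int8 against its own maximum magnitude, and the resulting per-batch scale is folded together with the weight scale before the integer matmul. Below is a standalone sketch of that quantization step, assuming the max_abs / 127 scale convention that symmetric int8 schemes commonly use; SymmetricQuantize is an illustrative stand-in for the tensor_utils call, not its actual signature.

#include <algorithm>
#include <cmath>
#include <cstdint>

void SymmetricQuantize(const float* values, int size, int8_t* quantized,
                       float* scaling_factor) {
  float max_abs = 0.f;
  for (int i = 0; i < size; ++i)
    max_abs = std::max(max_abs, std::fabs(values[i]));
  if (max_abs == 0.f) {  // all-zero row: nothing to quantize
    std::fill(quantized, quantized + size, 0);
    *scaling_factor = 1.f;
    return;
  }
  const float scale = max_abs / 127.f;  // symmetric int8 range [-127, 127]
  for (int i = 0; i < size; ++i) {
    const float q = std::round(values[i] / scale);
    quantized[i] = static_cast<int8_t>(std::min(127.f, std::max(-127.f, q)));
  }
  *scaling_factor = scale;
}

EvalHybrid then multiplies each scaling factor by weights_feature->params.scale, so the accumulated int8 x int8 products can be rescaled back to floats in one step; the IsZeroVector early-out above skips all of this when the input batch is entirely zero.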
diff --git a/tensorflow/contrib/lite/kernels/svdf_test.cc b/tensorflow/contrib/lite/kernels/svdf_test.cc
index 0f166dc69b..5af3ff8500 100644
--- a/tensorflow/contrib/lite/kernels/svdf_test.cc
+++ b/tensorflow/contrib/lite/kernels/svdf_test.cc
@@ -126,17 +126,20 @@ static float svdf_golden_output_rank_2[] = {
};
// Derived class of SingleOpModel, which is used to test SVDF TFLite op.
-class SVDFOpModel : public SingleOpModel {
+class BaseSVDFOpModel : public SingleOpModel {
public:
- SVDFOpModel(int batches, int units, int input_size, int memory_size, int rank)
+ BaseSVDFOpModel(int batches, int units, int input_size, int memory_size,
+ int rank,
+ TensorType weights_feature_type = TensorType_FLOAT32,
+ TensorType weights_time_type = TensorType_FLOAT32)
: batches_(batches),
units_(units),
input_size_(input_size),
memory_size_(memory_size),
rank_(rank) {
input_ = AddInput(TensorType_FLOAT32);
- weights_feature_ = AddInput(TensorType_FLOAT32);
- weights_time_ = AddInput(TensorType_FLOAT32);
+ weights_feature_ = AddInput(weights_feature_type);
+ weights_time_ = AddInput(weights_time_type);
bias_ = AddNullInput();
state_ = AddOutput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
@@ -182,7 +185,7 @@ class SVDFOpModel : public SingleOpModel {
int num_units() { return units_; }
int num_batches() { return batches_; }
- private:
+ protected:
int input_;
int weights_feature_;
int weights_time_;
@@ -197,7 +200,61 @@ class SVDFOpModel : public SingleOpModel {
int rank_;
};
-TEST(SVDFOpTest, BlackBoxTestRank1) {
+class SVDFOpModel : public BaseSVDFOpModel {
+ public:
+ using BaseSVDFOpModel::BaseSVDFOpModel;
+};
+
+class HybridSVDFOpModel : public BaseSVDFOpModel {
+ public:
+ HybridSVDFOpModel(int batches, int units, int input_size, int memory_size,
+ int rank)
+ : BaseSVDFOpModel(batches, units, input_size, memory_size, rank,
+ TensorType_UINT8, TensorType_UINT8) {}
+
+ void SetWeightsFeature(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(weights_feature_, f);
+ }
+
+ void SetWeightsTime(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(weights_time_, f);
+ }
+};
+
+class SVDFOpTest : public ::testing::Test {
+ protected:
+ void VerifyGoldens(float golden_input[], float golden_output[],
+ int golden_size, BaseSVDFOpModel* svdf,
+ float tolerance = 1e-5) {
+ const int svdf_num_batches = svdf->num_batches();
+ const int svdf_input_size = svdf->input_size();
+ const int svdf_num_units = svdf->num_units();
+ const int input_sequence_size =
+ golden_size / sizeof(float) / (svdf_input_size * svdf_num_batches);
+    // Go over each input batch, set the input tensor, invoke the SVDF op,
+    // and check the output against the expected golden values.
+ for (int i = 0; i < input_sequence_size; i++) {
+ float* batch_start =
+ golden_input + i * svdf_input_size * svdf_num_batches;
+ float* batch_end = batch_start + svdf_input_size * svdf_num_batches;
+ svdf->SetInput(0, batch_start, batch_end);
+
+ svdf->Invoke();
+
+ const float* golden_start =
+ golden_output + i * svdf_num_units * svdf_num_batches;
+ const float* golden_end =
+ golden_start + svdf_num_units * svdf_num_batches;
+ std::vector<float> expected;
+ expected.insert(expected.end(), golden_start, golden_end);
+
+ EXPECT_THAT(svdf->GetOutput(),
+ ElementsAreArray(ArrayFloatNear(expected, tolerance)));
+ }
+ }
+};
+
+TEST_F(SVDFOpTest, BlackBoxTestRank1) {
SVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
/*memory_size=*/10, /*rank=*/1);
svdf.SetWeightsFeature({-0.31930989, -0.36118156, 0.0079667, 0.37613347,
@@ -218,31 +275,11 @@ TEST(SVDFOpTest, BlackBoxTestRank1) {
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657});
svdf.ResetState();
- const int svdf_num_batches = svdf.num_batches();
- const int svdf_input_size = svdf.input_size();
- const int svdf_num_units = svdf.num_units();
- const int input_sequence_size =
- sizeof(svdf_input) / sizeof(float) / (svdf_input_size * svdf_num_batches);
- // Going over each input batch, setting the input tensor, invoking the SVDF op
- // and checking the output with the expected golden values.
- for (int i = 0; i < input_sequence_size; i++) {
- float* batch_start = svdf_input + i * svdf_input_size * svdf_num_batches;
- float* batch_end = batch_start + svdf_input_size * svdf_num_batches;
- svdf.SetInput(0, batch_start, batch_end);
-
- svdf.Invoke();
-
- float* golden_start =
- svdf_golden_output_rank_1 + i * svdf_num_units * svdf_num_batches;
- float* golden_end = golden_start + svdf_num_units * svdf_num_batches;
- std::vector<float> expected;
- expected.insert(expected.end(), golden_start, golden_end);
-
- EXPECT_THAT(svdf.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
- }
+ VerifyGoldens(svdf_input, svdf_golden_output_rank_1, sizeof(svdf_input),
+ &svdf);
}
-TEST(SVDFOpTest, BlackBoxTestRank2) {
+TEST_F(SVDFOpTest, BlackBoxTestRank2) {
SVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
/*memory_size=*/10, /*rank=*/2);
svdf.SetWeightsFeature({-0.31930989, 0.0079667, 0.39296314, 0.37613347,
@@ -278,28 +315,75 @@ TEST(SVDFOpTest, BlackBoxTestRank2) {
0.08682203, 0.1258215, 0.1851041, 0.29228821, 0.12366763});
svdf.ResetState();
- const int svdf_num_batches = svdf.num_batches();
- const int svdf_input_size = svdf.input_size();
- const int svdf_num_units = svdf.num_units();
- const int input_sequence_size =
- sizeof(svdf_input) / sizeof(float) / (svdf_input_size * svdf_num_batches);
- // Going over each input batch, setting the input tensor, invoking the SVDF op
- // and checking the output with the expected golden values.
- for (int i = 0; i < input_sequence_size; i++) {
- float* batch_start = svdf_input + i * svdf_input_size * svdf_num_batches;
- float* batch_end = batch_start + svdf_input_size * svdf_num_batches;
- svdf.SetInput(0, batch_start, batch_end);
-
- svdf.Invoke();
-
- float* golden_start =
- svdf_golden_output_rank_2 + i * svdf_num_units * svdf_num_batches;
- float* golden_end = golden_start + svdf_num_units * svdf_num_batches;
- std::vector<float> expected;
- expected.insert(expected.end(), golden_start, golden_end);
-
- EXPECT_THAT(svdf.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
- }
+ VerifyGoldens(svdf_input, svdf_golden_output_rank_2, sizeof(svdf_input),
+ &svdf);
+}
+
+TEST_F(SVDFOpTest, BlackBoxTestHybridRank1) {
+ HybridSVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
+ /*memory_size=*/10, /*rank=*/1);
+ svdf.SetWeightsFeature({-0.31930989, -0.36118156, 0.0079667, 0.37613347,
+ 0.22197971, 0.12416199, 0.27901134, 0.27557442,
+ 0.3905206, -0.36137494, -0.06634006, -0.10640851});
+
+ svdf.SetWeightsTime(
+ {-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
+ 0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
+
+ 0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
+ -0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
+
+ -0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
+ 0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
+
+ -0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
+ -0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657});
+
+ svdf.ResetState();
+ VerifyGoldens(svdf_input, svdf_golden_output_rank_1, sizeof(svdf_input),
+ &svdf,
+ /*tolerance=*/0.002945);
+}
+
+TEST_F(SVDFOpTest, BlackBoxTestHybridRank2) {
+ HybridSVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
+ /*memory_size=*/10, /*rank=*/2);
+ svdf.SetWeightsFeature({-0.31930989, 0.0079667, 0.39296314, 0.37613347,
+ 0.12416199, 0.15785322, 0.27901134, 0.3905206,
+ 0.21931258, -0.36137494, -0.10640851, 0.31053296,
+ -0.36118156, -0.0976817, -0.36916667, 0.22197971,
+ 0.15294972, 0.38031587, 0.27557442, 0.39635518,
+ -0.21580373, -0.06634006, -0.02702999, 0.27072677});
+
+ svdf.SetWeightsTime(
+ {-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
+ 0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
+
+ 0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
+ -0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
+
+ -0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
+ 0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
+
+ -0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
+ -0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657,
+
+ -0.14884081, 0.19931212, -0.36002168, 0.34663299, -0.11405486,
+ 0.12672701, 0.39463779, -0.07886535, -0.06384811, 0.08249187,
+
+ -0.26816407, -0.19905911, 0.29211238, 0.31264046, -0.28664589,
+ 0.05698794, 0.11613581, 0.14078894, 0.02187902, -0.21781836,
+
+ -0.15567942, 0.08693647, -0.38256618, 0.36580828, -0.22922277,
+ -0.0226903, 0.12878349, -0.28122205, -0.10850525, -0.11955214,
+
+ 0.27179423, -0.04710215, 0.31069002, 0.22672787, 0.09580326,
+ 0.08682203, 0.1258215, 0.1851041, 0.29228821, 0.12366763});
+
+ svdf.ResetState();
+ VerifyGoldens(svdf_input, svdf_golden_output_rank_2, sizeof(svdf_input),
+ &svdf,
+ /*tolerance=*/0.00625109);
}
} // namespace
diff --git a/tensorflow/contrib/lite/kernels/test_util.h b/tensorflow/contrib/lite/kernels/test_util.h
index 5094e1343a..bedbe93ae6 100644
--- a/tensorflow/contrib/lite/kernels/test_util.h
+++ b/tensorflow/contrib/lite/kernels/test_util.h
@@ -148,20 +148,18 @@ class SingleOpModel {
int AddOutput(const TensorData& t);
template <typename T>
- void QuantizeAndPopulate(int index, std::initializer_list<float> data) {
+ void QuantizeAndPopulate(int index, const std::vector<float>& data) {
TfLiteTensor* t = interpreter_->tensor(index);
auto q = Quantize<T>(data, t->params.scale, t->params.zero_point);
PopulateTensor(index, 0, q.data(), q.data() + q.size());
}
- void SymmetricQuantizeAndPopulate(int index,
- std::initializer_list<float> data) {
+ void SymmetricQuantizeAndPopulate(int index, const std::vector<float>& data) {
TfLiteTensor* t = interpreter_->tensor(index);
- std::vector<float> values(data);
- const int length = values.size();
+ const int length = data.size();
std::vector<int8_t> q(length);
float min, max, scaling_factor;
- tensor_utils::SymmetricQuantizeFloats(values.data(), length, q.data(), &min,
+ tensor_utils::SymmetricQuantizeFloats(data.data(), length, q.data(), &min,
&max, &scaling_factor);
// Update quantization params.
t->params.scale = scaling_factor;
@@ -198,8 +196,22 @@ class SingleOpModel {
}
// Populate the tensor given its index.
+ // TODO(b/110696148) clean up and merge with vector-taking variant below.
template <typename T>
- void PopulateTensor(int index, std::initializer_list<T> data) {
+ void PopulateTensor(int index, const std::initializer_list<T>& data) {
+ T* v = interpreter_->typed_tensor<T>(index);
+ CHECK(v) << "No tensor with index '" << index << "'.";
+ for (T f : data) {
+ *v = f;
+ ++v;
+ }
+ }
+
+ // Populate the tensor given its index.
+ // TODO(b/110696148) clean up and merge with initializer_list-taking variant
+ // above.
+ template <typename T>
+ void PopulateTensor(int index, const std::vector<T>& data) {
T* v = interpreter_->typed_tensor<T>(index);
CHECK(v) << "No tensor with index '" << index << "'.";
for (T f : data) {
diff --git a/tensorflow/contrib/lite/kernels/topk_v2.cc b/tensorflow/contrib/lite/kernels/topk_v2.cc
index fb0e49c90c..2dd760bbfe 100644
--- a/tensorflow/contrib/lite/kernels/topk_v2.cc
+++ b/tensorflow/contrib/lite/kernels/topk_v2.cc
@@ -56,11 +56,13 @@ TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
output_values_shape->data[num_dimensions - 1] = k;
TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes);
TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);
+ // Force output types.
+ output_indexes->type = kTfLiteInt32;
+ output_values->type = input->type;
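+ // (Indexes are always int32; the values tensor takes the input's type.)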
auto resize_tensor = [context](TfLiteTensor* tensor, TfLiteIntArray* new_size,
TfLiteIntArray* delete_on_error) {
TfLiteStatus status = context->ResizeTensor(context, tensor, new_size);
if (status != kTfLiteOk) {
- TfLiteIntArrayFree(new_size);
if (delete_on_error != nullptr) {
TfLiteIntArrayFree(delete_on_error);
}
diff --git a/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc b/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
index 1c28123a24..c48b470f92 100644
--- a/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
+++ b/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
@@ -70,9 +70,21 @@ constexpr int kOutputStateTensor = 0;
constexpr int kCellStateTensor = 1;
constexpr int kOutputTensor = 2;
+// Temporary tensors
+enum TemporaryTensor {
+ kScratchBuffer = 0,
+ kInputQuantized = 1,
+ kOutputStateQuantized = 2,
+ kCellStateQuantized = 3,
+ kScalingFactors = 4,
+ kProductScalingFactors = 5,
+ kRecoveredCellWeights = 6,
+ kNumTemporaryTensors = 7
+};
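+// These values index into node->temporaries; the float-only path uses just
+// kScratchBuffer.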
+
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* scratch_tensor_index = new int;
- context->AddTensors(context, 1, scratch_tensor_index);
+ context->AddTensors(context, kNumTemporaryTensors, scratch_tensor_index);
return scratch_tensor_index;
}
@@ -84,7 +96,7 @@ void Free(TfLiteContext* context, void* buffer) {
TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
TfLiteNode* node, int n_input,
int n_output, int n_cell) {
- auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
+ const auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
// Making sure clipping parameters have valid values.
// == 0 means no clipping
@@ -242,6 +254,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// Infer the batch size, number of outputs, sequence length and number of
// cells from the input tensors.
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+ TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE(context, input->dims->size > 1);
const int max_time = input->dims->data[0];
const int n_batch = input->dims->data[1];
@@ -261,7 +274,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const int n_output = recurrent_to_output_weights->dims->data[1];
// Check that the input tensor dimensions match each other.
- CheckInputTensorDimensions(context, node, n_input, n_output, n_cell);
+ TF_LITE_ENSURE_OK(context, CheckInputTensorDimensions(context, node, n_input,
+ n_output, n_cell));
// Get the pointer to output, output_state and cell_state buffer tensors.
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
@@ -288,86 +302,156 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, cell_state, cell_size));
- // Create a scratch buffer tensor.
+ // Mark state tensors as persistent tensors.
+ output_state->allocation_type = kTfLiteArenaRwPersistent;
+ cell_state->allocation_type = kTfLiteArenaRwPersistent;
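+ // Persistent arena tensors keep their contents across invocations, so the
+ // output and cell state carry over between calls.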
+
+ // The weights are of consistent type, so it suffices to check one.
+ // TODO(mirkov): create a utility/macro for this check, so all Ops can use it.
+ const bool is_hybrid_op = (input_to_output_weights->type == kTfLiteUInt8 &&
+ input->type == kTfLiteFloat32);
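+ // Here "hybrid" means quantized uint8 weights with float32 activations;
+ // this path needs the extra temporaries allocated below.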
+
TfLiteIntArrayFree(node->temporaries);
- node->temporaries = TfLiteIntArrayCreate(1);
+ if (is_hybrid_op) {
+ node->temporaries = TfLiteIntArrayCreate(kNumTemporaryTensors);
+ } else {
+ node->temporaries = TfLiteIntArrayCreate(1);
+ }
node->temporaries->data[0] = *scratch_tensor_index;
- TfLiteTensor* scratch_buffer = GetTemporary(context, node, /*index=*/0);
+
+ // Create a scratch buffer tensor.
+ TfLiteTensor* scratch_buffer = GetTemporary(context, node, kScratchBuffer);
scratch_buffer->type = input->type;
scratch_buffer->allocation_type = kTfLiteArenaRw;
- // Mark state tensors as persistent tensors.
- output_state->allocation_type = kTfLiteArenaRwPersistent;
- cell_state->allocation_type = kTfLiteArenaRwPersistent;
-
const TfLiteTensor* input_to_input_weights =
GetOptionalInputTensor(context, node, kInputToInputWeightsTensor);
const bool use_cifg = (input_to_input_weights == nullptr);
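+ // CIFG (coupled input and forget gate) models have no separate input gate,
+ // so they need scratch space for one fewer gate.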
+ TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2);
+ scratch_buffer_size->data[0] = n_batch;
if (use_cifg) {
- TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2);
- scratch_buffer_size->data[0] = n_batch;
// Reserving space for Cell, Forget, Output gates
scratch_buffer_size->data[1] = n_cell * 3;
- TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer,
- scratch_buffer_size));
} else {
- TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2);
- scratch_buffer_size->data[0] = n_batch;
// Reserving space for Input, Cell, Forget, Output gates
scratch_buffer_size->data[1] = n_cell * 4;
- TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer,
- scratch_buffer_size));
+ }
+ TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer,
+ scratch_buffer_size));
+
+ if (is_hybrid_op) {
+ // Allocate temporary tensors to store quantized copies of the input,
+ // output_state and cell_state tensors.
+ node->temporaries->data[kInputQuantized] =
+ *scratch_tensor_index + kInputQuantized;
+ TfLiteTensor* input_quantized =
+ GetTemporary(context, node, kInputQuantized);
+ input_quantized->type = kTfLiteUInt8;
+ input_quantized->allocation_type = kTfLiteArenaRw;
+ if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
+ TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
+ TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
+ input_quantized_size));
+ }
+ node->temporaries->data[kOutputStateQuantized] =
+ *scratch_tensor_index + kOutputStateQuantized;
+ TfLiteTensor* output_state_quantized =
+ GetTemporary(context, node, kOutputStateQuantized);
+ output_state_quantized->type = kTfLiteUInt8;
+ output_state_quantized->allocation_type = kTfLiteArenaRw;
+ if (!TfLiteIntArrayEqual(output_state_quantized->dims,
+ output_state->dims)) {
+ TfLiteIntArray* output_state_quantized_size =
+ TfLiteIntArrayCopy(output_state->dims);
+ TF_LITE_ENSURE_OK(context,
+ context->ResizeTensor(context, output_state_quantized,
+ output_state_quantized_size));
+ }
+ node->temporaries->data[kCellStateQuantized] =
+ *scratch_tensor_index + kCellStateQuantized;
+ TfLiteTensor* cell_state_quantized =
+ GetTemporary(context, node, kCellStateQuantized);
+ cell_state_quantized->type = kTfLiteUInt8;
+ cell_state_quantized->allocation_type = kTfLiteArenaRw;
+ if (!TfLiteIntArrayEqual(cell_state_quantized->dims, cell_state->dims)) {
+ TfLiteIntArray* cell_state_quantized_size =
+ TfLiteIntArrayCopy(cell_state->dims);
+ TF_LITE_ENSURE_OK(context,
+ context->ResizeTensor(context, cell_state_quantized,
+ cell_state_quantized_size));
+ }
+
+ // Allocate temporary tensors to store the scaling factors and product
+ // scaling factors. The latter is convenience storage that lets us quantize
+ // a vector once (producing the scaling factors) and multiply it with
+ // different matrices (which requires multiplying those scaling factors by
+ // the matrix's own scaling factor).
+ node->temporaries->data[kScalingFactors] =
+ *scratch_tensor_index + kScalingFactors;
+ TfLiteTensor* scaling_factors =
+ GetTemporary(context, node, kScalingFactors);
+ scaling_factors->type = kTfLiteFloat32;
+ scaling_factors->allocation_type = kTfLiteArenaRw;
+ TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
+ scaling_factors_size->data[0] = n_batch;
+ if (!TfLiteIntArrayEqual(scaling_factors->dims, scaling_factors_size)) {
+ TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
+ scaling_factors_size));
+ }
+ node->temporaries->data[kProductScalingFactors] =
+ *scratch_tensor_index + kProductScalingFactors;
+ TfLiteTensor* prod_scaling_factors =
+ GetTemporary(context, node, kProductScalingFactors);
+ prod_scaling_factors->type = kTfLiteFloat32;
+ prod_scaling_factors->allocation_type = kTfLiteArenaRw;
+ TfLiteIntArray* prod_scaling_factors_size = TfLiteIntArrayCreate(1);
+ prod_scaling_factors_size->data[0] = n_batch;
+ if (!TfLiteIntArrayEqual(prod_scaling_factors->dims,
+ prod_scaling_factors_size)) {
+ TF_LITE_ENSURE_OK(context,
+ context->ResizeTensor(context, prod_scaling_factors,
+ prod_scaling_factors_size));
+ }
+
+ // Allocate a temporary tensor to store the recovered cell weights. Since
+ // these are diagonal matrices, only n_cell values need to be stored.
+ node->temporaries->data[kRecoveredCellWeights] =
+ *scratch_tensor_index + kRecoveredCellWeights;
+ TfLiteTensor* recovered_cell_weights =
+ GetTemporary(context, node, kRecoveredCellWeights);
+ recovered_cell_weights->type = kTfLiteFloat32;
+ recovered_cell_weights->allocation_type = kTfLiteArenaRw;
+ TfLiteIntArray* recovered_cell_weights_size = TfLiteIntArrayCreate(1);
+ recovered_cell_weights_size->data[0] = n_cell;
+ if (!TfLiteIntArrayEqual(recovered_cell_weights->dims,
+ recovered_cell_weights_size)) {
+ TF_LITE_ENSURE_OK(context,
+ context->ResizeTensor(context, recovered_cell_weights,
+ recovered_cell_weights_size));
+ }
}
return kTfLiteOk;
}
// The LSTM Op engine.
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
- auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
- const TfLiteTensor* input = GetInput(context, node, kInputTensor);
-
- const TfLiteTensor* input_to_input_weights =
- GetOptionalInputTensor(context, node, kInputToInputWeightsTensor);
- const TfLiteTensor* input_to_forget_weights =
- GetInput(context, node, kInputToForgetWeightsTensor);
- const TfLiteTensor* input_to_cell_weights =
- GetInput(context, node, kInputToCellWeightsTensor);
- const TfLiteTensor* input_to_output_weights =
- GetInput(context, node, kInputToOutputWeightsTensor);
-
- const TfLiteTensor* recurrent_to_input_weights =
- GetOptionalInputTensor(context, node, kRecurrentToInputWeightsTensor);
- const TfLiteTensor* recurrent_to_forget_weights =
- GetInput(context, node, kRecurrentToForgetWeightsTensor);
- const TfLiteTensor* recurrent_to_cell_weights =
- GetInput(context, node, kRecurrentToCellWeightsTensor);
- const TfLiteTensor* recurrent_to_output_weights =
- GetInput(context, node, kRecurrentToOutputWeightsTensor);
-
- const TfLiteTensor* cell_to_input_weights =
- GetOptionalInputTensor(context, node, kCellToInputWeightsTensor);
- const TfLiteTensor* cell_to_forget_weights =
- GetOptionalInputTensor(context, node, kCellToForgetWeightsTensor);
- const TfLiteTensor* cell_to_output_weights =
- GetOptionalInputTensor(context, node, kCellToOutputWeightsTensor);
-
- const TfLiteTensor* input_gate_bias =
- GetOptionalInputTensor(context, node, kInputGateBiasTensor);
- const TfLiteTensor* forget_gate_bias =
- GetInput(context, node, kForgetGateBiasTensor);
- const TfLiteTensor* cell_bias = GetInput(context, node, kCellGateBiasTensor);
- const TfLiteTensor* output_gate_bias =
- GetInput(context, node, kOutputGateBiasTensor);
-
- const TfLiteTensor* projection_weights =
- GetOptionalInputTensor(context, node, kProjectionWeightsTensor);
- const TfLiteTensor* projection_bias =
- GetOptionalInputTensor(context, node, kProjectionBiasTensor);
-
- TfLiteTensor* output_state = GetOutput(context, node, kOutputStateTensor);
- TfLiteTensor* cell_state = GetOutput(context, node, kCellStateTensor);
- TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
-
+TfLiteStatus EvalFloat(
+ const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
+ const TfLiteTensor* input_to_forget_weights,
+ const TfLiteTensor* input_to_cell_weights,
+ const TfLiteTensor* input_to_output_weights,
+ const TfLiteTensor* recurrent_to_input_weights,
+ const TfLiteTensor* recurrent_to_forget_weights,
+ const TfLiteTensor* recurrent_to_cell_weights,
+ const TfLiteTensor* recurrent_to_output_weights,
+ const TfLiteTensor* cell_to_input_weights,
+ const TfLiteTensor* cell_to_forget_weights,
+ const TfLiteTensor* cell_to_output_weights,
+ const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
+ const TfLiteTensor* cell_bias, const TfLiteTensor* output_gate_bias,
+ const TfLiteTensor* projection_weights, const TfLiteTensor* projection_bias,
+ const TfLiteLSTMParams* params, TfLiteTensor* scratch_buffer,
+ TfLiteTensor* output_state, TfLiteTensor* cell_state,
+ TfLiteTensor* output) {
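+ // Float evaluation: feeds the whole sequence through the LSTM, one
+ // kernel_utils::LstmStep call per time step.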
const int max_time = input->dims->data[0];
const int n_batch = input->dims->data[1];
const int n_input = input->dims->data[2];
@@ -380,8 +464,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const bool use_cifg = (input_to_input_weights == nullptr);
const bool use_peephole = (cell_to_output_weights != nullptr);
- // Index the scratch buffers pointers to the global scratch buffer.
- TfLiteTensor* scratch_buffer = GetTemporary(context, node, /*index=*/0);
float* input_gate_scratch = nullptr;
float* cell_scratch = nullptr;
float* forget_gate_scratch = nullptr;
@@ -432,6 +514,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
float* output_state_ptr = output_state->data.f;
float* cell_state_ptr = cell_state->data.f;
+ // Feed the sequence into the LSTM step-by-step.
for (int t = 0; t < max_time; t++) {
const float* input_ptr_batch = input->data.f + t * n_batch * n_input;
float* output_ptr_batch = output->data.f + t * n_batch * n_output;
@@ -452,6 +535,262 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
+TfLiteStatus EvalHybrid(
+ const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
+ const TfLiteTensor* input_to_forget_weights,
+ const TfLiteTensor* input_to_cell_weights,
+ const TfLiteTensor* input_to_output_weights,
+ const TfLiteTensor* recurrent_to_input_weights,
+ const TfLiteTensor* recurrent_to_forget_weights,
+ const TfLiteTensor* recurrent_to_cell_weights,
+ const TfLiteTensor* recurrent_to_output_weights,
+ const TfLiteTensor* cell_to_input_weights,
+ const TfLiteTensor* cell_to_forget_weights,
+ const TfLiteTensor* cell_to_output_weights,
+ const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
+ const TfLiteTensor* cell_bias, const TfLiteTensor* output_gate_bias,
+ const TfLiteTensor* projection_weights, const TfLiteTensor* projection_bias,
+ const TfLiteLSTMParams* params, TfLiteTensor* scratch_buffer,
+ TfLiteTensor* scaling_factors, TfLiteTensor* prod_scaling_factors,
+ TfLiteTensor* recovered_cell_weights, TfLiteTensor* input_quantized,
+ TfLiteTensor* output_state_quantized, TfLiteTensor* cell_state_quantized,
+ TfLiteTensor* output_state, TfLiteTensor* cell_state,
+ TfLiteTensor* output) {
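+ // Hybrid evaluation: uint8-quantized weights with float activations. The
+ // quantized temporaries and scaling factors below are consumed by
+ // kernel_utils::LstmStep.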
+ const int max_time = input->dims->data[0];
+ const int n_batch = input->dims->data[1];
+ const int n_input = input->dims->data[2];
+ // n_cell and n_output will be the same size when there is no projection.
+ const int n_cell = input_to_output_weights->dims->data[0];
+ const int n_output = recurrent_to_output_weights->dims->data[1];
+
+ // Since we have already checked that the weights are either all present or
+ // all absent, checking the existence of a single one suffices.
+ const bool use_cifg = (input_to_input_weights == nullptr);
+ const bool use_peephole = (cell_to_output_weights != nullptr);
+
+ float* input_gate_scratch = nullptr;
+ float* cell_scratch = nullptr;
+ float* forget_gate_scratch = nullptr;
+ float* output_gate_scratch = nullptr;
+ if (use_cifg) {
+ cell_scratch = scratch_buffer->data.f;
+ forget_gate_scratch = scratch_buffer->data.f + n_cell * n_batch;
+ output_gate_scratch = scratch_buffer->data.f + 2 * n_cell * n_batch;
+ } else {
+ input_gate_scratch = scratch_buffer->data.f;
+ cell_scratch = scratch_buffer->data.f + n_cell * n_batch;
+ forget_gate_scratch = scratch_buffer->data.f + 2 * n_cell * n_batch;
+ output_gate_scratch = scratch_buffer->data.f + 3 * n_cell * n_batch;
+ }
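+ // Each gate scratch region holds n_batch * n_cell floats, laid out back to
+ // back in the shared scratch buffer.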
+
+ // Check the optional tensors; their pointers may be null.
+ int8_t* input_to_input_weights_ptr = nullptr;
+ float input_to_input_weights_scale = 1.0f;
+ int8_t* recurrent_to_input_weights_ptr = nullptr;
+ float recurrent_to_input_weights_scale = 1.0f;
+ float* input_gate_bias_ptr = nullptr;
+ if (!use_cifg) {
+ input_to_input_weights_ptr =
+ reinterpret_cast<int8_t*>(input_to_input_weights->data.uint8);
+ recurrent_to_input_weights_ptr =
+ reinterpret_cast<int8_t*>(recurrent_to_input_weights->data.uint8);
+ input_gate_bias_ptr = input_gate_bias->data.f;
+ input_to_input_weights_scale = input_to_input_weights->params.scale;
+ recurrent_to_input_weights_scale = recurrent_to_input_weights->params.scale;
+ }
+
+ int8_t* cell_to_input_weights_ptr = nullptr;
+ int8_t* cell_to_forget_weights_ptr = nullptr;
+ int8_t* cell_to_output_weights_ptr = nullptr;
+ float cell_to_input_weights_scale = 1.0f;
+ float cell_to_forget_weights_scale = 1.0f;
+ float cell_to_output_weights_scale = 1.0f;
+ if (use_peephole) {
+ if (!use_cifg) {
+ cell_to_input_weights_ptr =
+ reinterpret_cast<int8_t*>(cell_to_input_weights->data.uint8);
+ cell_to_input_weights_scale = cell_to_input_weights->params.scale;
+ }
+ cell_to_forget_weights_ptr =
+ reinterpret_cast<int8_t*>(cell_to_forget_weights->data.uint8);
+ cell_to_output_weights_ptr =
+ reinterpret_cast<int8_t*>(cell_to_output_weights->data.uint8);
+ cell_to_forget_weights_scale = cell_to_forget_weights->params.scale;
+ cell_to_output_weights_scale = cell_to_output_weights->params.scale;
+ }
+
+ const int8_t* projection_weights_ptr =
+ (projection_weights == nullptr)
+ ? nullptr
+ : reinterpret_cast<int8_t*>(projection_weights->data.uint8);
+ float projection_weights_scale =
+ (projection_weights == nullptr) ? 1.0f : projection_weights->params.scale;
+ const float* projection_bias_ptr =
+ (projection_bias == nullptr) ? nullptr : projection_bias->data.f;
+
+ // Required tensors; their pointers are guaranteed non-null.
+ const int8_t* input_to_forget_weights_ptr =
+ reinterpret_cast<int8_t*>(input_to_forget_weights->data.uint8);
+ const float input_to_forget_weights_scale =
+ input_to_forget_weights->params.scale;
+ const int8_t* input_to_cell_weights_ptr =
+ reinterpret_cast<int8_t*>(input_to_cell_weights->data.uint8);
+ const float input_to_cell_weights_scale = input_to_cell_weights->params.scale;
+ const int8_t* input_to_output_weights_ptr =
+ reinterpret_cast<int8_t*>(input_to_output_weights->data.uint8);
+ const float input_to_output_weights_scale =
+ input_to_output_weights->params.scale;
+ const int8_t* recurrent_to_forget_weights_ptr =
+ reinterpret_cast<int8_t*>(recurrent_to_forget_weights->data.uint8);
+ const float recurrent_to_forget_weights_scale =
+ recurrent_to_forget_weights->params.scale;
+ const int8_t* recurrent_to_cell_weights_ptr =
+ reinterpret_cast<int8_t*>(recurrent_to_cell_weights->data.uint8);
+ const float recurrent_to_cell_weights_scale =
+ recurrent_to_cell_weights->params.scale;
+ const int8_t* recurrent_to_output_weights_ptr =
+ reinterpret_cast<int8_t*>(recurrent_to_output_weights->data.uint8);
+ const float recurrent_to_output_weights_scale =
+ recurrent_to_output_weights->params.scale;
+ const float* forget_gate_bias_ptr = forget_gate_bias->data.f;
+ const float* cell_bias_ptr = cell_bias->data.f;
+ const float* output_gate_bias_ptr = output_gate_bias->data.f;
+
+ float* output_state_ptr = output_state->data.f;
+ float* cell_state_ptr = cell_state->data.f;
+
+ // Temporary storage for quantized values and scaling factors.
+ int8_t* quantized_input_ptr =
+ reinterpret_cast<int8_t*>(input_quantized->data.uint8);
+ int8_t* quantized_output_state_ptr =
+ reinterpret_cast<int8_t*>(output_state_quantized->data.uint8);
+ int8_t* quantized_cell_state_ptr =
+ reinterpret_cast<int8_t*>(cell_state_quantized->data.uint8);
+ float* scaling_factors_ptr = scaling_factors->data.f;
+ float* prod_scaling_factors_ptr = prod_scaling_factors->data.f;
+ float* recovered_cell_weights_ptr = recovered_cell_weights->data.f;
+
+ // Feed the sequence into the LSTM step-by-step.
+ for (int t = 0; t < max_time; t++) {
+ const float* input_ptr_batch = input->data.f + t * n_batch * n_input;
+ float* output_ptr_batch = output->data.f + t * n_batch * n_output;
+
+ kernel_utils::LstmStep(
+ input_ptr_batch, input_to_input_weights_ptr,
+ input_to_input_weights_scale, input_to_forget_weights_ptr,
+ input_to_forget_weights_scale, input_to_cell_weights_ptr,
+ input_to_cell_weights_scale, input_to_output_weights_ptr,
+ input_to_output_weights_scale, recurrent_to_input_weights_ptr,
+ recurrent_to_input_weights_scale, recurrent_to_forget_weights_ptr,
+ recurrent_to_forget_weights_scale, recurrent_to_cell_weights_ptr,
+ recurrent_to_cell_weights_scale, recurrent_to_output_weights_ptr,
+ recurrent_to_output_weights_scale, cell_to_input_weights_ptr,
+ cell_to_input_weights_scale, cell_to_forget_weights_ptr,
+ cell_to_forget_weights_scale, cell_to_output_weights_ptr,
+ cell_to_output_weights_scale, input_gate_bias_ptr, forget_gate_bias_ptr,
+ cell_bias_ptr, output_gate_bias_ptr, projection_weights_ptr,
+ projection_weights_scale, projection_bias_ptr, params, n_batch, n_cell,
+ n_input, n_output, input_gate_scratch, forget_gate_scratch,
+ cell_scratch, output_gate_scratch, scaling_factors_ptr,
+ prod_scaling_factors_ptr, recovered_cell_weights_ptr,
+ quantized_input_ptr, quantized_output_state_ptr,
+ quantized_cell_state_ptr, output_state_ptr, cell_state_ptr,
+ output_ptr_batch);
+ }
+ return kTfLiteOk;
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+ auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
+ const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+
+ const TfLiteTensor* input_to_input_weights =
+ GetOptionalInputTensor(context, node, kInputToInputWeightsTensor);
+ const TfLiteTensor* input_to_forget_weights =
+ GetInput(context, node, kInputToForgetWeightsTensor);
+ const TfLiteTensor* input_to_cell_weights =
+ GetInput(context, node, kInputToCellWeightsTensor);
+ const TfLiteTensor* input_to_output_weights =
+ GetInput(context, node, kInputToOutputWeightsTensor);
+
+ const TfLiteTensor* recurrent_to_input_weights =
+ GetOptionalInputTensor(context, node, kRecurrentToInputWeightsTensor);
+ const TfLiteTensor* recurrent_to_forget_weights =
+ GetInput(context, node, kRecurrentToForgetWeightsTensor);
+ const TfLiteTensor* recurrent_to_cell_weights =
+ GetInput(context, node, kRecurrentToCellWeightsTensor);
+ const TfLiteTensor* recurrent_to_output_weights =
+ GetInput(context, node, kRecurrentToOutputWeightsTensor);
+
+ const TfLiteTensor* cell_to_input_weights =
+ GetOptionalInputTensor(context, node, kCellToInputWeightsTensor);
+ const TfLiteTensor* cell_to_forget_weights =
+ GetOptionalInputTensor(context, node, kCellToForgetWeightsTensor);
+ const TfLiteTensor* cell_to_output_weights =
+ GetOptionalInputTensor(context, node, kCellToOutputWeightsTensor);
+
+ const TfLiteTensor* input_gate_bias =
+ GetOptionalInputTensor(context, node, kInputGateBiasTensor);
+ const TfLiteTensor* forget_gate_bias =
+ GetInput(context, node, kForgetGateBiasTensor);
+ const TfLiteTensor* cell_bias = GetInput(context, node, kCellGateBiasTensor);
+ const TfLiteTensor* output_gate_bias =
+ GetInput(context, node, kOutputGateBiasTensor);
+
+ const TfLiteTensor* projection_weights =
+ GetOptionalInputTensor(context, node, kProjectionWeightsTensor);
+ const TfLiteTensor* projection_bias =
+ GetOptionalInputTensor(context, node, kProjectionBiasTensor);
+
+ // Index the scratch buffer pointers into the global scratch buffer.
+ TfLiteTensor* scratch_buffer = GetTemporary(context, node, /*index=*/0);
+
+ TfLiteTensor* output_state = GetOutput(context, node, kOutputStateTensor);
+ TfLiteTensor* cell_state = GetOutput(context, node, kCellStateTensor);
+ TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+
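+ // The weight type selects the kernel: float32 weights take the float path,
+ // uint8 weights the hybrid path.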
+ switch (input_to_output_weights->type) {
+ case kTfLiteFloat32: {
+ return EvalFloat(input, input_to_input_weights, input_to_forget_weights,
+ input_to_cell_weights, input_to_output_weights,
+ recurrent_to_input_weights, recurrent_to_forget_weights,
+ recurrent_to_cell_weights, recurrent_to_output_weights,
+ cell_to_input_weights, cell_to_forget_weights,
+ cell_to_output_weights, input_gate_bias,
+ forget_gate_bias, cell_bias, output_gate_bias,
+ projection_weights, projection_bias, params,
+ scratch_buffer, output_state, cell_state, output);
+ }
+ case kTfLiteUInt8: {
+ TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/1);
+ TfLiteTensor* output_state_quantized =
+ GetTemporary(context, node, /*index=*/2);
+ TfLiteTensor* cell_state_quantized =
+ GetTemporary(context, node, /*index=*/3);
+ TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/4);
+ TfLiteTensor* prod_scaling_factors =
+ GetTemporary(context, node, /*index=*/5);
+ TfLiteTensor* recovered_cell_weights =
+ GetTemporary(context, node, /*index=*/6);
+ return EvalHybrid(
+ input, input_to_input_weights, input_to_forget_weights,
+ input_to_cell_weights, input_to_output_weights,
+ recurrent_to_input_weights, recurrent_to_forget_weights,
+ recurrent_to_cell_weights, recurrent_to_output_weights,
+ cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights,
+ input_gate_bias, forget_gate_bias, cell_bias, output_gate_bias,
+ projection_weights, projection_bias, params, scratch_buffer,
+ scaling_factors, prod_scaling_factors, recovered_cell_weights,
+ input_quantized, output_state_quantized, cell_state_quantized,
+ output_state, cell_state, output);
+ }
+ default:
+ context->ReportError(context, "Type %d is not currently supported.",
+ input_to_output_weights->type);
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
} // namespace unidirectional_sequence_lstm
TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_LSTM() {
diff --git a/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm_test.cc b/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm_test.cc
index 5881ced7c7..de38bdef6f 100644
--- a/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm_test.cc
+++ b/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm_test.cc
@@ -14,7 +14,6 @@ limitations under the License.
==============================================================================*/
// Unit test for TFLite Sequential LSTM op.
-#include <iomanip>
#include <memory>
#include <vector>
@@ -37,7 +36,8 @@ class UnidirectionalLSTMOpModel : public SingleOpModel {
bool use_peephole, bool use_projection_weights,
bool use_projection_bias, float cell_clip,
float proj_clip,
- const std::vector<std::vector<int>>& input_shapes)
+ const std::vector<std::vector<int>>& input_shapes,
+ const TensorType& weights_type = TensorType_FLOAT32)
: n_batch_(n_batch),
n_input_(n_input),
n_cell_(n_cell),
@@ -48,31 +48,31 @@ class UnidirectionalLSTMOpModel : public SingleOpModel {
if (use_cifg) {
input_to_input_weights_ = AddNullInput();
} else {
- input_to_input_weights_ = AddInput(TensorType_FLOAT32);
+ input_to_input_weights_ = AddInput(weights_type);
}
- input_to_forget_weights_ = AddInput(TensorType_FLOAT32);
- input_to_cell_weights_ = AddInput(TensorType_FLOAT32);
- input_to_output_weights_ = AddInput(TensorType_FLOAT32);
+ input_to_forget_weights_ = AddInput(weights_type);
+ input_to_cell_weights_ = AddInput(weights_type);
+ input_to_output_weights_ = AddInput(weights_type);
if (use_cifg) {
recurrent_to_input_weights_ = AddNullInput();
} else {
- recurrent_to_input_weights_ = AddInput(TensorType_FLOAT32);
+ recurrent_to_input_weights_ = AddInput(weights_type);
}
- recurrent_to_forget_weights_ = AddInput(TensorType_FLOAT32);
- recurrent_to_cell_weights_ = AddInput(TensorType_FLOAT32);
- recurrent_to_output_weights_ = AddInput(TensorType_FLOAT32);
+ recurrent_to_forget_weights_ = AddInput(weights_type);
+ recurrent_to_cell_weights_ = AddInput(weights_type);
+ recurrent_to_output_weights_ = AddInput(weights_type);
if (use_peephole) {
if (use_cifg) {
cell_to_input_weights_ = AddNullInput();
} else {
- cell_to_input_weights_ = AddInput(TensorType_FLOAT32);
+ cell_to_input_weights_ = AddInput(weights_type);
}
- cell_to_forget_weights_ = AddInput(TensorType_FLOAT32);
- cell_to_output_weights_ = AddInput(TensorType_FLOAT32);
+ cell_to_forget_weights_ = AddInput(weights_type);
+ cell_to_output_weights_ = AddInput(weights_type);
} else {
cell_to_input_weights_ = AddNullInput();
cell_to_forget_weights_ = AddNullInput();
@@ -89,7 +89,7 @@ class UnidirectionalLSTMOpModel : public SingleOpModel {
output_gate_bias_ = AddInput(TensorType_FLOAT32);
if (use_projection_weights) {
- projection_weights_ = AddInput(TensorType_FLOAT32);
+ projection_weights_ = AddInput(weights_type);
if (use_projection_bias) {
projection_bias_ = AddInput(TensorType_FLOAT32);
} else {
@@ -196,8 +196,9 @@ class UnidirectionalLSTMOpModel : public SingleOpModel {
zero_buffer.get() + zero_buffer_size);
}
- void SetInput(int offset, float* begin, float* end) {
- PopulateTensor(input_, offset, begin, end);
+ void SetInput(int offset, const float* begin, const float* end) {
+ PopulateTensor(input_, offset, const_cast<float*>(begin),
+ const_cast<float*>(end));
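+ // PopulateTensor only reads from [begin, end), so the const_cast is safe.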
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
@@ -208,7 +209,7 @@ class UnidirectionalLSTMOpModel : public SingleOpModel {
int num_batches() { return n_batch_; }
int sequence_length() { return sequence_length_; }
- private:
+ protected:
int input_;
int input_to_input_weights_;
int input_to_forget_weights_;
@@ -243,7 +244,183 @@ class UnidirectionalLSTMOpModel : public SingleOpModel {
int sequence_length_;
};
-TEST(LSTMOpTest, BlackBoxTestNoCifgNoPeepholeNoProjectionNoClipping) {
+// The hybrid model has quantized weights.
+class HybridUnidirectionalLSTMOpModel : public UnidirectionalLSTMOpModel {
+ public:
+ HybridUnidirectionalLSTMOpModel(
+ int n_batch, int n_input, int n_cell, int n_output, int sequence_length,
+ bool use_cifg, bool use_peephole, bool use_projection_weights,
+ bool use_projection_bias, float cell_clip, float proj_clip,
+ const std::vector<std::vector<int>>& input_shapes)
+ : UnidirectionalLSTMOpModel(
+ n_batch, n_input, n_cell, n_output, sequence_length, use_cifg,
+ use_peephole, use_projection_weights, use_projection_bias,
+ cell_clip, proj_clip, input_shapes, TensorType_UINT8) {}
+
+ void SetInputToInputWeights(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(input_to_input_weights_, f);
+ }
+
+ void SetInputToForgetWeights(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(input_to_forget_weights_, f);
+ }
+
+ void SetInputToCellWeights(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(input_to_cell_weights_, f);
+ }
+
+ void SetInputToOutputWeights(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(input_to_output_weights_, f);
+ }
+
+ void SetRecurrentToInputWeights(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(recurrent_to_input_weights_, f);
+ }
+
+ void SetRecurrentToForgetWeights(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(recurrent_to_forget_weights_, f);
+ }
+
+ void SetRecurrentToCellWeights(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(recurrent_to_cell_weights_, f);
+ }
+
+ void SetRecurrentToOutputWeights(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(recurrent_to_output_weights_, f);
+ }
+
+ void SetCellToInputWeights(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(cell_to_input_weights_, f);
+ }
+
+ void SetCellToForgetWeights(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(cell_to_forget_weights_, f);
+ }
+
+ void SetCellToOutputWeights(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(cell_to_output_weights_, f);
+ }
+
+ void SetProjectionWeights(std::initializer_list<float> f) {
+ SymmetricQuantizeAndPopulate(projection_weights_, f);
+ }
+};
+
+class BaseLstmTest : public ::testing::Test {
+ protected:
+ // Weights of the LSTM model. Some are optional.
+ std::initializer_list<float> input_to_input_weights_;
+ std::initializer_list<float> input_to_cell_weights_;
+ std::initializer_list<float> input_to_forget_weights_;
+ std::initializer_list<float> input_to_output_weights_;
+ std::initializer_list<float> input_gate_bias_;
+ std::initializer_list<float> cell_gate_bias_;
+ std::initializer_list<float> forget_gate_bias_;
+ std::initializer_list<float> output_gate_bias_;
+ std::initializer_list<float> recurrent_to_input_weights_;
+ std::initializer_list<float> recurrent_to_cell_weights_;
+ std::initializer_list<float> recurrent_to_forget_weights_;
+ std::initializer_list<float> recurrent_to_output_weights_;
+ std::initializer_list<float> cell_to_input_weights_;
+ std::initializer_list<float> cell_to_forget_weights_;
+ std::initializer_list<float> cell_to_output_weights_;
+ std::initializer_list<float> projection_weights_;
+
+ // LSTM input is stored as a num_batch x num_inputs vector.
+ std::vector<std::vector<float>> lstm_input_;
+ // LSTM output is stored as a num_batch x num_outputs vector.
+ std::vector<std::vector<float>> lstm_golden_output_;
+
+ // Feeds the input through the LSTM and compares the output against the
+ // golden values, up to the given tolerance.
+ void VerifyGoldens(const std::vector<std::vector<float>>& input,
+ const std::vector<std::vector<float>>& output,
+ UnidirectionalLSTMOpModel* lstm, float tolerance = 1e-5) {
+ const int num_batches = input.size();
+ EXPECT_GT(num_batches, 0);
+ const int num_inputs = lstm->num_inputs();
+ EXPECT_GT(num_inputs, 0);
+ const int input_sequence_size = input[0].size() / num_inputs;
+ EXPECT_GT(input_sequence_size, 0);
+ // Feed the whole sequence as input.
+ for (int i = 0; i < input_sequence_size; ++i) {
+ for (int b = 0; b < num_batches; ++b) {
+ const float* batch_start = input[b].data() + i * num_inputs;
+ const float* batch_end = batch_start + num_inputs;
+
+ lstm->SetInput(((i * num_batches) + b) * lstm->num_inputs(),
+ batch_start, batch_end);
+ }
+ }
+
+ lstm->Invoke();
+
+ const int num_outputs = lstm->num_outputs();
+ EXPECT_GT(num_outputs, 0);
+ std::vector<float> expected;
+ for (int i = 0; i < input_sequence_size; ++i) {
+ for (int b = 0; b < num_batches; ++b) {
+ const float* golden_start_batch = output[b].data() + i * num_outputs;
+ const float* golden_end_batch = golden_start_batch + num_outputs;
+
+ expected.insert(expected.end(), golden_start_batch, golden_end_batch);
+ }
+ }
+
+ EXPECT_THAT(lstm->GetOutput(),
+ ElementsAreArray(ArrayFloatNear(expected, tolerance)));
+ }
+};
+
+class NoCifgNoPeepholeNoProjectionNoClippingLstmTest : public BaseLstmTest {
+ void SetUp() override {
+ input_to_input_weights_ = {-0.45018822, -0.02338299, -0.0870589,
+ -0.34550029, 0.04266912, -0.15680569,
+ -0.34856534, 0.43890524};
+ input_to_cell_weights_ = {-0.50013041, 0.1370284, 0.11810488, 0.2013163,
+ -0.20583314, 0.44344562, 0.22077113, -0.29909778};
+ input_to_forget_weights_ = {0.09701663, 0.20334584, -0.50592935,
+ -0.31343272, -0.40032279, 0.44781327,
+ 0.01387155, -0.35593212};
+ input_to_output_weights_ = {-0.25065863, -0.28290087, 0.04613829,
+ 0.40525138, 0.44272184, 0.03897077,
+ -0.1556896, 0.19487578};
+ input_gate_bias_ = {0., 0., 0., 0.};
+ cell_gate_bias_ = {0., 0., 0., 0.};
+ forget_gate_bias_ = {1., 1., 1., 1.};
+ output_gate_bias_ = {0., 0., 0., 0.};
+
+ recurrent_to_input_weights_ = {
+ -0.0063535, -0.2042388, 0.31454784, -0.35746509,
+ 0.28902304, 0.08183324, -0.16555229, 0.02286911,
+ -0.13566875, 0.03034258, 0.48091322, -0.12528998,
+ 0.24077177, -0.51332325, -0.33502164, 0.10629296};
+
+ recurrent_to_cell_weights_ = {
+ -0.3407414, 0.24443203, -0.2078532, 0.26320225,
+ 0.05695659, -0.00123841, -0.4744786, -0.35869038,
+ -0.06418842, -0.13502428, -0.501764, 0.22830659,
+ -0.46367589, 0.26016325, -0.03894562, -0.16368064};
+
+ recurrent_to_forget_weights_ = {
+ -0.48684245, -0.06655136, 0.42224967, 0.2112639,
+ 0.27654213, 0.20864892, -0.07646349, 0.45877004,
+ 0.00141793, -0.14609534, 0.36447752, 0.09196436,
+ 0.28053468, 0.01560611, -0.20127171, -0.01140004};
+
+ recurrent_to_output_weights_ = {
+ 0.43385774, -0.17194885, 0.2718237, 0.09215671,
+ 0.24107647, -0.39835793, 0.18212086, 0.01301402,
+ 0.48572797, -0.50656658, 0.20047462, -0.20607421,
+ -0.51818722, -0.15390486, 0.0468148, 0.39922136};
+
+ lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
+ lstm_golden_output_ = {{-0.02973187, 0.1229473, 0.20885126, -0.15358765,
+ -0.03716109, 0.12507336, 0.41193449, -0.20860538,
+ -0.15053082, 0.09120187, 0.24278517, -0.12222792}};
+ }
+};
+
+TEST_F(NoCifgNoPeepholeNoProjectionNoClippingLstmTest, LstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
// n_cell and n_output have the same size when there is no projection.
@@ -252,9 +429,11 @@ TEST(LSTMOpTest, BlackBoxTestNoCifgNoPeepholeNoProjectionNoClipping) {
const int sequence_length = 3;
UnidirectionalLSTMOpModel lstm(
- n_batch, n_input, n_cell, n_output, sequence_length, /*use_cifg=*/false,
- /*use_peephole=*/false, /*use_projection_weights=*/false,
- /*use_projection_bias=*/false, /*cell_clip=*/0.0, /*proj_clip=*/0.0,
+ n_batch, n_input, n_cell, n_output, sequence_length,
+ /*use_cifg=*/false, /*use_peephole=*/false,
+ /*use_projection_weights=*/false,
+ /*use_projection_bias=*/false,
+ /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input}, // input tensor
@@ -281,77 +460,138 @@ TEST(LSTMOpTest, BlackBoxTestNoCifgNoPeepholeNoProjectionNoClipping) {
{0}, // projection_bias tensor
});
- lstm.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589,
- -0.34550029, 0.04266912, -0.15680569,
- -0.34856534, 0.43890524});
+ lstm.SetInputToInputWeights(input_to_input_weights_);
+ lstm.SetInputToCellWeights(input_to_cell_weights_);
+ lstm.SetInputToForgetWeights(input_to_forget_weights_);
+ lstm.SetInputToOutputWeights(input_to_output_weights_);
- lstm.SetInputToCellWeights({-0.50013041, 0.1370284, 0.11810488, 0.2013163,
- -0.20583314, 0.44344562, 0.22077113,
- -0.29909778});
+ lstm.SetInputGateBias(input_gate_bias_);
+ lstm.SetCellBias(cell_gate_bias_);
+ lstm.SetForgetGateBias(forget_gate_bias_);
+ lstm.SetOutputGateBias(output_gate_bias_);
- lstm.SetInputToForgetWeights({0.09701663, 0.20334584, -0.50592935,
- -0.31343272, -0.40032279, 0.44781327,
- 0.01387155, -0.35593212});
+ lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
+ lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
+ lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
+ lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
+
+ // Resetting cell_state and output_state
+ lstm.ResetCellState();
+ lstm.ResetOutputState();
+
+ VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
+}
- lstm.SetInputToOutputWeights({-0.25065863, -0.28290087, 0.04613829,
- 0.40525138, 0.44272184, 0.03897077, -0.1556896,
- 0.19487578});
+TEST_F(NoCifgNoPeepholeNoProjectionNoClippingLstmTest, HybridLstmBlackBoxTest) {
+ const int n_batch = 1;
+ const int n_input = 2;
+ // n_cell and n_output have the same size when there is no projection.
+ const int n_cell = 4;
+ const int n_output = 4;
+ const int sequence_length = 3;
- lstm.SetInputGateBias({0., 0., 0., 0.});
+ HybridUnidirectionalLSTMOpModel lstm(
+ n_batch, n_input, n_cell, n_output, sequence_length,
+ /*use_cifg=*/false, /*use_peephole=*/false,
+ /*use_projection_weights=*/false,
+ /*use_projection_bias=*/false, /*cell_clip=*/0.0, /*proj_clip=*/0.0,
+ {
+ {sequence_length, n_batch, n_input}, // input tensor
- lstm.SetCellBias({0., 0., 0., 0.});
+ {n_cell, n_input}, // input_to_input_weight tensor
+ {n_cell, n_input}, // input_to_forget_weight tensor
+ {n_cell, n_input}, // input_to_cell_weight tensor
+ {n_cell, n_input}, // input_to_output_weight tensor
- lstm.SetForgetGateBias({1., 1., 1., 1.});
+ {n_cell, n_output}, // recurrent_to_input_weight tensor
+ {n_cell, n_output}, // recurrent_to_forget_weight tensor
+ {n_cell, n_output}, // recurrent_to_cell_weight tensor
+ {n_cell, n_output}, // recurrent_to_output_weight tensor
- lstm.SetOutputGateBias({0., 0., 0., 0.});
+ {0}, // cell_to_input_weight tensor
+ {0}, // cell_to_forget_weight tensor
+ {0}, // cell_to_output_weight tensor
- lstm.SetRecurrentToInputWeights(
- {-0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
- -0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
- -0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296});
+ {n_cell}, // input_gate_bias tensor
+ {n_cell}, // forget_gate_bias tensor
+ {n_cell}, // cell_bias tensor
+ {n_cell}, // output_gate_bias tensor
- lstm.SetRecurrentToCellWeights(
- {-0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
- -0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
- -0.46367589, 0.26016325, -0.03894562, -0.16368064});
+ {0, 0}, // projection_weight tensor
+ {0}, // projection_bias tensor
+ });
- lstm.SetRecurrentToForgetWeights(
- {-0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
- -0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
- 0.28053468, 0.01560611, -0.20127171, -0.01140004});
+ lstm.SetInputToInputWeights(input_to_input_weights_);
+ lstm.SetInputToCellWeights(input_to_cell_weights_);
+ lstm.SetInputToForgetWeights(input_to_forget_weights_);
+ lstm.SetInputToOutputWeights(input_to_output_weights_);
- lstm.SetRecurrentToOutputWeights(
- {0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
- 0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
- -0.51818722, -0.15390486, 0.0468148, 0.39922136});
+ lstm.SetInputGateBias(input_gate_bias_);
+ lstm.SetCellBias(cell_gate_bias_);
+ lstm.SetForgetGateBias(forget_gate_bias_);
+ lstm.SetOutputGateBias(output_gate_bias_);
- // Input should have n_input * sequence_length many values.
- static float lstm_input[] = {2., 3., 3., 4., 1., 1.};
- static float lstm_golden_output[] = {-0.02973187, 0.1229473, 0.20885126,
- -0.15358765, -0.03716109, 0.12507336,
- 0.41193449, -0.20860538, -0.15053082,
- 0.09120187, 0.24278517, -0.12222792};
+ lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
+ lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
+ lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
+ lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
// Resetting cell_state and output_state
lstm.ResetCellState();
lstm.ResetOutputState();
- float* batch0_start = lstm_input;
- float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length();
+ VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm,
+ /*tolerance=*/0.0157651);
+}
- lstm.SetInput(0, batch0_start, batch0_end);
+class CifgPeepholeNoProjectionNoClippingLstmTest : public BaseLstmTest {
+ void SetUp() override {
+ input_to_cell_weights_ = {-0.49770179, -0.27711356, -0.09624726,
+ 0.05100781, 0.04717243, 0.48944736,
+ -0.38535351, -0.17212132};
- lstm.Invoke();
+ input_to_forget_weights_ = {-0.55291498, -0.42866567, 0.13056988,
+ -0.3633365, -0.22755712, 0.28253698,
+ 0.24407166, 0.33826375};
- float* golden_start = lstm_golden_output;
- float* golden_end =
- golden_start + lstm.num_outputs() * lstm.sequence_length();
- std::vector<float> expected;
- expected.insert(expected.end(), golden_start, golden_end);
- EXPECT_THAT(lstm.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
-}
+ input_to_output_weights_ = {0.10725588, -0.02335852, -0.55932593,
+ -0.09426838, -0.44257352, 0.54939759,
+ 0.01533556, 0.42751634};
+ cell_gate_bias_ = {0., 0., 0., 0.};
+ forget_gate_bias_ = {1., 1., 1., 1.};
+ output_gate_bias_ = {0., 0., 0., 0.};
+
+ recurrent_to_cell_weights_ = {
+ 0.54066205, -0.32668582, -0.43562764, -0.56094903,
+ 0.42957711, 0.01841056, -0.32764608, -0.33027974,
+ -0.10826075, 0.20675004, 0.19069612, -0.03026325,
+ -0.54532051, 0.33003211, 0.44901288, 0.21193194};
+
+ recurrent_to_forget_weights_ = {
+ -0.13832897, -0.0515101, -0.2359007, -0.16661474,
+ -0.14340827, 0.36986142, 0.23414481, 0.55899,
+ 0.10798943, -0.41174671, 0.17751795, -0.34484994,
+ -0.35874045, -0.11352962, 0.27268326, 0.54058349};
+
+ recurrent_to_output_weights_ = {
+ 0.41613156, 0.42610586, -0.16495961, -0.5663873,
+ 0.30579174, -0.05115908, -0.33941799, 0.23364776,
+ 0.11178309, 0.09481031, -0.26424935, 0.46261835,
+ 0.50248802, 0.26114327, -0.43736315, 0.33149987};
+
+ cell_to_forget_weights_ = {0.47485286, -0.51955009, -0.24458408,
+ 0.31544167};
+ cell_to_output_weights_ = {-0.17135078, 0.82760304, 0.85573703,
+ -0.77109635};
+
+ lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
+ lstm_golden_output_ = {{-0.36444446, -0.00352185, 0.12886585, -0.05163646,
+ -0.42312205, -0.01218222, 0.24201041, -0.08124574,
+ -0.358325, -0.04621704, 0.21641694, -0.06471302}};
+ }
+};
-TEST(LSTMOpTest, BlackBoxTestWithCifgWithPeepholeNoProjectionNoClipping) {
+TEST_F(CifgPeepholeNoProjectionNoClippingLstmTest, LstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
// n_cell and n_output have the same size when there is no projection.
@@ -360,9 +600,11 @@ TEST(LSTMOpTest, BlackBoxTestWithCifgWithPeepholeNoProjectionNoClipping) {
const int sequence_length = 3;
UnidirectionalLSTMOpModel lstm(
- n_batch, n_input, n_cell, n_output, sequence_length, /*use_cifg=*/true,
- /*use_peephole=*/true, /*use_projection_weights=*/false,
- /*use_projection_bias=*/false, /*cell_clip=*/0.0, /*proj_clip=*/0.0,
+ n_batch, n_input, n_cell, n_output, sequence_length,
+ /*use_cifg=*/true, /*use_peephole=*/true,
+ /*use_projection_weights=*/false,
+ /*use_projection_bias=*/false,
+ /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input}, // input tensor
@@ -389,71 +631,690 @@ TEST(LSTMOpTest, BlackBoxTestWithCifgWithPeepholeNoProjectionNoClipping) {
{0}, // projection_bias tensor
});
- lstm.SetInputToCellWeights({-0.49770179, -0.27711356, -0.09624726, 0.05100781,
- 0.04717243, 0.48944736, -0.38535351,
- -0.17212132});
- lstm.SetInputToForgetWeights({-0.55291498, -0.42866567, 0.13056988,
- -0.3633365, -0.22755712, 0.28253698, 0.24407166,
- 0.33826375});
- lstm.SetInputToOutputWeights({0.10725588, -0.02335852, -0.55932593,
- -0.09426838, -0.44257352, 0.54939759,
- 0.01533556, 0.42751634});
- lstm.SetCellBias({0., 0., 0., 0.});
- lstm.SetForgetGateBias({1., 1., 1., 1.});
- lstm.SetOutputGateBias({0., 0., 0., 0.});
- lstm.SetRecurrentToCellWeights(
- {0.54066205, -0.32668582, -0.43562764, -0.56094903, 0.42957711,
- 0.01841056, -0.32764608, -0.33027974, -0.10826075, 0.20675004,
- 0.19069612, -0.03026325, -0.54532051, 0.33003211, 0.44901288,
- 0.21193194});
- lstm.SetRecurrentToForgetWeights(
- {-0.13832897, -0.0515101, -0.2359007, -0.16661474, -0.14340827,
- 0.36986142, 0.23414481, 0.55899, 0.10798943, -0.41174671, 0.17751795,
- -0.34484994, -0.35874045, -0.11352962, 0.27268326, 0.54058349});
- lstm.SetRecurrentToOutputWeights(
- {0.41613156, 0.42610586, -0.16495961, -0.5663873, 0.30579174, -0.05115908,
- -0.33941799, 0.23364776, 0.11178309, 0.09481031, -0.26424935, 0.46261835,
- 0.50248802, 0.26114327, -0.43736315, 0.33149987});
- lstm.SetCellToForgetWeights(
- {0.47485286, -0.51955009, -0.24458408, 0.31544167});
- lstm.SetCellToOutputWeights(
- {-0.17135078, 0.82760304, 0.85573703, -0.77109635});
- static float lstm_input[] = {2., 3., 3., 4., 1., 1.};
- static float lstm_golden_output[] = {-0.36444446, -0.00352185, 0.12886585,
- -0.05163646, -0.42312205, -0.01218222,
- 0.24201041, -0.08124574, -0.358325,
- -0.04621704, 0.21641694, -0.06471302};
+ lstm.SetInputToCellWeights(input_to_cell_weights_);
+ lstm.SetInputToForgetWeights(input_to_forget_weights_);
+ lstm.SetInputToOutputWeights(input_to_output_weights_);
+ lstm.SetCellBias(cell_gate_bias_);
+ lstm.SetForgetGateBias(forget_gate_bias_);
+ lstm.SetOutputGateBias(output_gate_bias_);
+ lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
+ lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
+ lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
+
+ lstm.SetCellToForgetWeights(cell_to_forget_weights_);
+ lstm.SetCellToOutputWeights(cell_to_output_weights_);
+
+ // Resetting cell_state and output_state
+ lstm.ResetCellState();
+ lstm.ResetOutputState();
+
+ VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
+}
+
+TEST_F(CifgPeepholeNoProjectionNoClippingLstmTest, HybridLstmBlackBoxTest) {
+ const int n_batch = 1;
+ const int n_input = 2;
+ // n_cell and n_output have the same size when there is no projection.
+ const int n_cell = 4;
+ const int n_output = 4;
+ const int sequence_length = 3;
+
+ HybridUnidirectionalLSTMOpModel lstm(
+ n_batch, n_input, n_cell, n_output, sequence_length,
+ /*use_cifg=*/true, /*use_peephole=*/true,
+ /*use_projection_weights=*/false,
+ /*use_projection_bias=*/false,
+ /*cell_clip=*/0.0, /*proj_clip=*/0.0,
+ {
+ {sequence_length, n_batch, n_input}, // input tensor
+
+ {0, 0}, // input_to_input_weight tensor
+ {n_cell, n_input}, // input_to_forget_weight tensor
+ {n_cell, n_input}, // input_to_cell_weight tensor
+ {n_cell, n_input}, // input_to_output_weight tensor
+ {0, 0}, // recurrent_to_input_weight tensor
+ {n_cell, n_output}, // recurrent_to_forget_weight tensor
+ {n_cell, n_output}, // recurrent_to_cell_weight tensor
+ {n_cell, n_output}, // recurrent_to_output_weight tensor
+ {0}, // cell_to_input_weight tensor
+ {n_cell}, // cell_to_forget_weight tensor
+ {n_cell}, // cell_to_output_weight tensor
+ {0}, // input_gate_bias tensor
+ {n_cell}, // forget_gate_bias tensor
+ {n_cell}, // cell_bias tensor
+ {n_cell}, // output_gate_bias tensor
+ {0, 0}, // projection_weight tensor
+ {0}, // projection_bias tensor
+ });
+ lstm.SetInputToCellWeights(input_to_cell_weights_);
+ lstm.SetInputToForgetWeights(input_to_forget_weights_);
+ lstm.SetInputToOutputWeights(input_to_output_weights_);
+ lstm.SetCellBias(cell_gate_bias_);
+ lstm.SetForgetGateBias(forget_gate_bias_);
+ lstm.SetOutputGateBias(output_gate_bias_);
+ lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
+ lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
+ lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
+ lstm.SetCellToForgetWeights(cell_to_forget_weights_);
+ lstm.SetCellToOutputWeights(cell_to_output_weights_);
// Resetting cell_state and output_state
lstm.ResetCellState();
lstm.ResetOutputState();
- float* batch0_start = lstm_input;
- float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length();
-
- lstm.SetInput(0, batch0_start, batch0_end);
-
- lstm.Invoke();
-
- float* golden_start = lstm_golden_output;
- float* golden_end =
- golden_start + lstm.num_outputs() * lstm.sequence_length();
- std::vector<float> expected;
- expected.insert(expected.end(), golden_start, golden_end);
- EXPECT_THAT(lstm.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
+ VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm, /*tolerance=*/0.03573);
}
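The hybrid test above feeds the same float fixture weights through a HybridUnidirectionalLSTMOpModel which, as TFLite hybrid kernels generally do, stores the weights 8-bit quantized while keeping activations in float; the relaxed /*tolerance=*/0.03573 absorbs the resulting quantization error. A minimal sketch of how such a bound reaches the matcher, patterned on the inline check this diff deletes (variable names are illustrative, not from the commit):

// Sketch only: the deleted check relied on ArrayFloatNear's tight default
// absolute-error bound; a hybrid run widens it to tolerate weight
// quantization.
std::vector<float> expected = lstm_golden_output_[0];
EXPECT_THAT(lstm.GetOutput(),
            ElementsAreArray(ArrayFloatNear(expected,
                                            /*max_abs_error=*/0.03573)));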
-TEST(LSTMOpTest, BlackBoxTestWithPeepholeWithProjectionNoClipping) {
+class NoCifgPeepholeProjectionClippingLstmTest : public BaseLstmTest {
+ void SetUp() override {
+ input_to_input_weights_ = {
+ 0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
+ 0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
+ -0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
+ -0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
+ -0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
+ -0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
+ -0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
+ 0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
+ 0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
+ 0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
+ -0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
+ 0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
+ -0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
+ -0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
+ -0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
+ 0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
+ -0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
+ -0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
+ -0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
+ -0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677};
+
+ input_to_forget_weights_ = {
+ -0.0018401089, -0.004852237, 0.03698424, 0.014181704,
+ 0.028273236, -0.016726194, -0.05249759, -0.10204261,
+ 0.00861066, -0.040979505, -0.009899187, 0.01923892,
+ -0.028177269, -0.08535103, -0.14585495, 0.10662567,
+ -0.01909731, -0.017883534, -0.0047269356, -0.045103323,
+ 0.0030784295, 0.076784775, 0.07463696, 0.094531395,
+ 0.0814421, -0.12257899, -0.033945758, -0.031303465,
+ 0.045630626, 0.06843887, -0.13492945, -0.012480007,
+ -0.0811829, -0.07224499, -0.09628791, 0.045100946,
+ 0.0012300825, 0.013964662, 0.099372394, 0.02543059,
+ 0.06958324, 0.034257296, 0.0482646, 0.06267997,
+ 0.052625068, 0.12784666, 0.07077897, 0.025725935,
+ 0.04165009, 0.07241905, 0.018668644, -0.037377294,
+ -0.06277783, -0.08833636, -0.040120605, -0.011405586,
+ -0.007808335, -0.010301386, -0.005102167, 0.027717464,
+ 0.05483423, 0.11449111, 0.11289652, 0.10939839,
+ 0.13396506, -0.08402166, -0.01901462, -0.044678304,
+ -0.07720565, 0.014350063, -0.11757958, -0.0652038,
+ -0.08185733, -0.076754324, -0.092614375, 0.10405491,
+ 0.052960336, 0.035755895, 0.035839386, -0.012540553,
+ 0.036881298, 0.02913376, 0.03420159, 0.05448447,
+ -0.054523353, 0.02582715, 0.02327355, -0.011857179,
+ -0.0011980024, -0.034641717, -0.026125094, -0.17582615,
+ -0.15923657, -0.27486774, -0.0006143371, 0.0001771948,
+ -8.470171e-05, 0.02651807, 0.045790765, 0.06956496};
+
+ input_to_cell_weights_ = {
+ -0.04580283, -0.09549462, -0.032418985, -0.06454633,
+ -0.043528453, 0.043018587, -0.049152344, -0.12418144,
+ -0.078985475, -0.07596889, 0.019484362, -0.11434962,
+ -0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
+ -0.025034338, -0.0028890965, 0.048929527, 0.06235075,
+ 0.10665918, -0.032036792, -0.08505916, -0.10843358,
+ -0.13002433, -0.036816437, -0.02130134, -0.016518239,
+ 0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
+ -0.10652836, -0.1037554, -0.13056071, -0.03266643,
+ -0.033702414, -0.006473424, -0.04611692, 0.014419339,
+ -0.025174323, 0.0396852, 0.081777506, 0.06157468,
+ 0.10210095, -0.009658194, 0.046511717, 0.03603906,
+ 0.0069369148, 0.015960095, -0.06507666, 0.09551598,
+ 0.053568836, 0.06408714, 0.12835667, -0.008714329,
+ -0.20211966, -0.12093674, 0.029450472, 0.2849013,
+ -0.029227901, 0.1164364, -0.08560263, 0.09941786,
+ -0.036999565, -0.028842626, -0.0033637602, -0.017012902,
+ -0.09720865, -0.11193351, -0.029155117, -0.017936034,
+ -0.009768936, -0.04223324, -0.036159635, 0.06505112,
+ -0.021742892, -0.023377212, -0.07221364, -0.06430552,
+ 0.05453865, 0.091149814, 0.06387331, 0.007518393,
+ 0.055960953, 0.069779344, 0.046411168, 0.10509911,
+ 0.07463894, 0.0075130584, 0.012850982, 0.04555431,
+ 0.056955688, 0.06555285, 0.050801456, -0.009862683,
+ 0.00826772, -0.026555609, -0.0073611983, -0.0014897042};
+
+ input_to_output_weights_ = {
+ -0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
+ -0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
+ 0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
+ -0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
+ -0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
+ 0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
+ -0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
+ -0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
+ -0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
+ -0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
+ 0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
+ 0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
+ 0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
+ -0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
+ 0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
+ 0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
+ -0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
+ 0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
+ -0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
+ -0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956};
+
+ input_gate_bias_ = {0.02234832, 0.14757581, 0.18176508, 0.10380666,
+ 0.053110216, -0.06928846, -0.13942584, -0.11816189,
+ 0.19483899, 0.03652339, -0.10250295, 0.036714908,
+ -0.18426876, 0.036065217, 0.21810818, 0.02383196,
+ -0.043370757, 0.08690144, -0.04444982, 0.00030581196};
+
+ forget_gate_bias_ = {0.035185695, -0.042891346, -0.03032477, 0.23027696,
+ 0.11098921, 0.15378423, 0.09263801, 0.09790885,
+ 0.09508917, 0.061199076, 0.07665568, -0.015443159,
+ -0.03499149, 0.046190713, 0.08895977, 0.10899629,
+ 0.40694186, 0.06030037, 0.012413437, -0.06108739};
+
+ cell_gate_bias_ = {-0.024379363, 0.0055531194, 0.23377132, 0.033463873,
+ -0.1483596, -0.10639995, -0.091433935, 0.058573797,
+ -0.06809782, -0.07889636, -0.043246906, -0.09829136,
+ -0.4279842, 0.034901652, 0.18797937, 0.0075234566,
+ 0.016178843, 0.1749513, 0.13975595, 0.92058027};
+
+ output_gate_bias_ = {0.046159424, -0.0012809046, 0.03563469, 0.12648113,
+ 0.027195795, 0.35373217, -0.018957434, 0.008907322,
+ -0.0762701, 0.12018895, 0.04216877, 0.0022856654,
+ 0.040952638, 0.3147856, 0.08225149, -0.057416286,
+ -0.14995944, -0.008040261, 0.13208859, 0.029760877};
+
+ recurrent_to_input_weights_ = {
+ -0.001374326, -0.078856036, 0.10672688, 0.029162422,
+ -0.11585556, 0.02557986, -0.13446963, -0.035785314,
+ -0.01244275, 0.025961924, -0.02337298, -0.044228926,
+ -0.055839065, -0.046598054, -0.010546039, -0.06900766,
+ 0.027239809, 0.022582639, -0.013296484, -0.05459212,
+ 0.08981, -0.045407712, 0.08682226, -0.06867011,
+ -0.14390695, -0.02916037, 0.000996957, 0.091420636,
+ 0.14283475, -0.07390571, -0.06402044, 0.062524505,
+ -0.093129106, 0.04860203, -0.08364217, -0.08119002,
+ 0.009352075, 0.22920375, 0.0016303885, 0.11583097,
+ -0.13732095, 0.012405723, -0.07551853, 0.06343048,
+ 0.12162708, -0.031923793, -0.014335606, 0.01790974,
+ -0.10650317, -0.0724401, 0.08554849, -0.05727212,
+ 0.06556731, -0.042729504, -0.043227166, 0.011683251,
+ -0.013082158, -0.029302018, -0.010899579, -0.062036745,
+ -0.022509435, -0.00964907, -0.01567329, 0.04260106,
+ -0.07787477, -0.11576462, 0.017356863, 0.048673786,
+ -0.017577527, -0.05527947, -0.082487635, -0.040137455,
+ -0.10820036, -0.04666372, 0.022746278, -0.07851417,
+ 0.01068115, 0.032956902, 0.022433773, 0.0026891115,
+ 0.08944216, -0.0685835, 0.010513544, 0.07228705,
+ 0.02032331, -0.059686817, -0.0005566496, -0.086984694,
+ 0.040414046, -0.1380399, 0.094208956, -0.05722982,
+ 0.012092817, -0.04989123, -0.086576, -0.003399834,
+ -0.04696032, -0.045747425, 0.10091314, 0.048676282,
+ -0.029037097, 0.031399418, -0.0040285117, 0.047237843,
+ 0.09504992, 0.041799378, -0.049185462, -0.031518843,
+ -0.10516937, 0.026374253, 0.10058866, -0.0033195973,
+ -0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
+ -0.10167381, 0.042500053, -0.01447153, 0.06464186,
+ -0.017142897, 0.03312627, 0.009205989, 0.024138335,
+ -0.011337001, 0.035530265, -0.010912711, 0.0706555,
+ -0.005894094, 0.051841937, -0.1401738, -0.02351249,
+ 0.0365468, 0.07590991, 0.08838724, 0.021681072,
+ -0.10086113, 0.019608743, -0.06195883, 0.077335775,
+ 0.023646897, -0.095322326, 0.02233014, 0.09756986,
+ -0.048691444, -0.009579111, 0.07595467, 0.11480546,
+ -0.09801813, 0.019894179, 0.08502348, 0.004032281,
+ 0.037211012, 0.068537936, -0.048005626, -0.091520436,
+ -0.028379958, -0.01556313, 0.06554592, -0.045599163,
+ -0.01672207, -0.020169014, -0.011877351, -0.20212261,
+ 0.010889619, 0.0047078193, 0.038385306, 0.08540671,
+ -0.017140968, -0.0035865551, 0.016678626, 0.005633034,
+ 0.015963363, 0.00871737, 0.060130805, 0.028611384,
+ 0.10109069, -0.015060172, -0.07894427, 0.06401885,
+ 0.011584063, -0.024466386, 0.0047652307, -0.09041358,
+ 0.030737216, -0.0046374933, 0.14215417, -0.11823516,
+ 0.019899689, 0.006106124, -0.027092824, 0.0786356,
+ 0.05052217, -0.058925, -0.011402121, -0.024987547,
+ -0.0013661642, -0.06832946, -0.015667673, -0.1083353,
+ -0.00096863037, -0.06988685, -0.053350925, -0.027275559,
+ -0.033664223, -0.07978348, -0.025200296, -0.017207067,
+ -0.058403496, -0.055697463, 0.005798788, 0.12965427,
+ -0.062582195, 0.0013350133, -0.10482091, 0.0379771,
+ 0.072521195, -0.0029455067, -0.13797039, -0.03628521,
+ 0.013806405, -0.017858358, -0.01008298, -0.07700066,
+ -0.017081132, 0.019358726, 0.0027079724, 0.004635139,
+ 0.062634714, -0.02338735, -0.039547626, -0.02050681,
+ 0.03385117, -0.083611414, 0.002862572, -0.09421313,
+ 0.058618143, -0.08598433, 0.00972939, 0.023867095,
+ -0.053934585, -0.023203006, 0.07452513, -0.048767887,
+ -0.07314807, -0.056307215, -0.10433547, -0.06440842,
+ 0.04328182, 0.04389765, -0.020006588, -0.09076438,
+ -0.11652589, -0.021705797, 0.03345259, -0.010329105,
+ -0.025767034, 0.013057034, -0.07316461, -0.10145612,
+ 0.06358255, 0.18531723, 0.07759293, 0.12006465,
+ 0.1305557, 0.058638252, -0.03393652, 0.09622831,
+ -0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
+ -0.005644518, 0.06857898, -0.12598175, -0.035084512,
+ 0.03156317, -0.12794146, -0.031963028, 0.04692781,
+ 0.030070418, 0.0071660685, -0.095516115, -0.004643372,
+ 0.040170413, -0.062104587, -0.0037324072, 0.0554317,
+ 0.08184801, -0.019164372, 0.06791302, 0.034257166,
+ -0.10307039, 0.021943003, 0.046745934, 0.0790918,
+ -0.0265588, -0.007824208, 0.042546265, -0.00977924,
+ -0.0002440307, -0.017384544, -0.017990116, 0.12252321,
+ -0.014512694, -0.08251313, 0.08861942, 0.13589665,
+ 0.026351685, 0.012641483, 0.07466548, 0.044301085,
+ -0.045414884, -0.051112458, 0.03444247, -0.08502782,
+ -0.04106223, -0.028126027, 0.028473156, 0.10467447};
+
+ recurrent_to_cell_weights_ = {
+ -0.037322544, 0.018592842, 0.0056175636, -0.06253426,
+ 0.055647098, -0.05713207, -0.05626563, 0.005559383,
+ 0.03375411, -0.025757805, -0.088049285, 0.06017052,
+ -0.06570978, 0.007384076, 0.035123326, -0.07920549,
+ 0.053676967, 0.044480428, -0.07663568, 0.0071805613,
+ 0.08089997, 0.05143358, 0.038261272, 0.03339287,
+ -0.027673481, 0.044746667, 0.028349208, 0.020090483,
+ -0.019443132, -0.030755889, -0.0040000007, 0.04465846,
+ -0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
+ -0.10893326, 0.076739706, -0.08509834, -0.027997585,
+ 0.037871376, 0.01449768, -0.09002357, -0.06111149,
+ -0.046195522, 0.0422062, -0.005683705, -0.1253618,
+ -0.012925729, -0.04890792, 0.06985068, 0.037654128,
+ 0.03398274, -0.004781977, 0.007032333, -0.031787455,
+ 0.010868644, -0.031489216, 0.09525667, 0.013939797,
+ 0.0058680447, 0.0167067, 0.02668468, -0.04797466,
+ -0.048885044, -0.12722108, 0.035304096, 0.06554885,
+ 0.00972396, -0.039238118, -0.05159735, -0.11329045,
+ 0.1613692, -0.03750952, 0.06529313, -0.071974665,
+ -0.11769596, 0.015524369, -0.0013754242, -0.12446318,
+ 0.02786344, -0.014179351, 0.005264273, 0.14376344,
+ 0.015983658, 0.03406988, -0.06939408, 0.040699873,
+ 0.02111075, 0.09669095, 0.041345075, -0.08316494,
+ -0.07684199, -0.045768797, 0.032298047, -0.041805092,
+ 0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
+ -0.024950314, 0.11574242, 0.04508852, -0.04335324,
+ 0.06760663, -0.027437469, 0.07216407, 0.06977076,
+ -0.05438599, 0.034033038, -0.028602652, 0.05346137,
+ 0.043184172, -0.037189785, 0.10420091, 0.00882477,
+ -0.054019816, -0.074273005, -0.030617684, -0.0028467078,
+ 0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
+ 0.04361412, -0.007001822, 0.09631092, -0.06702025,
+ -0.042049985, -0.035070654, -0.04103342, -0.10273396,
+ 0.0544271, 0.037184782, -0.13150354, -0.0058036847,
+ -0.008264958, 0.042035464, 0.05891794, 0.029673764,
+ 0.0063542654, 0.044788733, 0.054816857, 0.062257513,
+ -0.00093483756, 0.048938446, -0.004952862, -0.007730018,
+ -0.04043371, -0.017094059, 0.07229206, -0.023670016,
+ -0.052195564, -0.025616996, -0.01520939, 0.045104615,
+ -0.007376126, 0.003533447, 0.006570588, 0.056037236,
+ 0.12436656, 0.051817212, 0.028532185, -0.08686856,
+ 0.11868599, 0.07663395, -0.07323171, 0.03463402,
+ -0.050708205, -0.04458982, -0.11590894, 0.021273347,
+ 0.1251325, -0.15313013, -0.12224372, 0.17228661,
+ 0.023029093, 0.086124025, 0.006445803, -0.03496501,
+ 0.028332196, 0.04449512, -0.042436164, -0.026587414,
+ -0.006041347, -0.09292539, -0.05678812, 0.03897832,
+ 0.09465633, 0.008115513, -0.02171956, 0.08304309,
+ 0.071401566, 0.019622514, 0.032163795, -0.004167056,
+ 0.02295182, 0.030739572, 0.056506045, 0.004612461,
+ 0.06524936, 0.059999723, 0.046395954, -0.0045512207,
+ -0.1335546, -0.030136576, 0.11584653, -0.014678886,
+ 0.0020118146, -0.09688814, -0.0790206, 0.039770417,
+ -0.0329582, 0.07922767, 0.029322514, 0.026405897,
+ 0.04207835, -0.07073373, 0.063781224, 0.0859677,
+ -0.10925287, -0.07011058, 0.048005477, 0.03438226,
+ -0.09606514, -0.006669445, -0.043381985, 0.04240257,
+ -0.06955775, -0.06769346, 0.043903265, -0.026784198,
+ -0.017840602, 0.024307009, -0.040079936, -0.019946516,
+ 0.045318738, -0.12233574, 0.026170589, 0.0074471775,
+ 0.15978073, 0.10185836, 0.10298046, -0.015476589,
+ -0.039390966, -0.072174534, 0.0739445, -0.1211869,
+ -0.0347889, -0.07943156, 0.014809798, -0.12412325,
+ -0.0030663363, 0.039695457, 0.0647603, -0.08291318,
+ -0.018529687, -0.004423833, 0.0037507233, 0.084633216,
+ -0.01514876, -0.056505352, -0.012800942, -0.06994386,
+ 0.012962922, -0.031234352, 0.07029052, 0.016418684,
+ 0.03618972, 0.055686004, -0.08663945, -0.017404709,
+ -0.054761406, 0.029065743, 0.052404847, 0.020238016,
+ 0.0048197987, -0.0214882, 0.07078733, 0.013016777,
+ 0.06262858, 0.009184685, 0.020785125, -0.043904778,
+ -0.0270329, -0.03299152, -0.060088247, -0.015162964,
+ -0.001828936, 0.12642565, -0.056757294, 0.013586685,
+ 0.09232601, -0.035886683, 0.06000002, 0.05229691,
+ -0.052580316, -0.082029596, -0.010794592, 0.012947712,
+ -0.036429964, -0.085508935, -0.13127148, -0.017744139,
+ 0.031502828, 0.036232427, -0.031581745, 0.023051167,
+ -0.05325106, -0.03421577, 0.028793324, -0.034633752,
+ -0.009881397, -0.043551125, -0.018609839, 0.0019097115,
+ -0.008799762, 0.056595087, 0.0022273948, 0.055752404};
+
+ recurrent_to_forget_weights_ = {
+ -0.057784554, -0.026057621, -0.068447545, -0.022581743,
+ 0.14811787, 0.10826372, 0.09471067, 0.03987225,
+ -0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
+ 0.08414449, -0.022036452, -0.00066928595, -0.09203576,
+ 0.032950465, -0.10985798, -0.023809856, 0.0021431844,
+ -0.02196096, -0.00326074, 0.00058621005, -0.074678116,
+ -0.06193199, 0.055729095, 0.03736828, 0.020123724,
+ 0.061878487, -0.04729229, 0.034919553, -0.07585433,
+ -0.04421272, -0.044019096, 0.085488975, 0.04058006,
+ -0.06890133, -0.030951202, -0.024628663, -0.07672815,
+ 0.034293607, 0.08556707, -0.05293577, -0.033561368,
+ -0.04899627, 0.0241671, 0.015736353, -0.095442444,
+ -0.029564252, 0.016493602, -0.035026584, 0.022337519,
+ -0.026871363, 0.004780428, 0.0077918363, -0.03601621,
+ 0.016435321, -0.03263031, -0.09543275, -0.047392778,
+ 0.013454138, 0.028934088, 0.01685226, -0.086110644,
+ -0.046250615, -0.01847454, 0.047608484, 0.07339695,
+ 0.034546845, -0.04881143, 0.009128804, -0.08802852,
+ 0.03761666, 0.008096139, -0.014454086, 0.014361001,
+ -0.023502491, -0.0011840804, -0.07607001, 0.001856849,
+ -0.06509276, -0.006021153, -0.08570962, -0.1451793,
+ 0.060212336, 0.055259194, 0.06974018, 0.049454916,
+ -0.027794661, -0.08077226, -0.016179763, 0.1169753,
+ 0.17213494, -0.0056326236, -0.053934924, -0.0124349,
+ -0.11520337, 0.05409887, 0.088759385, 0.0019655675,
+ 0.0042065294, 0.03881498, 0.019844765, 0.041858196,
+ -0.05695512, 0.047233116, 0.038937137, -0.06542224,
+ 0.014429736, -0.09719407, 0.13908425, -0.05379757,
+ 0.012321099, 0.082840554, -0.029899208, 0.044217527,
+ 0.059855383, 0.07711018, -0.045319796, 0.0948846,
+ -0.011724666, -0.0033288454, -0.033542685, -0.04764985,
+ -0.13873616, 0.040668588, 0.034832682, -0.015319203,
+ -0.018715994, 0.046002675, 0.0599172, -0.043107376,
+ 0.0294216, -0.002314414, -0.022424703, 0.0030315618,
+ 0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
+ 0.12375372, -0.0006038222, 0.029104086, 0.087442465,
+ 0.052958444, 0.07558703, 0.04817258, 0.044462286,
+ -0.015213451, -0.08783778, -0.0561384, -0.003008196,
+ 0.047060397, -0.002058388, 0.03429439, -0.018839769,
+ 0.024734668, 0.024614193, -0.042046934, 0.09597743,
+ -0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
+ -0.02558259, -0.022822596, -0.023273505, -0.02464396,
+ -0.10991725, -0.006240552, 0.0074488563, 0.024044557,
+ 0.04383914, -0.046476185, 0.028658995, 0.060410924,
+ 0.050786525, 0.009452605, -0.0073054377, -0.024810238,
+ 0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
+ 0.015898481, 0.021362653, -0.030262267, 0.016587038,
+ -0.011442813, 0.041154444, -0.007631438, -0.03423484,
+ -0.010977775, 0.036152758, 0.0066366293, 0.11915515,
+ 0.02318443, -0.041350313, 0.021485701, -0.10906167,
+ -0.028218046, -0.00954771, 0.020531068, -0.11995105,
+ -0.03672871, 0.024019798, 0.014255957, -0.05221243,
+ -0.00661567, -0.04630967, 0.033188973, 0.10107534,
+ -0.014027541, 0.030796422, -0.10270911, -0.035999842,
+ 0.15443139, 0.07684145, 0.036571592, -0.035900835,
+ -0.0034699554, 0.06209149, 0.015920248, -0.031122351,
+ -0.03858649, 0.01849943, 0.13872518, 0.01503974,
+ 0.069941424, -0.06948533, -0.0088794185, 0.061282158,
+ -0.047401894, 0.03100163, -0.041533746, -0.10430945,
+ 0.044574402, -0.01425562, -0.024290353, 0.034563623,
+ 0.05866852, 0.023947537, -0.09445152, 0.035450947,
+ 0.02247216, -0.0042998926, 0.061146557, -0.10250651,
+ 0.020881841, -0.06747029, 0.10062043, -0.0023941975,
+ 0.03532124, -0.016341697, 0.09685456, -0.016764693,
+ 0.051808182, 0.05875331, -0.04536488, 0.001626336,
+ -0.028892258, -0.01048663, -0.009793449, -0.017093895,
+ 0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
+ -0.001845119, -0.03551521, 0.0018358806, 0.05763657,
+ -0.01769146, 0.040995963, 0.02235177, -0.060430344,
+ 0.11475477, -0.023854522, 0.10071741, 0.0686208,
+ -0.014250481, 0.034261297, 0.047418304, 0.08562733,
+ -0.030519066, 0.0060542435, 0.014653856, -0.038836084,
+ 0.04096551, 0.032249358, -0.08355519, -0.026823482,
+ 0.056386515, -0.010401743, -0.028396193, 0.08507674,
+ 0.014410365, 0.020995233, 0.17040324, 0.11511526,
+ 0.02459721, 0.0066619175, 0.025853224, -0.023133837,
+ -0.081302024, 0.017264642, -0.009585969, 0.09491168,
+ -0.051313367, 0.054532815, -0.014298593, 0.10657464,
+ 0.007076659, 0.10964551, 0.0409152, 0.008275321,
+ -0.07283536, 0.07937492, 0.04192024, -0.1075027};
+
+ recurrent_to_output_weights_ = {
+ 0.025825322, -0.05813119, 0.09495884, -0.045984812,
+ -0.01255415, -0.0026479573, -0.08196161, -0.054914974,
+ -0.0046604523, -0.029587349, -0.044576716, -0.07480124,
+ -0.082868785, 0.023254942, 0.027502948, -0.0039728214,
+ -0.08683098, -0.08116779, -0.014675607, -0.037924774,
+ -0.023314456, -0.007401714, -0.09255757, 0.029460307,
+ -0.08829125, -0.005139627, -0.08989442, -0.0555066,
+ 0.13596267, -0.025062224, -0.048351806, -0.03850004,
+ 0.07266485, -0.022414139, 0.05940088, 0.075114764,
+ 0.09597592, -0.010211725, -0.0049794707, -0.011523867,
+ -0.025980417, 0.072999895, 0.11091378, -0.081685916,
+ 0.014416728, 0.043229222, 0.034178585, -0.07530371,
+ 0.035837382, -0.085607, -0.007721233, -0.03287832,
+ -0.043848954, -0.06404588, -0.06632928, -0.073643476,
+ 0.008214239, -0.045984086, 0.039764922, 0.03474462,
+ 0.060612556, -0.080590084, 0.049127717, 0.04151091,
+ -0.030063879, 0.008801774, -0.023021035, -0.019558564,
+ 0.05158114, -0.010947698, -0.011825728, 0.0075720972,
+ 0.0699727, -0.0039981045, 0.069350146, 0.08799282,
+ 0.016156472, 0.035502106, 0.11695009, 0.006217345,
+ 0.13392477, -0.037875112, 0.025745004, 0.08940699,
+ -0.00924166, 0.0046702605, -0.036598757, -0.08811812,
+ 0.10522024, -0.032441203, 0.008176899, -0.04454919,
+ 0.07058152, 0.0067963637, 0.039206743, 0.03259838,
+ 0.03725492, -0.09515802, 0.013326398, -0.052055415,
+ -0.025676316, 0.03198509, -0.015951829, -0.058556724,
+ 0.036879618, 0.043357447, 0.028362012, -0.05908629,
+ 0.0059240665, -0.04995891, -0.019187413, 0.0276265,
+ -0.01628143, 0.0025863599, 0.08800015, 0.035250366,
+ -0.022165963, -0.07328642, -0.009415526, -0.07455109,
+ 0.11690406, 0.0363299, 0.07411125, 0.042103454,
+ -0.009660886, 0.019076364, 0.018299393, -0.046004917,
+ 0.08891175, 0.0431396, -0.026327137, -0.051502608,
+ 0.08979574, -0.051670972, 0.04940282, -0.07491107,
+ -0.021240504, 0.022596184, -0.034280192, 0.060163025,
+ -0.058211457, -0.051837247, -0.01349775, -0.04639988,
+ -0.035936575, -0.011681591, 0.064818054, 0.0073146066,
+ -0.021745546, -0.043124277, -0.06471268, -0.07053354,
+ -0.029321948, -0.05330136, 0.016933719, -0.053782392,
+ 0.13747959, -0.1361751, -0.11569455, 0.0033329215,
+ 0.05693899, -0.053219706, 0.063698, 0.07977434,
+ -0.07924483, 0.06936997, 0.0034815092, -0.007305279,
+ -0.037325785, -0.07251102, -0.033633437, -0.08677009,
+ 0.091591336, -0.14165086, 0.021752775, 0.019683983,
+ 0.0011612234, -0.058154266, 0.049996935, 0.0288841,
+ -0.0024567875, -0.14345716, 0.010955264, -0.10234828,
+ 0.1183656, -0.0010731248, -0.023590032, -0.072285876,
+ -0.0724771, -0.026382286, -0.0014920527, 0.042667855,
+ 0.0018776858, 0.02986552, 0.009814309, 0.0733756,
+ 0.12289186, 0.018043943, -0.0458958, 0.049412545,
+ 0.033632483, 0.05495232, 0.036686596, -0.013781798,
+ -0.010036754, 0.02576849, -0.08307328, 0.010112348,
+ 0.042521734, -0.05869831, -0.071689695, 0.03876447,
+ -0.13275425, -0.0352966, -0.023077697, 0.10285965,
+ 0.084736146, 0.15568255, -0.00040734606, 0.027835453,
+ -0.10292561, -0.032401145, 0.10053256, -0.026142767,
+ -0.08271222, -0.0030240538, -0.016368777, 0.1070414,
+ 0.042672627, 0.013456989, -0.0437609, -0.022309763,
+ 0.11576483, 0.04108048, 0.061026827, -0.0190714,
+ -0.0869359, 0.037901703, 0.0610107, 0.07202949,
+ 0.01675338, 0.086139716, -0.08795751, -0.014898893,
+ -0.023771819, -0.01965048, 0.007955471, -0.043740474,
+ 0.03346837, -0.10549954, 0.090567775, 0.042013682,
+ -0.03176985, 0.12569028, -0.02421228, -0.029526481,
+ 0.023851605, 0.031539805, 0.05292009, -0.02344001,
+ -0.07811758, -0.08834428, 0.10094801, 0.16594367,
+ -0.06861939, -0.021256343, -0.041093912, -0.06669611,
+ 0.035498552, 0.021757556, -0.09302526, -0.015403468,
+ -0.06614931, -0.051798206, -0.013874718, 0.03630673,
+ 0.010412845, -0.08077351, 0.046185967, 0.0035662893,
+ 0.03541868, -0.094149634, -0.034814864, 0.003128424,
+ -0.020674974, -0.03944324, -0.008110165, -0.11113267,
+ 0.08484226, 0.043586485, 0.040582247, 0.0968012,
+ -0.065249965, -0.028036479, 0.0050708856, 0.0017462453,
+ 0.0326779, 0.041296225, 0.09164146, -0.047743853,
+ -0.015952192, -0.034451712, 0.084197424, -0.05347844,
+ -0.11768019, 0.085926116, -0.08251791, -0.045081906,
+ 0.0948852, 0.068401024, 0.024856757, 0.06978981,
+ -0.057309967, -0.012775832, -0.0032452994, 0.01977615,
+ -0.041040014, -0.024264973, 0.063464895, 0.05431621,
+ };
+
+ cell_to_input_weights_ = {
+ 0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
+ -0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
+ -0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
+ 0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175};
+
+ cell_to_forget_weights_ = {
+ -0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
+ -0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
+ -0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
+ 0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355};
+
+ cell_to_output_weights_ = {
+ 0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
+ -0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
+ -0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
+ 0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733};
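Note the shape change at this point in the fixture: the cell_to_* peephole members are flat vectors of length n_cell = 20, not matrices. In the standard peephole formulation the cell state feeds each gate elementwise, one weight per cell (illustrative math, not the kernel's code):

// i_t = sigmoid(W_i x_t + R_i h_{t-1} + w_ci (*) c_{t-1} + b_i)
// f_t = sigmoid(W_f x_t + R_f h_{t-1} + w_cf (*) c_{t-1} + b_f)
// o_t = sigmoid(W_o x_t + R_o h_{t-1} + w_co (*) c_t     + b_o)
// where (*) is the elementwise product, so w_ci, w_cf and w_co each carry
// n_cell entries -- matching the three 20-element vectors above.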
+
+ projection_weights_ = {
+ -0.009802181, 0.09401916, 0.0717386, -0.13895074,
+ 0.09641832, 0.060420845, 0.08539281, 0.054285463,
+ 0.061395317, 0.034448683, -0.042991187, 0.019801661,
+ -0.16840284, -0.015726732, -0.23041931, -0.024478018,
+ -0.10959692, -0.013875541, 0.18600968, -0.061274476,
+ 0.0138165, -0.08160894, -0.07661644, 0.032372914,
+ 0.16169067, 0.22465782, -0.03993472, -0.004017731,
+ 0.08633481, -0.28869787, 0.08682067, 0.17240396,
+ 0.014975425, 0.056431185, 0.031037588, 0.16702051,
+ 0.0077946745, 0.15140012, 0.29405436, 0.120285,
+ -0.188994, -0.027265169, 0.043389652, -0.022061434,
+ 0.014777949, -0.20203483, 0.094781205, 0.19100232,
+ 0.13987629, -0.036132768, -0.06426278, -0.05108664,
+ 0.13221376, 0.009441198, -0.16715929, 0.15859416,
+ -0.040437475, 0.050779544, -0.022187516, 0.012166504,
+ 0.027685808, -0.07675938, -0.0055694645, -0.09444123,
+ 0.0046453946, 0.050794356, 0.10770313, -0.20790008,
+ -0.07149004, -0.11425117, 0.008225835, -0.035802525,
+ 0.14374903, 0.15262283, 0.048710253, 0.1847461,
+ -0.007487823, 0.11000021, -0.09542012, 0.22619456,
+ -0.029149994, 0.08527916, 0.009043713, 0.0042746216,
+ 0.016261552, 0.022461696, 0.12689082, -0.043589946,
+ -0.12035478, -0.08361797, -0.050666027, -0.1248618,
+ -0.1275799, -0.071875185, 0.07377272, 0.09944291,
+ -0.18897448, -0.1593054, -0.06526116, -0.040107165,
+ -0.004618631, -0.067624845, -0.007576253, 0.10727444,
+ 0.041546922, -0.20424393, 0.06907816, 0.050412357,
+ 0.00724631, 0.039827548, 0.12449835, 0.10747581,
+ 0.13708383, 0.09134148, -0.12617786, -0.06428341,
+ 0.09956831, 0.1208086, -0.14676677, -0.0727722,
+ 0.1126304, 0.010139365, 0.015571211, -0.038128063,
+ 0.022913318, -0.042050496, 0.16842307, -0.060597885,
+ 0.10531834, -0.06411776, -0.07451711, -0.03410368,
+ -0.13393489, 0.06534304, 0.003620307, 0.04490757,
+ 0.05970546, 0.05197996, 0.02839995, 0.10434969,
+ -0.013699693, -0.028353551, -0.07260381, 0.047201227,
+ -0.024575593, -0.036445823, 0.07155557, 0.009672501,
+ -0.02328883, 0.009533515, -0.03606021, -0.07421458,
+ -0.028082801, -0.2678904, -0.13221288, 0.18419984,
+ -0.13012612, -0.014588381, -0.035059117, -0.04824723,
+ 0.07830115, -0.056184657, 0.03277091, 0.025466874,
+ 0.14494097, -0.12522776, -0.098633975, -0.10766018,
+ -0.08317623, 0.08594209, 0.07749552, 0.039474737,
+ 0.1776665, -0.07409566, -0.0477268, 0.29323658,
+ 0.10801441, 0.1154011, 0.013952499, 0.10739139,
+ 0.10708251, -0.051456142, 0.0074137426, -0.10430189,
+ 0.10034707, 0.045594677, 0.0635285, -0.0715442,
+ -0.089667566, -0.10811871, 0.00026344223, 0.08298446,
+ -0.009525053, 0.006585689, -0.24567553, -0.09450807,
+ 0.09648481, 0.026996298, -0.06419476, -0.04752702,
+ -0.11063944, -0.23441927, -0.17608605, -0.052156363,
+ 0.067035615, 0.19271925, -0.0032889997, -0.043264326,
+ 0.09663576, -0.057112187, -0.10100678, 0.0628376,
+ 0.04447668, 0.017961001, -0.10094388, -0.10190601,
+ 0.18335468, 0.10494553, -0.052095775, -0.0026118709,
+ 0.10539724, -0.04383912, -0.042349473, 0.08438151,
+ -0.1947263, 0.02251204, 0.11216432, -0.10307853,
+ 0.17351969, -0.039091777, 0.08066188, -0.00561982,
+ 0.12633002, 0.11335965, -0.0088127935, -0.019777594,
+ 0.06864014, -0.059751723, 0.016233567, -0.06894641,
+ -0.28651384, -0.004228674, 0.019708522, -0.16305895,
+ -0.07468996, -0.0855457, 0.099339016, -0.07580735,
+ -0.13775392, 0.08434318, 0.08330512, -0.12131499,
+ 0.031935584, 0.09180414, -0.08876437, -0.08049874,
+ 0.008753825, 0.03498998, 0.030215185, 0.03907079,
+ 0.089751154, 0.029194152, -0.03337423, -0.019092513,
+ 0.04331237, 0.04299654, -0.036394123, -0.12915532,
+ 0.09793732, 0.07512415, -0.11319543, -0.032502122,
+ 0.15661901, 0.07671967, -0.005491124, -0.19379048,
+ -0.218606, 0.21448623, 0.017840758, 0.1416943,
+ -0.07051762, 0.19488361, 0.02664691, -0.18104725,
+ -0.09334311, 0.15026465, -0.15493552, -0.057762887,
+ -0.11604192, -0.262013, -0.01391798, 0.012185008,
+ 0.11156489, -0.07483202, 0.06693364, -0.26151478,
+ 0.046425626, 0.036540434, -0.16435726, 0.17338543,
+ -0.21401681, -0.11385144, -0.08283257, -0.069031075,
+ 0.030635102, 0.010969227, 0.11109743, 0.010919218,
+ 0.027526086, 0.13519906, 0.01891392, -0.046839405,
+ -0.040167913, 0.017953383, -0.09700955, 0.0061885654,
+ -0.07000971, 0.026893595, -0.038844477, 0.14543656};
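projection_weights_ holds 16 * 20 = 320 values, consistent with an {n_output, n_cell} shape for this fixture (n_output = 16, n_cell = 20). With a projection layer the recurrent state is compressed from n_cell down to n_output before it is fed back, which is why the two sizes differ here, unlike in the no-projection tests earlier in the file. Schematically (standard projected-LSTM form, an assumption rather than the kernel's code):

// h_t = clip(W_proj * (o_t (*) tanh(c_t)), proj_clip)  // {16,20} x {20} -> {16}
// These tests pass /*proj_clip=*/0.0, which in the TFLite LSTM kernels
// conventionally means no clipping is applied.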
+
+ lstm_input_ = {
+ {// Batch0: 4 (input_sequence_size) * 5 (n_input)
+ 0.787926, 0.151646, 0.071352, 0.118426, 0.458058, // step 0
+ 0.596268, 0.998386, 0.568695, 0.864524, 0.571277, // step 1
+ 0.073204, 0.296072, 0.743333, 0.069199, 0.045348, // step 2
+ 0.867394, 0.291279, 0.013714, 0.482521, 0.626339}, // step 3
+
+ {// Batch1: 4 (input_sequence_size) * 5 (n_input)
+ 0.295743, 0.544053, 0.690064, 0.858138, 0.497181, // step 0
+ 0.642421, 0.524260, 0.134799, 0.003639, 0.162482, // step 1
+ 0.640394, 0.930399, 0.050782, 0.432485, 0.988078, // step 2
+ 0.082922, 0.563329, 0.865614, 0.333232, 0.259916} // step 3
+ };
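lstm_input_ is stored batch-major (one vector per batch, each holding sequence_length * n_input values, step after step), while the model's input tensor is declared time-major as {sequence_length, n_batch, n_input}. The verification path therefore has to interleave the batches per step somewhere; a hypothetical repacking loop, just to make the index math concrete (names are illustrative):

// Hypothetical repacking from the per-batch vectors above into a
// time-major {sequence_length, n_batch, n_input} buffer.
std::vector<float> packed(sequence_length * n_batch * n_input);
for (int t = 0; t < sequence_length; ++t) {
  for (int b = 0; b < n_batch; ++b) {
    for (int i = 0; i < n_input; ++i) {
      packed[(t * n_batch + b) * n_input + i] =
          lstm_input_[b][t * n_input + i];
    }
  }
}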
+
+ lstm_golden_output_ = {
+ {// Batch0: 4 (input_sequence_size) * 16 (n_output)
+ -0.00396806, 0.029352, -0.00279226, 0.0159977, -0.00835576,
+ -0.0211779, 0.0283512, -0.0114597, 0.00907307, -0.0244004,
+ -0.0152191, -0.0259063, 0.00914318, 0.00415118, 0.017147,
+ 0.0134203, -0.0166936, 0.0381209, 0.000889694, 0.0143363,
+ -0.0328911, -0.0234288, 0.0333051, -0.012229, 0.0110322,
+ -0.0457725, -0.000832209, -0.0202817, 0.0327257, 0.0121308,
+ 0.0155969, 0.0312091, -0.0213783, 0.0350169, 0.000324794,
+ 0.0276012, -0.0263374, -0.0371449, 0.0446149, -0.0205474,
+ 0.0103729, -0.0576349, -0.0150052, -0.0292043, 0.0376827,
+ 0.0136115, 0.0243435, 0.0354492, -0.0189322, 0.0464512,
+ -0.00251373, 0.0225745, -0.0308346, -0.0317124, 0.0460407,
+ -0.0189395, 0.0149363, -0.0530162, -0.0150767, -0.0340193,
+ 0.0286833, 0.00824207, 0.0264887, 0.0305169},
+ {// Batch1: 4 (input_sequence_size) * 16 (n_output)
+ -0.013869, 0.0287268, -0.00334693, 0.00733398, -0.0287926,
+ -0.0186926, 0.0193662, -0.0115437, 0.00422612, -0.0345232,
+ 0.00223253, -0.00957321, 0.0210624, 0.013331, 0.0150954,
+ 0.02168, -0.0141913, 0.0322082, 0.00227024, 0.0260507,
+ -0.0188721, -0.0296489, 0.0399134, -0.0160509, 0.0116039,
+ -0.0447318, -0.0150515, -0.0277406, 0.0316596, 0.0118233,
+ 0.0214762, 0.0293641, -0.0204549, 0.0450315, -0.00117378,
+ 0.0167673, -0.0375007, -0.0238314, 0.038784, -0.0174034,
+ 0.0131743, -0.0506589, -0.0048447, -0.0240239, 0.0325789,
+ 0.00790065, 0.0220157, 0.0333314, -0.0264787, 0.0387855,
+ -0.000764675, 0.0217599, -0.037537, -0.0335206, 0.0431679,
+ -0.0211424, 0.010203, -0.062785, -0.00832363, -0.025181,
+ 0.0412031, 0.0118723, 0.0239643, 0.0394009}};
+ }
+};
+
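For orientation: the members assigned in SetUp() above belong to the BaseLstmTest fixture, whose definition sits earlier in this file and outside this hunk. A rough skeleton, reconstructed from the member names used here and from the inline verification code the diff removes; the exact VerifyGoldens signature is an assumption (the calls pass an input, a golden output, the model, and an optional tolerance):

// Assumed shape of the fixture; not copied from the commit.
class BaseLstmTest : public ::testing::Test {
 protected:
  // Weight/bias data, filled in by each subclass's SetUp().
  std::vector<float> input_to_input_weights_, input_to_forget_weights_,
      input_to_cell_weights_, input_to_output_weights_;
  std::vector<float> recurrent_to_input_weights_, recurrent_to_forget_weights_,
      recurrent_to_cell_weights_, recurrent_to_output_weights_;
  std::vector<float> cell_to_input_weights_, cell_to_forget_weights_,
      cell_to_output_weights_;
  std::vector<float> input_gate_bias_, forget_gate_bias_, cell_gate_bias_,
      output_gate_bias_;
  std::vector<float> projection_weights_;
  std::vector<std::vector<float>> lstm_input_;          // one vector per batch
  std::vector<std::vector<float>> lstm_golden_output_;  // one vector per batch

  // Feeds lstm_input_, invokes the model, and checks the output against
  // lstm_golden_output_ with ArrayFloatNear -- mirroring the deleted
  // batch0_start/batch0_end code path above.
  void VerifyGoldens(const std::vector<std::vector<float>>& input,
                     const std::vector<std::vector<float>>& output,
                     UnidirectionalLSTMOpModel* lstm, float tolerance = 1e-5);
};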
+TEST_F(NoCifgPeepholeProjectionClippingLstmTest, LstmBlackBoxTest) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 20;
@@ -461,8 +1322,9 @@ TEST(LSTMOpTest, BlackBoxTestWithPeepholeWithProjectionNoClipping) {
const int sequence_length = 4;
UnidirectionalLSTMOpModel lstm(
- n_batch, n_input, n_cell, n_output, sequence_length, /*use_cifg=*/false,
- /*use_peephole=*/true, /*use_projection_weights=*/true,
+ n_batch, n_input, n_cell, n_output, sequence_length,
+ /*use_cifg=*/false, /*use_peephole=*/true,
+ /*use_projection_weights=*/true,
/*use_projection_bias=*/false,
/*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
@@ -491,588 +1353,99 @@ TEST(LSTMOpTest, BlackBoxTestWithPeepholeWithProjectionNoClipping) {
{0}, // projection_bias tensor
});
- lstm.SetInputToInputWeights(
- {0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
- 0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
- -0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
- -0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
- -0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
- -0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
- -0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
- 0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
- 0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
- 0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
- -0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
- 0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
- -0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
- -0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
- -0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
- 0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
- -0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
- -0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
- -0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
- -0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677});
-
- lstm.SetInputToForgetWeights(
- {-0.0018401089, -0.004852237, 0.03698424, 0.014181704, 0.028273236,
- -0.016726194, -0.05249759, -0.10204261, 0.00861066, -0.040979505,
- -0.009899187, 0.01923892, -0.028177269, -0.08535103, -0.14585495,
- 0.10662567, -0.01909731, -0.017883534, -0.0047269356, -0.045103323,
- 0.0030784295, 0.076784775, 0.07463696, 0.094531395, 0.0814421,
- -0.12257899, -0.033945758, -0.031303465, 0.045630626, 0.06843887,
- -0.13492945, -0.012480007, -0.0811829, -0.07224499, -0.09628791,
- 0.045100946, 0.0012300825, 0.013964662, 0.099372394, 0.02543059,
- 0.06958324, 0.034257296, 0.0482646, 0.06267997, 0.052625068,
- 0.12784666, 0.07077897, 0.025725935, 0.04165009, 0.07241905,
- 0.018668644, -0.037377294, -0.06277783, -0.08833636, -0.040120605,
- -0.011405586, -0.007808335, -0.010301386, -0.005102167, 0.027717464,
- 0.05483423, 0.11449111, 0.11289652, 0.10939839, 0.13396506,
- -0.08402166, -0.01901462, -0.044678304, -0.07720565, 0.014350063,
- -0.11757958, -0.0652038, -0.08185733, -0.076754324, -0.092614375,
- 0.10405491, 0.052960336, 0.035755895, 0.035839386, -0.012540553,
- 0.036881298, 0.02913376, 0.03420159, 0.05448447, -0.054523353,
- 0.02582715, 0.02327355, -0.011857179, -0.0011980024, -0.034641717,
- -0.026125094, -0.17582615, -0.15923657, -0.27486774, -0.0006143371,
- 0.0001771948, -8.470171e-05, 0.02651807, 0.045790765, 0.06956496});
-
- lstm.SetInputToCellWeights(
- {-0.04580283, -0.09549462, -0.032418985, -0.06454633,
- -0.043528453, 0.043018587, -0.049152344, -0.12418144,
- -0.078985475, -0.07596889, 0.019484362, -0.11434962,
- -0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
- -0.025034338, -0.0028890965, 0.048929527, 0.06235075,
- 0.10665918, -0.032036792, -0.08505916, -0.10843358,
- -0.13002433, -0.036816437, -0.02130134, -0.016518239,
- 0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
- -0.10652836, -0.1037554, -0.13056071, -0.03266643,
- -0.033702414, -0.006473424, -0.04611692, 0.014419339,
- -0.025174323, 0.0396852, 0.081777506, 0.06157468,
- 0.10210095, -0.009658194, 0.046511717, 0.03603906,
- 0.0069369148, 0.015960095, -0.06507666, 0.09551598,
- 0.053568836, 0.06408714, 0.12835667, -0.008714329,
- -0.20211966, -0.12093674, 0.029450472, 0.2849013,
- -0.029227901, 0.1164364, -0.08560263, 0.09941786,
- -0.036999565, -0.028842626, -0.0033637602, -0.017012902,
- -0.09720865, -0.11193351, -0.029155117, -0.017936034,
- -0.009768936, -0.04223324, -0.036159635, 0.06505112,
- -0.021742892, -0.023377212, -0.07221364, -0.06430552,
- 0.05453865, 0.091149814, 0.06387331, 0.007518393,
- 0.055960953, 0.069779344, 0.046411168, 0.10509911,
- 0.07463894, 0.0075130584, 0.012850982, 0.04555431,
- 0.056955688, 0.06555285, 0.050801456, -0.009862683,
- 0.00826772, -0.026555609, -0.0073611983, -0.0014897042});
-
- lstm.SetInputToOutputWeights(
- {-0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
- -0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
- 0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
- -0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
- -0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
- 0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
- -0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
- -0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
- -0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
- -0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
- 0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
- 0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
- 0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
- -0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
- 0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
- 0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
- -0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
- 0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
- -0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
- -0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956});
-
- lstm.SetInputGateBias(
- {0.02234832, 0.14757581, 0.18176508, 0.10380666, 0.053110216,
- -0.06928846, -0.13942584, -0.11816189, 0.19483899, 0.03652339,
- -0.10250295, 0.036714908, -0.18426876, 0.036065217, 0.21810818,
- 0.02383196, -0.043370757, 0.08690144, -0.04444982, 0.00030581196});
-
- lstm.SetForgetGateBias({0.035185695, -0.042891346, -0.03032477, 0.23027696,
- 0.11098921, 0.15378423, 0.09263801, 0.09790885,
- 0.09508917, 0.061199076, 0.07665568, -0.015443159,
- -0.03499149, 0.046190713, 0.08895977, 0.10899629,
- 0.40694186, 0.06030037, 0.012413437, -0.06108739});
-
- lstm.SetCellBias({-0.024379363, 0.0055531194, 0.23377132, 0.033463873,
- -0.1483596, -0.10639995, -0.091433935, 0.058573797,
- -0.06809782, -0.07889636, -0.043246906, -0.09829136,
- -0.4279842, 0.034901652, 0.18797937, 0.0075234566,
- 0.016178843, 0.1749513, 0.13975595, 0.92058027});
-
- lstm.SetOutputGateBias(
- {0.046159424, -0.0012809046, 0.03563469, 0.12648113, 0.027195795,
- 0.35373217, -0.018957434, 0.008907322, -0.0762701, 0.12018895,
- 0.04216877, 0.0022856654, 0.040952638, 0.3147856, 0.08225149,
- -0.057416286, -0.14995944, -0.008040261, 0.13208859, 0.029760877});
-
- lstm.SetRecurrentToInputWeights(
- {-0.001374326, -0.078856036, 0.10672688, 0.029162422,
- -0.11585556, 0.02557986, -0.13446963, -0.035785314,
- -0.01244275, 0.025961924, -0.02337298, -0.044228926,
- -0.055839065, -0.046598054, -0.010546039, -0.06900766,
- 0.027239809, 0.022582639, -0.013296484, -0.05459212,
- 0.08981, -0.045407712, 0.08682226, -0.06867011,
- -0.14390695, -0.02916037, 0.000996957, 0.091420636,
- 0.14283475, -0.07390571, -0.06402044, 0.062524505,
- -0.093129106, 0.04860203, -0.08364217, -0.08119002,
- 0.009352075, 0.22920375, 0.0016303885, 0.11583097,
- -0.13732095, 0.012405723, -0.07551853, 0.06343048,
- 0.12162708, -0.031923793, -0.014335606, 0.01790974,
- -0.10650317, -0.0724401, 0.08554849, -0.05727212,
- 0.06556731, -0.042729504, -0.043227166, 0.011683251,
- -0.013082158, -0.029302018, -0.010899579, -0.062036745,
- -0.022509435, -0.00964907, -0.01567329, 0.04260106,
- -0.07787477, -0.11576462, 0.017356863, 0.048673786,
- -0.017577527, -0.05527947, -0.082487635, -0.040137455,
- -0.10820036, -0.04666372, 0.022746278, -0.07851417,
- 0.01068115, 0.032956902, 0.022433773, 0.0026891115,
- 0.08944216, -0.0685835, 0.010513544, 0.07228705,
- 0.02032331, -0.059686817, -0.0005566496, -0.086984694,
- 0.040414046, -0.1380399, 0.094208956, -0.05722982,
- 0.012092817, -0.04989123, -0.086576, -0.003399834,
- -0.04696032, -0.045747425, 0.10091314, 0.048676282,
- -0.029037097, 0.031399418, -0.0040285117, 0.047237843,
- 0.09504992, 0.041799378, -0.049185462, -0.031518843,
- -0.10516937, 0.026374253, 0.10058866, -0.0033195973,
- -0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
- -0.10167381, 0.042500053, -0.01447153, 0.06464186,
- -0.017142897, 0.03312627, 0.009205989, 0.024138335,
- -0.011337001, 0.035530265, -0.010912711, 0.0706555,
- -0.005894094, 0.051841937, -0.1401738, -0.02351249,
- 0.0365468, 0.07590991, 0.08838724, 0.021681072,
- -0.10086113, 0.019608743, -0.06195883, 0.077335775,
- 0.023646897, -0.095322326, 0.02233014, 0.09756986,
- -0.048691444, -0.009579111, 0.07595467, 0.11480546,
- -0.09801813, 0.019894179, 0.08502348, 0.004032281,
- 0.037211012, 0.068537936, -0.048005626, -0.091520436,
- -0.028379958, -0.01556313, 0.06554592, -0.045599163,
- -0.01672207, -0.020169014, -0.011877351, -0.20212261,
- 0.010889619, 0.0047078193, 0.038385306, 0.08540671,
- -0.017140968, -0.0035865551, 0.016678626, 0.005633034,
- 0.015963363, 0.00871737, 0.060130805, 0.028611384,
- 0.10109069, -0.015060172, -0.07894427, 0.06401885,
- 0.011584063, -0.024466386, 0.0047652307, -0.09041358,
- 0.030737216, -0.0046374933, 0.14215417, -0.11823516,
- 0.019899689, 0.006106124, -0.027092824, 0.0786356,
- 0.05052217, -0.058925, -0.011402121, -0.024987547,
- -0.0013661642, -0.06832946, -0.015667673, -0.1083353,
- -0.00096863037, -0.06988685, -0.053350925, -0.027275559,
- -0.033664223, -0.07978348, -0.025200296, -0.017207067,
- -0.058403496, -0.055697463, 0.005798788, 0.12965427,
- -0.062582195, 0.0013350133, -0.10482091, 0.0379771,
- 0.072521195, -0.0029455067, -0.13797039, -0.03628521,
- 0.013806405, -0.017858358, -0.01008298, -0.07700066,
- -0.017081132, 0.019358726, 0.0027079724, 0.004635139,
- 0.062634714, -0.02338735, -0.039547626, -0.02050681,
- 0.03385117, -0.083611414, 0.002862572, -0.09421313,
- 0.058618143, -0.08598433, 0.00972939, 0.023867095,
- -0.053934585, -0.023203006, 0.07452513, -0.048767887,
- -0.07314807, -0.056307215, -0.10433547, -0.06440842,
- 0.04328182, 0.04389765, -0.020006588, -0.09076438,
- -0.11652589, -0.021705797, 0.03345259, -0.010329105,
- -0.025767034, 0.013057034, -0.07316461, -0.10145612,
- 0.06358255, 0.18531723, 0.07759293, 0.12006465,
- 0.1305557, 0.058638252, -0.03393652, 0.09622831,
- -0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
- -0.005644518, 0.06857898, -0.12598175, -0.035084512,
- 0.03156317, -0.12794146, -0.031963028, 0.04692781,
- 0.030070418, 0.0071660685, -0.095516115, -0.004643372,
- 0.040170413, -0.062104587, -0.0037324072, 0.0554317,
- 0.08184801, -0.019164372, 0.06791302, 0.034257166,
- -0.10307039, 0.021943003, 0.046745934, 0.0790918,
- -0.0265588, -0.007824208, 0.042546265, -0.00977924,
- -0.0002440307, -0.017384544, -0.017990116, 0.12252321,
- -0.014512694, -0.08251313, 0.08861942, 0.13589665,
- 0.026351685, 0.012641483, 0.07466548, 0.044301085,
- -0.045414884, -0.051112458, 0.03444247, -0.08502782,
- -0.04106223, -0.028126027, 0.028473156, 0.10467447});
-
- lstm.SetRecurrentToForgetWeights(
- {-0.057784554, -0.026057621, -0.068447545, -0.022581743,
- 0.14811787, 0.10826372, 0.09471067, 0.03987225,
- -0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
- 0.08414449, -0.022036452, -0.00066928595, -0.09203576,
- 0.032950465, -0.10985798, -0.023809856, 0.0021431844,
- -0.02196096, -0.00326074, 0.00058621005, -0.074678116,
- -0.06193199, 0.055729095, 0.03736828, 0.020123724,
- 0.061878487, -0.04729229, 0.034919553, -0.07585433,
- -0.04421272, -0.044019096, 0.085488975, 0.04058006,
- -0.06890133, -0.030951202, -0.024628663, -0.07672815,
- 0.034293607, 0.08556707, -0.05293577, -0.033561368,
- -0.04899627, 0.0241671, 0.015736353, -0.095442444,
- -0.029564252, 0.016493602, -0.035026584, 0.022337519,
- -0.026871363, 0.004780428, 0.0077918363, -0.03601621,
- 0.016435321, -0.03263031, -0.09543275, -0.047392778,
- 0.013454138, 0.028934088, 0.01685226, -0.086110644,
- -0.046250615, -0.01847454, 0.047608484, 0.07339695,
- 0.034546845, -0.04881143, 0.009128804, -0.08802852,
- 0.03761666, 0.008096139, -0.014454086, 0.014361001,
- -0.023502491, -0.0011840804, -0.07607001, 0.001856849,
- -0.06509276, -0.006021153, -0.08570962, -0.1451793,
- 0.060212336, 0.055259194, 0.06974018, 0.049454916,
- -0.027794661, -0.08077226, -0.016179763, 0.1169753,
- 0.17213494, -0.0056326236, -0.053934924, -0.0124349,
- -0.11520337, 0.05409887, 0.088759385, 0.0019655675,
- 0.0042065294, 0.03881498, 0.019844765, 0.041858196,
- -0.05695512, 0.047233116, 0.038937137, -0.06542224,
- 0.014429736, -0.09719407, 0.13908425, -0.05379757,
- 0.012321099, 0.082840554, -0.029899208, 0.044217527,
- 0.059855383, 0.07711018, -0.045319796, 0.0948846,
- -0.011724666, -0.0033288454, -0.033542685, -0.04764985,
- -0.13873616, 0.040668588, 0.034832682, -0.015319203,
- -0.018715994, 0.046002675, 0.0599172, -0.043107376,
- 0.0294216, -0.002314414, -0.022424703, 0.0030315618,
- 0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
- 0.12375372, -0.0006038222, 0.029104086, 0.087442465,
- 0.052958444, 0.07558703, 0.04817258, 0.044462286,
- -0.015213451, -0.08783778, -0.0561384, -0.003008196,
- 0.047060397, -0.002058388, 0.03429439, -0.018839769,
- 0.024734668, 0.024614193, -0.042046934, 0.09597743,
- -0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
- -0.02558259, -0.022822596, -0.023273505, -0.02464396,
- -0.10991725, -0.006240552, 0.0074488563, 0.024044557,
- 0.04383914, -0.046476185, 0.028658995, 0.060410924,
- 0.050786525, 0.009452605, -0.0073054377, -0.024810238,
- 0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
- 0.015898481, 0.021362653, -0.030262267, 0.016587038,
- -0.011442813, 0.041154444, -0.007631438, -0.03423484,
- -0.010977775, 0.036152758, 0.0066366293, 0.11915515,
- 0.02318443, -0.041350313, 0.021485701, -0.10906167,
- -0.028218046, -0.00954771, 0.020531068, -0.11995105,
- -0.03672871, 0.024019798, 0.014255957, -0.05221243,
- -0.00661567, -0.04630967, 0.033188973, 0.10107534,
- -0.014027541, 0.030796422, -0.10270911, -0.035999842,
- 0.15443139, 0.07684145, 0.036571592, -0.035900835,
- -0.0034699554, 0.06209149, 0.015920248, -0.031122351,
- -0.03858649, 0.01849943, 0.13872518, 0.01503974,
- 0.069941424, -0.06948533, -0.0088794185, 0.061282158,
- -0.047401894, 0.03100163, -0.041533746, -0.10430945,
- 0.044574402, -0.01425562, -0.024290353, 0.034563623,
- 0.05866852, 0.023947537, -0.09445152, 0.035450947,
- 0.02247216, -0.0042998926, 0.061146557, -0.10250651,
- 0.020881841, -0.06747029, 0.10062043, -0.0023941975,
- 0.03532124, -0.016341697, 0.09685456, -0.016764693,
- 0.051808182, 0.05875331, -0.04536488, 0.001626336,
- -0.028892258, -0.01048663, -0.009793449, -0.017093895,
- 0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
- -0.001845119, -0.03551521, 0.0018358806, 0.05763657,
- -0.01769146, 0.040995963, 0.02235177, -0.060430344,
- 0.11475477, -0.023854522, 0.10071741, 0.0686208,
- -0.014250481, 0.034261297, 0.047418304, 0.08562733,
- -0.030519066, 0.0060542435, 0.014653856, -0.038836084,
- 0.04096551, 0.032249358, -0.08355519, -0.026823482,
- 0.056386515, -0.010401743, -0.028396193, 0.08507674,
- 0.014410365, 0.020995233, 0.17040324, 0.11511526,
- 0.02459721, 0.0066619175, 0.025853224, -0.023133837,
- -0.081302024, 0.017264642, -0.009585969, 0.09491168,
- -0.051313367, 0.054532815, -0.014298593, 0.10657464,
- 0.007076659, 0.10964551, 0.0409152, 0.008275321,
- -0.07283536, 0.07937492, 0.04192024, -0.1075027});
-
- lstm.SetRecurrentToCellWeights(
- {-0.037322544, 0.018592842, 0.0056175636, -0.06253426,
- 0.055647098, -0.05713207, -0.05626563, 0.005559383,
- 0.03375411, -0.025757805, -0.088049285, 0.06017052,
- -0.06570978, 0.007384076, 0.035123326, -0.07920549,
- 0.053676967, 0.044480428, -0.07663568, 0.0071805613,
- 0.08089997, 0.05143358, 0.038261272, 0.03339287,
- -0.027673481, 0.044746667, 0.028349208, 0.020090483,
- -0.019443132, -0.030755889, -0.0040000007, 0.04465846,
- -0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
- -0.10893326, 0.076739706, -0.08509834, -0.027997585,
- 0.037871376, 0.01449768, -0.09002357, -0.06111149,
- -0.046195522, 0.0422062, -0.005683705, -0.1253618,
- -0.012925729, -0.04890792, 0.06985068, 0.037654128,
- 0.03398274, -0.004781977, 0.007032333, -0.031787455,
- 0.010868644, -0.031489216, 0.09525667, 0.013939797,
- 0.0058680447, 0.0167067, 0.02668468, -0.04797466,
- -0.048885044, -0.12722108, 0.035304096, 0.06554885,
- 0.00972396, -0.039238118, -0.05159735, -0.11329045,
- 0.1613692, -0.03750952, 0.06529313, -0.071974665,
- -0.11769596, 0.015524369, -0.0013754242, -0.12446318,
- 0.02786344, -0.014179351, 0.005264273, 0.14376344,
- 0.015983658, 0.03406988, -0.06939408, 0.040699873,
- 0.02111075, 0.09669095, 0.041345075, -0.08316494,
- -0.07684199, -0.045768797, 0.032298047, -0.041805092,
- 0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
- -0.024950314, 0.11574242, 0.04508852, -0.04335324,
- 0.06760663, -0.027437469, 0.07216407, 0.06977076,
- -0.05438599, 0.034033038, -0.028602652, 0.05346137,
- 0.043184172, -0.037189785, 0.10420091, 0.00882477,
- -0.054019816, -0.074273005, -0.030617684, -0.0028467078,
- 0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
- 0.04361412, -0.007001822, 0.09631092, -0.06702025,
- -0.042049985, -0.035070654, -0.04103342, -0.10273396,
- 0.0544271, 0.037184782, -0.13150354, -0.0058036847,
- -0.008264958, 0.042035464, 0.05891794, 0.029673764,
- 0.0063542654, 0.044788733, 0.054816857, 0.062257513,
- -0.00093483756, 0.048938446, -0.004952862, -0.007730018,
- -0.04043371, -0.017094059, 0.07229206, -0.023670016,
- -0.052195564, -0.025616996, -0.01520939, 0.045104615,
- -0.007376126, 0.003533447, 0.006570588, 0.056037236,
- 0.12436656, 0.051817212, 0.028532185, -0.08686856,
- 0.11868599, 0.07663395, -0.07323171, 0.03463402,
- -0.050708205, -0.04458982, -0.11590894, 0.021273347,
- 0.1251325, -0.15313013, -0.12224372, 0.17228661,
- 0.023029093, 0.086124025, 0.006445803, -0.03496501,
- 0.028332196, 0.04449512, -0.042436164, -0.026587414,
- -0.006041347, -0.09292539, -0.05678812, 0.03897832,
- 0.09465633, 0.008115513, -0.02171956, 0.08304309,
- 0.071401566, 0.019622514, 0.032163795, -0.004167056,
- 0.02295182, 0.030739572, 0.056506045, 0.004612461,
- 0.06524936, 0.059999723, 0.046395954, -0.0045512207,
- -0.1335546, -0.030136576, 0.11584653, -0.014678886,
- 0.0020118146, -0.09688814, -0.0790206, 0.039770417,
- -0.0329582, 0.07922767, 0.029322514, 0.026405897,
- 0.04207835, -0.07073373, 0.063781224, 0.0859677,
- -0.10925287, -0.07011058, 0.048005477, 0.03438226,
- -0.09606514, -0.006669445, -0.043381985, 0.04240257,
- -0.06955775, -0.06769346, 0.043903265, -0.026784198,
- -0.017840602, 0.024307009, -0.040079936, -0.019946516,
- 0.045318738, -0.12233574, 0.026170589, 0.0074471775,
- 0.15978073, 0.10185836, 0.10298046, -0.015476589,
- -0.039390966, -0.072174534, 0.0739445, -0.1211869,
- -0.0347889, -0.07943156, 0.014809798, -0.12412325,
- -0.0030663363, 0.039695457, 0.0647603, -0.08291318,
- -0.018529687, -0.004423833, 0.0037507233, 0.084633216,
- -0.01514876, -0.056505352, -0.012800942, -0.06994386,
- 0.012962922, -0.031234352, 0.07029052, 0.016418684,
- 0.03618972, 0.055686004, -0.08663945, -0.017404709,
- -0.054761406, 0.029065743, 0.052404847, 0.020238016,
- 0.0048197987, -0.0214882, 0.07078733, 0.013016777,
- 0.06262858, 0.009184685, 0.020785125, -0.043904778,
- -0.0270329, -0.03299152, -0.060088247, -0.015162964,
- -0.001828936, 0.12642565, -0.056757294, 0.013586685,
- 0.09232601, -0.035886683, 0.06000002, 0.05229691,
- -0.052580316, -0.082029596, -0.010794592, 0.012947712,
- -0.036429964, -0.085508935, -0.13127148, -0.017744139,
- 0.031502828, 0.036232427, -0.031581745, 0.023051167,
- -0.05325106, -0.03421577, 0.028793324, -0.034633752,
- -0.009881397, -0.043551125, -0.018609839, 0.0019097115,
- -0.008799762, 0.056595087, 0.0022273948, 0.055752404});
-
- lstm.SetRecurrentToOutputWeights({
- 0.025825322, -0.05813119, 0.09495884, -0.045984812, -0.01255415,
- -0.0026479573, -0.08196161, -0.054914974, -0.0046604523, -0.029587349,
- -0.044576716, -0.07480124, -0.082868785, 0.023254942, 0.027502948,
- -0.0039728214, -0.08683098, -0.08116779, -0.014675607, -0.037924774,
- -0.023314456, -0.007401714, -0.09255757, 0.029460307, -0.08829125,
- -0.005139627, -0.08989442, -0.0555066, 0.13596267, -0.025062224,
- -0.048351806, -0.03850004, 0.07266485, -0.022414139, 0.05940088,
- 0.075114764, 0.09597592, -0.010211725, -0.0049794707, -0.011523867,
- -0.025980417, 0.072999895, 0.11091378, -0.081685916, 0.014416728,
- 0.043229222, 0.034178585, -0.07530371, 0.035837382, -0.085607,
- -0.007721233, -0.03287832, -0.043848954, -0.06404588, -0.06632928,
- -0.073643476, 0.008214239, -0.045984086, 0.039764922, 0.03474462,
- 0.060612556, -0.080590084, 0.049127717, 0.04151091, -0.030063879,
- 0.008801774, -0.023021035, -0.019558564, 0.05158114, -0.010947698,
- -0.011825728, 0.0075720972, 0.0699727, -0.0039981045, 0.069350146,
- 0.08799282, 0.016156472, 0.035502106, 0.11695009, 0.006217345,
- 0.13392477, -0.037875112, 0.025745004, 0.08940699, -0.00924166,
- 0.0046702605, -0.036598757, -0.08811812, 0.10522024, -0.032441203,
- 0.008176899, -0.04454919, 0.07058152, 0.0067963637, 0.039206743,
- 0.03259838, 0.03725492, -0.09515802, 0.013326398, -0.052055415,
- -0.025676316, 0.03198509, -0.015951829, -0.058556724, 0.036879618,
- 0.043357447, 0.028362012, -0.05908629, 0.0059240665, -0.04995891,
- -0.019187413, 0.0276265, -0.01628143, 0.0025863599, 0.08800015,
- 0.035250366, -0.022165963, -0.07328642, -0.009415526, -0.07455109,
- 0.11690406, 0.0363299, 0.07411125, 0.042103454, -0.009660886,
- 0.019076364, 0.018299393, -0.046004917, 0.08891175, 0.0431396,
- -0.026327137, -0.051502608, 0.08979574, -0.051670972, 0.04940282,
- -0.07491107, -0.021240504, 0.022596184, -0.034280192, 0.060163025,
- -0.058211457, -0.051837247, -0.01349775, -0.04639988, -0.035936575,
- -0.011681591, 0.064818054, 0.0073146066, -0.021745546, -0.043124277,
- -0.06471268, -0.07053354, -0.029321948, -0.05330136, 0.016933719,
- -0.053782392, 0.13747959, -0.1361751, -0.11569455, 0.0033329215,
- 0.05693899, -0.053219706, 0.063698, 0.07977434, -0.07924483,
- 0.06936997, 0.0034815092, -0.007305279, -0.037325785, -0.07251102,
- -0.033633437, -0.08677009, 0.091591336, -0.14165086, 0.021752775,
- 0.019683983, 0.0011612234, -0.058154266, 0.049996935, 0.0288841,
- -0.0024567875, -0.14345716, 0.010955264, -0.10234828, 0.1183656,
- -0.0010731248, -0.023590032, -0.072285876, -0.0724771, -0.026382286,
- -0.0014920527, 0.042667855, 0.0018776858, 0.02986552, 0.009814309,
- 0.0733756, 0.12289186, 0.018043943, -0.0458958, 0.049412545,
- 0.033632483, 0.05495232, 0.036686596, -0.013781798, -0.010036754,
- 0.02576849, -0.08307328, 0.010112348, 0.042521734, -0.05869831,
- -0.071689695, 0.03876447, -0.13275425, -0.0352966, -0.023077697,
- 0.10285965, 0.084736146, 0.15568255, -0.00040734606, 0.027835453,
- -0.10292561, -0.032401145, 0.10053256, -0.026142767, -0.08271222,
- -0.0030240538, -0.016368777, 0.1070414, 0.042672627, 0.013456989,
- -0.0437609, -0.022309763, 0.11576483, 0.04108048, 0.061026827,
- -0.0190714, -0.0869359, 0.037901703, 0.0610107, 0.07202949,
- 0.01675338, 0.086139716, -0.08795751, -0.014898893, -0.023771819,
- -0.01965048, 0.007955471, -0.043740474, 0.03346837, -0.10549954,
- 0.090567775, 0.042013682, -0.03176985, 0.12569028, -0.02421228,
- -0.029526481, 0.023851605, 0.031539805, 0.05292009, -0.02344001,
- -0.07811758, -0.08834428, 0.10094801, 0.16594367, -0.06861939,
- -0.021256343, -0.041093912, -0.06669611, 0.035498552, 0.021757556,
- -0.09302526, -0.015403468, -0.06614931, -0.051798206, -0.013874718,
- 0.03630673, 0.010412845, -0.08077351, 0.046185967, 0.0035662893,
- 0.03541868, -0.094149634, -0.034814864, 0.003128424, -0.020674974,
- -0.03944324, -0.008110165, -0.11113267, 0.08484226, 0.043586485,
- 0.040582247, 0.0968012, -0.065249965, -0.028036479, 0.0050708856,
- 0.0017462453, 0.0326779, 0.041296225, 0.09164146, -0.047743853,
- -0.015952192, -0.034451712, 0.084197424, -0.05347844, -0.11768019,
- 0.085926116, -0.08251791, -0.045081906, 0.0948852, 0.068401024,
- 0.024856757, 0.06978981, -0.057309967, -0.012775832, -0.0032452994,
- 0.01977615, -0.041040014, -0.024264973, 0.063464895, 0.05431621,
- });
-
- lstm.SetCellToInputWeights(
- {0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
- -0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
- -0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
- 0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175});
-
- lstm.SetCellToForgetWeights(
- {-0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
- -0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
- -0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
- 0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355});
-
- lstm.SetCellToOutputWeights(
- {0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
- -0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
- -0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
- 0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733});
-
- lstm.SetProjectionWeights(
- {-0.009802181, 0.09401916, 0.0717386, -0.13895074, 0.09641832,
- 0.060420845, 0.08539281, 0.054285463, 0.061395317, 0.034448683,
- -0.042991187, 0.019801661, -0.16840284, -0.015726732, -0.23041931,
- -0.024478018, -0.10959692, -0.013875541, 0.18600968, -0.061274476,
- 0.0138165, -0.08160894, -0.07661644, 0.032372914, 0.16169067,
- 0.22465782, -0.03993472, -0.004017731, 0.08633481, -0.28869787,
- 0.08682067, 0.17240396, 0.014975425, 0.056431185, 0.031037588,
- 0.16702051, 0.0077946745, 0.15140012, 0.29405436, 0.120285,
- -0.188994, -0.027265169, 0.043389652, -0.022061434, 0.014777949,
- -0.20203483, 0.094781205, 0.19100232, 0.13987629, -0.036132768,
- -0.06426278, -0.05108664, 0.13221376, 0.009441198, -0.16715929,
- 0.15859416, -0.040437475, 0.050779544, -0.022187516, 0.012166504,
- 0.027685808, -0.07675938, -0.0055694645, -0.09444123, 0.0046453946,
- 0.050794356, 0.10770313, -0.20790008, -0.07149004, -0.11425117,
- 0.008225835, -0.035802525, 0.14374903, 0.15262283, 0.048710253,
- 0.1847461, -0.007487823, 0.11000021, -0.09542012, 0.22619456,
- -0.029149994, 0.08527916, 0.009043713, 0.0042746216, 0.016261552,
- 0.022461696, 0.12689082, -0.043589946, -0.12035478, -0.08361797,
- -0.050666027, -0.1248618, -0.1275799, -0.071875185, 0.07377272,
- 0.09944291, -0.18897448, -0.1593054, -0.06526116, -0.040107165,
- -0.004618631, -0.067624845, -0.007576253, 0.10727444, 0.041546922,
- -0.20424393, 0.06907816, 0.050412357, 0.00724631, 0.039827548,
- 0.12449835, 0.10747581, 0.13708383, 0.09134148, -0.12617786,
- -0.06428341, 0.09956831, 0.1208086, -0.14676677, -0.0727722,
- 0.1126304, 0.010139365, 0.015571211, -0.038128063, 0.022913318,
- -0.042050496, 0.16842307, -0.060597885, 0.10531834, -0.06411776,
- -0.07451711, -0.03410368, -0.13393489, 0.06534304, 0.003620307,
- 0.04490757, 0.05970546, 0.05197996, 0.02839995, 0.10434969,
- -0.013699693, -0.028353551, -0.07260381, 0.047201227, -0.024575593,
- -0.036445823, 0.07155557, 0.009672501, -0.02328883, 0.009533515,
- -0.03606021, -0.07421458, -0.028082801, -0.2678904, -0.13221288,
- 0.18419984, -0.13012612, -0.014588381, -0.035059117, -0.04824723,
- 0.07830115, -0.056184657, 0.03277091, 0.025466874, 0.14494097,
- -0.12522776, -0.098633975, -0.10766018, -0.08317623, 0.08594209,
- 0.07749552, 0.039474737, 0.1776665, -0.07409566, -0.0477268,
- 0.29323658, 0.10801441, 0.1154011, 0.013952499, 0.10739139,
- 0.10708251, -0.051456142, 0.0074137426, -0.10430189, 0.10034707,
- 0.045594677, 0.0635285, -0.0715442, -0.089667566, -0.10811871,
- 0.00026344223, 0.08298446, -0.009525053, 0.006585689, -0.24567553,
- -0.09450807, 0.09648481, 0.026996298, -0.06419476, -0.04752702,
- -0.11063944, -0.23441927, -0.17608605, -0.052156363, 0.067035615,
- 0.19271925, -0.0032889997, -0.043264326, 0.09663576, -0.057112187,
- -0.10100678, 0.0628376, 0.04447668, 0.017961001, -0.10094388,
- -0.10190601, 0.18335468, 0.10494553, -0.052095775, -0.0026118709,
- 0.10539724, -0.04383912, -0.042349473, 0.08438151, -0.1947263,
- 0.02251204, 0.11216432, -0.10307853, 0.17351969, -0.039091777,
- 0.08066188, -0.00561982, 0.12633002, 0.11335965, -0.0088127935,
- -0.019777594, 0.06864014, -0.059751723, 0.016233567, -0.06894641,
- -0.28651384, -0.004228674, 0.019708522, -0.16305895, -0.07468996,
- -0.0855457, 0.099339016, -0.07580735, -0.13775392, 0.08434318,
- 0.08330512, -0.12131499, 0.031935584, 0.09180414, -0.08876437,
- -0.08049874, 0.008753825, 0.03498998, 0.030215185, 0.03907079,
- 0.089751154, 0.029194152, -0.03337423, -0.019092513, 0.04331237,
- 0.04299654, -0.036394123, -0.12915532, 0.09793732, 0.07512415,
- -0.11319543, -0.032502122, 0.15661901, 0.07671967, -0.005491124,
- -0.19379048, -0.218606, 0.21448623, 0.017840758, 0.1416943,
- -0.07051762, 0.19488361, 0.02664691, -0.18104725, -0.09334311,
- 0.15026465, -0.15493552, -0.057762887, -0.11604192, -0.262013,
- -0.01391798, 0.012185008, 0.11156489, -0.07483202, 0.06693364,
- -0.26151478, 0.046425626, 0.036540434, -0.16435726, 0.17338543,
- -0.21401681, -0.11385144, -0.08283257, -0.069031075, 0.030635102,
- 0.010969227, 0.11109743, 0.010919218, 0.027526086, 0.13519906,
- 0.01891392, -0.046839405, -0.040167913, 0.017953383, -0.09700955,
- 0.0061885654, -0.07000971, 0.026893595, -0.038844477, 0.14543656});
-
- static float lstm_input[][20] = {
- {// Batch0: 4 (input_sequence_size) * 5 (n_input)
- 0.787926, 0.151646, 0.071352, 0.118426, 0.458058, 0.596268, 0.998386,
- 0.568695, 0.864524, 0.571277, 0.073204, 0.296072, 0.743333, 0.069199,
- 0.045348, 0.867394, 0.291279, 0.013714, 0.482521, 0.626339},
-
- {// Batch1: 4 (input_sequence_size) * 5 (n_input)
- 0.295743, 0.544053, 0.690064, 0.858138, 0.497181, 0.642421, 0.524260,
- 0.134799, 0.003639, 0.162482, 0.640394, 0.930399, 0.050782, 0.432485,
- 0.988078, 0.082922, 0.563329, 0.865614, 0.333232, 0.259916}};
-
- static float lstm_golden_output[][64] = {
- {// Batch0: 4 (input_sequence_size) * 16 (n_output)
- -0.00396806, 0.029352, -0.00279226, 0.0159977, -0.00835576,
- -0.0211779, 0.0283512, -0.0114597, 0.00907307, -0.0244004,
- -0.0152191, -0.0259063, 0.00914318, 0.00415118, 0.017147,
- 0.0134203, -0.0166936, 0.0381209, 0.000889694, 0.0143363,
- -0.0328911, -0.0234288, 0.0333051, -0.012229, 0.0110322,
- -0.0457725, -0.000832209, -0.0202817, 0.0327257, 0.0121308,
- 0.0155969, 0.0312091, -0.0213783, 0.0350169, 0.000324794,
- 0.0276012, -0.0263374, -0.0371449, 0.0446149, -0.0205474,
- 0.0103729, -0.0576349, -0.0150052, -0.0292043, 0.0376827,
- 0.0136115, 0.0243435, 0.0354492, -0.0189322, 0.0464512,
- -0.00251373, 0.0225745, -0.0308346, -0.0317124, 0.0460407,
- -0.0189395, 0.0149363, -0.0530162, -0.0150767, -0.0340193,
- 0.0286833, 0.00824207, 0.0264887, 0.0305169},
- {// Batch1: 4 (input_sequence_size) * 16 (n_output)
- -0.013869, 0.0287268, -0.00334693, 0.00733398, -0.0287926,
- -0.0186926, 0.0193662, -0.0115437, 0.00422612, -0.0345232,
- 0.00223253, -0.00957321, 0.0210624, 0.013331, 0.0150954,
- 0.02168, -0.0141913, 0.0322082, 0.00227024, 0.0260507,
- -0.0188721, -0.0296489, 0.0399134, -0.0160509, 0.0116039,
- -0.0447318, -0.0150515, -0.0277406, 0.0316596, 0.0118233,
- 0.0214762, 0.0293641, -0.0204549, 0.0450315, -0.00117378,
- 0.0167673, -0.0375007, -0.0238314, 0.038784, -0.0174034,
- 0.0131743, -0.0506589, -0.0048447, -0.0240239, 0.0325789,
- 0.00790065, 0.0220157, 0.0333314, -0.0264787, 0.0387855,
- -0.000764675, 0.0217599, -0.037537, -0.0335206, 0.0431679,
- -0.0211424, 0.010203, -0.062785, -0.00832363, -0.025181,
- 0.0412031, 0.0118723, 0.0239643, 0.0394009}};
+ lstm.SetInputToInputWeights(input_to_input_weights_);
+ lstm.SetInputToCellWeights(input_to_cell_weights_);
+ lstm.SetInputToForgetWeights(input_to_forget_weights_);
+ lstm.SetInputToOutputWeights(input_to_output_weights_);
+
+ lstm.SetInputGateBias(input_gate_bias_);
+ lstm.SetCellBias(cell_gate_bias_);
+ lstm.SetForgetGateBias(forget_gate_bias_);
+ lstm.SetOutputGateBias(output_gate_bias_);
+
+ lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
+ lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
+ lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
+ lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
+
+ lstm.SetCellToInputWeights(cell_to_input_weights_);
+ lstm.SetCellToForgetWeights(cell_to_forget_weights_);
+ lstm.SetCellToOutputWeights(cell_to_output_weights_);
+
+ lstm.SetProjectionWeights(projection_weights_);
// Resetting cell_state and output_state
lstm.ResetCellState();
lstm.ResetOutputState();
- for (int i = 0; i < lstm.sequence_length(); i++) {
- float* batch0_start = lstm_input[0] + i * lstm.num_inputs();
- float* batch0_end = batch0_start + lstm.num_inputs();
+ VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
+}
- lstm.SetInput(2 * i * lstm.num_inputs(), batch0_start, batch0_end);
+TEST_F(NoCifgPeepholeProjectionClippingLstmTest, HybridLstmBlackBoxTest) {
+ const int n_batch = 2;
+ const int n_input = 5;
+ const int n_cell = 20;
+ const int n_output = 16;
+ const int sequence_length = 4;
- float* batch1_start = lstm_input[1] + i * lstm.num_inputs();
- float* batch1_end = batch1_start + lstm.num_inputs();
- lstm.SetInput((2 * i + 1) * lstm.num_inputs(), batch1_start, batch1_end);
- }
+ HybridUnidirectionalLSTMOpModel lstm(
+ n_batch, n_input, n_cell, n_output, sequence_length,
+ /*use_cifg=*/false, /*use_peephole=*/true,
+ /*use_projection_weights=*/true,
+ /*use_projection_bias=*/false,
+ /*cell_clip=*/0.0, /*proj_clip=*/0.0,
+ {
+ {sequence_length, n_batch, n_input}, // input tensor
- lstm.Invoke();
+ {n_cell, n_input}, // input_to_input_weight tensor
+ {n_cell, n_input}, // input_to_forget_weight tensor
+ {n_cell, n_input}, // input_to_cell_weight tensor
+ {n_cell, n_input}, // input_to_output_weight tensor
- std::vector<float> expected;
- for (int i = 0; i < lstm.sequence_length(); i++) {
- float* golden_start_batch0 = lstm_golden_output[0] + i * lstm.num_outputs();
- float* golden_end_batch0 = golden_start_batch0 + lstm.num_outputs();
- float* golden_start_batch1 = lstm_golden_output[1] + i * lstm.num_outputs();
- float* golden_end_batch1 = golden_start_batch1 + lstm.num_outputs();
- expected.insert(expected.end(), golden_start_batch0, golden_end_batch0);
- expected.insert(expected.end(), golden_start_batch1, golden_end_batch1);
- }
- EXPECT_THAT(lstm.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
+ {n_cell, n_output}, // recurrent_to_input_weight tensor
+ {n_cell, n_output}, // recurrent_to_forget_weight tensor
+ {n_cell, n_output}, // recurrent_to_cell_weight tensor
+ {n_cell, n_output}, // recurrent_to_output_weight tensor
+
+ {n_cell}, // cell_to_input_weight tensor
+ {n_cell}, // cell_to_forget_weight tensor
+ {n_cell}, // cell_to_output_weight tensor
+
+ {n_cell}, // input_gate_bias tensor
+ {n_cell}, // forget_gate_bias tensor
+ {n_cell}, // cell_bias tensor
+ {n_cell}, // output_gate_bias tensor
+
+ {n_output, n_cell}, // projection_weight tensor
+ {0}, // projection_bias tensor
+ });
+
+ lstm.SetInputToInputWeights(input_to_input_weights_);
+ lstm.SetInputToCellWeights(input_to_cell_weights_);
+ lstm.SetInputToForgetWeights(input_to_forget_weights_);
+ lstm.SetInputToOutputWeights(input_to_output_weights_);
+
+ lstm.SetInputGateBias(input_gate_bias_);
+ lstm.SetCellBias(cell_gate_bias_);
+ lstm.SetForgetGateBias(forget_gate_bias_);
+ lstm.SetOutputGateBias(output_gate_bias_);
+
+ lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
+ lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
+ lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
+ lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
+
+ lstm.SetCellToInputWeights(cell_to_input_weights_);
+ lstm.SetCellToForgetWeights(cell_to_forget_weights_);
+ lstm.SetCellToOutputWeights(cell_to_output_weights_);
+
+ lstm.SetProjectionWeights(projection_weights_);
+
+ // Resetting cell_state and output_state
+ lstm.ResetCellState();
+ lstm.ResetOutputState();
+
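+ // Hybrid execution runs with quantized weights, so outputs are approximate;
+ // hence the looser tolerance here than in the float test above.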
+ VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm, /*tolerance=*/0.00467);
}
} // namespace
diff --git a/tensorflow/contrib/lite/model.cc b/tensorflow/contrib/lite/model.cc
index e1ec2d6d57..6c1ba3694a 100644
--- a/tensorflow/contrib/lite/model.cc
+++ b/tensorflow/contrib/lite/model.cc
@@ -63,6 +63,9 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
case TensorType_BOOL:
*type = kTfLiteBool;
break;
+ case TensorType_COMPLEX64:
+ *type = kTfLiteComplex64;
+ break;
default:
error_reporter->Report("Unimplemented data type %s (%d) in tensor\n",
EnumNameTensorType(tensor_type), tensor_type);
@@ -183,6 +186,8 @@ InterpreterBuilder::InterpreterBuilder(const ::tflite::Model* model,
op_resolver_(op_resolver),
error_reporter_(ValidateErrorReporter(error_reporter)) {}
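+// Out-of-line destructor, presumably so that members holding
+// forward-declared types are destroyed where their definitions are visible.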
+InterpreterBuilder::~InterpreterBuilder() {}
+
TfLiteStatus InterpreterBuilder::BuildLocalIndexToRegistrationMapping() {
TfLiteStatus status = kTfLiteOk;
auto opcodes = model_->operator_codes();
@@ -201,8 +206,9 @@ TfLiteStatus InterpreterBuilder::BuildLocalIndexToRegistrationMapping() {
} else if (builtin_code != BuiltinOperator_CUSTOM) {
registration = op_resolver_.FindOp(builtin_code, version);
if (registration == nullptr) {
- error_reporter_->Report("Didn't find op for builtin opcode '%s'\n",
- EnumNameBuiltinOperator(builtin_code));
+ error_reporter_->Report(
+ "Didn't find op for builtin opcode '%s' version '%d'\n",
+ EnumNameBuiltinOperator(builtin_code), version);
status = kTfLiteError;
}
} else if (!opcode->custom_code()) {
@@ -444,6 +450,18 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
op->builtin_options_as_FullyConnectedOptions()) {
params->activation = parse_activation(
fully_connected_params->fused_activation_function());
+ switch (fully_connected_params->weights_format()) {
+ case FullyConnectedOptionsWeightsFormat_DEFAULT:
+ params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
+ break;
+ case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
+ params->weights_format =
+ kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
+ break;
+ default:
+ error_reporter->Report("Unhandled fully-connected weights format.");
+ return kTfLiteError;
+ }
}
*builtin_data = reinterpret_cast<void*>(params);
break;
@@ -598,6 +616,8 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
break;
}
case BuiltinOperator_MEAN:
+ case BuiltinOperator_REDUCE_MAX:
+ case BuiltinOperator_REDUCE_PROD:
case BuiltinOperator_SUM: {
auto* params = MallocPOD<TfLiteReducerParams>();
if (auto* schema_params = op->builtin_options_as_ReducerOptions()) {
@@ -646,6 +666,15 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
*builtin_data = reinterpret_cast<void*>(params);
break;
}
+ case BuiltinOperator_ARG_MIN: {
+ auto* params = MallocPOD<TfLiteArgMinParams>();
+ if (const auto* schema_params = op->builtin_options_as_ArgMinOptions()) {
+ ConvertTensorType(schema_params->output_type(), &params->output_type,
+ error_reporter);
+ }
+ *builtin_data = reinterpret_cast<void*>(params);
+ break;
+ }
case BuiltinOperator_TRANSPOSE_CONV: {
TfLiteTransposeConvParams* params =
MallocPOD<TfLiteTransposeConvParams>();
@@ -682,6 +711,17 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
error_reporter->Report("DELEGATE op shouldn't exist in model.");
return kTfLiteError;
}
+ case BuiltinOperator_FAKE_QUANT: {
+ auto* params = MallocPOD<TfLiteFakeQuantParams>();
+ if (auto* schema_params = op->builtin_options_as_FakeQuantOptions()) {
+ params->min = schema_params->min();
+ params->max = schema_params->max();
+ params->num_bits = schema_params->num_bits();
+ params->narrow_range = schema_params->narrow_range();
+ }
+ *builtin_data = static_cast<void*>(params);
+ break;
+ }
// Below are the ops with no builtin_data structure.
case BuiltinOperator_BATCH_TO_SPACE_ND:
@@ -723,6 +763,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
case BuiltinOperator_TILE:
case BuiltinOperator_TOPK_V2:
case BuiltinOperator_TRANSPOSE:
+ case BuiltinOperator_POW:
break;
}
return kTfLiteOk;
@@ -745,7 +786,7 @@ TfLiteStatus InterpreterBuilder::ParseNodes(
}
const TfLiteRegistration* registration =
- flatbuffer_op_index_to_registration_[op->opcode_index()];
+ flatbuffer_op_index_to_registration_[index];
if (registration == nullptr) {
error_reporter_->Report("Skipping op for opcode_index %d\n", index);
status = kTfLiteError;
@@ -975,7 +1016,7 @@ TfLiteStatus InterpreterBuilder::operator()(
variables.push_back(i);
}
}
- (**interpreter).SetVariables(variables);
+ (**interpreter).SetVariables(std::move(variables));
return kTfLiteOk;
}
diff --git a/tensorflow/contrib/lite/model.h b/tensorflow/contrib/lite/model.h
index 3946b49041..8bc9ecd7ce 100644
--- a/tensorflow/contrib/lite/model.h
+++ b/tensorflow/contrib/lite/model.h
@@ -156,6 +156,7 @@ class InterpreterBuilder {
InterpreterBuilder(const ::tflite::Model* model,
const OpResolver& op_resolver,
ErrorReporter* error_reporter = DefaultErrorReporter());
+ ~InterpreterBuilder();
InterpreterBuilder(const InterpreterBuilder&) = delete;
InterpreterBuilder& operator=(const InterpreterBuilder&) = delete;
TfLiteStatus operator()(std::unique_ptr<Interpreter>* interpreter);
diff --git a/tensorflow/contrib/lite/nnapi_delegate.cc b/tensorflow/contrib/lite/nnapi_delegate.cc
index ab007993af..5950840e8a 100644
--- a/tensorflow/contrib/lite/nnapi_delegate.cc
+++ b/tensorflow/contrib/lite/nnapi_delegate.cc
@@ -29,27 +29,46 @@ limitations under the License.
namespace tflite {
-// TODO(aselle): FATAL leaves resources hanging.
-void FATAL(const char* format, ...) {
+void logError(const char* format, ...) {
+ // TODO(mikie): use Android logging; stderr is not captured for Java
+ // applications.
va_list args;
va_start(args, format);
vfprintf(stderr, format, args);
va_end(args);
+ fprintf(stderr, "\n");
fflush(stderr);
- exit(1);
}
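+// Logs the message, then terminates the process.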
+#define FATAL(...) \
+  do { \
+    logError(__VA_ARGS__); \
+    exit(1); \
+  } while (0)
+
// TODO(aselle): Change the error model to use status codes.
-#define CHECK_TFLITE_SUCCESS(x) \
- if (x != kTfLiteOk) { \
- FATAL("Aborting since tflite returned failure."); \
+#define CHECK_TFLITE_SUCCESS(x) \
+ if (x != kTfLiteOk) { \
+ FATAL("Aborting since tflite returned failure nnapi_delegate.cc:%d.", \
+ __LINE__); \
}
-#define CHECK_NN(x) \
- if (x != ANEURALNETWORKS_NO_ERROR) { \
- FATAL("Aborting since tflite returned failure."); \
+#define CHECK_NN(x) \
+ if (x != ANEURALNETWORKS_NO_ERROR) { \
+ FATAL("Aborting since NNAPI returned failure nnapi_delegate.cc:%d", \
+ __LINE__); \
}
+#define RETURN_ERROR_IF_NN_FAILED(x) \
+ if (x != ANEURALNETWORKS_NO_ERROR) { \
+ logError( \
+ "Returning error since NNAPI returned failure nnapi_delegate.cc:%d.", \
+ __LINE__); \
+ return kTfLiteError; \
+ }
+
+// Tracking of NNAPI operand ids
+static const int64_t kOperandIdNotSet = -1;
+static const int64_t kOperandNotNeeded = -2;
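+// kOperandIdNotSet marks tensors that still need an NNAPI operand id;
+// kOperandNotNeeded marks tensors (e.g. temporaries, RNN back-edges) that
+// NNAPI never sees.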
+
namespace {
int32_t GetAndroidSdkVersion() {
@@ -104,21 +123,16 @@ NNAPIDelegate::~NNAPIDelegate() {
}
// Adds the tensors of the interpreter to the NN API model.
-// Returns the number of operands added.
-uint32_t addTensorOperands(tflite::Interpreter* interpreter,
- ANeuralNetworksModel* nn_model,
- const std::vector<uint32_t>& skip_list) {
+TfLiteStatus addTensorOperands(tflite::Interpreter* interpreter,
+ ANeuralNetworksModel* nn_model,
+ uint32_t* no_of_operands_added,
+ std::vector<int64_t>* nnapi_ids) {
uint32_t next_id = 0;
for (size_t i = 0; i < interpreter->tensors_size(); i++) {
- // skip temporaries tensors.
- bool shouldSkip = false;
- for (auto skip_idx : skip_list) {
- if (i == skip_idx) {
- shouldSkip = true;
- break;
- }
- }
- if (shouldSkip) continue;
+ // Skip temporaries and RNN back-edges.
+ if ((*nnapi_ids)[i] == kOperandNotNeeded) continue;
+
+ (*nnapi_ids)[i] = int64_t(next_id);
int32_t nn_type = 0;
// NNAPI requires 32-bit float scale to be zero, tflite doesn't care
@@ -144,7 +158,18 @@ uint32_t addTensorOperands(tflite::Interpreter* interpreter,
zeroPoint = tensor->params.zero_point;
break;
default:
- FATAL("Unsupported type.");
+ logError("Unsupported tensor type %d", tensor->type);
+ return kTfLiteError;
+ }
+ if (tensor->dims->size == 0) {
+ logError("NNAPI doesn't support tensors with rank 0 (index %d name %s)",
+ i, tensor->name);
+ return kTfLiteError;
+ }
+ if (tensor->dims->size > 4) {
+ logError("NNAPI doesn't support tensors with rank > 4 (index %d name %s)",
+ i, tensor->name);
+ return kTfLiteError;
}
// TODO(aselle): Note, many of these are intermediate results. Do I need
// to ever specify these sizes. I am currently below doing setValue
@@ -154,36 +179,53 @@ uint32_t addTensorOperands(tflite::Interpreter* interpreter,
ANeuralNetworksOperandType operand_type{
nn_type, static_cast<uint32_t>(tensor->dims->size),
reinterpret_cast<uint32_t*>(tensor->dims->data), scale, zeroPoint};
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type));
+ RETURN_ERROR_IF_NN_FAILED(
+ ANeuralNetworksModel_addOperand(nn_model, &operand_type));
// TODO(aselle): Based on Michael's suggestion, limiting this to read
// only memory
if (tensor->allocation_type == kTfLiteMmapRo) {
if (const NNAPIAllocation* alloc = dynamic_cast<const NNAPIAllocation*>(
static_cast<const Allocation*>(tensor->allocation))) {
- CHECK_NN(ANeuralNetworksModel_setOperandValueFromMemory(
- nn_model, next_id, alloc->memory(), alloc->offset(tensor->data.raw),
- tensor->bytes));
+ RETURN_ERROR_IF_NN_FAILED(
+ ANeuralNetworksModel_setOperandValueFromMemory(
+ nn_model, next_id, alloc->memory(),
+ alloc->offset(tensor->data.raw), tensor->bytes));
} else {
- CHECK_NN(ANeuralNetworksModel_setOperandValue(
+ RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_setOperandValue(
nn_model, next_id, tensor->data.raw, tensor->bytes));
}
} else if (tensor->bytes == 0) {
// These zero-sized tensors are reserved optional tensors.
- CHECK_NN(
+ RETURN_ERROR_IF_NN_FAILED(
ANeuralNetworksModel_setOperandValue(nn_model, next_id, nullptr, 0));
}
++next_id;
}
- return next_id;
+ *no_of_operands_added = next_id;
+ return kTfLiteOk;
+}
+
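+// Translates the TFLite tensor ids in 'from_ids_buf' to NNAPI operand ids
+// using 'map'; kOptionalTensor entries are passed through unchanged.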
+void MapAndAddTensorIds(const int* from_ids_buf, size_t from_ids_count,
+ std::vector<uint32_t>* into,
+ const std::vector<int64_t>& map) {
+ for (size_t i = 0; i < from_ids_count; i++) {
+ int from_id = from_ids_buf[i];
+ if (from_id == kOptionalTensor) {
+ into->push_back(from_id);
+ } else {
+ into->push_back(map[from_id]);
+ }
+ }
}
// Adds the operations and their parameters to the NN API model.
// 'next_id' is the operand ID of the next operand of the model.
-void AddOpsAndParams(tflite::Interpreter* interpreter,
- ANeuralNetworksModel* nn_model, uint32_t next_id,
- std::vector<int>* model_state_inputs,
- std::vector<int>* model_state_outputs) {
+TfLiteStatus AddOpsAndParams(
+ tflite::Interpreter* interpreter, ANeuralNetworksModel* nn_model,
+ uint32_t next_id, std::vector<int>* model_state_inputs,
+ std::vector<int>* model_state_outputs,
+ const std::vector<int64_t>& tensor_id_to_nnapi_id) {
for (size_t i = 0; i < interpreter->nodes_size(); i++) {
const auto* node_and_registration = interpreter->node_and_registration(i);
const TfLiteNode& node = node_and_registration->first;
@@ -192,10 +234,11 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
static_cast<tflite::BuiltinOperator>(registration.builtin_code);
// Add the parameters.
- std::vector<uint32_t> augmented_inputs(
- node.inputs->data, node.inputs->data + node.inputs->size);
- std::vector<uint32_t> augmented_outputs(
- node.outputs->data, node.outputs->data + node.outputs->size);
+ std::vector<uint32_t> augmented_inputs, augmented_outputs;
+ MapAndAddTensorIds(node.inputs->data, node.inputs->size, &augmented_inputs,
+ tensor_id_to_nnapi_id);
+ MapAndAddTensorIds(node.outputs->data, node.outputs->size,
+ &augmented_outputs, tensor_id_to_nnapi_id);
auto add_scalar_int32 = [&nn_model, &augmented_inputs,
&next_id](int value) {
@@ -215,6 +258,17 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
augmented_inputs.push_back(next_id++);
};
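+ // Adds a constant 1-D int32 tensor operand holding 'values' and appends
+ // its operand id to the node's inputs.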
+ auto add_vector_int32 = [&](const int* values, uint32_t num_values) {
+ ANeuralNetworksOperandType operand_type{
+ .type = ANEURALNETWORKS_TENSOR_INT32,
+ .dimensionCount = 1,
+ .dimensions = &num_values};
+ CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
+ CHECK_NN(ANeuralNetworksModel_setOperandValue(
+ nn_model, next_id, values, sizeof(int32_t) * num_values));
+ augmented_inputs.push_back(next_id++);
+ };
+
// Handle state tensors of RNN, LSTM, SVDF.
// For each state_out tensor, a corresponding state_in operand needs to be
// created for NNAPI.
@@ -233,42 +287,54 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
model_state_outputs->push_back(tensor_id);
next_id++;
};
+ auto check_and_add_activation = [&add_scalar_int32](int activation) {
+ if (activation > kTfLiteActRelu6) {
+ FATAL("NNAPI only supports RELU, RELU1 and RELU6 activations");
+ }
+ add_scalar_int32(activation);
+ };
auto add_add_params = [&add_scalar_int32](void* data) {
auto* builtin = reinterpret_cast<TfLiteAddParams*>(data);
+ if (builtin->activation > kTfLiteActRelu6) {
+ FATAL("NNAPI only supports RELU, RELU1 and RELU6 activations");
+ }
add_scalar_int32(builtin->activation);
};
- auto add_pooling_params = [&add_scalar_int32](void* data) {
+ auto add_pooling_params = [&add_scalar_int32,
+ &check_and_add_activation](void* data) {
auto builtin = reinterpret_cast<TfLitePoolParams*>(data);
add_scalar_int32(builtin->padding);
add_scalar_int32(builtin->stride_width);
add_scalar_int32(builtin->stride_height);
add_scalar_int32(builtin->filter_width);
add_scalar_int32(builtin->filter_height);
- add_scalar_int32(builtin->activation);
+ check_and_add_activation(builtin->activation);
};
- auto add_convolution_params = [&add_scalar_int32](void* data) {
+ auto add_convolution_params = [&add_scalar_int32,
+ &check_and_add_activation](void* data) {
auto builtin = reinterpret_cast<TfLiteConvParams*>(data);
add_scalar_int32(builtin->padding);
add_scalar_int32(builtin->stride_width);
add_scalar_int32(builtin->stride_height);
- add_scalar_int32(builtin->activation);
+ check_and_add_activation(builtin->activation);
};
- auto add_depthwise_conv_params = [&add_scalar_int32](void* data) {
+ auto add_depthwise_conv_params = [&add_scalar_int32,
+ &check_and_add_activation](void* data) {
auto builtin = reinterpret_cast<TfLiteDepthwiseConvParams*>(data);
add_scalar_int32(builtin->padding);
add_scalar_int32(builtin->stride_width);
add_scalar_int32(builtin->stride_height);
add_scalar_int32(builtin->depth_multiplier);
- add_scalar_int32(builtin->activation);
+ check_and_add_activation(builtin->activation);
};
- auto add_fully_connected_params = [&add_scalar_int32](void* data) {
+ auto add_fully_connected_params = [&check_and_add_activation](void* data) {
auto builtin = reinterpret_cast<TfLiteFullyConnectedParams*>(data);
- add_scalar_int32(builtin->activation);
+ check_and_add_activation(builtin->activation);
};
auto add_concatenation_params = [&add_scalar_int32](void* data) {
@@ -300,6 +366,7 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
// LSTM in NNAPI requires scratch tensor as an output operand.
auto add_lstm_scratch_tensor_float32 = [interpreter, &node, &nn_model,
&next_id, &augmented_outputs]() {
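+ // Some nodes carry no scratch temporaries; nothing to add in that case.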
+ if (node.temporaries->size == 0) return;
int scratch_buffer_index = node.temporaries->data[0];
const TfLiteTensor* tensor = interpreter->tensor(scratch_buffer_index);
ANeuralNetworksOperandType operand_type{
@@ -327,6 +394,14 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
add_scalar_int32(builtin->activation);
};
+ auto add_squeeze_params = [&](void* data) {
+ const auto* builtin = reinterpret_cast<TfLiteSqueezeParams*>(data);
+ // Note that we add the squeeze dimensions even if the dimensions were
+ // unspecified (empty), as NNAPI requires the operand.
+ add_vector_int32(builtin->squeeze_dims,
+ static_cast<uint32_t>(builtin->num_squeeze_dims));
+ };
+
// Handle optional input tensors.
auto add_optional_tensors = [&nn_model, &augmented_inputs,
&next_id](int nn_type) {
@@ -366,7 +441,14 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
add_pooling_params(node.builtin_data);
nn_op_type = ANEURALNETWORKS_L2_POOL_2D;
break;
- case tflite::BuiltinOperator_CONV_2D:
+ case tflite::BuiltinOperator_CONV_2D: {
+ auto builtin = reinterpret_cast<TfLiteConvParams*>(node.builtin_data);
+ if (builtin->dilation_width_factor != 1 ||
+ builtin->dilation_height_factor != 1 || node.inputs->size != 3) {
+ logError("NNAPI does not support dilated Conv2D.");
+ return kTfLiteError;
+ }
+ }
add_convolution_params(node.builtin_data);
nn_op_type = ANEURALNETWORKS_CONV_2D;
break;
@@ -410,6 +492,10 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
nn_op_type = ANEURALNETWORKS_SPACE_TO_DEPTH;
break;
case tflite::BuiltinOperator_LSTM: {
+ if (node.inputs->size + /* no of params */ 3 != 21) {
+ logError("NNAPI only supports 21-input LSTMs");
+ return kTfLiteError;
+ }
duplicate_state_tensor_float32(
node.outputs->data[/*kOutputStateTensor*/ 0]);
duplicate_state_tensor_float32(
@@ -448,10 +534,31 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
case tflite::BuiltinOperator_DIV:
nnapi_version = 11; // require NNAPI 1.1
nn_op_type = ANEURALNETWORKS_DIV;
+ check_and_add_activation(
+ reinterpret_cast<TfLiteDivParams*>(node.builtin_data)->activation);
break;
case tflite::BuiltinOperator_SUB:
nnapi_version = 11; // require NNAPI 1.1
nn_op_type = ANEURALNETWORKS_SUB;
+ check_and_add_activation(
+ reinterpret_cast<TfLiteSubParams*>(node.builtin_data)->activation);
+ break;
+ case tflite::BuiltinOperator_SQUEEZE:
+ nnapi_version = 11; // requires NNAPI 1.1
+ add_squeeze_params(node.builtin_data);
+ nn_op_type = ANEURALNETWORKS_SQUEEZE;
+ break;
+ case tflite::BuiltinOperator_TRANSPOSE:
+ // The permutation input tensor value dictates the output dimensions.
+ // TODO(b/110888333): Support dynamically-sized tensors in delegates.
+ if ((node.inputs->size > 1) &&
+ (interpreter->tensor(node.inputs->data[1])->allocation_type !=
+ kTfLiteMmapRo)) {
+ logError("NNAPI does not yet support dynamic tensors.");
+ return kTfLiteError;
+ }
+ nnapi_version = 11; // require NNAPI 1.1
+ nn_op_type = ANEURALNETWORKS_TRANSPOSE;
break;
case tflite::BuiltinOperator_CONCAT_EMBEDDINGS:
case tflite::BuiltinOperator_LSH_PROJECTION:
@@ -472,9 +579,7 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
case tflite::BuiltinOperator_SPACE_TO_BATCH_ND:
case tflite::BuiltinOperator_BATCH_TO_SPACE_ND:
case tflite::BuiltinOperator_TOPK_V2:
- case tflite::BuiltinOperator_TRANSPOSE:
case tflite::BuiltinOperator_SPLIT:
- case tflite::BuiltinOperator_SQUEEZE:
case tflite::BuiltinOperator_STRIDED_SLICE:
case tflite::BuiltinOperator_EXP:
case tflite::BuiltinOperator_LOG_SOFTMAX:
@@ -485,6 +590,7 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
case tflite::BuiltinOperator_MAXIMUM:
case tflite::BuiltinOperator_MINIMUM:
case tflite::BuiltinOperator_ARG_MAX:
+ case tflite::BuiltinOperator_ARG_MIN:
case tflite::BuiltinOperator_GREATER:
case tflite::BuiltinOperator_GREATER_EQUAL:
case tflite::BuiltinOperator_LESS:
@@ -501,15 +607,19 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
case tflite::BuiltinOperator_EQUAL:
case tflite::BuiltinOperator_NOT_EQUAL:
case tflite::BuiltinOperator_SUM:
+ case tflite::BuiltinOperator_REDUCE_MAX:
+ case tflite::BuiltinOperator_REDUCE_PROD:
case tflite::BuiltinOperator_SQRT:
case tflite::BuiltinOperator_RSQRT:
case tflite::BuiltinOperator_SHAPE:
- FATAL("Op code %d is currently not delegated to NNAPI", builtin);
- nn_op_type = -1; // set to invalid
+ case tflite::BuiltinOperator_POW:
+ case tflite::BuiltinOperator_FAKE_QUANT:
+ logError("Op code %d is currently not delegated to NNAPI", builtin);
+ return kTfLiteError;
break;
case tflite::BuiltinOperator_CUSTOM:
- FATAL("Custom operations are not supported when using NNAPI.");
- nn_op_type = -1; // set to invalid
+ logError("Custom operations are not supported when using NNAPI.");
+ return kTfLiteError;
break;
}
@@ -518,47 +628,70 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
}
// Add the operation.
- CHECK_NN(ANeuralNetworksModel_addOperation(
+ RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_addOperation(
nn_model, nn_op_type, static_cast<uint32_t>(augmented_inputs.size()),
augmented_inputs.data(),
static_cast<uint32_t>(augmented_outputs.size()),
reinterpret_cast<uint32_t*>(augmented_outputs.data())));
}
+ return kTfLiteOk;
}
TfLiteStatus NNAPIDelegate::BuildGraph(Interpreter* interpreter) {
- // TODO(aselle): This is not correct. need to handle resize invalidation.
- if (nn_model_ && nn_compiled_model_) return kTfLiteOk;
+ if (nn_model_ && nn_compiled_model_) return model_status_;
+ // TODO(aselle): This is not correct; need to handle resize invalidation.
if (!nn_model_) {
CHECK_NN(ANeuralNetworksModel_create(&nn_model_));
- // Find all the temporary tensors and put them in a skip_list.
- std::vector<uint32_t> skip_list;
+ // Find which tensors should be added to NNAPI. TFLite has temporaries
+ // and RNN back-edges which are not valid for NNAPI. We look through all
+ // inputs and outputs and mark the mapping in tensor_id_to_nnapi_id with
+ // kOperandIdNotSet. addTensorOperands will replace those with the
+ // corresponding NNAPI operand ids and skip kOperandNotNeeded entries.
+ std::vector<int64_t> tensor_id_to_nnapi_id(interpreter->tensors_size(),
+ kOperandNotNeeded);
+ auto set_ids_to_not_set = [&tensor_id_to_nnapi_id](const int* buf,
+ size_t count) {
+ for (int j = 0; j < count; j++) {
+ auto tensor_id = buf[j];
+ if (tensor_id != kOptionalTensor) {
+ tensor_id_to_nnapi_id[tensor_id] = kOperandIdNotSet;
+ }
+ }
+ };
for (size_t i = 0; i < interpreter->nodes_size(); i++) {
const auto* node_and_registration = interpreter->node_and_registration(i);
const TfLiteNode& node = node_and_registration->first;
- if (node.temporaries != nullptr) {
- for (int j = 0; j < node.temporaries->size; j++) {
- skip_list.push_back(static_cast<uint32_t>(node.temporaries->data[j]));
- }
- }
+ set_ids_to_not_set(node.inputs->data, node.inputs->size);
+ set_ids_to_not_set(node.outputs->data, node.outputs->size);
}
-
- uint32_t next_id = addTensorOperands(interpreter, nn_model_, skip_list);
- AddOpsAndParams(interpreter, nn_model_, next_id, &model_states_inputs_,
- &model_states_outputs_);
-
- std::vector<int> augmented_inputs = interpreter->inputs();
- std::vector<int> augmented_outputs = interpreter->outputs();
-
- // All state tensors input/output need to be treated as model input/output.
+ set_ids_to_not_set(interpreter->inputs().data(),
+ interpreter->inputs().size());
+ set_ids_to_not_set(interpreter->outputs().data(),
+ interpreter->outputs().size());
+
+ uint32_t next_id = 0;
+ RETURN_ERROR_IF_NN_FAILED(addTensorOperands(
+ interpreter, nn_model_, &next_id, &tensor_id_to_nnapi_id));
+ RETURN_ERROR_IF_NN_FAILED(
+ AddOpsAndParams(interpreter, nn_model_, next_id, &model_states_inputs_,
+ &model_states_outputs_, tensor_id_to_nnapi_id));
+
+ std::vector<uint32_t> augmented_inputs;
+ MapAndAddTensorIds(interpreter->inputs().data(),
+ interpreter->inputs().size(), &augmented_inputs,
+ tensor_id_to_nnapi_id);
augmented_inputs.insert(augmented_inputs.end(),
model_states_inputs_.begin(),
model_states_inputs_.end());
- augmented_outputs.insert(augmented_outputs.end(),
- model_states_outputs_.begin(),
- model_states_outputs_.end());
+ std::vector<uint32_t> augmented_outputs;
+ MapAndAddTensorIds(interpreter->outputs().data(),
+ interpreter->outputs().size(), &augmented_outputs,
+ tensor_id_to_nnapi_id);
+ MapAndAddTensorIds(model_states_outputs_.data(),
+ model_states_outputs_.size(), &augmented_outputs,
+ tensor_id_to_nnapi_id);
CHECK_NN(ANeuralNetworksModel_identifyInputsAndOutputs(
nn_model_, static_cast<uint32_t>(augmented_inputs.size()),
@@ -576,7 +709,13 @@ TfLiteStatus NNAPIDelegate::BuildGraph(Interpreter* interpreter) {
TfLiteStatus NNAPIDelegate::Invoke(Interpreter* interpreter) {
if (!nn_model_) {
- TF_LITE_ENSURE_STATUS(BuildGraph(interpreter));
+ model_status_ = BuildGraph(interpreter);
+ if (model_status_ != kTfLiteOk) {
+ logError("Failed to build graph for NNAPI");
+ }
+ }
+ if (model_status_ != kTfLiteOk) {
+ return model_status_;
}
ANeuralNetworksExecution* execution = nullptr;
diff --git a/tensorflow/contrib/lite/nnapi_delegate.h b/tensorflow/contrib/lite/nnapi_delegate.h
index 94dea4f9b2..8dc7d38a30 100644
--- a/tensorflow/contrib/lite/nnapi_delegate.h
+++ b/tensorflow/contrib/lite/nnapi_delegate.h
@@ -59,14 +59,16 @@ class NNAPIDelegate {
ANeuralNetworksModel* nn_model_ = nullptr;
// The NN API compilation handle
ANeuralNetworksCompilation* nn_compiled_model_ = nullptr;
+ // Model status
+ TfLiteStatus model_status_ = kTfLiteOk;
// List of state tensors for LSTM, RNN, SVDF.
// NN API does not allow ops to maintain states across multiple
// invocations. We need to manually create state input tensors from
// corresponding state output tensors of TFLite operations, and map them
// correctly.
- std::vector<int> model_states_inputs_;
- std::vector<int> model_states_outputs_;
+ std::vector<int> model_states_inputs_; // holds NNAPI operand ids
+ std::vector<int> model_states_outputs_; // holds TFLite tensor ids
};
} // namespace tflite
diff --git a/tensorflow/contrib/lite/optional_debug_tools.cc b/tensorflow/contrib/lite/optional_debug_tools.cc
index 99c35b9caf..f1f025f777 100644
--- a/tensorflow/contrib/lite/optional_debug_tools.cc
+++ b/tensorflow/contrib/lite/optional_debug_tools.cc
@@ -52,6 +52,8 @@ const char* TensorTypeName(TfLiteType type) {
return "kTfLiteBool";
case kTfLiteInt16:
return "kTfLiteInt16";
+ case kTfLiteComplex64:
+ return "kTfLiteComplex64";
}
return "(invalid)";
}
diff --git a/tensorflow/contrib/lite/profiling/profile_summarizer.cc b/tensorflow/contrib/lite/profiling/profile_summarizer.cc
index c37a096588..36e87b666a 100644
--- a/tensorflow/contrib/lite/profiling/profile_summarizer.cc
+++ b/tensorflow/contrib/lite/profiling/profile_summarizer.cc
@@ -83,7 +83,7 @@ OperatorDetails GetOperatorDetails(const tflite::Interpreter& interpreter,
OperatorDetails details;
details.name = op_name;
if (profiling_string) {
- details.name += ":" + string(profiling_string);
+ details.name += ":" + std::string(profiling_string);
}
details.inputs = GetTensorNames(interpreter, inputs);
details.outputs = GetTensorNames(interpreter, outputs);
diff --git a/tensorflow/contrib/lite/python/BUILD b/tensorflow/contrib/lite/python/BUILD
index 27909a9458..8c9608db04 100644
--- a/tensorflow/contrib/lite/python/BUILD
+++ b/tensorflow/contrib/lite/python/BUILD
@@ -19,6 +19,7 @@ py_library(
visibility = ["//visibility:public"],
deps = [
"//tensorflow/contrib/lite/python/interpreter_wrapper:tensorflow_wrap_interpreter_wrapper",
+ "//tensorflow/python:util",
],
)
@@ -30,9 +31,10 @@ py_test(
tags = ["no_oss"],
deps = [
":interpreter",
- "//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
- "//tensorflow/python:platform_test",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:platform",
+ "//third_party/py/numpy",
],
)
diff --git a/tensorflow/contrib/lite/python/interpreter.py b/tensorflow/contrib/lite/python/interpreter.py
index fd90823425..e1981ceae2 100644
--- a/tensorflow/contrib/lite/python/interpreter.py
+++ b/tensorflow/contrib/lite/python/interpreter.py
@@ -56,9 +56,6 @@ class Interpreter(object):
self._interpreter = (
_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromBuffer(
model_content))
- if not self._interpreter:
- raise ValueError(
- 'Failed to create model from {} bytes'.format(len(model_content)))
elif not model_path and not model_content:
raise ValueError('`model_path` or `model_content` must be specified.')
else:
@@ -66,8 +63,7 @@ class Interpreter(object):
def allocate_tensors(self):
self._ensure_safe()
- if not self._interpreter.AllocateTensors():
- raise ValueError('Failed to allocate tensors')
+ return self._interpreter.AllocateTensors()
def _safe_to_run(self):
"""Returns true if there exist no numpy array buffers.
@@ -152,8 +148,7 @@ class Interpreter(object):
Raises:
ValueError: If the interpreter could not set the tensor.
"""
- if not self._interpreter.SetTensor(tensor_index, value):
- raise ValueError('Failed to set tensor')
+ self._interpreter.SetTensor(tensor_index, value)
def resize_tensor_input(self, input_index, tensor_size):
"""Resizes an input tensor.
@@ -167,8 +162,7 @@ class Interpreter(object):
ValueError: If the interpreter could not resize the input tensor.
"""
self._ensure_safe()
- if not self._interpreter.ResizeInputTensor(input_index, tensor_size):
- raise ValueError('Failed to resize input')
+ self._interpreter.ResizeInputTensor(input_index, tensor_size)
def get_output_details(self):
"""Gets model output details.
@@ -181,7 +175,9 @@ class Interpreter(object):
]
def get_tensor(self, tensor_index):
- """Gets the value of the input tensor. Note this makes a copy so prefer `tensor()`.
+ """Gets the value of the input tensor (get a copy).
+
+ If you wish to avoid the copy, use `tensor()`.
Args:
tensor_index: Tensor index of tensor to get. This value can be gotten from
@@ -247,5 +243,7 @@ class Interpreter(object):
ValueError: When the underlying interpreter fails raise ValueError.
"""
self._ensure_safe()
- if not self._interpreter.Invoke():
- raise ValueError('Failed to invoke TFLite model')
+ self._interpreter.Invoke()
+
+ def reset_all_variables_to_zero(self):
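+ """Resets all variable tensors to zero."""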
+ return self._interpreter.ResetVariableTensorsToZero()
diff --git a/tensorflow/contrib/lite/python/interpreter_test.py b/tensorflow/contrib/lite/python/interpreter_test.py
index 5f1fa26c3b..95fa4b8584 100644
--- a/tensorflow/contrib/lite/python/interpreter_test.py
+++ b/tensorflow/contrib/lite/python/interpreter_test.py
@@ -19,6 +19,7 @@ from __future__ import print_function
import io
import numpy as np
+import six
from tensorflow.contrib.lite.python import interpreter as interpreter_wrapper
from tensorflow.python.framework import test_util
@@ -91,6 +92,28 @@ class InterpreterTest(test_util.TensorFlowTestCase):
self.assertTrue((expected_output == output_data).all())
+class InterpreterTestErrorPropagation(test_util.TensorFlowTestCase):
+
+ def testInvalidModelContent(self):
+ with self.assertRaisesRegexp(ValueError,
+ 'Model provided has model identifier \''):
+ interpreter_wrapper.Interpreter(model_content=six.b('garbage'))
+
+ def testInvalidModelFile(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'Could not open \'totally_invalid_file_name\''):
+ interpreter_wrapper.Interpreter(
+ model_path='totally_invalid_file_name')
+
+ def testInvokeBeforeReady(self):
+ interpreter = interpreter_wrapper.Interpreter(
+ model_path=resource_loader.get_path_to_datafile(
+ 'testdata/permute_float.tflite'))
+ with self.assertRaisesRegexp(RuntimeError,
+ 'Invoke called on model that is not ready'):
+ interpreter.invoke()
+
+
class InterpreterTensorAccessorTest(test_util.TensorFlowTestCase):
def setUp(self):
diff --git a/tensorflow/contrib/lite/python/interpreter_wrapper/BUILD b/tensorflow/contrib/lite/python/interpreter_wrapper/BUILD
index 634c2a1e1f..69ee95c320 100644
--- a/tensorflow/contrib/lite/python/interpreter_wrapper/BUILD
+++ b/tensorflow/contrib/lite/python/interpreter_wrapper/BUILD
@@ -13,7 +13,6 @@ cc_library(
deps = [
"//tensorflow/contrib/lite:framework",
"//tensorflow/contrib/lite/kernels:builtin_ops",
- "//tensorflow/core:lib",
"//third_party/py/numpy:headers",
"//third_party/python_runtime:headers",
"@com_google_absl//absl/memory",
diff --git a/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.cc b/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.cc
index b283551c45..c38b692dcd 100644
--- a/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.cc
+++ b/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.cc
@@ -14,13 +14,13 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h"
+#include <sstream>
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/contrib/lite/interpreter.h"
#include "tensorflow/contrib/lite/kernels/register.h"
#include "tensorflow/contrib/lite/model.h"
-#include "tensorflow/core/platform/logging.h"
// Disallow Numpy 1.7 deprecated symbols.
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
@@ -38,9 +38,58 @@ limitations under the License.
#define CPP_TO_PYSTRING PyString_FromStringAndSize
#endif
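+// Raises a Python RuntimeError carrying the error reporter's last message
+// whenever a TfLite call fails.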
+#define TFLITE_PY_CHECK(x) \
+ if ((x) != kTfLiteOk) { \
+ return error_reporter_->exception(); \
+ }
+
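+// Raises a Python ValueError when tensor index 'i' is out of range.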
+#define TFLITE_PY_TENSOR_BOUNDS_CHECK(i) \
+ if (i >= interpreter_->tensors_size() || i < 0) { \
+ PyErr_Format(PyExc_ValueError, \
+ "Invalid tensor index %d exceeds max tensor index %lu", i, \
+ interpreter_->tensors_size()); \
+ return nullptr; \
+ }
+
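+// Raises a Python ValueError when the wrapper holds no interpreter.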
+#define TFLITE_PY_ENSURE_VALID_INTERPRETER() \
+ if (!interpreter_) { \
+ PyErr_SetString(PyExc_ValueError, "Interpreter was not initialized."); \
+ return nullptr; \
+ }
+
namespace tflite {
namespace interpreter_wrapper {
+class PythonErrorReporter : public tflite::ErrorReporter {
+ public:
+ PythonErrorReporter() {}
+
+ // Reports an error message.
+ int Report(const char* format, va_list args) override {
+ char buf[1024];
+ int formatted = vsnprintf(buf, sizeof(buf), format, args);
+ buffer_ << buf;
+ return formatted;
+ }
+
+ // Sets a Python runtime exception with the last error.
+ PyObject* exception() {
+ std::string last_message = message();
+ PyErr_SetString(PyExc_RuntimeError, last_message.c_str());
+ return nullptr;
+ }
+
+ // Gets the last error message and clears the buffer.
+ std::string message() {
+ std::string value = buffer_.str();
+ buffer_.str(""); // str("") empties the buffer; clear() only resets flags.
+ return value;
+ }
+
+ private:
+ std::stringstream buffer_;
+};
+
namespace {
// Calls PyArray's initialization to initialize all the API pointers. Note that
@@ -60,19 +109,6 @@ std::unique_ptr<tflite::Interpreter> CreateInterpreter(
std::unique_ptr<tflite::Interpreter> interpreter;
tflite::InterpreterBuilder(*model, resolver)(&interpreter);
- if (interpreter) {
- for (const int input_index : interpreter->inputs()) {
- const TfLiteTensor* tensor = interpreter->tensor(input_index);
- CHECK(tensor);
- const TfLiteIntArray* dims = tensor->dims;
- if (!dims) {
- continue;
- }
-
- std::vector<int> input_dims(dims->data, dims->data + dims->size);
- interpreter->ResizeInputTensor(input_index, input_dims);
- }
- }
return interpreter;
}
@@ -92,11 +128,13 @@ int TfLiteTypeToPyArrayType(TfLiteType tf_lite_type) {
return NPY_OBJECT;
case kTfLiteBool:
return NPY_BOOL;
+ case kTfLiteComplex64:
+ return NPY_COMPLEX64;
case kTfLiteNoType:
- return -1;
+ return NPY_NOTYPE;
+ // Avoid default so compiler errors are raised when new types are added.
}
- LOG(ERROR) << "Unknown TfLiteType " << tf_lite_type;
- return -1;
+ return NPY_NOTYPE;
}
TfLiteType TfLiteTypeFromPyArray(PyArrayObject* array) {
@@ -118,8 +156,10 @@ TfLiteType TfLiteTypeFromPyArray(PyArrayObject* array) {
case NPY_STRING:
case NPY_UNICODE:
return kTfLiteString;
+ case NPY_COMPLEX64:
+ return kTfLiteComplex64;
+ // Avoid default so compiler errors are raised when new types are added.
}
- LOG(ERROR) << "Unknown PyArray dtype " << pyarray_type;
return kTfLiteNoType;
}
@@ -143,32 +183,29 @@ PyObject* PyTupleFromQuantizationParam(const TfLiteQuantizationParams& param) {
} // namespace
InterpreterWrapper::InterpreterWrapper(
- std::unique_ptr<tflite::FlatBufferModel> model)
+ std::unique_ptr<tflite::FlatBufferModel> model,
+ std::unique_ptr<PythonErrorReporter> error_reporter)
: model_(std::move(model)),
+ error_reporter_(std::move(error_reporter)),
resolver_(absl::make_unique<tflite::ops::builtin::BuiltinOpResolver>()),
interpreter_(CreateInterpreter(model_.get(), *resolver_)) {}
InterpreterWrapper::~InterpreterWrapper() {}
-bool InterpreterWrapper::AllocateTensors() {
- if (!interpreter_) {
- LOG(ERROR) << "Cannot allocate tensors: invalid interpreter.";
- return false;
- }
-
- if (interpreter_->AllocateTensors() != kTfLiteOk) {
- LOG(ERROR) << "Unable to allocate tensors.";
- return false;
- }
-
- return true;
+PyObject* InterpreterWrapper::AllocateTensors() {
+ TFLITE_PY_ENSURE_VALID_INTERPRETER();
+ TFLITE_PY_CHECK(interpreter_->AllocateTensors());
+ Py_RETURN_NONE;
}
-bool InterpreterWrapper::Invoke() {
- return interpreter_ ? (interpreter_->Invoke() == kTfLiteOk) : false;
+PyObject* InterpreterWrapper::Invoke() {
+ TFLITE_PY_ENSURE_VALID_INTERPRETER();
+ TFLITE_PY_CHECK(interpreter_->Invoke());
+ Py_RETURN_NONE;
}
PyObject* InterpreterWrapper::InputIndices() const {
+ TFLITE_PY_ENSURE_VALID_INTERPRETER();
PyObject* np_array = PyArrayFromIntVector(interpreter_->inputs().data(),
interpreter_->inputs().size());
@@ -182,35 +219,36 @@ PyObject* InterpreterWrapper::OutputIndices() const {
return PyArray_Return(reinterpret_cast<PyArrayObject*>(np_array));
}
-bool InterpreterWrapper::ResizeInputTensor(int i, PyObject* value) {
- if (!interpreter_) {
- LOG(ERROR) << "Invalid interpreter.";
- return false;
- }
+PyObject* InterpreterWrapper::ResizeInputTensor(int i, PyObject* value) {
+ TFLITE_PY_ENSURE_VALID_INTERPRETER();
std::unique_ptr<PyObject, PyDecrefDeleter> array_safe(
PyArray_FromAny(value, nullptr, 0, 0, NPY_ARRAY_CARRAY, nullptr));
if (!array_safe) {
- LOG(ERROR) << "Failed to convert value into readable tensor.";
- return false;
+ PyErr_SetString(PyExc_ValueError,
+ "Failed to convert numpy value into readable tensor.");
+ return nullptr;
}
PyArrayObject* array = reinterpret_cast<PyArrayObject*>(array_safe.get());
if (PyArray_NDIM(array) != 1) {
- LOG(ERROR) << "Expected 1-D defining input shape.";
- return false;
+ PyErr_Format(PyExc_ValueError, "Shape should be 1D instead of %d.",
+ PyArray_NDIM(array));
+ return nullptr;
}
if (PyArray_TYPE(array) != NPY_INT32) {
- LOG(ERROR) << "Shape must be an int32 array";
- return false;
+ PyErr_Format(PyExc_ValueError, "Shape must be type int32 (was %d).",
+ PyArray_TYPE(array));
+ return nullptr;
}
std::vector<int> dims(PyArray_SHAPE(array)[0]);
memcpy(dims.data(), PyArray_BYTES(array), dims.size() * sizeof(int));
- return (interpreter_->ResizeInputTensor(i, dims) == kTfLiteOk);
+ TFLITE_PY_CHECK(interpreter_->ResizeInputTensor(i, dims));
+ Py_RETURN_NONE;
}
std::string InterpreterWrapper::TensorName(int i) const {
@@ -223,21 +261,21 @@ std::string InterpreterWrapper::TensorName(int i) const {
}
PyObject* InterpreterWrapper::TensorType(int i) const {
- if (!interpreter_ || i >= interpreter_->tensors_size() || i < 0) {
- return nullptr;
- }
+ TFLITE_PY_ENSURE_VALID_INTERPRETER();
+ TFLITE_PY_TENSOR_BOUNDS_CHECK(i);
const TfLiteTensor* tensor = interpreter_->tensor(i);
- int typenum = TfLiteTypeToPyArrayType(tensor->type);
- return PyArray_TypeObjectFromType(typenum);
+ int code = TfLiteTypeToPyArrayType(tensor->type);
+ if (code == -1) {
+ PyErr_Format(PyExc_ValueError, "Invalid tflite type code %d", code);
+ return nullptr;
+ }
+ return PyArray_TypeObjectFromType(code);
}
PyObject* InterpreterWrapper::TensorSize(int i) const {
- if (!interpreter_ || i >= interpreter_->tensors_size() || i < 0) {
- Py_INCREF(Py_None);
- return Py_None;
- }
-
+ TFLITE_PY_ENSURE_VALID_INTERPRETER();
+ TFLITE_PY_TENSOR_BOUNDS_CHECK(i);
const TfLiteTensor* tensor = interpreter_->tensor(i);
PyObject* np_array =
PyArrayFromIntVector(tensor->dims->data, tensor->dims->size);
@@ -246,97 +284,82 @@ PyObject* InterpreterWrapper::TensorSize(int i) const {
}
PyObject* InterpreterWrapper::TensorQuantization(int i) const {
- if (!interpreter_ || i >= interpreter_->tensors_size() || i < 0) {
- Py_INCREF(Py_None);
- return Py_None;
- }
-
+ TFLITE_PY_ENSURE_VALID_INTERPRETER();
+ TFLITE_PY_TENSOR_BOUNDS_CHECK(i);
const TfLiteTensor* tensor = interpreter_->tensor(i);
return PyTupleFromQuantizationParam(tensor->params);
}
-bool InterpreterWrapper::SetTensor(int i, PyObject* value) {
- if (!interpreter_) {
- LOG(ERROR) << "Invalid interpreter.";
- return false;
- }
-
- if (i >= interpreter_->tensors_size()) {
- LOG(ERROR) << "Invalid tensor index: " << i << " exceeds max tensor index "
- << interpreter_->tensors_size();
- return false;
- }
+PyObject* InterpreterWrapper::SetTensor(int i, PyObject* value) {
+ TFLITE_PY_ENSURE_VALID_INTERPRETER();
+ TFLITE_PY_TENSOR_BOUNDS_CHECK(i);
std::unique_ptr<PyObject, PyDecrefDeleter> array_safe(
PyArray_FromAny(value, nullptr, 0, 0, NPY_ARRAY_CARRAY, nullptr));
if (!array_safe) {
- LOG(ERROR) << "Failed to convert value into readable tensor.";
- return false;
+ PyErr_SetString(PyExc_ValueError,
+ "Failed to convert value into readable tensor.");
+ return nullptr;
}
PyArrayObject* array = reinterpret_cast<PyArrayObject*>(array_safe.get());
const TfLiteTensor* tensor = interpreter_->tensor(i);
if (TfLiteTypeFromPyArray(array) != tensor->type) {
- LOG(ERROR) << "Cannot set tensor:"
- << " Got tensor of type " << TfLiteTypeFromPyArray(array)
- << " but expected type " << tensor->type << " for input " << i;
- return false;
+ PyErr_Format(PyExc_ValueError,
+ "Cannot set tensor:"
+ " Got tensor of type %d"
+ " but expected type %d for input %d ",
+ TfLiteTypeFromPyArray(array), tensor->type, i);
+ return nullptr;
}
if (PyArray_NDIM(array) != tensor->dims->size) {
- LOG(ERROR) << "Cannot set tensor: Dimension mismatch";
- return false;
+ PyErr_SetString(PyExc_ValueError, "Cannot set tensor: Dimension mismatch");
+ return nullptr;
}
for (int j = 0; j < PyArray_NDIM(array); j++) {
if (tensor->dims->data[j] != PyArray_SHAPE(array)[j]) {
- LOG(ERROR) << "Cannot set tensor: Dimension mismatch";
- return false;
+ PyErr_SetString(PyExc_ValueError,
+ "Cannot set tensor: Dimension mismatch");
+ return nullptr;
}
}
size_t size = PyArray_NBYTES(array);
- DCHECK_EQ(size, tensor->bytes);
+ if (size != tensor->bytes) {
+ PyErr_Format(PyExc_ValueError,
+ "numpy array had %zu bytes but expected %zu bytes.", size,
+ tensor->bytes);
+ return nullptr;
+ }
memcpy(tensor->data.raw, PyArray_DATA(array), size);
- return true;
+ Py_RETURN_NONE;
}
namespace {
-PyObject* CheckGetTensorArgs(Interpreter* interpreter, int tensor_index,
+PyObject* CheckGetTensorArgs(Interpreter* interpreter_, int tensor_index,
TfLiteTensor** tensor, int* type_num) {
- if (!interpreter) {
- LOG(ERROR) << "Invalid interpreter.";
- Py_INCREF(Py_None);
- return Py_None;
- }
-
- if (tensor_index >= interpreter->tensors_size() || tensor_index < 0) {
- LOG(ERROR) << "Invalid tensor index: " << tensor_index
- << " exceeds max tensor index " << interpreter->inputs().size();
- Py_INCREF(Py_None);
- return Py_None;
- }
+ TFLITE_PY_ENSURE_VALID_INTERPRETER();
+ TFLITE_PY_TENSOR_BOUNDS_CHECK(tensor_index);
- *tensor = interpreter->tensor(tensor_index);
+ *tensor = interpreter_->tensor(tensor_index);
if ((*tensor)->bytes == 0) {
- LOG(ERROR) << "Invalid tensor size";
- Py_INCREF(Py_None);
- return Py_None;
+ PyErr_SetString(PyExc_ValueError, "Invalid tensor size.");
+ return nullptr;
}
*type_num = TfLiteTypeToPyArrayType((*tensor)->type);
if (*type_num == -1) {
- LOG(ERROR) << "Unknown tensor type " << (*tensor)->type;
- Py_INCREF(Py_None);
- return Py_None;
+ PyErr_SetString(PyExc_ValueError, "Unknown tensor type.");
+ return nullptr;
}
if (!(*tensor)->data.raw) {
- LOG(ERROR) << "Tensor data is null.";
- Py_INCREF(Py_None);
- return Py_None;
+ PyErr_SetString(PyExc_ValueError, "Tensor data is null.");
+ return nullptr;
}
return nullptr;
@@ -358,9 +381,8 @@ PyObject* InterpreterWrapper::GetTensor(int i) const {
// it will leak.
void* data = malloc(tensor->bytes);
if (!data) {
- LOG(ERROR) << "Malloc to copy tensor failed.";
- Py_INCREF(Py_None);
- return Py_None;
+ PyErr_SetString(PyExc_ValueError, "Malloc to copy tensor failed.");
+ return nullptr;
}
memcpy(data, tensor->data.raw, tensor->bytes);
PyObject* np_array =
@@ -390,22 +412,39 @@ PyObject* InterpreterWrapper::tensor(PyObject* base_object, int i) {
}
InterpreterWrapper* InterpreterWrapper::CreateWrapperCPPFromFile(
- const char* model_path) {
+ const char* model_path, std::string* error_msg) {
+ std::unique_ptr<PythonErrorReporter> error_reporter(new PythonErrorReporter);
std::unique_ptr<tflite::FlatBufferModel> model =
- tflite::FlatBufferModel::BuildFromFile(model_path);
- return model ? new InterpreterWrapper(std::move(model)) : nullptr;
+ tflite::FlatBufferModel::BuildFromFile(model_path, error_reporter.get());
+ if (!model) {
+ *error_msg = error_reporter->message();
+ return nullptr;
+ }
+ return new InterpreterWrapper(std::move(model), std::move(error_reporter));
}
InterpreterWrapper* InterpreterWrapper::CreateWrapperCPPFromBuffer(
- PyObject* data) {
+ PyObject* data, std::string* error_msg) {
char * buf = nullptr;
Py_ssize_t length;
+ std::unique_ptr<PythonErrorReporter> error_reporter(new PythonErrorReporter);
if (PY_TO_CPPSTRING(data, &buf, &length) == -1) {
return nullptr;
}
std::unique_ptr<tflite::FlatBufferModel> model =
- tflite::FlatBufferModel::BuildFromBuffer(buf, length);
- return model ? new InterpreterWrapper(std::move(model)) : nullptr;
+ tflite::FlatBufferModel::BuildFromBuffer(buf, length,
+ error_reporter.get());
+ if (!model) {
+ *error_msg = error_reporter->message();
+ return nullptr;
+ }
+ return new InterpreterWrapper(std::move(model), std::move(error_reporter));
+}
+
+PyObject* InterpreterWrapper::ResetVariableTensorsToZero() {
+ TFLITE_PY_ENSURE_VALID_INTERPRETER();
+ TFLITE_PY_CHECK(interpreter_->ResetVariableTensorsToZero());
+ Py_RETURN_NONE;
}
} // namespace interpreter_wrapper
diff --git a/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h b/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h
index e7343cb388..556ec7117a 100644
--- a/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h
+++ b/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h
@@ -15,12 +15,12 @@ limitations under the License.
#ifndef TENSORFLOW_CONTRIB_LITE_PYTHON_INTERPRETER_WRAPPER_INTERPRETER_WRAPPER_H_
#define TENSORFLOW_CONTRIB_LITE_PYTHON_INTERPRETER_WRAPPER_INTERPRETER_WRAPPER_H_
+// Place `<locale>` before <Python.h> to avoid build failures on macOS.
+#include <locale>
#include <memory>
#include <string>
#include <vector>
-// Place `<locale>` before <Python.h> to avoid build failures in macOS.
-#include <locale>
#include <Python.h>
// We forward declare TFLite classes here to avoid exposing them to SWIG.
@@ -36,34 +36,41 @@ class Interpreter;
namespace interpreter_wrapper {
+class PythonErrorReporter;
+
class InterpreterWrapper {
public:
// SWIG caller takes ownership of pointer.
- static InterpreterWrapper* CreateWrapperCPPFromFile(const char* model_path);
+ static InterpreterWrapper* CreateWrapperCPPFromFile(const char* model_path,
+ std::string* error_msg);
// SWIG caller takes ownership of pointer.
- static InterpreterWrapper* CreateWrapperCPPFromBuffer(PyObject* data);
+ static InterpreterWrapper* CreateWrapperCPPFromBuffer(PyObject* data,
+ std::string* error_msg);
~InterpreterWrapper();
- bool AllocateTensors();
- bool Invoke();
+ PyObject* AllocateTensors();
+ PyObject* Invoke();
PyObject* InputIndices() const;
PyObject* OutputIndices() const;
- bool ResizeInputTensor(int i, PyObject* value);
+ PyObject* ResizeInputTensor(int i, PyObject* value);
std::string TensorName(int i) const;
PyObject* TensorType(int i) const;
PyObject* TensorSize(int i) const;
PyObject* TensorQuantization(int i) const;
- bool SetTensor(int i, PyObject* value);
+ PyObject* SetTensor(int i, PyObject* value);
PyObject* GetTensor(int i) const;
+ PyObject* ResetVariableTensorsToZero();
+
// Returns a reference to tensor index i as a numpy array. The base_object
// should be the interpreter object providing the memory.
PyObject* tensor(PyObject* base_object, int i);
private:
- InterpreterWrapper(std::unique_ptr<tflite::FlatBufferModel> model);
+ InterpreterWrapper(std::unique_ptr<tflite::FlatBufferModel> model,
+ std::unique_ptr<PythonErrorReporter> error_reporter);
// InterpreterWrapper is not copyable or assignable. We avoid the use of
// InterpreterWrapper() = delete here for SWIG compatibility.
@@ -71,6 +78,7 @@ class InterpreterWrapper {
InterpreterWrapper(const InterpreterWrapper& rhs);
const std::unique_ptr<tflite::FlatBufferModel> model_;
+ const std::unique_ptr<PythonErrorReporter> error_reporter_;
const std::unique_ptr<tflite::ops::builtin::BuiltinOpResolver> resolver_;
const std::unique_ptr<tflite::Interpreter> interpreter_;
};
diff --git a/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.i b/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.i
index 7f51f9f00d..afb2092eac 100644
--- a/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.i
+++ b/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.i
@@ -18,8 +18,51 @@ limitations under the License.
%{
#define SWIG_FILE_WITH_INIT
+#include "tensorflow/contrib/lite/interpreter.h"
+#include "tensorflow/contrib/lite/model.h"
#include "tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h"
%}
%include "tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h"
+
+namespace tflite {
+namespace interpreter_wrapper {
+%extend InterpreterWrapper {
+
+ // Version of the constructor that handles producing Python exceptions
+ // that propagate strings.
+ static PyObject* CreateWrapperCPPFromFile(const char* model_path) {
+ std::string error;
+    if (tflite::interpreter_wrapper::InterpreterWrapper* ptr =
+ tflite::interpreter_wrapper::InterpreterWrapper
+ ::CreateWrapperCPPFromFile(
+ model_path, &error)) {
+ return SWIG_NewPointerObj(
+ ptr, SWIGTYPE_p_tflite__interpreter_wrapper__InterpreterWrapper, 1);
+ } else {
+ PyErr_SetString(PyExc_ValueError, error.c_str());
+ return nullptr;
+ }
+ }
+
+ // Version of the constructor that handles producing Python exceptions
+ // that propagate strings.
+ static PyObject* CreateWrapperCPPFromBuffer(
+ PyObject* data) {
+ std::string error;
+    if (tflite::interpreter_wrapper::InterpreterWrapper* ptr =
+ tflite::interpreter_wrapper::InterpreterWrapper
+ ::CreateWrapperCPPFromBuffer(
+ data, &error)) {
+ return SWIG_NewPointerObj(
+ ptr, SWIGTYPE_p_tflite__interpreter_wrapper__InterpreterWrapper, 1);
+ } else {
+ PyErr_SetString(PyExc_ValueError, error.c_str());
+ return nullptr;
+ }
+ }
+}
+
+} // namespace interpreter_wrapper
+} // namespace tflite
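Because these %extend shims convert the error_msg out-parameter into a Python ValueError, a malformed model now fails loudly at construction time. A rough sketch of the resulting behavior, assuming the public Interpreter class forwards to CreateWrapperCPPFromBuffer:

    from tensorflow.contrib.lite.python.interpreter import Interpreter

    try:
        Interpreter(model_content=b'not a flatbuffer')  # malformed model buffer
    except ValueError as e:
        # The message text originates from PythonErrorReporter, threaded
        # through the error_msg out-parameter added above.
        print('failed to load model:', e)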
diff --git a/tensorflow/contrib/lite/python/lite.py b/tensorflow/contrib/lite/python/lite.py
index 69a2f638af..29a1487c1f 100644
--- a/tensorflow/contrib/lite/python/lite.py
+++ b/tensorflow/contrib/lite/python/lite.py
@@ -50,6 +50,7 @@ from tensorflow.contrib.lite.python.interpreter import Interpreter # pylint: di
from tensorflow.contrib.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import
from tensorflow.contrib.lite.python.op_hint import OpHint # pylint: disable=unused-import
from tensorflow.core.framework import graph_pb2 as _graph_pb2
+from tensorflow.python import keras as _keras
from tensorflow.python.client import session as _session
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.framework.importer import import_graph_def
@@ -131,7 +132,7 @@ class TocoConverter(object):
Args:
- graph_def: TensorFlow GraphDef.
+ graph_def: Frozen TensorFlow GraphDef.
input_tensors: List of input tensors. Type and shape are computed using
`foo.get_shape()` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
@@ -177,7 +178,7 @@ class TocoConverter(object):
"""Creates a TocoConverter class from a file containing a frozen GraphDef.
Args:
- graph_def_file: Full filepath of file containing TensorFlow GraphDef.
+ graph_def_file: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
@@ -269,6 +270,48 @@ class TocoConverter(object):
return cls(
graph_def=result[0], input_tensors=result[1], output_tensors=result[2])
+ @classmethod
+ def from_keras_model_file(cls,
+ model_file,
+ input_arrays=None,
+ input_shapes=None,
+ output_arrays=None):
+ """Creates a TocoConverter class from a tf.keras model file.
+
+ Args:
+ model_file: Full filepath of HDF5 file containing the tf.keras model.
+      input_arrays: List of input tensors to freeze graph with. Uses input
+        arrays from the tf.keras model when none are provided. (default None)
+ input_shapes: Dict of strings representing input tensor names to list of
+ integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
+ Automatically determined when input shapes is None (e.g., {"foo" :
+ None}). (default None)
+      output_arrays: List of output tensors to freeze graph with. Uses output
+        arrays from the tf.keras model when none are provided. (default None)
+
+ Returns:
+ TocoConverter class.
+ """
+ _keras.backend.clear_session()
+ _keras.backend.set_learning_phase(False)
+ keras_model = _keras.models.load_model(model_file)
+ sess = _keras.backend.get_session()
+
+ # Get input and output tensors.
+ if input_arrays:
+ input_tensors = get_tensors_from_tensor_names(sess.graph, input_arrays)
+ else:
+ input_tensors = keras_model.inputs
+
+ if output_arrays:
+ output_tensors = get_tensors_from_tensor_names(sess.graph, output_arrays)
+ else:
+ output_tensors = keras_model.outputs
+ set_tensor_shapes(input_tensors, input_shapes)
+
+ graph_def = _freeze_graph(sess, output_tensors)
+ return cls(graph_def, input_tensors, output_tensors)
+
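For reference, a minimal end-to-end sketch of the new classmethod (file paths hypothetical; this mirrors the flow exercised by the tests added below):

    from tensorflow.contrib.lite.python import lite

    converter = lite.TocoConverter.from_keras_model_file('/tmp/keras_model.h5')
    tflite_model = converter.convert()
    with open('/tmp/model.tflite', 'wb') as f:
        f.write(tflite_model)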
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
@@ -366,7 +409,7 @@ def _is_frozen_graph(sess):
Bool.
"""
for op in sess.graph.get_operations():
- if op.type.startswith("Variable"):
+ if op.type.startswith("Variable") or op.type.endswith("VariableOp"):
return False
return True
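The widened check matters for resource variables. A small sketch of what the predicate now catches (helper name hypothetical):

    def _looks_like_variable(op_type):
        # Prefix match catches "Variable"/"VariableV2"; the new suffix match
        # also catches resource-variable ops such as "ReadVariableOp".
        return op_type.startswith("Variable") or op_type.endswith("VariableOp")

    assert _looks_like_variable("VariableV2")
    assert _looks_like_variable("ReadVariableOp")
    assert not _looks_like_variable("Const")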
diff --git a/tensorflow/contrib/lite/python/lite_test.py b/tensorflow/contrib/lite/python/lite_test.py
index a9475de474..ca2af5aaed 100644
--- a/tensorflow/contrib/lite/python/lite_test.py
+++ b/tensorflow/contrib/lite/python/lite_test.py
@@ -19,11 +19,13 @@ from __future__ import division
from __future__ import print_function
import os
+import tempfile
import numpy as np
from tensorflow.contrib.lite.python import lite
from tensorflow.contrib.lite.python import lite_constants
from tensorflow.contrib.lite.python.interpreter import Interpreter
+from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
@@ -618,5 +620,279 @@ class FromSavedModelTest(test_util.TensorFlowTestCase):
self.assertTrue(tflite_model)
+class FromKerasFile(test_util.TensorFlowTestCase):
+
+ def setUp(self):
+ keras.backend.clear_session()
+
+ def _getSequentialModel(self):
+ model = keras.models.Sequential()
+ model.add(keras.layers.Dense(2, input_shape=(3,)))
+ model.add(keras.layers.RepeatVector(3))
+ model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
+ model.compile(
+ loss=keras.losses.MSE,
+ optimizer=keras.optimizers.RMSprop(),
+ metrics=[keras.metrics.categorical_accuracy],
+ sample_weight_mode='temporal')
+ x = np.random.random((1, 3))
+ y = np.random.random((1, 3, 3))
+ model.train_on_batch(x, y)
+ model.predict(x)
+
+    fd, keras_file = tempfile.mkstemp('.h5')
+    try:
+      keras.models.save_model(model, keras_file)
+    finally:
+      os.close(fd)
+ return keras_file
+
+ def testSequentialModel(self):
+ """Test a Sequential tf.keras model with default inputs."""
+ keras_file = self._getSequentialModel()
+
+ converter = lite.TocoConverter.from_keras_model_file(keras_file)
+ tflite_model = converter.convert()
+ self.assertTrue(tflite_model)
+
+ os.remove(keras_file)
+
+ # Check values from converted model.
+ interpreter = Interpreter(model_content=tflite_model)
+ interpreter.allocate_tensors()
+
+ input_details = interpreter.get_input_details()
+ self.assertEqual(1, len(input_details))
+ self.assertEqual('dense_input', input_details[0]['name'])
+ self.assertEqual(np.float32, input_details[0]['dtype'])
+ self.assertTrue(([1, 3] == input_details[0]['shape']).all())
+ self.assertEqual((0., 0.), input_details[0]['quantization'])
+
+ output_details = interpreter.get_output_details()
+ self.assertEqual(1, len(output_details))
+ self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
+ self.assertEqual(np.float32, output_details[0]['dtype'])
+ self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
+ self.assertEqual((0., 0.), output_details[0]['quantization'])
+
+ def testSequentialModelInputArray(self):
+ """Test a Sequential tf.keras model testing input arrays argument."""
+ keras_file = self._getSequentialModel()
+
+ # Invalid input array raises error.
+ with self.assertRaises(ValueError) as error:
+ lite.TocoConverter.from_keras_model_file(
+ keras_file, input_arrays=['invalid-input'])
+ self.assertEqual("Invalid tensors 'invalid-input' were found.",
+ str(error.exception))
+
+ # Valid input array.
+ converter = lite.TocoConverter.from_keras_model_file(
+ keras_file, input_arrays=['dense_input'])
+ tflite_model = converter.convert()
+ os.remove(keras_file)
+ self.assertTrue(tflite_model)
+
+ def testSequentialModelInputShape(self):
+ """Test a Sequential tf.keras model testing input shapes argument."""
+ keras_file = self._getSequentialModel()
+
+    # Passing in the shape of an invalid input array has no impact as long
+    # as all input arrays have a shape.
+ converter = lite.TocoConverter.from_keras_model_file(
+ keras_file, input_shapes={'invalid-input': [2, 3]})
+ tflite_model = converter.convert()
+ self.assertTrue(tflite_model)
+
+ # Passing in shape of valid input array.
+ converter = lite.TocoConverter.from_keras_model_file(
+ keras_file, input_shapes={'dense_input': [2, 3]})
+ tflite_model = converter.convert()
+ os.remove(keras_file)
+ self.assertTrue(tflite_model)
+
+ # Check input shape from converted model.
+ interpreter = Interpreter(model_content=tflite_model)
+ interpreter.allocate_tensors()
+
+ input_details = interpreter.get_input_details()
+ self.assertEqual(1, len(input_details))
+ self.assertEqual('dense_input', input_details[0]['name'])
+ self.assertTrue(([2, 3] == input_details[0]['shape']).all())
+
+ def testSequentialModelOutputArray(self):
+ """Test a Sequential tf.keras model testing output arrays argument."""
+ keras_file = self._getSequentialModel()
+
+ # Invalid output array raises error.
+ with self.assertRaises(ValueError) as error:
+ lite.TocoConverter.from_keras_model_file(
+ keras_file, output_arrays=['invalid-output'])
+ self.assertEqual("Invalid tensors 'invalid-output' were found.",
+ str(error.exception))
+
+ # Valid output array.
+ converter = lite.TocoConverter.from_keras_model_file(
+ keras_file, output_arrays=['time_distributed/Reshape_1'])
+ tflite_model = converter.convert()
+ os.remove(keras_file)
+ self.assertTrue(tflite_model)
+
+ def testFunctionalModel(self):
+ """Test a Functional tf.keras model with default inputs."""
+ inputs = keras.layers.Input(shape=(3,), name='input')
+ x = keras.layers.Dense(2)(inputs)
+ output = keras.layers.Dense(3)(x)
+
+ model = keras.models.Model(inputs, output)
+ model.compile(
+ loss=keras.losses.MSE,
+ optimizer=keras.optimizers.RMSprop(),
+ metrics=[keras.metrics.categorical_accuracy])
+ x = np.random.random((1, 3))
+ y = np.random.random((1, 3))
+ model.train_on_batch(x, y)
+
+ model.predict(x)
+ fd, keras_file = tempfile.mkstemp('.h5')
+ keras.models.save_model(model, keras_file)
+
+ # Convert to TFLite model.
+ converter = lite.TocoConverter.from_keras_model_file(keras_file)
+ tflite_model = converter.convert()
+ self.assertTrue(tflite_model)
+
+ os.close(fd)
+ os.remove(keras_file)
+
+ # Check values from converted model.
+ interpreter = Interpreter(model_content=tflite_model)
+ interpreter.allocate_tensors()
+
+ input_details = interpreter.get_input_details()
+ self.assertEqual(1, len(input_details))
+ self.assertEqual('input', input_details[0]['name'])
+ self.assertEqual(np.float32, input_details[0]['dtype'])
+ self.assertTrue(([1, 3] == input_details[0]['shape']).all())
+ self.assertEqual((0., 0.), input_details[0]['quantization'])
+
+ output_details = interpreter.get_output_details()
+ self.assertEqual(1, len(output_details))
+ self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
+ self.assertEqual(np.float32, output_details[0]['dtype'])
+ self.assertTrue(([1, 3] == output_details[0]['shape']).all())
+ self.assertEqual((0., 0.), output_details[0]['quantization'])
+
+ def testFunctionalModelMultipleInputs(self):
+ """Test a Functional tf.keras model with multiple inputs and outputs."""
+ a = keras.layers.Input(shape=(3,), name='input_a')
+ b = keras.layers.Input(shape=(3,), name='input_b')
+ dense = keras.layers.Dense(4, name='dense')
+ c = dense(a)
+ d = dense(b)
+ e = keras.layers.Dropout(0.5, name='dropout')(c)
+
+ model = keras.models.Model([a, b], [d, e])
+ model.compile(
+ loss=keras.losses.MSE,
+ optimizer=keras.optimizers.RMSprop(),
+ metrics=[keras.metrics.mae],
+ loss_weights=[1., 0.5])
+
+ input_a_np = np.random.random((10, 3))
+ input_b_np = np.random.random((10, 3))
+ output_d_np = np.random.random((10, 4))
+ output_e_np = np.random.random((10, 4))
+ model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
+
+ model.predict([input_a_np, input_b_np], batch_size=5)
+ fd, keras_file = tempfile.mkstemp('.h5')
+ keras.models.save_model(model, keras_file)
+
+ # Convert to TFLite model.
+ converter = lite.TocoConverter.from_keras_model_file(keras_file)
+ tflite_model = converter.convert()
+ self.assertTrue(tflite_model)
+
+ os.close(fd)
+ os.remove(keras_file)
+
+ # Check values from converted model.
+ interpreter = Interpreter(model_content=tflite_model)
+ interpreter.allocate_tensors()
+
+ input_details = interpreter.get_input_details()
+ self.assertEqual(2, len(input_details))
+ self.assertEqual('input_a', input_details[0]['name'])
+ self.assertEqual(np.float32, input_details[0]['dtype'])
+ self.assertTrue(([1, 3] == input_details[0]['shape']).all())
+ self.assertEqual((0., 0.), input_details[0]['quantization'])
+
+ self.assertEqual('input_b', input_details[1]['name'])
+ self.assertEqual(np.float32, input_details[1]['dtype'])
+ self.assertTrue(([1, 3] == input_details[1]['shape']).all())
+ self.assertEqual((0., 0.), input_details[1]['quantization'])
+
+ output_details = interpreter.get_output_details()
+ self.assertEqual(2, len(output_details))
+ self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
+ self.assertEqual(np.float32, output_details[0]['dtype'])
+ self.assertTrue(([1, 4] == output_details[0]['shape']).all())
+ self.assertEqual((0., 0.), output_details[0]['quantization'])
+
+ self.assertEqual('dropout/Identity', output_details[1]['name'])
+ self.assertEqual(np.float32, output_details[1]['dtype'])
+ self.assertTrue(([1, 4] == output_details[1]['shape']).all())
+ self.assertEqual((0., 0.), output_details[1]['quantization'])
+
+ def testFunctionalSequentialModel(self):
+ """Test a Functional tf.keras model containing a Sequential model."""
+ model = keras.models.Sequential()
+ model.add(keras.layers.Dense(2, input_shape=(3,)))
+ model.add(keras.layers.RepeatVector(3))
+ model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
+ model = keras.models.Model(model.input, model.output)
+
+ model.compile(
+ loss=keras.losses.MSE,
+ optimizer=keras.optimizers.RMSprop(),
+ metrics=[keras.metrics.categorical_accuracy],
+ sample_weight_mode='temporal')
+ x = np.random.random((1, 3))
+ y = np.random.random((1, 3, 3))
+ model.train_on_batch(x, y)
+    model.predict(x)
+
+ fd, keras_file = tempfile.mkstemp('.h5')
+ keras.models.save_model(model, keras_file)
+
+ # Convert to TFLite model.
+ converter = lite.TocoConverter.from_keras_model_file(keras_file)
+ tflite_model = converter.convert()
+ self.assertTrue(tflite_model)
+
+ os.close(fd)
+ os.remove(keras_file)
+
+ # Check values from converted model.
+ interpreter = Interpreter(model_content=tflite_model)
+ interpreter.allocate_tensors()
+
+ input_details = interpreter.get_input_details()
+ self.assertEqual(1, len(input_details))
+ self.assertEqual('dense_input', input_details[0]['name'])
+ self.assertEqual(np.float32, input_details[0]['dtype'])
+ self.assertTrue(([1, 3] == input_details[0]['shape']).all())
+ self.assertEqual((0., 0.), input_details[0]['quantization'])
+
+ output_details = interpreter.get_output_details()
+ self.assertEqual(1, len(output_details))
+ self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
+ self.assertEqual(np.float32, output_details[0]['dtype'])
+ self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
+ self.assertEqual((0., 0.), output_details[0]['quantization'])
+
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/lite/python/tflite_convert.py b/tensorflow/contrib/lite/python/tflite_convert.py
index d18a29834b..9bd1f4f76e 100644
--- a/tensorflow/contrib/lite/python/tflite_convert.py
+++ b/tensorflow/contrib/lite/python/tflite_convert.py
@@ -74,6 +74,9 @@ def _get_toco_converter(flags):
converter_kwargs["saved_model_dir"] = flags.saved_model_dir
converter_kwargs["tag_set"] = _parse_set(flags.saved_model_tag_set)
converter_kwargs["signature_key"] = flags.saved_model_signature_key
+ elif flags.keras_model_file:
+ converter_fn = lite.TocoConverter.from_keras_model_file
+ converter_kwargs["model_file"] = flags.keras_model_file
return converter_fn(**converter_kwargs)
@@ -102,7 +105,7 @@ def _convert_model(flags):
input_arrays = converter.get_input_arrays()
std_dev_values = _parse_array(flags.std_dev_values, type_fn=int)
mean_values = _parse_array(flags.mean_values, type_fn=int)
- quant_stats = zip(mean_values, std_dev_values)
+ quant_stats = list(zip(mean_values, std_dev_values))
if ((not flags.input_arrays and len(input_arrays) > 1) or
(len(input_arrays) != len(quant_stats))):
raise ValueError("Mismatching --input_arrays, --std_dev_values, and "
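The list() wrapper above matters under Python 3, where zip() returns a lazy iterator with no len() that can only be traversed once. A tiny illustration:

    mean_values, std_dev_values = [128], [127]
    pairs = zip(mean_values, std_dev_values)
    # len(pairs) raises TypeError on Python 3; materialize with list()
    # to restore len() and repeated iteration.
    pairs = list(zip(mean_values, std_dev_values))
    assert len(pairs) == 1 and pairs[0] == (128, 127)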
@@ -222,11 +225,15 @@ def run_main(_):
input_file_group.add_argument(
"--graph_def_file",
type=str,
- help="Full filepath of file containing TensorFlow GraphDef.")
+ help="Full filepath of file containing frozen TensorFlow GraphDef.")
input_file_group.add_argument(
"--saved_model_dir",
type=str,
help="Full filepath of directory containing the SavedModel.")
+ input_file_group.add_argument(
+ "--keras_model_file",
+ type=str,
+      help="Full filepath of HDF5 file containing the tf.keras model.")
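With the new flag wired into _get_toco_converter above, the keras path joins the existing dispatch (invoked e.g. as tflite_convert --output_file=... --keras_model_file=..., assuming the converter's existing --output_file flag). A condensed Python mirror of that selection logic; pick_converter is a hypothetical name:

    def pick_converter(flags):
        # Mirrors _get_toco_converter: exactly one of the mutually
        # exclusive input-file flags selects the factory method.
        if flags.graph_def_file:
            return lite.TocoConverter.from_frozen_graph
        elif flags.saved_model_dir:
            return lite.TocoConverter.from_saved_model
        elif flags.keras_model_file:
            return lite.TocoConverter.from_keras_model_file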
# Model format flags.
parser.add_argument(
diff --git a/tensorflow/contrib/lite/schema/BUILD b/tensorflow/contrib/lite/schema/BUILD
index 9717a4a1a4..f095151cae 100644
--- a/tensorflow/contrib/lite/schema/BUILD
+++ b/tensorflow/contrib/lite/schema/BUILD
@@ -65,6 +65,7 @@ cc_test(
],
tags = [
"tflite_not_portable_android",
+ "tflite_not_portable_ios",
],
deps = [
"//tensorflow/core:lib_platform",
diff --git a/tensorflow/contrib/lite/schema/schema.fbs b/tensorflow/contrib/lite/schema/schema.fbs
index df43f1e5ab..6c3189a884 100644
--- a/tensorflow/contrib/lite/schema/schema.fbs
+++ b/tensorflow/contrib/lite/schema/schema.fbs
@@ -35,6 +35,7 @@ enum TensorType : byte {
STRING = 5,
BOOL = 6,
INT16 = 7,
+ COMPLEX64 = 8,
}
// Parameters for converting a quantized tensor back to float. Given a
@@ -43,7 +44,7 @@ enum TensorType : byte {
table QuantizationParameters {
min:[float]; // For importing back into tensorflow.
max:[float]; // For importing back into tensorflow.
- scale:[float];
+ scale:[float]; // For dequantizing the tensor's values.
zero_point:[long];
}
@@ -154,10 +155,15 @@ enum BuiltinOperator : byte {
EQUAL = 71,
NOT_EQUAL = 72,
LOG = 73,
- SUM=74,
+ SUM = 74,
SQRT = 75,
RSQRT = 76,
SHAPE = 77,
+ POW = 78,
+ ARG_MIN = 79,
+ FAKE_QUANT = 80,
+ REDUCE_PROD = 81,
+ REDUCE_MAX = 82,
}
// Options for the builtin operators.
@@ -217,6 +223,9 @@ union BuiltinOptions {
EqualOptions,
NotEqualOptions,
ShapeOptions,
+ PowOptions,
+ ArgMinOptions,
+ FakeQuantOptions,
}
enum Padding : byte { SAME, VALID }
@@ -294,9 +303,18 @@ table BidirectionalSequenceRNNOptions {
fused_activation_function:ActivationFunctionType;
}
+enum FullyConnectedOptionsWeightsFormat: byte {
+ DEFAULT = 0,
+ SHUFFLED4x16INT8 = 1,
+}
+
// An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
table FullyConnectedOptions {
+ // Parameters for FullyConnected version 1 or above.
fused_activation_function:ActivationFunctionType;
+
+ // Parameters for FullyConnected version 2 or above.
+ weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
}
table SoftmaxOptions {
@@ -457,6 +475,10 @@ table ArgMaxOptions {
output_type : TensorType;
}
+table ArgMinOptions {
+ output_type : TensorType;
+}
+
table GreaterOptions {
}
@@ -502,6 +524,19 @@ table ShapeOptions {
out_type : TensorType;
}
+table PowOptions {
+}
+
+table FakeQuantOptions {
+ // Parameters supported by version 1:
+ min:float;
+ max:float;
+ num_bits:int;
+
+ // Parameters supported by version 2:
+ narrow_range:bool;
+}
+
// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
table OperatorCode {
diff --git a/tensorflow/contrib/lite/schema/schema_generated.h b/tensorflow/contrib/lite/schema/schema_generated.h
index 8c0660dfe2..8052404319 100755
--- a/tensorflow/contrib/lite/schema/schema_generated.h
+++ b/tensorflow/contrib/lite/schema/schema_generated.h
@@ -157,6 +157,9 @@ struct TileOptionsT;
struct ArgMaxOptions;
struct ArgMaxOptionsT;
+struct ArgMinOptions;
+struct ArgMinOptionsT;
+
struct GreaterOptions;
struct GreaterOptionsT;
@@ -196,6 +199,12 @@ struct NotEqualOptionsT;
struct ShapeOptions;
struct ShapeOptionsT;
+struct PowOptions;
+struct PowOptionsT;
+
+struct FakeQuantOptions;
+struct FakeQuantOptionsT;
+
struct OperatorCode;
struct OperatorCodeT;
@@ -220,11 +229,12 @@ enum TensorType {
TensorType_STRING = 5,
TensorType_BOOL = 6,
TensorType_INT16 = 7,
+ TensorType_COMPLEX64 = 8,
TensorType_MIN = TensorType_FLOAT32,
- TensorType_MAX = TensorType_INT16
+ TensorType_MAX = TensorType_COMPLEX64
};
-inline TensorType (&EnumValuesTensorType())[8] {
+inline TensorType (&EnumValuesTensorType())[9] {
static TensorType values[] = {
TensorType_FLOAT32,
TensorType_FLOAT16,
@@ -233,7 +243,8 @@ inline TensorType (&EnumValuesTensorType())[8] {
TensorType_INT64,
TensorType_STRING,
TensorType_BOOL,
- TensorType_INT16
+ TensorType_INT16,
+ TensorType_COMPLEX64
};
return values;
}
@@ -248,6 +259,7 @@ inline const char **EnumNamesTensorType() {
"STRING",
"BOOL",
"INT16",
+ "COMPLEX64",
nullptr
};
return names;
@@ -336,11 +348,16 @@ enum BuiltinOperator {
BuiltinOperator_SQRT = 75,
BuiltinOperator_RSQRT = 76,
BuiltinOperator_SHAPE = 77,
+ BuiltinOperator_POW = 78,
+ BuiltinOperator_ARG_MIN = 79,
+ BuiltinOperator_FAKE_QUANT = 80,
+ BuiltinOperator_REDUCE_PROD = 81,
+ BuiltinOperator_REDUCE_MAX = 82,
BuiltinOperator_MIN = BuiltinOperator_ADD,
- BuiltinOperator_MAX = BuiltinOperator_SHAPE
+ BuiltinOperator_MAX = BuiltinOperator_REDUCE_MAX
};
-inline BuiltinOperator (&EnumValuesBuiltinOperator())[77] {
+inline BuiltinOperator (&EnumValuesBuiltinOperator())[82] {
static BuiltinOperator values[] = {
BuiltinOperator_ADD,
BuiltinOperator_AVERAGE_POOL_2D,
@@ -418,7 +435,12 @@ inline BuiltinOperator (&EnumValuesBuiltinOperator())[77] {
BuiltinOperator_SUM,
BuiltinOperator_SQRT,
BuiltinOperator_RSQRT,
- BuiltinOperator_SHAPE
+ BuiltinOperator_SHAPE,
+ BuiltinOperator_POW,
+ BuiltinOperator_ARG_MIN,
+ BuiltinOperator_FAKE_QUANT,
+ BuiltinOperator_REDUCE_PROD,
+ BuiltinOperator_REDUCE_MAX
};
return values;
}
@@ -503,6 +525,11 @@ inline const char **EnumNamesBuiltinOperator() {
"SQRT",
"RSQRT",
"SHAPE",
+ "POW",
+ "ARG_MIN",
+ "FAKE_QUANT",
+ "REDUCE_PROD",
+ "REDUCE_MAX",
nullptr
};
return names;
@@ -570,11 +597,14 @@ enum BuiltinOptions {
BuiltinOptions_EqualOptions = 53,
BuiltinOptions_NotEqualOptions = 54,
BuiltinOptions_ShapeOptions = 55,
+ BuiltinOptions_PowOptions = 56,
+ BuiltinOptions_ArgMinOptions = 57,
+ BuiltinOptions_FakeQuantOptions = 58,
BuiltinOptions_MIN = BuiltinOptions_NONE,
- BuiltinOptions_MAX = BuiltinOptions_ShapeOptions
+ BuiltinOptions_MAX = BuiltinOptions_FakeQuantOptions
};
-inline BuiltinOptions (&EnumValuesBuiltinOptions())[56] {
+inline BuiltinOptions (&EnumValuesBuiltinOptions())[59] {
static BuiltinOptions values[] = {
BuiltinOptions_NONE,
BuiltinOptions_Conv2DOptions,
@@ -631,7 +661,10 @@ inline BuiltinOptions (&EnumValuesBuiltinOptions())[56] {
BuiltinOptions_ExpandDimsOptions,
BuiltinOptions_EqualOptions,
BuiltinOptions_NotEqualOptions,
- BuiltinOptions_ShapeOptions
+ BuiltinOptions_ShapeOptions,
+ BuiltinOptions_PowOptions,
+ BuiltinOptions_ArgMinOptions,
+ BuiltinOptions_FakeQuantOptions
};
return values;
}
@@ -694,6 +727,9 @@ inline const char **EnumNamesBuiltinOptions() {
"EqualOptions",
"NotEqualOptions",
"ShapeOptions",
+ "PowOptions",
+ "ArgMinOptions",
+ "FakeQuantOptions",
nullptr
};
return names;
@@ -928,6 +964,18 @@ template<> struct BuiltinOptionsTraits<ShapeOptions> {
static const BuiltinOptions enum_value = BuiltinOptions_ShapeOptions;
};
+template<> struct BuiltinOptionsTraits<PowOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_PowOptions;
+};
+
+template<> struct BuiltinOptionsTraits<ArgMinOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_ArgMinOptions;
+};
+
+template<> struct BuiltinOptionsTraits<FakeQuantOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_FakeQuantOptions;
+};
+
struct BuiltinOptionsUnion {
BuiltinOptions type;
void *value;
@@ -1399,6 +1447,30 @@ struct BuiltinOptionsUnion {
return type == BuiltinOptions_ShapeOptions ?
reinterpret_cast<const ShapeOptionsT *>(value) : nullptr;
}
+ PowOptionsT *AsPowOptions() {
+ return type == BuiltinOptions_PowOptions ?
+ reinterpret_cast<PowOptionsT *>(value) : nullptr;
+ }
+ const PowOptionsT *AsPowOptions() const {
+ return type == BuiltinOptions_PowOptions ?
+ reinterpret_cast<const PowOptionsT *>(value) : nullptr;
+ }
+ ArgMinOptionsT *AsArgMinOptions() {
+ return type == BuiltinOptions_ArgMinOptions ?
+ reinterpret_cast<ArgMinOptionsT *>(value) : nullptr;
+ }
+ const ArgMinOptionsT *AsArgMinOptions() const {
+ return type == BuiltinOptions_ArgMinOptions ?
+ reinterpret_cast<const ArgMinOptionsT *>(value) : nullptr;
+ }
+ FakeQuantOptionsT *AsFakeQuantOptions() {
+ return type == BuiltinOptions_FakeQuantOptions ?
+ reinterpret_cast<FakeQuantOptionsT *>(value) : nullptr;
+ }
+ const FakeQuantOptionsT *AsFakeQuantOptions() const {
+ return type == BuiltinOptions_FakeQuantOptions ?
+ reinterpret_cast<const FakeQuantOptionsT *>(value) : nullptr;
+ }
};
bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
@@ -1506,6 +1578,35 @@ inline const char *EnumNameLSHProjectionType(LSHProjectionType e) {
return EnumNamesLSHProjectionType()[index];
}
+enum FullyConnectedOptionsWeightsFormat {
+ FullyConnectedOptionsWeightsFormat_DEFAULT = 0,
+ FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 = 1,
+ FullyConnectedOptionsWeightsFormat_MIN = FullyConnectedOptionsWeightsFormat_DEFAULT,
+ FullyConnectedOptionsWeightsFormat_MAX = FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8
+};
+
+inline FullyConnectedOptionsWeightsFormat (&EnumValuesFullyConnectedOptionsWeightsFormat())[2] {
+ static FullyConnectedOptionsWeightsFormat values[] = {
+ FullyConnectedOptionsWeightsFormat_DEFAULT,
+ FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8
+ };
+ return values;
+}
+
+inline const char **EnumNamesFullyConnectedOptionsWeightsFormat() {
+ static const char *names[] = {
+ "DEFAULT",
+ "SHUFFLED4x16INT8",
+ nullptr
+ };
+ return names;
+}
+
+inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e) {
+ const size_t index = static_cast<int>(e);
+ return EnumNamesFullyConnectedOptionsWeightsFormat()[index];
+}
+
enum LSTMKernelType {
LSTMKernelType_FULL = 0,
LSTMKernelType_BASIC = 1,
@@ -2558,22 +2659,29 @@ flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequence
struct FullyConnectedOptionsT : public flatbuffers::NativeTable {
typedef FullyConnectedOptions TableType;
ActivationFunctionType fused_activation_function;
+ FullyConnectedOptionsWeightsFormat weights_format;
FullyConnectedOptionsT()
- : fused_activation_function(ActivationFunctionType_NONE) {
+ : fused_activation_function(ActivationFunctionType_NONE),
+ weights_format(FullyConnectedOptionsWeightsFormat_DEFAULT) {
}
};
struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef FullyConnectedOptionsT NativeTableType;
enum {
- VT_FUSED_ACTIVATION_FUNCTION = 4
+ VT_FUSED_ACTIVATION_FUNCTION = 4,
+ VT_WEIGHTS_FORMAT = 6
};
ActivationFunctionType fused_activation_function() const {
return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
}
+ FullyConnectedOptionsWeightsFormat weights_format() const {
+ return static_cast<FullyConnectedOptionsWeightsFormat>(GetField<int8_t>(VT_WEIGHTS_FORMAT, 0));
+ }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<int8_t>(verifier, VT_WEIGHTS_FORMAT) &&
verifier.EndTable();
}
FullyConnectedOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
@@ -2587,6 +2695,9 @@ struct FullyConnectedOptionsBuilder {
void add_fused_activation_function(ActivationFunctionType fused_activation_function) {
fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
}
+ void add_weights_format(FullyConnectedOptionsWeightsFormat weights_format) {
+ fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_WEIGHTS_FORMAT, static_cast<int8_t>(weights_format), 0);
+ }
explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
@@ -2601,8 +2712,10 @@ struct FullyConnectedOptionsBuilder {
inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(
flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) {
+ ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
+ FullyConnectedOptionsWeightsFormat weights_format = FullyConnectedOptionsWeightsFormat_DEFAULT) {
FullyConnectedOptionsBuilder builder_(_fbb);
+ builder_.add_weights_format(weights_format);
builder_.add_fused_activation_function(fused_activation_function);
return builder_.Finish();
}
@@ -4421,6 +4534,60 @@ inline flatbuffers::Offset<ArgMaxOptions> CreateArgMaxOptions(
flatbuffers::Offset<ArgMaxOptions> CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+struct ArgMinOptionsT : public flatbuffers::NativeTable {
+ typedef ArgMinOptions TableType;
+ TensorType output_type;
+ ArgMinOptionsT()
+ : output_type(TensorType_FLOAT32) {
+ }
+};
+
+struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef ArgMinOptionsT NativeTableType;
+ enum {
+ VT_OUTPUT_TYPE = 4
+ };
+ TensorType output_type() const {
+ return static_cast<TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) &&
+ verifier.EndTable();
+ }
+ ArgMinOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ArgMinOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ArgMinOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_output_type(TensorType output_type) {
+ fbb_.AddElement<int8_t>(ArgMinOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0);
+ }
+ explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ ArgMinOptionsBuilder &operator=(const ArgMinOptionsBuilder &);
+ flatbuffers::Offset<ArgMinOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ArgMinOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ArgMinOptions> CreateArgMinOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ TensorType output_type = TensorType_FLOAT32) {
+ ArgMinOptionsBuilder builder_(_fbb);
+ builder_.add_output_type(output_type);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ArgMinOptions> CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
struct GreaterOptionsT : public flatbuffers::NativeTable {
typedef GreaterOptions TableType;
GreaterOptionsT() {
@@ -5007,6 +5174,136 @@ inline flatbuffers::Offset<ShapeOptions> CreateShapeOptions(
flatbuffers::Offset<ShapeOptions> CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+struct PowOptionsT : public flatbuffers::NativeTable {
+ typedef PowOptions TableType;
+ PowOptionsT() {
+ }
+};
+
+struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef PowOptionsT NativeTableType;
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ verifier.EndTable();
+ }
+ PowOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<PowOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct PowOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit PowOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ PowOptionsBuilder &operator=(const PowOptionsBuilder &);
+ flatbuffers::Offset<PowOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<PowOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<PowOptions> CreatePowOptions(
+ flatbuffers::FlatBufferBuilder &_fbb) {
+ PowOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<PowOptions> CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct FakeQuantOptionsT : public flatbuffers::NativeTable {
+ typedef FakeQuantOptions TableType;
+ float min;
+ float max;
+ int32_t num_bits;
+ bool narrow_range;
+ FakeQuantOptionsT()
+ : min(0.0f),
+ max(0.0f),
+ num_bits(0),
+ narrow_range(false) {
+ }
+};
+
+struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef FakeQuantOptionsT NativeTableType;
+ enum {
+ VT_MIN = 4,
+ VT_MAX = 6,
+ VT_NUM_BITS = 8,
+ VT_NARROW_RANGE = 10
+ };
+ float min() const {
+ return GetField<float>(VT_MIN, 0.0f);
+ }
+ float max() const {
+ return GetField<float>(VT_MAX, 0.0f);
+ }
+ int32_t num_bits() const {
+ return GetField<int32_t>(VT_NUM_BITS, 0);
+ }
+ bool narrow_range() const {
+ return GetField<uint8_t>(VT_NARROW_RANGE, 0) != 0;
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<float>(verifier, VT_MIN) &&
+ VerifyField<float>(verifier, VT_MAX) &&
+ VerifyField<int32_t>(verifier, VT_NUM_BITS) &&
+ VerifyField<uint8_t>(verifier, VT_NARROW_RANGE) &&
+ verifier.EndTable();
+ }
+ FakeQuantOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<FakeQuantOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct FakeQuantOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_min(float min) {
+ fbb_.AddElement<float>(FakeQuantOptions::VT_MIN, min, 0.0f);
+ }
+ void add_max(float max) {
+ fbb_.AddElement<float>(FakeQuantOptions::VT_MAX, max, 0.0f);
+ }
+ void add_num_bits(int32_t num_bits) {
+ fbb_.AddElement<int32_t>(FakeQuantOptions::VT_NUM_BITS, num_bits, 0);
+ }
+ void add_narrow_range(bool narrow_range) {
+ fbb_.AddElement<uint8_t>(FakeQuantOptions::VT_NARROW_RANGE, static_cast<uint8_t>(narrow_range), 0);
+ }
+ explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ FakeQuantOptionsBuilder &operator=(const FakeQuantOptionsBuilder &);
+ flatbuffers::Offset<FakeQuantOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<FakeQuantOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<FakeQuantOptions> CreateFakeQuantOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ float min = 0.0f,
+ float max = 0.0f,
+ int32_t num_bits = 0,
+ bool narrow_range = false) {
+ FakeQuantOptionsBuilder builder_(_fbb);
+ builder_.add_num_bits(num_bits);
+ builder_.add_max(max);
+ builder_.add_min(min);
+ builder_.add_narrow_range(narrow_range);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<FakeQuantOptions> CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
struct OperatorCodeT : public flatbuffers::NativeTable {
typedef OperatorCode TableType;
BuiltinOperator builtin_code;
@@ -5305,6 +5602,15 @@ struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const ShapeOptions *builtin_options_as_ShapeOptions() const {
return builtin_options_type() == BuiltinOptions_ShapeOptions ? static_cast<const ShapeOptions *>(builtin_options()) : nullptr;
}
+ const PowOptions *builtin_options_as_PowOptions() const {
+ return builtin_options_type() == BuiltinOptions_PowOptions ? static_cast<const PowOptions *>(builtin_options()) : nullptr;
+ }
+ const ArgMinOptions *builtin_options_as_ArgMinOptions() const {
+ return builtin_options_type() == BuiltinOptions_ArgMinOptions ? static_cast<const ArgMinOptions *>(builtin_options()) : nullptr;
+ }
+ const FakeQuantOptions *builtin_options_as_FakeQuantOptions() const {
+ return builtin_options_type() == BuiltinOptions_FakeQuantOptions ? static_cast<const FakeQuantOptions *>(builtin_options()) : nullptr;
+ }
const flatbuffers::Vector<uint8_t> *custom_options() const {
return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
}
@@ -5556,6 +5862,18 @@ template<> inline const ShapeOptions *Operator::builtin_options_as<ShapeOptions>
return builtin_options_as_ShapeOptions();
}
+template<> inline const PowOptions *Operator::builtin_options_as<PowOptions>() const {
+ return builtin_options_as_PowOptions();
+}
+
+template<> inline const ArgMinOptions *Operator::builtin_options_as<ArgMinOptions>() const {
+ return builtin_options_as_ArgMinOptions();
+}
+
+template<> inline const FakeQuantOptions *Operator::builtin_options_as<FakeQuantOptions>() const {
+ return builtin_options_as_FakeQuantOptions();
+}
+
struct OperatorBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
@@ -6335,6 +6653,7 @@ inline void FullyConnectedOptions::UnPackTo(FullyConnectedOptionsT *_o, const fl
(void)_o;
(void)_resolver;
{ auto _e = fused_activation_function(); _o->fused_activation_function = _e; };
+ { auto _e = weights_format(); _o->weights_format = _e; };
}
inline flatbuffers::Offset<FullyConnectedOptions> FullyConnectedOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@@ -6346,9 +6665,11 @@ inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(fl
(void)_o;
struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FullyConnectedOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
auto _fused_activation_function = _o->fused_activation_function;
+ auto _weights_format = _o->weights_format;
return tflite::CreateFullyConnectedOptions(
_fbb,
- _fused_activation_function);
+ _fused_activation_function,
+ _weights_format);
}
inline SoftmaxOptionsT *SoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
@@ -7218,6 +7539,32 @@ inline flatbuffers::Offset<ArgMaxOptions> CreateArgMaxOptions(flatbuffers::FlatB
_output_type);
}
+inline ArgMinOptionsT *ArgMinOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new ArgMinOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void ArgMinOptions::UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = output_type(); _o->output_type = _e; };
+}
+
+inline flatbuffers::Offset<ArgMinOptions> ArgMinOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateArgMinOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ArgMinOptions> CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMinOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _output_type = _o->output_type;
+ return tflite::CreateArgMinOptions(
+ _fbb,
+ _output_type);
+}
+
inline GreaterOptionsT *GreaterOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
auto _o = new GreaterOptionsT();
UnPackTo(_o, _resolver);
@@ -7532,6 +7879,64 @@ inline flatbuffers::Offset<ShapeOptions> CreateShapeOptions(flatbuffers::FlatBuf
_out_type);
}
+inline PowOptionsT *PowOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new PowOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void PowOptions::UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<PowOptions> PowOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreatePowOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<PowOptions> CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PowOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ return tflite::CreatePowOptions(
+ _fbb);
+}
+
+inline FakeQuantOptionsT *FakeQuantOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new FakeQuantOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void FakeQuantOptions::UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = min(); _o->min = _e; };
+ { auto _e = max(); _o->max = _e; };
+ { auto _e = num_bits(); _o->num_bits = _e; };
+ { auto _e = narrow_range(); _o->narrow_range = _e; };
+}
+
+inline flatbuffers::Offset<FakeQuantOptions> FakeQuantOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateFakeQuantOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<FakeQuantOptions> CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FakeQuantOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _min = _o->min;
+ auto _max = _o->max;
+ auto _num_bits = _o->num_bits;
+ auto _narrow_range = _o->narrow_range;
+ return tflite::CreateFakeQuantOptions(
+ _fbb,
+ _min,
+ _max,
+ _num_bits,
+ _narrow_range);
+}
+
inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
auto _o = new OperatorCodeT();
UnPackTo(_o, _resolver);
@@ -7941,6 +8346,18 @@ inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *ob
auto ptr = reinterpret_cast<const ShapeOptions *>(obj);
return verifier.VerifyTable(ptr);
}
+ case BuiltinOptions_PowOptions: {
+ auto ptr = reinterpret_cast<const PowOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ArgMinOptions: {
+ auto ptr = reinterpret_cast<const ArgMinOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_FakeQuantOptions: {
+ auto ptr = reinterpret_cast<const FakeQuantOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
default: return false;
}
}
@@ -8179,6 +8596,18 @@ inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, c
auto ptr = reinterpret_cast<const ShapeOptions *>(obj);
return ptr->UnPack(resolver);
}
+ case BuiltinOptions_PowOptions: {
+ auto ptr = reinterpret_cast<const PowOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ArgMinOptions: {
+ auto ptr = reinterpret_cast<const ArgMinOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_FakeQuantOptions: {
+ auto ptr = reinterpret_cast<const FakeQuantOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
default: return nullptr;
}
}
@@ -8405,6 +8834,18 @@ inline flatbuffers::Offset<void> BuiltinOptionsUnion::Pack(flatbuffers::FlatBuff
auto ptr = reinterpret_cast<const ShapeOptionsT *>(value);
return CreateShapeOptions(_fbb, ptr, _rehasher).Union();
}
+ case BuiltinOptions_PowOptions: {
+ auto ptr = reinterpret_cast<const PowOptionsT *>(value);
+ return CreatePowOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ArgMinOptions: {
+ auto ptr = reinterpret_cast<const ArgMinOptionsT *>(value);
+ return CreateArgMinOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_FakeQuantOptions: {
+ auto ptr = reinterpret_cast<const FakeQuantOptionsT *>(value);
+ return CreateFakeQuantOptions(_fbb, ptr, _rehasher).Union();
+ }
default: return 0;
}
}
@@ -8631,6 +9072,18 @@ inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) FL
value = new ShapeOptionsT(*reinterpret_cast<ShapeOptionsT *>(u.value));
break;
}
+ case BuiltinOptions_PowOptions: {
+ value = new PowOptionsT(*reinterpret_cast<PowOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ArgMinOptions: {
+ value = new ArgMinOptionsT(*reinterpret_cast<ArgMinOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_FakeQuantOptions: {
+ value = new FakeQuantOptionsT(*reinterpret_cast<FakeQuantOptionsT *>(u.value));
+ break;
+ }
default:
break;
}
@@ -8913,6 +9366,21 @@ inline void BuiltinOptionsUnion::Reset() {
delete ptr;
break;
}
+ case BuiltinOptions_PowOptions: {
+ auto ptr = reinterpret_cast<PowOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ArgMinOptions: {
+ auto ptr = reinterpret_cast<ArgMinOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_FakeQuantOptions: {
+ auto ptr = reinterpret_cast<FakeQuantOptionsT *>(value);
+ delete ptr;
+ break;
+ }
default: break;
}
value = nullptr;
diff --git a/tensorflow/contrib/lite/testing/BUILD b/tensorflow/contrib/lite/testing/BUILD
index b823c97f38..789bc695f8 100644
--- a/tensorflow/contrib/lite/testing/BUILD
+++ b/tensorflow/contrib/lite/testing/BUILD
@@ -172,6 +172,7 @@ cc_test(
data = ["//tensorflow/contrib/lite:testdata/multi_add.bin"],
tags = [
"tflite_not_portable_android",
+ "tflite_not_portable_ios",
],
deps = [
":tflite_driver",
diff --git a/tensorflow/contrib/lite/testing/generate_examples.py b/tensorflow/contrib/lite/testing/generate_examples.py
index c4d2d7ca52..32d04c0717 100644
--- a/tensorflow/contrib/lite/testing/generate_examples.py
+++ b/tensorflow/contrib/lite/testing/generate_examples.py
@@ -94,8 +94,8 @@ KNOWN_BUGS = {
r"sigmoid.*input_shape=\[\]": "67645668",
# Concat doesn't work with a single input tensor
r"concat.*num_tensors=1": "67378344",
- # Transposition in MatMul is not supported.
- r"fully_connected.*transpose_.=True": "67586970",
+ # Transposition in MatMul is not fully supported.
+    r"fully_connected.*transpose_a=True": "67586970",
# Softmax graphs are too complex.
r"softmax.*dim=0": "67749831",
# BatchToSpaceND only supports 4D tensors.
@@ -678,6 +678,55 @@ def make_relu6_tests(zip_path):
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
+def make_prelu_tests(zip_path):
+ """Make a set of tests to do PReLU."""
+
+ test_parameters = [{
+ # The canonical case for image processing is having a 4D `input` (NHWC)
+ # and `shared_axes`=[1, 2], so the alpha parameter is per channel.
+ "input_shape": [[1, 10, 10, 3], [3, 3, 3, 3]],
+ "shared_axes": [[1, 2], [1]],
+ }]
+
+ def build_graph(parameters):
+ """Build the graph for the test case."""
+
+ input_tensor = tf.placeholder(
+ dtype=tf.float32, name="input", shape=parameters["input_shape"])
+ prelu = tf.keras.layers.PReLU(shared_axes=parameters["shared_axes"])
+ out = prelu(input_tensor)
+ return [input_tensor], [out]
+
+ def build_inputs(parameters, sess, inputs, outputs):
+ """Build the inputs for the test case."""
+
+ input_shape = parameters["input_shape"]
+ input_values = create_tensor_data(
+ np.float32, input_shape, min_value=-10, max_value=10)
+ shared_axes = parameters["shared_axes"]
+
+ alpha_shape = []
+ for dim in range(1, len(input_shape)):
+ alpha_shape.append(1 if dim in shared_axes else input_shape[dim])
+
+ alpha_values = create_tensor_data(np.float32, alpha_shape)
+
+ # There should be only 1 trainable variable tensor.
+ variables = tf.all_variables()
+ assert len(variables) == 1
+ sess.run(variables[0].assign(alpha_values))
+
+ return [input_values], sess.run(
+ outputs, feed_dict=dict(zip(inputs, [input_values])))
+
+ make_zip_of_tests(
+ zip_path,
+ test_parameters,
+ build_graph,
+ build_inputs,
+ use_frozen_graph=True)
+
+
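To make the alpha bookkeeping in build_inputs concrete: shared axes collapse to size 1 while the remaining dimensions keep the input's size. A small worked example for the canonical image case:

    input_shape = [1, 10, 10, 3]
    shared_axes = [1, 2]
    alpha_shape = [1 if dim in shared_axes else input_shape[dim]
                   for dim in range(1, len(input_shape))]
    assert alpha_shape == [1, 1, 3]  # shared over H and W, per-channel alpha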
# This function tests various TensorFLow functions that generates Const op,
# including `tf.ones`, `tf.zeros` and random functions.
def make_constant_tests(zip_path):
@@ -705,7 +754,7 @@ def make_constant_tests(zip_path):
def make_binary_op_tests(zip_path, binary_operator):
- """Make a set of tests to do add with and without broadcast."""
+ """Make a set of tests to do binary ops with and without broadcast."""
# These parameters are split because we don't support broadcasting.
test_parameters = [{
@@ -830,16 +879,24 @@ def make_reduce_tests(reduce_op):
def make_mean_tests(zip_path):
"""Make a set of tests to do mean."""
-
return make_reduce_tests(tf.reduce_mean)(zip_path)
def make_sum_tests(zip_path):
"""Make a set of tests to do sum."""
-
return make_reduce_tests(tf.reduce_sum)(zip_path)
+def make_reduce_prod_tests(zip_path):
+ """Make a set of tests to do prod."""
+ return make_reduce_tests(tf.reduce_prod)(zip_path)
+
+
+def make_reduce_max_tests(zip_path):
+ """Make a set of tests to do max."""
+ return make_reduce_tests(tf.reduce_max)(zip_path)
+
+
def make_exp_tests(zip_path):
"""Make a set of tests to do exp."""
@@ -990,6 +1047,10 @@ def make_mul_tests(zip_path):
make_binary_op_tests(zip_path, tf.multiply)
+def make_pow_tests(zip_path):
+ make_binary_op_tests(zip_path, tf.pow)
+
+
def make_gather_tests(zip_path):
"""Make a set of tests to do gather."""
@@ -1321,6 +1382,12 @@ def make_fully_connected_tests(zip_path):
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
+ }, {
+ "shape1": [[40, 37]],
+ "shape2": [[40, 37]],
+ "transpose_a": [False],
+ "transpose_b": [True],
+ "constant_filter": [True, False],
}]
def build_graph(parameters):
@@ -2165,7 +2232,7 @@ def make_topk_tests(zip_path):
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
-def make_arg_max_tests(zip_path):
-  """Make a set of tests to do arg_max."""
+def make_arg_min_max_tests(zip_path):
+  """Make a set of tests to do arg_max and arg_min."""
test_parameters = [{
@@ -2173,6 +2240,7 @@ def make_arg_max_tests(zip_path):
"input_shape": [[1, 1, 1, 3], [2, 3, 4, 5], [2, 3, 3], [5, 5], [10]],
"output_type": [tf.int32, tf.int64],
"axis_is_last_dim": [True, False],
+ "is_arg_max": [True],
}]
def build_graph(parameters):
@@ -2185,7 +2253,10 @@ def make_arg_max_tests(zip_path):
axis = len(parameters["input_shape"]) - 1
else:
axis = random.randint(0, max(len(parameters["input_shape"]) - 2, 0))
- out = tf.arg_max(input_value, axis, output_type=parameters["output_type"])
+ if parameters["is_arg_max"]:
+ out = tf.arg_max(input_value, axis, output_type=parameters["output_type"])
+ else:
+ out = tf.arg_min(input_value, axis, output_type=parameters["output_type"])
return [input_value], [out]
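The `is_arg_max` knob lets the same generator emit ArgMin graphs once they are enabled; the two ops differ only in which extreme's index they return. For example, with plain NumPy and made-up data:

    import numpy as np

    x = np.array([[3.0, 9.0, 1.0],
                  [6.0, 2.0, 8.0]])
    print(np.argmax(x, axis=1))  # [1 2] -- index of the max in each row
    print(np.argmin(x, axis=1))  # [2 1] -- index of the min in each row
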
def build_inputs(parameters, sess, inputs, outputs):
diff --git a/tensorflow/contrib/lite/testing/generate_testspec.cc b/tensorflow/contrib/lite/testing/generate_testspec.cc
index c0c861ff6d..c1092e4d25 100644
--- a/tensorflow/contrib/lite/testing/generate_testspec.cc
+++ b/tensorflow/contrib/lite/testing/generate_testspec.cc
@@ -25,7 +25,7 @@ namespace testing {
template <typename T>
void GenerateCsv(const std::vector<int>& shape, float min, float max,
string* out) {
- auto random_float = [](int min, int max) {
+ auto random_float = [](float min, float max) {
static unsigned int seed;
return min + (max - min) * static_cast<float>(rand_r(&seed)) / RAND_MAX;
};
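This small-looking change is a real bug fix: the lambda used to take its bounds as `int`, so fractional `min`/`max` values were truncated toward zero before the scaling. A sketch of the failure mode, with Python's `int()` standing in for the implicit C++ conversion:

    import random

    def random_float_buggy(min_value, max_value):
      # Old behavior: [](int min, int max) truncates the float bounds.
      min_value, max_value = int(min_value), int(max_value)
      return min_value + (max_value - min_value) * random.random()

    # With bounds (-0.5, 0.5) both truncate to 0: the output is always 0.0.
    print(random_float_buggy(-0.5, 0.5))
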
@@ -37,16 +37,10 @@ void GenerateCsv(const std::vector<int>& shape, float min, float max,
*out = Join(data.data(), data.size(), ",");
}
-bool GenerateTestSpecFromTensorflowModel(
- std::iostream& stream, const string& tensorflow_model_path,
- const string& tflite_model_path, const std::vector<string>& input_layer,
+std::vector<string> GenerateInputValues(
+ const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
- const std::vector<string>& input_layer_shape,
- const std::vector<string>& output_layer) {
- CHECK_EQ(input_layer.size(), input_layer_type.size());
- CHECK_EQ(input_layer.size(), input_layer_shape.size());
-
- // Generate inputs.
+ const std::vector<string>& input_layer_shape) {
std::vector<string> input_values;
input_values.resize(input_layer.size());
for (int i = 0; i < input_layer.size(); i++) {
@@ -73,9 +67,22 @@ bool GenerateTestSpecFromTensorflowModel(
default:
fprintf(stderr, "Unsupported type %d (%s) when generating testspec.\n",
type, input_layer_type[i].c_str());
- return false;
+ input_values.clear();
+ return input_values;
}
}
+ return input_values;
+}
+
+bool GenerateTestSpecFromTensorflowModel(
+ std::iostream& stream, const string& tensorflow_model_path,
+ const string& tflite_model_path, int num_invocations,
+ const std::vector<string>& input_layer,
+ const std::vector<string>& input_layer_type,
+ const std::vector<string>& input_layer_shape,
+ const std::vector<string>& output_layer) {
+ CHECK_EQ(input_layer.size(), input_layer_type.size());
+ CHECK_EQ(input_layer.size(), input_layer_shape.size());
// Invoke tensorflow model.
TfDriver runner(input_layer, input_layer_type, input_layer_shape,
@@ -91,39 +98,51 @@ bool GenerateTestSpecFromTensorflowModel(
return false;
}
- for (int i = 0; i < input_values.size(); i++) {
- runner.SetInput(i, input_values[i]);
- if (!runner.IsValid()) {
- cerr << runner.GetErrorMessage() << endl;
- return false;
- }
- }
-
- runner.Invoke();
- if (!runner.IsValid()) {
- cerr << runner.GetErrorMessage() << endl;
- return false;
- }
-
- // Write test spec.
+ // Write first part of test spec, defining model and input shapes.
stream << "load_model: " << tflite_model_path << "\n";
stream << "reshape {\n";
for (const auto& shape : input_layer_shape) {
stream << " input: \"" << shape << "\"\n";
}
stream << "}\n";
- stream << "invoke {\n";
- for (const auto& value : input_values) {
- stream << " input: \"" << value << "\"\n";
- }
- for (int i = 0; i < output_layer.size(); i++) {
- stream << " output: \"" << runner.ReadOutput(i) << "\"\n";
+
+ // Generate inputs.
+ for (int i = 0; i < num_invocations; ++i) {
+ // Note that the input values are random, so each invocation will have a
+ // different set.
+ std::vector<string> input_values =
+ GenerateInputValues(input_layer, input_layer_type, input_layer_shape);
+ if (input_values.empty()) return false;
+
+ // Run TensorFlow.
+ for (int j = 0; j < input_values.size(); j++) {
+ runner.SetInput(j, input_values[j]);
+ if (!runner.IsValid()) {
+ cerr << runner.GetErrorMessage() << endl;
+ return false;
+ }
+ }
+
+ runner.Invoke();
if (!runner.IsValid()) {
cerr << runner.GetErrorMessage() << endl;
return false;
}
+
+ // Write second part of test spec, with inputs and outputs.
+ stream << "invoke {\n";
+ for (const auto& value : input_values) {
+ stream << " input: \"" << value << "\"\n";
+ }
+ for (int j = 0; j < output_layer.size(); j++) {
+ stream << " output: \"" << runner.ReadOutput(j) << "\"\n";
+ if (!runner.IsValid()) {
+ cerr << runner.GetErrorMessage() << endl;
+ return false;
+ }
+ }
+ stream << "}\n";
}
- stream << "}\n";
return true;
}
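With this restructuring a single spec now carries one reshape block followed by num_invocations invoke blocks, each holding freshly generated random inputs and the corresponding TensorFlow reference outputs. For num_invocations = 2 the emitted text looks roughly like this (all paths and values hypothetical):

    load_model: model.tflite
    reshape {
      input: "1,8,8,3"
    }
    invoke {
      input: "0.1,0.7,0.3"
      output: "0.42"
    }
    invoke {
      input: "0.9,0.2,0.5"
      output: "0.58"
    }
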
diff --git a/tensorflow/contrib/lite/testing/generate_testspec.h b/tensorflow/contrib/lite/testing/generate_testspec.h
index 6e31a853c3..bfaf5e7ec8 100644
--- a/tensorflow/contrib/lite/testing/generate_testspec.h
+++ b/tensorflow/contrib/lite/testing/generate_testspec.h
@@ -30,13 +30,15 @@ namespace testing {
// stream: mutable iostream that contains the contents of test spec.
// tensorflow_model_path: path to TensorFlow model.
-// tflite_model_path: path to tflite_model_path that the test spec runs
-// against. input_layer: names of input tensors. Example: input1
+// tflite_model_path: path to the TFLite model that the test spec runs against.
+// num_invocations: how many pairs of inputs and outputs will be generated.
+// input_layer: names of input tensors. Example: input1
// input_layer_type: datatypes of input tensors. Example: float
-// input_layer_shape: shapes of input tensors, separated by comma. example:
-// 1,3,4 output_layer: names of output tensors. Example: output
+// input_layer_shape: shapes of input tensors, separated by comma. Example:
+// 1,3,4. output_layer: names of output tensors. Example: output
bool GenerateTestSpecFromTensorflowModel(
std::iostream& stream, const string& tensorflow_model_path,
- const string& tflite_model_path, const std::vector<string>& input_layer,
+ const string& tflite_model_path, int num_invocations,
+ const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer);
diff --git a/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc b/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
index 8a59d756f8..ba36017baf 100644
--- a/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
+++ b/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
@@ -42,6 +42,7 @@ string* FLAGS_unzip_binary_path = new string("/usr/bin/unzip");
string* FLAGS_unzip_binary_path = new string("/system/bin/unzip");
#endif
bool FLAGS_use_nnapi = false;
+bool FLAGS_ignore_unsupported_nnapi = false;
} // namespace
-// TensorFlow system environment for file system called.
+// TensorFlow system environment used for file system calls.
@@ -52,12 +53,7 @@ tensorflow::Env* env = tensorflow::Env::Default();
// Key is a substring of the test name and value is a bug number.
// TODO(ahentz): make sure we clean this list up frequently.
std::map<string, string> kBrokenTests = {
- // Add only supports float32. (and "constant" tests use Add)
- {R"(^\/add_a.*int32)", "68808744"},
- {R"(^\/constant.*int32)", "68808744"},
- {R"(^\/mul.*int32)", "68808744"},
{R"(^\/div.*int32)", "68808744"},
- {R"(^\/sub.*int32)", "68808744"},
// Pad and PadV2 only supports 4D tensors.
{R"(^\/pad.*,input_shape=\[.,.\],paddings=\[\[.,.\],\[.,.\]\])",
@@ -99,11 +95,12 @@ std::map<string, string> kBrokenTests = {
{R"(^\/gather.*axis=1)", "76910444"},
// No support for arbitrary dimensions in ArgMax.
- {R"(^\/arg_max.*axis_is_last_dim=False.*input_shape=\[.,.,.,.\])",
+ {R"(^\/arg_min_max.*axis_is_last_dim=False.*input_shape=\[.,.,.,.\])",
"77546240"},
- {R"(^\/arg_max.*axis_is_last_dim=False.*input_shape=\[.,.,.\])",
+ {R"(^\/arg_min_max.*axis_is_last_dim=False.*input_shape=\[.,.,.\])",
+ "77546240"},
+ {R"(^\/arg_min_max.*axis_is_last_dim=False.*input_shape=\[.,.\])",
"77546240"},
- {R"(^\/arg_max.*axis_is_last_dim=False.*input_shape=\[.,.\])", "77546240"},
};
// Allows test data to be unzipped into a temporary directory and makes
@@ -228,16 +225,21 @@ TEST_P(OpsTest, RunZipTests) {
}
bool result = tflite::testing::ParseAndRunTests(&tflite_stream, &test_driver);
+ string message = test_driver.GetErrorMessage();
if (bug_number.empty()) {
- EXPECT_TRUE(result) << test_driver.GetErrorMessage();
+ if (FLAGS_use_nnapi && FLAGS_ignore_unsupported_nnapi && !result) {
+ EXPECT_EQ(message, string("Failed to invoke interpreter")) << message;
+ } else {
+ EXPECT_TRUE(result) << message;
+ }
} else {
if (FLAGS_ignore_known_bugs) {
EXPECT_FALSE(result) << "Test was expected to fail but is now passing; "
"you can mark http://b/"
<< bug_number << " as fixed! Yay!";
} else {
- EXPECT_TRUE(result) << test_driver.GetErrorMessage()
- << ": Possibly due to http://b/" << bug_number;
+ EXPECT_TRUE(result) << message << ": Possibly due to http://b/"
+ << bug_number;
}
}
}
@@ -280,8 +282,11 @@ int main(int argc, char** argv) {
tflite::testing::FLAGS_unzip_binary_path,
"Required: Location of a suitable unzip binary."),
tensorflow::Flag("use_nnapi", &tflite::testing::FLAGS_use_nnapi,
- "Whether to enable the NNAPI delegate")};
-
+ "Whether to enable the NNAPI delegate"),
+ tensorflow::Flag("ignore_unsupported_nnapi",
+ &tflite::testing::FLAGS_ignore_unsupported_nnapi,
+ "Don't fail tests just because delegation to NNAPI "
+ "is not possible")};
bool success = tensorflow::Flags::Parse(&argc, argv, flags);
if (!success || (argc == 2 && !strcmp(argv[1], "--helpfull"))) {
fprintf(stderr, "%s", tensorflow::Flags::Usage(argv[0], flags).c_str());
diff --git a/tensorflow/contrib/lite/testing/tflite_diff_example_test.cc b/tensorflow/contrib/lite/testing/tflite_diff_example_test.cc
index 5afa0f800c..f2c49fe389 100644
--- a/tensorflow/contrib/lite/testing/tflite_diff_example_test.cc
+++ b/tensorflow/contrib/lite/testing/tflite_diff_example_test.cc
@@ -20,12 +20,29 @@ int main(int argc, char** argv) {
::tflite::testing::DiffOptions options =
::tflite::testing::ParseTfliteDiffFlags(&argc, argv);
if (options.tensorflow_model.empty()) return 1;
+
int failure_count = 0;
- for (int i = 0; i < 100; i++) {
- if (!tflite::testing::RunDiffTest(options)) {
+ for (int i = 0; i < options.num_runs_per_pass; i++) {
+ if (!tflite::testing::RunDiffTest(options, /*num_invocations=*/1)) {
++failure_count;
}
}
- fprintf(stderr, "Num errors: %d\n", failure_count);
+ int failures_in_first_pass = failure_count;
+
+ if (failure_count == 0) {
+ // Let's try again with num_invocations > 1 to make sure we can do multiple
+ // invocations without resetting the interpreter.
+ for (int i = 0; i < options.num_runs_per_pass; i++) {
+ if (!tflite::testing::RunDiffTest(options, /*num_invocations=*/2)) {
+ ++failure_count;
+ }
+ }
+ }
+
+ fprintf(stderr, "Num errors in single-inference pass: %d\n",
+ failures_in_first_pass);
+ fprintf(stderr, "Num errors in multi-inference pass : %d\n",
+ failure_count - failures_in_first_pass);
+
return failure_count != 0 ? 1 : 0;
}
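main() now runs two passes: a first pass of single-invocation diffs and, only when that pass is clean, a second pass in which every run performs two invocations against the same interpreter. A compact sketch of the control flow, where `run_diff_test` and `options` are hypothetical Python stand-ins for tflite::testing::RunDiffTest and DiffOptions:

    def diff_main(options, run_diff_test):
      # Pass 1: one inference per run.
      failures = sum(not run_diff_test(options, num_invocations=1)
                     for _ in range(options.num_runs_per_pass))
      first_pass_failures = failures
      # Pass 2 (only on a clean first pass): two back-to-back inferences
      # per run, without resetting the interpreter in between.
      if failures == 0:
        failures += sum(not run_diff_test(options, num_invocations=2)
                        for _ in range(options.num_runs_per_pass))
      print("single-inference failures:", first_pass_failures)
      print("multi-inference failures :", failures - first_pass_failures)
      return 1 if failures else 0
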
diff --git a/tensorflow/contrib/lite/testing/tflite_diff_flags.h b/tensorflow/contrib/lite/testing/tflite_diff_flags.h
index 706108ed73..7a57e8d3fb 100644
--- a/tensorflow/contrib/lite/testing/tflite_diff_flags.h
+++ b/tensorflow/contrib/lite/testing/tflite_diff_flags.h
@@ -30,6 +30,7 @@ DiffOptions ParseTfliteDiffFlags(int* argc, char** argv) {
string input_layer_type;
string input_layer_shape;
string output_layer;
+ int32_t num_runs_per_pass = 100;
} values;
std::vector<tensorflow::Flag> flags = {
@@ -49,6 +50,8 @@ DiffOptions ParseTfliteDiffFlags(int* argc, char** argv) {
tensorflow::Flag("output_layer", &values.output_layer,
"Names of output tensors, separated by comma. Example "
"output_1,output_2"),
+ tensorflow::Flag("num_runs_per_pass", &values.num_runs_per_pass,
+ "Number of full runs in each pass."),
};
bool no_inputs = *argc == 1;
@@ -63,7 +66,8 @@ DiffOptions ParseTfliteDiffFlags(int* argc, char** argv) {
Split<string>(values.input_layer, ","),
Split<string>(values.input_layer_type, ","),
Split<string>(values.input_layer_shape, ":"),
- Split<string>(values.output_layer, ",")};
+ Split<string>(values.output_layer, ","),
+ values.num_runs_per_pass};
}
} // namespace testing
diff --git a/tensorflow/contrib/lite/testing/tflite_diff_util.cc b/tensorflow/contrib/lite/testing/tflite_diff_util.cc
index f601d3752d..19f34c0a51 100644
--- a/tensorflow/contrib/lite/testing/tflite_diff_util.cc
+++ b/tensorflow/contrib/lite/testing/tflite_diff_util.cc
@@ -25,13 +25,14 @@ limitations under the License.
namespace tflite {
namespace testing {
-bool RunDiffTest(const DiffOptions& options) {
+bool RunDiffTest(const DiffOptions& options, int num_invocations) {
std::stringstream tflite_stream;
if (!GenerateTestSpecFromTensorflowModel(
tflite_stream, options.tensorflow_model, options.tflite_model,
- options.input_layer, options.input_layer_type,
- options.input_layer_shape, options.output_layer))
+ num_invocations, options.input_layer, options.input_layer_type,
+ options.input_layer_shape, options.output_layer)) {
return false;
+ }
TfLiteDriver tflite_driver(/*use_nnapi=*/true);
tflite_driver.LoadModel(options.tflite_model);
return tflite::testing::ParseAndRunTests(&tflite_stream, &tflite_driver);
diff --git a/tensorflow/contrib/lite/testing/tflite_diff_util.h b/tensorflow/contrib/lite/testing/tflite_diff_util.h
index 326fa6c3e2..4ab2f230fd 100644
--- a/tensorflow/contrib/lite/testing/tflite_diff_util.h
+++ b/tensorflow/contrib/lite/testing/tflite_diff_util.h
@@ -40,10 +40,14 @@ struct DiffOptions {
// Names of output tensors.
// Example output_1,output_2
std::vector<string> output_layer;
+ // Number of full runs (from building interpreter to checking outputs) in
+ // each of the passes. The first pass has a single inference, while the
+ // second pass does multiple inferences back to back.
+ int num_runs_per_pass;
};
-// Run a single TensorFLow Lite diff test with a given options.
-bool RunDiffTest(const DiffOptions& options);
+// Runs a single TensorFlow Lite diff test with the given options.
+bool RunDiffTest(const DiffOptions& options, int num_invocations);
} // namespace testing
} // namespace tflite
diff --git a/tensorflow/contrib/lite/toco/BUILD b/tensorflow/contrib/lite/toco/BUILD
index be102faa4c..bbce93f61a 100644
--- a/tensorflow/contrib/lite/toco/BUILD
+++ b/tensorflow/contrib/lite/toco/BUILD
@@ -143,7 +143,6 @@ cc_library(
":toco_graphviz_dump_options",
":toco_port",
":types_proto_cc",
- "//tensorflow/cc/saved_model:tag_constants",
"//tensorflow/core:framework_internal",
"//tensorflow/core:lib",
"@com_google_absl//absl/strings",
@@ -170,41 +169,6 @@ cc_library(
)
cc_library(
- name = "toco_saved_model",
- srcs = [
- "toco_saved_model.cc",
- ],
- hdrs = [
- "toco_saved_model.h",
- ],
- visibility = ["//visibility:public"],
- deps = [
- ":model_cmdline_flags",
- ":model_flags_proto_cc",
- ":toco_flags_proto_cc",
- ":types_proto_cc",
- "//tensorflow/cc/tools:freeze_saved_model",
- "//tensorflow/core:protos_all_cc",
- "@com_google_absl//absl/strings",
- ],
-)
-
-tf_cc_test(
- name = "toco_saved_model_test",
- srcs = ["toco_saved_model_test.cc"],
- deps = [
- ":model_cmdline_flags",
- ":toco_cmdline_flags",
- ":toco_saved_model",
- "//tensorflow/cc:cc_ops",
- "//tensorflow/cc:scope",
- "//tensorflow/core:test",
- "@com_google_absl//absl/strings",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_library(
name = "graph_transformations",
srcs = [
"graph_transformations/convert_expanddims_to_reshape.cc",
@@ -238,6 +202,7 @@ cc_library(
"graph_transformations/lstm_utils.cc",
"graph_transformations/make_initial_dequantize_operator.cc",
"graph_transformations/merge_reshape_into_preceding_transpose.cc",
+ "graph_transformations/move_binary_operator_before_reshape.cc",
"graph_transformations/propagate_activation_function_into_constants.cc",
"graph_transformations/propagate_array_data_types.cc",
"graph_transformations/propagate_default_min_max.cc",
@@ -247,7 +212,7 @@ cc_library(
"graph_transformations/quantization_util.h",
"graph_transformations/quantize.cc",
"graph_transformations/quantize_weights.cc",
- "graph_transformations/read_fake_quant_min_max.cc",
+ "graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc",
"graph_transformations/remove_final_dequantize_op.cc",
"graph_transformations/remove_tensorflow_assert.cc",
"graph_transformations/remove_tensorflow_identity.cc",
@@ -280,10 +245,11 @@ cc_library(
"graph_transformations/resolve_constant_strided_slice.cc",
"graph_transformations/resolve_constant_transpose.cc",
"graph_transformations/resolve_constant_unary.cc",
- "graph_transformations/resolve_mean_attributes.cc",
+ "graph_transformations/resolve_fake_quant_args_from_vars.cc",
"graph_transformations/resolve_multiply_by_zero.cc",
"graph_transformations/resolve_pad_attributes.cc",
"graph_transformations/resolve_padv2_attributes.cc",
+ "graph_transformations/resolve_reduce_attributes.cc",
"graph_transformations/resolve_reorder_axes.cc",
"graph_transformations/resolve_reshape_attributes.cc",
"graph_transformations/resolve_slice_attributes.cc",
@@ -431,7 +397,6 @@ tf_cc_binary(
":toco_cmdline_flags",
":toco_flags_proto_cc",
":toco_port",
- ":toco_saved_model",
":toco_tooling",
":types_proto_cc",
"//tensorflow/core:lib",
diff --git a/tensorflow/contrib/lite/toco/README.md b/tensorflow/contrib/lite/toco/README.md
index ee83c7a6e3..2db6a627ab 100644
--- a/tensorflow/contrib/lite/toco/README.md
+++ b/tensorflow/contrib/lite/toco/README.md
@@ -17,11 +17,12 @@ Usage information is given in these documents:
Once an application developer has a trained TensorFlow model, TOCO will accept
that model and generate a TensorFlow Lite
[FlatBuffer](https://google.github.io/flatbuffers/) file. TOCO currently supports
-[SavedModels](https://www.tensorflow.org/guide/saved_model#using_savedmodel_with_estimators)
-and frozen graphs (models generated via
-[freeze_graph.py](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py)).
-The TensorFlow Lite FlatBuffer file can be shipped to client devices, generally
-mobile devices, where the TensorFlow Lite interpreter handles them on-device.
-This flow is represented in the diagram below.
+[SavedModels](https://www.tensorflow.org/guide/saved_model#using_savedmodel_with_estimators),
+frozen graphs (models generated via
+[freeze_graph.py](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py)),
+and `tf.Keras` model files. The TensorFlow Lite FlatBuffer file can be shipped
+to client devices, generally mobile devices, where the TensorFlow Lite
+interpreter handles it on-device. This flow is represented in the diagram
+below.
![drawing](g3doc/toco_landscape.svg)
diff --git a/tensorflow/contrib/lite/toco/args.h b/tensorflow/contrib/lite/toco/args.h
index 9f5ca66d05..aef35ad490 100644
--- a/tensorflow/contrib/lite/toco/args.h
+++ b/tensorflow/contrib/lite/toco/args.h
@@ -21,13 +21,13 @@ limitations under the License.
#include <functional>
#include <unordered_map>
#include <vector>
+#include "tensorflow/contrib/lite/toco/toco_port.h"
#if defined(PLATFORM_GOOGLE)
#include "strings/split.h"
+#include "strings/strip.h"
#endif
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
-#include "tensorflow/cc/saved_model/tag_constants.h"
-#include "tensorflow/contrib/lite/toco/toco_port.h"
#include "tensorflow/contrib/lite/toco/toco_types.h"
namespace toco {
@@ -145,8 +145,10 @@ class Arg<toco::StringMapList> final {
}
string outer_member_copy = outer_member;
absl::StripAsciiWhitespace(&outer_member);
- if (!TryStripPrefixString(outer_member, "{", &outer_member)) return false;
- if (!TryStripSuffixString(outer_member, "}", &outer_member)) return false;
+ if (!strings::TryStripPrefixString(outer_member, "{", &outer_member))
+ return false;
+ if (!strings::TryStripSuffixString(outer_member, "}", &outer_member))
+ return false;
const std::vector<string> inner_fields_vector =
absl::StrSplit(outer_member, ',');
@@ -223,7 +225,7 @@ struct ParsedTocoFlags {
Arg<string> output_file;
Arg<string> input_format = Arg<string>("TENSORFLOW_GRAPHDEF");
Arg<string> output_format = Arg<string>("TFLITE");
- Arg<string> savedmodel_tagset = Arg<string>(tensorflow::kSavedModelTagServe);
+ Arg<string> savedmodel_tagset;
// TODO(aselle): command_line_flags doesn't support doubles
Arg<float> default_ranges_min = Arg<float>(0.);
Arg<float> default_ranges_max = Arg<float>(0.);
diff --git a/tensorflow/contrib/lite/toco/export_tensorflow.cc b/tensorflow/contrib/lite/toco/export_tensorflow.cc
index 6b78f1c05e..17375d19be 100644
--- a/tensorflow/contrib/lite/toco/export_tensorflow.cc
+++ b/tensorflow/contrib/lite/toco/export_tensorflow.cc
@@ -145,7 +145,7 @@ void ConvertFloatTensorConst(const string& name, const Shape& input_shape,
if (HasAlreadyExportedConst(name, *tensorflow_graph)) {
return;
}
- auto* const_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* const_op = tensorflow_graph->add_node();
const_op->set_op("Const");
const_op->set_name(name);
(*const_op->mutable_attr())["dtype"].set_type(DT_FLOAT);
@@ -162,7 +162,7 @@ void ConvertFloatTensorConst(const string& name, const Shape& input_shape,
if (HasAlreadyExportedConst(name, *tensorflow_graph)) {
return;
}
- auto* const_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* const_op = tensorflow_graph->add_node();
const_op->set_op("Const");
const_op->set_name(name);
(*const_op->mutable_attr())["dtype"].set_type(DT_FLOAT);
@@ -178,7 +178,7 @@ void ConvertFloatTensorConst(const Model& model, const string& name,
if (HasAlreadyExportedConst(name, *tensorflow_graph)) {
return;
}
- auto* const_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* const_op = tensorflow_graph->add_node();
const_op->set_op("Const");
const_op->set_name(name);
(*const_op->mutable_attr())["dtype"].set_type(DT_FLOAT);
@@ -199,7 +199,7 @@ void ConvertFloatTensorConst(const Model& model, const string& name,
if (HasAlreadyExportedConst(name, *tensorflow_graph)) {
return;
}
- auto* const_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* const_op = tensorflow_graph->add_node();
const_op->set_op("Const");
const_op->set_name(name);
(*const_op->mutable_attr())["dtype"].set_type(DT_FLOAT);
@@ -222,7 +222,7 @@ void ConvertIntTensorConst(const Model& model, const string& name,
}
CHECK(model.HasArray(name));
const auto& array = model.GetArray(name);
- auto* const_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* const_op = tensorflow_graph->add_node();
const_op->set_op("Const");
const_op->set_name(name);
(*const_op->mutable_attr())["dtype"].set_type(DT_INT32);
@@ -245,7 +245,7 @@ void CreateIntTensorConst(const string& name, const std::vector<int32>& data,
if (HasAlreadyExportedConst(name, *tensorflow_graph)) {
return;
}
- auto* const_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* const_op = tensorflow_graph->add_node();
const_op->set_op("Const");
const_op->set_name(name);
(*const_op->mutable_attr())["dtype"].set_type(DT_INT32);
@@ -268,7 +268,7 @@ void CreateMatrixShapeTensorConst(const string& name, int rows, int cols,
if (HasAlreadyExportedConst(name, *tensorflow_graph)) {
return;
}
- auto* const_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* const_op = tensorflow_graph->add_node();
const_op->set_op("Const");
const_op->set_name(name);
(*const_op->mutable_attr())["dtype"].set_type(DT_INT32);
@@ -286,7 +286,7 @@ void CreateDummyConcatDimTensorConst(const string& name, int dim,
if (HasAlreadyExportedConst(name, *tensorflow_graph)) {
return;
}
- auto* const_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* const_op = tensorflow_graph->add_node();
const_op->set_op("Const");
const_op->set_name(name);
(*const_op->mutable_attr())["dtype"].set_type(DT_INT32);
@@ -301,7 +301,7 @@ void CreateReshapeShapeTensorConst(const string& name,
if (HasAlreadyExportedConst(name, *tensorflow_graph)) {
return;
}
- auto* const_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* const_op = tensorflow_graph->add_node();
const_op->set_op("Const");
const_op->set_name(name);
(*const_op->mutable_attr())["dtype"].set_type(DT_INT32);
@@ -341,7 +341,7 @@ void ConvertConvOperator(const Model& model, const ConvOperator& src_op,
conv_output += "/conv";
}
- auto* conv2d_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* conv2d_op = tensorflow_graph->add_node();
conv2d_op->set_op("Conv2D");
conv2d_op->set_name(conv_output);
*conv2d_op->add_input() = src_op.inputs[0];
@@ -377,7 +377,7 @@ void ConvertConvOperator(const Model& model, const ConvOperator& src_op,
(*conv2d_op->mutable_attr())["padding"].set_s(padding);
if (has_bias) {
- auto* biasadd_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* biasadd_op = tensorflow_graph->add_node();
biasadd_op->set_op("BiasAdd");
biasadd_op->set_name(src_op.outputs[0]);
biasadd_op->add_input(conv_output);
@@ -409,7 +409,7 @@ void ConvertDepthwiseConvOperator(const Model& model,
conv_output += "/conv";
}
- auto* dc2d_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* dc2d_op = tensorflow_graph->add_node();
dc2d_op->set_op("DepthwiseConv2dNative");
dc2d_op->set_name(conv_output);
*dc2d_op->add_input() = src_op.inputs[0];
@@ -457,7 +457,7 @@ void ConvertDepthwiseConvOperator(const Model& model,
(*dc2d_op->mutable_attr())["padding"].set_s(padding);
if (has_bias) {
- auto* biasadd_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* biasadd_op = tensorflow_graph->add_node();
biasadd_op->set_op("BiasAdd");
biasadd_op->set_name(src_op.outputs[0]);
biasadd_op->add_input(conv_output);
@@ -482,7 +482,7 @@ void ConvertDepthwiseConvOperator(const Model& model,
void ConvertTransposeConvOperator(const Model& model,
const TransposeConvOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* conv2d_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* conv2d_op = tensorflow_graph->add_node();
conv2d_op->set_op("Conv2DBackpropInput");
conv2d_op->set_name(src_op.outputs[0]);
*conv2d_op->add_input() = src_op.inputs[0];
@@ -514,7 +514,7 @@ void ConvertTransposeConvOperator(const Model& model,
void ConvertDepthToSpaceOperator(const Model& model,
const DepthToSpaceOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* op = tensorflow_graph->add_node();
op->set_op("DepthToSpace");
op->set_name(src_op.outputs[0]);
*op->add_input() = src_op.inputs[0];
@@ -525,7 +525,7 @@ void ConvertDepthToSpaceOperator(const Model& model,
void ConvertSpaceToDepthOperator(const Model& model,
const SpaceToDepthOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* op = tensorflow_graph->add_node();
op->set_op("SpaceToDepth");
op->set_name(src_op.outputs[0]);
*op->add_input() = src_op.inputs[0];
@@ -546,7 +546,7 @@ void ConvertFullyConnectedOperator(const Model& model,
CHECK_EQ(fc_weights_shape.dimensions_count(), 2);
CreateMatrixShapeTensorConst(reshape_shape, fc_weights_shape.dims(1), -1,
tensorflow_graph);
- auto* reshape_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* reshape_op = tensorflow_graph->add_node();
reshape_op->set_op("Reshape");
reshape_op->set_name(reshape_output);
reshape_op->add_input(src_op.inputs[0]);
@@ -568,7 +568,7 @@ void ConvertFullyConnectedOperator(const Model& model,
const string transpose_perm =
AvailableArrayName(model, transpose_output + "/perm");
CreateIntTensorConst(transpose_perm, {1, 0}, {2}, tensorflow_graph);
- auto transpose_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* transpose_op = tensorflow_graph->add_node();
transpose_op->set_op("Transpose");
transpose_op->set_name(transpose_output);
*transpose_op->add_input() = src_op.inputs[1];
@@ -577,7 +577,7 @@ void ConvertFullyConnectedOperator(const Model& model,
GetTensorFlowDataType(model, src_op.inputs[1]));
(*transpose_op->mutable_attr())["Tperm"].set_type(DT_INT32);
- auto* matmul_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* matmul_op = tensorflow_graph->add_node();
matmul_op->set_op("MatMul");
matmul_op->set_name(matmul_output);
*matmul_op->add_input() = reshape_output;
@@ -590,7 +590,7 @@ void ConvertFullyConnectedOperator(const Model& model,
// Add the bias, if it exists.
if (has_bias) {
- auto* biasadd_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* biasadd_op = tensorflow_graph->add_node();
biasadd_op->set_op("BiasAdd");
biasadd_op->set_name(src_op.outputs[0]);
biasadd_op->add_input(matmul_output);
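Taken together, this lowering is: flatten the input to [-1, in], transpose the weights with perm {1, 0}, MatMul, then BiasAdd. A NumPy sketch, under the assumption that the TOCO weight layout is [output_depth, input_depth]:

    import numpy as np

    def fully_connected(x, w, b):
      # Reshape -> Transpose -> MatMul -> BiasAdd, with w stored [out, in].
      flat = x.reshape(-1, w.shape[1])
      return flat @ w.T + b
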
@@ -615,7 +615,7 @@ void ConvertFullyConnectedOperator(const Model& model,
void ConvertAddOperator(const Model& model, const AddOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* add_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* add_op = tensorflow_graph->add_node();
add_op->set_op("Add");
add_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
@@ -626,7 +626,7 @@ void ConvertAddOperator(const Model& model, const AddOperator& src_op,
void ConvertAddNOperator(const Model& model, const AddNOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* add_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* add_op = tensorflow_graph->add_node();
add_op->set_op("AddN");
add_op->set_name(src_op.outputs[0]);
for (const auto& input : src_op.inputs) {
@@ -638,7 +638,7 @@ void ConvertAddNOperator(const Model& model, const AddNOperator& src_op,
void ConvertMulOperator(const Model& model, const MulOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* add_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* add_op = tensorflow_graph->add_node();
add_op->set_op("Mul");
add_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
@@ -649,7 +649,7 @@ void ConvertMulOperator(const Model& model, const MulOperator& src_op,
void ConvertReluOperator(const ReluOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* relu_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* relu_op = tensorflow_graph->add_node();
relu_op->set_op("Relu");
relu_op->set_name(src_op.outputs[0]);
*relu_op->add_input() = src_op.inputs[0];
@@ -662,7 +662,7 @@ void ConvertRelu1Operator(const Relu1Operator& src_op,
const string min_bounds = src_op.outputs[0] + "/min_bounds";
const string max_output = src_op.outputs[0] + "/max_output";
- auto* max_bounds_const_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* max_bounds_const_op = tensorflow_graph->add_node();
max_bounds_const_op->set_op("Const");
max_bounds_const_op->set_name(max_bounds);
(*max_bounds_const_op->mutable_attr())["dtype"].set_type(DT_FLOAT);
@@ -671,7 +671,7 @@ void ConvertRelu1Operator(const Relu1Operator& src_op,
max_bounds_const_op_tensor->set_dtype(DT_FLOAT);
max_bounds_const_op_tensor->add_float_val(-1.0f);
- auto* min_bounds_const_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* min_bounds_const_op = tensorflow_graph->add_node();
min_bounds_const_op->set_op("Const");
min_bounds_const_op->set_name(min_bounds);
(*min_bounds_const_op->mutable_attr())["dtype"].set_type(DT_FLOAT);
@@ -680,14 +680,14 @@ void ConvertRelu1Operator(const Relu1Operator& src_op,
min_bounds_const_op_tensor->set_dtype(DT_FLOAT);
min_bounds_const_op_tensor->add_float_val(1.0f);
- auto* max_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* max_op = tensorflow_graph->add_node();
max_op->set_op("Maximum");
max_op->set_name(max_output);
*max_op->add_input() = src_op.inputs[0];
*max_op->add_input() = max_bounds;
(*max_op->mutable_attr())["T"].set_type(DT_FLOAT);
- auto* min_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* min_op = tensorflow_graph->add_node();
min_op->set_op("Minimum");
min_op->set_name(src_op.outputs[0]);
*min_op->add_input() = max_output;
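Despite the slightly confusing constant names (max_bounds holds -1.0 and min_bounds holds 1.0), the emitted Maximum/Minimum pair is simply a clip to [-1, 1]:

    import numpy as np

    def relu1(x):
      # Maximum(x, -1.0) followed by Minimum(., 1.0) == clip to [-1, 1].
      return np.minimum(np.maximum(x, -1.0), 1.0)

    print(relu1(np.array([-3.0, -0.5, 0.5, 3.0])))  # [-1.  -0.5  0.5  1. ]
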
@@ -697,7 +697,7 @@ void ConvertRelu1Operator(const Relu1Operator& src_op,
void ConvertRelu6Operator(const Relu6Operator& src_op,
GraphDef* tensorflow_graph) {
- auto* relu_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* relu_op = tensorflow_graph->add_node();
relu_op->set_op("Relu6");
relu_op->set_name(src_op.outputs[0]);
*relu_op->add_input() = src_op.inputs[0];
@@ -705,7 +705,7 @@ void ConvertRelu6Operator(const Relu6Operator& src_op,
}
void ConvertLogOperator(const LogOperator& src_op, GraphDef* tensorflow_graph) {
- auto* op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* op = tensorflow_graph->add_node();
op->set_op("Log");
op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 1);
@@ -715,7 +715,7 @@ void ConvertLogOperator(const LogOperator& src_op, GraphDef* tensorflow_graph) {
void ConvertLogisticOperator(const LogisticOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* relu_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* relu_op = tensorflow_graph->add_node();
relu_op->set_op("Sigmoid");
relu_op->set_name(src_op.outputs[0]);
*relu_op->add_input() = src_op.inputs[0];
@@ -724,7 +724,7 @@ void ConvertLogisticOperator(const LogisticOperator& src_op,
void ConvertTanhOperator(const TanhOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* tanh_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* tanh_op = tensorflow_graph->add_node();
tanh_op->set_op("Tanh");
tanh_op->set_name(src_op.outputs[0]);
*tanh_op->add_input() = src_op.inputs[0];
@@ -744,7 +744,7 @@ void ConvertSoftmaxOperator(const Model& model, const SoftmaxOperator& src_op,
const string softmax_size = src_op.outputs[0] + "/softmax_insert_size";
softmax_input = reshape_output;
- auto* reshape_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* reshape_op = tensorflow_graph->add_node();
reshape_op->set_op("Reshape");
reshape_op->set_name(reshape_output);
*reshape_op->add_input() = src_op.inputs[0];
@@ -761,7 +761,7 @@ void ConvertSoftmaxOperator(const Model& model, const SoftmaxOperator& src_op,
CreateReshapeShapeTensorConst(softmax_size, shape_data, tensorflow_graph);
}
- auto* softmax_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* softmax_op = tensorflow_graph->add_node();
softmax_op->set_op("Softmax");
softmax_op->set_name(src_op.outputs[0]);
*softmax_op->add_input() = softmax_input;
@@ -785,7 +785,7 @@ void ConvertLogSoftmaxOperator(const Model& model,
const string softmax_size = src_op.outputs[0] + "/log_softmax_insert_size";
softmax_input = reshape_output;
- auto* reshape_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* reshape_op = tensorflow_graph->add_node();
reshape_op->set_op("Reshape");
reshape_op->set_name(reshape_output);
*reshape_op->add_input() = src_op.inputs[0];
@@ -802,7 +802,7 @@ void ConvertLogSoftmaxOperator(const Model& model,
CreateReshapeShapeTensorConst(softmax_size, shape_data, tensorflow_graph);
}
- auto* log_softmax_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* log_softmax_op = tensorflow_graph->add_node();
log_softmax_op->set_op("LogSoftmax");
log_softmax_op->set_name(src_op.outputs[0]);
*log_softmax_op->add_input() = softmax_input;
@@ -817,7 +817,7 @@ void ConvertL2NormalizationOperator(const L2NormalizationOperator& src_op,
const string rsqrt_output = src_op.outputs[0] + "/rsqrt";
const string rsqrt_tiled_output = src_op.outputs[0] + "/rsqrt_tiled";
- auto* sum_reduction_indices_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* sum_reduction_indices_op = tensorflow_graph->add_node();
sum_reduction_indices_op->set_op("Const");
sum_reduction_indices_op->set_name(sum_reduction_indices);
(*sum_reduction_indices_op->mutable_attr())["dtype"].set_type(DT_INT32);
@@ -831,26 +831,26 @@ void ConvertL2NormalizationOperator(const L2NormalizationOperator& src_op,
sum_reduction_indices_tensor->add_int_val(0);
sum_reduction_indices_tensor->add_int_val(1);
- auto* square_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* square_op = tensorflow_graph->add_node();
square_op->set_op("Square");
square_op->set_name(square_output);
*square_op->add_input() = src_op.inputs[0];
(*square_op->mutable_attr())["T"].set_type(DT_FLOAT);
- auto* sum_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* sum_op = tensorflow_graph->add_node();
sum_op->set_op("Sum");
sum_op->set_name(sum_output);
*sum_op->add_input() = square_output;
*sum_op->add_input() = sum_reduction_indices;
(*sum_op->mutable_attr())["T"].set_type(DT_FLOAT);
- auto* rsqrt_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* rsqrt_op = tensorflow_graph->add_node();
rsqrt_op->set_op("Rsqrt");
rsqrt_op->set_name(rsqrt_output);
*rsqrt_op->add_input() = sum_output;
(*rsqrt_op->mutable_attr())["T"].set_type(DT_FLOAT);
- auto* mul_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* mul_op = tensorflow_graph->add_node();
mul_op->set_op("Mul");
mul_op->set_name(src_op.outputs[0]);
*mul_op->add_input() = src_op.inputs[0];
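The Square -> Sum -> Rsqrt -> Mul chain is the textbook L2 normalization y = x / sqrt(sum(x^2)), with the sum taken over the reduction axes (0, 1) built into the constant above. A NumPy sketch of the same decomposition:

    import numpy as np

    def l2_normalize(x):
      sum_sq = np.square(x).sum(axis=(0, 1), keepdims=True)  # Square, Sum
      return x * (1.0 / np.sqrt(sum_sq))                     # Rsqrt, Mul
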
@@ -861,7 +861,7 @@ void ConvertL2NormalizationOperator(const L2NormalizationOperator& src_op,
void ConvertLocalResponseNormalizationOperator(
const LocalResponseNormalizationOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* lrn_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* lrn_op = tensorflow_graph->add_node();
lrn_op->set_op("LRN");
lrn_op->set_name(src_op.outputs[0]);
*lrn_op->add_input() = src_op.inputs[0];
@@ -873,7 +873,7 @@ void ConvertLocalResponseNormalizationOperator(
void ConvertFakeQuantOperator(const FakeQuantOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* fakequant_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* fakequant_op = tensorflow_graph->add_node();
fakequant_op->set_op("FakeQuantWithMinMaxArgs");
fakequant_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 1);
@@ -884,11 +884,14 @@ void ConvertFakeQuantOperator(const FakeQuantOperator& src_op,
if (src_op.num_bits) {
(*fakequant_op->mutable_attr())["num_bits"].set_i(src_op.num_bits);
}
+ if (src_op.narrow_range) {
+ (*fakequant_op->mutable_attr())["narrow_range"].set_b(src_op.narrow_range);
+ }
}
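narrow_range is serialized only when set; when true, FakeQuantWithMinMaxArgs quantizes into [1, 2^num_bits - 1] instead of [0, 2^num_bits - 1]. On the TensorFlow side the attribute corresponds to the op's keyword argument, roughly (graph-mode sketch):

    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None])
    # Quantize into [1, 255] rather than [0, 255] for num_bits=8.
    y = tf.fake_quant_with_min_max_args(
        x, min=-1.0, max=1.0, num_bits=8, narrow_range=True)
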
void ConvertMaxPoolOperator(const MaxPoolOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* maxpool_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* maxpool_op = tensorflow_graph->add_node();
maxpool_op->set_op("MaxPool");
maxpool_op->set_name(src_op.outputs[0]);
*maxpool_op->add_input() = src_op.inputs[0];
@@ -916,7 +919,7 @@ void ConvertMaxPoolOperator(const MaxPoolOperator& src_op,
void ConvertAveragePoolOperator(const AveragePoolOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* avgpool_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* avgpool_op = tensorflow_graph->add_node();
avgpool_op->set_op("AvgPool");
avgpool_op->set_name(src_op.outputs[0]);
*avgpool_op->add_input() = src_op.inputs[0];
@@ -945,7 +948,7 @@ void ConvertAveragePoolOperator(const AveragePoolOperator& src_op,
void ConvertConcatenationOperator(const Model& model,
const ConcatenationOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* dc_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* dc_op = tensorflow_graph->add_node();
dc_op->set_op("ConcatV2");
dc_op->set_name(src_op.outputs[0]);
const string dummy_axis = src_op.outputs[0] + "/axis";
@@ -963,7 +966,7 @@ void ConvertConcatenationOperator(const Model& model,
void ConvertTensorFlowReshapeOperator(const Model& model,
const TensorFlowReshapeOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* reshape_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* reshape_op = tensorflow_graph->add_node();
reshape_op->set_op("Reshape");
reshape_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
@@ -985,7 +988,7 @@ void ConvertL2PoolOperator(const L2PoolOperator& src_op,
const string square_output = src_op.outputs[0] + "/square";
const string avgpool_output = src_op.outputs[0] + "/avgpool";
- auto* square_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* square_op = tensorflow_graph->add_node();
square_op->set_op("Square");
square_op->set_name(square_output);
*square_op->add_input() = src_op.inputs[0];
@@ -1000,7 +1003,7 @@ void ConvertL2PoolOperator(const L2PoolOperator& src_op,
LOG(FATAL) << "Bad padding (only SAME and VALID are supported)";
}
- auto* avgpool_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* avgpool_op = tensorflow_graph->add_node();
avgpool_op->set_op("AvgPool");
avgpool_op->set_name(avgpool_output);
*avgpool_op->add_input() = square_output;
@@ -1018,7 +1021,7 @@ void ConvertL2PoolOperator(const L2PoolOperator& src_op,
ksize.mutable_list()->add_i(src_op.kwidth);
ksize.mutable_list()->add_i(1);
- auto* sqrt_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* sqrt_op = tensorflow_graph->add_node();
sqrt_op->set_op("Sqrt");
sqrt_op->set_name(src_op.outputs[0]);
*sqrt_op->add_input() = avgpool_output;
@@ -1027,7 +1030,7 @@ void ConvertL2PoolOperator(const L2PoolOperator& src_op,
void ConvertSquareOperator(const TensorFlowSquareOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* square_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* square_op = tensorflow_graph->add_node();
square_op->set_op("Square");
square_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 1);
@@ -1037,7 +1040,7 @@ void ConvertSquareOperator(const TensorFlowSquareOperator& src_op,
void ConvertSqrtOperator(const TensorFlowSqrtOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* sqrt_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* sqrt_op = tensorflow_graph->add_node();
sqrt_op->set_op("Sqrt");
sqrt_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 1);
@@ -1048,19 +1051,20 @@ void ConvertSqrtOperator(const TensorFlowSqrtOperator& src_op,
void ConvertRsqrtOperator(const Model& model,
const TensorFlowRsqrtOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* rsqrt_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* rsqrt_op = tensorflow_graph->add_node();
rsqrt_op->set_op("Rsqrt");
rsqrt_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 1);
*rsqrt_op->add_input() = src_op.inputs[0];
- const auto data_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType data_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*rsqrt_op->mutable_attr())["T"].set_type(data_type);
}
void ConvertSplitOperator(const Model& model,
const TensorFlowSplitOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* split_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* split_op = tensorflow_graph->add_node();
split_op->set_op("Split");
split_op->set_name(src_op.outputs[0]);
for (const auto& input : src_op.inputs) {
@@ -1081,7 +1085,7 @@ void ConvertSplitOperator(const Model& model,
void ConvertCastOperator(const Model& model, const CastOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* cast_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* cast_op = tensorflow_graph->add_node();
cast_op->set_op("Cast");
cast_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 1);
@@ -1095,7 +1099,7 @@ void ConvertCastOperator(const Model& model, const CastOperator& src_op,
void ConvertFloorOperator(const Model& model, const FloorOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* floor_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* floor_op = tensorflow_graph->add_node();
floor_op->set_op("Floor");
floor_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 1);
@@ -1105,7 +1109,7 @@ void ConvertFloorOperator(const Model& model, const FloorOperator& src_op,
void ConvertGatherOperator(const Model& model, const GatherOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* gather_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* gather_op = tensorflow_graph->add_node();
gather_op->set_op("Gather");
gather_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
@@ -1113,13 +1117,14 @@ void ConvertGatherOperator(const Model& model, const GatherOperator& src_op,
*gather_op->add_input() = src_op.inputs[1];
(*gather_op->mutable_attr())["Tindices"].set_type(DT_INT32);
- const auto params_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType params_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*gather_op->mutable_attr())["Tparams"].set_type(params_type);
}
void ConvertArgMaxOperator(const Model& model, const ArgMaxOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* argmax_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* argmax_op = tensorflow_graph->add_node();
argmax_op->set_op("ArgMax");
argmax_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
@@ -1133,10 +1138,26 @@ void ConvertArgMaxOperator(const Model& model, const ArgMaxOperator& src_op,
GetTensorFlowDataType(model, src_op.outputs[0]));
}
+void ConvertArgMinOperator(const Model& model, const ArgMinOperator& src_op,
+ GraphDef* tensorflow_graph) {
+ tensorflow::NodeDef* argmin_op = tensorflow_graph->add_node();
+ argmin_op->set_op("ArgMin");
+ argmin_op->set_name(src_op.outputs[0]);
+ CHECK_EQ(src_op.inputs.size(), 2);
+ *argmin_op->add_input() = src_op.inputs[0];
+ *argmin_op->add_input() = src_op.inputs[1];
+ (*argmin_op->mutable_attr())["T"].set_type(
+ GetTensorFlowDataType(model, src_op.inputs[0]));
+ (*argmin_op->mutable_attr())["Tidx"].set_type(
+ GetTensorFlowDataType(model, src_op.inputs[1]));
+ (*argmin_op->mutable_attr())["output_type"].set_type(
+ GetTensorFlowDataType(model, src_op.outputs[0]));
+}
+
void ConvertTransposeOperator(const Model& model,
const TransposeOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* transpose_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* transpose_op = tensorflow_graph->add_node();
transpose_op->set_op("Transpose");
transpose_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
@@ -1151,7 +1172,7 @@ void ConvertTransposeOperator(const Model& model,
void ConvertTensorFlowShapeOperator(const Model& model,
const TensorFlowShapeOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* shape_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* shape_op = tensorflow_graph->add_node();
shape_op->set_op("Shape");
shape_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 1);
@@ -1164,7 +1185,7 @@ void ConvertTensorFlowShapeOperator(const Model& model,
void ConvertRankOperator(const Model& model, const RankOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* rank_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* rank_op = tensorflow_graph->add_node();
rank_op->set_op("Rank");
rank_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 1);
@@ -1175,7 +1196,7 @@ void ConvertRankOperator(const Model& model, const RankOperator& src_op,
void ConvertRangeOperator(const Model& model, const RangeOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* range_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* range_op = tensorflow_graph->add_node();
range_op->set_op("Range");
range_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 3);
@@ -1188,7 +1209,7 @@ void ConvertRangeOperator(const Model& model, const RangeOperator& src_op,
void ConvertStackOperator(const Model& model, const StackOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* stack_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* stack_op = tensorflow_graph->add_node();
stack_op->set_op("Stack");
stack_op->set_name(src_op.outputs[0]);
for (const auto& input : src_op.inputs) {
@@ -1201,7 +1222,7 @@ void ConvertStackOperator(const Model& model, const StackOperator& src_op,
void ConvertFillOperator(const Model& model, const FillOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* fill_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* fill_op = tensorflow_graph->add_node();
fill_op->set_op("Fill");
fill_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
@@ -1215,7 +1236,7 @@ void ConvertFillOperator(const Model& model, const FillOperator& src_op,
void ConvertFloorDivOperator(const Model& model, const FloorDivOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* floor_div_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* floor_div_op = tensorflow_graph->add_node();
floor_div_op->set_op("FloorDiv");
floor_div_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
@@ -1228,7 +1249,7 @@ void ConvertFloorDivOperator(const Model& model, const FloorDivOperator& src_op,
void ConvertExpandDimsOperator(const Model& model,
const ExpandDimsOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* expand_dims_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* expand_dims_op = tensorflow_graph->add_node();
expand_dims_op->set_op("ExpandDims");
expand_dims_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
@@ -1243,7 +1264,7 @@ void ConvertExpandDimsOperator(const Model& model,
void ConvertResizeBilinearOperator(const Model& model,
const ResizeBilinearOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* resize_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* resize_op = tensorflow_graph->add_node();
resize_op->set_op("ResizeBilinear");
resize_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
@@ -1293,7 +1314,7 @@ void ConvertLstmCellOperator(const Model& model, const LstmCellOperator& src_op,
// works the same since the tensor has the same underlying data layout.
const string axis_output = concat_output + "/axis";
CreateDummyConcatDimTensorConst(axis_output, axis, tensorflow_graph);
- auto* concat_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* concat_op = tensorflow_graph->add_node();
concat_op->set_op("ConcatV2");
concat_op->set_name(concat_output);
*concat_op->add_input() = src_op.inputs[LstmCellOperator::DATA_INPUT];
@@ -1321,7 +1342,7 @@ void ConvertLstmCellOperator(const Model& model, const LstmCellOperator& src_op,
// Fully connected matrix multiply
const string matmul_output = base + "MatMul";
- auto* matmul_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* matmul_op = tensorflow_graph->add_node();
matmul_op->set_op("MatMul");
matmul_op->set_name(matmul_output);
*matmul_op->add_input() = concat_output;
@@ -1350,7 +1371,7 @@ void ConvertLstmCellOperator(const Model& model, const LstmCellOperator& src_op,
// Add biases
string biasadd_output = base + "BiasAdd";
- auto* biasadd_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* biasadd_op = tensorflow_graph->add_node();
biasadd_op->set_op("BiasAdd");
biasadd_op->set_name(biasadd_output);
biasadd_op->add_input(matmul_output);
@@ -1363,7 +1384,7 @@ void ConvertLstmCellOperator(const Model& model, const LstmCellOperator& src_op,
// The dimension is the same as the concatenation dimension
CreateDummyConcatDimTensorConst(split_dim_output, axis, tensorflow_graph);
string split_output = base + "split";
- auto* split_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* split_op = tensorflow_graph->add_node();
split_op->set_op("Split");
split_op->set_name(split_output);
*split_op->add_input() = split_dim_output;
@@ -1373,21 +1394,21 @@ void ConvertLstmCellOperator(const Model& model, const LstmCellOperator& src_op,
// Activation functions and memory computations
const string tanh_0_output = base + "Tanh";
- auto* tanh_0_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* tanh_0_op = tensorflow_graph->add_node();
tanh_0_op->set_op("Tanh");
tanh_0_op->set_name(tanh_0_output);
*tanh_0_op->add_input() = split_output + ":1";
(*tanh_0_op->mutable_attr())["T"].set_type(DT_FLOAT);
const string sigmoid_1_output = base + "Sigmoid_1";
- auto* logistic_1_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* logistic_1_op = tensorflow_graph->add_node();
logistic_1_op->set_op("Sigmoid");
logistic_1_op->set_name(sigmoid_1_output);
*logistic_1_op->add_input() = split_output;
(*logistic_1_op->mutable_attr())["T"].set_type(DT_FLOAT);
const string mul_1_output = base + "mul_1";
- auto* mul_1_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* mul_1_op = tensorflow_graph->add_node();
mul_1_op->set_op("Mul");
mul_1_op->set_name(mul_1_output);
*mul_1_op->add_input() = sigmoid_1_output;
@@ -1395,21 +1416,21 @@ void ConvertLstmCellOperator(const Model& model, const LstmCellOperator& src_op,
(*mul_1_op->mutable_attr())["T"].set_type(DT_FLOAT);
const string sigmoid_0_output = base + "Sigmoid";
- auto* logistic_2_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* logistic_2_op = tensorflow_graph->add_node();
logistic_2_op->set_op("Sigmoid");
logistic_2_op->set_name(sigmoid_0_output);
*logistic_2_op->add_input() = split_output + ":2";
(*logistic_2_op->mutable_attr())["T"].set_type(DT_FLOAT);
const string sigmoid_2_output = base + "Sigmoid_2";
- auto* logistic_3_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* logistic_3_op = tensorflow_graph->add_node();
logistic_3_op->set_op("Sigmoid");
logistic_3_op->set_name(sigmoid_2_output);
*logistic_3_op->add_input() = split_output + ":3";
(*logistic_3_op->mutable_attr())["T"].set_type(DT_FLOAT);
const string mul_0_output = base + "mul";
- auto* mul_0_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* mul_0_op = tensorflow_graph->add_node();
mul_0_op->set_op("Mul");
mul_0_op->set_name(mul_0_output);
*mul_0_op->add_input() = src_op.inputs[LstmCellOperator::PREV_STATE_INPUT];
@@ -1417,7 +1438,7 @@ void ConvertLstmCellOperator(const Model& model, const LstmCellOperator& src_op,
(*mul_0_op->mutable_attr())["T"].set_type(DT_FLOAT);
const string add_1_output = src_op.outputs[LstmCellOperator::STATE_OUTPUT];
- auto* add_1_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* add_1_op = tensorflow_graph->add_node();
add_1_op->set_op("Add");
add_1_op->set_name(add_1_output);
*add_1_op->add_input() = mul_0_output;
@@ -1425,14 +1446,14 @@ void ConvertLstmCellOperator(const Model& model, const LstmCellOperator& src_op,
(*add_1_op->mutable_attr())["T"].set_type(DT_FLOAT);
const string tanh_1_output = base + "Tanh_1";
- auto* tanh_1_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* tanh_1_op = tensorflow_graph->add_node();
tanh_1_op->set_op("Tanh");
tanh_1_op->set_name(tanh_1_output);
*tanh_1_op->add_input() = add_1_output;
(*tanh_1_op->mutable_attr())["T"].set_type(DT_FLOAT);
const string mul_2_output = src_op.outputs[LstmCellOperator::ACTIV_OUTPUT];
- auto* mul_2_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* mul_2_op = tensorflow_graph->add_node();
mul_2_op->set_op("Mul");
mul_2_op->set_name(mul_2_output);
*mul_2_op->add_input() = tanh_1_output;
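Read end to end, the emitted subgraph is the standard fused LSTM cell: one Concat/MatMul/BiasAdd produces four gate pre-activations, Split separates them (slot 0 = input gate, 1 = candidate, 2 = forget gate, 3 = output gate), and the state/activation outputs follow the usual gate algebra. A NumPy sketch of that wiring, assuming weights stored [4 * depth, input + depth]:

    import numpy as np

    def sigmoid(z):
      return 1.0 / (1.0 + np.exp(-z))

    def lstm_cell(x, prev_activ, prev_state, w, b):
      z = np.concatenate([x, prev_activ], axis=-1) @ w.T + b   # Concat/MatMul/BiasAdd
      i, g, f, o = np.split(z, 4, axis=-1)                     # Split into 4 slots
      state = sigmoid(f) * prev_state + sigmoid(i) * np.tanh(g)  # mul_0 + mul_1
      activ = sigmoid(o) * np.tanh(state)                        # mul_2
      return activ, state
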
@@ -1443,14 +1464,15 @@ void ConvertLstmCellOperator(const Model& model, const LstmCellOperator& src_op,
void ConvertSpaceToBatchNDOperator(const Model& model,
const SpaceToBatchNDOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* new_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* new_op = tensorflow_graph->add_node();
new_op->set_op("SpaceToBatchND");
new_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 3);
*new_op->add_input() = src_op.inputs[0];
*new_op->add_input() = src_op.inputs[1];
*new_op->add_input() = src_op.inputs[2];
- const auto params_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType params_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*new_op->mutable_attr())["T"].set_type(params_type);
(*new_op->mutable_attr())["Tblock_shape"].set_type(DT_INT32);
(*new_op->mutable_attr())["Tpaddings"].set_type(DT_INT32);
@@ -1459,14 +1481,15 @@ void ConvertSpaceToBatchNDOperator(const Model& model,
void ConvertBatchToSpaceNDOperator(const Model& model,
const BatchToSpaceNDOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* new_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* new_op = tensorflow_graph->add_node();
new_op->set_op("BatchToSpaceND");
new_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 3);
*new_op->add_input() = src_op.inputs[0];
*new_op->add_input() = src_op.inputs[1];
*new_op->add_input() = src_op.inputs[2];
- const auto params_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType params_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*new_op->mutable_attr())["T"].set_type(params_type);
(*new_op->mutable_attr())["Tblock_shape"].set_type(DT_INT32);
(*new_op->mutable_attr())["Tcrops"].set_type(DT_INT32);
@@ -1474,18 +1497,19 @@ void ConvertBatchToSpaceNDOperator(const Model& model,
void ConvertPadOperator(const Model& model, const PadOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* new_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* new_op = tensorflow_graph->add_node();
new_op->set_op("Pad");
new_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
*new_op->add_input() = src_op.inputs[0];
*new_op->add_input() = src_op.inputs[1];
- const auto params_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType params_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*new_op->mutable_attr())["T"].set_type(params_type);
// Create the params tensor.
- auto* params_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* params_op = tensorflow_graph->add_node();
params_op->set_op("Const");
params_op->set_name(src_op.inputs[1]);
(*params_op->mutable_attr())["dtype"].set_type(DT_INT32);
@@ -1504,7 +1528,7 @@ void ConvertPadOperator(const Model& model, const PadOperator& src_op,
void ConvertPadV2Operator(const Model& model, const PadV2Operator& src_op,
GraphDef* tensorflow_graph) {
- auto* new_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* new_op = tensorflow_graph->add_node();
new_op->set_op("PadV2");
new_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 3);
@@ -1512,11 +1536,12 @@ void ConvertPadV2Operator(const Model& model, const PadV2Operator& src_op,
*new_op->add_input() = src_op.inputs[1];
*new_op->add_input() = src_op.inputs[2];
- const auto params_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType params_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*new_op->mutable_attr())["T"].set_type(params_type);
// Create the params tensor.
- auto* params_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* params_op = tensorflow_graph->add_node();
params_op->set_op("Const");
params_op->set_name(src_op.inputs[1]);
(*params_op->mutable_attr())["dtype"].set_type(DT_INT32);
@@ -1535,7 +1560,7 @@ void ConvertPadV2Operator(const Model& model, const PadV2Operator& src_op,
void CreateSliceInput(const string& input_name, const std::vector<int>& values,
GraphDef* tensorflow_graph) {
- auto* params_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* params_op = tensorflow_graph->add_node();
params_op->set_op("Const");
params_op->set_name(input_name);
(*params_op->mutable_attr())["dtype"].set_type(DT_INT32);
@@ -1552,7 +1577,7 @@ void CreateSliceInput(const string& input_name, const std::vector<int>& values,
void ConvertStridedSliceOperator(const Model& model,
const StridedSliceOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* new_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* new_op = tensorflow_graph->add_node();
new_op->set_op("StridedSlice");
new_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 4);
@@ -1561,7 +1586,8 @@ void ConvertStridedSliceOperator(const Model& model,
*new_op->add_input() = src_op.inputs[2];
*new_op->add_input() = src_op.inputs[3];
- const auto params_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType params_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*new_op->mutable_attr())["T"].set_type(params_type);
(*new_op->mutable_attr())["Index"].set_type(DT_INT32);
@@ -1579,7 +1605,7 @@ void ConvertStridedSliceOperator(const Model& model,
void ConvertSliceOperator(const Model& model, const SliceOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* new_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* new_op = tensorflow_graph->add_node();
new_op->set_op("Slice");
new_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 3);
@@ -1587,7 +1613,8 @@ void ConvertSliceOperator(const Model& model, const SliceOperator& src_op,
*new_op->add_input() = src_op.inputs[1];
*new_op->add_input() = src_op.inputs[2];
- const auto params_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType params_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*new_op->mutable_attr())["T"].set_type(params_type);
(*new_op->mutable_attr())["Index"].set_type(DT_INT32);
@@ -1596,16 +1623,18 @@ void ConvertSliceOperator(const Model& model, const SliceOperator& src_op,
CreateSliceInput(src_op.inputs[2], src_op.size, tensorflow_graph);
}
-void ConvertMeanOperator(const Model& model, const MeanOperator& src_op,
- GraphDef* tensorflow_graph) {
- auto* new_op = tensorflow_graph->add_node();
- new_op->set_op("Mean");
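+// Converts any reduction operator that shares the Mean-style interface: two
+// inputs (data and reduction indices), a "T" type attribute, and an optional
+// keep_dims attribute.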
+template <typename T>
+void ConvertReduceOperator(const Model& model, const T& src_op,
+ GraphDef* tensorflow_graph, const string& op_name) {
+ tensorflow::NodeDef* new_op = tensorflow_graph->add_node();
+ new_op->set_op(op_name);
new_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
*new_op->add_input() = src_op.inputs[0];
*new_op->add_input() = src_op.inputs[1];
- const auto params_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType params_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*new_op->mutable_attr())["T"].set_type(params_type);
if (src_op.keep_dims) {
@@ -1613,7 +1642,7 @@ void ConvertMeanOperator(const Model& model, const MeanOperator& src_op,
}
// Create the params tensor.
- auto* params_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* params_op = tensorflow_graph->add_node();
params_op->set_op("Const");
params_op->set_name(src_op.inputs[1]);
(*params_op->mutable_attr())["dtype"].set_type(DT_INT32);
@@ -1629,13 +1658,14 @@ void ConvertMeanOperator(const Model& model, const MeanOperator& src_op,
void ConvertSqueezeOperator(const Model& model, const SqueezeOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* new_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* new_op = tensorflow_graph->add_node();
new_op->set_op("Squeeze");
new_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 1);
*new_op->add_input() = src_op.inputs[0];
- const auto params_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType params_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*new_op->mutable_attr())["T"].set_type(params_type);
if (!src_op.squeeze_dims.empty()) {
@@ -1648,74 +1678,79 @@ void ConvertSqueezeOperator(const Model& model, const SqueezeOperator& src_op,
void ConvertSubOperator(const Model& model, const SubOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* sub_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* sub_op = tensorflow_graph->add_node();
sub_op->set_op("Sub");
sub_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
*sub_op->add_input() = src_op.inputs[0];
*sub_op->add_input() = src_op.inputs[1];
- const auto data_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType data_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*sub_op->mutable_attr())["T"].set_type(data_type);
}
void ConvertTensorFlowMinimumOperator(const Model& model,
const TensorFlowMinimumOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* sub_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* sub_op = tensorflow_graph->add_node();
sub_op->set_op("Minimum");
sub_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
*sub_op->add_input() = src_op.inputs[0];
*sub_op->add_input() = src_op.inputs[1];
- const auto data_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType data_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*sub_op->mutable_attr())["T"].set_type(data_type);
}
void ConvertTensorFlowMaximumOperator(const Model& model,
const TensorFlowMaximumOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* sub_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* sub_op = tensorflow_graph->add_node();
sub_op->set_op("Maximum");
sub_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
*sub_op->add_input() = src_op.inputs[0];
*sub_op->add_input() = src_op.inputs[1];
- const auto data_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType data_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*sub_op->mutable_attr())["T"].set_type(data_type);
}
void ConvertSelectOperator(const Model& model, const SelectOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* sub_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* sub_op = tensorflow_graph->add_node();
sub_op->set_op("Select");
sub_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 3);
*sub_op->add_input() = src_op.inputs[0];
*sub_op->add_input() = src_op.inputs[1];
*sub_op->add_input() = src_op.inputs[2];
- const auto data_type = GetTensorFlowDataType(model, src_op.inputs[1]);
+ const tensorflow::DataType data_type =
+ GetTensorFlowDataType(model, src_op.inputs[1]);
(*sub_op->mutable_attr())["T"].set_type(data_type);
}
void ConvertTileOperator(const Model& model,
const TensorFlowTileOperator& src_op,
GraphDef* tensorflow_graph) {
- auto* tile_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* tile_op = tensorflow_graph->add_node();
tile_op->set_op("Tile");
tile_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
*tile_op->add_input() = src_op.inputs[0];
*tile_op->add_input() = src_op.inputs[1];
- const auto data_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType data_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*tile_op->mutable_attr())["T"].set_type(data_type);
- const auto multiples_data_type =
+ const tensorflow::DataType multiples_data_type =
GetTensorFlowDataType(model, src_op.inputs[1]);
(*tile_op->mutable_attr())["Tmultiples"].set_type(multiples_data_type);
}
void ConvertTopKV2Operator(const Model& model, const TopKV2Operator& src_op,
GraphDef* tensorflow_graph) {
- auto* topk_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* topk_op = tensorflow_graph->add_node();
topk_op->set_op("TOPKV2");
topk_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
@@ -1728,12 +1763,13 @@ void ConvertRandomUniformOperator(const Model& model,
const RandomUniformOperator& src_op,
GraphDef* tensorflow_graph) {
CHECK(tensorflow_graph != nullptr);
- auto* new_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* new_op = tensorflow_graph->add_node();
new_op->set_op("RandomUniform");
CHECK_EQ(src_op.inputs.size(), 1);
new_op->set_name(src_op.outputs[0]);
*new_op->add_input() = src_op.inputs[0];
- const auto shape_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType shape_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*new_op->mutable_attr())["T"].set_type(shape_type);
(*new_op->mutable_attr())["dtype"].set_type(
GetTensorFlowDataType(src_op.dtype));
@@ -1744,13 +1780,14 @@ void ConvertRandomUniformOperator(const Model& model,
void ConvertComparisonOperator(const Model& model, const Operator& src_op,
const char* op_name,
GraphDef* tensorflow_graph) {
- auto* comparison_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* comparison_op = tensorflow_graph->add_node();
comparison_op->set_op(op_name);
comparison_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
*comparison_op->add_input() = src_op.inputs[0];
*comparison_op->add_input() = src_op.inputs[1];
- const auto data_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType data_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*comparison_op->mutable_attr())["T"].set_type(data_type);
}
@@ -1758,21 +1795,37 @@ void ConvertSparseToDenseOperator(const Model& model,
const SparseToDenseOperator& src_op,
const char* op_name,
GraphDef* tensorflow_graph) {
- auto* sparse_to_dense_op = tensorflow_graph->add_node();
+ tensorflow::NodeDef* sparse_to_dense_op = tensorflow_graph->add_node();
sparse_to_dense_op->set_op(op_name);
sparse_to_dense_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 4);
for (int i = 0; i < 4; ++i) {
*sparse_to_dense_op->add_input() = src_op.inputs[i];
}
- const auto data_type = GetTensorFlowDataType(model, src_op.inputs[3]);
+ const tensorflow::DataType data_type =
+ GetTensorFlowDataType(model, src_op.inputs[3]);
(*sparse_to_dense_op->mutable_attr())["T"].set_type(data_type);
- const auto index_type = GetTensorFlowDataType(model, src_op.inputs[0]);
+ const tensorflow::DataType index_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
(*sparse_to_dense_op->mutable_attr())["Tindices"].set_type(index_type);
(*sparse_to_dense_op->mutable_attr())["Tindices"].set_b(
src_op.validate_indices);
}
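+
+// Emits a two-input elementwise node (op_name is "Pow" at the sole call site)
+// whose "T" attribute is taken from the first input's data type.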
+void ConvertPowOperator(const Model& model, const PowOperator& src_op,
+ const char* op_name, GraphDef* tensorflow_graph) {
+ tensorflow::NodeDef* pow_op = tensorflow_graph->add_node();
+ pow_op->set_op(op_name);
+ pow_op->set_name(src_op.outputs[0]);
+ CHECK_EQ(src_op.inputs.size(), 2);
+ for (int i = 0; i < 2; ++i) {
+ *pow_op->add_input() = src_op.inputs[i];
+ }
+ const tensorflow::DataType data_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
+ (*pow_op->mutable_attr())["T"].set_type(data_type);
+}
+
void ConvertOperator(const Model& model, const Operator& src_op,
GraphDef* tensorflow_graph) {
if (src_op.fused_activation_function != FusedActivationFunctionType::kNone) {
@@ -1909,8 +1962,20 @@ void ConvertOperator(const Model& model, const Operator& src_op,
model, static_cast<const StridedSliceOperator&>(src_op),
tensorflow_graph);
} else if (src_op.type == OperatorType::kMean) {
- ConvertMeanOperator(model, static_cast<const MeanOperator&>(src_op),
- tensorflow_graph);
+ ConvertReduceOperator(model, static_cast<const MeanOperator&>(src_op),
+ tensorflow_graph, "Mean");
+ } else if (src_op.type == OperatorType::kSum) {
+ ConvertReduceOperator(model,
+ static_cast<const TensorFlowSumOperator&>(src_op),
+ tensorflow_graph, "Sum");
+ } else if (src_op.type == OperatorType::kReduceProd) {
+ ConvertReduceOperator(model,
+ static_cast<const TensorFlowProdOperator&>(src_op),
+ tensorflow_graph, "Prod");
+ } else if (src_op.type == OperatorType::kReduceMax) {
+ ConvertReduceOperator(model,
+ static_cast<const TensorFlowMaxOperator&>(src_op),
+ tensorflow_graph, "Max");
} else if (src_op.type == OperatorType::kSub) {
ConvertSubOperator(model, static_cast<const SubOperator&>(src_op),
tensorflow_graph);
@@ -1931,6 +1996,9 @@ void ConvertOperator(const Model& model, const Operator& src_op,
} else if (src_op.type == OperatorType::kArgMax) {
ConvertArgMaxOperator(model, static_cast<const ArgMaxOperator&>(src_op),
tensorflow_graph);
+ } else if (src_op.type == OperatorType::kArgMin) {
+ ConvertArgMinOperator(model, static_cast<const ArgMinOperator&>(src_op),
+ tensorflow_graph);
} else if (src_op.type == OperatorType::kTopK_V2) {
ConvertTopKV2Operator(model, static_cast<const TopKV2Operator&>(src_op),
tensorflow_graph);
@@ -1987,6 +2055,9 @@ void ConvertOperator(const Model& model, const Operator& src_op,
ConvertTileOperator(model,
static_cast<const TensorFlowTileOperator&>(src_op),
tensorflow_graph);
+ } else if (src_op.type == OperatorType::kPow) {
+ ConvertPowOperator(model, static_cast<const PowOperator&>(src_op), "Pow",
+ tensorflow_graph);
} else {
LOG(FATAL) << "Unhandled operator type " << OperatorTypeName(src_op.type);
}
@@ -1994,7 +2065,7 @@ void ConvertOperator(const Model& model, const Operator& src_op,
void AddPlaceholder(const string& name, ArrayDataType type,
GraphDef* tensorflow_graph) {
- auto* placeholder = tensorflow_graph->add_node();
+ tensorflow::NodeDef* placeholder = tensorflow_graph->add_node();
placeholder->set_op("Placeholder");
switch (type) {
case ArrayDataType::kBool:
@@ -2023,7 +2094,7 @@ void AddPlaceholder(const string& name, ArrayDataType type,
void AddPlaceholderForRNNState(const Model& model, const string& name, int size,
GraphDef* tensorflow_graph) {
- auto* placeholder = tensorflow_graph->add_node();
+ tensorflow::NodeDef* placeholder = tensorflow_graph->add_node();
placeholder->set_op("Placeholder");
placeholder->set_name(name);
(*placeholder->mutable_attr())["dtype"].set_type(DT_FLOAT);
diff --git a/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md b/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md
index 0ab024c618..18b7848db8 100644
--- a/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md
+++ b/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md
@@ -11,8 +11,10 @@ Table of contents:
* [Command-line tools](#tools)
* [Converting models prior to TensorFlow 1.9.](#pre-tensorflow-1.9)
-* [Convert a TensorFlow GraphDef](#graphdef)
-* [Convert a TensorFlow SavedModel](#savedmodel)
+* [Basic examples](#basic)
+ * [Convert a TensorFlow GraphDef](#graphdef)
+ * [Convert a TensorFlow SavedModel](#savedmodel)
+ * [Convert a tf.keras model](#keras)
* [Quantization](#quantization)
* [Convert a TensorFlow GraphDef for quantized inference](#graphdef-quant)
* [Use "dummy-quantization" to try out quantized inference on a float
@@ -51,7 +53,12 @@ API](python_api.md#pre-tensorflow-1.9). If a command line tool is desired, the
Terminal for additional details on the command-line flags available. There were
no command line tools in TensorFlow 1.8.
-## Convert a TensorFlow GraphDef <a name="graphdef"></a>
+## Basic examples <a name="basic"></a>
+
+The following section shows examples of how to convert a basic floating-point
+model from each of the supported data formats into a TensorFlow Lite FlatBuffer.
+
+### Convert a TensorFlow GraphDef <a name="graphdef"></a>
The following example converts a basic TensorFlow GraphDef (frozen by
[freeze_graph.py](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py))
@@ -70,7 +77,7 @@ tflite_convert \
The value for `input_shapes` is automatically determined whenever possible.
-## Convert a TensorFlow SavedModel <a name="savedmodel"></a>
+### Convert a TensorFlow SavedModel <a name="savedmodel"></a>
The following example converts a basic TensorFlow SavedModel into a TensorFlow Lite
FlatBuffer to perform floating-point inference.
@@ -95,6 +102,17 @@ There is currently no support for MetaGraphDefs without a SignatureDef or for
MetaGraphDefs that use the [`assets/`
directory](https://www.tensorflow.org/guide/saved_model#structure_of_a_savedmodel_directory).
+### Convert a tf.keras model <a name="keras"></a>
+
+The following example converts a `tf.keras` model into a TensorFlow Lite
+FlatBuffer. The `tf.keras` file must contain both the model and the weights.
+
+```
+tflite_convert \
+ --output_file=/tmp/foo.tflite \
+ --keras_model_file=/tmp/keras_model.h5
+```
+
## Quantization
### Convert a TensorFlow GraphDef for quantized inference <a name="graphdef-quant"></a>
diff --git a/tensorflow/contrib/lite/toco/g3doc/cmdline_reference.md b/tensorflow/contrib/lite/toco/g3doc/cmdline_reference.md
index 2d44b871c6..decc8a45a4 100644
--- a/tensorflow/contrib/lite/toco/g3doc/cmdline_reference.md
+++ b/tensorflow/contrib/lite/toco/g3doc/cmdline_reference.md
@@ -19,7 +19,7 @@ Table of contents:
The following high level flags specify the details of the input and output
files. The flag `--output_file` is always required. Additionally, either
-`--graph_def_file` or `--saved_model_dir` is required.
+`--graph_def_file`, `--saved_model_dir`, or `--keras_model_file` is required.
* `--output_file`. Type: string. Specifies the full path of the output file.
* `--graph_def_file`. Type: string. Specifies the full path of the input
@@ -27,6 +27,8 @@ files. The flag `--output_file` is always required. Additionally, either
[freeze_graph.py](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py).
* `--saved_model_dir`. Type: string. Specifies the full path to the directory
containing the SavedModel.
+* `--keras_model_file`. Type: string. Specifies the full path of the HDF5 file
+ containing the tf.keras model.
* `--output_format`. Type: string. Default: `TFLITE`. Specifies the format of
the output file. Allowed values:
* `TFLITE`: TensorFlow Lite FlatBuffer format.
diff --git a/tensorflow/contrib/lite/toco/g3doc/python_api.md b/tensorflow/contrib/lite/toco/g3doc/python_api.md
index afa6fd6957..3799eac0a1 100644
--- a/tensorflow/contrib/lite/toco/g3doc/python_api.md
+++ b/tensorflow/contrib/lite/toco/g3doc/python_api.md
@@ -15,6 +15,7 @@ Table of contents:
* [Exporting a GraphDef from tf.Session](#basic-graphdef-sess)
* [Exporting a GraphDef from file](#basic-graphdef-file)
* [Exporting a SavedModel](#basic-savedmodel)
+ * [Exporting a tf.keras File](#basic-keras-file)
* [Complex examples](#complex)
* [Exporting a quantized GraphDef](#complex-quant)
* [TensorFlow Lite Python interpreter](#interpreter)
is `tf.contrib.lite.TocoConverter`. The API for calling the Python interpreter is
`TocoConverter` provides class methods based on the original format of the
model. `TocoConverter.from_session()` is available for GraphDefs.
-`TocoConverter.from_saved_model()` is available for SavedModels. Example usages
-for simple float-point models are shown in [Basic Examples](#basic). Examples
-usages for more complex models is shown in [Complex Examples](#complex).
+`TocoConverter.from_saved_model()` is available for SavedModels.
+`TocoConverter.from_keras_model_file()` is available for `tf.keras` files.
+Example usages for simple floating-point models are shown in [Basic
+Examples](#basic). Example usages for more complex models are shown in
+[Complex Examples](#complex).
**NOTE**: Currently, `TocoConverter` will cause a fatal error to the Python
interpreter when the conversion fails. This will be remedied as soon as
@@ -114,6 +117,51 @@ For more complex SavedModels, the optional parameters that can be passed into
`output_arrays`, `tag_set` and `signature_key`. Details of each parameter are
available by running `help(tf.contrib.lite.TocoConverter)`.
+### Exporting a tf.keras File <a name="basic-keras-file"></a>
+
+The following example shows how to convert a `tf.keras` model into a TensorFlow
+Lite FlatBuffer.
+
+```python
+import tensorflow as tf
+
+converter = tf.contrib.lite.TocoConverter.from_keras_model_file("keras_model.h5")
+tflite_model = converter.convert()
+open("converted_model.tflite", "wb").write(tflite_model)
+```
+
+The `tf.keras` file must contain both the model and the weights. A comprehensive
+example including model construction can be seen below.
+
+```python
+import numpy as np
+import tensorflow as tf
+
+# Generate tf.keras model.
+model = tf.keras.models.Sequential()
+model.add(tf.keras.layers.Dense(2, input_shape=(3,)))
+model.add(tf.keras.layers.RepeatVector(3))
+model.add(tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(3)))
+model.compile(loss=tf.keras.losses.MSE,
+ optimizer=tf.keras.optimizers.RMSprop(lr=0.0001),
+ metrics=[tf.keras.metrics.categorical_accuracy],
+ sample_weight_mode='temporal')
+
+x = np.random.random((1, 3))
+y = np.random.random((1, 3, 3))
+model.train_on_batch(x, y)
+model.predict(x)
+
+# Save tf.keras model in HDF5 format.
+keras_file = "keras_model.h5"
+tf.keras.models.save_model(model, keras_file)
+
+# Convert to TensorFlow Lite model.
+converter = tf.contrib.lite.TocoConverter.from_keras_model_file(keras_file)
+tflite_model = converter.convert()
+open("converted_model.tflite", "wb").write(tflite_model)
+```
+
## Complex examples <a name="complex"></a>
For models where the default value of the attributes is not sufficient, the
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc b/tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc
index 2c7ffe4884..1688586733 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc
@@ -159,6 +159,7 @@ bool DequantizeArray(const string& array_name,
new_array.GetOrCreateMinMax() = array->GetMinMax();
fakequant_op->minmax.reset(new MinMax);
*fakequant_op->minmax = array->GetMinMax();
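+  // Carry the narrow_range flag over to the new FakeQuant op so quantization
+  // keeps the semantics of the original array.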
+ fakequant_op->narrow_range = array->narrow_range;
if (must_insert_fakequant_before) {
for (const auto& op : model->operators) {
for (string& output : op->outputs) {
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/ensure_bias_vectors.cc b/tensorflow/contrib/lite/toco/graph_transformations/ensure_bias_vectors.cc
index 708ecf6e0a..e80ed036b3 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/ensure_bias_vectors.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/ensure_bias_vectors.cc
@@ -26,17 +26,38 @@ namespace toco {
namespace {
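+// Returns the output depth implied by the weights layout: dimension 0 for
+// Conv and FullyConnected weights, dimension 3 for DepthwiseConv weights.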
+int GetOutputDepthFromWeights(const Model& model, const Operator& op) {
+ const string& weights_name = op.inputs[1];
+ const auto& weights_shape = model.GetArray(weights_name).shape();
+ if (op.type == OperatorType::kConv ||
+ op.type == OperatorType::kFullyConnected) {
+ return weights_shape.dims(0);
+ }
+ if (op.type == OperatorType::kDepthwiseConv) {
+ return weights_shape.dims(3);
+ }
+ LOG(FATAL) << "Unhandled operator type";
+ return 0;
+}
+
bool ProcessLinearOperator(Model* model, Operator* op) {
if (op->inputs.size() >= 3) {
return false;
}
const string& output_name = op->outputs[0];
+ const string& weights_name = op->inputs[1];
+ if (!model->GetArray(weights_name).has_shape()) {
+ return false;
+ }
+ const int depth = GetOutputDepthFromWeights(*model, *op);
const string& bias_name = AvailableArrayName(*model, output_name + "_bias");
op->inputs.push_back(bias_name);
DCHECK_EQ(op->inputs.size(), 3);
auto& bias_array = model->GetOrCreateArray(bias_name);
bias_array.data_type = ArrayDataType::kFloat;
-
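+  // Shape the new bias as a 1-D vector of length 'depth' and zero-fill it so
+  // it is immediately usable by downstream passes.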
+ bias_array.mutable_shape()->mutable_dims()->push_back(depth);
+ auto& bias_buffer = bias_array.GetMutableBuffer<ArrayDataType::kFloat>();
+ bias_buffer.data.resize(depth, 0.f);
return true;
}
} // namespace
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h b/tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h
index 4025fede6f..8db7df5c0e 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h
+++ b/tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h
@@ -135,6 +135,7 @@ DECLARE_GRAPH_TRANSFORMATION(IdentifyRelu1)
DECLARE_GRAPH_TRANSFORMATION(IdentifyPRelu)
DECLARE_GRAPH_TRANSFORMATION(IdentifyDilatedConv)
DECLARE_GRAPH_TRANSFORMATION(MakeInitialDequantizeOperator)
+DECLARE_GRAPH_TRANSFORMATION(MoveBinaryOperatorBeforeReshape)
DECLARE_GRAPH_TRANSFORMATION(PropagateActivationFunctionIntoConstants)
DECLARE_GRAPH_TRANSFORMATION(PropagateArrayDataTypes)
DECLARE_GRAPH_TRANSFORMATION(PropagateFakeQuantNumBits);
@@ -158,7 +159,7 @@ DECLARE_GRAPH_TRANSFORMATION(ResolveConstantBinaryOperator)
DECLARE_GRAPH_TRANSFORMATION(ResolveConstantUnaryOperator)
DECLARE_GRAPH_TRANSFORMATION(CreateIm2colArrays)
DECLARE_GRAPH_TRANSFORMATION(DropIm2colArrays)
-DECLARE_GRAPH_TRANSFORMATION(ReadFakeQuantMinMax)
+DECLARE_GRAPH_TRANSFORMATION(ReadArrayMinmaxAndNarrowRangeFromFakeQuant)
DECLARE_GRAPH_TRANSFORMATION(ReorderElementwiseUnary)
DECLARE_GRAPH_TRANSFORMATION(ReorderReshapeTranspose)
DECLARE_GRAPH_TRANSFORMATION(ResolveReorderAxes)
@@ -179,7 +180,7 @@ DECLARE_GRAPH_TRANSFORMATION(ResolvePadAttributes)
DECLARE_GRAPH_TRANSFORMATION(ResolvePadV2Attributes)
DECLARE_GRAPH_TRANSFORMATION(ResolveStridedSliceAttributes)
DECLARE_GRAPH_TRANSFORMATION(ResolveSliceAttributes)
-DECLARE_GRAPH_TRANSFORMATION(ResolveMeanAttributes)
+DECLARE_GRAPH_TRANSFORMATION(ResolveReduceAttributes)
DECLARE_GRAPH_TRANSFORMATION(ResolveTransposeAttributes)
DECLARE_GRAPH_TRANSFORMATION(ResolveConstantRandomUniform)
DECLARE_GRAPH_TRANSFORMATION(ResolveConstantRange)
@@ -193,6 +194,7 @@ DECLARE_GRAPH_TRANSFORMATION(ResolveMultiplyByZero)
DECLARE_GRAPH_TRANSFORMATION(Dequantize)
DECLARE_GRAPH_TRANSFORMATION(UnpartitionEmbeddingLookup)
DECLARE_GRAPH_TRANSFORMATION(ShuffleFCWeights)
+DECLARE_GRAPH_TRANSFORMATION(ResolveFakeQuantArgsFromVars)
class PropagateDefaultMinMax : public GraphTransformation {
public:
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/hardcode_min_max.cc b/tensorflow/contrib/lite/toco/graph_transformations/hardcode_min_max.cc
index 82a4308ecb..2f1bb8f0ad 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/hardcode_min_max.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/hardcode_min_max.cc
@@ -133,24 +133,20 @@ bool HardcodeMinMaxForConcatenation(Model* model, Operator* op) {
}
bool HardcodeMinMaxForSplit(Model* model, Operator* op) {
- for (const auto& output : op->outputs) {
- if (model->GetArray(output).minmax) {
- LOG(WARNING) << "Skipping min-max setting for " << LogName(*op)
- << " because output " << output << " already has min-max.";
- return false;
- }
- }
// Data is in second input.
auto& input_array = model->GetArray(op->inputs[1]);
if (!input_array.minmax) {
return false;
- } else {
- for (const auto& output : op->outputs) {
- auto& array = model->GetArray(output);
+ }
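+  // Copy the input range to every output, reporting a change only when an
+  // output's min/max actually differs so that the transformation converges.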
+ bool changed = false;
+ for (const auto& output : op->outputs) {
+ auto& array = model->GetArray(output);
+ if (!array.minmax || !(array.GetMinMax() == input_array.GetMinMax())) {
+ changed = true;
array.GetOrCreateMinMax() = *input_array.minmax;
}
- return true;
}
+ return changed;
}
// The output of average or max pooling is within the same range as its input.
@@ -232,6 +228,14 @@ bool HardcodeMinMaxForOutput(Model* model, Operator* op, double min,
return true;
}
+bool MinMaxApproximatelyEqual(const MinMax& minmax1, const MinMax& minmax2) {
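+  // Relative tolerance: for ranges spanning roughly 6.0 (e.g. [-3, 3]), the
+  // endpoints may differ by up to ~6e-6 and still compare as equal.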
+ const double magnitude =
+ std::min(minmax1.max - minmax1.min, minmax2.max - minmax2.min);
+ const double tolerated = 1e-6 * magnitude;
+ return std::abs(minmax1.min - minmax2.min) < tolerated &&
+ std::abs(minmax1.max - minmax2.max) < tolerated;
+}
+
// Propagates MinMax from any of the listed arrays, to all others.
// If multiple of these arrays have MinMax, then these are required
// to agree with each other.
@@ -254,7 +258,7 @@ bool PropagateMinMaxAmongArrays(Model* model,
for (const string& array_name : array_names) {
auto& array = model->GetArray(array_name);
if (array.minmax) {
- CHECK(*array.minmax == *reference_minmax)
+ CHECK(MinMaxApproximatelyEqual(*array.minmax, *reference_minmax))
<< "Both the following arrays have minmax, and they disagree: "
<< reference_array_name << " (" << reference_minmax->min << ","
<< reference_minmax->max << ") and " << array_name << " ("
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/identify_lstm.cc b/tensorflow/contrib/lite/toco/graph_transformations/identify_lstm.cc
index 685353a846..c0b014b45e 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/identify_lstm.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/identify_lstm.cc
@@ -35,19 +35,24 @@ std::vector<std::unique_ptr<Operator>>::iterator FindOperator(
return it;
}
-bool GetStateArrayForBackEdge(const Model& model,
- const string& back_edge_source_array,
- string* state_array = nullptr) {
- for (const auto& rnn_state : model.flags.rnn_states()) {
- if (back_edge_source_array == rnn_state.back_edge_source_array()) {
- // Found LSTM cell output
- if (state_array) {
- *state_array = rnn_state.state_array();
- }
- return true;
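+// Checks the operator producing 'array_name'. When 'op_type' is kNone, no
+// producer check is made and 'source_op' must be null; otherwise the producing
+// operator must exist, match 'op_type', and is returned through 'source_op'.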
+bool ValidateSourceOp(const Model& model, const string& array_name,
+ OperatorType op_type, Operator** source_op) {
+ if (op_type == OperatorType::kNone) {
+ CHECK(!source_op);
+ } else {
+ CHECK(source_op);
+ *source_op = GetOpWithOutput(model, array_name);
+ if (*source_op == nullptr) {
+ return false;
+ }
+
+    // Check that the source operator, if connected, is of the correct type.
+ if ((*source_op)->type != op_type) {
+ return false;
}
}
- return false;
+
+ return true;
}
// Returns true if the given operator has exactly 1 input, and is connected to
@@ -62,24 +67,10 @@ bool MatchOperatorInputs(const Operator& op, const Model& model,
}
// Check if first input is disconnected/connected to an operator
- Operator* x = GetOpWithOutput(model, op.inputs[0]);
- if ((op_type == OperatorType::kNone) && (x != nullptr)) {
- return false;
- }
- if ((op_type != OperatorType::kNone) && (x == nullptr)) {
+ if (!ValidateSourceOp(model, op.inputs[0], op_type, connected_op)) {
return false;
}
- // Check that first operator, if connected, is of correct type
- if ((x != nullptr) && (x->type != op_type)) {
- return false;
- }
-
- // Successfully matched. Optionally return matching input operators.
- if (connected_op) {
- *connected_op = x;
- }
-
return true;
}
@@ -96,40 +87,15 @@ bool MatchOperatorInputs(const Operator& op, const Model& model,
}
// Check if first input is disconnected/connected to an operator
- Operator* x = GetOpWithOutput(model, op.inputs[0]);
- if ((a_op_type == OperatorType::kNone) && (x != nullptr)) {
- return false;
- }
- if ((a_op_type != OperatorType::kNone) && (x == nullptr)) {
- return false;
- }
-
- // Check that first operator, if connected, is of correct type
- if ((x != nullptr) && (x->type != a_op_type)) {
+ if (!ValidateSourceOp(model, op.inputs[0], a_op_type, a_op)) {
return false;
}
// Check if second input is disconnected/connected to an operator
- Operator* y = GetOpWithOutput(model, op.inputs[1]);
- if ((b_op_type == OperatorType::kNone) && (y != nullptr)) {
- return false;
- }
- if ((b_op_type != OperatorType::kNone) && (y == nullptr)) {
+ if (!ValidateSourceOp(model, op.inputs[1], b_op_type, b_op)) {
return false;
}
- // Check that second operator, if connected, is of correct type
- if ((y != nullptr) && (y->type != b_op_type)) {
- return false;
- }
-
- // Successfully matched. Optionally return matching input operators.
- if (a_op != nullptr) {
- *a_op = x;
- }
- if (b_op != nullptr) {
- *b_op = y;
- }
return true;
}
@@ -147,57 +113,20 @@ bool MatchOperatorInputs(const Operator& op, const Model& model,
}
// Check if first input is disconnected/connected to an operator
- Operator* x = GetOpWithOutput(model, op.inputs[0]);
- if ((a_op_type == OperatorType::kNone) && (x != nullptr)) {
- return false;
- }
- if ((a_op_type != OperatorType::kNone) && (x == nullptr)) {
- return false;
- }
-
- // Check that first operator, if connected, is of correct type
- if ((x != nullptr) && (x->type != a_op_type)) {
+ if (!ValidateSourceOp(model, op.inputs[0], a_op_type, a_op)) {
return false;
}
// Check if second input is disconnected/connected to an operator
- Operator* y = GetOpWithOutput(model, op.inputs[1]);
- if ((b_op_type == OperatorType::kNone) && (y != nullptr)) {
- return false;
- }
- if ((b_op_type != OperatorType::kNone) && (y == nullptr)) {
- return false;
- }
-
- // Check that second operator, if connected, is of correct type
- if ((y != nullptr) && (y->type != b_op_type)) {
+ if (!ValidateSourceOp(model, op.inputs[1], b_op_type, b_op)) {
return false;
}
// Check if third input is disconnected/connected to an operator
- Operator* z = GetOpWithOutput(model, op.inputs[2]);
- if ((c_op_type == OperatorType::kNone) && (z != nullptr)) {
- return false;
- }
- if ((c_op_type != OperatorType::kNone) && (z == nullptr)) {
- return false;
- }
-
- // Check that third operator, if connected, is of correct type
- if ((z != nullptr) && (z->type != c_op_type)) {
+ if (!ValidateSourceOp(model, op.inputs[2], c_op_type, c_op)) {
return false;
}
- // Successfully matched. Optionally return matching input operators.
- if (a_op != nullptr) {
- *a_op = x;
- }
- if (b_op != nullptr) {
- *b_op = y;
- }
- if (c_op != nullptr) {
- *c_op = z;
- }
return true;
}
@@ -231,11 +160,6 @@ bool IdentifyLstmCell::Run(Model* model, std::size_t op_index) {
&state_combine_add)) {
return false;
}
- string prev_state;
- if (!GetStateArrayForBackEdge(*model, state_output_tanh->inputs[0],
- &prev_state)) {
- return false;
- }
// State forget & remember addition
Operator *state_forget_mul, *state_remember_mul;
@@ -244,9 +168,7 @@ bool IdentifyLstmCell::Run(Model* model, std::size_t op_index) {
&state_remember_mul)) {
return false;
}
- if (state_forget_mul->inputs[0] != prev_state) {
- return false;
- }
+ const string prev_state = state_forget_mul->inputs[0];
// State forget gate
Operator* state_forget_sig;
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/identify_prelu.cc b/tensorflow/contrib/lite/toco/graph_transformations/identify_prelu.cc
index 30be4ac0aa..b90a156a0d 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/identify_prelu.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/identify_prelu.cc
@@ -74,14 +74,30 @@ bool IdentifyPRelu::Run(Model* model, std::size_t op_index) {
const auto* relu_neg_input_op = GetOpWithOutput(*model, mul_op->inputs[1]);
if (relu_neg_input_op == nullptr ||
- relu_neg_input_op->type != OperatorType::kNeg ||
- relu_neg_input_op->fused_activation_function !=
- FusedActivationFunctionType::kRelu ||
relu_neg_input_op->inputs.size() != 1) {
return false;
}
- if (relu_input_op->inputs[0] != relu_neg_input_op->inputs[0]) {
+ const Operator* final_input_op;
+ if (relu_neg_input_op->type == OperatorType::kNeg &&
+ relu_neg_input_op->fused_activation_function ==
+ FusedActivationFunctionType::kRelu) {
+ // This detects a Neg op with fused Relu activation function.
+ final_input_op = relu_neg_input_op;
+ } else {
+    // This detects a Neg op followed by a separate Relu op.
+ const auto* neg_input_op =
+ GetOpWithOutput(*model, relu_neg_input_op->inputs[0]);
+ if (neg_input_op == nullptr || neg_input_op->inputs.size() != 1 ||
+ relu_neg_input_op->type != OperatorType::kRelu ||
+ relu_neg_input_op->fused_activation_function !=
+ FusedActivationFunctionType::kNone) {
+ return false;
+ }
+ final_input_op = neg_input_op;
+ }
+
+ if (relu_input_op->inputs[0] != final_input_op->inputs[0]) {
return false;
}
@@ -112,7 +128,6 @@ bool IdentifyPRelu::Run(Model* model, std::size_t op_index) {
// intermediate tensors aren't used by other ops, those will be removed by
// other graph transformation rules.
model->operators.erase(FindOp(*model, add_op));
-
return true;
}
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc b/tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc
index 45d9f73a1e..f684de08ab 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc
@@ -85,15 +85,8 @@ bool AddDequantizeOperatorToInput(const string& input_name, const Operator* op,
dequantized_input_minmax = input_minmax;
auto& input_qparams = input_array.GetOrCreateQuantizationParams();
input_array.data_type = input_array.final_data_type;
- if (input_array.data_type == ArrayDataType::kUint8) {
- GetQuantizationParamsFromMinMax<ArrayDataType::kUint8>(input_minmax,
- &input_qparams);
- } else if (input_array.data_type == ArrayDataType::kInt16) {
- GetQuantizationParamsFromMinMax<ArrayDataType::kInt16>(input_minmax,
- &input_qparams);
- } else {
- LOG(FATAL) << "unhandled data type";
- }
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ input_array, input_array.data_type, &input_qparams);
transformation->AddMessageF(
"Created %s"
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/move_binary_operator_before_reshape.cc b/tensorflow/contrib/lite/toco/graph_transformations/move_binary_operator_before_reshape.cc
new file mode 100644
index 0000000000..7f44c65285
--- /dev/null
+++ b/tensorflow/contrib/lite/toco/graph_transformations/move_binary_operator_before_reshape.cc
@@ -0,0 +1,178 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+#include <algorithm>
+
+#include "tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h"
+#include "tensorflow/contrib/lite/toco/model.h"
+#include "tensorflow/contrib/lite/toco/tooling_util.h"
+
+namespace toco {
+
+namespace {
+
+bool IsTailOfShape(const Shape& tail, const Shape& shape) {
+ // Return true if 'tail' dimensions are the same as the ending dimensions of
+ // 'shape'.
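+  // For example, a tail of [5, 3] matches shape [2, 5, 3] but not [2, 3, 5].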
+
+ int shape_end = shape.dimensions_count() - 1;
+ int tail_end = tail.dimensions_count() - 1;
+
+ if (tail_end > shape_end) {
+ // tail cannot be longer than shape.
+ return false;
+ }
+
+ // Walk dimensions back to front and compare
+ for (int i = 0; i <= tail_end; i++) {
+ if (shape.dims(shape_end - i) != tail.dims(tail_end - i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+// If a binary operator is doing a broadcast operation from a constant array,
+// and the constant array shape is the tail of both the other input shape and a
+// subsequent reshape op's output shape, we can swap their order. Since we
+// prefer to have reshape ops after mathematical ops, this can allow for the
+// collapsing of some reshapes. The WaveNet model in particular benefits from
+// this transformation.
+//
+// Note we are testing for one particular case of a broader set of possible
+// binary-reshape op transformations. This transformation could be generalized.
+bool MoveBinaryOperatorBeforeReshape::Run(Model* model, std::size_t op_index) {
+ const auto binary_it = model->operators.begin() + op_index;
+ Operator* binary_op = binary_it->get();
+ if (binary_op->type != OperatorType::kAdd &&
+ binary_op->type != OperatorType::kMul &&
+ binary_op->type != OperatorType::kSub &&
+ binary_op->type != OperatorType::kDiv &&
+ binary_op->type != OperatorType::kFloorDiv &&
+ binary_op->type != OperatorType::kFloorMod &&
+ binary_op->type != OperatorType::kMinimum &&
+ binary_op->type != OperatorType::kMaximum &&
+ binary_op->type != OperatorType::kLess &&
+ binary_op->type != OperatorType::kLessEqual &&
+ binary_op->type != OperatorType::kGreater &&
+ binary_op->type != OperatorType::kGreaterEqual) {
+ return false;
+ }
+
+ // BINARY OP INPUT CHECKS
+ CHECK_EQ(binary_op->inputs.size(), 2);
+ const bool input_is_const[2] = {
+ IsConstantParameterArray(*model, binary_op->inputs[0]),
+ IsConstantParameterArray(*model, binary_op->inputs[1]),
+ };
+ if (!input_is_const[0] && !input_is_const[1]) {
+    // To limit our scope, we require one constant input, though there's no
+    // reason this transformation wouldn't work with all-variable inputs.
+ return false;
+ }
+ if (input_is_const[0] && input_is_const[1]) {
+ // Both inputs are constants. Leave this for constants propagation.
+ return false;
+ }
+ const int constant_input_idx = input_is_const[0] ? 0 : 1;
+ const int variable_input_idx = input_is_const[0] ? 1 : 0;
+ CHECK(input_is_const[constant_input_idx]);
+ CHECK(!input_is_const[variable_input_idx]);
+
+ const auto& variable_input_array =
+ model->GetArray(binary_op->inputs[variable_input_idx]);
+ if (!variable_input_array.has_shape()) {
+ AddMessageF(
+        "Not moving %s because its non-constant input shape is not resolved.",
+ LogName(*binary_op));
+ return false;
+ }
+ if (!IsTailOfShape(
+ model->GetArray(binary_op->inputs[constant_input_idx]).shape(),
+ model->GetArray(binary_op->inputs[variable_input_idx]).shape())) {
+ // Constant array shape must be the latter part of the variable shape.
+ return false;
+ }
+
+ // RESHAPE OP CHECKS
+ auto reshape_it =
+ FindOpWithOutput(*model, binary_op->inputs[variable_input_idx]);
+ if (reshape_it == model->operators.end()) {
+    AddMessageF("Not moving %s because its variable input is not connected.",
+ LogName(*binary_op));
+ return false;
+ }
+ Operator* reshape_op = reshape_it->get();
+ if (reshape_op->type != OperatorType::kReshape) {
+ AddMessageF("Not moving %s because the preceding %s is not a reshape op",
+ LogName(*binary_op), LogName(*reshape_op));
+ return false;
+ }
+ const auto& reshape_input_array = model->GetArray(reshape_op->inputs[0]);
+ if (!reshape_input_array.has_shape()) {
+ AddMessageF(
+        "Not moving %s because its non-constant input shape is not resolved "
+        "yet.",
+ LogName(*binary_op));
+ return false;
+ }
+ if (!IsTailOfShape(
+ model->GetArray(binary_op->inputs[constant_input_idx]).shape(),
+          model->GetArray(reshape_op->inputs[0]).shape())) {
+    // Constant array shape must also be the latter part of the reshape op's
+    // input shape, which is what the binary op will consume after the swap.
+ return false;
+ }
+
+ // EXTRA CHECKS ON CONNECTING ARRAY
+ for (const string& output_array : model->flags.output_arrays()) {
+ if (binary_op->inputs[variable_input_idx] == output_array) {
+ AddMessageF(
+ "Not moving %s because the output of reshape op %s is an output op.",
+ LogName(*binary_op), LogName(*reshape_op));
+ return false;
+ }
+ }
+ int count_ops_consuming_output =
+ CountOpsWithInput(*model, binary_op->inputs[variable_input_idx]);
+ DCHECK_GE(count_ops_consuming_output, 1);
+ if (count_ops_consuming_output > 1) {
+ AddMessageF(
+ "Not moving %s because the output of reshape op %s is consumed by "
+ "another op",
+ LogName(*binary_op), LogName(*reshape_op));
+ return false;
+ }
+
+ // SWAP ORDER OF BINARY AND RESHAPE OPS
+ AddMessageF("Moving op %s before reshape op %s", LogName(*binary_op),
+ LogName(*reshape_op));
+
+ // Swap op input and outputs
+ std::iter_swap(reshape_op->inputs.begin(),
+ binary_op->inputs.begin() + variable_input_idx);
+ std::iter_swap(reshape_op->outputs.begin(), binary_op->outputs.begin());
+
+ // Swap operator ordering
+ std::iter_swap(binary_it, reshape_it);
+
+ // Clear binary output shape so it will be re-propagated
+ model->GetArray(binary_op->outputs[0]).clear_shape();
+
+ return true;
+}
+
+} // namespace toco
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc
index 27a1049eaf..670bcf64e7 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc
@@ -100,6 +100,13 @@ bool PropagateArrayDataTypes::Run(Model* model, std::size_t op_index) {
model->GetArray(op->outputs[0]).data_type = argmax_op->output_data_type;
break;
}
+ case OperatorType::kArgMin: {
+ // Data type of the ArgMin op is specified.
+ CHECK_EQ(op->outputs.size(), 1);
+ auto* argmin_op = static_cast<ArgMinOperator*>(op);
+ model->GetArray(op->outputs[0]).data_type = argmin_op->output_data_type;
+ break;
+ }
case OperatorType::kRange: {
auto* range_op = static_cast<RangeOperator*>(op);
// Output type of the Range op can be set via an attribute
@@ -175,6 +182,14 @@ bool PropagateArrayDataTypes::Run(Model* model, std::size_t op_index) {
SetDataTypeForAllOutputs(model, op, data_type);
break;
}
+ case OperatorType::kPow: {
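+      // Pow requires both inputs to share a data type, which then becomes the
+      // output type.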
+ CHECK_EQ(op->inputs.size(), 2);
+ CHECK(model->GetArray(op->inputs[0]).data_type ==
+ model->GetArray(op->inputs[1]).data_type);
+ const ArrayDataType data_type = model->GetArray(op->inputs[0]).data_type;
+ SetDataTypeForAllOutputs(model, op, data_type);
+ break;
+ }
default: {
// These operators produce outputs with the same type as their 1st input
CHECK_GT(op->inputs.size(), 0);
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_default_min_max.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_default_min_max.cc
index 50b90e7c2b..cd078ef189 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_default_min_max.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_default_min_max.cc
@@ -25,6 +25,14 @@ limitations under the License.
namespace toco {
+namespace {
+
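+// Default min/max only makes sense for float arrays; arrays of other data
+// types do not carry quantization ranges here.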
+bool SupportsMinMax(const Array& array) {
+ return array.data_type == ArrayDataType::kFloat;
+}
+
+} // namespace
+
// Propagates default min/max values to any operator input/output array that
// is missing them.
//
@@ -39,14 +47,16 @@ bool PropagateDefaultMinMax::Run(Model* model, std::size_t op_index) {
for (const auto& input : op->inputs) {
auto& input_array = model->GetArray(input);
- if (!input_array.minmax && !input_array.buffer) {
+ if (!input_array.minmax && !input_array.buffer &&
+ SupportsMinMax(input_array)) {
did_change |= SetArrayMinMax(input, &input_array);
}
}
for (const auto& output : op->outputs) {
auto& output_array = model->GetArray(output);
- if (!output_array.minmax && !output_array.buffer) {
+ if (!output_array.minmax && !output_array.buffer &&
+ SupportsMinMax(output_array)) {
did_change |= SetArrayMinMax(output, &output_array);
}
}
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc
index e25125b429..3ad6b0ec6f 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc
@@ -27,11 +27,15 @@ namespace toco {
namespace {
-void ChangeArrayDataType(GraphTransformation* transformation, Array* array,
+bool ChangeArrayDataType(GraphTransformation* transformation, Array* array,
ArrayDataType new_data_type,
const MinMax* new_minmax) {
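+  // The return value reports whether the array was actually modified, so
+  // callers can propagate the graph-changed status precisely.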
// Ensure the array ends up in the new type (if it hasn't yet been quantized).
- array->final_data_type = new_data_type;
+ bool changed = false;
+ if (array->final_data_type != new_data_type) {
+ array->final_data_type = new_data_type;
+ changed = true;
+ }
if (array->minmax && array->quantization_params) {
// The array is already quantized and has min/max info.
@@ -62,18 +66,16 @@ void ChangeArrayDataType(GraphTransformation* transformation, Array* array,
"Rescaling min/max from %g,%g (%s) to %g,%g (%s)", array_minmax.min,
array_minmax.max, ArrayDataTypeName(array->data_type), min, max,
ArrayDataTypeName(new_data_type));
-
array_minmax.min = min;
array_minmax.max = max;
- GetQuantizationParamsFromMinMax<ArrayDataType::kInt16>(
- array_minmax, array->quantization_params.get());
-
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ *array, new_data_type, array->quantization_params.get());
// Directly change the type as the array was already quantized.
array->data_type = new_data_type;
- } else {
+ changed = true;
+ } else if (!array->quantization_params) {
// Array has not yet been quantized so we can just set the final data type
// and assign the new min/max value (if provided).
- CHECK(!array->quantization_params);
if (!array->minmax && new_minmax) {
transformation->AddMessageF("Forcing new minmax to %g,%g (%s)",
@@ -82,8 +84,11 @@ void ChangeArrayDataType(GraphTransformation* transformation, Array* array,
auto& array_minmax = array->GetOrCreateMinMax();
array_minmax.min = new_minmax->min;
array_minmax.max = new_minmax->max;
+ changed = true;
}
}
+
+ return changed;
}
// Returns true if the op blocks our backward recursive data type propagation.
@@ -159,9 +164,8 @@ bool RecursivelyBackwardPropagateDataType(GraphTransformation* transformation,
"Adjusting input final data type of array %s from %s to %s", input,
ArrayDataTypeName(input_array.final_data_type),
ArrayDataTypeName(new_data_type));
- did_change = true;
- ChangeArrayDataType(transformation, &input_array, new_data_type,
- &new_minmax);
+ did_change |= ChangeArrayDataType(transformation, &input_array,
+ new_data_type, &new_minmax);
// Walk up into all ops producing the inputs to this op.
for (auto& producing_op : model->operators) {
@@ -212,9 +216,8 @@ bool RecursivelyForwardPropagateDataType(GraphTransformation* transformation,
"Adjusting output final data type of array %s from %s to %s", output,
ArrayDataTypeName(output_array.final_data_type),
ArrayDataTypeName(new_data_type));
- did_change = true;
- ChangeArrayDataType(transformation, &output_array, new_data_type,
- nullptr);
+ did_change |= ChangeArrayDataType(transformation, &output_array,
+ new_data_type, nullptr);
// Walk down into all ops consuming the output of this op.
for (auto& consuming_op : model->operators) {
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
index c61da203c6..f422e3a9c7 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
@@ -120,49 +120,7 @@ void ComputeBinaryOperatorOutputSize(const Shape& input_shape_x,
CHECK(output_array->has_shape());
}
-int GetOutputDepthFromWeights(const Model& model, const Operator& op) {
- const string& weights_name = op.inputs[1];
- const auto& weights_shape = model.GetArray(weights_name).shape();
- if (op.type == OperatorType::kConv ||
- op.type == OperatorType::kFullyConnected) {
- return weights_shape.dims(0);
- } else if (op.type == OperatorType::kDepthwiseConv) {
- return weights_shape.dims(3);
- } else {
- LOG(FATAL) << "Unhandled operator type";
- }
-}
-
-bool EnsureBiasVectorShape(Model* model, Operator* op) {
- const string& weights_name = op->inputs[1];
- const auto& weights_array = model->GetArray(weights_name);
- // Yield until weights shape has been resolved.
- if (!weights_array.has_shape()) {
- return false;
- }
-
- if (op->inputs.size() < 3) {
- return false;
- }
- auto& bias_array = model->GetArray(op->inputs[2]);
- if (bias_array.has_shape()) {
- return true;
- }
-
- const int output_depth = GetOutputDepthFromWeights(*model, *op);
- bias_array.copy_shape(Shape({output_depth}));
-
- auto& float_buffer = bias_array.GetMutableBuffer<ArrayDataType::kFloat>();
- float_buffer.data.resize(output_depth, 0);
-
- return true;
-}
-
void ProcessConvOperator(Model* model, ConvOperator* op) {
- if (!EnsureBiasVectorShape(model, op)) {
- return;
- }
-
const auto& input_array = model->GetArray(op->inputs[0]);
// Yield until input dims have been resolved.
if (!input_array.has_shape()) {
@@ -292,10 +250,6 @@ void ProcessTransposeConvOperator(Model* model, TransposeConvOperator* op) {
}
void ProcessDepthwiseConvOperator(Model* model, DepthwiseConvOperator* op) {
- if (!EnsureBiasVectorShape(model, op)) {
- return;
- }
-
const auto& input_array = model->GetArray(op->inputs[0]);
// Yield until input dims have been resolved.
if (!input_array.has_shape()) {
@@ -410,10 +364,6 @@ void ProcessOpWithShapeInput(Model* model, Operator* op) {
}
void ProcessFullyConnectedOperator(Model* model, FullyConnectedOperator* op) {
- if (!EnsureBiasVectorShape(model, op)) {
- return;
- }
-
const auto& input_array = model->GetArray(op->inputs[0]);
// Yield until input dims have been resolved.
if (!input_array.has_shape()) {
@@ -574,10 +524,12 @@ bool KeepDims(const Operator& op) {
switch (op.type) {
case OperatorType::kMin: // Reduction Min
return static_cast<const TensorFlowMinOperator&>(op).keep_dims;
- case OperatorType::kMax: // Reduction Max
+ case OperatorType::kReduceMax: // Reduction Max
return static_cast<const TensorFlowMaxOperator&>(op).keep_dims;
case OperatorType::kSum:
return static_cast<const TensorFlowSumOperator&>(op).keep_dims;
+ case OperatorType::kReduceProd:
+ return static_cast<const TensorFlowProdOperator&>(op).keep_dims;
case OperatorType::kMean:
return static_cast<const MeanOperator&>(op).keep_dims;
default:
@@ -1089,9 +1041,6 @@ void ProcessGatherOperator(Model* model, GatherOperator* op) {
QCHECK_GE(input_shape.dimensions_count(), 1);
op->input_rank = input_shape.dimensions_count();
- // We only support 1-D indices.
- QCHECK_EQ(indices_shape.dimensions_count(), 1);
-
// Copy the input dimensions to the output except for dimension 0,
// where the dimension of indices_shape is used.
// TODO(mgubin): if axis != 0 this is not true, change when it's supported.
@@ -1341,8 +1290,8 @@ void ProcessStridedSliceOperator(Model* model, StridedSliceOperator* op) {
op->begin_mask, op->start_indices, op->strides,
input_array.shape().dims().data(), axis);
int stop_index = tflite::strided_slice::StopForAxis(
- op->end_mask, op->stop_indices, op->strides,
- input_array.shape().dims().data(), axis);
+ op->end_mask, op->shrink_axis_mask, op->stop_indices, op->strides,
+ input_array.shape().dims().data(), axis, start_index);
int dim_size =
ceil(static_cast<float>(stop_index - start_index) / op->strides[axis]);
@@ -1457,7 +1406,8 @@ void ProcessTransposeOperator(Model* model, TransposeOperator* op) {
}
}
-void ProcessArgMaxOperator(Model* model, ArgMaxOperator* op) {
+template <typename Op>
+void ProcessArgMinMaxOperator(Model* model, Op* op) {
CHECK_EQ(op->inputs.size(), 2);
const auto& input_array = model->GetArray(op->inputs[0]);
// Yield until input dims have been resolved.
@@ -1611,6 +1561,7 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
case OperatorType::kGreaterEqual:
case OperatorType::kEqual:
case OperatorType::kNotEqual:
+ case OperatorType::kPow:
ProcessSimpleBinaryOperator(model, op);
break;
case OperatorType::kAddN:
@@ -1657,8 +1608,9 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
ProcessL2PoolOperator(model, static_cast<L2PoolOperator*>(op));
break;
case OperatorType::kMin: // Reduction Min
- case OperatorType::kMax: // Reduction Max
+ case OperatorType::kReduceMax: // Reduction Max
case OperatorType::kSum:
+ case OperatorType::kReduceProd:
case OperatorType::kMean:
ProcessTensorFlowReductionOperator(model, op);
break;
@@ -1748,7 +1700,12 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
static_cast<StridedSliceOperator*>(op));
break;
case OperatorType::kArgMax:
- ProcessArgMaxOperator(model, static_cast<ArgMaxOperator*>(op));
+ ProcessArgMinMaxOperator<ArgMaxOperator>(
+ model, static_cast<ArgMaxOperator*>(op));
+ break;
+ case OperatorType::kArgMin:
+ ProcessArgMinMaxOperator<ArgMinOperator>(
+ model, static_cast<ArgMinOperator*>(op));
break;
case OperatorType::kUnsupported:
break;
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc b/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc
index d74cad9a62..44733391f5 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc
@@ -74,46 +74,54 @@ ArrayDataType GetQuantizedDataType(const Array& array,
}
}
-void GetQuantizationParams(ArrayDataType data_type, const MinMax& minmax,
- QuantizationParams* quantization_params) {
- switch (data_type) {
+template <ArrayDataType A>
+void ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ const Array& array, QuantizationParams* quantization_params) {
+ *quantization_params = ::tflite::ChooseQuantizationParams<DataType<A>>(
+ array.minmax->min, array.minmax->max, array.narrow_range);
+}
+
+void ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ const Array& array, ArrayDataType quantized_data_type,
+ QuantizationParams* quantization_params) {
+ switch (quantized_data_type) {
case ArrayDataType::kInt8:
- GetQuantizationParamsFromMinMax<ArrayDataType::kInt8>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kInt8>(array, quantization_params);
break;
case ArrayDataType::kUint8:
- GetQuantizationParamsFromMinMax<ArrayDataType::kUint8>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kUint8>(array, quantization_params);
break;
case ArrayDataType::kInt16:
- GetQuantizationParamsFromMinMax<ArrayDataType::kInt16>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kInt16>(array, quantization_params);
break;
case ArrayDataType::kUint16:
- GetQuantizationParamsFromMinMax<ArrayDataType::kUint16>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kUint16>(array, quantization_params);
break;
case ArrayDataType::kInt32:
- GetQuantizationParamsFromMinMax<ArrayDataType::kInt32>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kInt32>(array, quantization_params);
break;
case ArrayDataType::kUint32:
- GetQuantizationParamsFromMinMax<ArrayDataType::kUint32>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kUint32>(array, quantization_params);
break;
case ArrayDataType::kInt64:
- GetQuantizationParamsFromMinMax<ArrayDataType::kInt64>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kInt64>(array, quantization_params);
break;
case ArrayDataType::kUint64:
- GetQuantizationParamsFromMinMax<ArrayDataType::kUint64>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kUint64>(array, quantization_params);
break;
case ArrayDataType::kFloat:
case ArrayDataType::kNone:
default:
LOG(FATAL) << "Unhandled final quantization type "
- << static_cast<int>(data_type);
+ << static_cast<int>(quantized_data_type);
}
}
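
The helper above follows the enum-to-template dispatch idiom used throughout toco: a runtime ArrayDataType selects a compile-time specialization that knows the concrete C++ scalar type. A minimal standalone sketch of that idiom, with made-up names (Dtype, CType, Process) rather than toco's:

    #include <cstdint>
    #include <iostream>
    #include <limits>

    enum class Dtype { kUint8, kInt16 };

    // Map each enum value to a concrete scalar type at compile time.
    template <Dtype D> struct CType;
    template <> struct CType<Dtype::kUint8> { using type = std::uint8_t; };
    template <> struct CType<Dtype::kInt16> { using type = std::int16_t; };

    // Typed worker: knows the scalar type statically.
    template <Dtype D>
    void Process() {
      using T = typename CType<D>::type;
      std::cout << +std::numeric_limits<T>::max() << "\n";  // + promotes to int
    }

    // Untyped front end: a switch routes the runtime enum to the template.
    void Process(Dtype d) {
      switch (d) {
        case Dtype::kUint8: return Process<Dtype::kUint8>();
        case Dtype::kInt16: return Process<Dtype::kInt16>();
      }
    }

    int main() { Process(Dtype::kUint8); }  // prints 255
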
@@ -121,8 +129,8 @@ namespace {
template <ArrayDataType A>
std::unique_ptr<GenericBuffer> QuantizeBuffer(
- const GenericBuffer& buffer,
- const QuantizationParams& quantization_params) {
+ const Array& array, const QuantizationParams& quantization_params) {
+ const GenericBuffer& buffer = *array.buffer;
const auto inverse_scale = 1. / quantization_params.scale;
CHECK(buffer.type == ArrayDataType::kFloat);
const auto& float_buffer =
@@ -140,8 +148,15 @@ std::unique_ptr<GenericBuffer> QuantizeBuffer(
} else {
scaled_val = quantization_params.zero_point + inverse_scale * src_val;
}
- quantized_buffer->data[i] =
- tflite::SafeCast<DataType<A>>(std::round(scaled_val));
+ auto integer_val = tflite::SafeCast<DataType<A>>(std::round(scaled_val));
+    // In addition to its effect on the choice of quantization params upstream
+    // of here, narrow_range also means nudging the min quantized value by +1,
+    // so e.g. uint8 values get constrained to [1, 255].
+ if (integer_val == std::numeric_limits<DataType<A>>::min() &&
+ array.narrow_range) {
+ integer_val++;
+ }
+ quantized_buffer->data[i] = integer_val;
}
return std::unique_ptr<GenericBuffer>(quantized_buffer);
}
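
To make the nudge above concrete, here is a small self-contained sketch of the per-element logic, under assumed parameters (scale = 0.5, zero_point = 0, uint8); it is an illustration, not toco's SafeCast-based implementation:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>

    std::uint8_t QuantizeOne(float src_val, bool narrow_range) {
      const double scale = 0.5, zero_point = 0.0;     // assumed params
      double v = std::round(zero_point + src_val / scale);
      v = std::min(255.0, std::max(0.0, v));          // clamp to uint8 range
      auto q = static_cast<std::uint8_t>(v);
      if (narrow_range && q == 0) ++q;                // the +1 nudge
      return q;
    }

    int main() {
      std::cout << +QuantizeOne(0.1f, false) << " "   // 0
                << +QuantizeOne(0.1f, true) << "\n";  // 1
    }
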
@@ -155,7 +170,7 @@ void QuantizeArray(GraphTransformation* transformation, Model* model,
CHECK(!array.quantization_params);
array.GetOrCreateQuantizationParams() = quantization_params;
if (array.buffer) {
- array.buffer = QuantizeBuffer<A>(*array.buffer, quantization_params);
+ array.buffer = QuantizeBuffer<A>(array, quantization_params);
}
array.data_type = A;
array.final_data_type = A;
@@ -210,8 +225,8 @@ bool IsArrayQuantizedRangeSubset(GraphTransformation* transformation,
} else {
// Work around cases where we are asking for this prior to the Quantize
// transformation having added the quantization_params.
- GetQuantizationParams(quantized_data_type, *array.minmax,
- &quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ array, quantized_data_type, &quantization_params);
transformation->AddMessageF(
"No quantization params - infering from data type %s with minmax "
"%g,%g as zero_point=%g, scale=%g",
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h b/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h
index 79a2ce7e50..cf093c6f17 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h
+++ b/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h
@@ -38,21 +38,11 @@ bool GetQuantizedDataTypeNumericalRange(ArrayDataType data_type,
ArrayDataType GetQuantizedDataType(const Array& array,
ArrayDataType default_type);
-// Returns the quantization params for the array with the given data type and
-// minmax.
-void GetQuantizationParams(ArrayDataType data_type, const MinMax& minmax,
- QuantizationParams* quantization_params);
-
-// Returns the quantization params for the data type and minmax values.
-template <ArrayDataType A>
-void GetQuantizationParamsFromMinMax(const MinMax& minmax,
- QuantizationParams* quantization_params) {
- using Integer = DataType<A>;
- const double rmin = minmax.min;
- const double rmax = minmax.max;
- *quantization_params =
- ::tflite::ChooseQuantizationParams<Integer>(rmin, rmax);
-}
+// Chooses the quantization params for a given array and a given target
+// quantized data type (which may not be the array's current data type).
+void ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ const Array& array, ArrayDataType quantized_data_type,
+ QuantizationParams* quantization_params);
// Quantizes an array by setting its data type and (if constant) quantizing
// all values in the array.
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc b/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc
index 1c61b8cb36..5be2757479 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc
@@ -59,7 +59,8 @@ bool SupportsQuantization(const Operator& op) {
type == OperatorType::kGreater ||
type == OperatorType::kGreaterEqual || type == OperatorType::kLess ||
type == OperatorType::kLessEqual || type == OperatorType::kSelect ||
- type == OperatorType::kArgMax;
+ type == OperatorType::kArgMax || type == OperatorType::kRelu ||
+ type == OperatorType::kRelu1 || type == OperatorType::kRelu6;
}
const MinMax& GetOrComputeMinMax(Model* model, const string& array_name) {
@@ -211,13 +212,15 @@ bool ChooseQuantizationForOperatorInput(
if (op.type == OperatorType::kLstmCell) {
if (input_index == LstmCellOperator::PREV_STATE_INPUT) {
*quantized_data_type = ArrayDataType::kInt16;
- GetQuantizationParams(*quantized_data_type, minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ array, *quantized_data_type, quantization_params);
return true;
}
}
*quantized_data_type = GetQuantizedDataType(array, ArrayDataType::kUint8);
- GetQuantizationParams(*quantized_data_type, minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ array, *quantized_data_type, quantization_params);
transformation->AddMessageF(
"For input array %s with min=%g, max=%g, chose to quantize as %s (f=%s) "
"with zero_point=%d, scale=%g",
@@ -325,12 +328,13 @@ bool ChooseQuantizationForOperatorOutput(
output, OperatorTypeName(op.type));
return true;
}
- if ((op.type == OperatorType::kDepthToSpace) ||
- (op.type == OperatorType::kSpaceToDepth) ||
- (op.type == OperatorType::kReshape) ||
- (op.type == OperatorType::kSplit) ||
- (op.type == OperatorType::kConcatenation &&
- model->flags.change_concat_input_ranges())) {
+ if ((op.type == OperatorType::kConcatenation &&
+ model->flags.change_concat_input_ranges()) ||
+ op.type == OperatorType::kDepthToSpace ||
+ op.type == OperatorType::kSpaceToDepth ||
+ op.type == OperatorType::kReshape || op.type == OperatorType::kSplit ||
+ op.type == OperatorType::kRelu || op.type == OperatorType::kRelu1 ||
+ op.type == OperatorType::kRelu6) {
int data_input_index = 0;
if (op.type == OperatorType::kSplit) {
data_input_index = 1;
@@ -356,12 +360,14 @@ bool ChooseQuantizationForOperatorOutput(
if (output_index == LstmCellOperator::STATE_OUTPUT ||
output_index == LstmCellOperator::ACTIV_TEMP) {
*quantized_data_type = ArrayDataType::kInt16;
- GetQuantizationParams(*quantized_data_type, minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ array, *quantized_data_type, quantization_params);
return true;
}
}
*quantized_data_type = GetQuantizedDataType(array, ArrayDataType::kUint8);
- GetQuantizationParams(*quantized_data_type, minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ array, *quantized_data_type, quantization_params);
transformation->AddMessageF(
"For output array %s with min=%g, max=%g"
", chose to quantize as %s with zero_point=%d"
@@ -505,36 +511,47 @@ bool Quantize::Run(Model* model, std::size_t op_index) {
// Check if the output of that Dequantize op was not used by any
// other operator. We will then erase that Dequantize op.
if (!CountOpsWithInput(*model, dequantize_op->outputs[0])) {
- // If any of the model's output_arrays was pointing to the
- // Dequantize op's output, let it point to the Dequantize op's
- // input instead.
- for (int i = 0; i < model->flags.output_arrays_size(); i++) {
- if (model->flags.output_arrays(i) == dequantize_op->outputs[0]) {
- // TODO(b/78013785): never rename output arrays.
- if (IsInputArray(*model, dequantize_op->inputs[0])) {
- // The op input is an input array and the output is an output
- // array and we can't have an array be both. Insert a copy
- // op to ensure the two arrays stay separate.
- AddMessageF(
- "Tried to rename output array %d while removing dequant "
- "op %s but array is also an input; inserting copy %s "
- "-> %s",
- i, LogName(*dequantize_op), model->flags.output_arrays(i),
- dequantize_op->inputs[0]);
- InsertCopyOperator(model, dequantize_op->inputs[0],
- dequantize_op->outputs[0]);
- } else {
- // Op output is strictly used as an output array, so we can
- // just rename the array and directly bypass the op.
- AddMessageF(
- "Renaming output array %d after removing dequant op %s: "
- "%s -> %s",
- i, LogName(*dequantize_op), model->flags.output_arrays(i),
- dequantize_op->inputs[0]);
- model->flags.set_output_arrays(i, dequantize_op->inputs[0]);
- model->EraseArray(dequantize_op->outputs[0]);
+ if (IsDiscardableArray(*model, dequantize_op->outputs[0])) {
+ // Usual case: we can just discard the dequantize output.
+ model->EraseArray(dequantize_op->outputs[0]);
+ } else {
+ // The dequantize output is not discardable. Special care needed.
+ // If any of the model's output_arrays was pointing to the
+ // Dequantize op's output, let it point to the Dequantize op's
+ // input instead.
+ for (int i = 0; i < model->flags.output_arrays_size(); i++) {
+ if (model->flags.output_arrays(i) ==
+ dequantize_op->outputs[0]) {
+ // TODO(b/78013785): never rename output arrays.
+ if (IsInputArray(*model, dequantize_op->inputs[0])) {
+ // The op input is an input array and the output is an
+ // output array and we can't have an array be both. Insert a
+ // copy op to ensure the two arrays stay separate.
+ AddMessageF(
+                "Tried to rename output array %d while removing "
+                "dequant op %s but array is also an input; "
+                "inserting copy %s "
+ "-> %s",
+ i, LogName(*dequantize_op),
+ model->flags.output_arrays(i),
+ dequantize_op->inputs[0]);
+ InsertCopyOperator(model, dequantize_op->inputs[0],
+ dequantize_op->outputs[0]);
+ } else {
+ // Op output is strictly used as an output array, so we can
+ // just rename the array and directly bypass the op.
+ AddMessageF(
+                  "Renaming output array %d after removing dequant "
+                  "op %s: "
+                  "%s -> %s",
+ i, LogName(*dequantize_op),
+ model->flags.output_arrays(i),
+ dequantize_op->inputs[0]);
+ model->flags.set_output_arrays(i, dequantize_op->inputs[0]);
+ model->EraseArray(dequantize_op->outputs[0]);
+ }
+ break;
}
- break;
}
}
model->operators.erase(dequantize_it);
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc b/tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc
index 88ea0945e7..7a8515f6d1 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc
@@ -36,10 +36,8 @@ void GetQuantizationParamsFromArray(const Array& array,
const std::vector<float>& float_vals =
array.GetBuffer<ArrayDataType::kFloat>().data;
auto minmax = std::minmax_element(float_vals.begin(), float_vals.end());
- MinMax toco_minmax;
- toco_minmax.min = *minmax.first;
- toco_minmax.max = *minmax.second;
- GetQuantizationParams(ArrayDataType::kUint8, toco_minmax, params);
+ *params = tflite::ChooseQuantizationParams<uint8>(
+ *minmax.first, *minmax.second, array.narrow_range);
}
} // namespace
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc b/tensorflow/contrib/lite/toco/graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc
new file mode 100644
index 0000000000..5b41c49bfa
--- /dev/null
+++ b/tensorflow/contrib/lite/toco/graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc
@@ -0,0 +1,78 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h"
+#include "tensorflow/contrib/lite/toco/model.h"
+#include "tensorflow/contrib/lite/toco/tooling_util.h"
+#include "tensorflow/core/platform/logging.h"
+
+namespace toco {
+
+namespace {
+
+bool ApplyAttrsToArray(GraphTransformation* transformation, Model* model,
+ const FakeQuantOperator& fq_op,
+ const string& array_name) {
+ bool changed = false;
+ auto& annotated_array = model->GetArray(array_name);
+ if (!annotated_array.minmax) {
+ const MinMax& minmax = *fq_op.minmax;
+ annotated_array.GetOrCreateMinMax() = minmax;
+ transformation->AddMessageF(
+ "Read min/max annotation for array %s: min=%g, max=%g", array_name,
+ minmax.min, minmax.max);
+ changed = true;
+ }
+ if (fq_op.narrow_range && !annotated_array.narrow_range) {
+ annotated_array.narrow_range = true;
+ transformation->AddMessageF("Read narrow_range annotation for array %s",
+ array_name);
+ changed = true;
+ }
+ return changed;
+}
+
+} // end namespace
+
+bool ReadArrayMinmaxAndNarrowRangeFromFakeQuant::Run(Model* model,
+ std::size_t op_index) {
+ const auto fakequant_it = model->operators.begin() + op_index;
+ auto* fakequant_base_op = fakequant_it->get();
+ if (fakequant_base_op->type != OperatorType::kFakeQuant) {
+ return false;
+ }
+ auto* fq_op = static_cast<FakeQuantOperator*>(fakequant_base_op);
+
+ if (!fq_op->minmax) {
+ // Need to be resolved first by ResolveFakeQuantArgsFromVars.
+ return false;
+ }
+
+ // At this point, this FakeQuantOperator should have a MinMax
+ // attached to it, and should only have 1 input (it should not have
+ // 2nd and 3rd input arrays giving min and max anymore).
+ CHECK(fq_op->minmax);
+ CHECK_EQ(1, fq_op->inputs.size());
+
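+  // Note that || short-circuits: if annotating the input array reports a
+  // change, the output array is only annotated on a later pass. The
+  // transformation runner re-runs transformations until none reports a
+  // change, so both arrays do get annotated eventually.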
+ return ApplyAttrsToArray(this, model, *fq_op, fq_op->inputs[0]) ||
+ ApplyAttrsToArray(this, model, *fq_op, fq_op->outputs[0]);
+}
+
+} // namespace toco
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/read_fake_quant_min_max.cc b/tensorflow/contrib/lite/toco/graph_transformations/read_fake_quant_min_max.cc
deleted file mode 100644
index bdcca5b7ca..0000000000
--- a/tensorflow/contrib/lite/toco/graph_transformations/read_fake_quant_min_max.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include <algorithm>
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include "tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h"
-#include "tensorflow/contrib/lite/toco/model.h"
-#include "tensorflow/contrib/lite/toco/tooling_util.h"
-#include "tensorflow/core/platform/logging.h"
-
-namespace toco {
-
-namespace {
-
-bool ApplyMinMaxToArray(GraphTransformation* transformation, Model* model,
- const MinMax& minmax, const string& array_name) {
- auto& annotated_array = model->GetArray(array_name);
- if (annotated_array.minmax) {
- return false;
- }
- annotated_array.GetOrCreateMinMax() = minmax;
- transformation->AddMessageF(
- "Read min/max annotation for array %s: min=%g, max=%g", array_name,
- minmax.min, minmax.max);
- return true;
-}
-
-} // end namespace
-
-bool ReadFakeQuantMinMax::Run(Model* model, std::size_t op_index) {
- const auto fakequant_it = model->operators.begin() + op_index;
- auto* fakequant_base_op = fakequant_it->get();
- if (fakequant_base_op->type != OperatorType::kFakeQuant) {
- return false;
- }
- auto* fakequant_op = static_cast<FakeQuantOperator*>(fakequant_base_op);
-
- bool changed = false;
-
- if (!fakequant_op->minmax) {
- CHECK_EQ(fakequant_op->inputs.size(), 3);
- // We need to yield until the min and max parameters have been
- // resolved to constant arrays.
- for (int i = 1; i <= 2; i++) {
- if (!IsConstantParameterArray(*model, fakequant_op->inputs[1])) {
- return false;
- }
- }
-
- // Obtain the final min/max values
- const auto& min_array = model->GetArray(fakequant_op->inputs[1]);
- const auto& max_array = model->GetArray(fakequant_op->inputs[2]);
- CHECK_EQ(RequiredBufferSizeForShape(min_array.shape()), 1);
- CHECK_EQ(RequiredBufferSizeForShape(max_array.shape()), 1);
- fakequant_op->minmax.reset(new MinMax);
- MinMax& minmax = *fakequant_op->minmax;
- minmax.min = min_array.GetBuffer<ArrayDataType::kFloat>().data[0];
- minmax.max = max_array.GetBuffer<ArrayDataType::kFloat>().data[0];
- // We always want [min, max] to contain 0.
- if (minmax.min > 0 || minmax.max < 0) {
- LOG(ERROR) << "For " << LogName(*fakequant_op) << " the MinMax range "
- << "[" << minmax.min << ", " << minmax.max
- << "] does not contain 0. "
- << "Proceeding by tweaking it to contain 0, which will result "
- "in poor accuracy.";
- }
- minmax.min = std::min(minmax.min, 0.);
- minmax.max = std::max(minmax.max, 0.);
-
- // We won't use the input arrays that provided these min and max
- // values, anymore. Delete them unless they are used by something
- // else.
- for (int i = 1; i <= 2; i++) {
- if (CountOpsWithInput(*model, fakequant_op->inputs[i]) == 1) {
- model->EraseArray(fakequant_op->inputs[i]);
- }
- }
- fakequant_op->inputs.resize(1);
- changed = true;
- }
-
- // At this point, this FakeQuantOperator should have a MinMax
- // attached to it, and should only have 1 input (it should not have
- // 2nd and 3rd input arrays giving min and max anymore).
- CHECK(fakequant_op->minmax);
- CHECK_EQ(1, fakequant_op->inputs.size());
-
- const MinMax& minmax = *fakequant_op->minmax;
-
- // Record the MinMax info on the input and output arrays
- changed |= ApplyMinMaxToArray(this, model, minmax, fakequant_op->inputs[0]);
- changed |= ApplyMinMaxToArray(this, model, minmax, fakequant_op->outputs[0]);
-
- return changed;
-}
-
-} // namespace toco
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_batch_to_space_nd_attributes.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_batch_to_space_nd_attributes.cc
index a06919e228..b8b35161d7 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_batch_to_space_nd_attributes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_batch_to_space_nd_attributes.cc
@@ -50,7 +50,7 @@ bool ResolveBatchToSpaceNDAttributes::Run(Model* model, std::size_t op_index) {
// will delete this op.
return false;
}
- std::vector<int> crops_buffer =
+ const std::vector<int>& crops_buffer =
crops_array.GetBuffer<ArrayDataType::kInt32>().data;
for (int i = 0; i < crops_dims[0]; ++i) {
op->before_crops.push_back(crops_buffer[i * 2]);
@@ -62,7 +62,7 @@ bool ResolveBatchToSpaceNDAttributes::Run(Model* model, std::size_t op_index) {
if (!block_shape_array.has_shape()) return false;
const std::vector<int>& block_shape_dims = block_shape_array.shape().dims();
CHECK_EQ(block_shape_dims.size(), 1);
- std::vector<int> block_shape_buffer =
+ const std::vector<int>& block_shape_buffer =
block_shape_array.GetBuffer<ArrayDataType::kInt32>().data;
for (int i = 0; i < block_shape_dims[0]; ++i) {
op->block_shape.push_back(block_shape_buffer[i]);
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc
index efb7bb2184..058f314b33 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc
@@ -25,6 +25,37 @@ limitations under the License.
namespace toco {
+template <ArrayDataType A>
+void GetBoundsForQuantizedDataType(double* min, double* max) {
+ using limits = std::numeric_limits<DataType<A>>;
+ *min = limits::min();
+ *max = limits::max();
+}
+
+void GetBoundsForQuantizedDataType(ArrayDataType quantized_data_type,
+ double* min, double* max) {
+ switch (quantized_data_type) {
+ case ArrayDataType::kUint8:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kUint8>(min, max);
+ case ArrayDataType::kInt8:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kInt8>(min, max);
+ case ArrayDataType::kUint16:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kUint16>(min, max);
+ case ArrayDataType::kInt16:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kInt16>(min, max);
+ case ArrayDataType::kUint32:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kUint32>(min, max);
+ case ArrayDataType::kInt32:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kInt32>(min, max);
+ case ArrayDataType::kUint64:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kUint64>(min, max);
+ case ArrayDataType::kInt64:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kInt64>(min, max);
+ default:
+ LOG(FATAL) << "unhandled quantized data type";
+ }
+}
+
bool ResolveConstantFakeQuant::Run(Model* model, std::size_t op_index) {
const auto fakequant_it = model->operators.begin() + op_index;
const auto* fakequant_base_op = fakequant_it->get();
@@ -76,14 +107,21 @@ bool ResolveConstantFakeQuant::Run(Model* model, std::size_t op_index) {
const int size = input_buffer.data.size();
output_buffer.data.resize(size);
QuantizationParams qparams;
- GetQuantizationParamsFromMinMax<ArrayDataType::kUint8>(*fakequant_op->minmax,
- &qparams);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ output_array, quantized_data_type, &qparams);
+ double quantized_min, quantized_max;
+ GetBoundsForQuantizedDataType(quantized_data_type, &quantized_min,
+ &quantized_max);
+ if (fakequant_op->narrow_range) {
+ quantized_min++;
+ }
+
for (int i = 0; i < size; i++) {
const double src_val = input_buffer.data[i];
const double unclamped_quantized_val =
std::round(qparams.zero_point + src_val / qparams.scale);
- const double quantized_val =
- std::min(255., std::max(0., unclamped_quantized_val));
+ const double quantized_val = std::min(
+ quantized_max, std::max(quantized_min, unclamped_quantized_val));
const double dst_val = qparams.scale * (quantized_val - qparams.zero_point);
output_buffer.data[i] = dst_val;
}
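
Numerically, the loop above performs a quantize-clamp-dequantize round trip. A worked example with assumed parameters (uint8 bounds, min = -1.0, max = 1.0, so scale = 2/255 and zero_point = 128, ignoring toco's exact nudging):

    #include <algorithm>
    #include <cmath>
    #include <iostream>

    int main() {
      const double scale = 2.0 / 255.0, zero_point = 128.0;  // assumed
      const double quantized_min = 0.0, quantized_max = 255.0;
      const double src_val = 0.3;
      const double unclamped = std::round(zero_point + src_val / scale);
      const double q =
          std::min(quantized_max, std::max(quantized_min, unclamped));
      const double dst_val = scale * (q - zero_point);
      std::cout << q << " -> " << dst_val << "\n";  // 166 -> ~0.298
    }
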
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_strided_slice.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_strided_slice.cc
index 6ee231465f..9d8bd4fc39 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_strided_slice.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_strided_slice.cc
@@ -38,6 +38,7 @@ void StridedSlice(StridedSliceOperator const& op, Array const& input_array,
CHECK_EQ(op.new_axis_mask, 0);
int num_input_axes = op.start_indices.size();
+ CHECK_EQ(num_input_axes, op.start_indices.size());
CHECK_EQ(num_input_axes, op.stop_indices.size());
CHECK_EQ(num_input_axes, op.strides.size());
@@ -49,11 +50,16 @@ void StridedSlice(StridedSliceOperator const& op, Array const& input_array,
// Initialize source coordinate
Shape const& input_shape = input_array.shape();
Buffer<Type> const& input_buffer = input_array.GetBuffer<Type>();
- std::vector<int> src_coord(op.start_indices.size());
+ std::vector<int> src_coord(num_input_axes);
+ std::vector<int> stop_for_axis(num_input_axes);
for (int axis = 0; axis < num_input_axes; axis++) {
- src_coord[axis] = tflite::strided_slice::StartForAxis(
+ int start = tflite::strided_slice::StartForAxis(
op.begin_mask, op.start_indices, op.strides, input_shape.dims().data(),
axis);
+ src_coord[axis] = start;
+ stop_for_axis[axis] = tflite::strided_slice::StopForAxis(
+ op.end_mask, op.shrink_axis_mask, op.stop_indices, op.strides,
+ input_shape.dims().data(), axis, start);
}
// In order to handle any number (N) of dimensions, we copy elements one by
@@ -76,9 +82,7 @@ void StridedSlice(StridedSliceOperator const& op, Array const& input_array,
}
// Check if we've overflowed.
- int stop = tflite::strided_slice::StopForAxis(
- op.end_mask, op.stop_indices, op.strides, input_shape.dims().data(),
- axis);
+ int stop = stop_for_axis[axis];
if (tflite::strided_slice::LoopCondition(src_coord[axis], stop, stride)) {
// Reset axis and set carry
src_coord[axis] = tflite::strided_slice::StartForAxis(
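
For context, the copy loop that consumes these precomputed bounds walks source coordinates like an odometer: advance the innermost axis by its stride, and on overflow reset it to its start and carry into the next axis. Precomputing stop_for_axis once per axis keeps StopForAxis out of that hot loop. A standalone sketch of the walk (positive strides only, made-up 2-D bounds):

    #include <iostream>
    #include <vector>

    int main() {
      const std::vector<int> start = {0, 0}, stop = {2, 3}, stride = {1, 1};
      std::vector<int> coord = start;
      while (true) {
        std::cout << "(" << coord[0] << "," << coord[1] << ") ";
        int axis = 1;                        // innermost axis first
        coord[axis] += stride[axis];
        while (coord[axis] >= stop[axis]) {  // overflow: reset and carry
          coord[axis] = start[axis];
          if (--axis < 0) { std::cout << "\n"; return 0; }  // done
          coord[axis] += stride[axis];
        }
      }
    }
    // prints: (0,0) (0,1) (0,2) (1,0) (1,1) (1,2)
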
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc
index f89ef85fdb..51099cf74a 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc
@@ -58,7 +58,7 @@ bool ResolveConstantUnaryOperator::Run(Model* model, std::size_t op_index) {
case OperatorType::kSquare:
case OperatorType::kSum:
case OperatorType::kMin: // Reduction Min
- case OperatorType::kMax: // Reduction Max
+ case OperatorType::kReduceMax: // Reduction Max
case OperatorType::kReshape:
case OperatorType::kRelu6:
case OperatorType::kRelu1:
@@ -207,7 +207,7 @@ bool ResolveConstantUnaryOperator::Run(Model* model, std::size_t op_index) {
min = std::min(min, (*input_float_data)[i]);
}
output_float_data[0] = min;
- } else if (unary_op->type == OperatorType::kMax) {
+ } else if (unary_op->type == OperatorType::kReduceMax) {
// At the moment only full reduction across all dimensions is supported.
// TODO(starka): Output should not be padded.
for (int i = 0; i < output_dims_count; i++) {
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_fake_quant_args_from_vars.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_fake_quant_args_from_vars.cc
new file mode 100644
index 0000000000..0dda1fd0b3
--- /dev/null
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_fake_quant_args_from_vars.cc
@@ -0,0 +1,80 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h"
+#include "tensorflow/contrib/lite/toco/model.h"
+#include "tensorflow/contrib/lite/toco/tooling_util.h"
+#include "tensorflow/core/platform/logging.h"
+
+namespace toco {
+
+bool ResolveFakeQuantArgsFromVars::Run(Model* model, std::size_t op_index) {
+ const auto fakequant_it = model->operators.begin() + op_index;
+ auto* fakequant_base_op = fakequant_it->get();
+ if (fakequant_base_op->type != OperatorType::kFakeQuant) {
+ return false;
+ }
+ auto* fakequant_op = static_cast<FakeQuantOperator*>(fakequant_base_op);
+
+ if (fakequant_op->minmax) {
+ // Already resolved.
+ return false;
+ }
+
+ CHECK_EQ(fakequant_op->inputs.size(), 3);
+ // We need to yield until the min and max parameters have been
+ // resolved to constant arrays.
+ for (int i = 1; i <= 2; i++) {
+ if (!IsConstantParameterArray(*model, fakequant_op->inputs[i])) {
+ return false;
+ }
+ }
+
+ // Obtain the final min/max values
+ const auto& min_array = model->GetArray(fakequant_op->inputs[1]);
+ const auto& max_array = model->GetArray(fakequant_op->inputs[2]);
+ CHECK_EQ(RequiredBufferSizeForShape(min_array.shape()), 1);
+ CHECK_EQ(RequiredBufferSizeForShape(max_array.shape()), 1);
+ fakequant_op->minmax.reset(new MinMax);
+ MinMax& minmax = *fakequant_op->minmax;
+ minmax.min = min_array.GetBuffer<ArrayDataType::kFloat>().data[0];
+ minmax.max = max_array.GetBuffer<ArrayDataType::kFloat>().data[0];
+ // We always want [min, max] to contain 0.
+ if (minmax.min > 0 || minmax.max < 0) {
+ LOG(ERROR) << "For " << LogName(*fakequant_op) << " the MinMax range "
+ << "[" << minmax.min << ", " << minmax.max
+ << "] does not contain 0. "
+ << "Proceeding by tweaking it to contain 0, which will result "
+ "in poor accuracy.";
+ }
+ minmax.min = std::min(minmax.min, 0.);
+ minmax.max = std::max(minmax.max, 0.);
+
+ // We won't use the input arrays that provided these min and max
+  // values anymore. Delete them unless they are used by something
+ // else.
+ for (int i = 1; i <= 2; i++) {
+ DeleteArrayIfUsedOnce(fakequant_op->inputs[i], model);
+ }
+ fakequant_op->inputs.resize(1);
+ return true;
+}
+
+} // namespace toco
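
One step above deserves a note: quantization schemes require the real value 0 to be exactly representable (the zero point must be an integer), which is why a [min, max] range that excludes 0 is widened to contain it, at some accuracy cost. A tiny illustration with made-up values:

    #include <algorithm>
    #include <iostream>

    int main() {
      double min = 0.2, max = 1.0;  // range read from the FakeQuant vars
      min = std::min(min, 0.);      // widened to 0.0
      max = std::max(max, 0.);      // unchanged
      std::cout << "[" << min << ", " << max << "]\n";  // [0, 1]
    }
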
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_reduce_attributes.cc
index 013b50ac9b..5f8a06ba92 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_reduce_attributes.cc
@@ -24,11 +24,8 @@ limitations under the License.
namespace toco {
-bool ResolveMeanAttributes::Run(Model* model, std::size_t op_index) {
- auto* mean_op = model->operators[op_index].get();
- if (mean_op->type != OperatorType::kMean) return false;
- auto* op = static_cast<MeanOperator*>(mean_op);
-
+template <typename T>
+bool ResolveAttributes(Model* model, T* op) {
if (!op->axis.empty()) {
// Attributes already resolved
return false;
@@ -36,10 +33,26 @@ bool ResolveMeanAttributes::Run(Model* model, std::size_t op_index) {
if (op->inputs.size() != 2) return false;
if (!IsConstantParameterArray(*model, op->inputs[1])) return false;
- const auto& indices_array = model->GetArray(op->inputs[1]);
+ const Array& indices_array = model->GetArray(op->inputs[1]);
if (!indices_array.has_shape()) return false;
op->axis = indices_array.GetBuffer<ArrayDataType::kInt32>().data;
return true;
}
+bool ResolveReduceAttributes::Run(Model* model, std::size_t op_index) {
+ Operator* op = model->operators[op_index].get();
+ switch (op->type) {
+ case OperatorType::kMean:
+ return ResolveAttributes(model, static_cast<MeanOperator*>(op));
+ case OperatorType::kSum:
+ return ResolveAttributes(model, static_cast<TensorFlowSumOperator*>(op));
+ case OperatorType::kReduceProd:
+ return ResolveAttributes(model, static_cast<TensorFlowProdOperator*>(op));
+ case OperatorType::kReduceMax:
+ return ResolveAttributes(model, static_cast<TensorFlowMaxOperator*>(op));
+ default:
+ return false;
+ }
+}
+
} // namespace toco
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_reorder_axes.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_reorder_axes.cc
index bc70db0bd8..8266e2c205 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_reorder_axes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_reorder_axes.cc
@@ -51,11 +51,12 @@ void ReorderAxes(AxesOrder input_axes_order, AxesOrder output_axes_order,
}
bool ResolveReorderAxes::Run(Model* model, std::size_t op_index) {
- auto reorder_it = model->operators.begin() + op_index;
- auto* reorder_op = static_cast<ReorderAxesOperator*>(reorder_it->get());
- if (reorder_op->type != OperatorType::kReorderAxes) {
+ auto it = model->operators.begin() + op_index;
+ auto* op = it->get();
+ if (op->type != OperatorType::kReorderAxes) {
return false;
}
+ auto* reorder_op = static_cast<ReorderAxesOperator*>(op);
const auto& input_array_name = reorder_op->inputs[0];
const auto& output_array_name = reorder_op->outputs[0];
auto& input_array = model->GetArray(input_array_name);
@@ -95,7 +96,7 @@ bool ResolveReorderAxes::Run(Model* model, std::size_t op_index) {
// Remove the op and output array.
model->EraseArray(output_array_name);
- model->operators.erase(reorder_it);
+ model->operators.erase(it);
return true;
}
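
The reordering above is a small correctness cleanup: the old code downcast to ReorderAxesOperator before checking the type tag, which is a formally invalid cast whenever the operator is something else. The safe shape of the pattern, with simplified stand-in types:

    #include <iostream>

    struct Base { int type; };
    struct Derived : Base { int extra; };
    constexpr int kDerivedTag = 1;

    Derived* AsDerived(Base* b) {
      if (b->type != kDerivedTag) return nullptr;  // check the tag first...
      return static_cast<Derived*>(b);             // ...then downcast
    }

    int main() {
      Derived d; d.type = kDerivedTag; d.extra = 7;
      Base b; b.type = 0;
      std::cout << (AsDerived(&d) != nullptr) << (AsDerived(&b) != nullptr)
                << "\n";  // 10
    }
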
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_space_to_batch_nd_attributes.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_space_to_batch_nd_attributes.cc
index dad6aceccf..fab50bec1f 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_space_to_batch_nd_attributes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_space_to_batch_nd_attributes.cc
@@ -53,7 +53,7 @@ bool ResolveSpaceToBatchNDAttributes::Run(Model* model, std::size_t op_index) {
// will delete this op.
return false;
}
- std::vector<int> paddings_buffer =
+ const std::vector<int>& paddings_buffer =
paddings_array.GetBuffer<ArrayDataType::kInt32>().data;
for (int i = 0; i < paddings_dims[0]; ++i) {
op->before_paddings.push_back(paddings_buffer[i * 2]);
@@ -66,7 +66,7 @@ bool ResolveSpaceToBatchNDAttributes::Run(Model* model, std::size_t op_index) {
if (!block_shape_array.has_shape()) return false;
const std::vector<int>& block_shape_dims = block_shape_array.shape().dims();
CHECK_EQ(block_shape_dims.size(), 1);
- std::vector<int> block_shape_buffer =
+ const std::vector<int>& block_shape_buffer =
block_shape_array.GetBuffer<ArrayDataType::kInt32>().data;
for (int i = 0; i < block_shape_dims[0]; ++i) {
op->block_shape.push_back(block_shape_buffer[i]);
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_tensorflow_matmul.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_tensorflow_matmul.cc
index d496f5ae5e..fcf30bd347 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_tensorflow_matmul.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_tensorflow_matmul.cc
@@ -32,21 +32,34 @@ bool ResolveTensorFlowMatMul::Run(Model* model, std::size_t op_index) {
const auto* matmul_op =
static_cast<const TensorFlowMatMulOperator*>(matmul_it->get());
+ // Handling transposition of the first input here isn't very simple because
+ // we need to know the actual shape in order to produce a proper
+ // TransposeOperator. However, the second input is supposed to be 2D, so we
+ // can actually handle transposition of that matrix, which happens to be more
+ // common anyway.
+ CHECK(!matmul_op->transpose_a);
+
// Reorder the axes on the second input. TensorFlow uses row-major ordering
// on both inputs, however this is inefficient for the FullyConnected
// operator. We'll transpose the second input to be in column-major order now
// and let constant propagation optimize things (if possible).
- auto* transpose_op = new TransposeOperator;
- transpose_op->inputs = {
- matmul_op->inputs[1],
- CreateInt32Array(
- model,
- AvailableArrayName(*model, matmul_op->inputs[1] + "/transpose/perm"),
- {1, 0})};
- transpose_op->outputs = {
- AvailableArrayName(*model, matmul_op->inputs[1] + "/transpose")};
- model->GetOrCreateArray(transpose_op->outputs[0]);
- model->operators.emplace(matmul_it, transpose_op);
+ string input_lhs = matmul_op->inputs[0];
+ string input_rhs = matmul_op->inputs[1];
+ if (!matmul_op->transpose_b) {
+ auto* transpose_op = new TransposeOperator;
+ transpose_op->inputs = {
+ matmul_op->inputs[1],
+ CreateInt32Array(model,
+ AvailableArrayName(
+ *model, matmul_op->inputs[1] + "/transpose/perm"),
+ {1, 0})};
+ transpose_op->outputs = {
+ AvailableArrayName(*model, matmul_op->inputs[1] + "/transpose")};
+ model->GetOrCreateArray(transpose_op->outputs[0]);
+ model->operators.emplace(matmul_it, transpose_op);
+
+ input_rhs = transpose_op->outputs[0];
+ }
// Refresh iterator.
matmul_it = model->operators.begin();
@@ -57,9 +70,6 @@ bool ResolveTensorFlowMatMul::Run(Model* model, std::size_t op_index) {
}
DCHECK_EQ(matmul_it->get(), matmul_op);
- string input_lhs = matmul_op->inputs[0];
- string input_rhs = transpose_op->outputs[0];
-
// Construct the new FullyConnectedOperator.
auto* fc_op = new FullyConnectedOperator;
fc_op->outputs = matmul_op->outputs;
diff --git a/tensorflow/contrib/lite/toco/import_tensorflow.cc b/tensorflow/contrib/lite/toco/import_tensorflow.cc
index da7e5add7e..2ffab49e7a 100644
--- a/tensorflow/contrib/lite/toco/import_tensorflow.cc
+++ b/tensorflow/contrib/lite/toco/import_tensorflow.cc
@@ -378,7 +378,7 @@ tensorflow::Status ImportBoolArray(const TensorProto& input_tensor,
for (int i = 0; i < input_flat_size; i++) {
output_bool_data[i] = input_tensor.bool_val(0);
}
- } else if (input_tensor.int_val_size() == input_flat_size) {
+ } else if (input_tensor.bool_val_size() == input_flat_size) {
for (int i = 0; i < input_tensor.bool_val_size(); i++) {
output_bool_data[i] = input_tensor.bool_val(i);
}
@@ -755,6 +755,9 @@ tensorflow::Status ConvertFakeQuantWithMinMaxArgs(
op->outputs.push_back(node.name());
// tf.fake_quant_with_min_max_args num_bits defaults to 8.
op->num_bits = HasAttr(node, "num_bits") ? GetIntAttr(node, "num_bits") : 8;
+ if (HasAttr(node, "narrow_range")) {
+ op->narrow_range = GetBoolAttr(node, "narrow_range");
+ }
model->operators.emplace_back(op);
return tensorflow::Status::OK();
}
@@ -774,6 +777,9 @@ tensorflow::Status ConvertFakeQuantWithMinMaxVars(
}
op->outputs.push_back(node.name());
op->num_bits = HasAttr(node, "num_bits") ? GetIntAttr(node, "num_bits") : 8;
+ if (HasAttr(node, "narrow_range")) {
+ op->narrow_range = GetBoolAttr(node, "narrow_range");
+ }
model->operators.emplace_back(op);
return tensorflow::Status::OK();
}
@@ -799,22 +805,6 @@ tensorflow::Status ConvertSqueezeOperator(
return tensorflow::Status::OK();
}
-tensorflow::Status ConvertSumOperator(
- const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
- Model* model) {
- CHECK_EQ(node.op(), "Sum");
- TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
- auto* op = new TensorFlowSumOperator;
- op->inputs.push_back(node.input(0));
- op->inputs.push_back(node.input(1));
- op->outputs.push_back(node.name());
- model->operators.emplace_back(op);
- if (HasAttr(node, "keep_dims")) {
- op->keep_dims = GetBoolAttr(node, "keep_dims");
- }
- return tensorflow::Status::OK();
-}
-
tensorflow::Status ConvertSplitOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
Model* model) {
@@ -984,18 +974,19 @@ tensorflow::Status ConvertMatMulOperator(
Model* model) {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
- // Transpose flags should be easy to support, but we don't have a
- // GraphDef with them to test on at the moment.
- CHECK_EQ(HasAttr(node, "transpose_a") && GetBoolAttr(node, "transpose_a"),
- false);
- CHECK_EQ(HasAttr(node, "transpose_b") && GetBoolAttr(node, "transpose_b"),
- false);
CHECK(!HasAttr(node, "adjoint_a") ||
(GetBoolAttr(node, "adjoint_a") == false));
CHECK(!HasAttr(node, "adjoint_b") ||
(GetBoolAttr(node, "adjoint_b") == false));
auto* matmul = new TensorFlowMatMulOperator;
+ if (HasAttr(node, "transpose_a")) {
+ matmul->transpose_a = GetBoolAttr(node, "transpose_a");
+ }
+ if (HasAttr(node, "transpose_b")) {
+ matmul->transpose_b = GetBoolAttr(node, "transpose_b");
+ }
+
matmul->inputs = {node.input(0), node.input(1)};
matmul->outputs = {node.name()};
model->operators.emplace_back(matmul);
@@ -1051,22 +1042,6 @@ tensorflow::Status ConvertSimpleOperator(
return ConvertSimpleOperator<Op>(node, tf_import_flags, model);
}
-tensorflow::Status ConvertMaxOperator(
- const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
- Model* model) {
- CHECK_EQ(node.op(), "Max");
- TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
- auto* op = new TensorFlowMaxOperator;
- op->inputs.push_back(node.input(0));
- op->inputs.push_back(node.input(1));
- op->outputs.push_back(node.name());
- model->operators.emplace_back(op);
- if (HasAttr(node, "keep_dims")) {
- op->keep_dims = GetBoolAttr(node, "keep_dims");
- }
- return tensorflow::Status::OK();
-}
-
tensorflow::Status ConvertMinOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
Model* model) {
@@ -1229,10 +1204,11 @@ tensorflow::Status ConvertGatherOperator(
return tensorflow::Status::OK();
}
-tensorflow::Status ConvertArgMaxOperator(
+template <typename Op, const char* op_name>
+tensorflow::Status ConvertArgMinMaxOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
Model* model) {
- CHECK_EQ(node.op(), "ArgMax");
+ CHECK_EQ(node.op(), op_name);
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
const auto axis_data_type =
HasAttr(node, "Tidx") ? GetDataTypeAttr(node, "Tidx") : DT_INT32;
@@ -1241,7 +1217,7 @@ tensorflow::Status ConvertArgMaxOperator(
: DT_INT64;
CHECK(axis_data_type == DT_INT64 || axis_data_type == DT_INT32);
CHECK(output_type == DT_INT64 || output_type == DT_INT32);
- auto* op = new ArgMaxOperator;
+ auto* op = new Op;
op->output_data_type = ConvertDataType(output_type);
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
@@ -1404,12 +1380,12 @@ tensorflow::Status ConvertBatchToSpaceNDOperator(
return tensorflow::Status::OK();
}
-tensorflow::Status ConvertMeanOperator(
+template <typename T>
+tensorflow::Status ConvertReduceOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
Model* model) {
- CHECK_EQ(node.op(), "Mean");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
- auto* op = new MeanOperator;
+ auto* op = new T;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
@@ -1832,12 +1808,16 @@ using ConverterType = tensorflow::Status (*)(
Model* model);
using ConverterMapType = std::unordered_map<std::string, ConverterType>;
+constexpr char kArgMax[] = "ArgMax";
+constexpr char kArgMin[] = "ArgMin";
+
ConverterMapType GetTensorFlowNodeConverterMap() {
return std::unordered_map<std::string, ConverterType>({
{"Add", ConvertSimpleOperator<AddOperator, 2>},
{"AddN", ConvertSimpleOperator<AddNOperator>},
{"All", ConvertSimpleOperator<TensorFlowAllOperator>},
- {"ArgMax", ConvertArgMaxOperator},
+ {"ArgMax", ConvertArgMinMaxOperator<ArgMaxOperator, kArgMax>},
+ {"ArgMin", ConvertArgMinMaxOperator<ArgMinOperator, kArgMin>},
{"Assert", ConvertSimpleOperator<TensorFlowAssertOperator>},
{"AvgPool", ConvertAvgPoolOperator},
{"BatchMatMul", ConvertBatchMatMulOperator},
@@ -1881,10 +1861,10 @@ ConverterMapType GetTensorFlowNodeConverterMap() {
{"Log", ConvertSimpleOperator<LogOperator, 1>},
{"LogSoftmax", ConvertSimpleOperator<LogSoftmaxOperator, 1>},
{"MatMul", ConvertMatMulOperator},
- {"Max", ConvertMaxOperator},
+ {"Max", ConvertReduceOperator<TensorFlowMaxOperator>},
{"MaxPool", ConvertMaxPoolOperator},
{"Maximum", ConvertSimpleOperator<TensorFlowMaximumOperator, 2>},
- {"Mean", ConvertMeanOperator},
+ {"Mean", ConvertReduceOperator<MeanOperator>},
{"Merge", ConvertSimpleOperator<TensorFlowMergeOperator, 2>},
{"Min", ConvertMinOperator},
{"Minimum", ConvertSimpleOperator<TensorFlowMinimumOperator, 2>},
@@ -1899,6 +1879,8 @@ ConverterMapType GetTensorFlowNodeConverterMap() {
{"ParallelDynamicStitch", ConvertDynamicStitchOperator},
{"Placeholder", ConvertPlaceholderOperator},
{"PlaceholderWithDefault", ConvertIdentityOperator},
+ {"Pow", ConvertSimpleOperator<PowOperator, 2>},
+ {"Prod", ConvertReduceOperator<TensorFlowProdOperator>},
{"RandomUniform", ConvertRandomUniform},
{"Range", ConvertRangeOperator},
{"Rank", ConvertSimpleOperator<RankOperator, 1>},
@@ -1925,7 +1907,7 @@ ConverterMapType GetTensorFlowNodeConverterMap() {
{"StopGradient", ConvertIdentityOperator},
{"StridedSlice", ConvertStridedSliceOperator},
{"Sub", ConvertSimpleOperator<SubOperator, 2>},
- {"Sum", ConvertSumOperator},
+ {"Sum", ConvertReduceOperator<TensorFlowSumOperator>},
{"Svdf", ConvertSvdfOperator},
{"Switch", ConvertSwitchOperator},
{"Tanh", ConvertSimpleOperator<TanhOperator, 1>},
diff --git a/tensorflow/contrib/lite/toco/model.h b/tensorflow/contrib/lite/toco/model.h
index 89cb061499..37f4188cf7 100644
--- a/tensorflow/contrib/lite/toco/model.h
+++ b/tensorflow/contrib/lite/toco/model.h
@@ -15,6 +15,7 @@ limitations under the License.
#ifndef TENSORFLOW_CONTRIB_LITE_TOCO_MODEL_H_
#define TENSORFLOW_CONTRIB_LITE_TOCO_MODEL_H_
+#include <complex>
#include <functional>
#include <initializer_list>
#include <memory>
@@ -84,6 +85,7 @@ enum class OperatorType : uint8 {
kBatchToSpaceND,
kPad,
kPadV2,
+ kReduceProd, // Reduction product
kStridedSlice,
kSlice,
kSqueeze,
@@ -105,10 +107,10 @@ enum class OperatorType : uint8 {
kIdentity,
kLess,
kLessEqual,
- kMax, // Reduction Max
- kMaximum, // Element-wise Maximum
- kMin, // Reduction Min
- kMinimum, // Element-wise Minimum
+ kReduceMax, // Reduction Max
+ kMaximum, // Element-wise Maximum
+ kMin, // Reduction Min
+ kMinimum, // Element-wise Minimum
kMatMul,
kMerge,
kNeg,
@@ -138,6 +140,8 @@ enum class OperatorType : uint8 {
kSparseToDense,
kEqual,
kNotEqual,
+ kPow,
+ kArgMin,
};
// Helper to deal with TensorFlow arrays using a different ordering of
@@ -160,15 +164,16 @@ enum class AxesOrder {
// The type of the scalars in an array.
// Note that the type does not by itself tell whether the values in the array
-// are real (are literally interpreted as real numbers) or quantized (only
-// acquire a meaning as real numbers in conjunction with QuantizationParams).
+// are non-quantized (can be accessed directly) or quantized (must be
+// interpreted in conjunction with QuantizationParams).
//
// In practice though:
-// float values are always real
+// float values are never quantized
// uint8 values are always quantized
-// int32 values are either real or quantized (depending on whether
+// int32 values are sometimes quantized (depending on whether
// QuantizationParams are present).
-// other types are unused at the moment.
+// complex values are never quantized
+// other types are never quantized at the moment.
//
// kNone means that we don't know the data type yet, or that we don't care
// because we'll be dropping the array anyway (e.g. some exotic array types
@@ -186,7 +191,8 @@ enum class ArrayDataType : uint8 {
kUint32,
kInt64,
kUint64, // 10
- kString
+ kString,
+ kComplex64,
};
// Compile-time logic to map ArrayDataType to the corresponding C++ scalar type
@@ -240,6 +246,10 @@ template <>
struct DataTypeImpl<ArrayDataType::kString> {
typedef string Type;
};
+template <>
+struct DataTypeImpl<ArrayDataType::kComplex64> {
+ typedef std::complex<float> Type;
+};
template <ArrayDataType A>
using DataType = typename DataTypeImpl<A>::Type;
@@ -782,6 +792,7 @@ struct FakeQuantOperator : Operator {
FakeQuantOperator() : Operator(OperatorType::kFakeQuant) {}
std::unique_ptr<MinMax> minmax;
int num_bits = 8;
+ bool narrow_range = false;
};
// Element-wise division operator.
@@ -829,6 +840,8 @@ struct BatchMatMulOperator : Operator {
// TensorFlow equivalent: MatMul
struct TensorFlowMatMulOperator : Operator {
TensorFlowMatMulOperator() : Operator(OperatorType::kMatMul) {}
+ bool transpose_a = false;
+ bool transpose_b = false;
};
// Padding operator. Pads a tensor with zeros.
@@ -1217,6 +1230,19 @@ struct SubOperator : Operator {
// TensorFlow equivalent: Sum
struct TensorFlowSumOperator : Operator {
TensorFlowSumOperator() : Operator(OperatorType::kSum) {}
+ std::vector<int> axis;
+ bool keep_dims = false;
+};
+
+// Prod reduction: computes the product of all entries across the axes.
+//
+// Inputs:
+// inputs[0]: required: the input array
+//
+// TensorFlow equivalent: Prod
+struct TensorFlowProdOperator : Operator {
+ TensorFlowProdOperator() : Operator(OperatorType::kReduceProd) {}
+ std::vector<int> axis;
bool keep_dims = false;
};
@@ -1376,16 +1402,15 @@ struct TensorFlowNotEqualOperator : Operator {
TensorFlowNotEqualOperator() : Operator(OperatorType::kNotEqual) {}
};
-// Global max reduction: computes the max of all of entries in the input array.
-// Thus the output is "0-dimensional": it consists of a single scalar value.
+// Max reduction: computes the max of all entries across the axes.
//
// Inputs:
// inputs[0]: required: the input array
//
-// TensorFlow equivalent: Max --- except that we only support the special case
-// of global reduction across all dimensions.
+// TensorFlow equivalent: Max
struct TensorFlowMaxOperator : Operator {
- TensorFlowMaxOperator() : Operator(OperatorType::kMax) {}
+ TensorFlowMaxOperator() : Operator(OperatorType::kReduceMax) {}
+ std::vector<int> axis;
bool keep_dims = false;
};
@@ -1518,6 +1543,17 @@ struct ArgMaxOperator : Operator {
ArrayDataType output_data_type = ArrayDataType::kInt64;
};
+// ArgMin operator. It returns the index of the minimum value along axis.
+//
+// Inputs:
+// inputs[0]: required: the input tensor
+//
+// TensorFlow equivalent: ArgMin
+struct ArgMinOperator : Operator {
+ ArgMinOperator() : Operator(OperatorType::kArgMin) {}
+ ArrayDataType output_data_type = ArrayDataType::kInt64;
+};
+
// ResizeBilinear operator. It resizes input images with bilinear interpolation.
// It does not support align_corners at the moment.
//
@@ -1637,6 +1673,17 @@ struct SparseToDenseOperator : Operator {
bool validate_indices;
};
+// Pow operator:
+//
+// Inputs:
+//   inputs[0]: required: A tensor.
+//   inputs[1]: required: A tensor.
+//
+// TensorFlow equivalent: Pow.
+struct PowOperator : Operator {
+ PowOperator() : Operator(OperatorType::kPow) {}
+};
+
// Allocs are used for transient arrays only. An Alloc specifies which interval
// of the "transient_data" workspace buffer passed to inference functions is to
// be used for the transient array at hand. The 'start' and 'end' values are
@@ -1821,6 +1868,40 @@ struct Array {
// If this is non-null, then these quantization parameters are to be used
// to assign a meaning as real numbers to the elements of this array.
std::unique_ptr<QuantizationParams> quantization_params;
+ // narrow_range is a detail of how toco handles FakeQuant operators with
+ // narrow_range, see
+ // https://www.tensorflow.org/api_docs/python/tf/fake_quant_with_min_max_vars
+ //
+ // For more context about what that is useful for, see the big comment in
+ // graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc
+ //
+ // The narrow_range flag applies only to quantized arrays, and changes
+ // their quantization in the following way when it is set to 'true':
+ // 1. The computation of {zero_point, scale} from {min, max} needs to be
+ // amended so that the real min value will get quantized to
+ // (min_quantized_value + 1) instead of just (min_quantized_value).
+ // E.g. for uint8 quantization, the real min value should get quantized to
+ // the uint8 value 1, not 0.
+ // 2. Quantized values should get clamped to the interval
+  //    [min_quantized_value + 1, max_quantized_value]. Equivalently, the
+ // min_quantized_value should get nudged to (min_quantized_value + 1).
+ // The reason why 1. does not imply 2. is that real values may not belong to
+ // the stated [min, max] interval. Concretely, weights recorded at the last
+ // learning step may not fall in the [min, max] interval recorded over
+ // previous learning steps, as the values evolve across learning steps.
+ //
+ // Rationale why this is directly a field on Array:
+ // - This can't be just a field on FakeQuantOperator, because
+ // FakeQuantOperators are gone (DropFakeQuant) before we get to using that
+ // information (Quantize). We need a place to store that bit in the interim.
+ // - This can't be in QuantizationParams because we need to record this
+ // ahead of quantization, and QuantizationParams are only created during
+ // quantization.
+ // - This could be in MinMax, but that would be an abuse of what MinMax is
+ // about, and would break existing code that assumes that a MinMax is just
+ // a min and a max. Unlike MinMax which is agnostic as to the quantized
+ // data type, narrow_range refers to values in the quantized data type.
+ bool narrow_range = false;
private:
std::unique_ptr<Shape> array_shape;
diff --git a/tensorflow/contrib/lite/toco/model_cmdline_flags.cc b/tensorflow/contrib/lite/toco/model_cmdline_flags.cc
index 4c9f1aa4b0..06072d1fcb 100644
--- a/tensorflow/contrib/lite/toco/model_cmdline_flags.cc
+++ b/tensorflow/contrib/lite/toco/model_cmdline_flags.cc
@@ -74,10 +74,10 @@ bool ParseModelFlagsFromCommandLineFlags(
"height, input array width, input array depth."),
Flag("batch_size", parsed_flags.batch_size.bind(),
parsed_flags.batch_size.default_value(),
- "Batch size for the model. Replaces the first dimension of an "
- "input size array if undefined. Use only with SavedModels when "
- "--input_shapes flag is not specified. Always use --input_shapes "
- "flag with frozen graphs."),
+ "Deprecated. Batch size for the model. Replaces the first dimension "
+ "of an input size array if undefined. Use only with SavedModels "
+ "when --input_shapes flag is not specified. Always use "
+ "--input_shapes flag with frozen graphs."),
Flag("input_data_type", parsed_flags.input_data_type.bind(),
parsed_flags.input_data_type.default_value(),
"Deprecated: use --input_data_types instead. Input array type, if "
diff --git a/tensorflow/contrib/lite/toco/tflite/export.cc b/tensorflow/contrib/lite/toco/tflite/export.cc
index 1972246807..5ad307af14 100644
--- a/tensorflow/contrib/lite/toco/tflite/export.cc
+++ b/tensorflow/contrib/lite/toco/tflite/export.cc
@@ -336,17 +336,13 @@ void Export(
auto op_codes = ExportOperatorCodes(model, ops_by_type, operators_map,
&builder, &error_summary);
- const string fake_quant_operation_name = "FAKE_QUANT";
-
- if (error_summary.count(fake_quant_operation_name) != 0) {
- LOG(ERROR)
- << fake_quant_operation_name
- << " operation was not converted. If running quantized make sure you "
- "are passing --inference_type=QUANTIZED_UINT8 and values for "
- "--std_values and --mean_values.";
- // Remove the fake quant operation from the errors, since it shouldn't
- // be provided a custom implementation.
- error_summary.erase(fake_quant_operation_name);
+ for (const auto& op : model.operators) {
+ if (op->type == OperatorType::kFakeQuant) {
+ LOG(WARNING) << "FAKE_QUANT operation " << LogName(*op)
+ << " was not converted. If running quantized make sure you "
+ "are passing --inference_type=QUANTIZED_UINT8 and values "
+ "for --std_values and --mean_values.";
+ }
}
if (!allow_custom_ops && !error_summary.empty()) {
// Remove ExpandDims and ReorderAxes from unimplemented list unless they
diff --git a/tensorflow/contrib/lite/toco/tflite/import.cc b/tensorflow/contrib/lite/toco/tflite/import.cc
index d1867bd4fa..1dd4915b31 100644
--- a/tensorflow/contrib/lite/toco/tflite/import.cc
+++ b/tensorflow/contrib/lite/toco/tflite/import.cc
@@ -221,6 +221,8 @@ std::unique_ptr<Model> Import(const ModelFlags& model_flags,
model.get());
ImportIOTensors(*input_model, tensors_table, model.get());
+ UndoWeightsShuffling(model.get());
+
return model;
}
diff --git a/tensorflow/contrib/lite/toco/tflite/operator.cc b/tensorflow/contrib/lite/toco/tflite/operator.cc
index 290a925c1e..68d13586f1 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator.cc
@@ -282,25 +282,31 @@ class DepthToSpace : public CustomOperator<DepthToSpaceOperator> {
int GetVersion(const Operator& op) const override { return 1; }
};
-class FakeQuant : public CustomOperator<FakeQuantOperator> {
+class FakeQuant
+ : public BuiltinOperator<FakeQuantOperator, ::tflite::FakeQuantOptions,
+ ::tflite::BuiltinOptions_FakeQuantOptions> {
public:
- using CustomOperator::CustomOperator;
- void WriteOptions(const TocoOperator& op,
- flexbuffers::Builder* fbb) const override {
- fbb->Float("min", op.minmax->min);
- fbb->Float("max", op.minmax->max);
- fbb->Int("num_bits", op.num_bits);
+ using BuiltinOperator::BuiltinOperator;
+ flatbuffers::Offset<TfLiteOptions> WriteOptions(
+ const TocoOperator& op,
+ flatbuffers::FlatBufferBuilder* builder) const override {
+ return ::tflite::CreateFakeQuantOptions(
+ *builder, op.minmax->min, op.minmax->max, op.num_bits, op.narrow_range);
}
- void ReadOptions(const flexbuffers::Map& m, TocoOperator* op) const override {
+ void ReadOptions(const TfLiteOptions& options,
+ TocoOperator* op) const override {
auto* minmax = new MinMax;
- minmax->min = m["min"].AsFloat();
- minmax->max = m["max"].AsFloat();
+ minmax->min = options.min();
+ minmax->max = options.max();
op->minmax.reset(minmax);
- const auto& num_bits = m["num_bits"];
- op->num_bits = num_bits.IsInt() ? num_bits.AsInt32() : 8;
+ op->num_bits = options.num_bits();
+ op->narrow_range = options.narrow_range();
}
- int GetVersion(const Operator& op) const override { return 1; }
+ int GetVersion(const Operator& op) const override {
+ const auto& fq_op = static_cast<const FakeQuantOperator&>(op);
+ return fq_op.narrow_range ? 2 : 1;
+ }
};
class FullyConnected
@@ -314,16 +320,47 @@ class FullyConnected
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
- return ::tflite::CreateFullyConnectedOptions(*builder, activation_function);
+ ::tflite::FullyConnectedOptionsWeightsFormat tflite_weights_format;
+ switch (op.weights_format) {
+ case FullyConnectedWeightsFormat::kDefault:
+ tflite_weights_format =
+ ::tflite::FullyConnectedOptionsWeightsFormat_DEFAULT;
+ break;
+ case FullyConnectedWeightsFormat::kShuffled4x16Int8:
+ tflite_weights_format =
+ ::tflite::FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8;
+ break;
+ default:
+ LOG(ERROR) << "Unhandled FC weights format";
+ tflite_weights_format =
+ ::tflite::FullyConnectedOptionsWeightsFormat_DEFAULT;
+ }
+ return ::tflite::CreateFullyConnectedOptions(*builder, activation_function,
+ tflite_weights_format);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
+ switch (options.weights_format()) {
+ case ::tflite::FullyConnectedOptionsWeightsFormat_DEFAULT:
+ op->weights_format = FullyConnectedWeightsFormat::kDefault;
+ break;
+ case ::tflite::FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
+ op->weights_format = FullyConnectedWeightsFormat::kShuffled4x16Int8;
+ break;
+ default:
+ LOG(ERROR) << "Unhandled FC weights format";
+ op->weights_format = FullyConnectedWeightsFormat::kDefault;
+ }
}
- int GetVersion(const Operator& op) const override { return 1; }
+ int GetVersion(const Operator& op) const override {
+ const auto& fc_op = static_cast<const FullyConnectedOperator&>(op);
+ return fc_op.weights_format == FullyConnectedWeightsFormat::kDefault ? 1
+ : 2;
+ }
};
class Gather : public BuiltinOperator<GatherOperator, ::tflite::GatherOptions,
@@ -730,6 +767,44 @@ class Sum
int GetVersion(const Operator& op) const override { return 1; }
};
+class ReduceMax
+ : public BuiltinOperator<TensorFlowSumOperator, ::tflite::ReducerOptions,
+ ::tflite::BuiltinOptions_ReducerOptions> {
+ public:
+ using BuiltinOperator::BuiltinOperator;
+ flatbuffers::Offset<TfLiteOptions> WriteOptions(
+ const TocoOperator& op,
+ flatbuffers::FlatBufferBuilder* builder) const override {
+ return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
+ }
+
+ void ReadOptions(const TfLiteOptions& options,
+ TocoOperator* op) const override {
+ op->keep_dims = options.keep_dims();
+ }
+
+ int GetVersion(const Operator& op) const override { return 1; }
+};
+
+class ReduceProd
+ : public BuiltinOperator<TensorFlowSumOperator, ::tflite::ReducerOptions,
+ ::tflite::BuiltinOptions_ReducerOptions> {
+ public:
+ using BuiltinOperator::BuiltinOperator;
+ flatbuffers::Offset<TfLiteOptions> WriteOptions(
+ const TocoOperator& op,
+ flatbuffers::FlatBufferBuilder* builder) const override {
+ return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
+ }
+
+ void ReadOptions(const TfLiteOptions& options,
+ TocoOperator* op) const override {
+ op->keep_dims = options.keep_dims();
+ }
+
+ int GetVersion(const Operator& op) const override { return 1; }
+};
+
class ResizeBilinear
: public BuiltinOperator<ResizeBilinearOperator,
::tflite::ResizeBilinearOptions,
@@ -854,6 +929,25 @@ class ArgMax : public BuiltinOperator<ArgMaxOperator, ::tflite::ArgMaxOptions,
int GetVersion(const Operator& op) const override { return 1; }
};
+class ArgMin : public BuiltinOperator<ArgMinOperator, ::tflite::ArgMinOptions,
+ ::tflite::BuiltinOptions_ArgMinOptions> {
+ public:
+ using BuiltinOperator::BuiltinOperator;
+ flatbuffers::Offset<TfLiteOptions> WriteOptions(
+ const TocoOperator& op,
+ flatbuffers::FlatBufferBuilder* builder) const override {
+ return ::tflite::CreateArgMinOptions(
+ *builder, DataType::Serialize(op.output_data_type));
+ }
+
+ void ReadOptions(const TfLiteOptions& options,
+ TocoOperator* op) const override {
+ op->output_data_type = DataType::Deserialize(options.output_type());
+ }
+
+ int GetVersion(const Operator& op) const override { return 1; }
+};
+
class TransposeConv
: public BuiltinOperator<TransposeConvOperator,
::tflite::TransposeConvOptions,
@@ -1127,6 +1221,10 @@ std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList() {
ops.emplace_back(
new Mean(::tflite::BuiltinOperator_MEAN, OperatorType::kMean));
ops.emplace_back(new Sum(::tflite::BuiltinOperator_SUM, OperatorType::kSum));
+ ops.emplace_back(new ReduceProd(::tflite::BuiltinOperator_REDUCE_PROD,
+ OperatorType::kReduceProd));
+ ops.emplace_back(new ReduceMax(::tflite::BuiltinOperator_REDUCE_MAX,
+ OperatorType::kReduceMax));
ops.emplace_back(new ResizeBilinear(::tflite::BuiltinOperator_RESIZE_BILINEAR,
OperatorType::kResizeBilinear));
ops.emplace_back(
@@ -1144,6 +1242,8 @@ std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList() {
ops.emplace_back(
new ArgMax(::tflite::BuiltinOperator_ARG_MAX, OperatorType::kArgMax));
ops.emplace_back(
+ new ArgMin(::tflite::BuiltinOperator_ARG_MIN, OperatorType::kArgMin));
+ ops.emplace_back(
new Tile(::tflite::BuiltinOperator_TILE, OperatorType::kTile));
ops.emplace_back(new ExpandDims(::tflite::BuiltinOperator_EXPAND_DIMS,
OperatorType::kExpandDims));
@@ -1153,11 +1253,12 @@ std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList() {
OperatorType::kSparseToDense));
ops.emplace_back(
new Shape(::tflite::BuiltinOperator_SHAPE, OperatorType::kShape));
+ ops.emplace_back(new FakeQuant(::tflite::BuiltinOperator_FAKE_QUANT,
+ OperatorType::kFakeQuant));
// Custom Operators.
ops.emplace_back(
new DepthToSpace("DEPTH_TO_SPACE", OperatorType::kDepthToSpace));
- ops.emplace_back(new FakeQuant("FAKE_QUANT", OperatorType::kFakeQuant));
ops.emplace_back(new TensorFlowUnsupported("TENSORFLOW_UNSUPPORTED",
OperatorType::kUnsupported));
@@ -1206,6 +1307,7 @@ std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList() {
new SimpleOperator<SelectOperator>("SELECT", OperatorType::kSelect));
ops.emplace_back(
new SimpleOperator<SliceOperator>("SLICE", OperatorType::kSlice));
+ ops.emplace_back(new SimpleOperator<PowOperator>("POW", OperatorType::kPow));
// Element-wise operator
ops.emplace_back(new SimpleOperator<SinOperator>("SIN", OperatorType::kSin));
ops.emplace_back(new SimpleOperator<LogOperator>("LOG", OperatorType::kLog));
diff --git a/tensorflow/contrib/lite/toco/tflite/operator_test.cc b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
index 79c8e5d738..ff2d35b1f5 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator_test.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
@@ -126,6 +126,7 @@ TEST_F(OperatorTest, SimpleOperators) {
CheckSimpleOperator<LogOperator>("LOG", OperatorType::kLog);
CheckSimpleOperator<TensorFlowSqrtOperator>("SQRT", OperatorType::kSqrt);
CheckSimpleOperator<TensorFlowRsqrtOperator>("RSQRT", OperatorType::kRsqrt);
+ CheckSimpleOperator<PowOperator>("POW", OperatorType::kPow);
}
TEST_F(OperatorTest, BuiltinAdd) {
@@ -415,6 +416,13 @@ TEST_F(OperatorTest, BuiltinArgMax) {
EXPECT_EQ(op.output_data_type, output_toco_op->output_data_type);
}
+TEST_F(OperatorTest, BuiltinArgMin) {
+ ArgMinOperator op;
+ auto output_toco_op = SerializeAndDeserialize(
+ GetOperator("ARG_MIN", OperatorType::kArgMin), op);
+ EXPECT_EQ(op.output_data_type, output_toco_op->output_data_type);
+}
+
TEST_F(OperatorTest, BuiltinTransposeConv) {
TransposeConvOperator op;
op.stride_width = 123;
diff --git a/tensorflow/contrib/lite/toco/tflite/types.cc b/tensorflow/contrib/lite/toco/tflite/types.cc
index 42c5d7e8eb..754f0b4b8c 100644
--- a/tensorflow/contrib/lite/toco/tflite/types.cc
+++ b/tensorflow/contrib/lite/toco/tflite/types.cc
@@ -100,6 +100,8 @@ void CopyBuffer(const ::tflite::Buffer& buffer, Array* array) {
return ::tflite::TensorType_STRING;
case ArrayDataType::kBool:
return ::tflite::TensorType_BOOL;
+ case ArrayDataType::kComplex64:
+ return ::tflite::TensorType_COMPLEX64;
default:
// FLOAT32 is filled for unknown data types.
// TODO(ycling): Implement type inference in TF Lite interpreter.
@@ -123,6 +125,8 @@ ArrayDataType DataType::Deserialize(int tensor_type) {
return ArrayDataType::kUint8;
case ::tflite::TensorType_BOOL:
return ArrayDataType::kBool;
+ case ::tflite::TensorType_COMPLEX64:
+ return ArrayDataType::kComplex64;
default:
LOG(FATAL) << "Unhandled tensor type '" << tensor_type << "'.";
}
@@ -147,6 +151,8 @@ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> DataBuffer::Serialize(
return CopyBuffer<ArrayDataType::kUint8>(array, builder);
case ArrayDataType::kBool:
return CopyBoolToBuffer(array, builder);
+ case ArrayDataType::kComplex64:
+ return CopyBuffer<ArrayDataType::kComplex64>(array, builder);
default:
LOG(FATAL) << "Unhandled array data type.";
}
@@ -172,6 +178,8 @@ void DataBuffer::Deserialize(const ::tflite::Tensor& tensor,
return CopyBuffer<ArrayDataType::kUint8>(buffer, array);
case ::tflite::TensorType_BOOL:
return CopyBuffer<ArrayDataType::kBool>(buffer, array);
+ case ::tflite::TensorType_COMPLEX64:
+ return CopyBuffer<ArrayDataType::kComplex64>(buffer, array);
default:
LOG(FATAL) << "Unhandled tensor type.";
}
diff --git a/tensorflow/contrib/lite/toco/tflite/types_test.cc b/tensorflow/contrib/lite/toco/tflite/types_test.cc
index 8c6ef95bfa..8e9f30ba3a 100644
--- a/tensorflow/contrib/lite/toco/tflite/types_test.cc
+++ b/tensorflow/contrib/lite/toco/tflite/types_test.cc
@@ -14,6 +14,8 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/contrib/lite/toco/tflite/types.h"
+#include <complex>
+
#include <gmock/gmock.h>
#include <gtest/gtest.h>
@@ -71,7 +73,8 @@ TEST(DataType, SupportedTypes) {
{ArrayDataType::kInt32, ::tflite::TensorType_INT32},
{ArrayDataType::kInt64, ::tflite::TensorType_INT64},
{ArrayDataType::kFloat, ::tflite::TensorType_FLOAT32},
- {ArrayDataType::kBool, ::tflite::TensorType_BOOL}};
+ {ArrayDataType::kBool, ::tflite::TensorType_BOOL},
+ {ArrayDataType::kComplex64, ::tflite::TensorType_COMPLEX64}};
for (auto x : testdata) {
EXPECT_EQ(x.second, DataType::Serialize(x.first));
EXPECT_EQ(x.first, DataType::Deserialize(x.second));
@@ -171,6 +174,14 @@ TEST(DataBuffer, Bool) {
::testing::ElementsAre(true, false, true));
}
+TEST(DataBuffer, Complex64) {
+ Array recovered = ToFlatBufferAndBack<ArrayDataType::kComplex64>(
+ {std::complex<float>(1.0f, 2.0f), std::complex<float>(3.0f, 4.0f)});
+ EXPECT_THAT(recovered.GetBuffer<ArrayDataType::kComplex64>().data,
+ ::testing::ElementsAre(std::complex<float>(1.0f, 2.0f),
+ std::complex<float>(3.0f, 4.0f)));
+}
+
TEST(Padding, All) {
EXPECT_EQ(::tflite::Padding_SAME, Padding::Serialize(PaddingType::kSame));
EXPECT_EQ(PaddingType::kSame, Padding::Deserialize(::tflite::Padding_SAME));
diff --git a/tensorflow/contrib/lite/toco/toco.cc b/tensorflow/contrib/lite/toco/toco.cc
index 8041aa9e7f..0b460bd178 100644
--- a/tensorflow/contrib/lite/toco/toco.cc
+++ b/tensorflow/contrib/lite/toco/toco.cc
@@ -23,7 +23,6 @@ limitations under the License.
#include "tensorflow/contrib/lite/toco/toco_cmdline_flags.h"
#include "tensorflow/contrib/lite/toco/toco_flags.pb.h"
#include "tensorflow/contrib/lite/toco/toco_port.h"
-#include "tensorflow/contrib/lite/toco/toco_saved_model.h"
#include "tensorflow/contrib/lite/toco/toco_tooling.h"
#include "tensorflow/contrib/lite/toco/toco_types.h"
#include "tensorflow/core/platform/logging.h"
@@ -49,17 +48,6 @@ void CheckFrozenModelPermissions(const Arg<string>& input_file) {
<< input_file.value() << ".\n";
}
-// Checks the permissions of the SavedModel directory.
-void CheckSavedModelPermissions(const Arg<string>& savedmodel_directory) {
- QCHECK(savedmodel_directory.specified())
- << "Missing required flag --savedmodel_directory.\n";
- QCHECK(
- port::file::Exists(savedmodel_directory.value(), port::file::Defaults())
- .ok())
- << "Specified savedmodel_directory does not exist: "
- << savedmodel_directory.value() << ".\n";
-}
-
// Reads the contents of the GraphDef from either the frozen graph file or the
// SavedModel directory. If it reads the SavedModel directory, it updates the
// ModelFlags and TocoFlags accordingly.
@@ -69,24 +57,16 @@ void ReadInputData(const ParsedTocoFlags& parsed_toco_flags,
string* graph_def_contents) {
port::CheckInitGoogleIsDone("InitGoogle is not done yet.\n");
- bool has_input_file = parsed_toco_flags.input_file.specified();
- bool has_savedmodel_dir = parsed_toco_flags.savedmodel_directory.specified();
-
- // Ensure either input_file or savedmodel_directory flag has been set.
- QCHECK_NE(has_input_file, has_savedmodel_dir)
- << "Specify either input_file or savedmodel_directory flag.\n";
+ // Ensure savedmodel_directory is not set.
+ QCHECK(!parsed_toco_flags.savedmodel_directory.specified())
+ << "Use `tensorflow/contrib/lite/python/tflite_convert` script with "
+ << "SavedModel directories.\n";
// Checks the input file permissions and reads the contents.
- if (has_input_file) {
- CheckFrozenModelPermissions(parsed_toco_flags.input_file);
- CHECK(port::file::GetContents(parsed_toco_flags.input_file.value(),
- graph_def_contents, port::file::Defaults())
- .ok());
- } else {
- CheckSavedModelPermissions(parsed_toco_flags.savedmodel_directory);
- GetSavedModelContents(parsed_toco_flags, parsed_model_flags, toco_flags,
- model_flags, graph_def_contents);
- }
+ CheckFrozenModelPermissions(parsed_toco_flags.input_file);
+ CHECK(port::file::GetContents(parsed_toco_flags.input_file.value(),
+ graph_def_contents, port::file::Defaults())
+ .ok());
}
void ToolMain(const ParsedTocoFlags& parsed_toco_flags,
diff --git a/tensorflow/contrib/lite/toco/toco_cmdline_flags.cc b/tensorflow/contrib/lite/toco/toco_cmdline_flags.cc
index 87a1e429b9..c6d0a03452 100644
--- a/tensorflow/contrib/lite/toco/toco_cmdline_flags.cc
+++ b/tensorflow/contrib/lite/toco/toco_cmdline_flags.cc
@@ -41,7 +41,7 @@ bool ParseTocoFlagsFromCommandLineFlags(
"extension."),
Flag("savedmodel_directory", parsed_flags.savedmodel_directory.bind(),
parsed_flags.savedmodel_directory.default_value(),
- "Full path to the directory containing the SavedModel."),
+ "Deprecated. Full path to the directory containing the SavedModel."),
Flag("output_file", parsed_flags.output_file.bind(),
parsed_flags.output_file.default_value(),
"Output file. "
@@ -55,9 +55,9 @@ bool ParseTocoFlagsFromCommandLineFlags(
"One of TENSORFLOW_GRAPHDEF, TFLITE, GRAPHVIZ_DOT."),
Flag("savedmodel_tagset", parsed_flags.savedmodel_tagset.bind(),
parsed_flags.savedmodel_tagset.default_value(),
- "Comma-separated set of tags identifying the MetaGraphDef within "
- "the SavedModel to analyze. All tags in the tag set must be "
- "specified."),
+ "Deprecated. Comma-separated set of tags identifying the "
+ "MetaGraphDef within the SavedModel to analyze. All tags in the tag "
+ "set must be specified."),
Flag("default_ranges_min", parsed_flags.default_ranges_min.bind(),
parsed_flags.default_ranges_min.default_value(),
"If defined, will be used as the default value for the min bound "
diff --git a/tensorflow/contrib/lite/toco/toco_flags.proto b/tensorflow/contrib/lite/toco/toco_flags.proto
index ad4e94ded9..b4a9870d58 100644
--- a/tensorflow/contrib/lite/toco/toco_flags.proto
+++ b/tensorflow/contrib/lite/toco/toco_flags.proto
@@ -37,7 +37,7 @@ enum FileFormat {
// of as properties of models, instead describing how models are to be
// processed in the context of the present tooling job.
//
-// Next ID to use: 21.
+// Next ID to use: 26.
message TocoFlags {
// Input file format
optional FileFormat input_format = 1;
diff --git a/tensorflow/contrib/lite/toco/toco_saved_model.cc b/tensorflow/contrib/lite/toco/toco_saved_model.cc
deleted file mode 100644
index 26f55a66c7..0000000000
--- a/tensorflow/contrib/lite/toco/toco_saved_model.cc
+++ /dev/null
@@ -1,189 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include <string>
-#include <vector>
-
-#include "absl/strings/numbers.h"
-#include "tensorflow/contrib/lite/toco/model_cmdline_flags.h"
-#include "tensorflow/contrib/lite/toco/toco_saved_model.h"
-#include "tensorflow/core/framework/attr_value.pb.h"
-#include "tensorflow/core/framework/node_def.pb.h"
-#include "tensorflow/core/framework/tensor_shape.pb.h"
-
-namespace toco {
-namespace {
-
-// Loads a SavedModel from the directory specified in parsed_toco_flags.
-// Returns a SavedModelBundle with the requested MetaGraphDef.
-const tensorflow::SavedModelBundle* LoadSavedModel(
- const ParsedTocoFlags& parsed_toco_flags) {
- const string model_path = parsed_toco_flags.savedmodel_directory.value();
- QCHECK(tensorflow::MaybeSavedModelDirectory(model_path))
- << "Model is not saved in the supported SavedModel format.\n";
-
- // Gets the tags identifying the MetaGraphDef from the command line arguments.
- string tags_str;
- if (parsed_toco_flags.savedmodel_tagset.specified()) {
- tags_str = parsed_toco_flags.savedmodel_tagset.value();
- } else {
- tags_str = parsed_toco_flags.savedmodel_tagset.default_value();
- }
- auto tags = absl::StrSplit(tags_str, ',');
-
- // Loads MetaGraphDef.
- auto* bundle = new tensorflow::SavedModelBundle;
- TF_CHECK_OK(tensorflow::LoadSavedModel(tensorflow::SessionOptions(),
- tensorflow::RunOptions(), model_path,
- tags, bundle))
- << "Failed to load exported model from " << model_path
- << ". Ensure the model contains the required tags '" << tags_str
- << "'.\n";
- return bundle;
-}
-
-// Returns the array name without the postfix.
-//
-// e.g. reduces "input:0" to "input".
-string GetArrayName(const string& name) {
- const std::vector<string>& names = absl::StrSplit(name, ':');
- return names[0];
-}
-
-// Returns the list of array names without the postfix sorted alphabetically.
-std::set<string> GetSortedNames(const std::unordered_set<string>& names) {
- std::vector<string> final_names;
- final_names.reserve(names.size());
- for (const auto& name : names) {
- final_names.push_back(GetArrayName(name));
- }
- return std::set<string>(final_names.begin(), final_names.end());
-}
-
-// Gets the final shape after replacing the first dimension with batch size, if
-// it is undefined (containing the value -1). Returns whether the shape is
-// valid.
-bool ReplaceShapeBatchSize(const tensorflow::TensorShapeProto& shape,
- int batch_size,
- tensorflow::TensorShapeProto* final_shape) {
- for (int idx = 0; idx < shape.dim().size(); ++idx) {
- int64 final_dim = shape.dim()[idx].size();
- if (final_dim == -1) {
- if (idx > 0) return false;
- final_dim = batch_size;
- }
- final_shape->add_dim()->set_size(final_dim);
- }
- return true;
-}
-
-// Updates the input arrays in ModelFlags to contain the shape of the array.
-void ProcessInputShapes(const tensorflow::GraphDef& graph_def, int batch_size,
- ModelFlags* model_flags) {
- // Build map of input array names to input arrays.
- std::unordered_map<string, InputArray*> input_data_map;
- for (auto& input : *model_flags->mutable_input_arrays()) {
- input_data_map[input.name()] = &input;
- }
-
- // Adds shapes to the input arrays if the shape is valid.
- for (const tensorflow::NodeDef& node_def : graph_def.node()) {
- if (input_data_map.find(node_def.name()) != input_data_map.end()) {
- const auto shape_it = node_def.attr().find("shape");
- if (shape_it != node_def.attr().end()) {
- tensorflow::TensorShapeProto final_shape;
- bool is_valid = ReplaceShapeBatchSize(shape_it->second.shape(),
- batch_size, &final_shape);
-
- if (is_valid) {
- auto* shape = input_data_map.at(node_def.name())->mutable_shape();
- QCHECK_EQ(shape->dims_size(), 0)
- << "The shape for the input '" << node_def.name()
- << "' was previously defined. For clarity please define inputs "
- << "via --input_arrays and input_shapes flags.\n";
- for (const auto& dim : final_shape.dim()) {
- shape->add_dims(dim.size());
- }
- }
- }
- }
- }
-
- // Checks all input arrays have a shape.
- for (auto const& input : model_flags->input_arrays()) {
- QCHECK(input.shape().dims_size() > 0)
- << "A valid input shape was not found for input '" << input.name()
- << "'. Please define via --input_arrays and --input_shapes flags.\n";
- }
-}
-
-} // namespace
-
-void ParseMetaData(const tensorflow::GraphDef& graph_def,
- const std::unordered_set<string>& inputs,
- const std::unordered_set<string>& outputs,
- const ParsedTocoFlags& parsed_toco_flags,
- const ParsedModelFlags& parsed_model_flags,
- TocoFlags* toco_flags, ModelFlags* model_flags) {
- if (!parsed_model_flags.input_arrays.specified()) {
- const std::set<string> sorted_inputs = GetSortedNames(inputs);
- for (const auto& input_name : sorted_inputs) {
- model_flags->add_input_arrays()->set_name(input_name);
- }
- }
-
- if (!parsed_model_flags.output_arrays.specified()) {
- const std::set<string> sorted_outputs = GetSortedNames(outputs);
- for (const auto& output_name : sorted_outputs) {
- model_flags->add_output_arrays(GetArrayName(output_name));
- }
- }
-
- if (!parsed_model_flags.input_shapes.specified()) {
- int batch_size = parsed_model_flags.batch_size.value();
- ProcessInputShapes(graph_def, batch_size, model_flags);
- }
-
- if (!parsed_toco_flags.inference_type.specified()) {
- toco_flags->set_inference_type(IODataType::FLOAT);
- }
-}
-
-// TODO(nupurgarg): Add top level tests.
-void GetSavedModelContents(const ParsedTocoFlags& parsed_toco_flags,
- const ParsedModelFlags& parsed_model_flags,
- TocoFlags* toco_flags, ModelFlags* model_flags,
- string* graph_def_contents) {
- // Loads the MetaGraphDef within a SavedModelBundle.
- auto bundle = LoadSavedModel(parsed_toco_flags);
-
- // Converts the MetaGraphDef to frozen GraphDef.
- tensorflow::GraphDef frozen_graph_def;
- std::unordered_set<string> inputs;
- std::unordered_set<string> outputs;
- TF_CHECK_OK(tensorflow::FreezeSavedModel(*bundle, &frozen_graph_def, &inputs,
- &outputs));
-
- // Reads the frozen GraphDef into a string.
- QCHECK(frozen_graph_def.SerializeToString(graph_def_contents))
- << "Unable to generate serialized GraphDef.\n";
-
- // Process inputs and outputs and metadata within GraphDef.
- const tensorflow::GraphDef graph_def = bundle->meta_graph_def.graph_def();
- ParseMetaData(graph_def, inputs, outputs, parsed_toco_flags,
- parsed_model_flags, toco_flags, model_flags);
-}
-
-} // namespace toco
diff --git a/tensorflow/contrib/lite/toco/toco_saved_model.h b/tensorflow/contrib/lite/toco/toco_saved_model.h
deleted file mode 100644
index 7a0fabd82d..0000000000
--- a/tensorflow/contrib/lite/toco/toco_saved_model.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_CONTRIB_LITE_TOCO_TOCO_SAVED_MODEL_H_
-#define TENSORFLOW_CONTRIB_LITE_TOCO_TOCO_SAVED_MODEL_H_
-
-#include <string>
-#include <vector>
-
-#include "tensorflow/cc/tools/freeze_saved_model.h"
-#include "tensorflow/contrib/lite/toco/args.h"
-#include "tensorflow/contrib/lite/toco/model_flags.pb.h"
-#include "tensorflow/contrib/lite/toco/toco_flags.pb.h"
-#include "tensorflow/contrib/lite/toco/types.pb.h"
-
-namespace toco {
-
-// Parses metadata into `toco_flags` and `model_flags`.
-//
-// Stores `inputs` as input_arrays and `outputs` as output_arrays in
-// `model_flags`. Infers input_shapes from the GraphDef and stores it in
-// `model_flags` as part of the input_arrays. Assumes inference_type is FLOAT
-// and stores it in `toco_flags`.
-void ParseMetaData(const tensorflow::GraphDef& graph_def,
- const std::unordered_set<string>& inputs,
- const std::unordered_set<string>& outputs,
- const ParsedTocoFlags& parsed_toco_flags,
- const ParsedModelFlags& parsed_model_flags,
- TocoFlags* toco_flags, ModelFlags* model_flags);
-
-// Generates a frozen graph from the SavedModel in the directory specified in
-// `toco_flags`. Reads frozen graph contents into `graph_def_contents`. Parses
-// metadata relating to the GraphDef into `toco_flags` and `model_flags`.
-void GetSavedModelContents(const ParsedTocoFlags& parsed_toco_flags,
- const ParsedModelFlags& parsed_model_flags,
- TocoFlags* toco_flags, ModelFlags* model_flags,
- string* graph_def_contents);
-
-} // namespace toco
-
-#endif // TENSORFLOW_CONTRIB_LITE_TOCO_TOCO_SAVED_MODEL_H_
diff --git a/tensorflow/contrib/lite/toco/toco_saved_model_test.cc b/tensorflow/contrib/lite/toco/toco_saved_model_test.cc
deleted file mode 100644
index 5e122afe65..0000000000
--- a/tensorflow/contrib/lite/toco/toco_saved_model_test.cc
+++ /dev/null
@@ -1,274 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/contrib/lite/toco/toco_saved_model.h"
-#include "absl/strings/str_join.h"
-#include "tensorflow/cc/framework/scope.h"
-#include "tensorflow/cc/ops/standard_ops.h"
-#include "tensorflow/contrib/lite/toco/model_cmdline_flags.h"
-#include "tensorflow/contrib/lite/toco/toco_cmdline_flags.h"
-#include "tensorflow/core/lib/core/status_test_util.h"
-
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-
-namespace toco {
-namespace {
-
-using tensorflow::ops::Add;
-using tensorflow::ops::Const;
-using tensorflow::ops::FakeQuantWithMinMaxArgs;
-using tensorflow::ops::Placeholder;
-
-class TocoSavedModelTest : public ::testing::Test {
- protected:
- // Calls functions to process cmdline arguments and calls ParseMetaData.
- // ParseMetaData parses input_arrays, output_arrays, and gets metadata from
- // the SavedModel if it is not defined in the cmdline arguments.
- void ProcessGraphDefMetadata(const std::unordered_set<string>& inputs,
- const std::unordered_set<string>& outputs,
- const tensorflow::GraphDef& graph_def) {
- ReadTocoFlagsFromCommandLineFlags(parsed_toco_flags_, &toco_flags_);
- ReadModelFlagsFromCommandLineFlags(parsed_model_flags_, &model_flags_);
- ParseMetaData(graph_def, inputs, outputs, parsed_toco_flags_,
- parsed_model_flags_, &toco_flags_, &model_flags_);
- }
-
- // Gets the GraphDef from the SavedModelBundle and processes metadata.
- void ProcessSavedModelMetadata(const std::unordered_set<string>& inputs,
- const std::unordered_set<string>& outputs) {
- const tensorflow::GraphDef graph_def = bundle_.meta_graph_def.graph_def();
- ProcessGraphDefMetadata(inputs, outputs, graph_def);
- }
-
- // Returns a GraphDef representing a simple float model with a single input.
- tensorflow::GraphDef GetFloatGraphDef(const std::vector<int64>& shape) {
- tensorflow::GraphDef graph_def;
- tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
-
- tensorflow::Output input =
- Placeholder(scope.WithOpName("input"), tensorflow::DT_FLOAT,
- Placeholder::Shape(tensorflow::PartialTensorShape(shape)));
- tensorflow::Output zero = Const(scope.WithOpName("zero"), 0.0f, {});
- tensorflow::Output add = Add(scope.WithOpName("add"), input, zero);
-
- TF_EXPECT_OK(scope.ToGraphDef(&graph_def));
- return graph_def;
- }
-
- // Returns a GraphDef representing a simple float model with two inputs.
- tensorflow::GraphDef GetComplexFloatGraphDef() {
- tensorflow::GraphDef graph_def;
- tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
-
- tensorflow::Output inputA =
- Placeholder(scope.WithOpName("inputA"), tensorflow::DT_FLOAT,
- Placeholder::Shape(tensorflow::TensorShape({1, 3, 3, 1})));
- tensorflow::Output inputB =
- Placeholder(scope.WithOpName("inputB"), tensorflow::DT_FLOAT,
- Placeholder::Shape(tensorflow::TensorShape({1, 3, 3, 1})));
- tensorflow::Output add = Add(scope.WithOpName("add"), inputB, inputA);
-
- TF_EXPECT_OK(scope.ToGraphDef(&graph_def));
- return graph_def;
- }
-
- // Returns a GraphDef representing a simple quantized model.
- tensorflow::GraphDef GetQuantizedGraphDef() {
- tensorflow::GraphDef graph_def;
- tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
-
- tensorflow::Output input =
- Placeholder(scope.WithOpName("input"), tensorflow::DT_FLOAT,
- Placeholder::Shape(tensorflow::TensorShape({1, 3, 3, 1})));
- tensorflow::Output zero = Const(scope.WithOpName("zero"), 0.0f, {});
- tensorflow::Output fake_quant =
- FakeQuantWithMinMaxArgs(scope.WithOpName("quant"), zero);
- tensorflow::Output add = Add(scope.WithOpName("add"), input, fake_quant);
-
- TF_EXPECT_OK(scope.ToGraphDef(&graph_def));
- return graph_def;
- }
-
- // Gets the values in the input_arrays flag.
- std::vector<string> GetInputArrays() {
- std::vector<string> actual;
- for (const auto& input : model_flags_.input_arrays()) {
- actual.push_back(input.name());
- }
- return actual;
- }
-
- // Gets the values in the output_arrays flag.
- std::vector<string> GetOutputArrays() {
- std::vector<string> actual(model_flags_.output_arrays().begin(),
- model_flags_.output_arrays().end());
- return actual;
- }
-
- // Gets the shape of the given input array.
- string GetInputShape(const string& input_array) {
- for (const auto& input : model_flags_.input_arrays()) {
- if (input.name() == input_array) {
- std::vector<string> dims;
- for (int idx = 0; idx < input.shape().dims_size(); ++idx) {
- dims.push_back(std::to_string(input.shape().dims(idx)));
- }
- return absl::StrJoin(dims, ",");
- }
- }
- return "";
- }
-
- tensorflow::SavedModelBundle bundle_;
- ParsedTocoFlags parsed_toco_flags_;
- ParsedModelFlags parsed_model_flags_;
- TocoFlags toco_flags_;
- ModelFlags model_flags_;
-};
-
-// Tests if input_arrays, output_arrays, inference_type, and input_shapes are
-// added to ModelFlags if they are not specified in cmdline arguments.
-// Tests if the default batch size replaces a -1 in the first dimension.
-TEST_F(TocoSavedModelTest, NoCmdLine) {
- tensorflow::GraphDef graph_def = GetFloatGraphDef({-1, 3, 3, 1});
-
- ProcessGraphDefMetadata({"input"}, {"add"}, graph_def);
- EXPECT_EQ(GetInputArrays(), std::vector<string>({"input"}));
- EXPECT_EQ(GetOutputArrays(), std::vector<string>({"add"}));
- EXPECT_EQ(GetInputShape("input"), "1,3,3,1");
- EXPECT_EQ(toco_flags_.inference_type(), IODataType::FLOAT);
-}
-
-// Tests if the order of input_arrays and output_arrays is deterministic when
-// they are taken from the SavedModel.
-TEST_F(TocoSavedModelTest, NoCmdLineMultipleArrays) {
- tensorflow::GraphDef graph_def = GetComplexFloatGraphDef();
-
- // Note: The model does not have two outputs. However, the function does not
- // need an accurate output_array list. This is only meant to test order.
- ProcessGraphDefMetadata({"inputB", "inputA"}, {"add", "invalid"}, graph_def);
- EXPECT_EQ(GetInputArrays(), std::vector<string>({"inputA", "inputB"}));
- EXPECT_EQ(GetOutputArrays(), std::vector<string>({"add", "invalid"}));
- EXPECT_EQ(GetInputShape("inputA"), "1,3,3,1");
- EXPECT_EQ(GetInputShape("inputB"), "1,3,3,1");
- EXPECT_EQ(toco_flags_.inference_type(), IODataType::FLOAT);
-}
-
-// Tests if input_shapes is inferred when input_arrays is passed in via cmdline
-// arguments.
-TEST_F(TocoSavedModelTest, InputNameWithoutInputShape) {
- parsed_model_flags_.input_arrays.bind()("input");
- tensorflow::GraphDef graph_def = GetFloatGraphDef({2, 3, 3, 1});
-
- ProcessGraphDefMetadata({"not_used_input"}, {"add"}, graph_def);
- EXPECT_EQ(GetInputArrays(), std::vector<string>({"input"}));
- EXPECT_EQ(GetOutputArrays(), std::vector<string>({"add"}));
- EXPECT_EQ(GetInputShape("input"), "2,3,3,1");
- EXPECT_EQ(toco_flags_.inference_type(), IODataType::FLOAT);
-}
-
-// Ensures a failure occurs when input_shapes is defined without input_arrays.
-TEST_F(TocoSavedModelTest, InputShapeWithoutInputName) {
- parsed_model_flags_.input_shapes.bind()("1,224,224,1:9,12");
- tensorflow::GraphDef graph_def = GetFloatGraphDef({1, 3, 3, 1});
-
- EXPECT_DEATH(ProcessGraphDefMetadata({"input"}, {"add"}, graph_def),
- "failed: input_shapes.size\\(\\) == "
- "model_flags->input_arrays_size\\(\\)");
-}
-
-// Tests if the cmdline values of input_arrays, input_shapes are used when
-// specified with an empty GraphDef.
-TEST_F(TocoSavedModelTest, InputArraysCmdLine) {
- parsed_model_flags_.input_arrays.bind()("inputA,inputB");
- parsed_model_flags_.input_shapes.bind()("1,224,224,1:9,12");
-
- ProcessSavedModelMetadata({"input0", "input1"}, {"output0", "output1"});
- EXPECT_EQ(GetInputArrays(), std::vector<string>({"inputA", "inputB"}));
- EXPECT_EQ(GetOutputArrays(), std::vector<string>({"output0", "output1"}));
- EXPECT_EQ(GetInputShape("inputA"), "1,224,224,1");
- EXPECT_EQ(GetInputShape("inputB"), "9,12");
- EXPECT_EQ(toco_flags_.inference_type(), IODataType::FLOAT);
-}
-
-// Tests if the cmdline values of input_arrays, input_shapes are used when
-// specified even if values exist within the GraphDef.
-TEST_F(TocoSavedModelTest, InputArraysCmdLineWithGraphDef) {
- parsed_model_flags_.input_arrays.bind()("inputA");
- parsed_model_flags_.input_shapes.bind()("1,224,224,1");
- tensorflow::GraphDef graph_def = GetFloatGraphDef({1, 3, 3, 1});
-
- ProcessGraphDefMetadata({"inputA"}, {"add"}, graph_def);
- EXPECT_EQ(GetInputArrays(), std::vector<string>({"inputA"}));
- EXPECT_EQ(GetOutputArrays(), std::vector<string>({"add"}));
- EXPECT_EQ(GetInputShape("inputA"), "1,224,224,1");
- EXPECT_EQ(toco_flags_.inference_type(), IODataType::FLOAT);
-}
-
-// Tests if the cmdline values of input_arrays, input_shapes, inference_type,
-// and output_arrays are used when specified with an empty GraphDef.
-TEST_F(TocoSavedModelTest, AllParamsCmdLine) {
- parsed_model_flags_.input_arrays.bind()("inputA,inputB");
- parsed_model_flags_.output_arrays.bind()("outputA,outputB");
- parsed_model_flags_.input_shapes.bind()("1,224,224,1:9,12");
- parsed_toco_flags_.inference_type.bind()("FLOAT");
-
- ProcessSavedModelMetadata({"input0", "input1"}, {"output0", "output1"});
- EXPECT_EQ(GetInputArrays(), std::vector<string>({"inputA", "inputB"}));
- EXPECT_EQ(GetOutputArrays(), std::vector<string>({"outputA", "outputB"}));
- EXPECT_EQ(GetInputShape("inputA"), "1,224,224,1");
- EXPECT_EQ(GetInputShape("inputB"), "9,12");
- EXPECT_EQ(toco_flags_.inference_type(), IODataType::FLOAT);
-}
-
-// Tests if a quantized graph gives the correct values assuming type is passed
-// in via command line.
-TEST_F(TocoSavedModelTest, QuantizedNoCmdLine) {
- parsed_toco_flags_.inference_type.bind()("QUANTIZED_UINT8");
- tensorflow::GraphDef graph_def = GetQuantizedGraphDef();
-
- ProcessGraphDefMetadata({"input"}, {"add"}, graph_def);
- EXPECT_EQ(GetInputArrays(), std::vector<string>({"input"}));
- EXPECT_EQ(GetOutputArrays(), std::vector<string>({"add"}));
- EXPECT_EQ(GetInputShape("input"), "1,3,3,1");
- EXPECT_EQ(toco_flags_.inference_type(), IODataType::QUANTIZED_UINT8);
-}
-
-// Tests if the provided batch size replaces a -1 in the first dimension of
-// input shape.
-TEST_F(TocoSavedModelTest, MissingShapeParameterValid) {
- parsed_model_flags_.batch_size.bind()(3);
- tensorflow::GraphDef graph_def = GetFloatGraphDef({-1, 3, 3, 1});
-
- ProcessGraphDefMetadata({"input"}, {"add"}, graph_def);
- EXPECT_EQ(GetInputArrays(), std::vector<string>({"input"}));
- EXPECT_EQ(GetOutputArrays(), std::vector<string>({"add"}));
- EXPECT_EQ(GetInputShape("input"), "3,3,3,1");
- EXPECT_EQ(toco_flags_.inference_type(), IODataType::FLOAT);
-}
-
-// Ensures a failure occurs if there is a -1 in a dimension aside from the first
-// position of input shape.
-TEST_F(TocoSavedModelTest, MissingShapeParameterInvalid) {
- parsed_model_flags_.batch_size.bind()(3);
- tensorflow::GraphDef graph_def = GetFloatGraphDef({1, -1, 3, 1});
-
- EXPECT_DEATH(ProcessGraphDefMetadata({"input"}, {"add"}, graph_def),
- "A valid input shape was not found for input 'input'.");
-}
-
-} // namespace
-} // namespace toco
diff --git a/tensorflow/contrib/lite/toco/toco_tooling.cc b/tensorflow/contrib/lite/toco/toco_tooling.cc
index 2534d1ef2a..7a0d9608cc 100644
--- a/tensorflow/contrib/lite/toco/toco_tooling.cc
+++ b/tensorflow/contrib/lite/toco/toco_tooling.cc
@@ -79,6 +79,7 @@ void MakeGeneralGraphTransformationsSet(
transformations->Add(new FuseBinaryIntoFollowingAffine);
transformations->Add(new FuseBroadcastIntoFollowingBinary);
transformations->Add(new MergeReshapeIntoPrecedingTranspose);
+ transformations->Add(new MoveBinaryOperatorBeforeReshape);
transformations->Add(new ReorderElementwiseUnary);
transformations->Add(new ReorderReshapeTranspose);
transformations->Add(new ResolveBatchNormalization);
@@ -104,14 +105,15 @@ void MakeGeneralGraphTransformationsSet(
transformations->Add(new IdentifyRelu1);
transformations->Add(new IdentifyPRelu);
transformations->Add(new RemoveTrivialBinaryOperator);
- transformations->Add(new ReadFakeQuantMinMax);
+ transformations->Add(new ResolveFakeQuantArgsFromVars);
+ transformations->Add(new ReadArrayMinmaxAndNarrowRangeFromFakeQuant);
transformations->Add(new ResolveSpaceToBatchNDAttributes);
transformations->Add(new ResolveBatchToSpaceNDAttributes);
transformations->Add(new ResolvePadAttributes);
transformations->Add(new ResolvePadV2Attributes);
transformations->Add(new ResolveStridedSliceAttributes);
transformations->Add(new ResolveSliceAttributes);
- transformations->Add(new ResolveMeanAttributes);
+ transformations->Add(new ResolveReduceAttributes);
transformations->Add(new ResolveConstantShapeOrRank);
transformations->Add(new MakeInitialDequantizeOperator);
transformations->Add(new UnpartitionEmbeddingLookup);
@@ -134,6 +136,8 @@ bool SupportsPreallocatedWorkspace(FileFormat format) {
return (format == TFLITE);
}
+bool SupportsShuffledFCWeights(FileFormat format) { return format == TFLITE; }
+
bool IsRealValued(toco::ArrayDataType type) {
// TODO(benoitjacob) - this is hardcoding that uint8 and int16 are only used
// for quantized real-number values, and no other integer type is ever used
@@ -270,13 +274,16 @@ void Transform(const TocoFlags& toco_flags, Model* model) {
transformations.Add(new toco::MergeLstmCellInputs);
}
}
- if (toco_flags.quantize_weights()) {
- transformations.Add(new QuantizeWeights);
- }
transformations.Add(new ResolveConstantConcatenation);
RunGraphTransformations(model, "general graph transformations",
transformations);
+ if (toco_flags.quantize_weights()) {
+ // Run the quantize weights transformation after batchnorms have been
+ // folded into the weights.
+ RunGraphTransformations(model, "quantize weights transformation",
+ {new QuantizeWeights});
+ }
if (quantize_output) {
if (toco_flags.propagate_fake_quant_num_bits()) {
RunGraphTransformations(model,
@@ -335,6 +342,10 @@ void Transform(const TocoFlags& toco_flags, Model* model) {
new RemoveFinalDequantizeOp,
ensure_safe_for_int8_kernels,
});
+ if (SupportsShuffledFCWeights(output_format)) {
+ RunGraphTransformations(model, "shuffling of FC weights",
+ {new ShuffleFCWeights});
+ }
} else {
GraphTransformationsSet dequantization_transformations{new Dequantize};
// Dequantize creates FakeQuant nodes. We may want to discard
diff --git a/tensorflow/contrib/lite/toco/tooling_util.cc b/tensorflow/contrib/lite/toco/tooling_util.cc
index a52c812ef4..45cd10ec7b 100644
--- a/tensorflow/contrib/lite/toco/tooling_util.cc
+++ b/tensorflow/contrib/lite/toco/tooling_util.cc
@@ -350,7 +350,7 @@ const char* OperatorTypeName(OperatorType type) {
HANDLE_OPERATORTYPENAME_CASE(Less)
HANDLE_OPERATORTYPENAME_CASE(LessEqual)
HANDLE_OPERATORTYPENAME_CASE(MatMul)
- HANDLE_OPERATORTYPENAME_CASE(Max) // Reduction Max
+ HANDLE_OPERATORTYPENAME_CASE(ReduceMax) // Reduction Max
HANDLE_OPERATORTYPENAME_CASE(Maximum) // Element-wise Maximum
HANDLE_OPERATORTYPENAME_CASE(Merge)
HANDLE_OPERATORTYPENAME_CASE(Min) // Reduction Min
@@ -385,8 +385,10 @@ const char* OperatorTypeName(OperatorType type) {
HANDLE_OPERATORTYPENAME_CASE(SpaceToBatchND)
HANDLE_OPERATORTYPENAME_CASE(BatchToSpaceND)
HANDLE_OPERATORTYPENAME_CASE(Mean)
+ HANDLE_OPERATORTYPENAME_CASE(ReduceProd)
HANDLE_OPERATORTYPENAME_CASE(Svdf)
HANDLE_OPERATORTYPENAME_CASE(ArgMax)
+ HANDLE_OPERATORTYPENAME_CASE(ArgMin)
HANDLE_OPERATORTYPENAME_CASE(TopK_V2)
HANDLE_OPERATORTYPENAME_CASE(Unsupported)
HANDLE_OPERATORTYPENAME_CASE(Exp)
@@ -396,6 +398,7 @@ const char* OperatorTypeName(OperatorType type) {
HANDLE_OPERATORTYPENAME_CASE(SparseToDense)
HANDLE_OPERATORTYPENAME_CASE(Equal)
HANDLE_OPERATORTYPENAME_CASE(NotEqual)
+ HANDLE_OPERATORTYPENAME_CASE(Pow)
default:
LOG(FATAL) << "Unhandled op type";
#undef HANDLE_OPERATORTYPENAME_CASE
@@ -446,8 +449,12 @@ void LogSummary(int log_level, const Model& model) {
}
void LogArray(int log_level, const Model& model, const string& name) {
- const auto& array = model.GetArray(name);
VLOG(log_level) << "Array: " << name;
+ if (!model.HasArray(name)) {
+ VLOG(log_level) << " DOES NOT EXIST";
+ return;
+ }
+ const auto& array = model.GetArray(name);
VLOG(log_level) << " Data type: " << ArrayDataTypeName(array.data_type);
VLOG(log_level) << " Final type: "
<< ArrayDataTypeName(array.final_data_type);
@@ -1260,8 +1267,13 @@ void InsertCopyOperator(Model* model, const string& source_array_name,
auto* copy_op = new TensorFlowReshapeOperator;
copy_op->inputs = {
source_array_name,
- CreateInt32Array(model, target_array_name + "_copy_shape", shape)};
+ CreateInt32Array(
+ model, AvailableArrayName(*model, target_array_name + "_copy_shape"),
+ shape)};
copy_op->outputs = {target_array_name};
+ if (target_array.has_shape()) {
+ copy_op->shape = target_array.shape().dims();
+ }
model->operators.emplace_back(copy_op);
}
@@ -2200,4 +2212,51 @@ void UseArraysExtraInfo(Model* model, bool quantize_output) {
}
}
+void UndoWeightsShuffling(Model* model) {
+ for (const auto& op : model->operators) {
+ if (op->type != toco::OperatorType::kFullyConnected) {
+ continue;
+ }
+ const auto& fc_op = static_cast<toco::FullyConnectedOperator&>(*op);
+ if (fc_op.weights_format == FullyConnectedWeightsFormat::kDefault) {
+ continue;
+ }
+ const string& weights_name = fc_op.inputs[1];
+ QCHECK_EQ(CountOpsWithInput(*model, weights_name), 1);
+ auto& weights_array = model->GetArray(weights_name);
+ QCHECK(weights_array.data_type == ArrayDataType::kUint8);
+ auto& weights_data =
+ weights_array.GetMutableBuffer<toco::ArrayDataType::kUint8>().data;
+ const auto& weights_shape = weights_array.shape();
+ QCHECK_EQ(weights_shape.dimensions_count(), 2);
+ const int rows = weights_shape.dims(0);
+ const int cols = weights_shape.dims(1);
+ QCHECK_EQ(rows % 4, 0);
+ QCHECK_EQ(cols % 16, 0);
+ CHECK_EQ(rows * cols, weights_data.size());
+ // Compute the de-shuffled weights
+ std::vector<uint8> deshuffled_data(weights_data.size());
+ uint8* shuffled_data_ptr = weights_data.data();
+ for (int r = 0; r < rows; r += 4) {
+ for (int c = 0; c < cols; c += 16) {
+ for (int i = 0; i < 4; i++) {
+ uint8* deshuffled_data_ptr =
+ deshuffled_data.data() + (r + i) * cols + c;
+ for (int j = 0; j < 16; j++) {
+ uint8 shuffled_val = *shuffled_data_ptr++;
+ // Deshuffling isn't only about deshuffling the storage layout,
+ // it's also about undoing the flipping of the sign bit, which is
+ // performed on the shuffled weights.
+ uint8 deshuffled_val = shuffled_val ^ 0x80;
+ *deshuffled_data_ptr++ = deshuffled_val;
+ }
+ }
+ }
+ }
+ CHECK_EQ(shuffled_data_ptr, weights_data.data() + rows * cols);
+ // Switch this FC op to using the deshuffled weights.
+ weights_data = std::move(deshuffled_data);
+ }
+}
+
} // namespace toco
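
For context, here is a minimal sketch of the forward transformation that
UndoWeightsShuffling above inverts: the same traversal in 4-row x 16-column
blocks, with each block emitted contiguously and the sign bit flipped
(XOR 0x80). This illustrates the storage layout only and is not the actual
ShuffleFCWeights implementation:

```cpp
#include <cstdint>
#include <vector>

// Illustrative inverse of UndoWeightsShuffling: reads the row-major weights
// matrix block by block and writes each 4x16 block contiguously, flipping
// the sign bit of every value. rows must be a multiple of 4 and cols a
// multiple of 16, mirroring the QCHECKs in the deshuffling code above.
std::vector<uint8_t> ShuffleWeights(const std::vector<uint8_t>& data,
                                    int rows, int cols) {
  std::vector<uint8_t> shuffled(data.size());
  uint8_t* out = shuffled.data();
  for (int r = 0; r < rows; r += 4) {
    for (int c = 0; c < cols; c += 16) {
      for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 16; j++) {
          *out++ = data[(r + i) * cols + c + j] ^ 0x80;
        }
      }
    }
  }
  return shuffled;
}
```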
diff --git a/tensorflow/contrib/lite/toco/tooling_util.h b/tensorflow/contrib/lite/toco/tooling_util.h
index 791ced8d01..5dbfa54fa0 100644
--- a/tensorflow/contrib/lite/toco/tooling_util.h
+++ b/tensorflow/contrib/lite/toco/tooling_util.h
@@ -344,6 +344,11 @@ tensorflow::Status NumElements(const std::vector<T>& shape, U* num_elements) {
return tensorflow::Status::OK();
}
+// A model file may have shuffled FC weights.
+// When that happens, we want to de-shuffle them immediately on import,
+// so that the rest of toco doesn't need to know about shuffled weights.
+void UndoWeightsShuffling(Model* model);
+
} // namespace toco
#endif // TENSORFLOW_CONTRIB_LITE_TOCO_TOOLING_UTIL_H_
diff --git a/tensorflow/contrib/lite/tools/BUILD b/tensorflow/contrib/lite/tools/BUILD
index 5913847329..d070018e83 100644
--- a/tensorflow/contrib/lite/tools/BUILD
+++ b/tensorflow/contrib/lite/tools/BUILD
@@ -14,6 +14,7 @@ py_binary(
srcs = ["visualize.py"],
data = [
"//tensorflow/contrib/lite/schema:schema.fbs",
+ "//tensorflow/python:platform",
"@flatbuffers//:flatc",
],
srcs_version = "PY2AND3",
@@ -53,6 +54,7 @@ cc_test(
],
tags = [
"tflite_not_portable_android",
+ "tflite_not_portable_ios",
],
deps = [
":gen_op_registration",
diff --git a/tensorflow/contrib/lite/tools/benchmark/README.md b/tensorflow/contrib/lite/tools/benchmark/README.md
index c10826afff..f1e257ad10 100644
--- a/tensorflow/contrib/lite/tools/benchmark/README.md
+++ b/tensorflow/contrib/lite/tools/benchmark/README.md
@@ -3,7 +3,38 @@
## Description
A simple C++ binary to benchmark a TFLite model and its individual operators,
-both on desktop machines and on Android.
+both on desktop machines and on Android. The binary takes a TFLite model,
+generates random inputs, and then repeatedly runs the model for a specified
+number of runs. Aggregate latency statistics are reported once the runs finish.
+
+The instructions below cover running the binary on desktop and Android; for
+iOS, please use the
+[iOS benchmark app](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark/ios).
+
+## Parameters
+
+The binary takes the following required parameters:
+
+* `graph`: `string` \
+ The path to the TFLite model file.
+* `input_layer`: `string` \
+    The name of the input layer; this is typically the first layer of the model.
+* `input_layer_shape`: `string` \
+    The shape of the input layer. This is a comma-separated string of the
+    dimensions of the input tensor.
+
+and the following optional parameters:
+
+* `num_threads`: `int` (default=1) \
+    The number of threads to use for running the TFLite interpreter.
+* `warmup_runs`: `int` (default=1) \
+ The number of warmup runs to do before starting the benchmark.
+* `run_delay`: `float` (default=-1.0) \
+ The delay in seconds between subsequent benchmark runs. Non-positive values
+    mean that no delay is used.
+* `use_nnapi`: `bool` (default=false) \
+    Whether to use [Android NNAPI](https://developer.android.com/ndk/guides/neuralnetworks/).
+ This API is available on recent Android devices.
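+
+For example, once the binary is built (see the next section), the parameters
+might be combined as follows; the model path and values are illustrative:
+
+```
+bazel-bin/tensorflow/contrib/lite/tools/benchmark/benchmark_model \
+  --graph=mobilenet_quant_v1_224.tflite \
+  --input_layer="input" \
+  --input_layer_shape="1,224,224,3" \
+  --num_threads=4 \
+  --warmup_runs=2 \
+  --use_nnapi=false
+```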
## To build/install/run
@@ -44,7 +75,7 @@ adb push mobilenet_quant_v1_224.tflite /data/local/tmp
```
adb shell /data/local/tmp/benchmark_model \
--graph=/data/local/tmp/mobilenet_quant_v1_224.tflite \
- --input_layer="Placeholder" \
+ --input_layer="input" \
--input_layer_shape="1,224,224,3" \
--num_threads=4
```
@@ -70,6 +101,30 @@ bazel-bin/tensorflow/contrib/lite/tools/benchmark/benchmark_model \
The MobileNet graph used as an example here may be downloaded from
https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip
+
+## Reducing variance between runs on Android
+
+Most modern Android phones use the [ARM big.LITTLE](https://en.wikipedia.org/wiki/ARM_big.LITTLE)
+architecture, where some cores are faster but more power-hungry than others.
+When running benchmarks on these phones, there can be significant variance
+between different runs of the benchmark. One way to reduce this variance is to
+set the [CPU affinity](https://en.wikipedia.org/wiki/Processor_affinity)
+before running the benchmark. On Android this can be done using the `taskset`
+command.
+For example, to run the benchmark on the big cores of a Pixel 2 with a single
+thread, use the following command:
+
+```
+adb shell taskset f0 /data/local/tmp/benchmark_model \
+ --graph=/data/local/tmp/mobilenet_quant_v1_224.tflite \
+ --input_layer="input" \
+ --input_layer_shape="1,224,224,3" \
+ --num_threads=1
+```
+
+where `f0` is the affinity mask for the big cores on a Pixel 2.
+Note: The affinity mask varies with the device.
+
## Profiling model operators
The benchmark model binary also allows you to profile operators and report the execution time of each operator. To do this,
compile the binary with a compiler flag that builds profiling support in. Pass **--copt=-DTFLITE_PROFILING_ENABLED**
diff --git a/tensorflow/contrib/lite/tools/benchmark/benchmark_model.cc b/tensorflow/contrib/lite/tools/benchmark/benchmark_model.cc
index 08648bcfe2..19b9a9c7ba 100644
--- a/tensorflow/contrib/lite/tools/benchmark/benchmark_model.cc
+++ b/tensorflow/contrib/lite/tools/benchmark/benchmark_model.cc
@@ -98,10 +98,13 @@ void BenchmarkModel::LogFlags() {
<< "]";
}
+void BenchmarkModel::PrepareInputsAndOutputs() {}
+
Stat<int64_t> BenchmarkModel::Run(int num_times, RunType run_type) {
Stat<int64_t> run_stats;
TFLITE_LOG(INFO) << "Running benchmark for " << num_times << " iterations ";
for (int run = 0; run < num_times; run++) {
+ PrepareInputsAndOutputs();
listeners_.OnSingleRunStart(run_type);
int64_t start_us = profiling::time::NowMicros();
RunImpl();
diff --git a/tensorflow/contrib/lite/tools/benchmark/benchmark_model.h b/tensorflow/contrib/lite/tools/benchmark/benchmark_model.h
index 942e21f67a..3c7063b2d4 100644
--- a/tensorflow/contrib/lite/tools/benchmark/benchmark_model.h
+++ b/tensorflow/contrib/lite/tools/benchmark/benchmark_model.h
@@ -150,6 +150,7 @@ class BenchmarkModel {
virtual std::vector<Flag> GetFlags();
virtual uint64_t ComputeInputBytes() = 0;
virtual tensorflow::Stat<int64_t> Run(int num_times, RunType run_type);
+ virtual void PrepareInputsAndOutputs();
virtual void RunImpl() = 0;
BenchmarkParams params_;
BenchmarkListeners listeners_;
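
The new PrepareInputsAndOutputs() hook runs before every iteration (see the
Run() change in benchmark_model.cc above), giving subclasses a place to
refresh input data between runs. A hypothetical sketch, assuming the class
lives in namespace tflite::benchmark and stubbing the pure-virtual methods
visible in this hunk (the real class may declare more):

```cpp
#include <cstdint>

#include "tensorflow/contrib/lite/tools/benchmark/benchmark_model.h"

// Sketch only: a benchmark that regenerates its input data before each run
// via the new per-iteration hook. FillRandomInputs() is an assumed helper,
// not a real API.
class RandomInputBenchmark : public tflite::benchmark::BenchmarkModel {
 public:
  void PrepareInputsAndOutputs() override {
    FillRandomInputs();  // rewrite input tensors so each run sees fresh data
  }

 protected:
  uint64_t ComputeInputBytes() override { return 0; }  // stub for illustration
  void RunImpl() override {}                           // stub for illustration

 private:
  void FillRandomInputs() {}  // placeholder for illustration
};
```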
diff --git a/tensorflow/contrib/lite/tools/benchmark/benchmark_params.h b/tensorflow/contrib/lite/tools/benchmark/benchmark_params.h
index 33448dd162..c98f47bb0d 100644
--- a/tensorflow/contrib/lite/tools/benchmark/benchmark_params.h
+++ b/tensorflow/contrib/lite/tools/benchmark/benchmark_params.h
@@ -31,6 +31,8 @@ class TypedBenchmarkParam;
class BenchmarkParam {
protected:
enum class ParamType { TYPE_INT32, TYPE_FLOAT, TYPE_BOOL, TYPE_STRING };
+ template <typename T>
+ static ParamType GetValueType();
public:
template <typename T>
@@ -49,8 +51,6 @@ class BenchmarkParam {
private:
static void AssertHasSameType(ParamType a, ParamType b);
- template <typename T>
- static ParamType GetValueType();
const ParamType type_;
};
diff --git a/tensorflow/contrib/lite/tools/visualize.py b/tensorflow/contrib/lite/tools/visualize.py
index f571dd59da..e07f899e4d 100644
--- a/tensorflow/contrib/lite/tools/visualize.py
+++ b/tensorflow/contrib/lite/tools/visualize.py
@@ -28,11 +28,24 @@ import json
import os
import sys
+from tensorflow.python.platform import resource_loader
+
# Schema to use for flatbuffers
_SCHEMA = "third_party/tensorflow/contrib/lite/schema/schema.fbs"
-# Where the binary will be once built in for the flatc converter
-_BINARY = "third_party/flatbuffers/flatc"
+# TODO(angerson): fix later when rules are simplified.
+_SCHEMA = resource_loader.get_path_to_datafile("../schema/schema.fbs")
+_BINARY = resource_loader.get_path_to_datafile("../../../../flatbuffers/flatc")
+# Account for different package positioning internal vs. external.
+if not os.path.exists(_BINARY):
+ _BINARY = resource_loader.get_path_to_datafile(
+ "../../../../../flatbuffers/flatc")
+
+if not os.path.exists(_SCHEMA):
+ raise RuntimeError("Sorry, schema file cannot be found at %r" % _SCHEMA)
+if not os.path.exists(_BINARY):
+ raise RuntimeError("Sorry, flatc is not available at %r" % _BINARY)
+
# A CSS description for making the visualizer
_CSS = """
diff --git a/tensorflow/contrib/makefile/tf_op_files.txt b/tensorflow/contrib/makefile/tf_op_files.txt
index 89db9ee279..6e7423f85e 100644
--- a/tensorflow/contrib/makefile/tf_op_files.txt
+++ b/tensorflow/contrib/makefile/tf_op_files.txt
@@ -92,6 +92,7 @@ tensorflow/core/kernels/reduction_ops_common.cc
tensorflow/core/kernels/reduction_ops_any.cc
tensorflow/core/kernels/reduction_ops_all.cc
tensorflow/core/kernels/roll_op.cc
+tensorflow/core/kernels/queue_op.cc
tensorflow/core/kernels/queue_ops.cc
tensorflow/core/kernels/queue_base.cc
tensorflow/core/kernels/pooling_ops_common.cc
diff --git a/tensorflow/contrib/metrics/BUILD b/tensorflow/contrib/metrics/BUILD
index 66cb493e5c..21cd34f73f 100644
--- a/tensorflow/contrib/metrics/BUILD
+++ b/tensorflow/contrib/metrics/BUILD
@@ -31,6 +31,7 @@ py_library(
"//tensorflow/python:check_ops",
"//tensorflow/python:confusion_matrix",
"//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:distribute",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:histogram_ops",
"//tensorflow/python:init_ops",
diff --git a/tensorflow/contrib/metrics/__init__.py b/tensorflow/contrib/metrics/__init__.py
index 5effea3596..88798d61b7 100644
--- a/tensorflow/contrib/metrics/__init__.py
+++ b/tensorflow/contrib/metrics/__init__.py
@@ -63,6 +63,7 @@ See the @{$python/contrib.metrics} guide.
@@aggregate_metrics
@@aggregate_metric_map
@@confusion_matrix
+@@f1_score
@@set_difference
@@set_intersection
@@set_size
diff --git a/tensorflow/contrib/metrics/python/metrics/classification.py b/tensorflow/contrib/metrics/python/metrics/classification.py
index 26aba1cc51..e553612269 100644
--- a/tensorflow/contrib/metrics/python/metrics/classification.py
+++ b/tensorflow/contrib/metrics/python/metrics/classification.py
@@ -22,6 +22,9 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import metrics_impl
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.training import distribute as distribute_lib
# TODO(nsilberman): move into metrics/python/ops/
@@ -62,3 +65,121 @@ def accuracy(predictions, labels, weights=None, name=None):
return math_ops.div(math_ops.reduce_sum(is_correct),
math_ops.reduce_sum(num_values))
return math_ops.reduce_mean(is_correct)
+
+
+def f1_score(labels, predictions, weights=None, num_thresholds=200,
+ metrics_collections=None, updates_collections=None, name=None):
+ """Computes the approximately best F1-score across different thresholds.
+
+ The f1_score function applies a range of thresholds to the predictions to
+ convert them from [0, 1] to bool. Precision and recall are computed by
+ comparing them to the labels. The F1-Score is then defined as
+ 2 * precision * recall / (precision + recall). The best one across the
+ thresholds is returned.
+
+ Disclaimer: In practice it may be desirable to choose the best threshold on
+ the validation set and evaluate the F1 score with this threshold on a
+ separate test set. Or it may be desirable to use a fixed threshold (e.g. 0.5).
+
+ This function internally creates four local variables, `true_positives`,
+ `true_negatives`, `false_positives` and `false_negatives` that are used to
+ compute the pairs of recall and precision values for a linearly spaced set of
+ thresholds from which the best f1-score is derived.
+
+ This value is ultimately returned as `f1-score`, an idempotent operation that
+ computes the F1-score (computed using the aforementioned variables). The
+ `num_thresholds` variable controls the degree of discretization with larger
+ numbers of thresholds more closely approximating the true best F1-score.
+
+ For estimation of the metric over a stream of data, the function creates an
+ `update_op` operation that updates these variables and returns the F1-score.
+
+ Example usage with a custom estimator:
+ def model_fn(features, labels, mode):
+ predictions = make_predictions(features)
+ loss = make_loss(predictions, labels)
+ train_op = tf.contrib.training.create_train_op(
+ total_loss=loss,
+ optimizer='Adam')
+ eval_metric_ops = {'f1': f1_score(labels, predictions)}
+ return tf.estimator.EstimatorSpec(
+ mode=mode,
+ predictions=predictions,
+ loss=loss,
+ train_op=train_op,
+ eval_metric_ops=eval_metric_ops)
+ estimator = tf.estimator.Estimator(model_fn=model_fn)
+
+ If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
+
+ Args:
+ labels: A `Tensor` whose shape matches `predictions`. Will be cast to
+ `bool`.
+ predictions: A floating point `Tensor` of arbitrary shape and whose values
+ are in the range `[0, 1]`.
+ weights: Optional `Tensor` whose rank is either 0, or the same rank as
+ `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
+ be either `1`, or the same as the corresponding `labels` dimension).
+ num_thresholds: The number of thresholds to use when discretizing the ROC
+ curve.
+ metrics_collections: An optional list of collections that `f1_score` should
+ be added to.
+ updates_collections: An optional list of collections that `update_op` should
+ be added to.
+ name: An optional variable_scope name.
+
+ Returns:
+ f1_score: A scalar `Tensor` representing the current best f1-score across
+ different thresholds.
+ update_op: An operation that increments the `true_positives`,
+ `true_negatives`, `false_positives` and `false_negatives` variables
+ appropriately and whose value matches the `f1_score`.
+
+ Raises:
+ ValueError: If `predictions` and `labels` have mismatched shapes, or if
+ `weights` is not `None` and its shape doesn't match `predictions`, or if
+ either `metrics_collections` or `updates_collections` are not a list or
+ tuple.
+ """
+ with variable_scope.variable_scope(
+ name, 'f1', (labels, predictions, weights)):
+ predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
+ predictions=predictions, labels=labels, weights=weights)
+ # To account for floating point imprecisions / avoid division by zero.
+ epsilon = 1e-7
+ thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
+ for i in range(num_thresholds - 2)]
+ thresholds = [0.0 - epsilon] + thresholds + [1.0 + epsilon]
+
+ # Confusion matrix.
+ values, update_ops = metrics_impl._confusion_matrix_at_thresholds( # pylint: disable=protected-access
+ labels, predictions, thresholds, weights, includes=('tp', 'fp', 'fn'))
+
+ # Compute precision and recall at various thresholds.
+ def compute_best_f1_score(tp, fp, fn, name):
+ precision_at_t = math_ops.div(tp, epsilon + tp + fp,
+ name='precision_' + name)
+ recall_at_t = math_ops.div(tp, epsilon + tp + fn, name='recall_' + name)
+ # Compute F1 score.
+ f1_at_thresholds = (
+ 2.0 * precision_at_t * recall_at_t /
+ (precision_at_t + recall_at_t + epsilon))
+ return math_ops.reduce_max(f1_at_thresholds)
+
+ def f1_across_towers(_, values):
+ best_f1 = compute_best_f1_score(tp=values['tp'], fp=values['fp'],
+ fn=values['fn'], name='value')
+ if metrics_collections:
+ ops.add_to_collections(metrics_collections, best_f1)
+ return best_f1
+
+ best_f1 = distribute_lib.get_tower_context().merge_call(
+ f1_across_towers, values)
+
+ update_op = compute_best_f1_score(tp=update_ops['tp'], fp=update_ops['fp'],
+ fn=update_ops['fn'], name='update')
+ if updates_collections:
+ ops.add_to_collections(updates_collections, update_op)
+
+ return best_f1, update_op
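
Note: to make the discretized search concrete, here is a hedged NumPy re-implementation of the best-F1-over-thresholds computation described in the docstring above (the helper name and the >= comparison are assumptions, not the TF implementation):

import numpy as np

def best_f1_numpy(labels, predictions, num_thresholds=200, epsilon=1e-7):
    # Linearly spaced interior thresholds, padded just outside [0, 1],
    # mirroring the grid built in f1_score above.
    thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                  for i in range(num_thresholds - 2)]
    thresholds = [0.0 - epsilon] + thresholds + [1.0 + epsilon]
    best = 0.0
    for t in thresholds:
        decisions = predictions >= t
        tp = np.sum(decisions & (labels == 1))
        fp = np.sum(decisions & (labels == 0))
        fn = np.sum(~decisions & (labels == 1))
        precision = tp / (epsilon + tp + fp)
        recall = tp / (epsilon + tp + fn)
        best = max(best, 2 * precision * recall /
                   (precision + recall + epsilon))
    return best

labels = np.array([0, 1, 1, 0])
predictions = np.array([0.1, 0.8, 0.6, 0.4])
print(best_f1_numpy(labels, predictions, num_thresholds=5))  # ~1.0
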
diff --git a/tensorflow/contrib/metrics/python/metrics/classification_test.py b/tensorflow/contrib/metrics/python/metrics/classification_test.py
index fa0f12d029..3d0b81c1be 100644
--- a/tensorflow/contrib/metrics/python/metrics/classification_test.py
+++ b/tensorflow/contrib/metrics/python/metrics/classification_test.py
@@ -18,9 +18,16 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import numpy as np
+
from tensorflow.contrib.metrics.python.metrics import classification
+from tensorflow.python.data.ops import dataset_ops
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@@ -108,5 +115,200 @@ class ClassificationTest(test.TestCase):
self.assertEqual(result, 0.5)
+class F1ScoreTest(test.TestCase):
+
+ def setUp(self):
+ super(F1ScoreTest, self).setUp()
+ np.random.seed(1)
+
+ def testVars(self):
+ classification.f1_score(
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
+ num_thresholds=3)
+ expected = {'f1/true_positives:0', 'f1/false_positives:0',
+ 'f1/false_negatives:0'}
+ self.assertEquals(
+ expected, set(v.name for v in variables.local_variables()))
+ self.assertEquals(
+ set(expected),
+ set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
+
+ def testMetricsCollection(self):
+ my_collection_name = '__metrics__'
+ f1, _ = classification.f1_score(
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
+ num_thresholds=3,
+ metrics_collections=[my_collection_name])
+ self.assertListEqual(ops.get_collection(my_collection_name), [f1])
+
+ def testUpdatesCollection(self):
+ my_collection_name = '__updates__'
+ _, f1_op = classification.f1_score(
+ predictions=array_ops.ones((10, 1)),
+ labels=array_ops.ones((10, 1)),
+ num_thresholds=3,
+ updates_collections=[my_collection_name])
+ self.assertListEqual(ops.get_collection(my_collection_name), [f1_op])
+
+ def testValueTensorIsIdempotent(self):
+ predictions = random_ops.random_uniform(
+ (10, 3), maxval=1, dtype=dtypes.float32, seed=1)
+ labels = random_ops.random_uniform(
+ (10, 3), maxval=2, dtype=dtypes.int64, seed=2)
+ f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
+
+ with self.test_session() as sess:
+ sess.run(variables.local_variables_initializer())
+
+ # Run several updates.
+ for _ in range(10):
+ sess.run([f1_op])
+
+ # Then verify idempotency.
+ initial_f1 = f1.eval()
+ for _ in range(10):
+ self.assertAllClose(initial_f1, f1.eval())
+
+ def testAllCorrect(self):
+ inputs = np.random.randint(0, 2, size=(100, 1))
+
+ with self.test_session() as sess:
+ predictions = constant_op.constant(inputs, dtype=dtypes.float32)
+ labels = constant_op.constant(inputs)
+ f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
+
+ sess.run(variables.local_variables_initializer())
+ sess.run([f1_op])
+
+ self.assertEqual(1, f1.eval())
+
+ def testSomeCorrect(self):
+ predictions = constant_op.constant(
+ [1, 0, 1, 0], shape=(1, 4), dtype=dtypes.float32)
+ labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
+ f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=1)
+ with self.test_session() as sess:
+ sess.run(variables.local_variables_initializer())
+ sess.run([f1_op])
+ # Threshold 0 will have around 0.5 precision and 1 recall yielding an F1
+ # score of 2 * 0.5 * 1 / (1 + 0.5).
+ self.assertAlmostEqual(2 * 0.5 * 1 / (1 + 0.5), f1.eval())
+
+ def testAllIncorrect(self):
+ inputs = np.random.randint(0, 2, size=(10000, 1))
+
+ with self.test_session() as sess:
+ predictions = constant_op.constant(inputs, dtype=dtypes.float32)
+ labels = constant_op.constant(1 - inputs, dtype=dtypes.float32)
+ f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
+
+ sess.run(variables.local_variables_initializer())
+ sess.run([f1_op])
+
+ # Threshold 0 will have around 0.5 precision and 1 recall yielding an F1
+ # score of 2 * 0.5 * 1 / (1 + 0.5).
+ self.assertAlmostEqual(2 * 0.5 * 1 / (1 + 0.5), f1.eval(), places=2)
+
+ def testWeights1d(self):
+ with self.test_session() as sess:
+ predictions = constant_op.constant(
+ [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes.float32)
+ labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
+ weights = constant_op.constant(
+ [[0], [1]], shape=(2, 1), dtype=dtypes.float32)
+ f1, f1_op = classification.f1_score(predictions, labels, weights,
+ num_thresholds=3)
+ sess.run(variables.local_variables_initializer())
+ sess.run([f1_op])
+
+ self.assertAlmostEqual(1.0, f1.eval(), places=5)
+
+ def testWeights2d(self):
+ with self.test_session() as sess:
+ predictions = constant_op.constant(
+ [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes.float32)
+ labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
+ weights = constant_op.constant(
+ [[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes.float32)
+ f1, f1_op = classification.f1_score(predictions, labels, weights,
+ num_thresholds=3)
+ sess.run(variables.local_variables_initializer())
+ sess.run([f1_op])
+
+ self.assertAlmostEqual(1.0, f1.eval(), places=5)
+
+ def testZeroLabelsPredictions(self):
+ with self.test_session() as sess:
+ predictions = array_ops.zeros([4], dtype=dtypes.float32)
+ labels = array_ops.zeros([4])
+ f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
+ sess.run(variables.local_variables_initializer())
+ sess.run([f1_op])
+
+ self.assertAlmostEqual(0.0, f1.eval(), places=5)
+
+ def testWithMultipleUpdates(self):
+ num_samples = 1000
+ batch_size = 10
+ num_batches = int(num_samples / batch_size)
+
+ # Create the labels and data.
+ labels = np.random.randint(0, 2, size=(num_samples, 1))
+ noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
+ predictions = 0.4 + 0.2 * labels + noise
+ predictions[predictions > 1] = 1
+ predictions[predictions < 0] = 0
+ thresholds = [-0.01, 0.5, 1.01]
+
+ expected_max_f1 = -1.0
+ for threshold in thresholds:
+ tp = 0
+ fp = 0
+ fn = 0
+ tn = 0
+ for i in range(num_samples):
+ if predictions[i] >= threshold:
+ if labels[i] == 1:
+ tp += 1
+ else:
+ fp += 1
+ else:
+ if labels[i] == 1:
+ fn += 1
+ else:
+ tn += 1
+ epsilon = 1e-7
+ expected_prec = tp / (epsilon + tp + fp)
+ expected_rec = tp / (epsilon + tp + fn)
+ expected_f1 = (2 * expected_prec * expected_rec /
+ (epsilon + expected_prec + expected_rec))
+ if expected_f1 > expected_max_f1:
+ expected_max_f1 = expected_f1
+
+ labels = labels.astype(np.float32)
+ predictions = predictions.astype(np.float32)
+ tf_predictions, tf_labels = (dataset_ops.Dataset
+ .from_tensor_slices((predictions, labels))
+ .repeat()
+ .batch(batch_size)
+ .make_one_shot_iterator()
+ .get_next())
+ f1, f1_op = classification.f1_score(tf_labels, tf_predictions,
+ num_thresholds=3)
+
+ with self.test_session() as sess:
+ sess.run(variables.local_variables_initializer())
+ for _ in range(num_batches):
+ sess.run([f1_op])
+ # Since this is only approximate, we can't expect a six-digit match,
+ # although with a higher number of samples/thresholds we should see
+ # the accuracy improve.
+ self.assertAlmostEqual(expected_max_f1, f1.eval(), 2)
+
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py b/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
index ef34f7bf7b..93050a3ae3 100644
--- a/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
+++ b/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
@@ -77,7 +77,7 @@ class LossScaleOptimizer(optimizer.Optimizer):
If gradients clipping is applied, one can call
`optimizer.compute_gradients()` and `optimizer.apply_gradients()`
- seperately.
+ separately.
Notice the following way of using LossScaleOptimizer is not intended. Always
use `loss_scale_optimizer.compute_gradients()` to compute gradients instead of
diff --git a/tensorflow/contrib/mpi_collectives/BUILD b/tensorflow/contrib/mpi_collectives/BUILD
index a7be92a35e..ecac06354d 100644
--- a/tensorflow/contrib/mpi_collectives/BUILD
+++ b/tensorflow/contrib/mpi_collectives/BUILD
@@ -52,6 +52,7 @@ tf_custom_op_library(
deps = [
":mpi_defines",
":mpi_message_proto_cc",
+ "//tensorflow/stream_executor:stream_executor_headers_lib",
"//third_party/mpi",
],
)
diff --git a/tensorflow/contrib/mpi_collectives/kernels/mpi_ops.cc b/tensorflow/contrib/mpi_collectives/kernels/mpi_ops.cc
index ed22ee667f..e4b0c2c654 100644
--- a/tensorflow/contrib/mpi_collectives/kernels/mpi_ops.cc
+++ b/tensorflow/contrib/mpi_collectives/kernels/mpi_ops.cc
@@ -73,7 +73,7 @@ limitations under the License.
*/
template <class T>
-using StatusOr = se::port::StatusOr<T>;
+using StatusOr = stream_executor::port::StatusOr<T>;
using CPUDevice = Eigen::ThreadPoolDevice;
using GPUDevice = Eigen::GpuDevice;
diff --git a/tensorflow/contrib/mpi_collectives/mpi_ops.py b/tensorflow/contrib/mpi_collectives/mpi_ops.py
new file mode 100644
index 0000000000..bd7096d9ce
--- /dev/null
+++ b/tensorflow/contrib/mpi_collectives/mpi_ops.py
@@ -0,0 +1,163 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Inter-process communication using MPI."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+
+from tensorflow.python.framework import errors
+from tensorflow.python.framework import load_library
+from tensorflow.python.framework import ops
+from tensorflow.python.platform import resource_loader
+from tensorflow.python.platform import tf_logging as logging
+
+
+def _load_library(name, op_list=None):
+ """Loads a .so file containing the specified operators.
+
+ Args:
+ name: The name of the .so file to load.
+ op_list: A list of names of operators that the library should have. If None
+ then the .so file's contents will not be verified.
+
+ Raises:
+ NameError: If one of the required ops is missing.
+ """
+ try:
+ filename = resource_loader.get_path_to_datafile(name)
+ library = load_library.load_op_library(filename)
+ for expected_op in (op_list or []):
+ for lib_op in library.OP_LIST.op:
+ if lib_op.name == expected_op:
+ break
+ else:
+ raise NameError('Could not find operator %s in dynamic library %s' %
+ (expected_op, name))
+ return library
+ except errors.NotFoundError:
+ logging.warning('%s file could not be loaded.', name)
+
+
+MPI_LIB = _load_library(
+ 'mpi_collectives.so',
+ ['MPISize', 'MPIRank', 'MPILocalRank', 'MPIAllgather', 'MPIAllreduce'])
+
+
+def size(name=None):
+ """An op which returns the number of MPI processes.
+
+ This is equivalent to running `MPI_Comm_size(MPI_COMM_WORLD, ...)` to get the
+ size of the global communicator.
+
+ Returns:
+ An integer scalar containing the number of MPI processes.
+ """
+ return MPI_LIB.mpi_size(name=name)
+
+
+ops.NotDifferentiable('MPISize')
+
+
+def rank(name=None):
+ """An op which returns the MPI rank of the calling process.
+
+ This is equivalent to running `MPI_Comm_rank(MPI_COMM_WORLD, ...)` to get the
+ rank of the current process in the global communicator.
+
+ Returns:
+ An integer scalar with the MPI rank of the calling process.
+ """
+ return MPI_LIB.mpi_rank(name=name)
+
+
+ops.NotDifferentiable('MPIRank')
+
+
+def init(name=None):
+ """An op which initializes MPI on the device on which it is run.
+
+ All future MPI ops must be run on the same device that the `init` op was run
+ on.
+ """
+ return MPI_LIB.mpi_init(name=name)
+
+
+ops.NotDifferentiable('MPIInit')
+
+
+def local_rank(name=None):
+ """An op which returns the local MPI rank of the calling process, within the
+ node that it is running on. For example, if there are seven processes running
+ on a node, their local ranks will be zero through six, inclusive.
+
+ This is equivalent to running `MPI_Comm_rank(...)` on a new communicator
+ which only includes processes on the same node.
+
+ Returns:
+ An integer scalar with the local MPI rank of the calling process.
+ """
+ return MPI_LIB.mpi_local_rank(name=name)
+
+
+ops.NotDifferentiable('MPILocalRank')
+
+
+def _allreduce(tensor, name=None):
+ """An op which sums an input tensor over all the MPI processes.
+
+ The reduction operation is keyed by the name of the op. The tensor type and
+ shape must be the same on all MPI processes for a given name. The reduction
+ will not start until all processes are ready to send and receive the tensor.
+
+ Returns:
+ A tensor of the same shape and type as `tensor`, summed across all
+ processes.
+ """
+ return MPI_LIB.mpi_allreduce(tensor, name=name)
+
+
+ops.NotDifferentiable('MPIAllreduce')
+
+
+def allgather(tensor, name=None):
+ """An op which concatenates the input tensor with the same input tensor on
+ all other MPI processes.
+
+ The concatenation is done on the first dimension, so the input tensors on the
+ different processes must have the same rank and shape, except for the first
+ dimension, which is allowed to be different.
+
+ Returns:
+ A tensor of the same type as `tensor`, concatenated on dimension zero
+ across all processes. The shape is identical to the input shape, except for
+ the first dimension, which may be greater and is the sum of all first
+ dimensions of the tensors in different MPI processes.
+ """
+ # Specify that the first allgather collects the tensor gather sizes,
+ # indicated by passing in a scalar (0-D tensor) of value 0.
+ sizes_flag = tf.constant(0, dtype=tf.int64, name='size_flag_const')
+ my_size = tf.slice(
+ tf.shape(tensor, out_type=tf.int64), [0], [1], name='size_slice')
+ if name is None:
+ name = 'allgather'
+ sizing_name = '{}_sizing'.format(name)
+ sizes = MPI_LIB.mpi_allgather(my_size, sizes_flag, name=sizing_name)
+ return MPI_LIB.mpi_allgather(tensor, sizes, name=name)
+
+
+ops.NotDifferentiable('MPIAllgather')
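
Note: allgather above runs in two phases: a sizing allgather collects each rank's first-dimension size, then the data allgather concatenates the variable-sized blocks. A hedged single-process NumPy sketch of how the gathered sizes become output offsets:

import numpy as np

sizes = [3, 1, 2]                                  # dim-0 sizes from 3 ranks
offsets = np.concatenate(([0], np.cumsum(sizes[:-1])))
print(offsets)                                     # [0 3 4]
# Rank r copies its rows into output[offsets[r]:offsets[r] + sizes[r]].
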
diff --git a/tensorflow/contrib/mpi_collectives/ring.cc b/tensorflow/contrib/mpi_collectives/ring.cc
new file mode 100644
index 0000000000..d93233eb21
--- /dev/null
+++ b/tensorflow/contrib/mpi_collectives/ring.cc
@@ -0,0 +1,80 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifdef TENSORFLOW_USE_MPI
+
+#define EIGEN_USE_THREADS
+
+#include "tensorflow/contrib/mpi_collectives/ring.h"
+
+namespace tensorflow {
+namespace contrib {
+namespace mpi {
+
+using CPUDevice = Eigen::ThreadPoolDevice;
+
+extern template MPI_Datatype MPIType<float>();
+extern template MPI_Datatype MPIType<int>();
+extern template MPI_Datatype MPIType<long long>();
+extern template DataType TensorFlowDataType<float>();
+extern template DataType TensorFlowDataType<int>();
+extern template DataType TensorFlowDataType<long long>();
+
+// Generate all necessary specializations for RingAllreduce.
+template Status RingAllreduce<CPUDevice, int>(OpKernelContext*, const Tensor*,
+ Tensor*, Tensor*);
+template Status RingAllreduce<CPUDevice, long long>(OpKernelContext*,
+ const Tensor*, Tensor*,
+ Tensor*);
+template Status RingAllreduce<CPUDevice, float>(OpKernelContext*, const Tensor*,
+ Tensor*, Tensor*);
+
+// Generate all necessary specializations for RingAllgather.
+template Status RingAllgather<CPUDevice, int>(OpKernelContext*, const Tensor*,
+ const std::vector<size_t>&,
+ Tensor*);
+template Status RingAllgather<CPUDevice, long long>(OpKernelContext*,
+ const Tensor*,
+ const std::vector<size_t>&,
+ Tensor*);
+template Status RingAllgather<CPUDevice, float>(OpKernelContext*, const Tensor*,
+ const std::vector<size_t>&,
+ Tensor*);
+
+// Copy data on a CPU using a straightforward memcpy.
+template <>
+void CopyTensorData<CPUDevice>(void* dst, void* src, size_t size) {
+ std::memcpy(dst, src, size);
+};
+
+// Accumulate values on a CPU.
+#define GENERATE_ACCUMULATE(type) \
+ template <> \
+ void AccumulateTensorData<CPUDevice, type>(type * dst, type * src, \
+ size_t size) { \
+ for (unsigned int i = 0; i < size; i++) { \
+ dst[i] += src[i]; \
+ } \
+ };
+GENERATE_ACCUMULATE(int);
+GENERATE_ACCUMULATE(long long);
+GENERATE_ACCUMULATE(float);
+#undef GENERATE_ACCUMULATE
+
+} // namespace mpi
+} // namespace contrib
+} // namespace tensorflow
+
+#endif // TENSORFLOW_USE_MPI
diff --git a/tensorflow/contrib/mpi_collectives/ring.cu.cc b/tensorflow/contrib/mpi_collectives/ring.cu.cc
new file mode 100644
index 0000000000..2f3eef366a
--- /dev/null
+++ b/tensorflow/contrib/mpi_collectives/ring.cu.cc
@@ -0,0 +1,117 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifdef TENSORFLOW_USE_MPI
+
+#if GOOGLE_CUDA
+
+#define EIGEN_USE_GPU
+
+#include "tensorflow/contrib/mpi_collectives/ring.h"
+
+namespace tensorflow {
+namespace contrib {
+namespace mpi {
+
+using CPUDevice = Eigen::ThreadPoolDevice;
+
+template <>
+MPI_Datatype MPIType<float>() {
+ return MPI_FLOAT;
+};
+template <>
+MPI_Datatype MPIType<int>() {
+ return MPI_INT;
+};
+template <>
+MPI_Datatype MPIType<long long>() {
+ return MPI_LONG_LONG;
+};
+
+template <>
+DataType TensorFlowDataType<float>() {
+ return DT_FLOAT;
+};
+template <>
+DataType TensorFlowDataType<int>() {
+ return DT_INT32;
+};
+template <>
+DataType TensorFlowDataType<long long>() {
+ return DT_INT64;
+};
+
+// Generate all necessary specializations for RingAllreduce.
+template Status RingAllreduce<GPUDevice, int>(OpKernelContext*, const Tensor*,
+ Tensor*, Tensor*);
+template Status RingAllreduce<GPUDevice, long long>(OpKernelContext*,
+ const Tensor*, Tensor*,
+ Tensor*);
+template Status RingAllreduce<GPUDevice, float>(OpKernelContext*, const Tensor*,
+ Tensor*, Tensor*);
+
+// Generate all necessary specializations for RingAllgather.
+template Status RingAllgather<GPUDevice, int>(OpKernelContext*, const Tensor*,
+ const std::vector<size_t>&,
+ Tensor*);
+template Status RingAllgather<GPUDevice, long long>(OpKernelContext*,
+ const Tensor*,
+ const std::vector<size_t>&,
+ Tensor*);
+template Status RingAllgather<GPUDevice, float>(OpKernelContext*, const Tensor*,
+ const std::vector<size_t>&,
+ Tensor*);
+
+// Synchronously copy data on the GPU, using a stream other than the default
+// and other than TensorFlow's, to avoid synchronizing on operations
+// unrelated to the allreduce.
+template <>
+void CopyTensorData<GPUDevice>(void* dst, void* src, size_t size) {
+ auto stream = CudaStreamForMPI();
+ cudaMemcpyAsync(dst, src, size, cudaMemcpyDeviceToDevice, stream);
+ cudaStreamSynchronize(stream);
+};
+
+// Elementwise accumulation kernel for GPU.
+template <typename T>
+__global__ void elemwise_accum(T* out, const T* in, const size_t N) {
+ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
+ i += blockDim.x * gridDim.x) {
+ out[i] += in[i];
+ }
+}
+
+// Synchronously accumulate tensors on the GPU, using a stream other than the
+// default and other than TensorFlow's, to avoid synchronizing on operations
+// unrelated to the allreduce.
+#define GENERATE_ACCUMULATE(type) \
+ template <> \
+ void AccumulateTensorData<GPUDevice, type>(type * dst, type * src, \
+ size_t size) { \
+ auto stream = CudaStreamForMPI(); \
+ elemwise_accum<type><<<32, 256, 0, stream>>>(dst, src, size); \
+ cudaStreamSynchronize(stream); \
+ };
+GENERATE_ACCUMULATE(int);
+GENERATE_ACCUMULATE(long long);
+GENERATE_ACCUMULATE(float);
+#undef GENERATE_ACCUMULATE
+
+} // namespace mpi
+} // namespace contrib
+} // namespace tensorflow
+#endif // GOOGLE_CUDA
+
+#endif // TENSORFLOW_USE_MPI
diff --git a/tensorflow/contrib/mpi_collectives/ring.h b/tensorflow/contrib/mpi_collectives/ring.h
new file mode 100644
index 0000000000..cae57ce60e
--- /dev/null
+++ b/tensorflow/contrib/mpi_collectives/ring.h
@@ -0,0 +1,327 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CONTRIB_MPI_H_
+#define TENSORFLOW_CONTRIB_MPI_H_
+
+#ifdef TENSORFLOW_USE_MPI
+
+#include "tensorflow/core/framework/op.h"
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/shape_inference.h"
+
+#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
+#include "tensorflow/core/framework/tensor_types.h"
+
+#if GOOGLE_CUDA
+#include "cuda_runtime.h"
+#endif
+
+// Needed to avoid header issues with C++-supporting MPI implementations
+#define OMPI_SKIP_MPICXX
+#include "third_party/mpi/mpi.h"
+
+#define TAG_TENSOR 12
+
+namespace tensorflow {
+namespace contrib {
+namespace mpi {
+
+using CPUDevice = Eigen::ThreadPoolDevice;
+using GPUDevice = Eigen::GpuDevice;
+
+// Convert from templated types to values we can pass to MPI.
+template <typename T>
+MPI_Datatype MPIType();
+
+// Convert from templated types to TensorFlow data types.
+template <typename T>
+DataType TensorFlowDataType();
+
+#define MPI_REQUIRES_OK(MPI_STATUS) \
+ if ((MPI_STATUS) != MPI_SUCCESS) { \
+ return errors::Unknown("MPI operation failed unexpectedly."); \
+ }
+
+// Copy data from one tensor to another tensor.
+// This uses a custom CUDA stream on GPU, which is necessary to overlap the
+// backpropagation computations with the allreduce.
+template <typename Device>
+void CopyTensorData(void* destination, void* source, size_t size);
+
+// Add a tensor into another tensor, accumulating in place.
+// This uses a custom CUDA stream on GPU, which is necessary to overlap the
+// backpropagation computations with the allreduce.
+template <typename Device, typename T>
+void AccumulateTensorData(T* destination, T* source, size_t size);
+
+// We need to get the right stream for doing CUDA memory transfers and
+// operations, which is possibly different from the standard TensorFlow stream.
+#if GOOGLE_CUDA
+cudaStream_t CudaStreamForMPI();
+#endif
+
+/* Perform a ring allreduce on the data. Allocate the necessary output tensor
+ * and store it in the output parameter.
+ *
+ * Assumes that all MPI processes are doing an allreduce of the same tensor,
+ * with the same dimensions.
+ *
+ * A ring allreduce is a bandwidth-optimal way to do an allreduce. To do the
+ * allreduce, the nodes involved are arranged in a ring:
+ *
+ * .--0--.
+ * / \
+ * 3 1
+ * \ /
+ * *--2--*
+ *
+ * Each node always sends to the next clockwise node in the ring, and receives
+ * from the previous one.
+ *
+ * The allreduce is done in two parts: a scatter-reduce and an allgather. In
+ * the scatter reduce, a reduction is done, so that each node ends up with a
+ * chunk of the final output tensor which has contributions from all other
+ * nodes. In the allgather, those chunks are distributed among all the nodes,
+ * so that all nodes have the entire output tensor.
+ *
+ * Both of these operations are done by dividing the input tensor into N
+ * evenly sized chunks (where N is the number of nodes in the ring).
+ *
+ * The scatter-reduce is done in N-1 steps. In the ith step, node j will send
+ * the (j - i)th chunk and receive the (j - i - 1)th chunk, adding it in to
+ * its existing data for that chunk. For example, in the first iteration with
+ * the ring depicted above, you will have the following transfers:
+ *
+ * Segment 0: Node 0 --> Node 1
+ * Segment 1: Node 1 --> Node 2
+ * Segment 2: Node 2 --> Node 3
+ * Segment 3: Node 3 --> Node 0
+ *
+ * In the second iteration, you'll have the following transfers:
+ *
+ * Segment 0: Node 1 --> Node 2
+ * Segment 1: Node 2 --> Node 3
+ * Segment 2: Node 3 --> Node 0
+ * Segment 3: Node 0 --> Node 1
+ *
+ * After this iteration, Node 2 has three of the four contributions to Segment 0.
+ * The last iteration has the following transfers:
+ *
+ * Segment 0: Node 2 --> Node 3
+ * Segment 1: Node 3 --> Node 0
+ * Segment 2: Node 0 --> Node 1
+ * Segment 3: Node 1 --> Node 2
+ *
+ * After this iteration, Node 3 has the fully accumulated Segment 0; Node 0
+ * has the fully accumulated Segment 1; and so on. The scatter-reduce is
+ * complete.
+ *
+ * Next, the allgather distributes these fully accumulated chunks across all
+ * nodes. Communication proceeds in the same ring, once again in N-1 steps. At
+ * the ith step, node j will send chunk (j - i + 1) and receive chunk (j - i).
+ * For example, at the first iteration, the following transfers will occur:
+ *
+ * Segment 0: Node 3 --> Node 0
+ * Segment 1: Node 0 --> Node 1
+ * Segment 2: Node 1 --> Node 2
+ * Segment 3: Node 2 --> Node 3
+ *
+ * After the first iteration, Node 0 will have a fully accumulated Segment 0
+ * (from Node 3) and Segment 1. In the next iteration, Node 0 will send its
+ * just-received Segment 0 onward to Node 1, and receive Segment 3 from Node 3.
+ * After this has continued for N - 1 iterations, all nodes will have the
+ * fully accumulated tensor.
+ *
+ * Each node will do (N-1) sends for the scatter-reduce and (N-1) sends for the
+ * allgather. Each send will contain K / N bytes, if there are K bytes in the
+ * original tensor on every node. Thus, each node sends and receives 2K(N - 1)/N
+ * bytes of data, and the performance of the allreduce (assuming no latency in
+ * connections) is constrained by the slowest interconnect between the nodes.
+ *
+ */
+template <typename Device, typename T>
+Status RingAllreduce(OpKernelContext* context, const Tensor* input,
+ Tensor* temp, Tensor* output) {
+ // Acquire MPI size and rank
+ int n, r;
+ MPI_REQUIRES_OK(MPI_Comm_size(MPI_COMM_WORLD, &n));
+ MPI_REQUIRES_OK(MPI_Comm_rank(MPI_COMM_WORLD, &r));
+
+ T* buffer = (T*)output->tensor_data().data();
+
+ CopyTensorData<Device>((void*)buffer, (void*)input->tensor_data().data(),
+ output->tensor_data().size());
+
+ // Calculate segment sizes and segment ends
+ const size_t elements_to_reduce = input->NumElements();
+ const size_t segment_size = elements_to_reduce / n;
+ std::vector<size_t> segment_sizes(n, segment_size);
+
+ const size_t residual = elements_to_reduce % n;
+ for (size_t i = 0; i < residual; ++i) {
+ segment_sizes[i]++;
+ }
+
+ std::vector<size_t> segment_starts(n);
+ segment_starts[0] = 0;
+ for (size_t i = 1; i < segment_starts.size(); ++i) {
+ segment_starts[i] = segment_starts[i - 1] + segment_sizes[i - 1];
+ }
+
+ assert(segment_starts[n - 1] + segment_sizes[n - 1] == elements_to_reduce);
+
+ T* segment_recv = (T*)temp->tensor_data().data();
+
+ // Receive from your left neighbor with wrap-around
+ const size_t recv_from = ((r - 1) + n) % n;
+
+ // Send to your right neighbor with wrap-around
+ const size_t send_to = (r + 1) % n;
+
+ MPI_Status recv_status;
+ MPI_Request recv_req;
+
+ // Now start the ring. At every step, for every rank, we iterate through
+ // segments with wraparound and send and recv from our neighbors and reduce
+ // locally. At the i'th iteration, rank r sends segment (r-i) and receives
+ // segment (r-i-1).
+ for (int i = 0; i < n - 1; i++) {
+ const size_t send_seg_id = ((r - i) + n) % n;
+ const size_t recv_seg_id = ((r - i - 1) + n) % n;
+
+ T* segment_send = &(buffer[segment_starts[send_seg_id]]);
+
+ MPI_REQUIRES_OK(MPI_Irecv(segment_recv, segment_sizes[recv_seg_id],
+ MPIType<T>(), recv_from, TAG_TENSOR,
+ MPI_COMM_WORLD, &recv_req));
+
+ MPI_REQUIRES_OK(MPI_Send(segment_send, segment_sizes[send_seg_id],
+ MPIType<T>(), send_to, TAG_TENSOR,
+ MPI_COMM_WORLD));
+
+ T* segment_update = &(buffer[segment_starts[recv_seg_id]]);
+
+ // Wait for recv to complete before reduction
+ MPI_REQUIRES_OK(MPI_Wait(&recv_req, &recv_status));
+
+ const size_t recv_seg_size = segment_sizes[recv_seg_id];
+ AccumulateTensorData<Device, T>(segment_update, segment_recv,
+ recv_seg_size);
+ }
+
+ // Now start the pipelined ring allgather. At every step, for every rank,
+ // we iterate through segments with wraparound and send and recv from our
+ // neighbors. At the i'th iteration, rank r sends segment (r-i+1) and
+ // receives segment (r-i).
+ for (size_t i = 0; i < n - 1; ++i) {
+ const size_t send_seg_id = ((r - i + 1) + n) % n;
+ const size_t recv_seg_id = ((r - i) + n) % n;
+
+ // Segment to send - at every iteration we send segment (r-i+1)
+ T* segment_send = &(buffer[segment_starts[send_seg_id]]);
+
+ // Segment to recv - at every iteration we receive segment (r-i)
+ T* segment_recv = &(buffer[segment_starts[recv_seg_id]]);
+
+ MPI_REQUIRES_OK(MPI_Sendrecv(
+ segment_send, segment_sizes[send_seg_id], MPIType<T>(), send_to,
+ TAG_TENSOR, segment_recv, segment_sizes[recv_seg_id], MPIType<T>(),
+ recv_from, TAG_TENSOR, MPI_COMM_WORLD, &recv_status));
+ }
+
+ return Status::OK();
+}
+
+// Perform a ring allgather on a Tensor. Other ranks may allgather with a
+// tensor which differs in the first dimension only; all other dimensions must
+// be the same.
+//
+// For more information on the ring allgather, read the documentation for the
+// ring allreduce, which includes a ring allgather.
+template <typename Device, typename T>
+Status RingAllgather(OpKernelContext* context, const Tensor* input,
+ const std::vector<size_t>& sizes, Tensor* output) {
+ // Acquire MPI size and rank
+ int n, r;
+ MPI_REQUIRES_OK(MPI_Comm_size(MPI_COMM_WORLD, &n));
+ MPI_REQUIRES_OK(MPI_Comm_rank(MPI_COMM_WORLD, &r));
+
+ assert(sizes.size() == n);
+ assert(input->dim_size(0) == sizes[r]);
+
+ // Compute the number of elements in every "row". We can't precompute the
+ // number of elements in each chunk, because the chunks are variable length.
+ size_t elements_per_row = 1;
+ for (int i = 1; i < input->shape().dims(); i++) {
+ elements_per_row *= input->dim_size(i);
+ }
+
+ // Copy data from input tensor to correct place in output tensor.
+ std::vector<size_t> segment_starts(n);
+ segment_starts[0] = 0;
+ for (int i = 1; i < n; i++) {
+ segment_starts[i] = segment_starts[i - 1] + elements_per_row * sizes[i - 1];
+ }
+ size_t offset = segment_starts[r];
+
+ // Copy data to the right offset for this rank.
+ T* buffer = (T*)output->tensor_data().data();
+ CopyTensorData<Device>((void*)(buffer + offset),
+ (void*)input->tensor_data().data(),
+ elements_per_row * sizes[r] * sizeof(T));
+
+ // Receive from your left neighbor with wrap-around
+ const size_t recv_from = ((r - 1) + n) % n;
+
+ // Send to your right neighbor with wrap-around
+ const size_t send_to = (r + 1) % n;
+
+ // Perform a ring allgather. At every step, for every rank, we iterate
+ // through segments with wraparound and send and recv from our neighbors.
+ // At the i'th iteration, rank r sends segment (r-i) and receives segment
+ // (r-1-i).
+ MPI_Status recv_status;
+ for (size_t i = 0; i < n - 1; ++i) {
+ const size_t send_seg_id = ((r - i) + n) % n;
+ const size_t recv_seg_id = ((r - i - 1) + n) % n;
+
+ // Segment to send - at every iteration we send segment (r-i)
+ size_t offset_send = segment_starts[send_seg_id];
+ size_t rows_send = sizes[send_seg_id];
+ T* segment_send = &(buffer[offset_send]);
+
+ // Segment to recv - at every iteration we receive segment (r-1-i)
+ size_t offset_recv = segment_starts[recv_seg_id];
+ size_t rows_recv = sizes[recv_seg_id];
+ T* segment_recv = &(buffer[offset_recv]);
+
+ MPI_REQUIRES_OK(MPI_Sendrecv(
+ segment_send, elements_per_row * rows_send, MPIType<T>(), send_to,
+ TAG_TENSOR, segment_recv, elements_per_row * rows_recv, MPIType<T>(),
+ recv_from, TAG_TENSOR, MPI_COMM_WORLD, &recv_status));
+ }
+
+ return Status::OK();
+}
+
+} // namespace mpi
+} // namespace contrib
+} // namespace tensorflow
+
+#endif // TENSORFLOW_USE_MPI
+
+#undef TENSORFLOW_CONTRIB_MPI_H_
+#endif // TENSORFLOW_CONTRIB_MPI_H_
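
Note: the schedule documented in ring.h can be checked with a hedged single-process NumPy simulation (equal-size segments assumed; this models the message pattern only, not the MPI calls):

import numpy as np

def ring_allreduce_sim(tensors):
    n = len(tensors)
    segs = [np.array_split(t.astype(float), n) for t in tensors]
    # Scatter-reduce: at step i, rank r receives segment (r-i-1) from rank
    # r-1 and accumulates it locally.
    for i in range(n - 1):
        incoming = [segs[(r - 1) % n][(r - i - 1) % n].copy()
                    for r in range(n)]
        for r in range(n):
            segs[r][(r - i - 1) % n] += incoming[r]
    # Allgather: at step i, rank r receives the fully reduced segment (r-i)
    # from rank r-1 and keeps it verbatim.
    for i in range(n - 1):
        incoming = [segs[(r - 1) % n][(r - i) % n].copy() for r in range(n)]
        for r in range(n):
            segs[r][(r - i) % n] = incoming[r]
    return [np.concatenate(s) for s in segs]

ranks = [np.arange(8) * (r + 1) for r in range(4)]
out = ring_allreduce_sim(ranks)
assert all(np.array_equal(o, sum(ranks)) for o in out)
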
diff --git a/tensorflow/contrib/nccl/BUILD b/tensorflow/contrib/nccl/BUILD
index 7cfdf0f607..62996d1fd8 100644
--- a/tensorflow/contrib/nccl/BUILD
+++ b/tensorflow/contrib/nccl/BUILD
@@ -19,17 +19,18 @@ load("//tensorflow:tensorflow.bzl", "cuda_py_test")
load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda")
load("//tensorflow:tensorflow.bzl", "tf_kernel_library")
load("//tensorflow:tensorflow.bzl", "tf_custom_op_py_library")
+load("//tensorflow:tensorflow.bzl", "if_not_windows_cuda")
tf_custom_op_library(
name = "python/ops/_nccl_ops.so",
srcs = [
"ops/nccl_ops.cc",
],
- gpu_srcs = [
+ gpu_srcs = if_not_windows_cuda([
"kernels/nccl_manager.cc",
"kernels/nccl_manager.h",
"kernels/nccl_ops.cc",
- ],
+ ]),
deps = if_cuda([
"@local_config_nccl//:nccl",
"//tensorflow/core:gpu_headers_lib",
diff --git a/tensorflow/contrib/nccl/python/ops/nccl_ops.py b/tensorflow/contrib/nccl/python/ops/nccl_ops.py
index 029b01412d..fa597cf3ef 100644
--- a/tensorflow/contrib/nccl/python/ops/nccl_ops.py
+++ b/tensorflow/contrib/nccl/python/ops/nccl_ops.py
@@ -63,12 +63,12 @@ def _all_sum_grad(op, grad):
Raises:
LookupError: If `reduction` is not `sum`.
"""
- if op.get_attr('reduction') != 'sum':
+ if op.get_attr('reduction') != b'sum':
raise LookupError('No gradient defined for NcclAllReduce except sum.')
_check_device(grad, expected=op.device)
num_devices = op.get_attr('num_devices')
- shared_name = op.get_attr('shared_name') + '_grad'
+ shared_name = op.get_attr('shared_name') + b'_grad'
with ops.device(op.device):
return gen_nccl_ops.nccl_all_reduce(
@@ -162,7 +162,7 @@ def _reduce_sum_grad(op, grad):
Raises:
LookupError: If the reduction attribute of op is not `sum`.
"""
- if op.get_attr('reduction') != 'sum':
+ if op.get_attr('reduction') != b'sum':
raise LookupError('No gradient defined for NcclReduce except sum.')
_check_device(grad, expected=op.device)
diff --git a/tensorflow/contrib/opt/__init__.py b/tensorflow/contrib/opt/__init__.py
index 157ed6a278..3e63e99030 100644
--- a/tensorflow/contrib/opt/__init__.py
+++ b/tensorflow/contrib/opt/__init__.py
@@ -22,17 +22,18 @@ from __future__ import print_function
from tensorflow.contrib.opt.python.training.adamax import *
from tensorflow.contrib.opt.python.training.addsign import *
from tensorflow.contrib.opt.python.training.drop_stale_gradient_optimizer import *
+from tensorflow.contrib.opt.python.training.elastic_average_optimizer import *
from tensorflow.contrib.opt.python.training.external_optimizer import *
+from tensorflow.contrib.opt.python.training.ggt import *
from tensorflow.contrib.opt.python.training.lazy_adam_optimizer import *
+from tensorflow.contrib.opt.python.training.model_average_optimizer import *
from tensorflow.contrib.opt.python.training.moving_average_optimizer import *
from tensorflow.contrib.opt.python.training.multitask_optimizer_wrapper import *
from tensorflow.contrib.opt.python.training.nadam_optimizer import *
from tensorflow.contrib.opt.python.training.weight_decay_optimizers import *
from tensorflow.contrib.opt.python.training.powersign import *
from tensorflow.contrib.opt.python.training.variable_clipping_optimizer import *
-from tensorflow.contrib.opt.python.training.elastic_average_optimizer import *
-from tensorflow.contrib.opt.python.training.model_average_optimizer import *
-from tensorflow.contrib.opt.python.training.ggt import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
diff --git a/tensorflow/contrib/opt/python/training/addsign_test.py b/tensorflow/contrib/opt/python/training/addsign_test.py
index 08d45ed73f..628a735e72 100644
--- a/tensorflow/contrib/opt/python/training/addsign_test.py
+++ b/tensorflow/contrib/opt/python/training/addsign_test.py
@@ -214,7 +214,7 @@ class AddSignTest(test.TestCase):
# Run 7 steps of AddSign
# first 4 steps with positive gradient
# last 3 steps with negative gradient (sign(gm) should be -1)
- for t in range(1, 4):
+ for t in range(1, 8):
if t < 5:
update.run()
else:
@@ -222,7 +222,7 @@ class AddSignTest(test.TestCase):
var0_np, m0 = addsign_update_numpy(
var0_np,
- grads0_np,
+ grads0_np if t < 5 else -grads0_np,
m0,
learning_rate,
alpha=alpha,
@@ -232,7 +232,7 @@ class AddSignTest(test.TestCase):
)
var1_np, m1 = addsign_update_numpy(
var1_np,
- grads1_np,
+ grads1_np if t < 5 else -grads1_np,
m1,
learning_rate,
alpha=alpha,
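
Note: for reference while reading the test fix above, here is a hedged sketch of the AddSign rule it exercises (per Bello et al. 2017, ignoring the optional sign decay; this is not the TF kernel):

import numpy as np

def addsign_step(var, g, m, lr=0.1, alpha=1.0, beta=0.9):
    m = beta * m + (1 - beta) * g                 # momentum estimate
    # The step is scaled up when sign(g) == sign(m) and down when they
    # disagree, which is what the flipped-gradient steps (t >= 5) probe.
    return var - lr * (alpha + np.sign(g) * np.sign(m)) * g, m

var, m = np.array([1.0, 2.0]), np.zeros(2)
for t in range(1, 8):
    g = np.array([0.1, 0.1]) if t < 5 else np.array([-0.1, -0.1])
    var, m = addsign_step(var, g, m)
print(var)
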
diff --git a/tensorflow/contrib/opt/python/training/ggt.py b/tensorflow/contrib/opt/python/training/ggt.py
index 928c453517..cae952d8f5 100644
--- a/tensorflow/contrib/opt/python/training/ggt.py
+++ b/tensorflow/contrib/opt/python/training/ggt.py
@@ -33,7 +33,7 @@ class GGTOptimizer(optimizer_v2.OptimizerV2):
GGT has an advantage over sgd and adam on large models with poor conditioning,
for example language models and CNNs,
- see [ABCHSZZ 2018]([pdf](https://arxiv.org/pdf/1806.02958.pdf)).
+ see [[ABCHSZZ 2018]](https://arxiv.org/pdf/1806.02958.pdf).
"""
def __init__(self,
diff --git a/tensorflow/contrib/opt/python/training/powersign_test.py b/tensorflow/contrib/opt/python/training/powersign_test.py
index 5214082dd6..0bcf5d230a 100644
--- a/tensorflow/contrib/opt/python/training/powersign_test.py
+++ b/tensorflow/contrib/opt/python/training/powersign_test.py
@@ -216,7 +216,7 @@ class PowerSignTest(test.TestCase):
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
- # Run 3 steps of powersign
+ # Run 7 steps of powersign
# first 4 steps with positive gradient
# last 3 steps with negative gradient (sign(gm) should be -1)
for t in range(1, 8):
diff --git a/tensorflow/contrib/opt/python/training/weight_decay_optimizers.py b/tensorflow/contrib/opt/python/training/weight_decay_optimizers.py
index 8aa40aeb45..b9cf40eb7b 100644
--- a/tensorflow/contrib/opt/python/training/weight_decay_optimizers.py
+++ b/tensorflow/contrib/opt/python/training/weight_decay_optimizers.py
@@ -19,13 +19,13 @@ from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
-from tensorflow.python.training import optimizer
from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import resource_variable_ops
+from tensorflow.python.ops import state_ops
from tensorflow.python.training import adam
from tensorflow.python.training import momentum as momentum_opt
+from tensorflow.python.training import optimizer
from tensorflow.python.util.tf_export import tf_export
-from tensorflow.python.ops import state_ops
-from tensorflow.python.ops import resource_variable_ops
class DecoupledWeightDecayExtension(object):
@@ -65,7 +65,7 @@ class DecoupledWeightDecayExtension(object):
Args:
weight_decay: A `Tensor` or a floating point value, the factor by which
a variable is decayed in the update step.
- decay_var_list: Optional list or tuple or set of `Variable` objects to
+ **kwargs: Optional list or tuple or set of `Variable` objects to
decay.
"""
self._decay_var_list = None # is set in minimize or apply_gradients
@@ -85,6 +85,28 @@ class DecoupledWeightDecayExtension(object):
If decay_var_list is None, all variables in var_list are decayed.
For more information see the documentation of Optimizer.minimize.
+
+ Args:
+ loss: A `Tensor` containing the value to minimize.
+ global_step: Optional `Variable` to increment by one after the
+ variables have been updated.
+ var_list: Optional list or tuple of `Variable` objects to update to
+ minimize `loss`. Defaults to the list of variables collected in
+ the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
+ gate_gradients: How to gate the computation of gradients. Can be
+ `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
+ aggregation_method: Specifies the method used to combine gradient terms.
+ Valid values are defined in the class `AggregationMethod`.
+ colocate_gradients_with_ops: If True, try colocating gradients with
+ the corresponding op.
+ name: Optional name for the returned operation.
+ grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
+ decay_var_list: Optional list of decay variables.
+
+ Returns:
+ An Operation that updates the variables in `var_list`. If `global_step`
+ was not `None`, that operation also increments `global_step`.
+
"""
self._decay_var_list = set(decay_var_list) if decay_var_list else False
return super(DecoupledWeightDecayExtension, self).minimize(
@@ -103,6 +125,19 @@ class DecoupledWeightDecayExtension(object):
are decayed.
For more information see the documentation of Optimizer.apply_gradients.
+
+ Args:
+ grads_and_vars: List of (gradient, variable) pairs as returned by
+ `compute_gradients()`.
+ global_step: Optional `Variable` to increment by one after the
+ variables have been updated.
+ name: Optional name for the returned operation. Default to the
+ name passed to the `Optimizer` constructor.
+ decay_var_list: Optional list of decay variables.
+
+ Returns:
+ An `Operation` that applies the specified gradients. If `global_step`
+ was not None, that operation also increments `global_step`.
"""
self._decay_var_list = set(decay_var_list) if decay_var_list else False
return super(DecoupledWeightDecayExtension, self).apply_gradients(
@@ -197,6 +232,7 @@ def extend_with_decoupled_weight_decay(base_optimizer):
A new optimizer class that inherits from DecoupledWeightDecayExtension
and base_optimizer.
"""
+
class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
base_optimizer):
"""Base_optimizer with decoupled weight decay.
diff --git a/tensorflow/contrib/opt/python/training/weight_decay_optimizers_test.py b/tensorflow/contrib/opt/python/training/weight_decay_optimizers_test.py
index 74d1cdbbda..76d8a5697a 100644
--- a/tensorflow/contrib/opt/python/training/weight_decay_optimizers_test.py
+++ b/tensorflow/contrib/opt/python/training/weight_decay_optimizers_test.py
@@ -20,6 +20,7 @@ from __future__ import print_function
import numpy as np
+from tensorflow.contrib.opt.python.training import weight_decay_optimizers
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
@@ -29,7 +30,6 @@ from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
-from tensorflow.contrib.opt.python.training import weight_decay_optimizers
WEIGHT_DECAY = 0.01
@@ -91,7 +91,6 @@ class WeightDecayOptimizerTest(test.TestCase):
opt = optimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-
if not context.executing_eagerly():
with ops.Graph().as_default():
# Shouldn't return non-slot variables from other graphs.
@@ -171,9 +170,9 @@ class ExtendWithWeightDecayTest(WeightDecayOptimizerTest):
@staticmethod
def get_optimizer():
- AdamW = weight_decay_optimizers.extend_with_decoupled_weight_decay(
+ adamw = weight_decay_optimizers.extend_with_decoupled_weight_decay(
adam.AdamOptimizer)
- return AdamW(WEIGHT_DECAY)
+ return adamw(WEIGHT_DECAY)
def testBasic(self):
self.doTest(self.get_optimizer, adamw_update_numpy, "Adam", "m",
@@ -185,6 +184,5 @@ class ExtendWithWeightDecayTest(WeightDecayOptimizerTest):
use_resource=True)
-
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/contrib/optimizer_v2/optimizer_v2.py b/tensorflow/contrib/optimizer_v2/optimizer_v2.py
index c6f3bd6ee1..8c11d8bcfd 100644
--- a/tensorflow/contrib/optimizer_v2/optimizer_v2.py
+++ b/tensorflow/contrib/optimizer_v2/optimizer_v2.py
@@ -766,7 +766,8 @@ class OptimizerV2(optimizer_v1.Optimizer):
# *after* loss() is evaluated, so we know what loss reduction it uses.
if scale_loss_by_num_towers is None:
scale_loss_by_num_towers = (
- distribute_lib.get_loss_reduction() == "mean")
+ distribute_lib.get_loss_reduction() ==
+ variable_scope.VariableAggregation.MEAN)
if scale_loss_by_num_towers:
num_towers = distribute_lib.get_distribution_strategy().num_towers
if num_towers > 1:
@@ -784,7 +785,8 @@ class OptimizerV2(optimizer_v1.Optimizer):
# Scale loss for number of towers (non-callable-loss case).
if scale_loss_by_num_towers is None:
scale_loss_by_num_towers = (
- distribute_lib.get_loss_reduction() == "mean")
+ distribute_lib.get_loss_reduction() ==
+ variable_scope.VariableAggregation.MEAN)
if scale_loss_by_num_towers:
num_towers = distribute_lib.get_distribution_strategy().num_towers
if num_towers > 1:
@@ -896,7 +898,8 @@ class OptimizerV2(optimizer_v1.Optimizer):
def _distributed_apply(self, distribution, grads_and_vars, global_step, name):
"""`apply_gradients` for use with a `DistributionStrategy`."""
- reduced_grads = distribution.batch_reduce("sum", grads_and_vars)
+ reduced_grads = distribution.batch_reduce(
+ variable_scope.VariableAggregation.SUM, grads_and_vars)
var_list = [v for _, v in grads_and_vars]
grads_and_vars = zip(reduced_grads, var_list)
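
Note: the MEAN/SUM changes above pair up: when the per-tower loss reduction is a mean, each tower's loss is pre-divided by the tower count so that the later SUM batch-reduce of gradients yields a mean. A one-line arithmetic check of that invariant:

num_towers, per_tower_grad = 4, 1.0
summed = num_towers * (per_tower_grad / num_towers)  # scale, then sum-reduce
assert summed == per_tower_grad                      # equals the mean gradient
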
diff --git a/tensorflow/contrib/proto/BUILD b/tensorflow/contrib/proto/BUILD
index 3e9b1a0b8d..d45622174f 100644
--- a/tensorflow/contrib/proto/BUILD
+++ b/tensorflow/contrib/proto/BUILD
@@ -19,9 +19,7 @@ py_library(
py_library(
name = "proto_pip",
- data = [
- "//tensorflow/contrib/proto/python/kernel_tests:test_messages",
- ] + if_static(
+ data = if_static(
[],
otherwise = ["//tensorflow/contrib/proto/python/kernel_tests:libtestexample.so"],
),
diff --git a/tensorflow/contrib/proto/python/kernel_tests/BUILD b/tensorflow/contrib/proto/python/kernel_tests/BUILD
index a380a131f8..3c6fde23d2 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/BUILD
+++ b/tensorflow/contrib/proto/python/kernel_tests/BUILD
@@ -4,45 +4,18 @@ licenses(["notice"]) # Apache 2.0
exports_files(["LICENSE"])
-# Much of the work in this BUILD file actually happens in the corresponding
-# build_defs.bzl, which creates an individual testcase for each example .pbtxt
-# file in this directory.
-#
-load(":build_defs.bzl", "decode_proto_test_suite")
-load(":build_defs.bzl", "encode_proto_test_suite")
-
-# This expands to a tf_py_test for each test file.
-# It defines the test_suite :decode_proto_op_tests.
-decode_proto_test_suite(
- name = "decode_proto_tests",
- examples = glob(["*.pbtxt"]),
-)
-
-# This expands to a tf_py_test for each test file.
-# It defines the test_suite :encode_proto_op_tests.
-encode_proto_test_suite(
- name = "encode_proto_tests",
- examples = glob(["*.pbtxt"]),
-)
-
-# Below here are tests that are not tied to an example text proto.
-filegroup(
- name = "test_messages",
- srcs = glob(["*.pbtxt"]),
-)
-
load("//tensorflow:tensorflow.bzl", "tf_py_test")
load("//tensorflow:tensorflow.bzl", "tf_cc_shared_object")
load("//tensorflow/core:platform/default/build_config_root.bzl", "if_static")
load("//tensorflow/core:platform/default/build_config.bzl", "tf_proto_library")
tf_py_test(
- name = "decode_proto_fail_test",
+ name = "decode_proto_op_test",
size = "small",
- srcs = ["decode_proto_fail_test.py"],
+ srcs = ["decode_proto_op_test.py"],
additional_deps = [
+ ":decode_proto_op_test_base",
":py_test_deps",
- "//third_party/py/numpy",
"//tensorflow/contrib/proto:proto",
"//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
],
@@ -56,20 +29,63 @@ tf_py_test(
],
)
+tf_py_test(
+ name = "encode_proto_op_test",
+ size = "small",
+ srcs = ["encode_proto_op_test.py"],
+ additional_deps = [
+ ":encode_proto_op_test_base",
+ ":py_test_deps",
+ "//tensorflow/contrib/proto:proto",
+ "//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
+ "//tensorflow/contrib/proto/python/ops:encode_proto_op_py",
+ ],
+ data = if_static(
+ [],
+ otherwise = [":libtestexample.so"],
+ ),
+ tags = [
+ "no_pip", # TODO(b/78026780)
+ "no_windows", # TODO(b/78028010)
+ ],
+)
+
+py_library(
+ name = "proto_op_test_base",
+ testonly = 1,
+ srcs = ["proto_op_test_base.py"],
+ deps = [
+ ":test_example_proto_py",
+ "//tensorflow/python:client_testlib",
+ ],
+)
+
py_library(
- name = "test_case",
- srcs = ["test_case.py"],
- deps = ["//tensorflow/python:client_testlib"],
+ name = "decode_proto_op_test_base",
+ testonly = 1,
+ srcs = ["decode_proto_op_test_base.py"],
+ deps = [
+ ":proto_op_test_base",
+ ":test_example_proto_py",
+ "//third_party/py/numpy",
+ "@absl_py//absl/testing:parameterized",
+ ],
)
py_library(
- name = "py_test_deps",
+ name = "encode_proto_op_test_base",
+ testonly = 1,
+ srcs = ["encode_proto_op_test_base.py"],
deps = [
- ":test_case",
+ ":proto_op_test_base",
":test_example_proto_py",
+ "//third_party/py/numpy",
+ "@absl_py//absl/testing:parameterized",
],
)
+py_library(name = "py_test_deps")
+
tf_proto_library(
name = "test_example_proto",
srcs = ["test_example.proto"],
diff --git a/tensorflow/contrib/proto/python/kernel_tests/build_defs.bzl b/tensorflow/contrib/proto/python/kernel_tests/build_defs.bzl
deleted file mode 100644
index f425601691..0000000000
--- a/tensorflow/contrib/proto/python/kernel_tests/build_defs.bzl
+++ /dev/null
@@ -1,89 +0,0 @@
-"""BUILD rules for generating file-driven proto test cases.
-
-The decode_proto_test_suite() and encode_proto_test_suite() rules take a list
-of text protos and generates a tf_py_test() for each one.
-"""
-
-load("//tensorflow:tensorflow.bzl", "tf_py_test")
-load("//tensorflow:tensorflow.bzl", "register_extension_info")
-load("//tensorflow/core:platform/default/build_config_root.bzl", "if_static")
-
-def _test_name(test, path):
- return "%s_%s_test" % (test, path.split("/")[-1].split(".")[0])
-
-def decode_proto_test_suite(name, examples):
- """Build the decode_proto py_test for each test filename."""
- for test_filename in examples:
- tf_py_test(
- name = _test_name("decode_proto", test_filename),
- srcs = ["decode_proto_op_test.py"],
- size = "small",
- data = [test_filename] + if_static(
- [],
- otherwise = [":libtestexample.so"],
- ),
- main = "decode_proto_op_test.py",
- args = [
- "--message_text_file=\"%s/%s\"" % (native.package_name(), test_filename),
- ],
- additional_deps = [
- ":py_test_deps",
- "//third_party/py/numpy",
- "//tensorflow/contrib/proto:proto",
- "//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
- ],
- tags = [
- "no_pip", # TODO(b/78026780)
- "no_windows", # TODO(b/78028010)
- ],
- )
- native.test_suite(
- name = name,
- tests = [":" + _test_name("decode_proto", test_filename)
- for test_filename in examples],
- )
-
-def encode_proto_test_suite(name, examples):
- """Build the encode_proto py_test for each test filename."""
- for test_filename in examples:
- tf_py_test(
- name = _test_name("encode_proto", test_filename),
- srcs = ["encode_proto_op_test.py"],
- size = "small",
- data = [test_filename] + if_static(
- [],
- otherwise = [":libtestexample.so"],
- ),
- main = "encode_proto_op_test.py",
- args = [
- "--message_text_file=\"%s/%s\"" % (native.package_name(), test_filename),
- ],
- additional_deps = [
- ":py_test_deps",
- "//third_party/py/numpy",
- "//tensorflow/contrib/proto:proto",
- "//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
- "//tensorflow/contrib/proto/python/ops:encode_proto_op_py",
- ],
- tags = [
- "no_pip", # TODO(b/78026780)
- "no_windows", # TODO(b/78028010)
- ],
- )
- native.test_suite(
- name = name,
- tests = [":" + _test_name("encode_proto", test_filename)
- for test_filename in examples],
- )
-
-register_extension_info(
- extension_name = "decode_proto_test_suite",
- label_regex_map = {
- "deps": "deps:decode_example_.*",
- })
-
-register_extension_info(
- extension_name = "encode_proto_test_suite",
- label_regex_map = {
- "deps": "deps:encode_example_.*",
- })
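
The deleted macros above stamped out one tf_py_test target per example .pbtxt file. The new files later in this diff replace that with absl parameterized tests, where the cases are built in Python and named test methods are generated from a single method. A minimal sketch of that pattern (case data is illustrative):

    from absl.testing import absltest
    from absl.testing import parameterized

    def named_cases():
        # Mirrors ProtoOpTestBase.named_parameters(): (name, case) tuples.
        return (("simple", [1, 2, 3]), ("empty", []))

    class ExampleTest(parameterized.TestCase):

        @parameterized.named_parameters(*named_cases())
        def test_roundtrip(self, case):
            # Generates test_roundtrip_simple and test_roundtrip_empty.
            self.assertEqual(sorted(sorted(case)), sorted(case))

    if __name__ == "__main__":
        absltest.main()
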
diff --git a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_fail_test.py b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_fail_test.py
deleted file mode 100644
index 5298342ee7..0000000000
--- a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_fail_test.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# =============================================================================
-# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# =============================================================================
-
-# Python3 preparedness imports.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-
-from tensorflow.contrib.proto.python.kernel_tests import test_case
-from tensorflow.contrib.proto.python.ops import decode_proto_op
-from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import errors
-from tensorflow.python.platform import test
-
-
-class DecodeProtoFailTest(test_case.ProtoOpTestCase):
- """Test failure cases for DecodeToProto."""
-
- def _TestCorruptProtobuf(self, sanitize):
- """Test failure cases for DecodeToProto."""
-
- # The goal here is to check the error reporting.
- # Testing against a variety of corrupt protobufs is
- # done by fuzzing.
- corrupt_proto = 'This is not a binary protobuf'
-
- # Numpy silently truncates the strings if you don't specify dtype=object.
- batch = np.array(corrupt_proto, dtype=object)
- msg_type = 'tensorflow.contrib.proto.TestCase'
- field_names = ['sizes']
- field_types = [dtypes.int32]
-
- with self.test_session() as sess:
- ctensor, vtensor = decode_proto_op.decode_proto(
- batch,
- message_type=msg_type,
- field_names=field_names,
- output_types=field_types,
- sanitize=sanitize)
- with self.assertRaisesRegexp(errors.DataLossError,
- 'Unable to parse binary protobuf'
- '|Failed to consume entire buffer'):
- _ = sess.run([ctensor] + vtensor)
-
- def testCorrupt(self):
- self._TestCorruptProtobuf(sanitize=False)
-
- def testSanitizerCorrupt(self):
- self._TestCorruptProtobuf(sanitize=True)
-
-
-if __name__ == '__main__':
- test.main()
diff --git a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py
index d1c13c82bc..934035ec4c 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py
+++ b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py
@@ -13,287 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
-"""Table-driven test for decode_proto op.
+"""Tests for decode_proto op."""
-This test is run once with each of the *.TestCase.pbtxt files
-in the test directory.
-"""
# Python3 preparedness imports.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import numpy as np
-
-from google.protobuf import text_format
-
-from tensorflow.contrib.proto.python.kernel_tests import test_case
-from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.contrib.proto.python.kernel_tests import decode_proto_op_test_base as test_base
from tensorflow.contrib.proto.python.ops import decode_proto_op
-from tensorflow.python.framework import dtypes
-from tensorflow.python.platform import flags
from tensorflow.python.platform import test
-FLAGS = flags.FLAGS
-
-flags.DEFINE_string('message_text_file', None,
- 'A file containing a text serialized TestCase protobuf.')
-
-
-class DecodeProtoOpTest(test_case.ProtoOpTestCase):
-
- def _compareValues(self, fd, vs, evs):
- """Compare lists/arrays of field values."""
-
- if len(vs) != len(evs):
- self.fail('Field %s decoded %d outputs, expected %d' %
- (fd.name, len(vs), len(evs)))
- for i, ev in enumerate(evs):
- # Special case fuzzy match for float32. TensorFlow seems to mess with
- # MAX_FLT slightly and the test doesn't work otherwise.
- # TODO(nix): ask on TF list about why MAX_FLT doesn't pass through.
- if fd.cpp_type == fd.CPPTYPE_FLOAT:
- # Numpy isclose() is better than assertIsClose() which uses an absolute
- # value comparison.
- self.assertTrue(
- np.isclose(vs[i], ev), 'expected %r, actual %r' % (ev, vs[i]))
- elif fd.cpp_type == fd.CPPTYPE_STRING:
- # In Python3 string tensor values will be represented as bytes, so we
- # reencode the proto values to match that.
- self.assertEqual(vs[i], ev.encode('ascii'))
- else:
- # Doubles and other types pass through unscathed.
- self.assertEqual(vs[i], ev)
-
- def _compareRepeatedPrimitiveValue(self, batch_shape, sizes, fields,
- field_dict):
- """Compare protos of type RepeatedPrimitiveValue.
-
- Args:
- batch_shape: the shape of the input tensor of serialized messages.
- sizes: int matrix of repeat counts returned by decode_proto
- fields: list of test_example_pb2.FieldSpec (types and expected values)
- field_dict: map from field names to decoded numpy tensors of values
- """
-
- # Check that expected values match.
- for field in fields:
- values = field_dict[field.name]
- self.assertEqual(dtypes.as_dtype(values.dtype), field.dtype)
-
- fd = field.expected.DESCRIPTOR.fields_by_name[field.name]
-
- # Values has the same shape as the input plus an extra
- # dimension for repeats.
- self.assertEqual(list(values.shape)[:-1], batch_shape)
-
- # Nested messages are represented as TF strings, requiring
- # some special handling.
- if field.name == 'message_value':
- vs = []
- for buf in values.flat:
- msg = test_example_pb2.PrimitiveValue()
- msg.ParseFromString(buf)
- vs.append(msg)
- evs = getattr(field.expected, field.name)
- if len(vs) != len(evs):
- self.fail('Field %s decoded %d outputs, expected %d' %
- (fd.name, len(vs), len(evs)))
- for v, ev in zip(vs, evs):
- self.assertEqual(v, ev)
- continue
-
- # This can be a little confusing. For testing we are using
- # RepeatedPrimitiveValue in two ways: it's the proto that we
- # decode for testing, and it's used in the expected value as a
- # union type. The two cases are slightly different: this is the
- # second case.
- # We may be fetching the uint64_value from the test proto, but
- # in the expected proto we store it in the int64_value field
- # because TensorFlow doesn't support unsigned int64.
- tf_type_to_primitive_value_field = {
- dtypes.float32:
- 'float_value',
- dtypes.float64:
- 'double_value',
- dtypes.int32:
- 'int32_value',
- dtypes.uint8:
- 'uint8_value',
- dtypes.int8:
- 'int8_value',
- dtypes.string:
- 'string_value',
- dtypes.int64:
- 'int64_value',
- dtypes.bool:
- 'bool_value',
- # Unhandled TensorFlow types:
- # DT_INT16 DT_COMPLEX64 DT_QINT8 DT_QUINT8 DT_QINT32
- # DT_BFLOAT16 DT_QINT16 DT_QUINT16 DT_UINT16
- }
- tf_field_name = tf_type_to_primitive_value_field.get(field.dtype)
- if tf_field_name is None:
- self.fail('Unhandled tensorflow type %d' % field.dtype)
-
- self._compareValues(fd, values.flat,
- getattr(field.expected, tf_field_name))
-
- def _runDecodeProtoTests(self, fields, case_sizes, batch_shape, batch,
- message_type, message_format, sanitize,
- force_disordered=False):
- """Run decode tests on a batch of messages.
-
- Args:
- fields: list of test_example_pb2.FieldSpec (types and expected values)
- case_sizes: expected sizes array
- batch_shape: the shape of the input tensor of serialized messages
- batch: list of serialized messages
- message_type: descriptor name for messages
- message_format: format of messages, 'text' or 'binary'
- sanitize: whether to sanitize binary protobuf inputs
- force_disordered: whether to force fields encoded out of order.
- """
-
- if force_disordered:
- # Exercise code path that handles out-of-order fields by prepending extra
- # fields with tag numbers higher than any real field. Note that this won't
- # work with sanitization because that forces reserialization using a
- # trusted decoder and encoder.
- assert not sanitize
- extra_fields = test_example_pb2.ExtraFields()
- extra_fields.string_value = 'IGNORE ME'
- extra_fields.bool_value = False
- extra_msg = extra_fields.SerializeToString()
- batch = [extra_msg + msg for msg in batch]
-
- # Numpy silently truncates the strings if you don't specify dtype=object.
- batch = np.array(batch, dtype=object)
- batch = np.reshape(batch, batch_shape)
-
- field_names = [f.name for f in fields]
- output_types = [f.dtype for f in fields]
-
- with self.test_session() as sess:
- sizes, vtensor = decode_proto_op.decode_proto(
- batch,
- message_type=message_type,
- field_names=field_names,
- output_types=output_types,
- message_format=message_format,
- sanitize=sanitize)
-
- vlist = sess.run([sizes] + vtensor)
- sizes = vlist[0]
- # Values is a list of tensors, one for each field.
- value_tensors = vlist[1:]
-
- # Check that the repeat sizes are correct.
- self.assertTrue(
- np.all(np.array(sizes.shape) == batch_shape + [len(field_names)]))
-
- # Check that the decoded sizes match the expected sizes.
- self.assertEqual(len(sizes.flat), len(case_sizes))
- self.assertTrue(
- np.all(sizes.flat == np.array(
- case_sizes, dtype=np.int32)))
-
- field_dict = dict(zip(field_names, value_tensors))
-
- self._compareRepeatedPrimitiveValue(batch_shape, sizes, fields,
- field_dict)
-
- def testBinary(self):
- with open(FLAGS.message_text_file, 'r') as fp:
- case = text_format.Parse(fp.read(), test_example_pb2.TestCase())
-
- batch = [primitive.SerializeToString() for primitive in case.primitive]
- self._runDecodeProtoTests(
- case.field,
- case.sizes,
- list(case.shape),
- batch,
- 'tensorflow.contrib.proto.RepeatedPrimitiveValue',
- 'binary',
- sanitize=False)
-
- def testBinaryDisordered(self):
- with open(FLAGS.message_text_file, 'r') as fp:
- case = text_format.Parse(fp.read(), test_example_pb2.TestCase())
-
- batch = [primitive.SerializeToString() for primitive in case.primitive]
- self._runDecodeProtoTests(
- case.field,
- case.sizes,
- list(case.shape),
- batch,
- 'tensorflow.contrib.proto.RepeatedPrimitiveValue',
- 'binary',
- sanitize=False,
- force_disordered=True)
-
- def testPacked(self):
- with open(FLAGS.message_text_file, 'r') as fp:
- case = text_format.Parse(fp.read(), test_example_pb2.TestCase())
-
- # Now try with the packed serialization.
- # We test the packed representations by loading the same test cases
- # using PackedPrimitiveValue instead of RepeatedPrimitiveValue.
- # To do this we rely on the text format being the same for packed and
- # unpacked fields, and reparse the test message using the packed version
- # of the proto.
- packed_batch = [
- # Note: float_format='.17g' is necessary to ensure preservation of
- # doubles and floats in text format.
- text_format.Parse(
- text_format.MessageToString(
- primitive, float_format='.17g'),
- test_example_pb2.PackedPrimitiveValue()).SerializeToString()
- for primitive in case.primitive
- ]
-
- self._runDecodeProtoTests(
- case.field,
- case.sizes,
- list(case.shape),
- packed_batch,
- 'tensorflow.contrib.proto.PackedPrimitiveValue',
- 'binary',
- sanitize=False)
-
- def testText(self):
- with open(FLAGS.message_text_file, 'r') as fp:
- case = text_format.Parse(fp.read(), test_example_pb2.TestCase())
-
- # Note: float_format='.17g' is necessary to ensure preservation of
- # doubles and floats in text format.
- text_batch = [
- text_format.MessageToString(
- primitive, float_format='.17g') for primitive in case.primitive
- ]
-
- self._runDecodeProtoTests(
- case.field,
- case.sizes,
- list(case.shape),
- text_batch,
- 'tensorflow.contrib.proto.RepeatedPrimitiveValue',
- 'text',
- sanitize=False)
- def testSanitizerGood(self):
- with open(FLAGS.message_text_file, 'r') as fp:
- case = text_format.Parse(fp.read(), test_example_pb2.TestCase())
+class DecodeProtoOpTest(test_base.DecodeProtoOpTestBase):
- batch = [primitive.SerializeToString() for primitive in case.primitive]
- self._runDecodeProtoTests(
- case.field,
- case.sizes,
- list(case.shape),
- batch,
- 'tensorflow.contrib.proto.RepeatedPrimitiveValue',
- 'binary',
- sanitize=True)
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ super(DecodeProtoOpTest, self).__init__(decode_proto_op, methodName)
if __name__ == '__main__':
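
After this refactor the per-op test file only binds the concrete op module to the shared base class; every test body lives in decode_proto_op_test_base.py. A toy model of the injection pattern (names assumed, no TensorFlow dependency):

    class DecodeTestBase(object):
        """Holds the injected op module; tests call self._decode_module.decode_proto."""

        def __init__(self, decode_module, methodName='runTest'):
            self._decode_module = decode_module
            self._methodName = methodName

    class FakeDecodeModule(object):
        @staticmethod
        def decode_proto(*args, **kwargs):
            raise NotImplementedError('stand-in for the real op')

    class DecodeTest(DecodeTestBase):
        def __init__(self, methodName='runTest'):
            # Bind the implementation once; inherited tests pick it up.
            super(DecodeTest, self).__init__(FakeDecodeModule, methodName)

    assert DecodeTest()._decode_module is FakeDecodeModule
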
diff --git a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test_base.py b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test_base.py
new file mode 100644
index 0000000000..5f7f510352
--- /dev/null
+++ b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test_base.py
@@ -0,0 +1,310 @@
+# =============================================================================
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Tests for decode_proto op."""
+
+# Python3 preparedness imports.
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl.testing import parameterized
+import numpy as np
+
+
+from google.protobuf import text_format
+
+from tensorflow.contrib.proto.python.kernel_tests import proto_op_test_base as test_base
+from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
+
+
+class DecodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase):
+ """Base class for testing proto decoding ops."""
+
+ def __init__(self, decode_module, methodName='runTest'): # pylint: disable=invalid-name
+ """DecodeProtoOpTestBase initializer.
+
+ Args:
+      decode_module: a module providing the `decode_proto` function
+ methodName: the name of the test method (same as for test.TestCase)
+ """
+
+ super(DecodeProtoOpTestBase, self).__init__(methodName)
+ self._decode_module = decode_module
+
+ def _compareValues(self, fd, vs, evs):
+ """Compare lists/arrays of field values."""
+
+ if len(vs) != len(evs):
+ self.fail('Field %s decoded %d outputs, expected %d' %
+ (fd.name, len(vs), len(evs)))
+ for i, ev in enumerate(evs):
+ # Special case fuzzy match for float32. TensorFlow seems to mess with
+ # MAX_FLT slightly and the test doesn't work otherwise.
+ # TODO(nix): ask on TF list about why MAX_FLT doesn't pass through.
+ if fd.cpp_type == fd.CPPTYPE_FLOAT:
+ # Numpy isclose() is better than assertIsClose() which uses an absolute
+ # value comparison.
+ self.assertTrue(
+ np.isclose(vs[i], ev), 'expected %r, actual %r' % (ev, vs[i]))
+ elif fd.cpp_type == fd.CPPTYPE_STRING:
+ # In Python3 string tensor values will be represented as bytes, so we
+ # reencode the proto values to match that.
+ self.assertEqual(vs[i], ev.encode('ascii'))
+ else:
+ # Doubles and other types pass through unscathed.
+ self.assertEqual(vs[i], ev)
+
+ def _compareProtos(self, batch_shape, sizes, fields, field_dict):
+ """Compare protos of type TestValue.
+
+ Args:
+ batch_shape: the shape of the input tensor of serialized messages.
+ sizes: int matrix of repeat counts returned by decode_proto
+ fields: list of test_example_pb2.FieldSpec (types and expected values)
+ field_dict: map from field names to decoded numpy tensors of values
+ """
+
+ # Check that expected values match.
+ for field in fields:
+ values = field_dict[field.name]
+ self.assertEqual(dtypes.as_dtype(values.dtype), field.dtype)
+
+ fd = field.value.DESCRIPTOR.fields_by_name[field.name]
+
+ # Values has the same shape as the input plus an extra
+ # dimension for repeats.
+ self.assertEqual(list(values.shape)[:-1], batch_shape)
+
+ # Nested messages are represented as TF strings, requiring
+ # some special handling.
+ if field.name == 'message_value':
+ vs = []
+ for buf in values.flat:
+ msg = test_example_pb2.PrimitiveValue()
+ msg.ParseFromString(buf)
+ vs.append(msg)
+ evs = getattr(field.value, field.name)
+ if len(vs) != len(evs):
+ self.fail('Field %s decoded %d outputs, expected %d' %
+ (fd.name, len(vs), len(evs)))
+ for v, ev in zip(vs, evs):
+ self.assertEqual(v, ev)
+ continue
+
+ # This can be a little confusing. For testing we are using TestValue in
+ # two ways: it's the proto that we decode for testing, and it's used in
+ # the expected value as a union type.
+ #
+ # The two cases are slightly different: this is the second case. We may be
+ # fetching the uint64_value from the test proto, but in the expected proto
+ # we store it in the int64_value field because TensorFlow doesn't support
+ # unsigned int64.
+ tf_type_to_primitive_value_field = {
+ dtypes.float32:
+ 'float_value',
+ dtypes.float64:
+ 'double_value',
+ dtypes.int32:
+ 'int32_value',
+ dtypes.uint8:
+ 'uint8_value',
+ dtypes.int8:
+ 'int8_value',
+ dtypes.string:
+ 'string_value',
+ dtypes.int64:
+ 'int64_value',
+ dtypes.bool:
+ 'bool_value',
+ # Unhandled TensorFlow types:
+ # DT_INT16 DT_COMPLEX64 DT_QINT8 DT_QUINT8 DT_QINT32
+ # DT_BFLOAT16 DT_QINT16 DT_QUINT16 DT_UINT16
+ }
+ tf_field_name = tf_type_to_primitive_value_field.get(field.dtype)
+ if tf_field_name is None:
+ self.fail('Unhandled tensorflow type %d' % field.dtype)
+
+ self._compareValues(fd, values.flat,
+ getattr(field.value, tf_field_name))
+
+ def _runDecodeProtoTests(self, fields, case_sizes, batch_shape, batch,
+ message_type, message_format, sanitize,
+ force_disordered=False):
+ """Run decode tests on a batch of messages.
+
+ Args:
+ fields: list of test_example_pb2.FieldSpec (types and expected values)
+ case_sizes: expected sizes array
+ batch_shape: the shape of the input tensor of serialized messages
+ batch: list of serialized messages
+ message_type: descriptor name for messages
+ message_format: format of messages, 'text' or 'binary'
+ sanitize: whether to sanitize binary protobuf inputs
+ force_disordered: whether to force fields encoded out of order.
+ """
+
+ if force_disordered:
+ # Exercise code path that handles out-of-order fields by prepending extra
+ # fields with tag numbers higher than any real field. Note that this won't
+ # work with sanitization because that forces reserialization using a
+ # trusted decoder and encoder.
+ assert not sanitize
+ extra_fields = test_example_pb2.ExtraFields()
+ extra_fields.string_value = 'IGNORE ME'
+ extra_fields.bool_value = False
+ extra_msg = extra_fields.SerializeToString()
+ batch = [extra_msg + msg for msg in batch]
+
+ # Numpy silently truncates the strings if you don't specify dtype=object.
+ batch = np.array(batch, dtype=object)
+ batch = np.reshape(batch, batch_shape)
+
+ field_names = [f.name for f in fields]
+ output_types = [f.dtype for f in fields]
+
+ with self.test_session() as sess:
+ sizes, vtensor = self._decode_module.decode_proto(
+ batch,
+ message_type=message_type,
+ field_names=field_names,
+ output_types=output_types,
+ message_format=message_format,
+ sanitize=sanitize)
+
+ vlist = sess.run([sizes] + vtensor)
+ sizes = vlist[0]
+ # Values is a list of tensors, one for each field.
+ value_tensors = vlist[1:]
+
+ # Check that the repeat sizes are correct.
+ self.assertTrue(
+ np.all(np.array(sizes.shape) == batch_shape + [len(field_names)]))
+
+ # Check that the decoded sizes match the expected sizes.
+ self.assertEqual(len(sizes.flat), len(case_sizes))
+ self.assertTrue(
+ np.all(sizes.flat == np.array(
+ case_sizes, dtype=np.int32)))
+
+ field_dict = dict(zip(field_names, value_tensors))
+
+ self._compareProtos(batch_shape, sizes, fields, field_dict)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testBinary(self, case):
+ batch = [value.SerializeToString() for value in case.values]
+ self._runDecodeProtoTests(
+ case.fields,
+ case.sizes,
+ list(case.shapes),
+ batch,
+ 'tensorflow.contrib.proto.TestValue',
+ 'binary',
+ sanitize=False)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testBinaryDisordered(self, case):
+ batch = [value.SerializeToString() for value in case.values]
+ self._runDecodeProtoTests(
+ case.fields,
+ case.sizes,
+ list(case.shapes),
+ batch,
+ 'tensorflow.contrib.proto.TestValue',
+ 'binary',
+ sanitize=False,
+ force_disordered=True)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testPacked(self, case):
+ # Now try with the packed serialization.
+ #
+ # We test the packed representations by loading the same test case using
+ # PackedTestValue instead of TestValue. To do this we rely on the text
+ # format being the same for packed and unpacked fields, and reparse the
+ # test message using the packed version of the proto.
+ packed_batch = [
+ # Note: float_format='.17g' is necessary to ensure preservation of
+ # doubles and floats in text format.
+ text_format.Parse(
+ text_format.MessageToString(
+ value, float_format='.17g'),
+ test_example_pb2.PackedTestValue()).SerializeToString()
+ for value in case.values
+ ]
+
+ self._runDecodeProtoTests(
+ case.fields,
+ case.sizes,
+ list(case.shapes),
+ packed_batch,
+ 'tensorflow.contrib.proto.PackedTestValue',
+ 'binary',
+ sanitize=False)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testText(self, case):
+ # Note: float_format='.17g' is necessary to ensure preservation of
+ # doubles and floats in text format.
+ text_batch = [
+ text_format.MessageToString(
+ value, float_format='.17g') for value in case.values
+ ]
+
+ self._runDecodeProtoTests(
+ case.fields,
+ case.sizes,
+ list(case.shapes),
+ text_batch,
+ 'tensorflow.contrib.proto.TestValue',
+ 'text',
+ sanitize=False)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testSanitizerGood(self, case):
+ batch = [value.SerializeToString() for value in case.values]
+ self._runDecodeProtoTests(
+ case.fields,
+ case.sizes,
+ list(case.shapes),
+ batch,
+ 'tensorflow.contrib.proto.TestValue',
+ 'binary',
+ sanitize=True)
+
+ @parameterized.parameters((False), (True))
+ def testCorruptProtobuf(self, sanitize):
+ corrupt_proto = 'This is not a binary protobuf'
+
+ # Numpy silently truncates the strings if you don't specify dtype=object.
+ batch = np.array(corrupt_proto, dtype=object)
+ msg_type = 'tensorflow.contrib.proto.TestCase'
+ field_names = ['sizes']
+ field_types = [dtypes.int32]
+
+ with self.test_session() as sess:
+ ctensor, vtensor = self._decode_module.decode_proto(
+ batch,
+ message_type=msg_type,
+ field_names=field_names,
+ output_types=field_types,
+ sanitize=sanitize)
+ with self.assertRaisesRegexp(errors.DataLossError,
+ 'Unable to parse binary protobuf'
+ '|Failed to consume entire buffer'):
+ _ = sess.run([ctensor] + vtensor)
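
_runDecodeProtoTests above checks a fixed output contract: sizes has shape batch_shape + [len(field_names)], and each value tensor has the batch shape plus one trailing dimension padded to the largest repeat count. A small numpy illustration of that contract (values taken from the ragged case defined later in this diff):

    import numpy as np

    batch_shape = [2]
    field_names = ['double_value', 'bool_value']
    # Message 0 has two doubles and one bool; message 1 has one of each.
    sizes = np.array([[2, 1],
                      [1, 1]])
    double_values = np.array([[23.5, 123.0],
                              [3.1, 0.0]])  # padded to the max repeat count

    assert list(sizes.shape) == batch_shape + [len(field_names)]
    assert list(double_values.shape)[:-1] == batch_shape
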
diff --git a/tensorflow/contrib/proto/python/kernel_tests/defaut_values.TestCase.pbtxt b/tensorflow/contrib/proto/python/kernel_tests/defaut_values.TestCase.pbtxt
deleted file mode 100644
index 4e31681907..0000000000
--- a/tensorflow/contrib/proto/python/kernel_tests/defaut_values.TestCase.pbtxt
+++ /dev/null
@@ -1,94 +0,0 @@
-primitive {
- # No fields specified, so we get all defaults
-}
-shape: 1
-sizes: 0
-field {
- name: "double_default"
- dtype: DT_DOUBLE
- expected { double_value: 1.0 }
-}
-sizes: 0
-field {
- name: "float_default"
- dtype: DT_DOUBLE # Try casting the float field to double.
- expected { double_value: 2.0 }
-}
-sizes: 0
-field {
- name: "int64_default"
- dtype: DT_INT64
- expected { int64_value: 3 }
-}
-sizes: 0
-field {
- name: "uint64_default"
- dtype: DT_INT64
- expected { int64_value: 4 }
-}
-sizes: 0
-field {
- name: "int32_default"
- dtype: DT_INT32
- expected { int32_value: 5 }
-}
-sizes: 0
-field {
- name: "fixed64_default"
- dtype: DT_INT64
- expected { int64_value: 6 }
-}
-sizes: 0
-field {
- name: "fixed32_default"
- dtype: DT_INT32
- expected { int32_value: 7 }
-}
-sizes: 0
-field {
- name: "bool_default"
- dtype: DT_BOOL
- expected { bool_value: true }
-}
-sizes: 0
-field {
- name: "string_default"
- dtype: DT_STRING
- expected { string_value: "a" }
-}
-sizes: 0
-field {
- name: "bytes_default"
- dtype: DT_STRING
- expected { string_value: "a longer default string" }
-}
-sizes: 0
-field {
- name: "uint32_default"
- dtype: DT_INT32
- expected { int32_value: -1 }
-}
-sizes: 0
-field {
- name: "sfixed32_default"
- dtype: DT_INT32
- expected { int32_value: 10 }
-}
-sizes: 0
-field {
- name: "sfixed64_default"
- dtype: DT_INT64
- expected { int64_value: 11 }
-}
-sizes: 0
-field {
- name: "sint32_default"
- dtype: DT_INT32
- expected { int32_value: 12 }
-}
-sizes: 0
-field {
- name: "sint64_default"
- dtype: DT_INT64
- expected { int64_value: 13 }
-}
diff --git a/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py b/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py
index 30e58e6336..fc5cd25d43 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py
+++ b/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py
@@ -13,167 +13,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
-"""Table-driven test for encode_proto op.
+"""Tests for encode_proto op."""
-This test is run once with each of the *.TestCase.pbtxt files
-in the test directory.
-
-It tests that encode_proto is a lossless inverse of decode_proto
-(for the specified fields).
-"""
# Python3 readiness boilerplate
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import numpy as np
-
-from google.protobuf import text_format
-
-from tensorflow.contrib.proto.python.kernel_tests import test_case
-from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.contrib.proto.python.kernel_tests import encode_proto_op_test_base as test_base
from tensorflow.contrib.proto.python.ops import decode_proto_op
from tensorflow.contrib.proto.python.ops import encode_proto_op
-from tensorflow.python.framework import dtypes
-from tensorflow.python.ops import array_ops
-from tensorflow.python.platform import flags
from tensorflow.python.platform import test
-FLAGS = flags.FLAGS
-
-flags.DEFINE_string('message_text_file', None,
- 'A file containing a text serialized TestCase protobuf.')
-
-
-class EncodeProtoOpTest(test_case.ProtoOpTestCase):
-
- def testBadInputs(self):
- # Invalid field name
- with self.test_session():
- with self.assertRaisesOpError('Unknown field: non_existent_field'):
- encode_proto_op.encode_proto(
- sizes=[[1]],
- values=[np.array([[0.0]], dtype=np.int32)],
- message_type='tensorflow.contrib.proto.RepeatedPrimitiveValue',
- field_names=['non_existent_field']).eval()
-
- # Incorrect types.
- with self.test_session():
- with self.assertRaisesOpError(
- 'Incompatible type for field double_value.'):
- encode_proto_op.encode_proto(
- sizes=[[1]],
- values=[np.array([[0.0]], dtype=np.int32)],
- message_type='tensorflow.contrib.proto.RepeatedPrimitiveValue',
- field_names=['double_value']).eval()
-
- # Incorrect shapes of sizes.
- with self.test_session():
- with self.assertRaisesOpError(
- r'sizes should be batch_size \+ \[len\(field_names\)\]'):
- sizes = array_ops.placeholder(dtypes.int32)
- values = array_ops.placeholder(dtypes.float64)
- encode_proto_op.encode_proto(
- sizes=sizes,
- values=[values],
- message_type='tensorflow.contrib.proto.RepeatedPrimitiveValue',
- field_names=['double_value']).eval(feed_dict={
- sizes: [[[0, 0]]],
- values: [[0.0]]
- })
-
- # Inconsistent shapes of values.
- with self.test_session():
- with self.assertRaisesOpError(
- 'Values must match up to the last dimension'):
- sizes = array_ops.placeholder(dtypes.int32)
- values1 = array_ops.placeholder(dtypes.float64)
- values2 = array_ops.placeholder(dtypes.int32)
- (encode_proto_op.encode_proto(
- sizes=[[1, 1]],
- values=[values1, values2],
- message_type='tensorflow.contrib.proto.RepeatedPrimitiveValue',
- field_names=['double_value', 'int32_value']).eval(feed_dict={
- values1: [[0.0]],
- values2: [[0], [0]]
- }))
-
- def _testRoundtrip(self, in_bufs, message_type, fields):
-
- field_names = [f.name for f in fields]
- out_types = [f.dtype for f in fields]
-
- with self.test_session() as sess:
- sizes, field_tensors = decode_proto_op.decode_proto(
- in_bufs,
- message_type=message_type,
- field_names=field_names,
- output_types=out_types)
-
- out_tensors = encode_proto_op.encode_proto(
- sizes,
- field_tensors,
- message_type=message_type,
- field_names=field_names)
-
- out_bufs, = sess.run([out_tensors])
-
- # Check that the re-encoded tensor has the same shape.
- self.assertEqual(in_bufs.shape, out_bufs.shape)
-
- # Compare the input and output.
- for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
- in_obj = test_example_pb2.RepeatedPrimitiveValue()
- in_obj.ParseFromString(in_buf)
-
- out_obj = test_example_pb2.RepeatedPrimitiveValue()
- out_obj.ParseFromString(out_buf)
-
- # Check that the deserialized objects are identical.
- self.assertEqual(in_obj, out_obj)
-
- # Check that the input and output serialized messages are identical.
- # If we fail here, there is a difference in the serialized
- # representation but the new serialization still parses. This could
- # be harmless (a change in map ordering?) or it could be bad (e.g.
- # loss of packing in the encoding).
- self.assertEqual(in_buf, out_buf)
-
- def testRoundtrip(self):
- with open(FLAGS.message_text_file, 'r') as fp:
- case = text_format.Parse(fp.read(), test_example_pb2.TestCase())
-
- in_bufs = [primitive.SerializeToString() for primitive in case.primitive]
-
- # np.array silently truncates strings if you don't specify dtype=object.
- in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shape))
- return self._testRoundtrip(
- in_bufs, 'tensorflow.contrib.proto.RepeatedPrimitiveValue', case.field)
-
- def testRoundtripPacked(self):
- with open(FLAGS.message_text_file, 'r') as fp:
- case = text_format.Parse(fp.read(), test_example_pb2.TestCase())
- # Now try with the packed serialization.
- # We test the packed representations by loading the same test cases
- # using PackedPrimitiveValue instead of RepeatedPrimitiveValue.
- # To do this we rely on the text format being the same for packed and
- # unpacked fields, and reparse the test message using the packed version
- # of the proto.
- in_bufs = [
- # Note: float_format='.17g' is necessary to ensure preservation of
- # doubles and floats in text format.
- text_format.Parse(
- text_format.MessageToString(
- primitive, float_format='.17g'),
- test_example_pb2.PackedPrimitiveValue()).SerializeToString()
- for primitive in case.primitive
- ]
+class EncodeProtoOpTest(test_base.EncodeProtoOpTestBase):
- # np.array silently truncates strings if you don't specify dtype=object.
- in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shape))
- return self._testRoundtrip(
- in_bufs, 'tensorflow.contrib.proto.PackedPrimitiveValue', case.field)
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ super(EncodeProtoOpTest, self).__init__(decode_proto_op, encode_proto_op,
+ methodName)
if __name__ == '__main__':
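
The encode test binds both op modules because every encode assertion first decodes a reference buffer. A toy model of the two-module constructor (assumed names):

    class EncodeTestBase(object):
        def __init__(self, decode_module, encode_module, methodName='runTest'):
            # Roundtrip tests need both ops: decode first, then re-encode.
            self._decode_module = decode_module
            self._encode_module = encode_module

    class EncodeTest(EncodeTestBase):
        def __init__(self, methodName='runTest'):
            super(EncodeTest, self).__init__(object(), object(), methodName)

    EncodeTest()
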
diff --git a/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test_base.py b/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test_base.py
new file mode 100644
index 0000000000..07dfb924d3
--- /dev/null
+++ b/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test_base.py
@@ -0,0 +1,177 @@
+# =============================================================================
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Table-driven test for encode_proto op.
+
+This test is run once with each of the *.TestCase.pbtxt files
+in the test directory.
+
+It tests that encode_proto is a lossless inverse of decode_proto
+(for the specified fields).
+"""
+# Python3 readiness boilerplate
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl.testing import parameterized
+import numpy as np
+
+from google.protobuf import text_format
+
+from tensorflow.contrib.proto.python.kernel_tests import proto_op_test_base as test_base
+from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+
+
+class EncodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase):
+ """Base class for testing proto encoding ops."""
+
+ def __init__(self, decode_module, encode_module, methodName='runTest'): # pylint: disable=invalid-name
+ """EncodeProtoOpTestBase initializer.
+
+ Args:
+      decode_module: a module providing the `decode_proto` function
+      encode_module: a module providing the `encode_proto` function
+ methodName: the name of the test method (same as for test.TestCase)
+ """
+
+ super(EncodeProtoOpTestBase, self).__init__(methodName)
+ self._decode_module = decode_module
+ self._encode_module = encode_module
+
+ def testBadInputs(self):
+ # Invalid field name
+ with self.test_session():
+ with self.assertRaisesOpError('Unknown field: non_existent_field'):
+ self._encode_module.encode_proto(
+ sizes=[[1]],
+ values=[np.array([[0.0]], dtype=np.int32)],
+ message_type='tensorflow.contrib.proto.TestValue',
+ field_names=['non_existent_field']).eval()
+
+ # Incorrect types.
+ with self.test_session():
+ with self.assertRaisesOpError(
+ 'Incompatible type for field double_value.'):
+ self._encode_module.encode_proto(
+ sizes=[[1]],
+ values=[np.array([[0.0]], dtype=np.int32)],
+ message_type='tensorflow.contrib.proto.TestValue',
+ field_names=['double_value']).eval()
+
+ # Incorrect shapes of sizes.
+ with self.test_session():
+ with self.assertRaisesOpError(
+ r'sizes should be batch_size \+ \[len\(field_names\)\]'):
+ sizes = array_ops.placeholder(dtypes.int32)
+ values = array_ops.placeholder(dtypes.float64)
+ self._encode_module.encode_proto(
+ sizes=sizes,
+ values=[values],
+ message_type='tensorflow.contrib.proto.TestValue',
+ field_names=['double_value']).eval(feed_dict={
+ sizes: [[[0, 0]]],
+ values: [[0.0]]
+ })
+
+ # Inconsistent shapes of values.
+ with self.test_session():
+ with self.assertRaisesOpError(
+ 'Values must match up to the last dimension'):
+ sizes = array_ops.placeholder(dtypes.int32)
+ values1 = array_ops.placeholder(dtypes.float64)
+ values2 = array_ops.placeholder(dtypes.int32)
+ (self._encode_module.encode_proto(
+ sizes=[[1, 1]],
+ values=[values1, values2],
+ message_type='tensorflow.contrib.proto.TestValue',
+ field_names=['double_value', 'int32_value']).eval(feed_dict={
+ values1: [[0.0]],
+ values2: [[0], [0]]
+ }))
+
+ def _testRoundtrip(self, in_bufs, message_type, fields):
+
+ field_names = [f.name for f in fields]
+ out_types = [f.dtype for f in fields]
+
+ with self.test_session() as sess:
+ sizes, field_tensors = self._decode_module.decode_proto(
+ in_bufs,
+ message_type=message_type,
+ field_names=field_names,
+ output_types=out_types)
+
+ out_tensors = self._encode_module.encode_proto(
+ sizes,
+ field_tensors,
+ message_type=message_type,
+ field_names=field_names)
+
+ out_bufs, = sess.run([out_tensors])
+
+ # Check that the re-encoded tensor has the same shape.
+ self.assertEqual(in_bufs.shape, out_bufs.shape)
+
+ # Compare the input and output.
+ for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
+ in_obj = test_example_pb2.TestValue()
+ in_obj.ParseFromString(in_buf)
+
+ out_obj = test_example_pb2.TestValue()
+ out_obj.ParseFromString(out_buf)
+
+ # Check that the deserialized objects are identical.
+ self.assertEqual(in_obj, out_obj)
+
+ # Check that the input and output serialized messages are identical.
+ # If we fail here, there is a difference in the serialized
+ # representation but the new serialization still parses. This could
+ # be harmless (a change in map ordering?) or it could be bad (e.g.
+ # loss of packing in the encoding).
+ self.assertEqual(in_buf, out_buf)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testRoundtrip(self, case):
+ in_bufs = [value.SerializeToString() for value in case.values]
+
+ # np.array silently truncates strings if you don't specify dtype=object.
+ in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
+ return self._testRoundtrip(
+ in_bufs, 'tensorflow.contrib.proto.TestValue', case.fields)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testRoundtripPacked(self, case):
+ # Now try with the packed serialization.
+ # We test the packed representations by loading the same test cases using
+ # PackedTestValue instead of TestValue. To do this we rely on the text
+ # format being the same for packed and unpacked fields, and reparse the test
+ # message using the packed version of the proto.
+ in_bufs = [
+ # Note: float_format='.17g' is necessary to ensure preservation of
+ # doubles and floats in text format.
+ text_format.Parse(
+ text_format.MessageToString(
+ value, float_format='.17g'),
+ test_example_pb2.PackedTestValue()).SerializeToString()
+ for value in case.values
+ ]
+
+ # np.array silently truncates strings if you don't specify dtype=object.
+ in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
+ return self._testRoundtrip(
+ in_bufs, 'tensorflow.contrib.proto.PackedTestValue', case.fields)
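
_testRoundtrip asserts a byte-for-byte invariant: re-encoding the decoded fields must reproduce the input serialization exactly, which is stronger than semantic equality of the parsed messages. A toy sketch of the property being checked (the identity codec stands in for decode_proto/encode_proto):

    def decode(buf):
        return list(buf)      # stand-in for decode_proto

    def encode(fields):
        return bytes(fields)  # stand-in for encode_proto

    in_buf = b'\x08\x01'      # any serialized message
    out_buf = encode(decode(in_buf))
    assert out_buf == in_buf  # byte-identical, not merely equivalent
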
diff --git a/tensorflow/contrib/proto/python/kernel_tests/minmax.TestCase.pbtxt b/tensorflow/contrib/proto/python/kernel_tests/minmax.TestCase.pbtxt
deleted file mode 100644
index b170f89c0f..0000000000
--- a/tensorflow/contrib/proto/python/kernel_tests/minmax.TestCase.pbtxt
+++ /dev/null
@@ -1,161 +0,0 @@
-primitive {
- double_value: -1.7976931348623158e+308
- double_value: 2.2250738585072014e-308
- double_value: 1.7976931348623158e+308
- float_value: -3.402823466e+38
- float_value: 1.175494351e-38
- float_value: 3.402823466e+38
- int64_value: -9223372036854775808
- int64_value: 9223372036854775807
- uint64_value: 0
- uint64_value: 18446744073709551615
- int32_value: -2147483648
- int32_value: 2147483647
- fixed64_value: 0
- fixed64_value: 18446744073709551615
- fixed32_value: 0
- fixed32_value: 4294967295
- bool_value: false
- bool_value: true
- string_value: ""
- string_value: "I refer to the infinite."
- uint32_value: 0
- uint32_value: 4294967295
- sfixed32_value: -2147483648
- sfixed32_value: 2147483647
- sfixed64_value: -9223372036854775808
- sfixed64_value: 9223372036854775807
- sint32_value: -2147483648
- sint32_value: 2147483647
- sint64_value: -9223372036854775808
- sint64_value: 9223372036854775807
-}
-shape: 1
-sizes: 3
-sizes: 3
-sizes: 2
-sizes: 2
-sizes: 2
-sizes: 2
-sizes: 2
-sizes: 2
-sizes: 2
-sizes: 2
-sizes: 2
-sizes: 2
-sizes: 2
-sizes: 2
-field {
- name: "double_value"
- dtype: DT_DOUBLE
- expected {
- double_value: -1.7976931348623158e+308
- double_value: 2.2250738585072014e-308
- double_value: 1.7976931348623158e+308
- }
-}
-field {
- name: "float_value"
- dtype: DT_FLOAT
- expected {
- float_value: -3.402823466e+38
- float_value: 1.175494351e-38
- float_value: 3.402823466e+38
- }
-}
-field {
- name: "int64_value"
- dtype: DT_INT64
- expected {
- int64_value: -9223372036854775808
- int64_value: 9223372036854775807
- }
-}
-field {
- name: "uint64_value"
- dtype: DT_INT64
- expected {
- int64_value: 0
- int64_value: -1
- }
-}
-field {
- name: "int32_value"
- dtype: DT_INT32
- expected {
- int32_value: -2147483648
- int32_value: 2147483647
- }
-}
-field {
- name: "fixed64_value"
- dtype: DT_INT64
- expected {
- int64_value: 0
- int64_value: -1 # unsigned is 18446744073709551615
- }
-}
-field {
- name: "fixed32_value"
- dtype: DT_INT32
- expected {
- int32_value: 0
- int32_value: -1 # unsigned is 4294967295
- }
-}
-field {
- name: "bool_value"
- dtype: DT_BOOL
- expected {
- bool_value: false
- bool_value: true
- }
-}
-field {
- name: "string_value"
- dtype: DT_STRING
- expected {
- string_value: ""
- string_value: "I refer to the infinite."
- }
-}
-field {
- name: "uint32_value"
- dtype: DT_INT32
- expected {
- int32_value: 0
- int32_value: -1 # unsigned is 4294967295
- }
-}
-field {
- name: "sfixed32_value"
- dtype: DT_INT32
- expected {
- int32_value: -2147483648
- int32_value: 2147483647
- }
-}
-field {
- name: "sfixed64_value"
- dtype: DT_INT64
- expected {
- int64_value: -9223372036854775808
- int64_value: 9223372036854775807
- }
-}
-field {
- name: "sint32_value"
- dtype: DT_INT32
- expected {
- int32_value: -2147483648
- int32_value: 2147483647
- }
-}
-field {
- name: "sint64_value"
- dtype: DT_INT64
- expected {
- int64_value: -9223372036854775808
- int64_value: 9223372036854775807
- }
-}
diff --git a/tensorflow/contrib/proto/python/kernel_tests/nested.TestCase.pbtxt b/tensorflow/contrib/proto/python/kernel_tests/nested.TestCase.pbtxt
deleted file mode 100644
index c664e52851..0000000000
--- a/tensorflow/contrib/proto/python/kernel_tests/nested.TestCase.pbtxt
+++ /dev/null
@@ -1,16 +0,0 @@
-primitive {
- message_value {
- double_value: 23.5
- }
-}
-shape: 1
-sizes: 1
-field {
- name: "message_value"
- dtype: DT_STRING
- expected {
- message_value {
- double_value: 23.5
- }
- }
-}
diff --git a/tensorflow/contrib/proto/python/kernel_tests/optional.TestCase.pbtxt b/tensorflow/contrib/proto/python/kernel_tests/optional.TestCase.pbtxt
deleted file mode 100644
index 125651d7ea..0000000000
--- a/tensorflow/contrib/proto/python/kernel_tests/optional.TestCase.pbtxt
+++ /dev/null
@@ -1,20 +0,0 @@
-primitive {
- bool_value: true
-}
-shape: 1
-sizes: 1
-sizes: 0
-field {
- name: "bool_value"
- dtype: DT_BOOL
- expected {
- bool_value: true
- }
-}
-field {
- name: "double_value"
- dtype: DT_DOUBLE
- expected {
- double_value: 0.0
- }
-}
diff --git a/tensorflow/contrib/proto/python/kernel_tests/promote_unsigned.TestCase.pbtxt b/tensorflow/contrib/proto/python/kernel_tests/promote_unsigned.TestCase.pbtxt
deleted file mode 100644
index bc07efc8f3..0000000000
--- a/tensorflow/contrib/proto/python/kernel_tests/promote_unsigned.TestCase.pbtxt
+++ /dev/null
@@ -1,29 +0,0 @@
-primitive {
- fixed32_value: 4294967295
- uint32_value: 4294967295
-}
-shape: 1
-sizes: 1
-field {
- name: "fixed32_value"
- dtype: DT_INT64
- expected {
- int64_value: 4294967295
- }
-}
-sizes: 1
-field {
- name: "uint32_value"
- dtype: DT_INT64
- expected {
- int64_value: 4294967295
- }
-}
-sizes: 0
-field {
- name: "uint32_default"
- dtype: DT_INT64
- expected {
- int64_value: 4294967295 # Comes from an explicitly-specified default
- }
-}
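
The deleted case above, like its rebuilt Python counterpart below, exercises unsigned promotion: these ops expose no unsigned 32/64-bit tensor types, so a uint32 field is either widened to DT_INT64 (keeping 4294967295) or reinterpreted as DT_INT32 (wrapping to -1). The same arithmetic in plain numpy:

    import numpy as np

    raw = 4294967295                              # uint32 max from the test proto
    assert np.int64(raw) == 4294967295            # promoted: value preserved
    assert np.uint32(raw).astype(np.int32) == -1  # reinterpreted: wraps around
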
diff --git a/tensorflow/contrib/proto/python/kernel_tests/proto_op_test_base.py b/tensorflow/contrib/proto/python/kernel_tests/proto_op_test_base.py
new file mode 100644
index 0000000000..cbc7b3d3f8
--- /dev/null
+++ b/tensorflow/contrib/proto/python/kernel_tests/proto_op_test_base.py
@@ -0,0 +1,407 @@
+# =============================================================================
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Test case base for testing proto operations."""
+
+# Python3 preparedness imports.
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import ctypes as ct
+import os
+
+from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.core.framework import types_pb2
+from tensorflow.python.platform import test
+
+
+class ProtoOpTestBase(test.TestCase):
+ """Base class for testing proto decoding and encoding ops."""
+
+ def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
+ super(ProtoOpTestBase, self).__init__(methodName)
+ lib = os.path.join(os.path.dirname(__file__), "libtestexample.so")
+ if os.path.isfile(lib):
+ ct.cdll.LoadLibrary(lib)
+
+ @staticmethod
+ def named_parameters():
+ return (
+ ("defaults", ProtoOpTestBase.defaults_test_case()),
+ ("minmax", ProtoOpTestBase.minmax_test_case()),
+ ("nested", ProtoOpTestBase.nested_test_case()),
+ ("optional", ProtoOpTestBase.optional_test_case()),
+ ("promote_unsigned", ProtoOpTestBase.promote_unsigned_test_case()),
+ ("ragged", ProtoOpTestBase.ragged_test_case()),
+ ("shaped_batch", ProtoOpTestBase.shaped_batch_test_case()),
+ ("simple", ProtoOpTestBase.simple_test_case()),
+ )
+
+ @staticmethod
+ def defaults_test_case():
+ test_case = test_example_pb2.TestCase()
+ test_case.values.add() # No fields specified, so we get all defaults.
+ test_case.shapes.append(1)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "double_value_with_default"
+ field.dtype = types_pb2.DT_DOUBLE
+ field.value.double_value.append(1.0)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "float_value_with_default"
+ field.dtype = types_pb2.DT_FLOAT
+ field.value.float_value.append(2.0)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "int64_value_with_default"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(3)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "sfixed64_value_with_default"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(11)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "sint64_value_with_default"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(13)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "uint64_value_with_default"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(4)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "fixed64_value_with_default"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(6)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "int32_value_with_default"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(5)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "sfixed32_value_with_default"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(10)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "sint32_value_with_default"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(12)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "uint32_value_with_default"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(9)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "fixed32_value_with_default"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(7)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "bool_value_with_default"
+ field.dtype = types_pb2.DT_BOOL
+ field.value.bool_value.append(True)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "string_value_with_default"
+ field.dtype = types_pb2.DT_STRING
+ field.value.string_value.append("a")
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "bytes_value_with_default"
+ field.dtype = types_pb2.DT_STRING
+ field.value.string_value.append("a longer default string")
+ return test_case
+
+ @staticmethod
+ def minmax_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ value.double_value.append(-1.7976931348623158e+308)
+ value.double_value.append(2.2250738585072014e-308)
+ value.double_value.append(1.7976931348623158e+308)
+ value.float_value.append(-3.402823466e+38)
+ value.float_value.append(1.175494351e-38)
+ value.float_value.append(3.402823466e+38)
+ value.int64_value.append(-9223372036854775808)
+ value.int64_value.append(9223372036854775807)
+ value.sfixed64_value.append(-9223372036854775808)
+ value.sfixed64_value.append(9223372036854775807)
+ value.sint64_value.append(-9223372036854775808)
+ value.sint64_value.append(9223372036854775807)
+ value.uint64_value.append(0)
+ value.uint64_value.append(18446744073709551615)
+ value.fixed64_value.append(0)
+ value.fixed64_value.append(18446744073709551615)
+ value.int32_value.append(-2147483648)
+ value.int32_value.append(2147483647)
+ value.sfixed32_value.append(-2147483648)
+ value.sfixed32_value.append(2147483647)
+ value.sint32_value.append(-2147483648)
+ value.sint32_value.append(2147483647)
+ value.uint32_value.append(0)
+ value.uint32_value.append(4294967295)
+ value.fixed32_value.append(0)
+ value.fixed32_value.append(4294967295)
+ value.bool_value.append(False)
+ value.bool_value.append(True)
+ value.string_value.append("")
+ value.string_value.append("I refer to the infinite.")
+ test_case.shapes.append(1)
+ test_case.sizes.append(3)
+ field = test_case.fields.add()
+ field.name = "double_value"
+ field.dtype = types_pb2.DT_DOUBLE
+ field.value.double_value.append(-1.7976931348623158e+308)
+ field.value.double_value.append(2.2250738585072014e-308)
+ field.value.double_value.append(1.7976931348623158e+308)
+ test_case.sizes.append(3)
+ field = test_case.fields.add()
+ field.name = "float_value"
+ field.dtype = types_pb2.DT_FLOAT
+ field.value.float_value.append(-3.402823466e+38)
+ field.value.float_value.append(1.175494351e-38)
+ field.value.float_value.append(3.402823466e+38)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "int64_value"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(-9223372036854775808)
+ field.value.int64_value.append(9223372036854775807)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "sfixed64_value"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(-9223372036854775808)
+ field.value.int64_value.append(9223372036854775807)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "sint64_value"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(-9223372036854775808)
+ field.value.int64_value.append(9223372036854775807)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "uint64_value"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(0)
+ field.value.int64_value.append(-1)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "fixed64_value"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(0)
+ field.value.int64_value.append(-1)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "int32_value"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(-2147483648)
+ field.value.int32_value.append(2147483647)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "sfixed32_value"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(-2147483648)
+ field.value.int32_value.append(2147483647)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "sint32_value"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(-2147483648)
+ field.value.int32_value.append(2147483647)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "uint32_value"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(0)
+ field.value.int32_value.append(-1)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "fixed32_value"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(0)
+ field.value.int32_value.append(-1)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "bool_value"
+ field.dtype = types_pb2.DT_BOOL
+ field.value.bool_value.append(False)
+ field.value.bool_value.append(True)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "string_value"
+ field.dtype = types_pb2.DT_STRING
+ field.value.string_value.append("")
+ field.value.string_value.append("I refer to the infinite.")
+ return test_case
+
+ @staticmethod
+ def nested_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ message_value = value.message_value.add()
+ message_value.double_value = 23.5
+ test_case.shapes.append(1)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "message_value"
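+      # Submessage fields are decoded as DT_STRING (their serialized bytes).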
+ field.dtype = types_pb2.DT_STRING
+ message_value = field.value.message_value.add()
+ message_value.double_value = 23.5
+ return test_case
+
+ @staticmethod
+ def optional_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ value.bool_value.append(True)
+ test_case.shapes.append(1)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "bool_value"
+ field.dtype = types_pb2.DT_BOOL
+ field.value.bool_value.append(True)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "double_value"
+ field.dtype = types_pb2.DT_DOUBLE
+ field.value.double_value.append(0.0)
+ return test_case
+
+ @staticmethod
+ def promote_unsigned_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ value.fixed32_value.append(4294967295)
+ value.uint32_value.append(4294967295)
+ test_case.shapes.append(1)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "fixed32_value"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(4294967295)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "uint32_value"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(4294967295)
+ # Comes from an explicitly-specified default
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "uint32_value_with_default"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(9)
+ return test_case
+
+ @staticmethod
+ def ragged_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ value.double_value.append(23.5)
+ value.double_value.append(123.0)
+ value.bool_value.append(True)
+ value = test_case.values.add()
+ value.double_value.append(3.1)
+ value.bool_value.append(False)
+ test_case.shapes.append(2)
+ test_case.sizes.append(2)
+ test_case.sizes.append(1)
+ test_case.sizes.append(1)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "double_value"
+ field.dtype = types_pb2.DT_DOUBLE
+ field.value.double_value.append(23.5)
+ field.value.double_value.append(123.0)
+ field.value.double_value.append(3.1)
+ field.value.double_value.append(0.0)
+ field = test_case.fields.add()
+ field.name = "bool_value"
+ field.dtype = types_pb2.DT_BOOL
+ field.value.bool_value.append(True)
+ field.value.bool_value.append(False)
+ return test_case
+
+ @staticmethod
+ def shaped_batch_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ value.double_value.append(23.5)
+ value.bool_value.append(True)
+ value = test_case.values.add()
+ value.double_value.append(44.0)
+ value.bool_value.append(False)
+ value = test_case.values.add()
+ value.double_value.append(3.14159)
+ value.bool_value.append(True)
+ value = test_case.values.add()
+ value.double_value.append(1.414)
+ value.bool_value.append(True)
+ value = test_case.values.add()
+ value.double_value.append(-32.2)
+ value.bool_value.append(False)
+ value = test_case.values.add()
+ value.double_value.append(0.0001)
+ value.bool_value.append(True)
+ test_case.shapes.append(3)
+ test_case.shapes.append(2)
+ for _ in range(12):
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "double_value"
+ field.dtype = types_pb2.DT_DOUBLE
+ field.value.double_value.append(23.5)
+ field.value.double_value.append(44.0)
+ field.value.double_value.append(3.14159)
+ field.value.double_value.append(1.414)
+ field.value.double_value.append(-32.2)
+ field.value.double_value.append(0.0001)
+ field = test_case.fields.add()
+ field.name = "bool_value"
+ field.dtype = types_pb2.DT_BOOL
+ field.value.bool_value.append(True)
+ field.value.bool_value.append(False)
+ field.value.bool_value.append(True)
+ field.value.bool_value.append(True)
+ field.value.bool_value.append(False)
+ field.value.bool_value.append(True)
+ return test_case
+
+ @staticmethod
+ def simple_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ value.double_value.append(23.5)
+ value.bool_value.append(True)
+ test_case.shapes.append(1)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "double_value"
+ field.dtype = types_pb2.DT_DOUBLE
+ field.value.double_value.append(23.5)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "bool_value"
+ field.dtype = types_pb2.DT_BOOL
+ field.value.bool_value.append(True)
+ return test_case
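
Note on the unsigned expectations in the helpers above: the decode op emits
DT_INT64/DT_INT32 tensors for the uint64/fixed64 and uint32/fixed32 fields, so
the unsigned maxima reappear as the two's-complement value -1. A minimal NumPy
sketch (illustrative only, not part of the patch) of that bit-level
reinterpretation:

    import numpy as np

    # uint64 max, as used in the uint64_value/fixed64_value expectations.
    unsigned = np.array([0, 18446744073709551615], dtype=np.uint64)
    # Viewing the same 64 bits as signed yields [0, -1].
    signed = unsigned.view(np.int64)
    print(signed)  # [ 0 -1]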
diff --git a/tensorflow/contrib/proto/python/kernel_tests/ragged.TestCase.pbtxt b/tensorflow/contrib/proto/python/kernel_tests/ragged.TestCase.pbtxt
deleted file mode 100644
index 61c7ac53f7..0000000000
--- a/tensorflow/contrib/proto/python/kernel_tests/ragged.TestCase.pbtxt
+++ /dev/null
@@ -1,32 +0,0 @@
-primitive {
- double_value: 23.5
- double_value: 123.0
- bool_value: true
-}
-primitive {
- double_value: 3.1
- bool_value: false
-}
-shape: 2
-sizes: 2
-sizes: 1
-sizes: 1
-sizes: 1
-field {
- name: "double_value"
- dtype: DT_DOUBLE
- expected {
- double_value: 23.5
- double_value: 123.0
- double_value: 3.1
- double_value: 0.0
- }
-}
-field {
- name: "bool_value"
- dtype: DT_BOOL
- expected {
- bool_value: true
- bool_value: false
- }
-}
diff --git a/tensorflow/contrib/proto/python/kernel_tests/shaped_batch.TestCase.pbtxt b/tensorflow/contrib/proto/python/kernel_tests/shaped_batch.TestCase.pbtxt
deleted file mode 100644
index f4828076d5..0000000000
--- a/tensorflow/contrib/proto/python/kernel_tests/shaped_batch.TestCase.pbtxt
+++ /dev/null
@@ -1,62 +0,0 @@
-primitive {
- double_value: 23.5
- bool_value: true
-}
-primitive {
- double_value: 44.0
- bool_value: false
-}
-primitive {
- double_value: 3.14159
- bool_value: true
-}
-primitive {
- double_value: 1.414
- bool_value: true
-}
-primitive {
- double_value: -32.2
- bool_value: false
-}
-primitive {
- double_value: 0.0001
- bool_value: true
-}
-shape: 3
-shape: 2
-sizes: 1
-sizes: 1
-sizes: 1
-sizes: 1
-sizes: 1
-sizes: 1
-sizes: 1
-sizes: 1
-sizes: 1
-sizes: 1
-sizes: 1
-sizes: 1
-field {
- name: "double_value"
- dtype: DT_DOUBLE
- expected {
- double_value: 23.5
- double_value: 44.0
- double_value: 3.14159
- double_value: 1.414
- double_value: -32.2
- double_value: 0.0001
- }
-}
-field {
- name: "bool_value"
- dtype: DT_BOOL
- expected {
- bool_value: true
- bool_value: false
- bool_value: true
- bool_value: true
- bool_value: false
- bool_value: true
- }
-}
diff --git a/tensorflow/contrib/proto/python/kernel_tests/simple.TestCase.pbtxt b/tensorflow/contrib/proto/python/kernel_tests/simple.TestCase.pbtxt
deleted file mode 100644
index dc20ac147b..0000000000
--- a/tensorflow/contrib/proto/python/kernel_tests/simple.TestCase.pbtxt
+++ /dev/null
@@ -1,21 +0,0 @@
-primitive {
- double_value: 23.5
- bool_value: true
-}
-shape: 1
-sizes: 1
-sizes: 1
-field {
- name: "double_value"
- dtype: DT_DOUBLE
- expected {
- double_value: 23.5
- }
-}
-field {
- name: "bool_value"
- dtype: DT_BOOL
- expected {
- bool_value: true
- }
-}
diff --git a/tensorflow/contrib/proto/python/kernel_tests/test_example.proto b/tensorflow/contrib/proto/python/kernel_tests/test_example.proto
index a2c88e372b..674d881220 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/test_example.proto
+++ b/tensorflow/contrib/proto/python/kernel_tests/test_example.proto
@@ -1,6 +1,4 @@
// Test description and protos to work with it.
-//
-// Many of the protos in this file are for unit tests that haven't been written yet.
syntax = "proto2";
@@ -8,54 +6,27 @@ import "tensorflow/core/framework/types.proto";
package tensorflow.contrib.proto;
-// A TestCase holds a proto and a bunch of assertions
-// about how it should decode.
+// A TestCase holds a proto and assertions about how it should decode.
message TestCase {
- // A batch of primitives to be serialized and decoded.
- repeated RepeatedPrimitiveValue primitive = 1;
- // The shape of the batch.
- repeated int32 shape = 2;
+ // Batches of primitive values.
+ repeated TestValue values = 1;
+ // The batch shapes.
+ repeated int32 shapes = 2;
// Expected sizes for each field.
repeated int32 sizes = 3;
// Expected values for each field.
- repeated FieldSpec field = 4;
+ repeated FieldSpec fields = 4;
};
// FieldSpec describes the expected output for a single field.
message FieldSpec {
optional string name = 1;
optional tensorflow.DataType dtype = 2;
- optional RepeatedPrimitiveValue expected = 3;
+ optional TestValue value = 3;
};
+// NOTE: This definition must be kept in sync with PackedTestValue.
message TestValue {
- optional PrimitiveValue primitive_value = 1;
- optional EnumValue enum_value = 2;
- optional MessageValue message_value = 3;
- optional RepeatedMessageValue repeated_message_value = 4;
- optional RepeatedPrimitiveValue repeated_primitive_value = 6;
-}
-
-message PrimitiveValue {
- optional double double_value = 1;
- optional float float_value = 2;
- optional int64 int64_value = 3;
- optional uint64 uint64_value = 4;
- optional int32 int32_value = 5;
- optional fixed64 fixed64_value = 6;
- optional fixed32 fixed32_value = 7;
- optional bool bool_value = 8;
- optional string string_value = 9;
- optional bytes bytes_value = 12;
- optional uint32 uint32_value = 13;
- optional sfixed32 sfixed32_value = 15;
- optional sfixed64 sfixed64_value = 16;
- optional sint32 sint32_value = 17;
- optional sint64 sint64_value = 18;
-}
-
-// NOTE: This definition must be kept in sync with PackedPrimitiveValue.
-message RepeatedPrimitiveValue {
repeated double double_value = 1;
repeated float float_value = 2;
repeated int64 int64_value = 3;
@@ -74,30 +45,31 @@ message RepeatedPrimitiveValue {
repeated PrimitiveValue message_value = 19;
// Optional fields with explicitly-specified defaults.
- optional double double_default = 20 [default = 1.0];
- optional float float_default = 21 [default = 2.0];
- optional int64 int64_default = 22 [default = 3];
- optional uint64 uint64_default = 23 [default = 4];
- optional int32 int32_default = 24 [default = 5];
- optional fixed64 fixed64_default = 25 [default = 6];
- optional fixed32 fixed32_default = 26 [default = 7];
- optional bool bool_default = 27 [default = true];
- optional string string_default = 28 [default = "a"];
- optional bytes bytes_default = 29 [default = "a longer default string"];
- optional uint32 uint32_default = 30 [default = 4294967295];
- optional sfixed32 sfixed32_default = 31 [default = 10];
- optional sfixed64 sfixed64_default = 32 [default = 11];
- optional sint32 sint32_default = 33 [default = 12];
- optional sint64 sint64_default = 34 [default = 13];
+ optional double double_value_with_default = 20 [default = 1.0];
+ optional float float_value_with_default = 21 [default = 2.0];
+ optional int64 int64_value_with_default = 22 [default = 3];
+ optional uint64 uint64_value_with_default = 23 [default = 4];
+ optional int32 int32_value_with_default = 24 [default = 5];
+ optional fixed64 fixed64_value_with_default = 25 [default = 6];
+ optional fixed32 fixed32_value_with_default = 26 [default = 7];
+ optional bool bool_value_with_default = 27 [default = true];
+ optional string string_value_with_default = 28 [default = "a"];
+ optional bytes bytes_value_with_default = 29
+ [default = "a longer default string"];
+ optional uint32 uint32_value_with_default = 30 [default = 9];
+ optional sfixed32 sfixed32_value_with_default = 31 [default = 10];
+ optional sfixed64 sfixed64_value_with_default = 32 [default = 11];
+ optional sint32 sint32_value_with_default = 33 [default = 12];
+ optional sint64 sint64_value_with_default = 34 [default = 13];
}
-// A PackedPrimitiveValue looks exactly the same as a RepeatedPrimitiveValue
-// in the text format, but the binary serializion is different.
-// We test the packed representations by loading the same test cases
-// using this definition instead of RepeatedPrimitiveValue.
-// NOTE: This definition must be kept in sync with RepeatedPrimitiveValue
-// in every way except the packed=true declaration.
-message PackedPrimitiveValue {
+// A PackedTestValue looks exactly the same as a TestValue in the text format,
+// but the binary serialization is different. We test the packed representations
+// by loading the same test cases using this definition instead of TestValue.
+//
+// NOTE: This definition must be kept in sync with TestValue in every way except
+// the packed=true declaration.
+message PackedTestValue {
repeated double double_value = 1 [packed = true];
repeated float float_value = 2 [packed = true];
repeated int64 int64_value = 3 [packed = true];
@@ -115,23 +87,53 @@ message PackedPrimitiveValue {
repeated sint64 sint64_value = 18 [packed = true];
repeated PrimitiveValue message_value = 19;
- optional double double_default = 20 [default = 1.0];
- optional float float_default = 21 [default = 2.0];
- optional int64 int64_default = 22 [default = 3];
- optional uint64 uint64_default = 23 [default = 4];
- optional int32 int32_default = 24 [default = 5];
- optional fixed64 fixed64_default = 25 [default = 6];
- optional fixed32 fixed32_default = 26 [default = 7];
- optional bool bool_default = 27 [default = true];
- optional string string_default = 28 [default = "a"];
- optional bytes bytes_default = 29 [default = "a longer default string"];
- optional uint32 uint32_default = 30 [default = 4294967295];
- optional sfixed32 sfixed32_default = 31 [default = 10];
- optional sfixed64 sfixed64_default = 32 [default = 11];
- optional sint32 sint32_default = 33 [default = 12];
- optional sint64 sint64_default = 34 [default = 13];
+ optional double double_value_with_default = 20 [default = 1.0];
+ optional float float_value_with_default = 21 [default = 2.0];
+ optional int64 int64_value_with_default = 22 [default = 3];
+ optional uint64 uint64_value_with_default = 23 [default = 4];
+ optional int32 int32_value_with_default = 24 [default = 5];
+ optional fixed64 fixed64_value_with_default = 25 [default = 6];
+ optional fixed32 fixed32_value_with_default = 26 [default = 7];
+ optional bool bool_value_with_default = 27 [default = true];
+ optional string string_value_with_default = 28 [default = "a"];
+ optional bytes bytes_value_with_default = 29
+ [default = "a longer default string"];
+ optional uint32 uint32_value_with_default = 30 [default = 9];
+ optional sfixed32 sfixed32_value_with_default = 31 [default = 10];
+ optional sfixed64 sfixed64_value_with_default = 32 [default = 11];
+ optional sint32 sint32_value_with_default = 33 [default = 12];
+ optional sint64 sint64_value_with_default = 34 [default = 13];
}
+message PrimitiveValue {
+ optional double double_value = 1;
+ optional float float_value = 2;
+ optional int64 int64_value = 3;
+ optional uint64 uint64_value = 4;
+ optional int32 int32_value = 5;
+ optional fixed64 fixed64_value = 6;
+ optional fixed32 fixed32_value = 7;
+ optional bool bool_value = 8;
+ optional string string_value = 9;
+ optional bytes bytes_value = 12;
+ optional uint32 uint32_value = 13;
+ optional sfixed32 sfixed32_value = 15;
+ optional sfixed64 sfixed64_value = 16;
+ optional sint32 sint32_value = 17;
+ optional sint64 sint64_value = 18;
+}
+
+// Message containing fields with field numbers higher than any field above.
+// An instance of this message is prepended to each binary message in the test
+// to exercise the code path that handles fields encoded out of order of field
+// number.
+message ExtraFields {
+ optional string string_value = 1776;
+ optional bool bool_value = 1777;
+}
+
+// The messages below are for yet-to-be-created tests.
+
message EnumValue {
enum Color {
RED = 0;
@@ -171,12 +173,3 @@ message RepeatedMessageValue {
repeated NestedMessageValue message_values = 11;
}
-
-// Message containing fields with field numbers higher than any field above. An
-// instance of this message is prepended to each binary message in the test to
-// exercise the code path that handles fields encoded out of order of field
-// number.
-message ExtraFields {
- optional string string_value = 1776;
- optional bool bool_value = 1777;
-}
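
A quick way to exercise the packed/unpacked note above: conformant proto2
parsers accept both encodings of a repeated scalar field, so bytes serialized
from one definition parse under the other. A hedged sketch, assuming the
generated test_example_pb2 module is importable from this package:

    from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2

    unpacked = test_example_pb2.TestValue()
    unpacked.int64_value.extend([1, 2, 3])

    # The packed=true twin reads the unpacked wire format (and vice versa),
    # which is what lets the tests reuse one set of cases for both messages.
    packed = test_example_pb2.PackedTestValue()
    packed.ParseFromString(unpacked.SerializeToString())
    assert list(packed.int64_value) == [1, 2, 3]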
diff --git a/tensorflow/contrib/quantize/python/fold_batch_norms.py b/tensorflow/contrib/quantize/python/fold_batch_norms.py
index 55479bf5f7..e3c4899830 100644
--- a/tensorflow/contrib/quantize/python/fold_batch_norms.py
+++ b/tensorflow/contrib/quantize/python/fold_batch_norms.py
@@ -121,7 +121,8 @@ def _FoldFusedBatchNorms(graph, is_training, freeze_batch_norm_delay):
scaled_weight_tensor = math_ops.multiply(
weights, multiplier_tensor, name='mul_fold')
new_layer_tensor = _CloneWithNewOperands(
- match.layer_op, match.input_tensor, scaled_weight_tensor)
+ match.layer_op, match.input_tensor, scaled_weight_tensor,
+ match.batch_to_space_op)
if correction_recip is not None:
new_layer_tensor = math_ops.multiply(
@@ -149,6 +150,8 @@ def _FindFusedBatchNorms(graph):
_FusedBatchNormMatches.
"""
input_pattern = graph_matcher.OpTypePattern('*')
+ # In practice, the weight pattern can match a Variable or a SpaceToBatchND
+ # operation that follows a variable for atrous convolutions.
weight_pattern = graph_matcher.OpTypePattern('*')
gamma_pattern = graph_matcher.OpTypePattern('*')
beta_pattern = graph_matcher.OpTypePattern('*')
@@ -160,16 +163,27 @@ def _FindFusedBatchNorms(graph):
layer_pattern = graph_matcher.OpTypePattern(
'Conv2D|DepthwiseConv2dNative|MatMul',
inputs=[input_pattern, weight_pattern])
+ batch_to_space_pattern = graph_matcher.OpTypePattern(
+ 'BatchToSpaceND',
+ inputs=[
+ layer_pattern,
+ graph_matcher.OpTypePattern('*'),
+ graph_matcher.OpTypePattern('*')
+ ])
+ layer_output_pattern = graph_matcher.OneofPattern(
+ [layer_pattern, batch_to_space_pattern])
# MatMul has a Reshape between it and FusedBatchNorm.
matmul_reshape_pattern = graph_matcher.OpTypePattern(
- 'Reshape', inputs=[layer_pattern,
- graph_matcher.OpTypePattern('*')])
+ 'Reshape',
+ inputs=[layer_output_pattern,
+ graph_matcher.OpTypePattern('*')])
batch_norm_pattern = graph_matcher.OpTypePattern(
'FusedBatchNorm',
inputs=[
- graph_matcher.OneofPattern([matmul_reshape_pattern, layer_pattern]),
- gamma_pattern, beta_pattern, mean_pattern, variance_pattern
+ graph_matcher.OneofPattern(
+ [matmul_reshape_pattern, layer_output_pattern]), gamma_pattern,
+ beta_pattern, mean_pattern, variance_pattern
])
matmul_bn_output_reshape_pattern = graph_matcher.OpTypePattern(
'Reshape', inputs=[batch_norm_pattern,
@@ -192,6 +206,7 @@ def _FindFusedBatchNorms(graph):
moving_variance_tensor = None
bn_decay_mean_tensor = None
bn_decay_var_tensor = None
+ batch_to_space_op = None
layer_op = match_result.get_op(layer_pattern)
layer_tensor = match_result.get_tensor(layer_pattern)
bn_op = match_result.get_op(batch_norm_pattern)
@@ -213,6 +228,7 @@ def _FindFusedBatchNorms(graph):
if not output_tensor.consumers():
continue
+ batch_to_space_op = match_result.get_op(batch_to_space_pattern)
input_tensor = match_result.get_tensor(input_pattern)
weight_tensor = match_result.get_tensor(weight_pattern)
gamma_tensor = match_result.get_tensor(gamma_pattern)
@@ -276,7 +292,8 @@ def _FindFusedBatchNorms(graph):
moving_variance_tensor=moving_variance_tensor,
bn_decay_mean_tensor=bn_decay_mean_tensor,
bn_decay_var_tensor=bn_decay_var_tensor,
- batch_epsilon=batch_epsilon)
+ batch_epsilon=batch_epsilon,
+ batch_to_space_op=batch_to_space_op)
def _ComputeBatchNormCorrections(context, match, freeze_batch_norm_delay,
@@ -380,7 +397,8 @@ def _ComputeBatchNormCorrections(context, match, freeze_batch_norm_delay,
return correction_scale, correction_recip, correction_offset
-def _CloneWithNewOperands(layer_op, input_tensor, weight_tensor):
+def _CloneWithNewOperands(layer_op, input_tensor, weight_tensor,
+ batch_to_space_op):
"""Clones layer_op with input_tensor and weight_tensor as new inputs."""
new_layer_name = layer_op.name.split('/')[-1] + '_Fold'
if layer_op.type == 'Conv2D':
@@ -400,12 +418,25 @@ def _CloneWithNewOperands(layer_op, input_tensor, weight_tensor):
transpose_b=layer_op.get_attr('transpose_b'),
name=new_layer_name)
elif layer_op.type == 'DepthwiseConv2dNative':
- return nn.depthwise_conv2d(
+ conv = nn.depthwise_conv2d(
input_tensor,
weight_tensor,
+ rate=layer_op.get_attr('dilations'),
strides=layer_op.get_attr('strides'),
padding=layer_op.get_attr('padding'),
name=new_layer_name)
+    # Copy the batch to space operation if we have an atrous convolution.
+ if batch_to_space_op:
+ batch_to_space_op = layer_op.outputs[0].consumers()[0]
+ # TODO(suharshs): It's hard to make this name match with the unfused name.
+ # Restructure this code to not rely on scope at all.
+ new_batch_to_space_name = batch_to_space_op.name.split('/')[-1] + '_Fold'
+ conv = array_ops.batch_to_space_nd(
+ conv,
+ batch_to_space_op.inputs[1],
+ batch_to_space_op.inputs[2],
+ name=new_batch_to_space_name)
+ return conv
else:
raise ValueError('Cannot handle operation of type: %s' % layer_op.type)
@@ -617,7 +648,8 @@ def _GetBatchNormParams(graph, context, has_scaling):
moving_variance_tensor=moving_variance_tensor,
bn_decay_mean_tensor=bn_decay_mean_tensor,
bn_decay_var_tensor=bn_decay_var_tensor,
- batch_epsilon=batch_epsilon)
+ batch_epsilon=batch_epsilon,
+ batch_to_space_op=None)
def _CreateFoldedOp(graph, context, has_scaling, freeze_batch_norm_delay,
@@ -651,6 +683,11 @@ def _CreateFoldedOp(graph, context, has_scaling, freeze_batch_norm_delay,
'/BatchNorm/batchnorm_1/' +
mul_scale_name)
op_below = mul_scale.inputs[0].op
+ # Skip over the BatchToSpace operation in the case of atrous convolutions.
+ batch_to_space_op = None
+ if op_below.type == 'BatchToSpaceND':
+ batch_to_space_op = op_below
+ op_below = op_below.inputs[0].op
weights = op_below.inputs[1]
match = _GetBatchNormParams(
graph=graph, context=context, has_scaling=has_scaling)
@@ -691,7 +728,7 @@ def _CreateFoldedOp(graph, context, has_scaling, freeze_batch_norm_delay,
context + '/correction_mult')
mul_fold = _CloneOp(mul_scale, context + '/mul_fold', [(0, weights)])
else:
- raise ValueError('Cannot handle operation of type: %s' % op_below.op)
+ raise ValueError('Cannot handle operation of type: %s' % op_below.type)
_AssertShapesMatch('mul_fold', mul_fold.inputs[0], mul_fold.outputs[0])
conv_or_fc_folded = _CloneOp(op_below, op_below.name + '_Fold',
@@ -701,6 +738,13 @@ def _CreateFoldedOp(graph, context, has_scaling, freeze_batch_norm_delay,
context + '/BatchNorm/batchnorm_1/add_1')
corrected_output = conv_or_fc_folded.outputs[0]
+  # Copy the batch to space operation if we have an atrous convolution.
+ if batch_to_space_op:
+ corrected_output = array_ops.batch_to_space_nd(
+ corrected_output,
+ batch_to_space_op.inputs[1],
+ batch_to_space_op.inputs[2],
+ name=batch_to_space_op.name + '_Fold')
if correction_offset is not None:
with ops.device(conv_or_fc_folded.device):
corrected_output = math_ops.multiply(correction_recip, corrected_output,
@@ -898,7 +942,8 @@ class _BatchNormMatch(object):
def __init__(self, layer_op, bn_op, output_tensor, input_tensor,
weight_tensor, gamma_tensor, beta_tensor, mean_tensor,
variance_tensor, moving_mean_tensor, moving_variance_tensor,
- bn_decay_mean_tensor, bn_decay_var_tensor, batch_epsilon):
+ bn_decay_mean_tensor, bn_decay_var_tensor, batch_epsilon,
+ batch_to_space_op):
self._layer_op = layer_op
self._bn_op = bn_op
self._output_tensor = output_tensor
@@ -913,6 +958,7 @@ class _BatchNormMatch(object):
self._bn_decay_mean_tensor = bn_decay_mean_tensor
self._bn_decay_var_tensor = bn_decay_var_tensor
self._batch_epsilon = batch_epsilon
+ self._batch_to_space_op = batch_to_space_op
@property
def layer_op(self):
@@ -969,3 +1015,7 @@ class _BatchNormMatch(object):
@property
def bn_decay_var_tensor(self):
return self._bn_decay_var_tensor
+
+ @property
+ def batch_to_space_op(self):
+ return self._batch_to_space_op
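
For context on the BatchToSpaceND handling added above: TensorFlow implements
atrous (dilated) convolutions by rewriting them as SpaceToBatchND -> Conv ->
BatchToSpaceND, so the folding pass has to look through the trailing
BatchToSpaceND to reach the convolution and its weights. A small sketch of how
that op layout arises (shapes are arbitrary):

    import tensorflow as tf

    inputs = tf.zeros([5, 128, 128, 3])
    filters = tf.ones([3, 3, 3, 1])

    # atrous_conv2d lowers to SpaceToBatchND -> Conv2D -> BatchToSpaceND,
    # the pattern _FindFusedBatchNorms and _CreateFoldedOp now look through.
    dilated = tf.nn.atrous_conv2d(inputs, filters, rate=2, padding='SAME')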
diff --git a/tensorflow/contrib/quantize/python/fold_batch_norms_test.py b/tensorflow/contrib/quantize/python/fold_batch_norms_test.py
index bfa9d3bf70..7c907ffd92 100644
--- a/tensorflow/contrib/quantize/python/fold_batch_norms_test.py
+++ b/tensorflow/contrib/quantize/python/fold_batch_norms_test.py
@@ -438,6 +438,90 @@ class FoldBatchNormsTest(test_util.TensorFlowTestCase):
def testFoldDepthwiseConv2d(self):
self._RunTestOverParameters(self._TestFoldDepthwiseConv2d)
+ def _TestFoldAtrousConv2d(self, relu, relu_op_name, with_bypass, has_scaling,
+ fused_batch_norm, freeze_batch_norm_delay):
+ """Tests folding: inputs -> AtrousConv2d with batch norm -> Relu*.
+
+ Args:
+ relu: Callable that returns an Operation, a factory method for the Relu*.
+ relu_op_name: String, name of the Relu* operation.
+ with_bypass: Bool, when true there is an extra connection added from
+ inputs to just before Relu*.
+ has_scaling: Bool, when true the batch norm has scaling.
+ fused_batch_norm: Bool, when true the batch norm is fused.
+ freeze_batch_norm_delay: None or the number of steps after which training
+        switches to using frozen mean and variance.
+ """
+ g = ops.Graph()
+ with g.as_default():
+ batch_size, height, width = 5, 128, 128
+ inputs = array_ops.zeros((batch_size, height, width, 3))
+ dilation_rate = 2
+ activation_fn = None if with_bypass else relu
+ scope = 'test/test2' if with_bypass else 'test'
+ node = separable_conv2d(
+ inputs,
+ None, [3, 3],
+ rate=dilation_rate,
+ depth_multiplier=1.0,
+ padding='SAME',
+ weights_initializer=self._WeightInit(0.09),
+ activation_fn=activation_fn,
+ normalizer_fn=batch_norm,
+ normalizer_params=self._BatchNormParams(
+ scale=has_scaling, fused=fused_batch_norm),
+ scope=scope)
+ if with_bypass:
+ node = math_ops.add(inputs, node, name='test/Add')
+ relu(node, name='test/' + relu_op_name)
+
+ fold_batch_norms.FoldBatchNorms(
+ g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay)
+
+ folded_mul = g.get_operation_by_name(scope + '/mul_fold')
+ self.assertEqual(folded_mul.type, 'Mul')
+ if fused_batch_norm:
+ scale_reshape_op_name = scope + '/BatchNorm_Fold/scale_reshape'
+ else:
+ scale_reshape_op_name = scope + '/scale_reshape'
+ self._AssertInputOpsAre(folded_mul,
+ [scope + '/correction_mult', scale_reshape_op_name])
+ self._AssertOutputGoesToOps(folded_mul, g, [scope + '/depthwise_Fold'])
+
+ scale_reshape = g.get_operation_by_name(scale_reshape_op_name)
+ self.assertEqual(scale_reshape.type, 'Reshape')
+ self._AssertInputOpsAre(scale_reshape, [
+ self._BatchNormMultiplierName(scope, has_scaling, fused_batch_norm),
+ scale_reshape_op_name + '/shape'
+ ])
+ self._AssertOutputGoesToOps(scale_reshape, g, [scope + '/mul_fold'])
+
+ folded_conv = g.get_operation_by_name(scope + '/depthwise_Fold')
+ self.assertEqual(folded_conv.type, 'DepthwiseConv2dNative')
+ self._AssertInputOpsAre(
+ folded_conv, [scope + '/mul_fold', scope + '/depthwise/SpaceToBatchND'])
+ if fused_batch_norm:
+ self._AssertOutputGoesToOps(folded_conv, g,
+ [scope + '/BatchToSpaceND_Fold'])
+ else:
+ self._AssertOutputGoesToOps(folded_conv, g,
+ [scope + '/depthwise/BatchToSpaceND_Fold'])
+
+ folded_add = g.get_operation_by_name(scope + '/add_fold')
+ self.assertEqual(folded_add.type, 'Add')
+ self._AssertInputOpsAre(folded_add, [
+ scope + '/correction_add',
+ self._BathNormBiasName(scope, fused_batch_norm)
+ ])
+ output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name]
+ self._AssertOutputGoesToOps(folded_add, g, output_op_names)
+
+ for op in g.get_operations():
+ self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name)
+
+ def testFoldAtrousConv2d(self):
+ self._RunTestOverParameters(self._TestFoldAtrousConv2d)
+
def _TestCompareFoldAndUnfolded(self, relu, relu_op_name, with_bypass,
has_scaling, fused_batch_norm,
freeze_batch_norm_delay):
diff --git a/tensorflow/contrib/quantize/python/quantize.py b/tensorflow/contrib/quantize/python/quantize.py
index cbba72643f..4fc315d901 100644
--- a/tensorflow/contrib/quantize/python/quantize.py
+++ b/tensorflow/contrib/quantize/python/quantize.py
@@ -194,6 +194,8 @@ def _FindLayersToQuantize(graph):
/
conv|fc
|
+ [batch_to_space_nd]
+ |
[post_conv_correction]
|
biasadd|folded_bias
@@ -247,9 +249,21 @@ def _FindLayersToQuantize(graph):
],
ordered_inputs=False)
+  # For atrous convolutions, a BatchToSpaceND will occur after the depthwise
+ # convolution.
+ batch_to_space_pattern = graph_matcher.OpTypePattern(
+ 'BatchToSpaceND',
+ inputs=[
+ layer_pattern,
+ graph_matcher.OpTypePattern('*'),
+ graph_matcher.OpTypePattern('*')
+ ])
+
+ layer_output_pattern = graph_matcher.OneofPattern(
+ [batch_to_space_pattern, layer_pattern])
folded_bias_mul_pattern = graph_matcher.OpTypePattern(
'Mul',
- inputs=[graph_matcher.OpTypePattern('*'), layer_pattern],
+ inputs=[graph_matcher.OpTypePattern('*'), layer_output_pattern],
ordered_inputs=False)
post_layer_op_correction_pattern = graph_matcher.OpTypePattern(
'Add',
@@ -264,28 +278,37 @@ def _FindLayersToQuantize(graph):
],
ordered_inputs=False)
+  # Batch norms with forced updates have an Identity operation at the end.
+ # TODO(suharshs): Find a way to easily skip extra Identity operations. The
+ # current issue is that doing so can often match patterns across many layers
+ # incorrectly.
+ batch_norm_identity = graph_matcher.OpTypePattern(
+ 'Identity', inputs=[folded_bias_add_pattern])
+
bias_add_pattern = graph_matcher.OpTypePattern(
- 'Add|BiasAdd', inputs=[layer_pattern, '*'], ordered_inputs=False)
+ 'Add|BiasAdd', inputs=[layer_output_pattern, '*'], ordered_inputs=False)
# The bias can come from the bias add or the folded bias add.
bypass_pattern = graph_matcher.OpTypePattern(
'Add',
inputs=[
graph_matcher.OneofPattern(
- [bias_add_pattern, folded_bias_add_pattern]), '*'
+ [bias_add_pattern, folded_bias_add_pattern, batch_norm_identity]),
+ '*'
],
ordered_inputs=False)
# The input to the activation can come from bias add, fold bias add, the
# bypasses.
# TODO(suharshs): We should ideally skip Identity operations instead of
- # treating them as an activation.
+ # treating them as activations.
activation_pattern = graph_matcher.OpTypePattern(
'|'.join(_ACTIVATION_TYPES) + '|Identity',
inputs=[
graph_matcher.OneofPattern([
bias_add_pattern,
folded_bias_add_pattern,
+ batch_norm_identity,
bypass_pattern,
])
])
@@ -373,14 +396,6 @@ def _FindLayersToQuantize(graph):
return layer_matches
-def _HasPostActivationBypass(activation_op):
- for activation_tensor in activation_op.outputs:
- for output_op in activation_tensor.consumers():
- if output_op.type == 'Add':
- return True
- return False
-
-
class _LayerMatch(object):
"""Contains all information related to a matched Layer."""
diff --git a/tensorflow/contrib/quantize/python/quantize_graph.py b/tensorflow/contrib/quantize/python/quantize_graph.py
index 11d052d7f4..2944f964c7 100644
--- a/tensorflow/contrib/quantize/python/quantize_graph.py
+++ b/tensorflow/contrib/quantize/python/quantize_graph.py
@@ -191,6 +191,7 @@ def experimental_create_training_graph(input_graph=None,
def experimental_create_eval_graph(input_graph=None,
weight_bits=8,
activation_bits=8,
+ quant_delay=None,
scope=None):
"""Rewrites an eval input_graph in place for simulated quantization.
@@ -209,6 +210,8 @@ def experimental_create_eval_graph(input_graph=None,
default graph.
weight_bits: Number of bits to use for quantizing weights.
activation_bits: Number of bits to use for quantizing activations.
+ quant_delay: Number of steps after which weights and activations are
+ quantized during eval.
scope: The scope to be transformed. If it's not None, only the ops which
are in this scope will be transformed.
@@ -221,4 +224,5 @@ def experimental_create_eval_graph(input_graph=None,
is_training=False,
weight_bits=weight_bits,
activation_bits=activation_bits,
+ quant_delay=quant_delay,
scope=scope)
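
A minimal usage sketch for the new quant_delay parameter; the delay value here
is hypothetical and should match whatever the training rewrite used, so the
FakeQuant ops behave consistently across the two graphs:

    import tensorflow as tf
    from tensorflow.contrib.quantize.python import quantize_graph

    g = tf.Graph()
    with g.as_default():
      # ... build the eval model here ...
      quantize_graph.experimental_create_eval_graph(
          input_graph=g, quant_delay=500000)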
diff --git a/tensorflow/contrib/quantize/python/quantize_parameterized_test.py b/tensorflow/contrib/quantize/python/quantize_parameterized_test.py
index db745aa562..31a2955ddb 100644
--- a/tensorflow/contrib/quantize/python/quantize_parameterized_test.py
+++ b/tensorflow/contrib/quantize/python/quantize_parameterized_test.py
@@ -276,6 +276,52 @@ class QuantizeTest(test_util.TensorFlowTestCase):
graph, scope, 'DepthwiseConv2dNative', activation_op_name, with_bypass,
delay, use_resource)
+ def testQuantize_AtrousConvWithoutBatchNorm(self):
+ self._RunWithoutBatchNormTestOverParameters(
+ self._TestQuantize_AtrousConvWithoutBatchNorm)
+
+ def _TestQuantize_AtrousConvWithoutBatchNorm(
+ self, activation, activation_op_name, with_bypass, delay, use_resource):
+ """Tests quantization: inputs -> atrous conv no batch norm -> Activation.
+
+ Args:
+ activation: Callable that returns an Operation, a factory method for the
+ Activation.
+ activation_op_name: String, name of the Activation operation.
+ with_bypass: Bool, when true there is an extra connection added from
+ inputs to just before Activation.
+ delay: Int (optional), delay in number of steps until quantization starts.
+ use_resource: Bool, when true uses resource variables.
+ """
+ graph = ops.Graph()
+ with graph.as_default():
+ variable_scope.get_variable_scope().set_use_resource(use_resource)
+ batch_size, height, width, depth = 5, 128, 128, 3
+ inputs = array_ops.zeros((batch_size, height, width, depth))
+ dilation_rate = 2
+ activation_fn = None if with_bypass else activation
+ scope = 'test/test2' if with_bypass else 'test'
+ node = separable_conv2d(
+ inputs,
+ None, [3, 3],
+ rate=dilation_rate,
+ depth_multiplier=1.0,
+ padding='SAME',
+ weights_initializer=self._WeightInit(0.09),
+ activation_fn=activation_fn,
+ scope=scope)
+ if with_bypass:
+ node = math_ops.add(inputs, node, name='test/Add')
+ node = activation(node, name='test/' + activation_op_name)
+ update_barrier = control_flow_ops.no_op(name='update_barrier')
+ with ops.control_dependencies([update_barrier]):
+ array_ops.identity(node, name='control_dependency')
+ quantize.Quantize(graph, True, quant_delay=delay)
+
+ self._AssertCorrectQuantizedGraphWithoutBatchNorm(
+ graph, scope, 'DepthwiseConv2dNative', activation_op_name, with_bypass,
+ delay, use_resource)
+
def _RunBatchNormTestOverParameters(self, test_fn):
# TODO(suharshs): Use parameterized test once OSS TF supports it.
parameters_list = [
@@ -543,6 +589,61 @@ class QuantizeTest(test_util.TensorFlowTestCase):
graph, scope, 'DepthwiseConv2dNative', activation_op_name,
with_bypass, delay, use_resource)
+ def testQuantize_AtrousConvWithBatchNorm(self):
+ self._RunBatchNormTestOverParameters(
+ self._TestQuantize_AtrousConvWithBatchNorm)
+
+ def _TestQuantize_AtrousConvWithBatchNorm(
+ self, activation, activation_op_name, with_bypass, delay,
+ fused_batch_norm, use_resource):
+ """Tests quantization: inputs -> atrous conv with batch norm -> Activation.
+
+ Args:
+ activation: Callable that returns an Operation, a factory method for the
+ Activation.
+ activation_op_name: String, name of the Activation operation.
+ with_bypass: Bool, when true there is an extra connection added from
+ inputs to just before Activation.
+ delay: Int (optional), delay in number of steps until quantization starts.
+ fused_batch_norm: Bool, when true use FusedBatchNorm.
+ use_resource: Bool, when true uses resource variables.
+ """
+ graph = ops.Graph()
+ with graph.as_default():
+ variable_scope.get_variable_scope().set_use_resource(use_resource)
+ batch_size, height, width, depth = 5, 128, 128, 3
+ inputs = array_ops.zeros((batch_size, height, width, depth))
+ dilation_rate = 2
+ scope = 'test/test2' if with_bypass else 'test'
+ node = separable_conv2d(
+ inputs,
+ None, [3, 3],
+ rate=dilation_rate,
+ depth_multiplier=1.0,
+ padding='SAME',
+ weights_initializer=self._WeightInit(0.09),
+ activation_fn=None,
+ normalizer_fn=batch_norm,
+ normalizer_params=self._BatchNormParams(fused_batch_norm),
+ scope=scope)
+
+ # Manually add a bypass (optional) and an activation.
+ if with_bypass:
+ node = math_ops.add(inputs, node, name='test/Add')
+
+ node = activation(node, name='test/' + activation_op_name)
+
+ update_barrier = control_flow_ops.no_op(name='update_barrier')
+ with ops.control_dependencies([update_barrier]):
+ array_ops.identity(node, name='control_dependency')
+
+ fold_batch_norms.FoldBatchNorms(graph, is_training=True)
+ quantize.Quantize(graph, True, quant_delay=delay)
+
+ self._AssertCorrectQuantizedGraphWithBatchNorm(
+ graph, scope, 'DepthwiseConv2dNative', activation_op_name,
+ with_bypass, delay, use_resource)
+
def _AssertIdempotent(self, graph):
# Ensure that calling the rewrite again doesn't change the graph.
graph_def_before = str(graph.as_graph_def())
@@ -553,8 +654,80 @@ class QuantizeTest(test_util.TensorFlowTestCase):
graph_def_after = str(graph.as_graph_def())
self.assertEqual(graph_def_before, graph_def_after)
- def _BatchNormParams(self, fused=False):
- return {'center': True, 'scale': True, 'decay': 1.0 - 0.003, 'fused': fused}
+ def testBatchNormForcedUpdates(self):
+ parameter_list = [
+ # (activation, activation_op_name, fused_batch_norm)
+ (nn_ops.relu6, 'Relu6', False),
+ (nn_ops.relu, 'Relu', False),
+ (array_ops.identity, 'Identity', False),
+ (nn_ops.relu6, 'Relu6', True),
+ (nn_ops.relu, 'Relu', True),
+ (array_ops.identity, 'Identity', True),
+ ]
+ for params in parameter_list:
+ self._TestBatchNormForcedUpdates(params[0], params[1], params[2], False)
+ self._TestBatchNormForcedUpdates(params[0], params[1], params[2], True)
+
+ def _TestBatchNormForcedUpdates(self, activation, activation_op_name,
+ fused_batch_norm, use_resource):
+    """Post-activation bypass quantization should happen with forced updates."""
+ graph = ops.Graph()
+ with graph.as_default():
+ variable_scope.get_variable_scope().set_use_resource(use_resource)
+ batch_size, height, width, depth = 5, 128, 128, 3
+ input1 = array_ops.zeros((batch_size, height, width, depth))
+      input2 = array_ops.zeros((batch_size, height // 2, width // 2, 32))
+      # Setting updates_collections to None forces the updates to run in
+      # place, adding an extra Identity operation after each batch norm.
+ bn_params = self._BatchNormParams(
+ fused=fused_batch_norm, force_updates=True)
+ conv = conv2d(
+ input1,
+ 32, [5, 5],
+ stride=2,
+ padding='SAME',
+ weights_initializer=self._WeightInit(0.09),
+ activation_fn=activation,
+ normalizer_fn=batch_norm,
+ normalizer_params=bn_params,
+ scope='test/test')
+ bypass_tensor = math_ops.add(conv, input2, name='test/add')
+ # The output of the post_activation bypass will be another layer.
+ _ = conv2d(
+ bypass_tensor,
+ 32, [5, 5],
+ stride=2,
+ padding='SAME',
+ weights_initializer=self._WeightInit(0.09),
+ normalizer_fn=batch_norm,
+ normalizer_params=bn_params,
+ activation_fn=activation,
+ scope='test/unused')
+
+ fold_batch_norms.FoldBatchNorms(graph, is_training=True)
+ quantize.Quantize(graph, is_training=True)
+
+ # Ensure that the bypass node is preceded by and followed by a
+    # FakeQuantWithMinMaxVars operation, since the output of the Add isn't an
+ # activation.
+ self.assertTrue('FakeQuantWithMinMaxVars' in
+ [c.type for c in bypass_tensor.consumers()])
+ self.assertTrue('FakeQuantWithMinMaxVars' in
+ [i.op.type for i in bypass_tensor.op.inputs])
+
+ with open('/tmp/bn_quant_test.pbtxt', 'w') as f:
+ f.write(str(graph.as_graph_def()))
+
+ def _BatchNormParams(self, fused=False, force_updates=False):
+ params = {
+ 'center': True,
+ 'scale': True,
+ 'decay': 1.0 - 0.003,
+ 'fused': fused
+ }
+ if force_updates:
+ params['updates_collections'] = None
+ return params
def _WeightInit(self, stddev):
"""Returns truncated normal variable initializer.
diff --git a/tensorflow/contrib/rnn/BUILD b/tensorflow/contrib/rnn/BUILD
index 4eb5c920b3..2a84629080 100644
--- a/tensorflow/contrib/rnn/BUILD
+++ b/tensorflow/contrib/rnn/BUILD
@@ -118,7 +118,6 @@ cuda_py_tests(
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:init_ops",
"//tensorflow/python:math_ops",
- "//tensorflow/python:random_ops",
"//tensorflow/python:rnn",
"//tensorflow/python:rnn_cell",
"//tensorflow/python:variable_scope",
diff --git a/tensorflow/contrib/rnn/__init__.py b/tensorflow/contrib/rnn/__init__.py
index 67f31785b5..cb437f2a2f 100644
--- a/tensorflow/contrib/rnn/__init__.py
+++ b/tensorflow/contrib/rnn/__init__.py
@@ -58,6 +58,10 @@ See @{$python/contrib.rnn} guide.
@@Conv3DLSTMCell
@@HighwayWrapper
@@GLSTMCell
+@@SRUCell
+@@IndRNNCell
+@@IndyGRUCell
+@@IndyLSTMCell
<!--RNNCell wrappers-->
@@AttentionCellWrapper
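
Assuming the symbols are re-exported at tensorflow.contrib.rnn as the doc
markers above indicate, the new cells drop into the usual RNN APIs. A minimal
sketch with IndRNNCell (the other cells are drop-in replacements):

    import tensorflow as tf

    cell = tf.contrib.rnn.IndRNNCell(num_units=64)
    inputs = tf.zeros([8, 10, 32])  # [batch, time, features]
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)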
diff --git a/tensorflow/contrib/rnn/python/kernel_tests/core_rnn_cell_test.py b/tensorflow/contrib/rnn/python/kernel_tests/core_rnn_cell_test.py
index 86f1e27abd..85f0f8ced9 100644
--- a/tensorflow/contrib/rnn/python/kernel_tests/core_rnn_cell_test.py
+++ b/tensorflow/contrib/rnn/python/kernel_tests/core_rnn_cell_test.py
@@ -18,7 +18,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import functools
import os
import numpy as np
@@ -35,7 +34,6 @@ from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
-from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
@@ -117,6 +115,27 @@ class RNNCellTest(test.TestCase):
})
self.assertEqual(res[0].shape, (1, 2))
+ def testIndRNNCell(self):
+ with self.test_session() as sess:
+ with variable_scope.variable_scope(
+ "root", initializer=init_ops.constant_initializer(0.5)):
+ x = array_ops.zeros([1, 2])
+ m = array_ops.zeros([1, 2])
+ cell = contrib_rnn_cell.IndRNNCell(2)
+ g, _ = cell(x, m)
+ self.assertEqual([
+ "root/ind_rnn_cell/%s_w:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ "root/ind_rnn_cell/%s_u:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ "root/ind_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
+ ], [v.name for v in cell.trainable_variables])
+ self.assertFalse(cell.non_trainable_variables)
+ sess.run([variables_lib.global_variables_initializer()])
+ res = sess.run([g], {
+ x.name: np.array([[1., 1.]]),
+ m.name: np.array([[0.1, 0.1]])
+ })
+ self.assertEqual(res[0].shape, (1, 2))
+
def testGRUCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
@@ -145,6 +164,34 @@ class RNNCellTest(test.TestCase):
# Smoke test
self.assertAllClose(res[0], [[0.156736, 0.156736]])
+ def testIndyGRUCell(self):
+ with self.test_session() as sess:
+ with variable_scope.variable_scope(
+ "root", initializer=init_ops.constant_initializer(0.5)):
+ x = array_ops.zeros([1, 2])
+ m = array_ops.zeros([1, 2])
+ g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
+ sess.run([variables_lib.global_variables_initializer()])
+ res = sess.run([g], {
+ x.name: np.array([[1., 1.]]),
+ m.name: np.array([[0.1, 0.1]])
+ })
+ # Smoke test
+ self.assertAllClose(res[0], [[0.185265, 0.17704]])
+ with variable_scope.variable_scope(
+ "other", initializer=init_ops.constant_initializer(0.5)):
+ # Test IndyGRUCell with input_size != num_units.
+ x = array_ops.zeros([1, 3])
+ m = array_ops.zeros([1, 2])
+ g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
+ sess.run([variables_lib.global_variables_initializer()])
+ res = sess.run([g], {
+ x.name: np.array([[1., 1., 1.]]),
+ m.name: np.array([[0.1, 0.1]])
+ })
+ # Smoke test
+ self.assertAllClose(res[0], [[0.155127, 0.157328]])
+
def testSRUCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
@@ -345,6 +392,72 @@ class RNNCellTest(test.TestCase):
self.assertAllClose(res[1], expected_mem0)
self.assertAllClose(res[2], expected_mem1)
+ def testIndyLSTMCell(self):
+ for dtype in [dtypes.float16, dtypes.float32]:
+ np_dtype = dtype.as_numpy_dtype
+ with self.test_session(graph=ops.Graph()) as sess:
+ with variable_scope.variable_scope(
+ "root", initializer=init_ops.constant_initializer(0.5)):
+ x = array_ops.zeros([1, 2], dtype=dtype)
+ state_0 = (array_ops.zeros([1, 2], dtype=dtype),) * 2
+ state_1 = (array_ops.zeros([1, 2], dtype=dtype),) * 2
+ cell = rnn_cell_impl.MultiRNNCell(
+ [contrib_rnn_cell.IndyLSTMCell(2) for _ in range(2)])
+ self.assertEqual(cell.dtype, None)
+ self.assertEqual("cell-0", cell._checkpoint_dependencies[0].name)
+ self.assertEqual("cell-1", cell._checkpoint_dependencies[1].name)
+ cell.get_config() # Should not throw an error
+ g, (out_state_0, out_state_1) = cell(x, (state_0, state_1))
+ # Layer infers the input type.
+ self.assertEqual(cell.dtype, dtype.name)
+ expected_variable_names = [
+ "root/multi_rnn_cell/cell_0/indy_lstm_cell/%s_w:0" %
+ rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ "root/multi_rnn_cell/cell_0/indy_lstm_cell/%s_u:0" %
+ rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ "root/multi_rnn_cell/cell_0/indy_lstm_cell/%s:0" %
+ rnn_cell_impl._BIAS_VARIABLE_NAME,
+ "root/multi_rnn_cell/cell_1/indy_lstm_cell/%s_w:0" %
+ rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ "root/multi_rnn_cell/cell_1/indy_lstm_cell/%s_u:0" %
+ rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ "root/multi_rnn_cell/cell_1/indy_lstm_cell/%s:0" %
+ rnn_cell_impl._BIAS_VARIABLE_NAME
+ ]
+ self.assertEqual(expected_variable_names,
+ [v.name for v in cell.trainable_variables])
+ self.assertFalse(cell.non_trainable_variables)
+ sess.run([variables_lib.global_variables_initializer()])
+ res = sess.run(
+ [g, out_state_0, out_state_1], {
+ x.name: np.array([[1., 1.]]),
+ state_0[0].name: 0.1 * np.ones([1, 2]),
+ state_0[1].name: 0.1 * np.ones([1, 2]),
+ state_1[0].name: 0.1 * np.ones([1, 2]),
+ state_1[1].name: 0.1 * np.ones([1, 2]),
+ })
+ self.assertEqual(len(res), 3)
+ variables = variables_lib.global_variables()
+ self.assertEqual(expected_variable_names, [v.name for v in variables])
+ # Only check the range of outputs as this is just a smoke test.
+ self.assertAllInRange(res[0], -1.0, 1.0)
+ self.assertAllInRange(res[1], -1.0, 1.0)
+ self.assertAllInRange(res[2], -1.0, 1.0)
+ with variable_scope.variable_scope(
+ "other", initializer=init_ops.constant_initializer(0.5)):
+ # Test IndyLSTMCell with input_size != num_units.
+ x = array_ops.zeros([1, 3], dtype=dtype)
+ state = (array_ops.zeros([1, 2], dtype=dtype),) * 2
+ g, out_state = contrib_rnn_cell.IndyLSTMCell(2)(x, state)
+ sess.run([variables_lib.global_variables_initializer()])
+ res = sess.run(
+ [g, out_state], {
+ x.name: np.array([[1., 1., 1.]], dtype=np_dtype),
+ state[0].name: 0.1 * np.ones([1, 2], dtype=np_dtype),
+ state[1].name: 0.1 * np.ones([1, 2], dtype=np_dtype),
+ })
+ self.assertEqual(len(res), 2)
+
def testLSTMCell(self):
with self.test_session() as sess:
num_units = 8
@@ -935,50 +1048,6 @@ class DropoutWrapperTest(test.TestCase):
self.assertAllClose(res0[1].h, res1[1].h)
-class SlimRNNCellTest(test.TestCase):
-
- def testBasicRNNCell(self):
- with self.test_session() as sess:
- with variable_scope.variable_scope(
- "root", initializer=init_ops.constant_initializer(0.5)):
- x = array_ops.zeros([1, 2])
- m = array_ops.zeros([1, 2])
- my_cell = functools.partial(basic_rnn_cell, num_units=2)
- # pylint: disable=protected-access
- g, _ = rnn_cell_impl._SlimRNNCell(my_cell)(x, m)
- # pylint: enable=protected-access
- sess.run([variables_lib.global_variables_initializer()])
- res = sess.run([g], {
- x.name: np.array([[1., 1.]]),
- m.name: np.array([[0.1, 0.1]])
- })
- self.assertEqual(res[0].shape, (1, 2))
-
- def testBasicRNNCellMatch(self):
- batch_size = 32
- input_size = 100
- num_units = 10
- with self.test_session() as sess:
- with variable_scope.variable_scope(
- "root", initializer=init_ops.constant_initializer(0.5)):
- inputs = random_ops.random_uniform((batch_size, input_size))
- _, initial_state = basic_rnn_cell(inputs, None, num_units)
- rnn_cell = rnn_cell_impl.BasicRNNCell(num_units)
- outputs, state = rnn_cell(inputs, initial_state)
- variable_scope.get_variable_scope().reuse_variables()
- my_cell = functools.partial(basic_rnn_cell, num_units=num_units)
- # pylint: disable=protected-access
- slim_cell = rnn_cell_impl._SlimRNNCell(my_cell)
- # pylint: enable=protected-access
- slim_outputs, slim_state = slim_cell(inputs, initial_state)
- self.assertEqual(slim_outputs.get_shape(), outputs.get_shape())
- self.assertEqual(slim_state.get_shape(), state.get_shape())
- sess.run([variables_lib.global_variables_initializer()])
- res = sess.run([slim_outputs, slim_state, outputs, state])
- self.assertAllClose(res[0], res[2])
- self.assertAllClose(res[1], res[3])
-
-
def basic_rnn_cell(inputs, state, num_units, scope=None):
if state is None:
if inputs is not None:
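
For reference, the recurrence the new IndRNNCell test exercises is elementwise
in the hidden state. A NumPy sketch of one step of
output = act(W * input + u o state + b), with fixed weights standing in for
the cell's initializers (the real cell draws u uniformly from [-1, 1]):

    import numpy as np

    def ind_rnn_step(x, h, w, u, b):
      # W mixes the inputs across units; u scales each unit's own previous
      # state (a Hadamard product), so no unit sees another unit's state.
      return np.tanh(x @ w + h * u + b)

    x = np.ones((1, 2))
    h = 0.1 * np.ones((1, 2))
    w = 0.5 * np.ones((2, 2))  # input weights
    u = 0.5 * np.ones((1, 2))  # per-unit recurrent weights
    b = np.zeros(2)
    print(ind_rnn_step(x, h, w, u, b))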
diff --git a/tensorflow/contrib/rnn/python/ops/rnn_cell.py b/tensorflow/contrib/rnn/python/ops/rnn_cell.py
index b12e2cd5ed..1816b469ee 100644
--- a/tensorflow/contrib/rnn/python/ops/rnn_cell.py
+++ b/tensorflow/contrib/rnn/python/ops/rnn_cell.py
@@ -23,6 +23,7 @@ import math
from tensorflow.contrib.compiler import jit
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
@@ -30,6 +31,7 @@ from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
+from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl # pylint: disable=unused-import
@@ -3050,3 +3052,343 @@ class WeightNormLSTMCell(rnn_cell_impl.RNNCell):
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
return new_h, new_state
+
+
+class IndRNNCell(rnn_cell_impl.LayerRNNCell):
+ """Independently Recurrent Neural Network (IndRNN) cell
+ (cf. https://arxiv.org/abs/1803.04831).
+
+ Args:
+ num_units: int, The number of units in the RNN cell.
+ activation: Nonlinearity to use. Default: `tanh`.
+ reuse: (optional) Python boolean describing whether to reuse variables
+ in an existing scope. If not `True`, and the existing scope already has
+ the given variables, an error is raised.
+ name: String, the name of the layer. Layers with the same name will
+ share weights, but to avoid mistakes we require reuse=True in such
+ cases.
+ dtype: Default dtype of the layer (default of `None` means use the type
+ of the first input). Required when `build` is called before `call`.
+ """
+
+ def __init__(self,
+ num_units,
+ activation=None,
+ reuse=None,
+ name=None,
+ dtype=None):
+ super(IndRNNCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)
+
+ # Inputs must be 2-dimensional.
+ self.input_spec = base_layer.InputSpec(ndim=2)
+
+ self._num_units = num_units
+ self._activation = activation or math_ops.tanh
+
+ @property
+ def state_size(self):
+ return self._num_units
+
+ @property
+ def output_size(self):
+ return self._num_units
+
+ def build(self, inputs_shape):
+ if inputs_shape[1].value is None:
+ raise ValueError(
+ "Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape)
+
+ input_depth = inputs_shape[1].value
+ # pylint: disable=protected-access
+ self._kernel_w = self.add_variable(
+ "%s_w" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ shape=[input_depth, self._num_units])
+ self._kernel_u = self.add_variable(
+ "%s_u" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ shape=[1, self._num_units],
+ initializer=init_ops.random_uniform_initializer(
+ minval=-1, maxval=1, dtype=self.dtype))
+ self._bias = self.add_variable(
+ rnn_cell_impl._BIAS_VARIABLE_NAME,
+ shape=[self._num_units],
+ initializer=init_ops.zeros_initializer(dtype=self.dtype))
+ # pylint: enable=protected-access
+
+ self.built = True
+
+ def call(self, inputs, state):
+ """IndRNN: output = new_state = act(W * input + u * state + B)."""
+
+ gate_inputs = math_ops.matmul(inputs, self._kernel_w) + (
+ state * self._kernel_u)
+ gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
+ output = self._activation(gate_inputs)
+ return output, output
+
+
+class IndyGRUCell(rnn_cell_impl.LayerRNNCell):
+ r"""Independently Gated Recurrent Unit cell.
+
+ Based on IndRNNs (https://arxiv.org/abs/1803.04831) and similar to GRUCell,
+ yet with the \(U_r\), \(U_z\), and \(U\) matrices in equations 5, 6, and
+ 8 of http://arxiv.org/abs/1406.1078 respectively replaced by diagonal
+ matrices, i.e. a Hadamard product with a single vector:
+
+ $$r_j = \sigma\left([\mathbf W_r\mathbf x]_j +
+ [\mathbf u_r\circ \mathbf h_{(t-1)}]_j\right)$$
+ $$z_j = \sigma\left([\mathbf W_z\mathbf x]_j +
+ [\mathbf u_z\circ \mathbf h_{(t-1)}]_j\right)$$
+ $$\tilde{h}^{(t)}_j = \phi\left([\mathbf W \mathbf x]_j +
+ [\mathbf u \circ \mathbf r \circ \mathbf h_{(t-1)}]_j\right)$$
+
+ where \(\circ\) denotes the Hadamard operator. This means that each IndyGRU
+ node sees only its own state, as opposed to seeing all states in the same
+ layer.
+
+ TODO(gonnet): Write a paper describing this and add a reference here.
+
+ Args:
+ num_units: int, The number of units in the GRU cell.
+ activation: Nonlinearity to use. Default: `tanh`.
+ reuse: (optional) Python boolean describing whether to reuse variables
+ in an existing scope. If not `True`, and the existing scope already has
+ the given variables, an error is raised.
+ kernel_initializer: (optional) The initializer to use for the weight
+ matrices applied to the input.
+ bias_initializer: (optional) The initializer to use for the bias.
+ name: String, the name of the layer. Layers with the same name will
+ share weights, but to avoid mistakes we require reuse=True in such
+ cases.
+ dtype: Default dtype of the layer (default of `None` means use the type
+ of the first input). Required when `build` is called before `call`.
+ """
+
+ def __init__(self,
+ num_units,
+ activation=None,
+ reuse=None,
+ kernel_initializer=None,
+ bias_initializer=None,
+ name=None,
+ dtype=None):
+ super(IndyGRUCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)
+
+ # Inputs must be 2-dimensional.
+ self.input_spec = base_layer.InputSpec(ndim=2)
+
+ self._num_units = num_units
+ self._activation = activation or math_ops.tanh
+ self._kernel_initializer = kernel_initializer
+ self._bias_initializer = bias_initializer
+
+ @property
+ def state_size(self):
+ return self._num_units
+
+ @property
+ def output_size(self):
+ return self._num_units
+
+ def build(self, inputs_shape):
+ if inputs_shape[1].value is None:
+ raise ValueError(
+ "Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape)
+
+ input_depth = inputs_shape[1].value
+ # pylint: disable=protected-access
+ self._gate_kernel_w = self.add_variable(
+ "gates/%s_w" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ shape=[input_depth, 2 * self._num_units],
+ initializer=self._kernel_initializer)
+ self._gate_kernel_u = self.add_variable(
+ "gates/%s_u" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ shape=[1, 2 * self._num_units],
+ initializer=init_ops.random_uniform_initializer(
+ minval=-1, maxval=1, dtype=self.dtype))
+ self._gate_bias = self.add_variable(
+ "gates/%s" % rnn_cell_impl._BIAS_VARIABLE_NAME,
+ shape=[2 * self._num_units],
+ initializer=(self._bias_initializer
+ if self._bias_initializer is not None else
+ init_ops.constant_initializer(1.0, dtype=self.dtype)))
+ self._candidate_kernel_w = self.add_variable(
+ "candidate/%s" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ shape=[input_depth, self._num_units],
+ initializer=self._kernel_initializer)
+ self._candidate_kernel_u = self.add_variable(
+ "candidate/%s_u" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ shape=[1, self._num_units],
+ initializer=init_ops.random_uniform_initializer(
+ minval=-1, maxval=1, dtype=self.dtype))
+ self._candidate_bias = self.add_variable(
+ "candidate/%s" % rnn_cell_impl._BIAS_VARIABLE_NAME,
+ shape=[self._num_units],
+ initializer=(self._bias_initializer
+ if self._bias_initializer is not None else
+ init_ops.zeros_initializer(dtype=self.dtype)))
+ # pylint: enable=protected-access
+
+ self.built = True
+
+ def call(self, inputs, state):
+    """Independent gated recurrent unit (IndyGRU) with num_units cells."""
+
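+    # Each gate's recurrent term is an elementwise product of the (tiled)
+    # previous state with a [1, 2 * num_units] vector, i.e. a diagonal
+    # recurrent matrix.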
+ gate_inputs = math_ops.matmul(inputs, self._gate_kernel_w) + (
+ gen_array_ops.tile(state, [1, 2]) * self._gate_kernel_u)
+ gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias)
+
+ value = math_ops.sigmoid(gate_inputs)
+ r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
+
+ r_state = r * state
+
+ candidate = math_ops.matmul(inputs, self._candidate_kernel_w) + (
+ r_state * self._candidate_kernel_u)
+ candidate = nn_ops.bias_add(candidate, self._candidate_bias)
+
+ c = self._activation(candidate)
+ new_h = u * state + (1 - u) * c
+ return new_h, new_h
+
+
+class IndyLSTMCell(rnn_cell_impl.LayerRNNCell):
+ r"""Basic IndyLSTM recurrent network cell.
+
+ Based on IndRNNs (https://arxiv.org/abs/1803.04831) and similar to
+ BasicLSTMCell, yet with the \(U_f\), \(U_i\), \(U_o\) and \(U_c\)
+ matrices in
+ https://en.wikipedia.org/wiki/Long_short-term_memory#LSTM_with_a_forget_gate
+ replaced by diagonal matrices, i.e. a Hadamard product with a single vector:
+
+ $$f_t = \sigma_g\left(W_f x_t + u_f \circ h_{t-1} + b_f\right)$$
+ $$i_t = \sigma_g\left(W_i x_t + u_i \circ h_{t-1} + b_i\right)$$
+ $$o_t = \sigma_g\left(W_o x_t + u_o \circ h_{t-1} + b_o\right)$$
+  $$c_t = f_t \circ c_{t-1} +
+         i_t \circ \sigma_c\left(W_c x_t + u_c \circ h_{t-1} + b_c\right)$$
+  $$h_t = o_t \circ \sigma_h\left(c_t\right)$$
+
+  where \(\circ\) denotes the Hadamard product. This means that each IndyLSTM
+ node sees only its own state \(h\) and \(c\), as opposed to seeing all
+ states in the same layer.
+
+  We add forget_bias (default: 1) to the biases of the forget gate in order to
+  reduce the scale of forgetting at the beginning of training.
+
+  It does not allow cell clipping or a projection layer, and it does not use
+  peep-hole connections: it is the basic baseline.
+
+  For advanced models, please use the full @{tf.nn.rnn_cell.LSTMCell}.
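+
+  A minimal usage sketch (assuming this cell is exported as
+  `tf.contrib.rnn.IndyLSTMCell`; the returned state is an `LSTMStateTuple`):
+
+  ```
+  inputs = tf.placeholder(tf.float32, [None, None, 32])  # [batch, time, depth]
+  cell = tf.contrib.rnn.IndyLSTMCell(num_units=64)
+  outputs, (c, h) = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
+  ```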
+
+ TODO(gonnet): Write a paper describing this and add a reference here.
+ """
+
+ def __init__(self,
+ num_units,
+ forget_bias=1.0,
+ activation=None,
+ reuse=None,
+ kernel_initializer=None,
+ bias_initializer=None,
+ name=None,
+ dtype=None):
+ """Initialize the IndyLSTM cell.
+
+ Args:
+ num_units: int, The number of units in the LSTM cell.
+ forget_bias: float, The bias added to forget gates (see above).
+        Must be set to `0.0` manually when restoring from CudnnLSTM-trained
+ checkpoints.
+ activation: Activation function of the inner states. Default: `tanh`.
+ reuse: (optional) Python boolean describing whether to reuse variables
+ in an existing scope. If not `True`, and the existing scope already has
+ the given variables, an error is raised.
+ kernel_initializer: (optional) The initializer to use for the weight
+ matrix applied to the inputs.
+ bias_initializer: (optional) The initializer to use for the bias.
+ name: String, the name of the layer. Layers with the same name will
+ share weights, but to avoid mistakes we require reuse=True in such
+ cases.
+ dtype: Default dtype of the layer (default of `None` means use the type
+ of the first input). Required when `build` is called before `call`.
+ """
+ super(IndyLSTMCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)
+
+ # Inputs must be 2-dimensional.
+ self.input_spec = base_layer.InputSpec(ndim=2)
+
+ self._num_units = num_units
+ self._forget_bias = forget_bias
+ self._activation = activation or math_ops.tanh
+ self._kernel_initializer = kernel_initializer
+ self._bias_initializer = bias_initializer
+
+ @property
+ def state_size(self):
+ return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
+
+ @property
+ def output_size(self):
+ return self._num_units
+
+ def build(self, inputs_shape):
+ if inputs_shape[1].value is None:
+ raise ValueError(
+ "Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape)
+
+ input_depth = inputs_shape[1].value
+ # pylint: disable=protected-access
+ self._kernel_w = self.add_variable(
+ "%s_w" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ shape=[input_depth, 4 * self._num_units],
+ initializer=self._kernel_initializer)
+ self._kernel_u = self.add_variable(
+ "%s_u" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
+ shape=[1, 4 * self._num_units],
+ initializer=init_ops.random_uniform_initializer(
+ minval=-1, maxval=1, dtype=self.dtype))
+ self._bias = self.add_variable(
+ rnn_cell_impl._BIAS_VARIABLE_NAME,
+ shape=[4 * self._num_units],
+ initializer=(self._bias_initializer
+ if self._bias_initializer is not None else
+ init_ops.zeros_initializer(dtype=self.dtype)))
+ # pylint: enable=protected-access
+
+ self.built = True
+
+ def call(self, inputs, state):
+ """Independent Long short-term memory cell (IndyLSTM).
+
+ Args:
+ inputs: `2-D` tensor with shape `[batch_size, input_size]`.
+ state: An `LSTMStateTuple` of state tensors, each shaped
+ `[batch_size, num_units]`.
+
+ Returns:
+ A pair containing the new hidden state, and the new state (a
+ `LSTMStateTuple`).
+ """
+ sigmoid = math_ops.sigmoid
+ one = constant_op.constant(1, dtype=dtypes.int32)
+ c, h = state
+
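+    # Diagonal recurrence: tile h across the four gates and scale it
+    # elementwise by the [1, 4 * num_units] recurrent kernel.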
+ gate_inputs = math_ops.matmul(inputs, self._kernel_w)
+ gate_inputs += gen_array_ops.tile(h, [1, 4]) * self._kernel_u
+ gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
+
+ # i = input_gate, j = new_input, f = forget_gate, o = output_gate
+ i, j, f, o = array_ops.split(
+ value=gate_inputs, num_or_size_splits=4, axis=one)
+
+ forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)
+    # Note that using `add` and `multiply` instead of `+` and `*` gives a
+    # performance improvement, so we use them here at the cost of readability.
+ add = math_ops.add
+ multiply = math_ops.multiply
+ new_c = add(
+ multiply(c, sigmoid(add(f, forget_bias_tensor))),
+ multiply(sigmoid(i), self._activation(j)))
+ new_h = multiply(self._activation(new_c), sigmoid(o))
+
+ new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
+ return new_h, new_state
diff --git a/tensorflow/contrib/rpc/python/kernel_tests/BUILD b/tensorflow/contrib/rpc/python/kernel_tests/BUILD
index 2311c15a68..cb0b89ae55 100644
--- a/tensorflow/contrib/rpc/python/kernel_tests/BUILD
+++ b/tensorflow/contrib/rpc/python/kernel_tests/BUILD
@@ -1,5 +1,3 @@
-# TODO(b/76425722): Port everything in here to OS (currently excluded).
-
package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0
@@ -17,7 +15,6 @@ tf_proto_library(
srcs = ["test_example.proto"],
has_services = 1,
cc_api_version = 2,
- protodeps = ["//tensorflow/core:protos_all"],
)
py_library(
diff --git a/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py b/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py
index 27273d16b1..1c23c28860 100644
--- a/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py
+++ b/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py
@@ -51,23 +51,23 @@ class RpcOpTestBase(object):
def testScalarHostPortRpc(self):
with self.test_session() as sess:
request_tensors = (
- test_example_pb2.TestCase(shape=[1, 2, 3]).SerializeToString())
+ test_example_pb2.TestCase(values=[1, 2, 3]).SerializeToString())
response_tensors = self.rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertEqual(response_tensors.shape, ())
response_values = sess.run(response_tensors)
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values))
- self.assertAllEqual([2, 3, 4], response_message.shape)
+ self.assertAllEqual([2, 3, 4], response_message.values)
def testScalarHostPortTryRpc(self):
with self.test_session() as sess:
request_tensors = (
- test_example_pb2.TestCase(shape=[1, 2, 3]).SerializeToString())
+ test_example_pb2.TestCase(values=[1, 2, 3]).SerializeToString())
response_tensors, status_code, status_message = self.try_rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertEqual(status_code.shape, ())
@@ -77,7 +77,7 @@ class RpcOpTestBase(object):
sess.run((response_tensors, status_code, status_message)))
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values))
- self.assertAllEqual([2, 3, 4], response_message.shape)
+ self.assertAllEqual([2, 3, 4], response_message.values)
# For the base Rpc op, don't expect to get error status back.
self.assertEqual(errors.OK, status_code_values)
self.assertEqual(b'', status_message_values)
@@ -86,7 +86,7 @@ class RpcOpTestBase(object):
with self.test_session() as sess:
request_tensors = []
response_tensors = self.rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertAllEqual(response_tensors.shape, [0])
@@ -95,7 +95,7 @@ class RpcOpTestBase(object):
def testInvalidMethod(self):
for method in [
- '/InvalidService.IncrementTestShapes',
+ '/InvalidService.Increment',
self.get_method_name('InvalidMethodName')
]:
with self.test_session() as sess:
@@ -115,12 +115,12 @@ class RpcOpTestBase(object):
with self.assertRaises(errors.UnavailableError):
sess.run(
self.rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=address,
request=''))
_, status_code_value, status_message_value = sess.run(
self.try_rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=address,
request=''))
self.assertEqual(errors.UNAVAILABLE, status_code_value)
@@ -182,10 +182,10 @@ class RpcOpTestBase(object):
with self.test_session() as sess:
request_tensors = [
test_example_pb2.TestCase(
- shape=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
+ values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
]
response_tensors = self.rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertEqual(response_tensors.shape, (20,))
@@ -194,17 +194,17 @@ class RpcOpTestBase(object):
for i in range(20):
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values[i]))
- self.assertAllEqual([i + 1, i + 2, i + 3], response_message.shape)
+ self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
def testVecHostPortManyParallelRpcs(self):
with self.test_session() as sess:
request_tensors = [
test_example_pb2.TestCase(
- shape=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
+ values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
]
many_response_tensors = [
self.rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors) for _ in range(10)
]
@@ -216,25 +216,25 @@ class RpcOpTestBase(object):
for i in range(20):
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values[i]))
- self.assertAllEqual([i + 1, i + 2, i + 3], response_message.shape)
+ self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
def testVecHostPortRpcUsingEncodeAndDecodeProto(self):
with self.test_session() as sess:
request_tensors = encode_proto_op.encode_proto(
message_type='tensorflow.contrib.rpc.TestCase',
- field_names=['shape'],
+ field_names=['values'],
sizes=[[3]] * 20,
values=[
[[i, i + 1, i + 2] for i in range(20)],
])
response_tensor_strings = self.rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
_, (response_shape,) = decode_proto_op.decode_proto(
bytes=response_tensor_strings,
message_type='tensorflow.contrib.rpc.TestCase',
- field_names=['shape'],
+ field_names=['values'],
output_types=[dtypes.int32])
response_shape_values = sess.run(response_shape)
self.assertAllEqual([[i + 1, i + 2, i + 3]
@@ -285,9 +285,9 @@ class RpcOpTestBase(object):
addresses = flatten([[
self._address, 'unix:/tmp/this_unix_socket_doesnt_exist_97820348!!@'
] for _ in range(10)])
- request = test_example_pb2.TestCase(shape=[0, 1, 2]).SerializeToString()
+ request = test_example_pb2.TestCase(values=[0, 1, 2]).SerializeToString()
response_tensors, status_code, _ = self.try_rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=addresses,
request=request)
response_tensors_values, status_code_values = sess.run((response_tensors,
@@ -303,9 +303,9 @@ class RpcOpTestBase(object):
flatten = lambda x: list(itertools.chain.from_iterable(x))
with self.test_session() as sess:
methods = flatten(
- [[self.get_method_name('IncrementTestShapes'), 'InvalidMethodName']
+ [[self.get_method_name('Increment'), 'InvalidMethodName']
for _ in range(10)])
- request = test_example_pb2.TestCase(shape=[0, 1, 2]).SerializeToString()
+ request = test_example_pb2.TestCase(values=[0, 1, 2]).SerializeToString()
response_tensors, status_code, _ = self.try_rpc(
method=methods, address=self._address, request=request)
response_tensors_values, status_code_values = sess.run((response_tensors,
@@ -325,10 +325,10 @@ class RpcOpTestBase(object):
] for _ in range(10)])
requests = [
test_example_pb2.TestCase(
- shape=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
+ values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
]
response_tensors, status_code, _ = self.try_rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=addresses,
request=requests)
response_tensors_values, status_code_values = sess.run((response_tensors,
@@ -343,4 +343,4 @@ class RpcOpTestBase(object):
response_message = test_example_pb2.TestCase()
self.assertTrue(
response_message.ParseFromString(response_tensors_values[i]))
- self.assertAllEqual([i + 1, i + 2, i + 3], response_message.shape)
+ self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
diff --git a/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py b/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py
index 7cbd636cb1..265254aa51 100644
--- a/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py
+++ b/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py
@@ -30,8 +30,8 @@ from tensorflow.contrib.rpc.python.kernel_tests import test_example_pb2_grpc
class RpcOpTestServicer(test_example_pb2_grpc.TestCaseServiceServicer):
"""Test servicer for RpcOp tests."""
- def IncrementTestShapes(self, request, context):
- """Increment the entries in the shape attribute of request.
+ def Increment(self, request, context):
+ """Increment the entries in the `values` attribute of request.
Args:
request: input TestCase.
@@ -40,8 +40,8 @@ class RpcOpTestServicer(test_example_pb2_grpc.TestCaseServiceServicer):
Returns:
output TestCase.
"""
- for i in range(len(request.shape)):
- request.shape[i] += 1
+ for i in range(len(request.values)):
+ request.values[i] += 1
return request
def AlwaysFailWithInvalidArgument(self, request, context):
diff --git a/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto b/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto
index 96f4550f62..8141466349 100644
--- a/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto
+++ b/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto
@@ -1,29 +1,17 @@
// Test description and protos to work with it.
-//
-// Many of the protos in this file are for unit tests that haven't been written yet.
syntax = "proto2";
-import "tensorflow/core/framework/types.proto";
-
package tensorflow.contrib.rpc;
-// A TestCase holds a proto and a bunch of assertions
-// about how it should decode.
+// A TestCase holds a sequence of values.
message TestCase {
- // A batch of primitives to be serialized and decoded.
- repeated RepeatedPrimitiveValue primitive = 1;
- // The shape of the batch.
- repeated int32 shape = 2;
- // Expected sizes for each field.
- repeated int32 sizes = 3;
- // Expected values for each field.
- repeated FieldSpec field = 4;
+ repeated int32 values = 1;
};
service TestCaseService {
- // Copy input, and increment each entry in 'shape' by 1.
- rpc IncrementTestShapes(TestCase) returns (TestCase) {
+ // Copy input, and increment each entry in 'values' by 1.
+ rpc Increment(TestCase) returns (TestCase) {
}
// Sleep forever.
@@ -42,130 +30,3 @@ service TestCaseService {
rpc SometimesFailWithInvalidArgument(TestCase) returns (TestCase) {
}
};
-
-// FieldSpec describes the expected output for a single field.
-message FieldSpec {
- optional string name = 1;
- optional tensorflow.DataType dtype = 2;
- optional RepeatedPrimitiveValue expected = 3;
-};
-
-message TestValue {
- optional PrimitiveValue primitive_value = 1;
- optional EnumValue enum_value = 2;
- optional MessageValue message_value = 3;
- optional RepeatedMessageValue repeated_message_value = 4;
- optional RepeatedPrimitiveValue repeated_primitive_value = 6;
-}
-
-message PrimitiveValue {
- optional double double_value = 1;
- optional float float_value = 2;
- optional int64 int64_value = 3;
- optional uint64 uint64_value = 4;
- optional int32 int32_value = 5;
- optional fixed64 fixed64_value = 6;
- optional fixed32 fixed32_value = 7;
- optional bool bool_value = 8;
- optional string string_value = 9;
- optional bytes bytes_value = 12;
- optional uint32 uint32_value = 13;
- optional sfixed32 sfixed32_value = 15;
- optional sfixed64 sfixed64_value = 16;
- optional sint32 sint32_value = 17;
- optional sint64 sint64_value = 18;
-}
-
-// NOTE: This definition must be kept in sync with PackedPrimitiveValue.
-message RepeatedPrimitiveValue {
- repeated double double_value = 1;
- repeated float float_value = 2;
- repeated int64 int64_value = 3;
- repeated uint64 uint64_value = 4;
- repeated int32 int32_value = 5;
- repeated fixed64 fixed64_value = 6;
- repeated fixed32 fixed32_value = 7;
- repeated bool bool_value = 8;
- repeated string string_value = 9;
- repeated bytes bytes_value = 12;
- repeated uint32 uint32_value = 13;
- repeated sfixed32 sfixed32_value = 15;
- repeated sfixed64 sfixed64_value = 16;
- repeated sint32 sint32_value = 17;
- repeated sint64 sint64_value = 18;
- repeated PrimitiveValue message_value = 19;
-}
-
-// A PackedPrimitiveValue looks exactly the same as a RepeatedPrimitiveValue
-// in the text format, but the binary serializion is different.
-// We test the packed representations by loading the same test cases
-// using this definition instead of RepeatedPrimitiveValue.
-// NOTE: This definition must be kept in sync with RepeatedPrimitiveValue
-// in every way except the packed=true declaration.
-message PackedPrimitiveValue {
- repeated double double_value = 1 [packed = true];
- repeated float float_value = 2 [packed = true];
- repeated int64 int64_value = 3 [packed = true];
- repeated uint64 uint64_value = 4 [packed = true];
- repeated int32 int32_value = 5 [packed = true];
- repeated fixed64 fixed64_value = 6 [packed = true];
- repeated fixed32 fixed32_value = 7 [packed = true];
- repeated bool bool_value = 8 [packed = true];
- repeated string string_value = 9;
- repeated bytes bytes_value = 12;
- repeated uint32 uint32_value = 13 [packed = true];
- repeated sfixed32 sfixed32_value = 15 [packed = true];
- repeated sfixed64 sfixed64_value = 16 [packed = true];
- repeated sint32 sint32_value = 17 [packed = true];
- repeated sint64 sint64_value = 18 [packed = true];
- repeated PrimitiveValue message_value = 19;
-}
-
-message EnumValue {
- enum Color {
- RED = 0;
- ORANGE = 1;
- YELLOW = 2;
- GREEN = 3;
- BLUE = 4;
- INDIGO = 5;
- VIOLET = 6;
- };
- optional Color enum_value = 14;
- repeated Color repeated_enum_value = 15;
-}
-
-
-message InnerMessageValue {
- optional float float_value = 2;
- repeated bytes bytes_values = 8;
-}
-
-message MiddleMessageValue {
- repeated int32 int32_values = 5;
- optional InnerMessageValue message_value = 11;
- optional uint32 uint32_value = 13;
-}
-
-message MessageValue {
- optional double double_value = 1;
- optional MiddleMessageValue message_value = 11;
-}
-
-message RepeatedMessageValue {
- message NestedMessageValue {
- optional float float_value = 2;
- repeated bytes bytes_values = 8;
- }
-
- repeated NestedMessageValue message_values = 11;
-}
-
-// Message containing fields with field numbers higher than any field above. An
-// instance of this message is prepended to each binary message in the test to
-// exercise the code path that handles fields encoded out of order of field
-// number.
-message ExtraFields {
- optional string string_value = 1776;
- optional bool bool_value = 1777;
-}
diff --git a/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py b/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py
index 178328619f..4073b390fc 100644
--- a/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py
+++ b/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py
@@ -132,6 +132,48 @@ class TestGatherTree(test.TestCase):
def test_gather_tree_from_array_2d(self):
self._test_gather_tree_from_array(depth_ndims=2)
+ def test_gather_tree_from_array_complex_trajectory(self):
+ # Max. time = 7, batch = 1, beam = 5.
+ array = np.expand_dims(np.array(
+ [[[25, 12, 114, 89, 97]],
+ [[9, 91, 64, 11, 162]],
+ [[34, 34, 34, 34, 34]],
+ [[2, 4, 2, 2, 4]],
+ [[2, 3, 6, 2, 2]],
+ [[2, 2, 2, 3, 2]],
+ [[2, 2, 2, 2, 2]]]), -1)
+ parent_ids = np.array(
+ [[[0, 0, 0, 0, 0]],
+ [[0, 0, 0, 0, 0]],
+ [[0, 1, 2, 3, 4]],
+ [[0, 0, 1, 2, 1]],
+ [[0, 1, 1, 2, 3]],
+ [[0, 1, 3, 1, 2]],
+ [[0, 1, 2, 3, 4]]])
+ expected_array = np.expand_dims(np.array(
+ [[[25, 25, 25, 25, 25]],
+ [[9, 9, 91, 9, 9]],
+ [[34, 34, 34, 34, 34]],
+ [[2, 4, 2, 4, 4]],
+ [[2, 3, 6, 3, 6]],
+ [[2, 2, 2, 3, 2]],
+ [[2, 2, 2, 2, 2]]]), -1)
+ sequence_length = [[4, 6, 4, 7, 6]]
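+      # Per-beam sequence lengths; steps at or beyond a beam's length are
+      # expected to simply copy that beam's values.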
+
+ array = ops.convert_to_tensor(
+ array, dtype=dtypes.float32)
+ parent_ids = ops.convert_to_tensor(
+ parent_ids, dtype=dtypes.int32)
+ expected_array = ops.convert_to_tensor(
+ expected_array, dtype=dtypes.float32)
+
+ sorted_array = beam_search_decoder.gather_tree_from_array(
+ array, parent_ids, sequence_length)
+
+ with self.test_session() as sess:
+ sorted_array, expected_array = sess.run([sorted_array, expected_array])
+ self.assertAllEqual(expected_array, sorted_array)
+
class TestArrayShapeChecks(test.TestCase):
diff --git a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
index 184144f64a..f17dbb0fe3 100644
--- a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
+++ b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
@@ -145,24 +145,20 @@ def gather_tree_from_array(t, parent_ids, sequence_length):
array_ops.expand_dims(math_ops.range(beam_width), 0), 0)
beam_ids = array_ops.tile(beam_ids, [max_time, batch_size, 1])
- mask = array_ops.sequence_mask(
- sequence_length, maxlen=max_time, dtype=dtypes.int32)
- mask = array_ops.transpose(mask, perm=[2, 0, 1])
-
- # Use beam_width + 1 to mark the end of beam.
- masked_beam_ids = (beam_ids * mask) + (1 - mask) * (beam_width + 1)
-
max_sequence_lengths = math_ops.to_int32(
math_ops.reduce_max(sequence_length, axis=1))
sorted_beam_ids = beam_search_ops.gather_tree(
- step_ids=masked_beam_ids,
+ step_ids=beam_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=beam_width + 1)
# For out of range steps, simply copy the same beam.
+ in_bound_steps = array_ops.transpose(
+ array_ops.sequence_mask(sequence_length, maxlen=max_time),
+ perm=[2, 0, 1])
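+  # in_bound_steps has shape [max_time, batch_size, beam_width] and is True
+  # exactly for the steps within each sequence's length.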
sorted_beam_ids = array_ops.where(
- math_ops.cast(mask, dtypes.bool), x=sorted_beam_ids, y=beam_ids)
+ in_bound_steps, x=sorted_beam_ids, y=beam_ids)
# Generate indices for gather_nd.
time_ind = array_ops.tile(array_ops.reshape(
@@ -250,7 +246,7 @@ class BeamSearchDecoder(decoder.Decoder):
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
- tiled_encoder_final_state = tf.conrib.seq2seq.tile_batch(
+ tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
diff --git a/tensorflow/contrib/seq2seq/python/ops/decoder.py b/tensorflow/contrib/seq2seq/python/ops/decoder.py
index e69725ff8a..f58268eff5 100644
--- a/tensorflow/contrib/seq2seq/python/ops/decoder.py
+++ b/tensorflow/contrib/seq2seq/python/ops/decoder.py
@@ -21,6 +21,7 @@ from __future__ import print_function
import abc
import six
+from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
@@ -182,19 +183,20 @@ def dynamic_decode(decoder,
raise TypeError("Expected decoder to be type Decoder, but saw: %s" %
type(decoder))
- def _is_xla_tensor(tensor):
- try:
- op = tensor.op
- except AttributeError:
- return False
- if control_flow_util.IsInXLAContext(op):
- return True
- return False
-
with variable_scope.variable_scope(scope, "decoder") as varscope:
- # Properly cache variable values inside the while_loop
- if varscope.caching_device is None:
- varscope.set_caching_device(lambda op: op.device)
+ # Determine context types.
+ ctxt = ops.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access
+ is_xla = control_flow_util.GetContainingXLAContext(ctxt) is not None
+ in_while_loop = (
+ control_flow_util.GetContainingWhileContext(ctxt) is not None)
+ # Properly cache variable values inside the while_loop.
+ # Don't set a caching device when running in a loop, since it is possible
+ # that train steps could be wrapped in a tf.while_loop. In that scenario
+ # caching prevents forward computations in loop iterations from re-reading
+ # the updated weights.
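+      # The `lambda op: op.device` caching device places each variable's
+      # cached value on the device of the op that reads it.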
+ if not context.executing_eagerly() and not in_while_loop:
+ if varscope.caching_device is None:
+ varscope.set_caching_device(lambda op: op.device)
if maximum_iterations is not None:
maximum_iterations = ops.convert_to_tensor(
@@ -208,9 +210,6 @@ def dynamic_decode(decoder,
decoder.output_dtype,
decoder.batch_size)
- is_xla = False
- if any([_is_xla_tensor(i) for i in nest.flatten(initial_inputs)]):
- is_xla = True
if is_xla and maximum_iterations is None:
raise ValueError("maximum_iterations is required for XLA compilation.")
if maximum_iterations is not None:
diff --git a/tensorflow/contrib/slim/python/slim/evaluation_test.py b/tensorflow/contrib/slim/python/slim/evaluation_test.py
index 3d0308aaf3..2c97834523 100644
--- a/tensorflow/contrib/slim/python/slim/evaluation_test.py
+++ b/tensorflow/contrib/slim/python/slim/evaluation_test.py
@@ -33,7 +33,6 @@ from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import hooks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import errors
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
@@ -242,7 +241,7 @@ class SingleEvaluationTest(test.TestCase):
checkpoint_path = os.path.join(self.get_temp_dir(),
'this_file_doesnt_exist')
log_dir = os.path.join(self.get_temp_dir(), 'error_raised')
- with self.assertRaises(errors.NotFoundError):
+ with self.assertRaises(ValueError):
evaluation.evaluate_once('', checkpoint_path, log_dir)
def _prepareCheckpoint(self, checkpoint_path):
diff --git a/tensorflow/contrib/summary/summary_ops_test.py b/tensorflow/contrib/summary/summary_ops_test.py
index f1ef218e74..3e41e3d0b4 100644
--- a/tensorflow/contrib/summary/summary_ops_test.py
+++ b/tensorflow/contrib/summary/summary_ops_test.py
@@ -81,6 +81,19 @@ class EagerFileTest(test_util.TensorFlowTestCase):
# test here that we're calling them correctly.
self.assertTrue(gfile.Exists(logdir))
+ @test_util.assert_no_new_pyobjects_executing_eagerly
+ def testEagerMemory(self):
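+    # Exercises each summary op once while executing eagerly; the decorator
+    # above asserts that no new Python objects are left behind.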
+ training_util.get_or_create_global_step()
+ logdir = self.get_temp_dir()
+ with summary_ops.create_file_writer(
+ logdir, max_queue=0,
+ name='t0').as_default(), summary_ops.always_record_summaries():
+ summary_ops.generic('tensor', 1, '')
+ summary_ops.scalar('scalar', 2.0)
+ summary_ops.histogram('histogram', [1.0])
+ summary_ops.image('image', [[[[1.0]]]])
+ summary_ops.audio('audio', [[1.0]], 1.0, 1)
+
def testDefunSummarys(self):
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
diff --git a/tensorflow/contrib/tensorboard/db/BUILD b/tensorflow/contrib/tensorboard/db/BUILD
index 3f6b4cdc9a..6507546ee9 100644
--- a/tensorflow/contrib/tensorboard/db/BUILD
+++ b/tensorflow/contrib/tensorboard/db/BUILD
@@ -106,6 +106,7 @@ cc_library(
"//tensorflow/core:framework",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
+ "//tensorflow/core:png_internal",
"//tensorflow/core:protos_all_cc",
],
)
diff --git a/tensorflow/contrib/tensorrt/BUILD b/tensorflow/contrib/tensorrt/BUILD
index adda0b758b..cb2daa7b12 100644
--- a/tensorflow/contrib/tensorrt/BUILD
+++ b/tensorflow/contrib/tensorrt/BUILD
@@ -11,7 +11,7 @@ exports_files(["LICENSE"])
load(
"//tensorflow:tensorflow.bzl",
- "py_test",
+ "cuda_py_test",
"tf_cc_test",
"tf_copts",
"tf_cuda_library",
@@ -32,10 +32,7 @@ tf_cuda_cc_test(
name = "tensorrt_test_cc",
size = "small",
srcs = ["tensorrt_test.cc"],
- tags = [
- "manual",
- "notap",
- ],
+ tags = ["no_windows"],
deps = [
"//tensorflow/core:lib",
"//tensorflow/core:test",
@@ -185,6 +182,9 @@ tf_py_wrap_cc(
name = "wrap_conversion",
srcs = ["trt_conversion.i"],
copts = tf_copts(),
+ swig_includes = [
+ "//tensorflow/python:platform/base.i",
+ ],
deps = [
":trt_conversion",
":trt_engine_op_kernel",
@@ -275,6 +275,7 @@ tf_cc_test(
name = "segment_test",
size = "small",
srcs = ["segment/segment_test.cc"],
+ tags = ["no_windows"],
deps = [
":segment",
"//tensorflow/c:c_api",
@@ -310,10 +311,6 @@ tf_cuda_cc_test(
name = "trt_plugin_factory_test",
size = "small",
srcs = ["plugin/trt_plugin_factory_test.cc"],
- tags = [
- "manual",
- "notap",
- ],
deps = [
":trt_plugins",
"//tensorflow/core:lib",
@@ -325,23 +322,24 @@ tf_cuda_cc_test(
]),
)
-py_test(
+cuda_py_test(
name = "tf_trt_integration_test",
srcs = ["test/tf_trt_integration_test.py"],
- main = "test/tf_trt_integration_test.py",
- srcs_version = "PY2AND3",
- tags = [
- "manual",
- "notap",
- ],
- deps = [
+ additional_deps = [
":init_py",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework_test_lib",
],
+ main = "test/tf_trt_integration_test.py",
+ tags = [
+ "no_windows",
+ "nomac",
+ ],
)
cc_library(
name = "utils",
+ srcs = ["convert/utils.cc"],
hdrs = ["convert/utils.h"],
+ copts = tf_copts(),
)
diff --git a/tensorflow/contrib/tensorrt/convert/convert_graph.cc b/tensorflow/contrib/tensorrt/convert/convert_graph.cc
index ba01eaabc2..3b42a5ee96 100644
--- a/tensorflow/contrib/tensorrt/convert/convert_graph.cc
+++ b/tensorflow/contrib/tensorrt/convert/convert_graph.cc
@@ -31,7 +31,7 @@ limitations under the License.
#include "tensorflow/contrib/tensorrt/segment/segment.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id_manager.h"
-#include "tensorflow/core/common_runtime/gpu/process_state.h"
+#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
@@ -86,27 +86,48 @@ bool IsTensorRTCandidate(const tensorflow::Node* node) {
// TODO(jie): Segmentation shouldn't associated with op name.
// Split it into a registration for each kernel.
static const std::set<string> candidate_ops = {
- "Identity",
- "Snapshot",
- "Const",
- "Conv2D",
- "MaxPool",
- "BiasAdd",
- "Relu",
- "Add",
- "Mul",
- "Sub",
- "Rsqrt",
- "Pad",
- "Mean",
- "AvgPool",
- "ConcatV2",
- "DepthwiseConv2dNative",
- "FusedBatchNorm",
- "FusedBatchNormV2",
- // TODO(ben,jie): ...
+ "Identity",
+ "Snapshot",
+ "Const",
+ "Conv2D",
+ "MaxPool",
+ "BiasAdd",
+ "Relu",
+ "Add",
+ "Mul",
+ "Sub",
+ "Rsqrt",
+ "Pad",
+ "Mean",
+ "AvgPool",
+ "ConcatV2",
+ "DepthwiseConv2dNative",
+ "FusedBatchNorm",
+ "FusedBatchNormV2",
+ "Div",
+ "RealDiv",
+ "Reciprocal",
+ "Exp",
+ "Log",
+ "Sqrt",
+ "Abs",
+ "Neg",
+#if NV_TENSORRT_MAJOR > 3
+ "MatMul",
+ "BatchMatMul",
+ "Softmax",
+ "Minimum",
+ "Maximum",
+ "TopKV2",
+ "Sum",
+ "Prod",
+ "Max",
+ "Min",
+#endif
+ // TODO(ben,jie): ...
};
- // LINT.ThenChange(//tensorflow/contrib/tensorrt/convert/convert_nodes.h)
+ // LINT.ThenChange(//tensorflow/contrib/tensorrt/convert/convert_nodes.cc)
return (candidate_ops.count(node->type_string()) ||
PluginFactoryTensorRT::GetInstance()->IsPlugin(node->type_string()));
}
@@ -142,7 +163,7 @@ tensorflow::Status ConvertCalibGraphToInferGraph(
auto n = infer_graph->mutable_node(i);
if (n->op() == "TRTEngineOp") {
VLOG(1) << "Processing " << n->name();
- string container_name = n->attr().at("segment_funcdef_name").s();
+ const string& container_name = n->attr().at("segment_funcdef_name").s();
TRTCalibrationResource* cres = nullptr;
auto status = calib_rm->Lookup(container_name, "Calibrator", &cres);
if (!status.ok()) {
@@ -152,7 +173,7 @@ tensorflow::Status ConvertCalibGraphToInferGraph(
"Need to run graph with calibration data first!");
}
if (cres->calibrator_) {
- cres->calibrator_->setDone();
+ cres->calibrator_->waitAndSetDone();
cres->thr_->join();
const auto& calibration_table =
cres->calibrator_->getCalibrationTableAsString();
@@ -168,6 +189,7 @@ tensorflow::Status ConvertCalibGraphToInferGraph(
"Can't get TRTCalibrator from resource manager!");
}
cres->Unref();
+ TF_RETURN_IF_ERROR(calib_rm->Cleanup(container_name));
}
}
return tensorflow::Status::OK();
@@ -247,6 +269,7 @@ tensorflow::Status GetEngineInfo(
const std::vector<tensorflow::Node*>& reverse_topo_order,
EngineInfo* info) {
std::vector<int> subgraph_node_ids;
+ std::set<int> added_const_node_ids; // Used to prevent double insertion.
std::set<string> segment_devices;
int input_port = 0;
int output_port = 0;
@@ -256,6 +279,7 @@ tensorflow::Status GetEngineInfo(
// edge, thus there must not be any duplicates since source nodes of
// input/output edges must be in different split of the graph.
// TODO(aaroey): consider using node id and port instead.
+  // TODO(aaroey): use topo order instead of reversing the reverse topo order.
std::unordered_map<string, int> created_edges;
for (auto it = reverse_topo_order.rbegin(); it != reverse_topo_order.rend();
++it) {
@@ -274,8 +298,7 @@ tensorflow::Status GetEngineInfo(
<< " neither have requested device nor assigned device";
}
}
- int node_id = node->id();
- subgraph_node_ids.push_back(node_id);
+ const int node_id = node->id();
for (const auto edge : node->in_edges()) {
auto input_node = edge->src();
if (segment_nodes.count(input_node->name()) == 0 &&
@@ -286,7 +309,10 @@ tensorflow::Status GetEngineInfo(
// won't be removed from the graph. If it doesn't have any edges, TF
// will prune it out.
if (input_node->type_string() == "Const") {
- subgraph_node_ids.push_back(input_node->id());
+ if (added_const_node_ids.count(input_node->id()) == 0) {
+ added_const_node_ids.insert(input_node->id());
+ subgraph_node_ids.push_back(input_node->id());
+ }
} else {
string s(input_node->name());
StrAppend(&s, ":", edge->src_output());
@@ -304,6 +330,9 @@ tensorflow::Status GetEngineInfo(
}
}
}
+ // We need to add possible const input nodes before adding this node in
+ // order to keep the topological order.
+ subgraph_node_ids.push_back(node_id);
for (const auto edge : node->out_edges()) {
auto output_node = edge->dst();
if (segment_nodes.count(output_node->name()) == 0 &&
@@ -598,7 +627,9 @@ tensorflow::Status RegisterSegmentFunctionToFunctionLibrary(
edge->src()->output_type(edge->src_output()));
VLOG(1) << " input " << nout.node << ":" << nout.index
<< " dtype=" << tensorflow::DataTypeString(nout.data_type);
- node_builder.Input({nout});
+ // nvcc complains that Input(<brace-enclosed initializer list>) is
+ // ambiguous, so do not use Input({nout}).
+ node_builder.Input(nout);
TF_RETURN_IF_ERROR(node_builder.Attr("T", node->output_type(0))
.Attr("index", i)
.Finalize(&nd));
@@ -654,7 +685,7 @@ std::pair<int, tensorflow::Allocator*> GetDeviceAndAllocator(
// to allocators.
// TODO(sami): when grappler devices become available else path will not be
// necessary
- auto pm = tensorflow::ProcessState::singleton();
+ auto pm = tensorflow::GPUProcessState::singleton();
if (params.cluster) { // get allocator
tensorflow::Device* device = nullptr;
if (params.cluster->GetDeviceSet()) {
@@ -805,7 +836,9 @@ tensorflow::Status ConvertAfterShapes(ConversionParams& params) {
// The allocator is used to build the engine. The build and the built engine
// will be destroyed after we get the serialized engine string, so it's fine
// to use unique_ptr here.
- std::unique_ptr<nvinfer1::IGpuAllocator> alloc;
+  // TODO(aaroey): nvinfer1::IGpuAllocator doesn't have a virtual destructor,
+  // so destroying it through a unique_ptr would result in a segfault; fix it.
+ std::unique_ptr<TRTDeviceAllocator> alloc;
auto device_alloc = GetDeviceAndAllocator(params, engine);
int cuda_device_id = 0;
if (device_alloc.first >= 0) {
@@ -827,8 +860,8 @@ tensorflow::Status ConvertAfterShapes(ConversionParams& params) {
} else {
// Graph is not modified.
LOG(WARNING) << "Engine creation for segment " << i << ", composed of "
- << converted_segments.at(i).first.size() << " nodes failed: "
- << status << ". Skipping...";
+ << converted_segments.at(i).first.size()
+ << " nodes failed: " << status << ". Skipping...";
}
}
cudaSetDevice(old_cuda_device);
diff --git a/tensorflow/contrib/tensorrt/convert/convert_nodes.cc b/tensorflow/contrib/tensorrt/convert/convert_nodes.cc
index c49e26ea4e..e4ffc230e4 100644
--- a/tensorflow/contrib/tensorrt/convert/convert_nodes.cc
+++ b/tensorflow/contrib/tensorrt/convert/convert_nodes.cc
@@ -50,9 +50,29 @@ limitations under the License.
#if GOOGLE_TENSORRT
#include "tensorrt/include/NvInfer.h"
-// Check if the types are equal. Cast to int first so that failure log message
-// would work!
-#define CHECK_EQ_TYPE(val1, val2) CHECK_EQ((int)val1, (int)val2)
+// Check if the types are equal. Cast to int first so that the failure log
+// message prints the numeric values.
+#define TFTRT_CHECK_EQ_TYPE(val1, val2) CHECK_EQ((int)val1, (int)val2)
+
+#define TFTRT_INTERNAL_ERROR_AT_NODE(node) \
+ do { \
+ return tensorflow::errors::Internal( \
+        "TFTRT::", __FUNCTION__, " failed to add TRT layer, at: ", node); \
+ } while (0)
+
+#define TFTRT_RETURN_ERROR_IF_FALSE(status, node) \
+ do { \
+ if (status == false) { \
+ TFTRT_INTERNAL_ERROR_AT_NODE(node); \
+ } \
+ } while (0)
+
+#define TFTRT_RETURN_ERROR_IF_NULLPTR(ptr, node) \
+ do { \
+ if (ptr == nullptr) { \
+ TFTRT_INTERNAL_ERROR_AT_NODE(node); \
+ } \
+ } while (0)
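+
+// For example, TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_name) returns an
+// Internal error naming the calling function and the given node whenever the
+// TRT layer could not be created.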
namespace tensorflow {
namespace tensorrt {
@@ -75,6 +95,11 @@ inline tensorflow::Status ConvertDType(tensorflow::DataType tf_dtype,
case tensorflow::DataType::DT_HALF:
*trt_dtype = nvinfer1::DataType::kHALF;
break;
+#if NV_TENSORRT_MAJOR > 3
+ case tensorflow::DataType::DT_INT32:
+ *trt_dtype = nvinfer1::DataType::kINT32;
+ break;
+#endif
default:
return tensorflow::errors::InvalidArgument(
"Unsupported data type ", tensorflow::DataTypeString(tf_dtype));
@@ -134,6 +159,98 @@ tensorflow::Status ValidateInputProperties(const PartialTensorShape& shape,
return Status::OK();
}
+// Returns whether or not the broadcast is feasible.
+bool TensorRTGetBroadcastShape(const nvinfer1::Dims& operand_l,
+ const bool operand_l_is_tensor,
+ const nvinfer1::Dims& operand_r,
+ const bool operand_r_is_tensor,
+ nvinfer1::Dims* operand_l_new_shape,
+ nvinfer1::Dims* operand_r_new_shape) {
+ // ***************************************************************************
+  // The TensorRT elementwise op supports broadcast, but requires both tensors
+  // to be of identical rank.
+ //
+ // We consider case of:
+ // 1. operand_l to be a Tensor & operand_r to be a Const;
+ // 2. operand_l to be a Tensor & operand_r to be a Tensor;
+  //    note: const op const (constant folding) should fall back to TensorFlow
+ //
+ // broadcast scheme:
+ // T: 1 3 5 (tensor would not have batch dimension)
+ // W: 1 1 3 1 (weight would have all explicit dimensions)
+ // i. fill in explicit dimensions
+ // -> T: -1 1 3 5 (we put a -1 for batch dimension)
+ // -> W: 1 1 3 1
+ // ii. compare broadcast feasibility
+ //
+  // We cannot support the following case, since TensorRT does not allow
+  // manipulation of the batch dimension and we cannot generate an output with
+  // the proper shape:
+ // T: 3 5 1
+ // W: 1 1 1 1 3 5 1
+ // -> T: 1 1 1 -1 3 5 1
+ // -> W: 1 1 1 1 3 5 1
+ // ***************************************************************************
+ const int max_nb_dims = nvinfer1::Dims::MAX_DIMS + 1;
+ const size_t element_size = sizeof(operand_l.d[0]);
+
+ // fill in dimensions
+ int l_s[max_nb_dims];
+ std::fill(l_s, l_s + max_nb_dims, 1);
+ int l_d = operand_l_is_tensor ? operand_l.nbDims + 1 : operand_l.nbDims;
+ int r_s[max_nb_dims];
+ std::fill(r_s, r_s + max_nb_dims, 1);
+ int r_d = operand_r_is_tensor ? operand_r.nbDims + 1 : operand_r.nbDims;
+
+ int max_d = std::max(l_d, r_d);
+ std::memcpy(l_s + max_d - operand_l.nbDims, operand_l.d,
+ operand_l.nbDims * element_size);
+ std::memcpy(r_s + max_d - operand_r.nbDims, operand_r.d,
+ operand_r.nbDims * element_size);
+
+ // set -1 for batch dimension, since batch size is not supposed to be
+ // broadcasted
+ if (operand_l_is_tensor) {
+ if (max_d != l_d) { // if broadcast beyond batch dimension, fail
+ return false;
+ }
+ l_s[0] = -1;
+ }
+ if (operand_r_is_tensor) {
+ if (max_d != r_d) { // if broadcast beyond batch dimension, fail
+ return false;
+ }
+ r_s[0] = -1;
+ }
+
+ // compare broadcast feasibility
+ for (int i = max_d - 1; i >= 0; i--) {
+ if ((l_s[i] != r_s[i]) && (l_s[i] != 1) && (r_s[i] != 1)) {
+ return false;
+ }
+ }
+
+ // output new TensorRT Dimension (stripping the batch dimension)
+ operand_l_new_shape->nbDims = max_d - 1;
+ std::memcpy(operand_l_new_shape->d, l_s + 1, (max_d - 1) * element_size);
+ operand_r_new_shape->nbDims = max_d - 1;
+ std::memcpy(operand_r_new_shape->d, r_s + 1, (max_d - 1) * element_size);
+
+ return true;
+}
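+
+// For example, a tensor of TRT shape [3, 5] (batch dimension implicit)
+// broadcast against weights of shape [1, 1, 5] yields the new shapes
+// [3, 5] and [1, 5].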
+
+inline bool DimsEqual(const nvinfer1::Dims& dim_l,
+ const nvinfer1::Dims& dim_r) {
+ if (dim_l.nbDims != dim_r.nbDims) {
+ return false;
+ }
+ for (int i = 0; i < dim_l.nbDims; i++) {
+ if (dim_l.d[i] != dim_r.d[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
inline nvinfer1::Dims GetTensorShape(const tensorflow::Tensor& tensor) {
nvinfer1::Dims dims;
dims.nbDims = tensor.dims();
@@ -143,7 +260,7 @@ inline nvinfer1::Dims GetTensorShape(const tensorflow::Tensor& tensor) {
return dims;
}
-inline int64_t GetShapeSize(nvinfer1::Dims shape) {
+inline int64_t GetShapeSize(const nvinfer1::Dims& shape) {
// Returns total number of elements in shape
int64_t count = 1;
for (int d = 0; d < shape.nbDims; ++d) {
@@ -156,7 +273,7 @@ static std::vector<std::pair<int, int>> CreateSamePadding(
const nvinfer1::DimsHW& stride, const nvinfer1::DimsHW& kernel,
const std::vector<int64_t>& input_dims) {
std::vector<std::pair<int, int>> padding(input_dims.size());
- CHECK_EQ((size_t)stride.nbDims, input_dims.size()); // TODO(jie): N+C? NC+?
+ CHECK_EQ(stride.nbDims, input_dims.size()); // TODO(jie): N+C? NC+?
for (size_t i = 0; i < input_dims.size(); ++i) {
// Formula to calculate the padding
@@ -186,6 +303,7 @@ string GetCommonNameScope(const string& op_name_a, const string& op_name_b) {
return op_name_a.substr(0, last_scope_separator);
}
+// Class to convert TF weight to TRT weight.
class TRT_ShapedWeights {
public:
TRT_ShapedWeights(tensorflow::DataType type, const void* values,
@@ -197,12 +315,14 @@ class TRT_ShapedWeights {
explicit TRT_ShapedWeights(tensorflow::DataType type)
: shape_(), type_(type), values_(nullptr), empty_weight_flag_(true) {}
+ // TODO(aaroey): use rvalue reference.
TRT_ShapedWeights(const TRT_ShapedWeights& rhs)
: shape_(rhs.shape_),
type_(rhs.type_),
values_(rhs.values_),
empty_weight_flag_(rhs.empty_weight_flag_) {}
+ // TODO(aaroey): use GetShapeSize() instead.
int64_t count() const {
int64_t c = 1;
for (int i = 0; i < shape_.nbDims; i++) c *= shape_.d[i];
@@ -220,6 +340,7 @@ class TRT_ShapedWeights {
const void* GetValues() const { return values_; }
+ // TODO(aaroey): get rid of this method.
void SetValues(const void* values) { values_ = values; }
size_t size_bytes() const {
@@ -230,10 +351,12 @@ class TRT_ShapedWeights {
// Default converter
operator nvinfer1::Weights() const { return GetWeightsForTRT(); }
+ // TODO(aaroey): make these private.
nvinfer1::Dims shape_;
tensorflow::DataType type_;
private:
+ // TODO(aaroey): this should not be const as it's always from TRTWeightStore.
const void* values_;
bool empty_weight_flag_;
};
@@ -244,6 +367,7 @@ class TRT_TensorOrWeights {
: tensor_(tensor), weights_(DT_FLOAT), variant_(TRT_NODE_TENSOR) {}
explicit TRT_TensorOrWeights(const TRT_ShapedWeights& weights)
: tensor_(nullptr), weights_(weights), variant_(TRT_NODE_WEIGHTS) {}
+ // TODO(aaroey): use rvalue reference.
TRT_TensorOrWeights(const TRT_TensorOrWeights& rhs)
: tensor_(rhs.tensor_), weights_(rhs.weights_), variant_(rhs.variant_) {}
~TRT_TensorOrWeights() {}
@@ -252,19 +376,19 @@ class TRT_TensorOrWeights {
bool is_weights() const { return variant_ == TRT_NODE_WEIGHTS; }
nvinfer1::ITensor* tensor() {
- CHECK_EQ(is_tensor(), true);
+ CHECK(is_tensor());
return tensor_;
}
const nvinfer1::ITensor* tensor() const {
- CHECK_EQ(is_tensor(), true);
+ CHECK(is_tensor());
return tensor_;
}
TRT_ShapedWeights& weights() {
- CHECK_EQ(is_weights(), true);
+ CHECK(is_weights());
return weights_;
}
const TRT_ShapedWeights& weights() const {
- CHECK_EQ(is_weights(), true);
+ CHECK(is_weights());
return weights_;
}
nvinfer1::Dims shape() const {
@@ -288,21 +412,25 @@ class TFAttrs {
attrs_.insert({attr.first, &attr.second});
}
}
- bool count(string key) const { return attrs_.count(key); }
- tensorflow::AttrValue const* at(string key) const {
+
+ bool count(const string& key) const { return attrs_.count(key); }
+
+ tensorflow::AttrValue const* at(const string& key) const {
if (!attrs_.count(key)) {
LOG(FATAL) << "Attribute not found: " << key;
}
return attrs_.at(key);
}
+
template <typename T>
T get(const string& key) const;
+
template <typename T>
T get(const string& key, const T& default_value) const {
return attrs_.count(key) ? this->get<T>(key) : default_value;
}
- std::vector<string> GetAllAttrKey() {
+ std::vector<string> GetAllAttrKeys() const {
std::vector<string> attr_list;
for (const auto& attr_item : attrs_) {
attr_list.emplace_back(attr_item.first);
@@ -337,15 +465,6 @@ std::vector<string> TFAttrs::get<std::vector<string>>(const string& key) const {
auto attr = this->at(key)->list().s();
return std::vector<string>(attr.begin(), attr.end());
}
-template <>
-nvinfer1::Dims TFAttrs::get<nvinfer1::Dims>(const string& key) const {
- auto values = this->get<std::vector<int>>(key);
- nvinfer1::Dims dims;
- dims.nbDims = values.size();
- std::copy(values.begin(), values.end(), dims.d);
- // Note: No dimension type information is included
- return dims;
-}
template <>
nvinfer1::DataType TFAttrs::get<nvinfer1::DataType>(const string& key) const {
@@ -371,10 +490,11 @@ bool TFAttrs::get<bool>(const string& key) const {
}
// TODO(jie): reorder4 & reorder2 should be merged?
+// TODO(aaroey): fix the order of parameters.
template <typename T>
-void Reorder4(nvinfer1::DimsNCHW shape, const T* idata,
- nvinfer1::DimsNCHW istrides, T* odata,
- nvinfer1::DimsNCHW ostrides) {
+void Reorder4(const nvinfer1::DimsNCHW& shape, const T* idata,
+ const nvinfer1::DimsNCHW& istrides, T* odata,
+ const nvinfer1::DimsNCHW& ostrides) {
for (int n = 0; n < shape.n(); ++n) {
for (int c = 0; c < shape.c(); ++c) {
for (int h = 0; h < shape.h(); ++h) {
@@ -389,12 +509,13 @@ void Reorder4(nvinfer1::DimsNCHW shape, const T* idata,
}
template <typename T>
-void Reorder2(nvinfer1::DimsHW shape, const T* idata, nvinfer1::DimsHW istrides,
- T* odata, nvinfer1::DimsHW ostrides) {
+void Reorder2(const nvinfer1::DimsHW& shape, const T* idata,
+ const nvinfer1::DimsHW& istrides, T* odata,
+ const nvinfer1::DimsHW& ostrides) {
for (int h = 0; h < shape.h(); ++h) {
for (int w = 0; w < shape.w(); ++w) {
odata[h * ostrides.h() + w * ostrides.w()] =
- idata[h * ostrides.h() + w * ostrides.w()];
+ idata[h * istrides.h() + w * istrides.w()];
}
}
}
@@ -402,16 +523,17 @@ void Reorder2(nvinfer1::DimsHW shape, const T* idata, nvinfer1::DimsHW istrides,
// TODO(jie): fallback to tensorflow!!
void ReorderCKtoKC(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights) {
- int c = iweights.shape_.d[0];
- int k = iweights.shape_.d[1];
+ const int c = iweights.shape_.d[0];
+ const int k = iweights.shape_.d[1];
oweights->shape_.d[0] = k;
oweights->shape_.d[1] = c;
- nvinfer1::DimsHW istrides = {1, k};
- nvinfer1::DimsHW ostrides = {c, 1};
+ const nvinfer1::DimsHW istrides = {1, k};
+ const nvinfer1::DimsHW ostrides = {c, 1};
switch (iweights.type_) {
case tensorflow::DataType::DT_FLOAT: {
Reorder2({k, c}, static_cast<float const*>(iweights.GetValues()),
istrides,
+ // TODO(aaroey): get rid of all the const_cast like this.
static_cast<float*>(const_cast<void*>(oweights->GetValues())),
ostrides);
break;
@@ -434,21 +556,24 @@ void ReorderRSCKToKCRS(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights, int num_groups) {
CHECK_EQ(iweights.type_, oweights->type_);
CHECK_EQ(iweights.size_bytes(), oweights->size_bytes());
- int r = iweights.shape_.d[0];
- int s = iweights.shape_.d[1];
- // TRT requires GKcRS, while TF depthwise has RSCK
- // where c=1, C=G
+  // K indexes over output channels, C over input channels, and R and S over
+  // the height and width of the convolution.
+ const int r = iweights.shape_.d[0];
+ const int s = iweights.shape_.d[1];
+ // TRT requires GKcRS, while TF depthwise has RSCK where c=1, C=G
VLOG(2) << "num_groups: " << num_groups;
- int c = iweights.shape_.d[2] / num_groups;
+ const int c = iweights.shape_.d[2] / num_groups;
VLOG(2) << "c" << iweights.shape_.d[2] << " then " << c;
- int k = iweights.shape_.d[3] * num_groups;
+ const int k = iweights.shape_.d[3] * num_groups;
VLOG(2) << "k" << iweights.shape_.d[3] << " then " << k;
+ VLOG(2) << "r" << iweights.shape_.d[0] << " then " << r;
+ VLOG(2) << "s" << iweights.shape_.d[1] << " then " << s;
oweights->shape_.d[0] = k / num_groups;
oweights->shape_.d[1] = c * num_groups;
oweights->shape_.d[2] = r;
oweights->shape_.d[3] = s;
- nvinfer1::DimsNCHW istrides = {1, k, s * k * c, c * k};
- nvinfer1::DimsNCHW ostrides = {c * r * s, r * s, s, 1};
+ const nvinfer1::DimsNCHW istrides = {1, k, s * k * c, c * k};
+ const nvinfer1::DimsNCHW ostrides = {c * r * s, r * s, s, 1};
switch (iweights.type_) {
case tensorflow::DataType::DT_FLOAT: {
Reorder4({k, c, r, s}, static_cast<float const*>(iweights.GetValues()),
@@ -480,11 +605,14 @@ using OpConverter =
std::vector<TRT_TensorOrWeights>*)>;
class Converter {
+ // TODO(aaroey): fix the order of members.
std::unordered_map<string, TRT_TensorOrWeights> trt_tensors_;
std::unordered_map<string, OpConverter> op_registry_;
OpConverter plugin_converter_;
nvinfer1::INetworkDefinition* trt_network_;
std::list<std::vector<uint8_t>> temp_bufs_;
+  // TODO(aaroey): inline the definition of TRTWeightStore here, and add APIs
+  // to operate on the stored weights instead of manipulating them directly.
TRTWeightStore* weight_store_;
bool fp16_;
void register_op_converters();
@@ -492,7 +620,7 @@ class Converter {
std::vector<TRT_TensorOrWeights>* inputs) {
for (auto const& input_name : node_def.input()) {
/*************************************************************************
- * TODO(jie) handle case 1) here
+ * TODO(jie): handle case 1) here.
* Normalizes the inputs and extracts associated metadata:
* 1) Inputs can contain a colon followed by a suffix of characters.
* That suffix may be a single number (e.g. inputName:1) or several
@@ -506,6 +634,7 @@ class Converter {
if (input_name[0] == '^') continue;
string name = input_name;
auto first = name.find_first_of(':');
+      // TODO(aaroey): why is the colon removed but not the zero? A bug?
if (first != string::npos && first + 2 == name.size() &&
name[first + 1] == '0')
name.erase(first);
@@ -514,12 +643,13 @@ class Converter {
if (trt_tensors_.count(name)) {
inputs->push_back(trt_tensors_.at(name));
} else {
- string str("Node ");
- StrAppend(&str, node_def.name(), " should have an input named '", name,
+ // TODO(aaroey): this should not happen, make it a CHECK.
+ // TODO(aaroey): use StrCat for pattern like this.
+ string msg("Node ");
+ StrAppend(&msg, node_def.name(), " should have an input named '", name,
"' but it is not available");
- LOG(WARNING) << "input: " << name << " not available for node at "
- << node_def.name();
- return tensorflow::errors::InvalidArgument(str);
+ LOG(ERROR) << msg;
+ return tensorflow::errors::InvalidArgument(msg);
}
}
return tensorflow::Status::OK();
@@ -540,6 +670,7 @@ class Converter {
weights.SetValues(weight_store_->store_.back().data());
return weights;
}
+ // TODO(aaroey): fix all the namings.
bool isFP16() { return fp16_; }
TRT_ShapedWeights get_temp_weights_like(const TRT_ShapedWeights& weights) {
return this->get_temp_weights(weights.type_, weights.shape_);
@@ -548,9 +679,10 @@ class Converter {
tensorflow::Status convert_node(const tensorflow::NodeDef& node_def) {
std::vector<TRT_TensorOrWeights> inputs;
TF_RETURN_IF_ERROR(this->get_inputs(node_def, &inputs));
- string op = node_def.op();
+ const string& op = node_def.op();
std::vector<TRT_TensorOrWeights> outputs;
if (PluginFactoryTensorRT::GetInstance()->IsPlugin(op)) {
+ // TODO(aaroey): plugin_converter_ is not set, fix it.
TF_RETURN_IF_ERROR(plugin_converter_(*this, node_def, inputs, &outputs));
} else {
if (!op_registry_.count(op)) {
@@ -561,7 +693,7 @@ class Converter {
TF_RETURN_IF_ERROR(op_converter(*this, node_def, inputs, &outputs));
}
for (size_t i = 0; i < outputs.size(); ++i) {
- TRT_TensorOrWeights output = outputs.at(i);
+ TRT_TensorOrWeights& output = outputs[i];
// TODO(jie): tf protobuf seems to be omitting the :0 suffix
string output_name = node_def.name();
if (i != 0) output_name = StrCat(output_name, ":", i);
@@ -579,26 +711,29 @@ class Converter {
nvinfer1::INetworkDefinition* network() { return trt_network_; }
- TRT_TensorOrWeights get_tensor(string name) {
+ TRT_TensorOrWeights get_tensor(const string& name) {
if (!trt_tensors_.count(name)) {
return TRT_TensorOrWeights(nullptr);
}
return trt_tensors_.at(name);
}
- bool insert_input_tensor(string name, nvinfer1::ITensor* tensor) {
+ bool insert_input_tensor(const string& name, nvinfer1::ITensor* tensor) {
return trt_tensors_.insert({name, TRT_TensorOrWeights(tensor)}).second;
}
nvinfer1::ITensor* TransposeTensor(nvinfer1::ITensor* input_tensor,
- std::vector<int> order) {
- auto dims = input_tensor->getDimensions();
+ const std::vector<int>& order) {
+ const auto dims = input_tensor->getDimensions();
// TODO(jie): change the return to status and properly exit
if (order.size() - 1 != size_t(dims.nbDims))
LOG(ERROR) << "Dimension does not match, fail gracefully";
nvinfer1::IShuffleLayer* layer = this->network()->addShuffle(*input_tensor);
+ if (layer == nullptr) {
+ return nullptr;
+ }
nvinfer1::Permutation permutation;
for (int32_t i = 0; i < dims.nbDims; ++i) {
permutation.order[i] = order[i + 1] - 1;
@@ -629,13 +764,14 @@ TRT_ShapedWeights ConvertFP32ToFP16(Converter& ctx,
}
return weights;
}
+
// ****************************************************************************
// Constant folding functions
// TODO(jie): once optimizer kicks in, we should have done constant folding
// there.
-//*****************************************************************************/
+// *****************************************************************************
struct LambdaFactory {
- enum class OP_CATEGORY : int { RSQRT = 0, NEG, ADD, MUL, SUB };
+ enum class OP_CATEGORY : int { RSQRT = 0, NEG, ADD, MUL, SUB, RECIP };
OP_CATEGORY op;
template <typename T>
@@ -647,6 +783,8 @@ struct LambdaFactory {
}
case OP_CATEGORY::NEG:
return [](T t) -> T { return -t; };
+ case OP_CATEGORY::RECIP:
+ return [](T t) -> T { return 1.0 / t; };
default:
VLOG(2) << "Not supported op for unary: " << static_cast<int>(op);
return nullptr;
@@ -680,7 +818,6 @@ struct LambdaFactory {
VLOG(2) << "LAMBDA VAL : " << val;
return l + val;
};
- // Return [val](T l)-> T {return l+val;};
case OP_CATEGORY::SUB:
return [val](T l) -> T {
VLOG(2) << "LAMBDA VAL : " << val;
@@ -740,11 +877,13 @@ std::function<Eigen::half(Eigen::half)> LambdaFactory::unary<Eigen::half>() {
}
case OP_CATEGORY::NEG:
return [](Eigen::half t) -> Eigen::half { return -t; };
+ // TODO(aaroey): can we support RECIP?
default:
VLOG(2) << "Not supported op for unary: " << static_cast<int>(op);
return nullptr;
}
}
+
tensorflow::Status UnaryCompute(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights,
LambdaFactory unary_op) {
@@ -790,6 +929,7 @@ tensorflow::Status BinaryCompute(const TRT_ShapedWeights& iweights_l,
if (iweights_l.count() != iweights_r.count()) {
      // We only support broadcast of RankZero
if (iweights_l.count() == 1) {
+ // TODO(aaroey): Remove loggings like this.
VLOG(2) << "I bet it is not working!" << (*inp_l);
std::transform(inp_r, inp_r + iweights_r.count(), oup,
binary_op.broadcast_l<float>(*inp_l));
@@ -842,117 +982,21 @@ tensorflow::Status BinaryCompute(const TRT_ShapedWeights& iweights_l,
return tensorflow::Status::OK();
}
-tensorflow::Status ConstantFoldUnary(
- Converter& ctx, const tensorflow::NodeDef& node_def,
- const std::vector<TRT_TensorOrWeights>& inputs,
- std::vector<TRT_TensorOrWeights>* outputs) {
- TRT_ShapedWeights weights_input = inputs.at(0).weights();
-
- // Allocate output weights
- TRT_ShapedWeights weights_output = ctx.get_temp_weights_like(weights_input);
-
- // FIXME assume type matches input weights
- // Get trt type & shape
- // Maybe this part has to be moved into the block of rsqrt later
- // Check type consistency
- CHECK_EQ(weights_input.type_,
- TFAttrs(node_def).get<tensorflow::DataType>("T"));
-
- LambdaFactory unary_op;
- if (node_def.op() == "Rsqrt") {
- // Compute rsqrt
- unary_op.op = LambdaFactory::OP_CATEGORY::RSQRT;
- auto ret = UnaryCompute(weights_input, &weights_output, unary_op);
- // Pass the output
- if (ret == tensorflow::Status::OK()) {
- outputs->push_back(TRT_TensorOrWeights(weights_output));
- }
- return ret;
- } else {
- return tensorflow::errors::Unimplemented("Binary op not supported: " +
- node_def.op());
- }
-}
-
-// TODO(jie,ben) broadcast is needed yet not implemented
-// Let's get the simple stuff working first. Maybe we should fall back to TF
-// approach for constant folding
-tensorflow::Status ConstantFoldBinary(
- Converter& ctx, const tensorflow::NodeDef& node_def,
- const std::vector<TRT_TensorOrWeights>& inputs,
- std::vector<TRT_TensorOrWeights>* outputs) {
- TRT_ShapedWeights weights_input_l = inputs.at(0).weights();
- TRT_ShapedWeights weights_input_r = inputs.at(1).weights();
-
- // Check type consistency
- CHECK_EQ(weights_input_l.type_, weights_input_r.type_);
-
- if (weights_input_l.shape_.nbDims != weights_input_r.shape_.nbDims)
- return tensorflow::errors::Unimplemented(
- "Binary op implicit broadcast not supported: " + node_def.op());
-
- // TODO(jie): constant fold should really fall back to TF.
- int num_dims = weights_input_l.shape_.nbDims;
- nvinfer1::Dims output_shape;
- output_shape.nbDims = num_dims;
- VLOG(2) << "nb_dims: " << num_dims
- << ", the other: " << weights_input_r.shape_.nbDims;
- for (int i = 0; i < num_dims; i++) {
- if (weights_input_l.shape_.d[i] == weights_input_r.shape_.d[i]) {
- output_shape.d[i] = weights_input_l.shape_.d[i];
- } else if (weights_input_l.shape_.d[i] == 1 ||
- weights_input_r.shape_.d[i] == 1) {
- output_shape.d[i] =
- std::max(weights_input_l.shape_.d[i], weights_input_r.shape_.d[i]);
- } else {
- return tensorflow::errors::Unimplemented(
- "Binary op with incompatible shape at, " + node_def.op());
- }
- VLOG(2) << "left: " << weights_input_l.shape_.d[i]
- << "right: " << weights_input_r.shape_.d[i]
- << "output: " << output_shape.d[i];
- }
-
- // FIXME assume type matches input weights
- // Get trt type & shape
- TFAttrs attrs(node_def);
- // Maybe this part has to be moved into the block of rsqrt later
- tensorflow::DataType dtype = attrs.get<tensorflow::DataType>("T");
-
- // Allocate output weights
- TRT_ShapedWeights weights_output = ctx.get_temp_weights(dtype, output_shape);
-
- LambdaFactory binary_op;
- if (node_def.op() == "Sub") {
- binary_op.op = LambdaFactory::OP_CATEGORY::SUB;
- } else if (node_def.op() == "Mul") {
- binary_op.op = LambdaFactory::OP_CATEGORY::MUL;
- } else if (node_def.op() == "Add") {
- binary_op.op = LambdaFactory::OP_CATEGORY::ADD;
- } else {
- return tensorflow::errors::Unimplemented("Binary op not supported: " +
- node_def.op());
- }
- auto ret = BinaryCompute(weights_input_l, weights_input_r, &weights_output,
- binary_op);
-
- // Pass the output
- if (ret == tensorflow::Status::OK()) {
- outputs->push_back(TRT_TensorOrWeights(weights_output));
- }
-
- return ret;
-}
-
// TODO(jie): broadcast is needed yet not implemented.
// Only implemented channel wise for the time being
tensorflow::Status BinaryTensorOpWeight(
Converter& ctx, const tensorflow::NodeDef& node_def,
const nvinfer1::ITensor* tensor, TRT_ShapedWeights weights,
- std::vector<TRT_TensorOrWeights>* outputs) {
- // FIXME assume type matches input weights
- // Get trt type & shape
- // Maybe this part has to be moved into the block of rsqrt later
+ bool swapped_inputs, std::vector<TRT_TensorOrWeights>* outputs) {
+ // tensor is the left operand while weights is the right operand;
+  // when swapped_inputs is set to true, the two are swapped.
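+  // E.g. (illustrative) for op "Sub" with swapped_inputs == true, the result
+  // computed below is weights - tensor rather than tensor - weights.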
+ // TODO(aaroey): use a set.
+ if (node_def.op() != "Sub" && node_def.op() != "Add" &&
+ node_def.op() != "Mul" && node_def.op() != "Div" &&
+ node_def.op() != "RealDiv") {
+ return tensorflow::errors::Unimplemented(
+ "op not supported: " + node_def.op() + ", at: " + node_def.name());
+ }
// Check type consistency
nvinfer1::DataType ttype;
@@ -962,6 +1006,12 @@ tensorflow::Status BinaryTensorOpWeight(
auto dims_w = weights.shape_;
auto dims_t = tensor->getDimensions();
+ // TODO(jie): addScale checks for input tensor dimension
+ if (dims_t.nbDims != 3) {
+ return tensorflow::errors::InvalidArgument(
+ "addScale requires tensor with rank 3, " + node_def.name());
+ }
+
// default to element-wise
auto scale_mode = nvinfer1::ScaleMode::kELEMENTWISE;
@@ -1032,6 +1082,7 @@ tensorflow::Status BinaryTensorOpWeight(
permutation[dims_t.nbDims] = 1;
tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
permutation);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
} else {
return tensorflow::errors::InvalidArgument(
"Transpose cannot be applied, " + node_def.name());
@@ -1049,11 +1100,35 @@ tensorflow::Status BinaryTensorOpWeight(
// Maybe I should do a switch
if (node_def.op() == "Sub") {
- TRT_ShapedWeights neg_weights = ctx.get_temp_weights_like(weights);
- LambdaFactory unary_op;
- unary_op.op = LambdaFactory::OP_CATEGORY::NEG;
- TF_RETURN_IF_ERROR(UnaryCompute(weights, &neg_weights, unary_op));
- shift_weights = neg_weights;
+ if (swapped_inputs) {
+ shift_weights = weights;
+ nvinfer1::IUnaryLayer* layer =
+ ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+ nvinfer1::UnaryOperation::kNEG);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+ tensor = layer->getOutput(0);
+ } else {
+ TRT_ShapedWeights neg_weights = ctx.get_temp_weights_like(weights);
+ LambdaFactory unary_op;
+ unary_op.op = LambdaFactory::OP_CATEGORY::NEG;
+ TF_RETURN_IF_ERROR(UnaryCompute(weights, &neg_weights, unary_op));
+ shift_weights = neg_weights;
+ }
+ } else if (node_def.op() == "Div" || node_def.op() == "RealDiv") {
+ if (swapped_inputs) {
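+      // Computes weights / tensor as (1 / tensor) * weights: a kRECIP unary
+      // on the tensor, with the weights used as the scale factor below.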
+ scale_weights = weights;
+ nvinfer1::IUnaryLayer* layer =
+ ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+ nvinfer1::UnaryOperation::kRECIP);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+ tensor = layer->getOutput(0);
+ } else {
+ TRT_ShapedWeights recip_weights = ctx.get_temp_weights_like(weights);
+ LambdaFactory unary_op;
+ unary_op.op = LambdaFactory::OP_CATEGORY::RECIP;
+ TF_RETURN_IF_ERROR(UnaryCompute(weights, &recip_weights, unary_op));
+ scale_weights = recip_weights;
+ }
} else if (node_def.op() == "Mul") {
scale_weights = weights;
} else if (node_def.op() == "Add") {
@@ -1066,11 +1141,13 @@ tensorflow::Status BinaryTensorOpWeight(
nvinfer1::IScaleLayer* layer = ctx.network()->addScale(
*const_cast<nvinfer1::ITensor*>(tensor), scale_mode, shift_weights,
scale_weights, power_weights);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
// transpose back dimension
if (permutation_flag) {
output_tensor = ctx.TransposeTensor(output_tensor, permutation);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
}
// Pass the output
@@ -1094,20 +1171,31 @@ tensorflow::Status ConvertConv2DHelper(
if (data_format == "NHWC") {
tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
{0, 3, 1, 2});
+ TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
h_index = 1;
w_index = 2;
// TODO(jie): transpose it
}
// tensor after transpose (NCHW)
- auto tensor_dim = tensor->getDimensions();
+ const auto tensor_dim = tensor->getDimensions();
int num_groups = group;
- if (num_groups == 0) // depthwise convolution
- num_groups = tensor_dim.d[0];
+ if (num_groups == 0) num_groups = tensor_dim.d[0]; // depthwise convolution
VLOG(2) << "groups count: " << num_groups;
TRT_ShapedWeights weights_rsck = inputs.at(1).weights();
+
+ VLOG(2) << "weight shape: " << weights_rsck.shape_.nbDims;
+ for (int i = 0; i < weights_rsck.shape_.nbDims; i++) {
+ VLOG(2) << weights_rsck.shape_.d[i];
+ }
+
+ if (weights_rsck.shape_.nbDims != 4) {
+ return tensorflow::errors::Internal(
+ "Conv2D expects kernel of dimension 4, at: " + node_def.name());
+ }
+
if (ctx.isFP16()) {
weights_rsck = ConvertFP32ToFP16(ctx, inputs.at(1).weights());
}
@@ -1115,18 +1203,22 @@ tensorflow::Status ConvertConv2DHelper(
TRT_ShapedWeights weights = ctx.get_temp_weights_like(weights_rsck);
ReorderRSCKToKCRS(weights_rsck, &weights, num_groups);
TRT_ShapedWeights biases(weights.type_);
- int noutput = weights.shape_.d[0] * num_groups;
+ const int noutput = weights.shape_.d[0] * num_groups;
nvinfer1::DimsHW kernel_size;
kernel_size.h() = weights.shape_.d[2];
kernel_size.w() = weights.shape_.d[3];
+ VLOG(2) << "RSCK: ";
+ for (int i = 0; i < 4; i++) {
+ VLOG(2) << " " << weights.shape_.d[i];
+ }
VLOG(2) << "kernel size: " << kernel_size.h() << ", " << kernel_size.w();
// TODO(jie): stride. (NHWC/NCHW)
- auto tf_stride = attrs.get<std::vector<int>>("strides");
+ const auto tf_stride = attrs.get<std::vector<int>>("strides");
VLOG(2) << "h_INDEX" << h_index << ", w_index " << w_index;
VLOG(2) << "stride!!!: " << tf_stride[0] << tf_stride[1] << tf_stride[2]
<< tf_stride[3];
- nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
+ const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
std::vector<std::pair<int, int>> padding;
// TODO(jie): padding.
@@ -1154,6 +1246,7 @@ tensorflow::Status ConvertConv2DHelper(
*const_cast<nvinfer1::ITensor*>(tensor),
nvinfer1::DimsHW(padding[0].first, padding[1].first),
nvinfer1::DimsHW(padding[0].second, padding[1].second));
+ TFTRT_RETURN_ERROR_IF_NULLPTR(pad_layer, node_def.name());
padding = {{0, 0}, {0, 0}};
tensor = pad_layer->getOutput(0);
auto dim_after = tensor->getDimensions();
@@ -1164,6 +1257,7 @@ tensorflow::Status ConvertConv2DHelper(
nvinfer1::IConvolutionLayer* layer =
ctx.network()->addConvolution(*const_cast<nvinfer1::ITensor*>(tensor),
noutput, kernel_size, weights, biases);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStride(stride);
layer->setPadding({padding[0].first, padding[1].first});
@@ -1178,6 +1272,7 @@ tensorflow::Status ConvertConv2DHelper(
if (data_format == "NHWC") {
// TODO(jie): transpose it back!
output_tensor = ctx.TransposeTensor(output_tensor, {0, 2, 3, 1});
+ TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
} else {
VLOG(2) << "NCHW !!!!";
}
@@ -1199,35 +1294,91 @@ tensorflow::Status ConvertConv2DHelper(
node_def.name());
}
+// Helper function that converts the input into a tensor with the shape
+// specified by dims.
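+// Tensors are reshaped with an IShuffleLayer; weights are materialized with an
+// IConstantLayer (TensorRT 4+ only, so weights fail on TRT 3). A minimal
+// illustrative call, using names from this file:
+//   const nvinfer1::ITensor* tensor = nullptr;
+//   TFTRT_RETURN_ERROR_IF_FALSE(
+//       PrepareTensorForShape(ctx, inputs.at(0), dims, &tensor),
+//       node_def.name());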
+bool PrepareTensorForShape(Converter& ctx, const TRT_TensorOrWeights& input,
+ const nvinfer1::Dims& dims,
+ const nvinfer1::ITensor** tensor) {
+ if (input.is_tensor()) {
+ if (DimsEqual(input.shape(), dims)) {
+ *tensor = input.tensor();
+ } else {
+ nvinfer1::IShuffleLayer* layer = ctx.network()->addShuffle(
+ *const_cast<nvinfer1::ITensor*>(input.tensor()));
+ if (layer != nullptr) {
+ layer->setReshapeDimensions(dims);
+ *tensor = layer->getOutput(0);
+ } else {
+ return false;
+ }
+ }
+ } else {
+#if NV_TENSORRT_MAJOR > 3
+ nvinfer1::IConstantLayer* layer =
+ ctx.network()->addConstant(dims, input.weights());
+ if (layer != nullptr) {
+ *tensor = layer->getOutput(0);
+ } else {
+ return false;
+ }
+#else
+ return false;
+#endif
+ }
+ return true;
+}
+
tensorflow::Status BinaryTensorOpTensor(
Converter& ctx, const tensorflow::NodeDef& node_def,
- const nvinfer1::ITensor* tensor_l, const nvinfer1::ITensor* tensor_r,
+ const TRT_TensorOrWeights& operand_l, const TRT_TensorOrWeights& operand_r,
std::vector<TRT_TensorOrWeights>* outputs) {
static const std::unordered_map<string, nvinfer1::ElementWiseOperation> ops{
{"Add", nvinfer1::ElementWiseOperation::kSUM},
{"Mul", nvinfer1::ElementWiseOperation::kPROD},
{"Sub", nvinfer1::ElementWiseOperation::kSUB},
{"Div", nvinfer1::ElementWiseOperation::kDIV},
+ {"RealDiv", nvinfer1::ElementWiseOperation::kDIV},
+ {"Minimum", nvinfer1::ElementWiseOperation::kMIN},
+ {"Maximum", nvinfer1::ElementWiseOperation::kMAX},
};
- // FIXME assume type matches input weights
+ const nvinfer1::ITensor* tensor_l;
+ const nvinfer1::ITensor* tensor_r;
+
+ nvinfer1::Dims dim_l;
+ nvinfer1::Dims dim_r;
+
+ if (!TensorRTGetBroadcastShape(operand_l.shape(), operand_l.is_tensor(),
+ operand_r.shape(), operand_r.is_tensor(),
+ &dim_l, &dim_r)) {
+ return tensorflow::errors::InvalidArgument(
+ "Binary op broadcast scheme not supported by TensorRT op: " +
+ node_def.op() + ", at: " + node_def.name());
+ }
+
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, operand_l, dim_l, &tensor_l), node_def.name());
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, operand_r, dim_r, &tensor_r), node_def.name());
+
// get trt type & shape
TFAttrs attrs(node_def);
// maybe this part has to be moved into the block of rsqrt later
nvinfer1::DataType dtype = attrs.get<nvinfer1::DataType>("T");
// check type consistency
- CHECK_EQ_TYPE(tensor_l->getType(), dtype);
- CHECK_EQ_TYPE(tensor_r->getType(), dtype);
+ TFTRT_CHECK_EQ_TYPE(tensor_l->getType(), dtype);
+ TFTRT_CHECK_EQ_TYPE(tensor_r->getType(), dtype);
auto op_pair = ops.find(node_def.op());
- if (op_pair == ops.end())
+ if (op_pair == ops.end()) {
return tensorflow::errors::Unimplemented(
- "binary op: " + node_def.op() +
- " not supported at: " + node_def.name());
+ "binary op: ", node_def.op(), " not supported at: ", node_def.name());
+ }
nvinfer1::IElementWiseLayer* layer = ctx.network()->addElementWise(
+ // TODO(aaroey): will tensor_l/tensor_r get modified?
*const_cast<nvinfer1::ITensor*>(tensor_l),
*const_cast<nvinfer1::ITensor*>(tensor_r), op_pair->second);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
@@ -1254,7 +1405,7 @@ tensorflow::Status ConvertPlugin(Converter& ctx,
// passing attributes
// TODO(jie): support more general attribute
TFAttrs attrs(node_def);
- auto attr_key_vector = attrs.GetAllAttrKey();
+ auto attr_key_vector = attrs.GetAllAttrKeys();
for (auto attr_key : attr_key_vector) {
// TODO(jie): support only list of float for toy example here.
auto data = attrs.get<std::vector<float>>(attr_key);
@@ -1275,29 +1426,6 @@ tensorflow::Status ConvertPlugin(Converter& ctx,
return tensorflow::Status::OK();
}
-tensorflow::Status ConvertPlaceholder(
- Converter& ctx, const tensorflow::NodeDef& node_def,
- const std::vector<TRT_TensorOrWeights>& inputs,
- std::vector<TRT_TensorOrWeights>* outputs) {
- VLOG(2) << "Placeholder should have been replace already";
- return tensorflow::errors::Unimplemented("cannot convert Placeholder op");
- // OK this make sense since we are supposed to replace it with input
- TFAttrs attrs(node_def);
- nvinfer1::DataType dtype = attrs.get<nvinfer1::DataType>("dtype");
- nvinfer1::Dims dims = attrs.get<nvinfer1::Dims>("shape");
-
- dims.nbDims--;
- for (int i = 0; i < dims.nbDims; i++) dims.d[i] = dims.d[i + 1];
-
- nvinfer1::ITensor* output =
- ctx.network()->addInput(node_def.name().c_str(), dtype, dims);
- if (!output) {
- return tensorflow::errors::InvalidArgument("Failed to create Input layer");
- }
- outputs->push_back(TRT_TensorOrWeights(output));
- return tensorflow::Status::OK();
-}
-
tensorflow::Status ConvertConv2D(Converter& ctx,
const tensorflow::NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
@@ -1323,65 +1451,64 @@ tensorflow::Status ConvertPool(Converter& ctx,
int h_index = 2;
int w_index = 3;
- auto data_format = attrs.get<string>("data_format");
+ const auto data_format = attrs.get<string>("data_format");
if (data_format == "NHWC") {
h_index = 1;
w_index = 2;
tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
{0, 3, 1, 2});
- } else {
- VLOG(2) << "NCHW !!!!";
+ TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
}
+
nvinfer1::PoolingType type;
- // TODO(jie): support other pooling type
- if (node_def.op() == "MaxPool")
+ if (node_def.op() == "MaxPool") {
type = nvinfer1::PoolingType::kMAX;
- else if (node_def.op() == "AvgPool")
+ } else if (node_def.op() == "AvgPool") {
type = nvinfer1::PoolingType::kAVERAGE;
- else
- return tensorflow::errors::Unimplemented("Only supports Max pool");
+ } else {
+ return tensorflow::errors::Unimplemented("Unsupported pool type: ",
+ node_def.op());
+ }
- // TODO(jie): NCHW
- auto tf_stride = attrs.get<std::vector<int>>("strides");
- nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
+ const auto tf_stride = attrs.get<std::vector<int>>("strides");
+ const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
- auto tf_kernel = attrs.get<std::vector<int>>("ksize");
- nvinfer1::DimsHW ksize(tf_kernel[h_index], tf_kernel[w_index]);
+ const auto tf_kernel = attrs.get<std::vector<int>>("ksize");
+ const nvinfer1::DimsHW ksize(tf_kernel[h_index], tf_kernel[w_index]);
auto tensor_dim = tensor->getDimensions();
std::vector<std::pair<int, int>> padding;
- // TODO(jie): padding.
- if (attrs.get<string>("padding") == "SAME") {
+ const string padding_type = attrs.get<string>("padding");
+ if (padding_type == "SAME") {
// This is NCHW tensor with no batch dimension.
// 1 -> h
// 2 -> w
padding = CreateSamePadding(
stride, ksize,
{static_cast<int>(tensor_dim.d[1]), static_cast<int>(tensor_dim.d[2])});
- } else if (attrs.get<string>("padding") == "VALID") {
- // No padding for valid padding here
- VLOG(2) << "No padding added for VALID padding in pool" << node_def.name();
+ } else if (padding_type == "VALID") {
padding = {{0, 0}, {0, 0}};
} else {
- return tensorflow::errors::Unimplemented(
- "Current MaxPool cannot support padding other than SAME");
+ return tensorflow::errors::Unimplemented("Unsupported padding type: ",
+ padding_type);
}
if (padding[0].first != padding[0].second ||
padding[1].first != padding[1].second) {
- // TODO(jie): handle asymmetric padding
VLOG(2) << "Padding!!!: " << padding[0].first << padding[0].second
<< padding[1].first << padding[1].second;
auto pad_layer = ctx.network()->addPadding(
*const_cast<nvinfer1::ITensor*>(tensor),
nvinfer1::DimsHW(padding[0].first, padding[1].first),
nvinfer1::DimsHW(padding[0].second, padding[1].second));
+ TFTRT_RETURN_ERROR_IF_NULLPTR(pad_layer, node_def.name());
padding = {{0, 0}, {0, 0}};
tensor = pad_layer->getOutput(0);
}
nvinfer1::IPoolingLayer* layer = ctx.network()->addPooling(
*const_cast<nvinfer1::ITensor*>(tensor), type, ksize);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStride(stride);
layer->setPadding({padding[0].first, padding[1].first});
@@ -1389,10 +1516,8 @@ tensorflow::Status ConvertPool(Converter& ctx,
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
if (data_format == "NHWC") {
- // TODO(jie): transpose it back!
output_tensor = ctx.TransposeTensor(output_tensor, {0, 2, 3, 1});
- } else {
- VLOG(2) << "NCHW !!!!";
+ TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
}
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
@@ -1405,6 +1530,7 @@ tensorflow::Status ConvertActivation(
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
nvinfer1::IActivationLayer* layer = ctx.network()->addActivation(
*const_cast<nvinfer1::ITensor*>(tensor), nvinfer1::ActivationType::kRELU);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
@@ -1415,40 +1541,61 @@ tensorflow::Status ConvertScale(Converter& ctx,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs) {
if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
- !inputs.at(1).is_weights())
+ !inputs.at(1).is_weights()) {
return tensorflow::errors::Unimplemented(
- "Only supports tensor op weight for now, at " + node_def.name());
- // Implement tensor binaryOp weight [channel wise] for now;
- const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
+ "ConvertScale only supports tensor<op>weight: ", node_def.name());
+ }
+ const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
TRT_ShapedWeights weights = inputs.at(1).weights();
if (ctx.isFP16()) {
weights = ConvertFP32ToFP16(ctx, inputs.at(1).weights());
}
TRT_ShapedWeights empty_weights(weights.type_);
-
TFAttrs attrs(node_def);
- // Transpose NHWC
- auto data_format = attrs.get<string>("data_format");
+ const auto data_format = attrs.get<string>("data_format");
+ int channel_index;
+ const auto dims = tensor->getDimensions();
if (data_format == "NHWC") {
- tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
- {0, 3, 1, 2});
- // TODO(jie): transpose it
+    // 1). NHWC is really N+HWC
+ channel_index = dims.nbDims - 1; // batch dimension is implicit here!
} else {
- VLOG(2) << "NCHW !!!!";
+ // 2). NCHW is really N+CHW
+ channel_index = dims.nbDims - 3; // batch dimension is implicit here!
}
- auto dims = tensor->getDimensions();
- VLOG(2) << "tensor dimensions: " << dims.nbDims;
- for (int i = 0; i < dims.nbDims; i++) {
- VLOG(2) << "i: " << dims.d[i];
+ nvinfer1::Permutation permutation;
+ for (int32_t i = 0; i < dims.nbDims; ++i) {
+ permutation.order[i] = i;
}
- dims = weights.shape_;
- VLOG(2) << "tensor dimensions: " << dims.nbDims;
- for (int i = 0; i < dims.nbDims; i++) {
- VLOG(2) << "i: " << dims.d[i];
+
+ if (channel_index >= 0) {
+ permutation.order[0] = channel_index;
+ permutation.order[channel_index] = 0;
+ } else {
+ return tensorflow::errors::Unimplemented(
+ "TFTRT::BiasAdd cannot apply on batch dimension, at ", node_def.name());
+ }
+
+  // TensorRT addScale requires the input to be of rank 3, so we need to apply
+  // a transpose as well as a reshape.
+ if (channel_index != 0 || dims.nbDims != 3) {
+ nvinfer1::IShuffleLayer* shuffle_layer =
+ ctx.network()->addShuffle(*const_cast<nvinfer1::ITensor*>(tensor));
+ TFTRT_RETURN_ERROR_IF_NULLPTR(shuffle_layer, node_def.name());
+ nvinfer1::Dims reshape_dims;
+ reshape_dims.nbDims = 3;
+ reshape_dims.d[0] = 0; // 0 copy from the input
+ reshape_dims.d[1] = dims.nbDims >= 2 ? 0 : 1; // 0 copy from the input
+ reshape_dims.d[2] = dims.nbDims >= 3 ? -1 : 1; // -1 infer from the rest
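+    // E.g. an (implicit-batch) HWC input [224, 224, 3] is transposed below to
+    // [3, 224, 224] and reshaped with {0, 0, -1} (a no-op there); a rank-1
+    // input [c] is reshaped with {0, 1, 1} to [c, 1, 1], meeting addScale's
+    // rank-3 requirement.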
+ if (channel_index != 0) {
+ // maybe we do not need this check. concerned about TRT optimization
+ shuffle_layer->setFirstTranspose(permutation);
+ }
+ shuffle_layer->setReshapeDimensions(reshape_dims);
+ tensor = shuffle_layer->getOutput(0);
}
nvinfer1::ScaleMode mode = nvinfer1::ScaleMode::kCHANNEL;
@@ -1459,14 +1606,26 @@ tensorflow::Status ConvertScale(Converter& ctx,
nvinfer1::IScaleLayer* layer =
ctx.network()->addScale(*const_cast<nvinfer1::ITensor*>(tensor), mode,
weights, empty_weights, empty_weights);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
- if (data_format == "NHWC") {
- // TODO(jie): transpose it back!
- output_tensor = ctx.TransposeTensor(output_tensor, {0, 2, 3, 1});
- } else {
- VLOG(2) << "NCHW !!!!";
+
+ // restore transpose & reshape
+ if (channel_index != 0 || dims.nbDims != 3) {
+ nvinfer1::IShuffleLayer* shuffle_layer = ctx.network()->addShuffle(
+ *const_cast<nvinfer1::ITensor*>(output_tensor));
+ TFTRT_RETURN_ERROR_IF_NULLPTR(shuffle_layer, node_def.name());
+ nvinfer1::Dims reshape_dims = dims;
+ int tmp = reshape_dims.d[channel_index];
+ reshape_dims.d[channel_index] = reshape_dims.d[0];
+ reshape_dims.d[0] = tmp;
+ shuffle_layer->setReshapeDimensions(reshape_dims);
+ if (channel_index != 0) {
+ shuffle_layer->setSecondTranspose(permutation);
+ }
+ output_tensor = shuffle_layer->getOutput(0);
}
+
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
@@ -1483,11 +1642,13 @@ tensorflow::Status ConvertConst(Converter& ctx,
// Create shaped weights as output
tensorflow::Tensor tensor;
- if (!tensor.FromProto(weights_tensor))
- return tensorflow::errors::Internal("Cannot parse weight tensor proto: " +
+ if (!tensor.FromProto(weights_tensor)) {
+ return tensorflow::errors::Internal("Cannot parse weight tensor proto: ",
node_def.name());
+ }
TRT_ShapedWeights weights(dtype);
+ // TODO(aaroey): we should choose the array using dtype and shape.
if (!weights_tensor.float_val().empty()) {
VLOG(2) << "SCALAR!!!" << node_def.name();
nvinfer1::Dims scalar_shape;
@@ -1495,22 +1656,16 @@ tensorflow::Status ConvertConst(Converter& ctx,
VLOG(2) << "dimensions: " << tensor.dims();
VLOG(2) << "size: " << weights_tensor.float_val_size();
scalar_shape = GetTensorShape(tensor);
+ VLOG(2) << "details: ";
for (int i = 0; i < scalar_shape.nbDims; i++)
VLOG(2) << scalar_shape.d[i];
- if (GetShapeSize(scalar_shape) != weights_tensor.float_val_size()) {
- if (weights_tensor.float_val_size() == 1 ||
- scalar_shape.d[0] == weights_tensor.float_val_size()) {
- scalar_shape.nbDims = 1;
- // no dimension provided. flatten it
- scalar_shape.d[0] = weights_tensor.float_val_size();
- scalar_shape.type[0] = nvinfer1::DimensionType::kSPATIAL;
- } else {
- LOG(WARNING) << "Broadcast on weights only supports kCHANNEL and"
- << " kUNIFORM, at: " << node_def.name();
- string err_str("Broadcast method is not supported for '");
- StrAppend(&err_str, node_def.name(), "' of type ", node_def.op());
- return tensorflow::errors::InvalidArgument(err_str);
- }
+ if (GetShapeSize(scalar_shape) != weights_tensor.float_val_size() &&
+ weights_tensor.float_val_size() != 1) {
+ LOG(ERROR) << "Broadcast on weights only supports kCHANNEL and"
+ << " kUNIFORM, at: " << node_def.name();
+ string err_str("Broadcast method is not supported for '");
+ StrAppend(&err_str, node_def.name(), "' of type ", node_def.op());
+ return tensorflow::errors::InvalidArgument(err_str);
}
} else {
VLOG(2) << "Dimensions: " << tensor.dims();
@@ -1520,39 +1675,42 @@ tensorflow::Status ConvertConst(Converter& ctx,
scalar_shape.type[0] = nvinfer1::DimensionType::kSPATIAL;
for (int i = 1; i < nvinfer1::Dims::MAX_DIMS; i++) {
scalar_shape.d[i] = 0;
- scalar_shape.type[i] = nvinfer1::DimensionType::kSPATIAL;
}
}
+ // TODO(aaroey): use GetShapeSize().
size_t len_data = tensorflow::DataTypeSize(dtype);
for (int i = 0; i < scalar_shape.nbDims; i++) len_data *= scalar_shape.d[i];
ctx.weight_store()->store_.push_back(std::vector<uint8_t>(len_data));
void* dst = static_cast<void*>(&(ctx.weight_store()->store_.back()[0]));
- std::vector<float> tensor_data(
- weights_tensor.float_val().begin(),
- weights_tensor.float_val()
- .end()); // make a local copy first to flatten
- memcpy(dst, tensor_data.data(), len_data); // store into weight store
+ if (weights_tensor.float_val_size() == 1) {
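+      // A single float_val with a larger shape denotes a splat constant:
+      // replicate the one value across the whole shape.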
+ std::fill_n((float*)dst, GetShapeSize(scalar_shape),
+ *weights_tensor.float_val().begin());
+ } else {
+        // TODO(aaroey): get rid of this copy, since RepeatedField is always
+        // contiguous; the local copy was originally made to flatten data that
+        // did not have to be contiguous.
+ std::vector<float> tensor_data(weights_tensor.float_val().begin(),
+ weights_tensor.float_val().end());
+ memcpy(dst, tensor_data.data(), len_data); // store into weight store
+ }
+ VLOG(2) << "create shape details: ";
+ for (int i = 0; i < scalar_shape.nbDims; i++) VLOG(2) << scalar_shape.d[i];
weights = TRT_ShapedWeights(dtype, dst, scalar_shape);
} else if (!weights_tensor.int_val().empty()) {
+ // TODO(aaroey): this is very similar to the above code for float, merge
+ // them.
VLOG(2) << "int!!!" << node_def.name();
nvinfer1::Dims scalar_shape;
if (tensor.dims() > 0) {
VLOG(2) << "dimensions: " << tensor.dims();
scalar_shape = GetTensorShape(tensor);
- if (GetShapeSize(scalar_shape) != weights_tensor.int_val_size()) {
- if (weights_tensor.int_val_size() == 1 ||
- scalar_shape.d[0] == weights_tensor.int_val_size()) {
- scalar_shape.nbDims = 1;
- // no dimension provided. flatten it
- scalar_shape.d[0] = weights_tensor.int_val_size();
- scalar_shape.type[0] = nvinfer1::DimensionType::kSPATIAL;
- } else {
- LOG(WARNING) << "Broadcast on weights only supports kCHANNEL and"
- << " kUNIFORM, at: " << node_def.name();
- string err_str("Broadcast method is not supported for '");
- StrAppend(&err_str, node_def.name(), "' of type ", node_def.op());
- return tensorflow::errors::InvalidArgument(err_str);
- }
+ if (GetShapeSize(scalar_shape) != weights_tensor.int_val_size() &&
+ weights_tensor.int_val_size() != 1) {
+ LOG(WARNING) << "Broadcast on weights only supports kCHANNEL and"
+ << " kUNIFORM, at: " << node_def.name();
+ string err_str("Broadcast method is not supported for '");
+ StrAppend(&err_str, node_def.name(), "' of type ", node_def.op());
+ return tensorflow::errors::InvalidArgument(err_str);
}
} else {
VLOG(2) << "dimensions: " << tensor.dims();
@@ -1565,23 +1723,30 @@ tensorflow::Status ConvertConst(Converter& ctx,
scalar_shape.type[i] = nvinfer1::DimensionType::kSPATIAL;
}
}
- // we should not have converted //if (ctx.isFP16()) {
+    // we should not have converted to fp16 here
size_t len_data = tensorflow::DataTypeSize(dtype);
for (int i = 0; i < scalar_shape.nbDims; i++) len_data *= scalar_shape.d[i];
size_t len_tensor = weights_tensor.int_val_size() * sizeof(int32);
len_data = std::max(len_data, len_tensor);
ctx.weight_store()->store_.push_back(std::vector<uint8_t>(len_data));
void* dst = static_cast<void*>(&(ctx.weight_store()->store_.back()[0]));
- std::vector<int32> tensor_data(
- weights_tensor.int_val().begin(),
- weights_tensor.int_val().end()); // make a local copy first to flatten
- // doesn't have to be contigous
- memcpy(dst, tensor_data.data(), len_tensor); // store into weight store
+ if (weights_tensor.int_val_size() == 1) {
+ std::fill_n((int*)dst, GetShapeSize(scalar_shape),
+ *weights_tensor.int_val().begin());
+ } else {
+        // TODO(aaroey): get rid of this copy, since RepeatedField is always
+        // contiguous; the local copy was originally made to flatten data that
+        // did not have to be contiguous.
+ std::vector<int32> tensor_data(weights_tensor.int_val().begin(),
+ weights_tensor.int_val().end());
+ memcpy(dst, tensor_data.data(), len_tensor); // store into weight store
+ }
weights = TRT_ShapedWeights(dtype, dst, scalar_shape);
} else if (!weights_tensor.tensor_content().empty()) {
- // obsolete method.
- // After optimization path, we do not see weights in this format.
- // fp16 conversion technically should be needed here.
+ // obsolete method.
+ // After optimization path, we do not see weights in this format.
+ // TODO(aaroey): why?
+ // fp16 conversion technically should be needed here.
VLOG(2) << "TENSOR!!!" << node_def.name();
const auto& content = weights_tensor.tensor_content();
@@ -1595,8 +1760,8 @@ tensorflow::Status ConvertConst(Converter& ctx,
content, static_cast<char*>(const_cast<void*>(weights.GetValues())));
}
} else {
- return tensorflow::errors::Unimplemented(
- "Not supported constant type, at " + node_def.name());
+ return tensorflow::errors::Unimplemented("Not supported constant type, at ",
+ node_def.name());
}
// Pass the output
outputs->push_back(TRT_TensorOrWeights(weights));
@@ -1615,96 +1780,144 @@ tensorflow::Status ConvertBinary(Converter& ctx,
const tensorflow::NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs) {
- if (inputs.size() != 2)
+ if (inputs.size() != 2) {
return tensorflow::errors::FailedPrecondition(
- "Binary ops require two tensor input, at " + node_def.name());
-
- if (inputs.at(0).is_weights() && inputs.at(1).is_weights())
- return ConstantFoldBinary(ctx, node_def, inputs, outputs);
-
- if (inputs.at(0).is_tensor() && inputs.at(1).is_weights())
- return BinaryTensorOpWeight(ctx, node_def, inputs.at(0).tensor(),
- inputs.at(1).weights(), outputs);
+ "Binary ops require two tensor input, at ", node_def.name());
+ }
- if (inputs.at(0).is_weights() && inputs.at(1).is_tensor())
- return BinaryTensorOpWeight(ctx, node_def, inputs.at(1).tensor(),
- inputs.at(0).weights(), outputs);
+ // Constant folding should have been done by TensorFlow
- if (inputs.at(0).is_tensor() && inputs.at(1).is_tensor())
- return BinaryTensorOpTensor(ctx, node_def, inputs.at(0).tensor(),
- inputs.at(1).tensor(), outputs);
+ if (inputs.at(0).is_weights() && inputs.at(1).is_weights()) {
+ return tensorflow::errors::Unimplemented(
+ "Constant folding is falled back to TensorFlow, binary op received "
+ "both input as constant at: ",
+ node_def.name());
+ }
- return tensorflow::errors::Unknown("Binary op input error, at " +
- node_def.name());
+ // Try to convert into Scale layer first (for better performance)
+ // Since scale layer supports restricted broadcast policy and op types, we
+ // allow failure and try to handle it through Elementwise op
+ // (BinaryTensorOpTensor)
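+  // E.g. Add(tensor, weights) is first attempted as a TRT scale layer; if
+  // that fails (e.g. an unsupported broadcast), it is retried below as an
+  // element-wise layer (on TensorRT > 3).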
+ Status status = tensorflow::Status::OK();
+ if (inputs.at(0).is_tensor() && inputs.at(1).is_weights()) {
+ status = BinaryTensorOpWeight(ctx, node_def, inputs.at(0).tensor(),
+ inputs.at(1).weights(), false, outputs);
+ } else if (inputs.at(0).is_weights() && inputs.at(1).is_tensor()) {
+ status = BinaryTensorOpWeight(ctx, node_def, inputs.at(1).tensor(),
+ inputs.at(0).weights(), true, outputs);
+#if NV_TENSORRT_MAJOR == 3
+ } else {
+#else
+ }
+ if ((inputs.at(0).is_tensor() && inputs.at(1).is_tensor()) || !status.ok()) {
+#endif
+ status = BinaryTensorOpTensor(ctx, node_def, inputs.at(0), inputs.at(1),
+ outputs);
+ }
+ return status;
}
tensorflow::Status ConvertUnary(Converter& ctx,
const tensorflow::NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs) {
- if (inputs.size() != 1)
+ static const std::unordered_map<string, nvinfer1::UnaryOperation> ops{
+ {"Neg", nvinfer1::UnaryOperation::kNEG},
+ {"Exp", nvinfer1::UnaryOperation::kEXP},
+ {"Log", nvinfer1::UnaryOperation::kLOG},
+ {"Sqrt", nvinfer1::UnaryOperation::kSQRT},
+ {"Abs", nvinfer1::UnaryOperation::kABS},
+ {"Reciprocal", nvinfer1::UnaryOperation::kRECIP},
+ };
+
+ if (inputs.size() != 1) {
return tensorflow::errors::FailedPrecondition(
- "Unary ops require single tensor input, at " + node_def.name());
+ "Unary ops require single tensor input, at ", node_def.name());
+ }
- if (inputs.at(0).is_weights())
- return ConstantFoldUnary(ctx, node_def, inputs, outputs);
- else if (inputs.at(0).is_tensor())
+#if NV_TENSORRT_MAJOR == 3
+ if (inputs.at(0).is_weights()) {
return tensorflow::errors::Unimplemented(
- "Unary op for tensor not supported, at " + node_def.name());
+ "Constant folding for unary op is not supported", node_def.name());
+ }
+#endif
+
+ // TODO(jie): check type
+ const nvinfer1::ITensor* tensor;
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, inputs.at(0), inputs.at(0).shape(), &tensor),
+ node_def.name());
- return tensorflow::errors::Unknown("Binary op input error, at " +
- node_def.name());
+ nvinfer1::IUnaryLayer* layer;
+ if (node_def.op() == "Rsqrt") {
+ layer = ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+ nvinfer1::UnaryOperation::kSQRT);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+ tensor = layer->getOutput(0);
+ layer = ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+ nvinfer1::UnaryOperation::kRECIP);
+ } else if (ops.count(node_def.op()) != 0) {
+ layer = ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+ ops.at(node_def.op()));
+ } else {
+ return tensorflow::errors::InvalidArgument(
+ "Binary op: ", node_def.op(), " not supported, at ", node_def.name());
+ }
+
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+ nvinfer1::ITensor* output_tensor = layer->getOutput(0);
+ outputs->push_back(TRT_TensorOrWeights(output_tensor));
+ return tensorflow::Status::OK();
}
-tensorflow::Status ConvertReduce(Converter& ctx,
- const tensorflow::NodeDef& node_def,
- const std::vector<TRT_TensorOrWeights>& inputs,
- std::vector<TRT_TensorOrWeights>* outputs) {
+#if NV_TENSORRT_MAJOR == 3
+tensorflow::Status ConvertReducePool(
+ Converter& ctx, const tensorflow::NodeDef& node_def,
+ const std::vector<TRT_TensorOrWeights>& inputs,
+ std::vector<TRT_TensorOrWeights>* outputs) {
if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
- !inputs.at(1).is_weights())
+ !inputs.at(1).is_weights()) {
return tensorflow::errors::InvalidArgument(
- "Input expects tensor and weights, at" + node_def.name());
+ "Input expects tensor and weights, at", node_def.name());
+ }
// Implement tensor binaryOp weight [channel wise] for now;
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
- auto dims = tensor->getDimensions();
+ const auto dims = tensor->getDimensions();
// Restore implicit batch dimension
- int nb_dims = dims.nbDims + 1;
+ const int nb_dims = dims.nbDims + 1;
TRT_ShapedWeights index_list = inputs.at(1).weights();
-
TFAttrs attrs(node_def);
- // TODO(jie): handle data type.
- // Index type here is done through TF type, so I can leverage their
- // EnumToDataType for my cast
auto index_type = attrs.get<tensorflow::DataType>("Tidx");
// Only expect to handle INT32 as attributes for now
- if (index_type != tensorflow::DataType::DT_INT32)
+ if (index_type != tensorflow::DataType::DT_INT32) {
return tensorflow::errors::Unimplemented("Tidx supports only DT_INT32");
- auto index_list_data =
+ }
+ const auto index_list_data =
static_cast<int*>(const_cast<void*>(index_list.GetValues()));
- // Hack warning: have to fall back to pool layer since reduce is not in public
- // TRT yet.
- if (nb_dims != 4)
+ if (nb_dims != 4) {
return tensorflow::errors::InvalidArgument(
- "TRT only support reduce on 4 dimensional tensors, at" +
+ "TRT only support reduce on 4 dimensional tensors, at",
node_def.name());
- if (index_list.count() > 2)
+ }
+ if (index_list.count() > 2) {
return tensorflow::errors::InvalidArgument(
- "TRT cannot support reduce on more than 2 dimensions, at" +
+ "TRT cannot support reduce on more than 2 dimensions, at",
node_def.name());
+ }
std::set<int> idx_set;
// We cannot operate on Channel. permutation flag used to transpose tensor
int permuted_index = -1;
for (int i = 0; i < index_list.count(); i++) {
- if (index_list_data[i] == 0)
- return tensorflow::errors::InvalidArgument("TRT cannot reduce at 0, at" +
+ if (index_list_data[i] == 0) {
+ return tensorflow::errors::InvalidArgument("TRT cannot reduce at 0, at",
node_def.name());
+ }
if (index_list_data[i] == 1) permuted_index = 1;
-
idx_set.emplace(index_list_data[i]);
}
@@ -1725,6 +1938,7 @@ tensorflow::Status ConvertReduce(Converter& ctx,
// Apply permutation before extracting dimension for pool_kernel
tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
permutation_order);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
}
// Apply permutation before extracting dimension for pool_kernel
@@ -1737,34 +1951,104 @@ tensorflow::Status ConvertReduce(Converter& ctx,
nvinfer1::IPoolingLayer* layer =
ctx.network()->addPooling(*const_cast<nvinfer1::ITensor*>(tensor),
nvinfer1::PoolingType::kAVERAGE, pool_kernel);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
output_tensor = layer->getOutput(0);
} else {
- return tensorflow::errors::Unimplemented(
- "Op not supported " + node_def.op() + " , at " + node_def.name());
+ return tensorflow::errors::Unimplemented("Op not supported ", node_def.op(),
+ " , at ", node_def.name());
}
if (permuted_index != -1) {
// Apply permutation before extracting dimension for pool_kernel
output_tensor = ctx.TransposeTensor(
const_cast<nvinfer1::ITensor*>(output_tensor), permutation_order);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
}
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
+#elif NV_TENSORRT_MAJOR > 3
+tensorflow::Status ConvertReduce(Converter& ctx,
+ const tensorflow::NodeDef& node_def,
+ const std::vector<TRT_TensorOrWeights>& inputs,
+ std::vector<TRT_TensorOrWeights>* outputs) {
+ if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
+ !inputs.at(1).is_weights()) {
+ return tensorflow::errors::InvalidArgument(
+ "Input expects tensor and weights, at", node_def.name());
+ }
+
+ const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
+ TRT_ShapedWeights index_list = inputs.at(1).weights();
+
+ TFAttrs attrs(node_def);
+ auto index_type = attrs.get<tensorflow::DataType>("Tidx");
+
+ // Only expect to handle INT32 as attributes for now
+ if (index_type != tensorflow::DataType::DT_INT32) {
+ return tensorflow::errors::Unimplemented("Tidx supports only DT_INT32");
+ }
+
+ const auto keep_dims = attrs.get<bool>("keep_dims");
+ auto index_list_data =
+ static_cast<int*>(const_cast<void*>(index_list.GetValues()));
+
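+  // addReduce takes a bitmask over the non-batch dimensions, so TF axis k
+  // maps to bit (k - 1); e.g. reducing TF axes {1, 3} yields axes == 0b101.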
+ int axes = 0;
+ if (index_list.count() == 0) {
+ return tensorflow::errors::InvalidArgument(
+ "TRT cannot support reduce on all (batch) dimensions, at",
+ node_def.name());
+ } else {
+ for (int i = 0; i < index_list.count(); i++) {
+ if (index_list_data[i] == 0) {
+ return tensorflow::errors::InvalidArgument(
+ "TRT cannot reduce at batch dimension, at", node_def.name());
+ }
+ axes |= (1 << (index_list_data[i] - 1));
+ }
+ }
+
+ nvinfer1::ReduceOperation reduce_operation;
+ if (node_def.op() == "Sum") {
+ reduce_operation = nvinfer1::ReduceOperation::kSUM;
+ } else if (node_def.op() == "Prod") {
+ reduce_operation = nvinfer1::ReduceOperation::kPROD;
+ } else if (node_def.op() == "Max") {
+ reduce_operation = nvinfer1::ReduceOperation::kMAX;
+ } else if (node_def.op() == "Min") {
+ reduce_operation = nvinfer1::ReduceOperation::kMIN;
+ } else if (node_def.op() == "Mean") {
+ reduce_operation = nvinfer1::ReduceOperation::kAVG;
+ } else {
+ return tensorflow::errors::Unimplemented("Op not supported ", node_def.op(),
+ " , at ", node_def.name());
+ }
+
+ nvinfer1::ILayer* layer =
+ ctx.network()->addReduce(*const_cast<nvinfer1::ITensor*>(tensor),
+ reduce_operation, axes, keep_dims);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+
+ outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
+ return tensorflow::Status::OK();
+}
+#endif
tensorflow::Status ConvertPad(Converter& ctx,
const tensorflow::NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs) {
+ // TODO(aaroey): make a routine for this check and reuse it.
if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
- !inputs.at(1).is_weights())
+ !inputs.at(1).is_weights()) {
return tensorflow::errors::InvalidArgument(
- "Input expects tensor and weights, at" + node_def.name());
+ "Input expects tensor and weights, at", node_def.name());
+ }
// Implement tensor binaryOp weight [channel wise] for now;
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
- auto dims = tensor->getDimensions();
+ const auto dims = tensor->getDimensions();
// Restore implicit batch dimension
- int nb_dims = dims.nbDims + 1;
+ const int nb_dims = dims.nbDims + 1;
TRT_ShapedWeights pads = inputs.at(1).weights();
@@ -1774,21 +2058,24 @@ tensorflow::Status ConvertPad(Converter& ctx,
auto padding_type = attrs.get<tensorflow::DataType>("Tpaddings");
// TODO(jie): handle data type conversion for TRT?
- if (pads.shape_.d[0] != nb_dims || pads.shape_.d[1] != 2)
+ if (pads.shape_.d[0] != nb_dims || pads.shape_.d[1] != 2) {
return tensorflow::errors::InvalidArgument(
- "Pad only supports explicit padding on 4 dimensional tensor, at " +
+ "Pad only supports explicit padding on 4 dimensional tensor, at ",
node_def.name());
+ }
// Only expect to handle INT32 as attributes for now
- if (padding_type != tensorflow::DataType::DT_INT32)
+ if (padding_type != tensorflow::DataType::DT_INT32) {
return tensorflow::errors::Unimplemented(
"Tpaddings supports only DT_INT32");
+ }
auto pad_data = static_cast<int*>(const_cast<void*>(pads.GetValues()));
std::vector<int32_t> pad_index;
for (int i = 0; i < nb_dims; i++) {
- if (pad_data[2 * i] != 0 || pad_data[2 * i + 1] != 0)
+ if (pad_data[2 * i] != 0 || pad_data[2 * i + 1] != 0) {
pad_index.push_back(i);
+ }
}
// No padding at all, we should exit
@@ -1798,20 +2085,23 @@ tensorflow::Status ConvertPad(Converter& ctx,
}
// Only supports padding on less than 2 axis GIE-2579
- if (pad_index.size() > 2)
+ if (pad_index.size() > 2) {
return tensorflow::errors::InvalidArgument(
"Padding layer does not support padding on > 2");
+ }
// Padding on batch dimension is not supported
- if (pad_index[0] == 0)
+ if (pad_index[0] == 0) {
return tensorflow::errors::InvalidArgument(
"Padding layer does not support padding on batch dimension");
+ }
  // Not doing the legit thing here: ignoring padding on dims 1 and 3;
// TODO(jie): implement pad as uff parser
- if (pad_index.size() == 2 && pad_index[0] == 0 && pad_index[1] == 3)
+ if (pad_index.size() == 2 && pad_index[0] == 0 && pad_index[1] == 3) {
return tensorflow::errors::Unimplemented(
"Padding layer does not support padding on dimension 1 and 3 yet");
+ }
bool legit_pad = true;
nvinfer1::DimsHW pre_padding(0, 0);
@@ -1822,6 +2112,7 @@ tensorflow::Status ConvertPad(Converter& ctx,
legit_pad = false;
tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
{0, 3, 2, 1});
+ TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
permuted_pad_index[0] = 3;
}
@@ -1838,11 +2129,14 @@ tensorflow::Status ConvertPad(Converter& ctx,
nvinfer1::IPaddingLayer* layer = ctx.network()->addPadding(
*const_cast<nvinfer1::ITensor*>(tensor), pre_padding, post_padding);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
- if (!legit_pad)
+ if (!legit_pad) {
output_tensor = ctx.TransposeTensor(
const_cast<nvinfer1::ITensor*>(output_tensor), {0, 3, 2, 1});
+ TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
+ }
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
@@ -1855,9 +2149,10 @@ tensorflow::Status ConvertConcat(Converter& ctx,
// not including the last input (axis) here
int input_size = static_cast<int>(inputs.size()) - 1;
- if (!inputs.at(0).is_tensor())
+ if (!inputs.at(0).is_tensor()) {
return tensorflow::errors::InvalidArgument(
- "Concat in TRT support only Tensor input, at " + node_def.name());
+ "Concat in TRT support only Tensor input, at ", node_def.name());
+ }
// We are retrieving the axis
TRT_ShapedWeights axis = inputs.at(input_size).weights();
@@ -1868,8 +2163,8 @@ tensorflow::Status ConvertConcat(Converter& ctx,
// TODO(jie): handle data type
// Only expect to handle INT32 as index attributes for now
if (index_type != tensorflow::DataType::DT_INT32)
- return tensorflow::errors::Unimplemented(
- "Tidx supports only DT_INT32, at " + node_def.name());
+ return tensorflow::errors::Unimplemented("Tidx supports only DT_INT32, at ",
+ node_def.name());
int index = *(static_cast<int*>(const_cast<void*>(axis.GetValues())));
@@ -1877,23 +2172,29 @@ tensorflow::Status ConvertConcat(Converter& ctx,
auto dim = inputs.at(0).tensor()->getDimensions();
// dimension check
- if (index > dim.nbDims + 1)
+ if (index > dim.nbDims + 1) {
return tensorflow::errors::InvalidArgument(
- "Concatenate on axis out of dimension range, at " + node_def.name());
-
- if (index == 0)
+ "Concatenate on axis out of dimension range, at ", node_def.name());
+ }
+ if (index == 0) {
return tensorflow::errors::InvalidArgument(
- "Concatenate on batch dimension not supported, at " + node_def.name());
+ "Concatenate on batch dimension not supported, at ", node_def.name());
+ }
+ if (index < 0) {
+ index = dim.nbDims + index + 1;
+ }
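+  // Negative axes are normalized against the full TF rank (dim.nbDims + 1,
+  // counting the implicit batch); e.g. with dim.nbDims == 3, axis -1 maps to
+  // index 3.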
+#if NV_TENSORRT_MAJOR == 3
  // in case we need permutation;
std::vector<int> permutation_order(dim.nbDims + 1);
for (int i = 0; i < dim.nbDims + 1; i++) permutation_order[i] = i;
if (index != 1) {
- permutation_order[1] = index - 1;
- permutation_order[index - 1] = 1;
+ permutation_order[1] = index;
+ permutation_order[index] = 1;
}
+#endif
std::vector<nvinfer1::ITensor const*> inputs_vec;
  // Shape check (all input tensors should have the same shape)
@@ -1901,24 +2202,28 @@ tensorflow::Status ConvertConcat(Converter& ctx,
for (int i = 0; i < input_size; i++) {
auto tensor_i = inputs.at(i).tensor();
auto dim_i = tensor_i->getDimensions();
- if (dim_i.nbDims != dim.nbDims)
+ if (dim_i.nbDims != dim.nbDims) {
return tensorflow::errors::InvalidArgument(
- "Concatenate receives inputs with inconsistent dimensions, at " +
+ "Concatenate receives inputs with inconsistent dimensions, at ",
node_def.name());
-
+ }
for (int j = 0; j < dim.nbDims; j++) {
// check dimension consistency on non-concatenate axis
- if (j != index - 1 && dim_i.d[j] != dim.d[j])
+ if (j != index - 1 && dim_i.d[j] != dim.d[j]) {
return tensorflow::errors::InvalidArgument(
- "Concatenate receives inputs with inconsistent shape, at" +
+ "Concatenate receives inputs with inconsistent shape, at",
node_def.name());
+ }
}
- // TRT does concatenation only on channel!
- if (index != 1)
+#if NV_TENSORRT_MAJOR == 3
+ // TRT3 does concatenation only on channel!
+ if (index != 1) {
tensor_i = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor_i),
permutation_order);
-
+ TFTRT_RETURN_ERROR_IF_NULLPTR(tensor_i, node_def.name());
+ }
+#endif
inputs_vec.push_back(tensor_i);
}
@@ -1926,11 +2231,18 @@ tensorflow::Status ConvertConcat(Converter& ctx,
nvinfer1::IConcatenationLayer* layer = ctx.network()->addConcatenation(
const_cast<nvinfer1::ITensor* const*>(inputs_vec.data()),
inputs_vec.size());
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+#if NV_TENSORRT_MAJOR > 3
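+  // TRT axes exclude the implicit batch dimension, hence index - 1.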
+ layer->setAxis(index - 1);
+#endif
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
+#if NV_TENSORRT_MAJOR == 3
if (index != 1) {
output_tensor = ctx.TransposeTensor(output_tensor, permutation_order);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
}
+#endif
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
@@ -2049,112 +2361,243 @@ tensorflow::Status ConvertFusedBatchNorm(
combined_offset_weights.GetWeightsForTRT(),
combined_scale_weights.GetWeightsForTRT(),
dummy_power_weights.GetWeightsForTRT());
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
-tensorflow::Status ConvertMatMul(Converter& ctx,
- const tensorflow::NodeDef& node_def,
- const std::vector<TRT_TensorOrWeights>& inputs,
- std::vector<TRT_TensorOrWeights>* outputs) {
- const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
-
- // TODO(jie): transpose!
- TFAttrs attrs(node_def);
+#if NV_TENSORRT_MAJOR > 3
+tensorflow::Status ConvertMatMulHelper(
+ Converter& ctx, TRT_TensorOrWeights tensor_input,
+ TRT_ShapedWeights weights_raw, bool transpose_weight, string node_name,
+ std::vector<TRT_TensorOrWeights>* outputs) {
+ nvinfer1::ITensor* output_tensor;
+ if (!tensor_input.is_tensor()) {
+ return tensorflow::errors::InvalidArgument("Input 0 expects tensor");
+ }
+ const nvinfer1::ITensor* tensor = tensor_input.tensor();
- TRT_ShapedWeights weights_ck = inputs.at(1).weights();
- TRT_ShapedWeights weights = ctx.get_temp_weights_like(weights_ck);
- ReorderCKtoKC(weights_ck, &weights);
+ TRT_ShapedWeights weights(weights_raw.type_);
+ if (transpose_weight) {
+ weights = weights_raw;
+ } else {
+ TRT_ShapedWeights weights_ck = weights_raw;
+ weights = ctx.get_temp_weights_like(weights_ck);
+ ReorderCKtoKC(weights_raw, &weights);
+ }
TRT_ShapedWeights biases(weights.type_);
int noutput = weights.shape_.d[0];
+ auto input_dim = tensor->getDimensions();
+ while (input_dim.nbDims != 3) {
+ input_dim.d[input_dim.nbDims++] = 1;
+ }
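+  // Pad the (implicit-batch) shape with trailing 1s, e.g. [K] -> [K, 1, 1]:
+  // addFullyConnected expects a rank-3, CHW-like input here.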
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, tensor_input, input_dim, &tensor), node_name);
+
nvinfer1::IFullyConnectedLayer* layer = ctx.network()->addFullyConnected(
*const_cast<nvinfer1::ITensor*>(tensor), noutput, weights, biases);
-
- nvinfer1::ITensor* output_tensor = layer->getOutput(0);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_name);
+ output_tensor = layer->getOutput(0);
+
+ const nvinfer1::ITensor* temp_tensor;
+ auto output_dim = output_tensor->getDimensions();
+ output_dim.nbDims = 1;
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, TRT_TensorOrWeights(output_tensor), output_dim,
+ &temp_tensor),
+ node_name);
+ output_tensor = const_cast<nvinfer1::ITensor*>(temp_tensor);
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
-tensorflow::Status ConvertReshape(
+// inputs are both two dimensional (tensorflow::ops::MatMul)
+tensorflow::Status ConvertMatMul(Converter& ctx,
+ const tensorflow::NodeDef& node_def,
+ const std::vector<TRT_TensorOrWeights>& inputs,
+ std::vector<TRT_TensorOrWeights>* outputs) {
+ if (!inputs.at(0).is_tensor()) {
+ return tensorflow::errors::InvalidArgument("Input 0 expects tensor, at" +
+ node_def.name());
+ }
+
+ TFAttrs attrs(node_def);
+ // TODO(jie): INT32 should be converted?
+ tensorflow::DataType tf_dtype = attrs.get<tensorflow::DataType>("T");
+ if (tf_dtype != tensorflow::DataType::DT_FLOAT &&
+ tf_dtype != tensorflow::DataType::DT_HALF) {
+ return tensorflow::errors::Unimplemented(
+ "data type is not supported, for node " + node_def.name() + " got " +
+ tensorflow::DataTypeString(tf_dtype));
+ }
+ bool transpose_a = attrs.get<bool>("transpose_a");
+ bool transpose_b = attrs.get<bool>("transpose_b");
+
+ // FullyConnected:
+ if (transpose_a) {
+ return tensorflow::errors::Internal(
+ "Transpose_a is not supported for TensorRT FullyConnected (op: " +
+ node_def.op() + "), at: " + node_def.name());
+ }
+ if (inputs.at(1).is_tensor()) {
+ return tensorflow::errors::Internal(
+ "Operand 1 must be constant for TensorRT FullyConnected (op: " +
+ node_def.op() + "), at: " + node_def.name());
+ }
+ return ConvertMatMulHelper(ctx, inputs.at(0), inputs.at(1).weights(),
+ transpose_b, node_def.name(), outputs);
+}
+
+tensorflow::Status ConvertBatchMatMul(
Converter& ctx, const tensorflow::NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs) {
- if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
- !inputs.at(1).is_weights())
- return tensorflow::errors::InvalidArgument(
- "Input expects tensor and weights, at" + node_def.name());
+ TFAttrs attrs(node_def);
- // implement tensor binaryOp weight [channel wise] for now;
- const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
- auto dims = tensor->getDimensions();
- // restore implicit batch dimension
+ // TODO(jie): INT32 should be converted?
+ tensorflow::DataType tf_dtype = attrs.get<tensorflow::DataType>("T");
+ if (tf_dtype != tensorflow::DataType::DT_FLOAT &&
+ tf_dtype != tensorflow::DataType::DT_HALF) {
+ return tensorflow::errors::Unimplemented(
+ "data type is not supported, for node " + node_def.name() + " got " +
+ tensorflow::DataTypeString(tf_dtype));
+ }
- TRT_ShapedWeights shape = inputs.at(1).weights();
+ bool transpose_a = attrs.get<bool>("adj_x");
+ bool transpose_b = attrs.get<bool>("adj_y");
- TFAttrs attrs(node_def);
+ auto dims = inputs.at(0).shape();
+ if (dims.nbDims == 1) { // NC * CK is only supported through fully connected
+ if (transpose_a == false && inputs.at(0).is_tensor() &&
+ inputs.at(1).is_weights()) {
+ return ConvertMatMulHelper(ctx, inputs.at(0), inputs.at(1).weights(),
+ transpose_b, node_def.name(), outputs);
+ } else {
+ return tensorflow::errors::InvalidArgument(
+ "Invalid configuration for MatMul, at: " + node_def.name());
+ }
+ }
- auto padding_type = attrs.get<tensorflow::DataType>("Tshape");
+ const nvinfer1::ITensor* tensor_l;
+ const nvinfer1::ITensor* tensor_r;
+ auto dims_l = inputs.at(0).shape();
+ auto dims_r = inputs.at(1).shape();
+ if (inputs.at(0).is_weights()) {
+ if (inputs.at(0).shape().d[0] != 1) {
+ return tensorflow::errors::InvalidArgument(
+ "Input 0 as weight assumes broadcast across batch for MatMul, at: " +
+ node_def.name());
+ } else {
+ for (int i = 0; i < dims_l.nbDims - 1; i++) {
+ dims_l.d[i] = dims_l.d[i + 1];
+ }
+ dims_l.nbDims--;
+ }
+ }
+ if (inputs.at(1).is_weights()) {
+ if (inputs.at(1).shape().d[0] != 1) {
+ return tensorflow::errors::InvalidArgument(
+ "Input 1 as weight assumes broadcast across batch for MatMul, at: " +
+ node_def.name());
+ } else {
+ for (int i = 0; i < dims_r.nbDims - 1; i++) {
+ dims_r.d[i] = dims_r.d[i + 1];
+ }
+ dims_r.nbDims--;
+ }
+ }
- if (shape.shape_.nbDims != 1)
- return tensorflow::errors::InvalidArgument(
- "reshape new shape is not 1 dimensional, at " + node_def.name());
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, inputs.at(0), dims_l, &tensor_l),
+ node_def.name());
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, inputs.at(1), dims_r, &tensor_r),
+ node_def.name());
- // Only expect to handle INT32 as attributes for now
- if (padding_type != tensorflow::DataType::DT_INT32)
- return tensorflow::errors::Unimplemented(
- "reshape new shape supports only DT_INT32, at " + node_def.name());
+ nvinfer1::IMatrixMultiplyLayer* layer = ctx.network()->addMatrixMultiply(
+ *const_cast<nvinfer1::ITensor*>(tensor_l), transpose_a,
+ *const_cast<nvinfer1::ITensor*>(tensor_r), transpose_b);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+ nvinfer1::ITensor* output_tensor = layer->getOutput(0);
+ outputs->push_back(TRT_TensorOrWeights(output_tensor));
+ return tensorflow::Status::OK();
+}
+#endif
- auto shape_data = static_cast<int*>(const_cast<void*>(shape.GetValues()));
+#if NV_TENSORRT_MAJOR > 3
+tensorflow::Status ConvertSoftmax(
+ Converter& ctx, const tensorflow::NodeDef& node_def,
+ const std::vector<TRT_TensorOrWeights>& inputs,
+ std::vector<TRT_TensorOrWeights>* outputs) {
+ const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
- if (shape_data[0] != -1)
+ int nbDims = tensor->getDimensions().nbDims;
+ if (nbDims == 0) {
return tensorflow::errors::InvalidArgument(
- "reshape new shape first dimension is not -1, at " + node_def.name());
+ "TensorRT Softmax cannot apply on batch dimension, at" +
+ node_def.name());
+ }
+ nvinfer1::ISoftMaxLayer* layer =
+ ctx.network()->addSoftMax(*const_cast<nvinfer1::ITensor*>(tensor));
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+  // TensorFlow Softmax applies softmax along the last dimension.
+ layer->setAxes(1 << (nbDims - 1));
- auto shape_num_dims = shape.shape_.d[0];
- VLOG(2) << "shape dimensions: " << shape_num_dims;
- int volume_w = 1;
- for (int i = 1; i < shape.shape_.d[0]; i++) volume_w *= shape_data[i];
+ nvinfer1::ITensor* output_tensor = layer->getOutput(0);
+ outputs->push_back(TRT_TensorOrWeights(output_tensor));
+ return tensorflow::Status::OK();
+}
+#endif
- int volume_t = 1;
- for (int i = 0; i < dims.nbDims; i++) volume_t *= dims.d[i];
+#if NV_TENSORRT_MAJOR > 3
+tensorflow::Status ConvertTopK(Converter& ctx,
+ const tensorflow::NodeDef& node_def,
+ const std::vector<TRT_TensorOrWeights>& inputs,
+ std::vector<TRT_TensorOrWeights>* outputs) {
+ const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
- VLOG(2) << "volume: " << volume_t << " volume weights: " << volume_w;
- if (volume_w != volume_t)
+ int nbDims = tensor->getDimensions().nbDims;
+ if (nbDims == 0) {
return tensorflow::errors::InvalidArgument(
- "volume does not agree between tensor and new shape, at " +
- node_def.name());
+ "TensorRT TopK cannot apply on batch dimension, at" + node_def.name());
+ }
- nvinfer1::IShuffleLayer* layer =
- ctx.network()->addShuffle(*const_cast<nvinfer1::ITensor*>(tensor));
+ TRT_ShapedWeights k_w = inputs.at(1).weights();
+ int k = *(static_cast<int*>(const_cast<void*>(k_w.GetValues())));
- nvinfer1::Dims reshape_dims;
- VLOG(2) << "new dimension: " << shape_num_dims - 1;
- reshape_dims.nbDims = shape_num_dims - 1;
- for (int32_t i = 0; i < reshape_dims.nbDims; ++i) {
- reshape_dims.d[i] = shape_data[i + 1];
+ nvinfer1::TopKOperation op;
+ uint32_t reducedAxes = 0;
+ if (node_def.op() == "TopKV2") {
+ op = nvinfer1::TopKOperation::kMAX;
+ reducedAxes |= 1 << (nbDims - 1);
+ } else {
+ return tensorflow::errors::Unimplemented(
+ "Operation: " + node_def.op() +
+ " not implemented, at: " + node_def.name());
}
- layer->setReshapeDimensions(reshape_dims);
- VLOG(2) << "new dimension: " << shape_num_dims - 1;
- nvinfer1::ITensor* output_tensor = layer->getOutput(0);
- auto dims_output = output_tensor->getDimensions();
- VLOG(2) << "output tensor dimension:" << dims_output.nbDims;
- outputs->push_back(TRT_TensorOrWeights(output_tensor));
+ nvinfer1::ITopKLayer* layer = ctx.network()->addTopK(
+ *const_cast<nvinfer1::ITensor*>(tensor), op, k, reducedAxes);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+
+ nvinfer1::ITensor* output_value_tensor = layer->getOutput(0);
+ nvinfer1::ITensor* output_indices_tensor = layer->getOutput(1);
+ outputs->push_back(TRT_TensorOrWeights(output_value_tensor));
+ outputs->push_back(TRT_TensorOrWeights(output_indices_tensor));
return tensorflow::Status::OK();
}
+#endif
void Converter::register_op_converters() {
// vgg_16 slim implementation
- op_registry_["Placeholder"] = ConvertPlaceholder;
op_registry_["Conv2D"] = ConvertConv2D;
op_registry_["DepthwiseConv2dNative"] = ConvertConv2DDepthwise;
op_registry_["Relu"] = ConvertActivation;
op_registry_["MaxPool"] = ConvertPool;
op_registry_["AvgPool"] = ConvertPool;
- // This could be really handled as ConvertBinary
op_registry_["BiasAdd"] = ConvertScale;
op_registry_["Const"] = ConvertConst;
// TODO(ben,jie): this is a temp hack.
@@ -2165,18 +2608,38 @@ void Converter::register_op_converters() {
op_registry_["Add"] = ConvertBinary;
op_registry_["Mul"] = ConvertBinary;
op_registry_["Sub"] = ConvertBinary;
- op_registry_["Rsqrt"] = ConvertUnary;
- op_registry_["Mean"] = ConvertReduce;
op_registry_["Pad"] = ConvertPad;
- // TODO(ben,jie): Add more ops
op_registry_["ConcatV2"] = ConvertConcat;
- op_registry_["MatMul"] = ConvertMatMul;
- op_registry_["Reshape"] = ConvertReshape;
op_registry_["FusedBatchNorm"] = ConvertFusedBatchNorm;
op_registry_["FusedBatchNormV2"] = ConvertFusedBatchNorm;
- plugin_converter_ = ConvertPlugin;
+ op_registry_["Div"] = ConvertBinary;
+ op_registry_["RealDiv"] = ConvertBinary;
+
+ op_registry_["Rsqrt"] = ConvertUnary;
+ op_registry_["Reciprocal"] = ConvertUnary;
+ op_registry_["Exp"] = ConvertUnary;
+ op_registry_["Log"] = ConvertUnary;
+ op_registry_["Sqrt"] = ConvertUnary;
+ op_registry_["Abs"] = ConvertUnary;
+ op_registry_["Neg"] = ConvertUnary;
+#if NV_TENSORRT_MAJOR == 3
+ op_registry_["Mean"] = ConvertReducePool;
+#endif
+#if NV_TENSORRT_MAJOR > 3
+ op_registry_["Sum"] = ConvertReduce;
+ op_registry_["Prod"] = ConvertReduce;
+ op_registry_["Max"] = ConvertReduce;
+ op_registry_["Min"] = ConvertReduce;
+ op_registry_["Mean"] = ConvertReduce;
+ op_registry_["Maximum"] = ConvertBinary;
+ op_registry_["Minimum"] = ConvertBinary;
+ op_registry_["Softmax"] = ConvertSoftmax;
+ op_registry_["MatMul"] = ConvertMatMul;
+ op_registry_["BatchMatMul"] = ConvertBatchMatMul;
+ op_registry_["TopKV2"] = ConvertTopK;
+#endif
}
} // namespace
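
A note on the BatchMatMul conversion above: a weight operand is accepted only when its leading (batch) dimension is 1, and that dimension is then dropped before the IMatrixMultiplyLayer is built. A minimal Python sketch of this squeeze, using a plain list in place of nvinfer1::Dims (illustrative assumption, not the converter's API):

    # Stand-in for the nvinfer1::Dims handling in ConvertBatchMatMul: a weight
    # operand may only broadcast across the batch, i.e. its dims[0] must be 1.
    def squeeze_weight_batch_dim(dims):
        if dims[0] != 1:
            raise ValueError("weight input must have batch dimension 1")
        # Shift the remaining dimensions left, as the C++ loop does.
        return dims[1:]

    assert squeeze_weight_batch_dim([1, 128, 64]) == [128, 64]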
diff --git a/tensorflow/contrib/tensorrt/convert/convert_nodes.h b/tensorflow/contrib/tensorrt/convert/convert_nodes.h
index 64337eee84..307f97026a 100644
--- a/tensorflow/contrib/tensorrt/convert/convert_nodes.h
+++ b/tensorflow/contrib/tensorrt/convert/convert_nodes.h
@@ -46,8 +46,8 @@ const int INT8MODE = 2;
struct EngineConnection {
EngineConnection(const string& outside, int out_id, int out_port,
- const string& inside, int in_id, int in_port,
- bool input_edge, int port)
+ const string& inside, int in_id, int in_port,
+ bool input_edge, int port)
: outside_node_name(outside),
outside_id(out_id),
outside_port(out_port),
diff --git a/tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc b/tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc
index ec9dbfa13b..044c736c03 100644
--- a/tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc
+++ b/tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
+#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
@@ -232,8 +233,25 @@ tensorflow::Status TRTOptimizationPass::Optimize(
tensorflow::grappler::GraphProperties static_graph_properties(item);
TF_RETURN_IF_ERROR(static_graph_properties.InferStatically(true));
tensorflow::tensorrt::convert::ConversionParams cp;
+
+ std::vector<string> nodes_to_preserve;
+ for (const auto& n : item.NodesToPreserve()) {
+ auto tokens = str_util::Split(n, ":");
+ string s = tokens.at(0);
+ for (int i = 1; i < tokens.size() - 1; ++i) {
+ StrAppend(&s, ":", tokens.at(i));
+ }
+ int dumm_port = -1;
+ // If the last token is not an integer, it must be part of the name.
+    // Otherwise it is a port number.
+ if (tokens.size() > 1 &&
+ !strings::safe_strto32(tokens.back(), &dumm_port)) {
+ StrAppend(&s, ":", tokens.back());
+ }
+ nodes_to_preserve.push_back(s);
+ }
cp.input_graph_def = &item.graph;
- cp.output_names = &item.fetch;
+ cp.output_names = &nodes_to_preserve;
cp.max_batch_size = maximum_batch_size_;
cp.max_workspace_size_bytes = maximum_workspace_size_;
cp.output_graph_def = optimized_graph;
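
The NodesToPreserve() loop above normalizes names of the form "name:port": a trailing integer token is treated as a port and stripped, while colons that are part of the node name are kept. A rough standalone Python equivalent of that token logic (a sketch, not the Grappler API itself):

    def strip_port(fetch_name):
        # "seg:1" -> "seg"; "scope:node" stays, since the last token is not an
        # integer and must therefore be part of the name. isdigit() stands in
        # for strings::safe_strto32, which the C++ code uses.
        tokens = fetch_name.split(":")
        if len(tokens) > 1 and tokens[-1].isdigit():
            return ":".join(tokens[:-1])
        return fetch_name

    assert strip_port("segment0:1") == "segment0"
    assert strip_port("prefix:node") == "prefix:node"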
diff --git a/tensorflow/contrib/tensorrt/convert/utils.cc b/tensorflow/contrib/tensorrt/convert/utils.cc
new file mode 100644
index 0000000000..24591cf84b
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/convert/utils.cc
@@ -0,0 +1,35 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/tensorrt/convert/utils.h"
+
+namespace tensorflow {
+namespace tensorrt {
+
+bool IsGoogleTensorRTEnabled() {
+ // TODO(laigd): consider also checking if tensorrt shared libraries are
+  // accessible. We can then direct users to this function to make sure they
+  // can safely write code that uses tensorrt conditionally. E.g. if it does
+  // not check for tensorrt and the user mistakenly uses tensorrt, they will
+  // just crash and burn.
+#ifdef GOOGLE_TENSORRT
+ return true;
+#else
+ return false;
+#endif
+}
+
+} // namespace tensorrt
+} // namespace tensorflow
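
IsGoogleTensorRTEnabled() is surfaced to Python as trt.is_tensorrt_enabled() (see the SWIG and __init__.py changes below), letting callers gate TensorRT-specific paths instead of crashing in builds without TensorRT. A sketch of the intended usage, with assumed parameter values:

    from tensorflow.contrib import tensorrt as trt

    def maybe_convert(graph_def, output_names):
        # Fall back to the unmodified graph when TensorFlow was built
        # without TensorRT support.
        if not trt.is_tensorrt_enabled():
            return graph_def
        return trt.create_inference_graph(
            input_graph_def=graph_def,
            outputs=output_names,
            max_batch_size=8,                  # assumed for illustration
            max_workspace_size_bytes=1 << 25,
            precision_mode="FP32")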
diff --git a/tensorflow/contrib/tensorrt/convert/utils.h b/tensorflow/contrib/tensorrt/convert/utils.h
index f601c06701..8b5f4d614a 100644
--- a/tensorflow/contrib/tensorrt/convert/utils.h
+++ b/tensorflow/contrib/tensorrt/convert/utils.h
@@ -31,6 +31,8 @@ struct TrtDestroyer {
template <typename T>
using TrtUniquePtrType = std::unique_ptr<T, TrtDestroyer<T>>;
+bool IsGoogleTensorRTEnabled();
+
} // namespace tensorrt
} // namespace tensorflow
diff --git a/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc b/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc
index 75e32559bb..04d072f5d9 100644
--- a/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc
+++ b/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc
@@ -316,10 +316,15 @@ void TRTEngineOp::ComputeAsync(tensorflow::OpKernelContext* ctx,
ctx->SetStatus(tensorflow::errors::InvalidArgument(
"INT8 inputs are not supported!"));
return;
+#if NV_TENSORRT_MAJOR > 3
+ case nvinfer1::DataType::kINT32:
+ buffers[binding_index] = (void*)(input_tensor.flat<int32>().data());
+ break;
+#endif
default:
LOG(ERROR) << "Unknown TRT data type: " << int(dtype);
ctx->SetStatus(tensorflow::errors::InvalidArgument(
- "Unknown ouput TRT data type! ", static_cast<int>(dtype)));
+ "Unknown output TRT data type! ", static_cast<int>(dtype)));
return;
}
}
@@ -327,8 +332,8 @@ void TRTEngineOp::ComputeAsync(tensorflow::OpKernelContext* ctx,
for (int i = 0; i < ctx->num_outputs(); i++) {
// Create an output tensor
const string output_name = StrCat(kOutputPHName, i);
- const size_t binding_index = trt_engine_ptr->getBindingIndex(
- output_name.c_str());
+ const size_t binding_index =
+ trt_engine_ptr->getBindingIndex(output_name.c_str());
Tensor* output_tensor = nullptr;
TensorShape output_shape;
@@ -368,10 +373,16 @@ void TRTEngineOp::ComputeAsync(tensorflow::OpKernelContext* ctx,
ctx->SetStatus(tensorflow::errors::InvalidArgument(
"INT8 outputs are not supported!"));
return;
+#if NV_TENSORRT_MAJOR > 3
+ case nvinfer1::DataType::kINT32:
+ buffers[binding_index] =
+ reinterpret_cast<void*>(output_tensor->flat<int32>().data());
+ break;
+#endif
default:
LOG(ERROR) << "Unknown TRT data type: " << static_cast<int>(dtype);
ctx->SetStatus(tensorflow::errors::InvalidArgument(
- "Unsupported output data type! ", int(dtype)));
+ "Unsupported output data type! ", static_cast<int>(dtype)));
return;
}
}
@@ -420,10 +431,10 @@ nvinfer1::IGpuAllocator* TRTEngineOp::GetAllocator(OpKernelContext* ctx) {
}
TRTEngineOp::EngineCtxPair& TRTEngineOp::GetEngine(int batch_size,
- OpKernelContext* ctx) {
+ OpKernelContext* ctx) {
static EngineCtxPair null_pair = {
- TrtUniquePtrType<nvinfer1::ICudaEngine>(nullptr),
- TrtUniquePtrType<nvinfer1::IExecutionContext>(nullptr)};
+ TrtUniquePtrType<nvinfer1::ICudaEngine>(nullptr),
+ TrtUniquePtrType<nvinfer1::IExecutionContext>(nullptr)};
// TODO(sami): This method needs to be re-written to use resource manager and
// with LRU mechanism option.
tensorflow::mutex_lock lock(engine_mutex_);
@@ -450,9 +461,9 @@ TRTEngineOp::EngineCtxPair& TRTEngineOp::GetEngine(int batch_size,
auto raw_static_engine = static_engine.get();
const auto max_batch_size = raw_static_engine->getMaxBatchSize();
engine_map_[max_batch_size] = {
- std::move(static_engine),
- TrtUniquePtrType<nvinfer1::IExecutionContext>(
- raw_static_engine->createExecutionContext())};
+ std::move(static_engine),
+ TrtUniquePtrType<nvinfer1::IExecutionContext>(
+ raw_static_engine->createExecutionContext())};
// Runtime is safe to delete after engine creation
serialized_segment_.clear();
if (max_batch_size < batch_size) return null_pair;
diff --git a/tensorflow/contrib/tensorrt/ops/trt_engine_op.cc b/tensorflow/contrib/tensorrt/ops/trt_engine_op.cc
index 383635f428..e0c7b62723 100644
--- a/tensorflow/contrib/tensorrt/ops/trt_engine_op.cc
+++ b/tensorflow/contrib/tensorrt/ops/trt_engine_op.cc
@@ -42,8 +42,14 @@ REGISTER_OP("TRTEngineOp")
.Attr("precision_mode: {'FP32', 'FP16', 'INT8', 'INT8CALIB'}")
.Attr("calibration_data: string = ''")
.Input("in_tensor: InT")
- .Output("out_tensor: OutT")
- .SetShapeFn(shape_inference::TRTEngineOpShapeInference);
+ .Output("out_tensor: OutT");
+// TODO(jie): TF requires concrete output shapes for concrete input shapes.
+// This is tricky for the batch dimension, since we cannot ensure which input
+// carries the correct batch dimension (at the current stage of the
+// implementation we require all input tensors to carry the same batch
+// size, but this could change in the future). Hence we disable the shape
+// inference function as a workaround.
+// .SetShapeFn(shape_inference::TRTEngineOpShapeInference);
} // namespace tensorflow
diff --git a/tensorflow/contrib/tensorrt/python/__init__.py b/tensorflow/contrib/tensorrt/python/__init__.py
index 0b2321b5fc..fe4fa166a1 100644
--- a/tensorflow/contrib/tensorrt/python/__init__.py
+++ b/tensorflow/contrib/tensorrt/python/__init__.py
@@ -22,4 +22,5 @@ from __future__ import print_function
from tensorflow.contrib.tensorrt.python.ops import trt_engine_op
from tensorflow.contrib.tensorrt.python.trt_convert import calib_graph_to_infer_graph
from tensorflow.contrib.tensorrt.python.trt_convert import create_inference_graph
+from tensorflow.contrib.tensorrt.python.trt_convert import is_tensorrt_enabled
# pylint: enable=unused-import,line-too-long
diff --git a/tensorflow/contrib/tensorrt/python/trt_convert.py b/tensorflow/contrib/tensorrt/python/trt_convert.py
index 79f512dbcf..2b67931661 100644
--- a/tensorflow/contrib/tensorrt/python/trt_convert.py
+++ b/tensorflow/contrib/tensorrt/python/trt_convert.py
@@ -23,6 +23,7 @@ import six as _six
from tensorflow.contrib.tensorrt.wrap_conversion import calib_convert
from tensorflow.contrib.tensorrt.wrap_conversion import get_linked_tensorrt_version
from tensorflow.contrib.tensorrt.wrap_conversion import get_loaded_tensorrt_version
+from tensorflow.contrib.tensorrt.wrap_conversion import is_tensorrt_enabled
from tensorflow.contrib.tensorrt.wrap_conversion import trt_convert
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
diff --git a/tensorflow/contrib/tensorrt/resources/trt_allocator.h b/tensorflow/contrib/tensorrt/resources/trt_allocator.h
index c5d2cec730..97ac82ca5d 100644
--- a/tensorflow/contrib/tensorrt/resources/trt_allocator.h
+++ b/tensorflow/contrib/tensorrt/resources/trt_allocator.h
@@ -51,6 +51,9 @@ class TRTDeviceAllocator : public nvinfer1::IGpuAllocator {
// Allocator implementation wrapping TF device allocators.
public:
TRTDeviceAllocator(tensorflow::Allocator* allocator);
+
+ // TODO(aaroey): base class doesn't have a virtual destructor, work with
+ // Nvidia to fix it.
virtual ~TRTDeviceAllocator() {
VLOG(1) << "Destroying allocator attached to " << allocator_->Name();
}
diff --git a/tensorflow/contrib/tensorrt/resources/trt_int8_calibrator.cc b/tensorflow/contrib/tensorrt/resources/trt_int8_calibrator.cc
index 32e81858b9..dab1dd9343 100644
--- a/tensorflow/contrib/tensorrt/resources/trt_int8_calibrator.cc
+++ b/tensorflow/contrib/tensorrt/resources/trt_int8_calibrator.cc
@@ -36,13 +36,14 @@ TRTInt8Calibrator::TRTInt8Calibrator(
: batch_size_(batch_size),
done_(false),
dev_buffers_(dev_buffers),
+ // Make sure setBatch() waits until getBatch() is called (the first time).
calib_running_(true),
batch_is_set_(false),
engine_name_(engine_name) {}
TRTInt8Calibrator::TRTInt8Calibrator(const string& calib_data)
: batch_size_(0),
- done_(false),
+ done_(true),
calib_running_(false),
batch_is_set_(false),
calibration_table_(calib_data) {}
@@ -50,13 +51,14 @@ TRTInt8Calibrator::TRTInt8Calibrator(const string& calib_data)
bool TRTInt8Calibrator::setBatch(const std::unordered_map<string, void*>& data,
const cudaStream_t stream) {
tensorflow::mutex_lock lock(cond_mtx_);
- // wait while calibration is running.
- while ((calib_running_ || batch_is_set_) && !done_) {
- cond_.wait(lock);
- }
+
+ // Wait while the queue is full or calibration is running.
+ while ((calib_running_ || batch_is_set_) && !done_) cond_.wait(lock);
if (done_) return false;
CHECK(!calib_running_ && !batch_is_set_);
VLOG(1) << "Set Batch Waiting finished";
+
+ // Sets the batch.
for (const auto it : data) {
auto devptr = dev_buffers_.find(it.first);
if (devptr == dev_buffers_.end()) {
@@ -76,8 +78,8 @@ bool TRTInt8Calibrator::setBatch(const std::unordered_map<string, void*>& data,
}
// TODO(Sami, aaorey): Find an alternative way!
- cudaStreamSynchronize(
- stream); // we have to wait for the stream before returning!
+  // We have to wait for the stream before returning!
+ cudaStreamSynchronize(stream);
batch_is_set_ = true;
cond_.notify_all();
return true;
@@ -86,21 +88,21 @@ bool TRTInt8Calibrator::setBatch(const std::unordered_map<string, void*>& data,
bool TRTInt8Calibrator::getBatch(void** bindings, const char** names,
int num_bindings) {
tensorflow::mutex_lock lock(cond_mtx_);
+  // Notify that the last round of calibration has finished.
calib_running_ = false;
cond_.notify_all();
- // wait until new batch arrives
- while ((!batch_is_set_ && !done_)) {
- cond_.wait(lock);
- }
+
+  // Wait until a new batch arrives.
+ while ((!batch_is_set_ && !done_)) cond_.wait(lock);
if (done_) return false;
+ // Gets the batch
for (int i = 0; i < num_bindings; i++) {
auto it = dev_buffers_.find(names[i]);
if (it == dev_buffers_.end()) {
LOG(FATAL) << "Calibration engine asked for unknown tensor name '"
<< names[i] << "' at position " << i;
}
-
bindings[i] = it->second.first;
}
batch_is_set_ = false;
@@ -108,6 +110,17 @@ bool TRTInt8Calibrator::getBatch(void** bindings, const char** names,
return true;
}
+void TRTInt8Calibrator::waitAndSetDone() {
+ tensorflow::mutex_lock lock(cond_mtx_);
+ // Wait while the queue is full or calibration is running, so we don't miss
+ // the last batch.
+ while ((calib_running_ || batch_is_set_) && !done_) cond_.wait(lock);
+ if (!done_) {
+ done_ = true;
+ cond_.notify_all();
+ }
+}
+
const void* TRTInt8Calibrator::readCalibrationCache(std::size_t& length) {
if (calibration_table_.empty()) return nullptr;
length = calibration_table_.size();
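
setBatch(), getBatch(), and the new waitAndSetDone() together form a one-slot producer/consumer handshake over a condition variable: the producer blocks while a batch is pending or being consumed, the consumer announces the end of each round before waiting for the next batch, and shutdown waits for the slot to drain. A compact Python sketch of the same protocol (illustrative only; the real code also copies device buffers and synchronizes the CUDA stream):

    import threading

    class OneSlotQueue(object):
        """Mirrors TRTInt8Calibrator's locking protocol."""

        def __init__(self):
            self._cond = threading.Condition()
            self._batch = None
            self._consuming = True  # like calib_running_: blocks the first set
            self._done = False

        def set_batch(self, batch):  # producer, like setBatch()
            with self._cond:
                while (self._consuming or self._batch is not None) and not self._done:
                    self._cond.wait()
                if self._done:
                    return False
                self._batch = batch
                self._cond.notify_all()
                return True

        def get_batch(self):  # consumer, like getBatch()
            with self._cond:
                self._consuming = False  # last round has finished
                self._cond.notify_all()
                while self._batch is None and not self._done:
                    self._cond.wait()
                if self._done:
                    return None
                batch, self._batch = self._batch, None
                self._consuming = True
                return batch

        def wait_and_set_done(self):  # like waitAndSetDone()
            with self._cond:
                # Drain the slot first so the last batch is not missed.
                while (self._consuming or self._batch is not None) and not self._done:
                    self._cond.wait()
                self._done = True
                self._cond.notify_all()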
diff --git a/tensorflow/contrib/tensorrt/resources/trt_int8_calibrator.h b/tensorflow/contrib/tensorrt/resources/trt_int8_calibrator.h
index 994312d7c3..65466c9741 100644
--- a/tensorflow/contrib/tensorrt/resources/trt_int8_calibrator.h
+++ b/tensorflow/contrib/tensorrt/resources/trt_int8_calibrator.h
@@ -36,10 +36,13 @@ namespace tensorrt {
struct TRTInt8Calibrator : public nvinfer1::IInt8EntropyCalibrator {
public:
+ // Construct a calibrator for future calibration.
TRTInt8Calibrator(
const std::unordered_map<string, std::pair<void*, size_t>>& dev_buffers,
int batch_size, string engine_name);
+ // Construct a finalized calibrator where we don't need to run calibration any
+ // more, as the calibration data is provided.
TRTInt8Calibrator(const string& calibration_data);
~TRTInt8Calibrator();
@@ -52,6 +55,11 @@ struct TRTInt8Calibrator : public nvinfer1::IInt8EntropyCalibrator {
bool setBatch(const std::unordered_map<string, void*>& data,
const cudaStream_t stream);
+ // Wait until the last batch is consumed by the calibrator and set done.
+ void waitAndSetDone();
+
+ // Notify that calibration is done and future batches provided by setBatch()
+ // will be ignored.
void setDone();
// If not null, calibration is skipped.
diff --git a/tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc b/tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc
index 227ac120dd..f30dba59ad 100644
--- a/tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc
+++ b/tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc
@@ -28,36 +28,50 @@ limitations under the License.
namespace tensorflow {
namespace shape_inference {
-tensorflow::Status TRTEngineOpShapeInference(InferenceContext* context) {
- std::vector<tensorflow::TensorShape> shapes;
- for (int i = 0; i < context->num_outputs(); ++i) {
- context->set_output(i, context->UnknownShape());
+tensorflow::Status TRTEngineOpShapeInference(InferenceContext* c) {
+ for (int i = 0; i < c->num_outputs(); ++i) {
+ c->set_output(i, c->UnknownShape());
}
- auto status = context->GetAttr("input_shapes", &shapes);
- // it is ok to not to have shapes
- if (!status.ok()) return Status::OK();
- if ((int)shapes.size() != context->num_inputs()) return Status::OK();
- bool different_input = false;
- for (int i = 0; i < context->num_inputs(); ++i) {
- if (shapes.at(i) != context->input_tensor(i)->shape())
- different_input = true;
+
+ // Check the sanity of the input shapes.
+ std::vector<tensorflow::TensorShape> input_shapes;
+ TF_RETURN_IF_ERROR(c->GetAttr("input_shapes", &input_shapes));
+ if (input_shapes.size() != c->num_inputs()) {
+ return tensorflow::errors::InvalidArgument(
+ "The actual number of inputs doesn't match the number of input "
+ "shapes set in the attr: ",
+ c->num_inputs(), " vs ", input_shapes.size());
+ }
+ bool input_match = true;
+ for (int i = 0; i < c->num_inputs(); ++i) {
+ ShapeHandle handle;
+ TF_RETURN_IF_ERROR(
+ c->MakeShapeFromTensorShape(input_shapes.at(i), &handle));
+ ShapeHandle merged;
+ if (!c->Merge(c->input(i), handle, &merged).ok()) {
+      // Input shape doesn't match what was set in the attr; that's fine.
+ input_match = false;
+ }
}
- if (different_input) return Status::OK();
- shapes.resize(0);
- status = context->GetAttr("output_shapes", &shapes);
- if (!status.ok()) return Status::OK();
- if ((int)shapes.size() != context->num_outputs()) return Status::OK();
- std::vector<ShapeHandle> shape_handles(shapes.size());
- for (size_t i = 0; i < shapes.size(); ++i) {
- status =
- context->MakeShapeFromTensorShape(shapes.at(i), &shape_handles.at(i));
- if (!status.ok()) return Status::OK();
+
+ // Check the sanity of the output shapes.
+ std::vector<tensorflow::TensorShape> output_shapes;
+ TF_RETURN_IF_ERROR(c->GetAttr("output_shapes", &output_shapes));
+ if (output_shapes.size() != c->num_outputs()) {
+ return tensorflow::errors::InvalidArgument(
+ "The actual number of outputs doesn't match the number of output "
+ "shapes set in the attr: ",
+ c->num_outputs(), " vs ", output_shapes.size());
}
- for (int i = 0; i < context->num_outputs(); ++i) {
- context->set_output(i, shape_handles.at(i));
+ for (size_t i = 0; i < output_shapes.size(); ++i) {
+ ShapeHandle handle;
+ TF_RETURN_IF_ERROR(
+ c->MakeShapeFromTensorShape(output_shapes.at(i), &handle));
+ if (input_match) c->set_output(i, handle);
}
return Status::OK();
}
+
} // namespace shape_inference
} // namespace tensorflow
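
The rewritten shape function now fails loudly when the recorded shape attributes disagree with the op's arity, and it only propagates the recorded output shapes when every actual input shape merges cleanly with its recorded counterpart (the function is currently disabled at registration; see the TODO in trt_engine_op.cc above). A small Python sketch of that decision, with shapes as tuples and None for unknown (a hypothetical helper, not the InferenceContext API):

    def infer_outputs(actual_input_shapes, attr_input_shapes, attr_output_shapes):
        if len(attr_input_shapes) != len(actual_input_shapes):
            raise ValueError("input count does not match the input_shapes attr")
        input_match = all(
            a == b for a, b in zip(actual_input_shapes, attr_input_shapes))
        # Only trust the recorded output shapes when the inputs line up;
        # otherwise every output stays unknown.
        if input_match:
            return list(attr_output_shapes)
        return [None] * len(attr_output_shapes)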
diff --git a/tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py b/tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py
index 0403b652d7..7c3ef498c9 100644
--- a/tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py
+++ b/tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py
@@ -18,131 +18,336 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from collections import namedtuple
+import itertools
import warnings
import numpy as np
+import six
from tensorflow.contrib import tensorrt as trt
-from tensorflow.core.protobuf import config_pb2 as cpb2
-from tensorflow.python.framework import constant_op as cop
-from tensorflow.python.framework import dtypes as dtypes
-from tensorflow.python.framework import importer as importer
-from tensorflow.python.framework import ops as ops
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.core.protobuf import rewriter_config_pb2
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import importer
+from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
-from tensorflow.python.ops import array_ops as aops
-from tensorflow.python.ops import nn as nn
-from tensorflow.python.ops import nn_ops as nn_ops
-from tensorflow.python.platform import googletest
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import nn_ops
+from tensorflow.python.platform import test
+INPUT_NAME = "input"
+OUTPUT_NAME = "output"
+INPUT_DIMS = [100, 24, 24, 2]
+MODE_FP32 = "FP32"
+MODE_FP16 = "FP16"
+MODE_INT8 = "INT8"
-class IntegrationTest(test_util.TensorFlowTestCase):
+if six.PY2:
+ to_bytes = lambda s: s
+ to_string = lambda s: s
+else:
+ to_bytes = lambda s: s.encode("utf-8", errors="surrogateescape")
+ to_string = lambda s: s.decode("utf-8")
+
+
+# TODO(aaroey): test graph with different dtypes.
+def GetSingleEngineGraphDef(dtype=dtypes.float32):
+ """Create a graph containing single segment."""
+ g = ops.Graph()
+ with g.as_default():
+ inp = array_ops.placeholder(
+ dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
+ with g.device("/GPU:0"):
+ conv_filter = constant_op.constant(
+ [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
+ name="weights",
+ dtype=dtype)
+ conv = nn.conv2d(
+ input=inp,
+ filter=conv_filter,
+ strides=[1, 2, 2, 1],
+ padding="SAME",
+ name="conv")
+ bias = constant_op.constant(
+ [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtype)
+ added = nn.bias_add(conv, bias, name="bias_add")
+ relu = nn.relu(added, "relu")
+ identity = array_ops.identity(relu, "identity")
+ pool = nn_ops.max_pool(
+ identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
+ array_ops.squeeze(pool, name=OUTPUT_NAME)
+ return g.as_graph_def()
+
+
+# TODO(aaroey): test graph with different dtypes.
+def GetMultiEngineGraphDef(dtype=dtypes.float32):
+ """Create a graph containing multiple segment."""
+ g = ops.Graph()
+ with g.as_default():
+ inp = array_ops.placeholder(
+ dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
+ with g.device("/GPU:0"):
+ conv_filter = constant_op.constant(
+ [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
+ name="weights",
+ dtype=dtype)
+ conv = nn.conv2d(
+ input=inp,
+ filter=conv_filter,
+ strides=[1, 2, 2, 1],
+ padding="SAME",
+ name="conv")
+ c1 = constant_op.constant(
+ np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
+ p = conv * c1
+ c2 = constant_op.constant(
+ np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
+ q = conv / c2
+
+ edge = math_ops.sin(q)
+ edge /= edge
+ r = edge + edge
+
+ p -= edge
+ q *= edge
+ s = p + q
+ s -= r
+ array_ops.squeeze(s, name=OUTPUT_NAME)
+ return g.as_graph_def()
+
+
+TestGraph = namedtuple("TestGraph",
+ ["gdef", "num_expected_engines", "expected_output_dims"])
+
+TEST_GRAPHS = {
+ "SingleEngineGraph":
+ TestGraph(
+ gdef=GetSingleEngineGraphDef(),
+ num_expected_engines=1,
+ expected_output_dims=(100, 6, 6, 6)),
+ "MultiEngineGraph":
+ TestGraph(
+ gdef=GetMultiEngineGraphDef(),
+ num_expected_engines=2,
+ expected_output_dims=(100, 12, 12, 6)),
+ # TODO(aaroey): add a large complex graph to test.
+}
+
+
+class TfTrtIntegrationTest(test_util.TensorFlowTestCase):
"""Class to test Tensorflow-TensorRT integration."""
def setUp(self):
"""Setup method."""
- super(IntegrationTest, self).setUp()
+ super(TfTrtIntegrationTest, self).setUp()
warnings.simplefilter("always")
- inp_dims = (100, 24, 24, 2)
- self._input = np.random.random_sample(inp_dims)
- self._original_graph = self.get_simple_graph_def()
- self._gpu_options = cpb2.GPUOptions(per_process_gpu_memory_fraction=0.50)
- self._config = cpb2.ConfigProto(gpu_options=self._gpu_options)
- self._reference = self.run_graph(self._original_graph, self._input)
-
- def get_simple_graph_def(self):
- """Create a simple graph and return its graph_def."""
- g = ops.Graph()
- with g.as_default():
- a = aops.placeholder(
- dtype=dtypes.float32, shape=(None, 24, 24, 2), name="input")
- e = cop.constant(
- [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
- name="weights",
- dtype=dtypes.float32)
- conv = nn.conv2d(
- input=a, filter=e, strides=[1, 2, 2, 1], padding="SAME", name="conv")
- b = cop.constant(
- [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtypes.float32)
- t = nn.bias_add(conv, b, name="biasAdd")
- relu = nn.relu(t, "relu")
- idty = aops.identity(relu, "ID")
- v = nn_ops.max_pool(
- idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
- aops.squeeze(v, name="output")
- return g.as_graph_def()
-
- def run_graph(self, gdef, dumm_inp):
- """Run given graphdef once."""
- ops.reset_default_graph()
+ self._input = np.random.random_sample(INPUT_DIMS)
+
+ def _GetConfigProto(self,
+ use_optimizer,
+ precision_mode=None,
+ is_dynamic_op=None):
+ if use_optimizer:
+ rewriter_cfg = rewriter_config_pb2.RewriterConfig()
+ rewriter_cfg.optimizers.extend(["constfold", "layout"])
+ custom_op = rewriter_cfg.custom_optimizers.add()
+ custom_op.name = "TensorRTOptimizer"
+ custom_op.parameter_map["minimum_segment_size"].i = 3
+ custom_op.parameter_map["max_batch_size"].i = self._input.shape[0]
+ custom_op.parameter_map["is_dynamic_op"].b = is_dynamic_op
+ custom_op.parameter_map["max_workspace_size_bytes"].i = 1 << 25
+ custom_op.parameter_map["precision_mode"].s = to_bytes(precision_mode)
+ graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_cfg)
+ else:
+ graph_options = config_pb2.GraphOptions()
+
+ gpu_options = config_pb2.GPUOptions()
+ if trt.trt_convert.get_linked_tensorrt_version()[0] == 3:
+ gpu_options.per_process_gpu_memory_fraction = 0.50
+
+ config = config_pb2.ConfigProto(
+ gpu_options=gpu_options, graph_options=graph_options)
+ return config
+
+ def _RunGraph(self, graph_key, gdef, input_data, config, num_runs=2):
+ """Run given graphdef multiple times."""
g = ops.Graph()
with g.as_default():
inp, out = importer.import_graph_def(
- graph_def=gdef, return_elements=["input", "output"])
+ graph_def=gdef, return_elements=[INPUT_NAME, OUTPUT_NAME], name="")
inp = inp.outputs[0]
out = out.outputs[0]
with self.test_session(
- graph=g, config=self._config, use_gpu=True, force_gpu=True) as sess:
- val = sess.run(out, {inp: dumm_inp})
+ graph=g, config=config, use_gpu=True, force_gpu=True) as sess:
+ val = None
+        # Defaults to 2 runs to verify that the result is the same across runs.
+ for _ in range(num_runs):
+ new_val = sess.run(out, {inp: input_data})
+ self.assertEquals(TEST_GRAPHS[graph_key].expected_output_dims,
+ new_val.shape)
+ if val is not None:
+ self.assertAllEqual(new_val, val)
+ val = new_val
return val
# Use real data that is representative of the inference dataset
# for calibration. For this test script it is random data.
- def run_calibration(self, gdef, dumm_inp):
- """Run given calibration graph multiple times."""
- ops.reset_default_graph()
- g = ops.Graph()
- with g.as_default():
- inp, out = importer.import_graph_def(
- graph_def=gdef, return_elements=["input", "output"])
- inp = inp.outputs[0]
- out = out.outputs[0]
- # run over real calibration data here, we are mimicking a calibration
- # set of 30 different batches. Use as much calibration data as you want
- with self.test_session(
- graph=g, config=self._config, use_gpu=True, force_gpu=True) as sess:
- for _ in range(30):
- val = sess.run(out, {inp: dumm_inp})
- return val
+ def _RunCalibration(self, graph_key, gdef, input_data, config):
+ """Run calibration on given graph."""
+ return self._RunGraph(graph_key, gdef, input_data, config, 30)
- def get_trt_graph(self, mode):
+ def _GetTrtGraph(self, gdef, precision_mode, is_dynamic_op):
"""Return trt converted graph."""
- if mode in ["FP32", "FP16", "INT8"]:
- return trt.create_inference_graph(
- input_graph_def=self._original_graph,
- outputs=["output"],
- max_batch_size=self._input.shape[0],
- max_workspace_size_bytes=1 << 25,
- precision_mode=mode, # TRT Engine precision "FP32","FP16" or "INT8"
- minimum_segment_size=2 # minimum number of nodes in an engine
- )
- return None
-
- def testFP32(self):
- """Test FP32 conversion. Results should be identical to native case."""
- trt_graph = self.get_trt_graph("FP32")
- result = self.run_graph(trt_graph, self._input)
- self.assertAllEqual(self._reference, result)
- result1 = self.run_graph(trt_graph, self._input)
- self.assertAllEqual(result1, result)
-
- def testFP16(self):
- """Test FP16 conversion. Results may be different from native case."""
- trt_graph = self.get_trt_graph("FP16")
- result = self.run_graph(trt_graph, self._input)
- self.assertAllClose(self._reference, result, rtol=1.e-03)
- result1 = self.run_graph(trt_graph, self._input)
- self.assertAllEqual(result1, result)
-
- def testINT8(self):
- """Test INT8 conversion. Results may be different from native case."""
- calib_graph = self.get_trt_graph("INT8")
- result = self.run_calibration(calib_graph, self._input)
- self.assertAllEqual(self._reference, result)
- int8_graph = trt.calib_graph_to_infer_graph(calib_graph)
- result = self.run_graph(int8_graph, self._input)
- self.assertAllClose(self._reference, result, rtol=1.e-03)
- result1 = self.run_graph(int8_graph, self._input)
- self.assertAllEqual(result1, result)
+ return trt.create_inference_graph(
+ input_graph_def=gdef,
+ outputs=[OUTPUT_NAME],
+ max_batch_size=self._input.shape[0],
+ max_workspace_size_bytes=1 << 25,
+ precision_mode=precision_mode,
+ minimum_segment_size=2,
+ is_dynamic_op=is_dynamic_op)
+
+ def _VerifyGraphDef(self,
+ graph_key,
+ gdef,
+ precision_mode=None,
+ is_calibrated=None,
+ dynamic_engine=None):
+ num_engines = 0
+ for n in gdef.node:
+ if n.op == "TRTEngineOp":
+ num_engines += 1
+ self.assertNotEqual("", n.attr["serialized_segment"].s)
+ self.assertNotEqual("", n.attr["segment_funcdef_name"].s)
+ self.assertEquals(n.attr["precision_mode"].s, precision_mode)
+ self.assertEquals(n.attr["static_engine"].b, not dynamic_engine)
+ if precision_mode == MODE_INT8 and is_calibrated:
+ self.assertNotEqual("", n.attr["calibration_data"].s)
+ else:
+ self.assertEquals("", n.attr["calibration_data"].s)
+ if precision_mode is None:
+ self.assertEquals(num_engines, 0)
+ else:
+ self.assertEquals(num_engines,
+ TEST_GRAPHS[graph_key].num_expected_engines)
+
+ def _RunTest(self, graph_key, use_optimizer, precision_mode,
+ dynamic_infer_engine, dynamic_calib_engine):
+ assert precision_mode in [MODE_FP32, MODE_FP16, MODE_INT8]
+ input_gdef = TEST_GRAPHS[graph_key].gdef
+ self._VerifyGraphDef(graph_key, input_gdef)
+
+ # Get reference result without running trt.
+ config_no_trt = self._GetConfigProto(False)
+ print("Running original graph w/o trt, config:\n%s" % str(config_no_trt))
+ ref_result = self._RunGraph(graph_key, input_gdef, self._input,
+ config_no_trt)
+
+ # Run calibration if necessary.
+ if precision_mode == MODE_INT8:
+
+ calib_config = self._GetConfigProto(use_optimizer, precision_mode,
+ dynamic_calib_engine)
+ print("Running calibration graph, config:\n%s" % str(calib_config))
+ if use_optimizer:
+ self.assertTrue(False)
+ # TODO(aaroey): uncomment this and get infer_gdef when this mode is
+ # supported.
+ # result = self._RunCalibration(graph_key, input_gdef, self._input,
+ # calib_config)
+ else:
+ calib_gdef = self._GetTrtGraph(input_gdef, precision_mode,
+ dynamic_calib_engine)
+ self._VerifyGraphDef(graph_key, calib_gdef, precision_mode, False,
+ dynamic_calib_engine)
+ result = self._RunCalibration(graph_key, calib_gdef, self._input,
+ calib_config)
+ infer_gdef = trt.calib_graph_to_infer_graph(calib_gdef)
+ self._VerifyGraphDef(graph_key, infer_gdef, precision_mode, True,
+ dynamic_calib_engine)
+ self.assertAllClose(ref_result, result, rtol=1.e-03)
+ else:
+ infer_gdef = input_gdef
+
+ # Run inference.
+ infer_config = self._GetConfigProto(use_optimizer, precision_mode,
+ dynamic_infer_engine)
+ print("Running final inference graph, config:\n%s" % str(infer_config))
+ if use_optimizer:
+ result = self._RunGraph(graph_key, infer_gdef, self._input, infer_config)
+ else:
+ trt_infer_gdef = self._GetTrtGraph(infer_gdef, precision_mode,
+ dynamic_infer_engine)
+ self._VerifyGraphDef(graph_key, trt_infer_gdef, precision_mode, True,
+ dynamic_infer_engine)
+ result = self._RunGraph(graph_key, trt_infer_gdef, self._input,
+ infer_config)
+ self.assertAllClose(ref_result, result, rtol=1.e-03)
+
+ def testIdempotence(self):
+    # Test that applying the tensorrt optimizer or the offline conversion
+    # tools multiple times to the same graph results in the same graph.
+ #
+    # TODO(aaroey): currently the conversion is not deterministic, mainly
+    # because during tensorflow::ConvertGraphDefToGraph() the graph uses
+    # EdgeSet, which uses a map keyed by Edge*, so the order of input/output
+    # edges of a node is nondeterministic, and thus the order in which the
+    # segmenter contracts edges is nondeterministic. Need to evaluate whether
+    # we should fix this.
+ pass
+
+
+def GetTests():
+
+ def _GetTest(g, u, p, i, c):
+
+ def _Test(self):
+ print("Running test with parameters: graph_key=%s, use_optimizer=%s, "
+ "precision_mode=%s, dynamic_infer_engine=%s, "
+ "dynamic_calib_engine=%s" % (g, u, p, i, c))
+ self._RunTest(g, u, p, i, c)
+
+ return _Test
+
+ use_optimizer_options = [False, True]
+ precision_mode_options = [MODE_FP32, MODE_FP16, MODE_INT8]
+ dynamic_infer_engine_options = [False, True]
+ dynamic_calib_engine_options = [False, True]
+ for (graph_key, use_optimizer, precision_mode,
+ dynamic_infer_engine, dynamic_calib_engine) in itertools.product(
+ TEST_GRAPHS, use_optimizer_options, precision_mode_options,
+ dynamic_infer_engine_options, dynamic_calib_engine_options):
+ if precision_mode == MODE_INT8:
+ if not dynamic_calib_engine and dynamic_infer_engine:
+ # TODO(aaroey): test this case, the conversion from static calibration
+ # engine to dynamic inference engine should be a noop.
+ continue
+ if use_optimizer:
+ # TODO(aaroey): if use_optimizer is True we need to get the inference
+ # graphdef using custom python wrapper class, which is not currently
+ # supported yet.
+ continue
+ if not dynamic_calib_engine:
+ # TODO(aaroey): construction of static calibration engine is not
+ # supported yet.
+ continue
+ if dynamic_calib_engine and not dynamic_infer_engine:
+ # TODO(aaroey): construction of static inference engine using dynamic
+ # calibration engine is not supported yet.
+ continue
+ else: # In non int8 mode.
+ if dynamic_calib_engine:
+ # dynamic_calib_engine doesn't affect non-int8 modes, so just let
+ # related tests run once on dynamic_calib_engine=False.
+ continue
+ yield _GetTest(graph_key, use_optimizer, precision_mode,
+ dynamic_infer_engine, dynamic_calib_engine)
if __name__ == "__main__":
- googletest.main()
+ if trt.is_tensorrt_enabled():
+ for index, t in enumerate(GetTests()):
+ setattr(TfTrtIntegrationTest, "testTfTRT_" + str(index), t)
+ test.main()
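
For reference, the INT8 branch of _RunTest above follows a three-step workflow that the old testINT8 spelled out inline: convert with precision_mode="INT8" to get a calibration graph, run that graph on data representative of the inference set, then finalize the engines. A condensed sketch (session boilerplate omitted; frozen_gdef and run_calibration are assumed names):

    from tensorflow.contrib import tensorrt as trt

    # 1. Build the calibration graph.
    calib_gdef = trt.create_inference_graph(
        input_graph_def=frozen_gdef,
        outputs=["output"],
        max_batch_size=100,
        max_workspace_size_bytes=1 << 25,
        precision_mode="INT8",
        minimum_segment_size=2)

    # 2. Run it on representative batches (the test uses 30 random ones).
    run_calibration(calib_gdef, calibration_batches)

    # 3. Replace the calibration nodes with INT8 engines.
    int8_gdef = trt.calib_graph_to_infer_graph(calib_gdef)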
diff --git a/tensorflow/contrib/tensorrt/trt_conversion.i b/tensorflow/contrib/tensorrt/trt_conversion.i
index d51a0b59e2..422740fdf6 100644
--- a/tensorflow/contrib/tensorrt/trt_conversion.i
+++ b/tensorflow/contrib/tensorrt/trt_conversion.i
@@ -100,6 +100,7 @@ _LIST_OUTPUT_TYPEMAP(int, PyLong_FromLong);
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/util/stat_summarizer.h"
#include "tensorflow/contrib/tensorrt/convert/convert_graph.h"
+#include "tensorflow/contrib/tensorrt/convert/utils.h"
%}
%ignoreall
@@ -108,6 +109,7 @@ _LIST_OUTPUT_TYPEMAP(int, PyLong_FromLong);
%unignore calib_convert;
%unignore get_linked_tensorrt_version;
%unignore get_loaded_tensorrt_version;
+%unignore is_tensorrt_enabled;
%{
@@ -140,7 +142,7 @@ std::pair<string, string> trt_convert(
return std::pair<string, string>{out_status, ""};
}
- if(precision_mode < 0 || precision_mode > 2){
+ if (precision_mode < 0 || precision_mode > 2) {
out_status = "InvalidArgument;Invalid precision_mode";
return std::pair<string, string>{out_status, ""};
}
@@ -221,25 +223,34 @@ std::pair<string, string> calib_convert(
#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
}
-version_struct get_linked_tensorrt_version(){
+version_struct get_linked_tensorrt_version() {
// Return the version at the link time.
- const auto &lv = tensorflow::tensorrt::convert::GetLinkedTensorRTVersion();
version_struct s;
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
+ const auto &lv = tensorflow::tensorrt::convert::GetLinkedTensorRTVersion();
s.vmajor = lv[0];
s.vminor = lv[1];
s.vpatch = lv[2];
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
return s;
}
-version_struct get_loaded_tensorrt_version(){
+
+version_struct get_loaded_tensorrt_version() {
// Return the version from the loaded library.
- const auto &lv = tensorflow::tensorrt::convert::GetLoadedTensorRTVersion();
version_struct s;
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
+ const auto &lv = tensorflow::tensorrt::convert::GetLoadedTensorRTVersion();
s.vmajor = lv[0];
s.vminor = lv[1];
s.vpatch = lv[2];
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
return s;
}
+bool is_tensorrt_enabled() {
+ return tensorflow::tensorrt::IsGoogleTensorRTEnabled();
+}
+
%}
std::pair<string, string> calib_convert(string graph_def_string, bool is_dyn_op);
@@ -254,5 +265,6 @@ std::pair<string, string> trt_convert(string graph_def_string,
std::vector<int> cached_engine_batches);
version_struct get_linked_tensorrt_version();
version_struct get_loaded_tensorrt_version();
+bool is_tensorrt_enabled();
%unignoreall
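
With the #if guards above, both version getters return a zero-initialized struct in builds without TensorRT, so Python callers can compare the linked and loaded versions without crashing. A sketch of that check (assuming, as the integration test does, that the result is indexable as (major, minor, patch)):

    from tensorflow.contrib import tensorrt as trt

    linked = trt.trt_convert.get_linked_tensorrt_version()
    loaded = trt.trt_convert.get_loaded_tensorrt_version()
    # All zeros means this build has no TensorRT support at all.
    if linked[0] != loaded[0]:
        print("TensorRT major version mismatch: linked %s, loaded %s"
              % (linked, loaded))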
diff --git a/tensorflow/contrib/timeseries/python/timeseries/BUILD b/tensorflow/contrib/timeseries/python/timeseries/BUILD
index e4963596d3..7020989d68 100644
--- a/tensorflow/contrib/timeseries/python/timeseries/BUILD
+++ b/tensorflow/contrib/timeseries/python/timeseries/BUILD
@@ -157,6 +157,7 @@ py_library(
py_test(
name = "head_test",
+ size = "large",
srcs = [
"head_test.py",
],
@@ -184,6 +185,7 @@ py_test(
"//tensorflow/python/saved_model:loader",
"//tensorflow/python/saved_model:tag_constants",
"//third_party/py/numpy",
+ "@absl_py//absl/testing:parameterized",
"@six_archive//:six",
],
)
diff --git a/tensorflow/contrib/timeseries/python/timeseries/estimators.py b/tensorflow/contrib/timeseries/python/timeseries/estimators.py
index 4ec8d26116..769183f40a 100644
--- a/tensorflow/contrib/timeseries/python/timeseries/estimators.py
+++ b/tensorflow/contrib/timeseries/python/timeseries/estimators.py
@@ -288,7 +288,7 @@ class StateSpaceRegressor(TimeSeriesRegressor):
"""An Estimator for general state space models."""
def __init__(self, model, state_manager=None, optimizer=None, model_dir=None,
- config=None):
+ config=None, head_type=ts_head_lib.TimeSeriesRegressionHead):
"""See TimeSeriesRegressor. Uses the ChainingStateManager by default."""
if not isinstance(model, state_space_model.StateSpaceModel):
raise ValueError(
@@ -301,7 +301,8 @@ class StateSpaceRegressor(TimeSeriesRegressor):
state_manager=state_manager,
optimizer=optimizer,
model_dir=model_dir,
- config=config)
+ config=config,
+ head_type=head_type)
class StructuralEnsembleRegressor(StateSpaceRegressor):
@@ -344,7 +345,8 @@ class StructuralEnsembleRegressor(StateSpaceRegressor):
anomaly_prior_probability=None,
optimizer=None,
model_dir=None,
- config=None):
+ config=None,
+ head_type=ts_head_lib.TimeSeriesRegressionHead):
"""Initialize the Estimator.
Args:
@@ -401,6 +403,8 @@ class StructuralEnsembleRegressor(StateSpaceRegressor):
from tf.train.Optimizer. Defaults to Adam with step size 0.02.
model_dir: See `Estimator`.
config: See `Estimator`.
+ head_type: The kind of head to use for the model (inheriting from
+ `TimeSeriesRegressionHead`).
"""
if anomaly_prior_probability is not None:
filtering_postprocessor = StateInterpolatingAnomalyDetector(
@@ -424,4 +428,5 @@ class StructuralEnsembleRegressor(StateSpaceRegressor):
model=model,
optimizer=optimizer,
model_dir=model_dir,
- config=config)
+ config=config,
+ head_type=head_type)
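
The new head_type argument threads through both estimators, which lets callers swap in OneShotPredictionHead (as the parameterized test below does) without subclassing. A usage sketch with abbreviated constructor arguments (the model_dir value is assumed):

    from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
    from tensorflow.contrib.timeseries.python.timeseries import head as ts_head_lib

    estimator = ts_estimators.StructuralEnsembleRegressor(
        periodicities=None,
        num_features=5,
        head_type=ts_head_lib.OneShotPredictionHead,  # overrides the default
        model_dir="/tmp/ts_model")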
diff --git a/tensorflow/contrib/timeseries/python/timeseries/head.py b/tensorflow/contrib/timeseries/python/timeseries/head.py
index f236329fdb..8686a803e5 100644
--- a/tensorflow/contrib/timeseries/python/timeseries/head.py
+++ b/tensorflow/contrib/timeseries/python/timeseries/head.py
@@ -19,11 +19,7 @@ from __future__ import print_function
import re
-from tensorflow.python.training import training_util
-from tensorflow.contrib.layers.python.layers import optimizers
-
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
-
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import metric_keys
@@ -35,8 +31,9 @@ from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
-from tensorflow.python.util import nest
from tensorflow.python.summary import summary
+from tensorflow.python.training import training_util
+from tensorflow.python.util import nest
class _NoStatePredictOutput(export_lib.PredictOutput):
@@ -102,12 +99,9 @@ class TimeSeriesRegressionHead(head_lib._Head): # pylint:disable=protected-acce
use_resource=True):
model_outputs = self.create_loss(features, mode)
- train_op = optimizers.optimize_loss(
+ train_op = self.optimizer.minimize(
model_outputs.loss,
- global_step=training_util.get_global_step(),
- optimizer=self.optimizer,
- # Learning rate is set in the Optimizer object
- learning_rate=None)
+ global_step=training_util.get_global_step())
return estimator_lib.EstimatorSpec(
loss=model_outputs.loss,
mode=mode,
diff --git a/tensorflow/contrib/timeseries/python/timeseries/head_test.py b/tensorflow/contrib/timeseries/python/timeseries/head_test.py
index ed8f29c321..78c2cec21c 100644
--- a/tensorflow/contrib/timeseries/python/timeseries/head_test.py
+++ b/tensorflow/contrib/timeseries/python/timeseries/head_test.py
@@ -18,6 +18,9 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import os
+
+from absl.testing import parameterized
import numpy
import six
@@ -317,10 +320,38 @@ class PredictFeatureCheckingTests(test.TestCase):
mode=estimator_lib.ModeKeys.PREDICT)
-class OneShotTests(test.TestCase):
-
- def test_one_shot_prediction_head_export(self):
- model_dir = self.get_temp_dir()
+def _custom_time_series_regressor(
+ model_dir, head_type, exogenous_feature_columns):
+ return ts_estimators.TimeSeriesRegressor(
+ model=lstm_example._LSTMModel(
+ num_features=5, num_units=128,
+ exogenous_feature_columns=exogenous_feature_columns),
+ optimizer=adam.AdamOptimizer(0.001),
+ config=estimator_lib.RunConfig(tf_random_seed=4),
+ state_manager=state_management.ChainingStateManager(),
+ head_type=head_type,
+ model_dir=model_dir)
+
+
+def _structural_ensemble_regressor(
+ model_dir, head_type, exogenous_feature_columns):
+ return ts_estimators.StructuralEnsembleRegressor(
+ periodicities=None,
+ num_features=5,
+ exogenous_feature_columns=exogenous_feature_columns,
+ head_type=head_type,
+ model_dir=model_dir)
+
+
+class OneShotTests(parameterized.TestCase):
+
+ @parameterized.named_parameters(
+ {"testcase_name": "custom_time_series_regressor",
+ "estimator_factory": _custom_time_series_regressor},
+ {"testcase_name": "structural_ensemble_regressor",
+ "estimator_factory": _structural_ensemble_regressor})
+ def test_one_shot_prediction_head_export(self, estimator_factory):
+ model_dir = os.path.join(test.get_temp_dir(), str(ops.uid()))
categorical_column = feature_column.categorical_column_with_hash_bucket(
key="categorical_exogenous_feature", hash_bucket_size=16)
exogenous_feature_columns = [
@@ -328,15 +359,10 @@ class OneShotTests(test.TestCase):
"2d_exogenous_feature", shape=(2,)),
feature_column.embedding_column(
categorical_column=categorical_column, dimension=10)]
- estimator = ts_estimators.TimeSeriesRegressor(
- model=lstm_example._LSTMModel(
- num_features=5, num_units=128,
- exogenous_feature_columns=exogenous_feature_columns),
- optimizer=adam.AdamOptimizer(0.001),
- config=estimator_lib.RunConfig(tf_random_seed=4),
- state_manager=state_management.ChainingStateManager(),
- head_type=ts_head_lib.OneShotPredictionHead,
- model_dir=model_dir)
+ estimator = estimator_factory(
+ model_dir=model_dir,
+ exogenous_feature_columns=exogenous_feature_columns,
+ head_type=ts_head_lib.OneShotPredictionHead)
train_features = {
feature_keys.TrainEvalFeatures.TIMES: numpy.arange(
20, dtype=numpy.int64),
@@ -351,7 +377,7 @@ class OneShotTests(test.TestCase):
num_threads=1, batch_size=16, window_size=16)
estimator.train(input_fn=train_input_fn, steps=5)
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
- export_location = estimator.export_savedmodel(self.get_temp_dir(),
+ export_location = estimator.export_savedmodel(test.get_temp_dir(),
input_receiver_fn)
graph = ops.Graph()
with graph.as_default():
@@ -385,7 +411,7 @@ class OneShotTests(test.TestCase):
for output_key, output_value
in predict_signature.outputs.items()}
output = session.run(fetches, feed_dict=feeds)
- self.assertAllEqual((2, 15, 5), output["mean"].shape)
+ self.assertEqual((2, 15, 5), output["mean"].shape)
if __name__ == "__main__":
diff --git a/tensorflow/contrib/tpu/BUILD b/tensorflow/contrib/tpu/BUILD
index 16696793bc..ef6c752851 100644
--- a/tensorflow/contrib/tpu/BUILD
+++ b/tensorflow/contrib/tpu/BUILD
@@ -16,7 +16,6 @@ package(
"//cloud/vmm/testing/tests/tpu:__subpackages__",
"//learning/brain:__subpackages__",
"//tensorflow:__subpackages__",
- "//third_party/cloud_tpu:__subpackages__",
],
)
@@ -161,12 +160,44 @@ py_library(
)
py_library(
+ name = "keras_support",
+ srcs = [
+ "python/tpu/keras_support.py",
+ ],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":tpu_lib",
+ ":tpu_py",
+ "//tensorflow/contrib/cluster_resolver:tpu_cluster_resolver_py",
+ "//tensorflow/contrib/distribute/python:tpu_strategy",
+ "//tensorflow/contrib/framework:framework_py",
+ "//tensorflow/contrib/tpu/proto:compilation_result_proto_py",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:dtypes",
+ "//tensorflow/python:framework_ops",
+ "//tensorflow/python:linalg_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:session",
+ "//tensorflow/python:tensor_spec",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python/data/ops:dataset_ops",
+ "//tensorflow/python/estimator:model_fn",
+ "//tensorflow/python/keras:backend",
+ "//tensorflow/python/keras:engine",
+ "//tensorflow/python/keras:layers",
+ "//third_party/py/numpy",
+ ],
+)
+
+py_library(
name = "tpu_lib",
srcs = [
"python/tpu/__init__.py",
"python/tpu/bfloat16.py",
"python/tpu/device_assignment.py",
- "python/tpu/keras_support.py",
"python/tpu/session_support.py",
"python/tpu/topology.py",
"python/tpu/tpu.py",
@@ -307,3 +338,13 @@ tf_py_test(
"//tensorflow/python:framework_test_lib",
],
)
+
+tf_py_test(
+ name = "topology_test",
+ size = "small",
+ srcs = ["python/tpu/topology_test.py"],
+ additional_deps = [
+ ":tpu",
+ "//tensorflow/python:framework_test_lib",
+ ],
+)
diff --git a/tensorflow/contrib/tpu/__init__.py b/tensorflow/contrib/tpu/__init__.py
index dc90668559..d5484e9032 100644
--- a/tensorflow/contrib/tpu/__init__.py
+++ b/tensorflow/contrib/tpu/__init__.py
@@ -42,9 +42,11 @@
@@TPUEstimator
@@TPUEstimatorSpec
+@@export_estimator_savedmodel
@@RunConfig
@@InputPipelineConfig
@@TPUConfig
+@@bfloat16_scope
"""
from __future__ import absolute_import
diff --git a/tensorflow/contrib/tpu/profiler/pip_package/cloud_tpu_profiler/main.py b/tensorflow/contrib/tpu/profiler/pip_package/cloud_tpu_profiler/main.py
index 7f1d25732e..7a5d01cca4 100644
--- a/tensorflow/contrib/tpu/profiler/pip_package/cloud_tpu_profiler/main.py
+++ b/tensorflow/contrib/tpu/profiler/pip_package/cloud_tpu_profiler/main.py
@@ -17,12 +17,11 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from absl import flags
-
import os
import subprocess
import sys
-
+from absl import flags
+from distutils.version import LooseVersion
import tensorflow as tf
# Cloud TPU Cluster Resolvers
@@ -35,9 +34,9 @@ flags.DEFINE_string(
None,
help='GCE zone where the Cloud TPU is located. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
-flags.DEFINE_string('tpu', None,
- 'Name of the Cloud TPU for Cluster Resolvers. You must '
- 'specify either this flag or --service_addr.')
+flags.DEFINE_string(
+ 'tpu', None, 'Name of the Cloud TPU for Cluster Resolvers. You must '
+ 'specify either this flag or --service_addr.')
# Tool specific parameters
flags.DEFINE_string(
@@ -48,13 +47,13 @@ flags.DEFINE_string(
' e.g. 10.0.1.2, 10.0.1.3. You can specify this flag with --tpu or '
'--service_addr to profile a subset of tpu nodes. You can also use only'
'--tpu and leave this flag unspecified to profile all the tpus.')
-flags.DEFINE_string('logdir', None,
- 'Path of TensorBoard log directory e.g. /tmp/tb_log, '
- 'gs://tb_bucket')
+flags.DEFINE_string(
+ 'logdir', None, 'Path of TensorBoard log directory e.g. /tmp/tb_log, '
+ 'gs://tb_bucket')
flags.DEFINE_integer('duration_ms', 2000, 'Duration of tracing in ms.')
-flags.DEFINE_integer('num_tracing_attempts', 3,
- 'Automatically retry N times when no trace '
- 'event is collected.')
+flags.DEFINE_integer(
+ 'num_tracing_attempts', 3, 'Automatically retry N times when no trace '
+ 'event is collected.')
flags.DEFINE_boolean('include_dataset_ops', True,
'Set to false to profile longer TPU '
'device traces.')
@@ -63,18 +62,24 @@ FLAGS = flags.FLAGS
EXECUTABLE = 'data/capture_tpu_profile'
JOB_NAME = 'worker'
+
def get_workers_list(cluster_resolver):
cluster_spec = cluster_resolver.cluster_spec()
task_indices = cluster_spec.task_indices(JOB_NAME)
- workers_list = [cluster_spec.task_address(JOB_NAME, i).split(':')[0]
- for i in task_indices]
+ workers_list = [
+ cluster_spec.task_address(JOB_NAME, i).split(':')[0] for i in task_indices
+ ]
return ','.join(workers_list)
+
def run_main():
tf.app.run(main)
+
def main(unused_argv=None):
tf.logging.set_verbosity(tf.logging.INFO)
+ tf_version = tf.__version__
+ print('TensorFlow version %s detected' % tf_version)
if FLAGS.service_addr is None and FLAGS.tpu is None:
sys.exit('You must specify either --service_addr or --tpu.')
@@ -88,17 +93,19 @@ def main(unused_argv=None):
else:
tpu_cluster_resolver = (
tf.contrib.cluster_resolver.TPUClusterResolver(
- [FLAGS.tpu],
- zone=FLAGS.tpu_zone,
- project=FLAGS.gcp_project))
+ [FLAGS.tpu], zone=FLAGS.tpu_zone, project=FLAGS.gcp_project))
service_addr = tpu_cluster_resolver.get_master()
service_addr = service_addr.replace('grpc://', '').replace(':8470', ':8466')
- workers_list = ""
- if FLAGS.workers_list is not None:
- workers_list = FLAGS.workers_list
- elif tpu_cluster_resolver is not None:
- workers_list = get_workers_list(tpu_cluster_resolver)
+ workers_list = ''
+ if LooseVersion(tf_version) < LooseVersion('1.9'):
+    tf.logging.warn('Attempting to profile with legacy support under '
+                    'TensorFlow version %s' % tf_version)
+ else:
+ if FLAGS.workers_list is not None:
+ workers_list = FLAGS.workers_list
+ elif tpu_cluster_resolver is not None:
+ workers_list = get_workers_list(tpu_cluster_resolver)
if not FLAGS.logdir:
sys.exit('logdir must be provided.')
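The version gate above relies on `distutils.version.LooseVersion`, which compares dotted version strings component-wise instead of lexically. A small sketch of the comparison that decides between the legacy path and the workers-list path:

```
# Sketch of the LooseVersion gate used in main(); note that plain string
# comparison would wrongly order '1.10.1' before '1.9'.
from distutils.version import LooseVersion

def uses_workers_list(tf_version):
  return LooseVersion(tf_version) >= LooseVersion('1.9')

assert not uses_workers_list('1.8.0')  # legacy path, workers_list stays ''
assert uses_workers_list('1.9.0')
assert uses_workers_list('1.10.1')     # numeric, not lexicographic, ordering
```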
diff --git a/tensorflow/contrib/tpu/profiler/pip_package/setup.py b/tensorflow/contrib/tpu/profiler/pip_package/setup.py
index f97a972f01..19f088f8b8 100644
--- a/tensorflow/contrib/tpu/profiler/pip_package/setup.py
+++ b/tensorflow/contrib/tpu/profiler/pip_package/setup.py
@@ -20,7 +20,7 @@ from __future__ import print_function
from setuptools import setup
-_VERSION = '1.7.0'
+_VERSION = '1.9.0'
CONSOLE_SCRIPTS = [
'capture_tpu_profile=cloud_tpu_profiler.main:run_main',
diff --git a/tensorflow/contrib/tpu/profiler/version.h b/tensorflow/contrib/tpu/profiler/version.h
index bd9ba6697e..1bf49966d1 100644
--- a/tensorflow/contrib/tpu/profiler/version.h
+++ b/tensorflow/contrib/tpu/profiler/version.h
@@ -16,6 +16,6 @@ limitations under the License.
#ifndef TENSORFLOW_CONTRIB_TPU_PROFILER_VERSION_H_
#define TENSORFLOW_CONTRIB_TPU_PROFILER_VERSION_H_
-#define TPU_PROFILER_VERSION "1.7.0"
+#define TPU_PROFILER_VERSION "1.9.0"
#endif // TENSORFLOW_CONTRIB_TPU_PROFILER_VERSION_H_
diff --git a/tensorflow/contrib/tpu/proto/BUILD b/tensorflow/contrib/tpu/proto/BUILD
index 7ecb36852c..26016f47df 100644
--- a/tensorflow/contrib/tpu/proto/BUILD
+++ b/tensorflow/contrib/tpu/proto/BUILD
@@ -2,7 +2,12 @@ licenses(["notice"]) # Apache 2.0
exports_files(["LICENSE"])
-load("//tensorflow/core:platform/default/build_config.bzl", "tf_proto_library")
+load(
+ "//tensorflow/core:platform/default/build_config.bzl",
+ "tf_additional_all_protos",
+ "tf_proto_library",
+ "tf_proto_library_py",
+)
tf_proto_library(
name = "tpu_embedding_config_proto",
@@ -22,12 +27,14 @@ tf_proto_library(
visibility = ["//visibility:public"],
)
-tf_proto_library(
+tf_proto_library_py(
name = "compilation_result_proto",
srcs = [
"compilation_result.proto",
],
- cc_api_version = 2,
- protodeps = ["//tensorflow/core:protos_all"],
+ protodeps = tf_additional_all_protos() + [
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/service:hlo_proto",
+ ],
visibility = ["//visibility:public"],
)
diff --git a/tensorflow/contrib/tpu/proto/compilation_result.proto b/tensorflow/contrib/tpu/proto/compilation_result.proto
index cf52897de3..88585a5bd1 100644
--- a/tensorflow/contrib/tpu/proto/compilation_result.proto
+++ b/tensorflow/contrib/tpu/proto/compilation_result.proto
@@ -3,6 +3,7 @@ syntax = "proto3";
option cc_enable_arenas = true;
package tensorflow.tpu;
+import "tensorflow/compiler/xla/service/hlo.proto";
import "tensorflow/core/lib/core/error_codes.proto";
// Describes the result of a TPU compilation.
@@ -10,4 +11,7 @@ message CompilationResultProto {
// The error message, if any, returned during compilation.
error.Code status_code = 1;
string status_error_message = 2;
+
+ // HLO proto.
+ repeated xla.HloProto hlo_protos = 3;
}
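For context, the Python side parses this proto from the serialized result of the TPU compile op (see `_test_model_compiles` later in this diff). A hedged sketch of reading the new repeated field, assuming `result_bytes` holds a serialized `CompilationResultProto`:

```
# Sketch only: `result_bytes` is an assumed serialized
# CompilationResultProto obtained by running a TPU compile op.
from tensorflow.contrib.tpu.proto import compilation_result_pb2

proto = compilation_result_pb2.CompilationResultProto()
proto.ParseFromString(result_bytes)
if proto.status_error_message:
  raise RuntimeError('Compilation failed: %s' % proto.status_error_message)
for hlo in proto.hlo_protos:  # new repeated field, one HloProto per module
  print(hlo.hlo_module.name)
```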
diff --git a/tensorflow/contrib/tpu/python/tpu/keras_support.py b/tensorflow/contrib/tpu/python/tpu/keras_support.py
index 293e162059..8292c920fc 100644
--- a/tensorflow/contrib/tpu/python/tpu/keras_support.py
+++ b/tensorflow/contrib/tpu/python/tpu/keras_support.py
@@ -19,15 +19,16 @@ To use, wrap your model with the `keras_support.tpu_model` function.
Example usage:
```
-# Must activate before building TPU models
-keras_support.setup_tpu_session(master_address)
-
image = tf.keras.layers.Input(shape=(28, 28, 3), name='image')
c1 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3))(image)
flattened = tf.keras.layers.Flatten()(c1)
logits = tf.keras.layers.Dense(10, activation='softmax')(flattened)
model = tf.keras.Model(inputs=[image], outputs=[logits])
-model = keras_support.tpu_model(model)
+
+strategy = keras_support.TPUDistributionStrategy(num_cores_per_host=8)
+model = keras_support.tpu_model(model,
+ strategy=strategy,
+ tpu_name_or_address=tpu_name)
# Only TF optimizers are currently supported.
model.compile(optimizer=tf.train.AdamOptimizer(), ...)
@@ -35,9 +36,6 @@ model.compile(optimizer=tf.train.AdamOptimizer(), ...)
# `images` and `labels` should be Numpy arrays. Support for tensor input
# (e.g. datasets) is planned.
model.fit(images, labels)
-
-# Invoke before shutting down
-keras_support.shutdown_tpu_session()
```
"""
@@ -47,30 +45,45 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import abc
import collections
+import contextlib
import re
+import sys
import time
+import numpy as np
+
from tensorflow.contrib.cluster_resolver.python.training import tpu_cluster_resolver
+from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.tpu.proto import compilation_result_pb2 as tpu_compilation_result
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
+from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
+from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import model_fn as model_fn_lib
+from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
-from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizers as keras_optimizers
+from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.layers import embeddings
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
+TPUDistributionStrategy = tpu_strategy.TPUStrategy # pylint: disable=invalid-name
+
class TPUEmbedding(embeddings.Embedding):
"""TPU compatible embedding layer.
@@ -93,11 +106,49 @@ class TPUEmbedding(embeddings.Embedding):
return math_ops.tensordot(inputs, self.embeddings, 1)
+class KerasCrossShardOptimizer(keras_optimizers.Optimizer):
+ """An optimizer that averages gradients across TPU shards."""
+
+ def __init__(self, opt, name='KerasCrossShardOptimizer'):
+ """Construct a new cross-shard optimizer.
+
+ Args:
+ opt: An existing `Optimizer` to encapsulate.
+ name: Optional name prefix for the operations created when applying
+ gradients. Defaults to "KerasCrossShardOptimizer".
+
+ Raises:
+ ValueError: If reduction is not a valid cross-shard reduction.
+ """
+ super(KerasCrossShardOptimizer, self).__init__()
+ self._name = name
+ self._opt = opt
+
+ def get_updates(self, loss, params):
+ logging.info('Get updates: %s', loss)
+ self._opt.get_gradients = self.get_gradients
+ return self._opt.get_updates(loss, params)
+
+ def get_gradients(self, loss, params):
+ num_shards = tpu_function.get_tpu_context().number_of_shards
+ grads = super(KerasCrossShardOptimizer, self).get_gradients(loss, params)
+ return [tpu_ops.cross_replica_sum(grad) / num_shards for grad in grads]
+
+ def set_weights(self, weights):
+    self._opt.set_weights(weights)
+
+ def get_weights(self):
+ return self._opt.get_weights()
+
+ @property
+ def lr(self):
+ return self._opt.lr
+
+
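Each shard computes gradients on its own slice of the batch; `cross_replica_sum` adds the per-shard gradients, and dividing by the shard count yields the mean gradient that every shard then applies identically. A NumPy simulation of `get_gradients`, modeling `cross_replica_sum` as an element-wise sum:

```
# NumPy model of KerasCrossShardOptimizer.get_gradients: sum the per-shard
# gradients (cross_replica_sum) and divide by the number of shards.
import numpy as np

per_shard_grads = [np.array([0.2, 0.4]),  # gradient from shard 0
                   np.array([0.6, 0.8])]  # gradient from shard 1
num_shards = len(per_shard_grads)
mean_grad = np.sum(per_shard_grads, axis=0) / num_shards
print(mean_grad)  # [0.4 0.6], the same update on every shard
```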
class TPUModelOp(
- collections.namedtuple(
- 'TPUModelOp',
- ['compile_op', 'execute_op', 'infeed_tensors', 'infeed_op',
- 'outfeed_op'])):
+ collections.namedtuple('TPUModelOp', [
+ 'compile_op', 'execute_op', 'infeed_tensors', 'infeed_op', 'outfeed_op'
+ ])):
pass
@@ -106,13 +157,441 @@ def _valid_name(tensor_name):
return re.sub('[^a-zA-Z0-9_-]+', '', tensor_name)
-def _replicated_optimizer(opt, num_replicas):
+def _replicated_optimizer(opt):
"""Wrap the optimizer `opt` with CrossShardOptimizer if applicable."""
- if num_replicas == 1:
+ if tpu_function.get_tpu_context().number_of_shards == 1:
return opt
- return keras_optimizers.TFOptimizer(
- optimizer=tpu_optimizer.CrossShardOptimizer(opt.optimizer)
- )
+
+ if isinstance(opt, keras_optimizers.TFOptimizer):
+ return tpu_optimizer.CrossShardOptimizer(opt.optimizer)
+ else:
+ return KerasCrossShardOptimizer(opt)
+
+
+class TPURewriteContext(object):
+ """Prepare the environment for a Keras model during `tpu.rewrite`.
+
+  This overrides the default placeholder behavior to instead refer to a preset
+ input mapping. Placeholders are unsupported in TPU compiled code, and must
+ be replaced with explicit inputs or values from the infeed queue.
+
+ Instead of explicitly threading inputs all the way through the Keras codebase,
+ we override the behavior of the placeholder while compiling and inject the
+ Tensors from the infeed in place of the placeholder.
+
+ Similarly, as we compile a new sub-graph for each unique shape and execution
+ mode, we need to override the behavior of an embedded `name_scope` call in
+ the base Keras layer code. This allows us to re-use the same weights across
+ many compiles and share a single session/graph.
+ """
+
+ def __init__(self, input_map):
+ self._input_map = input_map
+ self._default_placeholder = None
+ self._default_name_scope = None
+
+ def __enter__(self):
+
+ def _placeholder(dtype, shape=None, name=None): # pylint: disable=unused-argument
+ logging.info('Remapping placeholder for %s', name)
+ if name in self._input_map:
+ return self._input_map[name]
+ else:
+ logging.info('Default: %s', name)
+ return self._default_placeholder(dtype, shape, name)
+
+ def _name_scope(name, default_name=None, values=None):
+ caller_frame = sys._getframe().f_back
+ caller_obj = caller_frame.f_locals.get('self')
+ if (caller_obj is not None and
+ isinstance(caller_obj, base_layer.Layer) and name is not None):
+ return variable_scope.variable_scope(
+ name, default_name, values, reuse=variable_scope.AUTO_REUSE)
+
+ return self._default_name_scope(name, default_name, values)
+
+ self._default_placeholder = array_ops.placeholder
+ self._default_name_scope = ops.name_scope
+ self._default_make_variable = base_layer.make_variable
+ self._default_random_normal = random_ops.random_normal
+ self._default_qr = gen_linalg_ops.qr
+
+ array_ops.placeholder = _placeholder
+
+ # Replace random_ops.random_normal with a dummy function because
+    # `random_normal` isn't yet implemented on the TPU. This is okay because
+    # these initial values are overwritten by the values from the CPU model.
+ def random_normal(shape,
+ mean=0.0,
+ stddev=1.0,
+ dtype=dtypes.float32,
+ seed=None,
+ name=None):
+ del mean
+ del stddev
+ del seed
+ return array_ops.zeros(shape, dtype=dtype, name=name)
+
+ random_ops.random_normal = random_normal
+
+ # Replace gen_linalg_ops.qr because QR decomposition is not yet implemented.
+ # TODO(saeta): Remove qr override once we confirm the qr implementation is
+ # ok.
+ # pylint: disable=redefined-builtin
+ def qr(input, full_matrices=False, name=None):
+ """Dummy implementation of qr decomposition."""
+ del full_matrices # TODO(saeta): Properly handle the full matrix case.
+ input_shape = input.shape
+ if len(input_shape) < 2:
+ raise ValueError('Invalid shape passed to qr: %s' % input_shape)
+ p = min(input_shape[-1], input_shape[-2])
+ if len(input_shape) == 2:
+ q = array_ops.zeros((p, p), name=name)
+ r = array_ops.zeros(input_shape, name=name)
+ return (r, q)
+ elif len(input_shape) == 3:
+ n = input_shape[0]
+ q = array_ops.zeros((n, p, p), name=name)
+ r = array_ops.zeros(input_shape, name=name)
+ return (r, q)
+ else:
+ raise ValueError('Invalid shape passed to qr: %s' % input_shape)
+ gen_linalg_ops.qr = qr
+
+ ops.name_scope = _name_scope
+ base_layer.make_variable = variable_scope.get_variable
+ logging.info('Overriding default placeholder.')
+ return
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ array_ops.placeholder = self._default_placeholder
+ ops.name_scope = self._default_name_scope
+ base_layer.make_variable = self._default_make_variable
+ random_ops.random_normal = self._default_random_normal
+ gen_linalg_ops.qr = self._default_qr
+
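The context manager above follows a save/patch/restore pattern: module-level functions are swapped on `__enter__` and the saved originals are reinstated on `__exit__`. A minimal, self-contained sketch of the same pattern (the patched object here is a hypothetical stand-in, not a TensorFlow module):

```
# Generic save/patch/restore sketch; `fake_module` is a hypothetical
# stand-in for a module such as array_ops or random_ops.
import contextlib
import types

fake_module = types.SimpleNamespace(fn=lambda: 'original')

@contextlib.contextmanager
def patched(module, name, replacement):
  saved = getattr(module, name)       # save the default implementation
  setattr(module, name, replacement)  # patch for the duration of the block
  try:
    yield
  finally:
    setattr(module, name, saved)      # restore on exit, even after an error

with patched(fake_module, 'fn', lambda: 'patched'):
  assert fake_module.fn() == 'patched'
assert fake_module.fn() == 'original'
```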
+
+class SizedInfeed(collections.namedtuple('SizedInfeed',
+ ['sharded_infeed_tensors',
+ 'infeed_ops'])):
+ """Represents an instantiation of the infeed ops for a concrete input shape.
+
+ sharded_infeed_tensors: A data structure of Tensors used to represent the
+ placeholder tensors that must be fed when using feed_dicts.
+
+  infeed_ops: The set of ops that will be run to drive infeed for a single step.
+ """
+ pass
+
+
+class TPUInfeedInstance(object):
+ """TPUInfeedInstance represents the logic to manage feeding in a single step.
+
+ See the comments on the `TPUInfeedManager` for a description for how infeed
+ is managed.
+ """
+
+ @abc.abstractmethod
+ def make_input_specs(self, input_tensors):
+ """Constructs the infeed_specs for the given Infeed instance.
+
+ Args:
+ input_tensors: The inputs to the model.
+
+ Returns:
+      A list of `tensor_spec.TensorSpec`s describing the model inputs.
+ """
+ pass
+
+ def make_feed_dict(self, tpu_model_op):
+ """Constructs a feed_dict for this instance, given the tpu_model_op.
+
+ Args:
+ tpu_model_op: A `TPUModelOp` representing the TPU Model for this
+ instance's input spec.
+
+ Returns:
+ A dictionary to use as the feed_dict of a `session.run` call.
+ """
+ pass
+
+
+class TPUInfeedManager(object):
+ """TPUInfeedManager manages the data infeeding of data to a TPU computation.
+
+ Because there are multiple data sources (e.g. in-memory NumPy arrays,
+ `tf.data.Dataset`s), we abstract the different logic behind a single
+ interface: the `TPUInfeedManager`.
+
+ (1) A `TPUFunction` is called with a set of inputs. Based on the inputs,
+ `TPUFunction` retrieves the corresponding `TPUInfeedManager` (or constructs a
+ new one if required).
+
+ (2) The `TPUFunction` calls `make_infeed_instance` on the `TPUInfeedManager`
+ which returns a `TPUInfeedInstance`.
+
+  (3) The `TPUFunction` checks the shape cache for a pre-compiled instance of
+  the model based on the `input_specs` returned by the `TPUInfeedInstance`.
+
+ (4) [Optional.] If the model has not already been instantiated for the given
+ input spec, the `TPUFunction` compiles the model for the input spec (using the
+ `TPUInfeedManager`).
+
+ (5) The `TPUInfeedInstance` constructs the session.run's feed_dict given the
+ compiled model instance corresponding to its shape.
+ """
+
+ @abc.abstractmethod
+ def make_infeed_instance(self, inputs):
+ """Given a single step's input, construct a `TPUInfeedInstance`.
+
+ Args:
+ inputs: The inputs to a given step.
+
+ Returns:
+ A subclass of `TPUInfeedInstance`.
+ """
+ pass
+
+ @abc.abstractmethod
+ def build_infeed_from_input_specs(self, input_specs, execution_mode):
+ """For a given input specification (size, type), construct the infeed ops.
+
+ This is called only once for a given input specification and builds the
+ graph ops. It does not have a pointer to the actual infeed data.
+
+ Args:
+ input_specs: TODO(saeta): Document me!
+ execution_mode: TODO(saeta): Document me!
+
+ Returns:
+ A `SizedInfeed` instance.
+ """
+ pass
+
+
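The numbered lifecycle above maps onto a short driver loop. A hedged pseudocode sketch of the sequence a `TPUFunction` performs per step; `compile_model_for` stands in for the model-specialization step and is hypothetical:

```
# Hedged sketch of steps (1)-(5); compile_model_for is a hypothetical
# placeholder for TPUFunction._specialize_model.
def run_one_step(infeed_manager, inputs, input_tensors, cache, session):
  instance = infeed_manager.make_infeed_instance(inputs)    # step (2)
  input_specs = instance.make_input_specs(input_tensors)    # step (3)
  shape_key = tuple(tuple(s.shape.as_list()) for s in input_specs)
  if shape_key not in cache:                                # step (4)
    cache[shape_key] = compile_model_for(input_specs, infeed_manager)
  tpu_model_op = cache[shape_key]
  feed_dict = instance.make_feed_dict(tpu_model_op)         # step (5)
  return session.run(tpu_model_op.execute_op, feed_dict)
```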
+class TPUNumpyInfeedManager(TPUInfeedManager):
+ """TPU Infeed manager for Numpy inputs."""
+
+ class NumpyInfeedInstance(TPUInfeedInstance):
+ """Infeed instance for Numpy inputs."""
+
+ def __init__(self, sharded_inputs):
+ self._sharded_inputs = sharded_inputs
+
+ def make_input_specs(self, input_tensors):
+ # Compute an input specification (used to generate infeed enqueue and
+ # dequeue operations). We use the shape from our input array and the
+ # dtype from our model. A user may pass in a float64 for a float32
+ # input: for model compatibility we still must generate a float32 infeed.
+ input_specs = []
+ # We use the shape and dtype from the first shard to compute the input
+ # metadata (`input_specs`); all replicas have the same type and shape.
+ for tensor, ary in zip(input_tensors, self._sharded_inputs[0]):
+ input_specs.append(
+ tensor_spec.TensorSpec(ary.shape, tensor.dtype,
+ _valid_name(tensor.name)))
+
+ return input_specs
+
+ def make_feed_dict(self, tpu_model_op):
+ infeed_dict = {}
+ for infeed_tensors, inputs in zip(tpu_model_op.infeed_tensors,
+ self._sharded_inputs):
+ for tensor, value in zip(infeed_tensors, inputs):
+ infeed_dict[tensor] = value
+ return infeed_dict
+
+ def __init__(self, distribution_strategy):
+ self._strategy = distribution_strategy
+
+ def _split_tensors(self, inputs):
+ """Split input data across shards.
+
+ Each input is sliced along the batch axis.
+
+ Args:
+ inputs: List of Numpy arrays to run on the TPU.
+
+ Returns:
+ List of lists containing the input to feed to each TPU shard.
+ """
+ if self._strategy.num_towers == 1:
+ return [inputs]
+
+ batch_size = inputs[0].shape[0]
+ assert batch_size % self._strategy.num_towers == 0, (
+ 'batch_size must be divisible by strategy.num_towers (%s vs %s)' %
+ (batch_size, self._strategy.num_towers))
+ shard_size = batch_size // self._strategy.num_towers
+ input_list = []
+ for index in range(self._strategy.num_towers):
+ shard_inputs = [
+ x[index * shard_size:(index + 1) * shard_size] for x in inputs
+ ]
+ input_list.append(shard_inputs)
+ return input_list
+
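`_split_tensors` slices every input along the batch axis into equal, contiguous per-tower chunks. A NumPy-only demonstration with an illustrative batch of 4 split across 2 towers:

```
# Demonstration of the slicing in _split_tensors: contiguous shards of
# size batch_size // num_towers, one list of inputs per tower.
import numpy as np

inputs = [np.arange(8).reshape(4, 2)]  # one input tensor, batch_size=4
num_towers = 2
shard_size = inputs[0].shape[0] // num_towers

input_list = [[x[i * shard_size:(i + 1) * shard_size] for x in inputs]
              for i in range(num_towers)]
# input_list[0][0] holds rows 0-1; input_list[1][0] holds rows 2-3.
```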
+ def make_infeed_instance(self, inputs):
+ sharded_inputs = self._split_tensors(inputs)
+ return self.NumpyInfeedInstance(sharded_inputs)
+
+ def build_infeed_from_input_specs(self, input_specs, execution_mode):
+ infeed_op = []
+ shard_infeed_tensors = []
+
+ for shard_id in range(self._strategy.num_towers):
+ with ops.device('/device:TPU:%d' % shard_id):
+ infeed_tensors = []
+ for spec in input_specs:
+ # Construct placeholders for each of the inputs.
+ infeed_tensors.append(
+ array_ops.placeholder(
+ dtype=spec.dtype,
+ shape=spec.shape,
+ name='infeed-enqueue-%s-%d' % (spec.name, shard_id)))
+ shard_infeed_tensors.append(infeed_tensors)
+
+ infeed_op.append(
+ tpu_ops.infeed_enqueue_tuple(
+ infeed_tensors, [spec.shape for spec in input_specs],
+ name='infeed-enqueue-%s-%d' % (execution_mode, shard_id)))
+ return SizedInfeed(infeed_ops=infeed_op,
+ sharded_infeed_tensors=shard_infeed_tensors)
+
+
+class TPUDatasetInfeedManager(TPUInfeedManager):
+ """Manages infeed for a `tf.data.Dataset` into a TPU computation.
+ """
+
+ class DatasetInfeedInstance(TPUInfeedInstance):
+ """An instance of the TPU infeed."""
+
+ def __init__(self, input_specs):
+ self._input_specs = input_specs
+
+ def make_input_specs(self, input_tensors):
+ # TODO(saeta): Do error checking here!
+ return self._input_specs
+
+ def make_feed_dict(self, tpu_model_op):
+ # TODO(saeta): Verify tpu_model_op is as expected!
+ return {}
+
+ def __init__(self, dataset, distribution_strategy, tpu_session):
+ """Constructs a TPUDatasetInfeedManager.
+
+ Must be called within a `KerasTPUModel.tpu_session` context!
+
+ Args:
+ dataset: A `tf.data.Dataset` to infeed.
+ distribution_strategy: The `TPUDistributionStrategy` used to configure the
+ Keras TPU model.
+ tpu_session: The `tf.Session` object used for running the TPU model.
+ """
+ self._verify_dataset_shape(dataset)
+ self._dataset = dataset
+ self._strategy = distribution_strategy
+ dummy_x_shape = dataset.output_shapes[0].as_list()
+ dummy_x_shape[0] *= distribution_strategy.num_towers
+ dummy_y_shape = dataset.output_shapes[1].as_list()
+ dummy_y_shape[0] *= distribution_strategy.num_towers
+ self._iterator = dataset.make_initializable_iterator()
+ tpu_session.run(self._iterator.initializer)
+
+ self._get_next_ops = []
+ ctrl_deps = []
+ for i in range(distribution_strategy.num_towers):
+      with ops.control_dependencies(ctrl_deps):  # Ensure deterministic ordering.
+ # TODO(saeta): Ensure correct placement!
+ get_next_op = self._iterator.get_next()
+ self._get_next_ops.append(get_next_op)
+ ctrl_deps.extend(get_next_op)
+
+ # Use dummy numpy inputs for the rest of Keras' shape checking. We
+ # intercept them when building the model.
+ self._dummy_x = np.zeros(dummy_x_shape,
+ dtype=dataset.output_types[0].as_numpy_dtype)
+ self._dummy_y = np.zeros(dummy_y_shape,
+ dtype=dataset.output_types[1].as_numpy_dtype)
+
+ input_specs = []
+ if isinstance(self._iterator.output_shapes, tuple):
+ assert isinstance(self._iterator.output_types, tuple)
+ assert len(self._iterator.output_shapes) == len(
+ self._iterator.output_types)
+ for i in range(len(self._iterator.output_shapes)):
+ spec = tensor_spec.TensorSpec(self._iterator.output_shapes[i],
+ self._iterator.output_types[i])
+ input_specs.append(spec)
+ elif isinstance(self._iterator.output_shapes, tensor_shape.TensorShape):
+ spec = tensor_spec.TensorSpec(self._iterator.output_shapes,
+ self._iterator.output_types)
+ input_specs.append(spec)
+
+ self._infeed_instance = self.DatasetInfeedInstance(input_specs)
+
+ def _verify_dataset_shape(self, dataset):
+ """Verifies a dataset is of an appropriate shape for TPUs."""
+ if not isinstance(dataset, dataset_ops.Dataset):
+ raise ValueError('The function passed as the `x` parameter did not '
+ 'return a `tf.data.Dataset`.')
+ if not isinstance(dataset.output_classes, tuple):
+ raise ValueError('The dataset must return a tuple of tf.Tensors, '
+ 'instead it returns: %s' % dataset.output_classes)
+ if len(dataset.output_classes) != 2:
+ raise ValueError(
+ 'The dataset must return a 2-element tuple, got '
+ '%s output classes instead.' % (dataset.output_classes,))
+ for i, cls in enumerate(dataset.output_classes):
+ if cls != ops.Tensor:
+ raise ValueError('The dataset returned a non-Tensor type (%s) at '
+ 'index %d.' % (cls, i))
+ for i, shape in enumerate(dataset.output_shapes):
+ if not shape:
+ raise ValueError('The dataset returns a scalar tensor in '
+ 'tuple index %d. Did you forget to batch? '
+ '(Output shapes: %s).' % (i,
+ dataset.output_shapes))
+ for j, dim in enumerate(shape):
+ if dim.value is None:
+ if j == 0:
+ hint = (' Hint: did you use `ds.batch(BATCH_SIZE, '
+ 'drop_remainder=True)`?')
+ else:
+ hint = ''
+ raise ValueError(
+ 'The Keras-TPU integration for `tf.data` '
+ 'currently requires static shapes. The provided '
+ 'dataset only has a partially defined shape. '
+ '(Dimension %d of output tensor %d is not statically known '
+              'for output shapes: %s.%s)' % (j, i, dataset.output_shapes, hint))
+
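In practice these checks require an input function whose dataset yields a `(features, labels)` tuple of tensors with fully static shapes, which means batching with `drop_remainder=True`. A hedged sketch of a compliant input function (sizes and dtypes are illustrative):

```
# Illustrative input function satisfying _verify_dataset_shape: a
# 2-tuple of Tensors, batched so every output shape is static.
import numpy as np
import tensorflow as tf

def input_fn(batch_size=32):
  x = np.random.rand(1024, 28, 28, 3).astype(np.float32)
  y = np.random.randint(0, 10, size=(1024,)).astype(np.int32)
  ds = tf.data.Dataset.from_tensor_slices((x, y))
  # drop_remainder=True keeps every batch full-size, so dimension 0 of
  # each output is statically known, as the checks above require.
  return ds.repeat().batch(batch_size, drop_remainder=True)
```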
+ @property
+ def dummy_x(self):
+ return self._dummy_x
+
+ @property
+ def dummy_y(self):
+ return self._dummy_y
+
+ def make_infeed_instance(self, inputs):
+ # TODO(saeta): Verify inputs is as expected.
+ return self._infeed_instance
+
+ def build_infeed_from_input_specs(self, input_specs, execution_mode):
+ shard_infeed_tensors = self._get_next_ops
+ assert len(shard_infeed_tensors) == self._strategy.num_towers
+ infeed_ops = []
+ for shard_id in range(self._strategy.num_towers):
+ with ops.device('/device:TPU:%d' % shard_id):
+ infeed_ops.append(
+ tpu_ops.infeed_enqueue_tuple(
+ shard_infeed_tensors[shard_id],
+ [spec.shape for spec in input_specs],
+ name='infeed-enqueue-%s-%d' % (execution_mode, shard_id)))
+ return SizedInfeed(infeed_ops=infeed_ops,
+ sharded_infeed_tensors=shard_infeed_tensors)
class TPUFunction(object):
@@ -127,19 +606,24 @@ class TPUFunction(object):
instead of being injected as `feed_dict` items or fetches.
"""
- def __init__(self, model, execution_mode, num_replicas=1):
+ def __init__(self, model, execution_mode, strategy):
self.model = model
self.execution_mode = execution_mode
+ self._strategy = strategy
self._compilation_cache = {}
- self.num_replicas = num_replicas
+ self._cloned_model = None
- def _specialize_model(self, input_specs):
+ # Copy optimizer configuration. This is done prior to `_specialize_model`
+ # as the configuration may require evaluating variables in the CPU session.
+ self._optimizer_config = None
+ if not isinstance(self.model.optimizer, keras_optimizers.TFOptimizer):
+ self._optimizer_config = self.model.optimizer.get_config()
+
+ def _specialize_model(self, input_specs, infeed_manager):
"""Specialize `self.model` (a Keras model) for the given input shapes."""
# Re-create our input and output layers inside our subgraph. They will be
# attached to the true computation when we clone our model in `tpu_fn`.
- K.set_learning_phase(
- self.execution_mode == model_fn_lib.ModeKeys.TRAIN
- )
+ K.set_learning_phase(self.execution_mode == model_fn_lib.ModeKeys.TRAIN)
# functools.partial and callable objects are not supported by tpu.rewrite
def _model_fn():
@@ -161,27 +645,38 @@ class TPUFunction(object):
name='infeed-%s' % self.execution_mode)
assert len(infeed_tensors) == len(infeed_layers), (
- 'Infeed inputs did not match model: %s vs %s', (infeed_layers,
- infeed_tensors))
+ 'Infeed inputs did not match model: %s vs %s' % (infeed_layers,
+ infeed_tensors))
tpu_targets = []
- tpu_inputs = []
+ tpu_input_map = {}
# Sort infeed outputs into inputs and labels for calling our Keras model.
for tensor, layer in zip(infeed_tensors, infeed_layers):
if layer in self.model._input_layers:
- tpu_inputs.append(layers.Input(name=layer.name, tensor=tensor))
+ tpu_input_map[layer.name] = tensor
if layer in self.model._output_layers:
tpu_targets.append(tensor)
- # Call our model with our infeed inputs (re-using the weights).
- model_outputs = self.model(tpu_inputs)
- child_model = models.Model(inputs=tpu_inputs, outputs=model_outputs)
+ # Clone our CPU model, running within the TPU device context.
+ with TPURewriteContext(tpu_input_map):
+ # TODO(power): Replicate variables.
+ with ops.device('/device:TPU:0'):
+ self._cloned_model = models.clone_model(self.model)
+
+ # Create a copy of the optimizer for this graph.
+ if isinstance(self.model.optimizer, keras_optimizers.TFOptimizer):
+ cloned_optimizer = keras_optimizers.TFOptimizer(
+ self.model.optimizer.optimizer)
+ else:
+ logging.info('Cloning %s %s', self.model.optimizer.__class__.__name__,
+ self._optimizer_config)
+ cloned_optimizer = self.model.optimizer.__class__.from_config(
+ self._optimizer_config)
if is_training or is_test:
- child_model.compile(
- optimizer=_replicated_optimizer(self.model.optimizer,
- self.num_replicas),
+ self._cloned_model.compile(
+ optimizer=_replicated_optimizer(cloned_optimizer),
loss=self.model.loss,
loss_weights=self.model.loss_weights,
metrics=self.model.metrics,
@@ -191,37 +686,37 @@ class TPUFunction(object):
# Compute our outfeed depending on the execution mode
if is_training:
- child_model._make_train_function()
+ self._cloned_model._make_train_function()
self._outfeed_spec = [
tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
- for tensor in child_model.train_function.outputs
+ for tensor in self._cloned_model.train_function.outputs
]
return [
- child_model.train_function.updates_op,
+ self._cloned_model.train_function.updates_op,
tpu_ops.outfeed_enqueue_tuple(
- child_model.train_function.outputs,
+ self._cloned_model.train_function.outputs,
name='outfeed-enqueue-train')
]
elif is_test:
- child_model._make_test_function()
+ self._cloned_model._make_test_function()
self._outfeed_spec = [
tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
- for tensor in child_model.test_function.outputs
+ for tensor in self._cloned_model.test_function.outputs
]
return [
tpu_ops.outfeed_enqueue_tuple(
- child_model.test_function.outputs,
+ self._cloned_model.test_function.outputs,
name='outfeed-enqueue-test')
]
elif is_predict:
- child_model._make_predict_function()
+ self._cloned_model._make_predict_function()
self._outfeed_spec = [
tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
- for tensor in child_model.predict_function.outputs
+ for tensor in self._cloned_model.predict_function.outputs
]
return [
tpu_ops.outfeed_enqueue_tuple(
- child_model.predict_function.outputs,
+ self._cloned_model.predict_function.outputs,
name='outfeed-enqueue-predict',
)
]
@@ -236,84 +731,56 @@ class TPUFunction(object):
    # `execute_op` replicates `_model_fn` `num_replicas` times, with each shard
# running on a different logical core.
compile_op, execute_op = tpu.split_compile_and_replicate(
- _model_fn, inputs=[[]] * self.num_replicas)
+ _model_fn, inputs=[[]] * self._strategy.num_towers)
# Generate CPU side operations to enqueue features/labels and dequeue
# outputs from the model call.
- infeed_op = []
+ sized_infeed = infeed_manager.build_infeed_from_input_specs(
+ input_specs, self.execution_mode)
+ # Build output ops.
outfeed_op = []
- shard_infeed_tensors = []
-
- for shard_id in range(self.num_replicas):
+ for shard_id in range(self._strategy.num_towers):
with ops.device('/device:TPU:%d' % shard_id):
- infeed_tensors = []
- for spec in input_specs:
- infeed_tensors.append(
- array_ops.placeholder(
- dtype=spec.dtype,
- shape=spec.shape,
- name='infeed-enqueue-%s-%d' % (spec.name, shard_id)))
- shard_infeed_tensors.append(infeed_tensors)
-
- infeed_op.append(tpu_ops.infeed_enqueue_tuple(
- infeed_tensors, [spec.shape for spec in input_specs],
- name='infeed-enqueue-%s-%d' % (self.execution_mode, shard_id)))
-
- outfeed_op.extend(tpu_ops.outfeed_dequeue_tuple(
- dtypes=[spec.dtype for spec in self._outfeed_spec],
- shapes=[spec.shape for spec in self._outfeed_spec],
- name='outfeed-dequeue-%s-%d' % (self.execution_mode, shard_id)))
+ outfeed_op.extend(
+ tpu_ops.outfeed_dequeue_tuple(
+ dtypes=[spec.dtype for spec in self._outfeed_spec],
+ shapes=[spec.shape for spec in self._outfeed_spec],
+ name='outfeed-dequeue-%s-%d' % (self.execution_mode, shard_id)))
return TPUModelOp(
- compile_op, execute_op, infeed_tensors=shard_infeed_tensors,
- infeed_op=infeed_op, outfeed_op=outfeed_op)
+ compile_op,
+ execute_op,
+ infeed_tensors=sized_infeed.sharded_infeed_tensors,
+ infeed_op=sized_infeed.infeed_ops,
+ outfeed_op=outfeed_op)
def _test_model_compiles(self, tpu_model_ops):
"""Verifies that the given TPUModelOp can be compiled via XLA."""
- session = K.get_session()
-
logging.info('Started compiling')
start_time = time.clock()
- result = session.run(tpu_model_ops.compile_op)
+ result = K.get_session().run(tpu_model_ops.compile_op)
proto = tpu_compilation_result.CompilationResultProto()
proto.ParseFromString(result)
if proto.status_error_message:
- raise RuntimeError(
- 'Compilation failed: {}'.format(proto.status_error_message))
+ raise RuntimeError('Compilation failed: {}'.format(
+ proto.status_error_message))
end_time = time.clock()
logging.info('Finished compiling. Time elapsed: %s secs',
end_time - start_time)
- def _split_tensors(self, inputs):
- """Split input data across shards.
-
- Each input is sliced along the batch axis.
-
- Args:
- inputs: List of Numpy arrays to run on the TPU.
-
- Returns:
- List of lists containing the input to feed to each TPU shard.
- """
- if self.num_replicas == 1:
- return [inputs]
-
- batch_size = inputs[0].shape[0]
- assert batch_size % self.num_replicas == 0, (
- 'batch_size must be divisible by num_replicas')
- shard_size = batch_size // self.num_replicas
- input_list = []
- for index in range(self.num_replicas):
- shard_inputs = [x[index * shard_size:(index + 1) * shard_size]
- for x in inputs]
- input_list.append(shard_inputs)
- return input_list
-
def __call__(self, inputs):
assert isinstance(inputs, list)
+ infeed_manager = None
+ for x, mgr in self.model._numpy_to_infeed_manager_list:
+ if inputs[0] is x:
+ infeed_manager = mgr
+ break
+ if infeed_manager is None:
+ infeed_manager = TPUNumpyInfeedManager(self.model._strategy)
+
# Strip sample weight from inputs
if (self.execution_mode == model_fn_lib.ModeKeys.TRAIN or
self.execution_mode == model_fn_lib.ModeKeys.EVAL):
@@ -322,21 +789,9 @@ class TPUFunction(object):
else:
input_tensors = self.model._feed_inputs
- shard_inputs = self._split_tensors(inputs)
+ infeed_instance = infeed_manager.make_infeed_instance(inputs)
del inputs # To avoid accident usage.
-
- # Compute an input specification (used to generate infeed enqueue and
- # dequeue operations). We use the shape from our input array and the
- # dtype from our model. A user may pass in a float64 for a float32
- # input: for model compatibility we still must generate a float32 infeed.
- input_specs = []
-
- # We use the shape and dtype from the first shard to compute the input
- # metadata (`input_specs`); all replicas have the same type and shape.
- for tensor, ary in zip(input_tensors, shard_inputs[0]):
- input_specs.append(
- tensor_spec.TensorSpec(ary.shape, tensor.dtype,
- _valid_name(tensor.name)))
+ input_specs = infeed_instance.make_input_specs(input_tensors)
# XLA requires every operation in the graph has a fixed shape. To
# handle varying batch sizes we recompile a new sub-graph for each
@@ -344,89 +799,103 @@ class TPUFunction(object):
shape_key = tuple([tuple(spec.shape.as_list()) for spec in input_specs])
if shape_key not in self._compilation_cache:
- logging.info('New input shapes; (re-)compiling: mode=%s, %s',
- self.execution_mode, input_specs)
- new_tpu_model_ops = self._specialize_model(input_specs)
- self._compilation_cache[shape_key] = new_tpu_model_ops
- self._test_model_compiles(new_tpu_model_ops)
-
+ with self.model.tpu_session():
+ logging.info('New input shapes; (re-)compiling: mode=%s, %s',
+ self.execution_mode, input_specs)
+ new_tpu_model_ops = self._specialize_model(input_specs,
+ infeed_manager)
+ self._compilation_cache[shape_key] = new_tpu_model_ops
+ self._test_model_compiles(new_tpu_model_ops)
+
+ # Initialize our TPU weights on the first compile.
+ self.model._initialize_weights(self._cloned_model)
tpu_model_ops = self._compilation_cache[shape_key]
- infeed_dict = {}
- for infeed_tensors, inputs in zip(tpu_model_ops.infeed_tensors,
- shard_inputs):
- for tensor, value in zip(infeed_tensors, inputs):
- infeed_dict[tensor] = value
+ infeed_dict = infeed_instance.make_feed_dict(tpu_model_ops)
- session = K.get_session()
- _, _, outfeed_outputs = session.run([
- tpu_model_ops.infeed_op, tpu_model_ops.execute_op,
- tpu_model_ops.outfeed_op
- ], infeed_dict)
+ with self.model.tpu_session() as session:
+ _, _, outfeed_outputs = session.run([
+ tpu_model_ops.infeed_op, tpu_model_ops.execute_op,
+ tpu_model_ops.outfeed_op
+ ], infeed_dict)
# TODO(xiejw): Decide how to reduce outputs, or just discard all but first.
- return outfeed_outputs[:len(outfeed_outputs) // self.num_replicas]
-
-
-@experimental
-def setup_tpu_session(tpu_name_or_address):
- """Initializes and returns a Keras/TF session connected the TPU `master`.
-
- Args:
- tpu_name_or_address: A string that is either the name of the Cloud TPU,
- the grpc address of the Cloud TPU, or (Googlers only) the BNS name of the
- Cloud TPU. If tpu_name_or_address is None, the TPUClusterResolver will
- examine the environment to determine a potential Cloud TPU to use.
-
- Returns:
- A `tf.Session`.
- """
- cluster_resolver = tpu_cluster_resolver.TPUClusterResolver(
- tpu_name_or_address)
- cluster_spec = cluster_resolver.cluster_spec()
- session = tf_session.Session(
- target=cluster_resolver.master(),
- config=config_pb2.ConfigProto(
- isolate_session_state=True))
- if cluster_spec:
- session.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
- K.set_session(session)
- K.get_session().run(tpu.initialize_system())
- return session
-
-
-@experimental
-def shutdown_tpu_session(session=None):
- """Shutdown the TPU attached to session.
+ if self.execution_mode == model_fn_lib.ModeKeys.PREDICT:
+      outputs = [[] for _ in self._outfeed_spec]
+ outputs_per_replica = len(self._outfeed_spec)
- This should be called to cleanly shut down the TPU system before the client
- exits.
+ for i in range(self._strategy.num_towers):
+ output_group = outfeed_outputs[i * outputs_per_replica:(i + 1) *
+ outputs_per_replica]
+ for j in range(outputs_per_replica):
+ outputs[j].append(output_group[j])
- Args:
- session: Session to shutdown, or None to use the default session.
-
- Returns:
-
- """
- if session is None:
- session = K.get_session()
-
- session.run(tpu.shutdown_system())
+ return [np.concatenate(group) for group in outputs]
+ else:
+ return outfeed_outputs[:len(outfeed_outputs) // self._strategy.num_towers]
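The predict branch above de-interleaves the flat outfeed list: values arrive grouped per replica, get regrouped per output, and are then concatenated along the batch axis. A NumPy illustration with 2 towers and 2 outputs per replica (values invented):

```
# Outfeed layout: [rep0_out0, rep0_out1, rep1_out0, rep1_out1].
import numpy as np

outfeed = [np.array([1, 2]), np.array([10, 20]),
           np.array([3, 4]), np.array([30, 40])]
num_towers, outputs_per_replica = 2, 2

outputs = [[] for _ in range(outputs_per_replica)]  # no aliased sublists
for i in range(num_towers):
  group = outfeed[i * outputs_per_replica:(i + 1) * outputs_per_replica]
  for j in range(outputs_per_replica):
    outputs[j].append(group[j])

merged = [np.concatenate(g) for g in outputs]
# merged[0] == [1 2 3 4]; merged[1] == [10 20 30 40]
```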
class KerasTPUModel(models.Model):
"""TPU compatible Keras model wrapper."""
- def __init__(self, inputs, outputs, name, replicas=1):
+ def __init__(self, cpu_model, tpu_name_or_address, strategy):
super(models.Model, self).__init__( # pylint: disable=bad-super-call
- inputs=inputs,
- outputs=outputs,
- name=name,
+ inputs=cpu_model.inputs,
+ outputs=cpu_model.outputs,
+ name=cpu_model.name,
)
+
+ # Create a mapping from numpy arrays to infeed managers.
+ # Note: uses a list of tuples instead of a map because numpy arrays are
+ # not hashable.
+ self._numpy_to_infeed_manager_list = []
+
self.predict_function = None
self.test_function = None
self.train_function = None
- self.replicas = replicas
+ self._strategy = strategy
+
+ self._tpu_name_or_address = tpu_name_or_address
+ self._cpu_model = cpu_model
+ self._tpu_model = None
+ self._tpu_weights_initialized = False
+ self._graph = ops.Graph()
+
+ self._cluster_resolver = tpu_cluster_resolver.TPUClusterResolver(
+ tpu_name_or_address)
+ master = self._cluster_resolver.master()
+ cluster_spec = self._cluster_resolver.cluster_spec()
+ self._session = tf_session.Session(
+ graph=self._graph,
+ target=master,
+ config=config_pb2.ConfigProto(isolate_session_state=True))
+
+ # TODO(saeta): Confirm the lines below work in ClusterSpec propagation env.
+ if cluster_spec:
+ self._session.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
+
+ with self._graph.as_default():
+ self._session.run(tpu.initialize_system())
+
+ # If the input CPU model has already been compiled, compile our TPU model
+ # immediately.
+ if self._cpu_model.optimizer:
+ self.compile(
+ self._cpu_model.optimizer,
+ self._cpu_model.loss,
+ self._cpu_model.metrics,
+ self._cpu_model.loss_weights,
+ self._cpu_model.sample_weight_mode,
+ self._cpu_model.weighted_metrics,
+ self._cpu_model.target_tensors,
+ )
+
+ def get_config(self):
+ return {
+ 'cpu_model': self._cpu_model,
+ 'tpu_name_or_address': self._tpu_name_or_address,
+ 'strategy': self._strategy,
+ }
def compile(self,
optimizer,
@@ -448,44 +917,183 @@ class KerasTPUModel(models.Model):
sample_weight_mode, weighted_metrics,
target_tensors, **kwargs)
- # Keras optimizers are not compatible with TPU rewrite
- if not isinstance(self.optimizer, keras_optimizers.TFOptimizer):
+ if not self._cpu_model.optimizer:
+ self._cpu_model.compile(optimizer, loss, metrics, loss_weights,
+ sample_weight_mode, weighted_metrics,
+ target_tensors, **kwargs)
+
+ def fit(self,
+ x=None,
+ y=None,
+ batch_size=None,
+ epochs=1,
+ verbose=1,
+ callbacks=None,
+ validation_split=0.,
+ validation_data=None,
+ shuffle=True,
+ class_weight=None,
+ sample_weight=None,
+ initial_epoch=0,
+ steps_per_epoch=None,
+ validation_steps=None,
+ **kwargs):
+ assert not self._numpy_to_infeed_manager_list # Ensure empty.
+
+ infeed_managers = [] # Managers to clean up at the end of the fit call.
+ if isinstance(x, dataset_ops.Dataset):
+ # TODO(b/111413240): Support taking a tf.data.Dataset directly.
raise ValueError(
- 'Optimizer must be a TFOptimizer, got: %s' % self.optimizer)
+ 'Taking a Dataset directly is not yet supported. Please '
+ 'wrap your dataset construction code in a function and '
+ 'pass that to fit instead. For examples, see: '
+ 'https://github.com/tensorflow/tpu/tree/master/models/experimental'
+ '/keras')
+ if callable(x):
+ with self.tpu_session() as sess:
+ dataset = x()
+ if steps_per_epoch is None:
+ raise ValueError('When using tf.data as input to a model, you '
+ 'should specify the steps_per_epoch argument.')
+ if y is not None:
+ raise ValueError('When using tf.data as input to a model, y must be '
+ 'None')
+ infeed_manager = TPUDatasetInfeedManager(dataset, self._strategy, sess)
+ # Use dummy numpy inputs for the rest of Keras' shape checking. We
+ # intercept them when building the model.
+ x = infeed_manager.dummy_x
+ y = infeed_manager.dummy_y
+ infeed_managers.append((x, infeed_manager))
+
+ if isinstance(validation_data, dataset_ops.Dataset):
+ # TODO(b/111413240): Support taking a tf.data.Dataset directly.
+ raise ValueError(
+ 'Taking a Dataset directly is not yet supported. Please '
+ 'wrap your dataset construction code in a function and '
+ 'pass that to fit instead. For examples, see: '
+ 'https://github.com/tensorflow/tpu/tree/master/models/experimental'
+ '/keras')
+ if callable(validation_data):
+ with self.tpu_session() as sess:
+ dataset = validation_data()
+ if validation_steps is None:
+ raise ValueError('When using tf.data as validation for a model, you '
+ 'should specify the validation_steps argument.')
+ infeed_manager = TPUDatasetInfeedManager(dataset, self._strategy, sess)
+ # Use dummy numpy inputs for the rest of Keras' shape checking. We
+ # intercept them when building the model.
+ val_x = infeed_manager.dummy_x
+ val_y = infeed_manager.dummy_y
+ infeed_managers.append((val_x, infeed_manager))
+ validation_data = (val_x, val_y)
+
+ self._numpy_to_infeed_manager_list = infeed_managers
+ try:
+ return super(KerasTPUModel, self).fit(
+ x,
+ y,
+ batch_size,
+ epochs,
+ verbose,
+ callbacks,
+ validation_split,
+ validation_data,
+ shuffle,
+ class_weight,
+ sample_weight,
+ initial_epoch,
+ steps_per_epoch,
+ validation_steps,
+ **kwargs)
+ finally:
+ self._numpy_to_infeed_manager_list = []
def _make_train_function(self):
if not self.train_function:
- self.train_function = TPUFunction(self, model_fn_lib.ModeKeys.TRAIN,
- num_replicas=self.replicas)
+ self.train_function = TPUFunction(
+ self, model_fn_lib.ModeKeys.TRAIN, strategy=self._strategy)
return self.train_function
def _make_test_function(self):
if not self.test_function:
- self.test_function = TPUFunction(self, model_fn_lib.ModeKeys.EVAL)
+ self.test_function = TPUFunction(
+ self, model_fn_lib.ModeKeys.EVAL, strategy=self._strategy)
return self.test_function
def _make_predict_function(self):
if not self.predict_function:
- self.predict_function = TPUFunction(self, model_fn_lib.ModeKeys.PREDICT)
+ self.predict_function = TPUFunction(
+ self, model_fn_lib.ModeKeys.PREDICT, strategy=self._strategy)
return self.predict_function
- def cpu_model(self):
- cpu_model = models.Model(
- inputs=self.inputs,
- outputs=self.outputs,
- name=self.name,
- )
+ def _initialize_weights(self, cloned_model):
+ """Initialize TPU weights.
- if self.optimizer:
- cpu_model.compile(
- optimizer=self.optimizer,
- loss=self.loss,
- metrics=self.metrics,
- loss_weights=self.loss_weights,
- )
+ This is called on the first compile of the TPU model (first call to
+ fit/predict/evaluate).
- return cpu_model
+ Args:
+ cloned_model: `keras.Model`, TPU model to initialize.
+ """
+ if self._tpu_weights_initialized:
+ return
+
+ self._tpu_model = cloned_model
+ self._tpu_weights_initialized = True
+
+ weights = self._cpu_model.get_weights()
+ with self.tpu_session():
+ logging.info('Setting weights on TPU model.')
+ cloned_model.set_weights(weights)
+
+ def sync_to_cpu(self):
+ """Copy weights from the CPU, returning a synchronized CPU model."""
+ if self._tpu_weights_initialized:
+ with self.tpu_session():
+ logging.info('Copying TPU weights to the CPU')
+ tpu_weights = self._tpu_model.get_weights()
+
+ self._cpu_model.set_weights(tpu_weights)
+
+ return self._cpu_model
+
+ def get_weights(self):
+ return self.sync_to_cpu().get_weights()
+
+ def save_weights(self, *args, **kw):
+ return self.sync_to_cpu().save_weights(*args, **kw)
+
+ def save(self, *args, **kw):
+ return self.sync_to_cpu().save(*args, **kw)
+
+ def set_weights(self, weights):
+ # We may not have a TPU model available if we haven't run fit/predict, so
+ # we can't directly set the TPU weights here.
+ # Instead, reset CPU model weights and force TPU re-initialization at the
+ # next call.
+ self._cpu_model.set_weights(weights)
+ self._tpu_weights_initialized = False
+
+ @contextlib.contextmanager
+ def tpu_session(self):
+ """Yields a TPU session and sets it as the default Keras session."""
+ with self._graph.as_default():
+ default_session = K.get_session()
+ # N.B. We have to call `K.set_session()` AND set our session as the
+ # TF default. `K.get_session()` surprisingly does not return the value
+ # supplied by K.set_session otherwise.
+ K.set_session(self._session)
+ with self._session.as_default():
+ yield self._session
+ K.set_session(default_session)
+
+ def shutdown(self):
+ # TODO(b/111364423): Actually shut down the system.
+ logging.info('Skipping shutting down TPU system.')
+ # with self.tpu_session() as session:
+ # session.run(tpu.shutdown_system())
+ self._session.close()
def _validate_shapes(model):
@@ -522,8 +1130,8 @@ Output shape: %(output_shape)s
@experimental
-def tpu_model(model, replicas=None):
- """Runs a model on TPU(s).
+def tpu_model(model, tpu_name_or_address=None, strategy=None):
+ """Copy `model` along with weights to the TPU. Returns a TPU model.
Usage:
```
@@ -531,44 +1139,39 @@ def tpu_model(model, replicas=None):
b = Dense(32)(a)
model = Model(inputs=a, outputs=b)
- model = keras_support.tpu_model(model)
- model.compile(
- optimizer=tf.train.GradientDescentOptimizer(learning_rate=1.0),
- ...)
- ```
-
- If `replicas` is set, replicates the model computation on all TPU cores. The
- model computation is replicated `num_replicas` times; each shard will run on a
- different TPU core.
-
- Limitation: Currently, replication is only supported for training.
-
- Usage:
- ```
- a = Input(shape=(32,))
- b = Dense(32)(a)
- model = Model(inputs=a, outputs=b)
-
- model = keras_support.tpu_model(model, replicas=2)
+ # If `num_cores_per_host` is greater than one, batch parallelism will be used
+ # to run on multiple TPU cores.
+ strategy = keras_support.TPUDistributionStrategy(num_cores_per_host=8)
+ model = keras_support.tpu_model(model, strategy)
model.compile(
optimizer=tf.train.GradientDescentOptimizer(learning_rate=1.0),
...)
+ model.shutdown()
```
Args:
model: A `KerasTPUModel`.
- replicas: (Optional) Int, number of TPU cores which to create model
- replicas. If `None`, the model runs on single core only, i.e., no
- replication.
+ tpu_name_or_address: A string that is either the name of the Cloud TPU,
+ the grpc address of the Cloud TPU, or (Googlers only) the BNS name of the
+ Cloud TPU. If tpu_name_or_address is None, the TPUClusterResolver will
+ examine the environment to determine a potential Cloud TPU to use.
+ strategy: `TPUDistributionStrategy`. The strategy to use for replicating
+      the model across multiple TPU cores.
Returns:
A new `KerasTPUModel` instance.
"""
+ # Force initialization of the CPU model.
+ model.get_weights()
+ model.reset_states()
+
_validate_shapes(model)
# TODO(xiejw): Validate TPU model. TPUModel only?
# TODO(xiejw): Validate replicas. Full or 1. Shall we allow subset?
# TODO(xiejw): Adds reduction option.
- replicas = 1 if replicas is None else replicas
+ if strategy is None:
+ strategy = TPUDistributionStrategy(num_cores_per_host=1)
return KerasTPUModel(
- inputs=model.inputs, outputs=model.outputs, name=model.name,
- replicas=replicas)
+ cpu_model=model,
+ tpu_name_or_address=tpu_name_or_address,
+ strategy=strategy)
diff --git a/tensorflow/contrib/tpu/python/tpu/topology.py b/tensorflow/contrib/tpu/python/tpu/topology.py
index cda9a63f20..1fb26e701a 100644
--- a/tensorflow/contrib/tpu/python/tpu/topology.py
+++ b/tensorflow/contrib/tpu/python/tpu/topology.py
@@ -55,8 +55,9 @@ class Topology(object):
rank 3 numpy int32 array that describes a valid coordinate mapping.
"""
+ self._serialized = serialized
+
if serialized:
- self._serialized = serialized
self._parse_topology(serialized)
else:
self._mesh_shape = np.asarray(mesh_shape, dtype=np.int32)
@@ -131,7 +132,7 @@ class Topology(object):
proto.mesh_shape[:] = list(self._mesh_shape)
proto.num_tasks = self._device_coordinates.shape[0]
proto.num_tpu_devices_per_task = self._device_coordinates.shape[1]
- proto.device_coordinates = list(self._device_coordinates.flatten())
+ proto.device_coordinates.extend(list(self._device_coordinates.flatten()))
self._serialized = proto.SerializeToString()
return self._serialized
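The one-line fix above reflects how protobuf repeated scalar fields behave in Python: direct assignment raises, while `extend` (or slice assignment, as already used for `mesh_shape`) is accepted. A short sketch, assuming the generated `topology_pb2` module:

```
# Sketch of repeated-field semantics behind this fix; topology_pb2 is the
# module generated from tensorflow/contrib/tpu/proto/topology.proto.
from tensorflow.contrib.tpu.proto import topology_pb2

proto = topology_pb2.TopologyProto()
try:
  proto.device_coordinates = [0, 0, 0, 0, 0, 1]  # direct assignment raises
except AttributeError:
  pass
proto.device_coordinates.extend([0, 0, 0, 0, 0, 1])  # appends element-wise
proto.device_coordinates[:] = [0, 0, 0, 0, 0, 1]     # slice form also works
```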
diff --git a/tensorflow/contrib/tpu/python/tpu/topology_test.py b/tensorflow/contrib/tpu/python/tpu/topology_test.py
new file mode 100644
index 0000000000..e67fdb263a
--- /dev/null
+++ b/tensorflow/contrib/tpu/python/tpu/topology_test.py
@@ -0,0 +1,46 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+"""Tests for topology.py."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.tpu.python.tpu import topology
+
+from tensorflow.python.platform import test
+
+
+class TopologyTest(test.TestCase):
+
+ def testSerialization(self):
+ """Test if the class is able to generate serialzied string."""
+ original_topology = topology.Topology(
+ mesh_shape=[1, 1, 2],
+ device_coordinates=[[[0, 0, 0], [0, 0, 1]]],
+ )
+ serialized_str = original_topology.serialized()
+ new_topology = topology.Topology(serialized=serialized_str)
+
+    # Make sure the topology recovered from the serialized string is the
+    # same as the original topology.
+ self.assertAllEqual(
+ original_topology.mesh_shape, new_topology.mesh_shape)
+ self.assertAllEqual(
+ original_topology.device_coordinates, new_topology.device_coordinates)
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu.py b/tensorflow/contrib/tpu/python/tpu/tpu.py
index dc473c5846..7216626a58 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu.py
@@ -151,6 +151,41 @@ class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
self._name = name
self._unsupported_ops = []
self._pivot = pivot
+ self._replicated_vars = {}
+
+ def get_replicated_var_handle(self, var):
+ """Returns a variable handle for replicated TPU variable 'var'.
+
+    This is a method used by an experimental replicated variable
+ implementation and is not intended as a public API.
+
+ Args:
+ var: The replicated TPU variable.
+
+ Returns:
+ The handle of the TPU replicated input node.
+ """
+ handle = self._replicated_vars.get(var)
+ if handle is not None:
+ return handle
+
+ # Builds a TPUReplicatedInput node for the variable, if one does not already
+ # exist. The TPUReplicatedInput node must belong to the enclosing
+ # control-flow scope of the TPUReplicateContext.
+ # TODO(phawkins): consider changing the contract of the TPU encapsulation
+ # so the TPUReplicatedInput nodes go inside the TPUReplicateContext scope
+ # instead.
+
+ # pylint: disable=protected-access
+ graph = ops.get_default_graph()
+ saved_context = graph._get_control_flow_context()
+ graph._set_control_flow_context(self.outer_context)
+ handle = tpu_ops.tpu_replicated_input(
+ [v.handle for v in var._vars], name=var.name + "/handle")
+ graph._set_control_flow_context(saved_context)
+ # pylint: enable=protected-access
+ self._replicated_vars[var] = handle
+ return handle
def report_unsupported_operations(self):
if self._unsupported_ops:
@@ -227,7 +262,7 @@ class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
class FakeOp(object):
"""A helper class to determine the current device.
- Supports only the device set/get methods needed to run the
+ Supports only the type and device set/get methods needed to run the
graph's _apply_device_function method.
"""
@@ -235,11 +270,18 @@ class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
self._device = ""
@property
+ def type(self):
+ return "FakeOp"
+
+ @property
def device(self):
return self._device
def _set_device(self, device):
- self._device = device.to_string()
+ if isinstance(device, pydev.DeviceSpec):
+ self._device = device.to_string()
+ else:
+ self._device = device
if self._outside_compilation_cluster:
raise NotImplementedError("Cannot nest outside_compilation clusters")
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu_config.py b/tensorflow/contrib/tpu/python/tpu/tpu_config.py
index 6d7331e3c7..9e010922dc 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu_config.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu_config.py
@@ -23,8 +23,6 @@ import collections
import json
import os
-import numpy as np
-
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import run_config as run_config_lib
@@ -43,6 +41,7 @@ class InputPipelineConfig(object):
PER_SHARD_V1 = 1
PER_HOST_V1 = 2
PER_HOST_V2 = 3
+ BROADCAST = 4
# TODO(b/72511246) Provide a simplified api to configure model parallelism.
@@ -50,7 +49,7 @@ class TPUConfig(
collections.namedtuple('TPUConfig', [
'iterations_per_loop',
'num_shards',
- 'computation_shape',
+ 'num_cores_per_replica',
'per_host_input_for_training',
'tpu_job_name',
'initial_infeed_sleep_secs',
@@ -67,22 +66,22 @@ class TPUConfig(
case, this number equals the total number of TPU cores. For
model-parallelism, the total number of TPU cores equals
product(computation_shape) * num_shards.
- computation_shape: Defaults to `None`, which disables model parallelism. A
- list of size 3 which describes the shape of a model replica's block of
- cores. This is required by model-parallelism which enables partitioning
- the model to multiple cores. For example, [2, 2, 1] means the model is
- partitioned across 4 cores which span two cores in both x and y
- coordinates. Please refer to @{tf.contrib.tpu.Topology} for the
- geometry of a TPU mesh.
+ num_cores_per_replica: Defaults to `None`, which disables model parallelism.
+ An integer which describes the number of TPU cores per model replica. This
+      is required by model-parallelism, which partitions
+      the model across multiple cores. Currently num_cores_per_replica must be
+ 1, 2, 4, or 8.
per_host_input_for_training: If `True`, `PER_HOST_V1`, or `PER_HOST_V2`,
- `input_fn` is invoked per-host rather than per-core. With per-host input
- pipeline configuration, `input_fn` is invoked once on each host. With the
- per-core input pipeline configuration, it is invoked once for each core.
+ `input_fn` is invoked once on each host. With the per-core input pipeline
+ configuration, it is invoked once for each core.
With a global batch size `train_batch_size` in `TPUEstimator` constructor,
the batch size for each shard is `train_batch_size` // #hosts in the
`True` or `PER_HOST_V1` mode. In `PER_HOST_V2` mode, it is
- `train_batch_size` // #cores. With the per-core input pipeline
- configuration, the shard batch size is also `train_batch_size` // #cores.
+ `train_batch_size` // #cores. In `BROADCAST` mode, `input_fn` is only
+ invoked once on host 0 and the tensors are broadcasted to all other
+      replicas. The batch size equals `train_batch_size`. With the per-core
+ input pipeline configuration, the shard batch size is also
+ `train_batch_size` // #cores.
Note: per_host_input_for_training==PER_SHARD_V1 only supports mode.TRAIN.
tpu_job_name: The name of the TPU job. Typically, this name is auto-inferred
within TPUEstimator, however when using ClusterSpec propagation in more
@@ -99,7 +98,7 @@ class TPUConfig(
def __new__(cls,
iterations_per_loop=2,
num_shards=None,
- computation_shape=None,
+ num_cores_per_replica=None,
per_host_input_for_training=True,
tpu_job_name=None,
initial_infeed_sleep_secs=None):
@@ -112,19 +111,12 @@ class TPUConfig(
if num_shards is not None:
util_lib.check_positive_integer(num_shards, 'TPUConfig num_shards')
- # Check computation_shape
- if computation_shape is not None and len(computation_shape) != 3:
- raise ValueError(
- 'computation_shape must be a list with length 3 or None; got {}'.
- format(str(computation_shape)))
-
- if computation_shape is not None:
- computation_shape_array = np.asarray(computation_shape, dtype=np.int32)
- # This prevents any computation being replicated across multiple hosts, so
- # that each host feeds the same number of computations.
- if any(computation_shape_array < 1) or any(computation_shape_array > 2):
- raise ValueError('computation_shape elements can only be 1 or 2; got '
- 'computation_shape={}'.format(computation_shape))
+ # Check num_cores_per_replica
+ if num_cores_per_replica is not None:
+ if num_cores_per_replica not in [1, 2, 4, 8]:
+ raise ValueError(
+ 'num_cores_per_replica must be 1, 2, 4, or 8; got {}'.format(
+ str(num_cores_per_replica)))
# per_host_input_for_training may be True, False, or integer in [1..3].
# Map legacy values (True, False) to numeric values.
@@ -144,7 +136,7 @@ class TPUConfig(
cls,
iterations_per_loop=iterations_per_loop,
num_shards=num_shards,
- computation_shape=computation_shape,
+ num_cores_per_replica=num_cores_per_replica,
per_host_input_for_training=per_host_input_for_training,
tpu_job_name=tpu_job_name,
initial_infeed_sleep_secs=initial_infeed_sleep_secs)
@@ -214,6 +206,12 @@ class RunConfig(run_config_lib.RunConfig):
self._session_config.cluster_def.CopyFrom(
self._cluster_spec.as_cluster_def())
+ def _maybe_overwrite_session_config_for_distributed_training(self):
+ # Overrides the parent-class session_config overwrite, which is meant for
+ # between-graph replication. TPU runs in-graph and should not have a device
+ # filter, so doing nothing here disables the overwrite.
+ pass
+
@property
def evaluation_master(self):
return self._evaluation_master
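To make the renamed field concrete, here is a minimal sketch of configuring a `RunConfig` with the new API. The master address, model directory, and shard counts are assumptions for illustration, not values from this patch:

```python
# Minimal sketch (assumed values): two model replicas, each spanning four
# cores, with per-host input pipelines.
from tensorflow.contrib.tpu.python.tpu import tpu_config

run_config = tpu_config.RunConfig(
    master='grpc://10.0.0.1:8470',  # hypothetical TPU worker address
    model_dir='/tmp/model',         # hypothetical checkpoint directory
    tpu_config=tpu_config.TPUConfig(
        iterations_per_loop=100,
        num_shards=2,                 # number of model replicas
        num_cores_per_replica=4,      # replaces computation_shape=[1, 2, 2]
        per_host_input_for_training=(
            tpu_config.InputPipelineConfig.PER_HOST_V2)))
```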
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu_config_test.py b/tensorflow/contrib/tpu/python/tpu/tpu_config_test.py
index 37ef3dbe1e..2326fe97a8 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu_config_test.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu_config_test.py
@@ -21,6 +21,7 @@ from __future__ import print_function
import json
from tensorflow.contrib.tpu.python.tpu import tpu_config as tpu_config_lib
+from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.platform import test
@@ -33,6 +34,46 @@ def _set_tf_config_env_variable(tf_config):
class TPURunConfigTest(test.TestCase):
+ def test_no_session_config_set_in_local_case(self):
+ run_config = tpu_config_lib.RunConfig()
+ self.assertIsNone(run_config.session_config)
+
+ def test_no_session_config_overwrite_in_local_case(self):
+ session_config = config_pb2.ConfigProto(allow_soft_placement=True)
+ run_config = tpu_config_lib.RunConfig(session_config=session_config)
+ self.assertEqual(session_config, run_config.session_config)
+
+ def test_no_session_config_set_with_cluster_spec(self):
+ tf_config = {
+ 'cluster': {
+ run_config_lib.TaskType.CHIEF: ['host3:3'],
+ run_config_lib.TaskType.WORKER: ['host3:4']
+ },
+ 'task': {
+ 'type': run_config_lib.TaskType.CHIEF,
+ 'index': 0
+ }
+ }
+ with _set_tf_config_env_variable(tf_config):
+ run_config = tpu_config_lib.RunConfig()
+ self.assertIsNone(run_config.session_config)
+
+ def test_no_session_config_overwrite_with_cluster_spec(self):
+ tf_config = {
+ 'cluster': {
+ run_config_lib.TaskType.CHIEF: ['host3:3'],
+ run_config_lib.TaskType.WORKER: ['host3:4']
+ },
+ 'task': {
+ 'type': run_config_lib.TaskType.CHIEF,
+ 'index': 0
+ }
+ }
+ with _set_tf_config_env_variable(tf_config):
+ session_config = config_pb2.ConfigProto(allow_soft_placement=True)
+ run_config = tpu_config_lib.RunConfig(session_config=session_config)
+ self.assertEqual(session_config, run_config.session_config)
+
def test_fail_with_invalid_num_shards(self):
with self.assertRaisesRegexp(ValueError, 'must be positive'):
tpu_config_lib.RunConfig(
@@ -43,15 +84,11 @@ class TPURunConfigTest(test.TestCase):
tpu_config_lib.RunConfig(
tpu_config=tpu_config_lib.TPUConfig(iterations_per_loop=0))
- def test_fail_with_invalid_computation_shape(self):
- with self.assertRaisesRegexp(ValueError,
- 'computation_shape must be a list with length'
- ' 3 or None'):
- tpu_config_lib.TPUConfig(computation_shape=[2, 1])
-
- with self.assertRaisesRegexp(ValueError,
- 'computation_shape elements can only be'):
- tpu_config_lib.TPUConfig(computation_shape=[1, 3, 1])
+ def test_fail_with_invalid_num_cores_per_replica(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'num_cores_per_replica must be 1, 2, 4, or 8;'
+ ' got 7'):
+ tpu_config_lib.TPUConfig(num_cores_per_replica=7)
class TPURunConfigMasterTest(test.TestCase):
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu_context.py b/tensorflow/contrib/tpu/python/tpu/tpu_context.py
index c4c69902f9..211c59cb90 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu_context.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu_context.py
@@ -21,8 +21,6 @@ from __future__ import print_function
from contextlib import contextmanager
import copy
-import numpy as np
-
from tensorflow.contrib.tpu.python.tpu import device_assignment as tpu_device_assignment
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
@@ -33,15 +31,26 @@ from tensorflow.python.platform import tf_logging as logging
_DEFAULT_JOB_NAME = 'tpu_worker'
_DEFAULT_COORDINATOR_JOB_NAME = 'coordinator'
_LOCAL_MASTERS = ('', 'local')
+_NUM_CORES_TO_COMPUTATION_SHAPE = {
+ 1: [1, 1, 1],
+ 2: [1, 1, 2],
+ 4: [1, 2, 2],
+ 8: [2, 2, 2]
+}
class TPUContext(object):
"""The context of current input_fn invocation."""
- def __init__(self, internal_ctx, input_device=None, invocation_index=None):
+ def __init__(self,
+ internal_ctx,
+ input_device=None,
+ invocation_index=None,
+ call_from_input_fn=True):
self._internal_ctx = internal_ctx
self._input_device = input_device
self._invocation_index = invocation_index
+ self._call_from_input_fn = call_from_input_fn
def current_input_fn_deployment(self):
"""The configuration of the current input_fn invocation.
@@ -69,11 +78,21 @@ class TPUContext(object):
total invocation count is equal to the number of hosts in the system
and num replicas consumed by current invocation is equal to number of
cores per host.
+
+ Raises:
+ RuntimeError: If this method is called from model_fn; it is only valid
+ inside input_fn.
"""
+ if not self._call_from_input_fn:
+ raise RuntimeError('This TPUContext instance must not be called from'
+ ' model_fn.')
+
if self._internal_ctx.is_input_sharded_per_core():
total_invocation_count = (self._internal_ctx.num_hosts
* self._internal_ctx.num_of_replicas_per_host)
replicas_consumed = 1
+ elif self._internal_ctx.is_input_broadcast_with_iterators():
+ total_invocation_count = 1
+ replicas_consumed = self._internal_ctx.num_replicas
else:
total_invocation_count = self._internal_ctx.num_hosts
replicas_consumed = self._internal_ctx.num_of_replicas_per_host
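The tuple returned above lets an `input_fn` shard its source data per invocation. A hedged sketch, assuming the context is exposed under `params['context']` and using hypothetical GCS paths:

```python
import tensorflow as tf

def input_fn(params):
  """Hypothetical input_fn sharding files by invocation (sketch only)."""
  ctx = params['context']
  _, call_index, total_calls, _ = ctx.current_input_fn_deployment()
  # In BROADCAST mode total_calls == 1, so this invocation reads everything.
  files = ['gs://bucket/shard-%05d.tfrecord' % i for i in range(64)]
  dataset = tf.data.TFRecordDataset(files[call_index::total_calls])
  return dataset.repeat().batch(params['batch_size'])
```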
@@ -92,6 +111,27 @@ class TPUContext(object):
"""
return self._internal_ctx.num_replicas
+ @property
+ def num_hosts(self):
+ """The number of hosts for the TPU system."""
+ return self._internal_ctx.num_hosts
+
+ @property
+ def num_of_replicas_per_host(self):
+ """The number of replicas for each host."""
+ if self._internal_ctx.model_parallelism_enabled:
+ raise ValueError(
+ 'num_of_replicas_per_host is not supported for model_parallelism')
+ return self._internal_ctx.num_of_replicas_per_host
+
+ @property
+ def device_assignment(self):
+ """Returns device_assignment object."""
+ if self._call_from_input_fn:
+ raise RuntimeError('This TPUContext instance must not be called from'
+ ' input_fn.')
+ return self._internal_ctx.device_assignment
+
def device_for_replica(self, replica_id):
"""Returns the tuple of (CPU device and device ordinal) for replica.
@@ -108,8 +148,8 @@ class TPUContext(object):
# as far as model is replicated to all cores in the system.
# If the precise replica_id to device mapping is required, please
- # set the computation_shape as [1,1,1] in TPUConfig to enable
- # the model parallelism.
+ # set num_cores_per_replica to 1 in TPUConfig to enable model
+ # parallelism.
if self._internal_ctx.model_parallelism_enabled:
raise RuntimeError(
'device_for_replica is not yet implemented for model parallelism. '
@@ -162,9 +202,14 @@ class _InternalTPUContext(object):
self._eval_on_tpu = eval_on_tpu
self._model_parallelism_enabled = (
- use_tpu and config.tpu_config.computation_shape)
+ use_tpu and config.tpu_config.num_cores_per_replica)
self._mode = None
-
+ num_cores_per_replica = config.tpu_config.num_cores_per_replica
+ if num_cores_per_replica:
+ self._computation_shape = _NUM_CORES_TO_COMPUTATION_SHAPE[
+ num_cores_per_replica]
+ else:
+ self._computation_shape = None
self._lazy_tpu_system_metadata_dict = {} # key by master address
self._lazy_device_assignment_dict = {} # key by master address
self._lazy_validation_dict = {} # key by ModeKeys
@@ -225,11 +270,12 @@ class _InternalTPUContext(object):
device_assignment = tpu_device_assignment.device_assignment(
tpu_system_metadata.topology,
- computation_shape=self._config.tpu_config.computation_shape,
+ computation_shape=self._computation_shape,
num_replicas=self.num_replicas)
- logging.info('computation_shape: %s',
- str(self._config.tpu_config.computation_shape))
+ logging.info('num_cores_per_replica: %s',
+ str(self._config.tpu_config.num_cores_per_replica))
+ logging.info('computation_shape: %s', str(self._computation_shape))
logging.info('num_replicas: %d', self.num_replicas)
logging.info('device_assignment.topology.device_coordinates: %s',
str(device_assignment.topology.device_coordinates))
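A small self-contained check of how a core count turns into a 3-D computation shape and a replica count, using the mapping from the hunk above; the 32-core system size is an assumption:

```python
# Pure-Python illustration of _NUM_CORES_TO_COMPUTATION_SHAPE (values copied
# from this patch; the system size below is hypothetical).
num_cores_to_shape = {1: [1, 1, 1], 2: [1, 1, 2], 4: [1, 2, 2], 8: [2, 2, 2]}

num_cores_per_replica = 4
shape = num_cores_to_shape[num_cores_per_replica]
assert shape[0] * shape[1] * shape[2] == num_cores_per_replica
num_cores_in_system = 32  # hypothetical slice size
assert num_cores_in_system % num_cores_per_replica == 0
num_replicas = num_cores_in_system // num_cores_per_replica  # 8 replicas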
@@ -270,23 +316,20 @@ class _InternalTPUContext(object):
num_cores_in_system = self.num_cores
if self.model_parallelism_enabled:
- computation_shape_array = np.asarray(
- self._config.tpu_config.computation_shape, dtype=np.int32)
- num_cores_per_replica = np.prod(computation_shape_array)
+ num_cores_per_replica = self._config.tpu_config.num_cores_per_replica
if num_cores_per_replica > num_cores_in_system:
raise ValueError(
'The num of cores required by the model parallelism, specified by '
- 'TPUConfig.computation_shape, is larger than the total num of '
- 'TPU cores in the system. computation_shape: {}, num cores '
- 'in the system: {}'.format(
- self._config.tpu_config.computation_shape,
- num_cores_in_system))
+ 'TPUConfig.num_cores_per_replica, is larger than the total num of '
+ 'TPU cores in the system. num_cores_per_replica: {}, num cores '
+ 'in the system: {}'.format(num_cores_per_replica,
+ num_cores_in_system))
if num_cores_in_system % num_cores_per_replica != 0:
raise RuntimeError(
'The num of cores in the system ({}) is not divisible by the num '
'of cores ({}) required by the model parallelism, specified by '
- 'TPUConfig.computation_shape. This should never happen!'.format(
+ 'TPUConfig.num_cores_per_replica. This should never happen!'.format(
num_cores_in_system, num_cores_per_replica))
return num_cores_in_system // num_cores_per_replica
@@ -314,6 +357,11 @@ class _InternalTPUContext(object):
return (self._config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_HOST_V2)
+ def is_input_broadcast_with_iterators(self):
+ """Return true if input_fn should be run in the full_replicae config."""
+ return (self._config.tpu_config.per_host_input_for_training is
+ tpu_config.InputPipelineConfig.BROADCAST)
+
def is_running_on_cpu(self, is_export_mode=False):
"""Determines whether the input_fn and model_fn should be invoked on CPU.
@@ -378,7 +426,7 @@ class _InternalTPUContext(object):
"""Returns the shard batch size for `input_fn`."""
global_batch_size = self.global_batch_size
- if self.is_running_on_cpu():
+ if (self.is_running_on_cpu() or self.is_input_broadcast_with_iterators()):
return global_batch_size
# On TPU
@@ -393,7 +441,7 @@ class _InternalTPUContext(object):
"""Returns the shard batch size for `model_fn`."""
global_batch_size = self.global_batch_size
- if self.is_running_on_cpu():
+ if (self.is_running_on_cpu() or self.is_input_broadcast_with_iterators()):
return global_batch_size
# On TPU. always sharded per shard.
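A condensed restatement of the two batch-size methods changed in these hunks, as a sketch covering only the per-core sharding case:

```python
def shard_batch_size(global_batch_size, on_cpu, broadcast, num_shards):
  """Sketch of batch_size_for_input_fn/model_fn (per-core sharding only)."""
  if on_cpu or broadcast:
    return global_batch_size              # one pipeline sees the global batch
  return global_batch_size // num_shards  # otherwise each shard gets a slice

assert shard_batch_size(1024, False, True, 8) == 1024  # BROADCAST mode
assert shard_batch_size(1024, False, False, 8) == 128  # per-core sharding
```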
@@ -450,17 +498,23 @@ class _InternalTPUContext(object):
master = self.master_job
- def _placement_function(_sentinal=None, core_id=None, host_id=None): # pylint: disable=invalid-name
+ def _placement_function(_sentinal=None, replica_id=None, host_id=None): # pylint: disable=invalid-name
+ """Return the host device given replica_id or host_id."""
assert _sentinal is None
- if core_id is not None and host_id is not None:
+ if replica_id is not None and host_id is not None:
raise RuntimeError(
- 'core_id and host_id can have only one non-None value.')
+ 'replica_id and host_id can have only one non-None value.')
if master is None:
return '/replica:0/task:0/device:CPU:0'
else:
- if core_id is not None:
- host_id = core_id / self.num_of_cores_per_host
+ if replica_id is not None:
+ if self.model_parallelism_enabled:
+ return self.device_assignment.host_device(
+ replica=replica_id, job=master)
+ else:
+ host_id = replica_id // self.num_of_cores_per_host
+
return '/job:%s/task:%d/device:CPU:0' % (master, host_id)
return _placement_function
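A standalone toy version of the placement function above for the non-model-parallel branch; note the integer division when mapping a replica to its host. The job name and cores-per-host values are assumptions:

```python
def make_placement_fn(master, num_cores_per_host):
  """Simplified stand-in for _placement_function (no model parallelism)."""
  def placement_fn(replica_id=None, host_id=None):
    if (replica_id is None) == (host_id is None):
      raise RuntimeError('replica_id and host_id can have only one '
                         'non-None value.')
    if replica_id is not None:
      host_id = replica_id // num_cores_per_host  # integer division matters
    return '/job:%s/task:%d/device:CPU:0' % (master, host_id)
  return placement_fn

fn = make_placement_fn('tpu_worker', num_cores_per_host=8)
assert fn(replica_id=9) == '/job:tpu_worker/task:1/device:CPU:0'
```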
@@ -533,7 +587,7 @@ class _InternalTPUContext(object):
'be ({}), got ({}). For non-model-parallelism, num_replicas should '
'be the total num of TPU cores in the system. For '
'model-parallelism, the total number of TPU cores should be '
- 'product(computation_shape) * num_replicas. Please set it '
+ 'num_cores_per_replica * num_replicas. Please set it '
'accordingly or leave it as `None`'.format(
self._get_master_address(), num_replicas,
user_provided_num_replicas))
@@ -612,7 +666,7 @@ def _get_tpu_context(config, train_batch_size, eval_batch_size,
"""Returns an instance of `_InternalTPUContext`."""
if (config.tpu_config.num_shards == 1 and
- config.tpu_config.computation_shape is None):
+ config.tpu_config.num_cores_per_replica is None):
logging.warning(
'Setting TPUConfig.num_shards==1 is an unsupported behavior. '
'Please fix as soon as possible (leaving num_shards as None).')
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py b/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
index 2b1cb4245e..718ea630a8 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
@@ -81,12 +81,17 @@ _TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
+_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference'
+# Ideally _USE_TPU_KEY should be reserved as well. However, there are already
+# models that make use of this key, so it cannot be reserved now without
+# breaking them. In the long run, we would like to mitigate this by migrating
+# models off of _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
@@ -211,8 +216,8 @@ class _SIGNAL(object):
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
- See `EstimatorSpec` for `mode`, 'predictions, 'loss', 'train_op', and
- 'export_outputs`.
+ See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
+ `export_outputs`.
For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
@@ -226,7 +231,7 @@ class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=prote
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
- dict. `metric_fn` takes the `tensors` and returns a dict from metric string
+ a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for MNIST example how to specify the
`eval_metrics`.
@@ -842,6 +847,65 @@ def generate_per_host_v2_enqueue_ops_fn_for_host(
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
+def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
+ num_hosts):
+ """Generates infeed enqueue ops for one input_fn on all the hosts."""
+ captured_infeed_queue = _CapturedObject()
+ hooks = []
+ device_0 = ctx.tpu_host_placement_function(host_id=0)
+ with ops.device(device_0):
+ user_context = tpu_context.TPUContext(
+ internal_ctx=ctx, input_device=device_0, invocation_index=0)
+ inputs = _Inputs.from_input_fn(input_fn(user_context))
+
+ is_dataset = inputs.is_dataset
+ if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
+ raise TypeError('Mode PREDICT not yet supported in BROADCAST mode.')
+
+ hooks.append(inputs.dataset_initializer_hook())
+ num_replicas_per_host = ctx.num_of_replicas_per_host
+
+ def tpu_ordinal_function_impl(replica_id):
+ if ctx.device_assignment:
+ return ctx.device_assignment.tpu_ordinal(replica_id=replica_id)
+ else:
+ return replica_id % num_replicas_per_host
+
+ def device_function_impl(replica_id):
+ return ctx.tpu_host_placement_function(replica_id=replica_id)
+
+ def enqueue_ops_fn():
+ """Generates enqueue ops for all the hosts."""
+ broadcasted_inputs = []
+ flattened_inputs = None # Cache result from input_fn.
+ for host_id in xrange(num_hosts):
+ with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):
+ for _ in xrange(ctx.num_of_replicas_per_host):
+ # Note: input_fn is only called once at host 0 for the first replica.
+ # The features and labels returned from that invocation are
+ # broadcast to the other replicas (including the replicas on
+ # other hosts).
+ if flattened_inputs is None:
+ features, labels = inputs.features_and_labels() # Calls get_next()
+ inputs_structure_recorder.validate_and_record_structure(
+ features, labels)
+ flattened_inputs = (
+ inputs_structure_recorder.flatten_features_and_labels(
+ features, labels))
+ broadcasted_inputs.append(flattened_inputs)
+
+ infeed_queue = tpu_feed.InfeedQueue(
+ number_of_tuple_elements=len(broadcasted_inputs[0]))
+ captured_infeed_queue.capture(infeed_queue)
+ enqueue_ops = infeed_queue.generate_enqueue_ops(
+ broadcasted_inputs,
+ tpu_ordinal_function=tpu_ordinal_function_impl,
+ placement_function=device_function_impl)
+ return enqueue_ops
+
+ return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
+
+
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
@@ -1074,6 +1138,22 @@ class _InputPipeline(object):
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
+ elif self._ctx.is_input_broadcast_with_iterators():
+ # Only call input_fn on host 0.
+ host_device = tpu_host_placement_fn(host_id=0)
+ enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
+ generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
+ self._inputs_structure_recorder,
+ num_hosts))
+ all_hooks.extend(hooks)
+ if is_dataset:
+ run_infeed_loop_on_coordinator = False
+ enqueue_ops.append(
+ _wrap_computation_in_while_loop(
+ device=host_device, op_fn=enqueue_ops_fn))
+ else:
+ enqueue_ops.append(enqueue_ops_fn())
+ infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
@@ -1414,8 +1494,16 @@ class _ModelFnWrapper(object):
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
+ running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
+ _add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
+
+ if not running_on_cpu:
+ user_context = tpu_context.TPUContext(
+ internal_ctx=self._ctx, call_from_input_fn=False)
+ _add_item_to_params(params, _CTX_KEY, user_context)
+
estimator_spec = self._model_fn(features=features, **kwargs)
- if (self._ctx.is_running_on_cpu(is_export_mode) and
+ if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
@@ -1593,7 +1681,7 @@ class _OutfeedHostCall(object):
# place all ops on tpu host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
- with ops.device(self._ctx.tpu_host_placement_function(core_id=0)):
+ with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
@@ -1978,7 +2066,7 @@ class TPUEstimator(estimator_lib.Estimator):
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
- config.tpu_config.computation_shape):
+ config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
@@ -2033,24 +2121,29 @@ class TPUEstimator(estimator_lib.Estimator):
strip_default_attrs,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
- export_tags=None):
+ export_tags=None,
+ check_variables=True):
if mode != model_fn_lib.ModeKeys.PREDICT:
raise NotImplementedError(
'TPUEstimator only handles mode PREDICT for export_savedmodel(); '
'got {}.'.format(mode))
- super(TPUEstimator, self)._add_meta_graph_for_mode(builder,
- input_receiver_fn_map,
- checkpoint_path,
- strip_default_attrs,
- save_variables,
- mode=mode)
+ (super(TPUEstimator, self).
+ _add_meta_graph_for_mode(builder,
+ input_receiver_fn_map,
+ checkpoint_path,
+ strip_default_attrs,
+ save_variables,
+ mode=mode,
+ export_tags=export_tags,
+ check_variables=check_variables))
if self._export_to_tpu:
input_receiver_fn_map = {_REWRITE_FOR_INFERENCE_MODE:
input_receiver_fn_map[mode]}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _REWRITE_FOR_INFERENCE_MODE
+ # See b/110052256 for why `check_variables` is `False`.
(super(TPUEstimator, self).
_add_meta_graph_for_mode(builder,
input_receiver_fn_map,
@@ -2058,7 +2151,8 @@ class TPUEstimator(estimator_lib.Estimator):
strip_default_attrs,
save_variables=False,
mode=mode,
- export_tags=export_tags))
+ export_tags=export_tags,
+ check_variables=False))
def _call_model_fn(self, features, labels, mode, config):
if mode == _REWRITE_FOR_INFERENCE_MODE:
@@ -2284,10 +2378,20 @@ class TPUEstimator(estimator_lib.Estimator):
# Clear the bit.
self._is_input_fn_invoked = None
+ # examples_hook is added to training_hooks for both CPU and TPU
+ # execution.
+ examples_hook = ExamplesPerSecondHook(
+ ctx.global_batch_size,
+ output_dir=self.model_dir,
+ every_n_steps=self._log_every_n_steps)
+
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
- return model_fn_wrapper.call_without_tpu(
+ estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
+ estimator_spec = estimator_spec._replace(
+ training_hooks=estimator_spec.training_hooks + (examples_hook,))
+ return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
@@ -2355,10 +2459,6 @@ class TPUEstimator(estimator_lib.Estimator):
},
every_n_iter=logging_hook_frequency)
])
- examples_hook = ExamplesPerSecondHook(
- ctx.global_batch_size,
- output_dir=self.model_dir,
- every_n_steps=self._log_every_n_steps)
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
hooks.append(examples_hook)
@@ -3143,9 +3243,53 @@ def _add_item_to_params(params, key, value):
if isinstance(params, hparam.HParams):
# For HParams, we need to use special API.
if key in params:
- params.key = value
+ params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
+
+
+def export_estimator_savedmodel(estimator,
+ export_dir_base,
+ serving_input_receiver_fn,
+ assets_extra=None,
+ as_text=False,
+ checkpoint_path=None,
+ strip_default_attrs=False):
+ """Export `Estimator` trained model for TPU inference.
+
+ Args:
+ estimator: `Estimator` with which model has been trained.
+ export_dir_base: A string containing a directory in which to create
+ timestamped subdirectories containing exported SavedModels.
+ serving_input_receiver_fn: A function that takes no argument and
+ returns a `ServingInputReceiver` or `TensorServingInputReceiver`.
+ assets_extra: A dict specifying how to populate the assets.extra directory
+ within the exported SavedModel, or `None` if no extra assets are needed.
+ as_text: whether to write the SavedModel proto in text format.
+ checkpoint_path: The checkpoint path to export. If `None` (the default),
+ the most recent checkpoint found within the model directory is chosen.
+ strip_default_attrs: Boolean. If `True`, default-valued attributes will be
+ removed from the NodeDefs.
+
+ Returns:
+ The string path to the exported directory.
+ """
+ # `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
+ # `estimator.config`.
+ config = tpu_config.RunConfig(model_dir=estimator.model_dir)
+ est = TPUEstimator(
+ estimator._model_fn, # pylint: disable=protected-access
+ config=config,
+ params=estimator.params,
+ use_tpu=True,
+ train_batch_size=2048, # Does not matter.
+ eval_batch_size=2048, # Does not matter.
+ )
+ return est.export_savedmodel(export_dir_base, serving_input_receiver_fn,
+ assets_extra,
+ as_text,
+ checkpoint_path,
+ strip_default_attrs)
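The `set_hparam` change earlier in this file fixes a real bug: attribute assignment with a dotted name writes a literal `key` attribute instead of the hyperparameter named by the variable. A quick demonstration (sketch; requires tf.contrib):

```python
from tensorflow.contrib.training.python.training import hparam

params = hparam.HParams(batch_size=64)
key, value = 'batch_size', 128
params.key = value               # old code: creates a stray attribute 'key'
assert params.batch_size == 64   # the intended hparam is untouched
params.set_hparam(key, value)    # fixed code: updates the named hparam
assert params.batch_size == 128
```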
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu_feed.py b/tensorflow/contrib/tpu/python/tpu/tpu_feed.py
index 604e6600c8..a44b4f4622 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu_feed.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu_feed.py
@@ -461,7 +461,10 @@ class InfeedQueue(object):
name=full_name,
device_ordinal=tpu_ordinal)
- def generate_enqueue_ops(self, sharded_inputs, tpu_ordinal_function=None):
+ def generate_enqueue_ops(self,
+ sharded_inputs,
+ tpu_ordinal_function=None,
+ placement_function=None):
"""Generates the host-side Ops to enqueue the shards of a tuple.
sharded_inputs is a list, one for each shard, of lists of
@@ -483,6 +486,9 @@ class InfeedQueue(object):
shard index as input and returns the ordinal of the TPU device
the shard's infeed should be placed on. tpu_ordinal_function must be
set if the inputs are placed on CPU devices.
+ placement_function: if not None, a function that takes the shard index as
+ input and returns the host device on which the enqueue op should be
+ placed.
Returns:
A list of host-side Ops, one for each shard, that when executed together
@@ -508,8 +514,12 @@ class InfeedQueue(object):
tpu_ordinal_function = lambda index: -1
name_prefix = "%s/enqueue" % self._name
return [
- self._generate_enqueue_op(shard, name_prefix, index,
- tpu_ordinal=tpu_ordinal_function(index))
+ self._generate_enqueue_op(
+ shard,
+ name_prefix,
+ index,
+ tpu_ordinal=tpu_ordinal_function(index),
+ device=placement_function(index) if placement_function else None)
for (shard, index) in zip(sharded_inputs, xrange(self.number_of_shards))
]
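The two callbacks split one shard index into a host device and an on-host ordinal. A self-contained sketch of that split; the job name and cores-per-host are assumptions:

```python
def place_shards(num_shards, cores_per_host, job='tpu_worker'):
  """Maps shard index -> (host CPU device, TPU ordinal on that host)."""
  return [('/job:%s/task:%d/device:CPU:0' % (job, i // cores_per_host),
           i % cores_per_host)
          for i in range(num_shards)]

assert place_shards(4, 2) == [
    ('/job:tpu_worker/task:0/device:CPU:0', 0),
    ('/job:tpu_worker/task:0/device:CPU:0', 1),
    ('/job:tpu_worker/task:1/device:CPU:0', 0),
    ('/job:tpu_worker/task:1/device:CPU:0', 1)]
```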
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py b/tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py
index 15f99d7eeb..53d33f4077 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py
@@ -23,6 +23,7 @@ import collections
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu_function
+from tensorflow.python.framework import ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
@@ -153,8 +154,9 @@ class CrossShardOptimizer(optimizer.Optimizer):
if grad is None:
summed_grads_and_vars.append((grad, var))
else:
- summed_grads_and_vars.append((tpu_ops.cross_replica_sum(
- grad, self._group_assignment), var))
+ with ops.colocate_with(grad):
+ summed_grads_and_vars.append((tpu_ops.cross_replica_sum(
+ grad, self._group_assignment), var))
return self._opt.apply_gradients(summed_grads_and_vars, global_step, name)
def get_slot(self, *args, **kwargs):
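The `colocate_with` change pins each cross-replica sum to the device that already holds the gradient. A toy TF 1.x graph showing the same colocation pattern, with `tf.reduce_sum` standing in for `cross_replica_sum`:

```python
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
  with tf.device('/cpu:0'):
    grad = tf.constant([1.0, 2.0])
  with tf.colocate_with(grad):
    # Inherits grad's device constraint instead of the default placement.
    summed = tf.reduce_sum(grad)
```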
diff --git a/tensorflow/contrib/training/python/training/sgdr_learning_rate_decay.py b/tensorflow/contrib/training/python/training/sgdr_learning_rate_decay.py
new file mode 100644
index 0000000000..ed0f398e30
--- /dev/null
+++ b/tensorflow/contrib/training/python/training/sgdr_learning_rate_decay.py
@@ -0,0 +1,187 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""SGDR learning rate decay function."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import math_ops, control_flow_ops
+
+
+def sgdr_decay(learning_rate, global_step, initial_period_steps,
+ t_mul=2.0, m_mul=1.0, name=None):
+ """Implements Stochastic Gradient Descent with Warm Restarts (SGDR).
+
+ As described in "SGDR: Stochastic Gradient Descent
+ with Warm Restarts" by Ilya Loshchilov & Frank Hutter, Proceedings of
+ ICLR'2017, available at https://arxiv.org/pdf/1608.03983.pdf
+
+ The learning rate decreases according to cosine annealing:
+
+ ```python
+ learning_rate * 0.5 * (1 + cos(x_val * pi)) # for x_val defined in [0, 1]
+ ```
+
+ Thus, at the beginning (when the restart index i = 0),
+ the learning rate decreases for `initial_period_steps` steps from the initial
+ learning rate `learning_rate` (when `x_val=0`, we get `cos(0)=1`) to
+ 0 (when `x_val=1`, we get `cos(pi)=-1`).
+
+ The decrease within the i-th period takes `t_i` steps,
+ where `t_0` = `initial_period_steps` is the user-defined number of batch
+ iterations (not epochs as in the paper) to be performed before the first
+ restart is launched.
+
+ Then, we perform the first restart (i=1) by setting the learning rate to
+ `learning_rate*(m_mul^i)`, where `m_mul in [0,1]` (set to 1 by default).
+ The i-th restart runs for `t_i=t_0*(t_mul^i)` steps, i.e., every new
+ restart runs `t_mul` times longer than the previous one.
+
+ Importantly, when one has no access to a validation set, SGDR suggests
+ reporting the best expected / recommended solution in the following way:
+ When we are within our initial run (i=0), every new solution represents
+ SGDR's recommended solution. Instead, when i>0, the recommended solution is
+ the one obtained at the end of each restart.
+
+ Note that the minimum learning rate is set to 0 for simplicity;
+ you can adjust the code to deal with any positive minimum learning rate
+ as defined in the paper.
+
+ `initial_period_steps` is the duration of the first period measured in terms
+ of number of minibatch updates. If one wants to use epochs, one should compute
+ the number of updates required for an epoch.
+
+ For example, assume the following parameters and intention:
+ Minibatch size: 100
+ Training dataset size: 10000
+ If the user wants the first decay period to span across 5 epochs, then
+ `initial_period_steps` = 5 * 10000/100 = 500
+
+ Train for 10000 batch iterations with the initial learning rate set to
+ 0.1, then restart to run 2 times longer, i.e., for 20000 batch iterations
+ and with the initial learning rate 0.05, then restart again and again,
+ doubling the runtime of each new period and with two times smaller
+ initial learning rate.
+
+ To accomplish the above, one would write:
+
+ ```python
+ ...
+ global_step = tf.Variable(0, trainable=False)
+ starter_learning_rate = 0.1
+ learning_rate = sgdr_decay(starter_learning_rate, global_step,
+ initial_period_steps=10000, t_mul=2, m_mul=0.5)
+ # Passing global_step to minimize() will increment it at each step.
+ learning_step = (
+ tf.train.GradientDescentOptimizer(learning_rate)
+ .minimize(...my loss..., global_step=global_step)
+ )
+
+ # Step | 0 | 1000 | 5000 | 9000 | 9999 | 10000 | 11000 |
+ # LR | 0.1 | 0.097 | 0.05 | 0.002 | 0.00 | 0.05 | 0.0496 |
+
+ # Step | 20000 | 29000 | 29999 | 30000 |
+ # LR | 0.025 | 0.0003 | 0.00 | 0.025 |
+ ```
+
+ Args:
+ learning_rate: A scalar `float32` or `float64` `Tensor` or a
+ Python number. The initial learning rate.
+ global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
+ Global step to use for the decay computation. Must not be negative.
+ initial_period_steps: Duration of the first period measured as the number
+ of minibatch updates. If one wants to use epochs, one should compute
+ the number of updates required for an epoch.
+ t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
+ Must be positive.
+ Used to derive the number of iterations in the i-th period:
+ `initial_period_steps * (t_mul^i)`. Defaults to 2.0.
+ m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
+ Must be positive.
+ Used to derive the initial learning rate of the i-th period:
+ `learning_rate * (m_mul^i)`. Defaults to 1.0.
+
+ Returns:
+ A scalar `Tensor` of the same type as `learning_rate`.
+ The learning rate for a provided global_step.
+
+ Raises:
+ ValueError: if `global_step` is not supplied.
+ """
+
+ if global_step is None:
+ raise ValueError("global_step is required for sgdr_decay.")
+ with ops.name_scope(name, "SGDRDecay",
+ [learning_rate, global_step,
+ initial_period_steps, t_mul, m_mul]) as name:
+ learning_rate = ops.convert_to_tensor(learning_rate,
+ name="initial_learning_rate")
+ dtype = learning_rate.dtype
+ global_step = math_ops.cast(global_step, dtype)
+ t_0 = math_ops.cast(initial_period_steps, dtype)
+ t_mul = math_ops.cast(t_mul, dtype)
+ m_mul = math_ops.cast(m_mul, dtype)
+
+ c_one = math_ops.cast(constant_op.constant(1.0), dtype)
+ c_half = math_ops.cast(constant_op.constant(0.5), dtype)
+ c_pi = math_ops.cast(constant_op.constant(math.pi), dtype)
+
+ # Find normalized value of the current step
+ x_val = math_ops.div(global_step, t_0)
+
+ def compute_step(x_val, geometric=False):
+ if geometric:
+ # Consider geometric series where t_mul != 1
+ # 1 + t_mul + t_mul^2 ... = (1 - t_mul^i_restart) / (1 - t_mul)
+
+ # First find how many restarts were performed for a given x_val
+ # Find maximal integer i_restart value for which this equation holds
+ # x_val >= (1 - t_mul^i_restart) / (1 - t_mul)
+ # x_val * (1 - t_mul) <= (1 - t_mul^i_restart)
+ # t_mul^i_restart <= (1 - x_val * (1 - t_mul))
+
+ # TensorFlow provides only the natural logarithm, so:
+ # i_restart <= log(1 - x_val * (1 - t_mul)) / log(t_mul)
+ # Find how many restarts were performed
+
+ i_restart = math_ops.floor(
+ math_ops.log(c_one - x_val * (c_one - t_mul)) / math_ops.log(t_mul))
+ # Compute the sum of all restarts before the current one
+ sum_r = (c_one - t_mul ** i_restart) / (c_one - t_mul)
+ # Compute our position within the current restart
+ x_val = (x_val - sum_r) / t_mul ** i_restart
+
+ else:
+ # Find how many restarts were performed
+ i_restart = math_ops.floor(x_val)
+ # Compute our position within the current restart
+ x_val = x_val - i_restart
+ return i_restart, x_val
+
+ i_restart, x_val = control_flow_ops.cond(
+ math_ops.equal(t_mul, c_one),
+ lambda: compute_step(x_val, geometric=False),
+ lambda: compute_step(x_val, geometric=True))
+
+ # If m_mul < 1, then the initial learning rate of every new restart will be
+ # smaller, i.e., by a factor of m_mul ** i_restart at i_restart-th restart
+ m_fac = learning_rate * (m_mul ** i_restart)
+
+ return math_ops.multiply(c_half * m_fac,
+ (math_ops.cos(x_val * c_pi) + c_one), name=name)
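A quick numeric cross-check of the schedule above in plain Python, covering only the geometric branch (t_mul != 1); the asserts mirror the step/LR table in the docstring:

```python
import math

def sgdr_py(step, t0, t_mul=2.0, m_mul=1.0, lr=0.1):
  """Plain-Python mirror of sgdr_decay's geometric branch (sketch)."""
  x = step / float(t0)
  i = math.floor(math.log(1 - x * (1 - t_mul)) / math.log(t_mul))
  sum_r = (1 - t_mul ** i) / (1 - t_mul)
  x = (x - sum_r) / t_mul ** i
  return 0.5 * lr * (m_mul ** i) * (math.cos(x * math.pi) + 1)

assert abs(sgdr_py(0, 10000) - 0.1) < 1e-6      # initial learning rate
assert sgdr_py(9999, 10000) < 1e-4              # decayed to ~0 before restart
assert abs(sgdr_py(10000, 10000) - 0.1) < 1e-6  # restart resets to lr
```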
diff --git a/tensorflow/contrib/training/python/training/sgdr_learning_rate_decay_test.py b/tensorflow/contrib/training/python/training/sgdr_learning_rate_decay_test.py
new file mode 100644
index 0000000000..4a46e9a49e
--- /dev/null
+++ b/tensorflow/contrib/training/python/training/sgdr_learning_rate_decay_test.py
@@ -0,0 +1,145 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Functional test for sgdr learning rate decay."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+
+from sgdr_learning_rate_decay import sgdr_decay
+from tensorflow.python.platform import googletest
+from tensorflow.python.framework import test_util
+from tensorflow.python.framework import dtypes
+from tensorflow import placeholder
+
+
+class SGDRDecayTest(test_util.TensorFlowTestCase):
+ """Unit tests for SGDR learning rate decay."""
+
+ def get_original_values(self, lr, t_e, mult_factor, iter_per_epoch, epochs):
+ """Get an array with learning rate values from the consecutive steps using
+ the original implementation
+ (https://github.com/loshchil/SGDR/blob/master/SGDR_WRNs.py)."""
+ t0 = math.pi / 2.0
+ tt = 0
+ te_next = t_e
+
+ lr_values = []
+ sh_lr = lr
+ for epoch in range(epochs):
+ for _ in range(iter_per_epoch):
+ # In the original approach training function is executed here
+ lr_values.append(sh_lr)
+ dt = 2.0 * math.pi / float(2.0 * t_e)
+ tt = tt + float(dt) / iter_per_epoch
+ if tt >= math.pi:
+ tt = tt - math.pi
+ cur_t = t0 + tt
+ new_lr = lr * (1.0 + math.sin(cur_t)) / 2.0 # lr_min = 0, lr_max = lr
+ sh_lr = new_lr
+ if (epoch + 1) == te_next: # time to restart
+ sh_lr = lr
+ tt = 0 # by setting to 0 we set lr to lr_max, see above
+ t_e = t_e * mult_factor # change the period of restarts
+ te_next = te_next + t_e # note the next restart's epoch
+
+ return lr_values
+
+ def get_sgdr_values(self, lr, initial_period_steps, t_mul, iters):
+ """Get an array with learning rate values from the consecutive steps
+ using current tensorflow implementation."""
+ with self.test_session():
+ step = placeholder(dtypes.int32)
+
+ decay = sgdr_decay(lr, step, initial_period_steps, t_mul)
+ lr_values = []
+ for i in range(iters):
+ lr_values.append(decay.eval(feed_dict={step: i}))
+
+ return lr_values
+
+ def testCompareToOriginal(self):
+ """Compare values generated by tensorflow implementation to the values
+ generated by the original implementation
+ (https://github.com/loshchil/SGDR/blob/master/SGDR_WRNs.py)."""
+ with self.test_session():
+ lr = 10.0
+ init_steps = 2
+ t_mul = 3
+ iters = 10
+ epochs = 50
+
+ org_lr = self.get_original_values(lr, init_steps, t_mul, iters, epochs)
+ sgdr_lr = self.get_sgdr_values(lr, init_steps*iters, t_mul, iters*epochs)
+
+ for org, sgdr in zip(org_lr, sgdr_lr):
+ self.assertAllClose(org, sgdr)
+
+ def testMDecay(self):
+ """Test m_mul argument. Check values for learning rate at the beginning
+ of the first, second, third and fourth period. """
+ with self.test_session():
+ step = placeholder(dtypes.int32)
+
+ lr = 0.1
+ t_e = 10
+ t_mul = 3
+ m_mul = 0.9
+
+ decay = sgdr_decay(lr, step, t_e, t_mul, m_mul)
+
+ test_step = 0
+ self.assertAllClose(decay.eval(feed_dict={step: test_step}),
+ lr)
+
+ test_step = t_e
+ self.assertAllClose(decay.eval(feed_dict={step: test_step}),
+ lr * m_mul)
+
+ test_step = t_e + t_e*t_mul
+ self.assertAllClose(decay.eval(feed_dict={step: test_step}),
+ lr * m_mul**2)
+
+ test_step = t_e + t_e*t_mul + t_e * (t_mul**2)
+ self.assertAllClose(decay.eval(feed_dict={step: test_step}),
+ lr * (m_mul**3))
+
+ def testCos(self):
+ """Check learning rate values at the beginning, in the middle
+ and at the end of the period."""
+ with self.test_session():
+ step = placeholder(dtypes.int32)
+ lr = 0.2
+ t_e = 1000
+ t_mul = 1
+
+ decay = sgdr_decay(lr, step, t_e, t_mul)
+
+ test_step = 0
+ self.assertAllClose(decay.eval(feed_dict={step: test_step}), lr)
+
+ test_step = t_e//2
+ self.assertAllClose(decay.eval(feed_dict={step: test_step}), lr/2)
+
+ test_step = t_e
+ self.assertAllClose(decay.eval(feed_dict={step: test_step}), lr)
+
+ test_step = t_e*3//2
+ self.assertAllClose(decay.eval(feed_dict={step: test_step}), lr/2)
+
+if __name__ == "__main__":
+ googletest.main()
diff --git a/tensorflow/contrib/verbs/rdma.cc b/tensorflow/contrib/verbs/rdma.cc
index 86350a08e5..f7c979e863 100644
--- a/tensorflow/contrib/verbs/rdma.cc
+++ b/tensorflow/contrib/verbs/rdma.cc
@@ -24,8 +24,8 @@ limitations under the License.
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#if GOOGLE_CUDA
+#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/common_runtime/gpu/gpu_util.h"
-#include "tensorflow/core/common_runtime/gpu/process_state.h"
#endif
#include "tensorflow/core/distributed_runtime/rendezvous_mgr_interface.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
@@ -1084,7 +1084,7 @@ void RdmaTensorResponse::RecvHandler(Rendezvous::ParsedKey parsed,
// The tensor must be copied from GPU to CPU, because either:
// 1. The tensor is located on a non GDR compatible GPU.
// 2. The tensor's meta-data has changed.
- Allocator* alloc = ProcessState::singleton()->GetCUDAHostAllocator(0);
+ Allocator* alloc = GPUProcessState::singleton()->GetCUDAHostAllocator(0);
copy = Tensor(alloc, in.dtype(), in.shape());
CountCopies(rm_.name_, (void*)DMAHelper::base(&in),
(void*)DMAHelper::base(&copy), in.TotalBytes(), true);
@@ -1541,7 +1541,7 @@ bool RdmaTensorRequest::AllocateTensors() {
if (mr_ == nullptr) {
// Can't RDMA directly to result. Use a proxy.
proxy_tensor_ =
- new Tensor(ProcessState::singleton()->GetCUDAHostAllocator(0),
+ new Tensor(GPUProcessState::singleton()->GetCUDAHostAllocator(0),
result_tensor_->dtype(), result_tensor_->shape());
rdma_addr_ = DMAHelper::base(proxy_tensor_);
mr_ =
diff --git a/tensorflow/contrib/verbs/rdma_mgr.cc b/tensorflow/contrib/verbs/rdma_mgr.cc
index 369bd986df..9cb3d1fbbf 100644
--- a/tensorflow/contrib/verbs/rdma_mgr.cc
+++ b/tensorflow/contrib/verbs/rdma_mgr.cc
@@ -21,8 +21,9 @@ limitations under the License.
#include "tensorflow/contrib/verbs/grpc_verbs_client.h"
#include "tensorflow/contrib/verbs/verbs_service.pb.h"
#include "tensorflow/core/common_runtime/bfc_allocator.h"
+#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/common_runtime/gpu/gpu_util.h"
-#include "tensorflow/core/common_runtime/gpu/process_state.h"
+#include "tensorflow/core/common_runtime/process_state.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/core/distributed_runtime/session_mgr.h"
#include "tensorflow/core/framework/allocator_registry.h"
@@ -282,7 +283,7 @@ void RdmaMgr::InitAllocators() {
Allocator* allocators[] = {
#if GOOGLE_CUDA
- ProcessState::singleton()->GetCUDAHostAllocator(0),
+ GPUProcessState::singleton()->GetCUDAHostAllocator(0),
ProcessState::singleton()->GetCPUAllocator(0),
#endif // GOOGLE_CUDA
cpu_allocator(),
@@ -323,7 +324,8 @@ void RdmaMgr::InitAllocators() {
std::bind(&RdmaMemoryMgr::InsertMemoryRegion,
&RdmaMemoryMgr::Singleton(), _1, _2, std::string(buf));
- ProcessState::singleton()->AddGPUAllocVisitor(bus_id, cuda_alloc_visitor);
+ GPUProcessState::singleton()->AddGPUAllocVisitor(bus_id,
+ cuda_alloc_visitor);
LOG(INFO) << "Instrumenting GPU allocator with bus_id " << bus_id;
}
#endif // GOOGLE_CUDA
diff --git a/tensorflow/core/BUILD b/tensorflow/core/BUILD
index 59e76cb575..514713bb96 100644
--- a/tensorflow/core/BUILD
+++ b/tensorflow/core/BUILD
@@ -150,7 +150,6 @@ load(
"//third_party/mkl:build_defs.bzl",
"if_mkl",
)
-load("@io_bazel_rules_closure//closure:defs.bzl", "closure_proto_library")
exports_files(["ops/ops.pbtxt"])
@@ -234,7 +233,6 @@ tf_proto_library(
srcs = [],
cc_api_version = 2,
default_header = True,
- j2objc_api_version = 1,
java_api_version = 2,
js_api_version = 2,
protodeps = [
@@ -335,6 +333,7 @@ filegroup(
"platform/init_main.h",
"platform/mem.h",
"platform/mutex.h",
+ "platform/numa.h",
"platform/thread_annotations.h",
],
visibility = ["//visibility:private"],
@@ -793,6 +792,7 @@ tf_cuda_library(
"framework/graph_def_util.h",
"framework/graph_to_functiondef.h",
"framework/kernel_def_builder.h",
+ "framework/kernel_def_util.h",
"framework/log_memory.h",
"framework/lookup_interface.h",
"framework/memory_types.h",
@@ -846,6 +846,7 @@ tf_cuda_library(
"util/sparse/sparse_tensor.h",
"util/stat_summarizer.h",
"util/stat_summarizer_options.h",
+ "util/status_util.h",
"util/stream_executor_util.h",
"util/strided_slice_op.h",
"util/tensor_format.h",
@@ -903,6 +904,15 @@ cc_library(
)
cc_library(
+ name = "status_util",
+ hdrs = ["util/status_util.h"],
+ deps = [
+ ":graph",
+ ":lib",
+ ],
+)
+
+cc_library(
name = "reader_base",
srcs = ["framework/reader_base.cc"],
hdrs = ["framework/reader_base.h"],
@@ -1198,6 +1208,7 @@ tf_cuda_library(
hdrs = [
"common_runtime/device.h",
"common_runtime/device_factory.h",
+ "common_runtime/function.h",
"common_runtime/optimization_registry.h",
"common_runtime/shape_refiner.h",
"graph/algorithm.h",
@@ -1252,6 +1263,7 @@ cc_library(
"//tensorflow/core/kernels:fake_quant_ops",
"//tensorflow/core/kernels:function_ops",
"//tensorflow/core/kernels:functional_ops",
+ "//tensorflow/core/kernels:grappler",
"//tensorflow/core/kernels:histogram_op",
"//tensorflow/core/kernels:image",
"//tensorflow/core/kernels:io",
@@ -1941,8 +1953,10 @@ LIB_INTERNAL_PRIVATE_HEADERS = ["framework/resource_handle.h"] + glob(
"**/*test*",
"lib/gif/**/*",
"lib/jpeg/**/*",
+ "lib/png/**/*",
"platform/gif.h",
"platform/jpeg.h",
+ "platform/png.h",
"platform/**/cuda.h",
"platform/**/stream_executor.h",
],
@@ -2037,6 +2051,7 @@ cc_library(
"lib/hash/crc32c_accelerate.cc",
"lib/gif/**/*",
"lib/jpeg/**/*",
+ "lib/png/**/*",
"platform/**/env_time.cc",
"platform/**/cuda_libdevice_path.cc",
"platform/**/device_tracer.cc",
@@ -2133,6 +2148,39 @@ cc_library(
)
cc_library(
+ name = "png_internal",
+ srcs = ["lib/png/png_io.cc"],
+ hdrs = [
+ "lib/bfloat16/bfloat16.h",
+ "lib/core/casts.h",
+ "lib/core/stringpiece.h",
+ "lib/png/png_io.h",
+ "platform/byte_order.h",
+ "platform/cpu_info.h",
+ "platform/default/integral_types.h",
+ "platform/default/logging.h",
+ "platform/logging.h",
+ "platform/macros.h",
+ "platform/platform.h",
+ "platform/png.h",
+ "platform/types.h",
+ ],
+ copts = tf_copts(),
+ linkopts = select({
+ "//tensorflow:freebsd": [],
+ "//tensorflow:windows": [],
+ "//tensorflow:windows_msvc": [],
+ "//conditions:default": ["-ldl"],
+ }),
+ deps = [
+ ":lib",
+ ":lib_internal",
+ "//tensorflow/core/platform/default/build_config:png",
+ "@zlib_archive//:zlib",
+ ],
+)
+
+cc_library(
name = "tflite_portable_logging",
srcs = [],
hdrs = [
@@ -2240,7 +2288,6 @@ tf_proto_library(
srcs = ERROR_CODES_PROTO_SRCS,
cc_api_version = 2,
default_header = True,
- j2objc_api_version = 1,
java_api_version = 2,
js_api_version = 2,
provide_cc_alias = True,
@@ -2262,7 +2309,6 @@ tf_proto_library(
srcs = COMMON_PROTO_SRCS + ADDITIONAL_CORE_PROTO_SRCS,
cc_api_version = 2,
default_header = True,
- j2objc_api_version = 1,
java_api_version = 2,
js_api_version = 2,
protodeps = [
@@ -2419,6 +2465,7 @@ tf_cuda_library(
"framework/resource_handle.cc",
"util/memmapped_file_system.*",
"util/memmapped_file_system_writer.*",
+ "util/stats_calculator.*",
"util/version_info.cc",
],
) + select({
@@ -2445,6 +2492,7 @@ tf_cuda_library(
":protos_all_proto_text",
":error_codes_proto_text",
":protos_all_cc",
+ ":stats_calculator_portable",
":version_lib",
"//tensorflow/core/platform/default/build_config:platformlib",
"//tensorflow/core/kernels:bounds_check",
@@ -2660,6 +2708,8 @@ CORE_CPU_LIB_HEADERS = CORE_CPU_BASE_HDRS + [
"common_runtime/step_stats_collector.h",
"common_runtime/threadpool_device.h",
"common_runtime/visitable_allocator.h",
+ "common_runtime/process_state.h",
+ "common_runtime/pool_allocator.h",
"graph/gradients.h",
"graph/quantize_training.h",
] + if_mkl(["graph/mkl_graph_util.h"])
@@ -2698,7 +2748,9 @@ tf_cuda_library(
"common_runtime/optimization_registry.cc",
"common_runtime/parallel_concat_optimizer.cc",
"common_runtime/placer.cc",
+ "common_runtime/pool_allocator.cc",
"common_runtime/process_function_library_runtime.cc",
+ "common_runtime/process_state.cc",
"common_runtime/process_util.cc",
"common_runtime/renamed_device.cc",
"common_runtime/rendezvous_mgr.cc",
@@ -2885,6 +2937,7 @@ cc_library(
)
GPU_RUNTIME_HEADERS = [
+ "common_runtime/gpu/cuda_host_allocator.h",
"common_runtime/gpu/gpu_bfc_allocator.h",
"common_runtime/gpu/gpu_cudamalloc_allocator.h",
"common_runtime/gpu/gpu_debug_allocator.h",
@@ -2894,10 +2947,9 @@ GPU_RUNTIME_HEADERS = [
"common_runtime/gpu/gpu_id_utils.h",
"common_runtime/gpu/gpu_init.h",
"common_runtime/gpu/gpu_managed_allocator.h",
+ "common_runtime/gpu/gpu_process_state.h",
"common_runtime/gpu/gpu_stream_util.h",
"common_runtime/gpu/gpu_util.h",
- "common_runtime/gpu/pool_allocator.h",
- "common_runtime/gpu/process_state.h",
"common_runtime/gpu_device_context.h",
]
@@ -2910,11 +2962,10 @@ tf_cuda_library(
"common_runtime/gpu/gpu_device.cc",
"common_runtime/gpu/gpu_device_factory.cc",
"common_runtime/gpu/gpu_managed_allocator.cc",
+ "common_runtime/gpu/gpu_process_state.cc",
"common_runtime/gpu/gpu_stream_util.cc",
"common_runtime/gpu/gpu_util.cc",
"common_runtime/gpu/gpu_util_platform_specific.cc",
- "common_runtime/gpu/pool_allocator.cc",
- "common_runtime/gpu/process_state.cc",
],
hdrs = GPU_RUNTIME_HEADERS,
copts = tf_copts(),
@@ -3225,6 +3276,28 @@ tf_cc_test(
)
tf_cc_test(
+ name = "platform_numa_test",
+ size = "small",
+ srcs = ["platform/numa_test.cc"],
+ tags = [
+ # This test will not pass unless it has access to all NUMA nodes
+ # on the executing machine.
+ "manual",
+ "notap",
+ ],
+ deps = [
+ ":framework",
+ ":lib",
+ ":lib_internal",
+ ":lib_test_internal",
+ ":protos_all_cc",
+ ":test",
+ ":test_main",
+ "//third_party/eigen3",
+ ],
+)
+
+tf_cc_test(
name = "platform_setround_test",
size = "small",
srcs = ["platform/setround_test.cc"],
@@ -3377,6 +3450,7 @@ tf_cc_tests(
"framework/graph_def_util_test.cc",
"framework/graph_to_functiondef_test.cc",
"framework/kernel_def_builder_test.cc",
+ "framework/kernel_def_util_test.cc",
"framework/memory_types_test.cc",
"framework/node_def_builder_test.cc",
"framework/node_def_util_test.cc",
@@ -3426,6 +3500,7 @@ tf_cc_tests(
"util/semver_test.cc",
"util/sparse/sparse_tensor_test.cc",
"util/stat_summarizer_test.cc",
+ "util/status_util_test.cc",
"util/tensor_format_test.cc",
"util/tensor_slice_reader_test.cc",
"util/tensor_slice_set_test.cc",
@@ -3450,6 +3525,7 @@ tf_cc_tests(
":ops",
":protos_all_cc",
":protos_test_cc",
+ ":status_util",
":test",
":test_main",
":testlib",
@@ -3585,6 +3661,7 @@ tf_cc_test_mkl(
deps = [
":core",
":core_cpu",
+ ":core_cpu_internal",
":framework",
":framework_internal",
":test",
@@ -3908,13 +3985,13 @@ tf_cc_test(
],
)
-tf_cc_test(
+tf_cuda_cc_test(
name = "common_runtime_direct_session_test",
size = "small",
srcs = ["common_runtime/direct_session_test.cc"],
+ args = [] + if_cuda(["--heap_check=local"]), # The GPU tracer leaks memory
linkstatic = tf_kernel_tests_linkstatic(),
deps = [
- ":core",
":core_cpu",
":core_cpu_internal",
":direct_session_internal",
@@ -3927,6 +4004,7 @@ tf_cc_test(
":test",
":test_main",
":testlib",
+ "//third_party/eigen3",
"//tensorflow/cc:cc_ops",
"//tensorflow/core/kernels:control_flow_ops",
"//tensorflow/core/kernels:cwise_op",
@@ -3940,8 +4018,7 @@ tf_cc_test(
"//tensorflow/core/kernels:queue_ops",
"//tensorflow/core/kernels:session_ops",
"//tensorflow/core/kernels:variable_ops",
- "//third_party/eigen3",
- ],
+ ] + if_cuda([":cuda"]),
)
# This is identical to :common_runtime_direct_session_test with the addition of
diff --git a/tensorflow/core/api_def/api_test.cc b/tensorflow/core/api_def/api_test.cc
index 477a0b670e..ae03a61ae6 100644
--- a/tensorflow/core/api_def/api_test.cc
+++ b/tensorflow/core/api_def/api_test.cc
@@ -149,6 +149,33 @@ void TestAllApiDefAttributeNamesAreValid(
}
}
}
+
+void TestDeprecatedAttributesSetCorrectly(
+ const std::unordered_map<string, ApiDef>& api_defs_map) {
+ for (const auto& name_and_api_def : api_defs_map) {
+ int num_deprecated_endpoints = 0;
+ const auto& api_def = name_and_api_def.second;
+ for (const auto& endpoint : api_def.endpoint()) {
+ if (endpoint.deprecated()) {
+ ++num_deprecated_endpoints;
+ }
+ }
+
+ const auto& name = name_and_api_def.first;
+ ASSERT_TRUE(api_def.deprecation_message().empty() ||
+ num_deprecated_endpoints == 0)
+ << "Endpoints are set to 'deprecated' for deprecated op " << name
+ << ". If an op is deprecated (i.e. deprecation_message is set), "
+ << "all the endpoints are deprecated implicitly and 'deprecated' "
+ << "field should not be set.";
+ if (num_deprecated_endpoints > 0) {
+ ASSERT_NE(num_deprecated_endpoints, api_def.endpoint_size())
+ << "All " << name << " endpoints are deprecated. Please, set "
+ << "deprecation_message in api_def_" << name << ".pbtxt instead. "
+ << "to indicate that the op is deprecated.";
+ }
+ }
+}
} // namespace
class BaseApiTest : public ::testing::Test {
@@ -171,7 +198,7 @@ TEST_F(BaseApiTest, AllOpsAreInApiDef) {
if (excluded_ops->find(op.name()) != excluded_ops->end()) {
continue;
}
- ASSERT_TRUE(api_defs_map_.find(op.name()) != api_defs_map_.end())
+ EXPECT_TRUE(api_defs_map_.find(op.name()) != api_defs_map_.end())
<< op.name() << " op does not have api_def_*.pbtxt file. "
<< "Please add api_def_" << op.name() << ".pbtxt file "
<< "under tensorflow/core/api_def/base_api/ directory.";
@@ -236,6 +263,11 @@ TEST_F(BaseApiTest, AllApiDefAttributeNamesAreValid) {
TestAllApiDefAttributeNamesAreValid(ops_, api_defs_map_);
}
+// Checks that deprecation is set correctly.
+TEST_F(BaseApiTest, DeprecationSetCorrectly) {
+ TestDeprecatedAttributesSetCorrectly(api_defs_map_);
+}
+
class PythonApiTest : public ::testing::Test {
protected:
PythonApiTest() {
@@ -272,4 +304,9 @@ TEST_F(PythonApiTest, AllApiDefAttributeNamesAreValid) {
TestAllApiDefAttributeNamesAreValid(ops_, api_defs_map_);
}
+// Checks that deprecation is set correctly.
+TEST_F(PythonApiTest, DeprecationSetCorrectly) {
+ TestDeprecatedAttributesSetCorrectly(api_defs_map_);
+}
+
} // namespace tensorflow
diff --git a/tensorflow/core/api_def/base_api/api_def_BoostedTreesCenterBias.pbtxt b/tensorflow/core/api_def/base_api/api_def_BoostedTreesCenterBias.pbtxt
new file mode 100644
index 0000000000..b58b974eb4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BoostedTreesCenterBias.pbtxt
@@ -0,0 +1,41 @@
+op {
+ graph_op_name: "BoostedTreesCenterBias"
+ visibility: HIDDEN
+ in_arg {
+ name: "tree_ensemble_handle"
+ description: <<END
+Handle to the tree ensemble.
+END
+ }
+ in_arg {
+ name: "mean_gradients"
+ description: <<END
+A tensor with shape=[logits_dimension] with mean of gradients for the first node.
+END
+ }
+ in_arg {
+ name: "mean_hessians"
+ description: <<END
+A tensor with shape=[logits_dimension] with mean of hessians for the first node.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+l1 regularization factor on leaf weights, per instance based.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+l2 regularization factor on leaf weights, per instance based.
+END
+ }
+ out_arg {
+ name: "continue_centering"
+ description: <<END
+Bool, whether to continue bias centering.
+END
+ }
+ summary: "Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. Returns a boolean indicating whether to continue centering."
+} \ No newline at end of file
diff --git a/tensorflow/core/api_def/base_api/api_def_BoostedTreesExampleDebugOutputs.pbtxt b/tensorflow/core/api_def/base_api/api_def_BoostedTreesExampleDebugOutputs.pbtxt
new file mode 100644
index 0000000000..206fa3cc98
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BoostedTreesExampleDebugOutputs.pbtxt
@@ -0,0 +1,36 @@
+op {
+ graph_op_name: "BoostedTreesExampleDebugOutputs"
+ visibility: HIDDEN
+ in_arg {
+ name: "bucketized_features"
+ description: <<END
+A list of rank 1 Tensors containing bucket id for each
+feature.
+END
+ }
+ out_arg {
+ name: "examples_debug_outputs_serialized"
+ description: <<END
+Output rank 1 Tensor containing a proto serialized as a string for each example.
+END
+ }
+ attr {
+ name: "num_bucketized_features"
+ description: <<END
+Inferred.
+END
+ }
+ attr {
+ name: "logits_dimension"
+ description: <<END
+scalar, dimension of the logits, to be used for constructing the protos in
+examples_debug_outputs_serialized.
+END
+ }
+ summary: "Debugging/model interpretability outputs for each example."
+ description: <<END
+It traverses all the trees and computes debug metrics for individual examples,
+such as getting split feature ids and logits after each split along the decision
+path used to compute directional feature contributions.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt b/tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt
index 6cd76ff340..342a1f6b05 100644
--- a/tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt
@@ -25,7 +25,7 @@ END
(K-1)-dimensional tensor of indices into `params`, where each element defines a
slice of `params`:
- output[i_0, ..., i_{K-2}] = params[indices[i0, ..., i_{K-2}]]
+ output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
Whereas in @{tf.gather} `indices` defines slices into the first
dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
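
As a quick illustration of the indexing rule above, a NumPy sketch (not the op itself) for the case where each row of `indices` is a complete index into `params`:

    # NumPy sketch of output[...] = params[indices[...]] with full indices.
    import numpy as np

    params = np.array([["a", "b"], ["c", "d"]])
    indices = np.array([[0, 0], [1, 1]])   # one full index per output element
    output = np.array([params[tuple(ix)] for ix in indices])
    print(output)                          # ['a' 'd']
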
diff --git a/tensorflow/core/api_def/base_api/api_def_IteratorFromStringHandleV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_IteratorFromStringHandleV2.pbtxt
new file mode 100644
index 0000000000..9d464b2aea
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IteratorFromStringHandleV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "IteratorFromStringHandleV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IteratorV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_IteratorV2.pbtxt
new file mode 100644
index 0000000000..becc729016
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IteratorV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "IteratorV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MatrixExponential.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatrixExponential.pbtxt
index 0d680f6531..d7b56aec87 100644
--- a/tensorflow/core/api_def/base_api/api_def_MatrixExponential.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_MatrixExponential.pbtxt
@@ -18,7 +18,7 @@ END
}
summary: "Computes the matrix exponential of one or more square matrices:"
description: <<END
-exp(A) = \sum_{n=0}^\infty A^n/n!
+\\(exp(A) = \sum_{n=0}^\infty A^n/n!\\)
The exponential is computed using a combination of the scaling and squaring
method and the Pade approximation. Details can be found in:
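
For intuition only, the series above can be truncated directly; the op itself uses scaling-and-squaring with a Padé approximant, so this sketch is not its algorithm:

    # Illustrative truncation of exp(A) = \sum_{n=0}^\infty A^n/n!.
    import numpy as np

    def expm_series(A, terms=30):
        acc = np.eye(A.shape[0])
        term = np.eye(A.shape[0])
        for n in range(1, terms):
            term = term @ A / n    # builds A^n / n! incrementally
            acc = acc + term
        return acc

    A = np.array([[0.0, 1.0], [-1.0, 0.0]])
    print(expm_series(A))          # ~[[cos 1, sin 1], [-sin 1, cos 1]]
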
diff --git a/tensorflow/core/api_def/base_api/api_def_MatrixLogarithm.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatrixLogarithm.pbtxt
index a6c4d0d400..9e80064d15 100644
--- a/tensorflow/core/api_def/base_api/api_def_MatrixLogarithm.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_MatrixLogarithm.pbtxt
@@ -20,7 +20,7 @@ END
summary: "Computes the matrix logarithm of one or more square matrices:"
description: <<END
-log(exp(A)) = A
+\\(log(exp(A)) = A\\)
This op is only defined for complex matrices. If A is positive-definite and
real, then casting to a complex matrix, taking the logarithm and casting back
diff --git a/tensorflow/core/api_def/base_api/api_def_NonMaxSuppressionWithOverlaps.pbtxt b/tensorflow/core/api_def/base_api/api_def_NonMaxSuppressionWithOverlaps.pbtxt
new file mode 100644
index 0000000000..180edb15a4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_NonMaxSuppressionWithOverlaps.pbtxt
@@ -0,0 +1,62 @@
+op {
+ graph_op_name: "NonMaxSuppressionWithOverlaps"
+ in_arg {
+ name: "overlaps"
+ description: <<END
+A 2-D float tensor of shape `[num_boxes, num_boxes]` representing
+the n-by-n box overlap values.
+END
+ }
+ in_arg {
+ name: "scores"
+ description: <<END
+A 1-D float tensor of shape `[num_boxes]` representing a single
+score corresponding to each box (each row of boxes).
+END
+ }
+ in_arg {
+ name: "max_output_size"
+ description: <<END
+A scalar integer tensor representing the maximum number of
+boxes to be selected by non max suppression.
+END
+ }
+ in_arg {
+ name: "overlap_threshold"
+ description: <<END
+A 0-D float tensor representing the threshold for deciding whether
+boxes overlap too much.
+END
+ }
+ in_arg {
+ name: "score_threshold"
+ description: <<END
+A 0-D float tensor representing the threshold for deciding when to remove
+boxes based on score.
+END
+ }
+ out_arg {
+ name: "selected_indices"
+ description: <<END
+A 1-D integer tensor of shape `[M]` representing the selected
+indices from the boxes tensor, where `M <= max_output_size`.
+END
+ }
+ summary: "Greedily selects a subset of bounding boxes in descending order of score,"
+ description: <<END
+pruning away boxes that have high overlaps
+with previously selected boxes. Bounding boxes with score less than
+`score_threshold` are removed. N-by-n overlap values are supplied as a square matrix,
+which allows for defining a custom overlap criterion (e.g. intersection over union,
+intersection over area, etc.).
+
+The output of this operation is a set of integers indexing into the input
+collection of bounding boxes representing the selected boxes. The bounding
+box coordinates corresponding to the selected indices can then be obtained
+using the `tf.gather` operation. For example:
+
+ selected_indices = tf.image.non_max_suppression_with_overlaps(
+ overlaps, scores, max_output_size, overlap_threshold, score_threshold)
+ selected_boxes = tf.gather(boxes, selected_indices)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReduceJoin.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReduceJoin.pbtxt
index d13866ddaa..b447d09377 100644
--- a/tensorflow/core/api_def/base_api/api_def_ReduceJoin.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ReduceJoin.pbtxt
@@ -36,7 +36,7 @@ END
summary: "Joins a string Tensor across the given dimensions."
description: <<END
Computes the string join across dimensions in the given string Tensor of shape
-`[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input
+`[\\(d_0, d_1, ..., d_{n-1}\\)]`. Returns a new Tensor created by joining the input
strings with the given separator (default: empty string). Negative indices are
counted backwards from the end, with `-1` being equivalent to `n - 1`. If
indices are not specified, joins across all dimensions beginning from `n - 1`
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt
index b0665ebf0e..a9a7646314 100644
--- a/tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt
@@ -42,7 +42,7 @@ within a given variable according to `indices`.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
-It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+It must be shape `\\([d_0, ..., d_{Q-2}, K]\\)` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
@@ -50,9 +50,7 @@ dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
-```
-[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
-```
+$$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$
For example, say we want to add 4 scattered elements to a rank-1 tensor to 8
elements. In Python, that addition would look like this:
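
A sketch consistent with that description, using the TF 1.x-style API and illustrative values:

    # Hedged sketch of the rank-1 scatter-add described above.
    import tensorflow as tf

    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    add = tf.scatter_nd_add(ref, indices, updates)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(add))       # [1, 13, 3, 14, 14, 6, 7, 20]
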
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt
index e5c64c2b90..35116e5f6a 100644
--- a/tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt
@@ -37,7 +37,7 @@ respect to both `input` and `updates`.
`input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `input`.
-It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or `(P-K)`-dimensional slices
@@ -45,9 +45,7 @@ indices into elements (if `K = P`) or `(P-K)`-dimensional slices
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
-```
-[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].
-```
+$$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$
For example, say we want to add 4 scattered elements to a rank-1 tensor to 8
elements. In Python, that addition would look like this:
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt
index 333db017f5..99e5c4908b 100644
--- a/tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt
@@ -42,7 +42,7 @@ within a given variable according to `indices`.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
-It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
@@ -50,9 +50,7 @@ dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
-```
-[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
-```
+$$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$
For example, say we want to subtract 4 scattered elements from a rank-1 tensor
with 8 elements. In Python, that subtraction would look like this:
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt
index 33d98262d5..cb57c171b9 100644
--- a/tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt
@@ -42,7 +42,7 @@ variable according to `indices`.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
-It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
@@ -50,9 +50,7 @@ dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
-```
-[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
-```
+$$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$
For example, say we want to update 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
diff --git a/tensorflow/core/api_def/base_api/api_def_IdentityDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_SinkDataset.pbtxt
index ff2854fd2c..b5758ddbfb 100644
--- a/tensorflow/core/api_def/base_api/api_def_IdentityDataset.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_SinkDataset.pbtxt
@@ -1,5 +1,5 @@
op {
- graph_op_name: "IdentityDataset"
+ graph_op_name: "SinkDataset"
visibility: HIDDEN
in_arg {
name: "input_dataset"
diff --git a/tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt
index c80ee77f73..ddde3ee5b4 100644
--- a/tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt
@@ -8,11 +8,18 @@ sliding window.
END
}
in_arg {
- name: "stride"
+ name: "window_shift"
description: <<END
A scalar representing the steps moving the sliding window
forward in one iteration. It must be positive.
END
}
+ in_arg {
+ name: "window_stride"
+ description: <<END
+A scalar representing the stride of the input elements of the sliding window.
+It must be positive.
+END
+ }
summary: "Creates a dataset that passes a sliding window over `input_dataset`."
}
diff --git a/tensorflow/core/api_def/base_api/api_def_Softmax.pbtxt b/tensorflow/core/api_def/base_api/api_def_Softmax.pbtxt
index 43884824c9..b51b468c3d 100644
--- a/tensorflow/core/api_def/base_api/api_def_Softmax.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_Softmax.pbtxt
@@ -16,6 +16,6 @@ END
description: <<END
For each batch `i` and class `j` we have
- softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
+ $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
END
}
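
A small NumPy check of the formula, normalizing over classes j per batch row:

    # NumPy check of softmax[i, j] = exp(logits[i, j]) / sum_j exp(logits[i, j]).
    import numpy as np

    logits = np.array([[1.0, 2.0, 3.0]])
    e = np.exp(logits)
    softmax = e / e.sum(axis=-1, keepdims=True)
    print(softmax)                 # [[0.09003057 0.24472847 0.66524096]]
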
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyAdagrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyAdagrad.pbtxt
index 1698e2def0..06409d8db2 100644
--- a/tensorflow/core/api_def/base_api/api_def_SparseApplyAdagrad.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyAdagrad.pbtxt
@@ -47,7 +47,7 @@ END
summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme."
description: <<END
That is for rows we have grad for, we update var and accum as follows:
-accum += grad * grad
-var -= lr * grad * (1 / sqrt(accum))
+$$accum += grad * grad$$
+$$var -= lr * grad * (1 / sqrt(accum))$$
END
}
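
In plain Python the per-row update pair reads as follows (scalar values illustrative):

    # Scalar sketch of the sparse adagrad row update shown above.
    from math import sqrt

    lr, grad = 0.1, 0.5
    accum, var = 0.0, 1.0
    accum += grad * grad                      # accum += grad * grad
    var -= lr * grad * (1.0 / sqrt(accum))    # var -= lr * grad / sqrt(accum)
    print(accum, var)                         # 0.25 0.9
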
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyCenteredRMSProp.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyCenteredRMSProp.pbtxt
index 2c6a36bf45..b3f2d3ea62 100644
--- a/tensorflow/core/api_def/base_api/api_def_SparseApplyCenteredRMSProp.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyCenteredRMSProp.pbtxt
@@ -83,8 +83,8 @@ mean_square = decay * mean_square + (1-decay) * gradient ** 2
mean_grad = decay * mean_grad + (1-decay) * gradient
Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
-ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-var <- var - mom
+$$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$
+$$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$
+$$var <- var - mom$$
END
}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyFtrl.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyFtrl.pbtxt
index 524b5c5a47..9a6b6bca5f 100644
--- a/tensorflow/core/api_def/base_api/api_def_SparseApplyFtrl.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyFtrl.pbtxt
@@ -71,10 +71,10 @@ END
summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
description: <<END
That is for rows we have grad for, we update var, accum and linear as follows:
-accum_new = accum + grad * grad
-linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-accum = accum_new
+$$accum_new = accum + grad * grad$$
+$$linear += grad + (accum_{new}^{-lr_{power}} - accum^{-lr_{power}}) / lr * var$$
+$$quadratic = 1.0 / (accum_{new}^{lr_{power}} * lr) + 2 * l2$$
+$$var = (sign(linear) * l1 - linear) / quadratic\ if\ |linear| > l1\ else\ 0.0$$
+$$accum = accum_{new}$$
END
}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyMomentum.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyMomentum.pbtxt
index 8d9ac9ea3f..17dbb488de 100644
--- a/tensorflow/core/api_def/base_api/api_def_SparseApplyMomentum.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyMomentum.pbtxt
@@ -64,7 +64,7 @@ Set use_nesterov = True if you want to use Nesterov momentum.
That is for rows we have grad for, we update var and accum as follows:
-accum = accum * momentum + grad
-var -= lr * accum
+$$accum = accum * momentum + grad$$
+$$var -= lr * accum$$
END
}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalAdagrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalAdagrad.pbtxt
index 80541b91c7..0b24f2ddd1 100644
--- a/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalAdagrad.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalAdagrad.pbtxt
@@ -58,9 +58,9 @@ END
summary: "Sparse update entries in \'*var\' and \'*accum\' according to FOBOS algorithm."
description: <<END
That is for rows we have grad for, we update var and accum as follows:
-accum += grad * grad
-prox_v = var
-prox_v -= lr * grad * (1 / sqrt(accum))
-var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
+$$accum += grad * grad$$
+$$prox_v = var$$
+$$prox_v -= lr * grad * (1 / sqrt(accum))$$
+$$var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}$$
END
}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalGradientDescent.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalGradientDescent.pbtxt
index 5200e5516d..9dc53860e5 100644
--- a/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalGradientDescent.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalGradientDescent.pbtxt
@@ -52,7 +52,7 @@ END
summary: "Sparse update \'*var\' as FOBOS algorithm with fixed learning rate."
description: <<END
That is for rows we have grad for, we update var as follows:
-prox_v = var - alpha * grad
-var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
+$$prox_v = var - alpha * grad$$
+$$var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}$$
END
}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyRMSProp.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyRMSProp.pbtxt
index a4dbd608b8..ee9f57fa9d 100644
--- a/tensorflow/core/api_def/base_api/api_def_SparseApplyRMSProp.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyRMSProp.pbtxt
@@ -71,8 +71,8 @@ and mom will not update in iterations during which the grad is zero.
mean_square = decay * mean_square + (1-decay) * gradient ** 2
Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
-ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-var <- var - mom
+$$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$
+$$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$
+$$var <- var - mom$$
END
}
diff --git a/tensorflow/core/api_def/base_api/api_def_StatefulPartitionedCall.pbtxt b/tensorflow/core/api_def/base_api/api_def_StatefulPartitionedCall.pbtxt
new file mode 100644
index 0000000000..c4cb4e362a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StatefulPartitionedCall.pbtxt
@@ -0,0 +1,25 @@
+
+op {
+ graph_op_name: "StatefulPartitionedCall"
+ in_arg {
+ name: "args"
+ description: "A list of input tensors."
+ }
+ out_arg {
+ name: "output"
+ description: "A list of return values."
+ }
+ attr { name: "Tin" description: "A list of input types." }
+ attr { name: "Tout" description: "A list of output types." }
+ attr {
+ name: "f"
+ description: <<END
+ A function that takes 'args', a list of tensors, and returns 'output',
+ another list of tensors. Input and output types are specified by 'Tin'
+ and 'Tout'. The function body of f will be placed and partitioned across
+ devices, setting this op apart from the regular Call op. This op is
+ stateful.
+END
+ }
+ summary: "returns `f(inputs)`, where `f`'s body is placed and partitioned."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_UnsortedSegmentSum.pbtxt b/tensorflow/core/api_def/base_api/api_def_UnsortedSegmentSum.pbtxt
index eb5d0d1247..9aeabd030d 100644
--- a/tensorflow/core/api_def/base_api/api_def_UnsortedSegmentSum.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_UnsortedSegmentSum.pbtxt
@@ -20,7 +20,7 @@ Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
Computes a tensor such that
-`(output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such
+\\(output[i] = sum_{j...} data[j...]\\) where the sum is over tuples `j...` such
that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids`
need not be sorted and need not cover all values in the full
range of valid values.
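
A NumPy equivalent of the formula, with unsorted ids and an empty segment left at zero:

    # NumPy sketch of output[i] = sum of data[j] over j with segment_ids[j] == i.
    import numpy as np

    data = np.array([1, 2, 3, 4])
    segment_ids = np.array([2, 0, 1, 0])   # need not be sorted
    num_segments = 4                       # segment 3 receives no entries
    output = np.zeros(num_segments, dtype=data.dtype)
    np.add.at(output, segment_ids, data)   # unbuffered scatter-add
    print(output)                          # [6 3 1 0]
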
diff --git a/tensorflow/core/api_def/base_api/api_def_WindowDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_WindowDataset.pbtxt
new file mode 100644
index 0000000000..1bc3660479
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_WindowDataset.pbtxt
@@ -0,0 +1,11 @@
+op {
+ visibility: HIDDEN
+ graph_op_name: "WindowDataset"
+ in_arg {
+ name: "window_size"
+ description: <<END
+A scalar representing the number of elements to accumulate in a window.
+END
+ }
+ summary: "A dataset that creates window datasets from the input dataset."
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Acos.pbtxt b/tensorflow/core/api_def/python_api/api_def_Acos.pbtxt
index ca1ee78526..1fd8baf05f 100644
--- a/tensorflow/core/api_def/python_api/api_def_Acos.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Acos.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "acos"
- deprecation_message: "tf.acos is deprecated, please use tf.math.acos instead."
+ deprecated: true
}
}
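
The endpoint change is behavior-preserving for callers: both names dispatch to the same op, and the old one is now only flagged deprecated instead of carrying its own message. For example (graph-mode TF 1.x, illustrative):

    # Both endpoints resolve to the same Acos kernel.
    import tensorflow as tf

    x = tf.constant([1.0, 0.5])
    a = tf.math.acos(x)   # preferred endpoint
    b = tf.acos(x)        # deprecated alias, identical result
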
diff --git a/tensorflow/core/api_def/python_api/api_def_Acosh.pbtxt b/tensorflow/core/api_def/python_api/api_def_Acosh.pbtxt
index 7503353e41..f7946652ef 100644
--- a/tensorflow/core/api_def/python_api/api_def_Acosh.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Acosh.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "acosh"
- deprecation_message: "tf.acosh is deprecated, please use tf.math.acosh instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Add.pbtxt b/tensorflow/core/api_def/python_api/api_def_Add.pbtxt
index cc5d68b15d..fb505a91ac 100644
--- a/tensorflow/core/api_def/python_api/api_def_Add.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Add.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "add"
- deprecation_message: "tf.add is deprecated, please use tf.math.add instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_AsString.pbtxt b/tensorflow/core/api_def/python_api/api_def_AsString.pbtxt
index 9306eaf373..ea65543a76 100644
--- a/tensorflow/core/api_def/python_api/api_def_AsString.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_AsString.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "as_string"
- deprecation_message: "tf.as_string is deprecated, please use tf.dtypes.as_string instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Asin.pbtxt b/tensorflow/core/api_def/python_api/api_def_Asin.pbtxt
index 7622af7b45..eedf4553c6 100644
--- a/tensorflow/core/api_def/python_api/api_def_Asin.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Asin.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "asin"
- deprecation_message: "tf.asin is deprecated, please use tf.math.asin instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Asinh.pbtxt b/tensorflow/core/api_def/python_api/api_def_Asinh.pbtxt
index 395275c21d..10c2fb356e 100644
--- a/tensorflow/core/api_def/python_api/api_def_Asinh.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Asinh.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "asinh"
- deprecation_message: "tf.asinh is deprecated, please use tf.math.asinh instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Atan.pbtxt b/tensorflow/core/api_def/python_api/api_def_Atan.pbtxt
index dfcd632558..03dd5dc848 100644
--- a/tensorflow/core/api_def/python_api/api_def_Atan.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Atan.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "atan"
- deprecation_message: "tf.atan is deprecated, please use tf.math.atan instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Atan2.pbtxt b/tensorflow/core/api_def/python_api/api_def_Atan2.pbtxt
index fba79507aa..85b27bd881 100644
--- a/tensorflow/core/api_def/python_api/api_def_Atan2.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Atan2.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "atan2"
- deprecation_message: "tf.atan2 is deprecated, please use tf.math.atan2 instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Atanh.pbtxt b/tensorflow/core/api_def/python_api/api_def_Atanh.pbtxt
index f7164c33e8..ee7c0600d6 100644
--- a/tensorflow/core/api_def/python_api/api_def_Atanh.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Atanh.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "atanh"
- deprecation_message: "tf.atanh is deprecated, please use tf.math.atanh instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchToSpaceND.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchToSpaceND.pbtxt
index 56e49a2221..9552fc92e3 100644
--- a/tensorflow/core/api_def/python_api/api_def_BatchToSpaceND.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_BatchToSpaceND.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "batch_to_space_nd"
- deprecation_message: "tf.batch_to_space_nd is deprecated, please use tf.manip.batch_to_space_nd instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Betainc.pbtxt b/tensorflow/core/api_def/python_api/api_def_Betainc.pbtxt
index 7c37b534c7..7ad7cbcba9 100644
--- a/tensorflow/core/api_def/python_api/api_def_Betainc.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Betainc.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "betainc"
- deprecation_message: "tf.betainc is deprecated, please use tf.math.betainc instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Ceil.pbtxt b/tensorflow/core/api_def/python_api/api_def_Ceil.pbtxt
index 0c72cf2edd..f2265bad56 100644
--- a/tensorflow/core/api_def/python_api/api_def_Ceil.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Ceil.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "ceil"
- deprecation_message: "tf.ceil is deprecated, please use tf.math.ceil instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_CheckNumerics.pbtxt b/tensorflow/core/api_def/python_api/api_def_CheckNumerics.pbtxt
index 7ea52d30b6..541b09a591 100644
--- a/tensorflow/core/api_def/python_api/api_def_CheckNumerics.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_CheckNumerics.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "check_numerics"
- deprecation_message: "tf.check_numerics is deprecated, please use tf.debugging.check_numerics instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Cholesky.pbtxt b/tensorflow/core/api_def/python_api/api_def_Cholesky.pbtxt
index 568fab4037..942f4e6ed8 100644
--- a/tensorflow/core/api_def/python_api/api_def_Cholesky.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Cholesky.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "cholesky"
- deprecation_message: "tf.cholesky is deprecated, please use tf.linalg.cholesky instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Cos.pbtxt b/tensorflow/core/api_def/python_api/api_def_Cos.pbtxt
index 6550cd2d4e..1af8c0c2c9 100644
--- a/tensorflow/core/api_def/python_api/api_def_Cos.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Cos.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "cos"
- deprecation_message: "tf.cos is deprecated, please use tf.math.cos instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Cosh.pbtxt b/tensorflow/core/api_def/python_api/api_def_Cosh.pbtxt
index ef82a45a80..2de87df40d 100644
--- a/tensorflow/core/api_def/python_api/api_def_Cosh.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Cosh.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "cosh"
- deprecation_message: "tf.cosh is deprecated, please use tf.math.cosh instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Cross.pbtxt b/tensorflow/core/api_def/python_api/api_def_Cross.pbtxt
index 33c1b8c617..e8a871cae6 100644
--- a/tensorflow/core/api_def/python_api/api_def_Cross.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Cross.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "cross"
- deprecation_message: "tf.cross is deprecated, please use tf.linalg.cross instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_DecodeBase64.pbtxt b/tensorflow/core/api_def/python_api/api_def_DecodeBase64.pbtxt
index 55c43ceba2..8b96eee631 100644
--- a/tensorflow/core/api_def/python_api/api_def_DecodeBase64.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_DecodeBase64.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "decode_base64"
- deprecation_message: "tf.decode_base64 is deprecated, please use tf.io.decode_base64 instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_DecodeCompressed.pbtxt b/tensorflow/core/api_def/python_api/api_def_DecodeCompressed.pbtxt
index 5f6be24cc4..829608fc8f 100644
--- a/tensorflow/core/api_def/python_api/api_def_DecodeCompressed.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_DecodeCompressed.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "decode_compressed"
- deprecation_message: "tf.decode_compressed is deprecated, please use tf.io.decode_compressed instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_DecodeJSONExample.pbtxt b/tensorflow/core/api_def/python_api/api_def_DecodeJSONExample.pbtxt
index 3759047f57..9f28bc5f59 100644
--- a/tensorflow/core/api_def/python_api/api_def_DecodeJSONExample.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_DecodeJSONExample.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "decode_json_example"
- deprecation_message: "tf.decode_json_example is deprecated, please use tf.io.decode_json_example instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_DecodeRaw.pbtxt b/tensorflow/core/api_def/python_api/api_def_DecodeRaw.pbtxt
index a83f702dca..0010a59ca4 100644
--- a/tensorflow/core/api_def/python_api/api_def_DecodeRaw.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_DecodeRaw.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "decode_raw"
- deprecation_message: "tf.decode_raw is deprecated, please use tf.io.decode_raw instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Dequantize.pbtxt b/tensorflow/core/api_def/python_api/api_def_Dequantize.pbtxt
index c9b4f76fab..5edd0c216b 100644
--- a/tensorflow/core/api_def/python_api/api_def_Dequantize.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Dequantize.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "dequantize"
- deprecation_message: "tf.dequantize is deprecated, please use tf.quantization.dequantize instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Diag.pbtxt b/tensorflow/core/api_def/python_api/api_def_Diag.pbtxt
index 2043facfa9..cba30e63e8 100644
--- a/tensorflow/core/api_def/python_api/api_def_Diag.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Diag.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "diag"
- deprecation_message: "tf.diag is deprecated, please use tf.linalg.tensor_diag instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_DiagPart.pbtxt b/tensorflow/core/api_def/python_api/api_def_DiagPart.pbtxt
index 7fa30b2347..54e1f34e82 100644
--- a/tensorflow/core/api_def/python_api/api_def_DiagPart.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_DiagPart.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "diag_part"
- deprecation_message: "tf.diag_part is deprecated, please use tf.linalg.tensor_diag_part instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Digamma.pbtxt b/tensorflow/core/api_def/python_api/api_def_Digamma.pbtxt
index 03f57678a8..91b4dfead7 100644
--- a/tensorflow/core/api_def/python_api/api_def_Digamma.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Digamma.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "digamma"
- deprecation_message: "tf.digamma is deprecated, please use tf.math.digamma instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_EncodeBase64.pbtxt b/tensorflow/core/api_def/python_api/api_def_EncodeBase64.pbtxt
index 47b4ab4da4..71bb73cfb2 100644
--- a/tensorflow/core/api_def/python_api/api_def_EncodeBase64.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_EncodeBase64.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "encode_base64"
- deprecation_message: "tf.encode_base64 is deprecated, please use tf.io.encode_base64 instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Equal.pbtxt b/tensorflow/core/api_def/python_api/api_def_Equal.pbtxt
index 2630962f7d..78aa1b3bc5 100644
--- a/tensorflow/core/api_def/python_api/api_def_Equal.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Equal.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "equal"
- deprecation_message: "tf.equal is deprecated, please use tf.math.equal instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Erfc.pbtxt b/tensorflow/core/api_def/python_api/api_def_Erfc.pbtxt
index 6a511b3251..e96df0c596 100644
--- a/tensorflow/core/api_def/python_api/api_def_Erfc.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Erfc.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "erfc"
- deprecation_message: "tf.erfc is deprecated, please use tf.math.erfc instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Exp.pbtxt b/tensorflow/core/api_def/python_api/api_def_Exp.pbtxt
index e1fd718ff0..70323fe5b4 100644
--- a/tensorflow/core/api_def/python_api/api_def_Exp.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Exp.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "exp"
- deprecation_message: "tf.exp is deprecated, please use tf.math.exp instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Expm1.pbtxt b/tensorflow/core/api_def/python_api/api_def_Expm1.pbtxt
index ca25706407..8ddf9d4d70 100644
--- a/tensorflow/core/api_def/python_api/api_def_Expm1.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Expm1.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "expm1"
- deprecation_message: "tf.expm1 is deprecated, please use tf.math.expm1 instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_ExtractImagePatches.pbtxt b/tensorflow/core/api_def/python_api/api_def_ExtractImagePatches.pbtxt
index d302e26ad2..f008b1222d 100644
--- a/tensorflow/core/api_def/python_api/api_def_ExtractImagePatches.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_ExtractImagePatches.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "extract_image_patches"
- deprecation_message: "tf.extract_image_patches is deprecated, please use tf.image.extract_image_patches instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_FFT.pbtxt b/tensorflow/core/api_def/python_api/api_def_FFT.pbtxt
index 57a00a08e3..d79e936b71 100644
--- a/tensorflow/core/api_def/python_api/api_def_FFT.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_FFT.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "fft"
- deprecation_message: "tf.fft is deprecated, please use tf.spectral.fft instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxArgs.pbtxt b/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxArgs.pbtxt
index cd14b13675..d8db83331f 100644
--- a/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxArgs.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxArgs.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "fake_quant_with_min_max_args"
- deprecation_message: "tf.fake_quant_with_min_max_args is deprecated, please use tf.quantization.fake_quant_with_min_max_args instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxArgsGradient.pbtxt b/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxArgsGradient.pbtxt
index d55cb69d1d..74f01d1a0c 100644
--- a/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxArgsGradient.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxArgsGradient.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "fake_quant_with_min_max_args_gradient"
- deprecation_message: "tf.fake_quant_with_min_max_args_gradient is deprecated, please use tf.quantization.fake_quant_with_min_max_args_gradient instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVars.pbtxt b/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVars.pbtxt
index 6ff4f2cdb2..e14fb6d118 100644
--- a/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVars.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVars.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "fake_quant_with_min_max_vars"
- deprecation_message: "tf.fake_quant_with_min_max_vars is deprecated, please use tf.quantization.fake_quant_with_min_max_vars instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsGradient.pbtxt b/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsGradient.pbtxt
index 817a35cc6c..4611ebdfb8 100644
--- a/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsGradient.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsGradient.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "fake_quant_with_min_max_vars_gradient"
- deprecation_message: "tf.fake_quant_with_min_max_vars_gradient is deprecated, please use tf.quantization.fake_quant_with_min_max_vars_gradient instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsPerChannel.pbtxt b/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsPerChannel.pbtxt
index 275c0d5225..0936e513c3 100644
--- a/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsPerChannel.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsPerChannel.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "fake_quant_with_min_max_vars_per_channel"
- deprecation_message: "tf.fake_quant_with_min_max_vars_per_channel is deprecated, please use tf.quantization.fake_quant_with_min_max_vars_per_channel instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt b/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt
index 897312897f..0d9968248c 100644
--- a/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "fake_quant_with_min_max_vars_per_channel_gradient"
- deprecation_message: "tf.fake_quant_with_min_max_vars_per_channel_gradient is deprecated, please use tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Floor.pbtxt b/tensorflow/core/api_def/python_api/api_def_Floor.pbtxt
index 788d95edc1..9b93caa0b1 100644
--- a/tensorflow/core/api_def/python_api/api_def_Floor.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Floor.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "floor"
- deprecation_message: "tf.floor is deprecated, please use tf.math.floor instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_GatherNd.pbtxt b/tensorflow/core/api_def/python_api/api_def_GatherNd.pbtxt
index 371dc740df..71257c8855 100644
--- a/tensorflow/core/api_def/python_api/api_def_GatherNd.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_GatherNd.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "gather_nd"
- deprecation_message: "tf.gather_nd is deprecated, please use tf.manip.gather_nd instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Greater.pbtxt b/tensorflow/core/api_def/python_api/api_def_Greater.pbtxt
index c8c56515b2..7de60d44c4 100644
--- a/tensorflow/core/api_def/python_api/api_def_Greater.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Greater.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "greater"
- deprecation_message: "tf.greater is deprecated, please use tf.math.greater instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_GreaterEqual.pbtxt b/tensorflow/core/api_def/python_api/api_def_GreaterEqual.pbtxt
index ccb390fb3e..9c8975c2a9 100644
--- a/tensorflow/core/api_def/python_api/api_def_GreaterEqual.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_GreaterEqual.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "greater_equal"
- deprecation_message: "tf.greater_equal is deprecated, please use tf.math.greater_equal instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_IFFT.pbtxt b/tensorflow/core/api_def/python_api/api_def_IFFT.pbtxt
index 267ad8d0a0..17fbd8ace4 100644
--- a/tensorflow/core/api_def/python_api/api_def_IFFT.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_IFFT.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "ifft"
- deprecation_message: "tf.ifft is deprecated, please use tf.spectral.ifft instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Igamma.pbtxt b/tensorflow/core/api_def/python_api/api_def_Igamma.pbtxt
index 4e7e3a6e57..8c4815c26e 100644
--- a/tensorflow/core/api_def/python_api/api_def_Igamma.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Igamma.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "igamma"
- deprecation_message: "tf.igamma is deprecated, please use tf.math.igamma instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Igammac.pbtxt b/tensorflow/core/api_def/python_api/api_def_Igammac.pbtxt
index ea92a0916b..b43b54391b 100644
--- a/tensorflow/core/api_def/python_api/api_def_Igammac.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Igammac.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "igammac"
- deprecation_message: "tf.igammac is deprecated, please use tf.math.igammac instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_InvertPermutation.pbtxt b/tensorflow/core/api_def/python_api/api_def_InvertPermutation.pbtxt
index bce642b96a..d75fcd63e3 100644
--- a/tensorflow/core/api_def/python_api/api_def_InvertPermutation.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_InvertPermutation.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "invert_permutation"
- deprecation_message: "tf.invert_permutation is deprecated, please use tf.math.invert_permutation instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_IsFinite.pbtxt b/tensorflow/core/api_def/python_api/api_def_IsFinite.pbtxt
index a2c12f2ea0..27142644bf 100644
--- a/tensorflow/core/api_def/python_api/api_def_IsFinite.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_IsFinite.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "is_finite"
- deprecation_message: "tf.is_finite is deprecated, please use tf.debugging.is_finite instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_IsInf.pbtxt b/tensorflow/core/api_def/python_api/api_def_IsInf.pbtxt
index 7c29811fd7..4cd92f1cb7 100644
--- a/tensorflow/core/api_def/python_api/api_def_IsInf.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_IsInf.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "is_inf"
- deprecation_message: "tf.is_inf is deprecated, please use tf.debugging.is_inf instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_IsNan.pbtxt b/tensorflow/core/api_def/python_api/api_def_IsNan.pbtxt
index 459cf3ccbd..07d49f9436 100644
--- a/tensorflow/core/api_def/python_api/api_def_IsNan.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_IsNan.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "is_nan"
- deprecation_message: "tf.is_nan is deprecated, please use tf.debugging.is_nan instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Less.pbtxt b/tensorflow/core/api_def/python_api/api_def_Less.pbtxt
index 15cbdc6d8e..055df2922a 100644
--- a/tensorflow/core/api_def/python_api/api_def_Less.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Less.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "less"
- deprecation_message: "tf.less is deprecated, please use tf.math.less instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_LessEqual.pbtxt b/tensorflow/core/api_def/python_api/api_def_LessEqual.pbtxt
index 35aa18698f..d2803ddb69 100644
--- a/tensorflow/core/api_def/python_api/api_def_LessEqual.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_LessEqual.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "less_equal"
- deprecation_message: "tf.less_equal is deprecated, please use tf.math.less_equal instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Lgamma.pbtxt b/tensorflow/core/api_def/python_api/api_def_Lgamma.pbtxt
index 89886b09d3..0262b838ca 100644
--- a/tensorflow/core/api_def/python_api/api_def_Lgamma.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Lgamma.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "lgamma"
- deprecation_message: "tf.lgamma is deprecated, please use tf.math.lgamma instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Log.pbtxt b/tensorflow/core/api_def/python_api/api_def_Log.pbtxt
index fb82aa7e43..26d2473b9c 100644
--- a/tensorflow/core/api_def/python_api/api_def_Log.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Log.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "log"
- deprecation_message: "tf.log is deprecated, please use tf.math.log instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Log1p.pbtxt b/tensorflow/core/api_def/python_api/api_def_Log1p.pbtxt
index 6b451aa546..d85b6dccec 100644
--- a/tensorflow/core/api_def/python_api/api_def_Log1p.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Log1p.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "log1p"
- deprecation_message: "tf.log1p is deprecated, please use tf.math.log1p instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_LogicalAnd.pbtxt b/tensorflow/core/api_def/python_api/api_def_LogicalAnd.pbtxt
index 403a8c71ff..80bd98b740 100644
--- a/tensorflow/core/api_def/python_api/api_def_LogicalAnd.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_LogicalAnd.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "logical_and"
- deprecation_message: "tf.logical_and is deprecated, please use tf.math.logical_and instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_LogicalNot.pbtxt b/tensorflow/core/api_def/python_api/api_def_LogicalNot.pbtxt
index f228958c77..b2244c44b1 100644
--- a/tensorflow/core/api_def/python_api/api_def_LogicalNot.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_LogicalNot.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "logical_not"
- deprecation_message: "tf.logical_not is deprecated, please use tf.math.logical_not instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_LogicalOr.pbtxt b/tensorflow/core/api_def/python_api/api_def_LogicalOr.pbtxt
index ab89f236e7..cf78b52e07 100644
--- a/tensorflow/core/api_def/python_api/api_def_LogicalOr.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_LogicalOr.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "logical_or"
- deprecation_message: "tf.logical_or is deprecated, please use tf.math.logical_or instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatchingFiles.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatchingFiles.pbtxt
index 8930d66940..74145670a8 100644
--- a/tensorflow/core/api_def/python_api/api_def_MatchingFiles.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_MatchingFiles.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "matching_files"
- deprecation_message: "tf.matching_files is deprecated, please use tf.io.matching_files instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixBandPart.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixBandPart.pbtxt
index bad2f03f32..1122c52ab4 100644
--- a/tensorflow/core/api_def/python_api/api_def_MatrixBandPart.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixBandPart.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "matrix_band_part"
- deprecation_message: "tf.matrix_band_part is deprecated, please use tf.linalg.band_part instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixDeterminant.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixDeterminant.pbtxt
index d241d4d721..9563bf0354 100644
--- a/tensorflow/core/api_def/python_api/api_def_MatrixDeterminant.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixDeterminant.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "matrix_determinant"
- deprecation_message: "tf.matrix_determinant is deprecated, please use tf.linalg.det instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixDiag.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixDiag.pbtxt
index 208b37e297..8ab0bf75eb 100644
--- a/tensorflow/core/api_def/python_api/api_def_MatrixDiag.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixDiag.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "matrix_diag"
- deprecation_message: "tf.matrix_diag is deprecated, please use tf.linalg.diag instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixDiagPart.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixDiagPart.pbtxt
index a8a50e8a89..82ce67853c 100644
--- a/tensorflow/core/api_def/python_api/api_def_MatrixDiagPart.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixDiagPart.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "matrix_diag_part"
- deprecation_message: "tf.matrix_diag_part is deprecated, please use tf.linalg.diag_part instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixInverse.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixInverse.pbtxt
index 944513fcd9..85862f6eb5 100644
--- a/tensorflow/core/api_def/python_api/api_def_MatrixInverse.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixInverse.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "matrix_inverse"
- deprecation_message: "tf.matrix_inverse is deprecated, please use tf.linalg.inv instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixSetDiag.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixSetDiag.pbtxt
index a6080dbc2d..6325e4f0e6 100644
--- a/tensorflow/core/api_def/python_api/api_def_MatrixSetDiag.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixSetDiag.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "matrix_set_diag"
- deprecation_message: "tf.matrix_set_diag is deprecated, please use tf.linalg.set_diag instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixSolve.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixSolve.pbtxt
index caba80326b..6325dff407 100644
--- a/tensorflow/core/api_def/python_api/api_def_MatrixSolve.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixSolve.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "matrix_solve"
- deprecation_message: "tf.matrix_solve is deprecated, please use tf.linalg.solve instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixTriangularSolve.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixTriangularSolve.pbtxt
index a4dfa538ed..7f865e23b2 100644
--- a/tensorflow/core/api_def/python_api/api_def_MatrixTriangularSolve.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixTriangularSolve.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "matrix_triangular_solve"
- deprecation_message: "tf.matrix_triangular_solve is deprecated, please use tf.linalg.triangular_solve instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Maximum.pbtxt b/tensorflow/core/api_def/python_api/api_def_Maximum.pbtxt
index 90af9e145b..bcff379b71 100644
--- a/tensorflow/core/api_def/python_api/api_def_Maximum.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Maximum.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "maximum"
- deprecation_message: "tf.maximum is deprecated, please use tf.math.maximum instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Minimum.pbtxt b/tensorflow/core/api_def/python_api/api_def_Minimum.pbtxt
index 33bcd6f667..9aae74226a 100644
--- a/tensorflow/core/api_def/python_api/api_def_Minimum.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Minimum.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "minimum"
- deprecation_message: "tf.minimum is deprecated, please use tf.math.minimum instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_NonMaxSuppressionWithOverlaps.pbtxt b/tensorflow/core/api_def/python_api/api_def_NonMaxSuppressionWithOverlaps.pbtxt
new file mode 100644
index 0000000000..0d358dff98
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_NonMaxSuppressionWithOverlaps.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "NonMaxSuppressionWithOverlaps"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_NotEqual.pbtxt b/tensorflow/core/api_def/python_api/api_def_NotEqual.pbtxt
index 385565daaf..f37317854f 100644
--- a/tensorflow/core/api_def/python_api/api_def_NotEqual.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_NotEqual.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "not_equal"
- deprecation_message: "tf.not_equal is deprecated, please use tf.math.not_equal instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_ParseTensor.pbtxt b/tensorflow/core/api_def/python_api/api_def_ParseTensor.pbtxt
index 29f02ab1ac..10b3aab0c7 100644
--- a/tensorflow/core/api_def/python_api/api_def_ParseTensor.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_ParseTensor.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "parse_tensor"
- deprecation_message: "tf.parse_tensor is deprecated, please use tf.io.parse_tensor instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Polygamma.pbtxt b/tensorflow/core/api_def/python_api/api_def_Polygamma.pbtxt
index 567a448642..9df81402d5 100644
--- a/tensorflow/core/api_def/python_api/api_def_Polygamma.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Polygamma.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "polygamma"
- deprecation_message: "tf.polygamma is deprecated, please use tf.math.polygamma instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Qr.pbtxt b/tensorflow/core/api_def/python_api/api_def_Qr.pbtxt
index a9371b5d9b..0260eecc91 100644
--- a/tensorflow/core/api_def/python_api/api_def_Qr.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Qr.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "qr"
- deprecation_message: "tf.qr is deprecated, please use tf.linalg.qr instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_QuantizedConcat.pbtxt b/tensorflow/core/api_def/python_api/api_def_QuantizedConcat.pbtxt
index 44508ef079..69404b9472 100644
--- a/tensorflow/core/api_def/python_api/api_def_QuantizedConcat.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_QuantizedConcat.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "quantized_concat"
- deprecation_message: "tf.quantized_concat is deprecated, please use tf.quantization.quantized_concat instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReadFile.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReadFile.pbtxt
index 7c38fae31c..9d479be45f 100644
--- a/tensorflow/core/api_def/python_api/api_def_ReadFile.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_ReadFile.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "read_file"
- deprecation_message: "tf.read_file is deprecated, please use tf.io.read_file instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Reciprocal.pbtxt b/tensorflow/core/api_def/python_api/api_def_Reciprocal.pbtxt
index 0f37e99f4f..c4d4c27722 100644
--- a/tensorflow/core/api_def/python_api/api_def_Reciprocal.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Reciprocal.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "reciprocal"
- deprecation_message: "tf.reciprocal is deprecated, please use tf.math.reciprocal instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_RegexReplace.pbtxt b/tensorflow/core/api_def/python_api/api_def_RegexReplace.pbtxt
index 6938e20e57..b17806b338 100644
--- a/tensorflow/core/api_def/python_api/api_def_RegexReplace.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_RegexReplace.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "regex_replace"
- deprecation_message: "tf.regex_replace is deprecated, please use tf.strings.regex_replace instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Reshape.pbtxt b/tensorflow/core/api_def/python_api/api_def_Reshape.pbtxt
index 907d95a6f0..c469665b66 100644
--- a/tensorflow/core/api_def/python_api/api_def_Reshape.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Reshape.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "reshape"
- deprecation_message: "tf.reshape is deprecated, please use tf.manip.reshape instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReverseV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReverseV2.pbtxt
index bbe9e97d60..77f595927b 100644
--- a/tensorflow/core/api_def/python_api/api_def_ReverseV2.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_ReverseV2.pbtxt
@@ -5,10 +5,10 @@ op {
}
endpoint {
name: "reverse"
- deprecation_message: "tf.reverse is deprecated, please use tf.manip.reverse instead."
+ deprecated: true
}
endpoint {
name: "reverse_v2"
- deprecation_message: "tf.reverse_v2 is deprecated, please use tf.manip.reverse instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Rint.pbtxt b/tensorflow/core/api_def/python_api/api_def_Rint.pbtxt
index 4330a80d04..ec37a23127 100644
--- a/tensorflow/core/api_def/python_api/api_def_Rint.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Rint.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "rint"
- deprecation_message: "tf.rint is deprecated, please use tf.math.rint instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Rsqrt.pbtxt b/tensorflow/core/api_def/python_api/api_def_Rsqrt.pbtxt
index 6a45f4aff5..4fc2b81421 100644
--- a/tensorflow/core/api_def/python_api/api_def_Rsqrt.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Rsqrt.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "rsqrt"
- deprecation_message: "tf.rsqrt is deprecated, please use tf.math.rsqrt instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_ScatterNd.pbtxt b/tensorflow/core/api_def/python_api/api_def_ScatterNd.pbtxt
index cabf171cb0..a65a19b542 100644
--- a/tensorflow/core/api_def/python_api/api_def_ScatterNd.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_ScatterNd.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "scatter_nd"
- deprecation_message: "tf.scatter_nd is deprecated, please use tf.manip.scatter_nd instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_SegmentMax.pbtxt b/tensorflow/core/api_def/python_api/api_def_SegmentMax.pbtxt
index 65e34a1fcf..2e22c375c0 100644
--- a/tensorflow/core/api_def/python_api/api_def_SegmentMax.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_SegmentMax.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "segment_max"
- deprecation_message: "tf.segment_max is deprecated, please use tf.math.segment_max instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_SegmentMean.pbtxt b/tensorflow/core/api_def/python_api/api_def_SegmentMean.pbtxt
index f1e19c5571..646348072f 100644
--- a/tensorflow/core/api_def/python_api/api_def_SegmentMean.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_SegmentMean.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "segment_mean"
- deprecation_message: "tf.segment_mean is deprecated, please use tf.math.segment_mean instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_SegmentMin.pbtxt b/tensorflow/core/api_def/python_api/api_def_SegmentMin.pbtxt
index fd9a3c380d..1a77019a2d 100644
--- a/tensorflow/core/api_def/python_api/api_def_SegmentMin.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_SegmentMin.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "segment_min"
- deprecation_message: "tf.segment_min is deprecated, please use tf.math.segment_min instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_SegmentProd.pbtxt b/tensorflow/core/api_def/python_api/api_def_SegmentProd.pbtxt
index f2be8baafc..cf4d6f0237 100644
--- a/tensorflow/core/api_def/python_api/api_def_SegmentProd.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_SegmentProd.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "segment_prod"
- deprecation_message: "tf.segment_prod is deprecated, please use tf.math.segment_prod instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_SegmentSum.pbtxt b/tensorflow/core/api_def/python_api/api_def_SegmentSum.pbtxt
index c7cc1d0c9f..c6d7999455 100644
--- a/tensorflow/core/api_def/python_api/api_def_SegmentSum.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_SegmentSum.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "segment_sum"
- deprecation_message: "tf.segment_sum is deprecated, please use tf.math.segment_sum instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Sin.pbtxt b/tensorflow/core/api_def/python_api/api_def_Sin.pbtxt
index 0794334987..9c19a1a177 100644
--- a/tensorflow/core/api_def/python_api/api_def_Sin.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Sin.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "sin"
- deprecation_message: "tf.sin is deprecated, please use tf.math.sin instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Sinh.pbtxt b/tensorflow/core/api_def/python_api/api_def_Sinh.pbtxt
index c42f8678c6..155e58e6d5 100644
--- a/tensorflow/core/api_def/python_api/api_def_Sinh.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Sinh.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "sinh"
- deprecation_message: "tf.sinh is deprecated, please use tf.math.sinh instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_SpaceToBatchND.pbtxt b/tensorflow/core/api_def/python_api/api_def_SpaceToBatchND.pbtxt
index 63a7547e14..af323a6cf3 100644
--- a/tensorflow/core/api_def/python_api/api_def_SpaceToBatchND.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_SpaceToBatchND.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "space_to_batch_nd"
- deprecation_message: "tf.space_to_batch_nd is deprecated, please use tf.manip.space_to_batch_nd instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_SquaredDifference.pbtxt b/tensorflow/core/api_def/python_api/api_def_SquaredDifference.pbtxt
index 01a33a3346..4bab8cf00c 100644
--- a/tensorflow/core/api_def/python_api/api_def_SquaredDifference.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_SquaredDifference.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "squared_difference"
- deprecation_message: "tf.squared_difference is deprecated, please use tf.math.squared_difference instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_StatefulPartitionedCall.pbtxt b/tensorflow/core/api_def/python_api/api_def_StatefulPartitionedCall.pbtxt
new file mode 100644
index 0000000000..eb8e3ae902
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_StatefulPartitionedCall.pbtxt
@@ -0,0 +1 @@
+op { graph_op_name: "StatefulPartitionedCall" visibility: HIDDEN }
diff --git a/tensorflow/core/api_def/python_api/api_def_StringJoin.pbtxt b/tensorflow/core/api_def/python_api/api_def_StringJoin.pbtxt
index 53c1b8053d..46a7c0361e 100644
--- a/tensorflow/core/api_def/python_api/api_def_StringJoin.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_StringJoin.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "string_join"
- deprecation_message: "tf.string_join is deprecated, please use tf.strings.join instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_StringStrip.pbtxt b/tensorflow/core/api_def/python_api/api_def_StringStrip.pbtxt
index 364806e1f5..fbcdeaad6d 100644
--- a/tensorflow/core/api_def/python_api/api_def_StringStrip.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_StringStrip.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "string_strip"
- deprecation_message: "tf.string_strip is deprecated, please use tf.strings.strip instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_StringToHashBucket.pbtxt b/tensorflow/core/api_def/python_api/api_def_StringToHashBucket.pbtxt
index b0e93d2b22..d122e79b39 100644
--- a/tensorflow/core/api_def/python_api/api_def_StringToHashBucket.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_StringToHashBucket.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "string_to_hash_bucket"
- deprecation_message: "tf.string_to_hash_bucket is deprecated, please use tf.strings.to_hash_bucket instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_StringToHashBucketFast.pbtxt b/tensorflow/core/api_def/python_api/api_def_StringToHashBucketFast.pbtxt
index 9576e1a9de..aef9dffefe 100644
--- a/tensorflow/core/api_def/python_api/api_def_StringToHashBucketFast.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_StringToHashBucketFast.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "string_to_hash_bucket_fast"
- deprecation_message: "tf.string_to_hash_bucket_fast is deprecated, please use tf.strings.to_hash_bucket_fast instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_StringToHashBucketStrong.pbtxt b/tensorflow/core/api_def/python_api/api_def_StringToHashBucketStrong.pbtxt
index e8c7c12608..385b9fd02a 100644
--- a/tensorflow/core/api_def/python_api/api_def_StringToHashBucketStrong.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_StringToHashBucketStrong.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "string_to_hash_bucket_strong"
- deprecation_message: "tf.string_to_hash_bucket_strong is deprecated, please use tf.strings.to_hash_bucket_strong instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_StringToNumber.pbtxt b/tensorflow/core/api_def/python_api/api_def_StringToNumber.pbtxt
index 9de1ca0b30..f740b9849d 100644
--- a/tensorflow/core/api_def/python_api/api_def_StringToNumber.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_StringToNumber.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "string_to_number"
- deprecation_message: "tf.string_to_number is deprecated, please use tf.strings.to_number instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Substr.pbtxt b/tensorflow/core/api_def/python_api/api_def_Substr.pbtxt
index 25d1bb3f51..4778d7927c 100644
--- a/tensorflow/core/api_def/python_api/api_def_Substr.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Substr.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "substr"
- deprecation_message: "tf.substr is deprecated, please use tf.strings.substr instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Tan.pbtxt b/tensorflow/core/api_def/python_api/api_def_Tan.pbtxt
index 8bcf381dd4..ffa92f5580 100644
--- a/tensorflow/core/api_def/python_api/api_def_Tan.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Tan.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "tan"
- deprecation_message: "tf.tan is deprecated, please use tf.math.tan instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Tile.pbtxt b/tensorflow/core/api_def/python_api/api_def_Tile.pbtxt
index 0b9053a529..c34061c941 100644
--- a/tensorflow/core/api_def/python_api/api_def_Tile.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Tile.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "tile"
- deprecation_message: "tf.tile is deprecated, please use tf.manip.tile instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentMax.pbtxt b/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentMax.pbtxt
index 1ea59d2e63..cf81843241 100644
--- a/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentMax.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentMax.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "unsorted_segment_max"
- deprecation_message: "tf.unsorted_segment_max is deprecated, please use tf.math.unsorted_segment_max instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentMin.pbtxt b/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentMin.pbtxt
index 9857def6fe..475361c85a 100644
--- a/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentMin.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentMin.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "unsorted_segment_min"
- deprecation_message: "tf.unsorted_segment_min is deprecated, please use tf.math.unsorted_segment_min instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentProd.pbtxt b/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentProd.pbtxt
index d9e3f7be69..a9d741bbc3 100644
--- a/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentProd.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentProd.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "unsorted_segment_prod"
- deprecation_message: "tf.unsorted_segment_prod is deprecated, please use tf.math.unsorted_segment_prod instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentSum.pbtxt b/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentSum.pbtxt
index 0cffd12404..337678dcff 100644
--- a/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentSum.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_UnsortedSegmentSum.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "unsorted_segment_sum"
- deprecation_message: "tf.unsorted_segment_sum is deprecated, please use tf.math.unsorted_segment_sum instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_WriteFile.pbtxt b/tensorflow/core/api_def/python_api/api_def_WriteFile.pbtxt
index f28a9151ca..1a58ae19e5 100644
--- a/tensorflow/core/api_def/python_api/api_def_WriteFile.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_WriteFile.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "write_file"
- deprecation_message: "tf.write_file is deprecated, please use tf.io.write_file instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Zeta.pbtxt b/tensorflow/core/api_def/python_api/api_def_Zeta.pbtxt
index a84ffcdf14..4684a9d624 100644
--- a/tensorflow/core/api_def/python_api/api_def_Zeta.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_Zeta.pbtxt
@@ -5,6 +5,6 @@ op {
}
endpoint {
name: "zeta"
- deprecation_message: "tf.zeta is deprecated, please use tf.math.zeta instead."
+ deprecated: true
}
}
diff --git a/tensorflow/core/common_runtime/base_collective_executor.h b/tensorflow/core/common_runtime/base_collective_executor.h
index 462d6b7533..3af9286264 100644
--- a/tensorflow/core/common_runtime/base_collective_executor.h
+++ b/tensorflow/core/common_runtime/base_collective_executor.h
@@ -108,11 +108,11 @@ class BaseCollectiveExecutor : public CollectiveExecutor {
bool peer_is_local, const string& key, Device* to_device,
DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
- const DeviceLocality& client_locality,
+ const DeviceLocality& client_locality, int stream_index,
const StatusCallback& done) override {
- remote_access_->RecvFromPeer(peer_device, peer_task, peer_is_local, key,
- to_device, to_device_ctx, to_alloc_attr,
- to_tensor, client_locality, done);
+ remote_access_->RecvFromPeer(
+ peer_device, peer_task, peer_is_local, key, to_device, to_device_ctx,
+ to_alloc_attr, to_tensor, client_locality, stream_index, done);
}
void PostToPeer(const string& peer_device, const string& peer_task,
diff --git a/tensorflow/core/common_runtime/broadcaster.cc b/tensorflow/core/common_runtime/broadcaster.cc
index 9646a0856e..46142d5923 100644
--- a/tensorflow/core/common_runtime/broadcaster.cc
+++ b/tensorflow/core/common_runtime/broadcaster.cc
@@ -187,7 +187,7 @@ void Broadcaster::RunTree() {
DeviceContext* op_dev_ctx = ctx_->op_device_context();
CollectiveRemoteAccessLocal::MemCpyAsync(
op_dev_ctx, op_dev_ctx, device_, device_, ctx_->input_alloc_attr(0),
- ctx_->output_alloc_attr(0), input, output_,
+ ctx_->output_alloc_attr(0), input, output_, 0 /*stream_index*/,
[this, &mu, &pending_count, &all_done](const Status& s) {
mutex_lock l(mu);
status_.Update(s);
@@ -239,7 +239,7 @@ void Broadcaster::DispatchRecv(int src_rank, Tensor* dst_tensor,
col_params_.task.is_local[src_idx], recv_buf_key,
device_, ctx_->op_device_context(),
ctx_->output_alloc_attr(0), dst_tensor,
- device_locality_, done);
+ device_locality_, 0 /*stream_index*/, done);
}
} // namespace tensorflow
diff --git a/tensorflow/core/common_runtime/broadcaster_test.cc b/tensorflow/core/common_runtime/broadcaster_test.cc
index 959b93d56e..6a163a0db0 100644
--- a/tensorflow/core/common_runtime/broadcaster_test.cc
+++ b/tensorflow/core/common_runtime/broadcaster_test.cc
@@ -161,12 +161,12 @@ class FailTestRMA : public CollectiveRemoteAccessLocal {
bool peer_is_local, const string& key, Device* to_device,
DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
- const DeviceLocality& client_locality,
+ const DeviceLocality& client_locality, int stream_index,
const StatusCallback& done) override {
if (MaybeFail(done)) return;
CollectiveRemoteAccessLocal::RecvFromPeer(
peer_device, peer_task, peer_is_local, key, to_device, to_device_ctx,
- to_alloc_attr, to_tensor, client_locality, done);
+ to_alloc_attr, to_tensor, client_locality, stream_index, done);
}
void PostToPeer(const string& peer_device, const string& peer_task,
diff --git a/tensorflow/core/common_runtime/collective_param_resolver_local.cc b/tensorflow/core/common_runtime/collective_param_resolver_local.cc
index 8b2e0d1e0a..236f999228 100644
--- a/tensorflow/core/common_runtime/collective_param_resolver_local.cc
+++ b/tensorflow/core/common_runtime/collective_param_resolver_local.cc
@@ -18,6 +18,10 @@ limitations under the License.
namespace tensorflow {
+void CollectiveParamResolverLocal::InstanceRec::WaitForOutMu(mutex_lock& lock) {
+ while (!out_mu_available) out_cv.wait(lock);
+}
+
CollectiveParamResolverLocal::CollectiveParamResolverLocal(
const DeviceMgr* dev_mgr, DeviceResolverInterface* dev_resolver,
const string& task_name)
@@ -313,11 +317,14 @@ void SortDevicesAndTasks(CollectiveParams* cp) {
VLOG(1) << "Modified device_names on " << cp;
SetDevPerTask(cp);
}
+} // namespace
// Establish the requested number of subdivision permutations based on the
// ring order implicit in the device order.
-void GenerateSubdivPerms(const string& device, int source_rank,
- CollectiveParams* cp) {
+/*static*/
+void CollectiveParamResolverLocal::GenerateSubdivPerms(const string& device,
+ int source_rank,
+ CollectiveParams* cp) {
// Each subdiv permutation is a ring formed by rotating each
// single-task subsequence of devices by an offset. This makes most
// sense when each task has the same number of devices but we can't
@@ -356,15 +363,27 @@ void GenerateSubdivPerms(const string& device, int source_rank,
std::vector<int>& perm = cp->instance.impl_details.subdiv_permutations[sdi];
CHECK_EQ(perm.size(), 0);
int offset = cp->instance.impl_details.subdiv_offsets[sdi];
- int prior_dev_count = 0;
+ // A negative subdivision offset is interpreted as follows:
+ // 1. Reverse the local device ordering.
+ // 2. Begin the subdivision at abs(offset) in the reversed ordering.
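+ // For example, with 8 devices per task and offset -3, the reversed
+ // per-task ordering is 7,6,5,4,3,2,1,0 and the subdivision begins at
+ // index 3, giving the per-task permutation 4,3,2,1,0,7,6,5 (see the
+ // GenerateSubdivPerms test in this change).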
+ bool reverse = false;
+ if (offset < 0) {
+ offset = abs(offset);
+ reverse = true;
+ }
+ int prior_dev_count = 0; // sum over prior worker device counts
for (int ti = 0; ti < cp->group.num_tasks; ++ti) {
for (int di = 0; di < dev_per_task[ti]; ++di) {
- int offset_di = (di + offset) % dev_per_task[ti];
+ int di_offset = (di + offset) % dev_per_task[ti];
+ int offset_di =
+ reverse ? (dev_per_task[ti] - (di_offset + 1)) : di_offset;
+ // Device index in global subdivision permutation.
int permuted_di = prior_dev_count + offset_di;
+ int rank = static_cast<int>(perm.size());
perm.push_back(permuted_di);
- if (cp->instance.device_names[prior_dev_count + di] == device) {
- CHECK_EQ(prior_dev_count + di, cp->default_rank);
- cp->subdiv_rank[sdi] = permuted_di;
+ if (cp->instance.device_names[permuted_di] == device) {
+ CHECK_EQ(permuted_di, cp->default_rank);
+ cp->subdiv_rank[sdi] = rank;
}
}
prior_dev_count += dev_per_task[ti];
@@ -411,8 +430,6 @@ void GenerateSubdivPerms(const string& device, int source_rank,
}
}
-} // namespace
-
void CollectiveParamResolverLocal::CompleteTaskIsLocal(const string& task_name,
CollectiveParams* cp) {
cp->task.is_local.resize(cp->group.group_size, false);
@@ -460,11 +477,24 @@ void CollectiveParamResolverLocal::InitInstanceSharedParams(
// called by a derived class, some of the devices may be non-local and
// GetDeviceLocalitiesAsync will use those fields to launch RPCs.
CompleteTaskIsLocal(task_name_, &ir->shared);
+
+ // Because the callback may execute in a different thread, we release
+ // ir->out_mu here. Before releasing, we mark it as unavailable for other
+ // threads.
+ ir->out_mu_available = false;
+ ir->out_mu.unlock();
std::vector<DeviceLocality>* localities = new std::vector<DeviceLocality>;
dev_resolver_->GetDeviceLocalitiesAsync(
ir->shared.instance, localities,
[this, gr, cp, ir, localities, done](const Status& s)
- EXCLUSIVE_LOCKS_REQUIRED(ir->out_mu) {
+ EXCLUSIVE_LOCK_FUNCTION(ir->out_mu) {
+ // Then we reacquire the lock in the callback thread, which will hold
+ // it through the rest of the call chain. Signal the cv now; any
+ // waiting threads will wake only when out_mu is released later.
+ ir->out_mu.lock();
+ DCHECK(!ir->out_mu_available);
+ ir->out_mu_available = true;
+ ir->out_cv.notify_all();
if (s.ok()) {
CompleteDefaultRanking(gr, cp, ir, *localities);
done(Status::OK());
@@ -512,6 +542,7 @@ void CollectiveParamResolverLocal::CallbackWithStatus(
Status s;
{
mutex_lock l(irec->out_mu);
+ irec->WaitForOutMu(l);
s = irec->status;
}
done(s, irec);
@@ -559,21 +590,29 @@ void CollectiveParamResolverLocal::CallInitInstanceSharedParams(
// static analysis, so we turn off analysis only within this
// function body.
//
- // A lock on ir->out_mu must be held throughout the _bodies_ of the
+ // A lock on ir->out_mu must be held* throughout the _bodies_ of the
// chain of function calls initiated here, each of which calls
// another as its last action, but it will be dropped within the
// callback defined below, which means that the lock can be dropped
// before all the function stack frames pop. The static analysis will
// not allow that.
+ //
+ // *The lock is dropped just before calling GetDeviceLocalitiesAsync because
+ // there is no guarantee that the thread that executes the callback is the
+ // same as the one that locked ir->out_mu. To prevent other threads from
+ // grabbing ir->out_mu, we mark ir->out_mu_available as false. Hence, in
+ // principle, the lock is held throughout.
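+ //
+ // In outline, mirroring InitInstanceSharedParams and its callback:
+ //   out_mu_available = false; ir->out_mu.unlock();   // hand off
+ //   ... GetDeviceLocalitiesAsync callback fires on some thread ...
+ //   ir->out_mu.lock(); out_mu_available = true; out_cv.notify_all();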
ir->out_mu.lock();
+ DCHECK(ir->out_mu_available);
ir->known.resize(cp->group.group_size, false);
InitInstanceSharedParams(
gr, cp, ir,
[this, ir, done](const Status& s) UNLOCK_FUNCTION(ir->out_mu) {
DCHECK(!ir->out_mu.try_lock());
+ DCHECK(ir->out_mu_available);
ir->status.Update(s);
ir->out_mu.unlock();
- // Prepare to invoke any waiters that accumlated during
+ // Prepare to invoke any waiters that accumulated during
// initialization.
std::vector<IRConsumer> init_waiters;
{
@@ -650,6 +689,7 @@ void CollectiveParamResolverLocal::CompleteInstanceFromInitializedIRec(
// Populate the fields common across instance.
{
mutex_lock l(ir->out_mu);
+ ir->WaitForOutMu(l);
// custom operator= does a deep copy.
cp->instance = ir->shared.instance;
}
@@ -665,8 +705,9 @@ void CollectiveParamResolverLocal::CompleteInstanceFromInitializedIRec(
int source_rank;
{
mutex_lock l(irec->out_mu);
+ irec->WaitForOutMu(l);
s = irec->status;
- source_rank = ir->source_rank;
+ source_rank = irec->source_rank;
}
if (s.ok()) {
GenerateSubdivPerms(device, source_rank, cp);
@@ -687,6 +728,7 @@ void CollectiveParamResolverLocal::CompleteInstanceSource(InstanceRec* ir,
std::vector<IRConsumer> ready_waiters;
{
mutex_lock l(ir->out_mu);
+ ir->WaitForOutMu(l);
CHECK_EQ(cp->group.group_size, ir->known.size());
CHECK_GE(cp->default_rank, 0);
if (!ir->known[cp->default_rank]) {
diff --git a/tensorflow/core/common_runtime/collective_param_resolver_local.h b/tensorflow/core/common_runtime/collective_param_resolver_local.h
index 43c404f2ec..01bdeca7d1 100644
--- a/tensorflow/core/common_runtime/collective_param_resolver_local.h
+++ b/tensorflow/core/common_runtime/collective_param_resolver_local.h
@@ -88,7 +88,7 @@ class CollectiveParamResolverLocal : public ParamResolverInterface {
// permit mutex locks to be taken in more than one order.
//
// out_mu guards access to most of the fields.
- // in_mu guards access to a queue of comsumer callbacks wanting to
+ // in_mu guards access to a queue of consumer callbacks wanting to
// read the fields guarded by out_mu.
//
// The in_mu should be locked only while holding instance_mu_; the
@@ -109,8 +109,12 @@ class CollectiveParamResolverLocal : public ParamResolverInterface {
bool is_init GUARDED_BY(in_mu);
std::vector<IRConsumer> init_waiters GUARDED_BY(in_mu);
- // Values to be shared by all instances, constant after initialization.
+ // A thread that wishes to acquire out_mu must ensure that it is available
+ // by invoking WaitForOutMu().
mutex out_mu;
+ condition_variable out_cv;
+ bool out_mu_available GUARDED_BY(out_mu);
+ // Values to be shared by all instances, constant after initialization.
CollectiveParams shared GUARDED_BY(out_mu);
// If an error occurs during initialization this structure stays in
// the table with a non-OK status. Purging the table and restarting
@@ -124,7 +128,15 @@ class CollectiveParamResolverLocal : public ParamResolverInterface {
std::vector<bool> known GUARDED_BY(out_mu);
std::vector<IRConsumer> known_waiters GUARDED_BY(out_mu);
- InstanceRec() : is_init(false), source_rank(-1), known_count(0) {}
+ InstanceRec()
+ : is_init(false),
+ out_mu_available(true),
+ source_rank(-1),
+ known_count(0) {}
+
+ // If out_mu is unavailable during distributed device locality
+ // initialization, wait on out_cv until it is available again.
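+ //
+ // Typical call pattern, as used elsewhere in this change:
+ //   mutex_lock l(ir->out_mu);
+ //   ir->WaitForOutMu(l);
+ //   ... access fields guarded by out_mu ...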
+ void WaitForOutMu(mutex_lock& lock) EXCLUSIVE_LOCKS_REQUIRED(out_mu);
};
// Find the InstanceRec with the same instance_key as cp. If it doesn't
@@ -147,7 +159,7 @@ class CollectiveParamResolverLocal : public ParamResolverInterface {
// cp is populated with all DeviceLocalities
void InitInstanceSharedParams(const GroupRec* gr, const CollectiveParams* cp,
InstanceRec* ir, const StatusCallback& done)
- EXCLUSIVE_LOCKS_REQUIRED(ir->out_mu) LOCKS_EXCLUDED(gr->mu);
+ UNLOCK_FUNCTION(ir->out_mu) LOCKS_EXCLUDED(gr->mu);
void CallInitInstanceSharedParams(const GroupRec* gr,
const CollectiveParams* cp, InstanceRec* ir,
@@ -200,6 +212,10 @@ class CollectiveParamResolverLocal : public ParamResolverInterface {
void CallbackWithStatus(const InstanceRecCallback& done, InstanceRec* irec)
LOCKS_EXCLUDED(irec->out_mu);
+ friend class CollectiveParamResolverLocalTest;
+ static void GenerateSubdivPerms(const string& device, int source_rank,
+ CollectiveParams* cp);
+
const DeviceMgr* dev_mgr_;
DeviceResolverInterface* dev_resolver_; // Not owned.
string task_name_;
diff --git a/tensorflow/core/common_runtime/collective_param_resolver_local_test.cc b/tensorflow/core/common_runtime/collective_param_resolver_local_test.cc
index 4e33c4779a..d5be8f927e 100644
--- a/tensorflow/core/common_runtime/collective_param_resolver_local_test.cc
+++ b/tensorflow/core/common_runtime/collective_param_resolver_local_test.cc
@@ -26,7 +26,6 @@ limitations under the License.
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
-namespace {
#define NUM_DEVS 3
@@ -45,6 +44,11 @@ class CollectiveParamResolverLocalTest : public ::testing::Test {
task_name));
}
+ void GenSubdivPerms(const string& device, int source_rank,
+ CollectiveParams* cp) {
+ CollectiveParamResolverLocal::GenerateSubdivPerms(device, source_rank, cp);
+ }
+
std::vector<Device*> devices_;
std::unique_ptr<DeviceMgr> device_mgr_;
std::unique_ptr<DeviceResolverLocal> drl_;
@@ -147,7 +151,69 @@ TEST_F(CollectiveParamResolverLocalTest, CompleteParamsBroadcast1Task) {
}
}
-// TEST_F(CollectiveParamResolverLocalTest,
+TEST_F(CollectiveParamResolverLocalTest, GenerateSubdivPerms) {
+ static const int kNumDevsPerTask = 8;
+ static const int kNumTasks = 3;
+ static const int kNumDevs = kNumDevsPerTask * kNumTasks;
+ CollectiveParams cp;
+ std::vector<string> device_names;
+ std::vector<string> task_names;
+ cp.group.group_key = 1;
+ cp.group.group_size = kNumDevs;
+ cp.group.device_type = DeviceType("GPU");
+ cp.group.num_tasks = kNumTasks;
+ cp.instance.instance_key = 3;
+ cp.instance.type = REDUCTION_COLLECTIVE;
+ cp.instance.data_type = DataType(DT_FLOAT);
+ cp.instance.shape = TensorShape({5});
+ cp.instance.impl_details.subdiv_offsets.push_back(0);
+ cp.is_source = false;
+ for (int i = 0; i < kNumDevs; ++i) {
+ int task_id = i / kNumDevsPerTask;
+ int dev_id = i % kNumDevsPerTask;
+ string task_name = strings::StrCat("/job:worker/replica:0/task:", task_id);
+ task_names.push_back(task_name);
+ string device_name = strings::StrCat(task_name, "/device:GPU:", dev_id);
+ device_names.push_back(device_name);
+ cp.instance.task_names.push_back(task_name);
+ cp.instance.device_names.push_back(device_name);
+ }
+
+ int test_rank = 0;
+ cp.default_rank = test_rank;
+ cp.instance.impl_details.subdiv_offsets = {0, 4};
+ GenSubdivPerms(cp.instance.device_names[test_rank], 0, &cp);
+ std::vector<int> expected_0 = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23};
+ std::vector<int> expected_1 = {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
+ 8, 9, 10, 11, 20, 21, 22, 23, 16, 17, 18, 19};
+ for (int i = 0; i < kNumDevs; ++i) {
+ EXPECT_EQ(expected_0[i],
+ cp.instance.impl_details.subdiv_permutations[0][i]);
+ EXPECT_EQ(expected_1[i],
+ cp.instance.impl_details.subdiv_permutations[1][i]);
+ }
+ EXPECT_EQ(0, cp.subdiv_rank[0]);
+ EXPECT_EQ(4, cp.subdiv_rank[1]);
+
+ test_rank = 3;
+ cp.default_rank = test_rank;
+ cp.instance.impl_details.subdiv_offsets = {3, -3};
+ cp.instance.impl_details.subdiv_permutations.clear();
+ GenSubdivPerms(cp.instance.device_names[test_rank], 0, &cp);
+ expected_0 = {3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14,
+ 15, 8, 9, 10, 19, 20, 21, 22, 23, 16, 17, 18};
+ expected_1 = {4, 3, 2, 1, 0, 7, 6, 5, 12, 11, 10, 9,
+ 8, 15, 14, 13, 20, 19, 18, 17, 16, 23, 22, 21};
+ for (int i = 0; i < kNumDevs; ++i) {
+ EXPECT_EQ(expected_0[i],
+ cp.instance.impl_details.subdiv_permutations[0][i]);
+ EXPECT_EQ(expected_1[i],
+ cp.instance.impl_details.subdiv_permutations[1][i]);
+ }
+ EXPECT_EQ(0, cp.subdiv_rank[0]);
+ EXPECT_EQ(1, cp.subdiv_rank[1]);
+}
-} // namespace
} // namespace tensorflow
diff --git a/tensorflow/core/common_runtime/collective_rma_local.cc b/tensorflow/core/common_runtime/collective_rma_local.cc
index 69f1a9f24c..288ae9d794 100644
--- a/tensorflow/core/common_runtime/collective_rma_local.cc
+++ b/tensorflow/core/common_runtime/collective_rma_local.cc
@@ -27,7 +27,8 @@ void CollectiveRemoteAccessLocal::RecvFromPeer(
const string& peer_device, const string& peer_task, bool peer_is_local,
const string& key, Device* to_device, DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
- const DeviceLocality& client_locality, const StatusCallback& done) {
+ const DeviceLocality& client_locality, int dev_to_dev_stream_index,
+ const StatusCallback& done) {
VLOG(1) << "RecvFromPeer " << this << " from " << peer_device << " key "
<< key;
if (!peer_is_local) {
@@ -37,8 +38,9 @@ void CollectiveRemoteAccessLocal::RecvFromPeer(
return;
}
buf_rendezvous_.ConsumeBuf(
- key, [this, to_tensor, to_device_ctx, to_device, to_alloc_attr, done](
- const Status& s, BufRendezvous::Hook* hook) {
+ key, [this, to_tensor, to_device_ctx, to_device, to_alloc_attr,
+ dev_to_dev_stream_index,
+ done](const Status& s, BufRendezvous::Hook* hook) {
if (!s.ok()) {
done(s);
delete hook;
@@ -53,7 +55,7 @@ void CollectiveRemoteAccessLocal::RecvFromPeer(
to_alloc_attr, // dst AllocatorAttributes
hook->prod_value, // src Tensor*
to_tensor, // dst Tensor*
- [hook, done](const Status& s) {
+ dev_to_dev_stream_index, [hook, done](const Status& s) {
// This callback may be executing in the GPUEventMgr
// pool in which case it must be very short duration
// and non-blocking (except e.g. for queue insertion).
@@ -82,7 +84,7 @@ void CollectiveRemoteAccessLocal::MemCpyAsync(
DeviceContext* src_dev_ctx, DeviceContext* dst_dev_ctx, Device* src_dev,
Device* dst_dev, const AllocatorAttributes& src_attr,
const AllocatorAttributes& dst_attr, const Tensor* src, Tensor* dst,
- const StatusCallback& done) {
+ int dev_to_dev_stream_index, const StatusCallback& done) {
// We want a real copy to happen, i.e. the bytes inside of src should be
// transferred to the buffer backing dst. If src and dst are on different
// devices then CopyTensor::ViaDMA will do just that. But if they're both
@@ -115,7 +117,7 @@ void CollectiveRemoteAccessLocal::MemCpyAsync(
if (non_cpu_src || non_cpu_dst) {
CopyTensor::ViaDMA("", // edge name (non-existent)
src_dev_ctx, dst_dev_ctx, src_dev, dst_dev, src_attr,
- dst_attr, src, dst, done);
+ dst_attr, src, dst, dev_to_dev_stream_index, done);
} else {
int64 bytes = src->TotalBytes();
DCHECK_EQ(dst->TotalBytes(), bytes);
diff --git a/tensorflow/core/common_runtime/collective_rma_local.h b/tensorflow/core/common_runtime/collective_rma_local.h
index 716e23bfa1..dbb2e67c7d 100644
--- a/tensorflow/core/common_runtime/collective_rma_local.h
+++ b/tensorflow/core/common_runtime/collective_rma_local.h
@@ -41,6 +41,7 @@ class CollectiveRemoteAccessLocal : public PerStepCollectiveRemoteAccess {
DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
const DeviceLocality& client_locality,
+ int dev_to_dev_stream_index,
const StatusCallback& done) override;
void PostToPeer(const string& peer_device, const string& peer_task,
@@ -77,6 +78,7 @@ class CollectiveRemoteAccessLocal : public PerStepCollectiveRemoteAccess {
Device* dst_dev, const AllocatorAttributes& src_attr,
const AllocatorAttributes& dst_attr,
const Tensor* src, Tensor* dst,
+ int dev_to_dev_stream_index,
const StatusCallback& done);
protected:
diff --git a/tensorflow/core/common_runtime/collective_rma_local_test.cc b/tensorflow/core/common_runtime/collective_rma_local_test.cc
index dcd4272d96..a931fe64bd 100644
--- a/tensorflow/core/common_runtime/collective_rma_local_test.cc
+++ b/tensorflow/core/common_runtime/collective_rma_local_test.cc
@@ -69,6 +69,7 @@ TEST_F(CollectiveRemoteAccessLocalTest, PostRecvCPU0) {
rma_->RecvFromPeer(kTaskName + "/device:CPU:0", kTaskName, true /*is_local*/,
"key_0", cpu0 /*to_device*/, nullptr /*to_device_ctx*/,
attr /*to_alloc_attr*/, &sink_tensor, dev_locality,
+ 0 /*stream_index*/,
[this, &recv_note, &recv_status](const Status& s) {
recv_status = s;
recv_note.Notify();
@@ -111,6 +112,7 @@ TEST_F(CollectiveRemoteAccessLocalTest, PostRecvCPU1_2) {
rma_->RecvFromPeer(kTaskName + "/device:CPU:1", kTaskName, true /*is_local*/,
"key_0", cpu2 /*to_device*/, nullptr /*to_device_ctx*/,
attr /*to_alloc_attr*/, &sink_tensor, dev_locality,
+ 0 /*stream_index*/,
[this, &recv_note, &recv_status](const Status& s) {
recv_status = s;
recv_note.Notify();
diff --git a/tensorflow/core/common_runtime/copy_tensor.cc b/tensorflow/core/common_runtime/copy_tensor.cc
index 08d120c7a5..630b3702c8 100644
--- a/tensorflow/core/common_runtime/copy_tensor.cc
+++ b/tensorflow/core/common_runtime/copy_tensor.cc
@@ -170,7 +170,7 @@ void CopyDeviceToDevice(CopyTensor::CopyFunction copy_function,
Device* dst, const AllocatorAttributes src_alloc_attr,
const AllocatorAttributes dst_alloc_attr,
const Tensor* input, Tensor* output,
- StatusCallback done) {
+ int dev_to_dev_stream_index, StatusCallback done) {
if (input->dtype() == DT_VARIANT) {
Tensor copy(cpu_allocator, DT_VARIANT, input->shape());
auto* status_cb = new ReffedStatusCallback(std::move(done));
@@ -182,10 +182,10 @@ void CopyDeviceToDevice(CopyTensor::CopyFunction copy_function,
};
auto copier = std::bind(
[copy_function, src, dst, src_alloc_attr, dst_alloc_attr,
- recv_dev_context, send_dev_context, out_allocator,
- status_cb](StatusCallback wrapped_done_,
- // Begin unbound arguments
- const Tensor& from, Tensor* to) {
+ recv_dev_context, send_dev_context, out_allocator, status_cb,
+ dev_to_dev_stream_index](StatusCallback wrapped_done_,
+ // Begin unbound arguments
+ const Tensor& from, Tensor* to) {
if (!DMAHelper::CanUseDMA(&from)) {
Status err = errors::InvalidArgument(
"During Variant Device->Device Copy: "
@@ -199,7 +199,7 @@ void CopyDeviceToDevice(CopyTensor::CopyFunction copy_function,
*to = Tensor(out_allocator, from.dtype(), from.shape());
copy_function(send_dev_context, recv_dev_context, src, dst,
src_alloc_attr, dst_alloc_attr, &from, to,
- std::move(wrapped_done_));
+ dev_to_dev_stream_index, std::move(wrapped_done_));
return Status::OK();
} else {
return status_cb->status();
@@ -224,7 +224,8 @@ void CopyDeviceToDevice(CopyTensor::CopyFunction copy_function,
}
} else {
copy_function(send_dev_context, recv_dev_context, src, dst, src_alloc_attr,
- dst_alloc_attr, input, output, std::move(done));
+ dst_alloc_attr, input, output, dev_to_dev_stream_index,
+ std::move(done));
}
}
@@ -236,7 +237,7 @@ void CopyTensor::ViaDMA(StringPiece edge_name, DeviceContext* send_dev_context,
Device* dst, const AllocatorAttributes src_alloc_attr,
const AllocatorAttributes dst_alloc_attr,
const Tensor* input, Tensor* output,
- StatusCallback done) {
+ int dev_to_dev_stream_index, StatusCallback done) {
tracing::ScopedAnnotation annotation(edge_name);
VLOG(1) << "Copy " << edge_name;
@@ -266,7 +267,7 @@ void CopyTensor::ViaDMA(StringPiece edge_name, DeviceContext* send_dev_context,
CopyDeviceToDevice(ri.copy_function, cpu_allocator, out_allocator,
send_dev_context, recv_dev_context, src, dst,
src_alloc_attr, dst_alloc_attr, input, output,
- std::move(done));
+ dev_to_dev_stream_index, std::move(done));
return;
}
}
diff --git a/tensorflow/core/common_runtime/copy_tensor.h b/tensorflow/core/common_runtime/copy_tensor.h
index a9d684bf11..9cd5ac2a37 100644
--- a/tensorflow/core/common_runtime/copy_tensor.h
+++ b/tensorflow/core/common_runtime/copy_tensor.h
@@ -28,13 +28,11 @@ namespace tensorflow {
class CopyTensor {
public:
- typedef void (*CopyFunction)(DeviceContext* send_dev_context,
- DeviceContext* recv_dev_context, Device* src,
- Device* dst,
- const AllocatorAttributes src_alloc_attr,
- const AllocatorAttributes dst_alloc_attr,
- const Tensor* input, Tensor* output,
- StatusCallback done);
+ typedef void (*CopyFunction)(
+ DeviceContext* send_dev_context, DeviceContext* recv_dev_context,
+ Device* src, Device* dst, const AllocatorAttributes src_alloc_attr,
+ const AllocatorAttributes dst_alloc_attr, const Tensor* input,
+ Tensor* output, int dev_to_dev_stream_index, StatusCallback done);
// Copies "input" to "output" between devices accessible to the
// local process via some DMA-like method. "edge_name" is the name
@@ -46,7 +44,8 @@ class CopyTensor {
DeviceContext* recv_dev_context, Device* src, Device* dst,
const AllocatorAttributes src_alloc_attr,
const AllocatorAttributes dst_alloc_attr,
- const Tensor* input, Tensor* output, StatusCallback done);
+ const Tensor* input, Tensor* output,
+ int dev_to_dev_stream_index, StatusCallback done);
// Object used to call Register() at static-initialization time.
// Note: This should only ever be used as a global-static object; no stack
diff --git a/tensorflow/core/common_runtime/direct_session.cc b/tensorflow/core/common_runtime/direct_session.cc
index 87ba609dd7..1732553abd 100644
--- a/tensorflow/core/common_runtime/direct_session.cc
+++ b/tensorflow/core/common_runtime/direct_session.cc
@@ -1188,12 +1188,11 @@ Status DirectSession::CreateExecutors(
delete kernel;
}
};
- params.node_outputs_cb = node_outputs_callback_;
optimizer.Optimize(lib, options_.env, device, &iter->second,
/*shape_map=*/nullptr);
- // EXPERIMENTAL: tfdbg inserts debug nodes in the graph.
+ // TensorFlow Debugger (tfdbg) inserts debug nodes in the graph.
const DebugOptions& debug_options =
options.callable_options.run_options().debug_options();
if (!debug_options.debug_tensor_watch_opts().empty()) {
@@ -1626,15 +1625,6 @@ Status DirectSession::MakeCallable(const CallableOptions& callable_options,
TF_RETURN_IF_ERROR(CheckNotClosed());
TF_RETURN_IF_ERROR(CheckGraphCreated("MakeCallable()"));
- if (!callable_options.run_options()
- .debug_options()
- .debug_tensor_watch_opts()
- .empty()) {
- return errors::Unimplemented(
- "Debug options are not currently supported via the C++ MakeCallable "
- "interface.");
- }
-
std::unique_ptr<ExecutorsAndKeys> ek;
std::unique_ptr<FunctionInfo> func_info;
RunStateArgs run_state_args(callable_options.run_options().debug_options());
diff --git a/tensorflow/core/common_runtime/direct_session_test.cc b/tensorflow/core/common_runtime/direct_session_test.cc
index 8ddc9958b2..142d613129 100644
--- a/tensorflow/core/common_runtime/direct_session_test.cc
+++ b/tensorflow/core/common_runtime/direct_session_test.cc
@@ -40,6 +40,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
@@ -47,6 +48,11 @@ limitations under the License.
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/device_name_utils.h"
+#ifdef GOOGLE_CUDA
+#include "cuda/include/cuda.h"
+#include "cuda/include/cuda_runtime_api.h"
+#endif // GOOGLE_CUDA
+
namespace tensorflow {
namespace {
@@ -1233,36 +1239,23 @@ TEST(DirectSessionTest, TimeoutSession) {
device: '/device:CPU:0'
attr {
key: 'capacity'
- value {
- i: 10
- }
+ value { i: 10 }
}
attr {
key: 'component_types'
- value {
- list {
- type: DT_FLOAT
- }
- }
+ value { list { type: DT_FLOAT } }
}
attr {
key: 'container'
- value {
- s: ''
- }
+ value { s: '' }
}
attr {
key: 'shapes'
- value {
- list {
- }
- }
+ value { list {} }
}
attr {
key: 'shared_name'
- value {
- s: ''
- }
+ value { s: '' }
}
}
node {
@@ -1272,24 +1265,15 @@ TEST(DirectSessionTest, TimeoutSession) {
device: '/device:CPU:0'
attr {
key: 'component_types'
- value {
- list {
- type: DT_FLOAT
- }
- }
+ value { list { type: DT_FLOAT } }
}
attr {
key: 'timeout_ms'
- value {
- i: -1
- }
+ value { i: -1 }
}
}
- versions {
- producer: 9
- }
- )proto",
- &graph);
+ versions { producer: 9 }
+ )proto", &graph);
{
// Creates a session with operation_timeout_in_ms set to 100 milliseconds.
@@ -1352,11 +1336,8 @@ TEST(DirectSessionTest, TestTimeoutCleanShutdown) {
op: 'CancellationMgrPollingOp'
device: '/device:CPU:0'
}
- versions {
- producer: 9
- }
- )proto",
- &graph);
+ versions { producer: 9 }
+ )proto", &graph);
// Creates a session with operation_timeout_in_ms set to 100 milliseconds.
SessionOptions options;
@@ -1730,6 +1711,292 @@ TEST(DirectSessionTest, LocalDeviceManager) {
EXPECT_GT(mgr->ListDevices().size(), 0);
}
+// y = tf.square(x)
+GraphDef CreateGraphForYEqualsXSquared() {
+ GraphDef graph_def;
+ const char* text_proto = R"EOF(
+node {
+ name: "x"
+ op: "Placeholder"
+ attr { key: "dtype" value { type: DT_FLOAT } }
+ attr { key: "shape" value { shape { unknown_rank: true } } }
+}
+node {
+ name: "y"
+ op: "Square"
+ input: "x"
+ attr { key: "T" value { type: DT_FLOAT } }
+}
+versions {
+ producer: 26
+}
+ )EOF";
+
+ QCHECK(protobuf::TextFormat::ParseFromString(text_proto, &graph_def));
+ return graph_def;
+}
+
+// Returns true if the tensor is backed by CUDA device memory
+// (such tensors cannot be dereferenced on the host).
+bool IsCUDATensor(const Tensor& t) {
+#ifdef GOOGLE_CUDA
+ cudaPointerAttributes attributes;
+ cudaError_t err =
+ cudaPointerGetAttributes(&attributes, t.tensor_data().data());
+ if (err == cudaErrorInvalidValue) return false;
+ CHECK_EQ(cudaSuccess, err) << cudaGetErrorString(err);
+ return (attributes.memoryType == cudaMemoryTypeDevice);
+#else
+ return false;
+#endif
+}
+
+string GPUDeviceName(Session* session) {
+ std::vector<DeviceAttributes> devices;
+ TF_CHECK_OK(session->ListDevices(&devices));
+ for (const DeviceAttributes& d : devices) {
+ if (d.device_type() == "GPU" || d.device_type() == "gpu") {
+ return d.name();
+ }
+ }
+ return "";
+}
+
+TEST(DirectSessionTest, FeedAndFetchTensorsInDeviceMemory) {
+ std::unique_ptr<Session> session(NewSession(SessionOptions()));
+ const string gpu_device_name = GPUDeviceName(session.get());
+ if (gpu_device_name.empty()) {
+ LOG(INFO) << "Skipping test since no GPU is available";
+ return;
+ }
+
+ TF_ASSERT_OK(session->Create(CreateGraphForYEqualsXSquared()));
+
+ CallableOptions opts;
+ opts.add_feed("x:0");
+ opts.add_fetch("y:0");
+
+ Tensor gpu_tensor;
+
+ {
+ Session::CallableHandle feed_cpu_fetch_gpu;
+ opts.mutable_fetch_devices()->insert({"y:0", gpu_device_name});
+ opts.set_fetch_skip_sync(true);
+ TF_ASSERT_OK(session->MakeCallable(opts, &feed_cpu_fetch_gpu));
+ Tensor input(DT_FLOAT, {});
+ input.scalar<float>()() = 2.0f;
+ std::vector<Tensor> outputs;
+ TF_ASSERT_OK(
+ session->RunCallable(feed_cpu_fetch_gpu, {input}, &outputs, nullptr));
+ TF_ASSERT_OK(session->ReleaseCallable(feed_cpu_fetch_gpu));
+ ASSERT_EQ(1, outputs.size());
+ gpu_tensor = outputs[0];
+ ASSERT_TRUE(IsCUDATensor(gpu_tensor));
+ }
+
+ {
+ Session::CallableHandle feed_gpu_fetch_cpu;
+ opts.clear_fetch_devices();
+ opts.mutable_feed_devices()->insert({"x:0", gpu_device_name});
+ TF_ASSERT_OK(session->MakeCallable(opts, &feed_gpu_fetch_cpu));
+ std::vector<Tensor> outputs;
+ TF_ASSERT_OK(session->RunCallable(feed_gpu_fetch_cpu, {gpu_tensor},
+ &outputs, nullptr));
+ TF_ASSERT_OK(session->ReleaseCallable(feed_gpu_fetch_cpu));
+ ASSERT_EQ(1, outputs.size());
+ // The output is in CPU/host memory, so it can be dereferenced.
+ ASSERT_EQ(16.0, outputs[0].scalar<float>()());
+ }
+}
+
+GraphDef CreateIdentityGraphDef(DataType dtype) {
+ GraphDef def;
+
+ AttrValue dtype_attr;
+ dtype_attr.set_type(dtype);
+
+ AttrValue shape_attr;
+ shape_attr.mutable_shape()->set_unknown_rank(true);
+
+ auto* placeholder = def.add_node();
+ placeholder->set_name("x");
+ placeholder->set_op("Placeholder");
+ placeholder->mutable_attr()->insert({"dtype", dtype_attr});
+ placeholder->mutable_attr()->insert({"shape", shape_attr});
+
+ auto* identity = def.add_node();
+ identity->set_name("y");
+ identity->set_op("Identity");
+ identity->add_input("x");
+ identity->mutable_attr()->insert({"T", dtype_attr});
+
+ return def;
+}
+
+void TestFeedAndFetchTensorsInDeviceMemory(
+ const SessionOptions& session_options, DataType dtype) {
+ std::unique_ptr<Session> session(NewSession(session_options));
+ const string gpu_device_name = GPUDeviceName(session.get());
+ if (gpu_device_name.empty()) {
+ LOG(INFO) << "Skipping test since no GPU is available";
+ return;
+ }
+
+ TF_ASSERT_OK(session->Create(CreateIdentityGraphDef(dtype)))
+ << DataType_Name(dtype);
+
+ CallableOptions opts;
+ opts.add_feed("x:0");
+ opts.add_fetch("y:0");
+
+ Tensor gpu_tensor;
+ Tensor host_tensor(dtype, {3});
+ {
+ // Ask for the fetched tensor to be backed by device memory, even
+ // though the kernel that created the tensor produced it in host
+ // memory.
+ opts.mutable_fetch_devices()->insert({"y:0", gpu_device_name});
+ opts.set_fetch_skip_sync(true);
+ Session::CallableHandle handle;
+ TF_ASSERT_OK(session->MakeCallable(opts, &handle)) << DataType_Name(dtype);
+ std::vector<Tensor> outputs;
+ TF_ASSERT_OK(session->RunCallable(handle, {host_tensor}, &outputs, nullptr))
+ << DataType_Name(dtype);
+ TF_ASSERT_OK(session->ReleaseCallable(handle)) << DataType_Name(dtype);
+ ASSERT_EQ(1, outputs.size()) << DataType_Name(dtype);
+ gpu_tensor = outputs[0];
+ ASSERT_TRUE(IsCUDATensor(gpu_tensor)) << DataType_Name(dtype);
+ }
+
+ {
+ // Feed a tensor backed by device memory, even though the operations in the
+ // graph expect it in host memory.
+ opts.clear_fetch_devices();
+ opts.mutable_feed_devices()->insert({"x:0", gpu_device_name});
+ Session::CallableHandle handle;
+ TF_ASSERT_OK(session->MakeCallable(opts, &handle)) << DataType_Name(dtype);
+ std::vector<Tensor> outputs;
+ TF_ASSERT_OK(session->RunCallable(handle, {gpu_tensor}, &outputs, nullptr))
+ << DataType_Name(dtype);
+ TF_ASSERT_OK(session->ReleaseCallable(handle)) << DataType_Name(dtype);
+ ASSERT_EQ(1, outputs.size());
+ const StringPiece actual_data = outputs[0].tensor_data();
+ const StringPiece expected_data = host_tensor.tensor_data();
+ EXPECT_EQ(expected_data.size(), actual_data.size()) << DataType_Name(dtype);
+ EXPECT_EQ(0, memcmp(expected_data.data(), actual_data.data(),
+ std::min(expected_data.size(), actual_data.size())))
+ << DataType_Name(dtype);
+ }
+}
+
+void TestFeedAndFetchTensorsInDeviceMemoryFailsToMakeCallable(
+ const SessionOptions& session_options, DataType dtype) {
+ std::unique_ptr<Session> session(NewSession(session_options));
+ const string gpu_device_name = GPUDeviceName(session.get());
+ if (gpu_device_name.empty()) {
+ LOG(INFO) << "Skipping test since no GPU is available";
+ return;
+ }
+
+ TF_ASSERT_OK(session->Create(CreateIdentityGraphDef(dtype)))
+ << DataType_Name(dtype);
+
+ CallableOptions opts;
+ opts.add_feed("x:0");
+ opts.add_fetch("y:0");
+
+ // Fail when asking to fetch into GPU memory.
+ {
+ opts.mutable_fetch_devices()->insert({"y:0", gpu_device_name});
+ opts.set_fetch_skip_sync(true);
+ Session::CallableHandle handle;
+ Status status = session->MakeCallable(opts, &handle);
+ EXPECT_FALSE(status.ok()) << DataType_Name(dtype);
+ EXPECT_TRUE(str_util::StrContains(
+ status.error_message(),
+ strings::StrCat(
+ "Cannot feed or fetch tensor 'y:0' from device ", gpu_device_name,
+ " as feeding/fetching from GPU devices is not yet supported for ",
+ DataTypeString(dtype), " tensors")))
+ << DataType_Name(dtype) << ", Status: " << status;
+ }
+
+ // Fail when feeding from GPU memory.
+ {
+ opts.clear_fetch_devices();
+ opts.mutable_feed_devices()->insert({"x:0", gpu_device_name});
+ Session::CallableHandle handle;
+ Status status = session->MakeCallable(opts, &handle);
+ EXPECT_FALSE(status.ok());
+ EXPECT_TRUE(str_util::StrContains(
+ status.error_message(),
+ strings::StrCat(
+ "Cannot feed or fetch tensor 'x:0' from device ", gpu_device_name,
+ " as feeding/fetching from GPU devices is not yet supported for ",
+ DataTypeString(dtype), " tensors")))
+ << DataType_Name(dtype) << ", Status: " << status;
+ }
+}
+
+void TestFeedAndFetchTensorsInDeviceMemoryForAllDataTypes(
+ const SessionOptions& opts) {
+ // Feeding/fetching on device does not work for all DataTypes, as it
+ // relies on the implementation of the _Arg and _Retval kernels, which
+ // for some types are not registered, or consume/produce their
+ // inputs/outputs in host memory.
+ //
+ // Run through all datatypes to validate that either:
+ // (a) MakeCallable fails (because the given type cannot be fed/fetched
+ // in device memory),
+ // OR
+ // (b) Succeeds: RunCallable should gladly accept inputs in device memory
+ // and produce output tensors in device memory.
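+ // For example, DT_FLOAT is expected to take path (b), while DT_STRING
+ // (no device _Arg/_Retval support) is expected to take path (a).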
+ for (int i = DataType_MIN; i <= DataType_MAX; ++i) {
+ if (!DataType_IsValid(i)) continue;
+ const DataType dtype = static_cast<DataType>(i);
+ switch (dtype) {
+ case DT_INVALID:
+ break;
+ case DT_BFLOAT16:
+ case DT_BOOL:
+ case DT_COMPLEX128:
+ case DT_COMPLEX64:
+ case DT_DOUBLE:
+ case DT_FLOAT:
+ case DT_HALF:
+ case DT_INT16:
+ case DT_INT64:
+ case DT_INT8:
+ case DT_UINT16:
+ case DT_UINT8:
+ TestFeedAndFetchTensorsInDeviceMemory(opts, dtype);
+ break;
+ default:
+ // Ignore all REF types since Tensors of this type aren't intended to
+ // be fed (and attempting to create one via the Tensor constructor
+ // will result in a LOG(FATAL)).
+ if (!IsRefType(dtype)) {
+ TestFeedAndFetchTensorsInDeviceMemoryFailsToMakeCallable(opts, dtype);
+ }
+ break;
+ }
+ }
+}
+
+TEST(DirectSessionTest, FeedAndFetchTensorsInDeviceMemory_AllDataTypes) {
+ SessionOptions opts;
+ opts.config.set_allow_soft_placement(false);
+ TestFeedAndFetchTensorsInDeviceMemoryForAllDataTypes(opts);
+}
+
+TEST(DirectSessionTest,
+ FeedAndFetchTensorsInDeviceMemory_AllDataTypes_SoftPlacement) {
+ SessionOptions opts;
+ opts.config.set_allow_soft_placement(true);
+ TestFeedAndFetchTensorsInDeviceMemoryForAllDataTypes(opts);
+}
+
// A simple benchmark for the overhead of `DirectSession::Run()` calls
// with varying numbers of feeds/fetches.
void FeedFetchBenchmarkHelper(int iters, int num_feeds,
diff --git a/tensorflow/core/common_runtime/direct_session_with_tracking_alloc_test.cc b/tensorflow/core/common_runtime/direct_session_with_tracking_alloc_test.cc
index b4bf1c408f..0b096a14a3 100644
--- a/tensorflow/core/common_runtime/direct_session_with_tracking_alloc_test.cc
+++ b/tensorflow/core/common_runtime/direct_session_with_tracking_alloc_test.cc
@@ -106,24 +106,24 @@ TEST(DirectSessionWithTrackingAllocTest, CostModelTest) {
EXPECT_EQ(1, shape.dim(1).size());
if (node->name() == y->name()) {
#ifdef INTEL_MKL
- // if MKL is used, it goes through various additional
- // graph rewrite pass. In TF, everytime a graph pass
+ // If MKL is used, the graph goes through various additional
+ // graph rewrite passes. In TF, every time a graph pass
// happens, "constant" nodes are allocated
// and deallocated. Each allocation calls the
// (FindChunkPtr of BFCAllocator),
- // which increments the value of AllocationId.
- // Thus AllocationId becomes more than TF if MKL
- // is used. Now IDs for MKL are 8 more than TF.
+ // which increments the value of AllocationId.
+ // Thus the AllocationId values are higher when MKL is
+ // used; currently, MKL IDs are 8 more than stock TF's.
EXPECT_EQ(29, cm->AllocationId(node, 0));
#else
EXPECT_EQ(21, cm->AllocationId(node, 0));
-#endif
+#endif
} else {
#ifdef INTEL_MKL
EXPECT_EQ(30, cm->AllocationId(node, 0));
#else
EXPECT_EQ(22, cm->AllocationId(node, 0));
-#endif
+#endif
}
}
EXPECT_LE(0, cm->MaxExecutionTime(node));
diff --git a/tensorflow/core/common_runtime/eager/context.cc b/tensorflow/core/common_runtime/eager/context.cc
index 8a87ba7a19..aaca633cc5 100644
--- a/tensorflow/core/common_runtime/eager/context.cc
+++ b/tensorflow/core/common_runtime/eager/context.cc
@@ -34,8 +34,16 @@ EagerContext::EagerContext(const SessionOptions& opts,
local_device_manager_.get(), opts.env, TF_GRAPH_DEF_VERSION,
&func_lib_def_, {}, thread_pool_.get())),
log_device_placement_(opts.config.log_device_placement()),
- async_default_(async) {
+ async_default_(async),
+ env_(opts.env) {
InitDeviceMapAndAsync();
+ if (opts.config.inter_op_parallelism_threads() > 0) {
+ runner_ = [this](std::function<void()> closure) {
+ this->thread_pool_->Schedule(closure);
+ };
+ } else {
+ runner_ = [](std::function<void()> closure) { closure(); };
+ }
}
#ifndef __ANDROID__
@@ -57,6 +65,7 @@ EagerContext::EagerContext(
log_device_placement_(opts.config.log_device_placement()),
async_default_(async),
remote_device_manager_(std::move(remote_device_manager)),
+ env_(opts.env),
server_(std::move(server)),
remote_eager_workers_(std::move(remote_eager_workers)),
remote_contexts_(remote_contexts) {
@@ -109,6 +118,9 @@ Status EagerContext::SetAsyncForThread(bool async) {
void EagerContext::ClearCaches() {
mutex_lock ml(cache_mu_);
gtl::STLDeleteValues(&kernel_cache_);
+ pflr_.reset(new ProcessFunctionLibraryRuntime(
+ local_device_manager_.get(), env_, TF_GRAPH_DEF_VERSION, &func_lib_def_,
+ {}, thread_pool_.get()));
}
void EagerContext::SetThreadLocalDevicePlacementPolicy(
@@ -193,9 +205,46 @@ Status EagerContext::FindDeviceByName(const string& name, Device** result) {
return Status::OK();
}
+Status EagerContext::MaybeRegisterFunctionRemotely(const FunctionDef& fdef) {
+ if (remote_device_manager_ == nullptr) return Status::OK();
+
+ BlockingCounter blocking_counter(static_cast<int>(remote_contexts_.size()));
+
+ std::vector<eager::RegisterFunctionRequest> requests(remote_contexts_.size());
+ std::vector<eager::RegisterFunctionResponse> responses(
+ remote_contexts_.size());
+ std::vector<Status> statuses(remote_contexts_.size());
+
+ int i = 0;
+ for (const auto& target_and_context_id : remote_contexts_) {
+ requests[i].set_context_id(target_and_context_id.second);
+ *requests[i].mutable_function_def() = fdef;
+
+ auto* eager_client =
+ remote_eager_workers_->GetClient(target_and_context_id.first);
+
+ eager_client->RegisterFunctionAsync(
+ &requests[i], &responses[i],
+ [i, &statuses, &blocking_counter](const Status& status) {
+ statuses[i] = status;
+ blocking_counter.DecrementCount();
+ });
+
+ i++;
+ }
+ blocking_counter.Wait();
+
+ for (int i = 0; i < remote_contexts_.size(); i++) {
+ TF_RETURN_IF_ERROR(statuses[i]);
+ }
+ return Status::OK();
+}
+
Status EagerContext::AddFunctionDef(const FunctionDef& fdef) {
mutex_lock l(functions_mu_);
- return func_lib_def_.AddFunctionDef(fdef);
+ TF_RETURN_IF_ERROR(func_lib_def_.AddFunctionDef(fdef));
+
+ return MaybeRegisterFunctionRemotely(fdef);
}
KernelAndDevice* EagerContext::GetCachedKernel(Fprint128 cache_key) {
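(The new MaybeRegisterFunctionRemotely above fans one RegisterFunction RPC out per remote context and joins on a BlockingCounter before surfacing the first error. A minimal standalone sketch of that fan-out/join pattern, using std primitives in place of tensorflow::BlockingCounter — the names below are illustrative, not TF API:)

    #include <condition_variable>
    #include <functional>
    #include <mutex>
    #include <vector>

    // Minimal stand-in for tensorflow::BlockingCounter: Wait() returns once
    // DecrementCount() has been called `initial` times.
    class Counter {
     public:
      explicit Counter(int initial) : count_(initial) {}
      void DecrementCount() {
        std::lock_guard<std::mutex> l(mu_);
        if (--count_ == 0) cv_.notify_all();
      }
      void Wait() {
        std::unique_lock<std::mutex> l(mu_);
        cv_.wait(l, [this] { return count_ == 0; });
      }
     private:
      std::mutex mu_;
      std::condition_variable cv_;
      int count_;
    };

    // Fan out one async call per target, then join. `start_async` stands in
    // for RegisterFunctionAsync; each entry must eventually run its callback.
    void FanOutAndJoin(
        const std::vector<std::function<void(std::function<void()>)>>& start_async) {
      Counter pending(static_cast<int>(start_async.size()));
      for (const auto& start : start_async) {
        start([&pending] { pending.DecrementCount(); });
      }
      pending.Wait();  // Block until every callback has run, as the diff does.
    }
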
diff --git a/tensorflow/core/common_runtime/eager/context.h b/tensorflow/core/common_runtime/eager/context.h
index 601b9e4545..6825c39ef3 100644
--- a/tensorflow/core/common_runtime/eager/context.h
+++ b/tensorflow/core/common_runtime/eager/context.h
@@ -105,6 +105,8 @@ class EagerContext {
EagerExecutor* Executor() { return &executor_; }
+ std::function<void(std::function<void()>)>* runner() { return &runner_; }
+
// Sets whether this thread should run in synchronous or asynchronous mode.
Status SetAsyncForThread(bool async);
@@ -183,6 +185,7 @@ class EagerContext {
#endif
private:
void InitDeviceMapAndAsync();
+ Status MaybeRegisterFunctionRemotely(const FunctionDef& fdef);
const ContextDevicePlacementPolicy policy_;
@@ -208,10 +211,9 @@ class EagerContext {
std::unique_ptr<thread::ThreadPool> thread_pool_;
- // One FunctionLibraryRuntime per device.
- // func_libs[i] is the FunctionLibraryRuntime corresponding to
- // session->devices[i].
- const std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
+ std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
+
+ std::function<void(std::function<void()>)> runner_;
mutex cache_mu_;
std::unordered_map<Fprint128, KernelAndDevice*, Fprint128Hasher> kernel_cache_
@@ -234,6 +236,8 @@ class EagerContext {
const std::unique_ptr<DeviceMgr> remote_device_manager_;
+ tensorflow::Env* const env_;
+
// server_ is released when the context is destroyed, so it cannot be marked
// const (even though conceptually it should be).
diff --git a/tensorflow/core/common_runtime/eager/execute.cc b/tensorflow/core/common_runtime/eager/execute.cc
index 14aa520e19..5ea814ed4e 100644
--- a/tensorflow/core/common_runtime/eager/execute.cc
+++ b/tensorflow/core/common_runtime/eager/execute.cc
@@ -36,6 +36,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
+#include "tensorflow/core/util/ptr_util.h"
namespace tensorflow {
@@ -127,7 +128,7 @@ Status MaybeCopyInputToExpectedDevice(EagerOperation* op, int i,
// We are only here if the policy is warn or silent copies, so we should
// trigger a copy.
auto pre_time = Env::Default()->NowMicros();
- TensorHandle* result_handle;
+ TensorHandle* result_handle = nullptr;
Status status = EagerCopyToDevice(
*handle, ctx, expected_device->name().c_str(), &result_handle);
if (run_metadata != nullptr) {
@@ -423,7 +424,13 @@ Status GetOutputDTypes(EagerOperation* op, DataTypeVector* output_dtypes) {
const auto& node_def = op->MutableAttrs()->BuildNodeDef();
const OpDef* op_def = nullptr;
- TF_RETURN_IF_ERROR(OpDefForOp(op->Name().c_str(), &op_def));
+ const FunctionDef* function_def =
+ op->EagerContext()->FuncLibDef()->Find(op->Name());
+ if (function_def != nullptr) {
+ op_def = &(function_def->signature());
+ } else {
+ TF_RETURN_IF_ERROR(OpDefForOp(op->Name().c_str(), &op_def));
+ }
TF_RETURN_IF_ERROR(OutputTypesForNode(node_def, *op_def, output_dtypes));
@@ -505,7 +512,8 @@ Status EagerLocalExecute(EagerOperation* op,
// See WARNING comment in Execute (before kernel->Run) - would be nice to
// rework to avoid this subtlety.
tf_shared_lock l(*ctx->FunctionsMu());
- status = KernelAndDevice::Init(ndef, ctx->func_lib(device), kernel);
+ status = KernelAndDevice::Init(ndef, ctx->func_lib(device), ctx->runner(),
+ kernel);
if (!status.ok()) {
delete kernel;
return status;
@@ -606,7 +614,7 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
tensorflow::TensorHandle* input = op->Inputs()[i];
- tensorflow::uint64 op_id;
+ tensorflow::int64 op_id;
int32 output_num;
TF_RETURN_IF_ERROR(input->RemoteAddress(&op_id, &output_num));
@@ -623,22 +631,6 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
request.set_context_id(context_id);
- if (op->EagerContext()->Async()) {
- tensorflow::uint64 id = op->EagerContext()->NextId();
- auto* node = new eager::RemoteExecuteNode(id, request, eager_client);
- op->EagerContext()->ExecutorAdd(node);
- } else {
- Notification n;
- Status status;
- eager_client->EnqueueAsync(&request, &response,
- [&n, &status](const Status& s) {
- status = s;
- n.Notify();
- });
- n.WaitForNotification();
- if (!status.ok()) return status;
- }
-
DataTypeVector output_dtypes;
TF_RETURN_IF_ERROR(GetOutputDTypes(op, &output_dtypes));
@@ -649,6 +641,13 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
tensorflow::Device* op_device = op->Device();
+ bool is_async = op->EagerContext()->Async();
+ uint64 remote_node_id = 0;
+
+ if (is_async) {
+ remote_node_id = op->EagerContext()->NextId();
+ }
+
const tensorflow::uint64 id = remote_op->id();
for (int i = 0; i < *num_retvals; i++) {
// TODO(nareshmodi): Change the callback to instead add the decref to a list
@@ -676,9 +675,52 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
return tensorflow::Status::OK();
};
- retvals[i] = new TensorHandle(remote_op->id(), i, output_dtypes[i],
- std::move(callback), op_device, op_device,
- op->EagerContext());
+
+ retvals[i] = new TensorHandle(remote_op->id(), i, remote_node_id,
+ output_dtypes[i], std::move(callback),
+ op_device, op_device, op->EagerContext());
+ }
+
+ if (is_async) {
+ // Copy the output handles, since the container for them might get
+ // destroyed.
+ gtl::InlinedVector<TensorHandle*, 2> retvals_copy;
+ for (int i = 0; i < *num_retvals; i++) {
+ retvals_copy.push_back(retvals[i]);
+ retvals_copy[i]->Ref();
+ }
+ // Unable to capture via std::move, so bind instead.
+ auto* node = new eager::RemoteExecuteNode(
+ remote_node_id, request, eager_client, op->Inputs(),
+ std::bind(
+ [](const gtl::InlinedVector<TensorHandle*, 2>& retvals,
+ const Status& status, const eager::EnqueueResponse& response) {
+ if (!status.ok()) return;
+ for (int i = 0; i < retvals.size(); i++) {
+ retvals[i]->SetRemoteShape(MakeUnique<TensorShape>(
+ response.queue_response(0).shape(i)));
+ retvals[i]->Unref();
+ }
+ },
+ std::move(retvals_copy), std::placeholders::_1,
+ std::placeholders::_2));
+ op->EagerContext()->ExecutorAdd(node);
+ } else {
+ Notification n;
+ Status status;
+ eager_client->EnqueueAsync(&request, &response,
+ [&n, &status](const Status& s) {
+ status = s;
+ n.Notify();
+ });
+ n.WaitForNotification();
+
+ if (!status.ok()) return status;
+
+ for (int i = 0; i < *num_retvals; i++) {
+ retvals[i]->SetRemoteShape(
+ MakeUnique<TensorShape>(response.queue_response(0).shape(i)));
+ }
}
return Status::OK();
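(The async branch above works around the fact that C++11 lambdas cannot capture by move: the ref-counted output handles are copied into a vector, and that vector is moved into std::bind as a bound argument while the callback's real parameters stay as placeholders. A self-contained sketch of the idiom:)

    #include <functional>
    #include <iostream>
    #include <memory>
    #include <vector>

    int main() {
      // C++11 lambdas cannot capture by move. The workaround used in the
      // diff: pass the moved object to std::bind as a bound argument, and
      // leave the callback's runtime parameters as placeholders.
      std::vector<std::unique_ptr<int>> owned;
      owned.emplace_back(new int(7));

      auto cb = std::bind(
          [](const std::vector<std::unique_ptr<int>>& v, int status) {
            std::cout << *v[0] << " status=" << status << "\n";
          },
          std::move(owned), std::placeholders::_1);

      cb(0);  // Prints "7 status=0".
      return 0;
    }
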
diff --git a/tensorflow/core/common_runtime/eager/kernel_and_device.cc b/tensorflow/core/common_runtime/eager/kernel_and_device.cc
index b410ea175b..dae5d1983f 100644
--- a/tensorflow/core/common_runtime/eager/kernel_and_device.cc
+++ b/tensorflow/core/common_runtime/eager/kernel_and_device.cc
@@ -41,17 +41,22 @@ Status KernelAndDevice::InitOp(Device* device, const NodeDef& ndef,
out->device_ = device;
out->kernel_.reset(k);
out->flib_ = nullptr;
+ out->runner_ = nullptr;
+ out->default_runner_ = [](std::function<void()> f) { f(); };
return s;
}
// static
Status KernelAndDevice::Init(const NodeDef& ndef, FunctionLibraryRuntime* flib,
+ std::function<void(std::function<void()>)>* runner,
KernelAndDevice* out) {
OpKernel* k = nullptr;
Status s = flib->CreateKernel(ndef, &k);
out->device_ = flib->device();
out->kernel_.reset(k);
out->flib_ = flib;
+ out->runner_ = runner;
+ out->default_runner_ = [](std::function<void()> f) { f(); };
return s;
}
@@ -83,10 +88,11 @@ Status KernelAndDevice::Run(std::vector<Tensor>* input_tensors,
if (stats != nullptr) {
params.track_allocations = true;
}
- // TODO(apassos): use a thread pool.
- std::function<void(std::function<void()>)> runner =
- [](std::function<void()> f) { f(); };
- params.runner = &runner;
+ if (runner_ == nullptr) {
+ params.runner = &default_runner_;
+ } else {
+ params.runner = runner_;
+ }
ScopedStepContainer step_container(0, [this](const string& name) {
device_->resource_manager()->Cleanup(name).IgnoreError();
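(With this change KernelAndDevice carries both an optional injected runner — the context's thread pool when inter_op_parallelism_threads > 0 — and an inline default, and Run() picks between them. A small sketch of that selection logic with illustrative names:)

    #include <functional>
    #include <iostream>

    using Runner = std::function<void(std::function<void()>)>;

    // Mirrors the fallback in KernelAndDevice::Run(): use the injected runner
    // when one was provided at Init() time, otherwise run the closure inline.
    void Dispatch(Runner* injected, std::function<void()> closure) {
      static Runner inline_runner = [](std::function<void()> f) { f(); };
      Runner* r = (injected != nullptr) ? injected : &inline_runner;
      (*r)(std::move(closure));
    }

    int main() {
      Dispatch(nullptr, [] { std::cout << "ran inline\n"; });
      Runner log_runner = [](std::function<void()> f) {
        std::cout << "via injected runner: ";
        f();
      };
      Dispatch(&log_runner, [] { std::cout << "ran\n"; });
    }
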
diff --git a/tensorflow/core/common_runtime/eager/kernel_and_device.h b/tensorflow/core/common_runtime/eager/kernel_and_device.h
index c41a0972b1..c0b676b285 100644
--- a/tensorflow/core/common_runtime/eager/kernel_and_device.h
+++ b/tensorflow/core/common_runtime/eager/kernel_and_device.h
@@ -57,6 +57,7 @@ class KernelAndDevice {
// the FunctionLibraryRuntime is pushed on to the caller (see locking in
// c_api.cc).
static Status Init(const NodeDef& ndef, FunctionLibraryRuntime* flib,
+ std::function<void(std::function<void()>)>* runner,
KernelAndDevice* out);
// TODO(ashankar): Remove this
static Status InitOp(Device* device, const NodeDef& ndef,
@@ -88,6 +89,8 @@ class KernelAndDevice {
checkpoint::TensorSliceReaderCacheWrapper slice_reader_cache_;
Rendezvous* rendez_;
DataTypeVector output_dtypes_;
+ std::function<void(std::function<void()>)>* runner_;
+ std::function<void(std::function<void()>)> default_runner_;
};
} // namespace tensorflow
diff --git a/tensorflow/core/common_runtime/eager/kernel_and_device_test.cc b/tensorflow/core/common_runtime/eager/kernel_and_device_test.cc
index b4349e1dee..6abe98f53c 100644
--- a/tensorflow/core/common_runtime/eager/kernel_and_device_test.cc
+++ b/tensorflow/core/common_runtime/eager/kernel_and_device_test.cc
@@ -107,8 +107,8 @@ void BM_KernelAndDeviceInit(int iters) {
KernelAndDevice k(nullptr);
tensorflow::testing::StartTiming();
for (int i = 0; i < iters; ++i) {
- TF_CHECK_OK(
- KernelAndDevice::Init(ndef, env.function_library_runtime(), &k));
+ TF_CHECK_OK(KernelAndDevice::Init(ndef, env.function_library_runtime(),
+ nullptr, &k));
}
}
BENCHMARK(BM_KernelAndDeviceInit);
@@ -128,8 +128,8 @@ void BM_KernelAndDeviceRun(int iters) {
.BuildNodeDef());
TestEnv env;
KernelAndDevice kernel(nullptr);
- TF_CHECK_OK(
- KernelAndDevice::Init(ndef, env.function_library_runtime(), &kernel));
+ TF_CHECK_OK(KernelAndDevice::Init(ndef, env.function_library_runtime(),
+ nullptr, &kernel));
tensorflow::testing::StartTiming();
for (int i = 0; i < iters; ++i) {
TF_CHECK_OK(kernel.Run(&inputs, &outputs, nullptr));
diff --git a/tensorflow/core/common_runtime/eager/tensor_handle.cc b/tensorflow/core/common_runtime/eager/tensor_handle.cc
index 1a811aa8df..85b0b79bce 100644
--- a/tensorflow/core/common_runtime/eager/tensor_handle.cc
+++ b/tensorflow/core/common_runtime/eager/tensor_handle.cc
@@ -45,7 +45,7 @@ limitations under the License.
namespace tensorflow {
bool TensorHandle::IsReady() {
- if (node_id == 0) return true;
+ if (node_id_ == 0) return true;
mutex_lock l(ctx_mutex_);
return is_ready_;
}
@@ -54,17 +54,19 @@ bool TensorHandle::IsRemote() {
return remote_op_id_ >= 0 && remote_output_num_ >= 0;
}
-Status TensorHandle::WaitReady() {
+Status TensorHandle::WaitForNode(uint64 node_id, bool return_if_is_ready) {
if (node_id == 0) return Status::OK();
EagerExecutor* executor = nullptr;
{
mutex_lock l(ctx_mutex_);
- if (is_ready_) return Status::OK();
+ if (return_if_is_ready && is_ready_) return Status::OK();
executor = ctx_->Executor();
}
return executor->WaitFor(node_id);
}
+Status TensorHandle::WaitReady() { return WaitForNode(node_id_, true); }
+
Status TensorHandle::Tensor(const tensorflow::Tensor** t) {
if (IsRemote()) {
return errors::Unavailable(
@@ -107,7 +109,51 @@ Status TensorHandle::TensorAndDevice(const tensorflow::Tensor** tensor,
return Status::OK();
}
-Status TensorHandle::RemoteAddress(uint64* op_id, int32* output_num) {
+Status TensorHandle::Shape(tensorflow::TensorShape* shape) {
+ if (IsRemote()) {
+ TF_RETURN_IF_ERROR(WaitForNode(remote_shape_node_id_, false));
+ CHECK(remote_shape_ != nullptr);
+ *shape = *(remote_shape_.get());
+ } else {
+ TF_RETURN_IF_ERROR(WaitReady());
+ DCHECK(IsReady());
+ *shape = tensor_.shape();
+ }
+ return Status::OK();
+}
+
+Status TensorHandle::NumDims(int* num_dims) {
+ if (IsRemote()) {
+ TF_RETURN_IF_ERROR(WaitForNode(remote_shape_node_id_, false));
+ CHECK(remote_shape_ != nullptr);
+ *num_dims = remote_shape_->dims();
+ } else {
+ TF_RETURN_IF_ERROR(WaitReady());
+ DCHECK(IsReady());
+ DCHECK(num_dims != nullptr);
+
+ *num_dims = tensor_.dims();
+ }
+
+ return Status::OK();
+}
+
+Status TensorHandle::Dim(int dim_index, int64* dim) {
+ if (IsRemote()) {
+ TF_RETURN_IF_ERROR(WaitForNode(remote_shape_node_id_, false));
+ *dim = remote_shape_->dim_size(dim_index);
+ } else {
+ TF_RETURN_IF_ERROR(WaitReady());
+ DCHECK(IsReady());
+ DCHECK(dim != nullptr);
+
+ *dim = tensor_.dim_size(dim_index);
+ }
+
+ return Status::OK();
+}
+
+Status TensorHandle::RemoteAddress(int64* op_id, int32* output_num) {
if (!IsRemote()) {
return errors::FailedPrecondition(
"This TensorHandle refers to a local tensor handle");
@@ -122,7 +168,7 @@ void TensorHandle::SetTensorAndDevice(const tensorflow::Tensor& tensor,
tensorflow::Device* device,
tensorflow::Device* op_device) {
mutex_lock l(ctx_mutex_);
- DCHECK(node_id > 0 && !is_ready_)
+ DCHECK(node_id_ > 0 && !is_ready_)
<< "SetTensorAndDevice should be only called "
<< "on non-ready handles.";
is_ready_ = true;
@@ -189,6 +235,7 @@ Status TensorHandle::CopyToDevice(EagerContext* ctx, tensorflow::Device* dstd,
tensorflow::CopyTensor::ViaDMA("copy", src_device_context, dst_device_context,
srcd, dstd, tensorflow::AllocatorAttributes(),
tensorflow::AllocatorAttributes(), src, &dst,
+ 0 /*dev_to_dev_stream_index*/,
[&status, &n](const tensorflow::Status& s) {
status = s;
n.Notify();
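(WaitReady() is now just WaitForNode(node_id_, true); the new shape accessors call WaitForNode(remote_shape_node_id_, false) so they always wait for the remote shape to arrive instead of taking the is_ready_ fast path. The control flow, reduced to a sketch with stand-in types — not TF API:)

    #include <cstdint>

    struct HandleSketch {
      std::uint64_t node_id = 0;  // 0 means nothing is pending.
      bool is_ready = false;

      // return_if_is_ready gates the fast path: WaitReady() may skip the
      // executor wait once the handle is ready; shape readers may not.
      int WaitForNode(std::uint64_t id, bool return_if_is_ready) {
        if (id == 0) return 0;
        if (return_if_is_ready && is_ready) return 0;
        return BlockOnExecutor(id);  // Stand-in for executor->WaitFor(id).
      }
      int WaitReady() { return WaitForNode(node_id, /*return_if_is_ready=*/true); }

      int BlockOnExecutor(std::uint64_t) { return 0; }  // Elided.
    };
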
diff --git a/tensorflow/core/common_runtime/eager/tensor_handle.h b/tensorflow/core/common_runtime/eager/tensor_handle.h
index a3b7dd862e..1bc9c6531a 100644
--- a/tensorflow/core/common_runtime/eager/tensor_handle.h
+++ b/tensorflow/core/common_runtime/eager/tensor_handle.h
@@ -51,38 +51,41 @@ class TensorHandle : public core::RefCounted {
public:
TensorHandle(const Tensor& t, Device* d, Device* op_device, EagerContext* ctx)
: dtype(t.dtype()),
- node_id(0),
+ node_id_(0),
tensor_(t),
device_(d),
op_device_(op_device),
remote_op_id_(-1),
remote_output_num_(-1),
+ remote_shape_node_id_(-1),
ctx_(ctx),
is_ready_(true) {}
TensorHandle(uint64 node_id, DataType dtype, EagerContext* ctx)
: dtype(dtype),
- node_id(node_id),
+ node_id_(node_id),
tensor_(dtype),
device_(nullptr),
op_device_(nullptr),
remote_op_id_(-1),
remote_output_num_(-1),
+ remote_shape_node_id_(-1),
ctx_(ctx),
is_ready_(ctx == nullptr) {
- DCHECK_GT(node_id, 0);
+ DCHECK_GT(node_id_, 0);
}
// Remote tensor handle constructor.
- TensorHandle(uint64 op_id, int32 output_num, DataType dtype,
- std::function<void()> call_on_destroy, Device* d,
+ TensorHandle(int64 op_id, int32 output_num, uint64 remote_shape_node_id,
+ DataType dtype, std::function<void()> call_on_destroy, Device* d,
Device* op_device, EagerContext* ctx)
: dtype(dtype),
- node_id(0),
+ node_id_(0),
device_(d),
op_device_(op_device),
remote_op_id_(op_id),
remote_output_num_(output_num),
+ remote_shape_node_id_(remote_shape_node_id),
call_on_destroy_(std::move(call_on_destroy)),
ctx_(ctx),
is_ready_(true) {
@@ -106,8 +109,13 @@ class TensorHandle : public core::RefCounted {
tensorflow::Device** device,
tensorflow::Device** op_device);
+ Status Shape(tensorflow::TensorShape* shape);
+
+ Status NumDims(int* num_dims);
+ Status Dim(int dim_index, int64* dim);
+
// Return the op_id and output num if the handle refers to a remote tensor.
- Status RemoteAddress(uint64* op_id, int32* output_num);
+ Status RemoteAddress(int64* op_id, int32* output_num);
// Note that this can be called at most once, and only on non-ready handles,
// and makes them ready.
@@ -128,11 +136,22 @@ class TensorHandle : public core::RefCounted {
// ready.
const DataType dtype;
+ void SetRemoteShape(std::unique_ptr<TensorShape> remote_shape) {
+ remote_shape_ = std::move(remote_shape);
+ }
+
+ bool OnHostCPU() {
+ mutex_lock ml(ctx_mutex_);
+ return device_ == nullptr ||
+ (ctx_ == nullptr || ctx_->HostCPU() == device_);
+ }
+
private:
// If the contents of the Tensor pointed to by this handle are yet to be
// computed by an EagerNode, this function blocks until that computation is
// done and the handle is "ready".
Status WaitReady();
+ Status WaitForNode(uint64 node_id, bool return_if_is_ready);
bool IsReady();
@@ -140,7 +159,7 @@ class TensorHandle : public core::RefCounted {
// Id for the EagerNode that will compute the value pointed to by this handle.
// If the value is 0, the handle is already ready, but not vice-versa.
- const uint64 node_id;
+ const uint64 node_id_;
tensorflow::Tensor tensor_;
@@ -159,8 +178,10 @@ class TensorHandle : public core::RefCounted {
tensorflow::Device* op_device_;
// IDs required when this class is representing a remote tensor handle.
- const uint64 remote_op_id_;
+ const int64 remote_op_id_;
const int32 remote_output_num_;
+ std::unique_ptr<TensorShape> remote_shape_;
+ const uint64 remote_shape_node_id_;
// A callback that is executed when the class is destroyed.
//
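(For reference, the invariant implied by the constructors above — an observation from the initializer lists, not documented TF behavior: remote_op_id_ moved from uint64 to int64 precisely so that -1 can mean "not remote", which is the pair IsRemote() tests:)

    #include <cstdint>

    // Mirrors TensorHandle::IsRemote(): both remote ids are -1 sentinels on
    // local handles, so signed types are required to represent them.
    bool IsRemoteSketch(std::int64_t remote_op_id,
                        std::int32_t remote_output_num) {
      return remote_op_id >= 0 && remote_output_num >= 0;
    }
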
diff --git a/tensorflow/core/common_runtime/executor.cc b/tensorflow/core/common_runtime/executor.cc
index f7f2cdc14f..8096139d90 100644
--- a/tensorflow/core/common_runtime/executor.cc
+++ b/tensorflow/core/common_runtime/executor.cc
@@ -1966,17 +1966,9 @@ Status ExecutorState::ProcessOutputs(const NodeItem& item, OpKernelContext* ctx,
device_context = device_context_map_[node->id()];
}
- // Experimental: debugger (tfdb) access to intermediate node completion.
- if (item.num_outputs == 0 && impl_->params_.node_outputs_cb != nullptr) {
- // If the node has no output, invoke the callback with output slot set to
- // -1, signifying that this is a no-output node.
- s.Update(impl_->params_.node_outputs_cb(item.node->name(), -1, nullptr,
- false, ctx));
- }
-
for (int i = 0; i < item.num_outputs; ++i) {
const TensorValue val = ctx->release_output(i);
- if (*ctx->is_output_dead() || val.tensor == nullptr) {
+ if (val.tensor == nullptr) {
// Unless it's a Switch or a Recv, the node must produce a
// tensor value at i-th output.
if (!IsSwitch(node) && !IsRecv(node)) {
@@ -2018,13 +2010,6 @@ Status ExecutorState::ProcessOutputs(const NodeItem& item, OpKernelContext* ctx,
LogMemory::RecordTensorOutput(ctx->op_kernel().name(),
ctx->step_id(), i, to_log);
}
-
- // Experimental: debugger (tfdb) access to intermediate node
- // outputs.
- if (impl_->params_.node_outputs_cb != nullptr) {
- s.Update(impl_->params_.node_outputs_cb(item.node->name(), i,
- out->ref, true, ctx));
- }
} else {
// NOTE that std::move is used here, so val.tensor goes to
// uninitialized state (val.tensor->IsInitialized return false).
@@ -2036,12 +2021,6 @@ Status ExecutorState::ProcessOutputs(const NodeItem& item, OpKernelContext* ctx,
LogMemory::RecordTensorOutput(ctx->op_kernel().name(),
ctx->step_id(), i, *out->val);
}
-
- // Experimental: debugger access to intermediate node outputs.
- if (impl_->params_.node_outputs_cb != nullptr) {
- s.Update(impl_->params_.node_outputs_cb(
- item.node->name(), i, out->val.get(), false, ctx));
- }
}
} else {
s.Update(errors::Internal("Output ", i, " of type ",
diff --git a/tensorflow/core/common_runtime/executor.h b/tensorflow/core/common_runtime/executor.h
index e5d7b7c53c..cd01b43aea 100644
--- a/tensorflow/core/common_runtime/executor.h
+++ b/tensorflow/core/common_runtime/executor.h
@@ -103,7 +103,6 @@ class Executor {
const Tensor* tensor, const bool is_ref,
OpKernelContext* ctx)>
NodeOutputsCallback;
- NodeOutputsCallback node_outputs_cb = nullptr;
};
typedef std::function<void(const Status&)> DoneCallback;
virtual void RunAsync(const Args& args, DoneCallback done) = 0;
@@ -139,8 +138,6 @@ struct LocalExecutorParams {
// when the executor is deleted.
std::function<Status(const NodeDef&, OpKernel**)> create_kernel;
std::function<void(OpKernel*)> delete_kernel;
-
- Executor::Args::NodeOutputsCallback node_outputs_cb;
};
::tensorflow::Status NewLocalExecutor(const LocalExecutorParams& params,
std::unique_ptr<const Graph> graph,
diff --git a/tensorflow/core/common_runtime/function.cc b/tensorflow/core/common_runtime/function.cc
index 6d8cea8297..a93cfa2ec5 100644
--- a/tensorflow/core/common_runtime/function.cc
+++ b/tensorflow/core/common_runtime/function.cc
@@ -399,12 +399,11 @@ Status FunctionLibraryRuntimeImpl::CreateKernel(
// types.
MemoryTypeVector input_memory_types;
for (const auto& t : fbody->arg_types) {
- input_memory_types.push_back(
- (t == DT_INT32 || t == DT_RESOURCE) ? HOST_MEMORY : DEVICE_MEMORY);
+ input_memory_types.push_back(MTypeFromDType(t));
}
MemoryTypeVector output_memory_types;
for (const auto& t : fbody->ret_types) {
- output_memory_types.push_back(t == DT_INT32 ? HOST_MEMORY : DEVICE_MEMORY);
+ output_memory_types.push_back(MTypeFromDType(t));
}
// Constructs a CallOp kernel for running the instantiated function.
@@ -728,6 +727,25 @@ void FunctionLibraryRuntimeImpl::RunRemote(const Options& opts, Handle handle,
return;
}
+ std::vector<AllocatorAttributes> args_alloc_attrs, rets_alloc_attrs;
+ args_alloc_attrs.reserve(fbody->arg_types.size());
+ rets_alloc_attrs.reserve(fbody->ret_types.size());
+ // Note: Functions assume that int32s are always in host memory.
+ for (const auto& arg_type : fbody->arg_types) {
+ AllocatorAttributes arg_alloc_attrs;
+ if (MTypeFromDType(arg_type) == HOST_MEMORY) {
+ arg_alloc_attrs.set_on_host(true);
+ }
+ args_alloc_attrs.push_back(arg_alloc_attrs);
+ }
+ for (const auto& ret_type : fbody->ret_types) {
+ AllocatorAttributes ret_alloc_attrs;
+ if (MTypeFromDType(ret_type) == HOST_MEMORY) {
+ ret_alloc_attrs.set_on_host(true);
+ }
+ rets_alloc_attrs.push_back(ret_alloc_attrs);
+ }
+
// The ProcFLR sends the arguments to the function from the source_device to
// the target_device. So here we receive those arguments. Similarly, when the
// computation is done and stored in *rets, we send the return values back
@@ -735,10 +753,10 @@ void FunctionLibraryRuntimeImpl::RunRemote(const Options& opts, Handle handle,
std::vector<Tensor>* remote_args = new std::vector<Tensor>;
ProcessFunctionLibraryRuntime::ReceiveTensorsAsync(
source_device, target_device, "arg_", src_incarnation, args.size(),
- device_context, {}, rendezvous, remote_args,
+ device_context, args_alloc_attrs, rendezvous, remote_args,
[frame, remote_args, item, source_device, target_device,
- target_incarnation, rendezvous, device_context, rets, done,
- exec_args](const Status& status) {
+ target_incarnation, rendezvous, device_context, rets, done, exec_args,
+ rets_alloc_attrs](const Status& status) {
Status s = status;
if (s.ok()) {
s = frame->SetArgs(*remote_args);
@@ -751,9 +769,10 @@ void FunctionLibraryRuntimeImpl::RunRemote(const Options& opts, Handle handle,
return;
}
item->exec->RunAsync(
- *exec_args, [frame, rets, done, source_device, target_device,
- target_incarnation, rendezvous, device_context,
- remote_args, exec_args](const Status& status) {
+ *exec_args,
+ [frame, rets, done, source_device, target_device,
+ target_incarnation, rendezvous, device_context, remote_args,
+ exec_args, rets_alloc_attrs](const Status& status) {
Status s = status;
if (s.ok()) {
s = frame->ConsumeRetvals(rets);
@@ -767,7 +786,7 @@ void FunctionLibraryRuntimeImpl::RunRemote(const Options& opts, Handle handle,
}
s = ProcessFunctionLibraryRuntime::SendTensors(
target_device, source_device, "ret_", target_incarnation,
- *rets, device_context, {}, rendezvous);
+ *rets, device_context, rets_alloc_attrs, rendezvous);
delete remote_args;
delete exec_args;
done(s);
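(The new alloc-attrs plumbing above is mechanical: one AllocatorAttributes per argument and per return value, marked on-host whenever the dtype's memory type is HOST_MEMORY. A stripped-down sketch with stand-in types:)

    #include <vector>

    enum MemoryType { DEVICE_MEMORY, HOST_MEMORY };

    struct AllocatorAttributes {
      bool on_host = false;
      void set_on_host(bool v) { on_host = v; }
    };

    // Mirrors the new RunRemote logic: each arg/ret gets attributes marked
    // on-host iff its dtype's memory type is HOST_MEMORY (e.g. int32).
    std::vector<AllocatorAttributes> AttrsFor(
        const std::vector<MemoryType>& types) {
      std::vector<AllocatorAttributes> attrs;
      attrs.reserve(types.size());
      for (MemoryType t : types) {
        AllocatorAttributes a;
        if (t == HOST_MEMORY) a.set_on_host(true);
        attrs.push_back(a);
      }
      return attrs;
    }
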
diff --git a/tensorflow/core/common_runtime/gpu/cuda_host_allocator.h b/tensorflow/core/common_runtime/gpu/cuda_host_allocator.h
new file mode 100644
index 0000000000..636cd43575
--- /dev/null
+++ b/tensorflow/core/common_runtime/gpu/cuda_host_allocator.h
@@ -0,0 +1,60 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_GPU_CUDA_HOST_ALLOCATOR_H_
+#define TENSORFLOW_CORE_COMMON_RUNTIME_GPU_CUDA_HOST_ALLOCATOR_H_
+
+#include "tensorflow/core/framework/allocator.h"
+#include "tensorflow/core/platform/macros.h"
+#include "tensorflow/core/platform/stream_executor.h"
+
+namespace tensorflow {
+// Allocator for pinned CPU RAM that is made known to CUDA for the
+// purpose of efficient DMA with a GPU.
+class CUDAHostAllocator : public SubAllocator {
+ public:
+ // Note: stream_exec cannot be null.
+ explicit CUDAHostAllocator(se::StreamExecutor* stream_exec)
+ : stream_exec_(stream_exec) {
+ CHECK(stream_exec_ != nullptr);
+ }
+ ~CUDAHostAllocator() override {}
+
+ void* Alloc(size_t alignment, size_t num_bytes) override {
+ void* ptr = nullptr;
+ if (num_bytes > 0) {
+ ptr = stream_exec_->HostMemoryAllocate(num_bytes);
+ if (ptr == nullptr) {
+ LOG(WARNING) << "could not allocate pinned host memory of size: "
+ << num_bytes;
+ }
+ }
+ return ptr;
+ }
+
+ void Free(void* ptr, size_t num_bytes) override {
+ if (ptr != nullptr) {
+ stream_exec_->HostMemoryDeallocate(ptr);
+ }
+ }
+
+ private:
+ se::StreamExecutor* stream_exec_; // not owned, non-null
+
+ TF_DISALLOW_COPY_AND_ASSIGN(CUDAHostAllocator);
+};
+
+} // namespace tensorflow
+#endif // TENSORFLOW_CORE_COMMON_RUNTIME_GPU_CUDA_HOST_ALLOCATOR_H_
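(A minimal usage sketch for the new allocator, assuming `executor` is a live se::StreamExecutor for some GPU; how it is obtained is elided, and the Demo function itself is hypothetical:)

    #include "tensorflow/core/common_runtime/gpu/cuda_host_allocator.h"

    void Demo(stream_executor::StreamExecutor* executor) {
      tensorflow::CUDAHostAllocator alloc(executor);  // CHECK-fails on nullptr.
      void* pinned = alloc.Alloc(/*alignment=*/64, /*num_bytes=*/1 << 20);
      if (pinned != nullptr) {
        // DMA to/from the GPU is efficient for this pinned region.
        alloc.Free(pinned, 1 << 20);
      }
    }
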
diff --git a/tensorflow/core/common_runtime/gpu/gpu_device.cc b/tensorflow/core/common_runtime/gpu/gpu_device.cc
index bee5627636..3cb51b0dbc 100644
--- a/tensorflow/core/common_runtime/gpu/gpu_device.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_device.cc
@@ -36,9 +36,9 @@ limitations under the License.
#include "tensorflow/core/common_runtime/gpu/gpu_id_manager.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id_utils.h"
#include "tensorflow/core/common_runtime/gpu/gpu_init.h"
+#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/common_runtime/gpu/gpu_stream_util.h"
#include "tensorflow/core/common_runtime/gpu/gpu_util.h"
-#include "tensorflow/core/common_runtime/gpu/process_state.h"
#include "tensorflow/core/common_runtime/gpu_device_context.h"
#include "tensorflow/core/common_runtime/local_device.h"
#include "tensorflow/core/framework/allocator.h"
@@ -201,7 +201,8 @@ class BaseGPUDevice::StreamGroupFactory {
// This function is thread safe.
BaseGPUDevice::StreamGroup* GetOrCreate(TfGpuId tf_gpu_id,
int stream_group_within_gpu,
- se::StreamExecutor* executor) {
+ se::StreamExecutor* executor,
+ const GPUOptions& options) {
mutex_lock guard(lock_);
StreamGroup* group =
&streams_[key_type(tf_gpu_id.value(), stream_group_within_gpu)];
@@ -221,10 +222,21 @@ class BaseGPUDevice::StreamGroupFactory {
VLOG(2) << "Created device_to_host_stream[" << stream_group_within_gpu
<< "] = " << group->device_to_host;
- group->device_to_device = new se::Stream(executor);
- group->device_to_device->Init();
- VLOG(2) << "Created device_to_device_stream[" << stream_group_within_gpu
- << "] = " << group->device_to_host;
+ int num_d2d_streams =
+ options.experimental().num_dev_to_dev_copy_streams();
+ if (num_d2d_streams < 1 || num_d2d_streams > 4) {
+ LOG(ERROR)
+ << "Illegal GPUOptions.experimental.num_dev_to_dev_copy_streams="
+ << num_d2d_streams << ", setting it to 1 instead.";
+ num_d2d_streams = 1;
+ }
+ for (int i = 0; i < num_d2d_streams; ++i) {
+ se::Stream* stream = new se::Stream(executor);
+ stream->Init();
+ group->device_to_device.push_back(stream);
+ VLOG(2) << "Created device_to_device_stream[" << stream_group_within_gpu
+ << "] = " << group->device_to_device.back();
+ }
}
return group;
}
@@ -262,7 +274,7 @@ BaseGPUDevice::BaseGPUDevice(const SessionOptions& options, const string& name,
tf_gpu_id_(tf_gpu_id),
sync_every_op_(sync_every_op),
max_streams_(max_streams) {
- ProcessState::singleton()->EnableGPUDevice();
+ GPUProcessState::singleton()->EnableGPUDevice();
}
BaseGPUDevice::~BaseGPUDevice() {
@@ -287,8 +299,8 @@ Status BaseGPUDevice::Init(const SessionOptions& options) {
// Create the specified number of GPU streams
for (int i = 0; i < max_streams_; i++) {
- streams_.push_back(
- StreamGroupFactory::Global().GetOrCreate(tf_gpu_id_, i, executor_));
+ streams_.push_back(StreamGroupFactory::Global().GetOrCreate(
+ tf_gpu_id_, i, executor_, options.config.gpu_options()));
size_t scratch_buffer_size = Eigen::kCudaScratchSize + sizeof(unsigned int);
void* scratch_buffer = gpu_allocator_->AllocateRaw(
@@ -1060,7 +1072,7 @@ Status BaseGPUDeviceFactory::CreateGPUDevice(const SessionOptions& options,
se::StreamExecutor* se =
GpuIdUtil::ExecutorForCudaGpuId(cuda_gpu_id).ValueOrDie();
const se::DeviceDescription& desc = se->GetDeviceDescription();
- ProcessState* process_state = ProcessState::singleton();
+ GPUProcessState* process_state = GPUProcessState::singleton();
Allocator* gpu_allocator = process_state->GetGPUAllocator(
options.config.gpu_options(), tf_gpu_id, memory_limit);
if (gpu_allocator == nullptr) {
@@ -1080,7 +1092,7 @@ Status BaseGPUDeviceFactory::CreateGPUDevice(const SessionOptions& options,
BaseGPUDevice* gpu_device = CreateGPUDevice(
options, device_name, static_cast<Bytes>(stats.bytes_limit), dev_locality,
tf_gpu_id, GetShortDeviceDescription(cuda_gpu_id, desc), gpu_allocator,
- process_state->GetCPUAllocator(numa_node));
+ ProcessState::singleton()->GetCPUAllocator(numa_node));
LOG(INFO) << "Created TensorFlow device (" << device_name << " with "
<< (stats.bytes_limit >> 20) << " MB memory) -> physical GPU ("
<< GetShortDeviceDescription(cuda_gpu_id, desc) << ")";
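(The stream-count handling above clamps rather than rejects: any GPUOptions.experimental.num_dev_to_dev_copy_streams outside [1, 4] is logged and replaced with 1, so a bad config degrades gracefully. The same logic in isolation:)

    #include <iostream>

    // Mirrors the new stream-count handling: out-of-range values are logged
    // and clamped to 1 rather than treated as fatal.
    int ClampD2DStreams(int requested) {
      if (requested < 1 || requested > 4) {
        std::cerr << "Illegal num_dev_to_dev_copy_streams=" << requested
                  << ", using 1 instead.\n";
        return 1;
      }
      return requested;
    }

    int main() {
      std::cout << ClampD2DStreams(0) << " " << ClampD2DStreams(3) << "\n";  // 1 3
    }
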
diff --git a/tensorflow/core/common_runtime/gpu/gpu_device.h b/tensorflow/core/common_runtime/gpu/gpu_device.h
index 737a3515b6..56d03d7a8c 100644
--- a/tensorflow/core/common_runtime/gpu/gpu_device.h
+++ b/tensorflow/core/common_runtime/gpu/gpu_device.h
@@ -39,6 +39,7 @@ limitations under the License.
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/platform/types.h"
@@ -119,7 +120,7 @@ class BaseGPUDevice : public LocalDevice {
se::Stream* compute = nullptr;
se::Stream* host_to_device = nullptr;
se::Stream* device_to_host = nullptr;
- se::Stream* device_to_device = nullptr;
+ gtl::InlinedVector<se::Stream*, 4> device_to_device;
};
class StreamGroupFactory;
diff --git a/tensorflow/core/common_runtime/gpu/gpu_device_factory.cc b/tensorflow/core/common_runtime/gpu/gpu_device_factory.cc
index 9a000749c6..e1aaf95df6 100644
--- a/tensorflow/core/common_runtime/gpu/gpu_device_factory.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_device_factory.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/core/common_runtime/gpu/gpu_device.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id.h"
-#include "tensorflow/core/common_runtime/gpu/process_state.h"
+#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
namespace tensorflow {
@@ -40,9 +40,10 @@ class GPUDevice : public BaseGPUDevice {
}
Allocator* GetAllocator(AllocatorAttributes attr) override {
+ CHECK(cpu_allocator_) << "bad place 1";
if (attr.on_host()) {
if (attr.gpu_compatible() || force_gpu_compatible_) {
- ProcessState* ps = ProcessState::singleton();
+ GPUProcessState* ps = GPUProcessState::singleton();
return ps->GetCUDAHostAllocator(0);
} else {
return cpu_allocator_;
@@ -90,7 +91,7 @@ class GPUCompatibleCPUDevice : public ThreadPoolDevice {
~GPUCompatibleCPUDevice() override {}
Allocator* GetAllocator(AllocatorAttributes attr) override {
- ProcessState* ps = ProcessState::singleton();
+ GPUProcessState* ps = GPUProcessState::singleton();
if (attr.gpu_compatible() || force_gpu_compatible_) {
return ps->GetCUDAHostAllocator(0);
} else {
diff --git a/tensorflow/core/common_runtime/gpu/gpu_device_test.cc b/tensorflow/core/common_runtime/gpu/gpu_device_test.cc
index 5c6cb43eff..daf59f0560 100644
--- a/tensorflow/core/common_runtime/gpu/gpu_device_test.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_device_test.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/core/common_runtime/gpu/gpu_id_utils.h"
#include "tensorflow/core/common_runtime/gpu/gpu_init.h"
-#include "tensorflow/core/common_runtime/gpu/process_state.h"
+#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
@@ -58,7 +58,7 @@ void ExpectErrorMessageSubstr(const Status& s, StringPiece substr) {
class GPUDeviceTest : public ::testing::Test {
public:
- void TearDown() override { ProcessState::singleton()->TestOnlyReset(); }
+ void TearDown() override { GPUProcessState::singleton()->TestOnlyReset(); }
protected:
static SessionOptions MakeSessionOptions(
diff --git a/tensorflow/core/common_runtime/gpu/process_state.cc b/tensorflow/core/common_runtime/gpu/gpu_process_state.cc
index 2b442071e2..b18688174d 100644
--- a/tensorflow/core/common_runtime/gpu/process_state.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_process_state.cc
@@ -13,11 +13,12 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include "tensorflow/core/common_runtime/gpu/process_state.h"
+#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include <cstring>
#include <vector>
+#include "tensorflow/core/common_runtime/gpu/cuda_host_allocator.h"
#include "tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.h"
#include "tensorflow/core/common_runtime/gpu/gpu_cudamalloc_allocator.h"
#include "tensorflow/core/common_runtime/gpu/gpu_debug_allocator.h"
@@ -25,7 +26,7 @@ limitations under the License.
#include "tensorflow/core/common_runtime/gpu/gpu_id_manager.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id_utils.h"
#include "tensorflow/core/common_runtime/gpu/gpu_init.h"
-#include "tensorflow/core/common_runtime/gpu/pool_allocator.h"
+#include "tensorflow/core/common_runtime/pool_allocator.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/tracking_allocator.h"
@@ -37,19 +38,6 @@ limitations under the License.
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/env_var.h"
-// If these flags need to be runtime configurable, consider adding
-// options to ConfigProto.
-
-// If true, register CPU RAM used to copy to/from GPU RAM with the
-// CUDA driver.
-const bool FLAGS_brain_mem_reg_cuda_dma = true;
-
-// If true, record attributes of memory allocations and
-// dynamically check for appropriate use of registered memory.
-// Should only be true for debugging or diagnosis of
-// performance issues.
-const bool FLAGS_brain_gpu_record_mem_types = false;
-
namespace tensorflow {
namespace {
@@ -67,46 +55,37 @@ bool useCudaMemoryGuardAllocator() {
} // namespace
-ProcessState* ProcessState::instance_ = nullptr;
+GPUProcessState* GPUProcessState::instance_ = nullptr;
-/*static*/ ProcessState* ProcessState::singleton() {
+/*static*/ GPUProcessState* GPUProcessState::singleton() {
if (instance_ == nullptr) {
- instance_ = new ProcessState;
+ instance_ = new GPUProcessState;
}
+ CHECK(instance_->process_state_);
return instance_;
}
-ProcessState::ProcessState() : gpu_device_enabled_(false) {
+GPUProcessState::GPUProcessState() : gpu_device_enabled_(false) {
CHECK(instance_ == nullptr);
instance_ = this;
+ process_state_ = ProcessState::singleton();
}
-ProcessState::~ProcessState() {
+// Normally the GPUProcessState singleton is never explicitly deleted.
+// This function is defined for debugging problems with the allocators.
+GPUProcessState::~GPUProcessState() {
+ CHECK_EQ(this, instance_);
for (auto p : gpu_allocators_) {
delete p;
}
instance_ = nullptr;
}
-string ProcessState::MemDesc::DebugString() {
- return strings::StrCat((loc == CPU ? "CPU " : "GPU "), dev_index,
- ", dma: ", gpu_registered, ", nic: ", nic_registered);
-}
-
-ProcessState::MemDesc ProcessState::PtrType(const void* ptr) {
- if (FLAGS_brain_gpu_record_mem_types) {
- auto iter = mem_desc_map_.find(ptr);
- if (iter != mem_desc_map_.end()) {
- return iter->second;
- }
- }
- return MemDesc();
-}
-
-Allocator* ProcessState::GetGPUAllocator(const GPUOptions& options,
- TfGpuId tf_gpu_id,
- size_t total_bytes) {
+Allocator* GPUProcessState::GetGPUAllocator(const GPUOptions& options,
+ TfGpuId tf_gpu_id,
+ size_t total_bytes) {
+ CHECK(process_state_);
#if GOOGLE_CUDA
const string& allocator_type = options.allocator_type();
mutex_lock lock(mu_);
@@ -114,7 +93,8 @@ Allocator* ProcessState::GetGPUAllocator(const GPUOptions& options,
if (tf_gpu_id.value() >= static_cast<int64>(gpu_allocators_.size())) {
gpu_allocators_.resize(tf_gpu_id.value() + 1);
- if (FLAGS_brain_gpu_record_mem_types) gpu_al_.resize(tf_gpu_id.value() + 1);
+ if (process_state_->ProcessState::FLAGS_brain_gpu_record_mem_types)
+ gpu_al_.resize(tf_gpu_id.value() + 1);
}
if (gpu_allocators_[tf_gpu_id.value()] == nullptr) {
@@ -155,9 +135,9 @@ Allocator* ProcessState::GetGPUAllocator(const GPUOptions& options,
gpu_allocator->AddAllocVisitor(v);
}
}
- if (FLAGS_brain_gpu_record_mem_types) {
- MemDesc md;
- md.loc = MemDesc::GPU;
+ if (process_state_->ProcessState::FLAGS_brain_gpu_record_mem_types) {
+ ProcessState::MemDesc md;
+ md.loc = ProcessState::MemDesc::GPU;
md.dev_index = cuda_gpu_id.value();
md.gpu_registered = false;
md.nic_registered = true;
@@ -165,10 +145,11 @@ Allocator* ProcessState::GetGPUAllocator(const GPUOptions& options,
gpu_al_.resize(tf_gpu_id.value() + 1);
}
gpu_al_[tf_gpu_id.value()] = new internal::RecordingAllocator(
- &mem_desc_map_, gpu_allocator, md, &mu_);
+ &process_state_->mem_desc_map_, gpu_allocator, md, &mu_);
}
}
- if (FLAGS_brain_gpu_record_mem_types) return gpu_al_[tf_gpu_id.value()];
+ if (process_state_->ProcessState::FLAGS_brain_gpu_record_mem_types)
+ return gpu_al_[tf_gpu_id.value()];
return gpu_allocators_[tf_gpu_id.value()];
#else
LOG(FATAL) << "GPUAllocator unavailable. Not compiled with --config=cuda.";
@@ -176,64 +157,13 @@ Allocator* ProcessState::GetGPUAllocator(const GPUOptions& options,
#endif // GOOGLE_CUDA
}
-Allocator* ProcessState::GetCPUAllocator(int numa_node) {
- // Although we're temporarily ignoring numa_node, check for legality.
- CHECK_GE(numa_node, 0);
- // TODO(tucker): actually maintain separate CPUAllocators for
- // different numa_nodes. For now, just one.
- numa_node = 0;
- mutex_lock lock(mu_);
- while (cpu_allocators_.size() <= static_cast<size_t>(numa_node)) {
- bool use_bfc_allocator = false;
- // TODO(reedwm): Switch default to BGFAllocator if it's at least as fast and
- // efficient.
- Status status = ReadBoolFromEnvVar("TF_CPU_ALLOCATOR_USE_BFC", false,
- &use_bfc_allocator);
- if (!status.ok()) {
- LOG(ERROR) << "GetCPUAllocator: " << status.error_message();
- }
- VisitableAllocator* allocator;
- if (use_bfc_allocator) {
- // TODO(reedwm): evaluate whether 64GB by default is the best choice.
- int64 cpu_mem_limit_in_mb = -1;
- Status status = ReadInt64FromEnvVar("TF_CPU_BFC_MEM_LIMIT_IN_MB",
- 1LL << 16 /*64GB max by default*/,
- &cpu_mem_limit_in_mb);
- if (!status.ok()) {
- LOG(ERROR) << "GetCPUAllocator: " << status.error_message();
- }
- int64 cpu_mem_limit = cpu_mem_limit_in_mb * (1LL << 20);
- allocator = new BFCAllocator(new BasicCPUAllocator(), cpu_mem_limit,
- true /*allow_growth*/,
- "bfc_cpu_allocator_for_gpu" /*name*/);
- VLOG(2) << "Using BFCAllocator with memory limit of "
- << cpu_mem_limit_in_mb << " MB for ProcessState CPU allocator";
- } else {
- allocator = new PoolAllocator(
- 100 /*pool_size_limit*/, true /*auto_resize*/,
- new BasicCPUAllocator(), new NoopRounder, "cpu_pool");
- VLOG(2) << "Using PoolAllocator for ProcessState CPU allocator";
- }
- if (LogMemory::IsEnabled()) {
- // Wrap the allocator to track allocation ids for better logging
- // at the cost of performance.
- allocator = new TrackingVisitableAllocator(allocator, true);
- }
- cpu_allocators_.push_back(allocator);
+Allocator* GPUProcessState::GetCUDAHostAllocator(int numa_node) {
+ CHECK(process_state_);
+ if (!HasGPUDevice() ||
+ !process_state_->ProcessState::FLAGS_brain_mem_reg_cuda_dma) {
+ return process_state_->GetCPUAllocator(numa_node);
}
- return cpu_allocators_[0];
-}
-
-Allocator* ProcessState::GetCUDAHostAllocator(int numa_node) {
- if (!HasGPUDevice() || !FLAGS_brain_mem_reg_cuda_dma) {
- return cpu_allocator();
- }
- // Although we're temporarily ignoring numa_node, check for legality.
CHECK_GE(numa_node, 0);
- // TODO(tucker): actually maintain separate CPUAllocators for
- // different numa_nodes. For now, just one.
- numa_node = 0;
-
{
// Here we optimize the most common use case where cuda_host_allocators_
// and cuda_al_ have already been populated and since we're only reading
@@ -241,7 +171,7 @@ Allocator* ProcessState::GetCUDAHostAllocator(int numa_node) {
// we take a unique lock and populate these vectors.
tf_shared_lock lock(mu_);
- if (FLAGS_brain_gpu_record_mem_types &&
+ if (process_state_->ProcessState::FLAGS_brain_gpu_record_mem_types &&
static_cast<int>(cuda_al_.size()) > 0) {
return cuda_al_[0];
}
@@ -288,21 +218,25 @@ Allocator* ProcessState::GetCUDAHostAllocator(int numa_node) {
allocator = new TrackingVisitableAllocator(allocator, true);
}
cuda_host_allocators_.push_back(allocator);
- if (FLAGS_brain_gpu_record_mem_types) {
- MemDesc md;
- md.loc = MemDesc::CPU;
+ if (process_state_->ProcessState::FLAGS_brain_gpu_record_mem_types) {
+ ProcessState::MemDesc md;
+ md.loc = ProcessState::MemDesc::CPU;
md.dev_index = 0;
md.gpu_registered = true;
md.nic_registered = false;
cuda_al_.push_back(new internal::RecordingAllocator(
- &mem_desc_map_, cuda_host_allocators_.back(), md, &mu_));
+ &process_state_->mem_desc_map_, cuda_host_allocators_.back(), md,
+ &mu_));
}
}
- if (FLAGS_brain_gpu_record_mem_types) return cuda_al_[0];
+ if (process_state_->ProcessState::FLAGS_brain_gpu_record_mem_types)
+ return cuda_al_[0];
return cuda_host_allocators_[0];
}
-void ProcessState::AddGPUAllocVisitor(int bus_id, AllocVisitor visitor) {
+void GPUProcessState::AddGPUAllocVisitor(int bus_id,
+ const AllocVisitor& visitor) {
+ CHECK(process_state_);
#if GOOGLE_CUDA
mutex_lock lock(mu_);
for (int i = 0; i < static_cast<int64>(gpu_allocators_.size()); ++i) {
@@ -320,17 +254,17 @@ void ProcessState::AddGPUAllocVisitor(int bus_id, AllocVisitor visitor) {
#endif // GOOGLE_CUDA
}
-void ProcessState::TestOnlyReset() {
- mutex_lock lock(mu_);
- gpu_device_enabled_ = false;
- gpu_visitors_.clear();
- mem_desc_map_.clear();
- gtl::STLDeleteElements(&cpu_allocators_);
- gtl::STLDeleteElements(&gpu_allocators_);
- gtl::STLDeleteElements(&cuda_host_allocators_);
- gtl::STLDeleteElements(&cpu_al_);
- gtl::STLDeleteElements(&gpu_al_);
- gtl::STLDeleteElements(&cuda_al_);
+void GPUProcessState::TestOnlyReset() {
+ process_state_->ProcessState::TestOnlyReset();
+ {
+ mutex_lock lock(mu_);
+ gpu_device_enabled_ = false;
+ gpu_visitors_.clear();
+ gtl::STLDeleteElements(&gpu_allocators_);
+ gtl::STLDeleteElements(&cuda_host_allocators_);
+ gtl::STLDeleteElements(&gpu_al_);
+ gtl::STLDeleteElements(&cuda_al_);
+ }
}
} // namespace tensorflow
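(In miniature, the shape of the split introduced here — names mirror the diff, bodies are stand-ins: GPU-specific state gets its own singleton that keeps a non-owning pointer to the base ProcessState and delegates CPU-side allocation to it:)

    class ProcessState {
     public:
      static ProcessState* singleton() {
        static ProcessState* p = new ProcessState;
        return p;
      }
      void* GetCPUAllocator(int /*numa_node*/) { return nullptr; }  // Stand-in.
    };

    class GPUProcessState {
     public:
      static GPUProcessState* singleton() {
        static GPUProcessState* g = new GPUProcessState;
        return g;
      }
      void* GetCUDAHostAllocator(int numa_node) {
        // Falls back to the plain CPU allocator when pinned memory is not
        // applicable, as the rewritten GetCUDAHostAllocator() does.
        return process_state_->GetCPUAllocator(numa_node);
      }
     private:
      GPUProcessState() : process_state_(ProcessState::singleton()) {}
      ProcessState* process_state_;  // Not owned.
    };
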
diff --git a/tensorflow/core/common_runtime/gpu/process_state.h b/tensorflow/core/common_runtime/gpu/gpu_process_state.h
index bc2c4182d7..cb41c3c6bd 100644
--- a/tensorflow/core/common_runtime/gpu/process_state.h
+++ b/tensorflow/core/common_runtime/gpu/gpu_process_state.h
@@ -1,4 +1,4 @@
-/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_COMMON_RUNTIME_GPU_PROCESS_STATE_H_
-#define TENSORFLOW_COMMON_RUNTIME_GPU_PROCESS_STATE_H_
+#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_GPU_GPU_PROCESS_STATE_H_
+#define TENSORFLOW_CORE_COMMON_RUNTIME_GPU_GPU_PROCESS_STATE_H_
#include <functional>
#include <map>
@@ -22,6 +22,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/core/common_runtime/gpu/gpu_id.h"
+#include "tensorflow/core/common_runtime/process_state.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
@@ -34,27 +35,10 @@ class Allocator;
class VisitableAllocator;
class PoolAllocator;
-// Singleton that manages per-process state, e.g. allocation
-// of shared resources.
-class ProcessState {
+// Singleton that manages per-process state when GPUs are present.
+class GPUProcessState {
public:
- static ProcessState* singleton();
-
- // Descriptor for memory allocation attributes, used by optional
- // runtime correctness analysis logic.
- struct MemDesc {
- enum MemLoc { CPU, GPU };
- MemLoc loc;
- int dev_index;
- bool gpu_registered;
- bool nic_registered;
- MemDesc()
- : loc(CPU),
- dev_index(0),
- gpu_registered(false),
- nic_registered(false) {}
- string DebugString();
- };
+ static GPUProcessState* singleton();
// Query whether any GPU device has been created so far.
// Disable thread safety analysis since a race is benign here.
@@ -68,14 +52,6 @@ class ProcessState {
gpu_device_enabled_ = true;
}
- // Returns what we know about the memory at ptr.
- // If we know nothing, it's called CPU 0 with no other attributes.
- MemDesc PtrType(const void* ptr);
-
- // Returns the one CPUAllocator used for the given numa_node.
- // TEMPORARY: ignores numa_node.
- Allocator* GetCPUAllocator(int numa_node);
-
// Returns the one GPU allocator used for the indexed GPU.
// Note that this is a system GPU index, not (necessarily) a brain
// device index.
@@ -107,69 +83,39 @@ class ProcessState {
// the index of one of the PCIe buses. If the bus_id is invalid,
// results are undefined.
typedef std::function<void(void*, size_t)> AllocVisitor;
- virtual void AddGPUAllocVisitor(int bus_id, AllocVisitor visitor);
-
- typedef std::unordered_map<const void*, MemDesc> MDMap;
+ virtual void AddGPUAllocVisitor(int bus_id, const AllocVisitor& visitor);
protected:
- ProcessState();
+ GPUProcessState();
// Helper method for unit tests to reset the ProcessState singleton by
// cleaning up everything. Never use in production.
virtual void TestOnlyReset();
- static ProcessState* instance_;
+ ProcessState::MDMap* mem_desc_map() {
+ if (process_state_) return &process_state_->mem_desc_map_;
+ return nullptr;
+ }
+
+ static GPUProcessState* instance_;
+ ProcessState* process_state_; // Not owned.
bool gpu_device_enabled_;
mutex mu_;
- std::vector<Allocator*> cpu_allocators_ GUARDED_BY(mu_);
std::vector<VisitableAllocator*> gpu_allocators_ GUARDED_BY(mu_);
std::vector<std::vector<AllocVisitor>> gpu_visitors_ GUARDED_BY(mu_);
std::vector<Allocator*> cuda_host_allocators_ GUARDED_BY(mu_);
- virtual ~ProcessState();
+ virtual ~GPUProcessState();
// Optional RecordingAllocators that wrap the corresponding
// Allocators for runtime attribute use analysis.
- MDMap mem_desc_map_;
- std::vector<Allocator*> cpu_al_ GUARDED_BY(mu_);
std::vector<Allocator*> gpu_al_ GUARDED_BY(mu_);
std::vector<Allocator*> cuda_al_ GUARDED_BY(mu_);
friend class GPUDeviceTest;
};
-namespace internal {
-class RecordingAllocator : public Allocator {
- public:
- RecordingAllocator(ProcessState::MDMap* mm, Allocator* a,
- ProcessState::MemDesc md, mutex* mu)
- : mm_(mm), a_(a), md_(md), mu_(mu) {}
-
- string Name() override { return a_->Name(); }
- void* AllocateRaw(size_t alignment, size_t num_bytes) override {
- void* p = a_->AllocateRaw(alignment, num_bytes);
- mutex_lock l(*mu_);
- (*mm_)[p] = md_;
- return p;
- }
- void DeallocateRaw(void* p) override {
- mutex_lock l(*mu_);
- auto iter = mm_->find(p);
- mm_->erase(iter);
- a_->DeallocateRaw(p);
- }
- bool TracksAllocationSizes() override { return a_->TracksAllocationSizes(); }
- size_t RequestedSize(const void* p) override { return a_->RequestedSize(p); }
- size_t AllocatedSize(const void* p) override { return a_->AllocatedSize(p); }
- void GetStats(AllocatorStats* stats) override { a_->GetStats(stats); }
- void ClearStats() override { a_->ClearStats(); }
- ProcessState::MDMap* mm_; // not owned
- Allocator* a_; // not owned
- ProcessState::MemDesc md_;
- mutex* mu_;
-};
-} // namespace internal
} // namespace tensorflow
-#endif // TENSORFLOW_COMMON_RUNTIME_GPU_PROCESS_STATE_H_
+#endif // TENSORFLOW_CORE_COMMON_RUNTIME_GPU_GPU_PROCESS_STATE_H_
diff --git a/tensorflow/core/common_runtime/gpu/gpu_util.cc b/tensorflow/core/common_runtime/gpu/gpu_util.cc
index d38413d79c..5851360cab 100644
--- a/tensorflow/core/common_runtime/gpu/gpu_util.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_util.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h"
-#include "tensorflow/core/common_runtime/gpu/process_state.h"
+#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/common_runtime/gpu_device_context.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
@@ -150,7 +150,7 @@ void GPUUtil::SetProtoFromGPU(const Tensor& tensor, Device* dev,
const int64 total_bytes = is_dead ? 0 : tensor.TotalBytes();
if (total_bytes > 0) {
tracing::ScopedAnnotation annotation("SetProtoFromGPU");
- alloc = ProcessState::singleton()->GetCUDAHostAllocator(0);
+ alloc = GPUProcessState::singleton()->GetCUDAHostAllocator(0);
buf = alloc->Allocate<char>(total_bytes);
if (LogMemory::IsEnabled()) {
LogMemory::RecordRawAllocation("SetProtoFromGPU",
@@ -185,13 +185,11 @@ void GPUUtil::SetProtoFromGPU(const Tensor& tensor, Device* dev,
}
// static
-void GPUUtil::DeviceToDeviceCopy(DeviceContext* send_dev_context,
- DeviceContext* recv_dev_context, Device* src,
- Device* dst,
- AllocatorAttributes src_alloc_attr,
- AllocatorAttributes dst_alloc_attr,
- const Tensor* input, Tensor* output,
- StatusCallback done) {
+void GPUUtil::DeviceToDeviceCopy(
+ DeviceContext* send_dev_context, DeviceContext* recv_dev_context,
+ Device* src, Device* dst, AllocatorAttributes src_alloc_attr,
+ AllocatorAttributes dst_alloc_attr, const Tensor* input, Tensor* output,
+ int dev_to_dev_stream_index, StatusCallback done) {
const DeviceBase::GpuDeviceInfo* dev_info = nullptr;
se::Stream* send_stream = nullptr;
Status s = PrepareCopy(src, send_dev_context, *input, output, &dev_info,
@@ -202,7 +200,7 @@ void GPUUtil::DeviceToDeviceCopy(DeviceContext* send_dev_context,
}
auto send_device_to_device_stream =
static_cast<const GPUDeviceContext*>(send_dev_context)
- ->device_to_device_stream();
+ ->device_to_device_stream(dev_to_dev_stream_index);
if (send_device_to_device_stream == nullptr) {
done(errors::Internal("No send gpu copy-out-stream is available."));
return;
diff --git a/tensorflow/core/common_runtime/gpu/gpu_util.h b/tensorflow/core/common_runtime/gpu/gpu_util.h
index 237b0044da..57687a8364 100644
--- a/tensorflow/core/common_runtime/gpu/gpu_util.h
+++ b/tensorflow/core/common_runtime/gpu/gpu_util.h
@@ -90,13 +90,11 @@ class GPUUtil {
Device* gpu_device, Tensor* gpu_tensor,
StatusCallback done);
- static void DeviceToDeviceCopy(DeviceContext* send_dev_context,
- DeviceContext* recv_dev_context, Device* src,
- Device* dst,
- AllocatorAttributes src_alloc_attr,
- AllocatorAttributes dst_alloc_attr,
- const Tensor* input, Tensor* output,
- StatusCallback done);
+ static void DeviceToDeviceCopy(
+ DeviceContext* send_dev_context, DeviceContext* recv_dev_context,
+ Device* src, Device* dst, AllocatorAttributes src_alloc_attr,
+ AllocatorAttributes dst_alloc_attr, const Tensor* input, Tensor* output,
+ int dev_to_dev_stream_index, StatusCallback done);
// Deep-copying of GPU tensor on the same device.
// 'src_gpu_tensor''s and 'dst_gpu_tensor''s backing memory must be on
diff --git a/tensorflow/core/common_runtime/gpu/pool_allocator_test.cc b/tensorflow/core/common_runtime/gpu/pool_allocator_test.cc
index a4c8d5fe86..583bff2c07 100644
--- a/tensorflow/core/common_runtime/gpu/pool_allocator_test.cc
+++ b/tensorflow/core/common_runtime/gpu/pool_allocator_test.cc
@@ -15,8 +15,9 @@ limitations under the License.
#if GOOGLE_CUDA
-#include "tensorflow/core/common_runtime/gpu/pool_allocator.h"
+#include "tensorflow/core/common_runtime/pool_allocator.h"
+#include "tensorflow/core/common_runtime/gpu/cuda_host_allocator.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/platform/test.h"
@@ -96,7 +97,8 @@ TEST(PoolAllocatorTest, Alignment) {
TEST(PoolAllocatorTest, AutoResize) {
PoolAllocator pool(2 /*pool_size_limit*/, true /*auto_resize*/,
- new BasicCPUAllocator, new NoopRounder, "pool");
+ new BasicCPUAllocator(0 /*numa_node*/), new NoopRounder,
+ "pool");
// Alloc/dealloc 10 sizes just a few times, confirming pool size
// stays at 2.
diff --git a/tensorflow/core/common_runtime/gpu_device_context.h b/tensorflow/core/common_runtime/gpu_device_context.h
index c92c5d1af3..d697d878dc 100644
--- a/tensorflow/core/common_runtime/gpu_device_context.h
+++ b/tensorflow/core/common_runtime/gpu_device_context.h
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/device_base.h"
+#include "tensorflow/core/lib/gtl/inlined_vector.h"
namespace stream_executor {
class Stream;
@@ -31,7 +32,7 @@ class GPUDeviceContext : public DeviceContext {
GPUDeviceContext(int stream_id, se::Stream* stream,
se::Stream* host_to_device_stream,
se::Stream* device_to_host_stream,
- se::Stream* device_to_device_stream)
+ gtl::InlinedVector<se::Stream*, 4> device_to_device_stream)
: stream_id_(stream_id),
stream_(stream),
host_to_device_stream_(host_to_device_stream),
@@ -43,8 +44,8 @@ class GPUDeviceContext : public DeviceContext {
se::Stream* stream() const override { return stream_; }
se::Stream* host_to_device_stream() const { return host_to_device_stream_; }
se::Stream* device_to_host_stream() const { return device_to_host_stream_; }
- se::Stream* device_to_device_stream() const {
- return device_to_device_stream_;
+ se::Stream* device_to_device_stream(int index) const {
+ return device_to_device_stream_[index % device_to_device_stream_.size()];
}
int stream_id() const { return stream_id_; }
@@ -64,12 +65,12 @@ class GPUDeviceContext : public DeviceContext {
// The default primary stream to use for this context.
// All the memory belongs to this stream.
se::Stream* stream_;
- // The stream to use for copy data from host into GPU.
+ // The stream to use for copying data from host into GPU.
se::Stream* host_to_device_stream_;
- // The stream to use for copy data from GPU to host.
+ // The stream to use for copying data from GPU to host.
se::Stream* device_to_host_stream_;
- // The stream to use for copy data between GPU.
- se::Stream* device_to_device_stream_;
+ // Streams to use for copying data between GPUs.
+ gtl::InlinedVector<se::Stream*, 4> device_to_device_stream_;
};
} // namespace tensorflow
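The accessor above maps any non-negative index onto the available streams with a modulus, so callers need not know the pool size; a minimal sketch, assuming ctx is a GPUDeviceContext* obtained elsewhere and subdiv_idx is some caller-chosen index:

    // Indices wrap modulo the number of streams in device_to_device_stream_,
    // so a caller may pass e.g. a ring subdivision index directly.
    se::Stream* s0 = ctx->device_to_device_stream(0);
    se::Stream* s_sub = ctx->device_to_device_stream(subdiv_idx);  // wraps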
diff --git a/tensorflow/core/common_runtime/graph_execution_state.cc b/tensorflow/core/common_runtime/graph_execution_state.cc
index 58018689d5..9c9eacb5b5 100644
--- a/tensorflow/core/common_runtime/graph_execution_state.cc
+++ b/tensorflow/core/common_runtime/graph_execution_state.cc
@@ -280,6 +280,118 @@ class TensorConnectionPruneRewrite : public subgraph::PruneRewrite {
NodeBuilder::NodeOut from_tensor_;
};
+template <class Map>
+Status LookupDevice(const DeviceSet& device_set, const string& tensor_name,
+ const Map& tensor2device,
+ const tensorflow::DeviceAttributes** out_device_attrs) {
+ *out_device_attrs = nullptr;
+ if (tensor2device.empty()) {
+ *out_device_attrs = &device_set.client_device()->attributes();
+ return Status::OK();
+ }
+ const auto it = tensor2device.find(tensor_name);
+ if (it == tensor2device.end()) {
+ *out_device_attrs = &device_set.client_device()->attributes();
+ return Status::OK();
+ }
+ DeviceNameUtils::ParsedName parsed_name;
+ if (!DeviceNameUtils::ParseFullName(it->second, &parsed_name)) {
+ return errors::InvalidArgument("Invalid device name ('", it->second,
+ "') provided for the tensor '", tensor_name,
+ "' in CallableOptions");
+ }
+ Device* device = device_set.FindDeviceByName(
+ DeviceNameUtils::ParsedNameToString(parsed_name));
+ if (device == nullptr) {
+ return errors::InvalidArgument("Device '", it->second,
+ "' specified for tensor '", tensor_name,
+ "' in CallableOptions does not exist");
+ }
+ *out_device_attrs = &device->attributes();
+ return Status::OK();
+}
+
+struct TensorAndDevice {
+ // WARNING: backing memory for the 'tensor' field is NOT owned.
+ const TensorId tensor;
+ // WARNING: device pointer is not owned, so must outlive TensorAndDevice.
+ const DeviceAttributes* device;
+};
+
+// Tensors of some DataTypes cannot be placed in device memory as feeds or
+// fetches. Validate against a whitelist of those known to work.
+bool IsFeedAndFetchSupported(DataType dtype, const string& device_type) {
+ // The mechanism for supporting feeds of device-backed Tensors requires
+ // the _Arg kernel to be registered for the corresponding type (and that
+ // the input to the kernel be in device and not host memory).
+ //
+ // The mechanism for supporting fetches of device-backed Tensors requires
+ // the _Retval kernel to be registered for the corresponding type (and
+ // that the output is produced in device and not host memory).
+ //
+ // For now, we return true iff there are _Arg AND _Retval kernels for dtype on
+ // the device. False negatives are okay, false positives would be bad.
+ //
+ // TODO(ashankar): Instead of a whitelist here, perhaps we could query
+ // the kernel registry for _Arg and _Retval kernels.
+ if (device_type == DEVICE_CPU) return true;
+ if (device_type != DEVICE_GPU) return false;
+ switch (dtype) {
+ case DT_BFLOAT16:
+ case DT_BOOL:
+ case DT_COMPLEX128:
+ case DT_COMPLEX64:
+ case DT_DOUBLE:
+ case DT_FLOAT:
+ case DT_HALF:
+ case DT_INT16:
+ case DT_INT64:
+ case DT_INT8:
+ case DT_UINT16:
+ case DT_UINT8:
+ return true;
+ default:
+ return false;
+ }
+}
+
+Status ValidateFeedAndFetchDevices(
+ const Graph& graph,
+ const std::vector<TensorAndDevice>& tensors_and_devices) {
+ if (tensors_and_devices.empty()) return Status::OK();
+ std::vector<bool> found(tensors_and_devices.size(), false);
+ for (const Node* node : graph.nodes()) {
+ // Linearly looping through all nodes and then all feed+fetch tensors isn't
+ // very efficient. At the time of this writing, the expectation was that
+ // tensors_and_devices.size() is really small in practice, so this won't be
+ // problematic.
+ // Revisit and make a more efficient lookup possible if needed (e.g., perhaps
+ // Graph can maintain a map from node name to Node*).
+ for (int i = 0; i < tensors_and_devices.size(); ++i) {
+ const TensorAndDevice& td = tensors_and_devices[i];
+ if (td.tensor.first != node->name()) continue;
+ found[i] = true;
+ TF_RETURN_IF_ERROR(graph.IsValidOutputTensor(node, td.tensor.second));
+ const DataType dtype = node->output_type(td.tensor.second);
+ if (!IsFeedAndFetchSupported(dtype, td.device->device_type())) {
+ return errors::Unimplemented(
+ "Cannot feed or fetch tensor '", td.tensor.ToString(),
+ "' from device ", td.device->name(), " as feeding/fetching from ",
+ td.device->device_type(), " devices is not yet supported for ",
+ DataTypeString(dtype), " tensors");
+ }
+ }
+ }
+ for (int i = 0; i < found.size(); ++i) {
+ if (!found[i]) {
+ return errors::InvalidArgument(
+ "Tensor ", tensors_and_devices[i].tensor.ToString(),
+ ", specified in either feed_devices or fetch_devices was not found "
+ "in the Graph");
+ }
+ }
+ return Status::OK();
+}
} // namespace
Status GraphExecutionState::PruneGraph(
@@ -289,18 +401,52 @@ Status GraphExecutionState::PruneGraph(
feed_rewrites.reserve(options.callable_options.feed_size());
std::vector<std::unique_ptr<subgraph::PruneRewrite>> fetch_rewrites;
fetch_rewrites.reserve(options.callable_options.fetch_size());
- const DeviceAttributes* device_info =
- &device_set_->client_device()->attributes();
if (options.use_function_convention) {
+ std::vector<TensorAndDevice> tensors_and_devices;
for (int i = 0; i < options.callable_options.feed_size(); ++i) {
- feed_rewrites.emplace_back(new subgraph::ArgFeedRewrite(
- &options.callable_options.feed(i), device_info, i));
+ // WARNING: feed MUST be a reference, since ArgFeedRewrite and
+ // tensors_and_devices hold on to its address.
+ const string& feed = options.callable_options.feed(i);
+ const DeviceAttributes* device_info;
+ TF_RETURN_IF_ERROR(LookupDevice(*device_set_, feed,
+ options.callable_options.feed_devices(),
+ &device_info));
+ feed_rewrites.emplace_back(
+ new subgraph::ArgFeedRewrite(&feed, device_info, i));
+ tensors_and_devices.push_back({ParseTensorName(feed), device_info});
+ }
+ if (!options.callable_options.fetch_devices().empty() &&
+ !options.callable_options.fetch_skip_sync()) {
+ return errors::Unimplemented(
+ "CallableOptions.fetch_skip_sync = false is not yet implemented. You "
+ "can set it to true instead, but MUST ensure that Device::Sync() is "
+ "invoked on the Device corresponding to the fetched tensor before "
+ "dereferencing the Tensor's memory.");
}
for (int i = 0; i < options.callable_options.fetch_size(); ++i) {
- fetch_rewrites.emplace_back(new subgraph::RetvalFetchRewrite(
- &options.callable_options.fetch(i), device_info, i));
+ // WARNING: fetch MUST be a reference, since RetvalFetchRewrite and
+ // tensors_and_devices hold on to its address.
+ const string& fetch = options.callable_options.fetch(i);
+ const DeviceAttributes* device_info;
+ TF_RETURN_IF_ERROR(LookupDevice(*device_set_, fetch,
+ options.callable_options.fetch_devices(),
+ &device_info));
+ fetch_rewrites.emplace_back(
+ new subgraph::RetvalFetchRewrite(&fetch, device_info, i));
+ tensors_and_devices.push_back({ParseTensorName(fetch), device_info});
}
+ TF_RETURN_IF_ERROR(
+ ValidateFeedAndFetchDevices(*graph, tensors_and_devices));
} else {
+ if (!options.callable_options.feed_devices().empty() ||
+ !options.callable_options.fetch_devices().empty()) {
+ return errors::Unimplemented(
+ "CallableOptions::feed_devices and CallableOptions::fetch_devices "
+ "to configure feeding/fetching tensors to/from device memory is not "
+ "yet supported when using a remote session.");
+ }
+ const DeviceAttributes* device_info =
+ &device_set_->client_device()->attributes();
for (const string& feed : options.callable_options.feed()) {
feed_rewrites.emplace_back(
new subgraph::RecvFeedRewrite(&feed, device_info));
@@ -455,11 +601,11 @@ Status GraphExecutionState::OptimizeGraph(
return errors::InvalidArgument("Missing node shape or type");
}
TensorShapeProto shape_proto(node.attr().at("shape").shape());
- // If the shape of the placeholder value is only partially known, we're
- // free to use any dimension we want to feed the placeholder. We choose
- // 1 to minimize the memory impact. Note that this only matters if an
- // optimizer choose to run the graph to build its cost model, which
- // doesn't happen (yet)
+ // If the shape of the placeholder value is only partially known,
+ // we're free to use any dimension we want to feed the placeholder. We
+ // choose 1 to minimize the memory impact. Note that this only matters
+ // if an optimizer chooses to run the graph to build its cost model,
+ // which doesn't happen (yet).
if (shape_proto.unknown_rank()) {
shape_proto.set_unknown_rank(false);
}
@@ -513,10 +659,10 @@ Status GraphExecutionState::OptimizeGraph(
opts.allow_internal_ops = true;
TF_RETURN_IF_ERROR(
ConvertGraphDefToGraph(opts, new_graph, optimized_graph->get()));
- // The graph conversion sets the requested device names but not the assigned
- // device names. However, since at this point the graph is placed TF expects
- // an assigned device name for every node. Therefore we copy the requested
- // device into the assigned device field.
+ // The graph conversion sets the requested device names but not the
+ // assigned device names. However, since at this point the graph is
+ // placed, TF expects an assigned device name for every node. Therefore
+ // we copy the requested device into the assigned device field.
for (Node* node : optimized_graph->get()->nodes()) {
node->set_assigned_device_name(node->requested_device());
}
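The feed/fetch device lookup above is driven by the feed_devices and fetch_devices maps in CallableOptions. A minimal sketch of a client request, with a hypothetical tensor name and device string; note the patch requires fetch_skip_sync to be true whenever fetch_devices is non-empty:

    CallableOptions opts;
    opts.add_feed("x:0");
    (*opts.mutable_feed_devices())["x:0"] =
        "/job:localhost/replica:0/task:0/device:GPU:0";
    opts.add_fetch("y:0");
    (*opts.mutable_fetch_devices())["y:0"] =
        "/job:localhost/replica:0/task:0/device:GPU:0";
    // fetch_skip_sync = false is not yet implemented; with true, the caller
    // must invoke Device::Sync() before dereferencing the fetched memory.
    opts.set_fetch_skip_sync(true);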
diff --git a/tensorflow/core/common_runtime/placer.cc b/tensorflow/core/common_runtime/placer.cc
index 86851c2c07..6781c87f6c 100644
--- a/tensorflow/core/common_runtime/placer.cc
+++ b/tensorflow/core/common_runtime/placer.cc
@@ -30,6 +30,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/util/status_util.h"
namespace tensorflow {
@@ -628,6 +629,40 @@ class ColocationGraph {
return parent;
}
+ // Ensures that the devices of 'dst's resource and reference match the device
+ // specified for 'src', which is an input of 'dst' with a partially or fully
+ // specified device.
+ Status VerifyResourceAndRefInputsCanBeColocated(
+ const Node* dst, const Node* src,
+ const DeviceNameUtils::ParsedName& src_parsed_name) {
+ std::vector<const Edge*> edges;
+ TF_RETURN_IF_ERROR(dst->input_edges(&edges));
+ for (const Edge* edge : edges) {
+ DataType input_type = dst->input_type(edge->dst_input());
+ if (input_type == DT_RESOURCE || IsRefType(input_type)) {
+ const Node* input_node = edge->src();
+ if (input_node == src) {
+ continue;
+ }
+ const auto& input_root = members_[FindRoot(input_node->id())];
+ const auto& input_parsed_name = input_root.device_name;
+ if (DeviceNameUtils::HasSomeDetails(input_parsed_name) &&
+ !DeviceNameUtils::AreCompatibleDevNames(input_parsed_name,
+ src_parsed_name)) {
+ return AttachDef(
+ errors::InvalidArgument(
+ "Could not colocate node with its "
+ "resource and reference inputs; devices ",
+ DeviceNameUtils::ParsedNameToString(input_parsed_name),
+ " and ", DeviceNameUtils::ParsedNameToString(src_parsed_name),
+ " are not compatible."),
+ *dst);
+ }
+ }
+ }
+ return Status::OK();
+ }
+
Graph* const graph_; // Not owned.
std::vector<Member> members_;
const DeviceSet* device_set_; // Not owned.
@@ -646,6 +681,15 @@ bool IsGeneratorNode(const Node* node) {
!IsRefType(node->output_type(0));
}
+bool IsExemptFromResourceInputColocation(const Node* node) {
+ // Note: Partitioned function calls, which place and partition their
+ // function bodies, are exempt from this check: they forward resource and
+ // ref inputs to operations that are appropriately placed, instead of
+ // dereferencing them.
+ const string& op_type = node->op_def().name();
+ return op_type == "PartitionedCall" || op_type == "StatefulPartitionedCall";
+}
+
} // namespace
Placer::Placer(Graph* graph, const DeviceSet* devices,
@@ -680,8 +724,8 @@ Status Placer::Run() {
// 2. Enumerate the constraint edges, and use them to update the disjoint
// node set.
- // If `node` has an input edge with reference type, add an
- // edge from the source of that edge to `node`.
+ // If `node` has an input edge with reference type, add an edge from the
+ // source of that edge to `node`.
for (const Edge* edge : graph_->edges()) {
if (edge->IsControlEdge()) {
continue;
@@ -689,7 +733,10 @@ Status Placer::Run() {
Node* src = edge->src();
Node* dst = edge->dst();
DataType input_type = dst->input_type(edge->dst_input());
- if (input_type == DT_RESOURCE || IsRefType(input_type)) {
+ if ((input_type == DT_RESOURCE || IsRefType(input_type)) &&
+ !IsExemptFromResourceInputColocation(dst)) {
+ // Colocate `src` and `dst` to maintain the invariant that nodes connected
+ // by reference edges are colocated.
int src_root_id = colocation_graph.FindRoot(src->id());
int dst_root_id = colocation_graph.FindRoot(dst->id());
auto& src_root = colocation_graph.members_[src_root_id];
@@ -706,6 +753,9 @@ Status Placer::Run() {
// incompatible.
if (!DeviceNameUtils::AreCompatibleDevNames(source_parsed_name,
dest_parsed_name)) {
+ TF_RETURN_IF_ERROR(
+ colocation_graph.VerifyResourceAndRefInputsCanBeColocated(
+ dst, src, source_parsed_name));
if (log_device_placement_) {
LOG(INFO) << "Ignoring device specification "
<< DeviceNameUtils::ParsedNameToString(dest_parsed_name)
@@ -773,10 +823,10 @@ Status Placer::Run() {
std::vector<Device*>* devices;
Status status = colocation_graph.GetDevicesForNode(node, &devices);
if (!status.ok()) {
- return AttachDef(
- errors::InvalidArgument("Cannot assign a device for operation '",
- node->name(), "': ", status.error_message()),
- *node);
+ return AttachDef(errors::InvalidArgument(
+ "Cannot assign a device for operation ",
+ RichNodeName(node), ": ", status.error_message()),
+ *node);
}
// Returns the first device in sorted devices list so we will always
@@ -820,10 +870,10 @@ Status Placer::Run() {
std::vector<Device*>* devices;
Status status = colocation_graph.GetDevicesForNode(node, &devices);
if (!status.ok()) {
- return AttachDef(
- errors::InvalidArgument("Cannot assign a device for operation '",
- node->name(), "': ", status.error_message()),
- *node);
+ return AttachDef(errors::InvalidArgument(
+ "Cannot assign a device for operation ",
+ RichNodeName(node), ": ", status.error_message()),
+ *node);
}
int assigned_device = -1;
@@ -889,4 +939,22 @@ void Placer::LogDeviceAssignment(const Node* node) const {
}
}
+bool Placer::ClientHandlesErrorFormatting() const {
+ return options_ != nullptr &&
+ options_->config.experimental().client_handles_error_formatting();
+}
+
+// Returns the node name in single quotes. If the client handles formatted
+// errors, appends a formatting tag that the client will reformat into, for
+// example, " (defined at filename:123)".
+string Placer::RichNodeName(const Node* node) const {
+ string quoted_name = strings::StrCat("'", node->name(), "'");
+ if (ClientHandlesErrorFormatting()) {
+ string file_and_line = error_format_tag(*node, "${file}:${line}");
+ return strings::StrCat(quoted_name, " (defined at ", file_and_line, ")");
+ } else {
+ return quoted_name;
+ }
+}
+
} // namespace tensorflow
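A sketch of the two error shapes RichNodeName produces, matching the placer tests later in this patch (the operation name 'in' comes from those tests):

    SessionOptions options;
    options.config.mutable_experimental()->set_client_handles_error_formatting(
        true);
    // Placement failure now reads:
    //   Cannot assign a device for operation 'in'
    //       (defined at ^^node:in:${file}:${line}^^)
    // With the flag unset (the default), the tag is omitted:
    //   Cannot assign a device for operation 'in'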
diff --git a/tensorflow/core/common_runtime/placer.h b/tensorflow/core/common_runtime/placer.h
index 75dce7c7fe..fce87269c5 100644
--- a/tensorflow/core/common_runtime/placer.h
+++ b/tensorflow/core/common_runtime/placer.h
@@ -87,6 +87,8 @@ class Placer {
// placement if the SessionOptions entry in 'options_' requests it.
void AssignAndLog(int assigned_device, Node* node) const;
void LogDeviceAssignment(const Node* node) const;
+ bool ClientHandlesErrorFormatting() const;
+ string RichNodeName(const Node* node) const;
Graph* const graph_; // Not owned.
const DeviceSet* const devices_; // Not owned.
diff --git a/tensorflow/core/common_runtime/placer_test.cc b/tensorflow/core/common_runtime/placer_test.cc
index 5ad251c892..cede899842 100644
--- a/tensorflow/core/common_runtime/placer_test.cc
+++ b/tensorflow/core/common_runtime/placer_test.cc
@@ -575,6 +575,10 @@ REGISTER_KERNEL_BUILDER(Name("HandleAssignCPU").Device("FakeCPU"), DummyOp);
REGISTER_OP("HandleAssignGPU").Input("i: resource").Input("v: float");
REGISTER_KERNEL_BUILDER(Name("HandleAssignGPU").Device("FakeGPU"), DummyOp);
+REGISTER_OP("TestTwoHandlesIn").Input("i: resource").Input("j: resource");
+REGISTER_KERNEL_BUILDER(Name("TestTwoHandlesIn").Device("FakeCPU"), DummyOp);
+REGISTER_KERNEL_BUILDER(Name("TestTwoHandlesIn").Device("FakeGPU"), DummyOp);
+
// Tests all combinations of resource handles and ops using them.
TEST_F(PlacerTest, TestResourceHandle) {
auto handle_test = [this](const string& var_op_name,
@@ -609,6 +613,42 @@ TEST_F(PlacerTest, TestResourceHandle) {
handle_test("HandleVariableCPU", "HandleAssignGPU", "FakeCPU").ok());
}
+TEST_F(PlacerTest, TestResourceHandlesOnDifferentDevicesFails) {
+ auto handle_test = [this](bool allow_soft_placement) {
+ Graph g(OpRegistry::Global());
+ { // Scope for temporary variables used to construct g.
+ GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
+ Node* var_cpu =
+ ops::SourceOp("TestHandleVariable", b.opts().WithName("var_cpu"));
+ Node* var_gpu =
+ ops::SourceOp("TestHandleVariable", b.opts().WithName("var_gpu"));
+ ops::BinaryOp("TestTwoHandlesIn", var_cpu, var_gpu,
+ b.opts().WithName("two_handles_in"));
+ TF_EXPECT_OK(BuildGraph(b, &g));
+
+ GetNodeByName(g, "var_cpu")
+ ->set_assigned_device_name(
+ "/job:a/replica:0/task:0/device:fakecpu:0");
+ GetNodeByName(g, "var_gpu")
+ ->set_assigned_device_name(
+ "/job:a/replica:0/task:0/device:fakegpu:0");
+ }
+
+ SessionOptions options;
+ options.config.set_allow_soft_placement(allow_soft_placement);
+ options.config.set_log_device_placement(true);
+ Status s = Place(&g, &options);
+ EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
+ EXPECT_TRUE(str_util::StrContains(
+ s.error_message(),
+ "Could not colocate node with its resource and reference inputs"));
+ return Status::OK();
+ };
+
+ TF_EXPECT_OK(handle_test(false));
+ TF_EXPECT_OK(handle_test(true));
+}
+
// Test that an assignment of an operator to the wrong device
// is ignored when it could never be satisfied (due to reference
// edges, for example).
@@ -1102,6 +1142,50 @@ TEST_F(PlacerTest, TestNonexistentGpuNoAllowSoftPlacement) {
EXPECT_TRUE(str_util::StrContains(s.error_message(), "/device:fakegpu:11"));
}
+// Test that the "Cannot assign a device" error message contains a format tag
+// when requested.
+TEST_F(PlacerTest, TestNonexistentGpuNoAllowSoftPlacementFormatTag) {
+ Graph g(OpRegistry::Global());
+ { // Scope for temporary variables used to construct g.
+ GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
+ ops::SourceOp("TestDevice",
+ b.opts().WithName("in").WithDevice("/device:fakegpu:11"));
+ TF_EXPECT_OK(BuildGraph(b, &g));
+ }
+
+ SessionOptions options;
+ options.config.mutable_experimental()->set_client_handles_error_formatting(
+ true);
+ Status s = Place(&g, &options);
+ EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
+ EXPECT_TRUE(
+ str_util::StrContains(s.error_message(),
+ "Cannot assign a device for operation 'in'"
+ " (defined at ^^node:in:${file}:${line}^^)"));
+}
+
+// Test that the "Cannot assign a device" error message does not contain a
+// format tag when the client does not handle error formatting.
+TEST_F(PlacerTest, TestNonexistentGpuNoAllowSoftPlacementNoFormatTag) {
+ Graph g(OpRegistry::Global());
+ { // Scope for temporary variables used to construct g.
+ GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
+ ops::SourceOp("TestDevice",
+ b.opts().WithName("in").WithDevice("/device:fakegpu:11"));
+ TF_EXPECT_OK(BuildGraph(b, &g));
+ }
+
+ SessionOptions options;
+ options.config.mutable_experimental()->set_client_handles_error_formatting(
+ false);
+ Status s = Place(&g, &options);
+ EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
+ EXPECT_TRUE(str_util::StrContains(
+ s.error_message(), "Cannot assign a device for operation 'in'"));
+ EXPECT_FALSE(str_util::StrContains(
+ s.error_message(), "'in' (defined at ^^node:in:${file}:${line}^^)"));
+}
+
// Test that placement fails when a node requests an explicit device that is not
// supported by the registered kernels if allow_soft_placement is not set.
TEST_F(PlacerTest, TestUnsupportedDeviceNoAllowSoftPlacement) {
diff --git a/tensorflow/core/common_runtime/gpu/pool_allocator.cc b/tensorflow/core/common_runtime/pool_allocator.cc
index 66fff16e8f..10a24ed14c 100644
--- a/tensorflow/core/common_runtime/gpu/pool_allocator.cc
+++ b/tensorflow/core/common_runtime/pool_allocator.cc
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include "tensorflow/core/common_runtime/gpu/pool_allocator.h"
+#include "tensorflow/core/common_runtime/pool_allocator.h"
#include <errno.h>
#ifndef _MSC_VER
@@ -284,4 +284,12 @@ void PoolAllocator::AddFreeVisitor(Visitor visitor) {
free_visitors_.push_back(visitor);
}
+void* BasicCPUAllocator::Alloc(size_t alignment, size_t num_bytes) {
+ return port::AlignedMalloc(num_bytes, static_cast<int>(alignment));
+}
+
+void BasicCPUAllocator::Free(void* ptr, size_t num_bytes) {
+ port::AlignedFree(ptr);
+}
+
} // namespace tensorflow
diff --git a/tensorflow/core/common_runtime/gpu/pool_allocator.h b/tensorflow/core/common_runtime/pool_allocator.h
index 310158aba1..607734445b 100644
--- a/tensorflow/core/common_runtime/gpu/pool_allocator.h
+++ b/tensorflow/core/common_runtime/pool_allocator.h
@@ -13,12 +13,11 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_COMMON_RUNTIME_GPU_POOL_ALLOCATOR_H_
-#define TENSORFLOW_COMMON_RUNTIME_GPU_POOL_ALLOCATOR_H_
+#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_POOL_ALLOCATOR_H_
+#define TENSORFLOW_CORE_COMMON_RUNTIME_POOL_ALLOCATOR_H_
// Simple LRU pool allocators for various flavors of CPU RAM that
-// implement the VisitableAllocator interface. GPU memory is managed
-// by GPURegionAllocator.
+// implement the VisitableAllocator interface.
#include <atomic>
#include <map>
@@ -28,9 +27,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/bits.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
-#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/mutex.h"
-#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
@@ -168,48 +165,18 @@ class Pow2Rounder : public RoundUpInterface {
class BasicCPUAllocator : public SubAllocator {
public:
+ // Argument numa_node is currently ignored.
+ explicit BasicCPUAllocator(int numa_node) : numa_node_(numa_node) {}
+
~BasicCPUAllocator() override {}
- void* Alloc(size_t alignment, size_t num_bytes) override {
- return port::AlignedMalloc(num_bytes, alignment);
- }
- void Free(void* ptr, size_t num_bytes) override { port::AlignedFree(ptr); }
-};
+ void* Alloc(size_t alignment, size_t num_bytes) override;
-// Allocator for pinned CPU RAM that is made known to CUDA for the
-// purpose of efficient DMA with a GPU.
-class CUDAHostAllocator : public SubAllocator {
- public:
- // Note: stream_exec cannot be null.
- explicit CUDAHostAllocator(se::StreamExecutor* stream_exec)
- : stream_exec_(stream_exec) {
- CHECK(stream_exec_ != nullptr);
- }
- ~CUDAHostAllocator() override {}
-
- void* Alloc(size_t alignment, size_t num_bytes) override {
- void* ptr = nullptr;
- if (num_bytes > 0) {
- ptr = stream_exec_->HostMemoryAllocate(num_bytes);
- if (ptr == nullptr) {
- LOG(WARNING) << "could not allocate pinned host memory of size: "
- << num_bytes;
- }
- }
- return ptr;
- }
-
- void Free(void* ptr, size_t num_bytes) override {
- if (ptr != nullptr) {
- stream_exec_->HostMemoryDeallocate(ptr);
- }
- }
+ void Free(void* ptr, size_t num_bytes) override;
private:
- se::StreamExecutor* stream_exec_; // not owned, non-null
-
- TF_DISALLOW_COPY_AND_ASSIGN(CUDAHostAllocator);
+ int numa_node_;
};
} // namespace tensorflow
-#endif // TENSORFLOW_COMMON_RUNTIME_GPU_POOL_ALLOCATOR_H_
+#endif // TENSORFLOW_CORE_COMMON_RUNTIME_POOL_ALLOCATOR_H_
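With the constructor change above, every BasicCPUAllocator call site must now supply a NUMA node (currently ignored); a minimal sketch mirroring the pool_allocator_test.cc update earlier in this patch:

    // numa_node is accepted but currently unused by BasicCPUAllocator.
    PoolAllocator pool(2 /*pool_size_limit*/, true /*auto_resize*/,
                       new BasicCPUAllocator(0 /*numa_node*/),
                       new NoopRounder, "pool");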
diff --git a/tensorflow/core/common_runtime/process_state.cc b/tensorflow/core/common_runtime/process_state.cc
new file mode 100644
index 0000000000..4d83b25ce6
--- /dev/null
+++ b/tensorflow/core/common_runtime/process_state.cc
@@ -0,0 +1,129 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/common_runtime/process_state.h"
+
+#include <cstring>
+#include <vector>
+
+#include "tensorflow/core/common_runtime/bfc_allocator.h"
+#include "tensorflow/core/common_runtime/pool_allocator.h"
+#include "tensorflow/core/framework/allocator.h"
+#include "tensorflow/core/framework/log_memory.h"
+#include "tensorflow/core/framework/tracking_allocator.h"
+#include "tensorflow/core/lib/gtl/stl_util.h"
+#include "tensorflow/core/lib/strings/strcat.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/mutex.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/env_var.h"
+
+namespace tensorflow {
+
+ProcessState* ProcessState::instance_ = nullptr;
+
+/*static*/ ProcessState* ProcessState::singleton() {
+ if (instance_ == nullptr) {
+ instance_ = new ProcessState;
+ }
+
+ return instance_;
+}
+
+ProcessState::ProcessState() : numa_enabled_(false) {
+ CHECK(instance_ == nullptr);
+}
+
+// Normally the ProcessState singleton is never explicitly deleted.
+// This function is defined for debugging problems with the allocators.
+ProcessState::~ProcessState() {
+ CHECK_EQ(this, instance_);
+ instance_ = nullptr;
+ for (Allocator* a : cpu_allocators_) {
+ delete a;
+ }
+}
+
+string ProcessState::MemDesc::DebugString() {
+ return strings::StrCat((loc == CPU ? "CPU " : "GPU "), dev_index,
+ ", dma: ", gpu_registered, ", nic: ", nic_registered);
+}
+
+ProcessState::MemDesc ProcessState::PtrType(const void* ptr) {
+ if (FLAGS_brain_gpu_record_mem_types) {
+ auto iter = mem_desc_map_.find(ptr);
+ if (iter != mem_desc_map_.end()) {
+ return iter->second;
+ }
+ }
+ return MemDesc();
+}
+
+Allocator* ProcessState::GetCPUAllocator(int numa_node) {
+ CHECK_GE(numa_node, 0);
+ if (!numa_enabled_) numa_node = 0;
+ mutex_lock lock(mu_);
+ while (cpu_allocators_.size() <= static_cast<size_t>(numa_node)) {
+ bool use_bfc_allocator = false;
+ // TODO(reedwm): Switch default to BFCAllocator if it's at least as fast and
+ // efficient.
+ Status status = ReadBoolFromEnvVar("TF_CPU_ALLOCATOR_USE_BFC", false,
+ &use_bfc_allocator);
+ if (!status.ok()) {
+ LOG(ERROR) << "GetCPUAllocator: " << status.error_message();
+ }
+ VisitableAllocator* allocator;
+ if (use_bfc_allocator) {
+ // TODO(reedwm): evaluate whether 64GB by default is the best choice.
+ int64 cpu_mem_limit_in_mb = -1;
+ Status status = ReadInt64FromEnvVar("TF_CPU_BFC_MEM_LIMIT_IN_MB",
+ 1LL << 16 /*64GB max by default*/,
+ &cpu_mem_limit_in_mb);
+ if (!status.ok()) {
+ LOG(ERROR) << "GetCPUAllocator: " << status.error_message();
+ }
+ int64 cpu_mem_limit = cpu_mem_limit_in_mb * (1LL << 20);
+ allocator = new BFCAllocator(
+ new BasicCPUAllocator(numa_enabled_ ? numa_node : -1), cpu_mem_limit,
+ true /*allow_growth*/, "bfc_cpu_allocator_for_gpu" /*name*/);
+ VLOG(2) << "Using BFCAllocator with memory limit of "
+ << cpu_mem_limit_in_mb << " MB for ProcessState CPU allocator";
+ } else {
+ allocator = new PoolAllocator(
+ 100 /*pool_size_limit*/, true /*auto_resize*/,
+ new BasicCPUAllocator(numa_enabled_ ? numa_node : -1),
+ new NoopRounder, "cpu_pool");
+ VLOG(2) << "Using PoolAllocator for ProcessState CPU allocator "
+ << "numa_enabled_=" << numa_enabled_
+ << " numa_node=" << numa_node;
+ }
+ if (LogMemory::IsEnabled()) {
+ // Wrap the allocator to track allocation ids for better logging
+ // at the cost of performance.
+ allocator = new TrackingVisitableAllocator(allocator, true);
+ }
+ cpu_allocators_.push_back(allocator);
+ }
+ return cpu_allocators_[numa_node];
+}
+
+void ProcessState::TestOnlyReset() {
+ mutex_lock lock(mu_);
+ mem_desc_map_.clear();
+ gtl::STLDeleteElements(&cpu_allocators_);
+ gtl::STLDeleteElements(&cpu_al_);
+}
+
+} // namespace tensorflow
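GetCPUAllocator above is steered by two environment variables; a sketch of opting into the BFC path, assuming the defaults as written in the code (pool allocator unless TF_CPU_ALLOCATOR_USE_BFC is set, 64GB limit unless TF_CPU_BFC_MEM_LIMIT_IN_MB overrides it):

    // Before process start:
    //   export TF_CPU_ALLOCATOR_USE_BFC=true
    //   export TF_CPU_BFC_MEM_LIMIT_IN_MB=32768   # cap BFC at 32 GB
    Allocator* cpu_alloc =
        ProcessState::singleton()->GetCPUAllocator(0 /*numa_node*/);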
diff --git a/tensorflow/core/common_runtime/process_state.h b/tensorflow/core/common_runtime/process_state.h
new file mode 100644
index 0000000000..0f4ae230bb
--- /dev/null
+++ b/tensorflow/core/common_runtime/process_state.h
@@ -0,0 +1,132 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_PROCESS_STATE_H_
+#define TENSORFLOW_CORE_COMMON_RUNTIME_PROCESS_STATE_H_
+
+#include <functional>
+#include <map>
+#include <unordered_map>
+#include <vector>
+
+#include "tensorflow/core/framework/allocator.h"
+#include "tensorflow/core/platform/mutex.h"
+#include "tensorflow/core/platform/thread_annotations.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/protobuf/config.pb.h"
+
+namespace tensorflow {
+
+class Allocator;
+class VisitableAllocator;
+class PoolAllocator;
+
+// Singleton that manages per-process state, e.g. allocation of
+// shared resources.
+class ProcessState {
+ public:
+ static ProcessState* singleton();
+
+ // Descriptor for memory allocation attributes, used by optional
+ // runtime correctness analysis logic.
+ struct MemDesc {
+ enum MemLoc { CPU, GPU };
+ MemLoc loc;
+ int dev_index;
+ bool gpu_registered;
+ bool nic_registered;
+ MemDesc()
+ : loc(CPU),
+ dev_index(0),
+ gpu_registered(false),
+ nic_registered(false) {}
+ string DebugString();
+ };
+
+ // If NUMA Allocators are desired, call this before calling any
+ // Allocator accessor.
+ void EnableNUMA() { numa_enabled_ = true; }
+
+ // Returns what we know about the memory at ptr.
+ // If we know nothing, it is reported as CPU 0 with no other attributes.
+ MemDesc PtrType(const void* ptr);
+
+ // Returns the one CPUAllocator used for the given numa_node.
+ // TEMPORARY: ignores numa_node.
+ Allocator* GetCPUAllocator(int numa_node);
+
+ typedef std::unordered_map<const void*, MemDesc> MDMap;
+
+ protected:
+ ProcessState();
+ friend class GPUProcessState;
+
+ // If these flags need to be runtime configurable consider adding
+ // them to ConfigProto.
+ static const bool FLAGS_brain_mem_reg_cuda_dma = true;
+ static const bool FLAGS_brain_gpu_record_mem_types = false;
+
+ // Helper method for unit tests to reset the ProcessState singleton by
+ // cleaning up everything. Never use in production.
+ virtual void TestOnlyReset();
+
+ static ProcessState* instance_;
+ bool numa_enabled_;
+
+ mutex mu_;
+
+ std::vector<Allocator*> cpu_allocators_ GUARDED_BY(mu_);
+
+ virtual ~ProcessState();
+
+ // Optional RecordingAllocators that wrap the corresponding
+ // Allocators for runtime attribute use analysis.
+ MDMap mem_desc_map_;
+ std::vector<Allocator*> cpu_al_ GUARDED_BY(mu_);
+};
+
+namespace internal {
+class RecordingAllocator : public Allocator {
+ public:
+ RecordingAllocator(ProcessState::MDMap* mm, Allocator* a,
+ ProcessState::MemDesc md, mutex* mu)
+ : mm_(mm), a_(a), md_(md), mu_(mu) {}
+
+ string Name() override { return a_->Name(); }
+ void* AllocateRaw(size_t alignment, size_t num_bytes) override {
+ void* p = a_->AllocateRaw(alignment, num_bytes);
+ mutex_lock l(*mu_);
+ (*mm_)[p] = md_;
+ return p;
+ }
+ void DeallocateRaw(void* p) override {
+ mutex_lock l(*mu_);
+ auto iter = mm_->find(p);
+ mm_->erase(iter);
+ a_->DeallocateRaw(p);
+ }
+ bool TracksAllocationSizes() override { return a_->TracksAllocationSizes(); }
+ size_t RequestedSize(const void* p) override { return a_->RequestedSize(p); }
+ size_t AllocatedSize(const void* p) override { return a_->AllocatedSize(p); }
+ void GetStats(AllocatorStats* stats) override { a_->GetStats(stats); }
+ void ClearStats() override { a_->ClearStats(); }
+ ProcessState::MDMap* mm_; // not owned
+ Allocator* a_; // not owned
+ ProcessState::MemDesc md_;
+ mutex* mu_;
+};
+} // namespace internal
+} // namespace tensorflow
+#endif // TENSORFLOW_CORE_COMMON_RUNTIME_PROCESS_STATE_H_
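As the header comments note, NUMA-aware allocators must be requested before the first accessor call; a minimal sketch of the intended call order:

    ProcessState* ps = ProcessState::singleton();
    ps->EnableNUMA();  // must precede the first GetCPUAllocator() call
    Allocator* node0_alloc = ps->GetCPUAllocator(0);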
diff --git a/tensorflow/core/common_runtime/rendezvous_mgr.cc b/tensorflow/core/common_runtime/rendezvous_mgr.cc
index 93f24a3217..6d247975ed 100644
--- a/tensorflow/core/common_runtime/rendezvous_mgr.cc
+++ b/tensorflow/core/common_runtime/rendezvous_mgr.cc
@@ -110,7 +110,7 @@ void IntraProcessRendezvous::SameWorkerRecvDone(
CopyTensor::ViaDMA(parsed.edge_name, send_args.device_context,
recv_args.device_context, src_device, dst_device,
send_args.alloc_attrs, recv_args.alloc_attrs, &in, out,
- std::move(done));
+ 0 /*dev_to_dev_stream_index*/, std::move(done));
}
void IntraProcessRendezvous::RecvAsync(const ParsedKey& parsed,
diff --git a/tensorflow/core/common_runtime/ring_reducer.cc b/tensorflow/core/common_runtime/ring_reducer.cc
index f8428f2fde..c1e514d5ad 100644
--- a/tensorflow/core/common_runtime/ring_reducer.cc
+++ b/tensorflow/core/common_runtime/ring_reducer.cc
@@ -163,7 +163,8 @@ void RingReducer::Run(StatusCallback done) {
CollectiveRemoteAccessLocal::MemCpyAsync(
ctx_->input_device_context(0), ctx_->op_device_context(), device_,
device_, ctx_->input_alloc_attr(0), ctx_->output_alloc_attr(0), input_,
- output_, [this, &note, &status](const Status& s) {
+ output_, 0 /*dev_to_dev_stream_index*/,
+ [this, &note, &status](const Status& s) {
status.Update(s);
note.Notify();
});
@@ -387,7 +388,7 @@ void RingReducer::DispatchRecv(RingField* rf, const StatusCallback& done) {
col_params_.task.is_local[rf->recv_dev_idx],
recv_buf_key, device_, ctx_->op_device_context(),
ctx_->output_alloc_attr(0), dst_tensor,
- device_locality_, done);
+ device_locality_, rf->subdiv_idx, done);
}
string RingReducer::FieldState() {
@@ -446,10 +447,11 @@ bool RingReducer::RunAsyncParts() {
if (rf->do_recv) {
rf->action = RF_RECV;
auto requeue = [this, rf, &ready_queue, &aborted](Status s) {
- const bool bad_status = !s.ok();
- if (bad_status) aborted = true;
+ if (!s.ok()) {
+ aborted = true;
+ StartAbort(s);
+ }
ready_queue.Enqueue(rf);
- if (bad_status) StartAbort(s);
};
DispatchRecv(rf, requeue);
dispatched = true;
@@ -494,10 +496,11 @@ bool RingReducer::RunAsyncParts() {
if (rf->do_send) {
rf->action = RF_SEND;
auto send_complete = [this, rf, &ready_queue, &aborted](Status s) {
- const bool bad_status = !s.ok();
- if (bad_status) aborted = true;
+ if (!s.ok()) {
+ aborted = true;
+ StartAbort(s);
+ }
ready_queue.Enqueue(rf);
- if (bad_status) StartAbort(s);
};
DispatchSend(rf, send_complete);
dispatched = true;
diff --git a/tensorflow/core/common_runtime/ring_reducer_test.cc b/tensorflow/core/common_runtime/ring_reducer_test.cc
index e4387a074a..fcdf9deff8 100644
--- a/tensorflow/core/common_runtime/ring_reducer_test.cc
+++ b/tensorflow/core/common_runtime/ring_reducer_test.cc
@@ -68,11 +68,13 @@ class FailTestRMA : public CollectiveRemoteAccessLocal {
DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
const DeviceLocality& client_locality,
+ int dev_to_dev_stream_index,
const StatusCallback& done) override {
if (MaybeFail(done)) return;
CollectiveRemoteAccessLocal::RecvFromPeer(
peer_device, peer_task, peer_is_local, key, to_device, to_device_ctx,
- to_alloc_attr, to_tensor, client_locality, done);
+ to_alloc_attr, to_tensor, client_locality, dev_to_dev_stream_index,
+ done);
}
void PostToPeer(const string& peer_device, const string& peer_task,
diff --git a/tensorflow/core/common_runtime/test_collective_executor_mgr.h b/tensorflow/core/common_runtime/test_collective_executor_mgr.h
index d0d4f24b11..80205830a2 100644
--- a/tensorflow/core/common_runtime/test_collective_executor_mgr.h
+++ b/tensorflow/core/common_runtime/test_collective_executor_mgr.h
@@ -32,7 +32,8 @@ class TestCollectiveExecutor : public CollectiveExecutor {
bool peer_is_local, const string& key, Device* to_device,
DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
- const DeviceLocality& client_locality, //???
+ const DeviceLocality& client_locality,
+ int dev_to_dev_stream_index,
const StatusCallback& done) override {
done(errors::Internal("Unimplemented"));
}
diff --git a/tensorflow/core/debug/BUILD b/tensorflow/core/debug/BUILD
index 36e9b3455a..591c22b8f6 100644
--- a/tensorflow/core/debug/BUILD
+++ b/tensorflow/core/debug/BUILD
@@ -82,25 +82,6 @@ cc_library(
)
tf_cuda_library(
- name = "debug_gateway_internal",
- srcs = ["debug_gateway.cc"],
- hdrs = ["debug_gateway.h"],
- copts = tf_copts(),
- linkstatic = 1,
- deps = [
- ":debug",
- "//tensorflow/core:core_cpu_internal",
- "//tensorflow/core:direct_session_internal",
- "//tensorflow/core:framework",
- "//tensorflow/core:lib",
- "//tensorflow/core:lib_internal",
- "//tensorflow/core:proto_text",
- "//tensorflow/core:protos_all_cc",
- ],
- alwayslink = 1,
-)
-
-tf_cuda_library(
name = "debugger_state_impl",
srcs = ["debugger_state_impl.cc"],
hdrs = ["debugger_state_impl.h"],
@@ -187,42 +168,6 @@ tf_cuda_library(
],
)
-# TODO(cais): Fix flakiness on GPU and change this back to a tf_cc_test_gpu.
-# See b/34081273.
-tf_cc_test(
- name = "debug_gateway_test",
- size = "small",
- srcs = ["debug_gateway_test.cc"],
- args = ["--heap_check=local"],
- linkstatic = tf_kernel_tests_linkstatic(),
- tags = [
- "no_cuda_on_cpu_tap",
- "no_gpu",
- ],
- deps = [
- ":debug",
- ":debug_gateway_internal",
- ":debug_graph_utils",
- "//tensorflow/cc:cc_ops",
- "//tensorflow/core:all_kernels",
- "//tensorflow/core:core_cpu",
- "//tensorflow/core:core_cpu_internal",
- "//tensorflow/core:direct_session",
- "//tensorflow/core:direct_session_internal",
- "//tensorflow/core:framework",
- "//tensorflow/core:framework_internal",
- "//tensorflow/core:gpu_runtime",
- "//tensorflow/core:lib",
- "//tensorflow/core:lib_internal",
- "//tensorflow/core:protos_all_cc",
- "//tensorflow/core:test",
- "//tensorflow/core:test_main",
- "//tensorflow/core:testlib",
- "//tensorflow/core/kernels:debug_ops",
- "//tensorflow/core/kernels:ops_util",
- ],
-)
-
tf_cc_test(
name = "debug_io_utils_test",
size = "small",
diff --git a/tensorflow/core/debug/debug_gateway.cc b/tensorflow/core/debug/debug_gateway.cc
deleted file mode 100644
index 2e1aabd1cc..0000000000
--- a/tensorflow/core/debug/debug_gateway.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/core/debug/debug_gateway.h"
-
-#include <utility>
-
-#include "tensorflow/core/common_runtime/device_factory.h"
-#include "tensorflow/core/common_runtime/session_factory.h"
-#include "tensorflow/core/framework/tensor.h"
-
-namespace tensorflow {
-
-DebugGateway::DebugGateway(DirectSession* session) : session_(session) {
- session_->node_outputs_callback_ =
- [this](const string& node_name, const int output_slot,
- const Tensor* tensor, const bool is_ref, OpKernelContext* ctx) {
- if (comp_cb_ != nullptr && output_slot <= 0) {
- // The node completion callback is invoked once for a node regardless
- // of whether the node has zero, one or more outputs.
- // The output_slot can be negative (-1, or kControlSlot) if
- // node_outputs_callback_ is invoked for a node with no output. If
- // that is the case, notify the callback that the node in question has
- // no output.
- comp_cb_(node_name, output_slot == 0);
- }
-
- // Copy tensor values (e.g., from GPU to host) only if the
- // value callback is not nullptr.
- if (val_cb_ != nullptr && output_slot >= 0) {
- CopyTensor(node_name, output_slot, tensor, ctx,
- [this, node_name, output_slot,
- is_ref](const Tensor* copied_tensor) {
- val_cb_(node_name, output_slot, *copied_tensor, is_ref);
- });
- }
-
- return Status::OK();
- };
-}
-
-DebugGateway::~DebugGateway() {
- if (session_ != nullptr) {
- session_->node_outputs_callback_ = nullptr;
- }
-}
-
-void DebugGateway::SetNodeCompletionCallback(NodeCompletionCallback callback) {
- comp_cb_ = std::move(callback);
-}
-
-void DebugGateway::SetNodeValueCallback(NodeValueCallback callback) {
- val_cb_ = std::move(callback);
-}
-
-void DebugGateway::CopyTensor(const string& node_name, const int output_slot,
- const Tensor* src_tensor, OpKernelContext* ctx,
- CopyDoneCallback copy_done_cb) {
- Device* device = static_cast<Device*>(ctx->device());
-
- // Determine if the tensor is initialized properly.
- // The second part of the check is necessary because in some cases, a
- // tensor can pass the IsInitialized() check, but the dtype is not set,
- // e.g., tf.FIFOQueue.
- if (src_tensor->IsInitialized() && DataTypeSize(src_tensor->dtype()) > 0) {
- // Tensor is initialized.
-
- string tensor_tag = strings::StrCat(node_name, ":", output_slot);
-
- // Create copied tensor on host
- Allocator* cpu_allocator = tensorflow::cpu_allocator();
- Tensor cpu_tensor(cpu_allocator, src_tensor->dtype(), src_tensor->shape());
-
- // Determine if the tensor is on device (GPU) or host (CPU).
- // The second part of the check is necessary because even an OpKernel on
- // may have output tensors allocated on CPU.
- if ((device->name().find("GPU:") != string::npos ||
- device->name().find("SYCL:") != string::npos) &&
- !ctx->output_alloc_attr(output_slot).on_host()) {
- // GPU tensors: Copy it to host (CPU).
- DeviceContext* device_ctxt = ctx->op_device_context();
-
- // Copy device (e.g., GPU) tensor to host and when done, invoke the
- // callback.
- device_ctxt->CopyDeviceTensorToCPU(
- src_tensor, "TensorCopy", device, &cpu_tensor,
- [node_name, cpu_tensor, copy_done_cb](const Status& s) {
- if (s.ok()) {
- copy_done_cb(&cpu_tensor);
- } else {
- LOG(ERROR) << "Copying of device Tensor " << node_name
- << " to CPU for debugging failed.";
- }
- });
- } else {
- // For CPU tensors, copy the source tensor and own the copy, because the
- // value callback may outlive the life time of the tensor and the tensor
- // may shared the underlying buffer with other tensors.
- cpu_tensor.UnsafeCopyFromInternal(*src_tensor, src_tensor->dtype(),
- src_tensor->shape());
-
- copy_done_cb(&cpu_tensor);
- }
- } else {
- // Tensor is not initialized: No need to copy.
- copy_done_cb(src_tensor);
- }
-}
-
-} // namespace tensorflow
diff --git a/tensorflow/core/debug/debug_gateway.h b/tensorflow/core/debug/debug_gateway.h
deleted file mode 100644
index bf5b6e08db..0000000000
--- a/tensorflow/core/debug/debug_gateway.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_DEBUG_DEBUG_SESSION_H_
-#define TENSORFLOW_DEBUG_DEBUG_SESSION_H_
-
-#include <unordered_map>
-
-#include "tensorflow/core/common_runtime/direct_session.h"
-#include "tensorflow/core/common_runtime/executor.h"
-
-namespace tensorflow {
-
-// Experimental. tfdb (TensorFlow Debugger): Gateway to intermediate node
-// outputs during Session Run calls. Currently limited to DirectSession.
-class DebugGateway {
- public:
- DebugGateway(DirectSession* session);
- virtual ~DebugGateway();
-
- // Callback for node completion. This callback is invoked only once for
- // a node regardless of whether it has one or more outputs. The value(s) of
- // the output tensor(s) are not necessarily available when this callback is
- // invoked. They may need to be asynchronously copied from device (e.g.,
- // GPU) to host, hence the need for the NodeValueCallback below.
- //
- // Args:
- // node_name: Name of the node that has just completed execution
- // any_output: Whether the node has any output(s)
- typedef std::function<void(const string& node_name, const bool any_output)>
- NodeCompletionCallback;
- void SetNodeCompletionCallback(NodeCompletionCallback callback);
-
- // Callback for node value. This is invoked when the value of a node's
- // output tensor is available on the host, possibly after copying from
- // a device (e.g., GPU).
- //
- // Args:
- // node_name: Name of the node of which the output has become available
- // output_slot: Output slot number of the output Tensor
- // tensor_value: Reference to the tensor value
- // is_ref: Whether the output of the reference type
- typedef std::function<void(const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref)>
- NodeValueCallback;
- void SetNodeValueCallback(NodeValueCallback callback);
-
- // TODO(cais): Add whitelists for ops/tensors (e.g., {"A:0", "B:0"})
- // for node completion callback (whitelist_comp_) and node value callback
- // (whitelist_val_). If whitelist_comp_ is non-empty, the gateway will
- // invoke the NodeCompletionCallback only for the nodes specified in the
- // whitelist. And so forth for whitelist_val_.
-
- private:
- DirectSession* session_;
- // TODO(cais): DebugGateway currently supports only DirectSession. Add
- // support for GrpcSession.
-
- NodeCompletionCallback comp_cb_ = nullptr;
- NodeValueCallback val_cb_ = nullptr;
-
- typedef std::function<void(const Tensor* dst_tensor)> CopyDoneCallback;
-
- void CopyTensor(const string& node_name, const int output_slot,
- const Tensor* src_tensor, OpKernelContext* ctx,
- CopyDoneCallback copy_done_cb);
-};
-
-} // end namespace tensorflow
-
-#endif // TENSORFLOW_DEBUG_DEBUG_SESSION_H_
diff --git a/tensorflow/core/debug/debug_gateway_test.cc b/tensorflow/core/debug/debug_gateway_test.cc
deleted file mode 100644
index b1bbd3f698..0000000000
--- a/tensorflow/core/debug/debug_gateway_test.cc
+++ /dev/null
@@ -1,1011 +0,0 @@
-/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/core/debug/debug_gateway.h"
-
-#include <algorithm>
-#include <cstdlib>
-#include <memory>
-#include <unordered_map>
-
-#include "tensorflow/core/debug/debug_graph_utils.h"
-#include "tensorflow/core/framework/tensor_testutil.h"
-#include "tensorflow/core/graph/testlib.h"
-#include "tensorflow/core/lib/core/notification.h"
-#include "tensorflow/core/lib/core/status_test_util.h"
-#include "tensorflow/core/lib/core/threadpool.h"
-#include "tensorflow/core/protobuf/rewriter_config.pb.h"
-
-namespace tensorflow {
-namespace {
-
-std::unique_ptr<DirectSession> CreateSession() {
- SessionOptions options;
- // Turn off graph optimizer so we can observe intermediate node states.
- options.config.mutable_graph_options()
- ->mutable_optimizer_options()
- ->set_opt_level(OptimizerOptions_Level_L0);
- options.config.mutable_graph_options()
- ->mutable_rewrite_options()
- ->set_constant_folding(RewriterConfig::OFF);
- options.config.mutable_graph_options()
- ->mutable_rewrite_options()
- ->set_dependency_optimization(RewriterConfig::OFF);
-
- return std::unique_ptr<DirectSession>(
- dynamic_cast<DirectSession*>(NewSession(options)));
-}
-
-class SessionDebugMinusAXTest : public ::testing::Test {
- public:
- void Initialize(std::initializer_list<float> a_values) {
- Graph graph(OpRegistry::Global());
-
-#if GOOGLE_CUDA
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:GPU:0";
-#elif defined(TENSORFLOW_USE_SYCL)
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:SYCL:0";
-#else
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:CPU:0";
-#endif
-
- Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
- test::FillValues<float>(&a_tensor, a_values);
- Node* a = test::graph::Constant(&graph, a_tensor);
- a->set_assigned_device_name(kDeviceName);
- a_ = a->name();
-
- Tensor x_tensor(DT_FLOAT, TensorShape({2, 1}));
- test::FillValues<float>(&x_tensor, {1, 1});
- Node* x = test::graph::Constant(&graph, x_tensor);
- x->set_assigned_device_name(kDeviceName);
- x_ = x->name();
-
- // y = A * x
- Node* y = test::graph::Matmul(&graph, a, x, false, false);
- y->set_assigned_device_name(kDeviceName);
- y_ = y->name();
-
- Node* y_neg = test::graph::Unary(&graph, "Neg", y);
- y_neg_ = y_neg->name();
- y_neg->set_assigned_device_name(kDeviceName);
-
- test::graph::ToGraphDef(&graph, &def_);
- }
-
- string a_;
- string x_;
- string y_;
- string y_neg_;
- GraphDef def_;
-};
-
-TEST_F(SessionDebugMinusAXTest, RunSimpleNetwork) {
- Initialize({3, 2, -1, 0});
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
-
- DebugGateway debug_gateway(session.get());
-
- // Supply completion and value callbacks
- mutex mu;
- // Completed nodes with and without outputs
- std::vector<string> completed_nodes_w_outputs;
- std::vector<string> completed_nodes_wo_outputs;
-
- Notification callbacks_done;
- debug_gateway.SetNodeCompletionCallback(
- [&mu, &completed_nodes_w_outputs, &completed_nodes_wo_outputs](
- const string& node_name, const bool any_output) {
- mutex_lock l(mu);
- if (any_output) {
- completed_nodes_w_outputs.push_back(node_name);
- } else {
- completed_nodes_wo_outputs.push_back(node_name);
- }
- });
-
- std::vector<bool> tensors_initialized;
- std::unordered_map<string, Tensor> tensor_vals;
- // output_slot values recorded in value callbacks
- std::vector<int> output_slots_val;
- // is_ref values recorded in value callbacks
- std::vector<bool> is_refs_val;
-
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &tensors_initialized, &tensor_vals, &output_slots_val,
- &is_refs_val,
- &callbacks_done](const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
- tensors_initialized.push_back(tensor_value.IsInitialized());
- tensor_vals.insert(std::make_pair(node_name, tensor_value));
- output_slots_val.push_back(output_slot);
- is_refs_val.push_back(is_ref);
-
- // Set the notification once we have the value from the target node.
- if (node_name == y_neg_ && !callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- });
-
- TF_ASSERT_OK(session->Create(def_));
-
- std::vector<std::pair<string, Tensor>> inputs;
-
- // Request two targets: one fetch output and one non-fetched output.
- std::vector<string> output_names = {y_ + ":0"};
- std::vector<string> target_nodes = {y_neg_};
- std::vector<Tensor> outputs;
- Status s = session->Run(inputs, output_names, target_nodes, &outputs);
- TF_ASSERT_OK(s);
-
- // Wait for callbacks to complete.
- callbacks_done.WaitForNotification();
-
- ASSERT_EQ(1, outputs.size());
- // The first output should be initialized and have the correct
- // output.
- auto mat = outputs[0].matrix<float>();
- ASSERT_TRUE(outputs[0].IsInitialized());
- EXPECT_FLOAT_EQ(5.0, mat(0, 0));
-
- // Verify the calling history of the completion callback
- // The following verifies each node with output(s) invoked the callback
- // exactly once.
- ASSERT_GE(completed_nodes_w_outputs.size(), 4); // There may be added nodes.
-
- ASSERT_EQ(1, std::count(completed_nodes_w_outputs.begin(),
- completed_nodes_w_outputs.end(), a_));
- ASSERT_EQ(1, std::count(completed_nodes_w_outputs.begin(),
- completed_nodes_w_outputs.end(), x_));
- ASSERT_EQ(1, std::count(completed_nodes_w_outputs.begin(),
- completed_nodes_w_outputs.end(), y_));
- ASSERT_EQ(1, std::count(completed_nodes_w_outputs.begin(),
- completed_nodes_w_outputs.end(), y_neg_));
-
- // Apart from nodes with outputs, there are also no-output (control) nodes.
- // They ought to be captured by the DebugGateway through
- // NodeOutputCallback as well.
- ASSERT_GT(completed_nodes_wo_outputs.size(), 0);
-
- // The DebugGateway should have captured the _SOURCE node.
- ASSERT_LE(1, std::count(completed_nodes_wo_outputs.begin(),
- completed_nodes_wo_outputs.end(), "_SOURCE"));
-
- // Verify the calling history of the value callabck
- ASSERT_EQ(completed_nodes_w_outputs.size(), tensors_initialized.size());
-
- // In this graph, there is no uninitialized node value.
- ASSERT_EQ(
- tensors_initialized.end(),
- std::find(tensors_initialized.begin(), tensors_initialized.end(), false));
-
- ASSERT_EQ(completed_nodes_w_outputs.size(), tensor_vals.size());
- ASSERT_EQ(completed_nodes_w_outputs.size(), output_slots_val.size());
- ASSERT_EQ(completed_nodes_w_outputs.size(), is_refs_val.size());
-
- // Verify the intermediate tensor values captured through the value callback
- auto mat_a = tensor_vals[a_].matrix<float>();
- ASSERT_EQ(3.0, mat_a(0, 0));
- ASSERT_EQ(2.0, mat_a(0, 1));
- ASSERT_EQ(-1.0, mat_a(1, 0));
- ASSERT_EQ(0.0, mat_a(1, 1));
-
- auto mat_x = tensor_vals[x_].matrix<float>();
- ASSERT_EQ(1.0, mat_x(0, 0));
- ASSERT_EQ(1.0, mat_x(1, 0));
-
- auto mat_y = tensor_vals[y_].matrix<float>();
- ASSERT_EQ(5.0, mat_y(0, 0));
- ASSERT_EQ(-1.0, mat_y(1, 0));
-
- auto mat_y_neg = tensor_vals[y_neg_].matrix<float>();
- ASSERT_EQ(-5.0, mat_y_neg(0, 0));
- ASSERT_EQ(1.0, mat_y_neg(1, 0));
-
- // In this graph, all outputs are on the first slot
- ASSERT_EQ(output_slots_val.size(),
- std::count_if(output_slots_val.begin(), output_slots_val.end(),
- [](int slot) { return slot == 0; }));
-
- // In this graph, there is no ref-type tensor.
- ASSERT_EQ(is_refs_val.end(),
- std::find(is_refs_val.begin(), is_refs_val.end(), true));
-}
-
-TEST_F(SessionDebugMinusAXTest, RunSimpleNetworkWithTwoDebugNodesInserted) {
- // Tensor contains one count of NaN
- Initialize({3, std::numeric_limits<float>::quiet_NaN(), -1, 0});
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
-
- DebugGateway debug_gateway(session.get());
-
- // Create debug tensor watch options with two debug ops:
- // DebugIdentity and DebugNanCount
- RunOptions run_opts;
- run_opts.set_output_partition_graphs(true);
-
- const string debug_identity = "DebugIdentity";
- const string debug_nan_count = "DebugNanCount";
- DebugTensorWatch* tensor_watch_opts =
- run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
- tensor_watch_opts->set_node_name(y_);
- tensor_watch_opts->set_output_slot(0);
- tensor_watch_opts->add_debug_ops(debug_identity);
- tensor_watch_opts->add_debug_ops(debug_nan_count);
-
- // Expected name of the inserted debug node
- string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(y_, ":", 0), 0, debug_identity);
- string debug_nan_count_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(y_, ":", 0), 1, debug_nan_count);
-
- // Supply completion and value callbacks
- mutex mu;
- // Names of the completed debug nodes
- std::vector<string> completed_debug_nodes;
-
- Notification callbacks_done;
- debug_gateway.SetNodeCompletionCallback(
- [&mu, &debug_identity_node_name, &debug_nan_count_node_name,
- &completed_debug_nodes](const string& node_name, const bool any_output) {
- mutex_lock l(mu);
- if (any_output && (node_name == debug_identity_node_name ||
- node_name == debug_nan_count_node_name)) {
- completed_debug_nodes.push_back(node_name);
- }
- });
-
- std::vector<Tensor> watched_tensor_vals;
- std::vector<Tensor> debug_identity_tensor_vals;
- std::vector<Tensor> debug_nan_count_tensor_vals;
-
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &debug_identity_node_name, &debug_nan_count_node_name,
- &watched_tensor_vals, &debug_identity_tensor_vals,
- &debug_nan_count_tensor_vals,
- &callbacks_done](const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
- if (node_name == y_) {
- watched_tensor_vals.push_back(tensor_value);
- } else if (node_name == debug_identity_node_name && output_slot == 0) {
- // output_slot == 0 carries the debug signal. Same below.
- debug_identity_tensor_vals.push_back(tensor_value);
- } else if (node_name == debug_nan_count_node_name && output_slot == 0) {
- debug_nan_count_tensor_vals.push_back(tensor_value);
- }
-
- // Set the notification once we have the value from the target node.
- if (node_name == y_neg_ && !callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- });
-
- TF_ASSERT_OK(session->Create(def_));
-
- std::vector<std::pair<string, Tensor>> inputs;
-
- // Request one fetched output and one non-fetched target node.
- std::vector<string> output_names = {y_ + ":0"};
- std::vector<string> target_nodes = {y_neg_};
- std::vector<Tensor> outputs;
-
- RunMetadata run_metadata;
- Status s = session->Run(run_opts, inputs, output_names, target_nodes,
- &outputs, &run_metadata);
- TF_ASSERT_OK(s);
-
-// Verify the correct number of partition graphs (GraphDefs) output
-// through RunMetadata, depending on whether a GPU is involved.
-#if GOOGLE_CUDA
- ASSERT_EQ(2, run_metadata.partition_graphs().size());
-#elif defined(TENSORFLOW_USE_SYCL)
- ASSERT_EQ(2, run_metadata.partition_graphs().size());
-#else
- ASSERT_EQ(1, run_metadata.partition_graphs().size());
-#endif
-
- // Wait for callbacks to complete.
- callbacks_done.WaitForNotification();
-
- // Verify that each of the two debug nodes has completed exactly once.
- ASSERT_EQ(2, completed_debug_nodes.size());
- ASSERT_EQ(
- 1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
- debug_identity_node_name));
- ASSERT_EQ(
- 1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
- debug_nan_count_node_name));
-
- // Verify that the tensor values from the watched node and the identity
- // debug node are received and they are equal (owing to the debug op being
- // "DebugIdentity")
- ASSERT_EQ(1, watched_tensor_vals.size());
- ASSERT_EQ(1, debug_identity_tensor_vals.size());
- auto mat_y = watched_tensor_vals[0].matrix<float>();
- auto mat_identity = debug_identity_tensor_vals[0].matrix<float>();
- // ASSERT_EQ doesn't work here: NaN == NaN is always false.
- ASSERT_TRUE(std::isnan(mat_y(0, 0)));
- ASSERT_TRUE(std::isnan(mat_identity(0, 0)));
- ASSERT_EQ(-1, mat_identity(1, 0));
-
- // Verify that the output from the NaN-count debug node indicates exactly
- // one NaN.
- ASSERT_EQ(1, debug_nan_count_tensor_vals.size());
- ASSERT_EQ(1, debug_nan_count_tensor_vals[0].scalar<int64>()());
-}
-
-#if !defined(GOOGLE_CUDA) && !defined(TENSORFLOW_USE_SYCL)
-// TODO(cais): Reinstate the following test for concurrent debugged runs on
-// a GPU once the root cause of the ~0.5% flakiness has been addressed.
-// (b/34081273)
-TEST_F(SessionDebugMinusAXTest,
- RunSimpleNetworkConcurrentlyWithDifferentDebugTensorWatches) {
- // Test concurrent Run() calls on a graph with different debug watches.
-
- Initialize({3, 2, -1, 0});
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
- TF_ASSERT_OK(session->Create(def_));
-
- // Number of concurrent Run() calls to launch.
- const int kConcurrentRuns = 3;
- thread::ThreadPool* tp =
- new thread::ThreadPool(Env::Default(), "test", kConcurrentRuns);
-
- std::vector<string> output_names = {y_ + ":0"};
- std::vector<string> target_nodes = {y_neg_};
-
- mutex mu;
- DebugGateway debug_gateway(session.get());
- std::unordered_map<string, Tensor> debug_identity_tensor_vals;
-
- const string debug_identity = "DebugIdentity";
-
- const string a_debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(a_, ":", 0), 0, debug_identity);
- const string x_debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(x_, ":", 0), 0, debug_identity);
- const string y_debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(y_, ":", 0), 0, debug_identity);
-
- Notification callbacks_done;
- volatile int val_callback_count = 0;
-
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &val_callback_count, &a_debug_identity_node_name,
- &x_debug_identity_node_name, &y_debug_identity_node_name,
- &debug_identity_tensor_vals, &callbacks_done,
- &kConcurrentRuns](const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
-
- if (node_name == a_debug_identity_node_name && output_slot == 0) {
- debug_identity_tensor_vals["a"] = tensor_value;
- val_callback_count++;
- } else if (node_name == x_debug_identity_node_name &&
- output_slot == 0) {
- // output_slot == 0 carries the debug signal.
- debug_identity_tensor_vals["x"] = tensor_value;
- val_callback_count++;
- } else if (node_name == y_debug_identity_node_name &&
- output_slot == 0) {
- debug_identity_tensor_vals["y"] = tensor_value;
- val_callback_count++;
- }
-
- // Set the notification once we have the value from the callbacks from
- // all the concurrent Run() calls.
- if (val_callback_count == kConcurrentRuns &&
- !callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- });
-
- int run_counter = 0;
- mutex run_lock;
-
- // Function to be executed concurrently.
- auto fn = [this, &run_lock, &run_counter, &session, output_names,
- target_nodes, &debug_identity]() {
- // Create unique debug tensor watch options for each of the concurrent
- // run calls.
- RunOptions run_opts;
- run_opts.set_output_partition_graphs(true);
-
- DebugTensorWatch* tensor_watch_opts =
- run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
- tensor_watch_opts->set_output_slot(0);
- tensor_watch_opts->add_debug_ops(debug_identity);
-
- {
- // Let the concurrent runs watch different tensors.
-
- mutex_lock l(run_lock);
-
- if (run_counter == 0) {
- // Let the 1st concurrent run watch a.
- tensor_watch_opts->set_node_name(a_);
- } else if (run_counter == 1) {
- // Let the 2nd concurrent run watch x.
- tensor_watch_opts->set_node_name(x_);
- } else if (run_counter == 2) {
- // Let the 3rd concurrent run watch y.
- tensor_watch_opts->set_node_name(y_);
- }
-
- run_counter++;
- }
-
- // Run the graph.
- RunMetadata run_metadata;
- std::vector<std::pair<string, Tensor>> inputs;
- std::vector<Tensor> outputs;
- Status s = session->Run(run_opts, inputs, output_names, target_nodes,
- &outputs, &run_metadata);
- TF_ASSERT_OK(s);
-
- ASSERT_EQ(1, run_metadata.partition_graphs().size());
-
- ASSERT_EQ(1, outputs.size());
- ASSERT_TRUE(outputs[0].IsInitialized());
- ASSERT_EQ(TensorShape({2, 1}), outputs[0].shape());
- auto mat = outputs[0].matrix<float>();
- EXPECT_FLOAT_EQ(5.0, mat(0, 0));
- EXPECT_FLOAT_EQ(-1.0, mat(1, 0));
- };
-
- for (int i = 0; i < kConcurrentRuns; ++i) {
- tp->Schedule(fn);
- }
-
- // Wait for the debug callbacks to finish.
- callbacks_done.WaitForNotification();
-
- // Wait for the concurrent functions with Run() calls to finish.
- delete tp;
-
- {
- mutex_lock l(mu);
-
- ASSERT_EQ(kConcurrentRuns, val_callback_count);
- ASSERT_EQ(kConcurrentRuns, debug_identity_tensor_vals.size());
-
- ASSERT_EQ(TensorShape({2, 2}), debug_identity_tensor_vals["a"].shape());
- auto a_mat_identity = debug_identity_tensor_vals["a"].matrix<float>();
- ASSERT_EQ(3.0, a_mat_identity(0, 0));
- ASSERT_EQ(2.0, a_mat_identity(0, 1));
- ASSERT_EQ(-1.0, a_mat_identity(1, 0));
- ASSERT_EQ(0.0, a_mat_identity(1, 1));
-
- ASSERT_EQ(TensorShape({2, 1}), debug_identity_tensor_vals["x"].shape());
- auto x_mat_identity = debug_identity_tensor_vals["x"].matrix<float>();
- ASSERT_EQ(1.0, x_mat_identity(0, 0));
- ASSERT_EQ(1.0, x_mat_identity(1, 0));
-
- ASSERT_EQ(TensorShape({2, 1}), debug_identity_tensor_vals["y"].shape());
- auto y_mat_identity = debug_identity_tensor_vals["y"].matrix<float>();
- ASSERT_EQ(5.0, y_mat_identity(0, 0));
- ASSERT_EQ(-1.0, y_mat_identity(1, 0));
- }
-}
-#endif
-
-class SessionDebugOutputSlotWithoutOutgoingEdgeTest : public ::testing::Test {
- public:
- void Initialize() {
- Graph graph(OpRegistry::Global());
-
-#if GOOGLE_CUDA
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:GPU:0";
-#elif defined(TENSORFLOW_USE_SYCL)
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:SYCL:0";
-#else
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:CPU:0";
-#endif
-
- Tensor a_tensor(DT_FLOAT, TensorShape({1, 1}));
- test::FillValues<float>(&a_tensor, {42.0});
- Node* a = test::graph::Constant(&graph, a_tensor);
- a->set_assigned_device_name(kDeviceName);
-
- Node* c = test::graph::Constant(&graph, a_tensor);
- c->set_assigned_device_name(kDeviceName);
- c_ = c->name();
-
- // Node c will be executed only because of the control edge from c to y.
- // Its output slot (slot 0) has no outgoing edge. This test verifies
- // that the debugger can watch that slot properly.
- Node* y = test::graph::NoOp(&graph, {c});
- y->set_assigned_device_name(kDeviceName);
- y_ = y->name();
-
- test::graph::ToGraphDef(&graph, &def_);
- }
-
- string c_;
- string y_;
- GraphDef def_;
-};
-
-TEST_F(SessionDebugOutputSlotWithoutOutgoingEdgeTest,
- WatchSlotWithoutOutgoingEdge) {
- Initialize();
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
-
- DebugGateway debug_gateway(session.get());
-
- // Supply the value callback.
- mutex mu;
-
- string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(c_, ":", 0), 0, "DebugIdentity");
-
- Notification callbacks_done;
-
- std::vector<Tensor> debug_identity_tensor_vals;
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &callbacks_done, &debug_identity_node_name,
- &debug_identity_tensor_vals](
- const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
-
- if (node_name == debug_identity_node_name && output_slot == 0) {
- debug_identity_tensor_vals.push_back(tensor_value);
-
- if (!callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- }
- });
-
- // Add DebugIdentity watch on c:0, which does not have an outgoing edge.
- RunOptions run_opts;
- run_opts.set_output_partition_graphs(true);
-
- DebugTensorWatch* tensor_watch_opts =
- run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
- tensor_watch_opts->set_node_name(c_);
- tensor_watch_opts->set_output_slot(0);
- tensor_watch_opts->add_debug_ops("DebugIdentity");
-
- TF_ASSERT_OK(session->Create(def_));
-
- // Invoke Session::Run() on y.
- std::vector<std::pair<string, Tensor>> inputs;
- std::vector<string> output_names;
- std::vector<string> target_nodes = {y_};
- std::vector<Tensor> outputs;
-
- RunMetadata run_metadata;
- Status s = session->Run(run_opts, inputs, output_names, target_nodes,
- &outputs, &run_metadata);
- TF_ASSERT_OK(s);
-
- // Wait for callbacks to complete.
- callbacks_done.WaitForNotification();
-
- // Assert that DebugIdentity node watching the control edge has been run.
- ASSERT_EQ(1, debug_identity_tensor_vals.size());
- auto mat_identity = debug_identity_tensor_vals[0].matrix<float>();
- ASSERT_EQ(42.0, mat_identity(0, 0));
-}
-
-class SessionDebugVariableTest : public ::testing::Test {
- public:
- void Initialize() {
- Graph graph(OpRegistry::Global());
-
-#if GOOGLE_CUDA
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:GPU:0";
-#elif defined(TENSORFLOW_USE_SYCL)
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:SYCL:0";
-#else
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:CPU:0";
-#endif
-
- // Define variable node.
- var_node_name_ = "var";
- Node* var =
- test::graph::Var(&graph, DT_FLOAT, TensorShape({3}), var_node_name_);
- var->set_assigned_device_name(kDeviceName);
-
- // Define the initial value and the initial-value node.
- Tensor nan_nan_seven(DT_FLOAT, TensorShape({3}));
- nan_nan_seven.flat<float>()(0) = std::numeric_limits<float>::quiet_NaN();
- nan_nan_seven.flat<float>()(1) = std::numeric_limits<float>::quiet_NaN();
- nan_nan_seven.flat<float>()(2) = 7.0;
-
- init_val_node_name_ = "init_val";
- Node* init_val =
- test::graph::Constant(&graph, nan_nan_seven, init_val_node_name_);
- init_val->set_assigned_device_name(kDeviceName);
-
- // Define node for variable value initialization
- Node* init = test::graph::Assign(&graph, var, init_val);
- init->set_assigned_device_name(kDeviceName);
- init_node_name_ = init->name();
-
- // Define new value node
- Tensor nan_eight_eight(DT_FLOAT, TensorShape({3}));
- nan_eight_eight.flat<float>()(0) = std::numeric_limits<float>::quiet_NaN();
- nan_eight_eight.flat<float>()(1) = 8.0;
- nan_eight_eight.flat<float>()(2) = 8.0;
-
- Node* new_val = test::graph::Constant(&graph, nan_eight_eight);
- new_val->set_assigned_device_name(kDeviceName);
- new_val_node_name_ = new_val->name();
-
- // Define node for assigning new value
- Node* assign = test::graph::Assign(&graph, var, new_val);
- assign->set_assigned_device_name(kDeviceName);
- assign_node_name_ = assign->name();
-
- test::graph::ToGraphDef(&graph, &def_);
- }
-
- string var_node_name_;
- string init_val_node_name_;
- string init_node_name_;
- string new_val_node_name_;
- string assign_node_name_;
- GraphDef def_;
-};
-
-TEST_F(SessionDebugVariableTest, WatchUninitializedVariableWithDebugOps) {
- Initialize();
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
-
- DebugGateway debug_gateway(session.get());
-
- TF_ASSERT_OK(session->Create(def_));
-
- // Set up DebugTensorWatch for an uninitialized tensor (in node var).
- RunOptions run_opts;
- const string debug_identity = "DebugIdentity";
- DebugTensorWatch* tensor_watch_opts =
- run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
- tensor_watch_opts->set_node_name(var_node_name_);
- tensor_watch_opts->set_output_slot(0);
- tensor_watch_opts->add_debug_ops(debug_identity);
-
- // Expected name of the inserted debug node
- string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(var_node_name_, ":", 0), 0, debug_identity);
-
- // Supply completion and value callbacks
- mutex mu;
- // Names of the completed debug nodes
- std::vector<string> completed_debug_nodes;
-
- Notification callbacks_done;
- debug_gateway.SetNodeCompletionCallback(
- [this, &mu, &debug_identity_node_name, &completed_debug_nodes,
- &callbacks_done](const string& node_name, const bool any_output) {
- mutex_lock l(mu);
- if (any_output && (node_name == debug_identity_node_name)) {
- completed_debug_nodes.push_back(node_name);
- }
- });
-
- std::vector<Tensor> debug_identity_tensor_vals;
-
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &debug_identity_node_name, &debug_identity_tensor_vals,
- &callbacks_done](const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
- if (node_name == debug_identity_node_name && output_slot == 0) {
- // output_slot == 0 carries the debug signal. Same below.
- debug_identity_tensor_vals.push_back(tensor_value);
- }
-
- // Set the notification once we have the value from the target node.
- if (node_name == init_node_name_ && !callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- });
-
- // First run the initialization op
- std::vector<std::pair<string, Tensor>> inputs_init;
- std::vector<Tensor> outputs_init;
-
- RunMetadata run_metadata;
- Status s = session->Run(run_opts, inputs_init, {init_node_name_}, {},
- &outputs_init, &run_metadata);
- TF_ASSERT_OK(s);
-
- callbacks_done.WaitForNotification();
-
- ASSERT_EQ(1, completed_debug_nodes.size());
- ASSERT_EQ(
- 1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
- debug_identity_node_name));
-
- // Assert the output reflects the uninitialized nature of var's tensor.
- ASSERT_EQ(1, debug_identity_tensor_vals.size());
- ASSERT_FALSE(debug_identity_tensor_vals[0].IsInitialized());
- ASSERT_EQ(DT_FLOAT, debug_identity_tensor_vals[0].dtype());
- ASSERT_EQ(TensorShape({3}), debug_identity_tensor_vals[0].shape());
-}
-
-TEST_F(SessionDebugVariableTest, VariableAssignWithDebugOps) {
- // The variable's initial value contains two NaNs.
- Initialize();
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
-
- DebugGateway debug_gateway(session.get());
-
- TF_ASSERT_OK(session->Create(def_));
-
- // First run the initialization op
- std::vector<std::pair<string, Tensor>> inputs_init;
- std::vector<Tensor> outputs_init;
- Status s = session->Run(inputs_init, {init_node_name_}, {}, &outputs_init);
- TF_ASSERT_OK(s);
-
- // Create debug tensor watch options with two ref-type debug ops:
- // DebugIdentity and DebugNanCount
- RunOptions run_opts;
- run_opts.set_output_partition_graphs(true);
- const string debug_identity = "DebugIdentity";
- const string debug_nan_count = "DebugNanCount";
- DebugTensorWatch* tensor_watch_opts =
- run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
- tensor_watch_opts->set_node_name(var_node_name_);
- tensor_watch_opts->set_output_slot(0);
- tensor_watch_opts->add_debug_ops(debug_identity);
- tensor_watch_opts->add_debug_ops(debug_nan_count);
-
- char tempdir_template[] = "/tmp/tfdbg_XXXXXX";
- string temp_dir(mkdtemp(tempdir_template));
- tensor_watch_opts->add_debug_urls(strings::StrCat("file://", temp_dir));
-
- // Expected name of the inserted debug node
- string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(var_node_name_, ":", 0), 0, debug_identity);
- string debug_nan_count_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(var_node_name_, ":", 0), 1, debug_nan_count);
-
- // Supply completion and value callbacks
- mutex mu;
- // Names of the completed debug nodes
- std::vector<string> completed_debug_nodes;
-
- Notification callbacks_done;
- debug_gateway.SetNodeCompletionCallback(
- [this, &mu, &debug_identity_node_name, &debug_nan_count_node_name,
- &completed_debug_nodes,
- &callbacks_done](const string& node_name, const bool any_output) {
- mutex_lock l(mu);
- if (any_output && (node_name == debug_identity_node_name ||
- node_name == debug_nan_count_node_name)) {
- completed_debug_nodes.push_back(node_name);
- }
- });
-
- std::vector<Tensor> debug_identity_tensor_vals;
- std::vector<Tensor> debug_nan_count_tensor_vals;
-
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &debug_identity_node_name, &debug_nan_count_node_name,
- &debug_identity_tensor_vals, &debug_nan_count_tensor_vals,
- &callbacks_done](const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
- if (node_name == debug_identity_node_name && output_slot == 0) {
- // output_slot == 0 carries the debug signal. Same below.
- debug_identity_tensor_vals.push_back(tensor_value);
- } else if (node_name == debug_nan_count_node_name && output_slot == 0) {
- debug_nan_count_tensor_vals.push_back(tensor_value);
- }
-
- // Set the notification once we have the value from the target node.
- if (node_name == assign_node_name_ &&
- !callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- });
-
- // Request one fetched output and one non-fetched target node.
- std::vector<std::pair<string, Tensor>> inputs;
- std::vector<string> output_names = {assign_node_name_ + ":0"};
- std::vector<string> target_nodes = {assign_node_name_};
- std::vector<Tensor> outputs;
-
- // Run with RunOptions that has tensor watches
- RunMetadata run_metadata;
- s = session->Run(run_opts, inputs, output_names, target_nodes, &outputs,
- &run_metadata);
- TF_ASSERT_OK(s);
-
-#if GOOGLE_CUDA
- ASSERT_EQ(2, run_metadata.partition_graphs().size());
-#elif defined(TENSORFLOW_USE_SYCL)
- ASSERT_EQ(2, run_metadata.partition_graphs().size());
-#else
- ASSERT_EQ(1, run_metadata.partition_graphs().size());
-#endif
-
- // Wait for callbacks to complete.
- callbacks_done.WaitForNotification();
-
- // Verify that the update has happened properly.
- ASSERT_EQ(1, outputs.size());
- ASSERT_TRUE(std::isnan(outputs[0].vec<float>()(0)));
- ASSERT_EQ(8.0, outputs[0].vec<float>()(1)); // Expect new value
- ASSERT_EQ(8.0, outputs[0].vec<float>()(2)); // Expect new value
-
- // Verify that each of the two debug nodes has completed exactly once.
- ASSERT_EQ(2, completed_debug_nodes.size());
- ASSERT_EQ(
- 1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
- debug_identity_node_name));
- ASSERT_EQ(
- 1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
- debug_nan_count_node_name));
-
- // Verify that the value from the ref identity node reflects the value
- // before the new assign.
- ASSERT_EQ(1, debug_identity_tensor_vals.size());
-
- auto vec_identity = debug_identity_tensor_vals[0].vec<float>();
- ASSERT_TRUE(std::isnan(vec_identity(0)));
- ASSERT_TRUE(std::isnan(vec_identity(1)));
- ASSERT_EQ(7.0, vec_identity(2));
-
- // Verify that the output from the NaN-count debug node indicates exactly
- // two NaNs, i.e., reflecting the value before the new assign.
- ASSERT_EQ(1, debug_nan_count_tensor_vals.size());
- ASSERT_EQ(2, debug_nan_count_tensor_vals[0].scalar<int64>()());
-}
-
-#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_SYCL)
-class SessionDebugGPUSwitchTest : public ::testing::Test {
- public:
- void Initialize() {
- Graph graph(OpRegistry::Global());
-
-#ifdef GOOGLE_CUDA
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:GPU:0";
-#elif defined(TENSORFLOW_USE_SYCL)
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:SYCL:0";
-#endif
-
- Tensor vb(DT_BOOL, TensorShape({}));
- vb.scalar<bool>()() = true;
- Tensor vi(DT_INT64, TensorShape({}));
- vi.scalar<int64>()() = 42;
- // So vi is expected to be forwarded to the second output port of sw.
-
- Node* pred = test::graph::Constant(&graph, vb);
- pred->set_assigned_device_name(kDeviceName);
- pred_node_name_ = pred->name();
-
- Node* value = test::graph::Constant(&graph, vi);
- value->set_assigned_device_name(kDeviceName);
- value_node_name_ = value->name();
-
- Node* sw = test::graph::Switch(&graph, value, pred);
- sw->set_assigned_device_name(kDeviceName);
- sw_node_name_ = sw->name();
-
- Node* z = test::graph::Identity(&graph, sw, 1);
- z->set_assigned_device_name(kDeviceName);
- z_node_name_ = z->name();
-
- test::graph::ToGraphDef(&graph, &def_);
- }
-
- string pred_node_name_;
- string value_node_name_;
- string sw_node_name_;
- string z_node_name_;
- GraphDef def_;
-};
-
-// Test for debug-watching tensors marked as HOST_MEMORY on GPU.
-TEST_F(SessionDebugGPUSwitchTest, RunSwitchWithHostMemoryDebugOp) {
- Initialize();
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
-
- DebugGateway debug_gateway(session.get());
-
- RunOptions run_opts;
- run_opts.set_output_partition_graphs(true);
- // This is the name of the boolean tensor fed as pred to the Switch node.
- // On GPU, this edge is HOST_MEMORY.
- const string watched_tensor = strings::StrCat(pred_node_name_, "/_1");
-
- const string debug_identity = "DebugIdentity";
- DebugTensorWatch* tensor_watch_opts =
- run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
- tensor_watch_opts->set_node_name(watched_tensor);
- tensor_watch_opts->set_output_slot(0);
- tensor_watch_opts->add_debug_ops(debug_identity);
-
- // Expected name of the inserted debug node
- string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(watched_tensor, ":", 0), 0, debug_identity);
-
- // Supply completion and value callbacks
- mutex mu;
- // Completed nodes with and without outputs
- std::vector<string> completed_nodes_w_outputs;
- std::vector<string> completed_nodes_wo_outputs;
-
- Notification callbacks_done;
- debug_gateway.SetNodeCompletionCallback(
- [&mu, &completed_nodes_w_outputs, &completed_nodes_wo_outputs](
- const string& node_name, const bool any_output) {
- mutex_lock l(mu);
- if (any_output) {
- completed_nodes_w_outputs.push_back(node_name);
- } else {
- completed_nodes_wo_outputs.push_back(node_name);
- }
- });
-
- std::vector<Tensor> debug_identity_tensor_vals;
-
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &debug_identity_node_name, &debug_identity_tensor_vals,
- &callbacks_done](const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
- if (node_name == debug_identity_node_name && output_slot == 0) {
- debug_identity_tensor_vals.push_back(tensor_value);
- }
-
- // Set the notification once we have the value from the target node.
- if (node_name == z_node_name_ && !callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- });
-
- TF_ASSERT_OK(session->Create(def_));
-
- std::vector<std::pair<string, Tensor>> inputs;
-
- // Request one fetched output and one non-fetched target node.
- std::vector<string> output_names = {z_node_name_ + ":0"};
- std::vector<string> target_nodes = {z_node_name_};
- std::vector<Tensor> outputs;
-
- RunMetadata run_metadata;
- Status s = session->Run(run_opts, inputs, output_names, target_nodes,
- &outputs, &run_metadata);
- TF_ASSERT_OK(s);
-
- ASSERT_EQ(2, run_metadata.partition_graphs().size());
-
- // Wait for callbacks to complete.
- callbacks_done.WaitForNotification();
-
- ASSERT_EQ(1, debug_identity_tensor_vals.size());
- ASSERT_TRUE(debug_identity_tensor_vals[0].scalar<bool>()());
-}
-#endif // GOOGLE_CUDA || TENSORFLOW_USE_SYCL
-
-} // end namespace
-} // end namespace tensorflow
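
Every test in the file removed above synchronizes the same way: callbacks registered on the DebugGateway append to vectors under a mutex, a Notification fires exactly once when the target node's value arrives, and the main thread blocks on WaitForNotification() before asserting. A minimal sketch of that notify-once rendezvous, assuming only the C++ standard library (std::mutex and std::condition_variable stand in for tensorflow::mutex and tensorflow::Notification; all names are illustrative):

    #include <condition_variable>
    #include <mutex>
    #include <string>
    #include <vector>

    // Stand-in for tensorflow::Notification: settable once, waitable many times.
    class OnceNotification {
     public:
      void Notify() {
        std::lock_guard<std::mutex> l(mu_);
        notified_ = true;
        cv_.notify_all();
      }
      bool HasBeenNotified() {
        std::lock_guard<std::mutex> l(mu_);
        return notified_;
      }
      void WaitForNotification() {
        std::unique_lock<std::mutex> l(mu_);
        cv_.wait(l, [this] { return notified_; });
      }
     private:
      std::mutex mu_;
      std::condition_variable cv_;
      bool notified_ = false;
    };

    int main() {
      std::mutex mu;
      std::vector<std::string> seen;
      OnceNotification done;
      // Like the tests' value callbacks: record under the lock, then notify
      // once the (hypothetical) target node "y_neg" has produced its value.
      auto value_callback = [&](const std::string& node_name) {
        std::lock_guard<std::mutex> l(mu);
        seen.push_back(node_name);
        if (node_name == "y_neg" && !done.HasBeenNotified()) done.Notify();
      };
      value_callback("y");
      value_callback("y_neg");
      done.WaitForNotification();  // Safe to assert on `seen` after this.
      return seen.size() == 2 ? 0 : 1;
    }

Checking HasBeenNotified() before Notify() mirrors the guard in the tests above, which may observe the target node more than once across debug ops.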
diff --git a/tensorflow/core/distributed_runtime/BUILD b/tensorflow/core/distributed_runtime/BUILD
index 0abef01a9a..2059b1ce0d 100644
--- a/tensorflow/core/distributed_runtime/BUILD
+++ b/tensorflow/core/distributed_runtime/BUILD
@@ -494,9 +494,11 @@ tf_cc_test(
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
+ "//tensorflow/core:protos_all_cc",
"//tensorflow/core:session_options",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
+ "//tensorflow/core:worker_proto_cc",
],
)
diff --git a/tensorflow/core/distributed_runtime/base_rendezvous_mgr.cc b/tensorflow/core/distributed_runtime/base_rendezvous_mgr.cc
index 5f6931e008..de6e4b4a7c 100644
--- a/tensorflow/core/distributed_runtime/base_rendezvous_mgr.cc
+++ b/tensorflow/core/distributed_runtime/base_rendezvous_mgr.cc
@@ -281,7 +281,7 @@ void BaseRemoteRendezvous::SameWorkerRecvDone(
CopyTensor::ViaDMA(parsed.edge_name, send_args.device_context,
recv_args.device_context, src_device, dst_device,
send_args.alloc_attrs, recv_args.alloc_attrs, &in, out,
- std::move(done));
+ 0 /*dev_to_dev_stream_index*/, std::move(done));
}
bool BaseRemoteRendezvous::IsSameWorker(DeviceNameUtils::ParsedName src,
diff --git a/tensorflow/core/distributed_runtime/collective_param_resolver_distributed.cc b/tensorflow/core/distributed_runtime/collective_param_resolver_distributed.cc
index 612ac14e22..1dd10d309b 100644
--- a/tensorflow/core/distributed_runtime/collective_param_resolver_distributed.cc
+++ b/tensorflow/core/distributed_runtime/collective_param_resolver_distributed.cc
@@ -150,21 +150,23 @@ void CollectiveParamResolverDistributed::CompleteInstanceAsync(
for (int32 offset : request->subdiv_offset()) {
cp->instance.impl_details.subdiv_offsets.push_back(offset);
}
- VLOG(1) << "New cp " << cp << " for device " << request->device() << " : "
+ string* device = new string(request->device());
+ VLOG(1) << "New cp " << cp << " for device " << *device << " : "
<< cp->ToString();
- StatusCallback done_and_cleanup = [this, cp, done](const Status& s) {
+ StatusCallback done_and_cleanup = [this, cp, device, done](const Status& s) {
done(s);
delete cp;
+ delete device;
};
// Start by completing the group.
CompleteGroupDistributed(
- request->device(), cp, cancel_mgr,
- [this, cp, request, response, cancel_mgr, done_and_cleanup](
+ *device, cp, cancel_mgr,
+ [this, cp, device, response, cancel_mgr, done_and_cleanup](
const Status& cg_status, const GroupRec* gr) {
if (cg_status.ok()) {
// Then complete the instance.
CompleteInstanceDistributed(
- request->device(), gr, cp, cancel_mgr,
+ *device, gr, cp, cancel_mgr,
[this, gr, cp, response,
done_and_cleanup](const Status& ci_status) {
if (ci_status.ok()) {
@@ -176,6 +178,7 @@ void CollectiveParamResolverDistributed::CompleteInstanceAsync(
const Status& fi_status, InstanceRec* ir) {
if (fi_status.ok()) {
mutex_lock l(ir->out_mu);
+ ir->WaitForOutMu(l);
response->set_instance_key(cp->instance.instance_key);
response->set_source_rank(ir->source_rank);
done_and_cleanup(fi_status);
@@ -277,18 +280,21 @@ bool CollectiveParamResolverDistributed::InstanceIsCached(int32 instance_key) {
void CollectiveParamResolverDistributed::UpdateInstanceCache(
const GroupRec* gr, CollectiveParams* cp,
const CompleteInstanceResponse& resp, const StatusCallback& done) {
- Notification note;
- InstanceRec* ir = nullptr;
+ using InstanceRecPointer = InstanceRec*;
+ InstanceRecPointer* irp = new InstanceRecPointer(nullptr);
int32 source_rank = resp.source_rank();
- auto continue_with_ir = [this, cp, &ir, source_rank, done](const Status& s) {
+ auto continue_with_ir = [this, cp, irp, source_rank, done](const Status& s) {
if (!s.ok()) {
done(s);
+ delete irp;
return;
}
Status status;
+ InstanceRec* ir = *irp;
do {
mutex_lock l(ir->out_mu);
+ ir->WaitForOutMu(l);
if (ir->source_rank != source_rank) {
if (ir->source_rank >= 0) {
ir->status = errors::Internal(
@@ -318,11 +324,12 @@ void CollectiveParamResolverDistributed::UpdateInstanceCache(
} while (false);
// Callback outside of lock.
done(status);
+ delete irp;
};
FindInstanceRec(
- gr, cp, [this, &ir, continue_with_ir](const Status s, InstanceRec* irec) {
- ir = irec;
+ gr, cp, [this, irp, continue_with_ir](const Status s, InstanceRec* irec) {
+ *irp = irec;
continue_with_ir(s);
});
}
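
The rewrite in this hunk replaces stack variables (the old `Notification note` and `InstanceRec* ir`) captured by reference with a heap-allocated pointer cell that the callback chain owns and deletes, because the enclosing function can return before FindInstanceRec invokes its callback. A sketch of that lifetime hazard and the heap-cell fix, using only std::function and std::thread (nothing here is TF API; the join merely keeps the sketch deterministic):

    #include <cassert>
    #include <functional>
    #include <thread>

    using Callback = std::function<void(int* rec)>;

    // Simulates an async lookup that completes on another thread, after the
    // caller's stack frame may already be gone.
    void FindRecAsync(Callback cb) {
      std::thread t([cb = std::move(cb)] {
        static int rec = 7;  // stands in for the looked-up record
        cb(&rec);
      });
      t.join();
    }

    void BuggyPattern() {
      int* ir = nullptr;  // stack slot, like the old `InstanceRec* ir`
      // Capturing `ir` by reference dangles if the caller returns before the
      // callback fires: the hazard the hunk removes.
      FindRecAsync([&ir](int* rec) { ir = rec; });
    }

    void FixedPattern() {
      int** irp = new int*(nullptr);  // heap cell outlives the caller's frame
      FindRecAsync([irp](int* rec) {
        *irp = rec;
        assert(**irp == 7);
        delete irp;  // the final callback owns and frees the cell
      });
    }

    int main() {
      BuggyPattern();  // works here only because the sketch joins; racy in general
      FixedPattern();
      return 0;
    }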
diff --git a/tensorflow/core/distributed_runtime/collective_rma_distributed.cc b/tensorflow/core/distributed_runtime/collective_rma_distributed.cc
index d4c47cab49..b9a3502131 100644
--- a/tensorflow/core/distributed_runtime/collective_rma_distributed.cc
+++ b/tensorflow/core/distributed_runtime/collective_rma_distributed.cc
@@ -65,11 +65,13 @@ void CollectiveRemoteAccessDistributed::RecvFromPeer(
const string& peer_device, const string& peer_task, bool peer_is_local,
const string& key, Device* to_device, DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
- const DeviceLocality& client_locality, const StatusCallback& done) {
+ const DeviceLocality& client_locality, int dev_to_dev_stream_index,
+ const StatusCallback& done) {
if (peer_is_local) {
CollectiveRemoteAccessLocal::RecvFromPeer(
peer_device, peer_task, peer_is_local, key, to_device, to_device_ctx,
- to_alloc_attr, to_tensor, client_locality, done);
+ to_alloc_attr, to_tensor, client_locality, dev_to_dev_stream_index,
+ done);
return;
}
@@ -83,7 +85,8 @@ void CollectiveRemoteAccessDistributed::RecvFromPeer(
// Logic to be executed on the RecvBufAsync callback.
auto recv_buf_callback = [this, state, peer_task, to_device, to_alloc_attr,
- to_device_ctx, to_tensor, done](const Status& s) {
+ to_device_ctx, to_tensor, dev_to_dev_stream_index,
+ done](const Status& s) {
if (s.ok()) {
// In this generic implementation the bytes come back in the
// RPC response protobuf rather than via RDMA so we need to copy
@@ -119,7 +122,7 @@ void CollectiveRemoteAccessDistributed::RecvFromPeer(
CopyTensor::ViaDMA("", // edge name (non-existent)
nullptr /*send_dev_ctx*/, to_device_ctx, cpu_dev,
to_device, cpu_attr, to_alloc_attr, cpu_tensor,
- to_tensor,
+ to_tensor, dev_to_dev_stream_index,
[this, cpu_tensor, done](const Status& s) {
delete cpu_tensor;
// This callback must not block, so execute
diff --git a/tensorflow/core/distributed_runtime/collective_rma_distributed.h b/tensorflow/core/distributed_runtime/collective_rma_distributed.h
index cfa9110f47..9434cacbca 100644
--- a/tensorflow/core/distributed_runtime/collective_rma_distributed.h
+++ b/tensorflow/core/distributed_runtime/collective_rma_distributed.h
@@ -37,6 +37,7 @@ class CollectiveRemoteAccessDistributed : public CollectiveRemoteAccessLocal {
DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
const DeviceLocality& client_locality,
+ int dev_to_dev_stream_index,
const StatusCallback& done) override;
void StartAbort(const Status& s) override;
diff --git a/tensorflow/core/distributed_runtime/collective_rma_distributed_test.cc b/tensorflow/core/distributed_runtime/collective_rma_distributed_test.cc
index a552f81f58..bfd312410c 100644
--- a/tensorflow/core/distributed_runtime/collective_rma_distributed_test.cc
+++ b/tensorflow/core/distributed_runtime/collective_rma_distributed_test.cc
@@ -280,7 +280,7 @@ TEST_F(CollRMADistTest, ProdFirstOK) {
"/job:worker/replica:0/task:1", // peer_task
false, // peer_is_local
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
- device_locality_,
+ device_locality_, 0 /*dev_to_dev_stream_index*/,
[this, &consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
@@ -309,7 +309,7 @@ TEST_F(CollRMADistTest, ConsFirstOK) {
"/job:worker/replica:0/task:1", // peer_task
false, // peer_is_local
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
- device_locality_,
+ device_locality_, 0 /*dev_to_dev_stream_index*/,
[this, &consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
@@ -342,7 +342,7 @@ TEST_F(CollRMADistTest, ConsFirstAbort) {
"/job:worker/replica:0/task:1", // peer_task
false, // peer_is_local
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
- device_locality_,
+ device_locality_, 0 /*dev_to_dev_stream_index*/,
[this, &consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
diff --git a/tensorflow/core/distributed_runtime/eager/BUILD b/tensorflow/core/distributed_runtime/eager/BUILD
index 5bcf295acd..055e5dfced 100644
--- a/tensorflow/core/distributed_runtime/eager/BUILD
+++ b/tensorflow/core/distributed_runtime/eager/BUILD
@@ -37,6 +37,7 @@ cc_library(
"//tensorflow/core:eager_service_proto_cc",
"//tensorflow/core:lib",
"//tensorflow/core/common_runtime/eager:eager_executor",
+ "//tensorflow/core/common_runtime/eager:tensor_handle",
],
)
diff --git a/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc b/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc
index 2fa234c810..466e779fab 100644
--- a/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc
+++ b/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc
@@ -63,10 +63,10 @@ Status GetNumRetvals(tensorflow::EagerContext* context, const string& op_name,
}
*num_retvals += iter->second.i();
} else if (!output_arg.type_list_attr().empty()) {
- auto iter = attrs.find(output_arg.number_attr());
+ auto iter = attrs.find(output_arg.type_list_attr());
if (iter == attrs.end()) {
- return errors::InvalidArgument("Unable to find number_attr ",
- output_arg.number_attr(),
+ return errors::InvalidArgument("Unable to find type_list_attr ",
+ output_arg.type_list_attr(),
" for Op: ", op_name);
}
*num_retvals += iter->second.list().type_size();
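
The fix above corrects a copy-paste error: for an output argument declared with a type_list_attr, the number of return values is the length of the listed types, not the value of a number_attr. A hedged sketch of the counting rule with simplified stand-in types (this is not the real tensorflow::AttrValue API):

    #include <map>
    #include <string>
    #include <vector>

    // Simplified stand-ins for the protobuf types used by GetNumRetvals.
    struct AttrValue {
      int i = 0;                   // value of a number_attr ("N")
      std::vector<int> type_list;  // value of a type_list_attr
    };

    struct OutputArg {
      std::string number_attr;     // set: the output repeats N times
      std::string type_list_attr;  // set: one output per listed type
    };

    int NumRetvals(const OutputArg& arg,
                   const std::map<std::string, AttrValue>& attrs) {
      if (!arg.number_attr.empty()) {
        return attrs.at(arg.number_attr).i;
      } else if (!arg.type_list_attr.empty()) {
        // The bug looked up arg.number_attr here; the key must be the
        // type_list_attr, and the count is the length of the type list.
        return static_cast<int>(attrs.at(arg.type_list_attr).type_list.size());
      }
      return 1;  // plain single output
    }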
@@ -81,6 +81,10 @@ Status GetNumRetvals(tensorflow::EagerContext* context, const string& op_name,
Status EagerServiceImpl::CreateContext(const CreateContextRequest* request,
CreateContextResponse* response) {
+  // Make sure env_ and env_->rendezvous_mgr are available.
+  if (env_ == nullptr || env_->rendezvous_mgr == nullptr) {
+    return tensorflow::errors::Internal(
+        "invalid eager env_ or env_->rendezvous_mgr.");
+ }
std::vector<tensorflow::Device*> devices;
TF_RETURN_IF_ERROR(tensorflow::DeviceFactory::AddDevices(
@@ -128,8 +132,20 @@ Status EagerServiceImpl::CreateContext(const CreateContextRequest* request,
return Status::OK();
}
+Status TensorHandleShape(TensorHandle* handle, TensorShapeProto* proto) {
+ const tensorflow::Tensor* t = nullptr;
+
+  // TODO(nareshmodi): This call turns async calls into sync calls. Fix this.
+ TF_RETURN_IF_ERROR(handle->Tensor(&t));
+
+ t->shape().AsProto(proto);
+
+ return Status::OK();
+}
+
Status EagerServiceImpl::ExecuteOp(const Operation& operation,
- ServerContext* server_context) {
+ ServerContext* server_context,
+ QueueResponse* queue_response) {
std::unique_ptr<tensorflow::EagerOperation> op;
const char* name = operation.name().c_str(); // Shorthand
const tensorflow::AttrTypeMap* types;
@@ -172,6 +188,10 @@ Status EagerServiceImpl::ExecuteOp(const Operation& operation,
server_context->AddOperationOutputs(retvals, operation.id());
+ for (auto* handle : retvals) {
+ TF_RETURN_IF_ERROR(TensorHandleShape(handle, queue_response->add_shape()));
+ }
+
return Status::OK();
}
@@ -182,8 +202,9 @@ Status EagerServiceImpl::Enqueue(const EnqueueRequest* request,
core::ScopedUnref context_unref(context);
for (const auto& item : request->queue()) {
+ auto* queue_response = response->add_queue_response();
if (item.has_operation()) {
- TF_RETURN_IF_ERROR(ExecuteOp(item.operation(), context));
+ TF_RETURN_IF_ERROR(ExecuteOp(item.operation(), context, queue_response));
} else {
TF_RETURN_IF_ERROR(context->DeleteTensorHandle(
RemoteTensorHandleInternal(item.handle_to_decref())));
diff --git a/tensorflow/core/distributed_runtime/eager/eager_service_impl.h b/tensorflow/core/distributed_runtime/eager/eager_service_impl.h
index ebd5269a57..b0e4aa84b9 100644
--- a/tensorflow/core/distributed_runtime/eager/eager_service_impl.h
+++ b/tensorflow/core/distributed_runtime/eager/eager_service_impl.h
@@ -135,7 +135,8 @@ class EagerServiceImpl {
tensorflow::Status GetServerContext(uint64, ServerContext**);
private:
- Status ExecuteOp(const Operation& operation, ServerContext* server_context);
+ Status ExecuteOp(const Operation& operation, ServerContext* server_context,
+ QueueResponse* queue_response);
const WorkerEnv* const env_; // Not owned.
mutex contexts_mu_;
diff --git a/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc b/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc
index 91b58698a4..b98386ba86 100644
--- a/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc
+++ b/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc
@@ -198,6 +198,11 @@ TEST_F(EagerServiceImplTest, BasicTest) {
TF_ASSERT_OK(eager_service_impl.Enqueue(&remote_enqueue_request,
&remote_enqueue_response));
+ auto& matmul_result_shape =
+ remote_enqueue_response.queue_response(1).shape(0);
+ EXPECT_EQ(matmul_result_shape.dim(0).size(), 2);
+ EXPECT_EQ(matmul_result_shape.dim(1).size(), 2);
+
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
response.context_id(), RemoteTensorHandleInternal(2, 0), &tensor_handle));
diff --git a/tensorflow/core/distributed_runtime/eager/remote_execute_node.h b/tensorflow/core/distributed_runtime/eager/remote_execute_node.h
index c4bd67aaed..28b68c3b88 100644
--- a/tensorflow/core/distributed_runtime/eager/remote_execute_node.h
+++ b/tensorflow/core/distributed_runtime/eager/remote_execute_node.h
@@ -17,6 +17,7 @@ limitations under the License.
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_EXECUTE_NODE_H_
#include "tensorflow/core/common_runtime/eager/eager_executor.h"
+#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/distributed_runtime/eager/eager_client.h"
#include "tensorflow/core/protobuf/eager_service.pb.h"
@@ -27,6 +28,22 @@ namespace eager {
// via RPC in a remote EagerService.
class RemoteExecuteNode : public tensorflow::EagerNode {
public:
+ RemoteExecuteNode(
+ tensorflow::uint64 id, const tensorflow::eager::EnqueueRequest& request,
+ tensorflow::eager::EagerClient* eager_client,
+ const gtl::InlinedVector<TensorHandle*, 4>& inputs,
+ std::function<void(const Status& status, const EnqueueResponse& response)>
+ done_callback)
+ : tensorflow::EagerNode(id),
+ request_(std::move(request)),
+ eager_client_(eager_client),
+ inputs_(inputs),
+ done_callback_(std::move(done_callback)) {
+ for (auto* handle : inputs_) {
+ handle->Ref();
+ }
+ }
+
RemoteExecuteNode(tensorflow::uint64 id,
const tensorflow::eager::EnqueueRequest& request,
tensorflow::eager::EagerClient* eager_client)
@@ -34,6 +51,12 @@ class RemoteExecuteNode : public tensorflow::EagerNode {
request_(std::move(request)),
eager_client_(eager_client) {}
+ ~RemoteExecuteNode() {
+ for (auto* handle : inputs_) {
+ handle->Unref();
+ }
+ }
+
tensorflow::Status Run() override {
tensorflow::eager::EnqueueResponse response;
tensorflow::Status status;
@@ -45,6 +68,10 @@ class RemoteExecuteNode : public tensorflow::EagerNode {
});
n.WaitForNotification();
+ if (done_callback_) {
+ done_callback_(status, response);
+ }
+
return status;
}
@@ -52,6 +79,13 @@ class RemoteExecuteNode : public tensorflow::EagerNode {
EnqueueRequest request_;
tensorflow::eager::EagerClient*
eager_client_; // Not owned, and must outlive the RemoteExecuteNode.
+
+ // This is required to ensure that the tensor handles stay alive across the
+ // execution.
+ gtl::InlinedVector<TensorHandle*, 4> inputs_;
+
+ std::function<void(const Status& status, const EnqueueResponse& response)>
+ done_callback_;
};
} // namespace eager
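
The new constructor takes a reference on every input TensorHandle and the destructor releases them, so the inputs cannot be destroyed while the enqueued request is still executing remotely. A self-contained sketch of that ref-in-constructor/unref-in-destructor keep-alive pattern, with a minimal stand-in for the ref-counted base class (illustrative only, not tensorflow::core::RefCounted):

    #include <vector>

    class RefCounted {
     public:
      void Ref() { ++refs_; }
      void Unref() {
        if (--refs_ == 0) delete this;
      }
     protected:
      virtual ~RefCounted() = default;
     private:
      int refs_ = 1;  // the creator holds the initial reference
    };

    class Handle : public RefCounted {};

    class Node {
     public:
      explicit Node(std::vector<Handle*> inputs) : inputs_(std::move(inputs)) {
        for (auto* h : inputs_) h->Ref();    // pin inputs for the node's lifetime
      }
      ~Node() {
        for (auto* h : inputs_) h->Unref();  // release once execution is done
      }
     private:
      std::vector<Handle*> inputs_;
    };

    int main() {
      auto* h = new Handle;  // refcount 1 (held by the caller)
      {
        Node n({h});         // refcount 2: the node pins the handle
        h->Unref();          // caller drops its reference; the handle survives
      }                      // node destroyed: refcount hits 0, handle freed
      return 0;
    }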
diff --git a/tensorflow/core/distributed_runtime/graph_mgr.cc b/tensorflow/core/distributed_runtime/graph_mgr.cc
index e2f13df19f..6c146036ae 100644
--- a/tensorflow/core/distributed_runtime/graph_mgr.cc
+++ b/tensorflow/core/distributed_runtime/graph_mgr.cc
@@ -261,7 +261,7 @@ Status GraphMgr::InitItem(const string& session, const GraphDef& gdef,
optimizer.Optimize(lib, worker_env_->env, params.device, &subgraph,
/*shape_map=*/nullptr);
- // EXPERIMENTAL: tfdbg inserts debug nodes (i.e., probes) to the graph.
+ // TensorFlow Debugger (tfdbg) inserts debug nodes in the graph.
if (!debug_options.debug_tensor_watch_opts().empty()) {
TF_RETURN_IF_ERROR(DecorateAndPublishGraphForDebug(
debug_options, subgraph.get(), params.device));
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_channel.cc b/tensorflow/core/distributed_runtime/rpc/grpc_channel.cc
index 0ebc084cb6..b7eb3c9015 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_channel.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_channel.cc
@@ -42,12 +42,12 @@ string MakeAddress(const string& job, int task) {
return strings::StrCat("/job:", job, "/replica:0/task:", task);
}
+// Allows the host to be a raw IP (either v4 or v6).
Status ValidateHostPortPair(const string& host_port) {
uint32 port;
- std::vector<string> parts = str_util::Split(host_port, ':');
- // Must be host:port, port must be a number, host must not contain a '/'.
- if (parts.size() != 2 || !strings::safe_strtou32(parts[1], &port) ||
- parts[0].find("/") != string::npos) {
+ auto colon_index = host_port.find_last_of(':');
+ if (!strings::safe_strtou32(host_port.substr(colon_index + 1), &port) ||
+ host_port.substr(0, colon_index).find("/") != string::npos) {
return errors::InvalidArgument("Could not interpret \"", host_port,
"\" as a host-port pair.");
}
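
Splitting on the last ':' rather than requiring exactly one ':' is what lets bracketed IPv6 literals such as "[::]:2222" validate: only the characters after the final colon are parsed as the port, and everything before it is treated as the host. A standalone sketch of that parsing rule, assuming nothing beyond the C++ standard library (the digit check and range cap only approximate strings::safe_strtou32):

    #include <cstdint>
    #include <string>

    // Everything after the last ':' is the port; the rest is the host.
    bool ParseHostPort(const std::string& host_port, std::string* host,
                       uint32_t* port) {
      auto colon = host_port.find_last_of(':');
      if (colon == std::string::npos) return false;
      const std::string port_str = host_port.substr(colon + 1);
      if (port_str.empty() || port_str.size() > 5 ||
          port_str.find_first_not_of("0123456789") != std::string::npos) {
        return false;
      }
      const unsigned long value = std::stoul(port_str);
      if (value > 65535) return false;
      *host = host_port.substr(0, colon);
      // Mirror the check in the hunk: a '/' in the host part is rejected.
      if (host->find('/') != std::string::npos) return false;
      *port = static_cast<uint32_t>(value);
      return true;
    }
    // ParseHostPort("[2002:a9c:258e::]:2222", ...) -> host "[2002:a9c:258e::]",
    // port 2222; ParseHostPort("example.com/abc:2222", ...) -> false.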
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_channel_test.cc b/tensorflow/core/distributed_runtime/rpc/grpc_channel_test.cc
index a17acc85b3..f07a5a0974 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_channel_test.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_channel_test.cc
@@ -150,10 +150,15 @@ TEST(GrpcChannelTest, NewHostPortGrpcChannelValidation) {
EXPECT_TRUE(NewHostPortGrpcChannel("127.0.0.1:2222", &mock_ptr).ok());
EXPECT_TRUE(NewHostPortGrpcChannel("example.com:2222", &mock_ptr).ok());
EXPECT_TRUE(NewHostPortGrpcChannel("fqdn.example.com.:2222", &mock_ptr).ok());
+ EXPECT_TRUE(NewHostPortGrpcChannel("[2002:a9c:258e::]:2222", &mock_ptr).ok());
+ EXPECT_TRUE(NewHostPortGrpcChannel("[::]:2222", &mock_ptr).ok());
EXPECT_FALSE(NewHostPortGrpcChannel("example.com/abc:2222", &mock_ptr).ok());
EXPECT_FALSE(NewHostPortGrpcChannel("127.0.0.1:2222/", &mock_ptr).ok());
EXPECT_FALSE(NewHostPortGrpcChannel("example.com/abc:", &mock_ptr).ok());
+ EXPECT_FALSE(NewHostPortGrpcChannel("[::]/:2222", &mock_ptr).ok());
+ EXPECT_FALSE(NewHostPortGrpcChannel("[::]:2222/", &mock_ptr).ok());
+ EXPECT_FALSE(NewHostPortGrpcChannel("[::]:", &mock_ptr).ok());
}
} // namespace tensorflow
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc b/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc
index ff64d78b79..db14f6473e 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc
@@ -152,16 +152,14 @@ Status GrpcServer::Init(
" was not defined in job \"",
server_def_.job_name(), "\"");
}
- const std::vector<string> hostname_port =
- str_util::Split(iter->second, ':');
- if (hostname_port.size() != 2 ||
- !strings::safe_strto32(hostname_port[1], &requested_port)) {
+ auto colon_index = iter->second.find_last_of(':');
+ if (!strings::safe_strto32(iter->second.substr(colon_index + 1),
+ &requested_port)) {
return errors::InvalidArgument(
"Could not parse port for local server from \"", iter->second,
- "\"");
- } else {
- break;
+ "\".");
}
+ break;
}
}
if (requested_port == -1) {
@@ -289,12 +287,10 @@ Status GrpcServer::Init(
nullptr);
}
-
Status GrpcServer::Init(
ServiceInitFunction service_func,
const RendezvousMgrCreationFunction& rendezvous_mgr_func) {
- return Init(std::move(service_func), rendezvous_mgr_func, nullptr,
- nullptr);
+ return Init(std::move(service_func), rendezvous_mgr_func, nullptr, nullptr);
}
Status GrpcServer::Init() { return Init(nullptr, nullptr, nullptr, nullptr); }
@@ -345,11 +341,13 @@ Status GrpcServer::WorkerCacheFactory(const WorkerCacheFactoryOptions& options,
const string host_port = channel_cache_->TranslateTask(name_prefix);
int requested_port;
- if (!strings::safe_strto32(str_util::Split(host_port, ':')[1],
+ auto colon_index = host_port.find_last_of(':');
+ if (!strings::safe_strto32(host_port.substr(colon_index + 1),
&requested_port)) {
return errors::Internal("Could not parse port for local server from \"",
- channel_cache_->TranslateTask(name_prefix), "\".");
+ host_port, "\".");
}
+
if (requested_port != bound_port_) {
return errors::InvalidArgument("Requested port ", requested_port,
" differs from expected port ", bound_port_);
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h b/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h
index 115148b84e..3366246afb 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h
@@ -96,7 +96,7 @@ class GrpcServer : public ServerInterface {
Status Init(ServiceInitFunction service_func,
const RendezvousMgrCreationFunction& rendezvous_mgr_func,
const CollectiveMgrCreationFunction& collective_mgr_func);
-
+
Status Init(ServiceInitFunction service_func,
const RendezvousMgrCreationFunction& rendezvous_mgr_func);
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_session_test.cc b/tensorflow/core/distributed_runtime/rpc/grpc_session_test.cc
index 45b15a54a2..fc601991a2 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_session_test.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_session_test.cc
@@ -163,6 +163,39 @@ TEST(GrpcSessionTest, BasicCallable) {
}
}
+TEST(GrpcSessionTest, CallableWithOnDeviceFeedsAndFetches) {
+ // Specifying feeds/fetch devices for remote sessions is not yet defined.
+ // Ensure that the error is graceful.
+ GraphDef graph;
+ string node_names[3];
+ // c = a * b
+ CreateGraphDef(&graph, node_names);
+
+ std::unique_ptr<test::TestCluster> cluster;
+ TF_CHECK_OK(test::TestCluster::MakeTestCluster(Devices(1, 0), 2, &cluster));
+
+ std::unique_ptr<Session> session(
+ NewRemote(Options(cluster->targets()[0], 1)));
+ ASSERT_TRUE(session != nullptr);
+
+ TF_CHECK_OK(session->Create(graph));
+
+ std::vector<DeviceAttributes> devices;
+ TF_CHECK_OK(session->ListDevices(&devices));
+ ASSERT_GT(devices.size(), 0);
+ const string device_name = devices.back().name();
+
+ CallableOptions opts;
+ const string fetch = node_names[2] + ":0";
+ opts.add_fetch(fetch);
+ opts.mutable_fetch_devices()->insert({fetch, device_name});
+
+ Session::CallableHandle handle;
+ Status status = session->MakeCallable(opts, &handle);
+ EXPECT_EQ(error::UNIMPLEMENTED, status.code());
+ TF_CHECK_OK(session->Close());
+}
+
TEST(GrpcSessionTest, BasicNonProtoAPIConsistentOrder) {
GraphDef graph;
string node_names[3];
diff --git a/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.cc b/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.cc
index 5eeed6e382..45b989f6e2 100644
--- a/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.cc
+++ b/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.cc
@@ -99,6 +99,32 @@ void RpcCollectiveExecutorMgr::RefreshStepIdSequenceAsync(
}
}
+void RpcCollectiveExecutorMgr::GetStepSequenceAsync(
+ const GetStepSequenceRequest* request, GetStepSequenceResponse* response,
+ const StatusCallback& done) {
+ if (!group_leader_.empty()) {
+ LOG(ERROR) << "GetStepSequence called at non-group-leader";
+ done(errors::Internal("GetStepSequenceAsync called at non-group-leader"));
+ } else {
+ mutex_lock l(sequence_mu_);
+ for (int64 graph_key : request->graph_key()) {
+ auto it = sequence_table_.find(graph_key);
+ GraphKeySequence* gks = nullptr;
+ if (it == sequence_table_.end()) {
+ gks = new GraphKeySequence(graph_key);
+ gks->next_step_id_ = NewRandomStepId();
+ sequence_table_[graph_key] = gks;
+ } else {
+ gks = it->second;
+ }
+ StepSequence* ss = response->add_step_sequence();
+ ss->set_graph_key(graph_key);
+ ss->set_next_step_id(gks->next_step_id_);
+ }
+ done(Status::OK());
+ }
+}
+
Status RpcCollectiveExecutorMgr::UpdateStepSequences(
const GetStepSequenceResponse& resp) {
mutex_lock l(sequence_mu_);
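
GetStepSequenceAsync lazily creates a sequence record per graph key: a table lookup under sequence_mu_, with a miss populating a new GraphKeySequence seeded by NewRandomStepId(). The test added further down exercises exactly this (two keys, then a re-get returning the same values). A hedged sketch of the lazy table with plain std:: types (GraphKeySequence and NewRandomStepId here are stand-ins, not the TF declarations):

    #include <cstdint>
    #include <mutex>
    #include <random>
    #include <unordered_map>

    struct GraphKeySequence {
      explicit GraphKeySequence(int64_t key) : graph_key(key) {}
      int64_t graph_key;
      int64_t next_step_id = 0;
    };

    class SequenceTable {
     public:
      // Returns the next step id for graph_key, lazily creating the entry
      // with a random seed on first use: the shape of the new method.
      int64_t NextStepIdFor(int64_t graph_key) {
        std::lock_guard<std::mutex> l(mu_);  // plays the role of sequence_mu_
        auto it = table_.find(graph_key);
        if (it == table_.end()) {
          auto* gks = new GraphKeySequence(graph_key);
          gks->next_step_id = NewRandomStepId();
          it = table_.emplace(graph_key, gks).first;
        }
        return it->second->next_step_id;
      }
     private:
      static int64_t NewRandomStepId() {
        static std::mt19937_64 rng{std::random_device{}()};
        return static_cast<int64_t>(rng() >> 1);  // keep it non-negative
      }
      std::mutex mu_;
      // Entries live for the process lifetime, as in the original table.
      std::unordered_map<int64_t, GraphKeySequence*> table_;
    };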
diff --git a/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h b/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h
index e9f3f0ebe8..c9581fa00f 100644
--- a/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h
+++ b/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h
@@ -42,6 +42,12 @@ class RpcCollectiveExecutorMgr : public CollectiveExecutorMgr {
virtual ~RpcCollectiveExecutorMgr();
+ // This function should only be called at the group_leader, by an RPC.
+ // Other needs for StepIds should be satisfied by NextStepId.
+ void GetStepSequenceAsync(const GetStepSequenceRequest* request,
+ GetStepSequenceResponse* response,
+ const StatusCallback& done) override;
+
void RefreshStepIdSequenceAsync(int64 graph_key,
const StatusCallback& done) override;
diff --git a/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr_test.cc b/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr_test.cc
index 37b83d82be..0323300fdd 100644
--- a/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr_test.cc
+++ b/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr_test.cc
@@ -26,6 +26,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
+#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
@@ -121,4 +122,50 @@ TEST_F(RpcCollectiveExecutorMgrTest, NextStepId) {
EXPECT_GT(llabs(y - z), 3);
}
+TEST_F(RpcCollectiveExecutorMgrTest, GetStepSequence) {
+ int64 x = cme_->NextStepId(3);
+ EXPECT_EQ(x, CollectiveExecutor::kInvalidId);
+ int64 y = cme_->NextStepId(4);
+ EXPECT_EQ(y, CollectiveExecutor::kInvalidId);
+ GetStepSequenceRequest request;
+ GetStepSequenceResponse response;
+ request.add_graph_key(3);
+ request.add_graph_key(4);
+ {
+ Notification note;
+ Status status;
+ cme_->GetStepSequenceAsync(&request, &response,
+ [this, &status, &note](const Status& s) {
+ status = s;
+ note.Notify();
+ });
+ note.WaitForNotification();
+ EXPECT_TRUE(status.ok());
+ }
+ ASSERT_EQ(2, response.step_sequence_size());
+ std::unordered_map<int64, int64> values;
+ for (const auto& ss : response.step_sequence()) {
+ values[ss.graph_key()] = ss.next_step_id();
+ }
+ EXPECT_NE(values[3], CollectiveExecutor::kInvalidId);
+ EXPECT_NE(values[4], CollectiveExecutor::kInvalidId);
+ // Re-get, should be same values.
+ response.Clear();
+ {
+ Notification note;
+ Status status;
+ cme_->GetStepSequenceAsync(&request, &response,
+ [this, &status, &note](const Status& s) {
+ status = s;
+ note.Notify();
+ });
+ note.WaitForNotification();
+ EXPECT_TRUE(status.ok());
+ }
+ ASSERT_EQ(2, response.step_sequence_size());
+ for (const auto& ss : response.step_sequence()) {
+ EXPECT_EQ(values[ss.graph_key()], ss.next_step_id());
+ }
+}
+
} // namespace tensorflow
diff --git a/tensorflow/core/framework/api_def.proto b/tensorflow/core/framework/api_def.proto
index 3f8dd272e7..f8553cf5bb 100644
--- a/tensorflow/core/framework/api_def.proto
+++ b/tensorflow/core/framework/api_def.proto
@@ -30,6 +30,10 @@ import "tensorflow/core/framework/attr_value.proto";
message ApiDef {
// Name of the op (in the OpDef) to specify the API for.
string graph_op_name = 1;
+  // If this op is deprecated, set deprecation_message to the message
+  // that should be logged when this op is used.
+  // The message should indicate the alternative op to use, if any.
+ string deprecation_message = 12;
enum Visibility {
// Normally this is "VISIBLE" unless you are inheriting a
@@ -56,10 +60,10 @@ message ApiDef {
// use a snake_case convention instead of CamelCase.
string name = 1;
- // If this endpoint is deprecated, set deprecation_message to a
- // message that should be logged when the endpoint is used.
- // The message should indicate alternative endpoint to use, if any.
- string deprecation_message = 2;
+ // Set if this endpoint is deprecated. If set to true, a message suggesting
+ // to use a non-deprecated endpoint instead will be printed. If all
+ // endpoints are deprecated, set deprecation_message in ApiDef instead.
+ bool deprecated = 3;
}
repeated Endpoint endpoint = 3;
diff --git a/tensorflow/core/framework/collective.h b/tensorflow/core/framework/collective.h
index f8d27d3868..c3e6388e28 100644
--- a/tensorflow/core/framework/collective.h
+++ b/tensorflow/core/framework/collective.h
@@ -225,6 +225,7 @@ class PeerAccessInterface {
const AllocatorAttributes& to_alloc_attr,
Tensor* to_tensor,
const DeviceLocality& client_locality,
+ int dev_to_dev_stream_index,
const StatusCallback& done) = 0;
virtual void PostToPeer(const string& peer_device, const string& peer_task,
diff --git a/tensorflow/core/framework/common_shape_fns.cc b/tensorflow/core/framework/common_shape_fns.cc
index ed3318d841..21c6940b62 100644
--- a/tensorflow/core/framework/common_shape_fns.cc
+++ b/tensorflow/core/framework/common_shape_fns.cc
@@ -1231,11 +1231,13 @@ Status ConcatV2Shape(InferenceContext* c) {
c->num_inputs() - 1 /* dim_index */);
}
-Status BroadcastBinaryOpOutputShapeFn(InferenceContext* c, int output_index) {
- ShapeHandle shape_x = c->input(0);
- ShapeHandle shape_y = c->input(1);
+Status BroadcastBinaryOpOutputShapeFnHelper(InferenceContext* c,
+ ShapeHandle shape_x,
+ ShapeHandle shape_y,
+ ShapeHandle* out) {
+ CHECK_NOTNULL(out);
if (!c->RankKnown(shape_x) || !c->RankKnown(shape_y)) {
- c->set_output(0, c->UnknownShape());
+ *out = c->UnknownShape();
return Status::OK();
}
const int32 rank_x = c->Rank(shape_x);
@@ -1293,7 +1295,7 @@ Status BroadcastBinaryOpOutputShapeFn(InferenceContext* c, int output_index) {
}
}
- c->set_output(output_index, c->MakeShape(dims));
+ *out = c->MakeShape(dims);
return Status::OK();
}
diff --git a/tensorflow/core/framework/common_shape_fns.h b/tensorflow/core/framework/common_shape_fns.h
index 87bb133d92..2bedce1d6a 100644
--- a/tensorflow/core/framework/common_shape_fns.h
+++ b/tensorflow/core/framework/common_shape_fns.h
@@ -267,7 +267,22 @@ Status ConcatV2Shape(shape_inference::InferenceContext* c);
// Shape function for binary operators that broadcast their inputs
// and with output to output_index.
-Status BroadcastBinaryOpOutputShapeFn(InferenceContext* c, int output_index);
+// Note: out cannot be NULL.
+Status BroadcastBinaryOpOutputShapeFnHelper(InferenceContext* c,
+ ShapeHandle shape_x,
+ ShapeHandle shape_y,
+ ShapeHandle* out);
+
+// Shape function for binary operators that broadcast their inputs
+// and with output to output_index.
+inline Status BroadcastBinaryOpOutputShapeFn(InferenceContext* c,
+ int output_index) {
+ ShapeHandle out;
+ TF_RETURN_IF_ERROR(
+ BroadcastBinaryOpOutputShapeFnHelper(c, c->input(0), c->input(1), &out));
+ c->set_output(output_index, out);
+ return Status::OK();
+}
// Shape function for binary operators that broadcast their inputs.
// Tested by ops/math_ops_test.cc.
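The refactoring above lets custom shape functions broadcast arbitrary shape pairs, not just input(0) against input(1). A minimal sketch of a shape function built on the new helper (the op it would serve is hypothetical):

    #include "tensorflow/core/framework/common_shape_fns.h"
    #include "tensorflow/core/framework/shape_inference.h"
    #include "tensorflow/core/lib/core/errors.h"

    namespace tensorflow {

    // Broadcasts inputs 1 and 2, e.g. for a select-like op whose input 0 is
    // a predicate that does not participate in broadcasting.
    Status MySelectShapeFn(shape_inference::InferenceContext* c) {
      shape_inference::ShapeHandle out;
      TF_RETURN_IF_ERROR(shape_inference::BroadcastBinaryOpOutputShapeFnHelper(
          c, c->input(1), c->input(2), &out));
      c->set_output(0, out);
      return Status::OK();
    }

    }  // namespace tensorflow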
diff --git a/tensorflow/core/framework/graph_to_functiondef.cc b/tensorflow/core/framework/graph_to_functiondef.cc
index 4ffa503379..b2bc414c49 100644
--- a/tensorflow/core/framework/graph_to_functiondef.cc
+++ b/tensorflow/core/framework/graph_to_functiondef.cc
@@ -153,7 +153,7 @@ Status GraphToFunctionDef(const Graph& graph, const string& name,
const string normalized = node_names.Normalize(node->name());
argdef->set_name(normalized);
Edge const* edge;
- TF_CHECK_OK(node->input_edge(0, &edge));
+ TF_RETURN_IF_ERROR(node->input_edge(0, &edge));
return_values[normalized] =
strings::StrCat(edge->src()->name(), ":", edge->src_output());
continue;
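The change from TF_CHECK_OK to TF_RETURN_IF_ERROR converts a process-killing assertion into ordinary error propagation, which is the right behavior inside a Status-returning function. A sketch of the difference (the Example name is illustrative):

    #include "tensorflow/core/graph/graph.h"
    #include "tensorflow/core/lib/core/errors.h"

    // Inside any function returning tensorflow::Status:
    tensorflow::Status Example(const tensorflow::Node* node) {
      const tensorflow::Edge* edge;
      // TF_CHECK_OK(node->input_edge(0, &edge));      // would abort on error
      TF_RETURN_IF_ERROR(node->input_edge(0, &edge));  // propagates instead
      return tensorflow::Status::OK();
    }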
diff --git a/tensorflow/core/framework/kernel_def_util.cc b/tensorflow/core/framework/kernel_def_util.cc
new file mode 100644
index 0000000000..bbd3dd3e57
--- /dev/null
+++ b/tensorflow/core/framework/kernel_def_util.cc
@@ -0,0 +1,83 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/framework/kernel_def_util.h"
+
+#include "tensorflow/core/framework/attr_value.pb.h"
+#include "tensorflow/core/framework/attr_value_util.h"
+#include "tensorflow/core/framework/kernel_def.pb_text.h"
+#include "tensorflow/core/framework/node_def_util.h"
+#include "tensorflow/core/framework/types.h"
+
+namespace tensorflow {
+
+namespace {
+// Helper for KernelAttrsMatch().
+bool InTypeList(DataType dt, const AttrValue& type_list) {
+ for (int in_list : type_list.list().type()) {
+ if (dt == in_list) return true;
+ }
+ return false;
+}
+} // namespace
+
+Status KernelAttrsMatch(const KernelDef& kernel_def, AttrSlice attrs,
+ bool* match) {
+ *match = false;
+ for (const auto& constraint : kernel_def.constraint()) {
+ if (constraint.allowed_values().list().type_size() == 0) {
+ return errors::Unimplemented(
+ "KernelDef '", ProtoShortDebugString(kernel_def),
+ "' has constraint on attr '", constraint.name(),
+ "' with unsupported type: ",
+ SummarizeAttrValue(constraint.allowed_values()));
+ }
+
+ const AttrValue* found = attrs.Find(constraint.name());
+ if (found) {
+ if (found->type() != DT_INVALID) {
+ if (!InTypeList(found->type(), constraint.allowed_values())) {
+ return Status::OK();
+ }
+ } else {
+ if (!AttrValueHasType(*found, "list(type)").ok()) {
+ return errors::InvalidArgument(
+ "KernelDef '", ProtoShortDebugString(kernel_def),
+ "' has constraint on attr '", constraint.name(),
+ "' that has value '", SummarizeAttrValue(*found),
+ "' that does not have type 'type' or 'list(type)' in NodeDef "
+ "'",
+ attrs.SummarizeNode(), "'");
+ }
+
+ for (int t : found->list().type()) {
+ if (!InTypeList(static_cast<DataType>(t),
+ constraint.allowed_values())) {
+ return Status::OK();
+ }
+ }
+ }
+ } else {
+ return errors::InvalidArgument(
+ "OpKernel '", kernel_def.op(), "' has constraint on attr '",
+ constraint.name(), "' not in NodeDef '", attrs.SummarizeNode(),
+ "', KernelDef: '", ProtoShortDebugString(kernel_def), "'");
+ }
+ }
+ *match = true;
+ return Status::OK();
+}
+
+} // namespace tensorflow
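A short usage sketch of the extracted helper, given a KernelDef and a NodeDef (setup elided; AttrSlice is implicitly constructible from a NodeDef):

    #include "tensorflow/core/framework/kernel_def_util.h"

    // Returns true iff the node's attrs satisfy the kernel's constraints.
    // A non-OK status means the constraints are malformed for this node
    // (see the error cases above).
    bool KernelAccepts(const tensorflow::KernelDef& kernel_def,
                       const tensorflow::NodeDef& node_def) {
      bool match = false;
      tensorflow::Status s = tensorflow::KernelAttrsMatch(
          kernel_def, tensorflow::AttrSlice(node_def), &match);
      return s.ok() && match;
    }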
diff --git a/tensorflow/core/framework/kernel_def_util.h b/tensorflow/core/framework/kernel_def_util.h
new file mode 100644
index 0000000000..b973cefc4f
--- /dev/null
+++ b/tensorflow/core/framework/kernel_def_util.h
@@ -0,0 +1,31 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CORE_FRAMEWORK_KERNEL_DEF_UTIL_H_
+#define TENSORFLOW_CORE_FRAMEWORK_KERNEL_DEF_UTIL_H_
+
+#include "tensorflow/core/framework/kernel_def.pb.h"
+#include "tensorflow/core/framework/node_def_util.h"
+
+namespace tensorflow {
+
+// Returns whether the attrs satisfy the constraints in the kernel_def. Returns
+// an error if attrs in kernel_def are not found, or have a mismatching type.
+Status KernelAttrsMatch(const KernelDef& kernel_def, AttrSlice attrs,
+ bool* match);
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_CORE_FRAMEWORK_KERNEL_DEF_UTIL_H_
diff --git a/tensorflow/core/framework/kernel_def_util_test.cc b/tensorflow/core/framework/kernel_def_util_test.cc
new file mode 100644
index 0000000000..a2e4aa82fa
--- /dev/null
+++ b/tensorflow/core/framework/kernel_def_util_test.cc
@@ -0,0 +1,133 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/framework/kernel_def_util.h"
+
+#include "tensorflow/core/framework/kernel_def.pb.h"
+#include "tensorflow/core/framework/node_def.pb.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+
+namespace {
+
+NodeDef NodeDefFromText(const string& text) {
+ NodeDef node_def;
+ EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &node_def));
+ return node_def;
+}
+
+KernelDef KernelDefFromText(const string& text) {
+ KernelDef kernel_def;
+ EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &kernel_def));
+ return kernel_def;
+}
+
+class AttrsMatchTest : public ::testing::Test {
+ protected:
+ void ExpectStatus(const string& node_def_str, const string& kernel_def_str,
+ error::Code code) {
+ bool match;
+ auto status = KernelAttrsMatch(KernelDefFromText(kernel_def_str),
+ NodeDefFromText(node_def_str), &match);
+ LOG(INFO) << "status: " << status;
+ EXPECT_EQ(code, status.code());
+ if (!status.ok()) {
+ EXPECT_FALSE(match)
+ << "Expected no match between the given NodeDef and KernelDef";
+ }
+ }
+};
+
+TEST_F(AttrsMatchTest, ValidConstraint) {
+ string node_def_str = R"(
+ name: "ValidConstraint-op"
+ op: "ValidConstraint"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ )";
+ string kernel_def_str = R"(
+ op: "ValidConstraint"
+ device_type: "CPU"
+ constraint {
+ name: "T"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ }
+ }
+ }
+ )";
+ ExpectStatus(node_def_str, kernel_def_str, error::OK);
+}
+
+TEST_F(AttrsMatchTest, BadConstraint) {
+ string node_def_str = R"(
+ name: "BadConstraint-op"
+ op: "BadConstraint"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ )";
+ string kernel_def_str = R"(
+ op: "BadConstraint"
+ device_type: "CPU"
+ constraint {
+ name: "T"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ }
+ }
+ }
+ )";
+ ExpectStatus(node_def_str, kernel_def_str, error::INVALID_ARGUMENT);
+}
+
+TEST_F(AttrsMatchTest, Unimplemented) {
+ string node_def_str = R"(
+ name: "BadConstraint-op"
+ op: "BadConstraint"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ )";
+ string kernel_def_str = R"(
+ op: "BadConstraint"
+ device_type: "CPU"
+ constraint {
+ name: "T"
+ allowed_values {
+ list {
+ }
+ }
+ }
+ )";
+ ExpectStatus(node_def_str, kernel_def_str, error::UNIMPLEMENTED);
+}
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/core/framework/memory_types.cc b/tensorflow/core/framework/memory_types.cc
index 270118bb67..6dff6fe654 100644
--- a/tensorflow/core/framework/memory_types.cc
+++ b/tensorflow/core/framework/memory_types.cc
@@ -60,13 +60,18 @@ void MemoryTypesHelper(const NameRangeMap& name_map,
host_memory_args->resize(keep);
}
+bool IsFunctionCallOp(const string& op_type) {
+ return op_type == "SymbolicGradient" || op_type == "PartitionedCall" ||
+ op_type == "StatefulPartitionedCall";
+}
+
+} // namespace
+
MemoryType MTypeFromDType(const DataType dtype) {
return (dtype == DT_INT32 || DataTypeAlwaysOnHost(dtype)) ? HOST_MEMORY
: DEVICE_MEMORY;
}
-} // namespace
-
Status MemoryTypesForNode(const OpRegistryInterface* op_registry,
const DeviceType& device_type, const NodeDef& ndef,
MemoryTypeVector* inp_mtypes,
@@ -94,7 +99,7 @@ Status MemoryTypesForNode(const OpRegistryInterface* op_registry,
// TODO(zhifengc,phawkins): We should do type inference over function bodies
// to derive the correct input/output memory types. We should also split
// host-memory and non host-memory arguments into separate type lists.
- if (!status.ok() || ndef.op() == "SymbolicGradient") {
+ if (!status.ok() || IsFunctionCallOp(ndef.op())) {
for (const auto& t : inp_dtypes) inp_mtypes->push_back(MTypeFromDType(t));
for (const auto& t : out_dtypes) out_mtypes->push_back(MTypeFromDType(t));
return Status::OK();
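With IsFunctionCallOp in place, PartitionedCall and StatefulPartitionedCall now take the same dtype-based fallback path as SymbolicGradient. A condensed sketch of querying memory types for such a node (assumes a populated NodeDef):

    #include "tensorflow/core/framework/memory_types.h"
    #include "tensorflow/core/framework/op.h"
    #include "tensorflow/core/lib/core/errors.h"

    tensorflow::Status QueryMemoryTypes(const tensorflow::NodeDef& node_def) {
      tensorflow::MemoryTypeVector inp_mtypes, out_mtypes;
      TF_RETURN_IF_ERROR(tensorflow::MemoryTypesForNode(
          tensorflow::OpRegistry::Global(),
          tensorflow::DeviceType(tensorflow::DEVICE_GPU), node_def,
          &inp_mtypes, &out_mtypes));
      // For function-call ops the vectors are filled via MTypeFromDType:
      // DT_INT32 and always-on-host types map to HOST_MEMORY.
      return tensorflow::Status::OK();
    }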
diff --git a/tensorflow/core/framework/op_kernel.cc b/tensorflow/core/framework/op_kernel.cc
index c2561b5019..58feec90f0 100644
--- a/tensorflow/core/framework/op_kernel.cc
+++ b/tensorflow/core/framework/op_kernel.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/graph.pb_text.h"
#include "tensorflow/core/framework/kernel_def.pb_text.h"
+#include "tensorflow/core/framework/kernel_def_util.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def.pb.h"
@@ -262,11 +263,13 @@ OpKernelContext::OpKernelContext(Params* params, int num_outputs)
outputs_(num_outputs),
temp_memory_allocated_(0),
persistent_memory_allocated_(0) {
- Allocator* eigen_gpu_allocator = get_allocator(AllocatorAttributes());
params_->ensure_eigen_gpu_device();
- params_->device->ReinitializeGpuDevice(this, params_->eigen_gpu_device,
- params_->op_device_context,
- eigen_gpu_allocator);
+ if (params_->eigen_gpu_device != nullptr) {
+ Allocator* eigen_gpu_allocator = get_allocator(AllocatorAttributes());
+ params_->device->ReinitializeGpuDevice(this, params_->eigen_gpu_device,
+ params_->op_device_context,
+ eigen_gpu_allocator);
+ }
if (params_->record_tensor_accesses) {
referenced_tensors_.Init();
}
@@ -969,62 +972,6 @@ void OpKernelRegistrar::InitInternal(const KernelDef* kernel_def,
namespace {
-// Helper for AttrsMatch().
-bool InTypeList(DataType dt, const AttrValue& type_list) {
- for (int in_list : type_list.list().type()) {
- if (dt == in_list) return true;
- }
- return false;
-}
-
-// Returns whether the attrs satisfy the constraints in the kernel_def. Returns
-// an error if attrs in kernel_def are not found, or have a mismatching type.
-Status AttrsMatch(AttrSlice attrs, const KernelDef& kernel_def, bool* match) {
- *match = false;
- for (const auto& constraint : kernel_def.constraint()) {
- if (constraint.allowed_values().list().type_size() == 0) {
- return errors::Unimplemented(
- "KernelDef '", ProtoShortDebugString(kernel_def),
- " has constraint on attr '", constraint.name(),
- "' with unsupported type: ",
- SummarizeAttrValue(constraint.allowed_values()));
- }
-
- const AttrValue* found = attrs.Find(constraint.name());
- if (found) {
- if (found->type() != DT_INVALID) {
- if (!InTypeList(found->type(), constraint.allowed_values())) {
- return Status::OK();
- }
- } else {
- if (!AttrValueHasType(*found, "list(type)").ok()) {
- return errors::InvalidArgument(
- "KernelDef '", ProtoShortDebugString(kernel_def),
- "' has constraint on attr '", constraint.name(),
- "' that has value '", SummarizeAttrValue(*found),
- "' that does not have type 'type' or 'list(type)' in NodeDef "
- "'",
- attrs.SummarizeNode(), "'");
- }
-
- for (int t : found->list().type()) {
- if (!InTypeList(static_cast<DataType>(t),
- constraint.allowed_values())) {
- return Status::OK();
- }
- }
- }
- } else {
- return errors::InvalidArgument(
- "OpKernel '", kernel_def.op(), "' has constraint on attr '",
- constraint.name(), "' not in NodeDef '", attrs.SummarizeNode(),
- "', KernelDef: '", ProtoShortDebugString(kernel_def), "'");
- }
- }
- *match = true;
- return Status::OK();
-}
-
static const StringPiece kKernelAttr("_kernel");
// TODO(irving): Replace with const Node& version below.
@@ -1043,7 +990,7 @@ Status FindKernelRegistration(const DeviceType& device_type,
// If there is a kernel registered for the op and device_type,
// check that the attrs match.
bool match;
- TF_RETURN_IF_ERROR(AttrsMatch(node_def, iter->second.def, &match));
+ TF_RETURN_IF_ERROR(KernelAttrsMatch(iter->second.def, node_def, &match));
if (match) {
if (*reg != nullptr) {
return errors::InvalidArgument(
diff --git a/tensorflow/core/framework/op_kernel.h b/tensorflow/core/framework/op_kernel.h
index 6c4c3a2ac1..d9fe42fcbb 100644
--- a/tensorflow/core/framework/op_kernel.h
+++ b/tensorflow/core/framework/op_kernel.h
@@ -1044,7 +1044,6 @@ class OpKernelContext {
// For control flow.
FrameAndIter frame_iter() const { return params_->frame_iter; }
bool is_input_dead() const { return params_->is_input_dead; }
- bool* is_output_dead() { return &is_output_dead_; }
// May be used, e.g., to get GPU handles, etc.
// TODO(tucker): Add example usage.
@@ -1143,8 +1142,6 @@ class OpKernelContext {
// Constructed only if <params->record_tensor_accesses>.
ManualConstructor<UniqueTensorReferences> referenced_tensors_ GUARDED_BY(mu_);
- bool is_output_dead_ = false;
-
// The following data members are only used when allocation tracking is
// enabled.
mutable mutex stats_mu_;
diff --git a/tensorflow/core/framework/resource_op_kernel.h b/tensorflow/core/framework/resource_op_kernel.h
index 813ec6eed5..0a8da8b3bf 100644
--- a/tensorflow/core/framework/resource_op_kernel.h
+++ b/tensorflow/core/framework/resource_op_kernel.h
@@ -43,9 +43,15 @@ template <typename T>
class ResourceOpKernel : public OpKernel {
public:
explicit ResourceOpKernel(OpKernelConstruction* context) : OpKernel(context) {
- OP_REQUIRES_OK(context,
- context->allocate_persistent(DT_STRING, TensorShape({2}),
- &handle_, nullptr));
+ has_resource_type_ = (context->output_type(0) == DT_RESOURCE);
+ if (!has_resource_type_) {
+ // The resource variant of the op may be placed on non-CPU devices, but
+ // this allocation is always on the host. Fortunately we don't need it in
+ // the resource case.
+ OP_REQUIRES_OK(context,
+ context->allocate_persistent(DT_STRING, TensorShape({2}),
+ &handle_, nullptr));
+ }
}
// The resource is deleted from the resource manager only when it is private
@@ -89,12 +95,14 @@ class ResourceOpKernel : public OpKernel {
return;
}
- auto h = handle_.AccessTensor(context)->template flat<string>();
- h(0) = cinfo_.container();
- h(1) = cinfo_.name();
+ if (!has_resource_type_) {
+ auto h = handle_.AccessTensor(context)->template flat<string>();
+ h(0) = cinfo_.container();
+ h(1) = cinfo_.name();
+ }
resource_ = resource;
}
- if (context->expected_output_dtype(0) == DT_RESOURCE) {
+ if (has_resource_type_) {
OP_REQUIRES_OK(context, MakeResourceHandleToOutput(
context, 0, cinfo_.container(), cinfo_.name(),
MakeTypeIndex<T>()));
@@ -122,6 +130,9 @@ class ResourceOpKernel : public OpKernel {
virtual Status VerifyResource(T* resource) { return Status::OK(); }
PersistentTensor handle_ GUARDED_BY(mu_);
+
+ // Is the output of the operator of type DT_RESOURCE?
+ bool has_resource_type_;
};
} // namespace tensorflow
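For context, implementers only override CreateResource (and optionally VerifyResource); whether the string handle is allocated is now decided once in the constructor. A hypothetical minimal subclass:

    #include "tensorflow/core/framework/resource_op_kernel.h"

    class MyResource : public tensorflow::ResourceBase {
     public:
      std::string DebugString() override { return "MyResource"; }
    };

    class MyResourceOp : public tensorflow::ResourceOpKernel<MyResource> {
     public:
      explicit MyResourceOp(tensorflow::OpKernelConstruction* context)
          : ResourceOpKernel(context) {}
      // If output 0 is DT_RESOURCE, has_resource_type_ is true and the
      // DT_STRING handle tensor above is never allocated.

     private:
      tensorflow::Status CreateResource(MyResource** ret) override {
        *ret = new MyResource();  // Ownership passes to the resource manager.
        return tensorflow::Status::OK();
      }
    };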
diff --git a/tensorflow/core/framework/stats_aggregator.h b/tensorflow/core/framework/stats_aggregator.h
index 8002d9291c..4a18efc940 100644
--- a/tensorflow/core/framework/stats_aggregator.h
+++ b/tensorflow/core/framework/stats_aggregator.h
@@ -57,6 +57,10 @@ class StatsAggregator {
// interface. It is possible that not all implementations will support
// encoding their state as a protocol buffer.
virtual void EncodeToProto(Summary* out_summary) = 0;
+
+ // Increment the `label` cell of the counter metric registered under `name`
+ // by the given `val`.
+ virtual void IncrementCounter(const string& name, const string& label,
+ int64 val) = 0;
};
// A `StatsAggregatorResource` wraps a shareable `StatsAggregator` as a resource
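A usage sketch for the new hook (metric and label names are illustrative; `agg` is any concrete StatsAggregator):

    // Count one more record flowing through a hypothetical tf.data stage.
    agg->IncrementCounter("tf_data_records", /*label=*/"my_stage", /*val=*/1);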
diff --git a/tensorflow/core/framework/types.h b/tensorflow/core/framework/types.h
index ded6aa0991..ff7c9855d6 100644
--- a/tensorflow/core/framework/types.h
+++ b/tensorflow/core/framework/types.h
@@ -470,6 +470,10 @@ inline bool DataTypeIsUnsigned(DataType dt) {
// Returns a 0 on failure
int DataTypeSize(DataType dt);
+// Returns HOST_MEMORY if `dtype` is always on host or is DT_INT32,
+// DEVICE_MEMORY otherwise.
+MemoryType MTypeFromDType(const DataType dtype);
+
// Types that always sit on host: DT_STRING, DT_STRING_REF, DT_RESOURCE.
// For DT_RESOURCE, the handle always sits on host (even if the underlying
// object has device-allocated resources).
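The documented contract, written out as checks (a sketch, not a test in this patch):

    #include "tensorflow/core/framework/types.h"
    #include "tensorflow/core/platform/logging.h"

    void CheckMTypeFromDTypeContract() {
      using namespace tensorflow;
      CHECK_EQ(MTypeFromDType(DT_INT32), HOST_MEMORY);     // special-cased
      CHECK_EQ(MTypeFromDType(DT_STRING), HOST_MEMORY);    // always on host
      CHECK_EQ(MTypeFromDType(DT_RESOURCE), HOST_MEMORY);  // handle on host
      CHECK_EQ(MTypeFromDType(DT_FLOAT), DEVICE_MEMORY);
    }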
diff --git a/tensorflow/core/graph/tensor_id.h b/tensorflow/core/graph/tensor_id.h
index bf13fc78a6..0ba3942618 100644
--- a/tensorflow/core/graph/tensor_id.h
+++ b/tensorflow/core/graph/tensor_id.h
@@ -62,12 +62,10 @@ TensorId ParseTensorName(StringPiece name);
struct SafeTensorId : public std::pair<string, int> {
typedef std::pair<string, int> Base;
- // Inherit the set of constructors.
- using Base::pair;
-
// NOTE(skyewm): this is required on some platforms. I'm not sure why the
- // using statement above isn't always sufficient.
+ // using-declaration "using Base::pair;" isn't always sufficient.
SafeTensorId() : Base() {}
+ SafeTensorId(const string& str, int idx) : Base(str, idx) {}
SafeTensorId(const TensorId& id);
string ToString() const {
diff --git a/tensorflow/core/grappler/costs/BUILD b/tensorflow/core/grappler/costs/BUILD
index b054068299..f3dc2c2091 100644
--- a/tensorflow/core/grappler/costs/BUILD
+++ b/tensorflow/core/grappler/costs/BUILD
@@ -41,6 +41,7 @@ cc_library(
visibility = ["//visibility:public"],
deps = [
":utils",
+ "//tensorflow/core/grappler/utils:functions",
"//tensorflow/core/grappler/utils:topological_sort",
"//tensorflow/core/grappler:graph_view",
"//tensorflow/core/grappler:op_types",
diff --git a/tensorflow/core/grappler/costs/graph_properties.cc b/tensorflow/core/grappler/costs/graph_properties.cc
index 0c02876ac5..83a8326e79 100644
--- a/tensorflow/core/grappler/costs/graph_properties.cc
+++ b/tensorflow/core/grappler/costs/graph_properties.cc
@@ -28,6 +28,7 @@ limitations under the License.
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
+#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/strings/str_util.h"
@@ -422,11 +423,108 @@ class SymbolicShapeRefiner {
return it->second.inference_context.get();
}
- // Forward the shapes from the function's fanin to the function body,
- // then call PropagateShapes.
- // Returns an error if 'node' is not a function node.
- Status UpdateFunction(const NodeDef* node, bool* refined) {
- return UpdateNode(node, refined);
+ // Forward the shapes from the function input nodes to
+ // the argument nodes (which are Placeholder nodes), then
+ // perform shape inference on the function body.
+ //
+ // Propagate shape information of the final function body node
+ // to the function node `node`.
+ //
+ // In the event of an error, UpdateNode will simply set `node`'s
+ // output shape to be Unknown.
+ Status UpdateFunction(const NodeDef* node) {
+ auto it = fun_to_grappler_function_item_.find(node->op());
+ if (it == fun_to_grappler_function_item_.end()) {
+ return errors::InvalidArgument(
+ node->op(), " was not previously added to SymbolicShapeRefiner.");
+ }
+
+ GrapplerFunctionItem& grappler_function_item = it->second;
+ GraphView gv(&grappler_function_item.graph);
+
+ // Forward shapes from function input nodes to argument nodes.
+ for (int i = 0; i < grappler_function_item.inputs().size(); ++i) {
+ auto& fun_input = grappler_function_item.input(i);
+ if (fun_input.placeholders.size() > 1) {
+ // TODO(jmdecker): Handle case with multiple input placeholders
+ return errors::Unimplemented(
+ "Input arguments with multiple placeholders are not yet "
+ "supported.");
+ }
+ NodeDef* fun_node = gv.GetNode(fun_input.input_name);
+ const string& input = node->input(i);
+ const string& node_name = NodeName(input);
+
+ if (IsControlInput(input)) {
+ return errors::FailedPrecondition(
+ "Function inputs should not contain control nodes.");
+ }
+
+ NodeDef* input_node = graph_.GetNode(node_name);
+ if (input_node == nullptr) {
+ return errors::FailedPrecondition(node_name,
+ " was not found in the graph.");
+ }
+
+ InferenceContext* input_inference_context = GetContext(input_node);
+ if (input_inference_context == nullptr) {
+ return errors::FailedPrecondition(
+ "Inference context has not been created for ", node_name);
+ }
+
+ int output_port_num = NodePosition(input);
+ AttrValue attr_output_shape;
+ TensorShapeProto proto;
+ const auto& handle = input_inference_context->output(output_port_num);
+ input_inference_context->ShapeHandleToProto(handle, &proto);
+ *attr_output_shape.mutable_shape() = proto;
+ (*fun_node->mutable_attr())["shape"] = attr_output_shape;
+ }
+
+ // Perform inference on function body.
+ GraphProperties gp(grappler_function_item);
+ TF_RETURN_IF_ERROR(gp.InferStatically(true));
+
+ // Add return nodes for output shapes.
+ auto ic = GetContext(node);
+ int output = 0;
+ for (auto const& out_arg : grappler_function_item.outputs()) {
+ if (out_arg.output_tensors.size() > 1) {
+ // TODO(jmdecker): Handle case of multiple output tensors
+ return errors::Unimplemented(
+ "Output arguments with multiple output tensors are not yet "
+ "supported.");
+ }
+
+ string out_tensor = out_arg.output_tensors[0];
+ auto out_tensor_pieces = str_util::Split(out_tensor, ":");
+ string node_name = out_tensor_pieces[0];
+ int port_id;
+
+ // Check if port_id was included in out_tensor
+ if (out_tensor_pieces.size() <= 1) {
+ port_id = 0;
+ } else if (!strings::safe_strto32(out_tensor_pieces[1], &port_id)) {
+ return errors::FailedPrecondition(
+ "Failed string to integer conversion for ", out_tensor_pieces[1]);
+ }
+
+ const NodeDef* retnode = gv.GetNode(node_name);
+ if (retnode == nullptr) {
+ return errors::FailedPrecondition("Unable to find return node ",
+ node_name, " for ", node->name());
+ }
+
+ auto output_properties = gp.GetOutputProperties(retnode->name());
+ auto const& outprop = output_properties[port_id];
+ const TensorShapeProto& shape = outprop.shape();
+ ShapeHandle out;
+ TF_RETURN_IF_ERROR(ic->MakeShapeFromShapeProto(shape, &out));
+ ic->set_output(output, out);
+ output++;
+ }
+
+ return Status::OK();
}
Status UpdateNode(const NodeDef* node, bool* refined) {
@@ -436,6 +534,7 @@ class SymbolicShapeRefiner {
node_context = CHECK_NOTNULL(GetNodeContext(node));
*refined = true;
}
+
// Check if the shapes of the nodes in the fan-in of this node have changed,
// and if they have, update the node input shapes.
InferenceContext* inference_context = node_context->inference_context.get();
@@ -455,7 +554,8 @@ class SymbolicShapeRefiner {
if (c == nullptr) {
return errors::FailedPrecondition(
"Input ", dst_input, " ('", input->name(), "') for '",
- node->name(), "' was not previously added to ShapeRefiner.");
+ node->name(),
+ "' was not previously added to SymbolicShapeRefiner.");
}
if (IsConstant(*input)) {
@@ -565,6 +665,21 @@ class SymbolicShapeRefiner {
node_context->inference_context->set_input_tensors_as_shapes(
input_tensors_as_shapes);
+ // Properly handle function nodes.
+ if (node_context->op_data && node_context->op_data->is_function_op) {
+ // TODO(jmdecker): Detect if the input shapes have changed for this
+ // function. Note that when we hit a function call node, refined will be
+ // true, as the call node's inputs will have been updated, even if it's
+ // the same function being called twice with the same input shapes.
+ // Example: simple_function.pbtxt
+ if (UpdateFunction(node).ok()) {
+ return Status::OK();
+ } else {
+ VLOG(1) << "UpdateFunction failed for " << node->op()
+ << ". Defaulting to ShapeUnknown.";
+ }
+ }
+
// Update the shapes of the outputs.
return InferShapes(*node, node_context);
}
@@ -681,7 +796,39 @@ class SymbolicShapeRefiner {
return true;
}
- Status AddFunction(const NodeDef* node) { return Status::OK(); }
+ Status AddFunction(const NodeDef* function_node) {
+ auto it = fun_to_grappler_function_item_.find(function_node->op());
+ if (it != fun_to_grappler_function_item_.end()) {
+ return Status::OK();
+ }
+
+ const FunctionDef* function_def =
+ CHECK_NOTNULL(function_library_.Find(function_node->op()));
+
+ GrapplerFunctionItem grappler_function_item;
+ TF_RETURN_IF_ERROR(MakeGrapplerFunctionItem(
+ *function_def, function_library_, &grappler_function_item));
+
+ if (grappler_function_item.inputs().size() > function_node->input_size()) {
+ return errors::FailedPrecondition(
+ "Function input size should not be larger than node input size.");
+ }
+
+ for (int i = grappler_function_item.inputs().size();
+ i < function_node->input_size(); ++i) {
+ const string& input = function_node->input(i);
+ if (!IsControlInput(input)) {
+ return errors::FailedPrecondition(
+ "Found regular input (", input,
+ ") instead of control nodes for node ", function_node->name());
+ }
+ }
+
+ fun_to_grappler_function_item_[function_def->signature().name()] =
+ grappler_function_item;
+
+ return Status::OK();
+ }
Status AddNode(const NodeDef* node) {
NodeContext& node_ctx = node_to_context_[node];
@@ -911,6 +1058,8 @@ class SymbolicShapeRefiner {
std::unordered_map<const NodeDef*, NodeContext> node_to_context_;
std::unordered_map<ShapeId, ShapeHandle, HashShapeId> unknown_shapes_;
std::unordered_map<DimId, DimensionHandle, HashDimId> unknown_dims_;
+ std::unordered_map<string, GrapplerFunctionItem>
+ fun_to_grappler_function_item_;
FunctionLibraryDefinition function_library_;
const std::unordered_map<string, std::unordered_set<int>>& fed_ports_;
};
@@ -1082,13 +1231,9 @@ Status GraphProperties::UpdateShapes(
// Set shapes and types of Queue ops, if needed.
TF_RETURN_IF_ERROR(UpdateQueue(n, shape_refiner, new_shapes));
} else {
- auto c = shape_refiner->GetNodeContext(n);
- if (c && c->op_data && c->op_data->is_function_op) {
- TF_RETURN_IF_ERROR(shape_refiner->UpdateFunction(n, new_shapes));
- } else {
- // Rely on regular TF shape refinement for all the other nodes.
- TF_RETURN_IF_ERROR(shape_refiner->UpdateNode(n, new_shapes));
- }
+ // Rely on regular TF shape refinement for all the other nodes.
+ // UpdateNode calls UpdateFunction if a function node is detected.
+ TF_RETURN_IF_ERROR(shape_refiner->UpdateNode(n, new_shapes));
}
return Status::OK();
}
diff --git a/tensorflow/core/grappler/costs/graph_properties_test.cc b/tensorflow/core/grappler/costs/graph_properties_test.cc
index aa787ae620..1be19d291a 100644
--- a/tensorflow/core/grappler/costs/graph_properties_test.cc
+++ b/tensorflow/core/grappler/costs/graph_properties_test.cc
@@ -783,7 +783,7 @@ TEST_F(GraphPropertiesTest, InferRestoreOpShape_WithTwoNodesShareSameOutput) {
EXPECT_EQ("float: [128,256]", PropToString(prop));
}
-TEST_F(GraphPropertiesTest, FunctionStaticShapeInference) {
+TEST_F(GraphPropertiesTest, SimpleFunctionStaticShapeInference) {
// Test graph produced in python using:
/*
@function.Defun(*[tf.float32] * 2, noinline=True)
@@ -796,7 +796,6 @@ TEST_F(GraphPropertiesTest, FunctionStaticShapeInference) {
z = MyAdd(x, y)
z = MyAdd(x, z)
*/
- // Check that the shape inference code infers what it can.
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"simple_function.pbtxt");
@@ -806,15 +805,258 @@ TEST_F(GraphPropertiesTest, FunctionStaticShapeInference) {
const auto out_props = properties.GetOutputProperties("MyAdd_55e046a8");
const OpInfo::TensorProperties& out_prop = out_props[0];
EXPECT_EQ(DT_FLOAT, out_prop.dtype());
- EXPECT_TRUE(out_prop.shape().unknown_rank());
+ EXPECT_FALSE(out_prop.shape().unknown_rank());
+ EXPECT_EQ(2, out_prop.shape().dim_size());
+ EXPECT_EQ(1, out_prop.shape().dim(0).size());
+ EXPECT_EQ(2, out_prop.shape().dim(1).size());
const auto in_props = properties.GetInputProperties("MyAdd_55e046a8");
+ EXPECT_EQ(2, in_props.size());
+
+ const OpInfo::TensorProperties& in_prop = in_props[0];
+ EXPECT_EQ(DT_FLOAT, in_prop.dtype());
+ EXPECT_FALSE(in_prop.shape().unknown_rank());
+ EXPECT_EQ(2, in_prop.shape().dim_size());
+ EXPECT_EQ(1, in_prop.shape().dim(0).size());
+ EXPECT_EQ(2, in_prop.shape().dim(1).size());
+
+ const OpInfo::TensorProperties& in_prop1 = in_props[1];
+ EXPECT_EQ(DT_FLOAT, in_prop1.dtype());
+ EXPECT_FALSE(in_prop1.shape().unknown_rank());
+ EXPECT_EQ(2, in_prop1.shape().dim_size());
+ EXPECT_EQ(1, in_prop1.shape().dim(0).size());
+ EXPECT_EQ(2, in_prop1.shape().dim(1).size());
+}
+
+TEST_F(GraphPropertiesTest, LargeFunctionStaticShapeInference) {
+ GrapplerItem item;
+ string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
+ "large_function_graph.pbtxt");
+ TF_CHECK_OK(ReadGraphDefFromFile(filename, &item.graph));
+ GraphProperties properties(item);
+ TF_CHECK_OK(properties.InferStatically(false));
+
+ const auto out_props = properties.GetOutputProperties("y0");
+ EXPECT_EQ(2, out_props.size());
+
+ const OpInfo::TensorProperties& out_prop0 = out_props[0];
+ EXPECT_EQ(DT_FLOAT, out_prop0.dtype());
+ EXPECT_EQ(4, out_prop0.shape().dim_size());
+ EXPECT_EQ(128, out_prop0.shape().dim(0).size());
+ EXPECT_EQ(112, out_prop0.shape().dim(1).size());
+ EXPECT_EQ(112, out_prop0.shape().dim(2).size());
+ EXPECT_EQ(64, out_prop0.shape().dim(3).size());
+
+ const OpInfo::TensorProperties& out_prop1 = out_props[1];
+ EXPECT_EQ(DT_FLOAT, out_prop1.dtype());
+ EXPECT_EQ(128, out_prop1.shape().dim(0).size());
+ EXPECT_EQ(112, out_prop1.shape().dim(1).size());
+ EXPECT_EQ(112, out_prop1.shape().dim(2).size());
+ EXPECT_EQ(24, out_prop1.shape().dim(3).size());
+
+ const auto in_props = properties.GetInputProperties("y0");
+ EXPECT_EQ(4, in_props.size());
+
+ const OpInfo::TensorProperties& in_prop0 = in_props[0];
+ EXPECT_EQ(DT_FLOAT, in_prop0.dtype());
+ EXPECT_EQ(1, in_prop0.shape().dim_size());
+ EXPECT_EQ(64, in_prop0.shape().dim(0).size());
+
+ const OpInfo::TensorProperties& in_prop1 = in_props[1];
+ EXPECT_EQ(DT_FLOAT, in_prop1.dtype());
+ EXPECT_EQ(4, in_prop1.shape().dim_size());
+ EXPECT_EQ(1, in_prop1.shape().dim(0).size());
+ EXPECT_EQ(1, in_prop1.shape().dim(1).size());
+ EXPECT_EQ(24, in_prop1.shape().dim(2).size());
+ EXPECT_EQ(64, in_prop1.shape().dim(3).size());
+
+ const OpInfo::TensorProperties& in_prop2 = in_props[2];
+ EXPECT_EQ(DT_FLOAT, in_prop2.dtype());
+ EXPECT_EQ(4, in_prop2.shape().dim_size());
+ EXPECT_EQ(128, in_prop2.shape().dim(0).size());
+ EXPECT_EQ(224, in_prop2.shape().dim(1).size());
+ EXPECT_EQ(224, in_prop2.shape().dim(2).size());
+ EXPECT_EQ(3, in_prop2.shape().dim(3).size());
+
+ const OpInfo::TensorProperties& in_prop3 = in_props[3];
+ EXPECT_EQ(DT_FLOAT, in_prop3.dtype());
+ EXPECT_EQ(4, in_prop3.shape().dim_size());
+ EXPECT_EQ(7, in_prop3.shape().dim(0).size());
+ EXPECT_EQ(7, in_prop3.shape().dim(1).size());
+ EXPECT_EQ(3, in_prop3.shape().dim(2).size());
+ EXPECT_EQ(8, in_prop3.shape().dim(3).size());
+}
+
+TEST_F(GraphPropertiesTest, FunctionWithErrorStaticShapeInference) {
+ GrapplerItem item;
+ string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
+ "function_error.pbtxt");
+ TF_CHECK_OK(ReadGraphDefFromFile(filename, &item.graph));
+ GraphProperties properties(item);
+ TF_CHECK_OK(properties.InferStatically(false));
+
+ const auto out_props = properties.GetOutputProperties("MyAdd_yabA4wXEdM4");
+ EXPECT_EQ(1, out_props.size());
+
+ const OpInfo::TensorProperties& out_prop = out_props[0];
+ EXPECT_EQ(DT_FLOAT, out_prop.dtype());
+ EXPECT_TRUE(out_prop.shape().unknown_rank());
+
+ const auto in_props = properties.GetInputProperties("MyAdd_yabA4wXEdM4");
+ EXPECT_EQ(2, in_props.size());
+
const OpInfo::TensorProperties& in_prop = in_props[0];
EXPECT_EQ(DT_FLOAT, in_prop.dtype());
EXPECT_FALSE(in_prop.shape().unknown_rank());
EXPECT_EQ(2, in_prop.shape().dim_size());
EXPECT_EQ(1, in_prop.shape().dim(0).size());
EXPECT_EQ(2, in_prop.shape().dim(1).size());
+
+ const OpInfo::TensorProperties& in_prop1 = in_props[1];
+ EXPECT_EQ(DT_FLOAT, in_prop1.dtype());
+ EXPECT_FALSE(in_prop1.shape().unknown_rank());
+ EXPECT_EQ(2, in_prop1.shape().dim_size());
+ EXPECT_EQ(1, in_prop1.shape().dim(0).size());
+ EXPECT_EQ(2, in_prop1.shape().dim(1).size());
+}
+
+TEST_F(GraphPropertiesTest, FunctionSwitchStaticShapeInference) {
+ // Test graph produced in python using:
+ /*
+ @function.Defun(*[tf.float32] * 2, noinline=True)
+ def MyAdd(x, y):
+ return tf.add(x, y)
+
+ with tf.Graph().as_default():
+ x = lambda: tf.constant(2.0, shape=[1, 2], dtype=tf.float32)
+ y = lambda: tf.constant(2.0, shape=[1, 2], dtype=tf.float32)
+ z = tf.constant(2.0, shape=[1, 2], dtype=tf.float32)
+ z2 = MyAdd(tf.case([(tf.less(0, 1), x)], default=y), z)
+ */
+ GrapplerItem item;
+ string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
+ "function_switch.pbtxt");
+ TF_CHECK_OK(ReadGraphDefFromFile(filename, &item.graph));
+ GraphProperties properties(item);
+ TF_CHECK_OK(properties.InferStatically(false));
+ const auto out_props = properties.GetOutputProperties("MyAdd_MPaeanipb7o");
+ const OpInfo::TensorProperties& out_prop = out_props[0];
+ EXPECT_EQ(DT_FLOAT, out_prop.dtype());
+ EXPECT_FALSE(out_prop.shape().unknown_rank());
+ EXPECT_EQ(2, out_prop.shape().dim_size());
+ EXPECT_EQ(1, out_prop.shape().dim(0).size());
+ EXPECT_EQ(2, out_prop.shape().dim(1).size());
+
+ const auto in_props = properties.GetInputProperties("MyAdd_MPaeanipb7o");
+ EXPECT_EQ(2, in_props.size());
+
+ const OpInfo::TensorProperties& in_prop = in_props[0];
+ EXPECT_EQ(DT_FLOAT, in_prop.dtype());
+ EXPECT_FALSE(in_prop.shape().unknown_rank());
+ EXPECT_EQ(2, in_prop.shape().dim_size());
+ EXPECT_EQ(1, in_prop.shape().dim(0).size());
+ EXPECT_EQ(2, in_prop.shape().dim(1).size());
+
+ const OpInfo::TensorProperties& in_prop1 = in_props[1];
+ EXPECT_EQ(DT_FLOAT, in_prop1.dtype());
+ EXPECT_FALSE(in_prop1.shape().unknown_rank());
+ EXPECT_EQ(2, in_prop1.shape().dim_size());
+ EXPECT_EQ(1, in_prop1.shape().dim(0).size());
+ EXPECT_EQ(2, in_prop1.shape().dim(1).size());
+}
+
+TEST_F(GraphPropertiesTest, FunctionSwitch2StaticShapeInference) {
+ // Test graph produced in python using:
+ /*
+ @function.Defun(*[tf.float32] * 2, noinline=True)
+ def MyAdd(x, y):
+ return tf.add(x, y)
+
+ with tf.Graph().as_default():
+ x = lambda: tf.constant(2.0, shape=[1, 2], dtype=tf.float32)
+ y = lambda: tf.constant(2.0, shape=[1, 2], dtype=tf.float32)
+ z = tf.constant(2.0, shape=[1, 2], dtype=tf.float32)
+ z2 = MyAdd(tf.case([(tf.less(1, 0), x)], default=y), z)
+ */
+ GrapplerItem item;
+ string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
+ "function_switch_2.pbtxt");
+ TF_CHECK_OK(ReadGraphDefFromFile(filename, &item.graph));
+ GraphProperties properties(item);
+ TF_CHECK_OK(properties.InferStatically(false));
+ const auto out_props = properties.GetOutputProperties("MyAdd_MPaeanipb7o");
+ const OpInfo::TensorProperties& out_prop = out_props[0];
+ EXPECT_EQ(DT_FLOAT, out_prop.dtype());
+ EXPECT_FALSE(out_prop.shape().unknown_rank());
+ EXPECT_EQ(2, out_prop.shape().dim_size());
+ EXPECT_EQ(1, out_prop.shape().dim(0).size());
+ EXPECT_EQ(2, out_prop.shape().dim(1).size());
+
+ const auto in_props = properties.GetInputProperties("MyAdd_MPaeanipb7o");
+ EXPECT_EQ(2, in_props.size());
+
+ const OpInfo::TensorProperties& in_prop = in_props[0];
+ EXPECT_EQ(DT_FLOAT, in_prop.dtype());
+ EXPECT_FALSE(in_prop.shape().unknown_rank());
+ EXPECT_EQ(2, in_prop.shape().dim_size());
+ EXPECT_EQ(1, in_prop.shape().dim(0).size());
+ EXPECT_EQ(2, in_prop.shape().dim(1).size());
+
+ const OpInfo::TensorProperties& in_prop1 = in_props[1];
+ EXPECT_EQ(DT_FLOAT, in_prop1.dtype());
+ EXPECT_FALSE(in_prop1.shape().unknown_rank());
+ EXPECT_EQ(2, in_prop1.shape().dim_size());
+ EXPECT_EQ(1, in_prop1.shape().dim(0).size());
+ EXPECT_EQ(2, in_prop1.shape().dim(1).size());
+}
+
+TEST_F(GraphPropertiesTest, FunctionSwitchShapesStaticShapeInference) {
+ // Test graph produced in python using:
+ /*
+ @function.Defun(*[tf.float32] * 2, noinline=True)
+ def MyAdd(x, y):
+ a = tf.constant(2.0, shape=[1, 2], dtype=tf.float32)
+ b = tf.constant(2.0, shape=[1, 3], dtype=tf.float32)
+ c = tf.add(x, a)
+ d = tf.add(y, b)
+ return c
+
+ with tf.Graph().as_default():
+ x = lambda: tf.constant(2.0, shape=[1, 2], dtype=tf.float32)
+ y = lambda: tf.constant(2.0, shape=[1, 2], dtype=tf.float32)
+ z = tf.constant(2.0, shape=[1, 3], dtype=tf.float32)
+ z2 = MyAdd(tf.case([(tf.less(1, 0), x)], default=y), z)
+ */
+ GrapplerItem item;
+ string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
+ "function_switch_shapes.pbtxt");
+ TF_CHECK_OK(ReadGraphDefFromFile(filename, &item.graph));
+ GraphProperties properties(item);
+ TF_CHECK_OK(properties.InferStatically(false));
+ const auto out_props = properties.GetOutputProperties("MyAdd_lEKAAnIwI5I");
+ const OpInfo::TensorProperties& out_prop = out_props[0];
+ EXPECT_EQ(DT_FLOAT, out_prop.dtype());
+ EXPECT_FALSE(out_prop.shape().unknown_rank());
+ EXPECT_EQ(2, out_prop.shape().dim_size());
+ EXPECT_EQ(1, out_prop.shape().dim(0).size());
+ EXPECT_EQ(2, out_prop.shape().dim(1).size());
+
+ const auto in_props = properties.GetInputProperties("MyAdd_lEKAAnIwI5I");
+ EXPECT_EQ(2, in_props.size());
+
+ const OpInfo::TensorProperties& in_prop = in_props[0];
+ EXPECT_EQ(DT_FLOAT, in_prop.dtype());
+ EXPECT_FALSE(in_prop.shape().unknown_rank());
+ EXPECT_EQ(2, in_prop.shape().dim_size());
+ EXPECT_EQ(1, in_prop.shape().dim(0).size());
+ EXPECT_EQ(2, in_prop.shape().dim(1).size());
+
+ const OpInfo::TensorProperties& in_prop1 = in_props[1];
+ EXPECT_EQ(DT_FLOAT, in_prop1.dtype());
+ EXPECT_FALSE(in_prop1.shape().unknown_rank());
+ EXPECT_EQ(2, in_prop1.shape().dim_size());
+ EXPECT_EQ(1, in_prop1.shape().dim(0).size());
+ EXPECT_EQ(3, in_prop1.shape().dim(1).size());
}
TEST_F(GraphPropertiesTest, SymbolicShapes) {
diff --git a/tensorflow/core/grappler/costs/graph_properties_testdata/function_error.pbtxt b/tensorflow/core/grappler/costs/graph_properties_testdata/function_error.pbtxt
new file mode 100644
index 0000000000..c3f0a6c95d
--- /dev/null
+++ b/tensorflow/core/grappler/costs/graph_properties_testdata/function_error.pbtxt
@@ -0,0 +1,117 @@
+node {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 2
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+}
+node {
+ name: "Const_1"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 2
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+}
+node {
+ name: "MyAdd_yabA4wXEdM4"
+ op: "MyAdd_yabA4wXEdM4"
+ input: "Const"
+ input: "Const_1"
+}
+library {
+ function {
+ signature {
+ name: "MyAdd_yabA4wXEdM4"
+ input_arg {
+ name: "x"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "y"
+ type: DT_FLOAT
+ }
+ output_arg {
+ name: "add_1"
+ type: DT_FLOAT
+ }
+ }
+ node_def {
+ name: "Add"
+ op: "Add"
+ input: "x"
+ input: "Add:z:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node_def {
+ name: "Add_1"
+ op: "Add"
+ input: "Add:z:0"
+ input: "y"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ ret {
+ key: "add_1"
+ value: "Add_1:z:0"
+ }
+ attr {
+ key: "_noinline"
+ value {
+ b: true
+ }
+ }
+ }
+}
+versions {
+ producer: 26
+ min_consumer: 12
+}
diff --git a/tensorflow/core/grappler/costs/graph_properties_testdata/function_switch.pbtxt b/tensorflow/core/grappler/costs/graph_properties_testdata/function_switch.pbtxt
new file mode 100644
index 0000000000..d6d856ce41
--- /dev/null
+++ b/tensorflow/core/grappler/costs/graph_properties_testdata/function_switch.pbtxt
@@ -0,0 +1,251 @@
+node {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 2
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+}
+node {
+ name: "Less/x"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 0
+ }
+ }
+ }
+}
+node {
+ name: "Less/y"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 1
+ }
+ }
+ }
+}
+node {
+ name: "Less"
+ op: "Less"
+ input: "Less/x"
+ input: "Less/y"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+}
+node {
+ name: "case/cond/Switch"
+ op: "Switch"
+ input: "Less"
+ input: "Less"
+ attr {
+ key: "T"
+ value {
+ type: DT_BOOL
+ }
+ }
+}
+node {
+ name: "case/cond/switch_t"
+ op: "Identity"
+ input: "case/cond/Switch:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_BOOL
+ }
+ }
+}
+node {
+ name: "case/cond/switch_f"
+ op: "Identity"
+ input: "case/cond/Switch"
+ attr {
+ key: "T"
+ value {
+ type: DT_BOOL
+ }
+ }
+}
+node {
+ name: "case/cond/pred_id"
+ op: "Identity"
+ input: "Less"
+ attr {
+ key: "T"
+ value {
+ type: DT_BOOL
+ }
+ }
+}
+node {
+ name: "case/cond/Const"
+ op: "Const"
+ input: "^case/cond/switch_t"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 2
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+}
+node {
+ name: "case/cond/Const_1"
+ op: "Const"
+ input: "^case/cond/switch_f"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 2
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+}
+node {
+ name: "case/cond/Merge"
+ op: "Merge"
+ input: "case/cond/Const_1"
+ input: "case/cond/Const"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+node {
+ name: "MyAdd_MPaeanipb7o"
+ op: "MyAdd_MPaeanipb7o"
+ input: "case/cond/Merge"
+ input: "Const"
+}
+library {
+ function {
+ signature {
+ name: "MyAdd_MPaeanipb7o"
+ input_arg {
+ name: "x"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "y"
+ type: DT_FLOAT
+ }
+ output_arg {
+ name: "Add"
+ type: DT_FLOAT
+ }
+ }
+ node_def {
+ name: "Add"
+ op: "Add"
+ input: "x"
+ input: "y"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ ret {
+ key: "Add"
+ value: "Add:z:0"
+ }
+ attr {
+ key: "_noinline"
+ value {
+ b: true
+ }
+ }
+ }
+}
+versions {
+ producer: 26
+ min_consumer: 12
+}
diff --git a/tensorflow/core/grappler/costs/graph_properties_testdata/function_switch_2.pbtxt b/tensorflow/core/grappler/costs/graph_properties_testdata/function_switch_2.pbtxt
new file mode 100644
index 0000000000..e57d9d7076
--- /dev/null
+++ b/tensorflow/core/grappler/costs/graph_properties_testdata/function_switch_2.pbtxt
@@ -0,0 +1,251 @@
+node {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 2
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+}
+node {
+ name: "Less/x"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 1
+ }
+ }
+ }
+}
+node {
+ name: "Less/y"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 0
+ }
+ }
+ }
+}
+node {
+ name: "Less"
+ op: "Less"
+ input: "Less/x"
+ input: "Less/y"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+}
+node {
+ name: "case/cond/Switch"
+ op: "Switch"
+ input: "Less"
+ input: "Less"
+ attr {
+ key: "T"
+ value {
+ type: DT_BOOL
+ }
+ }
+}
+node {
+ name: "case/cond/switch_t"
+ op: "Identity"
+ input: "case/cond/Switch:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_BOOL
+ }
+ }
+}
+node {
+ name: "case/cond/switch_f"
+ op: "Identity"
+ input: "case/cond/Switch"
+ attr {
+ key: "T"
+ value {
+ type: DT_BOOL
+ }
+ }
+}
+node {
+ name: "case/cond/pred_id"
+ op: "Identity"
+ input: "Less"
+ attr {
+ key: "T"
+ value {
+ type: DT_BOOL
+ }
+ }
+}
+node {
+ name: "case/cond/Const"
+ op: "Const"
+ input: "^case/cond/switch_t"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 2
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+}
+node {
+ name: "case/cond/Const_1"
+ op: "Const"
+ input: "^case/cond/switch_f"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 2
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+}
+node {
+ name: "case/cond/Merge"
+ op: "Merge"
+ input: "case/cond/Const_1"
+ input: "case/cond/Const"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+node {
+ name: "MyAdd_MPaeanipb7o"
+ op: "MyAdd_MPaeanipb7o"
+ input: "case/cond/Merge"
+ input: "Const"
+}
+library {
+ function {
+ signature {
+ name: "MyAdd_MPaeanipb7o"
+ input_arg {
+ name: "x"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "y"
+ type: DT_FLOAT
+ }
+ output_arg {
+ name: "Add"
+ type: DT_FLOAT
+ }
+ }
+ node_def {
+ name: "Add"
+ op: "Add"
+ input: "x"
+ input: "y"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ ret {
+ key: "Add"
+ value: "Add:z:0"
+ }
+ attr {
+ key: "_noinline"
+ value {
+ b: true
+ }
+ }
+ }
+}
+versions {
+ producer: 26
+ min_consumer: 12
+}
diff --git a/tensorflow/core/grappler/costs/graph_properties_testdata/function_switch_shapes.pbtxt b/tensorflow/core/grappler/costs/graph_properties_testdata/function_switch_shapes.pbtxt
new file mode 100644
index 0000000000..e9afa91886
--- /dev/null
+++ b/tensorflow/core/grappler/costs/graph_properties_testdata/function_switch_shapes.pbtxt
@@ -0,0 +1,317 @@
+node {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 3
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+}
+node {
+ name: "Less/x"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 1
+ }
+ }
+ }
+}
+node {
+ name: "Less/y"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 0
+ }
+ }
+ }
+}
+node {
+ name: "Less"
+ op: "Less"
+ input: "Less/x"
+ input: "Less/y"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+}
+node {
+ name: "case/cond/Switch"
+ op: "Switch"
+ input: "Less"
+ input: "Less"
+ attr {
+ key: "T"
+ value {
+ type: DT_BOOL
+ }
+ }
+}
+node {
+ name: "case/cond/switch_t"
+ op: "Identity"
+ input: "case/cond/Switch:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_BOOL
+ }
+ }
+}
+node {
+ name: "case/cond/switch_f"
+ op: "Identity"
+ input: "case/cond/Switch"
+ attr {
+ key: "T"
+ value {
+ type: DT_BOOL
+ }
+ }
+}
+node {
+ name: "case/cond/pred_id"
+ op: "Identity"
+ input: "Less"
+ attr {
+ key: "T"
+ value {
+ type: DT_BOOL
+ }
+ }
+}
+node {
+ name: "case/cond/Const"
+ op: "Const"
+ input: "^case/cond/switch_t"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 2
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+}
+node {
+ name: "case/cond/Const_1"
+ op: "Const"
+ input: "^case/cond/switch_f"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 2
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+}
+node {
+ name: "case/cond/Merge"
+ op: "Merge"
+ input: "case/cond/Const_1"
+ input: "case/cond/Const"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+node {
+ name: "MyAdd_lEKAAnIwI5I"
+ op: "MyAdd_lEKAAnIwI5I"
+ input: "case/cond/Merge"
+ input: "Const"
+}
+library {
+ function {
+ signature {
+ name: "MyAdd_lEKAAnIwI5I"
+ input_arg {
+ name: "x"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "y"
+ type: DT_FLOAT
+ }
+ output_arg {
+ name: "Add"
+ type: DT_FLOAT
+ }
+ }
+ node_def {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 2
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+ }
+ node_def {
+ name: "Const_1"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 3
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+ }
+ node_def {
+ name: "Add"
+ op: "Add"
+ input: "x"
+ input: "Const:output:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node_def {
+ name: "Add_1"
+ op: "Add"
+ input: "y"
+ input: "Const_1:output:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ ret {
+ key: "Add"
+ value: "Add:z:0"
+ }
+ attr {
+ key: "_noinline"
+ value {
+ b: true
+ }
+ }
+ }
+}
+versions {
+ producer: 26
+ min_consumer: 12
+}
diff --git a/tensorflow/core/grappler/costs/graph_properties_testdata/large_function_graph.pbtxt b/tensorflow/core/grappler/costs/graph_properties_testdata/large_function_graph.pbtxt
new file mode 100644
index 0000000000..415c347a1d
--- /dev/null
+++ b/tensorflow/core/grappler/costs/graph_properties_testdata/large_function_graph.pbtxt
@@ -0,0 +1,597 @@
+node {
+ name: "Const/Const"
+ op: "Const"
+ device: "/cpu:0"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 64
+ }
+ }
+ }
+}
+node {
+ name: "input_0_0"
+ op: "RandomUniform"
+ input: "Const/Const"
+ device: "/cpu:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "seed"
+ value {
+ i: 0
+ }
+ }
+ attr {
+ key: "seed2"
+ value {
+ i: 0
+ }
+ }
+}
+node {
+ name: "Const_1/Const"
+ op: "Const"
+ device: "/cpu:0"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 4
+ }
+ }
+ tensor_content: "\001\000\000\000\001\000\000\000\030\000\000\000@\000\000\000"
+ }
+ }
+ }
+}
+node {
+ name: "input_1_0"
+ op: "RandomUniform"
+ input: "Const_1/Const"
+ device: "/cpu:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "seed"
+ value {
+ i: 0
+ }
+ }
+ attr {
+ key: "seed2"
+ value {
+ i: 0
+ }
+ }
+}
+node {
+ name: "Const_2/Const"
+ op: "Const"
+ device: "/cpu:0"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 4
+ }
+ }
+ tensor_content: "\200\000\000\000\340\000\000\000\340\000\000\000\003\000\000\000"
+ }
+ }
+ }
+}
+node {
+ name: "input_2_0"
+ op: "RandomUniform"
+ input: "Const_2/Const"
+ device: "/cpu:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "seed"
+ value {
+ i: 0
+ }
+ }
+ attr {
+ key: "seed2"
+ value {
+ i: 0
+ }
+ }
+}
+node {
+ name: "Const_3/Const"
+ op: "Const"
+ device: "/cpu:0"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 4
+ }
+ }
+ tensor_content: "\007\000\000\000\007\000\000\000\003\000\000\000\010\000\000\000"
+ }
+ }
+ }
+}
+node {
+ name: "input_3_0"
+ op: "RandomUniform"
+ input: "Const_3/Const"
+ device: "/cpu:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "seed"
+ value {
+ i: 0
+ }
+ }
+ attr {
+ key: "seed2"
+ value {
+ i: 0
+ }
+ }
+}
+node {
+ name: "y0"
+ op: "BiasAddx1_Conv2Dx1_DepthwiseConv2dNativex1_Relux1_95"
+ input: "input_0_0"
+ input: "input_1_0"
+ input: "input_2_0"
+ input: "input_3_0"
+ input: "^input_0_0"
+ input: "^input_1_0"
+ input: "^input_2_0"
+ input: "^input_3_0"
+ device: "/cpu:0"
+}
+node {
+ name: "shape"
+ op: "Shape"
+ input: "y0"
+ input: "^input_0_0"
+ input: "^input_1_0"
+ input: "^input_2_0"
+ input: "^input_3_0"
+ device: "/cpu:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+}
+node {
+ name: "zeros"
+ op: "ZerosLike"
+ input: "shape"
+ input: "^input_0_0"
+ input: "^input_1_0"
+ input: "^input_2_0"
+ input: "^input_3_0"
+ device: "/cpu:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+}
+node {
+ name: "ones"
+ op: "OnesLike"
+ input: "shape"
+ input: "^input_0_0"
+ input: "^input_1_0"
+ input: "^input_2_0"
+ input: "^input_3_0"
+ device: "/cpu:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+}
+node {
+ name: "slice_0"
+ op: "Slice"
+ input: "y0"
+ input: "zeros"
+ input: "ones"
+ input: "^input_0_0"
+ input: "^input_1_0"
+ input: "^input_2_0"
+ input: "^input_3_0"
+ device: "/cpu:0"
+ attr {
+ key: "Index"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+node {
+ name: "identity_0"
+ op: "Identity"
+ input: "slice_0"
+ input: "^input_0_0"
+ input: "^input_1_0"
+ input: "^input_2_0"
+ input: "^input_3_0"
+ device: "/cpu:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+node {
+ name: "shape_1"
+ op: "Shape"
+ input: "y0:1"
+ input: "^input_0_0"
+ input: "^input_1_0"
+ input: "^input_2_0"
+ input: "^input_3_0"
+ device: "/cpu:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+}
+node {
+ name: "zeros_1"
+ op: "ZerosLike"
+ input: "shape_1"
+ input: "^input_0_0"
+ input: "^input_1_0"
+ input: "^input_2_0"
+ input: "^input_3_0"
+ device: "/cpu:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+}
+node {
+ name: "ones_1"
+ op: "OnesLike"
+ input: "shape_1"
+ input: "^input_0_0"
+ input: "^input_1_0"
+ input: "^input_2_0"
+ input: "^input_3_0"
+ device: "/cpu:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+}
+node {
+ name: "slice_1"
+ op: "Slice"
+ input: "y0:1"
+ input: "zeros_1"
+ input: "ones_1"
+ input: "^input_0_0"
+ input: "^input_1_0"
+ input: "^input_2_0"
+ input: "^input_3_0"
+ device: "/cpu:0"
+ attr {
+ key: "Index"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+node {
+ name: "identity_1"
+ op: "Identity"
+ input: "slice_1"
+ input: "^input_0_0"
+ input: "^input_1_0"
+ input: "^input_2_0"
+ input: "^input_3_0"
+ device: "/cpu:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+library {
+ function {
+ signature {
+ name: "BiasAddx1_Conv2Dx1_DepthwiseConv2dNativex1_Relux1_95"
+ input_arg {
+ name: "InceptionV2/Conv2d_1a_7x7/biases/read"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "InceptionV2/Conv2d_1a_7x7/pointwise_weights/read"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "random_uniform"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "InceptionV2/Conv2d_1a_7x7/depthwise_weights/read"
+ type: DT_FLOAT
+ }
+ output_arg {
+ name: "InceptionV2/InceptionV2/Conv2d_1a_7x7/Relu"
+ type: DT_FLOAT
+ }
+ output_arg {
+ name: "InceptionV2/InceptionV2/Conv2d_1a_7x7/separable_conv2d/depthwise"
+ type: DT_FLOAT
+ }
+ }
+ node_def {
+ name: "InceptionV2/InceptionV2/Conv2d_1a_7x7/BiasAdd"
+ op: "BiasAdd"
+ input: "InceptionV2/InceptionV2/Conv2d_1a_7x7/separable_conv2d:output:0"
+ input: "InceptionV2/Conv2d_1a_7x7/biases/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "data_format"
+ value {
+ s: "NHWC"
+ }
+ }
+ }
+ node_def {
+ name: "InceptionV2/InceptionV2/Conv2d_1a_7x7/Relu"
+ op: "Relu"
+ input: "InceptionV2/InceptionV2/Conv2d_1a_7x7/BiasAdd:output:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node_def {
+ name: "InceptionV2/InceptionV2/Conv2d_1a_7x7/separable_conv2d"
+ op: "Conv2D"
+ input: "InceptionV2/InceptionV2/Conv2d_1a_7x7/separable_conv2d/depthwise:output:0"
+ input: "InceptionV2/Conv2d_1a_7x7/pointwise_weights/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "data_format"
+ value {
+ s: "NHWC"
+ }
+ }
+ attr {
+ key: "dilations"
+ value {
+ list {
+ i: 1
+ i: 1
+ i: 1
+ i: 1
+ }
+ }
+ }
+ attr {
+ key: "padding"
+ value {
+ s: "VALID"
+ }
+ }
+ attr {
+ key: "strides"
+ value {
+ list {
+ i: 1
+ i: 1
+ i: 1
+ i: 1
+ }
+ }
+ }
+ attr {
+ key: "use_cudnn_on_gpu"
+ value {
+ b: true
+ }
+ }
+ }
+ node_def {
+ name: "InceptionV2/InceptionV2/Conv2d_1a_7x7/separable_conv2d/depthwise"
+ op: "DepthwiseConv2dNative"
+ input: "random_uniform"
+ input: "InceptionV2/Conv2d_1a_7x7/depthwise_weights/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "data_format"
+ value {
+ s: "NHWC"
+ }
+ }
+ attr {
+ key: "dilations"
+ value {
+ list {
+ i: 1
+ i: 1
+ i: 1
+ i: 1
+ }
+ }
+ }
+ attr {
+ key: "padding"
+ value {
+ s: "SAME"
+ }
+ }
+ attr {
+ key: "strides"
+ value {
+ list {
+ i: 1
+ i: 2
+ i: 2
+ i: 1
+ }
+ }
+ }
+ }
+ ret {
+ key: "InceptionV2/InceptionV2/Conv2d_1a_7x7/Relu"
+ value: "InceptionV2/InceptionV2/Conv2d_1a_7x7/Relu:activations:0"
+ }
+ ret {
+ key: "InceptionV2/InceptionV2/Conv2d_1a_7x7/separable_conv2d/depthwise"
+ value: "InceptionV2/InceptionV2/Conv2d_1a_7x7/separable_conv2d/depthwise:output:0"
+ }
+ attr {
+ key: "_noinline"
+ value {
+ b: true
+ }
+ }
+ }
+}
+versions {
+ producer: 26
+ min_consumer: 12
+}
diff --git a/tensorflow/core/grappler/optimizers/BUILD b/tensorflow/core/grappler/optimizers/BUILD
index 4245ac0f3b..b1d6d48e31 100644
--- a/tensorflow/core/grappler/optimizers/BUILD
+++ b/tensorflow/core/grappler/optimizers/BUILD
@@ -794,9 +794,6 @@ tf_cc_test(
name = "scoped_allocator_optimizer_test",
size = "small",
srcs = ["scoped_allocator_optimizer_test.cc"],
- tags = [
- "nomsan",
- ],
deps = [
":scoped_allocator_optimizer",
"//tensorflow/cc:cc_ops",
diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
index 90be051764..97862d1ed0 100644
--- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
+++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
@@ -227,6 +227,27 @@ class ArithmeticOptimizerStage : public GraphOptimizerStage<string> {
ctx().nodes_to_preserve->end();
}
+ // TODO(ezhulenev): move to GraphOptimizerStage?
+ bool IsDrivenByControlDependency(const NodeDef& node) const {
+ return std::any_of(node.input().begin(), node.input().end(),
+ IsControlInput);
+ }
+
+ // TODO(ezhulenev): move to GraphOptimizerStage?
+ bool DrivesControlDependency(const NodeDef& node) const {
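+    // A node drives a control dependency if some consumer lists it as a
+    // "^name" input; ParseNodeName returns a negative position for those.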
+ int position;
+ for (const NodeDef* output : ctx().node_map->GetOutputs(node.name())) {
+ for (int i = 0; i < output->input_size(); ++i) {
+ auto input = output->input(i);
+ string name = ParseNodeName(input, &position);
+ if (name == node.name() && /*control input*/ position < 0) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
private:
// Extended context required for ArithmeticOptimizer.
const ArithmeticOptimizerContext ctx_ext_;
@@ -357,27 +378,6 @@ class ArithmeticNodesGroupOptimizerStage : public ArithmeticOptimizerStage {
is_broadcastable);
}
- // TODO(ezhulenev): move to GraphOptimizerStage?
- bool IsDrivenByControlDependency(const NodeDef& node) const {
- return std::any_of(node.input().begin(), node.input().end(),
- IsControlInput);
- }
-
- // TODO(ezhulenev): move to GraphOptimizerStage?
- bool DrivesControlDependency(const NodeDef& node) const {
- int position;
- for (const NodeDef* output : ctx().node_map->GetOutputs(node.name())) {
- for (int i = 0; i < output->input_size(); ++i) {
- auto input = output->input(i);
- string name = ParseNodeName(input, &position);
- if (name == node.name() && /*control input*/ position < 0) {
- return true;
- }
- }
- }
- return false;
- }
-
string ShapeSignature(const TensorShapeProto& shape) const {
string signature = strings::StrCat("rank:", shape.dim_size(), ":dim");
for (int i = 0; i < shape.dim_size(); ++i)
@@ -2519,33 +2519,32 @@ class ConvertLog1pStage : public ArithmeticOptimizerStage {
bool* modified) {
const auto& t =
ctx().graph_properties->GetInputProperties(input->name())[i];
- for (int k = 0; k < t.shape().dim_size(); ++k) {
- // Skip if t shape is not fully determined.
- if (t.shape().dim(k).size() < 0) {
+ const auto& c =
+ ctx().graph_properties->GetInputProperties(input->name())[j];
+ for (int k = 0; k < c.shape().dim_size(); ++k) {
+ // Skip if c shape is not fully determined.
+ if (c.shape().dim(k).size() < 0) {
return Status::OK();
}
}
- const auto& c =
- ctx().graph_properties->GetInputProperties(input->name())[j];
TensorShapeProto broadcast_shape;
if (!ShapeAfterBroadcast(t.shape(), c.shape(), &broadcast_shape)) {
- return errors::InvalidArgument("Cannot get broadcast shape for: ",
- t.DebugString(), " and ", c.DebugString());
+ return Status::OK();
}
if (!ShapesSymbolicallyEqual(t.shape(), broadcast_shape)) {
// skip if the non-constant tensor doesn't have the same shape after
// broadcast.
return Status::OK();
}
- if (TensorShape::IsValid(t.shape()) && t.has_value()) {
- Tensor tensor(t.dtype(), t.shape());
- if (!tensor.FromProto(t.value())) {
+ if (TensorShape::IsValid(c.shape()) && c.has_value()) {
+ Tensor constant(c.dtype(), c.shape());
+ if (!constant.FromProto(c.value())) {
return errors::InvalidArgument("Cannot parse tensor from proto: ",
- t.value().DebugString());
+ c.value().DebugString());
}
complex128 element;
- for (int k = 0; k < tensor.NumElements(); ++k) {
- if (!GetElement(tensor, k, &element)) {
+ for (int k = 0; k < constant.NumElements(); ++k) {
+ if (!GetElement(constant, k, &element)) {
+          // Input data type is not supported by log1p. Skip.
return Status::OK();
}
@@ -2558,11 +2557,12 @@ class ConvertLog1pStage : public ArithmeticOptimizerStage {
TF_RETURN_IF_ERROR(GetInputNode(input->input(i), &x));
TF_RETURN_IF_ERROR(GetInputNode(input->input(j), &y));
node->set_op("Log1p");
- node->set_input(0, y->name());
- node->add_input(AsControlDependency(x->name()));
+ node->set_input(0, input->input(i));
+ node->add_input(AsControlDependency(y->name()));
ForwardControlDependencies(node, {input});
AddToOptimizationQueue(node);
+ AddToOptimizationQueue(input);
AddToOptimizationQueue(x);
AddToOptimizationQueue(y);
*modified = true;
@@ -2648,6 +2648,172 @@ class OptimizeMaxOrMinOfMonotonicStage : public ArithmeticOptimizerStage {
}
};
+// Replace a chain of type- and shape-preserving unary ops with a single
+// '_UnaryOpsComposition' node.
+// TODO(ezhulenev): This should be part of the remapper optimizer, because it
+// doesn't have much to do with arithmetic (together with the
+// FoldMultiplyIntoConv stage?).
+class UnaryOpsComposition : public ArithmeticOptimizerStage {
+ public:
+ explicit UnaryOpsComposition(const GraphOptimizerContext& ctx,
+ const ArithmeticOptimizerContext& ctx_ext)
+ : ArithmeticOptimizerStage("UnaryOpsComposition", ctx, ctx_ext) {
+ // WARN: This should be consistent with unary_ops_composition.cc.
+ // clang-format off
+ supported_ops_ = {// Ops defined via Eigen scalar ops.
+ {"Abs", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Acos", {DT_FLOAT, DT_DOUBLE}},
+ {"Acosh", {DT_FLOAT, DT_DOUBLE}},
+ {"Asin", {DT_FLOAT, DT_DOUBLE}},
+ {"Asinh", {DT_FLOAT, DT_DOUBLE}},
+ {"Atan", {DT_FLOAT, DT_DOUBLE}},
+ {"Atanh", {DT_FLOAT, DT_DOUBLE}},
+ {"Ceil", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Cos", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Cosh", {DT_FLOAT, DT_DOUBLE}},
+ {"Expm1", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Exp", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Floor", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Inv", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Log", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Log1p", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Neg", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Reciprocal", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Rint", {DT_FLOAT, DT_DOUBLE}},
+ {"Round", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Rsqrt", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Sigmoid", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Sin", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Sinh", {DT_FLOAT, DT_DOUBLE}},
+ {"Sqrt", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Square", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Tan", {DT_FLOAT, DT_DOUBLE}},
+ {"Tanh", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+                      // Additional ops that are not part of Eigen.
+ {"Elu", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Relu", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Relu6", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
+ {"Selu", {DT_FLOAT, DT_HALF, DT_DOUBLE}}};
+ // clang-format on
+ }
+ ~UnaryOpsComposition() override = default;
+
+ bool IsSupported(const NodeDef* node) const override {
+ return CanOptimize(*node) &&
+ // Check that this node was not already a root of a fused chain. If
+ // graph optimization runs twice without pruning in between,
+ // fused_nodes_ will not have this information.
+ !ctx().node_map->NodeExists(OptimizedNodeName(*node));
+ }
+
+ Status TrySimplify(NodeDef* root, string* simplified_node_name) override {
+ DataType dtype = root->attr().at("T").type();
+
+ // Keep a trace of all supported input nodes that can be fused together.
+ std::vector<string> op_nodes = {root->name()};
+ std::vector<string> op_names = {root->op()};
+
+ // Check if we should follow input(0) while building an op composition.
+ const auto predicate_fn = [&](const NodeDef& input) {
+ if (input.name() == root->name()) return true;
+
+ bool follow_input_node =
+ dtype == GetDataTypeFromAttr(input, "T") &&
+ NumNonControlDataOutputs(input, *ctx().node_map) == 1 &&
+ CanOptimize(input);
+
+ if (follow_input_node) {
+ op_nodes.push_back(input.name());
+ op_names.push_back(input.op());
+ }
+
+ return follow_input_node;
+ };
+
+ NodeDef* last_op = GetTailOfChain(
+ *root, *ctx().node_map, /*follow_control_input*/ false, predicate_fn);
+
+ // We were not able to find a chain that can be replaced.
+ if (op_names.size() == 1) return Status::OK();
+
+ // Do not add fused nodes to any other chain.
+ std::for_each(op_nodes.begin(), op_nodes.end(),
+ [this](const string& name) { AddToFusedNodes(name); });
+
+ // Reverse the trace to get correct composition computation order.
+ std::reverse(op_names.begin(), op_names.end());
+
+ VLOG(2) << "Fuse unary ops: root=" << root->name() << " op_names=["
+ << str_util::Join(op_names, ", ") << "]";
+
+ NodeDef* composition_node = ctx().optimized_graph->add_node();
+ composition_node->set_name(OptimizedNodeName(*root));
+ composition_node->set_op("_UnaryOpsComposition");
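+    // Read from the tail's input so the fused node bypasses the entire chain.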
+ composition_node->add_input(last_op->input(0));
+ composition_node->set_device(root->device());
+
+ auto attr = composition_node->mutable_attr();
+ SetAttrValue(dtype, &(*attr)["T"]);
+ SetAttrValue(op_names, &(*attr)["op_names"]);
+
+ ctx().node_map->AddNode(composition_node->name(), composition_node);
+ ctx().node_map->AddOutput(NodeName(last_op->input(0)),
+ composition_node->name());
+
+ *simplified_node_name = composition_node->name();
+
+ return Status::OK();
+ }
+
+ private:
+ bool CanOptimize(const NodeDef& node) const {
+ DataType dtype = GetDataTypeFromAttr(node, "T");
+ if (!IsSupported(node.op(), dtype)) {
+ return false;
+ }
+ if (IsInPreserveSet(node)) {
+ return false;
+ }
+ if (!NodeIsOnCpu(node)) {
+ return false;
+ }
+ if (NodeIsAlreadyFused(node)) {
+ return false;
+ }
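+    // Fusing nodes with incoming or outgoing control edges is not supported;
+    // conservatively skip them.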
+ return !(IsDrivenByControlDependency(node) ||
+ DrivesControlDependency(node));
+ }
+
+  // The _UnaryOpsComposition kernel is defined only for CPU.
+ bool NodeIsOnCpu(const NodeDef& node) const {
+ using str_util::StartsWith;
+
+ string task;
+ string device;
+
+ return DeviceNameUtils::SplitDeviceName(node.device(), &task, &device) &&
+ StartsWith(device, DEVICE_CPU);
+ }
+
+ bool NodeIsAlreadyFused(const NodeDef& node) const {
+ return fused_nodes_.count(node.name()) > 0;
+ }
+
+ string OptimizedNodeName(const NodeDef& node) const {
+ return strings::StrCat(node.name(), "/unary_ops_composition");
+ }
+
+ void AddToFusedNodes(const string& name) { fused_nodes_.insert(name); }
+
+  // Check if an op with the given dtype is supported by _UnaryOpsComposition.
+ bool IsSupported(const string& op_name, DataType dtype) const {
+ const auto it = supported_ops_.find(op_name);
+ return it != supported_ops_.end() && it->second.count(dtype) > 0;
+ }
+
+ std::unordered_map<string, std::set<DataType>> supported_ops_;
+ std::unordered_set<string> fused_nodes_;
+};
+
} // namespace
class UniqueNodes {
@@ -2928,6 +3094,8 @@ Status ArithmeticOptimizer::SimplifyArithmeticOps(bool can_use_shapes) {
pipeline.AddStage<ConvertLog1pStage>(ctx, ctx_ext);
if (options_.optimize_max_or_min_of_monotonic)
pipeline.AddStage<OptimizeMaxOrMinOfMonotonicStage>(ctx, ctx_ext);
+ if (options_.unary_ops_composition)
+ pipeline.AddStage<UnaryOpsComposition>(ctx, ctx_ext);
VLOG(1) << "Run " << pipeline.NumStages() << " arithmetic optimizer stages: "
<< str_util::Join(pipeline.StageNames(), ", ");
diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.h b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.h
index 824ef35ef6..00c02d19bd 100644
--- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.h
+++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.h
@@ -77,6 +77,7 @@ class ArithmeticOptimizer : public GraphOptimizer {
bool simplify_aggregation = true;
bool convert_pow = true;
bool convert_log1p = true;
+ bool unary_ops_composition = true;
// Choose which arithmetic optimizer stages will be enabled for a given
// optimization level by default.
diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc b/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc
index d0e6b04679..c387b00303 100644
--- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc
+++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc
@@ -141,6 +141,9 @@ class ArithmeticOptimizerTest : public GrapplerTest {
options.dedup_computations = false;
options.combine_add_to_addn = false;
options.convert_sqrt_div_to_rsqrt_mul = false;
+ options.convert_pow = false;
+ options.convert_log1p = false;
+ options.optimize_max_or_min_of_monotonic = false;
options.fold_conjugate_into_transpose = false;
options.fold_multiply_into_conv = false;
options.fold_transpose_into_matmul = false;
@@ -158,6 +161,7 @@ class ArithmeticOptimizerTest : public GrapplerTest {
options.reorder_cast_and_transpose = false;
options.replace_mul_with_square = false;
options.simplify_aggregation = false;
+ options.unary_ops_composition = false;
optimizer->options_ = options;
}
@@ -274,6 +278,11 @@ class ArithmeticOptimizerTest : public GrapplerTest {
DisableAllStages(optimizer);
optimizer->options_.optimize_max_or_min_of_monotonic = true;
}
+
+ void EnableOnlyUnaryOpsComposition(ArithmeticOptimizer* optimizer) {
+ DisableAllStages(optimizer);
+ optimizer->options_.unary_ops_composition = true;
+ }
};
TEST_F(ArithmeticOptimizerTest, NoOp) {
@@ -3159,5 +3168,62 @@ TEST_F(ArithmeticOptimizerTest, OptimizeMaxOrMinOfMonotonicElementWise) {
EXPECT_EQ(2, required_node_count);
}
+TEST_F(ArithmeticOptimizerTest, UnaryOpsComposition) {
+ tensorflow::Scope s = tensorflow::Scope::NewRootScope();
+
+ auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
+ Output sqrt = ops::Sqrt(s.WithOpName("sqrt"), x);
+ Output log = ops::Log(s.WithOpName("log"), sqrt);
+ Output relu = ops::Relu(s.WithOpName("relu"), log);
+ Output final_out = ops::Identity(s.WithOpName("final_out"), relu);
+
+ GrapplerItem item;
+ item.fetch = {"final_out"};
+ TF_CHECK_OK(s.ToGraphDef(&item.graph));
+
+ // Place all nodes on CPU.
+ for (int i = 0; i < item.graph.node_size(); ++i) {
+ item.graph.mutable_node(i)->set_device("/device:CPU:0");
+ }
+
+ auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
+ EXPECT_EQ(1, tensors_expected.size());
+
+ GraphDef output;
+ ArithmeticOptimizer optimizer;
+ EnableOnlyUnaryOpsComposition(&optimizer);
+ OptimizeAndPrune(&optimizer, &item, &output);
+
+ EXPECT_EQ(3, output.node_size());
+
+ // Check that Sqrt/Log/Relu were replaced with a single op.
+ int required_node_count = 0;
+ for (int i = 0; i < output.node_size(); ++i) {
+ const NodeDef& node = output.node(i);
+ if (node.name() == "final_out") {
+ EXPECT_EQ("Identity", node.op());
+ EXPECT_EQ(1, node.input_size());
+ EXPECT_EQ("relu/unary_ops_composition", node.input(0));
+ ++required_node_count;
+ } else if (node.name() == "relu/unary_ops_composition") {
+ EXPECT_EQ("_UnaryOpsComposition", node.op());
+ EXPECT_EQ(1, node.input_size());
+ EXPECT_EQ("x", node.input(0));
+
+ auto op_names = node.attr().at("op_names").list().s();
+ EXPECT_EQ(3, op_names.size());
+ EXPECT_EQ("Sqrt", op_names[0]);
+ EXPECT_EQ("Log", op_names[1]);
+ EXPECT_EQ("Relu", op_names[2]);
+ ++required_node_count;
+ }
+ }
+ EXPECT_EQ(2, required_node_count);
+
+ auto tensors = EvaluateNodes(output, item.fetch);
+ EXPECT_EQ(1, tensors.size());
+ test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+}
+
} // namespace grappler
} // namespace tensorflow
diff --git a/tensorflow/core/grappler/optimizers/data/BUILD b/tensorflow/core/grappler/optimizers/data/BUILD
index 08fc9d84da..3cb9d4d61c 100644
--- a/tensorflow/core/grappler/optimizers/data/BUILD
+++ b/tensorflow/core/grappler/optimizers/data/BUILD
@@ -4,6 +4,39 @@ load("//tensorflow:tensorflow.bzl", "tf_cc_test")
load("//tensorflow/core:platform/default/build_config.bzl", "tf_protos_all")
cc_library(
+ name = "function_rename",
+ srcs = ["function_rename.cc"],
+ hdrs = [
+ "function_rename.h",
+ ],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":graph_utils",
+ "//tensorflow/core:lib",
+ "//tensorflow/core/grappler:graph_view",
+ "//tensorflow/core/grappler:grappler_item",
+ "//tensorflow/core/grappler:op_types",
+ "//tensorflow/core/grappler:utils",
+ "//tensorflow/core/grappler/clusters:cluster",
+ "//tensorflow/core/grappler/optimizers:custom_graph_optimizer",
+ "//tensorflow/core/grappler/optimizers:custom_graph_optimizer_registry",
+ ] + tf_protos_all(),
+)
+
+tf_cc_test(
+ name = "function_rename_test",
+ srcs = ["function_rename_test.cc"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":function_rename",
+ "//tensorflow/core:framework",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ "//tensorflow/core/grappler:grappler_item",
+ ] + tf_protos_all(),
+)
+
+cc_library(
name = "graph_utils",
srcs = ["graph_utils.cc"],
hdrs = [
@@ -68,6 +101,40 @@ tf_cc_test(
)
cc_library(
+ name = "noop_elimination",
+ srcs = ["noop_elimination.cc"],
+ hdrs = [
+ "noop_elimination.h",
+ ],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":graph_utils",
+ "//tensorflow/core:lib",
+ "//tensorflow/core/grappler:graph_view",
+ "//tensorflow/core/grappler:grappler_item",
+ "//tensorflow/core/grappler:op_types",
+ "//tensorflow/core/grappler:utils",
+ "//tensorflow/core/grappler/clusters:cluster",
+ "//tensorflow/core/grappler/optimizers:custom_graph_optimizer",
+ "//tensorflow/core/grappler/optimizers:custom_graph_optimizer_registry",
+ ] + tf_protos_all(),
+)
+
+tf_cc_test(
+ name = "noop_elimination_test",
+ srcs = ["noop_elimination_test.cc"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":graph_utils",
+ ":noop_elimination",
+ "//tensorflow/core:framework",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ "//tensorflow/core/grappler:grappler_item",
+ ],
+)
+
+cc_library(
name = "shuffle_and_repeat_fusion",
srcs = ["shuffle_and_repeat_fusion.cc"],
hdrs = [
@@ -105,7 +172,9 @@ cc_library(
name = "data",
visibility = ["//visibility:public"],
deps = [
+ ":function_rename",
":map_and_batch_fusion",
+ ":noop_elimination",
":shuffle_and_repeat_fusion",
],
alwayslink = 1,
diff --git a/tensorflow/core/grappler/optimizers/data/function_rename.cc b/tensorflow/core/grappler/optimizers/data/function_rename.cc
new file mode 100644
index 0000000000..8cf044d1bd
--- /dev/null
+++ b/tensorflow/core/grappler/optimizers/data/function_rename.cc
@@ -0,0 +1,51 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/grappler/optimizers/data/function_rename.h"
+
+#include "tensorflow/core/grappler/clusters/cluster.h"
+#include "tensorflow/core/grappler/graph_view.h"
+#include "tensorflow/core/grappler/grappler_item.h"
+#include "tensorflow/core/grappler/op_types.h"
+#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
+#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
+#include "tensorflow/core/grappler/utils.h"
+#include "tensorflow/core/platform/protobuf.h"
+
+namespace tensorflow {
+namespace grappler {
+
+Status FunctionRename::Optimize(Cluster* cluster, const GrapplerItem& item,
+ GraphDef* output) {
+ *output = item.graph;
+ GraphView graph(output);
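+  // Test-only transformation: append "world" to the name of every function
+  // in the library so registration of custom optimizers can be verified.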
+ int n = output->mutable_library()->function_size();
+ for (int i = 0; i < n; ++i) {
+ FunctionDef* fn = output->mutable_library()->mutable_function(i);
+ fn->mutable_signature()->set_name(fn->signature().name() + "world");
+ }
+
+ return Status::OK();
+}
+
+void FunctionRename::Feedback(Cluster* cluster, const GrapplerItem& item,
+ const GraphDef& optimize_output, double result) {
+ // no-op
+}
+
+REGISTER_GRAPH_OPTIMIZER_AS(FunctionRename, "_test_only_function_rename");
+
+} // end namespace grappler
+} // end namespace tensorflow
diff --git a/tensorflow/core/grappler/optimizers/data/function_rename.h b/tensorflow/core/grappler/optimizers/data/function_rename.h
new file mode 100644
index 0000000000..23ad9470ff
--- /dev/null
+++ b/tensorflow/core/grappler/optimizers/data/function_rename.h
@@ -0,0 +1,46 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FUNCTION_RENAME_H_
+#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FUNCTION_RENAME_H_
+
+#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
+
+namespace tensorflow {
+namespace grappler {
+
+class FunctionRename : public CustomGraphOptimizer {
+ public:
+ FunctionRename() = default;
+ ~FunctionRename() override = default;
+
+ string name() const override { return "_test_only_function_rename"; };
+
+ Status Init(
+ const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
+ return Status::OK();
+ }
+
+ Status Optimize(Cluster* cluster, const GrapplerItem& item,
+ GraphDef* output) override;
+
+ void Feedback(Cluster* cluster, const GrapplerItem& item,
+ const GraphDef& optimize_output, double result) override;
+};
+
+} // end namespace grappler
+} // end namespace tensorflow
+
+#endif // TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FUNCTION_RENAME_H_
diff --git a/tensorflow/core/grappler/optimizers/data/function_rename_test.cc b/tensorflow/core/grappler/optimizers/data/function_rename_test.cc
new file mode 100644
index 0000000000..56b8a960a7
--- /dev/null
+++ b/tensorflow/core/grappler/optimizers/data/function_rename_test.cc
@@ -0,0 +1,42 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/grappler/optimizers/data/function_rename.h"
+
+#include "tensorflow/core/framework/function.pb.h"
+#include "tensorflow/core/framework/op_def.pb.h"
+#include "tensorflow/core/grappler/grappler_item.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+namespace grappler {
+namespace {
+
+TEST(FunctionRenameTest, RenameFunction) {
+ GrapplerItem item;
+ GraphDef *graph = &item.graph;
+ FunctionDef *fn = graph->mutable_library()->add_function();
+ fn->mutable_signature()->set_name("hello");
+
+ FunctionRename optimizer;
+ GraphDef output;
+ TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
+ EXPECT_EQ(output.library().function(0).signature().name(), "helloworld");
+}
+
+} // namespace
+} // namespace grappler
+} // namespace tensorflow
diff --git a/tensorflow/core/grappler/optimizers/data/graph_utils.cc b/tensorflow/core/grappler/optimizers/data/graph_utils.cc
index aece142f7a..b5b46ccafe 100644
--- a/tensorflow/core/grappler/optimizers/data/graph_utils.cc
+++ b/tensorflow/core/grappler/optimizers/data/graph_utils.cc
@@ -221,6 +221,14 @@ void SetUniqueName(const string& op, GraphDef* graph, NodeDef* node) {
node->set_name(strings::StrCat(op, "/_", id));
}
+void ReplaceInput(const NodeDef& old_input, const NodeDef& new_input,
+ GraphView* graph) {
+ GraphView::OutputPort output_port = graph->GetOutputPort(old_input.name(), 0);
+ auto fanout = graph->GetFanout(output_port);
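+  // Note: this rewrites input 0 of every fanout node; callers must ensure
+  // that is the edge to be replaced.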
+ for (auto& input_port : fanout)
+ input_port.node->set_input(0, new_input.name());
+}
+
} // end namespace graph_utils
} // end namespace grappler
} // end namespace tensorflow
diff --git a/tensorflow/core/grappler/optimizers/data/graph_utils.h b/tensorflow/core/grappler/optimizers/data/graph_utils.h
index 3d2467031f..1cb0f0c81d 100644
--- a/tensorflow/core/grappler/optimizers/data/graph_utils.h
+++ b/tensorflow/core/grappler/optimizers/data/graph_utils.h
@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/errors.h"
@@ -78,6 +79,10 @@ int FindNodeWithOp(const string& op, const GraphDef& graph);
// is unique across the graph.
void SetUniqueName(const string& op, GraphDef* graph, NodeDef* node);
+// Rewires the nodes that consume 'old_input' to read from 'new_input' instead.
+void ReplaceInput(const NodeDef& old_input, const NodeDef& new_input,
+ GraphView* graph);
+
} // end namespace graph_utils
} // end namespace grappler
} // end namespace tensorflow
diff --git a/tensorflow/core/grappler/optimizers/data/graph_utils_test.cc b/tensorflow/core/grappler/optimizers/data/graph_utils_test.cc
index 00f66c9bc1..d723d73b7a 100644
--- a/tensorflow/core/grappler/optimizers/data/graph_utils_test.cc
+++ b/tensorflow/core/grappler/optimizers/data/graph_utils_test.cc
@@ -23,9 +23,7 @@ namespace grappler {
namespace graph_utils {
namespace {
-class GraphUtilsTest : public ::testing::Test {};
-
-TEST_F(GraphUtilsTest, AddScalarConstNodeBool) {
+TEST(GraphUtilsTest, AddScalarConstNodeBool) {
GraphDef graph;
NodeDef* bool_node;
TF_EXPECT_OK(AddScalarConstNode<bool>(true, &graph, &bool_node));
@@ -33,7 +31,7 @@ TEST_F(GraphUtilsTest, AddScalarConstNodeBool) {
EXPECT_EQ(bool_node->attr().at("value").tensor().bool_val(0), true);
}
-TEST_F(GraphUtilsTest, AddScalarConstNodeDouble) {
+TEST(GraphUtilsTest, AddScalarConstNodeDouble) {
GraphDef graph;
NodeDef* double_node;
TF_EXPECT_OK(AddScalarConstNode<double>(3.14, &graph, &double_node));
@@ -41,7 +39,7 @@ TEST_F(GraphUtilsTest, AddScalarConstNodeDouble) {
EXPECT_FLOAT_EQ(double_node->attr().at("value").tensor().double_val(0), 3.14);
}
-TEST_F(GraphUtilsTest, AddScalarConstNodeFloat) {
+TEST(GraphUtilsTest, AddScalarConstNodeFloat) {
GraphDef graph;
NodeDef* float_node;
TF_EXPECT_OK(AddScalarConstNode<float>(3.14, &graph, &float_node));
@@ -49,7 +47,7 @@ TEST_F(GraphUtilsTest, AddScalarConstNodeFloat) {
EXPECT_FLOAT_EQ(float_node->attr().at("value").tensor().float_val(0), 3.14);
}
-TEST_F(GraphUtilsTest, AddScalarConstNodeInt) {
+TEST(GraphUtilsTest, AddScalarConstNodeInt) {
GraphDef graph;
NodeDef* int_node;
TF_EXPECT_OK(AddScalarConstNode<int>(42, &graph, &int_node));
@@ -57,7 +55,7 @@ TEST_F(GraphUtilsTest, AddScalarConstNodeInt) {
EXPECT_EQ(int_node->attr().at("value").tensor().int_val(0), 42);
}
-TEST_F(GraphUtilsTest, AddScalarConstNodeInt64) {
+TEST(GraphUtilsTest, AddScalarConstNodeInt64) {
GraphDef graph;
NodeDef* int64_node;
TF_EXPECT_OK(AddScalarConstNode<int64>(42, &graph, &int64_node));
@@ -65,7 +63,7 @@ TEST_F(GraphUtilsTest, AddScalarConstNodeInt64) {
EXPECT_EQ(int64_node->attr().at("value").tensor().int64_val(0), 42);
}
-TEST_F(GraphUtilsTest, AddScalarConstNodeString) {
+TEST(GraphUtilsTest, AddScalarConstNodeString) {
GraphDef graph;
NodeDef* string_node;
TF_EXPECT_OK(AddScalarConstNode<StringPiece>("hello", &graph, &string_node));
@@ -73,7 +71,7 @@ TEST_F(GraphUtilsTest, AddScalarConstNodeString) {
EXPECT_EQ(string_node->attr().at("value").tensor().string_val(0), "hello");
}
-TEST_F(GraphUtilsTest, Compare) {
+TEST(GraphUtilsTest, Compare) {
GraphDef graphA;
GraphDef graphB;
EXPECT_TRUE(Compare(graphA, graphB));
@@ -88,7 +86,7 @@ TEST_F(GraphUtilsTest, Compare) {
EXPECT_TRUE(Compare(graphA, graphB));
}
-TEST_F(GraphUtilsTest, ContainsNodeWithName) {
+TEST(GraphUtilsTest, ContainsNodeWithName) {
GraphDef graph;
EXPECT_TRUE(!ContainsNodeWithName("A", graph));
@@ -100,7 +98,7 @@ TEST_F(GraphUtilsTest, ContainsNodeWithName) {
EXPECT_TRUE(!ContainsNodeWithName("A", graph));
}
-TEST_F(GraphUtilsTest, ContainsNodeWithOp) {
+TEST(GraphUtilsTest, ContainsNodeWithOp) {
GraphDef graph;
EXPECT_TRUE(!ContainsNodeWithOp("OpA", graph));
@@ -112,7 +110,7 @@ TEST_F(GraphUtilsTest, ContainsNodeWithOp) {
EXPECT_TRUE(!ContainsNodeWithOp("OpA", graph));
}
-TEST_F(GraphUtilsTest, FindNodeWithName) {
+TEST(GraphUtilsTest, FindNodeWithName) {
GraphDef graph;
EXPECT_EQ(FindNodeWithName("A", graph), -1);
@@ -124,7 +122,7 @@ TEST_F(GraphUtilsTest, FindNodeWithName) {
EXPECT_EQ(FindNodeWithName("A", graph), -1);
}
-TEST_F(GraphUtilsTest, FindNodeWithOp) {
+TEST(GraphUtilsTest, FindNodeWithOp) {
GraphDef graph;
EXPECT_EQ(FindNodeWithOp("OpA", graph), -1);
@@ -136,7 +134,7 @@ TEST_F(GraphUtilsTest, FindNodeWithOp) {
EXPECT_EQ(FindNodeWithOp("OpA", graph), -1);
}
-TEST_F(GraphUtilsTest, SetUniqueName) {
+TEST(GraphUtilsTest, SetUniqueName) {
GraphDef graph;
NodeDef* node1;
@@ -151,6 +149,26 @@ TEST_F(GraphUtilsTest, SetUniqueName) {
EXPECT_NE(node2->name(), node3->name());
}
+TEST(GraphUtilsTest, ReplaceInput) {
+ GraphDef graph;
+
+ NodeDef* node1;
+ TF_EXPECT_OK(AddNode("", "A", {}, {}, &graph, &node1));
+
+ NodeDef* node2;
+ TF_EXPECT_OK(AddNode("", "A", {node1->name()}, {}, &graph, &node2));
+
+ NodeDef* node3;
+ TF_EXPECT_OK(AddNode("", "A", {node2->name()}, {}, &graph, &node3));
+
+ EXPECT_EQ(node3->input(0), node2->name());
+
+ GraphView view(&graph);
+ ReplaceInput(*node2, *node1, &view);
+
+ EXPECT_EQ(node3->input(0), node1->name());
+}
+
} // namespace
} // namespace graph_utils
} // namespace grappler
diff --git a/tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.cc b/tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.cc
index 1e8cbb9784..eac665bd92 100644
--- a/tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.cc
+++ b/tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.cc
@@ -115,15 +115,7 @@ Status MapAndBatchFusion::Optimize(Cluster* cluster, const GrapplerItem& item,
nodes_to_delete.insert(map_node->name());
nodes_to_delete.insert(batch_node.name());
- // Update the input of the outputs of the `Batch` node to use
- // `MapAndBatch`.
- GraphView::OutputPort output_port =
- graph.GetOutputPort(batch_node.name(), 0);
- auto fanout = graph.GetFanout(output_port);
- for (auto it = fanout.begin(); it != fanout.end(); ++it) {
- NodeDef* node = it->node;
- node->set_input(0, new_node->name());
- }
+ graph_utils::ReplaceInput(batch_node, *new_node, &graph);
}
TF_RETURN_IF_ERROR(graph_utils::DeleteNodes(nodes_to_delete, output));
return Status::OK();
diff --git a/tensorflow/core/grappler/optimizers/data/noop_elimination.cc b/tensorflow/core/grappler/optimizers/data/noop_elimination.cc
new file mode 100644
index 0000000000..5670966367
--- /dev/null
+++ b/tensorflow/core/grappler/optimizers/data/noop_elimination.cc
@@ -0,0 +1,90 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/grappler/optimizers/data/noop_elimination.h"
+
+#include "tensorflow/core/framework/attr_value.pb.h"
+#include "tensorflow/core/framework/node_def.pb.h"
+#include "tensorflow/core/grappler/clusters/cluster.h"
+#include "tensorflow/core/grappler/graph_view.h"
+#include "tensorflow/core/grappler/grappler_item.h"
+#include "tensorflow/core/grappler/op_types.h"
+#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
+#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
+#include "tensorflow/core/grappler/utils.h"
+#include "tensorflow/core/platform/protobuf.h"
+
+namespace tensorflow {
+namespace grappler {
+namespace {
+
+bool IsTakeAll(const NodeDef& take_node, const GraphView& graph) {
+ if (take_node.op() != "TakeDataset") return false;
+
+ const NodeDef& count_node = *graph.GetNode(take_node.input(1));
+  // We are looking only for 'take' nodes with a negative count.
+ return count_node.attr().at("value").tensor().int64_val(0) < 0;
+}
+
+bool IsSkipNone(const NodeDef& skip_node, const GraphView& graph) {
+ if (skip_node.op() != "SkipDataset") return false;
+
+ const NodeDef& count_node = *graph.GetNode(skip_node.input(1));
+ // We are looking only for skip(0) nodes.
+ return count_node.attr().at("value").tensor().int64_val(0) == 0;
+}
+
+bool IsRepeatOne(const NodeDef& repeat_node, const GraphView& graph) {
+ if (repeat_node.op() != "RepeatDataset") return false;
+
+ const NodeDef& count_node = *graph.GetNode(repeat_node.input(1));
+ // We are looking only for repeat(1) nodes.
+ return count_node.attr().at("value").tensor().int64_val(0) == 1;
+}
+
+bool IsNoOp(const NodeDef& node, const GraphView& graph) {
+ return IsTakeAll(node, graph) || IsSkipNone(node, graph) ||
+ IsRepeatOne(node, graph);
+}
+
+} // namespace
+
+Status NoOpElimination::Optimize(Cluster* cluster, const GrapplerItem& item,
+ GraphDef* output) {
+ *output = item.graph;
+ GraphView graph(output);
+ std::set<string> nodes_to_delete;
+ for (const NodeDef& node : item.graph.node()) {
+ if (!IsNoOp(node, graph)) continue;
+
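+    // Bypass the no-op node by rewiring its consumers to its input dataset.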
+ GraphView::InputPort input_port = graph.GetInputPort(node.name(), 0);
+ NodeDef* const parent = graph.GetRegularFanin(input_port).node;
+ graph_utils::ReplaceInput(node, *parent, &graph);
+
+ nodes_to_delete.insert(node.name());
+ }
+ TF_RETURN_IF_ERROR(graph_utils::DeleteNodes(nodes_to_delete, output));
+ return Status::OK();
+}
+
+void NoOpElimination::Feedback(Cluster* cluster, const GrapplerItem& item,
+ const GraphDef& optimize_output, double result) {
+ // no-op
+}
+
+REGISTER_GRAPH_OPTIMIZER_AS(NoOpElimination, "noop_elimination");
+
+} // end namespace grappler
+} // end namespace tensorflow
diff --git a/tensorflow/core/grappler/optimizers/data/noop_elimination.h b/tensorflow/core/grappler/optimizers/data/noop_elimination.h
new file mode 100644
index 0000000000..c67cea49d5
--- /dev/null
+++ b/tensorflow/core/grappler/optimizers/data/noop_elimination.h
@@ -0,0 +1,48 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_NOOP_ELIMINATION_H_
+#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_NOOP_ELIMINATION_H_
+
+#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
+
+namespace tensorflow {
+namespace grappler {
+
+// This class eliminates tf.data transformations such as `take(n)` (for n < 0),
+// `skip(0)`, or `repeat(1)`.
+class NoOpElimination : public CustomGraphOptimizer {
+ public:
+ NoOpElimination() = default;
+ ~NoOpElimination() override = default;
+
+ string name() const override { return "noop_elimination"; };
+
+ Status Init(
+ const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
+ return Status::OK();
+ }
+
+ Status Optimize(Cluster* cluster, const GrapplerItem& item,
+ GraphDef* output) override;
+
+ void Feedback(Cluster* cluster, const GrapplerItem& item,
+ const GraphDef& optimize_output, double result) override;
+};
+
+} // end namespace grappler
+} // end namespace tensorflow
+
+#endif // TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_NOOP_ELIMINATION_H_
diff --git a/tensorflow/core/grappler/optimizers/data/noop_elimination_test.cc b/tensorflow/core/grappler/optimizers/data/noop_elimination_test.cc
new file mode 100644
index 0000000000..8628b16ea5
--- /dev/null
+++ b/tensorflow/core/grappler/optimizers/data/noop_elimination_test.cc
@@ -0,0 +1,217 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/grappler/optimizers/data/noop_elimination.h"
+#include <tuple>
+#include "tensorflow/core/framework/attr_value_util.h"
+#include "tensorflow/core/grappler/grappler_item.h"
+#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+namespace grappler {
+namespace {
+
+std::vector<std::pair<string, AttrValue>> GetCommonAttributes() {
+ AttrValue shapes_attr, types_attr;
+ SetAttrValue("output_shapes", &shapes_attr);
+ SetAttrValue("output_types", &types_attr);
+ std::vector<std::pair<string, AttrValue>> commonAttributes = {
+ {"output_shapes", shapes_attr}, {"output_types", types_attr}};
+
+ return commonAttributes;
+}
+
+void MakeUnaryNode(GraphDef *graph, const std::string &node_type, int count,
+ string input_node, NodeDef **return_node) {
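+  // Adds a <node_type> dataset node whose second input is a scalar int64
+  // count, with the common dataset attributes attached.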
+ NodeDef *node_count;
+ TF_ASSERT_OK(
+ graph_utils::AddScalarConstNode<int64>(count, graph, &node_count));
+ TF_ASSERT_OK(graph_utils::AddNode("", node_type,
+ {std::move(input_node), node_count->name()},
+ GetCommonAttributes(), graph, return_node));
+}
+
+void MakeCacheNode(GraphDef *graph, string input_node, NodeDef **return_node) {
+ NodeDef *node_filename;
+ TF_ASSERT_OK(
+ graph_utils::AddScalarConstNode<StringPiece>("", graph, &node_filename));
+ TF_ASSERT_OK(graph_utils::AddNode(
+ "", "CacheDataset", {std::move(input_node), node_filename->name()},
+ GetCommonAttributes(), graph, return_node));
+}
+
+void MakeRangeNode(GraphDef *graph, NodeDef **range_node) {
+ NodeDef *start_node, *stop_node, *step_node;
+ TF_ASSERT_OK(graph_utils::AddScalarConstNode<int64>(0, graph, &start_node));
+ TF_ASSERT_OK(graph_utils::AddScalarConstNode<int64>(10, graph, &stop_node));
+ TF_ASSERT_OK(graph_utils::AddScalarConstNode<int64>(1, graph, &step_node));
+
+ std::vector<string> range_inputs = {start_node->name(), stop_node->name(),
+ step_node->name()};
+
+ TF_ASSERT_OK(graph_utils::AddNode("", "RangeDataset", range_inputs,
+ GetCommonAttributes(), graph, range_node));
+}
+
+struct NoOpLastEliminationTest
+ : ::testing::TestWithParam<std::tuple<std::string, int, bool>> {};
+
+// This test checks whether the no-op elimination correctly handles
+// transformations at the end of the pipeline.
+TEST_P(NoOpLastEliminationTest, EliminateLastNoOpNode) {
+ GrapplerItem item;
+ GraphDef *graph = &item.graph;
+
+ const std::string &node_type = std::get<0>(GetParam());
+ const int node_count = std::get<1>(GetParam());
+ const bool should_keep_node = std::get<2>(GetParam());
+
+ NodeDef *range_node;
+ MakeRangeNode(graph, &range_node);
+
+ NodeDef *node;
+ MakeUnaryNode(graph, node_type, node_count, range_node->name(), &node);
+
+ NoOpElimination optimizer;
+ GraphDef output;
+ TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
+
+ EXPECT_EQ(graph_utils::ContainsNodeWithName(node->name(), output),
+ should_keep_node);
+}
+
+INSTANTIATE_TEST_CASE_P(
+ BasicRemovalTest, NoOpLastEliminationTest,
+ ::testing::Values(std::make_tuple("TakeDataset", -3, false),
+ std::make_tuple("TakeDataset", -1, false),
+ std::make_tuple("TakeDataset", 0, true),
+ std::make_tuple("TakeDataset", 3, true),
+ std::make_tuple("SkipDataset", -1, true),
+ std::make_tuple("SkipDataset", 0, false),
+ std::make_tuple("SkipDataset", 3, true),
+ std::make_tuple("RepeatDataset", 1, false),
+ std::make_tuple("RepeatDataset", 2, true)));
+
+struct NoOpMiddleEliminationTest
+ : ::testing::TestWithParam<std::tuple<std::string, int, bool>> {};
+
+// This test checks whether the no-op elimination correctly handles
+// transformations in the middle of the pipeline.
+TEST_P(NoOpMiddleEliminationTest, EliminateMiddleNoOpNode) {
+ GrapplerItem item;
+ GraphDef *graph = &item.graph;
+
+ const std::string &node_type = std::get<0>(GetParam());
+ const int node_count = std::get<1>(GetParam());
+ const bool should_keep_node = std::get<2>(GetParam());
+
+ NodeDef *range_node;
+ MakeRangeNode(graph, &range_node);
+
+ NodeDef *node;
+ MakeUnaryNode(graph, node_type, node_count, range_node->name(), &node);
+
+ NodeDef *cache_node;
+ MakeCacheNode(graph, node->name(), &cache_node);
+ NoOpElimination optimizer;
+ GraphDef output;
+ TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
+
+ EXPECT_EQ(graph_utils::ContainsNodeWithName(node->name(), output),
+ should_keep_node);
+ EXPECT_TRUE(graph_utils::ContainsNodeWithName(cache_node->name(), output));
+
+ NodeDef cache_node_out =
+ output.node(graph_utils::FindNodeWithName(cache_node->name(), output));
+
+ EXPECT_EQ(cache_node_out.input_size(), 2);
+ auto last_node_input = (should_keep_node ? node : range_node)->name();
+ EXPECT_EQ(cache_node_out.input(0), last_node_input);
+}
+
+INSTANTIATE_TEST_CASE_P(
+ BasicRemovalTest, NoOpMiddleEliminationTest,
+ ::testing::Values(std::make_tuple("TakeDataset", -1, false),
+ std::make_tuple("TakeDataset", -3, false),
+ std::make_tuple("TakeDataset", 0, true),
+ std::make_tuple("TakeDataset", 3, true),
+ std::make_tuple("SkipDataset", -1, true),
+ std::make_tuple("SkipDataset", 0, false),
+ std::make_tuple("SkipDataset", 3, true),
+ std::make_tuple("RepeatDataset", 1, false),
+ std::make_tuple("RepeatDataset", 2, true)));
+
+using NodesTypes = std::tuple<std::pair<string, int>, std::pair<string, int>>;
+struct NoOpMultipleEliminationTest : ::testing::TestWithParam<NodesTypes> {};
+
+// This test checks whether the no-op elimination correctly removes
+// multiple no-op nodes.
+TEST_P(NoOpMultipleEliminationTest, EliminateMultipleNoOpNode) {
+ GrapplerItem item;
+ GraphDef *graph = &item.graph;
+
+ static_assert(std::tuple_size<NodesTypes>::value == 2,
+ "Make sure to include everything in the test");
+ const std::vector<std::pair<string, int>> noop_nodes = {
+ std::get<0>(GetParam()), std::get<1>(GetParam())};
+
+ NodeDef *range_node;
+ MakeRangeNode(graph, &range_node);
+
+ NodeDef *previous = range_node;
+ std::vector<string> nodes_to_remove;
+ nodes_to_remove.reserve(noop_nodes.size());
+
+ for (const auto &noop_node : noop_nodes) {
+ NodeDef *node;
+ MakeUnaryNode(graph, noop_node.first, noop_node.second, previous->name(),
+ &node);
+ nodes_to_remove.push_back(node->name());
+ previous = node;
+ }
+
+ NodeDef *cache_node;
+ MakeCacheNode(graph, previous->name(), &cache_node);
+ NoOpElimination optimizer;
+ GraphDef output;
+ TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
+
+ for (const auto &noop_node_name : nodes_to_remove)
+ EXPECT_FALSE(graph_utils::ContainsNodeWithName(noop_node_name, output));
+
+ EXPECT_TRUE(graph_utils::ContainsNodeWithName(cache_node->name(), output));
+
+ NodeDef cache_node_out =
+ output.node(graph_utils::FindNodeWithName(cache_node->name(), output));
+
+ EXPECT_EQ(cache_node_out.input_size(), 2);
+ EXPECT_EQ(cache_node_out.input(0), range_node->name());
+}
+
+const auto *const kTakeNode = new std::pair<string, int>{"TakeDataset", -1};
+const auto *const kSkipNode = new std::pair<string, int>{"SkipDataset", 0};
+const auto *const kRepeatNode = new std::pair<string, int>{"RepeatDataset", 1};
+
+INSTANTIATE_TEST_CASE_P(
+ BasicRemovalTest, NoOpMultipleEliminationTest,
+ ::testing::Combine(::testing::Values(*kTakeNode, *kSkipNode, *kRepeatNode),
+ ::testing::Values(*kTakeNode, *kSkipNode,
+ *kRepeatNode)));
+
+} // namespace
+} // namespace grappler
+} // namespace tensorflow
diff --git a/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc b/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc
index 0df73b33ed..8332fb0b1e 100644
--- a/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc
+++ b/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc
@@ -84,15 +84,7 @@ Status ShuffleAndRepeatFusion::Optimize(Cluster* cluster,
nodes_to_delete.insert(shuffle_node->name());
nodes_to_delete.insert(repeat_node.name());
- // Update the input of the outputs of the `Repeat` node to use
- // `ShuffleAndRepeat`.
- GraphView::OutputPort output_port =
- graph.GetOutputPort(repeat_node.name(), 0);
- auto fanout = graph.GetFanout(output_port);
- for (auto it = fanout.begin(); it != fanout.end(); ++it) {
- NodeDef* node = it->node;
- node->set_input(0, new_node->name());
- }
+ graph_utils::ReplaceInput(repeat_node, *new_node, &graph);
}
TF_RETURN_IF_ERROR(graph_utils::DeleteNodes(nodes_to_delete, output));
return Status::OK();
diff --git a/tensorflow/core/grappler/optimizers/meta_optimizer.cc b/tensorflow/core/grappler/optimizers/meta_optimizer.cc
index b1f31ad0d0..c55f479451 100644
--- a/tensorflow/core/grappler/optimizers/meta_optimizer.cc
+++ b/tensorflow/core/grappler/optimizers/meta_optimizer.cc
@@ -91,7 +91,8 @@ std::unique_ptr<GraphOptimizer> MetaOptimizer::MakeNewOptimizer(
MK_OPT("dependency", new DependencyOptimizer(cfg_.dependency_optimization()));
MK_OPT("debug_stripper", new DebugStripper());
MK_OPT("scoped_allocator",
- new ScopedAllocatorOptimizer(cfg_.scoped_allocator_opts()));
+ new ScopedAllocatorOptimizer(cfg_.scoped_allocator_optimization(),
+ cfg_.scoped_allocator_opts()));
return std::unique_ptr<GraphOptimizer>();
}
@@ -150,8 +151,8 @@ Status MetaOptimizer::InitializeOptimizers(
new AutoParallel(cfg_.auto_parallel().num_replicas()));
}
if (cfg_.scoped_allocator_optimization()) {
- optimizers->emplace_back(
- new ScopedAllocatorOptimizer(cfg_.scoped_allocator_opts()));
+ optimizers->emplace_back(new ScopedAllocatorOptimizer(
+ cfg_.scoped_allocator_optimization(), cfg_.scoped_allocator_opts()));
}
return Status::OK();
}
diff --git a/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.cc b/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.cc
index cceef4098d..275568e464 100644
--- a/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.cc
+++ b/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.cc
@@ -650,7 +650,8 @@ class UnaryElementwiseRewriter : public ScopedAllocatorOptimizer::Rewriter {
};
ScopedAllocatorOptimizer::ScopedAllocatorOptimizer(
- const ScopedAllocatorOptions& opts) {
+ RewriterConfig::Toggle opt_level, const ScopedAllocatorOptions& opts)
+ : opt_level_(opt_level) {
VLOG(1) << "ScopedAllocatorOptimizer::ScopedAllocatorOptimizer";
Rewriter* r = new UnaryElementwiseRewriter();
to_delete_.push_back(r);
diff --git a/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.h b/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.h
index ab4d444595..13589f536c 100644
--- a/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.h
+++ b/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.h
@@ -32,7 +32,8 @@ class ScopedAllocatorOptimizer;
// movement and consolidate some kinds of Ops.
class ScopedAllocatorOptimizer : public GraphOptimizer {
public:
- explicit ScopedAllocatorOptimizer(const ScopedAllocatorOptions& opts);
+ ScopedAllocatorOptimizer(RewriterConfig::Toggle opt_level,
+ const ScopedAllocatorOptions& opts);
~ScopedAllocatorOptimizer() override;
string name() const override { return "scoped_allocator_optimizer"; }
diff --git a/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer_test.cc b/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer_test.cc
index 3a2859dc5f..89847f83d4 100644
--- a/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer_test.cc
+++ b/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer_test.cc
@@ -115,7 +115,7 @@ TEST_F(ScopedAllocatorOptimizerTest, UnaryRewriteOnly) {
ScopedAllocatorOptions opts;
opts.add_enable_op("Abs");
- ScopedAllocatorOptimizer sao(opts);
+ ScopedAllocatorOptimizer sao(RewriterConfig::ON, opts);
ScopedAllocatorOptimizer::OpNameSet ons;
ons.insert("Abs");
@@ -199,7 +199,7 @@ TEST_F(ScopedAllocatorOptimizerTest, UnaryExecute) {
// b + c == -4, -4, 3, 2
for (int oi = 0; oi < outputs.size(); ++oi) {
for (int i = 0; i < outputs[oi].NumElements(); ++i) {
- VLOG(0) << "output vec " << oi << " index " << i << " = "
+ VLOG(1) << "output vec " << oi << " index " << i << " = "
<< outputs[oi].flat<float>()(i);
}
if (oi == 0) {
diff --git a/tensorflow/core/grappler/utils/scc.cc b/tensorflow/core/grappler/utils/scc.cc
index f2a6507d94..d033e9c522 100644
--- a/tensorflow/core/grappler/utils/scc.cc
+++ b/tensorflow/core/grappler/utils/scc.cc
@@ -142,9 +142,13 @@ void StronglyConnectedComponents(
// Create a list of top-level parents (add them to object queue)
// Also create a mapping from nodes to their children.
+ // Inputs might not be present if called on a subgraph.
for (const NodeDef& node : graph.node()) {
for (const string& input : node.input()) {
- name_to_data[NodeName(input)]->children.push_back(node_to_data[&node]);
+ auto it = name_to_data.find(NodeName(input));
+ if (it != name_to_data.end()) {
+ it->second->children.push_back(node_to_data[&node]);
+ }
}
}
@@ -202,10 +206,12 @@ int IdentifyLoops(const GraphDef& graph,
const std::vector<const NodeDef*>& component_nodes = component.second;
std::vector<std::pair<NodeDef*, string>> next_iter_nodes;
GraphDef subgraph;
+ std::unordered_map<const NodeDef*, const NodeDef*> subgraph_mapping;
for (const auto& component_node : component_nodes) {
NodeDef* node = subgraph.add_node();
*node = *component_node;
+ subgraph_mapping[node] = component_node;
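+      // Remember the original node so loop ids can be attributed to nodes of
+      // the input graph below.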
if (IsNextIteration(*node)) {
CHECK_EQ(1, node->input_size());
next_iter_nodes.emplace_back(node, node->input(0));
@@ -227,13 +233,13 @@ int IdentifyLoops(const GraphDef& graph,
int num_components = 0;
std::unordered_map<const NodeDef*, int> components;
StronglyConnectedComponents(subgraph, &components, &num_components);
- CHECK_EQ(1, num_components);
+ CHECK_GE(num_components, 1);
for (const auto it : components) {
int id = it.second;
if (id < 0) {
continue;
}
- (*loops)[it.first].push_back(loop_id);
+ (*loops)[subgraph_mapping[it.first]].push_back(loop_id);
}
++loop_id;
}
diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD
index 77dec24c33..7599cf7db2 100644
--- a/tensorflow/core/kernels/BUILD
+++ b/tensorflow/core/kernels/BUILD
@@ -368,6 +368,7 @@ cc_library(
cc_library(
name = "queue_op",
+ srcs = ["queue_op.cc"],
hdrs = ["queue_op.h"],
deps = [
":queue_base",
@@ -881,7 +882,6 @@ tf_kernel_library(
"tile_functor_gpu.cu.cc",
],
prefix = "tile_ops",
- textual_hdrs = ["tile_ops_gpu_impl.h"],
deps = ARRAY_DEPS,
)
@@ -1885,9 +1885,10 @@ cc_library(
name = "fifo_queue",
srcs = ["fifo_queue.cc"],
hdrs = ["fifo_queue.h"],
- visibility = ["//visibility:private"],
+ visibility = [":friends"],
deps = [
":queue_base",
+ ":queue_op",
":typed_queue",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
@@ -2085,6 +2086,7 @@ IMAGE_DEPS = [
"//tensorflow/core:jpeg_internal",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
+ "//tensorflow/core:png_internal",
"//tensorflow/core:protos_all_cc",
]
@@ -2659,7 +2661,7 @@ tf_kernel_library(
tf_kernel_library(
name = "summary_image_op",
prefix = "summary_image_op",
- deps = LOGGING_DEPS,
+ deps = LOGGING_DEPS + ["//tensorflow/core:png_internal"],
)
tf_kernel_library(
@@ -2704,17 +2706,16 @@ cc_library(
],
)
-MANIP_DEPS = [
- "//tensorflow/core:framework",
- "//tensorflow/core:lib",
- "//tensorflow/core:manip_ops_op_lib",
- "//third_party/eigen3",
-]
-
tf_kernel_library(
name = "roll_op",
prefix = "roll_op",
- deps = MANIP_DEPS,
+ deps = [
+ ":bounds_check",
+ "//tensorflow/core:framework",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:manip_ops_op_lib",
+ "//third_party/eigen3",
+ ],
)
tf_cc_test(
@@ -2934,6 +2935,15 @@ tf_kernel_library(
deps = MATH_DEPS,
)
+tf_kernel_library(
+ name = "unary_ops_composition",
+ prefix = "unary_ops_composition",
+ deps = MATH_DEPS + [
+ ":cwise_op",
+ ":relu_op",
+ ],
+)
+
tf_cc_test(
name = "sequence_ops_test",
size = "small",
@@ -3033,6 +3043,28 @@ tf_cuda_cc_test(
)
tf_cuda_cc_test(
+ name = "unary_ops_composition_test",
+ size = "small",
+ srcs = ["unary_ops_composition_test.cc"],
+ deps = [
+ ":ops_testutil",
+ ":ops_util",
+ ":unary_ops_composition",
+ "//tensorflow/cc:cc_ops",
+ "//tensorflow/cc:client_session",
+ "//tensorflow/core:core_cpu",
+ "//tensorflow/core:framework",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:protos_all_cc",
+ "//tensorflow/core:tensorflow",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ "//tensorflow/core:testlib",
+ ],
+)
+
+tf_cuda_cc_test(
name = "matmul_op_test",
size = "small",
srcs = ["matmul_op_test.cc"],
@@ -3352,6 +3384,14 @@ cc_library(
],
)
+# Kernels for the nodes intended to be added to the graph by the Grappler optimizers.
+cc_library(
+ name = "grappler",
+ deps = [
+ ":unary_ops_composition",
+ ],
+)
+
NN_DEPS = [
":bounds_check",
":conv_2d",
@@ -3888,6 +3928,8 @@ tf_cc_test(
cc_library(
name = "sparse",
deps = [
+ ":deserialize_sparse_string_op",
+ ":deserialize_sparse_variant_op",
":serialize_sparse_op",
":sparse_add_grad_op",
":sparse_add_op",
@@ -4042,6 +4084,23 @@ tf_kernel_library(
)
tf_kernel_library(
+ name = "deserialize_sparse_string_op",
+ prefix = "deserialize_sparse_string_op",
+ deps = SPARSE_DEPS + [
+ ":reshape_util",
+ "//tensorflow/core:protos_all_cc",
+ ],
+)
+
+tf_kernel_library(
+ name = "deserialize_sparse_variant_op",
+ prefix = "deserialize_sparse_variant_op",
+ deps = SPARSE_DEPS + [
+ "//tensorflow/core:protos_all_cc",
+ ],
+)
+
+tf_kernel_library(
name = "sparse_tensors_map_ops",
prefix = "sparse_tensors_map_ops",
deps = SPARSE_DEPS,
@@ -5052,6 +5111,7 @@ filegroup(
"padding_fifo_queue.cc",
"padding_fifo_queue_op.cc",
"queue_base.cc",
+ "queue_op.cc",
"queue_ops.cc",
"random_op.cc",
"reduction_ops_all.cc",
diff --git a/tensorflow/core/kernels/boosted_trees/BUILD b/tensorflow/core/kernels/boosted_trees/BUILD
index 62327dfe1d..4910021c63 100644
--- a/tensorflow/core/kernels/boosted_trees/BUILD
+++ b/tensorflow/core/kernels/boosted_trees/BUILD
@@ -30,6 +30,7 @@ tf_kernel_library(
"//tensorflow/core:framework",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
+ "//tensorflow/core/kernels/boosted_trees:boosted_trees_proto_cc",
],
)
@@ -44,6 +45,11 @@ cc_library(
],
)
+cc_library(
+ name = "tree_helper",
+ hdrs = ["tree_helper.h"],
+)
+
tf_kernel_library(
name = "resource_ops",
srcs = ["resource_ops.cc"],
@@ -60,6 +66,7 @@ tf_kernel_library(
name = "stats_ops",
srcs = ["stats_ops.cc"],
deps = [
+ ":tree_helper",
"//tensorflow/core:boosted_trees_ops_op_lib",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
@@ -71,6 +78,7 @@ tf_kernel_library(
srcs = ["training_ops.cc"],
deps = [
":resources",
+ ":tree_helper",
"//tensorflow/core:boosted_trees_ops_op_lib",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
diff --git a/tensorflow/core/kernels/boosted_trees/boosted_trees.proto b/tensorflow/core/kernels/boosted_trees/boosted_trees.proto
index 55599de731..c9664f0c1c 100644
--- a/tensorflow/core/kernels/boosted_trees/boosted_trees.proto
+++ b/tensorflow/core/kernels/boosted_trees/boosted_trees.proto
@@ -115,3 +115,20 @@ message TreeEnsemble {
// Metadata that is used during the training.
GrowingMetadata growing_metadata = 4;
}
+
+// DebugOutput contains outputs useful for debugging/model interpretation, at
+// the individual example-level. Debug outputs that are available to the user
+// are: 1) Directional feature contributions (DFCs) 2) Node IDs for ensemble
+// prediction path 3) Leaf node IDs.
+message DebugOutput {
+ // Return the logits and associated feature splits across prediction paths for
+ // each tree, for every example, at predict time. We will use these values to
+ // compute DFCs in Python, by subtracting each child prediction from its
+ // parent prediction and associating this change with its respective feature
+ // id.
+ repeated int32 feature_ids = 1;
+ repeated float logits_path = 2;
+
+ // TODO(crawles): return 2) Node IDs for ensemble prediction path 3) Leaf node
+ // IDs.
+}
diff --git a/tensorflow/core/kernels/boosted_trees/prediction_ops.cc b/tensorflow/core/kernels/boosted_trees/prediction_ops.cc
index 20359f28d3..b2efa06941 100644
--- a/tensorflow/core/kernels/boosted_trees/prediction_ops.cc
+++ b/tensorflow/core/kernels/boosted_trees/prediction_ops.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/kernels/boosted_trees/boosted_trees.pb.h"
#include "tensorflow/core/kernels/boosted_trees/resources.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
@@ -103,8 +104,8 @@ class BoostedTreesTrainingPredictOp : public OpKernel {
const int32 latest_tree = resource->num_trees() - 1;
if (latest_tree < 0) {
- // Ensemble was empty. Nothing changes.
- output_node_ids = cached_node_ids;
+ // Ensemble was empty. Output the very first node.
+ output_node_ids.setZero();
output_tree_ids = cached_tree_ids;
// All the predictions are zeros.
output_partial_logits.setZero();
@@ -119,16 +120,20 @@ class BoostedTreesTrainingPredictOp : public OpKernel {
int32 node_id = cached_node_ids(i);
float partial_tree_logit = 0.0;
- // If the tree was pruned, returns the node id into which the
- // current_node_id was pruned, as well the correction of the cached
- // logit prediction.
- resource->GetPostPruneCorrection(tree_id, node_id, &node_id,
- &partial_tree_logit);
-
- // Logic in the loop adds the cached node value again if it is a leaf.
- // If it is not a leaf anymore we need to subtract the old node's
- // value. The following logic handles both of these cases.
- partial_tree_logit -= resource->node_value(tree_id, node_id);
+ if (node_id >= 0) {
+ // If the tree was pruned, returns the node id into which the
+ // current_node_id was pruned, as well as the correction of the cached
+ // logit prediction.
+ resource->GetPostPruneCorrection(tree_id, node_id, &node_id,
+ &partial_tree_logit);
+ // Logic in the loop adds the cached node value again if it is a
+ // leaf. If it is not a leaf anymore we need to subtract the old
+ // node's value. The following logic handles both of these cases.
+ partial_tree_logit -= resource->node_value(tree_id, node_id);
+ } else {
+ // No cache exists, start from the very first node.
+ node_id = 0;
+ }
float partial_all_logit = 0.0;
while (true) {
if (resource->is_leaf(tree_id, node_id)) {
@@ -219,10 +224,10 @@ class BoostedTreesPredictOp : public OpKernel {
return;
}
- const int32 latest_tree = resource->num_trees() - 1;
+ const int32 last_tree = resource->num_trees() - 1;
auto do_work = [&resource, &batch_bucketized_features, &output_logits,
- batch_size, latest_tree](int32 start, int32 end) {
+ batch_size, last_tree](int32 start, int32 end) {
for (int32 i = start; i < end; ++i) {
float tree_logit = 0.0;
int32 tree_id = 0;
@@ -232,8 +237,8 @@ class BoostedTreesPredictOp : public OpKernel {
tree_logit += resource->GetTreeWeight(tree_id) *
resource->node_value(tree_id, node_id);
- // Stop if it was the latest tree.
- if (tree_id == latest_tree) {
+ // Stop if it was the last tree.
+ if (tree_id == last_tree) {
break;
}
// Move onto other trees.
@@ -250,7 +255,7 @@ class BoostedTreesPredictOp : public OpKernel {
// 10 is the magic number. The actual number might depend on (the number of
// layers in the trees) and (cpu cycles spent on each layer), but this
// value would work for many cases. May be tuned later.
- const int64 cost = (latest_tree + 1) * 10;
+ const int64 cost = (last_tree + 1) * 10;
thread::ThreadPool* const worker_threads =
context->device()->tensorflow_cpu_worker_threads()->workers;
Shard(worker_threads->NumThreads(), worker_threads, batch_size,
@@ -266,4 +271,118 @@ class BoostedTreesPredictOp : public OpKernel {
REGISTER_KERNEL_BUILDER(Name("BoostedTreesPredict").Device(DEVICE_CPU),
BoostedTreesPredictOp);
+// The Op that returns debugging/model interpretability outputs for each
+// example. Currently it outputs the split feature ids and logits after each
+// split along the decision path for each example. This will be used to compute
+// directional feature contributions at predict time for an arbitrary activation
+// function.
+// TODO(crawles): return in proto 1) Node IDs for ensemble prediction path
+// 2) Leaf node IDs.
+class BoostedTreesExampleDebugOutputsOp : public OpKernel {
+ public:
+ explicit BoostedTreesExampleDebugOutputsOp(
+ OpKernelConstruction* const context)
+ : OpKernel(context) {
+ OP_REQUIRES_OK(context, context->GetAttr("num_bucketized_features",
+ &num_bucketized_features_));
+ OP_REQUIRES_OK(context,
+ context->GetAttr("logits_dimension", &logits_dimension_));
+ OP_REQUIRES(context, logits_dimension_ == 1,
+ errors::InvalidArgument(
+ "Currently only one dimensional outputs are supported."));
+ }
+
+ void Compute(OpKernelContext* const context) override {
+ BoostedTreesEnsembleResource* resource;
+ // Get the resource.
+ OP_REQUIRES_OK(context, LookupResource(context, HandleFromInput(context, 0),
+ &resource));
+ // Release the reference to the resource once we're done using it.
+ core::ScopedUnref unref_me(resource);
+
+ // Get the inputs.
+ OpInputList bucketized_features_list;
+ OP_REQUIRES_OK(context, context->input_list("bucketized_features",
+ &bucketized_features_list));
+ std::vector<tensorflow::TTypes<int32>::ConstVec> batch_bucketized_features;
+ batch_bucketized_features.reserve(bucketized_features_list.size());
+ for (const Tensor& tensor : bucketized_features_list) {
+ batch_bucketized_features.emplace_back(tensor.vec<int32>());
+ }
+ const int batch_size = batch_bucketized_features[0].size();
+
+ // We need to get the feature ids used for splitting and the logits after
+ // each split. We will use these to calculate the changes in the prediction
+ // (contributions) for an arbitrary activation function (done in Python) and
+ // attribute them to the associated feature ids. We will store these in
+ // a proto below.
+ Tensor* output_debug_info_t = nullptr;
+ OP_REQUIRES_OK(
+ context, context->allocate_output("examples_debug_outputs_serialized",
+ {batch_size}, &output_debug_info_t));
+ // Will contain serialized protos, per example.
+ auto output_debug_info = output_debug_info_t->flat<string>();
+ const int32 last_tree = resource->num_trees() - 1;
+
+ // For each given example, traverse through all trees keeping track of the
+ // features used to split and the associated logits at each point along the
+ // path. Note: feature_ids has one less value than logits_path because the
+ // first value of each logit path will be the bias.
+ auto do_work = [&resource, &batch_bucketized_features, &output_debug_info,
+ batch_size, last_tree](int32 start, int32 end) {
+ for (int32 i = start; i < end; ++i) {
+ // Proto to store debug outputs, per example.
+ boosted_trees::DebugOutput example_debug_info;
+ // Initial bias prediction, e.g. a prediction based on the training mean.
+ example_debug_info.add_logits_path(resource->GetTreeWeight(0) *
+ resource->node_value(0, 0));
+ int32 node_id = 0;
+ int32 tree_id = 0;
+ int32 feature_id;
+ float tree_logit;
+ float past_trees_logit = 0; // Sum of leaf logits from prior trees.
+ // Populate proto.
+ while (tree_id <= last_tree) {
+ // Feature id used to split.
+ feature_id = resource->feature_id(tree_id, node_id);
+ example_debug_info.add_feature_ids(feature_id);
+ // Get logit after split.
+ node_id = resource->next_node(tree_id, node_id, i,
+ batch_bucketized_features);
+ tree_logit = resource->GetTreeWeight(tree_id) *
+ resource->node_value(tree_id, node_id);
+ // Output logit incorporates sum of leaf logits from prior trees.
+ example_debug_info.add_logits_path(tree_logit + past_trees_logit);
+ if (resource->is_leaf(tree_id, node_id)) {
+ // Move onto other trees.
+ past_trees_logit += tree_logit;
+ ++tree_id;
+ node_id = 0;
+ }
+ }
+ // Set output as serialized proto containing debug info.
+ string serialized = example_debug_info.SerializeAsString();
+ output_debug_info(i) = serialized;
+ }
+ };
+
+ // 10 is the magic number. The actual number might depend on (the number of
+ // layers in the trees) and (cpu cycles spent on each layer), but this
+ // value would work for many cases. May be tuned later.
+ const int64 cost = (last_tree + 1) * 10;
+ thread::ThreadPool* const worker_threads =
+ context->device()->tensorflow_cpu_worker_threads()->workers;
+ Shard(worker_threads->NumThreads(), worker_threads, batch_size,
+ /*cost_per_unit=*/cost, do_work);
+ }
+
+ private:
+ int32 logits_dimension_; // Indicates dimension of logits in the tree nodes.
+ int32 num_bucketized_features_; // Indicates the number of features.
+};
+
+REGISTER_KERNEL_BUILDER(
+ Name("BoostedTreesExampleDebugOutputs").Device(DEVICE_CPU),
+ BoostedTreesExampleDebugOutputsOp);
+
} // namespace tensorflow
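
As the comments above note, the serialized DebugOutput protos are consumed
later (in Python) by differencing consecutive logits to get per-feature
contributions: feature_ids has one entry per split, and logits_path has one
more entry because it starts with the bias. A hedged sketch of that
differencing step in C++, assuming the two repeated fields have already been
copied into vectors:

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Credit feature_ids[i] with the change in prediction caused by split i:
    // logits_path[i + 1] - logits_path[i]. logits_path[0] is the bias.
    std::vector<std::pair<int, float>> Contributions(
        const std::vector<int>& feature_ids,
        const std::vector<float>& logits_path) {
      std::vector<std::pair<int, float>> contribs;
      for (size_t i = 0; i < feature_ids.size(); ++i) {
        contribs.emplace_back(feature_ids[i],
                              logits_path[i + 1] - logits_path[i]);
      }
      return contribs;
    }

    int main() {
      // Bias 0.5, then two splits moving the logit to 0.8 and 0.3.
      for (const auto& c : Contributions({7, 2}, {0.5f, 0.8f, 0.3f})) {
        std::printf("feature %d contributed %+.2f\n", c.first, c.second);
      }
      return 0;
    }
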
diff --git a/tensorflow/core/kernels/boosted_trees/resources.cc b/tensorflow/core/kernels/boosted_trees/resources.cc
index c410748c27..cc90bb2f45 100644
--- a/tensorflow/core/kernels/boosted_trees/resources.cc
+++ b/tensorflow/core/kernels/boosted_trees/resources.cc
@@ -21,6 +21,10 @@ limitations under the License.
namespace tensorflow {
+namespace {
+constexpr float kLayerByLayerTreeWeight = 1.0;
+} // namespace
+
// Constructor.
BoostedTreesEnsembleResource::BoostedTreesEnsembleResource()
: tree_ensemble_(
@@ -78,6 +82,16 @@ float BoostedTreesEnsembleResource::node_value(const int32 tree_id,
}
}
+void BoostedTreesEnsembleResource::set_node_value(const int32 tree_id,
+ const int32 node_id,
+ const float logits) {
+ DCHECK_LT(tree_id, tree_ensemble_->trees_size());
+ DCHECK_LT(node_id, tree_ensemble_->trees(tree_id).nodes_size());
+ auto* node = tree_ensemble_->mutable_trees(tree_id)->mutable_nodes(node_id);
+ DCHECK(node->node_case() == boosted_trees::Node::kLeaf);
+ node->mutable_leaf()->set_scalar(logits);
+}
+
int32 BoostedTreesEnsembleResource::GetNumLayersGrown(
const int32 tree_id) const {
DCHECK_LT(tree_id, tree_ensemble_->trees_size());
@@ -204,9 +218,14 @@ void BoostedTreesEnsembleResource::UpdateGrowingMetadata() const {
// Add a tree to the ensemble and returns a new tree_id.
int32 BoostedTreesEnsembleResource::AddNewTree(const float weight) {
+ return AddNewTreeWithLogits(weight, 0.0);
+}
+
+int32 BoostedTreesEnsembleResource::AddNewTreeWithLogits(const float weight,
+ const float logits) {
const int32 new_tree_id = tree_ensemble_->trees_size();
auto* node = tree_ensemble_->add_trees()->add_nodes();
- node->mutable_leaf()->set_scalar(0.0);
+ node->mutable_leaf()->set_scalar(logits);
tree_ensemble_->add_tree_weights(weight);
tree_ensemble_->add_tree_metadata();
@@ -225,7 +244,7 @@ void BoostedTreesEnsembleResource::AddBucketizedSplitNode(
*right_node_id = *left_node_id + 1;
auto* left_node = tree->add_nodes();
auto* right_node = tree->add_nodes();
- if (node_id != 0) {
+ if (node_id != 0 || (node->has_leaf() && node->leaf().scalar() != 0)) {
// Save previous leaf value if it is not the first leaf in the tree.
node->mutable_metadata()->mutable_original_leaf()->Swap(
node->mutable_leaf());
diff --git a/tensorflow/core/kernels/boosted_trees/resources.h b/tensorflow/core/kernels/boosted_trees/resources.h
index df78d3f275..f961ed3814 100644
--- a/tensorflow/core/kernels/boosted_trees/resources.h
+++ b/tensorflow/core/kernels/boosted_trees/resources.h
@@ -70,6 +70,9 @@ class BoostedTreesEnsembleResource : public StampedResource {
float node_value(const int32 tree_id, const int32 node_id) const;
+ void set_node_value(const int32 tree_id, const int32 node_id,
+ const float logits);
+
int32 GetNumLayersGrown(const int32 tree_id) const;
void SetNumLayersGrown(const int32 tree_id, int32 new_num_layers) const;
@@ -99,6 +102,9 @@ class BoostedTreesEnsembleResource : public StampedResource {
// Add a tree to the ensemble and returns a new tree_id.
int32 AddNewTree(const float weight);
+ // Adds a new tree with one node to the ensemble and sets the node's value to logits.
+ int32 AddNewTreeWithLogits(const float weight, const float logits);
+
// Grows the tree by adding a split and leaves.
void AddBucketizedSplitNode(const int32 tree_id, const int32 node_id,
const int32 feature_id, const int32 threshold,
diff --git a/tensorflow/core/kernels/boosted_trees/stats_ops.cc b/tensorflow/core/kernels/boosted_trees/stats_ops.cc
index 48afd3fbf3..64ec1caa9c 100644
--- a/tensorflow/core/kernels/boosted_trees/stats_ops.cc
+++ b/tensorflow/core/kernels/boosted_trees/stats_ops.cc
@@ -17,13 +17,10 @@ limitations under the License.
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/kernels/boosted_trees/tree_helper.h"
namespace tensorflow {
-namespace {
-const float kEps = 1e-15;
-} // namespace
-
class BoostedTreesCalculateBestGainsPerFeatureOp : public OpKernel {
public:
explicit BoostedTreesCalculateBestGainsPerFeatureOp(
@@ -139,7 +136,7 @@ class BoostedTreesCalculateBestGainsPerFeatureOp : public OpKernel {
total_hess - cum_hess_bucket, l1, l2,
&contrib_for_right, &gain_for_right);
- if (gain_for_left + gain_for_right > best_gain) {
+ if (GainIsLarger(gain_for_left + gain_for_right, best_gain)) {
best_gain = gain_for_left + gain_for_right;
best_bucket = bucket;
best_contrib_for_left = contrib_for_left;
@@ -200,40 +197,6 @@ class BoostedTreesCalculateBestGainsPerFeatureOp : public OpKernel {
}
private:
- void CalculateWeightsAndGains(const float g, const float h, const float l1,
- const float l2, float* weight, float* gain) {
- //
- // The formula for weight is -(g+l1*sgn(w))/(H+l2), for gain it is
- // (g+l1*sgn(w))^2/(h+l2).
- // This is because for each leaf we optimize
- // 1/2(h+l2)*w^2+g*w+l1*abs(w)
- float g_with_l1 = g;
- // Apply L1 regularization.
- // 1) Assume w>0 => w=-(g+l1)/(h+l2)=> g+l1 < 0 => g < -l1
- // 2) Assume w<0 => w=-(g-l1)/(h+l2)=> g-l1 > 0 => g > l1
- // For g from (-l1, l1), thus there is no solution => set to 0.
- if (l1 > 0) {
- if (g > l1) {
- g_with_l1 -= l1;
- } else if (g < -l1) {
- g_with_l1 += l1;
- } else {
- *weight = 0.0;
- *gain = 0.0;
- return;
- }
- }
- // Apply L2 regularization.
- if (h + l2 <= kEps) {
- // Avoid division by 0 or infinitesimal.
- *weight = 0;
- *gain = 0;
- } else {
- *weight = -g_with_l1 / (h + l2);
- *gain = -g_with_l1 * (*weight);
- }
- }
-
int max_splits_;
int num_features_;
};
diff --git a/tensorflow/core/kernels/boosted_trees/training_ops.cc b/tensorflow/core/kernels/boosted_trees/training_ops.cc
index a14fd4a133..973cdec13a 100644
--- a/tensorflow/core/kernels/boosted_trees/training_ops.cc
+++ b/tensorflow/core/kernels/boosted_trees/training_ops.cc
@@ -16,11 +16,13 @@ limitations under the License.
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/kernels/boosted_trees/resources.h"
+#include "tensorflow/core/kernels/boosted_trees/tree_helper.h"
namespace tensorflow {
namespace {
constexpr float kLayerByLayerTreeWeight = 1.0;
+constexpr float kMinDeltaForCenterBias = 0.01;
// TODO(nponomareva, youngheek): consider using vector.
struct SplitCandidate {
@@ -89,7 +91,8 @@ class BoostedTreesUpdateEnsembleOp : public OpKernel {
// Find best splits for each active node.
std::map<int32, SplitCandidate> best_splits;
- FindBestSplitsPerNode(context, node_ids_list, gains_list, &best_splits);
+ FindBestSplitsPerNode(context, node_ids_list, gains_list, feature_ids,
+ &best_splits);
int32 current_tree =
UpdateGlobalAttemptsAndRetrieveGrowableTree(ensemble_resource);
@@ -193,6 +196,7 @@ class BoostedTreesUpdateEnsembleOp : public OpKernel {
void FindBestSplitsPerNode(
OpKernelContext* const context, const OpInputList& node_ids_list,
const OpInputList& gains_list,
+ const TTypes<const int32>::Vec& feature_ids,
std::map<int32, SplitCandidate>* best_split_per_node) {
// Find best split per node going through every feature candidate.
for (int64 feature_idx = 0; feature_idx < num_features_; ++feature_idx) {
@@ -211,8 +215,18 @@ class BoostedTreesUpdateEnsembleOp : public OpKernel {
candidate.candidate_idx = candidate_idx;
candidate.gain = gain;
- if (best_split_it == best_split_per_node->end() ||
- gain > best_split_it->second.gain) {
+ if (TF_PREDICT_FALSE(best_split_it != best_split_per_node->end() &&
+ GainsAreEqual(gain, best_split_it->second.gain))) {
+ const auto best_candidate = (*best_split_per_node)[node_id];
+ const int32 best_feature_id = feature_ids(best_candidate.feature_idx);
+ const int32 feature_id = feature_ids(candidate.feature_idx);
+ VLOG(2) << "Breaking ties on feature ids and buckets";
+ // Breaking ties deterministically.
+ if (feature_id < best_feature_id) {
+ (*best_split_per_node)[node_id] = candidate;
+ }
+ } else if (best_split_it == best_split_per_node->end() ||
+ GainIsLarger(gain, best_split_it->second.gain)) {
(*best_split_per_node)[node_id] = candidate;
}
}
@@ -227,4 +241,69 @@ class BoostedTreesUpdateEnsembleOp : public OpKernel {
REGISTER_KERNEL_BUILDER(Name("BoostedTreesUpdateEnsemble").Device(DEVICE_CPU),
BoostedTreesUpdateEnsembleOp);
+class BoostedTreesCenterBiasOp : public OpKernel {
+ public:
+ explicit BoostedTreesCenterBiasOp(OpKernelConstruction* const context)
+ : OpKernel(context) {}
+
+ void Compute(OpKernelContext* const context) override {
+ // Get decision tree ensemble.
+ BoostedTreesEnsembleResource* ensemble_resource;
+ OP_REQUIRES_OK(context, LookupResource(context, HandleFromInput(context, 0),
+ &ensemble_resource));
+ core::ScopedUnref unref_me(ensemble_resource);
+ mutex_lock l(*ensemble_resource->get_mutex());
+ // Increase the ensemble stamp.
+ ensemble_resource->set_stamp(ensemble_resource->stamp() + 1);
+
+ // Read the means of the gradients and hessians.
+ const Tensor* mean_gradients_t;
+ OP_REQUIRES_OK(context,
+ context->input("mean_gradients", &mean_gradients_t));
+
+ const Tensor* mean_hessians_t;
+ OP_REQUIRES_OK(context, context->input("mean_hessians", &mean_hessians_t));
+
+ // Get the regularization options.
+ const Tensor* l1_t;
+ OP_REQUIRES_OK(context, context->input("l1", &l1_t));
+ const auto l1 = l1_t->scalar<float>()();
+ const Tensor* l2_t;
+ OP_REQUIRES_OK(context, context->input("l2", &l2_t));
+ const auto l2 = l2_t->scalar<float>()();
+
+ // For now, assume 1-dimensional weight on leaves.
+ float logits;
+ float unused_gain;
+
+ // TODO(nponomareva): change this when supporting multiclass.
+ const float gradients_mean = mean_gradients_t->flat<float>()(0);
+ const float hessians_mean = mean_hessians_t->flat<float>()(0);
+ CalculateWeightsAndGains(gradients_mean, hessians_mean, l1, l2, &logits,
+ &unused_gain);
+
+ float current_bias = 0.0;
+ bool continue_centering = true;
+ if (ensemble_resource->num_trees() == 0) {
+ ensemble_resource->AddNewTreeWithLogits(kLayerByLayerTreeWeight, logits);
+ current_bias = logits;
+ } else {
+ current_bias = ensemble_resource->node_value(0, 0);
+ continue_centering =
+ std::abs(logits / current_bias) > kMinDeltaForCenterBias;
+ current_bias += logits;
+ ensemble_resource->set_node_value(0, 0, current_bias);
+ }
+
+ Tensor* continue_centering_t = nullptr;
+ OP_REQUIRES_OK(
+ context, context->allocate_output("continue_centering", TensorShape({}),
+ &continue_centering_t));
+ // Check if we need to continue centering bias.
+ continue_centering_t->scalar<bool>()() = continue_centering;
+ }
+};
+REGISTER_KERNEL_BUILDER(Name("BoostedTreesCenterBias").Device(DEVICE_CPU),
+ BoostedTreesCenterBiasOp);
+
} // namespace tensorflow
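
BoostedTreesCenterBiasOp implements iterative bias centering: each call
applies one regularized Newton step (the `logits` value) to the root leaf and
reports whether the step was still large relative to the accumulated bias
(kMinDeltaForCenterBias = 0.01). A self-contained sketch of that loop under an
assumed logistic loss, where the caller would feed back mean gradient
g = sigmoid(bias) - y_mean and mean hessian h = p * (1 - p):

    #include <cmath>
    #include <cstdio>

    int main() {
      const float kMinDeltaForCenterBias = 0.01f;
      const float y_mean = 0.8f;  // assumed label mean
      float bias = 0.0f;
      bool first_round = true;
      bool continue_centering = true;
      for (int round = 1; continue_centering && round <= 20; ++round) {
        const float p = 1.0f / (1.0f + std::exp(-bias));
        const float g = p - y_mean;      // mean gradient
        const float h = p * (1.0f - p);  // mean hessian
        const float logits = -g / h;     // Newton step, l1 = l2 = 0
        if (first_round) {
          bias = logits;  // mirrors AddNewTreeWithLogits on an empty ensemble
          first_round = false;
        } else {
          continue_centering =
              std::fabs(logits / bias) > kMinDeltaForCenterBias;
          bias += logits;  // mirrors set_node_value(0, 0, current_bias)
        }
        std::printf("round %d: bias = %f\n", round, bias);
      }
      // Converges to log(y_mean / (1 - y_mean)) ~= 1.386 in a few rounds.
      return 0;
    }
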
diff --git a/tensorflow/core/kernels/boosted_trees/tree_helper.h b/tensorflow/core/kernels/boosted_trees/tree_helper.h
new file mode 100644
index 0000000000..8b18d9e5f8
--- /dev/null
+++ b/tensorflow/core/kernels/boosted_trees/tree_helper.h
@@ -0,0 +1,69 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CORE_KERNELS_BOOSTED_TREES_TREE_HELPER_H_
+#define TENSORFLOW_CORE_KERNELS_BOOSTED_TREES_TREE_HELPER_H_
+#include <cmath>
+
+namespace tensorflow {
+
+static bool GainsAreEqual(const float g1, const float g2) {
+ const float kTolerance = 1e-15;
+ return std::abs(g1 - g2) < kTolerance;
+}
+
+static bool GainIsLarger(const float g1, const float g2) {
+ const float kTolerance = 1e-15;
+ return g1 - g2 >= kTolerance;
+}
+
+static void CalculateWeightsAndGains(const float g, const float h,
+ const float l1, const float l2,
+ float* weight, float* gain) {
+ const float kEps = 1e-15;
+ // The formula for weight is -(g+l1*sgn(w))/(H+l2), for gain it is
+ // (g+l1*sgn(w))^2/(h+l2).
+ // This is because for each leaf we optimize
+ // 1/2(h+l2)*w^2+g*w+l1*abs(w)
+ float g_with_l1 = g;
+ // Apply L1 regularization.
+ // 1) Assume w>0 => w=-(g+l1)/(h+l2)=> g+l1 < 0 => g < -l1
+ // 2) Assume w<0 => w=-(g-l1)/(h+l2)=> g-l1 > 0 => g > l1
+ // Thus for g in (-l1, l1) there is no solution => set the weight to 0.
+ if (l1 > 0) {
+ if (g > l1) {
+ g_with_l1 -= l1;
+ } else if (g < -l1) {
+ g_with_l1 += l1;
+ } else {
+ *weight = 0.0;
+ *gain = 0.0;
+ return;
+ }
+ }
+ // Apply L2 regularization.
+ if (h + l2 <= kEps) {
+ // Avoid division by 0 or infinitesimal.
+ *weight = 0;
+ *gain = 0;
+ } else {
+ *weight = -g_with_l1 / (h + l2);
+ *gain = -g_with_l1 * (*weight);
+ }
+}
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_CORE_KERNELS_BOOSTED_TREES_TREE_HELPER_H_
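
Written out, the per-leaf objective that CalculateWeightsAndGains minimizes,
and the closed-form weight and gain it returns (same g, h, l1, l2 as in the
code; the soft-thresholding of g is the three-way branch above):

    \min_{w}\ \tfrac{1}{2}(h + l_2)\,w^{2} + g\,w + l_1\,|w|
    \quad\Longrightarrow\quad
    w^{*} = -\frac{g_{l_1}}{h + l_2},
    \qquad
    g_{l_1} = \operatorname{sign}(g)\,\max(|g| - l_1,\ 0),
    \qquad
    \text{gain} = -\,g_{l_1}\,w^{*} = \frac{g_{l_1}^{2}}{h + l_2}.
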
diff --git a/tensorflow/core/kernels/concat_op.cc b/tensorflow/core/kernels/concat_op.cc
index a87b63f913..902327aaea 100644
--- a/tensorflow/core/kernels/concat_op.cc
+++ b/tensorflow/core/kernels/concat_op.cc
@@ -113,7 +113,7 @@ class ConcatBaseOp : public OpKernel {
int64 output_concat_dim = 0;
const bool input_is_scalar = IsLegacyScalar(input_shape);
for (int i = 0; i < N; ++i) {
- const auto in = values[i];
+ const auto& in = values[i];
const bool in_is_scalar = IsLegacyScalar(in.shape());
OP_REQUIRES(
c, in.dims() == input_dims || (input_is_scalar && in_is_scalar),
diff --git a/tensorflow/core/kernels/constant_op.cc b/tensorflow/core/kernels/constant_op.cc
index fe1a1ba5a3..a888422d49 100644
--- a/tensorflow/core/kernels/constant_op.cc
+++ b/tensorflow/core/kernels/constant_op.cc
@@ -297,7 +297,8 @@ class ZerosLikeOp : public OpKernel {
errors::InvalidArgument("ZerosLike non-scalar Tensor with "
"dtype=DT_VARIANT is not supported."));
const Variant& v = input.scalar<Variant>()();
- Tensor out(cpu_allocator(), DT_VARIANT, TensorShape({}));
+ Tensor out(ctx->device()->GetAllocator(AllocatorAttributes()), DT_VARIANT,
+ TensorShape({}));
Variant* out_v = &(out.scalar<Variant>()());
OP_REQUIRES_OK(ctx, UnaryOpVariant<Device>(
ctx, ZEROS_LIKE_VARIANT_UNARY_OP, v, out_v));
diff --git a/tensorflow/core/kernels/conv_ops_fused.cc b/tensorflow/core/kernels/conv_ops_fused.cc
index 1b40ad81f4..972100ba77 100644
--- a/tensorflow/core/kernels/conv_ops_fused.cc
+++ b/tensorflow/core/kernels/conv_ops_fused.cc
@@ -195,7 +195,7 @@ EIGEN_ALWAYS_INLINE PerCacheLineParameters<T1> CalculatePerCacheLineParameters(
const int64 bottom_y_index =
std::min(static_cast<int64>(std::ceil(in_y)), (st.in_height - 1));
// Lerp is used for bilinear filtering when that's needed.
- result.y_lerp = in_y - top_y_index;
+ result.y_lerp = static_cast<T1>(in_y - top_y_index);
// Which rows of the original input image to pull the values from.
result.input_top_row_start =
input_batch_start + (top_y_index * input_width * input_depth);
@@ -245,7 +245,7 @@ CalculatePerCachePixelParameters(int64 cache_x, int64 cache_start_x,
result.right_x_index =
std::min(static_cast<int64>(std::ceil(in_x)), (st.in_width - 1));
// This x_lerp is used to blend pixels in bilinear filtering.
- result.x_lerp = in_x - result.left_x_index;
+ result.x_lerp = static_cast<T1>(in_x - result.left_x_index);
return result;
}
@@ -465,8 +465,8 @@ class FusedResizeAndPadConvFunctor {
// for that operation are always present.
// Work out the parameters that remain constant across the
// row we're calculating.
- PerCacheLineParameters<float> line_params(
- CalculatePerCacheLineParameters<float>(
+ PerCacheLineParameters<T1> line_params(
+ CalculatePerCacheLineParameters<T1>(
task_params.cache_height, cache_y,
task_params.resize_cache,
task_params.cache_line_width, task_params.input_width,
@@ -881,7 +881,9 @@ class FusedResizeConv2DUsingGemmOp : public OpKernel {
BILINEAR>, \
true>);
+TF_CALL_half(REGISTER_FUSED);
TF_CALL_float(REGISTER_FUSED);
+TF_CALL_double(REGISTER_FUSED);
#define REGISTER_PAD_ONLY_FUSED(T) \
REGISTER_KERNEL_BUILDER( \
@@ -892,6 +894,8 @@ TF_CALL_float(REGISTER_FUSED);
NEAREST>, \
false>);
+TF_CALL_half(REGISTER_PAD_ONLY_FUSED);
TF_CALL_float(REGISTER_PAD_ONLY_FUSED);
+TF_CALL_double(REGISTER_PAD_ONLY_FUSED);
} // namespace tensorflow
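
The casts above make the lerp weights well-typed for half and double as well
as float. The weights themselves drive an ordinary linear blend between the
two neighboring samples; a minimal sketch of that blend:

    #include <cassert>

    // t = 0 selects a, t = 1 selects b; fractional t blends the two, which
    // is what y_lerp/x_lerp do for the rows/pixels around a resized
    // coordinate.
    template <typename T>
    T Lerp(T a, T b, T t) {
      return a + (b - a) * t;
    }

    int main() {
      // in_y = 2.25 falls between rows 2 and 3, so y_lerp = 0.25.
      const float top = 10.0f, bottom = 20.0f, y_lerp = 0.25f;
      assert(Lerp(top, bottom, y_lerp) == 12.5f);
      return 0;
    }
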
diff --git a/tensorflow/core/kernels/conv_ops_test.cc b/tensorflow/core/kernels/conv_ops_test.cc
index 9acc725ba8..4f9a96ce17 100644
--- a/tensorflow/core/kernels/conv_ops_test.cc
+++ b/tensorflow/core/kernels/conv_ops_test.cc
@@ -88,14 +88,15 @@ TEST(ConvParameters, WinogradNonfusedAlgoSize) {
class FusedResizePadConvOpTest : public OpsTestBase {
protected:
- void HandwrittenConv() {
+ template <typename T>
+ void HandwrittenConv(DataType dtype) {
const int stride = 1;
TF_EXPECT_OK(NodeDefBuilder("fused_resize_op", "FusedResizeAndPadConv2D")
- .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(dtype))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
- .Input(FakeInput(DT_FLOAT))
- .Attr("T", DT_FLOAT)
+ .Input(FakeInput(dtype))
+ .Attr("T", dtype)
.Attr("resize_align_corners", false)
.Attr("mode", "REFLECT")
.Attr("strides", {1, stride, stride, 1})
@@ -110,9 +111,8 @@ class FusedResizePadConvOpTest : public OpsTestBase {
// | 1 | 2 | 3 | 4 |
// | 5 | 6 | 7 | 8 |
// | 9 | 10 | 11 | 12 |
- Tensor image(DT_FLOAT,
- {image_batch_count, image_height, image_width, depth});
- test::FillValues<float>(&image, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
+ Tensor image(dtype, {image_batch_count, image_height, image_width, depth});
+ test::FillValues<T>(&image, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
// The filter matrix is:
// | 1 | 4 | 7 |
@@ -120,8 +120,8 @@ class FusedResizePadConvOpTest : public OpsTestBase {
// | 3 | 6 | 9 |
const int filter_size = 3;
const int filter_count = 1;
- Tensor filter(DT_FLOAT, {filter_size, filter_size, depth, filter_count});
- test::FillValues<float>(&filter, {1, 4, 7, 2, 5, 8, 3, 6, 9});
+ Tensor filter(dtype, {filter_size, filter_size, depth, filter_count});
+ test::FillValues<T>(&filter, {1, 4, 7, 2, 5, 8, 3, 6, 9});
const int resized_width = image_width;
const int resized_height = image_height;
@@ -131,12 +131,12 @@ class FusedResizePadConvOpTest : public OpsTestBase {
const int left_padding = 0;
const int right_padding = 0;
- AddInputFromArray<float>(image.shape(), image.flat<float>());
+ AddInputFromArray<T>(image.shape(), image.flat<T>());
AddInputFromArray<int32>(TensorShape({2}), {resized_height, resized_width});
AddInputFromArray<int32>(
TensorShape({4, 2}),
{0, 0, top_padding, bottom_padding, left_padding, right_padding, 0, 0});
- AddInputFromArray<float>(filter.shape(), filter.flat<float>());
+ AddInputFromArray<T>(filter.shape(), filter.flat<T>());
TF_ASSERT_OK(RunOpKernel());
// We're sliding the 3x3 filter across the 3x4 image, with accesses outside
@@ -160,21 +160,22 @@ class FusedResizePadConvOpTest : public OpsTestBase {
// | 187 | 234 | 261 | 121 |
const int expected_width = image_width;
const int expected_height = image_height * filter_count;
- Tensor expected(DT_FLOAT, TensorShape({image_batch_count, expected_height,
- expected_width, filter_count}));
- test::FillValues<float>(
+ Tensor expected(dtype, TensorShape({image_batch_count, expected_height,
+ expected_width, filter_count}));
+ test::FillValues<T>(
&expected, {105, 150, 183, 95, 235, 312, 357, 178, 187, 234, 261, 121});
const Tensor& output = *GetOutput(0);
- test::ExpectTensorNear<float>(expected, output, 1e-5);
+ test::ExpectTensorNear<T>(expected, output, 1e-5);
}
+ template <typename T>
void CompareFusedAndSeparate(int input_width, int input_height,
int input_depth, int resize_width,
int resize_height, int y_padding, int x_padding,
int filter_size, int filter_count,
bool resize_align_corners,
const string& pad_mode, int stride,
- const string& padding) {
+ const string& padding, DataType dtype) {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops; // NOLINT(build/namespaces)
@@ -183,29 +184,34 @@ class FusedResizePadConvOpTest : public OpsTestBase {
test::FillIota<float>(&input_data, 1.0f);
Output input =
Const(root.WithOpName("input"), Input::Initializer(input_data));
+ Output casted_input = Cast(root.WithOpName("casted_input"), input, dtype);
Tensor filter_data(DT_FLOAT, TensorShape({filter_size, filter_size,
input_depth, filter_count}));
test::FillIota<float>(&filter_data, 1.0f);
Output filter =
Const(root.WithOpName("filter"), Input::Initializer(filter_data));
+ Output casted_filter =
+ Cast(root.WithOpName("casted_filter"), filter, dtype);
Output resize_size =
Const(root.WithOpName("resize_size"), {resize_height, resize_width});
Output resize =
ResizeBilinear(root.WithOpName("resize"), input, resize_size,
ResizeBilinear::AlignCorners(resize_align_corners));
+ // Bilinear resize only outputs float; cast it to dtype to match the input.
+ Output casted_resize = Cast(root.WithOpName("cast"), resize, dtype);
Output paddings =
Const(root.WithOpName("paddings"),
{{0, 0}, {y_padding, y_padding}, {x_padding, x_padding}, {0, 0}});
- Output mirror_pad =
- MirrorPad(root.WithOpName("mirror_pad"), resize, paddings, pad_mode);
- Output conv = Conv2D(root.WithOpName("conv"), mirror_pad, filter,
+ Output mirror_pad = MirrorPad(root.WithOpName("mirror_pad"), casted_resize,
+ paddings, pad_mode);
+ Output conv = Conv2D(root.WithOpName("conv"), mirror_pad, casted_filter,
{1, stride, stride, 1}, padding);
Output fused_conv = FusedResizeAndPadConv2D(
- root.WithOpName("fused_conv"), input, resize_size, paddings, filter,
- pad_mode, {1, stride, stride, 1}, padding,
+ root.WithOpName("fused_conv"), casted_input, resize_size, paddings,
+ casted_filter, pad_mode, {1, stride, stride, 1}, padding,
FusedResizeAndPadConv2D::ResizeAlignCorners(resize_align_corners));
tensorflow::GraphDef graph;
@@ -221,14 +227,16 @@ class FusedResizePadConvOpTest : public OpsTestBase {
std::vector<Tensor> fused_tensors;
TF_ASSERT_OK(session->Run({}, {"fused_conv"}, {}, &fused_tensors));
- test::ExpectClose(unfused_tensors[0], fused_tensors[0]);
+ test::ExpectTensorNear<T>(unfused_tensors[0], fused_tensors[0], 1e-5);
}
+ template <typename T>
void CompareFusedPadOnlyAndSeparate(int input_width, int input_height,
int input_depth, int y_padding,
int x_padding, int filter_size,
int filter_count, const string& pad_mode,
- int stride, const string& padding) {
+ int stride, const string& padding,
+ DataType dtype) {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops; // NOLINT(build/namespaces)
@@ -237,24 +245,27 @@ class FusedResizePadConvOpTest : public OpsTestBase {
test::FillIota<float>(&input_data, 1.0f);
Output input =
Const(root.WithOpName("input"), Input::Initializer(input_data));
+ Output casted_input = Cast(root.WithOpName("casted_input"), input, dtype);
Tensor filter_data(DT_FLOAT, TensorShape({filter_size, filter_size,
input_depth, filter_count}));
test::FillIota<float>(&filter_data, 1.0f);
Output filter =
Const(root.WithOpName("filter"), Input::Initializer(filter_data));
+ Output casted_filter =
+ Cast(root.WithOpName("casted_filter"), filter, dtype);
Output paddings =
Const(root.WithOpName("paddings"),
{{0, 0}, {y_padding, y_padding}, {x_padding, x_padding}, {0, 0}});
- Output mirror_pad =
- MirrorPad(root.WithOpName("mirror_pad"), input, paddings, pad_mode);
- Output conv = Conv2D(root.WithOpName("conv"), mirror_pad, filter,
+ Output mirror_pad = MirrorPad(root.WithOpName("mirror_pad"), casted_input,
+ paddings, pad_mode);
+ Output conv = Conv2D(root.WithOpName("conv"), mirror_pad, casted_filter,
{1, stride, stride, 1}, padding);
- Output fused_conv =
- FusedPadConv2D(root.WithOpName("fused_conv"), input, paddings, filter,
- pad_mode, {1, stride, stride, 1}, padding);
+ Output fused_conv = FusedPadConv2D(
+ root.WithOpName("fused_conv"), casted_input, paddings, casted_filter,
+ pad_mode, {1, stride, stride, 1}, padding);
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
@@ -269,95 +280,130 @@ class FusedResizePadConvOpTest : public OpsTestBase {
std::vector<Tensor> fused_tensors;
TF_ASSERT_OK(session->Run({}, {"fused_conv"}, {}, &fused_tensors));
- test::ExpectClose(unfused_tensors[0], fused_tensors[0]);
+ test::ExpectTensorNear<T>(unfused_tensors[0], fused_tensors[0], 1e-5);
}
};
-TEST_F(FusedResizePadConvOpTest, HandwrittenConv) { HandwrittenConv(); }
+TEST_F(FusedResizePadConvOpTest, HandwrittenConvHalf) {
+ HandwrittenConv<Eigen::half>(DT_HALF);
+}
-TEST_F(FusedResizePadConvOpTest, IdentityComparative) {
- CompareFusedAndSeparate(10, 10, 1, 10, 10, 0, 0, 1, 1, false, "REFLECT", 1,
- "SAME");
+TEST_F(FusedResizePadConvOpTest, HandwrittenConvFloat) {
+ HandwrittenConv<float>(DT_FLOAT);
+}
+
+TEST_F(FusedResizePadConvOpTest, HandwrittenConvDouble) {
+ HandwrittenConv<double>(DT_DOUBLE);
+}
+
+TEST_F(FusedResizePadConvOpTest, IdentityComparativeHalf) {
+ CompareFusedAndSeparate<Eigen::half>(10, 10, 1, 10, 10, 0, 0, 1, 1, false,
+ "REFLECT", 1, "SAME", DT_HALF);
+}
+
+TEST_F(FusedResizePadConvOpTest, IdentityComparativeFloat) {
+ CompareFusedAndSeparate<float>(10, 10, 1, 10, 10, 0, 0, 1, 1, false,
+ "REFLECT", 1, "SAME", DT_FLOAT);
+}
+
+TEST_F(FusedResizePadConvOpTest, IdentityComparativeDouble) {
+ CompareFusedAndSeparate<double>(10, 10, 1, 10, 10, 0, 0, 1, 1, false,
+ "REFLECT", 1, "SAME", DT_DOUBLE);
}
TEST_F(FusedResizePadConvOpTest, ConvOnlyComparative) {
- CompareFusedAndSeparate(10, 10, 3, 10, 10, 0, 0, 4, 4, false, "REFLECT", 1,
- "SAME");
+ CompareFusedAndSeparate<float>(10, 10, 3, 10, 10, 0, 0, 4, 4, false,
+ "REFLECT", 1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeOnlyComparative) {
- CompareFusedAndSeparate(10, 10, 1, 20, 20, 0, 0, 1, 1, false, "REFLECT", 1,
- "SAME");
+ CompareFusedAndSeparate<float>(10, 10, 1, 20, 20, 0, 0, 1, 1, false,
+ "REFLECT", 1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndConvComparative) {
- CompareFusedAndSeparate(2, 2, 4, 4, 2, 0, 0, 2, 2, false, "REFLECT", 1,
- "SAME");
+ CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, false, "REFLECT", 1,
+ "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAlignAndConvComparative) {
- CompareFusedAndSeparate(2, 2, 4, 4, 2, 0, 0, 2, 2, true, "REFLECT", 1,
- "SAME");
+ CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, true, "REFLECT", 1,
+ "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndConvStridedComparative) {
- CompareFusedAndSeparate(2, 2, 4, 4, 2, 0, 0, 2, 2, false, "REFLECT", 2,
- "SAME");
+ CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, false, "REFLECT", 2,
+ "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAlignAndConvValidComparative) {
- CompareFusedAndSeparate(2, 2, 4, 4, 2, 0, 0, 2, 2, true, "REFLECT", 1,
- "VALID");
+ CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, true, "REFLECT", 1,
+ "VALID", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, PadOnlyComparative) {
- CompareFusedAndSeparate(4, 4, 1, 4, 4, 2, 2, 1, 1, false, "REFLECT", 1,
- "SAME");
+ CompareFusedAndSeparate<float>(4, 4, 1, 4, 4, 2, 2, 1, 1, false, "REFLECT", 1,
+ "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, PadOnlyWithChannelsComparative) {
- CompareFusedAndSeparate(4, 4, 3, 4, 4, 2, 2, 1, 1, false, "REFLECT", 1,
- "SAME");
+ CompareFusedAndSeparate<float>(4, 4, 3, 4, 4, 2, 2, 1, 1, false, "REFLECT", 1,
+ "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndPadComparative) {
- CompareFusedAndSeparate(4, 4, 1, 6, 6, 2, 2, 1, 1, false, "REFLECT", 1,
- "SAME");
+ CompareFusedAndSeparate<float>(4, 4, 1, 6, 6, 2, 2, 1, 1, false, "REFLECT", 1,
+ "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, PadOnlySymmetricComparative) {
- CompareFusedAndSeparate(4, 4, 1, 4, 4, 2, 2, 1, 1, false, "SYMMETRIC", 1,
- "SAME");
+ CompareFusedAndSeparate<float>(4, 4, 1, 4, 4, 2, 2, 1, 1, false, "SYMMETRIC",
+ 1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndPadSymmetricComparative) {
- CompareFusedAndSeparate(4, 4, 3, 6, 6, 2, 2, 1, 1, false, "SYMMETRIC", 1,
- "SAME");
+ CompareFusedAndSeparate<float>(4, 4, 3, 6, 6, 2, 2, 1, 1, false, "SYMMETRIC",
+ 1, "SAME", DT_FLOAT);
+}
+
+TEST_F(FusedResizePadConvOpTest, ResizeAndPadSymmetricComparativeLarge) {
+ CompareFusedAndSeparate<float>(1000, 1000, 3, 1006, 1006, 2, 2, 1, 1, false,
+ "SYMMETRIC", 1, "SAME", DT_FLOAT);
}
-TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparative) {
- CompareFusedPadOnlyAndSeparate(10, 10, 1, 0, 0, 1, 1, "REFLECT", 1, "SAME");
+TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeHalf) {
+ CompareFusedPadOnlyAndSeparate<Eigen::half>(10, 10, 1, 0, 0, 1, 1, "REFLECT",
+ 1, "SAME", DT_HALF);
+}
+
+TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeFloat) {
+ CompareFusedPadOnlyAndSeparate<float>(10, 10, 1, 0, 0, 1, 1, "REFLECT", 1,
+ "SAME", DT_FLOAT);
+}
+
+TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeDouble) {
+ CompareFusedPadOnlyAndSeparate<double>(10, 10, 1, 0, 0, 1, 1, "REFLECT", 1,
+ "SAME", DT_DOUBLE);
}
TEST_F(FusedResizePadConvOpTest, NoResizeConvOnlyComparative) {
- CompareFusedPadOnlyAndSeparate(10, 10, 3, 0, 0, 4, 4, "REFLECT", 1, "SAME");
+ CompareFusedPadOnlyAndSeparate<float>(10, 10, 3, 0, 0, 4, 4, "REFLECT", 1,
+ "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, NoResizePadOnlyComparative) {
- CompareFusedPadOnlyAndSeparate(4, 4, 1, 2, 2, 1, 1, "REFLECT", 1, "SAME");
+ CompareFusedPadOnlyAndSeparate<float>(4, 4, 1, 2, 2, 1, 1, "REFLECT", 1,
+ "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, NoResizePadOnlyWithChannelsComparative) {
- CompareFusedPadOnlyAndSeparate(4, 4, 3, 2, 2, 1, 1, "REFLECT", 1, "SAME");
+ CompareFusedPadOnlyAndSeparate<float>(4, 4, 3, 2, 2, 1, 1, "REFLECT", 1,
+ "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, NoResizePadOnlySymmetricComparative) {
- CompareFusedPadOnlyAndSeparate(4, 4, 1, 2, 2, 1, 1, "SYMMETRIC", 1, "SAME");
-}
-
-TEST_F(FusedResizePadConvOpTest, ResizeAndPadSymmetricComparativeLarge) {
- CompareFusedAndSeparate(1000, 1000, 3, 1006, 1006, 2, 2, 1, 1, false,
- "SYMMETRIC", 1, "SAME");
+ CompareFusedPadOnlyAndSeparate<float>(4, 4, 1, 2, 2, 1, 1, "SYMMETRIC", 1,
+ "SAME", DT_FLOAT);
}
class ConvOpTest : public OpsTestBase {
diff --git a/tensorflow/core/kernels/ctc_loss_op.cc b/tensorflow/core/kernels/ctc_loss_op.cc
index b38d838bf1..fb375ee4b3 100644
--- a/tensorflow/core/kernels/ctc_loss_op.cc
+++ b/tensorflow/core/kernels/ctc_loss_op.cc
@@ -100,8 +100,10 @@ class CTCLossOp : public OpKernel {
TensorShape labels_shape({batch_size, max_label_len});
std::vector<int64> order{0, 1};
- sparse::SparseTensor labels_sp(*labels_indices, *labels_values,
- labels_shape, order);
+ sparse::SparseTensor labels_sp;
+ OP_REQUIRES_OK(
+ ctx, sparse::SparseTensor::Create(*labels_indices, *labels_values,
+ labels_shape, order, &labels_sp));
Status labels_sp_valid = labels_sp.IndicesValid();
OP_REQUIRES(ctx, labels_sp_valid.ok(),
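
The change above swaps a validating constructor for the status-returning
sparse::SparseTensor::Create factory, so malformed labels produce an op error
via OP_REQUIRES_OK instead of crashing. A sketch of the general factory
pattern with simplified stand-in types (the real Status and SparseTensor
interfaces are richer):

    #include <cstdio>
    #include <string>

    struct Status {
      bool ok;
      std::string message;
      static Status OK() { return {true, ""}; }
    };

    class SparseVec {
     public:
      // Validation lives in Create and reports through Status; the
      // constructor itself can no longer fail.
      static Status Create(int num_entries, SparseVec* out) {
        if (num_entries < 0) {
          return {false, "num_entries must be non-negative"};
        }
        out->num_entries_ = num_entries;
        return Status::OK();
      }

     private:
      int num_entries_ = 0;
    };

    int main() {
      SparseVec v;
      const Status s = SparseVec::Create(-1, &v);
      if (!s.ok) std::printf("error: %s\n", s.message.c_str());
      return 0;
    }
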
diff --git a/tensorflow/core/kernels/data/BUILD b/tensorflow/core/kernels/data/BUILD
index 6d2a04aa25..e04fa20414 100644
--- a/tensorflow/core/kernels/data/BUILD
+++ b/tensorflow/core/kernels/data/BUILD
@@ -85,6 +85,19 @@ tf_kernel_library(
)
tf_kernel_library(
+ name = "window_dataset_op",
+ srcs = ["window_dataset_op.cc"],
+ deps = [
+ ":dataset",
+ ":window_dataset",
+ "//tensorflow/core:dataset_ops_op_lib",
+ "//tensorflow/core:framework",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:lib_internal",
+ ],
+)
+
+tf_kernel_library(
name = "slide_dataset_op",
srcs = ["slide_dataset_op.cc"],
deps = [
@@ -550,15 +563,6 @@ tf_kernel_library(
)
tf_kernel_library(
- name = "identity_dataset_op",
- srcs = ["identity_dataset_op.cc"],
- deps = [
- ":dataset",
- "//tensorflow/core:framework",
- ],
-)
-
-tf_kernel_library(
name = "optimize_dataset_op",
srcs = ["optimize_dataset_op.cc"],
deps = [
@@ -606,7 +610,6 @@ tf_kernel_library(
":generator_dataset_op",
":group_by_reducer_dataset_op",
":group_by_window_dataset_op",
- ":identity_dataset_op",
":interleave_dataset_op",
":iterator_ops",
":map_and_batch_dataset_op",
@@ -634,6 +637,7 @@ tf_kernel_library(
":tensor_queue_dataset_op",
":tensor_slice_dataset_op",
":unbatch_dataset_op",
+ ":window_dataset_op",
":writer_ops",
":zip_dataset_op",
],
diff --git a/tensorflow/core/kernels/data/captured_function.cc b/tensorflow/core/kernels/data/captured_function.cc
index ee58341cfd..82da385405 100644
--- a/tensorflow/core/kernels/data/captured_function.cc
+++ b/tensorflow/core/kernels/data/captured_function.cc
@@ -214,6 +214,9 @@ Status CapturedFunction::Run(IteratorContext* ctx, std::vector<Tensor>&& args,
});
f_opts.step_container = &step_container;
f_opts.runner = ctx->runner();
+ if (ctx->lib()->device()->device_type() != DEVICE_CPU) {
+ f_opts.create_rendezvous = true;
+ }
// TODO(mrry): Add cancellation manager support to IteratorContext
// so that we can cancel running map functions. The local
// cancellation manager here is created so that we can run kernels
@@ -248,6 +251,9 @@ Status CapturedFunction::RunWithBorrowedArgs(IteratorContext* ctx,
});
f_opts.step_container = &step_container;
f_opts.runner = ctx->runner();
+ if (ctx->lib()->device()->device_type() != DEVICE_CPU) {
+ f_opts.create_rendezvous = true;
+ }
// TODO(mrry): Add cancellation manager support to IteratorContext
// so that we can cancel running map functions. The local
// cancellation manager here is created so that we can run kernels
@@ -304,6 +310,9 @@ Status CapturedFunction::RunInstantiated(const std::vector<Tensor>& args,
});
f_opts.step_container = &step_container;
f_opts.runner = runner;
+ if (lib->device()->device_type() != DEVICE_CPU) {
+ f_opts.create_rendezvous = true;
+ }
// TODO(mrry): Add cancellation manager support to IteratorContext
// so that we can cancel running map functions. The local
// cancellation manager here is created so that we can run kernels
@@ -351,6 +360,9 @@ void CapturedFunction::RunAsync(IteratorContext* ctx,
});
f_opts.step_container = step_container;
f_opts.runner = ctx->runner();
+ if (ctx->lib()->device()->device_type() != DEVICE_CPU) {
+ f_opts.create_rendezvous = true;
+ }
// TODO(mrry): Add cancellation manager support to IteratorContext
// so that we can cancel running map functions. The local
// cancellation manager here is created so that we can run kernels
diff --git a/tensorflow/core/kernels/data/dense_to_sparse_batch_dataset_op.cc b/tensorflow/core/kernels/data/dense_to_sparse_batch_dataset_op.cc
index 91b9279427..da4b14c8b9 100644
--- a/tensorflow/core/kernels/data/dense_to_sparse_batch_dataset_op.cc
+++ b/tensorflow/core/kernels/data/dense_to_sparse_batch_dataset_op.cc
@@ -101,8 +101,8 @@ class DenseToSparseBatchDatasetOp : public UnaryDatasetOpKernel {
}
const DataTypeVector& output_dtypes() const override {
- static DataTypeVector* output_dtypes_ = new DataTypeVector({DT_VARIANT});
- return *output_dtypes_;
+ static DataTypeVector* output_dtypes = new DataTypeVector({DT_VARIANT});
+ return *output_dtypes;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
diff --git a/tensorflow/core/kernels/data/generator_dataset_op.cc b/tensorflow/core/kernels/data/generator_dataset_op.cc
index aae62ad2fe..0981e42ba1 100644
--- a/tensorflow/core/kernels/data/generator_dataset_op.cc
+++ b/tensorflow/core/kernels/data/generator_dataset_op.cc
@@ -197,6 +197,9 @@ class GeneratorDatasetOp : public DatasetOpKernel {
REGISTER_KERNEL_BUILDER(Name("GeneratorDataset").Device(DEVICE_CPU),
GeneratorDatasetOp);
+REGISTER_KERNEL_BUILDER(
+ Name("GeneratorDataset").Device(DEVICE_GPU).HostMemory("handle"),
+ GeneratorDatasetOp);
} // namespace
diff --git a/tensorflow/core/kernels/data/group_by_reducer_dataset_op.cc b/tensorflow/core/kernels/data/group_by_reducer_dataset_op.cc
index 03abae79d2..7206be8c0d 100644
--- a/tensorflow/core/kernels/data/group_by_reducer_dataset_op.cc
+++ b/tensorflow/core/kernels/data/group_by_reducer_dataset_op.cc
@@ -254,6 +254,7 @@ class GroupByReducerDatasetOp : public UnaryDatasetOpKernel {
TF_RETURN_IF_ERROR(
dataset()->captured_finalize_func_->RunWithBorrowedArgs(
ctx, states_[keys_[keys_index_++]], out_tensors));
+ *end_of_sequence = false;
return Status::OK();
}
diff --git a/tensorflow/core/kernels/data/identity_dataset_op.cc b/tensorflow/core/kernels/data/identity_dataset_op.cc
deleted file mode 100644
index e28f188336..0000000000
--- a/tensorflow/core/kernels/data/identity_dataset_op.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include <map>
-
-#include "tensorflow/core/framework/tensor.h"
-#include "tensorflow/core/kernels/data/dataset.h"
-
-namespace tensorflow {
-namespace {
-
-// The purpose of identity dataset is to serve as a placeholder when performing
-// optimizations. It is not expected to be surfaced in the Python API.
-class IdentityDatasetOp : public UnaryDatasetOpKernel {
- public:
- explicit IdentityDatasetOp(OpKernelConstruction* ctx)
- : UnaryDatasetOpKernel(ctx) {
- OP_REQUIRES_OK(ctx, ctx->GetAttr("output_types", &output_types_));
- OP_REQUIRES_OK(ctx, ctx->GetAttr("output_shapes", &output_shapes_));
- }
-
- protected:
- void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
- DatasetBase** output) override {
- *output = new Dataset(ctx, input);
- }
-
- private:
- class Dataset : public GraphDatasetBase {
- public:
- Dataset(OpKernelContext* ctx, const DatasetBase* input)
- : GraphDatasetBase(ctx), input_(input) {
- input_->Ref();
- }
-
- ~Dataset() override { input_->Unref(); }
-
- std::unique_ptr<IteratorBase> MakeIteratorInternal(
- const string& prefix) const override {
- return std::unique_ptr<IteratorBase>(
- new Iterator({this, strings::StrCat(prefix, "::Identity")}));
- }
-
- const DataTypeVector& output_dtypes() const override {
- return input_->output_dtypes();
- }
-
- const std::vector<PartialTensorShape>& output_shapes() const override {
- return input_->output_shapes();
- }
-
- string DebugString() const override { return "IdentityDatasetOp::Dataset"; }
-
- protected:
- Status AsGraphDefInternal(OpKernelContext* ctx, DatasetGraphDefBuilder* b,
- Node** output) const override {
- Node* input_graph_node = nullptr;
- TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node));
- TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node}, output));
- return Status::OK();
- }
-
- private:
- class Iterator : public DatasetIterator<Dataset> {
- public:
- explicit Iterator(const Params& params)
- : DatasetIterator<Dataset>(params) {}
-
- Status Initialize(IteratorContext* ctx) override {
- return errors::Unimplemented(strings::StrCat(prefix(), "::Initialize"));
- }
-
- Status GetNextInternal(IteratorContext* ctx,
- std::vector<Tensor>* out_tensors,
- bool* end_of_sequence) override {
- return errors::Unimplemented(
- strings::StrCat(prefix(), "::GetNextInternal"));
- }
- };
-
- const DatasetBase* const input_;
- };
-
- DataTypeVector output_types_;
- std::vector<PartialTensorShape> output_shapes_;
-};
-
-REGISTER_KERNEL_BUILDER(Name("IdentityDataset").Device(DEVICE_CPU),
- IdentityDatasetOp);
-} // namespace
-} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/iterator_ops.cc b/tensorflow/core/kernels/data/iterator_ops.cc
index b476a452a5..2a94a54f3d 100644
--- a/tensorflow/core/kernels/data/iterator_ops.cc
+++ b/tensorflow/core/kernels/data/iterator_ops.cc
@@ -686,30 +686,45 @@ class ToSingleElementOp : public AsyncOpKernel {
ctx,
dataset->MakeIterator(&iter_ctx, "SingleElementIterator", &iterator),
done);
+
+ // NOTE(jsimsa): We must destroy the iterator before calling `done()`, to
+ // avoid destruction races.
+ IteratorBase* raw_iterator = iterator.release();
+ auto cleanup = gtl::MakeCleanup([ctx, raw_iterator, done] {
+ delete raw_iterator;
+ done();
+ });
std::vector<Tensor> components;
components.reserve(dataset->output_dtypes().size());
- bool end_of_sequence;
-
- OP_REQUIRES_OK_ASYNC(
- ctx, iterator->GetNext(&iter_ctx, &components, &end_of_sequence),
- done);
- OP_REQUIRES_ASYNC(ctx, !end_of_sequence,
- errors::InvalidArgument("Dataset was empty."), done);
+ bool end_of_sequence = false;
+ Status s =
+ raw_iterator->GetNext(&iter_ctx, &components, &end_of_sequence);
+ if (!s.ok()) {
+ ctx->SetStatus(s);
+ return;
+ }
+ if (end_of_sequence) {
+ ctx->SetStatus(errors::InvalidArgument("Dataset was empty."));
+ return;
+ }
for (int i = 0; i < components.size(); ++i) {
// TODO(mrry): Check that the shapes match the shape attrs.
ctx->set_output(i, components[i]);
}
components.clear();
- OP_REQUIRES_OK_ASYNC(
- ctx, iterator->GetNext(&iter_ctx, &components, &end_of_sequence),
- done);
- OP_REQUIRES_ASYNC(
- ctx, end_of_sequence,
- errors::InvalidArgument("Dataset had more than one element."), done);
-
- done();
+ Status s2 =
+ raw_iterator->GetNext(&iter_ctx, &components, &end_of_sequence);
+ if (!s2.ok()) {
+ ctx->SetStatus(s2);
+ return;
+ }
+ if (!end_of_sequence) {
+ ctx->SetStatus(
+ errors::InvalidArgument("Dataset had more than one element."));
+ return;
+ }
});
}
@@ -1135,22 +1150,45 @@ class DeserializeIteratorOp : public OpKernel {
REGISTER_KERNEL_BUILDER(Name("Iterator").Device(DEVICE_CPU), IteratorHandleOp);
+REGISTER_KERNEL_BUILDER(Name("IteratorV2").Device(DEVICE_CPU),
+ IteratorHandleOp);
+REGISTER_KERNEL_BUILDER(Name("IteratorV2").Device(DEVICE_GPU),
+ IteratorHandleOp);
REGISTER_KERNEL_BUILDER(Name("MakeIterator").Device(DEVICE_CPU),
MakeIteratorOp);
+REGISTER_KERNEL_BUILDER(
+ Name("MakeIterator").Device(DEVICE_GPU).HostMemory("dataset"),
+ MakeIteratorOp);
REGISTER_KERNEL_BUILDER(Name("AnonymousIterator").Device(DEVICE_CPU),
AnonymousIteratorHandleOp);
+REGISTER_KERNEL_BUILDER(Name("AnonymousIterator").Device(DEVICE_GPU),
+ AnonymousIteratorHandleOp);
REGISTER_KERNEL_BUILDER(Name("DatasetToSingleElement").Device(DEVICE_CPU),
ToSingleElementOp);
REGISTER_KERNEL_BUILDER(Name("OneShotIterator").Device(DEVICE_CPU),
OneShotIteratorOp);
REGISTER_KERNEL_BUILDER(Name("IteratorGetNext").Device(DEVICE_CPU),
IteratorGetNextOp);
+REGISTER_KERNEL_BUILDER(Name("IteratorGetNext").Device(DEVICE_GPU),
+ IteratorGetNextOp);
REGISTER_KERNEL_BUILDER(Name("IteratorGetNextSync").Device(DEVICE_CPU),
IteratorGetNextSyncOp);
+REGISTER_KERNEL_BUILDER(Name("IteratorGetNextSync").Device(DEVICE_GPU),
+ IteratorGetNextSyncOp);
REGISTER_KERNEL_BUILDER(Name("IteratorToStringHandle").Device(DEVICE_CPU),
IteratorToStringHandleOp);
+REGISTER_KERNEL_BUILDER(Name("IteratorToStringHandle")
+ .Device(DEVICE_GPU)
+ .HostMemory("string_handle"),
+ IteratorToStringHandleOp);
REGISTER_KERNEL_BUILDER(Name("IteratorFromStringHandle").Device(DEVICE_CPU),
IteratorFromStringHandleOp);
+REGISTER_KERNEL_BUILDER(Name("IteratorFromStringHandleV2").Device(DEVICE_CPU),
+ IteratorFromStringHandleOp);
+REGISTER_KERNEL_BUILDER(Name("IteratorFromStringHandleV2")
+ .Device(DEVICE_GPU)
+ .HostMemory("string_handle"),
+ IteratorFromStringHandleOp);
REGISTER_KERNEL_BUILDER(Name("SerializeIterator").Device(DEVICE_CPU),
SerializeIteratorOp);
REGISTER_KERNEL_BUILDER(Name("DeserializeIterator").Device(DEVICE_CPU),
diff --git a/tensorflow/core/kernels/data/optimize_dataset_op.cc b/tensorflow/core/kernels/data/optimize_dataset_op.cc
index 8965858e8d..276f5f89c8 100644
--- a/tensorflow/core/kernels/data/optimize_dataset_op.cc
+++ b/tensorflow/core/kernels/data/optimize_dataset_op.cc
@@ -54,8 +54,8 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
ctx, ParseVectorArgument<string>(ctx, "optimizations", &optimizations));
Dataset* dataset =
new Dataset(ctx, input, optimizations, output_types_, output_shapes_);
- core::ScopedUnref unref(dataset);
- OP_REQUIRES_OK(ctx, dataset->Optimize(ctx, output));
+ OP_REQUIRES_OK(ctx, dataset->Optimize(ctx));
+ *output = dataset;
}
private:
@@ -73,7 +73,10 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
input_->Ref();
}
- ~Dataset() override { input_->Unref(); }
+ ~Dataset() override {
+ input_->Unref();
+ optimized_input_->Unref();
+ }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
@@ -81,7 +84,7 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
new Iterator({this, strings::StrCat(prefix, "::Optimize")}));
}
- Status Optimize(OpKernelContext* ctx, DatasetBase** output) {
+ Status Optimize(OpKernelContext* ctx) {
GraphDefBuilder b;
DatasetGraphDefBuilder db(&b);
Node* input_node = nullptr;
@@ -89,18 +92,20 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
string output_node = input_node->name();
GraphDef graph_def;
TF_RETURN_IF_ERROR(b.ToGraphDef(&graph_def));
+ VLOG(3) << "Before optimization: " << graph_def.DebugString();
TF_RETURN_IF_ERROR(ApplyOptimizations(ctx, &graph_def, &output_node));
-
+ VLOG(3) << "After optimization: " << graph_def.DebugString();
+ flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(),
+ graph_def.library()));
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(ImportGraphDef({}, graph_def, &graph, nullptr));
std::vector<Tensor> outputs;
- GraphRunner graph_runner(ctx->env());
- // Once rewrites that add/modify functions are introduced, we will need
- // persist the results in a function library runtime.
+ GraphRunner graph_runner(ctx->function_library()->device());
TF_RETURN_IF_ERROR(graph_runner.Run(&graph, ctx->function_library(), {},
{output_node}, &outputs));
- TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(outputs[0], output));
- (*output)->Ref();
+ TF_RETURN_IF_ERROR(
+ GetDatasetFromVariantTensor(outputs[0], &optimized_input_));
+ optimized_input_->Ref();
return Status::OK();
}
@@ -113,6 +118,18 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
string DebugString() const override { return "OptimizeDatasetOp::Dataset"; }
+ protected:
+ Status AsGraphDefInternal(OpKernelContext* ctx, DatasetGraphDefBuilder* b,
+ Node** output) const override {
+ Node* input_graph_node = nullptr;
+ TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node));
+ Node* optimizations_node = nullptr;
+ TF_RETURN_IF_ERROR(b->AddVector(optimizations_, &optimizations_node));
+ TF_RETURN_IF_ERROR(
+ b->AddDataset(this, {input_graph_node, optimizations_node}, output));
+ return Status::OK();
+ }
+
private:
class Iterator : public DatasetIterator<Dataset> {
public:
@@ -120,15 +137,38 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
: DatasetIterator<Dataset>(params) {}
Status Initialize(IteratorContext* ctx) override {
- return errors::Unimplemented(strings::StrCat(prefix(), "::Initialize"));
+ return dataset()->optimized_input_->MakeIterator(ctx, prefix(),
+ &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
- return errors::Unimplemented(
- strings::StrCat(prefix(), "::GetNextInternal"));
+ IteratorContext::Params params;
+ params.env = ctx->env();
+ params.runner = *(ctx->runner());
+ params.stats_aggregator_getter = ctx->stats_aggregator_getter();
+ params.lib = ctx->lib();
+ params.function_library = dataset()->flib_def_;
+ params.allocator_getter = ctx->allocator_getter();
+ IteratorContext iter_ctx(params);
+ return input_impl_->GetNext(&iter_ctx, out_tensors, end_of_sequence);
+ }
+
+ protected:
+ Status SaveInternal(IteratorStateWriter* writer) override {
+ TF_RETURN_IF_ERROR(SaveParent(writer, input_impl_));
+ return Status::OK();
}
+
+ Status RestoreInternal(IteratorContext* ctx,
+ IteratorStateReader* reader) override {
+ TF_RETURN_IF_ERROR(RestoreParent(ctx, reader, input_impl_));
+ return Status::OK();
+ }
+
+ private:
+ std::unique_ptr<IteratorBase> input_impl_;
};
Status ApplyOptimizations(OpKernelContext* ctx, GraphDef* graph_def,
@@ -136,16 +176,8 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
// Add a fake sink node to allow rewriting the actual sink node.
NodeDef* node = graph_def->mutable_node()->Add();
node->set_name("FakeSink");
- node->set_op("IdentityDataset");
+ node->set_op("SinkDataset");
node->add_input(*output_node);
- {
- grappler::GraphView graph(graph_def);
- NodeDef* sink = graph.GetNode(*output_node);
- (*node->mutable_attr())["output_shapes"] =
- sink->attr().at("output_shapes");
- (*node->mutable_attr())["output_types"] =
- sink->attr().at("output_types");
- }
// Create metagraph.
MetaGraphDef meta_graph_def;
@@ -162,10 +194,10 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
for (const string& optimization : optimizations_) {
rewriter_config.add_optimizers(optimization);
}
- // If no optimizations were specified, supply a non-existent optimization
- // to prevent Grappler from applying the default set of optimizations as
- // some of them do not work out of the box at the moment (e.g. because we
- // have no cost model for dataset ops).
+ // If no optimizations were specified, supply a non-existent
+ // optimization to prevent Grappler from applying the default set of
+ // optimizations as some of them do not work out of the box at the
+ // moment (e.g. because we have no cost model for dataset ops).
if (optimizations_.empty()) {
rewriter_config.add_optimizers("non-existent");
}
@@ -178,6 +210,12 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
tensorflow::grappler::VirtualCluster cluster(device_map);
// Run optimizer.
+ if (VLOG_IS_ON(2)) {
+ LOG(INFO) << "Performing the following optimizations:";
+ for (const string& optimization : optimizations_) {
+ LOG(INFO) << " " << optimization;
+ }
+ }
TF_RETURN_IF_ERROR(tensorflow::grappler::RunMetaOptimizer(
*grappler_item, rewriter_config, ctx->device(), &cluster, graph_def));
@@ -192,6 +230,8 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
return Status::OK();
}
+ DatasetBase* optimized_input_;
+ std::shared_ptr<FunctionLibraryDefinition> flib_def_;
const DatasetBase* input_;
const std::vector<string> optimizations_;
const DataTypeVector output_types_;
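
The reworked Optimize() no longer hands the optimized dataset back to the caller; the wrapper Dataset keeps it in optimized_input_ and delegates iteration to it. A hedged sketch of the reference-counting contract this sets up (hypothetical names; everything except the dataset.h helpers used above is omitted):

class Wrapper {  // stands in for OptimizeDatasetOp::Dataset
 public:
  ~Wrapper() {
    if (optimized_ != nullptr) optimized_->Unref();  // pairs with Ref() below
  }

  Status Optimize(OpKernelContext* ctx) {
    std::vector<Tensor> outputs;
    // ... serialize the input pipeline, run Grappler, re-execute the
    // rewritten graph so outputs[0] boxes the optimized dataset ...
    TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(outputs[0], &optimized_));
    optimized_->Ref();  // outputs[0] still owns a reference; take our own
    return Status::OK();
  }

 private:
  DatasetBase* optimized_ = nullptr;
};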
diff --git a/tensorflow/core/kernels/data/prefetch_dataset_op.cc b/tensorflow/core/kernels/data/prefetch_dataset_op.cc
index 2bafb985ef..cc16108dce 100644
--- a/tensorflow/core/kernels/data/prefetch_dataset_op.cc
+++ b/tensorflow/core/kernels/data/prefetch_dataset_op.cc
@@ -357,7 +357,12 @@ class PrefetchDatasetOp : public UnaryDatasetOpKernel {
REGISTER_KERNEL_BUILDER(Name("PrefetchDataset").Device(DEVICE_CPU),
PrefetchDatasetOp);
-
+REGISTER_KERNEL_BUILDER(Name("PrefetchDataset")
+ .Device(DEVICE_GPU)
+ .HostMemory("buffer_size")
+ .HostMemory("input_dataset")
+ .HostMemory("handle"),
+ PrefetchDatasetOp);
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/slide_dataset_op.cc b/tensorflow/core/kernels/data/slide_dataset_op.cc
index c17e9343ea..5765c61f30 100644
--- a/tensorflow/core/kernels/data/slide_dataset_op.cc
+++ b/tensorflow/core/kernels/data/slide_dataset_op.cc
@@ -12,6 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+
+#include <deque>
+#include <vector>
+
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/dataset.h"
@@ -33,37 +37,40 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override {
int64 window_size = 0;
- int64 stride = 0;
OP_REQUIRES_OK(
ctx, ParseScalarArgument<int64>(ctx, "window_size", &window_size));
- OP_REQUIRES_OK(ctx, ParseScalarArgument<int64>(ctx, "stride", &stride));
OP_REQUIRES(
ctx, window_size > 0,
errors::InvalidArgument("Window size must be greater than zero."));
+ int64 window_shift = 0;
+ OP_REQUIRES_OK(
+ ctx, ParseScalarArgument<int64>(ctx, "window_shift", &window_shift));
+ OP_REQUIRES(
+ ctx, window_shift > 0,
+ errors::InvalidArgument("Window shift must be greater than zero."));
+ int64 window_stride = 0;
+ OP_REQUIRES_OK(
+ ctx, ParseScalarArgument<int64>(ctx, "window_stride", &window_stride));
OP_REQUIRES(
- ctx, stride > 0,
- errors::InvalidArgument("Stride must be greater than zero."));
- if (stride == window_size) {
- LOG(WARNING) << "stride: " << stride
+ ctx, window_stride > 0,
+ errors::InvalidArgument("window_stride must be greater than zero."));
+ if (window_size == window_shift && window_stride == 1) {
+ LOG(WARNING) << "window_shift: " << window_shift
<< " is equal to window_size: " << window_size
- << ", to use `batch` instead.";
- } else if (stride > window_size) {
- LOG(WARNING) << "stride: " << stride
- << " is greater than window_size: " << window_size
- << ", you will lose some data.";
+ << " and window_stride is 1, use `batch` instead.";
}
-
- *output = new Dataset(ctx, window_size, stride, input);
+ *output = new Dataset(ctx, window_size, window_shift, window_stride, input);
}
private:
class Dataset : public GraphDatasetBase {
public:
- Dataset(OpKernelContext* ctx, int64 window_size, int64 stride,
- const DatasetBase* input)
+ Dataset(OpKernelContext* ctx, int64 window_size, int64 window_shift,
+ int64 window_stride, const DatasetBase* input)
: GraphDatasetBase(ctx),
window_size_(window_size),
- stride_(stride),
+ window_shift_(window_shift),
+ window_stride_(window_stride),
input_(input) {
input_->Ref();
@@ -92,8 +99,8 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
}
string DebugString() const override {
- return strings::StrCat("SlideDatasetOp(", window_size_, ", ", stride_,
- ")::Dataset");
+ return strings::StrCat("SlideDatasetOp(", window_size_, ", ",
+ window_shift_, ", ", window_stride_, ")::Dataset");
}
protected:
@@ -102,16 +109,18 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node));
Node* window_size = nullptr;
- Node* stride = nullptr;
+ Node* window_shift = nullptr;
+ Node* window_stride = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(window_size_, &window_size));
- TF_RETURN_IF_ERROR(b->AddScalar(stride_, &stride));
- TF_RETURN_IF_ERROR(
- b->AddDataset(this, {input_graph_node, window_size, stride}, output));
+ TF_RETURN_IF_ERROR(b->AddScalar(window_shift_, &window_shift));
+ TF_RETURN_IF_ERROR(b->AddScalar(window_stride_, &window_stride));
+ TF_RETURN_IF_ERROR(b->AddDataset(
+ this, {input_graph_node, window_size, window_shift, window_stride},
+ output));
return Status::OK();
}
private:
-
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
@@ -125,7 +134,8 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
const int64 window_size = dataset()->window_size_;
- const int64 stride = dataset()->stride_;
+ const int64 window_shift = dataset()->window_shift_;
+ const int64 window_stride = dataset()->window_stride_;
std::vector<std::vector<Tensor>> batch_elements;
{
mutex_lock l(mu_);
@@ -134,55 +144,51 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
return Status::OK();
}
batch_elements.reserve(window_size);
- // Use cache if stride < window_size.
- if (stride < window_size) {
- const bool first_call = cache_.empty();
- if (first_call) {
- cache_.reserve(window_size);
- } else {
- // Reuse cache in the previous iteration.
- cache_.swap(batch_elements);
- }
- }
- // Fill up with new elements.
+
+ // Fill up buffer.
+ size_t target_size = TargetBufferSize(window_size, window_stride);
*end_of_sequence = false;
- for (size_t i = batch_elements.size(); i < window_size && !*end_of_sequence;
- ++i) {
- std::vector<Tensor> batch_element_tuple;
- TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &batch_element_tuple,
- end_of_sequence));
+ for (size_t i = buffer_.size(); i < target_size && !*end_of_sequence;
+ ++i) {
+ std::vector<Tensor> element;
+ TF_RETURN_IF_ERROR(
+ input_impl_->GetNext(ctx, &element, end_of_sequence));
if (!*end_of_sequence) {
- batch_elements.push_back(std::move(batch_element_tuple));
+ buffer_.push_back(std::move(element));
} else {
input_impl_.reset();
}
}
- // Drop the final smaller blocks.
- if (batch_elements.size() < window_size) {
+
+ // Drop the final smaller batch.
+ if (buffer_.size() < target_size) {
DCHECK(*end_of_sequence);
return Status::OK();
}
- if (stride < window_size) {
- // Cache the data used for the next iteration.
- for (size_t i = stride; i < window_size; ++i) {
- cache_.emplace_back(batch_elements[i]);
- }
- } else if (stride > window_size) {
- // Drop the data before the next iteration.
- std::vector<Tensor> batch_element_tuple;
- for (size_t i = window_size; i < stride && !*end_of_sequence; ++i) {
- TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &batch_element_tuple,
- end_of_sequence));
- if (*end_of_sequence) {
+ for (size_t i = 0; i < window_size; ++i) {
+ batch_elements.emplace_back(buffer_[window_stride * i]);
+ }
+
+ // Drop the data before the next iteration.
+ if (window_shift >= buffer_.size()) {
+ for (size_t i = buffer_.size(); i < window_shift; ++i) {
+ bool end_of_input;
+ std::vector<Tensor> element;
+ TF_RETURN_IF_ERROR(
+ input_impl_->GetNext(ctx, &element, &end_of_input));
+ if (end_of_input) {
input_impl_.reset();
+ break;
}
}
+ buffer_.clear();
+ } else {
+ buffer_.erase(buffer_.begin(), buffer_.begin() + window_shift);
}
}
// Construct output tensors.
- // Those codes below are copied from batch_dataset_op.cc.
const size_t num_tuple_components = batch_elements[0].size();
const int64 num_batch_elements = batch_elements.size();
for (size_t component_index = 0; component_index < num_tuple_components;
@@ -224,15 +230,15 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
} else {
TF_RETURN_IF_ERROR(SaveParent(writer, input_impl_));
}
- // Save cache.
- TF_RETURN_IF_ERROR(
- writer->WriteScalar(strings::StrCat("cache_size"), cache_.size()));
- for (int64 i = 0; i < cache_.size(); i++) {
+ // Save buffer.
+ TF_RETURN_IF_ERROR(writer->WriteScalar(strings::StrCat("buffer_size"),
+ buffer_.size()));
+ for (int64 i = 0; i < buffer_.size(); i++) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
- strings::StrCat("cache[", i, "]_size"), cache_[i].size()));
- for (int64 j = 0; j < cache_[i].size(); j++) {
+ strings::StrCat("buffer[", i, "]_size"), buffer_[i].size()));
+ for (int64 j = 0; j < buffer_[i].size(); j++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
- strings::StrCat("cache[", i, "][", j, "]"), cache_[i][j]));
+ strings::StrCat("buffer[", i, "][", j, "]"), buffer_[i][j]));
}
}
return Status::OK();
@@ -246,32 +252,37 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
} else {
input_impl_.reset();
}
- // Restore cache.
- int64 cache_size;
+ // Restore buffer.
+ int64 buffer_size;
TF_RETURN_IF_ERROR(
- reader->ReadScalar(strings::StrCat("cache_size"), &cache_size));
- cache_.resize(cache_size);
- for (int64 i = 0; i < cache_size; i++) {
+ reader->ReadScalar(strings::StrCat("buffer_size"), &buffer_size));
+ buffer_.resize(buffer_size);
+ for (int64 i = 0; i < buffer_size; i++) {
int64 vector_size;
TF_RETURN_IF_ERROR(reader->ReadScalar(
- strings::StrCat("cache[", i, "]_size"), &vector_size));
- cache_[i].resize(vector_size);
+ strings::StrCat("buffer[", i, "]_size"), &vector_size));
+ buffer_[i].resize(vector_size);
for (int64 j = 0; j < vector_size; j++) {
TF_RETURN_IF_ERROR(reader->ReadTensor(
- strings::StrCat("cache[", i, "][", j, "]"), &cache_[i][j]));
+ strings::StrCat("buffer[", i, "][", j, "]"), &buffer_[i][j]));
}
}
return Status::OK();
}
private:
+ size_t TargetBufferSize(int64 window_size, int64 window_stride) {
+ return (window_size - 1) * window_stride + 1;
+ }
+
mutex mu_;
- std::vector<std::vector<Tensor>> cache_ GUARDED_BY(mu_);
+ std::deque<std::vector<Tensor>> buffer_ GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ GUARDED_BY(mu_);
};
const int64 window_size_;
- const int64 stride_;
+ const int64 window_shift_;
+ const int64 window_stride_;
const DatasetBase* const input_;
std::vector<PartialTensorShape> output_shapes_;
};
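
The rewritten kernel separates three knobs: window_size (elements per window), window_shift (hop between consecutive windows), and window_stride (gap between elements inside a window), which is why TargetBufferSize() must be (window_size - 1) * window_stride + 1. A self-contained sketch of the same buffering logic over plain integers (standard C++ only; the input stream 0..9 is a made-up example):

#include <cstddef>
#include <deque>
#include <iostream>

std::size_t TargetBufferSize(std::size_t window_size, std::size_t window_stride) {
  // The last element of a window sits (window_size - 1) * window_stride
  // positions after the first, so that many elements must be buffered.
  return (window_size - 1) * window_stride + 1;
}

int main() {
  const std::size_t window_size = 3, window_shift = 2, window_stride = 2;
  const std::size_t target = TargetBufferSize(window_size, window_stride);  // 5
  std::deque<int> buffer;
  int next = 0;
  while (true) {
    // Fill the buffer up to the target size, as GetNextInternal() does.
    while (buffer.size() < target && next < 10) buffer.push_back(next++);
    if (buffer.size() < target) break;  // drop the final smaller batch
    // Emit one window: every window_stride-th buffered element.
    for (std::size_t i = 0; i < window_size; ++i)
      std::cout << buffer[window_stride * i] << ' ';
    std::cout << '\n';  // prints: 0 2 4 / 2 4 6 / 4 6 8
    // Advance by window_shift, as the erase() at the end of the hunk does.
    buffer.erase(buffer.begin(), buffer.begin() + window_shift);
  }
}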
diff --git a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
index 2604822cc9..b5dff48d2d 100644
--- a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
+++ b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
@@ -252,10 +252,12 @@ class SparseTensorSliceDatasetOp : public DatasetOpKernel {
previous_batch_index = next_batch_index;
}
gtl::InlinedVector<int64, 8> std_order(dense_shape->NumElements(), 0);
- sparse::SparseTensor sparse_tensor(
- *indices, *values, TensorShape(dense_shape->vec<int64>()), std_order);
-
- *output = new Dataset<T>(ctx, sparse_tensor);
+ sparse::SparseTensor tensor;
+ OP_REQUIRES_OK(
+ ctx, sparse::SparseTensor::Create(
+ *indices, *values, TensorShape(dense_shape->vec<int64>()),
+ std_order, &tensor));
+ *output = new Dataset<T>(ctx, std::move(tensor));
}
private:
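
SparseTensor construction here moves from a validating constructor to the status-returning SparseTensor::Create() factory, so malformed inputs surface as InvalidArgument statuses instead of process-killing checks. A minimal stand-alone sketch of that factory pattern (hypothetical types, not TensorFlow's actual SparseTensor or Status):

#include <string>
#include <utility>
#include <vector>

struct Status {
  bool ok = true;
  std::string message;
  static Status OK() { return {}; }
  static Status InvalidArgument(std::string m) { return {false, std::move(m)}; }
};

class SparseVector {
 public:
  // Validation lives in the factory, so callers can propagate failures with
  // OP_REQUIRES_OK-style macros instead of crashing in a constructor.
  static Status Create(std::vector<long> indices, long size, SparseVector* out) {
    for (long i : indices)
      if (i < 0 || i >= size) return Status::InvalidArgument("index out of range");
    out->indices_ = std::move(indices);
    out->size_ = size;
    return Status::OK();
  }

 private:
  std::vector<long> indices_;
  long size_ = 0;
};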
diff --git a/tensorflow/core/kernels/data/stats_aggregator_ops.cc b/tensorflow/core/kernels/data/stats_aggregator_ops.cc
index 33a56b2eb5..b133cfab54 100644
--- a/tensorflow/core/kernels/data/stats_aggregator_ops.cc
+++ b/tensorflow/core/kernels/data/stats_aggregator_ops.cc
@@ -20,11 +20,25 @@ limitations under the License.
#include "tensorflow/core/framework/resource_op_kernel.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/histogram/histogram.h"
+#include "tensorflow/core/lib/monitoring/counter.h"
+#include "tensorflow/core/lib/monitoring/gauge.h"
+#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
namespace {
+static mutex* get_counters_map_lock() {
+ static mutex counters_map_lock(LINKER_INITIALIZED);
+ return &counters_map_lock;
+}
+
+static std::unordered_map<string, monitoring::Counter<1>*>* get_counters_map() {
+ static std::unordered_map<string, monitoring::Counter<1>*>* counters_map =
+ new std::unordered_map<string, monitoring::Counter<1>*>;
+ return counters_map;
+}
+
class StatsAggregatorImpl : public StatsAggregator {
public:
StatsAggregatorImpl() {}
@@ -61,6 +75,21 @@ class StatsAggregatorImpl : public StatsAggregator {
}
}
+ void IncrementCounter(const string& name, const string& label,
+ int64 val) override {
+ mutex_lock l(*get_counters_map_lock());
+ auto counters_map = get_counters_map();
+ if (counters_map->find(name) == counters_map->end()) {
+ counters_map->emplace(
+ name, monitoring::Counter<1>::New(
+ /*streamz name*/ "/tensorflow/" + name,
+ /*streamz description*/
+ name + " generated or consumed by the component.",
+ /*streamz label name*/ "component_descriptor"));
+ }
+ counters_map->at(name)->GetCell(label)->IncrementBy(val);
+ }
+
private:
mutex mu_;
std::unordered_map<string, histogram::Histogram> histograms_ GUARDED_BY(mu_);
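
IncrementCounter() above lazily materializes one monitoring::Counter per statistic name inside a process-wide map guarded by a mutex. The same lazily-initialized global registry pattern, reduced to standard C++ (std::mutex and a plain long stand in for TF's mutex and Counter cell; the names are made up):

#include <mutex>
#include <string>
#include <unordered_map>

std::mutex* GetCountersMapLock() {
  static std::mutex* lock = new std::mutex;  // leaked on purpose: safe at exit
  return lock;
}

std::unordered_map<std::string, long>* GetCountersMap() {
  static auto* counters = new std::unordered_map<std::string, long>;
  return counters;
}

void IncrementCounter(const std::string& name, long val) {
  std::lock_guard<std::mutex> l(*GetCountersMapLock());
  (*GetCountersMap())[name] += val;  // inserts a zero-valued cell on first use
}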
diff --git a/tensorflow/core/kernels/data/stats_dataset_ops.cc b/tensorflow/core/kernels/data/stats_dataset_ops.cc
index 3e0a6ae049..a537e7e68f 100644
--- a/tensorflow/core/kernels/data/stats_dataset_ops.cc
+++ b/tensorflow/core/kernels/data/stats_dataset_ops.cc
@@ -316,10 +316,14 @@ class FeatureStatsDatasetOp : public UnaryDatasetOpKernel {
// changes to parse_example() where it returns stats as well.
for (int i = 0; i < record_t.size(); ++i) {
if (example.ParseFromString(record_t(i))) {
+ stats_aggregator->IncrementCounter("examples_count", "trainer",
+ 1);
AddStatsFeatures(example, stats_aggregator);
} else {
SequenceExample sequence_example;
if (sequence_example.ParseFromString(record_t(i))) {
+ stats_aggregator->IncrementCounter("sequence_examples_count",
+ "trainer", 1);
AddStatsFeatures(sequence_example, stats_aggregator);
}
}
@@ -360,8 +364,11 @@ class FeatureStatsDatasetOp : public UnaryDatasetOpKernel {
int feature_values_list_size_sum = 0;
for (const auto& feature : example.features().feature()) {
+ stats_aggregator->IncrementCounter("features_count", "trainer", 1);
feature_values_list_size_sum += AddStatsFeatureValues(feature.second);
}
+ stats_aggregator->IncrementCounter("feature_values_count", "trainer",
+ feature_values_list_size_sum);
stats_aggregator->AddToHistogram(
strings::StrCat(dataset()->tag_, ":feature-values"),
{static_cast<double>(feature_values_list_size_sum)});
@@ -378,16 +385,20 @@ class FeatureStatsDatasetOp : public UnaryDatasetOpKernel {
int feature_values_list_size_sum = 0;
for (const auto& feature : example.context().feature()) {
+ stats_aggregator->IncrementCounter("features_count", "trainer", 1);
feature_values_list_size_sum += AddStatsFeatureValues(feature.second);
}
for (const auto& feature_list :
example.feature_lists().feature_list()) {
+          stats_aggregator->IncrementCounter("feature_lists_count", "trainer",
+ 1);
for (const auto& feature : feature_list.second.feature()) {
feature_values_list_size_sum += AddStatsFeatureValues(feature);
}
}
-
+ stats_aggregator->IncrementCounter("feature_values_count", "trainer",
+ feature_values_list_size_sum);
stats_aggregator->AddToHistogram(
strings::StrCat(dataset()->tag_, ":feature-values"),
{static_cast<double>(feature_values_list_size_sum)});
diff --git a/tensorflow/core/kernels/data/window_dataset.cc b/tensorflow/core/kernels/data/window_dataset.cc
index 668b461374..17551bccd9 100644
--- a/tensorflow/core/kernels/data/window_dataset.cc
+++ b/tensorflow/core/kernels/data/window_dataset.cc
@@ -17,6 +17,7 @@ limitations under the License.
namespace tensorflow {
namespace {
+// TODO(b/110981596): Support checkpointing.
class WindowDataset : public DatasetBase {
public:
WindowDataset(std::vector<std::vector<Tensor>> elements,
diff --git a/tensorflow/core/kernels/data/window_dataset.h b/tensorflow/core/kernels/data/window_dataset.h
index 97c31668ac..7bd31a0bc7 100644
--- a/tensorflow/core/kernels/data/window_dataset.h
+++ b/tensorflow/core/kernels/data/window_dataset.h
@@ -31,7 +31,7 @@ namespace tensorflow {
//
// This dataset is constructed internally for use in datasets that
// build nested dataset expressions (e.g. the reducer function for
-// GroupByBatchDataset). It efficiently supports multiple iterators on
+// GroupByWindowDataset). It efficiently supports multiple iterators on
// the same window without recomputation.
//
// REQUIRES: `output_types` must match the types of the respective
diff --git a/tensorflow/core/kernels/data/window_dataset_op.cc b/tensorflow/core/kernels/data/window_dataset_op.cc
new file mode 100644
index 0000000000..0283e5697b
--- /dev/null
+++ b/tensorflow/core/kernels/data/window_dataset_op.cc
@@ -0,0 +1,196 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/framework/partial_tensor_shape.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/kernels/data/dataset.h"
+#include "tensorflow/core/kernels/data/window_dataset.h"
+
+namespace tensorflow {
+
+namespace {
+
+// See documentation in ../ops/dataset_ops.cc for a high-level
+// description of the following op.
+
+class WindowDatasetOp : public UnaryDatasetOpKernel {
+ public:
+ explicit WindowDatasetOp(OpKernelConstruction* ctx)
+ : UnaryDatasetOpKernel(ctx) {}
+
+ void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
+ DatasetBase** output) override {
+ int64 window_size = 0;
+ OP_REQUIRES_OK(
+ ctx, ParseScalarArgument<int64>(ctx, "window_size", &window_size));
+ OP_REQUIRES(
+ ctx, window_size > 0,
+ errors::InvalidArgument("Window size must be greater than zero."));
+
+ *output = new Dataset(ctx, window_size, input);
+ }
+
+ private:
+ class Dataset : public GraphDatasetBase {
+ public:
+ Dataset(OpKernelContext* ctx, int64 window_size, const DatasetBase* input)
+ : GraphDatasetBase(ctx), window_size_(window_size), input_(input) {
+ input_->Ref();
+ }
+
+ ~Dataset() override { input_->Unref(); }
+
+ std::unique_ptr<IteratorBase> MakeIteratorInternal(
+ const string& prefix) const override {
+ return std::unique_ptr<IteratorBase>(new Iterator(
+ Iterator::Params{this, strings::StrCat(prefix, "::Window")}));
+ }
+
+ const DataTypeVector& output_dtypes() const override {
+ static DataTypeVector* output_dtypes = new DataTypeVector({DT_VARIANT});
+ return *output_dtypes;
+ }
+
+ const std::vector<PartialTensorShape>& output_shapes() const override {
+ static std::vector<PartialTensorShape>* output_shapes =
+ new std::vector<PartialTensorShape>({TensorShape({})});
+ return *output_shapes;
+ }
+
+ string DebugString() const override {
+ return strings::StrCat("WindowDatasetOp(", window_size_, ")::Dataset");
+ }
+
+ protected:
+ Status AsGraphDefInternal(OpKernelContext* ctx, DatasetGraphDefBuilder* b,
+ Node** output) const override {
+ Node* input_graph_node = nullptr;
+ TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node));
+ Node* window_size = nullptr;
+ TF_RETURN_IF_ERROR(b->AddScalar(window_size_, &window_size));
+ TF_RETURN_IF_ERROR(
+ b->AddDataset(this, {input_graph_node, window_size}, output));
+ return Status::OK();
+ }
+
+ private:
+ class Iterator : public DatasetIterator<Dataset> {
+ public:
+ explicit Iterator(const Params& params)
+ : DatasetIterator<Dataset>(params) {}
+
+ Status Initialize(IteratorContext* ctx) override {
+ return dataset()->input_->MakeIterator(ctx, prefix(), &input_impl_);
+ }
+
+ Status GetNextInternal(IteratorContext* ctx,
+ std::vector<Tensor>* out_tensors,
+ bool* end_of_sequence) override {
+ // Each row of `window_elements` is a tuple of tensors from the
+ // input iterator.
+ std::vector<std::vector<Tensor>> window_elements;
+ {
+ mutex_lock l(mu_);
+ if (!input_impl_) {
+ *end_of_sequence = true;
+ return Status::OK();
+ }
+ window_elements.reserve(dataset()->window_size_);
+ *end_of_sequence = false;
+ for (int i = 0; i < dataset()->window_size_ && !*end_of_sequence;
+ ++i) {
+ std::vector<Tensor> window_element_tuple;
+ TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &window_element_tuple,
+ end_of_sequence));
+ if (!*end_of_sequence) {
+ window_elements.emplace_back(std::move(window_element_tuple));
+ } else {
+ input_impl_.reset();
+ }
+ }
+ }
+
+ if (window_elements.empty()) {
+ DCHECK(*end_of_sequence);
+ return Status::OK();
+ }
+
+ const size_t num_tuple_components = window_elements[0].size();
+ const int64 num_window_elements = window_elements.size();
+ for (size_t idx = 0; idx < num_tuple_components; ++idx) {
+ DatasetBase* window_dataset;
+ std::vector<std::vector<Tensor>> window_component_elements;
+ window_component_elements.reserve(num_window_elements);
+ // Build the output tuple component by copying one slice
+ // from each input element in the window.
+ for (size_t i = 0; i < num_window_elements; ++i) {
+ std::vector<Tensor> component_element;
+ component_element.push_back(std::move(window_elements[i][idx]));
+ window_component_elements.push_back(component_element);
+ }
+ DataTypeVector output_types(
+ {dataset()->input_->output_dtypes()[idx]});
+ std::vector<PartialTensorShape> output_shapes(
+ {dataset()->input_->output_shapes()[idx]});
+ TF_RETURN_IF_ERROR(NewWindowDataset(window_component_elements,
+ output_types, output_shapes,
+ &window_dataset));
+ out_tensors->emplace_back(DT_VARIANT, TensorShape({}));
+ TF_RETURN_IF_ERROR(StoreDatasetInVariantTensor(window_dataset,
+ &out_tensors->back()));
+ }
+ *end_of_sequence = false;
+ return Status::OK();
+ }
+
+ protected:
+ Status SaveInternal(IteratorStateWriter* writer) override {
+ mutex_lock l(mu_);
+ if (!input_impl_) {
+ TF_RETURN_IF_ERROR(
+ writer->WriteScalar(full_name("input_impl_empty"), ""));
+ } else {
+ TF_RETURN_IF_ERROR(SaveParent(writer, input_impl_));
+ }
+ return Status::OK();
+ }
+
+ Status RestoreInternal(IteratorContext* ctx,
+ IteratorStateReader* reader) override {
+ mutex_lock l(mu_);
+ if (!reader->Contains(full_name("input_impl_empty"))) {
+ TF_RETURN_IF_ERROR(RestoreParent(ctx, reader, input_impl_));
+ } else {
+ input_impl_.reset();
+ }
+ return Status::OK();
+ }
+
+ private:
+ mutex mu_;
+ std::unique_ptr<IteratorBase> input_impl_ GUARDED_BY(mu_);
+ };
+
+ const int64 window_size_;
+ const DatasetBase* const input_;
+ };
+};
+
+REGISTER_KERNEL_BUILDER(Name("WindowDataset").Device(DEVICE_CPU),
+ WindowDatasetOp);
+
+} // namespace
+
+} // namespace tensorflow
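
Each successful GetNextInternal() call above emits one scalar DT_VARIANT tensor per tuple component, each boxing a window-sized nested dataset. A hedged sketch of the consuming side, assuming only the dataset.h helpers the kernel itself uses (a fragment, not a complete kernel; iter_ctx and out_tensors are assumed to be in scope):

// Unbox the first output component and iterate over the nested window.
DatasetBase* window = nullptr;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(out_tensors[0], &window));
std::unique_ptr<IteratorBase> window_iter;
TF_RETURN_IF_ERROR(window->MakeIterator(&iter_ctx, "WindowElements", &window_iter));
std::vector<Tensor> element;
bool end_of_window = false;
while (true) {
  element.clear();
  TF_RETURN_IF_ERROR(window_iter->GetNext(&iter_ctx, &element, &end_of_window));
  if (end_of_window) break;
  // ... consume `element`, a single-component tuple from the window ...
}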
diff --git a/tensorflow/core/kernels/dense_update_ops.cc b/tensorflow/core/kernels/dense_update_ops.cc
index 0de97de205..f942b1a8a9 100644
--- a/tensorflow/core/kernels/dense_update_ops.cc
+++ b/tensorflow/core/kernels/dense_update_ops.cc
@@ -98,6 +98,8 @@ typedef Eigen::SyclDevice SYCLDevice;
TF_CALL_ALL_TYPES(REGISTER_KERNELS);
TF_CALL_QUANTIZED_TYPES(REGISTER_KERNELS);
+// quint16 is not included in QUANTIZED_TYPES.
+TF_CALL_quint16(REGISTER_KERNELS);
#undef REGISTER_KERNELS
#if GOOGLE_CUDA
diff --git a/tensorflow/core/kernels/deserialize_sparse_string_op.cc b/tensorflow/core/kernels/deserialize_sparse_string_op.cc
new file mode 100644
index 0000000000..2c13f24ad6
--- /dev/null
+++ b/tensorflow/core/kernels/deserialize_sparse_string_op.cc
@@ -0,0 +1,296 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#define EIGEN_USE_THREADS
+
+#include <algorithm>
+#include <numeric>
+#include <utility>
+#include <vector>
+
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/register_types.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/framework/tensor.pb.h"
+#include "tensorflow/core/framework/tensor_util.h"
+#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/framework/variant.h"
+#include "tensorflow/core/framework/variant_encode_decode.h"
+#include "tensorflow/core/kernels/reshape_util.h"
+#include "tensorflow/core/lib/gtl/inlined_vector.h"
+#include "tensorflow/core/lib/gtl/optional.h"
+#include "tensorflow/core/util/sparse/sparse_tensor.h"
+
+namespace tensorflow {
+
+namespace {
+
+using sparse::SparseTensor;
+
+class DeserializeSparseOp : public OpKernel {
+ public:
+ explicit DeserializeSparseOp(OpKernelConstruction* context)
+ : OpKernel(context) {
+ OP_REQUIRES_OK(context, context->GetAttr("dtype", &dtype_));
+ }
+
+ void Compute(OpKernelContext* context) override {
+ const Tensor& serialized_sparse = context->input(0);
+ const int ndims = serialized_sparse.shape().dims();
+
+ OP_REQUIRES(
+ context, ndims > 0,
+ errors::InvalidArgument("Serialized sparse should have non-zero rank ",
+ serialized_sparse.shape().DebugString()));
+
+ OP_REQUIRES(context, serialized_sparse.shape().dim_size(ndims - 1) == 3,
+ errors::InvalidArgument(
+ "Serialized sparse should have 3 as the last dimension ",
+ serialized_sparse.shape().DebugString()));
+
+ int num_sparse_tensors = 1;
+ for (int i = 0; i < ndims - 1; ++i) {
+ num_sparse_tensors *= serialized_sparse.shape().dim_size(i);
+ }
+
+ OP_REQUIRES(
+ context, num_sparse_tensors > 0,
+ errors::InvalidArgument(
+ "Serialized sparse should have at least 1 serialized tensor, "
+ "but has a zero dimension ",
+ serialized_sparse.shape().DebugString()));
+
+ if (num_sparse_tensors == 1 && ndims == 1) {
+ // Special case with a single sparse tensor. We can avoid data
+ // motion in the Concat and Reshape.
+ const auto& serialized_sparse_t = serialized_sparse.vec<string>();
+
+ Tensor output_indices;
+ Tensor output_values;
+ Tensor output_shape;
+ OP_REQUIRES_OK(context,
+ this->GetAndValidateSparseTensor(
+ serialized_sparse_t(0), serialized_sparse_t(1),
+ serialized_sparse_t(2), dtype_, 0 /* index */,
+ &output_indices, &output_values, &output_shape));
+ context->set_output(0, output_indices);
+ context->set_output(1, output_values);
+ context->set_output(2, output_shape);
+ return;
+ }
+
+ std::vector<Tensor> indices;
+ std::vector<Tensor> values;
+ TensorShape shape;
+ indices.reserve(num_sparse_tensors);
+ values.reserve(num_sparse_tensors);
+
+ const auto& serialized_sparse_t =
+ serialized_sparse.flat_inner_dims<string, 2>();
+ for (int i = 0; i < num_sparse_tensors; ++i) {
+ Tensor output_indices;
+ Tensor output_values;
+ Tensor output_shape;
+ OP_REQUIRES_OK(context,
+ this->GetAndValidateSparseTensor(
+ serialized_sparse_t(i, 0), serialized_sparse_t(i, 1),
+ serialized_sparse_t(i, 2), dtype_, i, &output_indices,
+ &output_values, &output_shape));
+ int64 num_entries = output_indices.dim_size(0);
+ int rank = output_indices.dim_size(1);
+
+      // Now we expand each SparseTensor's indices and shape by
+      // prefixing a dimension.
+ Tensor expanded_indices(DT_INT64, TensorShape({num_entries, 1 + rank}));
+ const auto& output_indices_t = output_indices.matrix<int64>();
+ auto expanded_indices_t = expanded_indices.matrix<int64>();
+ expanded_indices_t.chip<1>(0).setZero();
+ if (rank > 0) {
+ Eigen::DSizes<Eigen::DenseIndex, 2> indices_start(0, 1);
+ Eigen::DSizes<Eigen::DenseIndex, 2> indices_sizes(num_entries, rank);
+ expanded_indices_t.slice(indices_start, indices_sizes) =
+ output_indices_t;
+ }
+ Tensor expanded_shape(DT_INT64, TensorShape({1 + rank}));
+ const auto& output_shape_t = output_shape.vec<int64>();
+ auto expanded_shape_t = expanded_shape.vec<int64>();
+ expanded_shape_t(0) = 1;
+ std::copy_n(&output_shape_t(0), rank, &expanded_shape_t(1));
+
+ TensorShape expanded_tensor_shape(expanded_shape.vec<int64>());
+
+ indices.push_back(expanded_indices);
+ values.push_back(output_values);
+ if (i == 0) {
+ shape = expanded_tensor_shape;
+ } else {
+ OP_REQUIRES(
+ context, shape.dims() == expanded_tensor_shape.dims(),
+ errors::InvalidArgument(
+ "Inconsistent shape across SparseTensors: rank prior to "
+ "SparseTensor[",
+ i, "] was: ", shape.dims() - 1, " but rank of SparseTensor[", i,
+ "] is: ", expanded_tensor_shape.dims() - 1));
+ for (int j = 1; j < shape.dims(); ++j) {
+ // NOTE(mrry): For compatibility with the implementations of
+ // DeserializeManySparse, and many ops that generate
+ // SparseTensors to batch that do not have a fixed
+ // dense_shape (e.g. `tf.parse_single_example()`), we
+ // compute the maximum in each dimension to find the
+ // smallest dense_shape that bounds all of the input
+ // SparseTensors.
+ shape.set_dim(j, std::max(shape.dim_size(j),
+ expanded_tensor_shape.dim_size(j)));
+ }
+ }
+ }
+
+ // Dimension 0 is the primary dimension.
+ int rank = shape.dims();
+ gtl::InlinedVector<int64, 8> std_order(rank);
+ std::iota(std_order.begin(), std_order.end(), 0);
+
+ std::vector<SparseTensor> tensors;
+ tensors.reserve(num_sparse_tensors);
+ for (int i = 0; i < num_sparse_tensors; ++i) {
+ SparseTensor tensor;
+ OP_REQUIRES_OK(context, SparseTensor::Create(indices[i], values[i], shape,
+ std_order, &tensor));
+ tensors.push_back(std::move(tensor));
+ }
+
+ gtl::optional<SparseTensor> maybe_output;
+#define HANDLE_TYPE(T) \
+ case DataTypeToEnum<T>::value: { \
+ maybe_output = SparseTensor::Concat<T>(tensors); \
+ break; \
+ }
+
+ switch (dtype_) {
+ TF_CALL_ALL_TYPES(HANDLE_TYPE);
+ TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
+#undef HANDLE_TYPE
+ default:
+ OP_REQUIRES(context, false,
+ errors::Unimplemented(
+ "DeserializeSparse Unhandled data type: ", dtype_));
+ }
+ DCHECK(maybe_output);
+ SparseTensor& output = maybe_output.value();
+
+ // Compute the input shape for the reshape operation.
+ Tensor input_shape(DT_INT64, TensorShape({output.dims()}));
+ std::copy_n(output.shape().data(), output.dims(),
+ input_shape.vec<int64>().data());
+
+ // Compute the target shape for the reshape operation.
+ Tensor target_shape(DT_INT64, TensorShape({ndims + output.dims() - 2}));
+ for (int i = 0; i < ndims - 1; ++i) {
+ target_shape.vec<int64>()(i) = serialized_sparse.shape().dim_size(i);
+ }
+ for (int i = 0; i < output.dims() - 1; ++i) {
+ target_shape.vec<int64>()(i + ndims - 1) = output.shape().data()[i + 1];
+ }
+
+ Tensor output_indices;
+ Tensor output_shape;
+ Reshape(context, output.indices(), input_shape, target_shape,
+ 0 /* output indices index */, 2 /* output shape index */);
+ context->set_output(1, output.values());
+ }
+
+ private:
+ Status Deserialize(const string& serialized, Tensor* result) {
+ TensorProto proto;
+ if (!ParseProtoUnlimited(&proto, serialized)) {
+ return errors::InvalidArgument("Could not parse serialized proto");
+ }
+ Tensor tensor;
+ if (!tensor.FromProto(proto)) {
+ return errors::InvalidArgument("Could not construct tensor from proto");
+ }
+ *result = tensor;
+ return Status::OK();
+ }
+
+ Status GetAndValidateSparseTensor(
+ const string& serialized_indices, const string& serialized_values,
+ const string& serialized_shape, DataType values_dtype, int index,
+ Tensor* output_indices, Tensor* output_values, Tensor* output_shape) {
+ // Deserialize and validate the indices.
+ TF_RETURN_IF_ERROR(this->Deserialize(serialized_indices, output_indices));
+ if (!TensorShapeUtils::IsMatrix(output_indices->shape())) {
+ return errors::InvalidArgument(
+ "Expected serialized_sparse[", index,
+ ", 0] to represent an index matrix but received shape ",
+ output_indices->shape().DebugString());
+ }
+ int64 num_entries = output_indices->dim_size(0);
+ int rank = output_indices->dim_size(1);
+
+ // Deserialize and validate the values.
+ TF_RETURN_IF_ERROR(this->Deserialize(serialized_values, output_values));
+ if (!TensorShapeUtils::IsVector(output_values->shape())) {
+ return errors::InvalidArgument(
+ "Expected serialized_sparse[", index,
+ ", 1] to represent a values vector but received shape ",
+ output_values->shape().DebugString());
+ }
+ if (values_dtype != output_values->dtype()) {
+ return errors::InvalidArgument(
+ "Requested SparseTensor of type ", DataTypeString(values_dtype),
+ " but SparseTensor[", index,
+ "].values.dtype() == ", DataTypeString(output_values->dtype()));
+ }
+ if (num_entries != output_values->dim_size(0)) {
+ return errors::InvalidArgument(
+ "Expected row counts of SparseTensor[", index,
+ "].indices and SparseTensor[", index,
+ "].values to match but they do not: ", num_entries, " vs. ",
+ output_values->dim_size(0));
+ }
+
+ // Deserialize and validate the shape.
+ TF_RETURN_IF_ERROR(this->Deserialize(serialized_shape, output_shape));
+ if (!TensorShapeUtils::IsVector(output_shape->shape())) {
+ return errors::InvalidArgument(
+ "Expected serialized_sparse[", index,
+          ", 2] to be a shape vector but its shape is ",
+ output_shape->shape().DebugString());
+ }
+ if (rank != output_shape->dim_size(0)) {
+ return errors::InvalidArgument("Expected column counts of SparseTensor[",
+ index,
+ "].indices to match size of SparseTensor[",
+ index, "].shape but they do not: ", rank,
+ " vs. ", output_shape->dim_size(0));
+ }
+ return Status::OK();
+ }
+
+ DataType dtype_;
+};
+
+REGISTER_KERNEL_BUILDER(Name("DeserializeSparse")
+ .Device(DEVICE_CPU)
+ .TypeConstraint<string>("Tserialized"),
+ DeserializeSparseOp)
+
+REGISTER_KERNEL_BUILDER(Name("DeserializeManySparse").Device(DEVICE_CPU),
+ DeserializeSparseOp)
+
+} // namespace
+
+} // namespace tensorflow
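
Two ideas carry this kernel: each deserialized element's indices are widened with one leading batch coordinate, and the combined dense_shape is the per-dimension maximum over all elements, so one shape bounds every input. Both reduced to a self-contained sketch (standard C++ only; the helper names are hypothetical):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Prefix a batch coordinate onto each index row, as the expanded_indices
// construction above does.
std::vector<std::vector<int64_t>> PrefixBatchDim(
    const std::vector<std::vector<int64_t>>& indices, int64_t batch_index) {
  std::vector<std::vector<int64_t>> out;
  out.reserve(indices.size());
  for (const auto& row : indices) {
    std::vector<int64_t> expanded{batch_index};
    expanded.insert(expanded.end(), row.begin(), row.end());
    out.push_back(std::move(expanded));
  }
  return out;
}

// Combine dense shapes by per-dimension max, matching the shape.set_dim()
// loop above: the smallest dense_shape that bounds every input tensor.
void MergeShape(std::vector<int64_t>* shape, const std::vector<int64_t>& other) {
  for (std::size_t j = 0; j < shape->size(); ++j)
    (*shape)[j] = std::max((*shape)[j], other[j]);
}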
diff --git a/tensorflow/core/kernels/deserialize_sparse_variant_op.cc b/tensorflow/core/kernels/deserialize_sparse_variant_op.cc
new file mode 100644
index 0000000000..fce3029e4e
--- /dev/null
+++ b/tensorflow/core/kernels/deserialize_sparse_variant_op.cc
@@ -0,0 +1,372 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/register_types.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/framework/variant.h"
+#include "tensorflow/core/framework/variant_encode_decode.h"
+#include "tensorflow/core/lib/gtl/inlined_vector.h"
+
+namespace tensorflow {
+
+namespace {
+
+class DeserializeSparseOp : public OpKernel {
+ public:
+ explicit DeserializeSparseOp(OpKernelConstruction* context)
+ : OpKernel(context) {
+ OP_REQUIRES_OK(context, context->GetAttr("dtype", &dtype_));
+ }
+
+ void Compute(OpKernelContext* context) override {
+ const Tensor& input = context->input(0);
+
+ OP_REQUIRES(
+ context, input.dims() > 0,
+ errors::InvalidArgument("Serialized sparse should have non-zero rank ",
+ input.shape().DebugString()));
+ OP_REQUIRES(context, input.shape().dim_size(input.dims() - 1) == 3,
+ errors::InvalidArgument(
+ "Serialized sparse should have 3 as the last dimension ",
+ input.shape().DebugString()));
+
+ // `input_dims_to_stack` is the number of dimensions that will be added to
+ // each of the elements before they are concatenated into the output.
+ const int64 input_dims_to_stack = input.dims() - 1;
+ int num_sparse_tensors = 1;
+ for (int i = 0; i < input_dims_to_stack; ++i) {
+ num_sparse_tensors *= input.shape().dim_size(i);
+ }
+
+ if (num_sparse_tensors == 1 && input_dims_to_stack == 0) {
+ // Special case with a single sparse tensor, and no dimensions to add
+ // to the output indices. We can return the boxed tensors directly (after
+ // validating them).
+ const Tensor* output_indices;
+ const Tensor* output_values;
+ const Tensor* output_shape;
+ const auto& input_as_vec = input.vec<Variant>();
+ int64 total_non_zeros;
+ OP_REQUIRES_OK(context, GetAndValidateSparseTensorShape(
+ input_as_vec(1), input_as_vec(2), 0,
+ &output_shape, &total_non_zeros));
+ OP_REQUIRES_OK(context, GetAndValidateSparseTensorIndicesAndValues(
+ input_as_vec(0), input_as_vec(1), 0,
+ output_shape->NumElements(), &output_indices,
+ &output_values));
+ context->set_output(0, *output_indices);
+ context->set_output(1, *output_values);
+ context->set_output(2, *output_shape);
+ return;
+ }
+
+ OP_REQUIRES(
+ context, num_sparse_tensors > 0,
+ errors::InvalidArgument(
+ "Serialized sparse should have at least 1 serialized tensor, "
+ "but has a zero dimension ",
+ input.shape().DebugString()));
+
+ const auto& input_as_matrix = input.flat_inner_dims<Variant, 2>();
+
+    // Compute the output "dense shape" of, and the number of non-zero
+    // elements in, the stacked sparse tensors. Given an input of shape
+    // (S_0, ..., S_{input_dims_to_stack-1}, 3) and an element of dense shape
+    // (E_0, ..., E_n), the output dense shape will be
+    // (S_0, ..., S_{input_dims_to_stack-1}, E_0, ..., E_n).
+ Tensor* output_shape;
+ int64 total_non_zeros = 0;
+
+ // Allocate and build the initial output shape based on the element shape of
+ // the 0th sparse tensor in the input.
+ //
+ // NOTE(mrry): We define `element_shape` as a `const Tensor*` rather than a
+ // `Tensor` to avoid the overhead of allocating and deallocating a `Tensor`
+ // on the stack. While the per-`Tensor` cost is small, this op can unbox a
+ // large number of tensors (3 per batch element) and these fixed overheads
+ // dominate when the number of non-zeros per element is small.
+ const Tensor* element_shape;
+ OP_REQUIRES_OK(context, GetAndValidateSparseTensorShape(
+ input_as_matrix(0, 1), input_as_matrix(0, 2), 0,
+ &element_shape, &total_non_zeros));
+ OP_REQUIRES_OK(context,
+ context->allocate_output(
+ 2, {input_dims_to_stack + element_shape->NumElements()},
+ &output_shape));
+ const auto element_shape_vec = element_shape->vec<int64>();
+ auto output_shape_vec = output_shape->vec<int64>();
+ output_shape_vec(0) = num_sparse_tensors;
+ for (int64 j = 0; j < input_dims_to_stack; ++j) {
+ output_shape_vec(j) = input.dim_size(j);
+ }
+ for (int64 j = 0; j < element_shape->NumElements(); ++j) {
+ output_shape_vec(j + input_dims_to_stack) = element_shape_vec(j);
+ }
+
+ // Accumulate the number of non-zero elements from the remaining sparse
+ // tensors, and validate that they have compatible dense shapes.
+ //
+ // NOTE(mrry): For compatibility with the implementations of
+ // DeserializeManySparse, and many ops that generate SparseTensors to batch
+ // that do not have a fixed dense_shape (e.g. `tf.parse_single_example()`),
+ // we compute the maximum in each dimension to find the smallest dense_shape
+ // that bounds all of the input SparseTensors.
+ for (int i = 1; i < num_sparse_tensors; ++i) {
+ int64 num_non_zeros;
+ OP_REQUIRES_OK(context, GetAndValidateSparseTensorShape(
+ input_as_matrix(i, 1), input_as_matrix(i, 2),
+ i, &element_shape, &num_non_zeros));
+ total_non_zeros += num_non_zeros;
+ OP_REQUIRES(
+ context,
+ output_shape->NumElements() - input_dims_to_stack ==
+ element_shape->NumElements(),
+ errors::InvalidArgument(
+ "Inconsistent shape across SparseTensors: rank prior to "
+ "SparseTensor[",
+ i, "] was: ", output_shape->NumElements() - input_dims_to_stack,
+ " but rank of SparseTensor[", i,
+ "] is: ", element_shape->NumElements()));
+ const auto element_shape_vec = element_shape->vec<int64>();
+ for (int j = 0; j < element_shape->NumElements(); ++j) {
+ output_shape_vec(j + input_dims_to_stack) = std::max(
+ output_shape_vec(j + input_dims_to_stack), element_shape_vec(j));
+ }
+ }
+
+ // Compute the output "indices" matrix and "values" vector.
+ Tensor* output_indices;
+ Tensor* output_values;
+
+ const int output_rank = output_shape->NumElements();
+ OP_REQUIRES_OK(context,
+ context->allocate_output(
+ 0, {static_cast<int64>(total_non_zeros), output_rank},
+ &output_indices));
+ OP_REQUIRES_OK(
+ context, context->allocate_output(
+ 1, {static_cast<int64>(total_non_zeros)}, &output_values));
+
+ // The bulk of the work in this method involves building the output indices
+ // in a tight loop. For cache friendliness, we generate the indices in the
+ // order that they will be laid out in memory. We use raw pointers instead
+ // of Eigen element/slice indexing methods, to access the underlying index
+ // buffer to minimize the amount of work in that tight loop.
+ int64* output_indices_data = output_indices->matrix<int64>().data();
+ size_t current_row = 0;
+
+ for (int i = 0; i < num_sparse_tensors; ++i) {
+ const Tensor* element_indices;
+ const Tensor* element_values;
+ OP_REQUIRES_OK(context, this->GetAndValidateSparseTensorIndicesAndValues(
+ input_as_matrix(i, 0), input_as_matrix(i, 1),
+ i, output_rank - input_dims_to_stack,
+ &element_indices, &element_values));
+
+ const size_t num_index_rows = element_values->NumElements();
+
+ // An empty sparse tensor in the input will generate no data
+ // in the output. We short-circuit the rest of the iteration to avoid
+    // triggering assertions in Eigen when manipulating empty tensors (or
+ // slices of tensors).
+ if (num_index_rows == 0) continue;
+
+ const size_t start_row = current_row;
+ const size_t next_start_row = current_row + num_index_rows;
+
+ // NOTE(mrry): If the element is a scalar SparseTensor,
+ // `element_indices` will be an empty tensor, and this pointer will not
+ // be valid. However, we will not dereference the pointer in that case,
+ // because `input_dims_to_stack == output_rank`.
+ const int64* element_indices_data =
+ element_indices->matrix<int64>().data();
+
+ // Build the submatrix of `output_indices` for the i^th sparse tensor
+ // in the input.
+ //
+ // Each row of `output_indices` comprises `input_dims_to_stack` indices
+ // based on the position of the i^th sparse tensor in the input tensor,
+ // followed by the indices from the corresponding row in
+ // `element_indices`.
+ if (input_dims_to_stack == 1 && output_rank == 2) {
+ // We specialize this case because the compiler can generate
+ // more efficient code when the number of indices for each element is
+ // known statically. Since the most common use of this op is to
+ // serialize batches of SparseTensors, and the most common source of
+ // SparseTensors is the `tf.parse_single_example()` op, which generates
+ // 1-D SparseTensors, we statically unroll the loop for the rank 2
+ // output case.
+ for (; current_row < next_start_row; ++current_row) {
+ *output_indices_data++ = i;
+ *output_indices_data++ = *element_indices_data++;
+ }
+ } else {
+ // `sparse_tensor_index` is the tuple of indices that correspond to
+ // mapping the flat element index (`i`) back onto the stacked
+ // coordinates implied by the position of the i^th sparse tensor in the
+ // input tensor.
+ //
+ // We build `sparse_tensor_index` in reverse (innermost/minor dimension
+ // to outermost/major dimension). The `cumulative_product` represents
+ // the size of the inner subtensor for which `sparse_tensor_index` has
+ // already been built.
+ gtl::InlinedVector<int64, 4> sparse_tensor_index(input_dims_to_stack);
+ int cumulative_product = 1;
+ for (size_t j = 0; j < sparse_tensor_index.size(); ++j) {
+ size_t reverse_index = sparse_tensor_index.size() - j - 1;
+ sparse_tensor_index[reverse_index] =
+ (i / cumulative_product) % input.dim_size(reverse_index);
+ cumulative_product *= input.dim_size(reverse_index);
+ }
+ for (; current_row < next_start_row; ++current_row) {
+ for (int64 sparse_tensor_index_component : sparse_tensor_index) {
+ *output_indices_data++ = sparse_tensor_index_component;
+ }
+ for (size_t k = input_dims_to_stack; k < output_rank; ++k) {
+ *output_indices_data++ = *element_indices_data++;
+ }
+ }
+ }
+
+ // Build the subvector of `output_values` for the i^th sparse tensor
+ // in the input.
+ //
+ // NOTE(mrry): There is a potential optimization here where we use a T*
+ // to represent the current position in `output_values`, but it would
+ // require some rejigging of the template parameters.
+ // NOTE(mrry): Another potential optimization: if we know that this
+ // operation consumes its input, we could std::move non-primitive elements
+ // into the output and avoid a copy.
+ Eigen::DSizes<Eigen::DenseIndex, 1> values_start(start_row);
+ Eigen::DSizes<Eigen::DenseIndex, 1> values_sizes(num_index_rows);
+
+#define HANDLE_TYPE(T) \
+ case DataTypeToEnum<T>::value: { \
+ output_values->vec<T>().slice(values_start, values_sizes) = \
+ element_values->vec<T>(); \
+ break; \
+ }
+ switch (dtype_) {
+ TF_CALL_ALL_TYPES(HANDLE_TYPE);
+ TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
+#undef HANDLE_TYPE
+ default:
+ OP_REQUIRES_OK(
+ context, errors::Unimplemented(
+ "DeserializeSparse Unhandled data type: ", dtype_));
+ }
+ }
+ }
+
+ private:
+ Status GetAndValidateSparseTensorShape(const Variant& serialized_values,
+ const Variant& serialized_shape,
+ int index, const Tensor** output_shape,
+ int64* output_num_non_zeros) {
+ // Deserialize and validate the shape.
+ *output_shape = serialized_shape.get<Tensor>();
+ if (*output_shape == nullptr) {
+ return errors::InvalidArgument(
+ "Could not get a tensor from serialized_sparse[", index, ", 2]");
+ }
+ if ((*output_shape)->dtype() != DT_INT64) {
+ return errors::InvalidArgument(
+ "Expected serialized_sparse[", index,
+ ", 2] to be a vector of DT_INT64 but received dtype ",
+ DataTypeString((*output_shape)->dtype()));
+ }
+ if (!TensorShapeUtils::IsVector((*output_shape)->shape())) {
+ return errors::InvalidArgument(
+ "Expected serialized_sparse[", index,
+ ", 2] to be a shape vector but its shape is ",
+ (*output_shape)->shape().DebugString());
+ }
+ *output_num_non_zeros = serialized_values.get<Tensor>()->NumElements();
+ return Status::OK();
+ }
+
+ Status GetAndValidateSparseTensorIndicesAndValues(
+ const Variant& serialized_indices, const Variant& serialized_values,
+ int index, int expected_rank, const Tensor** output_indices,
+ const Tensor** output_values) {
+ // Deserialize and validate the indices.
+ *output_indices = serialized_indices.get<Tensor>();
+ if (*output_indices == nullptr) {
+ return errors::InvalidArgument(
+ "Could not get a tensor from serialized_sparse[", index, ", 0]");
+ }
+ if ((*output_indices)->dtype() != DT_INT64) {
+ return errors::InvalidArgument(
+ "Expected serialized_sparse[", index,
+ ", 0] to be a matrix of DT_INT64 but received dtype ",
+ DataTypeString((*output_indices)->dtype()));
+ }
+ if (!TensorShapeUtils::IsMatrix((*output_indices)->shape())) {
+ return errors::InvalidArgument(
+ "Expected serialized_sparse[", index,
+ ", 0] to represent an index matrix but received shape ",
+ (*output_indices)->shape().DebugString());
+ }
+ int64 num_entries = (*output_indices)->dim_size(0);
+ int rank = (*output_indices)->dim_size(1);
+ if (rank != expected_rank) {
+ return errors::InvalidArgument(
+ "Expected column counts of SparseTensor[", index,
+ "].indices to match size of SparseTensor[", index,
+ "].shape but they do not: ", rank, " vs. ", expected_rank);
+ }
+
+ // Deserialize and validate the values.
+ *output_values = serialized_values.get<Tensor>();
+ if (*output_values == nullptr) {
+ return errors::InvalidArgument(
+ "Could not get a tensor from serialized_sparse[", index, ", 1]");
+ }
+ if (!TensorShapeUtils::IsVector((*output_values)->shape())) {
+ return errors::InvalidArgument(
+ "Expected serialized_sparse[", index,
+ ", 1] to represent a values vector but received shape ",
+ (*output_values)->shape().DebugString());
+ }
+ if (dtype_ != (*output_values)->dtype()) {
+ return errors::InvalidArgument(
+ "Requested SparseTensor of type ", DataTypeString(dtype_),
+ " but SparseTensor[", index,
+ "].values.dtype() == ", DataTypeString((*output_values)->dtype()));
+ }
+ if (num_entries != (*output_values)->dim_size(0)) {
+ return errors::InvalidArgument(
+ "Expected row counts of SparseTensor[", index,
+ "].indices and SparseTensor[", index,
+ "].values to match but they do not: ", num_entries, " vs. ",
+ (*output_values)->dim_size(0));
+ }
+
+ return Status::OK();
+ }
+
+ DataType dtype_;
+};
+
+REGISTER_KERNEL_BUILDER(Name("DeserializeSparse")
+ .Device(DEVICE_CPU)
+ .TypeConstraint<Variant>("Tserialized"),
+ DeserializeSparseOp)
+
+} // namespace
+
+} // namespace tensorflow
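
The non-specialized branch above decodes the flat element index i into stacked coordinates by peeling dimensions from innermost to outermost. The same arithmetic in isolation (standard C++; the helper name is hypothetical):

#include <cstddef>
#include <cstdint>
#include <vector>

// Decodes flat element index i back into stacked row-major coordinates,
// innermost dimension first, exactly as the sparse_tensor_index loop does.
std::vector<int64_t> UnflattenIndex(int64_t i, const std::vector<int64_t>& dims) {
  std::vector<int64_t> coords(dims.size());
  int64_t cumulative_product = 1;
  for (std::size_t j = 0; j < dims.size(); ++j) {
    std::size_t reverse_index = dims.size() - j - 1;
    coords[reverse_index] = (i / cumulative_product) % dims[reverse_index];
    cumulative_product *= dims[reverse_index];
  }
  return coords;
}

// E.g. with dims = {2, 3}, i = 4 yields {1, 1}: row 1, column 1 in row-major
// order, which becomes the prefix of every output index row for element 4.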
diff --git a/tensorflow/core/kernels/edit_distance_op.cc b/tensorflow/core/kernels/edit_distance_op.cc
index 20d857c721..4aecdc9e41 100644
--- a/tensorflow/core/kernels/edit_distance_op.cc
+++ b/tensorflow/core/kernels/edit_distance_op.cc
@@ -133,10 +133,15 @@ class EditDistanceOp : public OpKernel {
std::vector<int64> sorted_order(truth_st_shape.dims());
std::iota(sorted_order.begin(), sorted_order.end(), 0);
- sparse::SparseTensor hypothesis(*hypothesis_indices, *hypothesis_values,
- hypothesis_st_shape, sorted_order);
- sparse::SparseTensor truth(*truth_indices, *truth_values, truth_st_shape,
- sorted_order);
+ sparse::SparseTensor hypothesis;
+ OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
+ *hypothesis_indices, *hypothesis_values,
+ hypothesis_st_shape, sorted_order, &hypothesis));
+
+ sparse::SparseTensor truth;
+ OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
+ *truth_indices, *truth_values, truth_st_shape,
+ sorted_order, &truth));
// Group dims 0, 1, ..., RANK - 1. The very last dim is assumed
// to store the variable length sequences.
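The constructor-to-factory change here reflects a pattern used throughout this patch: sparse::SparseTensor::Create returns a Status, so a malformed indices/values/shape triple becomes an OP_REQUIRES_OK error on the kernel context rather than a CHECK failure that aborts the process. A minimal sketch of the same idiom with a hypothetical type (BoundedBuffer is illustrative only, not TensorFlow API):

    // Status-returning factory: validation happens before the object is usable.
    class BoundedBuffer {
     public:
      BoundedBuffer() = default;  // default-constructible, then filled by Create
      static Status Create(int64 capacity, BoundedBuffer* out) {
        if (capacity <= 0) {
          return errors::InvalidArgument("capacity must be positive, got ",
                                         capacity);
        }
        out->capacity_ = capacity;
        return Status::OK();
      }

     private:
      int64 capacity_ = 0;
    };

Callers then follow the shape used in the hunk above: declare the object first, then `OP_REQUIRES_OK(ctx, BoundedBuffer::Create(n, &buf));`.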
diff --git a/tensorflow/core/kernels/fifo_queue.cc b/tensorflow/core/kernels/fifo_queue.cc
index a23478af5b..d6e859f1aa 100644
--- a/tensorflow/core/kernels/fifo_queue.cc
+++ b/tensorflow/core/kernels/fifo_queue.cc
@@ -366,4 +366,19 @@ Status FIFOQueue::MatchesNodeDef(const NodeDef& node_def) {
return Status::OK();
}
+// Defines a FIFOQueueOp, which produces a Queue (specifically, one
+// backed by FIFOQueue) that persists across different graph
+// executions and sessions. Running this op produces a single-element
+// tensor of handles to Queues on the corresponding device.
+FIFOQueueOp::FIFOQueueOp(OpKernelConstruction* context)
+ : TypedQueueOp(context) {
+ OP_REQUIRES_OK(context, context->GetAttr("shapes", &component_shapes_));
+}
+
+Status FIFOQueueOp::CreateResource(QueueInterface** ret) {
+ FIFOQueue* queue = new FIFOQueue(capacity_, component_types_,
+ component_shapes_, cinfo_.name());
+ return CreateTypedQueue(queue, ret);
+}
+
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/fifo_queue.h b/tensorflow/core/kernels/fifo_queue.h
index f01d70924d..697ee81c39 100644
--- a/tensorflow/core/kernels/fifo_queue.h
+++ b/tensorflow/core/kernels/fifo_queue.h
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_KERNELS_FIFO_QUEUE_H_
-#define TENSORFLOW_KERNELS_FIFO_QUEUE_H_
+#ifndef TENSORFLOW_CORE_KERNELS_FIFO_QUEUE_H_
+#define TENSORFLOW_CORE_KERNELS_FIFO_QUEUE_H_
#include <deque>
#include <vector>
@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/kernels/queue_op.h"
#include "tensorflow/core/kernels/typed_queue.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
@@ -69,6 +70,22 @@ class FIFOQueue : public TypedQueue<std::deque<PersistentTensor> > {
TF_DISALLOW_COPY_AND_ASSIGN(FIFOQueue);
};
+// Defines a FIFOQueueOp, which produces a Queue (specifically, one
+// backed by FIFOQueue) that persists across different graph
+// executions and sessions. Running this op produces a single-element
+// tensor of handles to Queues on the corresponding device.
+class FIFOQueueOp : public TypedQueueOp {
+ public:
+ explicit FIFOQueueOp(OpKernelConstruction* context);
+
+ private:
+ Status CreateResource(QueueInterface** ret) override
+ EXCLUSIVE_LOCKS_REQUIRED(mu_);
+
+ std::vector<TensorShape> component_shapes_;
+ TF_DISALLOW_COPY_AND_ASSIGN(FIFOQueueOp);
+};
+
} // namespace tensorflow
-#endif // TENSORFLOW_KERNELS_FIFO_QUEUE_H_
+#endif // TENSORFLOW_CORE_KERNELS_FIFO_QUEUE_H_
diff --git a/tensorflow/core/kernels/fifo_queue_op.cc b/tensorflow/core/kernels/fifo_queue_op.cc
index b35bdbb2f0..80869768f1 100644
--- a/tensorflow/core/kernels/fifo_queue_op.cc
+++ b/tensorflow/core/kernels/fifo_queue_op.cc
@@ -13,50 +13,11 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-// See docs in ../ops/data_flow_ops.cc.
-
-#include <deque>
-#include <vector>
-
#include "tensorflow/core/framework/op_kernel.h"
-#include "tensorflow/core/framework/resource_mgr.h"
-#include "tensorflow/core/framework/tensor.h"
-#include "tensorflow/core/framework/tensor_shape.h"
-#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/fifo_queue.h"
-#include "tensorflow/core/kernels/queue_base.h"
-#include "tensorflow/core/kernels/queue_op.h"
-#include "tensorflow/core/lib/core/errors.h"
-#include "tensorflow/core/platform/logging.h"
-#include "tensorflow/core/platform/macros.h"
-#include "tensorflow/core/platform/mutex.h"
-#include "tensorflow/core/platform/thread_annotations.h"
-#include "tensorflow/core/platform/types.h"
namespace tensorflow {
-// Defines a FIFOQueueOp, which produces a Queue (specifically, one
-// backed by FIFOQueue) that persists across different graph
-// executions, and sessions. Running this op produces a single-element
-// tensor of handles to Queues in the corresponding device.
-class FIFOQueueOp : public TypedQueueOp {
- public:
- explicit FIFOQueueOp(OpKernelConstruction* context) : TypedQueueOp(context) {
- OP_REQUIRES_OK(context, context->GetAttr("shapes", &component_shapes_));
- }
-
- private:
- Status CreateResource(QueueInterface** ret) override
- EXCLUSIVE_LOCKS_REQUIRED(mu_) {
- FIFOQueue* queue = new FIFOQueue(capacity_, component_types_,
- component_shapes_, cinfo_.name());
- return CreateTypedQueue(queue, ret);
- }
-
- std::vector<TensorShape> component_shapes_;
- TF_DISALLOW_COPY_AND_ASSIGN(FIFOQueueOp);
-};
-
REGISTER_KERNEL_BUILDER(Name("FIFOQueue").Device(DEVICE_CPU), FIFOQueueOp);
REGISTER_KERNEL_BUILDER(Name("FIFOQueueV2").Device(DEVICE_CPU), FIFOQueueOp);
diff --git a/tensorflow/core/kernels/function_ops.cc b/tensorflow/core/kernels/function_ops.cc
index f2724735bf..d5c33c0188 100644
--- a/tensorflow/core/kernels/function_ops.cc
+++ b/tensorflow/core/kernels/function_ops.cc
@@ -135,6 +135,12 @@ REGISTER_KERNEL_BUILDER(Name(kArgOp)
.TypeConstraint<ResourceHandle>("T"),
ArgOp);
+REGISTER_KERNEL_BUILDER(Name(kArgOp)
+ .Device(DEVICE_GPU)
+ .HostMemory("output")
+ .TypeConstraint<string>("T"),
+ ArgOp);
+
#define REGISTER(type) \
REGISTER_KERNEL_BUILDER( \
Name(kRetOp).Device(DEVICE_GPU).TypeConstraint<type>("T"), RetvalOp);
@@ -149,6 +155,12 @@ REGISTER_KERNEL_BUILDER(Name(kRetOp)
.TypeConstraint<ResourceHandle>("T")
.HostMemory("input"),
RetvalOp);
+
+REGISTER_KERNEL_BUILDER(Name(kRetOp)
+ .Device(DEVICE_GPU)
+ .TypeConstraint<string>("T")
+ .HostMemory("input"),
+ RetvalOp);
#undef REGISTER
class PassOn : public OpKernel {
@@ -297,20 +309,28 @@ class RemoteCallOp : public AsyncOpKernel {
explicit RemoteCallOp(OpKernelConstruction* ctx) : AsyncOpKernel(ctx) {
OP_REQUIRES_OK(ctx,
ctx->GetAttr(FunctionLibraryDefinition::kFuncAttr, &func_));
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("Tin", &input_dtypes_));
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("Tout", &output_dtypes_));
}
~RemoteCallOp() override {}
void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {
- const Tensor* target;
- OP_REQUIRES_OK_ASYNC(ctx, ctx->input("target", &target), done);
- const string& target_device =
- DeviceNameUtils::CanonicalizeDeviceName(target->scalar<string>()());
-
FunctionLibraryRuntime* lib = ctx->function_library();
OP_REQUIRES_ASYNC(ctx, lib != nullptr,
errors::Internal("No function library is provided."),
done);
+
+ const string& source_device = lib->device()->name();
+ const Tensor* target;
+ OP_REQUIRES_OK_ASYNC(ctx, ctx->input("target", &target), done);
+ string target_device;
+ OP_REQUIRES_OK_ASYNC(
+ ctx,
+ DeviceNameUtils::CanonicalizeDeviceName(target->scalar<string>()(),
+ source_device, &target_device),
+ done);
+
AttrValueMap attr_values = func_.attr();
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = target_device;
@@ -345,7 +365,7 @@ class RemoteCallOp : public AsyncOpKernel {
FunctionLibraryRuntime::Options opts;
opts.step_id = ctx->step_id();
opts.runner = ctx->runner();
- opts.source_device = lib->device()->name();
+ opts.source_device = source_device;
if (opts.source_device != target_device) {
opts.remote_execution = true;
}
@@ -355,6 +375,20 @@ class RemoteCallOp : public AsyncOpKernel {
for (const Tensor& argument : arguments) {
args.push_back(argument);
}
+ for (const auto& dtype : input_dtypes_) {
+ AllocatorAttributes arg_alloc_attrs;
+ if (DataTypeAlwaysOnHost(dtype)) {
+ arg_alloc_attrs.set_on_host(true);
+ }
+ opts.args_alloc_attrs.push_back(arg_alloc_attrs);
+ }
+ for (const auto& dtype : output_dtypes_) {
+ AllocatorAttributes ret_alloc_attrs;
+ if (DataTypeAlwaysOnHost(dtype)) {
+ ret_alloc_attrs.set_on_host(true);
+ }
+ opts.rets_alloc_attrs.push_back(ret_alloc_attrs);
+ }
auto* rets = new std::vector<Tensor>;
auto* activity = new tracing::ScopedActivity(strings::StrCat(
"RemoteCall: Run: ", func_.name(), " on ", target_device));
@@ -377,6 +411,8 @@ class RemoteCallOp : public AsyncOpKernel {
private:
NameAttrList func_;
+ DataTypeVector input_dtypes_;
+ DataTypeVector output_dtypes_;
mutex mu_;
typedef std::pair<string, FunctionLibraryRuntime*> FunctionTarget;
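The new Tin/Tout bookkeeping exists so that arguments and return values whose dtype must live on the host (strings, most notably, matching the DEVICE_GPU string registrations added earlier in this file) receive host-memory allocator attributes when the called function runs on a device. Both loops reduce to one idiom, sketched here as a free function; HostPinnedAttrs is a hypothetical name, not part of the patch:

    // Build one AllocatorAttributes per dtype, pinning host-only types.
    std::vector<AllocatorAttributes> HostPinnedAttrs(const DataTypeVector& dtypes) {
      std::vector<AllocatorAttributes> attrs(dtypes.size());
      for (size_t i = 0; i < dtypes.size(); ++i) {
        if (DataTypeAlwaysOnHost(dtypes[i])) {  // e.g. DT_STRING
          attrs[i].set_on_host(true);
        }
      }
      return attrs;
    }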
diff --git a/tensorflow/core/kernels/initializable_lookup_table.h b/tensorflow/core/kernels/initializable_lookup_table.h
index 990cbceac2..b4f81d9a70 100644
--- a/tensorflow/core/kernels/initializable_lookup_table.h
+++ b/tensorflow/core/kernels/initializable_lookup_table.h
@@ -51,7 +51,7 @@ class InitializableLookupTable : public LookupInterface {
"Insert not supported by InitializableLookupTable implementations");
}
- Status ExportValues(OpKernelContext* context) {
+ Status ExportValues(OpKernelContext* context) override {
return errors::Unimplemented(
"ExportValues not supported by InitializableLookupTable "
"implementations");
diff --git a/tensorflow/core/kernels/mkl_aggregate_ops.cc b/tensorflow/core/kernels/mkl_aggregate_ops.cc
index 4ad858e4a9..3d04aeeb3e 100644
--- a/tensorflow/core/kernels/mkl_aggregate_ops.cc
+++ b/tensorflow/core/kernels/mkl_aggregate_ops.cc
@@ -445,11 +445,10 @@ class MklAddNOp : public OpKernel {
     // at least one input is in MKL format, we choose output descriptor for
// reorder.
std::vector<primitive::at> inputs;
- std::vector<primitive> net;
// Check if actual input format of the tensor is different than common_pd
// we told MKLDNN. In that case, we will need reorder.
- src1.CheckReorderToOpMem(srcs_pd[0], &net);
- src2.CheckReorderToOpMem(srcs_pd[1], &net);
+ src1.CheckReorderToOpMem(srcs_pd[0]);
+ src2.CheckReorderToOpMem(srcs_pd[1]);
inputs.push_back(src1.GetOpMem());
inputs.push_back(src2.GetOpMem());
@@ -482,6 +481,7 @@ class MklAddNOp : public OpKernel {
dst.SetUsrMemDataHandle(dst_tensor);
// Create Sum op, and submit net for execution.
+ std::vector<primitive> net;
net.push_back(sum(sum_pd, inputs, dst.GetOpMem()));
stream(stream::kind::eager).submit(net).wait();
} catch (mkldnn::error& e) {
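This hunk and the mkl_concat_op.cc hunk below make the same move: CheckReorderToOpMem is now called without a net vector, which, in the MklDnnData API this patch builds on, runs any needed reorder immediately (an assumption inferred from these call sites, since mkl_util.h is not shown), and a fresh net is created only around the final compute primitive. Schematically, using the names from the hunk above:

    // Before: reorders and the op share one net, submitted once.
    std::vector<primitive> net;
    src1.CheckReorderToOpMem(srcs_pd[0], &net);  // reorder queued into `net`
    net.push_back(sum(sum_pd, inputs, dst.GetOpMem()));
    stream(stream::kind::eager).submit(net).wait();

    // After: reorders run eagerly; the net carries only the Sum op.
    src1.CheckReorderToOpMem(srcs_pd[0]);        // executes reorder if needed
    std::vector<primitive> net2;
    net2.push_back(sum(sum_pd, inputs, dst.GetOpMem()));
    stream(stream::kind::eager).submit(net2).wait();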
diff --git a/tensorflow/core/kernels/mkl_concat_op.cc b/tensorflow/core/kernels/mkl_concat_op.cc
index 31d1b949ef..6f490cdc23 100644
--- a/tensorflow/core/kernels/mkl_concat_op.cc
+++ b/tensorflow/core/kernels/mkl_concat_op.cc
@@ -704,14 +704,14 @@ class MklConcatOp : public OpKernel {
if (input_tensors[k].NumElements() == 0)
continue;
- auto src_dims = TFShapeToMklDnnDims(
- mkl_input_shapes[k].GetTfShape());
auto src_md = mkl_input_shapes[k].GetMklLayout();
srcs[k].SetUsrMem(src_md, &input_tensors[k]);
- if (src_md.data.format != mkl_common_format)
+ if (src_md.data.format != mkl_common_format) {
+            memory::dims src_dims(src_md.data.dims,
+                                  &src_md.data.dims[src_md.data.ndims]);
src_md = memory::desc(src_dims, MklDnnType<T>(),
mkl_common_format);
+ }
srcs_pd.push_back(memory::primitive_desc(src_md, cpu_engine));
}
@@ -756,11 +756,10 @@ class MklConcatOp : public OpKernel {
}
std::vector<primitive::at> inputs;
- std::vector<primitive> net;
if (isMklReorderNeeded) {
for (int k = 0; k < input_tensors.size(); k++) {
if (input_tensors[k].NumElements() > 0) {
- srcs[k].CheckReorderToOpMem(srcs_pd[k], &net);
+ srcs[k].CheckReorderToOpMem(srcs_pd[k]);
}
}
}
@@ -806,6 +805,7 @@ class MklConcatOp : public OpKernel {
dst.SetUsrMem(dst_md, dst_tensor);
auto concat_op = concat(concat_pd, inputs, dst.GetOpMem());
+ std::vector<primitive> net;
net.push_back(concat_op);
stream(stream::kind::eager).submit(net).wait();
} catch (mkldnn::error& e) {
diff --git a/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc b/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc
index 356eed8b67..4e80f5acce 100644
--- a/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc
+++ b/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc
@@ -54,9 +54,311 @@ using mkldnn::stream;
#include "tensorflow/core/util/mkl_util.h"
namespace tensorflow {
-
typedef Eigen::ThreadPoolDevice CPUDevice;
+#ifndef INTEL_MKL_ML
+
+struct MklConvBwdFilterParams {
+ memory::dims src_dims;
+ memory::dims diff_filter_dims;
+ memory::dims diff_bias_dims;
+ memory::dims diff_dst_dims;
+ memory::dims strides;
+ memory::dims dilations;
+ memory::dims padding_left;
+ memory::dims padding_right;
+ padding_kind padding;
+
+ MklConvBwdFilterParams(memory::dims src_dims,
+ memory::dims diff_filter_dims, memory::dims diff_bias_dims,
+ memory::dims diff_dst_dims, memory::dims strides,
+ memory::dims dilations, memory::dims padding_left,
+ memory::dims padding_right, padding_kind padding) :
+ src_dims(src_dims), diff_filter_dims(diff_filter_dims),
+ diff_bias_dims(diff_bias_dims), diff_dst_dims(diff_dst_dims),
+ strides(strides), dilations(dilations),
+ padding_left(padding_left), padding_right(padding_right),
+ padding(padding) {
+ }
+};
+
+template <typename T>
+class MklConv2DBwdFilterPrimitive : public MklPrimitive {
+ public:
+ explicit MklConv2DBwdFilterPrimitive(
+ const MklConvBwdFilterParams& convBwdFilterDims) :
+ cpu_engine_(engine::cpu, 0) {
+ context_.bwd_filter_stream.reset(new stream(stream::kind::eager));
+ // create conv primitive
+ if (context_.conv_bwd_filter == nullptr) {
+ Setup(convBwdFilterDims);
+ }
+ }
+
+ ~MklConv2DBwdFilterPrimitive() {}
+
+ // Convolution backward weights with bias
+ // src_data: input data buffer of src
+ // diff_filter_data: output data buffer of diff_filter
+ // diff_bias_data: output data buffer of diff_bias
+ // diff_dst_data: input data buffer of diff_dst
+ void Execute(const T* src_data, const T* diff_filter_data,
+ const T* diff_bias_data, const T* diff_dst_data) {
+ context_.src_mem->set_data_handle(
+ static_cast<void*>(const_cast<T*>(src_data)));
+ context_.diff_filter_mem->set_data_handle(
+ static_cast<void*>(const_cast<T*>(diff_filter_data)));
+ context_.diff_bias_mem->set_data_handle(
+ static_cast<void*>(const_cast<T*>(diff_bias_data)));
+ context_.diff_dst_mem->set_data_handle(
+ static_cast<void*>(const_cast<T*>(diff_dst_data)));
+
+ context_.bwd_filter_stream->submit(context_.bwd_filter_primitives);
+
+ context_.src_mem->set_data_handle(DummyData);
+ context_.diff_filter_mem->set_data_handle(DummyData);
+ context_.diff_bias_mem->set_data_handle(DummyData);
+ context_.diff_dst_mem->set_data_handle(DummyData);
+ return;
+ }
+
+ // Convolution backward weights without bias
+ // src_data: input data buffer of src
+ // diff_filter_data: output data buffer of diff_filter
+ // diff_dst_data: input data buffer of diff_dst
+ void Execute(const T* src_data,
+ const T* diff_filter_data, const T* diff_dst_data) {
+ context_.src_mem->set_data_handle(
+ static_cast<void*>(const_cast<T*>(src_data)));
+ context_.diff_filter_mem->set_data_handle(
+ static_cast<void*>(const_cast<T*>(diff_filter_data)));
+ context_.diff_dst_mem->set_data_handle(
+ static_cast<void*>(const_cast<T*>(diff_dst_data)));
+
+ context_.bwd_filter_stream->submit(context_.bwd_filter_primitives);
+
+ context_.src_mem->set_data_handle(DummyData);
+ context_.diff_filter_mem->set_data_handle(DummyData);
+ context_.diff_dst_mem->set_data_handle(DummyData);
+ return;
+ }
+
+ memory::format GetSrcMemoryFormat() const {
+ return context_.src_fmt;
+ }
+
+ memory::format GetDiffDstMemoryFormat() const {
+ return context_.diff_dst_fmt;
+ }
+
+ memory::format GetDiffFilterMemoryFormat() const {
+ return context_.diff_filter_fmt;
+ }
+
+  // convolution backward weights primitive descriptor
+ std::shared_ptr<mkldnn::convolution_backward_weights::primitive_desc>
+ GetPrimitiveDesc() const {
+ return context_.bwd_filter_pd;
+ }
+
+ private:
+ // Primitive reuse context for Conv2D bwd filter op
+ struct ConvBwdFilterContext {
+ // expected memory format for this primitive instance
+ memory::format src_fmt;
+ memory::format diff_dst_fmt;
+ memory::format diff_filter_fmt;
+
+    // convolution bwd filter primitive
+ std::shared_ptr<mkldnn::convolution_backward_weights::primitive_desc>
+ bwd_filter_pd;
+ std::shared_ptr<mkldnn::primitive> conv_bwd_filter;
+
+ // MKLDNN memory
+ std::shared_ptr<mkldnn::memory> src_mem;
+ std::shared_ptr<mkldnn::memory> diff_filter_mem;
+ std::shared_ptr<mkldnn::memory> diff_bias_mem;
+ std::shared_ptr<mkldnn::memory> diff_dst_mem;
+
+    // desc & primitive desc
+ std::shared_ptr<mkldnn::convolution_backward_weights::desc> bwd_filter_desc;
+ std::shared_ptr<mkldnn::convolution_forward::desc> fwd_desc;
+ std::shared_ptr<mkldnn::convolution_forward::primitive_desc> fwd_pd;
+
+ // memory desc: forward & backward can share same memory desc
+ std::shared_ptr<mkldnn::memory::desc> src_md;
+ std::shared_ptr<mkldnn::memory::desc> diff_filter_md;
+ std::shared_ptr<mkldnn::memory::desc> diff_bias_md;
+ std::shared_ptr<mkldnn::memory::desc> diff_dst_md;
+
+ // MKL pipeline
+ std::shared_ptr<mkldnn::stream> bwd_filter_stream;
+ std::vector<mkldnn::primitive> bwd_filter_primitives;
+
+ ConvBwdFilterContext() :
+ src_fmt(memory::format::any),
+ diff_dst_fmt(memory::format::any),
+ diff_filter_fmt(memory::format::any),
+ src_mem(nullptr), diff_filter_mem(nullptr),
+ diff_bias_mem(nullptr), diff_dst_mem(nullptr),
+ bwd_filter_desc(nullptr), fwd_desc(nullptr), fwd_pd(nullptr),
+ src_md(nullptr), diff_filter_md(nullptr),
+ diff_bias_md(nullptr), diff_dst_md(nullptr),
+ bwd_filter_stream(nullptr) {
+ }
+ };
+
+ // Setup Conv2d backward filter (weights) primitives.
+ void Setup(const MklConvBwdFilterParams& convBwdFilterDims) {
+ // create memory descriptors for convolution data w/ no specified format
+ context_.src_md.reset(new memory::desc({convBwdFilterDims.src_dims},
+ MklDnnType<T>(), memory::format::any));
+
+ context_.diff_dst_md.reset(new memory::desc(
+ {convBwdFilterDims.diff_dst_dims},
+ MklDnnType<T>(), memory::format::any));
+
+ context_.diff_filter_md.reset(new memory::desc(
+ {convBwdFilterDims.diff_filter_dims},
+ MklDnnType<T>(), memory::format::any));
+
+ if (!convBwdFilterDims.diff_bias_dims.empty())
+ context_.diff_bias_md.reset(new memory::desc(
+ {convBwdFilterDims.diff_bias_dims},
+ MklDnnType<T>(), memory::format::x));
+
+    // create a convolution backward weights descriptor, with or without bias
+ if (!convBwdFilterDims.diff_bias_dims.empty()) {
+ context_.bwd_filter_desc.reset(new convolution_backward_weights::desc(
+ convolution_direct, *context_.src_md, *context_.diff_filter_md,
+ *context_.diff_bias_md, *context_.diff_dst_md,
+ convBwdFilterDims.strides, convBwdFilterDims.dilations,
+ convBwdFilterDims.padding_left, convBwdFilterDims.padding_right,
+ convBwdFilterDims.padding));
+ } else {
+ context_.bwd_filter_desc.reset(
+ new convolution_backward_weights::desc(
+ convolution_direct, *context_.src_md, *context_.diff_filter_md,
+ *context_.diff_dst_md, convBwdFilterDims.strides,
+ convBwdFilterDims.dilations, convBwdFilterDims.padding_left,
+ convBwdFilterDims.padding_right, convBwdFilterDims.padding));
+ }
+
+ // create fwd primitive_desc
+ context_.fwd_desc.reset(new convolution_forward::desc(
+ prop_kind::forward, convolution_direct,
+ *context_.src_md, *context_.diff_filter_md, *context_.diff_dst_md,
+ convBwdFilterDims.strides,
+ convBwdFilterDims.dilations, convBwdFilterDims.padding_left,
+ convBwdFilterDims.padding_right, convBwdFilterDims.padding));
+ context_.fwd_pd.reset(new convolution_forward::primitive_desc(
+ *context_.fwd_desc, cpu_engine_));
+
+ // create backward conv primitive_desc
+ context_.bwd_filter_pd.reset(
+ new convolution_backward_weights::primitive_desc(
+ *context_.bwd_filter_desc, cpu_engine_, *context_.fwd_pd));
+
+ // store the expected memory format
+ auto bwd_filter_pd = context_.bwd_filter_pd.get();
+ context_.src_fmt = static_cast<mkldnn::memory::format>(
+ bwd_filter_pd->src_primitive_desc().desc().data.format);
+ context_.diff_filter_fmt = static_cast<mkldnn::memory::format>(
+ bwd_filter_pd->diff_weights_primitive_desc().desc().data.format);
+ context_.diff_dst_fmt = static_cast<mkldnn::memory::format>(
+ bwd_filter_pd->diff_dst_primitive_desc().desc().data.format);
+
+ // create memory primitive based on dummy data
+ context_.src_mem.reset(new memory(
+ bwd_filter_pd->src_primitive_desc(), DummyData));
+ context_.diff_filter_mem.reset(new memory(
+ bwd_filter_pd->diff_weights_primitive_desc(), DummyData));
+ context_.diff_dst_mem.reset(new memory(
+ bwd_filter_pd->diff_dst_primitive_desc(), DummyData));
+
+ // create convolution primitive and add it to net
+ if (!convBwdFilterDims.diff_bias_dims.empty()) {
+ context_.diff_bias_mem.reset(new memory(
+ {{{convBwdFilterDims.diff_bias_dims}, MklDnnType<T>(),
+ memory::format::x}, cpu_engine_}, DummyData));
+ context_.conv_bwd_filter.reset(new convolution_backward_weights(
+ *context_.bwd_filter_pd, *context_.src_mem, *context_.diff_dst_mem,
+ *context_.diff_filter_mem, *context_.diff_bias_mem));
+ } else {
+ context_.conv_bwd_filter.reset(new convolution_backward_weights(
+ *context_.bwd_filter_pd, *context_.src_mem,
+ *context_.diff_dst_mem, *context_.diff_filter_mem));
+ }
+
+ context_.bwd_filter_primitives.push_back(*context_.conv_bwd_filter);
+ }
+
+ struct ConvBwdFilterContext context_;
+ engine cpu_engine_;
+};
+
+template <typename T>
+class MklConv2DBwdFilterPrimitiveFactory : public MklPrimitiveFactory<T> {
+ public:
+ static MklConv2DBwdFilterPrimitive<T>* Get(
+ const MklConvBwdFilterParams& convBwdFilterDims) {
+ MklConv2DBwdFilterPrimitive<T>* conv2d_bwd_filter = nullptr;
+
+ // look into the pool for reusable primitive
+ conv2d_bwd_filter = dynamic_cast<MklConv2DBwdFilterPrimitive<T>*> (
+ MklConv2DBwdFilterPrimitiveFactory<T>::GetInstance().GetConv2dBwdFilter(
+ convBwdFilterDims));
+
+ if (conv2d_bwd_filter == nullptr) {
+ conv2d_bwd_filter = new MklConv2DBwdFilterPrimitive<T>(
+ convBwdFilterDims);
+ MklConv2DBwdFilterPrimitiveFactory<T>::GetInstance().SetConv2dBwdFilter(
+ convBwdFilterDims, conv2d_bwd_filter);
+ }
+ return conv2d_bwd_filter;
+ }
+
+ private:
+ MklConv2DBwdFilterPrimitiveFactory() {}
+ ~MklConv2DBwdFilterPrimitiveFactory() {}
+
+ static MklConv2DBwdFilterPrimitiveFactory& GetInstance() {
+ static MklConv2DBwdFilterPrimitiveFactory instance_;
+ return instance_;
+ }
+
+ static std::string CreateKey(
+ const MklConvBwdFilterParams& convBwdFilterDims) {
+ std::string prefix = "conv2d_bwd_filter";
+ FactoryKeyCreator key_creator;
+ key_creator.AddAsKey(prefix);
+ key_creator.AddAsKey(convBwdFilterDims.src_dims);
+ key_creator.AddAsKey(convBwdFilterDims.diff_filter_dims);
+ key_creator.AddAsKey(convBwdFilterDims.diff_bias_dims);
+ key_creator.AddAsKey(convBwdFilterDims.diff_dst_dims);
+ key_creator.AddAsKey(convBwdFilterDims.strides);
+ key_creator.AddAsKey(convBwdFilterDims.dilations);
+ key_creator.AddAsKey(convBwdFilterDims.padding_left);
+ key_creator.AddAsKey(convBwdFilterDims.padding_right);
+ return key_creator.GetKey();
+ }
+
+ MklPrimitive* GetConv2dBwdFilter(
+ const MklConvBwdFilterParams& convBwdFilterDims) {
+ std::string key = CreateKey(convBwdFilterDims);
+ return this->GetOp(key);
+ }
+
+ void SetConv2dBwdFilter(
+ const MklConvBwdFilterParams& convBwdFilterDims, MklPrimitive* op) {
+ std::string key = CreateKey(convBwdFilterDims);
+ this->SetOp(key, op);
+ }
+};
+
+#endif  // !INTEL_MKL_ML
+
#ifdef INTEL_MKL_ML
template <typename Device, class T>
@@ -442,11 +744,213 @@ class MklConv2DCustomBackpropFilterOp
: public MklConv2DBackpropCommonOp<Device, T> {
public:
explicit MklConv2DCustomBackpropFilterOp(OpKernelConstruction* context)
- : MklConv2DBackpropCommonOp<Device, T>(context) {}
+ : MklConv2DBackpropCommonOp<Device, T>(context) {
+ }
+
~MklConv2DCustomBackpropFilterOp() {}
+ void Compute(OpKernelContext* context) {
+ try {
+ MklDnnData<T> src(&cpu_engine_);
+ MklDnnData<T> diff_dst(&cpu_engine_);
+ MklDnnData<T> diff_filter(&cpu_engine_); // output
+
+ // Input tensors
+ const int kInputIdx = 0, kFilterIdx = 1, kOutbpropIdx = 2;
+ const Tensor& src_tensor = MklGetInput(context, kInputIdx);
+ const Tensor& filter_tensor = MklGetInput(context, kFilterIdx);
+ const Tensor& diff_dst_tensor = MklGetInput(context, kOutbpropIdx);
+
+ MklDnnShape src_mkl_shape, filter_mkl_shape, diff_dst_mkl_shape;
+ GetMklShape(context, kInputIdx, &src_mkl_shape);
+ GetMklShape(context, kFilterIdx, &filter_mkl_shape);
+ GetMklShape(context, kOutbpropIdx, &diff_dst_mkl_shape);
+ // Allow operator-specific sanity checking of shapes.
+ ValidateMklShapes(src_mkl_shape, filter_mkl_shape, diff_dst_mkl_shape);
+
+ // Allow operator-specific generation of shapes.
+ // E.g., Conv2DBackpropFilter gets filter as filter_sizes. It is a
+ // tensor containing shape of filter. So filter.shape() is not
+ // a correct way to get filter shape. These operator-specific calls
+ // allow this class to handle this case.
+ TensorShape src_tf_shape = MakeInputTfShape(context, src_tensor);
+ TensorShape filter_tf_shape = MakeFilterTfShape(context, filter_tensor);
+ TensorShape diff_dst_tf_shape = GetTfShape(context, kOutbpropIdx);
+
+ // Corner cases: output with 0 elements and 0 batch size.
+ Tensor* diff_filter_tensor = nullptr;
+ if (src_tf_shape.num_elements() == 0 ||
+ filter_tf_shape.num_elements() == 0 ||
+ diff_dst_tf_shape.num_elements() == 0) {
+ MklDnnShape diff_filter_mkl_shape;
+ diff_filter_mkl_shape.SetMklTensor(false);
+ TensorShape diff_filter_tf_shape = GetOutputTfShape(
+ src_tf_shape, filter_tf_shape, diff_dst_tf_shape);
+ const int kOutputIdx = 0;
+ AllocateOutputSetMklShape(context, kOutputIdx, &diff_filter_tensor,
+ diff_filter_tf_shape, diff_filter_mkl_shape);
+ CHECK_NOTNULL(diff_filter_tensor);
+
+        // If the output tensor has more than 0 elements, zero them out.
+ auto diff_filter_data = diff_filter_tensor->flat<T>().data();
+ for (size_t i = 0; i < diff_filter_tf_shape.num_elements(); ++i) {
+ diff_filter_data[i] = 0;
+ }
+ return;
+ }
+
+ // By default, all dims are in MKL order. Only dims in TF order
+      // are those with postfix tf_order.
+ memory::dims diff_dst_dims, fwd_src_dims, fwd_filter_dims;
+ memory::dims padding_left, padding_right, dilations,
+ strides, fwd_dst_dims;
+ memory::dims fwd_dst_dims_tf_order;
+
+ // Get forward convolution parameters.
+ MklDnnConvUtil conv_utl(context, this->strides_, this->padding_,
+ this->data_format_, this->dilations_);
+ conv_utl.GetConvFwdSizesInMklOrder(
+ src_tf_shape, filter_tf_shape, &fwd_src_dims, &fwd_filter_dims,
+ &strides, &dilations, &fwd_dst_dims_tf_order,
+ &fwd_dst_dims, &padding_left, &padding_right);
+ if (!context->status().ok()) return;
+
+ auto tf_fmt = TFDataFormatToMklDnnDataFormat(this->data_format_);
+ auto fwd_src_md =
+ src_mkl_shape.IsMklTensor()
+ ? src_mkl_shape.GetMklLayout()
+ : memory::desc(fwd_src_dims, MklDnnType<T>(), tf_fmt);
+
+ conv_utl.GetInputSizeInMklOrder(diff_dst_tf_shape, &diff_dst_dims);
+ if (!context->status().ok()) return;
+
+ auto diff_dst_md = diff_dst_mkl_shape.IsMklTensor()
+ ? diff_dst_mkl_shape.GetMklLayout()
+ : memory::desc(diff_dst_dims,
+ MklDnnType<T>(), tf_fmt);
+
+ memory::dims diff_bias_dims = {};
+ int64 depth = 0;
+ if (biasEnabled) {
+ TensorShape obp_tf_shape = GetTfShape(context, 2);
+ depth = (this->data_format_ == FORMAT_NCHW)
+ ? obp_tf_shape.dim_size(1)
+ : obp_tf_shape.dim_size(3);
+ diff_bias_dims = {static_cast<int>(depth)};
+ }
+
+ dilations[kDilationH] -= 1;
+ dilations[kDilationW] -= 1;
+
+ MklConv2DBwdFilterPrimitive<T> *conv2d_bwd_filter = nullptr;
+ MklConvBwdFilterParams convBwdFilterDims(fwd_src_dims, fwd_filter_dims,
+ diff_bias_dims, diff_dst_dims, strides, dilations, padding_left,
+ padding_right, TFPaddingToMklDnnPadding(this->padding_));
+ conv2d_bwd_filter = MklConv2DBwdFilterPrimitiveFactory<T>::Get(
+ convBwdFilterDims);
+ auto bwd_filter_pd = conv2d_bwd_filter->GetPrimitiveDesc();
+
+      // allocate output tensors: diff_filter and diff_bias (when bias is enabled)
+ auto bwd_output_dims = GetOutputDims(fwd_src_dims, fwd_filter_dims);
+
+ // diff_filter
+ MklDnnShape diff_filter_mkl_shape;
+ diff_filter_mkl_shape.SetMklTensor(false);
+      // bwd_output_dims is in OIHW (MKL) order; TF filter layout is HWIO.
+ TensorShape diff_filter_tf_shape(
+ {bwd_output_dims[MklDnnDims::Dim_H],
+ bwd_output_dims[MklDnnDims::Dim_W],
+ bwd_output_dims[MklDnnDims::Dim_I],
+ bwd_output_dims[MklDnnDims::Dim_O]});
+ AllocateOutputSetMklShape(context, 0, &diff_filter_tensor,
+ diff_filter_tf_shape, diff_filter_mkl_shape);
+
+ Tensor* diff_bias_tensor = nullptr;
+ if (biasEnabled) {
+ TensorShape diff_bias_shape({depth});
+ AllocateBiasGradTensor(context, diff_bias_shape, &diff_bias_tensor);
+ }
+
+ // check if src and diff_dst need reorder
+ std::vector<primitive> net;
+ T *src_data = nullptr;
+ if (fwd_src_md.data.format != conv2d_bwd_filter->GetSrcMemoryFormat()) {
+ src.SetUsrMem(fwd_src_md, &src_tensor);
+ src.CheckReorderToOpMem(
+ bwd_filter_pd->src_primitive_desc(), &net);
+ src_data = static_cast<T*>(src.GetOpMem().get_data_handle());
+ } else {
+ src_data = static_cast<T*>(const_cast<T*>(
+ src_tensor.flat<T>().data()));
+ }
+
+ T *diff_dst_data = nullptr;
+ if (diff_dst_md.data.format !=
+ conv2d_bwd_filter->GetDiffDstMemoryFormat()) {
+ diff_dst.SetUsrMem(diff_dst_md, &diff_dst_tensor);
+ diff_dst.CheckReorderToOpMem(
+ bwd_filter_pd->diff_dst_primitive_desc(), &net);
+ diff_dst_data = static_cast<T*>(
+ diff_dst.GetOpMem().get_data_handle());
+ } else {
+ diff_dst_data = static_cast<T*>(const_cast<T*>(
+ diff_dst_tensor.flat<T>().data()));
+ }
+ stream(stream::kind::eager).submit(net).wait();
+
+ // For backward filter, convert diff_filter back to Tensorflow layout
+ // Here we prepare to reorder op memory back to user memory
+ bool diff_filter_reorder_required = false;
+ T *diff_filter_data = nullptr;
+ if (GetOutputFormat(tf_fmt) !=
+ conv2d_bwd_filter->GetDiffFilterMemoryFormat()) {
+ // Allocate diff filter tensor as Tensorflow layout
+ diff_filter.SetUsrMem(bwd_output_dims, GetOutputFormat(tf_fmt),
+ diff_filter_tensor);
+ diff_filter_reorder_required = true;
+ diff_filter.PrepareReorderToUserMemIfReq(
+ bwd_filter_pd->diff_weights_primitive_desc());
+ diff_filter_data = static_cast<T*>(
+ diff_filter.GetOpMem().get_data_handle());
+ } else {
+ diff_filter_data = static_cast<T*>(const_cast<T*>(
+ diff_filter_tensor->flat<T>().data()));
+ }
+
+ // Execute convolution filter bwd
+ if (biasEnabled) {
+ T* diff_bias_data = static_cast<T*>(const_cast<T*>(
+ diff_bias_tensor->flat<T>().data()));
+ conv2d_bwd_filter->Execute(src_data, diff_filter_data,
+ diff_bias_data, diff_dst_data);
+ } else {
+ conv2d_bwd_filter->Execute(src_data, diff_filter_data, diff_dst_data);
+ }
+
+ // Reorder diff_filter back to Tensorflow layout if necessary
+ if (diff_filter_reorder_required) {
+ std::vector<primitive> net;
+ diff_filter.InsertReorderToUserMem(&net);
+ stream(stream::kind::eager).submit(net).wait();
+ }
+ } catch (mkldnn::error& e) {
+ string error_msg = "Status: " + std::to_string(e.status) +
+ ", message: " + string(e.message) + ", in file " +
+ string(__FILE__) + ":" + std::to_string(__LINE__);
+ OP_REQUIRES_OK(
+ context,
+ errors::Aborted("Operation received an exception:", error_msg));
+ }
+ }
+
private:
+ const int kInputIndex_Filter = 1;
+ const int kInputIndex_InputSizes = 0;
const int kDilationH = 0, kDilationW = 1;
+ engine cpu_engine_ = engine(engine::cpu, 0);
+
+  // Validate input shapes; asserts if any shape is invalid.
void ValidateMklShapes(const MklDnnShape& input_mkl_shape,
const MklDnnShape& filter_mkl_shape,
const MklDnnShape& obp_mkl_shape) {
@@ -454,141 +958,44 @@ class MklConv2DCustomBackpropFilterOp
<< "Conv2DBackpropFilter: filter should not be in MKL Layout";
}
- size_t GetInputTensorIndexWithSizes() { return 1; /* filter index */ }
-
+ // Get TensorFlow shape of input tensor.
TensorShape MakeInputTfShape(OpKernelContext* context,
const Tensor& input_tensor) {
size_t input_idx = 0;
return GetTfShape(context, input_idx);
}
+ // Get TensorFlow shape of filter tensor.
TensorShape MakeFilterTfShape(OpKernelContext* context,
const Tensor& filter_tensor) {
TensorShape filter_tf_shape;
CHECK_EQ(TensorShapeUtils::IsVector(filter_tensor.shape()), true);
CHECK_EQ(TensorShapeUtils::MakeShape(filter_tensor.vec<int32>(),
- &filter_tf_shape)
- .ok(),
- true);
+ &filter_tf_shape).ok(), true);
return filter_tf_shape;
}
+  // Get the TensorFlow shape of the output tensor (diff_filter),
+  // which is the same as the shape of the filter.
TensorShape GetOutputTfShape(const TensorShape& input_shape,
const TensorShape& filter_shape,
const TensorShape& outbprop_shape) {
- // Shape of output of Conv2DBackpropFilter is same as shape of filter.
return filter_shape;
}
+  // Get the shape of the output (diff_filter) in MKL-DNN order,
+  // computed from the input shape (fwd_input_dims) and the
+  // filter shape (fwd_filter_dims).
const memory::dims& GetOutputDims(const memory::dims& fwd_input_dims,
const memory::dims& fwd_filter_dims) {
- // Shape of output of Conv2DBackpropFilter is same as shape of filter.
return fwd_filter_dims;
}
+ // Output layout is Tensorflow's filter layout (HWIO).
memory::format GetOutputFormat(const memory::format data_format) {
- // Output layout is Tensorflow's filter layout (HWIO).
return memory::format::hwio;
}
- void CreatePrimitive(OpKernelContext* context, const engine& cpu_engine,
- const convolution_forward::primitive_desc& conv_fwd_pd,
- MklDnnData<T>* input, MklDnnData<T>* filter,
- MklDnnData<T>* outbackprop, MklDnnData<T>* output,
- Tensor** output_tensor,
- const memory::dims& strides,
- const memory::dims& dilations,
- const memory::dims& padding_l,
- const memory::dims& padding_r, padding_kind padding,
- const memory::dims& bwd_output_dims,
- memory::format bwd_output_format) {
- CHECK_NOTNULL(context);
- CHECK_NOTNULL(input);
- CHECK_NOTNULL(filter);
- CHECK_NOTNULL(outbackprop);
- CHECK_NOTNULL(output);
- CHECK_NOTNULL(output_tensor);
-
- MklDnnData<T>* bias_grad = nullptr;
- int depth = 0;
- if (biasEnabled) {
- // Data structure for bias_grad
- bias_grad = new MklDnnData<T>(&cpu_engine);
- TensorShape obp_tf_shape = GetTfShape(context, 2);
- depth = (MklConv2DBackpropCommonOp<Device, T>::GetTFDataFormat() ==
- FORMAT_NCHW)
- ? obp_tf_shape.dim_size(1)
- : obp_tf_shape.dim_size(3);
- memory::dims bias_grad_dims = {depth};
- bias_grad->SetOpMemDesc(bias_grad_dims, memory::format::x);
- }
-
- if (biasEnabled && (bias_grad != nullptr)) {
- // Create convolution backward weights with bias primitive.
- // Use dilated convolution in case dilate rates are greater than zero.
- auto bwd_desc = (dilations[kDilationH] > 0 || dilations[kDilationW] > 0) ?
- convolution_backward_weights::desc(convolution_direct,
- input->GetOpMemDesc(), output->GetOpMemDesc(),
- bias_grad->GetOpMemDesc(),
- outbackprop->GetOpMemDesc(), strides,
- dilations, padding_l, padding_r, padding) :
- convolution_backward_weights::desc(convolution_direct,
- input->GetOpMemDesc(), output->GetOpMemDesc(),
- bias_grad->GetOpMemDesc(),
- outbackprop->GetOpMemDesc(),
- strides, padding_l, padding_r, padding);
- auto bwd_pd = convolution_backward_weights::primitive_desc(bwd_desc,
- cpu_engine,
- conv_fwd_pd);
-
- // Allocate output tensor.
- AllocateOutputTensor(context, bwd_pd, bwd_output_dims,
- bwd_output_format, output_tensor);
-
- CHECK_NOTNULL(*output_tensor);
- // Set buffer handle using allocated output tensor.
- output->SetUsrMemDataHandle(*output_tensor);
-
- // Allocate bias_grad tensor
- TensorShape bias_grad_shape({depth});
- Tensor* bias_grad_tensor = nullptr;
- AllocateBiasGradTensor(context, bias_grad_shape, &bias_grad_tensor);
- memory::dims bias_grad_dims = {depth};
- // Since Bias is 1D, we use format::x from MKLDNN to represent it.
- auto bias_grad_md =
- memory::desc({bias_grad_dims}, MklDnnType<T>(), memory::format::x);
- bias_grad->SetUsrMem(bias_grad_md, bias_grad_tensor);
- bias_grad->SetUsrMemDataHandle(bias_grad_tensor);
-
- PrepareAndExecutePrimitive(bwd_pd, input, outbackprop, output,
- bias_grad);
- } else {
- // Create convolution backward weights primitive.
- // Use dilated convolution in case dilate rates are greater than zero.
- auto bwd_desc = (dilations[kDilationH] > 0 || dilations[kDilationW] > 0) ?
- convolution_backward_weights::desc(convolution_direct,
- input->GetOpMemDesc(), output->GetOpMemDesc(),
- outbackprop->GetOpMemDesc(), strides,
- dilations, padding_l, padding_r, padding) :
- convolution_backward_weights::desc(convolution_direct,
- input->GetOpMemDesc(), output->GetOpMemDesc(),
- outbackprop->GetOpMemDesc(),
- strides, padding_l, padding_r, padding);
- auto bwd_pd = convolution_backward_weights::primitive_desc(bwd_desc,
- cpu_engine,
- conv_fwd_pd);
-
- // Allocate output tensor.
- AllocateOutputTensor(context, bwd_pd, bwd_output_dims,
- bwd_output_format, output_tensor);
-
- CHECK_NOTNULL(*output_tensor);
- // Set buffer handle using allocated output tensor.
- output->SetUsrMemDataHandle(*output_tensor);
- PrepareAndExecutePrimitive(bwd_pd, input, outbackprop, output);
- }
- }
-
// Allocate output tensor.
void AllocateOutputTensor(
OpKernelContext* context,
@@ -623,40 +1030,8 @@ class MklConv2DCustomBackpropFilterOp
MklDnnShape bias_grad_mkl_shape;
bias_grad_mkl_shape.SetMklTensor(false);
- AllocateOutputSetMklShape(context, 1, bias_grad_tensor, bias_grad_shape,
- bias_grad_mkl_shape);
- }
-
- // Prepare and execute net - checks for input and output reorders.
- void PrepareAndExecutePrimitive(
- const convolution_backward_weights::primitive_desc& conv_pd,
- MklDnnData<T>* input, MklDnnData<T>* obp, MklDnnData<T>* output,
- MklDnnData<T>* bias_grad = nullptr) {
- // Create reorders between user layout and MKL layout if it is needed and
- // add it to the net before convolution.
- std::vector<primitive> net;
- input->CheckReorderToOpMem(conv_pd.src_primitive_desc(), &net);
- obp->CheckReorderToOpMem(conv_pd.diff_dst_primitive_desc(), &net);
-
- // For BackpropFilter, we convert the output tensor back in Tensorflow
- // layout.
- bool output_reorder_required = output->PrepareReorderToUserMemIfReq(
- conv_pd.diff_weights_primitive_desc());
-
- if (biasEnabled && (bias_grad != nullptr)) {
- net.push_back(convolution_backward_weights(
- conv_pd, input->GetOpMem(), obp->GetOpMem(), output->GetOpMem(),
- bias_grad->GetOpMem()));
- } else {
- net.push_back(convolution_backward_weights(
- conv_pd, input->GetOpMem(), obp->GetOpMem(), output->GetOpMem()));
- }
-
- if (output_reorder_required) {
- output->InsertReorderToUserMem(&net);
- }
-
- stream(stream::kind::eager).submit(net).wait();
+ AllocateOutputSetMklShape(context, 1, bias_grad_tensor,
+ bias_grad_shape, bias_grad_mkl_shape);
}
};
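The net effect of this file's rewrite: the op no longer rebuilds a convolution_backward_weights primitive on every Compute call. MklConv2DBwdFilterPrimitiveFactory keeps a process-wide singleton cache keyed on a string serialization of the shapes, strides, dilations, and paddings, and Compute only rebinds data pointers. The call sequence, as used in Compute above (float shown for concreteness; the surrounding dims are assumed to be computed as in the hunk):

    // First call with a given geometry builds and caches the primitive;
    // later calls with identical dims hit the cache.
    MklConvBwdFilterParams params(fwd_src_dims, fwd_filter_dims, diff_bias_dims,
                                  diff_dst_dims, strides, dilations,
                                  padding_left, padding_right, padding);
    MklConv2DBwdFilterPrimitive<float>* prim =
        MklConv2DBwdFilterPrimitiveFactory<float>::Get(params);
    prim->Execute(src_data, diff_filter_data, diff_dst_data);  // no-bias overload

The cache key covers each geometry field added via AddAsKey above, so any change in shape, stride, dilation, or padding extent produces a distinct cached primitive.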
diff --git a/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc b/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc
index 21b18f9119..0af4568b47 100644
--- a/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc
+++ b/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc
@@ -55,9 +55,246 @@ using mkldnn::stream;
#endif
namespace tensorflow {
-
typedef Eigen::ThreadPoolDevice CPUDevice;
+#ifndef INTEL_MKL_ML
+
+/// Utility classes enabling primitive reuse for backward conv2d ops.
+struct MklConvBwdInputParams {
+ memory::dims diff_src_dims;
+ memory::dims filter_dims;
+ memory::dims diff_dst_dims;
+ memory::dims strides;
+ memory::dims dilations;
+ memory::dims padding_left;
+ memory::dims padding_right;
+ padding_kind padding;
+
+ MklConvBwdInputParams(memory::dims diff_src_dims,
+ memory::dims filter_dims, memory::dims diff_dst_dims,
+ memory::dims strides, memory::dims dilations,
+ memory::dims padding_left, memory::dims padding_right,
+ padding_kind padding) :
+ diff_src_dims(diff_src_dims), filter_dims(filter_dims),
+ diff_dst_dims(diff_dst_dims), strides(strides),
+ dilations(dilations), padding_left(padding_left),
+ padding_right(padding_right), padding(padding) {
+ }
+};
+
+template <typename T>
+class MklConv2DBwdInputPrimitive : public MklPrimitive {
+ public:
+ explicit MklConv2DBwdInputPrimitive(
+ const MklConvBwdInputParams& convBwdInputDims) :
+ cpu_engine_(engine::cpu, 0) {
+ context_.bwd_input_stream.reset(new stream(stream::kind::eager));
+
+ // create conv primitive
+ if (context_.conv_bwd_input == nullptr) {
+ Setup(convBwdInputDims);
+ }
+ }
+ ~MklConv2DBwdInputPrimitive() {}
+
+  // Convolution backward input (computes diff_src)
+  // diff_src_data: output data buffer of diff_src
+  // filter_data: input data buffer of filter (weights)
+  // diff_dst_data: input data buffer of diff_dst
+  // Bias is not involved in backward input
+ void Execute(const T* diff_src_data,
+ const T* filter_data, const T* diff_dst_data) {
+ context_.diff_src_mem->set_data_handle(
+ static_cast<T*>(const_cast<T*>(diff_src_data)));
+ context_.filter_mem->set_data_handle(
+ static_cast<T*>(const_cast<T*>(filter_data)));
+ context_.diff_dst_mem->set_data_handle(
+ static_cast<T*>(const_cast<T*>(diff_dst_data)));
+
+ context_.bwd_input_stream->submit(context_.bwd_input_primitives);
+
+ // set back data handle
+ context_.diff_src_mem->set_data_handle(DummyData);
+ context_.filter_mem->set_data_handle(DummyData);
+ context_.diff_dst_mem->set_data_handle(DummyData);
+ return;
+ }
+
+ memory::format GetFilterMemoryFormat() const {
+ return context_.filter_fmt;
+ }
+
+ memory::format GetDiffDstMemoryFormat() const {
+ return context_.diff_dst_fmt;
+ }
+
+ std::shared_ptr<mkldnn::convolution_backward_data::primitive_desc>
+ GetPrimitiveDesc() const {
+ return context_.bwd_input_pd;
+ }
+
+ private:
+ // Primitive reuse context for Conv2D Bwd Input op
+ struct ConvBwdInputContext {
+ // expected memory format for this primitive instance
+ memory::format filter_fmt;
+ memory::format diff_dst_fmt;
+
+ // MKLDNN memory
+ std::shared_ptr<mkldnn::memory> diff_src_mem;
+ std::shared_ptr<mkldnn::memory> filter_mem;
+ std::shared_ptr<mkldnn::memory> diff_dst_mem;
+
+ // convolution primitive
+ std::shared_ptr<mkldnn::convolution_backward_data::primitive_desc>
+ bwd_input_pd;
+ std::shared_ptr<mkldnn::primitive> conv_bwd_input;
+
+    // desc & primitive desc
+ std::shared_ptr<mkldnn::convolution_backward_data::desc> bwd_input_desc;
+ std::shared_ptr<mkldnn::convolution_forward::desc> fwd_desc;
+ std::shared_ptr<mkldnn::convolution_forward::primitive_desc> fwd_pd;
+
+ // memory desc: forward & backward can share same memory::desc
+ std::shared_ptr<memory::desc> diff_src_md;
+ std::shared_ptr<memory::desc> filter_md;
+ std::shared_ptr<memory::desc> diff_dst_md;
+
+ // MKL pipeline
+ std::shared_ptr<mkldnn::stream> bwd_input_stream;
+ std::vector<mkldnn::primitive> bwd_input_primitives;
+
+ ConvBwdInputContext() :
+ filter_fmt(memory::format::any), diff_dst_fmt(memory::format::any),
+ diff_src_mem(nullptr), filter_mem(nullptr), diff_dst_mem(nullptr),
+ bwd_input_pd(nullptr), conv_bwd_input(nullptr),
+ bwd_input_desc(nullptr), fwd_desc(nullptr), fwd_pd(nullptr),
+ diff_src_md(nullptr), filter_md(nullptr), diff_dst_md(nullptr),
+ bwd_input_stream(nullptr) {
+ }
+ };
+
+ void Setup(const MklConvBwdInputParams& convBwdInputDims) {
+ // create memory descriptors for convolution data w/ no specified format
+ context_.diff_src_md.reset(new memory::desc(
+ {convBwdInputDims.diff_src_dims},
+ MklDnnType<T>(), memory::format::any));
+ context_.filter_md.reset(new memory::desc(
+ {convBwdInputDims.filter_dims},
+ MklDnnType<T>(), memory::format::any));
+ context_.diff_dst_md.reset(new memory::desc(
+ {convBwdInputDims.diff_dst_dims},
+ MklDnnType<T>(), memory::format::any));
+
+ // create convolution primitives
+ context_.bwd_input_desc.reset(new convolution_backward_data::desc(
+ convolution_direct, *context_.diff_src_md, *context_.filter_md,
+ *context_.diff_dst_md, convBwdInputDims.strides,
+ convBwdInputDims.dilations, convBwdInputDims.padding_left,
+ convBwdInputDims.padding_right, convBwdInputDims.padding));
+
+ context_.fwd_desc.reset(new convolution_forward::desc(prop_kind::forward,
+ convolution_direct, *context_.diff_src_md, *context_.filter_md,
+ *context_.diff_dst_md, convBwdInputDims.strides,
+ convBwdInputDims.dilations, convBwdInputDims.padding_left,
+ convBwdInputDims.padding_right, convBwdInputDims.padding));
+
+ context_.fwd_pd.reset(new convolution_forward::primitive_desc(
+ *context_.fwd_desc, cpu_engine_));
+
+ // create backward conv prim desc
+ context_.bwd_input_pd.reset(
+ new convolution_backward_data::primitive_desc(
+ *context_.bwd_input_desc, cpu_engine_, *context_.fwd_pd));
+
+ // create memory primitive based on dummy data
+ context_.diff_src_mem.reset(new memory(
+ context_.bwd_input_pd.get()->diff_src_primitive_desc(), DummyData));
+ context_.filter_mem.reset(new memory(
+ context_.bwd_input_pd.get()->weights_primitive_desc(), DummyData));
+ context_.diff_dst_mem.reset(new memory(
+ context_.bwd_input_pd.get()->diff_dst_primitive_desc(), DummyData));
+
+ // store the expected memory format
+ context_.filter_fmt = static_cast<memory::format>(
+ context_.bwd_input_pd.get()->weights_primitive_desc().desc().data.format);
+ context_.diff_dst_fmt = static_cast<memory::format>(
+ context_.bwd_input_pd.get()->diff_dst_primitive_desc().desc().data.format);
+
+ // create convolution primitive and add it to net
+ context_.conv_bwd_input.reset(new convolution_backward_data(
+ *context_.bwd_input_pd, *context_.diff_dst_mem,
+ *context_.filter_mem, *context_.diff_src_mem));
+
+ context_.bwd_input_primitives.push_back(*context_.conv_bwd_input);
+ }
+
+ struct ConvBwdInputContext context_;
+ engine cpu_engine_;
+};
+
+template <typename T>
+class MklConv2DBwdInputPrimitiveFactory : public MklPrimitiveFactory<T> {
+ private:
+ MklConv2DBwdInputPrimitiveFactory() {}
+ ~MklConv2DBwdInputPrimitiveFactory() {}
+
+ public:
+ static MklConv2DBwdInputPrimitive<T>* Get(
+ const MklConvBwdInputParams& convBwdInputDims) {
+ MklConv2DBwdInputPrimitive<T>* conv2d_bwd_input = nullptr;
+
+ // look into the pool for reusable primitive
+ conv2d_bwd_input = dynamic_cast<MklConv2DBwdInputPrimitive<T>*> (
+ MklConv2DBwdInputPrimitiveFactory<T>::GetInstance().GetConv2dBwdInput(
+ convBwdInputDims));
+
+ if (conv2d_bwd_input == nullptr) {
+ conv2d_bwd_input = new MklConv2DBwdInputPrimitive<T>(
+ convBwdInputDims);
+ MklConv2DBwdInputPrimitiveFactory<T>::GetInstance().SetConv2dBwdInput(
+ convBwdInputDims, conv2d_bwd_input);
+ }
+ return conv2d_bwd_input;
+ }
+
+ private:
+ static MklConv2DBwdInputPrimitiveFactory& GetInstance() {
+ static MklConv2DBwdInputPrimitiveFactory instance_;
+ return instance_;
+ }
+
+ static std::string CreateKey(
+ const MklConvBwdInputParams& convBwdInputDims) {
+ std::string prefix = "conv2d_bwd_input";
+ FactoryKeyCreator key_creator;
+ key_creator.AddAsKey(prefix);
+ key_creator.AddAsKey(convBwdInputDims.diff_src_dims);
+ key_creator.AddAsKey(convBwdInputDims.filter_dims);
+ key_creator.AddAsKey(convBwdInputDims.diff_dst_dims);
+ key_creator.AddAsKey(convBwdInputDims.strides);
+ key_creator.AddAsKey(convBwdInputDims.dilations);
+ key_creator.AddAsKey(convBwdInputDims.padding_left);
+ key_creator.AddAsKey(convBwdInputDims.padding_right);
+ return key_creator.GetKey();
+ }
+
+ MklPrimitive* GetConv2dBwdInput(
+ const MklConvBwdInputParams& convBwdInputDims) {
+ std::string key = CreateKey(convBwdInputDims);
+ return this->GetOp(key);
+ }
+
+ void SetConv2dBwdInput(
+ const MklConvBwdInputParams& convBwdInputDims, MklPrimitive *op) {
+ std::string key = CreateKey(convBwdInputDims);
+ this->SetOp(key, op);
+ }
+};
+
+#endif  // !INTEL_MKL_ML
+
#ifdef INTEL_MKL_ML
template <typename Device, class T>
@@ -365,13 +602,173 @@ class MklConv2DCustomBackpropInputOp
: public MklConv2DBackpropCommonOp<Device, T> {
public:
explicit MklConv2DCustomBackpropInputOp(OpKernelConstruction* context)
- : MklConv2DBackpropCommonOp<Device, T>(context) {}
+ : MklConv2DBackpropCommonOp<Device, T>(context) {
+ }
+
~MklConv2DCustomBackpropInputOp() {}
+ void Compute(OpKernelContext* context) {
+ try {
+ MklDnnData<T> filter(&cpu_engine);
+ MklDnnData<T> diff_dst(&cpu_engine);
+
+ // Input tensors
+ const int kInputIdx = 0, kFilterIdx = 1, kOutbpropIdx = 2;
+ const Tensor& src_tensor = MklGetInput(context, kInputIdx);
+ const Tensor& filter_tensor = MklGetInput(context, kFilterIdx);
+ const Tensor& diff_dst_tensor = MklGetInput(context, kOutbpropIdx);
+
+ MklDnnShape src_mkl_shape, filter_mkl_shape, diff_dst_mkl_shape;
+ GetMklShape(context, kInputIdx, &src_mkl_shape);
+ GetMklShape(context, kFilterIdx, &filter_mkl_shape);
+ GetMklShape(context, kOutbpropIdx, &diff_dst_mkl_shape);
+ // Allow operator-specific sanity checking of shapes.
+ ValidateMklShapes(src_mkl_shape, filter_mkl_shape,
+ diff_dst_mkl_shape);
+
+ // Allow operator-specific generation of shapes.
+ // E.g., Conv2DBackpropFilter gets filter as filter_sizes. It is a
+ // tensor containing shape of filter. So filter.shape() is not
+ // a correct way to get filter shape. These operator-specific calls
+ // allow this class to handle this case.
+ TensorShape src_tf_shape = MakeInputTfShape(context, src_tensor);
+ TensorShape filter_tf_shape = MakeFilterTfShape(context, filter_tensor);
+ TensorShape diff_dst_tf_shape = GetTfShape(context, kOutbpropIdx);
+
+ // Corner cases: output with 0 elements and 0 batch size.
+ Tensor* diff_src_tensor = nullptr;
+ if (src_tf_shape.num_elements() == 0 ||
+ filter_tf_shape.num_elements() == 0 ||
+ diff_dst_tf_shape.num_elements() == 0) {
+ MklDnnShape diff_src_mkl_shape;
+ diff_src_mkl_shape.SetMklTensor(false);
+ TensorShape diff_src_tf_shape = GetOutputTfShape(
+ src_tf_shape, filter_tf_shape, diff_dst_tf_shape);
+ const int kOutputIdx = 0;
+ AllocateOutputSetMklShape(context, kOutputIdx, &diff_src_tensor,
+ diff_src_tf_shape, diff_src_mkl_shape);
+ CHECK_NOTNULL(diff_src_tensor);
+
+        // If the output tensor has more than 0 elements, zero them out.
+ auto diff_src_data = diff_src_tensor->flat<T>().data();
+ for (size_t i = 0; i < diff_src_tf_shape.num_elements(); ++i) {
+ diff_src_data[i] = 0;
+ }
+ return;
+ }
+ // By default, all dims are in MKL order. Only dims in TF order
+ // are those with postfix tf_order.
+ memory::dims diff_dst_dims, fwd_src_dims, fwd_filter_dims;
+ memory::dims padding_left, padding_right, dilations, strides;
+ memory::dims fwd_output_dims, fwd_output_dims_tf_order;
+
+ // Get forward convolution parameters.
+ MklDnnConvUtil conv_utl(context, this->strides_, this->padding_,
+ this->data_format_, this->dilations_);
+ conv_utl.GetConvFwdSizesInMklOrder(
+ src_tf_shape, filter_tf_shape, &fwd_src_dims, &fwd_filter_dims,
+ &strides, &dilations, &fwd_output_dims_tf_order, &fwd_output_dims,
+ &padding_left, &padding_right);
+ if (!context->status().ok()) return;
+
+ // Create Convolution forward descriptor since Convolution backward
+ // API needs it. For that, we first need to create input, filter
+ // and output memory descriptors.
+ auto tf_fmt = TFDataFormatToMklDnnDataFormat(this->data_format_);
+
+ // If filter is in MKL layout, then simply grab filter layout;
+ // otherwise, construct filter in TF layout.
+ // For TF layout, filter is in HWIO format.
+ auto fwd_filter_md = filter_mkl_shape.IsMklTensor()
+ ? filter_mkl_shape.GetMklLayout()
+ : memory::desc(fwd_filter_dims, MklDnnType<T>(),
+ memory::format::hwio);
+
+ conv_utl.GetInputSizeInMklOrder(diff_dst_tf_shape, &diff_dst_dims);
+ if (!context->status().ok()) return;
+ auto diff_dst_md = diff_dst_mkl_shape.IsMklTensor()
+ ? diff_dst_mkl_shape.GetMklLayout()
+ : memory::desc(diff_dst_dims,
+ MklDnnType<T>(), tf_fmt);
+
+ dilations[kDilationH] -= 1;
+ dilations[kDilationW] -= 1;
+
+ MklConv2DBwdInputPrimitive<T> *conv2d_bwd_input = nullptr;
+ MklConvBwdInputParams convBwdInputDims(fwd_src_dims, fwd_filter_dims,
+ diff_dst_dims, strides, dilations, padding_left, padding_right,
+ TFPaddingToMklDnnPadding(this->padding_));
+ conv2d_bwd_input = MklConv2DBwdInputPrimitiveFactory<T>::Get(
+ convBwdInputDims);
+ auto bwd_input_pd = conv2d_bwd_input->GetPrimitiveDesc();
+
+ // allocate output tensor
+ auto diff_src_pd = bwd_input_pd->diff_src_primitive_desc();
+ auto bwd_diff_src_dims = GetOutputDims(fwd_src_dims, fwd_filter_dims);
+ auto bwd_diff_src_format = GetOutputFormat(tf_fmt);
+ MklDnnShape diff_src_mkl_shape;
+ diff_src_mkl_shape.SetMklTensor(true);
+ diff_src_mkl_shape.SetMklLayout(&diff_src_pd);
+ diff_src_mkl_shape.SetElemType(MklDnnType<T>());
+ diff_src_mkl_shape.SetTfLayout(bwd_diff_src_dims.size(),
+ bwd_diff_src_dims, bwd_diff_src_format);
+ TensorShape diff_src_tf_shape;
+ diff_src_tf_shape.AddDim(diff_src_pd.get_size() / sizeof(T));
+ AllocateOutputSetMklShape(context, 0, &diff_src_tensor,
+ diff_src_tf_shape, diff_src_mkl_shape);
+
+ T *diff_src_data = static_cast<T*>(const_cast<T*>(
+ diff_src_tensor->flat<T>().data()));
+
+ // check if filter and diff_dst need reorder
+ std::vector<primitive> net;
+ T* filter_data = nullptr;
+ if (fwd_filter_md.data.format !=
+ conv2d_bwd_input->GetFilterMemoryFormat()) {
+ filter.SetUsrMem(fwd_filter_md, &filter_tensor);
+ filter.CheckReorderToOpMem(
+ bwd_input_pd->weights_primitive_desc(),
+ &net);
+ filter_data = static_cast<T*>(filter.GetOpMem().get_data_handle());
+ } else {
+ filter_data = static_cast<T*>(const_cast<T*>(
+ filter_tensor.flat<T>().data()));
+ }
+
+ T* diff_dst_data = nullptr;
+ if (diff_dst_md.data.format !=
+ conv2d_bwd_input->GetDiffDstMemoryFormat()) {
+ diff_dst.SetUsrMem(diff_dst_md, &diff_dst_tensor);
+ diff_dst.CheckReorderToOpMem(
+ bwd_input_pd->diff_dst_primitive_desc(), &net);
+ diff_dst_data = static_cast<T*>(
+ diff_dst.GetOpMem().get_data_handle());
+ } else {
+ diff_dst_data = static_cast<T*>(const_cast<T*>(
+ diff_dst_tensor.flat<T>().data()));
+ }
+ stream(stream::kind::eager).submit(net).wait();
+
+ // execute convolution input bwd
+ conv2d_bwd_input->Execute(diff_src_data, filter_data, diff_dst_data);
+ } catch (mkldnn::error& e) {
+ string error_msg = "Status: " + std::to_string(e.status) +
+ ", message: " + string(e.message) + ", in file " +
+ string(__FILE__) + ":" + std::to_string(__LINE__);
+ OP_REQUIRES_OK(
+ context,
+ errors::Aborted("Operation received an exception:", error_msg));
+ }
+ }
+
private:
- const int kInputIndex_Filter = 1, kInputIndex_InputSizes = 0,
- kInputIndex_OutBackProp = 2;
+ const int kInputIndex_Filter = 1, kInputIndex_InputSizes = 0;
const int kDilationH = 0, kDilationW = 1;
+ engine cpu_engine = engine(engine::cpu, 0);
+
+  // Validate input shapes; asserts if any shape is invalid.
void ValidateMklShapes(const MklDnnShape& input_mkl_shape,
const MklDnnShape& filter_mkl_shape,
const MklDnnShape& obp_mkl_shape) {
@@ -382,8 +779,7 @@ class MklConv2DCustomBackpropInputOp
<< "Conv2DBackpropInput: input should not be in MKL Layout";
}
- size_t GetInputTensorIndexWithSizes() { return kInputIndex_InputSizes; }
-
+ // Get TensorFlow shape of input tensor.
TensorShape MakeInputTfShape(OpKernelContext* context,
const Tensor& input_tensor) {
TensorShape input_tf_shape;
@@ -395,72 +791,32 @@ class MklConv2DCustomBackpropInputOp
return input_tf_shape;
}
+ // Get TensorFlow shape of filter tensor.
TensorShape MakeFilterTfShape(OpKernelContext* context,
const Tensor& filter_tensor) {
return GetTfShape(context, kInputIndex_Filter);
}
+  // Get the TensorFlow shape of the output (diff_src),
+  // which is the same as the shape of the Conv2D 'input'.
TensorShape GetOutputTfShape(const TensorShape& input_shape,
const TensorShape& filter_shape,
const TensorShape& outbprop_shape) {
- // Output Shape of Conv2DBackpropInput is same as shape of Conv2D 'input'.
return input_shape;
}
+  // Get the MKL-DNN dims of the output (diff_src),
+  // which are the same as the Conv2D 'input' dims.
const memory::dims& GetOutputDims(const memory::dims& fwd_input_dims,
const memory::dims& fwd_filter_dims) {
- // Output Shape of Conv2DBackpropInput is same as shape of Conv2D 'input'.
return fwd_input_dims;
}
+ // Output layout is Tensorflow's layout in data format order.
memory::format GetOutputFormat(const memory::format data_format) {
- // Output layout is Tensorflow's layout in data format order.
return data_format;
}
- void CreatePrimitive(OpKernelContext* context, const engine& cpu_engine,
- const convolution_forward::primitive_desc& conv_fwd_pd,
- MklDnnData<T>* input, MklDnnData<T>* filter,
- MklDnnData<T>* outbackprop, MklDnnData<T>* output,
- Tensor** output_tensor,
- const memory::dims& strides,
- const memory::dims& dilations,
- const memory::dims& padding_l,
- const memory::dims& padding_r, padding_kind padding,
- const memory::dims& bwd_output_dims,
- memory::format bwd_output_format) {
- CHECK_NOTNULL(context);
- CHECK_NOTNULL(input);
- CHECK_NOTNULL(filter);
- CHECK_NOTNULL(outbackprop);
- CHECK_NOTNULL(output);
- CHECK_NOTNULL(output_tensor);
-
- // Create convolution backward data primitive.
- // Use dilated convolution in case dilate rates are greater than zero.
- auto bwd_desc = (dilations[kDilationH] > 0 || dilations[kDilationW] > 0) ?
- convolution_backward_data::desc(convolution_direct,
- output->GetOpMemDesc(), filter->GetOpMemDesc(),
- outbackprop->GetOpMemDesc(), strides,
- dilations, padding_l, padding_r, padding):
- convolution_backward_data::desc(convolution_direct,
- output->GetOpMemDesc(), filter->GetOpMemDesc(),
- outbackprop->GetOpMemDesc(),
- strides, padding_l, padding_r, padding);
-
- auto bwd_pd = convolution_backward_data::primitive_desc(
- bwd_desc, cpu_engine, conv_fwd_pd);
-
- // Allocate output tensor in TensorFlow and MKL layout.
- AllocateOutputTensor(context, bwd_pd, bwd_output_dims, bwd_output_format,
- output_tensor);
- CHECK_NOTNULL(*output_tensor);
- // Set buffer handle using allocated output tensor.
- output->SetUsrMemDataHandle(*output_tensor);
-
- PrepareAndExecutePrimitive(bwd_pd, filter, outbackprop, output);
- }
-
// Allocate output tensor.
void AllocateOutputTensor(
OpKernelContext* context,
@@ -487,22 +843,6 @@ class MklConv2DCustomBackpropInputOp
AllocateOutputSetMklShape(context, 0, output_tensor, output_tf_shape,
output_mkl_shape);
}
-
- // Prepare and execute net - checks for input and output reorders.
- void PrepareAndExecutePrimitive(
- const convolution_backward_data::primitive_desc& conv_pd,
- MklDnnData<T>* filter, MklDnnData<T>* obp, MklDnnData<T>* output) {
- // Create reorders between user layout and MKL layout if it is needed and
- // add it to the net before convolution.
- std::vector<primitive> net;
- filter->CheckReorderToOpMem(conv_pd.weights_primitive_desc(), &net);
- obp->CheckReorderToOpMem(conv_pd.diff_dst_primitive_desc(), &net);
-
- net.push_back(convolution_backward_data(
- conv_pd, obp->GetOpMem(), filter->GetOpMem(), output->GetOpMem()));
-
- stream(stream::kind::eager).submit(net).wait();
- }
};
#endif // INTEL_MKL_ML
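
The Compute path above repeats a reorder-or-alias idiom: hand the primitive the user buffer when its layout already matches what the primitive expects, otherwise reorder into scratch memory first. A minimal, self-contained sketch of that idiom follows; Format, Buffer, and ReorderOrAlias are stand-ins, not the MKL-DNN API, and a real reorder permutes the layout rather than copying bytes.

    #include <cstring>
    #include <vector>

    enum class Format { kNCHW, kBlocked };

    struct Buffer {
      Format format;
      std::vector<float> data;
    };

    // Stand-in for MklDnnData<T>::CheckReorderToOpMem + GetOpMem: returns a
    // pointer the op can consume, copying only when layouts differ.
    float* ReorderOrAlias(Buffer& user, Format op_format,
                          std::vector<float>& scratch) {
      if (user.format == op_format) return user.data.data();  // alias, no copy
      scratch.resize(user.data.size());
      std::memcpy(scratch.data(), user.data.data(),
                  user.data.size() * sizeof(float));  // real code reorders here
      return scratch.data();
    }

    int main() {
      Buffer filter{Format::kNCHW, std::vector<float>(16, 1.0f)};
      std::vector<float> scratch;
      float* filter_data = ReorderOrAlias(filter, Format::kBlocked, scratch);
      (void)filter_data;  // would feed conv2d_bwd_input->Execute(...)
    }
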
diff --git a/tensorflow/core/kernels/mkl_conv_ops.cc b/tensorflow/core/kernels/mkl_conv_ops.cc
index cede0b9dd6..b568973220 100644
--- a/tensorflow/core/kernels/mkl_conv_ops.cc
+++ b/tensorflow/core/kernels/mkl_conv_ops.cc
@@ -70,23 +70,25 @@ struct MklConvFwdParams {
memory::dims padding_left;
memory::dims padding_right;
- MklConvFwdParams(memory::dims src_dims,
- memory::dims filter_dims, memory::dims bias_dims,
- memory::dims dst_dims, memory::dims strides,
- memory::dims dilations, memory::dims padding_left,
- memory::dims padding_right) :
- src_dims(src_dims), filter_dims(filter_dims),
- bias_dims(bias_dims), dst_dims(dst_dims),
- strides(strides), dilations(dilations),
- padding_left(padding_left), padding_right(padding_right) {
- }
+ MklConvFwdParams(memory::dims src_dims, memory::dims filter_dims,
+ memory::dims bias_dims, memory::dims dst_dims,
+ memory::dims strides, memory::dims dilations,
+ memory::dims padding_left, memory::dims padding_right)
+ : src_dims(src_dims),
+ filter_dims(filter_dims),
+ bias_dims(bias_dims),
+ dst_dims(dst_dims),
+ strides(strides),
+ dilations(dilations),
+ padding_left(padding_left),
+ padding_right(padding_right) {}
};
template <typename T>
-class MklConv2DFwdPrimitive: public MklPrimitive {
+class MklConv2DFwdPrimitive : public MklPrimitive {
public:
- explicit MklConv2DFwdPrimitive(const MklConvFwdParams& convFwdDims) :
- cpu_engine_(engine::cpu, 0) {
+ explicit MklConv2DFwdPrimitive(const MklConvFwdParams& convFwdDims)
+ : cpu_engine_(engine::cpu, 0) {
context_.fwd_stream.reset(new stream(stream::kind::eager));
// create conv primitive
if (context_.conv_fwd == nullptr) {
@@ -101,8 +103,8 @@ class MklConv2DFwdPrimitive: public MklPrimitive {
// filter_data: input data buffer of filter (weights)
// bias_data: input data buffer of bias
// dst_data: output data buffer of dst
- void Execute(const T* src_data, const T* filter_data,
- const T* bias_data, const T* dst_data) {
+ void Execute(const T* src_data, const T* filter_data, const T* bias_data,
+ const T* dst_data) {
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)));
context_.filter_mem->set_data_handle(
@@ -126,8 +128,7 @@ class MklConv2DFwdPrimitive: public MklPrimitive {
// src_data: input data buffer of src
// filter_data: input data buffer of filter (weights)
// dst_data: output data buffer of dst
- void Execute(const T* src_data, const T* filter_data,
- const T* dst_data) {
+ void Execute(const T* src_data, const T* filter_data, const T* dst_data) {
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)));
context_.filter_mem->set_data_handle(
@@ -142,13 +143,9 @@ class MklConv2DFwdPrimitive: public MklPrimitive {
context_.dst_mem->set_data_handle(DummyData);
}
- memory::format GetSrcMemoryFormat() const {
- return context_.src_fmt;
- }
+ memory::format GetSrcMemoryFormat() const { return context_.src_fmt; }
- memory::format GetFilterMemoryFormat() const {
- return context_.filter_fmt;
- }
+ memory::format GetFilterMemoryFormat() const { return context_.filter_fmt; }
std::shared_ptr<mkldnn::convolution_forward::primitive_desc>
GetPrimitiveDesc() const {
@@ -184,43 +181,50 @@ class MklConv2DFwdPrimitive: public MklPrimitive {
std::shared_ptr<mkldnn::stream> fwd_stream;
std::vector<mkldnn::primitive> fwd_primitives;
- ConvFwdContext() :
- src_fmt(memory::format::any), filter_fmt(memory::format::any),
- src_mem(nullptr), filter_mem(nullptr), bias_mem(nullptr),
- dst_mem(nullptr), fwd_desc(nullptr),
- src_md(nullptr), filter_md(nullptr), bias_md(nullptr),
- fwd_pd(nullptr), conv_fwd(nullptr), fwd_stream(nullptr) {
- }
+ ConvFwdContext()
+ : src_fmt(memory::format::any),
+ filter_fmt(memory::format::any),
+ src_mem(nullptr),
+ filter_mem(nullptr),
+ bias_mem(nullptr),
+ dst_mem(nullptr),
+ fwd_desc(nullptr),
+ src_md(nullptr),
+ filter_md(nullptr),
+ bias_md(nullptr),
+ fwd_pd(nullptr),
+ conv_fwd(nullptr),
+ fwd_stream(nullptr) {}
};
void Setup(const MklConvFwdParams& convFwdDims) {
// create memory descriptors for convolution data w/ no specified format
- context_.src_md.reset(new memory::desc({convFwdDims.src_dims},
- MklDnnType<T>(), memory::format::any));
+ context_.src_md.reset(new memory::desc(
+ {convFwdDims.src_dims}, MklDnnType<T>(), memory::format::any));
- context_.filter_md.reset(new memory::desc({convFwdDims.filter_dims},
- MklDnnType<T>(), memory::format::any));
+ context_.filter_md.reset(new memory::desc(
+ {convFwdDims.filter_dims}, MklDnnType<T>(), memory::format::any));
- context_.dst_md.reset(new memory::desc({convFwdDims.dst_dims},
- MklDnnType<T>(), memory::format::any));
+ context_.dst_md.reset(new memory::desc(
+ {convFwdDims.dst_dims}, MklDnnType<T>(), memory::format::any));
if (!convFwdDims.bias_dims.empty())
- context_.bias_md.reset(new memory::desc({convFwdDims.bias_dims},
- MklDnnType<T>(), memory::format::any));
+ context_.bias_md.reset(new memory::desc(
+ {convFwdDims.bias_dims}, MklDnnType<T>(), memory::format::any));
// create a convolution
if (!convFwdDims.bias_dims.empty()) {
- context_.fwd_desc.reset(new convolution_forward::desc(prop_kind::forward,
- convolution_direct, *context_.src_md, *context_.filter_md,
- *context_.bias_md, *context_.dst_md,
+ context_.fwd_desc.reset(new convolution_forward::desc(
+ prop_kind::forward, convolution_direct, *context_.src_md,
+ *context_.filter_md, *context_.bias_md, *context_.dst_md,
convFwdDims.strides, convFwdDims.dilations, convFwdDims.padding_left,
convFwdDims.padding_right, padding_kind::zero));
} else {
- context_.fwd_desc.reset(new convolution_forward::desc(prop_kind::forward,
- convolution_direct, *context_.src_md, *context_.filter_md,
- *context_.dst_md, convFwdDims.strides, convFwdDims.dilations,
- convFwdDims.padding_left, convFwdDims.padding_right,
- padding_kind::zero));
+ context_.fwd_desc.reset(new convolution_forward::desc(
+ prop_kind::forward, convolution_direct, *context_.src_md,
+ *context_.filter_md, *context_.dst_md, convFwdDims.strides,
+ convFwdDims.dilations, convFwdDims.padding_left,
+ convFwdDims.padding_right, padding_kind::zero));
}
context_.fwd_pd.reset(new convolution_forward::primitive_desc(
@@ -234,24 +238,26 @@ class MklConv2DFwdPrimitive: public MklPrimitive {
context_.fwd_pd.get()->weights_primitive_desc().desc().data.format);
// create memory primitive based on dummy data
- context_.src_mem.reset(new memory(
- context_.fwd_pd.get()->src_primitive_desc(), DummyData));
- context_.filter_mem.reset(new memory(
- context_.fwd_pd.get()->weights_primitive_desc(), DummyData));
- context_.dst_mem.reset(new memory(
- context_.fwd_pd.get()->dst_primitive_desc(), DummyData));
+ context_.src_mem.reset(
+ new memory(context_.fwd_pd.get()->src_primitive_desc(), DummyData));
+ context_.filter_mem.reset(
+ new memory(context_.fwd_pd.get()->weights_primitive_desc(), DummyData));
+ context_.dst_mem.reset(
+ new memory(context_.fwd_pd.get()->dst_primitive_desc(), DummyData));
// create convolution primitive and add it to net
if (!convFwdDims.bias_dims.empty()) {
- context_.bias_mem.reset(new memory({{{convFwdDims.bias_dims},
- MklDnnType<T>(), memory::format::x}, cpu_engine_}, DummyData));
- context_.conv_fwd.reset(new convolution_forward(
- *context_.fwd_pd, *context_.src_mem, *context_.filter_mem,
- *context_.bias_mem, *context_.dst_mem));
+ context_.bias_mem.reset(new memory(
+ {{{convFwdDims.bias_dims}, MklDnnType<T>(), memory::format::x},
+ cpu_engine_},
+ DummyData));
+ context_.conv_fwd.reset(new convolution_forward(
+ *context_.fwd_pd, *context_.src_mem, *context_.filter_mem,
+ *context_.bias_mem, *context_.dst_mem));
} else {
- context_.conv_fwd.reset(new convolution_forward(
- *context_.fwd_pd, *context_.src_mem,
- *context_.filter_mem, *context_.dst_mem));
+ context_.conv_fwd.reset(
+ new convolution_forward(*context_.fwd_pd, *context_.src_mem,
+ *context_.filter_mem, *context_.dst_mem));
}
context_.fwd_primitives.push_back(*context_.conv_fwd);
@@ -266,19 +272,19 @@ template <typename T>
class MklConv2DFwdPrimitiveFactory : public MklPrimitiveFactory<T> {
public:
static MklConv2DFwdPrimitive<T>* Get(const MklConvFwdParams& convFwdDims) {
- MklConv2DFwdPrimitive<T>* conv2d_fwd = nullptr;
-
- // try to find a suitable one in pool
- conv2d_fwd = dynamic_cast<MklConv2DFwdPrimitive<T>*> (
- MklConv2DFwdPrimitiveFactory<T>::GetInstance().GetConv2DFwd(
- convFwdDims));
-
- if (conv2d_fwd == nullptr) {
- conv2d_fwd = new MklConv2DFwdPrimitive<T>(convFwdDims);
- MklConv2DFwdPrimitiveFactory<T>::GetInstance().SetConv2DFwd(
- convFwdDims, conv2d_fwd);
- }
- return conv2d_fwd;
+ MklConv2DFwdPrimitive<T>* conv2d_fwd = nullptr;
+
+    // Try to find a suitable primitive in the pool.
+ conv2d_fwd = dynamic_cast<MklConv2DFwdPrimitive<T>*>(
+ MklConv2DFwdPrimitiveFactory<T>::GetInstance().GetConv2DFwd(
+ convFwdDims));
+
+ if (conv2d_fwd == nullptr) {
+ conv2d_fwd = new MklConv2DFwdPrimitive<T>(convFwdDims);
+ MklConv2DFwdPrimitiveFactory<T>::GetInstance().SetConv2DFwd(convFwdDims,
+ conv2d_fwd);
+ }
+ return conv2d_fwd;
}
private:
@@ -312,7 +318,7 @@ class MklConv2DFwdPrimitiveFactory : public MklPrimitiveFactory<T> {
return this->GetOp(key);
}
- void SetConv2DFwd(const MklConvFwdParams& convFwdDims, MklPrimitive *op) {
+ void SetConv2DFwd(const MklConvFwdParams& convFwdDims, MklPrimitive* op) {
std::string key = CreateKey(convFwdDims);
this->SetOp(key, op);
}
@@ -865,22 +871,24 @@ class MklConv2DOp : public OpKernel {
dilations[kDilationW] -= 1;
// get a conv2d fwd from primitive pool
- MklConv2DFwdPrimitive<T> *conv2d_fwd = nullptr;
+ MklConv2DFwdPrimitive<T>* conv2d_fwd = nullptr;
if (biasEnabled) {
memory::dims bias_dims = {};
conv_utl.GetBiasSizeInMklOrder(kInputIndex_Bias, &bias_dims);
MklConvFwdParams convFwdDims(src_dims, filter_dims, bias_dims,
- dst_dims_mkl_order, strides, dilations, padding_left, padding_right);
+ dst_dims_mkl_order, strides, dilations,
+ padding_left, padding_right);
conv2d_fwd = MklConv2DFwdPrimitiveFactory<T>::Get(convFwdDims);
} else {
MklConvFwdParams convFwdDims(src_dims, filter_dims, NONE_DIMS,
- dst_dims_mkl_order, strides, dilations, padding_left, padding_right);
+ dst_dims_mkl_order, strides, dilations,
+ padding_left, padding_right);
conv2d_fwd = MklConv2DFwdPrimitiveFactory<T>::Get(convFwdDims);
}
// allocate output tensors output_tensor and filter_out_tensor
- std::shared_ptr<mkldnn::convolution_forward::primitive_desc>
- conv_fwd_pd = conv2d_fwd->GetPrimitiveDesc();
+ std::shared_ptr<mkldnn::convolution_forward::primitive_desc> conv_fwd_pd =
+ conv2d_fwd->GetPrimitiveDesc();
AllocateOutputTensor(context, *conv_fwd_pd,
dst_dims_mkl_order, tf_fmt, &dst_tensor);
Tensor* filter_out_tensor = nullptr;
@@ -891,31 +899,25 @@ class MklConv2DOp : public OpKernel {
T* dst_data = static_cast<T*>(dst_tensor->flat<T>().data());
// check whether src/filter need reorder
- std::vector<primitive> net;
T *src_data = nullptr;
if (src_md.data.format != conv2d_fwd->GetSrcMemoryFormat()) {
src.SetUsrMem(src_md, &src_tensor);
- src.CheckReorderToOpMem(
- conv_fwd_pd.get()->src_primitive_desc(), &net);
+ src.CheckReorderToOpMem(conv_fwd_pd.get()->src_primitive_desc());
src_data = static_cast<T*>(src.GetOpMem().get_data_handle());
} else {
- src_data = static_cast<T*>(const_cast<T*>(
- src_tensor.flat<T>().data()));
+ src_data = static_cast<T*>(const_cast<T*>(src_tensor.flat<T>().data()));
}
- T *filter_data = nullptr;
+ T* filter_data = nullptr;
if (filter_md.data.format != conv2d_fwd->GetFilterMemoryFormat()) {
filter.SetUsrMem(filter_md, &filter_tensor);
- filter.CheckReorderToOpMem(
- conv_fwd_pd.get()->weights_primitive_desc(),
- filter.GetTensorBuffer(filter_out_tensor), &net);
+ filter.CheckReorderToOpMem(conv_fwd_pd.get()->weights_primitive_desc(),
+ filter.GetTensorBuffer(filter_out_tensor));
filter_data = static_cast<T*>(filter.GetOpMem().get_data_handle());
} else {
- filter_data = static_cast<T*>(const_cast<T*>(
- filter_tensor.flat<T>().data()));
+ filter_data =
+ static_cast<T*>(const_cast<T*>(filter_tensor.flat<T>().data()));
}
- stream(stream::kind::eager).submit(net).wait();
-
// execute convolution
if (biasEnabled) {
@@ -1010,16 +1012,15 @@ class MklConv2DOp : public OpKernel {
// Create reorders between user layout and MKL layout if it is needed and
// add it to the net before convolution. No need to check for output
// reorder as we propagate output layout to the next layer.
- std::vector<primitive> net;
- src->CheckReorderToOpMem(conv_prim_desc.src_primitive_desc(), &net);
+ src->CheckReorderToOpMem(conv_prim_desc.src_primitive_desc());
// rather than re-order to a temp buffer, reorder directly to the
// filter output tensor
filter->CheckReorderToOpMem(conv_prim_desc.weights_primitive_desc(),
- filter->GetTensorBuffer(filter_out_tensor),
- &net);
+ filter->GetTensorBuffer(filter_out_tensor));
// Create convolution primitive and add it to net.
+ std::vector<primitive> net;
if (bias) {
CHECK_EQ(biasEnabled, true);
net.push_back(convolution_forward(conv_prim_desc, src->GetOpMem(),
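
The factory above amortizes primitive construction by caching primitives keyed on the convolution parameters. A simplified sketch of that create-on-miss pool, assuming hypothetical ConvParams and ConvPrimitive types (the real factory also round-trips through MklPrimitive* and dynamic_cast):

    #include <map>
    #include <memory>
    #include <sstream>
    #include <string>
    #include <vector>

    struct ConvParams {
      std::vector<int> src_dims, filter_dims, strides;
    };

    struct ConvPrimitive {
      explicit ConvPrimitive(const ConvParams&) { /* expensive setup */ }
    };

    class ConvPrimitiveFactory {
     public:
      static ConvPrimitive* Get(const ConvParams& p) {
        static ConvPrimitiveFactory instance;
        const std::string key = instance.CreateKey(p);
        auto it = instance.pool_.find(key);
        if (it == instance.pool_.end()) {
          // Miss: build the primitive once and cache it for reuse.
          it = instance.pool_
                   .emplace(key, std::make_unique<ConvPrimitive>(p))
                   .first;
        }
        return it->second.get();
      }

     private:
      // Serialize the parameters into a lookup key.
      std::string CreateKey(const ConvParams& p) {
        std::ostringstream key;
        for (int d : p.src_dims) key << d << ',';
        key << ';';
        for (int d : p.filter_dims) key << d << ',';
        key << ';';
        for (int d : p.strides) key << d << ',';
        return key.str();
      }
      std::map<std::string, std::unique_ptr<ConvPrimitive>> pool_;
    };
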
diff --git a/tensorflow/core/kernels/mkl_conv_ops.h b/tensorflow/core/kernels/mkl_conv_ops.h
index 8333a09316..5e1a5001dc 100644
--- a/tensorflow/core/kernels/mkl_conv_ops.h
+++ b/tensorflow/core/kernels/mkl_conv_ops.h
@@ -19,6 +19,7 @@ limitations under the License.
 #include <limits>
+#include <memory>
 #include <string>
 #include <vector>
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -349,6 +350,7 @@ class MklDnnConvUtil {
}
};
+
/////////////////////////////////////////////////////////////////////
/// Common class that implements Conv2DBackpropFilter and Input
/////////////////////////////////////////////////////////////////////
@@ -388,227 +390,17 @@ class MklConv2DBackpropCommonOp : public OpKernel {
OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_));
}
- void Compute(OpKernelContext* context) override {
- try {
- auto cpu_engine = engine(engine::cpu, 0);
-
- // Prepare common tensors for Conv2DBackpropInput and
- // Conv2DBackpropFilter.
- MklDnnData<T> input(&cpu_engine);
- MklDnnData<T> filter(&cpu_engine);
- MklDnnData<T> outbackprop(&cpu_engine);
- MklDnnData<T> output(&cpu_engine);
-
- // Input tensors
- const int kInputIdx = 0, kFilterIdx = 1, kOutbpropIdx = 2;
- const Tensor& input_tensor = MklGetInput(context, kInputIdx);
- const Tensor& filter_tensor = MklGetInput(context, kFilterIdx);
- const Tensor& outbprop_tensor = MklGetInput(context, kOutbpropIdx);
-
- MklDnnShape input_mkl_shape, filter_mkl_shape, outbprop_mkl_shape;
- GetMklShape(context, kInputIdx, &input_mkl_shape);
- GetMklShape(context, kFilterIdx, &filter_mkl_shape);
- GetMklShape(context, kOutbpropIdx, &outbprop_mkl_shape);
- // Allow operator-specific sanity checking of shapes.
- ValidateMklShapes(input_mkl_shape, filter_mkl_shape, outbprop_mkl_shape);
-
- // Allow operator-specific generation of shapes.
- // E.g., Conv2DBackpropFilter gets filter as filter_sizes. It is a
- // tensor containing shape of filter. So filter.shape() is not
- // a correct way to get filter shape. These operator-specific calls
- // allow this class to handle this case.
- TensorShape input_tf_shape = MakeInputTfShape(context, input_tensor);
- TensorShape filter_tf_shape = MakeFilterTfShape(context, filter_tensor);
- TensorShape outbprop_tf_shape = GetTfShape(context, kOutbpropIdx);
-
- // Corner cases: output with 0 elements and 0 batch size.
- Tensor* output_tensor = nullptr;
- if (input_tf_shape.num_elements() == 0 ||
- filter_tf_shape.num_elements() == 0 ||
- outbprop_tf_shape.num_elements() == 0) {
- MklDnnShape output_mkl_shape;
- output_mkl_shape.SetMklTensor(false);
- TensorShape output_tf_shape = GetOutputTfShape(
- input_tf_shape, filter_tf_shape, outbprop_tf_shape);
- const int kOutputIdx = 0;
- AllocateOutputSetMklShape(context, kOutputIdx, &output_tensor,
- output_tf_shape, output_mkl_shape);
- CHECK_NOTNULL(output_tensor);
-
- // if output tensor has more than 0 elements, we need to 0 them out.
- for (size_t i = 0; i < output_tf_shape.num_elements(); ++i) {
- output_tensor->flat<T>().data()[i] = 0;
- }
-
- return;
- }
-
- // By default, all dims are in MKL order. Only dims in TF order
- // are those with prefix tf_order.
- memory::dims outbprop_dims, fwd_input_dims, fwd_filter_dims;
- memory::dims padding_l, padding_r, dilations, strides, fwd_output_dims;
- memory::dims fwd_output_dims_tf_order;
-
- // Get forward convolution parameters.
- MklDnnConvUtil conv_utl(context, strides_, padding_, data_format_,
- dilations_);
- conv_utl.GetConvFwdSizesInMklOrder(
- input_tf_shape, filter_tf_shape, &fwd_input_dims, &fwd_filter_dims,
- &strides, &dilations, &fwd_output_dims_tf_order, &fwd_output_dims,
- &padding_l, &padding_r);
- if (!context->status().ok()) return;
-
- // Create Convolution forward descriptor since Convolution backward
- // API needs it. For that, we first need to create input, filter
- // and output memory descriptors.
- auto tf_fmt = TFDataFormatToMklDnnDataFormat(data_format_);
- // If input is in MKL layout, then simply grab input layout; otherwise,
- // construct input TF layout. For TF layout, although input shape
- // required is in MKL-DNN order, the layout is Tensorflow's layout
- // (NHWC or NCHW depending on data format).
- auto fwd_input_md =
- input_mkl_shape.IsMklTensor()
- ? input_mkl_shape.GetMklLayout()
- : memory::desc(fwd_input_dims, MklDnnType<T>(), tf_fmt);
- // If filter is in MKL layout, then simply grab filter layout; otherwise
- // construct filter in TF layout. For TF layout, filter is in HWIO format.
- auto fwd_filter_md = filter_mkl_shape.IsMklTensor()
- ? filter_mkl_shape.GetMklLayout()
- : memory::desc(fwd_filter_dims, MklDnnType<T>(),
- memory::format::hwio);
- // Tensorflow Output of Conv2D is in data_format order.
- auto fwd_out_md = memory::desc(fwd_output_dims, MklDnnType<T>(), tf_fmt);
-
- const int kDilationH = 0, kDilationW = 1;
- dilations[kDilationH] -= 1;
- dilations[kDilationW] -= 1;
- auto fwd_desc = (dilations[kDilationH] > 0 || dilations[kDilationW] > 0)?
- convolution_forward::desc(prop_kind::forward,
- convolution_direct, fwd_input_md,
- fwd_filter_md, fwd_out_md,
- strides, dilations, padding_l, padding_r,
- TFPaddingToMklDnnPadding(padding_)) :
- convolution_forward::desc(prop_kind::forward,
- convolution_direct, fwd_input_md,
- fwd_filter_md, fwd_out_md,
- strides, padding_l, padding_r,
- TFPaddingToMklDnnPadding(padding_));
- auto fwd_pd = convolution_forward::primitive_desc(fwd_desc, cpu_engine);
-
- // Create memory for user data. Describe how the inputs and outputs of
- // Convolution look like. Also specify buffers containing actual input
- // and output data.
-
- // Since this is a common class for both Conv2DBackpropFilter and
- // Conv2DBackpropInput, we skip SetUsrMem call for input tensor (for
- // Conv2DBackpropInput) and for filter tensor (for
- // conv2DBackpropFilter) depending on which tensor is int32 type.
- size_t input_with_sizes = GetInputTensorIndexWithSizes();
- if (input_with_sizes != kInputIdx) {
- // Shape of Conv2DBackpropFilter's input is same as Conv2D input.
- input.SetUsrMem(fwd_input_md, &input_tensor);
- } else if (input_with_sizes != kFilterIdx) {
- // Shape of Conv2DBackpropInput's filter is same as Conv2D filter.
- filter.SetUsrMem(fwd_filter_md, &filter_tensor);
- }
-
- conv_utl.GetInputSizeInMklOrder(outbprop_tf_shape, &outbprop_dims);
- if (!context->status().ok()) return;
- if (outbprop_mkl_shape.IsMklTensor()) {
- // If outbackprop is in Mkl layout, then simply grab it.
- auto outbprop_md = outbprop_mkl_shape.GetMklLayout();
- outbackprop.SetUsrMem(outbprop_md, &outbprop_tensor);
- } else {
- // If outbackprop is in TensorFlow layout, then we need to create memory
- // descriptor for it. Outbackprop shape is data format order.
- outbackprop.SetUsrMem(outbprop_dims, tf_fmt, &outbprop_tensor);
- }
-
- // Operator specific call to get output shape and data_format.
- auto bwd_output_dims = GetOutputDims(fwd_input_dims, fwd_filter_dims);
- auto bwd_output_format = GetOutputFormat(tf_fmt);
- output.SetUsrMem(bwd_output_dims, bwd_output_format);
-
- // Create memory descriptors for convolution data w/ no specified format.
- input.SetOpMemDesc(fwd_input_dims, memory::format::any);
- filter.SetOpMemDesc(fwd_filter_dims, memory::format::any);
- outbackprop.SetOpMemDesc(outbprop_dims, memory::format::any);
- output.SetOpMemDesc(bwd_output_dims, memory::format::any);
-
- // Operator-specific call to create and execute primitive.
- CreatePrimitive(context, cpu_engine, fwd_pd, &input, &filter,
- &outbackprop, &output, &output_tensor,
- strides, dilations, padding_l, padding_r,
- TFPaddingToMklDnnPadding(padding_),
- bwd_output_dims, bwd_output_format);
- } catch (mkldnn::error& e) {
- string error_msg = "Status: " + std::to_string(e.status) +
- ", message: " + string(e.message) + ", in file " +
- string(__FILE__) + ":" + std::to_string(__LINE__);
- OP_REQUIRES_OK(
- context,
- errors::Aborted("Operation received an exception:", error_msg));
- }
- }
-
- /// Pure virtual function to allow operator to check for validity of input
- /// shapes. Function asserts that input shapes are valid.
- virtual void ValidateMklShapes(const MklDnnShape& input_mkl_shape,
- const MklDnnShape& filter_mkl_shape,
- const MklDnnShape& outbprop_mkl_shape) = 0;
-
- /// Operator-specific function that returns index of input that is
- /// representing input sizes. For Conv2DBackpropFilter it returns 1 since
- /// filter for this operator is filter shape. For Conv2DBackpropInput it
- /// returns 0 (for input).
- virtual size_t GetInputTensorIndexWithSizes() = 0;
-
- /// Get TensorFlow shape of input tensor.
- virtual TensorShape MakeInputTfShape(OpKernelContext* context,
- const Tensor& input_tensor) = 0;
-
- /// Get TensorFlow shape of filter tensor.
- virtual TensorShape MakeFilterTfShape(OpKernelContext* context,
- const Tensor& filter_tensor) = 0;
-
- /// Get the TensorFlow shape of output tensor.
- virtual TensorShape GetOutputTfShape(const TensorShape& input_shape,
- const TensorShape& filter_shape,
- const TensorShape& outbprop_shape) = 0;
-
- /// Get shape of output in MKL-DNN order. Computes shape of output from
- /// input shape (fwd_input_dims) and filter shape (fwd_filter_dims).
- virtual const memory::dims& GetOutputDims(
- const memory::dims& fwd_input_dims,
- const memory::dims& fwd_filter_dims) = 0;
-
- /// Get data_format of output in MKL-DNN order. If output data format is
- /// same as input data format, then it simply returns value of data_format
- /// parameter as it is.
- virtual memory::format GetOutputFormat(const memory::format data_format) = 0;
-
- /// Create and execute the primitive storing output in the output_tensor.
- virtual void CreatePrimitive(OpKernelContext* context,
- const engine& cpu_engine,
- const convolution_forward::primitive_desc& conv_fwd_pd,
- MklDnnData<T>* input, MklDnnData<T>* filter, MklDnnData<T>* outbackprop,
- MklDnnData<T>* output, Tensor** output_tensor, const memory::dims& strides,
- const memory::dims& dilations, const memory::dims& padding_l,
- const memory::dims& padding_r, padding_kind padding,
- const memory::dims& bwd_output_dims,
- memory::format bwd_output_format) = 0;
-
- // Get the data_format {NCHW, NHWC}
- TensorFormat GetTFDataFormat() { return data_format_; }
-
- private:
+ protected:
+  // Data members accessible to derived classes.
std::vector<int32> dilations_;
std::vector<int32> strides_;
Padding padding_;
- TensorFormat data_format_;
+ TensorFormat data_format_; // NCHW or NHWC
};
+
#endif // INTEL_MKL_ML
+
/////////////////////////////////////////////////////////////////////
/// Dummy Mkl op that is just used for operators that are intermediate
/// output of node fusion in the graph
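
After this hunk, the template-method design (a base Compute calling pure-virtual hooks such as CreatePrimitive) is gone; the base class only carries the shared attributes as protected members, and each backprop kernel owns its Compute. A minimal sketch of the resulting shape, with hypothetical names:

    #include <vector>

    // Common base: shared attributes, no Compute().
    class MklConvBackpropCommonBase {
     protected:
      std::vector<int> strides_{1, 1, 1, 1};
      std::vector<int> dilations_{1, 1, 1, 1};
      int padding_ = 0;  // stand-in for the Padding enum
    };

    // Each backprop kernel now owns its Compute() and reads the shared
    // attributes directly instead of implementing virtual hooks.
    class BackpropInputKernel : public MklConvBackpropCommonBase {
     public:
      void Compute() {
        const int stride_h = strides_[1];
        (void)stride_h;  // ... build and execute the bwd-input primitive ...
      }
    };

    int main() { BackpropInputKernel().Compute(); }
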
diff --git a/tensorflow/core/kernels/mkl_lrn_op.cc b/tensorflow/core/kernels/mkl_lrn_op.cc
index dfe50e6a7f..7966c271d5 100644
--- a/tensorflow/core/kernels/mkl_lrn_op.cc
+++ b/tensorflow/core/kernels/mkl_lrn_op.cc
@@ -847,12 +847,12 @@ class MklLRNOp : public OpKernel {
MklDnnData<T>* src_dnn_data,
MklDnnData<T>* dst_dnn_data,
MklDnnData<uint8>* wksp_dnn_data = nullptr) {
- std::vector<primitive> net;
// Check for input reorder
- src_dnn_data->CheckReorderToOpMem(lrn_fwd_desc.src_primitive_desc(), &net);
+ src_dnn_data->CheckReorderToOpMem(lrn_fwd_desc.src_primitive_desc());
// Create pooling primitive and add it to net
+ std::vector<primitive> net;
if (wksp_dnn_data != nullptr) {
net.push_back(lrn_forward(lrn_fwd_desc, src_dnn_data->GetOpMem(),
wksp_dnn_data->GetOpMem(),
@@ -1160,15 +1160,15 @@ class MklLRNGradOp : public OpKernel {
MklDnnData<T>* output_diff_src,
const memory::primitive_desc& target_diff_dst_pd,
const MklDnnData<uint8>* workspace_dnn_data = nullptr) {
- std::vector<primitive> net;
// Check for input reordering on the diff dst input
input_gradient_diff_dst->CheckReorderToOpMem(
- lrn_bkwd_desc.diff_dst_primitive_desc(), &net);
+ lrn_bkwd_desc.diff_dst_primitive_desc());
// Check for input reordering on the original input
- src_dnn_data->CheckReorderToOpMem(lrn_fwd_desc.src_primitive_desc(), &net);
+ src_dnn_data->CheckReorderToOpMem(lrn_fwd_desc.src_primitive_desc());
// Create pooling primitive and add it to net
+ std::vector<primitive> net;
if (nullptr == workspace_dnn_data) {
net.push_back(lrn_backward(lrn_bkwd_desc, src_dnn_data->GetOpMem(),
input_gradient_diff_dst->GetOpMem(),
diff --git a/tensorflow/core/kernels/mkl_reshape_op.cc b/tensorflow/core/kernels/mkl_reshape_op.cc
index c44a6f3477..02ea9fc068 100644
--- a/tensorflow/core/kernels/mkl_reshape_op.cc
+++ b/tensorflow/core/kernels/mkl_reshape_op.cc
@@ -263,10 +263,7 @@ class MklReshapeOp : public OpKernel {
// shape_from != shape_to), then we just copy input tensor to
// output tensor with target shape (we cannot forward Mkl layout
// in such case because shape has changed.)
- std::vector<primitive> net;
- if (dnn_data_input.CheckReorderToOpMem(output_tf_pd, output_tensor,
- &net)) {
- stream(stream::kind::eager).submit(net).wait();
+ if (dnn_data_input.CheckReorderToOpMem(output_tf_pd, output_tensor)) {
} else {
OP_REQUIRES(
context, output_tensor->CopyFrom(input_tensor, shape_to),
diff --git a/tensorflow/core/kernels/mkl_tfconv_op.h b/tensorflow/core/kernels/mkl_tfconv_op.h
index 7e8ed1b1d6..f4f0035f26 100644
--- a/tensorflow/core/kernels/mkl_tfconv_op.h
+++ b/tensorflow/core/kernels/mkl_tfconv_op.h
@@ -111,10 +111,8 @@ class MklToTfOp : public OpKernel {
// Do we need to reorder Mkl layout into TensorFlow layout?
if (input.IsReorderNeeded(output_tf_pd)) {
// Insert reorder between Mkl layout and TensorFlow layout.
- std::vector<primitive> net;
- CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, output_tensor, &net),
+ CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, output_tensor),
true);
- stream(stream::kind::eager).submit(net).wait();
} else {
// If not, just forward input tensor to output tensor.
CHECK(output_tensor->CopyFrom(input_tensor, output_shape));
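
The hunks above all make the same API change: CheckReorderToOpMem no longer appends the reorder to a caller-owned std::vector<primitive> net for a later batch submit; it submits and waits on the reorder itself. A minimal before/after sketch, using std::function as a stand-in for mkldnn::primitive:

    #include <functional>
    #include <vector>

    using Primitive = std::function<void()>;  // stand-in for mkldnn::primitive

    // Old style: the helper appends to a caller-owned net; the caller
    // submits everything once at the end.
    void CheckReorderToOpMemOld(std::vector<Primitive>* net) {
      net->push_back([] { /* reorder user layout -> op layout */ });
    }

    // New style: the helper submits-and-waits on the reorder itself, so
    // callers no longer thread a net through every call site.
    void CheckReorderToOpMemNew() {
      Primitive reorder = [] { /* reorder user layout -> op layout */ };
      reorder();  // stream(eager).submit({reorder}).wait() in the real code
    }

    int main() {
      std::vector<Primitive> net;
      CheckReorderToOpMemOld(&net);
      for (Primitive& p : net) p();  // the single submit in the old style
      CheckReorderToOpMemNew();
    }
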
diff --git a/tensorflow/core/kernels/non_max_suppression_op.cc b/tensorflow/core/kernels/non_max_suppression_op.cc
index 23fdfe944a..f59843a07a 100644
--- a/tensorflow/core/kernels/non_max_suppression_op.cc
+++ b/tensorflow/core/kernels/non_max_suppression_op.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/core/kernels/non_max_suppression_op.h"
+#include <functional>
#include <queue>
#include <vector>
@@ -38,9 +39,32 @@ namespace {
typedef Eigen::ThreadPoolDevice CPUDevice;
+static inline void CheckScoreSizes(OpKernelContext* context, int num_boxes,
+ const Tensor& scores) {
+ // The shape of 'scores' is [num_boxes]
+ OP_REQUIRES(context, scores.dims() == 1,
+ errors::InvalidArgument("scores must be 1-D",
+ scores.shape().DebugString()));
+ OP_REQUIRES(context, scores.dim_size(0) == num_boxes,
+ errors::InvalidArgument("scores has incompatible shape"));
+}
+
+static inline void ParseAndCheckOverlapSizes(OpKernelContext* context,
+ const Tensor& overlaps,
+ int* num_boxes) {
+  // The shape of 'overlaps' is [num_boxes, num_boxes].
+ OP_REQUIRES(context, overlaps.dims() == 2,
+ errors::InvalidArgument("overlaps must be 2-D",
+ overlaps.shape().DebugString()));
+
+ *num_boxes = overlaps.dim_size(0);
+ OP_REQUIRES(context, overlaps.dim_size(1) == *num_boxes,
+ errors::InvalidArgument("overlaps must be square",
+ overlaps.shape().DebugString()));
+}
+
static inline void ParseAndCheckBoxSizes(OpKernelContext* context,
- const Tensor& boxes,
- const Tensor& scores, int* num_boxes) {
+ const Tensor& boxes, int* num_boxes) {
// The shape of 'boxes' is [num_boxes, 4]
OP_REQUIRES(context, boxes.dims() == 2,
errors::InvalidArgument("boxes must be 2-D",
@@ -48,18 +72,12 @@ static inline void ParseAndCheckBoxSizes(OpKernelContext* context,
*num_boxes = boxes.dim_size(0);
OP_REQUIRES(context, boxes.dim_size(1) == 4,
errors::InvalidArgument("boxes must have 4 columns"));
-
- // The shape of 'scores' is [num_boxes]
- OP_REQUIRES(context, scores.dims() == 1,
- errors::InvalidArgument("scores must be 1-D",
- scores.shape().DebugString()));
- OP_REQUIRES(context, scores.dim_size(0) == *num_boxes,
- errors::InvalidArgument("scores has incompatible shape"));
}
// Return intersection-over-union overlap between boxes i and j
-static inline float IOU(typename TTypes<float, 2>::ConstTensor boxes, int i,
- int j) {
+static inline bool IOUGreaterThanThreshold(
+ typename TTypes<float, 2>::ConstTensor boxes, int i, int j,
+ float iou_threshold) {
const float ymin_i = std::min<float>(boxes(i, 0), boxes(i, 2));
const float xmin_i = std::min<float>(boxes(i, 1), boxes(i, 3));
const float ymax_i = std::max<float>(boxes(i, 0), boxes(i, 2));
@@ -78,24 +96,36 @@ static inline float IOU(typename TTypes<float, 2>::ConstTensor boxes, int i,
const float intersection_area =
std::max<float>(intersection_ymax - intersection_ymin, 0.0) *
std::max<float>(intersection_xmax - intersection_xmin, 0.0);
- return intersection_area / (area_i + area_j - intersection_area);
+ const float iou = intersection_area / (area_i + area_j - intersection_area);
+ return iou > iou_threshold;
}
-void DoNonMaxSuppressionOp(OpKernelContext* context, const Tensor& boxes,
- const Tensor& scores, const Tensor& max_output_size,
- const float iou_threshold,
- const float score_threshold) {
- OP_REQUIRES(context, iou_threshold >= 0 && iou_threshold <= 1,
- errors::InvalidArgument("iou_threshold must be in [0, 1]"));
-
- int num_boxes = 0;
- ParseAndCheckBoxSizes(context, boxes, scores, &num_boxes);
- if (!context->status().ok()) {
- return;
- }
+static inline bool OverlapsGreaterThanThreshold(
+ typename TTypes<float, 2>::ConstTensor overlaps, int i, int j,
+ float overlap_threshold) {
+ return overlaps(i, j) > overlap_threshold;
+}
+
+static inline std::function<bool(int, int)> CreateIOUSuppressCheckFn(
+ const Tensor& boxes, float threshold) {
+ typename TTypes<float, 2>::ConstTensor boxes_data = boxes.tensor<float, 2>();
+ return std::bind(&IOUGreaterThanThreshold, boxes_data, std::placeholders::_1,
+ std::placeholders::_2, threshold);
+}
+
+static inline std::function<bool(int, int)> CreateOverlapsSuppressCheckFn(
+ const Tensor& overlaps, float threshold) {
+ typename TTypes<float, 2>::ConstTensor overlaps_data =
+ overlaps.tensor<float, 2>();
+ return std::bind(&OverlapsGreaterThanThreshold, overlaps_data,
+ std::placeholders::_1, std::placeholders::_2, threshold);
+}
+void DoNonMaxSuppressionOp(OpKernelContext* context, const Tensor& scores,
+ int num_boxes, const Tensor& max_output_size,
+ const float score_threshold,
+ std::function<bool(int, int)> suppress_check_fn) {
const int output_size = std::min(max_output_size.scalar<int>()(), num_boxes);
- TTypes<float, 2>::ConstTensor boxes_data = boxes.tensor<float, 2>();
std::vector<float> scores_data(num_boxes);
std::copy_n(scores.flat<float>().data(), num_boxes, scores_data.begin());
@@ -120,11 +150,9 @@ void DoNonMaxSuppressionOp(OpKernelContext* context, const Tensor& boxes,
std::vector<int> selected;
std::vector<float> selected_scores;
Candidate next_candidate;
- float iou, original_score;
while (selected.size() < output_size && !candidate_priority_queue.empty()) {
next_candidate = candidate_priority_queue.top();
- original_score = next_candidate.score;
candidate_priority_queue.pop();
// Overlapping boxes are likely to have similar scores,
@@ -132,9 +160,10 @@ void DoNonMaxSuppressionOp(OpKernelContext* context, const Tensor& boxes,
// in order to see if `next_candidate` should be suppressed.
bool should_select = true;
for (int j = selected.size() - 1; j >= 0; --j) {
- iou = IOU(boxes_data, next_candidate.box_index, selected[j]);
- if (iou == 0.0) continue;
- if (iou > iou_threshold) should_select = false;
+ if (suppress_check_fn(next_candidate.box_index, selected[j])) {
+ should_select = false;
+ break;
+ }
}
if (should_select) {
@@ -174,9 +203,19 @@ class NonMaxSuppressionOp : public OpKernel {
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
+ OP_REQUIRES(context, iou_threshold_ >= 0 && iou_threshold_ <= 1,
+ errors::InvalidArgument("iou_threshold must be in [0, 1]"));
+ int num_boxes = 0;
+ ParseAndCheckBoxSizes(context, boxes, &num_boxes);
+ CheckScoreSizes(context, num_boxes, scores);
+ if (!context->status().ok()) {
+ return;
+ }
+ auto suppress_check_fn = CreateIOUSuppressCheckFn(boxes, iou_threshold_);
+
const float score_threshold_val = std::numeric_limits<float>::lowest();
- DoNonMaxSuppressionOp(context, boxes, scores, max_output_size,
- iou_threshold_, score_threshold_val);
+ DoNonMaxSuppressionOp(context, scores, num_boxes, max_output_size,
+ score_threshold_val, suppress_check_fn);
}
private:
@@ -207,9 +246,19 @@ class NonMaxSuppressionV2Op : public OpKernel {
iou_threshold.shape().DebugString()));
const float iou_threshold_val = iou_threshold.scalar<float>()();
+ OP_REQUIRES(context, iou_threshold_val >= 0 && iou_threshold_val <= 1,
+ errors::InvalidArgument("iou_threshold must be in [0, 1]"));
+ int num_boxes = 0;
+ ParseAndCheckBoxSizes(context, boxes, &num_boxes);
+ CheckScoreSizes(context, num_boxes, scores);
+ if (!context->status().ok()) {
+ return;
+ }
+ auto suppress_check_fn = CreateIOUSuppressCheckFn(boxes, iou_threshold_val);
+
const float score_threshold_val = std::numeric_limits<float>::lowest();
- DoNonMaxSuppressionOp(context, boxes, scores, max_output_size,
- iou_threshold_val, score_threshold_val);
+ DoNonMaxSuppressionOp(context, scores, num_boxes, max_output_size,
+ score_threshold_val, suppress_check_fn);
}
};
@@ -245,8 +294,65 @@ class NonMaxSuppressionV3Op : public OpKernel {
score_threshold.shape().DebugString()));
const float score_threshold_val = score_threshold.scalar<float>()();
- DoNonMaxSuppressionOp(context, boxes, scores, max_output_size,
- iou_threshold_val, score_threshold_val);
+ OP_REQUIRES(context, iou_threshold_val >= 0 && iou_threshold_val <= 1,
+ errors::InvalidArgument("iou_threshold must be in [0, 1]"));
+ int num_boxes = 0;
+ ParseAndCheckBoxSizes(context, boxes, &num_boxes);
+ CheckScoreSizes(context, num_boxes, scores);
+ if (!context->status().ok()) {
+ return;
+ }
+ auto suppress_check_fn = CreateIOUSuppressCheckFn(boxes, iou_threshold_val);
+
+ DoNonMaxSuppressionOp(context, scores, num_boxes, max_output_size,
+ score_threshold_val, suppress_check_fn);
+ }
+};
+
+template <typename Device>
+class NonMaxSuppressionWithOverlapsOp : public OpKernel {
+ public:
+ explicit NonMaxSuppressionWithOverlapsOp(OpKernelConstruction* context)
+ : OpKernel(context) {}
+
+ void Compute(OpKernelContext* context) override {
+ // overlaps: [num_boxes, num_boxes]
+ const Tensor& overlaps = context->input(0);
+ // scores: [num_boxes]
+ const Tensor& scores = context->input(1);
+ // max_output_size: scalar
+ const Tensor& max_output_size = context->input(2);
+ OP_REQUIRES(
+ context, TensorShapeUtils::IsScalar(max_output_size.shape()),
+ errors::InvalidArgument("max_output_size must be 0-D, got shape ",
+ max_output_size.shape().DebugString()));
+ // overlap_threshold: scalar
+ const Tensor& overlap_threshold = context->input(3);
+ OP_REQUIRES(
+ context, TensorShapeUtils::IsScalar(overlap_threshold.shape()),
+ errors::InvalidArgument("overlap_threshold must be 0-D, got shape ",
+ overlap_threshold.shape().DebugString()));
+ const float overlap_threshold_val = overlap_threshold.scalar<float>()();
+
+ // score_threshold: scalar
+ const Tensor& score_threshold = context->input(4);
+ OP_REQUIRES(
+ context, TensorShapeUtils::IsScalar(score_threshold.shape()),
+ errors::InvalidArgument("score_threshold must be 0-D, got shape ",
+ score_threshold.shape().DebugString()));
+ const float score_threshold_val = score_threshold.scalar<float>()();
+
+ int num_boxes = 0;
+ ParseAndCheckOverlapSizes(context, overlaps, &num_boxes);
+ CheckScoreSizes(context, num_boxes, scores);
+ if (!context->status().ok()) {
+ return;
+ }
+ auto suppress_check_fn =
+ CreateOverlapsSuppressCheckFn(overlaps, overlap_threshold_val);
+
+ DoNonMaxSuppressionOp(context, scores, num_boxes, max_output_size,
+ score_threshold_val, suppress_check_fn);
}
};
@@ -259,4 +365,8 @@ REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV2").Device(DEVICE_CPU),
REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV3").Device(DEVICE_CPU),
NonMaxSuppressionV3Op<CPUDevice>);
+REGISTER_KERNEL_BUILDER(
+ Name("NonMaxSuppressionWithOverlaps").Device(DEVICE_CPU),
+ NonMaxSuppressionWithOverlapsOp<CPUDevice>);
+
} // namespace tensorflow
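
The refactor above replaces the inline IoU test with an injected suppress_check_fn predicate, so the IoU-based and overlap-matrix-based ops share one selection loop. A minimal, self-contained sketch; boxes are assumed pre-sorted by descending score, whereas the real kernel pops candidates off a priority queue:

    #include <functional>
    #include <iostream>
    #include <vector>

    std::vector<int> SelectBoxes(
        const std::vector<float>& scores, int max_output,
        const std::function<bool(int, int)>& suppress_check_fn) {
      std::vector<int> selected;
      for (int i = 0; i < static_cast<int>(scores.size()) &&
                      static_cast<int>(selected.size()) < max_output;
           ++i) {
        bool should_select = true;
        for (int j : selected) {
          // Suppress i if it overlaps an already-selected box too much.
          if (suppress_check_fn(i, j)) {
            should_select = false;
            break;
          }
        }
        if (should_select) selected.push_back(i);
      }
      return selected;
    }

    int main() {
      // Pairwise overlap matrix for two heavily overlapping boxes.
      std::vector<std::vector<float>> overlaps = {{1.0f, 0.8f},
                                                  {0.8f, 1.0f}};
      auto check = [&overlaps](int i, int j) { return overlaps[i][j] > 0.5f; };
      for (int idx : SelectBoxes({0.9f, 0.8f}, 3, check))
        std::cout << idx << "\n";  // prints only 0: box 1 is suppressed
    }
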
diff --git a/tensorflow/core/kernels/non_max_suppression_op_test.cc b/tensorflow/core/kernels/non_max_suppression_op_test.cc
index ed7db313bd..055161a35f 100644
--- a/tensorflow/core/kernels/non_max_suppression_op_test.cc
+++ b/tensorflow/core/kernels/non_max_suppression_op_test.cc
@@ -569,4 +569,241 @@ TEST_F(NonMaxSuppressionV3OpTest, TestEmptyInput) {
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
+//
+// NonMaxSuppressionWithOverlapsOp Tests
+//
+
+class NonMaxSuppressionWithOverlapsOpTest : public OpsTestBase {
+ protected:
+ void MakeOp() {
+ TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op",
+ "NonMaxSuppressionWithOverlaps")
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_INT32))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Finalize(node_def()));
+ TF_EXPECT_OK(InitOp());
+ }
+
+ void AddIoUInput(const std::vector<float>& boxes) {
+ ASSERT_EQ((boxes.size() % 4), 0);
+ size_t num_boxes = boxes.size() / 4;
+ std::vector<float> iou_overlaps(num_boxes * num_boxes);
+
+    // Compute the pairwise IoU overlaps.
+ auto corner_access = [&boxes](size_t box_idx, size_t corner_idx) {
+ return boxes[box_idx * 4 + corner_idx];
+ };
+ for (size_t i = 0; i < num_boxes; ++i) {
+ for (size_t j = 0; j < num_boxes; ++j) {
+ const float ymin_i =
+ std::min<float>(corner_access(i, 0), corner_access(i, 2));
+ const float xmin_i =
+ std::min<float>(corner_access(i, 1), corner_access(i, 3));
+ const float ymax_i =
+ std::max<float>(corner_access(i, 0), corner_access(i, 2));
+ const float xmax_i =
+ std::max<float>(corner_access(i, 1), corner_access(i, 3));
+ const float ymin_j =
+ std::min<float>(corner_access(j, 0), corner_access(j, 2));
+ const float xmin_j =
+ std::min<float>(corner_access(j, 1), corner_access(j, 3));
+ const float ymax_j =
+ std::max<float>(corner_access(j, 0), corner_access(j, 2));
+ const float xmax_j =
+ std::max<float>(corner_access(j, 1), corner_access(j, 3));
+ const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);
+ const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);
+
+ float iou;
+ if (area_i <= 0 || area_j <= 0) {
+ iou = 0.0;
+ } else {
+ const float intersection_ymin = std::max<float>(ymin_i, ymin_j);
+ const float intersection_xmin = std::max<float>(xmin_i, xmin_j);
+ const float intersection_ymax = std::min<float>(ymax_i, ymax_j);
+ const float intersection_xmax = std::min<float>(xmax_i, xmax_j);
+ const float intersection_area =
+ std::max<float>(intersection_ymax - intersection_ymin, 0.0) *
+ std::max<float>(intersection_xmax - intersection_xmin, 0.0);
+ iou = intersection_area / (area_i + area_j - intersection_area);
+ }
+ iou_overlaps[i * num_boxes + j] = iou;
+ }
+ }
+
+ AddInputFromArray<float>(TensorShape({static_cast<signed>(num_boxes),
+ static_cast<signed>(num_boxes)}),
+ iou_overlaps);
+ }
+};
+
+TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectFromThreeClusters) {
+ MakeOp();
+ AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
+ 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
+ AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
+ AddInputFromArray<int>(TensorShape({}), {3});
+ AddInputFromArray<float>(TensorShape({}), {.5f});
+ AddInputFromArray<float>(TensorShape({}), {0.0f});
+ TF_ASSERT_OK(RunOpKernel());
+
+ Tensor expected(allocator(), DT_INT32, TensorShape({3}));
+ test::FillValues<int>(&expected, {3, 0, 5});
+ test::ExpectTensorEqual<int>(expected, *GetOutput(0));
+}
+
+TEST_F(NonMaxSuppressionWithOverlapsOpTest,
+ TestSelectFromThreeClustersFlippedCoordinates) {
+ MakeOp();
+ AddIoUInput({1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
+ 0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
+ AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
+ AddInputFromArray<int>(TensorShape({}), {3});
+ AddInputFromArray<float>(TensorShape({}), {.5f});
+ AddInputFromArray<float>(TensorShape({}), {0.0f});
+ TF_ASSERT_OK(RunOpKernel());
+
+ Tensor expected(allocator(), DT_INT32, TensorShape({3}));
+ test::FillValues<int>(&expected, {3, 0, 5});
+ test::ExpectTensorEqual<int>(expected, *GetOutput(0));
+}
+
+TEST_F(NonMaxSuppressionWithOverlapsOpTest,
+ TestSelectAtMostTwoBoxesFromThreeClusters) {
+ MakeOp();
+ AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
+ 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
+ AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
+ AddInputFromArray<int>(TensorShape({}), {2});
+ AddInputFromArray<float>(TensorShape({}), {.5f});
+ AddInputFromArray<float>(TensorShape({}), {0.0f});
+ TF_ASSERT_OK(RunOpKernel());
+
+ Tensor expected(allocator(), DT_INT32, TensorShape({2}));
+ test::FillValues<int>(&expected, {3, 0});
+ test::ExpectTensorEqual<int>(expected, *GetOutput(0));
+}
+
+TEST_F(NonMaxSuppressionWithOverlapsOpTest,
+ TestSelectAtMostThirtyBoxesFromThreeClusters) {
+ MakeOp();
+ AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
+ 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
+ AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
+ AddInputFromArray<int>(TensorShape({}), {30});
+ AddInputFromArray<float>(TensorShape({}), {.5f});
+ AddInputFromArray<float>(TensorShape({}), {0.0f});
+ TF_ASSERT_OK(RunOpKernel());
+
+ Tensor expected(allocator(), DT_INT32, TensorShape({3}));
+ test::FillValues<int>(&expected, {3, 0, 5});
+ test::ExpectTensorEqual<int>(expected, *GetOutput(0));
+}
+
+TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectSingleBox) {
+ MakeOp();
+ AddIoUInput({0, 0, 1, 1});
+ AddInputFromArray<float>(TensorShape({1}), {.9f});
+ AddInputFromArray<int>(TensorShape({}), {3});
+ AddInputFromArray<float>(TensorShape({}), {.5f});
+ AddInputFromArray<float>(TensorShape({}), {0.0f});
+ TF_ASSERT_OK(RunOpKernel());
+
+ Tensor expected(allocator(), DT_INT32, TensorShape({1}));
+ test::FillValues<int>(&expected, {0});
+ test::ExpectTensorEqual<int>(expected, *GetOutput(0));
+}
+
+TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectFromTenIdenticalBoxes) {
+ MakeOp();
+
+ int num_boxes = 10;
+ std::vector<float> corners(num_boxes * 4);
+ std::vector<float> scores(num_boxes);
+ for (int i = 0; i < num_boxes; ++i) {
+ corners[i * 4 + 0] = 0;
+ corners[i * 4 + 1] = 0;
+ corners[i * 4 + 2] = 1;
+ corners[i * 4 + 3] = 1;
+ scores[i] = .9;
+ }
+ AddIoUInput(corners);
+ AddInputFromArray<float>(TensorShape({num_boxes}), scores);
+ AddInputFromArray<int>(TensorShape({}), {3});
+ AddInputFromArray<float>(TensorShape({}), {.5f});
+ AddInputFromArray<float>(TensorShape({}), {0.0f});
+ TF_ASSERT_OK(RunOpKernel());
+
+ Tensor expected(allocator(), DT_INT32, TensorShape({1}));
+ test::FillValues<int>(&expected, {0});
+ test::ExpectTensorEqual<int>(expected, *GetOutput(0));
+}
+
+TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestInconsistentBoxAndScoreShapes) {
+ MakeOp();
+ AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
+ 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
+ AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});
+ AddInputFromArray<int>(TensorShape({}), {30});
+ AddInputFromArray<float>(TensorShape({}), {.5f});
+ AddInputFromArray<float>(TensorShape({}), {0.0f});
+ Status s = RunOpKernel();
+
+ ASSERT_FALSE(s.ok());
+ EXPECT_TRUE(
+ str_util::StrContains(s.ToString(), "scores has incompatible shape"))
+ << s;
+}
+
+TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestInvalidOverlapsShape) {
+ MakeOp();
+ AddInputFromArray<float>(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0});
+ AddInputFromArray<float>(TensorShape({2}), {0.5f, 0.5f});
+ AddInputFromArray<int>(TensorShape({}), {30});
+ AddInputFromArray<float>(TensorShape({}), {0.f});
+ AddInputFromArray<float>(TensorShape({}), {0.0f});
+ Status s = RunOpKernel();
+
+ ASSERT_FALSE(s.ok());
+ EXPECT_TRUE(str_util::StrContains(s.ToString(), "overlaps must be square"))
+ << s;
+}
+
+TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestThresholdGreaterOne) {
+ MakeOp();
+ AddIoUInput({0, 0, 1, 1});
+ AddInputFromArray<float>(TensorShape({1}), {.9f});
+ AddInputFromArray<int>(TensorShape({}), {3});
+ AddInputFromArray<float>(TensorShape({}), {1.2f});
+ AddInputFromArray<float>(TensorShape({}), {0.0f});
+ TF_ASSERT_OK(RunOpKernel());
+}
+
+TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestThresholdSmallerZero) {
+ MakeOp();
+ AddIoUInput({0, 0, 1, 1});
+ AddInputFromArray<float>(TensorShape({1}), {.9f});
+ AddInputFromArray<int>(TensorShape({}), {3});
+ AddInputFromArray<float>(TensorShape({}), {-0.2f});
+ AddInputFromArray<float>(TensorShape({}), {0.0f});
+ TF_ASSERT_OK(RunOpKernel());
+}
+
+TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestEmptyInput) {
+ MakeOp();
+ AddIoUInput({});
+ AddInputFromArray<float>(TensorShape({0}), {});
+ AddInputFromArray<int>(TensorShape({}), {30});
+ AddInputFromArray<float>(TensorShape({}), {.5f});
+ AddInputFromArray<float>(TensorShape({}), {0.0f});
+ TF_ASSERT_OK(RunOpKernel());
+
+ Tensor expected(allocator(), DT_INT32, TensorShape({0}));
+ test::FillValues<int>(&expected, {});
+ test::ExpectTensorEqual<int>(expected, *GetOutput(0));
+}
+
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/pad_op.cc b/tensorflow/core/kernels/pad_op.cc
index 41494f56c5..3b9133ed7e 100644
--- a/tensorflow/core/kernels/pad_op.cc
+++ b/tensorflow/core/kernels/pad_op.cc
@@ -320,7 +320,7 @@ namespace functor {
DECLARE_GPU_SPEC(T, 5); \
DECLARE_GPU_SPEC(T, 6);
-TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPECS);
+TF_CALL_GPU_ALL_TYPES(DECLARE_GPU_SPECS);
TF_CALL_int8(DECLARE_GPU_SPECS);
} // namespace functor
@@ -353,7 +353,7 @@ TF_CALL_int8(DECLARE_GPU_SPECS);
.HostMemory("constant_values"), \
PadOp<GPUDevice, T, int64>)
-TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_KERNEL);
+TF_CALL_GPU_ALL_TYPES(REGISTER_GPU_KERNEL);
TF_CALL_int8(REGISTER_GPU_KERNEL);
// A special GPU kernel for int32.
diff --git a/tensorflow/core/kernels/pad_op_gpu.cu.cc b/tensorflow/core/kernels/pad_op_gpu.cu.cc
index 8e13e19e2e..00ec44adc2 100644
--- a/tensorflow/core/kernels/pad_op_gpu.cu.cc
+++ b/tensorflow/core/kernels/pad_op_gpu.cu.cc
@@ -39,7 +39,7 @@ typedef Eigen::GpuDevice GPUDevice;
DEFINE_GPU_PAD_SPECS(T, int32) \
DEFINE_GPU_PAD_SPECS(T, int64)
-TF_CALL_GPU_NUMBER_TYPES(DEFINE_GPU_SPECS);
+TF_CALL_GPU_ALL_TYPES(DEFINE_GPU_SPECS);
TF_CALL_int8(DEFINE_GPU_SPECS);
} // namespace tensorflow
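
The pad_op change swaps one type-list macro for a wider one; nothing else moves because the kernels are declared through an X-macro. An illustrative sketch with made-up macro names (the real TF_CALL_GPU_ALL_TYPES list extends the numeric list, e.g. with bool):

    #include <iostream>

    // Per-type macro applied once for every type in the list.
    #define DECLARE_PAD_KERNEL(T) \
      void PadKernel_##T() { std::cout << "registered " #T "\n"; }

    // Illustrative type lists standing in for the TF_CALL_* macros.
    #define CALL_GPU_NUMBER_TYPES(m) m(float) m(double)
    #define CALL_GPU_ALL_TYPES(m) m(float) m(double) m(bool)

    CALL_GPU_ALL_TYPES(DECLARE_PAD_KERNEL)

    int main() {
      PadKernel_float();
      PadKernel_double();
      PadKernel_bool();  // newly covered once the wider list is used
    }
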
diff --git a/tensorflow/core/kernels/partitioned_function_ops.cc b/tensorflow/core/kernels/partitioned_function_ops.cc
index b6ee808091..b5c6ba1da3 100644
--- a/tensorflow/core/kernels/partitioned_function_ops.cc
+++ b/tensorflow/core/kernels/partitioned_function_ops.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_constructor.h"
#include "tensorflow/core/graph/graph_partition.h"
@@ -42,8 +43,7 @@ namespace {
// TODO(akshayka): Support distributed execution.
class PartitionedCallOp : public AsyncOpKernel {
public:
- explicit PartitionedCallOp(OpKernelConstruction* ctx)
- : AsyncOpKernel(ctx), local_device_name_(ctx->device()->name()) {
+ explicit PartitionedCallOp(OpKernelConstruction* ctx) : AsyncOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("f", &func_));
}
@@ -55,6 +55,9 @@ class PartitionedCallOp : public AsyncOpKernel {
errors::Internal("No function library is provided."),
done);
+ OpInputList args;
+ OP_REQUIRES_OK_ASYNC(ctx, ctx->input_list("args", &args), done);
+
// The function body's graph is placed and partitioned the first time
// `ComputeAsync` is invoked; every subsequent invocation calls each
// of the function shards yielded by partitioning.
@@ -67,16 +70,35 @@ class PartitionedCallOp : public AsyncOpKernel {
// via, e.g., virtual device annotations and a list of device names supplied
// through an attribute.
//
- // TODO(akshayka): Lift the constraint pinning inputs and outputs to the
- // local device.
- //
// TODO(akshayka): Add a fastpath for functions that execute on a single
// device.
{
mutex_lock l(mu_);
- if (!partitioned_) {
- auto graph = tensorflow::MakeUnique<Graph>(OpRegistry::Global());
- OP_REQUIRES_OK_ASYNC(ctx, GetGraphFromFunction(lib, graph.get()), done);
+ if (function_handles_.find(lib) == function_handles_.end()) {
+ if (local_device_name_.empty()) {
+ // The full local device name isn't known at kernel construction
+ // time, hence the need to set it here.
+ local_device_name_ = lib->device()->name();
+ }
+
+ // TODO(b/37549631): Because this kernel may correspond to a stateful
+ // op, it may be shared by multiple subgraphs, which in turn may have
+ // different `FunctionLibraryRuntime` objects and therefore different
+ // `FHandle` namespaces. As such, we partition on a per-FLR basis.
+ FunctionLibraryRuntime::InstantiateOptions opts;
+ FHandle handle;
+ OP_REQUIRES_OK_ASYNC(
+ ctx,
+ lib->Instantiate(func_.name(), AttrSlice(&func_.attr()), opts,
+ &handle),
+ done);
+ const FunctionBody* fbody = lib->GetFunctionBody(handle);
+ OP_REQUIRES_ASYNC(ctx, fbody != nullptr,
+ errors::Internal("Could not find handle ", handle),
+ done);
+ auto graph = tensorflow::MakeUnique<Graph>(fbody->graph->flib_def());
+ CopyGraph(*fbody->graph, graph.get());
+ OP_REQUIRES_OK_ASYNC(ctx, PinResourceArgs(graph.get(), args), done);
DeviceSet device_set;
for (auto d : lib->device_mgr()->ListDevices()) {
@@ -94,9 +116,14 @@ class PartitionedCallOp : public AsyncOpKernel {
// an OpKernel, so functions are instantiated in an overlay library.
overlay_lib_.reset(new FunctionLibraryDefinition(
*lib->GetFunctionLibraryDefinition()));
+ auto handles = tensorflow::MakeUnique<gtl::FlatMap<string, FHandle>>();
for (const auto& pair : subgraphs) {
+ // TODO(akshayka): Fail gracefully if the set of devices corresponds
+ // to more than one address space.
const string& target = pair.first;
const auto& subgraph = pair.second;
+ OP_REQUIRES_OK_ASYNC(
+ ctx, UpdateArgAndRetMetadata(target, subgraph.get()), done);
FunctionDef shard;
string unique_name = UniquifyFunctionName(func_.name());
OP_REQUIRES_OK_ASYNC(
@@ -111,40 +138,38 @@ class PartitionedCallOp : public AsyncOpKernel {
lib->Instantiate(unique_name, AttrSlice(&shard.attr()), opts,
&handle),
done);
- function_handles_.emplace(target, handle);
+ handles->emplace(target, handle);
}
- partitioned_ = true;
+
+ function_handles_.emplace(lib, std::move(handles));
}
}
- ExecuteFunctions(lib, ctx, std::move(done));
+ ExecuteFunctions(lib, ctx, args, std::move(done));
}
private:
typedef std::pair<string, FHandle> DeviceAndFHandle;
+ typedef std::pair<std::vector<int>, std::vector<int>> ArgAndRetIndices;
+ typedef std::pair<std::vector<AllocatorAttributes>,
+ std::vector<AllocatorAttributes>>
+ ArgAndRetAllocAttrs;
- // `func_` encapsulates the original, unsharded function.
- // Copies the graph backing `func_` into `*graph`, pinning the input and
- // output nodes to the local device.
- //
- // `*graph` must be a freshly allocated graph.
- Status GetGraphFromFunction(FunctionLibraryRuntime* lib, Graph* graph) {
- FunctionLibraryRuntime::InstantiateOptions opts;
- FHandle handle;
- TF_RETURN_IF_ERROR(lib->Instantiate(func_.name(), AttrSlice(&func_.attr()),
- opts, &handle));
- const FunctionBody* fbody = lib->GetFunctionBody(handle);
- if (fbody == nullptr) {
- return errors::Internal("Could not find handle ", handle);
- }
- CopyGraph(*fbody->graph, graph);
-
- // Pin the inputs and outputs to the local device to simplify the
- // function-dispatching logic.
+ // Pins each arg that emits a `DT_RESOURCE` tensor to the device on which the
+ // corresponding resource lives. This ensures that the Placer assigns ops that
+ // access these resources to the appropriate devices.
+ Status PinResourceArgs(Graph* graph, const OpInputList& args) {
for (Node* node : graph->op_nodes()) {
string node_type = node->type_string();
- if (node_type == FunctionLibraryDefinition::kArgOp ||
- node_type == FunctionLibraryDefinition::kRetOp) {
- node->set_assigned_device_name(local_device_name_);
+ if (node_type == FunctionLibraryDefinition::kArgOp) {
+ const AttrValue* attr_value;
+ TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
+ int index = attr_value->i();
+ TF_RETURN_IF_ERROR(node->attrs().Find("T", &attr_value));
+ DataType dtype = attr_value->type();
+ if (dtype == DT_RESOURCE) {
+ ResourceHandle handle = args[index].flat<ResourceHandle>()(0);
+ node->set_assigned_device_name(handle.device());
+ }
}
}
return Status::OK();
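
PinResourceArgs above assigns every _Arg node carrying a DT_RESOURCE tensor to the device recorded in the incoming resource handle, so the placer keeps ops touching that resource on its home device. A minimal sketch with stand-in types, not the TensorFlow graph API:

    #include <iostream>
    #include <string>
    #include <vector>

    struct ResourceHandle { std::string device; };

    struct ArgNode {
      int index;
      bool is_resource;
      std::string assigned_device;
    };

    void PinResourceArgs(std::vector<ArgNode>& args,
                         const std::vector<ResourceHandle>& handles) {
      for (ArgNode& arg : args) {
        if (arg.is_resource) {
          // Pin the arg to the device where the resource lives.
          arg.assigned_device = handles[arg.index].device;
        }
      }
    }

    int main() {
      std::vector<ArgNode> args = {{0, true, ""}, {1, false, ""}};
      PinResourceArgs(args, {{"/device:GPU:0"}, {""}});
      std::cout << args[0].assigned_device << "\n";  // /device:GPU:0
    }
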
@@ -198,9 +223,104 @@ class PartitionedCallOp : public AsyncOpKernel {
return Status::OK();
}
- // Executes the partitioned functions.
+ // Each subgraph produced by partitioning the function body contains a subset
+ // of the original `Arg` and `Retval` nodes. This function performs
+ // bookkeeping to track which `Arg` and `Retval` nodes were placed on a
+ // particular device / subgraph.
+ //
+ // More specifically, this function
+ // (1) rewrites the indices of the `Arg` and `Retval` nodes placed on a
+ // particular device,
+ // (2) records the subsets of `Arg` and `Retval` nodes assigned to the
+ // device, and
+ // (3) records which `Arg` and `Retval` nodes live in host memory.
+ Status UpdateArgAndRetMetadata(const string& device, Graph* subgraph) {
+ if (arg_and_ret_indices_.find(device) != arg_and_ret_indices_.end()) {
+ // This function has already been partitioned, albeit for a different
+ // function library.
+ return Status::OK();
+ }
+
+ ArgAndRetIndices indices;
+ std::vector<int>* arg_indices = &indices.first;
+ std::vector<int>* ret_indices = &indices.second;
+ std::vector<std::pair<Node*, int>> arg_nodes;
+ std::vector<std::pair<Node*, int>> ret_nodes;
+ const AttrValue* attr_value;
+
+ for (Node* node : subgraph->op_nodes()) {
+ string node_type = node->type_string();
+ if (node_type == FunctionLibraryDefinition::kArgOp) {
+ TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
+ int index = attr_value->i();
+ arg_indices->push_back(index);
+ arg_nodes.push_back(std::make_pair(node, index));
+ } else if (node_type == FunctionLibraryDefinition::kRetOp) {
+ TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
+ int index = attr_value->i();
+ ret_indices->push_back(index);
+ ret_nodes.push_back(std::make_pair(node, index));
+ }
+ }
+
+ auto sort_by_index = [](std::pair<Node*, int> one,
+ std::pair<Node*, int> two) -> bool {
+ return one.second < two.second;
+ };
+ std::sort(arg_nodes.begin(), arg_nodes.end(), sort_by_index);
+ std::sort(ret_nodes.begin(), ret_nodes.end(), sort_by_index);
+ for (int i = 0; i < arg_nodes.size(); ++i) {
+ Node* arg = arg_nodes[i].first;
+ arg->AddAttr("index", i);
+ TF_RETURN_IF_ERROR(arg->attrs().Find("T", &attr_value));
+ AllocatorAttributes alloc_attr;
+ DataType type = attr_value->type();
+ if (MTypeFromDType(type) == HOST_MEMORY) {
+ alloc_attr.set_on_host(true);
+ }
+ arg_and_ret_alloc_attrs_[device].first.push_back(alloc_attr);
+ }
+ for (int i = 0; i < ret_nodes.size(); ++i) {
+ Node* ret = ret_nodes[i].first;
+ ret->AddAttr("index", i);
+ TF_RETURN_IF_ERROR(ret->attrs().Find("T", &attr_value));
+ AllocatorAttributes alloc_attr;
+ DataType type = attr_value->type();
+ if (MTypeFromDType(type) == HOST_MEMORY) {
+ alloc_attr.set_on_host(true);
+ }
+ arg_and_ret_alloc_attrs_[device].second.push_back(alloc_attr);
+ }
+
+ arg_and_ret_indices_.emplace(device, indices);
+ return Status::OK();
+ }
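  // Worked example of the rewrite above (devices and assignments are
  // hypothetical): an original function with args 0..3, where partitioning
  // puts args {0, 2} on "CPU:0" and {1, 3} on "GPU:0". Each subgraph's _Arg
  // nodes are renumbered locally to 0, 1, and the original positions are
  // recorded so inputs can be routed later:
  //   arg_and_ret_indices_["CPU:0"].first == {0, 2}  // local 0 -> global 0
  //   arg_and_ret_indices_["GPU:0"].first == {1, 3}  // local 1 -> global 3
  // GetArgsForIndices below uses these vectors to slice the op's inputs
  // into per-device argument lists.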
+
+ std::vector<Tensor> GetArgsForIndices(const std::vector<int>& indices,
+ const OpInputList& arguments) {
+ std::vector<Tensor> args;
+ args.reserve(indices.size());
+ for (int i : indices) {
+ args.push_back(arguments[i]);
+ }
+ return args;
+ }
+
void ExecuteFunctions(FunctionLibraryRuntime* lib, OpKernelContext* ctx,
- DoneCallback done) LOCKS_EXCLUDED(mu_) {
+ const OpInputList& op_args, DoneCallback done)
+ LOCKS_EXCLUDED(mu_) {
+ const gtl::FlatMap<string, FHandle>* handles;
+ {
+ mutex_lock l(mu_);
+ handles = function_handles_[lib].get();
+ }
+ if (handles->empty()) {
+ // Trivial case where the function body is empty.
+ ctx->SetStatus(Status::OK());
+ done();
+ return;
+ }
+
FunctionLibraryRuntime::Options opts;
opts.step_id = ctx->step_id();
opts.step_container = ctx->step_container();
@@ -215,11 +335,6 @@ class PartitionedCallOp : public AsyncOpKernel {
Rendezvous* rendez = new IntraProcessRendezvous(lib->device_mgr());
opts.rendezvous = rendez;
- OpInputList arguments;
- OP_REQUIRES_OK_ASYNC(ctx, ctx->input_list("args", &arguments), done);
- // Dummy args vector for the remote shards, which do not have inputs.
- std::vector<Tensor> dummy_args;
-
StatusCallback callback = std::bind(
[](Rendezvous* rendez, DoneCallback& done, const Status& status) {
rendez->Unref();
@@ -227,48 +342,62 @@ class PartitionedCallOp : public AsyncOpKernel {
},
rendez, std::move(done), std::placeholders::_1);
auto* refcounted_done = new ReffedStatusCallback(std::move(callback));
- for (int i = 1; i < function_handles_.size(); ++i) {
+ for (int i = 1; i < handles->size(); ++i) {
refcounted_done->Ref();
}
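    // Note on the ref-counting just above: refcounted_done starts with one
    // reference, and the loop adds handles->size() - 1 more, one per extra
    // shard. Each shard's completion callback calls Unref(), so the
    // rendezvous is released and `done` runs exactly once, after the last
    // shard finishes.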
- for (const auto& pair : function_handles_) {
- const string& target_device = pair.first;
+ for (const auto& pair : *handles) {
+ const string& target = pair.first;
FHandle handle = pair.second;
- VLOG(3) << "Running function shard on device " << target_device;
- if (target_device == local_device_name_) {
+ VLOG(3) << "Running function shard on device " << target;
+ ArgAndRetIndices indices = arg_and_ret_indices_[target];
+ ArgAndRetAllocAttrs alloc_attrs = arg_and_ret_alloc_attrs_[target];
+ const std::vector<int>& arg_indices = indices.first;
+ const std::vector<int>& ret_indices = indices.second;
+ opts.args_alloc_attrs = alloc_attrs.first;
+ opts.rets_alloc_attrs = alloc_attrs.second;
+ if (target == local_device_name_) {
opts.remote_execution = false;
- std::vector<Tensor> args;
- args.reserve(arguments.size());
- for (const Tensor& argument : arguments) {
- args.push_back(argument);
- }
- auto* rets = new std::vector<Tensor>;
- lib->Run(opts, handle, args, rets,
- [rets, refcounted_done, ctx](const Status& status) {
- if (!status.ok()) {
- ctx->SetStatus(status);
- } else {
- for (int i = 0; i < rets->size(); ++i) {
- ctx->set_output(i, (*rets)[i]);
- }
- }
- delete rets;
- refcounted_done->Unref();
- });
+ std::vector<Tensor> args = GetArgsForIndices(arg_indices, op_args);
+ std::vector<Tensor>* rets = new std::vector<Tensor>;
+ lib->Run(
+ opts, handle, args, rets,
+ [rets, ret_indices, refcounted_done, ctx](const Status& status) {
+ if (!status.ok()) {
+ VLOG(3) << "Local execution failed: " << status;
+ ctx->SetStatus(status);
+ } else {
+ for (int i = 0; i < rets->size(); ++i) {
+ ctx->set_output(ret_indices[i], (*rets)[i]);
+ }
+ }
+ delete rets;
+ VLOG(3) << "Finished local execution.";
+ refcounted_done->Unref();
+ });
} else {
opts.remote_execution = true;
- std::vector<Tensor>* dummy_rets = new std::vector<Tensor>;
- lib->Run(opts, handle, dummy_args, dummy_rets,
- [dummy_rets, refcounted_done, ctx](const Status& status) {
- if (!status.ok()) {
- ctx->SetStatus(status);
- }
- delete dummy_rets;
- refcounted_done->Unref();
- });
+ std::vector<Tensor> args = GetArgsForIndices(arg_indices, op_args);
+ std::vector<Tensor>* rets = new std::vector<Tensor>;
+ lib->Run(
+ opts, handle, args, rets,
+ [rets, ret_indices, refcounted_done, ctx](const Status& status) {
+ if (!status.ok()) {
+ VLOG(3) << "Remote execution failed: " << status;
+ ctx->SetStatus(status);
+ } else {
+ for (int i = 0; i < rets->size(); ++i) {
+ ctx->set_output(ret_indices[i], (*rets)[i]);
+ }
+ }
+ delete rets;
+ VLOG(3) << "Finished remote execution.";
+ refcounted_done->Unref();
+ });
}
}
}
+
string UniquifyFunctionName(const string& name) {
for (;; ++suffix_) {
const string candidate = strings::StrCat(name, "_", suffix_);
@@ -279,26 +408,40 @@ class PartitionedCallOp : public AsyncOpKernel {
}
NameAttrList func_;
- const string local_device_name_;
+ string local_device_name_;
// Function shards are added to `overlay_lib_`.
std::unique_ptr<FunctionLibraryDefinition> overlay_lib_;
- // A map from device names to handles of function shards; this map is
- // read-only after the first execution of the OpKernel.
- gtl::FlatMap<string, FHandle> function_handles_;
+ // Contains maps from device names to handles of function shards, keyed by
+ // FunctionLibraryRuntime pointers. (Because this kernel may be instantiated
+ // for a stateful op, different invocations of it may use different FLRs.)
+ gtl::FlatMap<FunctionLibraryRuntime*,
+ std::unique_ptr<gtl::FlatMap<string, FHandle>>>
+ function_handles_ GUARDED_BY(mu_);
+ // Map from device name to the indices of the arguments and return values
+ // placed on that device. Read-only after the first invocation.
+ gtl::FlatMap<string, ArgAndRetIndices> arg_and_ret_indices_;
+ // Map from device name to alloc attrs for arguments and return values of the
+ // function placed on that device. Read-only after the first invocation.
+ gtl::FlatMap<string, ArgAndRetAllocAttrs> arg_and_ret_alloc_attrs_;
mutex mu_;
- bool partitioned_ GUARDED_BY(mu_) = false;
// Used to uniquify function names in `overlay_lib_`.
uint32 suffix_ = 0;
};
REGISTER_KERNEL_BUILDER(Name("PartitionedCall").Device(DEVICE_CPU),
PartitionedCallOp);
+REGISTER_KERNEL_BUILDER(Name("StatefulPartitionedCall").Device(DEVICE_CPU),
+ PartitionedCallOp);
REGISTER_KERNEL_BUILDER(Name("PartitionedCall").Device(DEVICE_GPU),
PartitionedCallOp);
+REGISTER_KERNEL_BUILDER(Name("StatefulPartitionedCall").Device(DEVICE_GPU),
+ PartitionedCallOp);
#if TENSORFLOW_USE_SYCL
REGISTER_KERNEL_BUILDER(Name("PartitionedCall").Device(DEVICE_SYCL),
PartitionedCallOp);
+REGISTER_KERNEL_BUILDER(Name("StatefulPartitionedCall").Device(DEVICE_SYCL),
+ PartitionedCallOp);
#endif // TENSORFLOW_USE_SYCL
} // namespace
diff --git a/tensorflow/core/kernels/quantize_and_dequantize_op.h b/tensorflow/core/kernels/quantize_and_dequantize_op.h
index 906d507c8a..782263e4e9 100644
--- a/tensorflow/core/kernels/quantize_and_dequantize_op.h
+++ b/tensorflow/core/kernels/quantize_and_dequantize_op.h
@@ -47,9 +47,13 @@ struct QuantizeAndDequantizeOneScaleImpl {
if (!range_given) {
input_min.device(d) = input.minimum();
input_max.device(d) = input.maximum();
+ d.memcpyDeviceToHost(&min_range, input_min.data(), sizeof(T));
+ d.memcpyDeviceToHost(&max_range, input_max.data(), sizeof(T));
+ } else {
+ // Copy the range values from their respective tensors on the host.
+ min_range = input_min_tensor->scalar<T>()();
+ max_range = input_max_tensor->scalar<T>()();
}
- d.memcpyDeviceToHost(&min_range, input_min.data(), sizeof(T));
- d.memcpyDeviceToHost(&max_range, input_max.data(), sizeof(T));
// Calculate the range for the simulated integer quantization:
// e.g. [-128,127] for signed = true, num_bits = 8,
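
A minimal, self-contained sketch of the quantized range the comment above refers to (the function and variable names here are illustrative, not the kernel's):

    #include <cstdint>

    // Bounds of the simulated integer range for num_bits bits:
    //   signed   -> [-(2^(num_bits-1)), 2^(num_bits-1) - 1]
    //   unsigned -> [0, 2^num_bits - 1]
    // e.g. signed_input = true, num_bits = 8  ->  [-128, 127].
    void QuantizedBounds(bool signed_input, int num_bits,
                         int64_t* min_q, int64_t* max_q) {
      if (signed_input) {
        *min_q = -(int64_t{1} << (num_bits - 1));
        *max_q = (int64_t{1} << (num_bits - 1)) - 1;
      } else {
        *min_q = 0;
        *max_q = (int64_t{1} << num_bits) - 1;
      }
    }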
diff --git a/tensorflow/core/kernels/queue_op.cc b/tensorflow/core/kernels/queue_op.cc
new file mode 100644
index 0000000000..53f431ef3c
--- /dev/null
+++ b/tensorflow/core/kernels/queue_op.cc
@@ -0,0 +1,367 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/kernels/queue_op.h"
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/queue_interface.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/framework/tensor_shape.h"
+#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/lib/core/errors.h"
+#include "tensorflow/core/platform/macros.h"
+#include "tensorflow/core/platform/types.h"
+
+namespace tensorflow {
+
+QueueOp::QueueOp(OpKernelConstruction* context) : ResourceOpKernel(context) {
+ OP_REQUIRES_OK(context, context->GetAttr("capacity", &capacity_));
+ if (capacity_ < 0) {
+ capacity_ = QueueBase::kUnbounded;
+ }
+ OP_REQUIRES_OK(context,
+ context->GetAttr("component_types", &component_types_));
+}
+
+void QueueOp::Compute(OpKernelContext* context) {
+ ResourceOpKernel<QueueInterface>::Compute(context);
+ mutex_lock l(mu_);
+ if (resource_ && context->track_allocations()) {
+ context->record_persistent_memory_allocation(resource_->MemoryUsed());
+ }
+}
+
+Status QueueOp::VerifyResource(QueueInterface* queue) {
+ return queue->MatchesNodeDef(def());
+}
+
+QueueOpKernel::QueueOpKernel(OpKernelConstruction* context)
+ : AsyncOpKernel(context) {}
+
+void QueueOpKernel::ComputeAsync(OpKernelContext* ctx, DoneCallback callback) {
+ QueueInterface* queue;
+ if (ctx->input_dtype(0) == DT_RESOURCE) {
+ OP_REQUIRES_OK_ASYNC(
+ ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &queue), callback);
+ } else {
+ OP_REQUIRES_OK_ASYNC(ctx, GetResourceFromContext(ctx, "handle", &queue),
+ callback);
+ }
+ ComputeAsync(ctx, queue, [callback, queue]() {
+ queue->Unref();
+ callback();
+ });
+}
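// Note: input 0 is either a DT_RESOURCE handle (the V2 queue ops) or a
// DT_STRING_REF handle (the original ref-typed queue ops); both lookup
// paths above take a reference on the queue, which the wrapping of
// `callback` releases via Unref() once the subclass's ComputeAsync
// completes.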
+
+QueueAccessOpKernel::QueueAccessOpKernel(OpKernelConstruction* context)
+ : QueueOpKernel(context) {
+ OP_REQUIRES_OK(context, context->GetAttr("timeout_ms", &timeout_));
+ // TODO(keveman): Enable timeout.
+ OP_REQUIRES(context, timeout_ == -1,
+ errors::InvalidArgument("Timeout not supported yet."));
+}
+
+// Defines an EnqueueOp, the execution of which enqueues a tuple of
+// tensors in the given Queue.
+//
+// The op has 1 + k inputs, where k is the number of components in the
+// tuples stored in the given Queue:
+// - Input 0: queue handle.
+// - Input 1: 0th element of the tuple.
+// - ...
+// - Input k: (k-1)th element of the tuple.
+EnqueueOp::EnqueueOp(OpKernelConstruction* context)
+ : QueueAccessOpKernel(context) {}
+
+void EnqueueOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) {
+ DataTypeVector expected_inputs;
+ if (ctx->input_dtype(0) == DT_RESOURCE) {
+ expected_inputs.push_back(DT_RESOURCE);
+ } else {
+ expected_inputs.push_back(DT_STRING_REF);
+ }
+ for (DataType dt : queue->component_dtypes()) {
+ expected_inputs.push_back(dt);
+ }
+ OP_REQUIRES_OK_ASYNC(ctx, ctx->MatchSignature(expected_inputs, {}), callback);
+
+ QueueInterface::Tuple tuple;
+ OpInputList components;
+ OP_REQUIRES_OK_ASYNC(ctx, ctx->input_list("components", &components),
+ callback);
+ for (const Tensor& Tcomponent : components) {
+ tuple.push_back(Tcomponent);
+ }
+
+ OP_REQUIRES_OK_ASYNC(ctx, queue->ValidateTuple(tuple), callback);
+ queue->TryEnqueue(tuple, ctx, callback);
+}
+
+// Defines an EnqueueManyOp, the execution of which slices each
+// component of a tuple of tensors along the 0th dimension, and
+// enqueues tuples of slices in the given Queue.
+//
+// The op has 1 + k inputs, where k is the number of components in the
+// tuples stored in the given Queue:
+// - Input 0: queue handle.
+// - Input 1: 0th element of the tuple.
+// - ...
+// - Input k: (k-1)th element of the tuple.
+//
+// N.B. All tuple components must have the same size in the 0th
+// dimension.
+EnqueueManyOp::EnqueueManyOp(OpKernelConstruction* context)
+ : QueueAccessOpKernel(context) {}
+
+void EnqueueManyOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) {
+ DataTypeVector expected_inputs;
+ if (ctx->input_dtype(0) == DT_RESOURCE) {
+ expected_inputs.push_back(DT_RESOURCE);
+ } else {
+ expected_inputs.push_back(DT_STRING_REF);
+ }
+ for (DataType dt : queue->component_dtypes()) {
+ expected_inputs.push_back(dt);
+ }
+ OP_REQUIRES_OK_ASYNC(ctx, ctx->MatchSignature(expected_inputs, {}), callback);
+
+ QueueInterface::Tuple tuple;
+ OpInputList components;
+ OP_REQUIRES_OK_ASYNC(ctx, ctx->input_list("components", &components),
+ callback);
+ for (const Tensor& Tcomponent : components) {
+ tuple.push_back(Tcomponent);
+ }
+
+ OP_REQUIRES_OK_ASYNC(ctx, queue->ValidateManyTuple(tuple), callback);
+ queue->TryEnqueueMany(tuple, ctx, callback);
+}
+
+EnqueueManyOp::~EnqueueManyOp() = default;
+
+// Defines a DequeueOp, the execution of which dequeues a tuple of
+// tensors from the given Queue.
+//
+// The op has one input, which is the handle of the appropriate
+// Queue. The op has k outputs, where k is the number of components in
+// the tuples stored in the given Queue, and output i is the ith
+// component of the dequeued tuple.
+DequeueOp::DequeueOp(OpKernelConstruction* context)
+ : QueueAccessOpKernel(context) {}
+
+void DequeueOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) {
+ if (ctx->input_dtype(0) == DT_RESOURCE) {
+ OP_REQUIRES_OK_ASYNC(
+ ctx, ctx->MatchSignature({DT_RESOURCE}, queue->component_dtypes()),
+ callback);
+ } else {
+ OP_REQUIRES_OK_ASYNC(
+ ctx, ctx->MatchSignature({DT_STRING_REF}, queue->component_dtypes()),
+ callback);
+ }
+
+ queue->TryDequeue(ctx, [ctx, callback](const QueueInterface::Tuple& tuple) {
+ if (!ctx->status().ok()) {
+ callback();
+ return;
+ }
+ OpOutputList output_components;
+ OP_REQUIRES_OK_ASYNC(
+ ctx, ctx->output_list("components", &output_components), callback);
+ for (int i = 0; i < ctx->num_outputs(); ++i) {
+ output_components.set(i, tuple[i]);
+ }
+ callback();
+ });
+}
+
+DequeueOp::~DequeueOp() = default;
+
+// Defines a DequeueManyOp, the execution of which concatenates the
+// requested number of elements from the given Queue along the 0th
+// dimension, and emits the result as a single tuple of tensors.
+//
+// The op has two inputs:
+// - Input 0: the handle to a queue.
+// - Input 1: the number of elements to dequeue.
+//
+// The op has k outputs, where k is the number of components in the
+// tuples stored in the given Queue, and output i is the ith component
+// of the dequeued tuple.
+DequeueManyOp::DequeueManyOp(OpKernelConstruction* context)
+ : QueueAccessOpKernel(context) {}
+
+void DequeueManyOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) {
+ const Tensor& Tnum_elements = ctx->input(1);
+ int32 num_elements = Tnum_elements.flat<int32>()(0);
+
+ OP_REQUIRES_ASYNC(ctx, num_elements >= 0,
+ errors::InvalidArgument("DequeueManyOp requested ",
+ num_elements, " < 0 elements"),
+ callback);
+
+ if (ctx->input_dtype(0) == DT_RESOURCE) {
+ OP_REQUIRES_OK_ASYNC(
+ ctx,
+ ctx->MatchSignature({DT_RESOURCE, DT_INT32}, queue->component_dtypes()),
+ callback);
+ } else {
+ OP_REQUIRES_OK_ASYNC(ctx,
+ ctx->MatchSignature({DT_STRING_REF, DT_INT32},
+ queue->component_dtypes()),
+ callback);
+ }
+
+ queue->TryDequeueMany(
+ num_elements, ctx, false /* allow_small_batch */,
+ [ctx, callback](const QueueInterface::Tuple& tuple) {
+ if (!ctx->status().ok()) {
+ callback();
+ return;
+ }
+ OpOutputList output_components;
+ OP_REQUIRES_OK_ASYNC(
+ ctx, ctx->output_list("components", &output_components), callback);
+ for (int i = 0; i < ctx->num_outputs(); ++i) {
+ output_components.set(i, tuple[i]);
+ }
+ callback();
+ });
+}
+
+DequeueManyOp::~DequeueManyOp() = default;
+
+// Defines a DequeueUpToOp, the execution of which concatenates the
+// requested number of elements from the given Queue along the 0th
+// dimension, and emits the result as a single tuple of tensors.
+//
+// The difference between this op and DequeueMany is the handling when
+// the Queue is closed. While the DequeueMany op will return an error
+// when there are fewer than num_elements elements left in the closed
+// queue, this op will return between 1 and
+// min(num_elements, elements_remaining_in_queue) elements, and will
+// not block.
+// If there are no elements left, then the standard DequeueMany error
+// is returned.
+//
+// This op only works if the underlying Queue implementation accepts
+// the allow_small_batch = true parameter to TryDequeueMany.
+// If it does not, an errors::Unimplemented error is returned.
+//
+// The op has two inputs:
+// - Input 0: the handle to a queue.
+// - Input 1: the number of elements to dequeue.
+//
+// The op has k outputs, where k is the number of components in the
+// tuples stored in the given Queue, and output i is the ith component
+// of the dequeued tuple.
+//
+// The op has one attribute: allow_small_batch. If the Queue supports
+// it, setting this to true causes the queue to return smaller
+// (possibly zero length) batches when it is closed, up to however
+// many elements are available when the op executes. In this case,
+// the Queue does not block when closed.
+DequeueUpToOp::DequeueUpToOp(OpKernelConstruction* context)
+ : QueueAccessOpKernel(context) {}
+
+void DequeueUpToOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) {
+ const Tensor& Tnum_elements = ctx->input(1);
+ int32 num_elements = Tnum_elements.flat<int32>()(0);
+
+ OP_REQUIRES_ASYNC(ctx, num_elements >= 0,
+ errors::InvalidArgument("DequeueUpToOp requested ",
+ num_elements, " < 0 elements"),
+ callback);
+
+ if (ctx->input_dtype(0) == DT_RESOURCE) {
+ OP_REQUIRES_OK_ASYNC(
+ ctx,
+ ctx->MatchSignature({DT_RESOURCE, DT_INT32}, queue->component_dtypes()),
+ callback);
+ } else {
+ OP_REQUIRES_OK_ASYNC(ctx,
+ ctx->MatchSignature({DT_STRING_REF, DT_INT32},
+ queue->component_dtypes()),
+ callback);
+ }
+
+ queue->TryDequeueMany(
+ num_elements, ctx, true /* allow_small_batch */,
+ [ctx, callback](const QueueInterface::Tuple& tuple) {
+ if (!ctx->status().ok()) {
+ callback();
+ return;
+ }
+ OpOutputList output_components;
+ OP_REQUIRES_OK_ASYNC(
+ ctx, ctx->output_list("components", &output_components), callback);
+ for (int i = 0; i < ctx->num_outputs(); ++i) {
+ output_components.set(i, tuple[i]);
+ }
+ callback();
+ });
+}
+
+DequeueUpToOp::~DequeueUpToOp() = default;
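// Concrete illustration of the closed-queue behavior described above
// (numbers hypothetical): a closed queue holding 2 elements receives a
// DequeueUpTo with num_elements = 4; it returns those 2 elements
// immediately and does not block, whereas DequeueMany would fail with the
// standard closed-queue error.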
+
+// Defines a QueueCloseOp, which closes the given Queue. Closing a
+// Queue signals that no more elements will be enqueued in it.
+//
+// The op has one input, which is the handle of the appropriate Queue.
+QueueCloseOp::QueueCloseOp(OpKernelConstruction* context)
+ : QueueOpKernel(context) {
+ OP_REQUIRES_OK(context, context->GetAttr("cancel_pending_enqueues",
+ &cancel_pending_enqueues_));
+}
+
+void QueueCloseOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) {
+ queue->Close(ctx, cancel_pending_enqueues_, callback);
+}
+
+// Defines a QueueSizeOp, which computes the number of elements in the
+// given Queue, and emits it as an output tensor.
+//
+// The op has one input, which is the handle of the appropriate Queue;
+// and one output, which is a single-element tensor containing the current
+// size of that Queue.
+QueueSizeOp::QueueSizeOp(OpKernelConstruction* context)
+ : QueueOpKernel(context) {}
+
+void QueueSizeOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) {
+ Tensor* Tqueue_size = nullptr;
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &Tqueue_size));
+ Tqueue_size->flat<int32>().setConstant(queue->size());
+ callback();
+}
+
+QueueIsClosedOp::QueueIsClosedOp(OpKernelConstruction* context)
+ : QueueOpKernel(context) {}
+
+void QueueIsClosedOp::ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) {
+ Tensor* Tqueue_is_closed = nullptr;
+ OP_REQUIRES_OK(ctx,
+ ctx->allocate_output(0, TensorShape({}), &Tqueue_is_closed));
+ Tqueue_is_closed->flat<bool>().setConstant(queue->is_closed());
+ callback();
+}
+
+} // namespace tensorflow
diff --git a/tensorflow/core/kernels/queue_op.h b/tensorflow/core/kernels/queue_op.h
index 6c19f9841c..2efd838a5f 100644
--- a/tensorflow/core/kernels/queue_op.h
+++ b/tensorflow/core/kernels/queue_op.h
@@ -13,12 +13,13 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_KERNELS_QUEUE_OP_H_
-#define TENSORFLOW_KERNELS_QUEUE_OP_H_
+#ifndef TENSORFLOW_CORE_KERNELS_QUEUE_OP_H_
+#define TENSORFLOW_CORE_KERNELS_QUEUE_OP_H_
#include <deque>
#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/queue_interface.h"
#include "tensorflow/core/framework/resource_op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
@@ -32,22 +33,9 @@ namespace tensorflow {
// Defines a QueueOp, an abstract class for Queue construction ops.
class QueueOp : public ResourceOpKernel<QueueInterface> {
public:
- QueueOp(OpKernelConstruction* context) : ResourceOpKernel(context) {
- OP_REQUIRES_OK(context, context->GetAttr("capacity", &capacity_));
- if (capacity_ < 0) {
- capacity_ = QueueBase::kUnbounded;
- }
- OP_REQUIRES_OK(context,
- context->GetAttr("component_types", &component_types_));
- }
+ QueueOp(OpKernelConstruction* context);
- void Compute(OpKernelContext* context) override {
- ResourceOpKernel<QueueInterface>::Compute(context);
- mutex_lock l(mu_);
- if (resource_ && context->track_allocations()) {
- context->record_persistent_memory_allocation(resource_->MemoryUsed());
- }
- }
+ void Compute(OpKernelContext* context) override;
protected:
// Variables accessible by subclasses
@@ -55,9 +43,7 @@ class QueueOp : public ResourceOpKernel<QueueInterface> {
DataTypeVector component_types_;
private:
- Status VerifyResource(QueueInterface* queue) override {
- return queue->MatchesNodeDef(def());
- }
+ Status VerifyResource(QueueInterface* queue) override;
};
class TypedQueueOp : public QueueOp {
@@ -75,6 +61,211 @@ class TypedQueueOp : public QueueOp {
}
};
+// Queue manipulator kernels
+
+class QueueOpKernel : public AsyncOpKernel {
+ public:
+ explicit QueueOpKernel(OpKernelConstruction* context);
+
+ void ComputeAsync(OpKernelContext* ctx, DoneCallback callback) final;
+
+ protected:
+ virtual void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) = 0;
+};
+
+class QueueAccessOpKernel : public QueueOpKernel {
+ public:
+ explicit QueueAccessOpKernel(OpKernelConstruction* context);
+
+ protected:
+ int64 timeout_;
+};
+
+// Defines an EnqueueOp, the execution of which enqueues a tuple of
+// tensors in the given Queue.
+//
+// The op has 1 + k inputs, where k is the number of components in the
+// tuples stored in the given Queue:
+// - Input 0: queue handle.
+// - Input 1: 0th element of the tuple.
+// - ...
+// - Input k: (k-1)th element of the tuple.
+class EnqueueOp : public QueueAccessOpKernel {
+ public:
+ explicit EnqueueOp(OpKernelConstruction* context);
+
+ protected:
+ void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) override;
+
+ private:
+ TF_DISALLOW_COPY_AND_ASSIGN(EnqueueOp);
+};
+
+// Defines an EnqueueManyOp, the execution of which slices each
+// component of a tuple of tensors along the 0th dimension, and
+// enqueues tuples of slices in the given Queue.
+//
+// The op has 1 + k inputs, where k is the number of components in the
+// tuples stored in the given Queue:
+// - Input 0: queue handle.
+// - Input 1: 0th element of the tuple.
+// - ...
+// - Input k: (k-1)th element of the tuple.
+//
+// N.B. All tuple components must have the same size in the 0th
+// dimension.
+class EnqueueManyOp : public QueueAccessOpKernel {
+ public:
+ explicit EnqueueManyOp(OpKernelConstruction* context);
+
+ protected:
+ void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) override;
+
+ ~EnqueueManyOp() override;
+
+ private:
+ TF_DISALLOW_COPY_AND_ASSIGN(EnqueueManyOp);
+};
+
+// Defines a DequeueOp, the execution of which dequeues a tuple of
+// tensors from the given Queue.
+//
+// The op has one input, which is the handle of the appropriate
+// Queue. The op has k outputs, where k is the number of components in
+// the tuples stored in the given Queue, and output i is the ith
+// component of the dequeued tuple.
+class DequeueOp : public QueueAccessOpKernel {
+ public:
+ explicit DequeueOp(OpKernelConstruction* context);
+
+ protected:
+ void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) override;
+
+ ~DequeueOp() override;
+
+ private:
+ TF_DISALLOW_COPY_AND_ASSIGN(DequeueOp);
+};
+
+// Defines a DequeueManyOp, the execution of which concatenates the
+// requested number of elements from the given Queue along the 0th
+// dimension, and emits the result as a single tuple of tensors.
+//
+// The op has two inputs:
+// - Input 0: the handle to a queue.
+// - Input 1: the number of elements to dequeue.
+//
+// The op has k outputs, where k is the number of components in the
+// tuples stored in the given Queue, and output i is the ith component
+// of the dequeued tuple.
+class DequeueManyOp : public QueueAccessOpKernel {
+ public:
+ explicit DequeueManyOp(OpKernelConstruction* context);
+
+ protected:
+ void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) override;
+
+ ~DequeueManyOp() override;
+
+ private:
+ TF_DISALLOW_COPY_AND_ASSIGN(DequeueManyOp);
+};
+
+// Defines a DequeueUpToOp, the execution of which concatenates the
+// requested number of elements from the given Queue along the 0th
+// dimension, and emits the result as a single tuple of tensors.
+//
+// The difference between this op and DequeueMany is the handling when
+// the Queue is closed. While the DequeueMany op will return an error
+// when there are fewer than num_elements elements left in the closed
+// queue, this op will return between 1 and
+// min(num_elements, elements_remaining_in_queue) elements, and will
+// not block.
+// If there are no elements left, then the standard DequeueMany error
+// is returned.
+//
+// This op only works if the underlying Queue implementation accepts
+// the allow_small_batch = true parameter to TryDequeueMany.
+// If it does not, an errors::Unimplemented error is returned.
+//
+// The op has two inputs:
+// - Input 0: the handle to a queue.
+// - Input 1: the number of elements to dequeue.
+//
+// The op has k outputs, where k is the number of components in the
+// tuples stored in the given Queue, and output i is the ith component
+// of the dequeued tuple.
+//
+// The op has one attribute: allow_small_batch. If the Queue supports
+// it, setting this to true causes the queue to return smaller
+// (possibly zero length) batches when it is closed, up to however
+// many elements are available when the op executes. In this case,
+// the Queue does not block when closed.
+class DequeueUpToOp : public QueueAccessOpKernel {
+ public:
+ explicit DequeueUpToOp(OpKernelConstruction* context);
+
+ protected:
+ void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) override;
+
+ ~DequeueUpToOp() override;
+
+ private:
+ TF_DISALLOW_COPY_AND_ASSIGN(DequeueUpToOp);
+};
+
+// Defines a QueueCloseOp, which closes the given Queue. Closing a
+// Queue signals that no more elements will be enqueued in it.
+//
+// The op has one input, which is the handle of the appropriate Queue.
+class QueueCloseOp : public QueueOpKernel {
+ public:
+ explicit QueueCloseOp(OpKernelConstruction* context);
+
+ protected:
+ void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) override;
+
+ private:
+ bool cancel_pending_enqueues_;
+ TF_DISALLOW_COPY_AND_ASSIGN(QueueCloseOp);
+};
+
+// Defines a QueueSizeOp, which computes the number of elements in the
+// given Queue, and emits it as an output tensor.
+//
+// The op has one input, which is the handle of the appropriate Queue;
+// and one output, which is a single-element tensor containing the current
+// size of that Queue.
+class QueueSizeOp : public QueueOpKernel {
+ public:
+ explicit QueueSizeOp(OpKernelConstruction* context);
+
+ protected:
+ void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) override;
+
+ private:
+ TF_DISALLOW_COPY_AND_ASSIGN(QueueSizeOp);
+};
+
+class QueueIsClosedOp : public QueueOpKernel {
+ public:
+ explicit QueueIsClosedOp(OpKernelConstruction* context);
+
+ protected:
+ void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
+ DoneCallback callback) override;
+
+ private:
+ TF_DISALLOW_COPY_AND_ASSIGN(QueueIsClosedOp);
+};
+
} // namespace tensorflow
-#endif // TENSORFLOW_KERNELS_QUEUE_OP_H_
+#endif // TENSORFLOW_CORE_KERNELS_QUEUE_OP_H_
diff --git a/tensorflow/core/kernels/queue_ops.cc b/tensorflow/core/kernels/queue_ops.cc
index 46a02854d7..c4d404259b 100644
--- a/tensorflow/core/kernels/queue_ops.cc
+++ b/tensorflow/core/kernels/queue_ops.cc
@@ -13,437 +13,44 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-// See docs in ../ops/data_flow_ops.cc.
-
#include "tensorflow/core/framework/op_kernel.h"
-#include "tensorflow/core/framework/queue_interface.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/kernels/queue_op.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
-class QueueOpKernel : public AsyncOpKernel {
- public:
- explicit QueueOpKernel(OpKernelConstruction* context)
- : AsyncOpKernel(context) {}
-
- void ComputeAsync(OpKernelContext* ctx, DoneCallback callback) final {
- QueueInterface* queue;
- if (ctx->input_dtype(0) == DT_RESOURCE) {
- OP_REQUIRES_OK_ASYNC(
- ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &queue), callback);
- } else {
- OP_REQUIRES_OK_ASYNC(ctx, GetResourceFromContext(ctx, "handle", &queue),
- callback);
- }
- ComputeAsync(ctx, queue, [callback, queue]() {
- queue->Unref();
- callback();
- });
- }
-
- protected:
- virtual void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
- DoneCallback callback) = 0;
-};
-
-class QueueAccessOpKernel : public QueueOpKernel {
- public:
- explicit QueueAccessOpKernel(OpKernelConstruction* context)
- : QueueOpKernel(context) {
- OP_REQUIRES_OK(context, context->GetAttr("timeout_ms", &timeout_));
- // TODO(keveman): Enable timeout.
- OP_REQUIRES(context, timeout_ == -1,
- errors::InvalidArgument("Timeout not supported yet."));
- }
-
- protected:
- int64 timeout_;
-};
-
-// Defines an EnqueueOp, the execution of which enqueues a tuple of
-// tensors in the given Queue.
-//
-// The op has 1 + k inputs, where k is the number of components in the
-// tuples stored in the given Queue:
-// - Input 0: queue handle.
-// - Input 1: 0th element of the tuple.
-// - ...
-// - Input (1+k): kth element of the tuple.
-class EnqueueOp : public QueueAccessOpKernel {
- public:
- explicit EnqueueOp(OpKernelConstruction* context)
- : QueueAccessOpKernel(context) {}
-
- protected:
- void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
- DoneCallback callback) override {
- DataTypeVector expected_inputs;
- if (ctx->input_dtype(0) == DT_RESOURCE) {
- expected_inputs.push_back(DT_RESOURCE);
- } else {
- expected_inputs.push_back(DT_STRING_REF);
- }
- for (DataType dt : queue->component_dtypes()) {
- expected_inputs.push_back(dt);
- }
- OP_REQUIRES_OK_ASYNC(ctx, ctx->MatchSignature(expected_inputs, {}),
- callback);
-
- QueueInterface::Tuple tuple;
- OpInputList components;
- OP_REQUIRES_OK_ASYNC(ctx, ctx->input_list("components", &components),
- callback);
- for (const Tensor& Tcomponent : components) {
- tuple.push_back(Tcomponent);
- }
-
- OP_REQUIRES_OK_ASYNC(ctx, queue->ValidateTuple(tuple), callback);
- queue->TryEnqueue(tuple, ctx, callback);
- }
-
- private:
- TF_DISALLOW_COPY_AND_ASSIGN(EnqueueOp);
-};
-
REGISTER_KERNEL_BUILDER(Name("QueueEnqueue").Device(DEVICE_CPU), EnqueueOp);
REGISTER_KERNEL_BUILDER(Name("QueueEnqueueV2").Device(DEVICE_CPU), EnqueueOp);
-// Defines an EnqueueManyOp, the execution of which slices each
-// component of a tuple of tensors along the 0th dimension, and
-// enqueues tuples of slices in the given Queue.
-//
-// The op has 1 + k inputs, where k is the number of components in the
-// tuples stored in the given Queue:
-// - Input 0: queue handle.
-// - Input 1: 0th element of the tuple.
-// - ...
-// - Input (1+k): kth element of the tuple.
-//
-// N.B. All tuple components must have the same size in the 0th
-// dimension.
-class EnqueueManyOp : public QueueAccessOpKernel {
- public:
- explicit EnqueueManyOp(OpKernelConstruction* context)
- : QueueAccessOpKernel(context) {}
-
- protected:
- void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
- DoneCallback callback) override {
- DataTypeVector expected_inputs;
- if (ctx->input_dtype(0) == DT_RESOURCE) {
- expected_inputs.push_back(DT_RESOURCE);
- } else {
- expected_inputs.push_back(DT_STRING_REF);
- }
- for (DataType dt : queue->component_dtypes()) {
- expected_inputs.push_back(dt);
- }
- OP_REQUIRES_OK_ASYNC(ctx, ctx->MatchSignature(expected_inputs, {}),
- callback);
-
- QueueInterface::Tuple tuple;
- OpInputList components;
- OP_REQUIRES_OK_ASYNC(ctx, ctx->input_list("components", &components),
- callback);
- for (const Tensor& Tcomponent : components) {
- tuple.push_back(Tcomponent);
- }
-
- OP_REQUIRES_OK_ASYNC(ctx, queue->ValidateManyTuple(tuple), callback);
- queue->TryEnqueueMany(tuple, ctx, callback);
- }
-
- ~EnqueueManyOp() override {}
-
- private:
- TF_DISALLOW_COPY_AND_ASSIGN(EnqueueManyOp);
-};
-
REGISTER_KERNEL_BUILDER(Name("QueueEnqueueMany").Device(DEVICE_CPU),
EnqueueManyOp);
REGISTER_KERNEL_BUILDER(Name("QueueEnqueueManyV2").Device(DEVICE_CPU),
EnqueueManyOp);
-// Defines a DequeueOp, the execution of which dequeues a tuple of
-// tensors from the given Queue.
-//
-// The op has one input, which is the handle of the appropriate
-// Queue. The op has k outputs, where k is the number of components in
-// the tuples stored in the given Queue, and output i is the ith
-// component of the dequeued tuple.
-class DequeueOp : public QueueAccessOpKernel {
- public:
- explicit DequeueOp(OpKernelConstruction* context)
- : QueueAccessOpKernel(context) {}
-
- protected:
- void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
- DoneCallback callback) override {
- if (ctx->input_dtype(0) == DT_RESOURCE) {
- OP_REQUIRES_OK_ASYNC(
- ctx, ctx->MatchSignature({DT_RESOURCE}, queue->component_dtypes()),
- callback);
- } else {
- OP_REQUIRES_OK_ASYNC(
- ctx, ctx->MatchSignature({DT_STRING_REF}, queue->component_dtypes()),
- callback);
- }
-
- queue->TryDequeue(ctx, [ctx, callback](const QueueInterface::Tuple& tuple) {
- if (!ctx->status().ok()) {
- callback();
- return;
- }
- OpOutputList output_components;
- OP_REQUIRES_OK_ASYNC(
- ctx, ctx->output_list("components", &output_components), callback);
- for (int i = 0; i < ctx->num_outputs(); ++i) {
- output_components.set(i, tuple[i]);
- }
- callback();
- });
- }
-
- ~DequeueOp() override {}
-
- private:
- TF_DISALLOW_COPY_AND_ASSIGN(DequeueOp);
-};
-
REGISTER_KERNEL_BUILDER(Name("QueueDequeue").Device(DEVICE_CPU), DequeueOp);
REGISTER_KERNEL_BUILDER(Name("QueueDequeueV2").Device(DEVICE_CPU), DequeueOp);
-// Defines a DequeueManyOp, the execution of which concatenates the
-// requested number of elements from the given Queue along the 0th
-// dimension, and emits the result as a single tuple of tensors.
-//
-// The op has two inputs:
-// - Input 0: the handle to a queue.
-// - Input 1: the number of elements to dequeue.
-//
-// The op has k outputs, where k is the number of components in the
-// tuples stored in the given Queue, and output i is the ith component
-// of the dequeued tuple.
-class DequeueManyOp : public QueueAccessOpKernel {
- public:
- explicit DequeueManyOp(OpKernelConstruction* context)
- : QueueAccessOpKernel(context) {}
-
- protected:
- void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
- DoneCallback callback) override {
- const Tensor& Tnum_elements = ctx->input(1);
- int32 num_elements = Tnum_elements.flat<int32>()(0);
-
- OP_REQUIRES_ASYNC(ctx, num_elements >= 0,
- errors::InvalidArgument("DequeueManyOp requested ",
- num_elements, " < 0 elements"),
- callback);
-
- if (ctx->input_dtype(0) == DT_RESOURCE) {
- OP_REQUIRES_OK_ASYNC(ctx,
- ctx->MatchSignature({DT_RESOURCE, DT_INT32},
- queue->component_dtypes()),
- callback);
- } else {
- OP_REQUIRES_OK_ASYNC(ctx,
- ctx->MatchSignature({DT_STRING_REF, DT_INT32},
- queue->component_dtypes()),
- callback);
- }
-
- queue->TryDequeueMany(
- num_elements, ctx, false /* allow_small_batch */,
- [ctx, callback](const QueueInterface::Tuple& tuple) {
- if (!ctx->status().ok()) {
- callback();
- return;
- }
- OpOutputList output_components;
- OP_REQUIRES_OK_ASYNC(
- ctx, ctx->output_list("components", &output_components),
- callback);
- for (int i = 0; i < ctx->num_outputs(); ++i) {
- output_components.set(i, tuple[i]);
- }
- callback();
- });
- }
-
- ~DequeueManyOp() override {}
-
- private:
- TF_DISALLOW_COPY_AND_ASSIGN(DequeueManyOp);
-};
-
REGISTER_KERNEL_BUILDER(Name("QueueDequeueMany").Device(DEVICE_CPU),
DequeueManyOp);
REGISTER_KERNEL_BUILDER(Name("QueueDequeueManyV2").Device(DEVICE_CPU),
DequeueManyOp);
-// Defines a DequeueUpToOp, the execution of which concatenates the
-// requested number of elements from the given Queue along the 0th
-// dimension, and emits the result as a single tuple of tensors.
-//
-// The difference between this op and DequeueMany is the handling when
-// the Queue is closed. While the DequeueMany op will return if there
-// an error when there are less than num_elements elements left in the
-// closed queue, this op will return between 1 and
-// min(num_elements, elements_remaining_in_queue), and will not block.
-// If there are no elements left, then the standard DequeueMany error
-// is returned.
-//
-// This op only works if the underlying Queue implementation accepts
-// the allow_small_batch = true parameter to TryDequeueMany.
-// If it does not, an errors::Unimplemented exception is returned.
-//
-// The op has two inputs:
-// - Input 0: the handle to a queue.
-// - Input 1: the number of elements to dequeue.
-//
-// The op has k outputs, where k is the number of components in the
-// tuples stored in the given Queue, and output i is the ith component
-// of the dequeued tuple.
-//
-// The op has one attribute: allow_small_batch. If the Queue supports
-// it, setting this to true causes the queue to return smaller
-// (possibly zero length) batches when it is closed, up to however
-// many elements are available when the op executes. In this case,
-// the Queue does not block when closed.
-class DequeueUpToOp : public QueueAccessOpKernel {
- public:
- explicit DequeueUpToOp(OpKernelConstruction* context)
- : QueueAccessOpKernel(context) {}
-
- protected:
- void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
- DoneCallback callback) override {
- const Tensor& Tnum_elements = ctx->input(1);
- int32 num_elements = Tnum_elements.flat<int32>()(0);
-
- OP_REQUIRES_ASYNC(ctx, num_elements >= 0,
- errors::InvalidArgument("DequeueUpToOp requested ",
- num_elements, " < 0 elements"),
- callback);
-
- if (ctx->input_dtype(0) == DT_RESOURCE) {
- OP_REQUIRES_OK_ASYNC(ctx,
- ctx->MatchSignature({DT_RESOURCE, DT_INT32},
- queue->component_dtypes()),
- callback);
- } else {
- OP_REQUIRES_OK_ASYNC(ctx,
- ctx->MatchSignature({DT_STRING_REF, DT_INT32},
- queue->component_dtypes()),
- callback);
- }
-
- queue->TryDequeueMany(
- num_elements, ctx, true /* allow_small_batch */,
- [ctx, callback](const QueueInterface::Tuple& tuple) {
- if (!ctx->status().ok()) {
- callback();
- return;
- }
- OpOutputList output_components;
- OP_REQUIRES_OK_ASYNC(
- ctx, ctx->output_list("components", &output_components),
- callback);
- for (int i = 0; i < ctx->num_outputs(); ++i) {
- output_components.set(i, tuple[i]);
- }
- callback();
- });
- }
-
- ~DequeueUpToOp() override {}
-
- private:
- TF_DISALLOW_COPY_AND_ASSIGN(DequeueUpToOp);
-};
-
REGISTER_KERNEL_BUILDER(Name("QueueDequeueUpTo").Device(DEVICE_CPU),
DequeueUpToOp);
REGISTER_KERNEL_BUILDER(Name("QueueDequeueUpToV2").Device(DEVICE_CPU),
DequeueUpToOp);
-// Defines a QueueCloseOp, which closes the given Queue. Closing a
-// Queue signals that no more elements will be enqueued in it.
-//
-// The op has one input, which is the handle of the appropriate Queue.
-class QueueCloseOp : public QueueOpKernel {
- public:
- explicit QueueCloseOp(OpKernelConstruction* context)
- : QueueOpKernel(context) {
- OP_REQUIRES_OK(context, context->GetAttr("cancel_pending_enqueues",
- &cancel_pending_enqueues_));
- }
-
- protected:
- void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
- DoneCallback callback) override {
- queue->Close(ctx, cancel_pending_enqueues_, callback);
- }
-
- private:
- bool cancel_pending_enqueues_;
- TF_DISALLOW_COPY_AND_ASSIGN(QueueCloseOp);
-};
-
REGISTER_KERNEL_BUILDER(Name("QueueClose").Device(DEVICE_CPU), QueueCloseOp);
REGISTER_KERNEL_BUILDER(Name("QueueCloseV2").Device(DEVICE_CPU), QueueCloseOp);
-// Defines a QueueSizeOp, which computes the number of elements in the
-// given Queue, and emits it as an output tensor.
-//
-// The op has one input, which is the handle of the appropriate Queue;
-// and one output, which is a single-element tensor containing the current
-// size of that Queue.
-class QueueSizeOp : public QueueOpKernel {
- public:
- explicit QueueSizeOp(OpKernelConstruction* context)
- : QueueOpKernel(context) {}
-
- protected:
- void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
- DoneCallback callback) override {
- Tensor* Tqueue_size = nullptr;
- OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &Tqueue_size));
- Tqueue_size->flat<int32>().setConstant(queue->size());
- callback();
- }
-
- private:
- TF_DISALLOW_COPY_AND_ASSIGN(QueueSizeOp);
-};
-
REGISTER_KERNEL_BUILDER(Name("QueueSize").Device(DEVICE_CPU), QueueSizeOp);
REGISTER_KERNEL_BUILDER(Name("QueueSizeV2").Device(DEVICE_CPU), QueueSizeOp);
-class QueueIsClosedOp : public QueueOpKernel {
- public:
- explicit QueueIsClosedOp(OpKernelConstruction* context)
- : QueueOpKernel(context) {}
-
- protected:
- void ComputeAsync(OpKernelContext* ctx, QueueInterface* queue,
- DoneCallback callback) override {
- Tensor* Tqueue_is_closed = nullptr;
- OP_REQUIRES_OK(ctx,
- ctx->allocate_output(0, TensorShape({}), &Tqueue_is_closed));
- Tqueue_is_closed->flat<bool>().setConstant(queue->is_closed());
- callback();
- }
-
- private:
- TF_DISALLOW_COPY_AND_ASSIGN(QueueIsClosedOp);
-};
-
REGISTER_KERNEL_BUILDER(Name("QueueIsClosed").Device(DEVICE_CPU),
QueueIsClosedOp);
REGISTER_KERNEL_BUILDER(Name("QueueIsClosedV2").Device(DEVICE_CPU),
diff --git a/tensorflow/core/kernels/reshape_util.cc b/tensorflow/core/kernels/reshape_util.cc
index c75e942039..50fdc17916 100644
--- a/tensorflow/core/kernels/reshape_util.cc
+++ b/tensorflow/core/kernels/reshape_util.cc
@@ -28,7 +28,6 @@ limitations under the License.
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
-#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
@@ -108,15 +107,19 @@ void Reshape(OpKernelContext *context, const Tensor &input_indices_in,
}
gtl::InlinedVector<int64, 8> input_strides(input_rank);
- input_strides[input_rank - 1] = 1;
- for (int d = input_rank - 2; d >= 0; --d) {
- input_strides[d] = input_strides[d + 1] * input_shape.dim_size(d + 1);
+ if (input_rank > 0) {
+ input_strides[input_rank - 1] = 1;
+ for (int d = input_rank - 2; d >= 0; --d) {
+ input_strides[d] = input_strides[d + 1] * input_shape.dim_size(d + 1);
+ }
}
gtl::InlinedVector<int64, 8> output_strides(output_rank);
- output_strides[output_rank - 1] = 1;
- for (int d = output_rank - 2; d >= 0; --d) {
- output_strides[d] = output_strides[d + 1] * output_shape.dim_size(d + 1);
+ if (output_rank > 0) {
+ output_strides[output_rank - 1] = 1;
+ for (int d = output_rank - 2; d >= 0; --d) {
+ output_strides[d] = output_strides[d + 1] * output_shape.dim_size(d + 1);
+ }
}
Tensor *result_indices = nullptr;
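
The guarded loops above compute ordinary row-major strides; a self-contained sketch of the same computation (standalone code, not the kernel's):

    #include <cstdint>
    #include <vector>

    // Row-major strides: {2, 3, 4} -> {12, 4, 1}. A rank-0 (scalar) shape
    // yields an empty vector, which is why the loops above are now guarded
    // by rank > 0 -- indexing strides[rank - 1] would be out of bounds.
    std::vector<int64_t> RowMajorStrides(const std::vector<int64_t>& dims) {
      std::vector<int64_t> strides(dims.size());
      if (!dims.empty()) {
        strides.back() = 1;
        for (int d = static_cast<int>(dims.size()) - 2; d >= 0; --d) {
          strides[d] = strides[d + 1] * dims[d + 1];
        }
      }
      return strides;
    }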
diff --git a/tensorflow/core/kernels/resource_variable_ops.cc b/tensorflow/core/kernels/resource_variable_ops.cc
index af921e4815..c5292e1ae1 100644
--- a/tensorflow/core/kernels/resource_variable_ops.cc
+++ b/tensorflow/core/kernels/resource_variable_ops.cc
@@ -174,25 +174,20 @@ REGISTER_KERNEL_BUILDER(Name("VariableShape")
#endif // GOOGLE_CUDA
-class DestroyResourceOp : public OpKernel {
- public:
- explicit DestroyResourceOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
- OP_REQUIRES_OK(ctx,
- ctx->GetAttr("ignore_lookup_error", &ignore_lookup_error_));
- }
+DestroyResourceOp::DestroyResourceOp(OpKernelConstruction* ctx)
+ : OpKernel(ctx) {
+ OP_REQUIRES_OK(ctx,
+ ctx->GetAttr("ignore_lookup_error", &ignore_lookup_error_));
+}
- void Compute(OpKernelContext* ctx) override {
- const ResourceHandle& p = HandleFromInput(ctx, 0);
- Status status = DeleteResource(ctx, p);
- if (ignore_lookup_error_ && errors::IsNotFound(status)) {
- return;
- }
- OP_REQUIRES_OK(ctx, status);
+void DestroyResourceOp::Compute(OpKernelContext* ctx) {
+ const ResourceHandle& p = HandleFromInput(ctx, 0);
+ Status status = DeleteResource(ctx, p);
+ if (ignore_lookup_error_ && errors::IsNotFound(status)) {
+ return;
}
-
- private:
- bool ignore_lookup_error_;
-};
+ OP_REQUIRES_OK(ctx, status);
+}
REGISTER_KERNEL_BUILDER(Name("DestroyResourceOp").Device(DEVICE_CPU),
DestroyResourceOp);
diff --git a/tensorflow/core/kernels/resource_variable_ops.h b/tensorflow/core/kernels/resource_variable_ops.h
index 8cae5d21f0..9b60106f13 100644
--- a/tensorflow/core/kernels/resource_variable_ops.h
+++ b/tensorflow/core/kernels/resource_variable_ops.h
@@ -28,6 +28,15 @@ class ReadVariableOp : public OpKernel {
DataType dtype_;
};
+class DestroyResourceOp : public OpKernel {
+ public:
+ explicit DestroyResourceOp(OpKernelConstruction* ctx);
+ void Compute(OpKernelContext* ctx) override;
+
+ private:
+ bool ignore_lookup_error_;
+};
+
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_RESOURCE_VARIABLE_OPS_H_
diff --git a/tensorflow/core/kernels/roll_op.cc b/tensorflow/core/kernels/roll_op.cc
index 722116f86f..efa30438d9 100644
--- a/tensorflow/core/kernels/roll_op.cc
+++ b/tensorflow/core/kernels/roll_op.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/register_types_traits.h"
#include "tensorflow/core/framework/shape_inference.h"
+#include "tensorflow/core/kernels/bounds_check.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/work_sharder.h"
@@ -258,7 +259,7 @@ class RollOp : public OpKernel {
if (axis < 0) {
axis += num_dims;
}
- OP_REQUIRES(context, 0 <= axis && axis < num_dims,
+ OP_REQUIRES(context, FastBoundsCheck(axis, num_dims),
errors::InvalidArgument("axis ", axis, " is out of range"));
const int ds = std::max<int>(static_cast<int>(input.dim_size(axis)), 1);
const int sum = shift_mod_sum[axis] + static_cast<int>(shift_flat(i));
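
The FastBoundsCheck substitution above folds the two signed comparisons into a single unsigned compare; a minimal standalone equivalent of the trick (illustrative, not TF's exact implementation):

    #include <cstdint>

    // Casting a negative index to an unsigned type makes it compare larger
    // than any valid limit, so one comparison checks 0 <= index < limit.
    inline bool InBounds(int64_t index, int64_t limit) {
      return static_cast<uint64_t>(index) < static_cast<uint64_t>(limit);
    }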
diff --git a/tensorflow/core/kernels/sdca_internal.cc b/tensorflow/core/kernels/sdca_internal.cc
index 3e16ba8d04..1c071d3d41 100644
--- a/tensorflow/core/kernels/sdca_internal.cc
+++ b/tensorflow/core/kernels/sdca_internal.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/core/kernels/sdca_internal.h"
#include <limits>
+#include <numeric>
#include <random>
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
diff --git a/tensorflow/core/kernels/sdca_internal.h b/tensorflow/core/kernels/sdca_internal.h
index 897c488702..1eff4b15fa 100644
--- a/tensorflow/core/kernels/sdca_internal.h
+++ b/tensorflow/core/kernels/sdca_internal.h
@@ -43,8 +43,6 @@ limitations under the License.
#include "tensorflow/core/lib/random/distribution_sampler.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/util/guarded_philox_random.h"
-#include "tensorflow/core/util/sparse/group_iterator.h"
-#include "tensorflow/core/util/sparse/sparse_tensor.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
diff --git a/tensorflow/core/kernels/segment_reduction_ops.h b/tensorflow/core/kernels/segment_reduction_ops.h
index 15004ae4df..d28e35157b 100644
--- a/tensorflow/core/kernels/segment_reduction_ops.h
+++ b/tensorflow/core/kernels/segment_reduction_ops.h
@@ -13,9 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef THIRD_PARTY_TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_H_
-#define THIRD_PARTY_TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_H_
-
+#ifndef TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_H_
+#define TENSORFLOW_CORE_KERNELS_SEGMENT_REDUCTION_OPS_H_
// This file requires the following include because it uses CudaAtomicMax:
// #include "tensorflow/core/util/cuda_kernel_helper.h"
@@ -24,7 +23,6 @@ limitations under the License.
// non-GPU targets. This only breaks in clang, because it's more strict for
// template code and CudaAtomicMax is used in template context.
-
// This file requires the following include because it uses CudaAtomicMax:
// #include "tensorflow/core/util/cuda_kernel_helper.h"
diff --git a/tensorflow/core/kernels/sendrecv_ops.cc b/tensorflow/core/kernels/sendrecv_ops.cc
index 2f87057f4e..6521dcf932 100644
--- a/tensorflow/core/kernels/sendrecv_ops.cc
+++ b/tensorflow/core/kernels/sendrecv_ops.cc
@@ -160,7 +160,6 @@ Rendezvous::DoneCallback make_recv_callback(OpKernelContext* ctx,
if (!is_dead) {
ctx->set_output(0, val);
}
- *ctx->is_output_dead() = is_dead;
}
done();
},
diff --git a/tensorflow/core/kernels/serialize_sparse_op.cc b/tensorflow/core/kernels/serialize_sparse_op.cc
index 9e041d98f7..577e327809 100644
--- a/tensorflow/core/kernels/serialize_sparse_op.cc
+++ b/tensorflow/core/kernels/serialize_sparse_op.cc
@@ -36,6 +36,8 @@ limitations under the License.
namespace tensorflow {
+namespace {
+
using sparse::SparseTensor;
template <typename T>
@@ -188,8 +190,10 @@ class SerializeManySparseOp : public SerializeManySparseOpBase<U> {
TensorShape tensor_input_shape(input_shape->vec<int64>());
gtl::InlinedVector<int64, 8> std_order(rank);
std::iota(std_order.begin(), std_order.end(), 0);
- SparseTensor input_st(*input_indices, *input_values, tensor_input_shape,
- std_order);
+ SparseTensor input_st;
+ OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
+ tensor_input_shape, std_order,
+ &input_st));
auto input_shape_t = input_shape->vec<int64>();
const int64 N = input_shape_t(0);
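
The SparseTensor::Create call above replaces direct construction so that malformed indices/values/shape combinations surface as a Status the caller checks, rather than failing inside a constructor that cannot report errors. A minimal sketch of this Status-returning factory pattern (type and names illustrative):

    #include <string>

    struct Status {
      bool ok;
      std::string msg;
      static Status OK() { return {true, ""}; }
    };

    class Widget {
     public:
      // Validation happens here; failures become a Status for the caller,
      // mirroring the OP_REQUIRES_OK(context, SparseTensor::Create(...))
      // call site in the hunk above.
      static Status Create(int size, Widget* out) {
        if (size < 0) return {false, "size must be >= 0"};
        out->size_ = size;
        return Status::OK();
      }
      Widget() = default;

     private:
      int size_ = 0;
    };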
@@ -306,267 +310,6 @@ Status SerializeManySparseOpBase<Variant>::Serialize(const Tensor& input,
TF_CALL_ALL_TYPES(REGISTER_KERNELS);
#undef REGISTER_KERNELS
-template <typename T>
-class DeserializeSparseOp : public OpKernel {
- public:
- explicit DeserializeSparseOp(OpKernelConstruction* context)
- : OpKernel(context) {
- OP_REQUIRES_OK(context, context->GetAttr("dtype", &dtype_));
- }
-
- void Compute(OpKernelContext* context) override {
- const Tensor& serialized_sparse = context->input(0);
- const int ndims = serialized_sparse.shape().dims();
-
- OP_REQUIRES(
- context, ndims > 0,
- errors::InvalidArgument("Serialized sparse should have non-zero rank ",
- serialized_sparse.shape().DebugString()));
-
- OP_REQUIRES(context, serialized_sparse.shape().dim_size(ndims - 1) == 3,
- errors::InvalidArgument(
- "Serialized sparse should have 3 as the last dimension ",
- serialized_sparse.shape().DebugString()));
-
- int num_sparse_tensors = 1;
- for (int i = 0; i < ndims - 1; ++i) {
- num_sparse_tensors *= serialized_sparse.shape().dim_size(i);
- }
-
- OP_REQUIRES(
- context, num_sparse_tensors > 0,
- errors::InvalidArgument(
- "Serialized sparse should have at least 1 serialized tensor, "
- "but has a zero dimension ",
- serialized_sparse.shape().DebugString()));
-
- if (num_sparse_tensors == 1 && serialized_sparse.shape().dims() == 0) {
- // Special case with a single sparse tensor. We can avoid data
- // motion in the Concat and Reshape.
- const auto& serialized_sparse_t = serialized_sparse.vec<T>();
-
- Tensor output_indices;
- Tensor output_values;
- Tensor output_shape;
- OP_REQUIRES_OK(context,
- this->GetAndValidateSparseTensor(
- serialized_sparse_t(0), serialized_sparse_t(1),
- serialized_sparse_t(2), dtype_, 0 /* index */,
- &output_indices, &output_values, &output_shape));
- context->set_output(0, output_indices);
- context->set_output(1, output_values);
- context->set_output(2, output_shape);
- return;
- }
-
- std::vector<Tensor> indices;
- std::vector<Tensor> values;
- TensorShape shape;
- indices.reserve(num_sparse_tensors);
- values.reserve(num_sparse_tensors);
-
- const auto& serialized_sparse_t = serialized_sparse.flat_inner_dims<T, 2>();
- for (int i = 0; i < num_sparse_tensors; ++i) {
- Tensor output_indices;
- Tensor output_values;
- Tensor output_shape;
- OP_REQUIRES_OK(context,
- this->GetAndValidateSparseTensor(
- serialized_sparse_t(i, 0), serialized_sparse_t(i, 1),
- serialized_sparse_t(i, 2), dtype_, i, &output_indices,
- &output_values, &output_shape));
- int64 num_entries = output_indices.dim_size(0);
- int rank = output_indices.dim_size(1);
-
- // Now we expand each SparseTensors' indices and shape by
- // prefixing a dimension
- Tensor expanded_indices(DT_INT64, TensorShape({num_entries, 1 + rank}));
- const auto& output_indices_t = output_indices.matrix<int64>();
- auto expanded_indices_t = expanded_indices.matrix<int64>();
- expanded_indices_t.chip<1>(0).setZero();
- Eigen::DSizes<Eigen::DenseIndex, 2> indices_start(0, 1);
- Eigen::DSizes<Eigen::DenseIndex, 2> indices_sizes(num_entries, rank);
- expanded_indices_t.slice(indices_start, indices_sizes) = output_indices_t;
-
- Tensor expanded_shape(DT_INT64, TensorShape({1 + rank}));
- const auto& output_shape_t = output_shape.vec<int64>();
- auto expanded_shape_t = expanded_shape.vec<int64>();
- expanded_shape_t(0) = 1;
- std::copy_n(&output_shape_t(0), rank, &expanded_shape_t(1));
-
- TensorShape expanded_tensor_shape(expanded_shape.vec<int64>());
-
- indices.push_back(expanded_indices);
- values.push_back(output_values);
- if (i == 0) {
- shape = expanded_tensor_shape;
- } else {
- OP_REQUIRES(
- context, shape.dims() == expanded_tensor_shape.dims(),
- errors::InvalidArgument(
- "Inconsistent shape across SparseTensors: rank prior to "
- "SparseTensor[",
- i, "] was: ", shape.dims() - 1, " but rank of SparseTensor[", i,
- "] is: ", expanded_tensor_shape.dims() - 1));
- for (int j = 1; j < shape.dims(); ++j) {
- // NOTE(mrry): For compatibility with the implementations of
- // DeserializeManySparse, and many ops that generate
- // SparseTensors to batch that do not have a fixed
- // dense_shape (e.g. `tf.parse_single_example()`), we
- // compute the maximum in each dimension to find the
- // smallest dense_shape that bounds all of the input
- // SparseTensors.
- shape.set_dim(j, std::max(shape.dim_size(j),
- expanded_tensor_shape.dim_size(j)));
- }
- }
- }
-
- // Dimension 0 is the primary dimension.
- int rank = shape.dims();
- gtl::InlinedVector<int64, 8> std_order(rank);
- std::iota(std_order.begin(), std_order.end(), 0);
-
- std::vector<SparseTensor> tensors;
- tensors.reserve(num_sparse_tensors);
- for (int i = 0; i < num_sparse_tensors; ++i) {
- tensors.emplace_back(indices[i], values[i], shape, std_order);
- }
-
- gtl::optional<SparseTensor> maybe_output;
-#define HANDLE_TYPE(T) \
- case DataTypeToEnum<T>::value: { \
- maybe_output = SparseTensor::Concat<T>(tensors); \
- break; \
- }
-
- switch (dtype_) {
- TF_CALL_ALL_TYPES(HANDLE_TYPE);
- TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
-#undef HANDLE_TYPE
- default:
- OP_REQUIRES(context, false,
- errors::Unimplemented(
- "DeserializeSparse Unhandled data type: ", dtype_));
- }
- DCHECK(maybe_output);
- SparseTensor& output = maybe_output.value();
-
- // Compute the input shape for the reshape operation.
- Tensor input_shape(DT_INT64, TensorShape({output.dims()}));
- std::copy_n(output.shape().data(), output.dims(),
- input_shape.vec<int64>().data());
-
- // Compute the target shape for the reshape operation.
- Tensor target_shape(DT_INT64, TensorShape({ndims + output.dims() - 2}));
- for (int i = 0; i < ndims - 1; ++i) {
- target_shape.vec<int64>()(i) = serialized_sparse.shape().dim_size(i);
- }
- for (int i = 0; i < output.dims() - 1; ++i) {
- target_shape.vec<int64>()(i + ndims - 1) = output.shape().data()[i + 1];
- }
-
- Tensor output_indices;
- Tensor output_shape;
- Reshape(context, output.indices(), input_shape, target_shape,
- 0 /* output indices index */, 2 /* output shape index */);
- context->set_output(1, output.values());
- }
-
- protected:
- Status Deserialize(const T& serialized, Tensor* result);
-
- Status GetAndValidateSparseTensor(
- const T& serialized_indices, const T& serialized_values,
- const T& serialized_shape, DataType values_dtype, int index,
- Tensor* output_indices, Tensor* output_values, Tensor* output_shape) {
- // Deserialize and validate the indices.
- TF_RETURN_IF_ERROR(this->Deserialize(serialized_indices, output_indices));
- if (!TensorShapeUtils::IsMatrix(output_indices->shape())) {
- return errors::InvalidArgument(
- "Expected serialized_sparse[", index,
- ", 0] to represent an index matrix but received shape ",
- output_indices->shape().DebugString());
- }
- int64 num_entries = output_indices->dim_size(0);
- int rank = output_indices->dim_size(1);
-
- // Deserialize and validate the values.
- TF_RETURN_IF_ERROR(this->Deserialize(serialized_values, output_values));
- if (!TensorShapeUtils::IsVector(output_values->shape())) {
- return errors::InvalidArgument(
- "Expected serialized_sparse[", index,
- ", 1] to represent a values vector but received shape ",
- output_values->shape().DebugString());
- }
- if (values_dtype != output_values->dtype()) {
- return errors::InvalidArgument(
- "Requested SparseTensor of type ", DataTypeString(values_dtype),
- " but SparseTensor[", index,
- "].values.dtype() == ", DataTypeString(output_values->dtype()));
- }
- if (num_entries != output_values->dim_size(0)) {
- return errors::InvalidArgument(
- "Expected row counts of SparseTensor[", index,
- "].indices and SparseTensor[", index,
- "].values to match but they do not: ", num_entries, " vs. ",
- output_values->dim_size(0));
- }
-
- // Deserialize and validate the shape.
- TF_RETURN_IF_ERROR(this->Deserialize(serialized_shape, output_shape));
- if (!TensorShapeUtils::IsVector(output_shape->shape())) {
- return errors::InvalidArgument(
- "Expected serialized_sparse[", index,
- ", 1] to be a shape vector but its shape is ",
- output_shape->shape().DebugString());
- }
- if (rank != output_shape->dim_size(0)) {
- return errors::InvalidArgument("Expected column counts of SparseTensor[",
- index,
- "].indices to match size of SparseTensor[",
- index, "].shape but they do not: ", rank,
- " vs. ", output_shape->dim_size(0));
- }
- return Status::OK();
- }
-
- DataType dtype_;
-};
-
-template <>
-Status DeserializeSparseOp<string>::Deserialize(const string& serialized,
- Tensor* result) {
- TensorProto proto;
- if (!ParseProtoUnlimited(&proto, serialized)) {
- return errors::InvalidArgument("Could not parse serialized proto");
- }
- Tensor tensor;
- if (!tensor.FromProto(proto)) {
- return errors::InvalidArgument("Could not construct tensor from proto");
- }
- *result = tensor;
- return Status::OK();
-}
-
-REGISTER_KERNEL_BUILDER(Name("DeserializeSparse")
- .Device(DEVICE_CPU)
- .TypeConstraint<string>("Tserialized"),
- DeserializeSparseOp<string>)
-
-REGISTER_KERNEL_BUILDER(Name("DeserializeManySparse").Device(DEVICE_CPU),
- DeserializeSparseOp<string>)
-
-template <>
-Status DeserializeSparseOp<Variant>::Deserialize(const Variant& serialized,
- Tensor* result) {
- *result = *serialized.get<Tensor>();
- return Status::OK();
-}
-
-REGISTER_KERNEL_BUILDER(Name("DeserializeSparse")
- .Device(DEVICE_CPU)
- .TypeConstraint<Variant>("Tserialized"),
- DeserializeSparseOp<Variant>)
+} // namespace
} // namespace tensorflow
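
The recurring change in this file and in the kernels below is a migration from SparseTensor's validating constructor to a Status-returning factory. As a rough sketch only (hypothetical `SparseTensorLike` stand-in, not the actual tensorflow::sparse::SparseTensor implementation in tensorflow/core/util/sparse/sparse_tensor.h), the shape of such a factory: validation a constructor could only enforce by crashing moves into Create(), so malformed input becomes a recoverable error.

// Sketch of the constructor-to-factory pattern; SparseTensorLike is a
// hypothetical stand-in for illustration.
Status CreateSparseTensorLike(Tensor ix, Tensor vals, const TensorShape& shape,
                              SparseTensorLike* result) {
  if (ix.dtype() != DT_INT64) {
    return errors::InvalidArgument("indices must be int64 but got: ",
                                   DataTypeString(ix.dtype()));
  }
  if (!TensorShapeUtils::IsMatrix(ix.shape())) {
    return errors::InvalidArgument("indices must be a matrix but got shape: ",
                                   ix.shape().DebugString());
  }
  if (!TensorShapeUtils::IsVector(vals.shape())) {
    return errors::InvalidArgument("values must be a vector but got shape: ",
                                   vals.shape().DebugString());
  }
  *result = SparseTensorLike(std::move(ix), std::move(vals), shape);
  return Status::OK();
}

Call sites then wrap the factory in OP_REQUIRES_OK inside kernels, or TF_RETURN_IF_ERROR inside helpers that themselves return Status, as every hunk below does.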
diff --git a/tensorflow/core/kernels/set_kernels.cc b/tensorflow/core/kernels/set_kernels.cc
index e836c764ac..f893d4e945 100644
--- a/tensorflow/core/kernels/set_kernels.cc
+++ b/tensorflow/core/kernels/set_kernels.cc
@@ -63,9 +63,9 @@ Status GroupShape(const VarDimArray& input_shape, ShapeArray* grouped_shape) {
// Build `SparseTensor` from indices, values, and shape in inputs
// [base_index, base_index + 3), and validate its rank and indices.
-sparse::SparseTensor SparseTensorFromContext(OpKernelContext* ctx,
- const int32 base_index,
- bool validate_indices) {
+Status SparseTensorFromContext(OpKernelContext* ctx, const int32 base_index,
+ bool validate_indices,
+ sparse::SparseTensor* tensor) {
// Assume row-major order.
const TensorShape shape =
TensorShape(ctx->input(base_index + 2).vec<int64>());
@@ -73,13 +73,8 @@ sparse::SparseTensor SparseTensorFromContext(OpKernelContext* ctx,
std::vector<int64> order(shape.dims());
std::iota(order.begin(), order.end(), 0);
- const sparse::SparseTensor st(ctx->input(base_index),
- ctx->input(base_index + 1), shape, order);
- if (validate_indices) {
- Status s = st.IndicesValid();
- if (!s.ok()) ctx->SetStatus(s);
- }
- return st;
+ return sparse::SparseTensor::Create(
+ ctx->input(base_index), ctx->input(base_index + 1), shape, order, tensor);
}
// TODO(ptucker): CheckGroup is just a sanity check on the result of
@@ -253,11 +248,13 @@ class SetSizeOp : public OpKernel {
template <typename T>
void SetSizeOp<T>::Compute(OpKernelContext* ctx) {
- const sparse::SparseTensor set_st =
- SparseTensorFromContext(ctx, 0, validate_indices_);
+ sparse::SparseTensor set_st;
+ OP_REQUIRES_OK(ctx,
+ SparseTensorFromContext(ctx, 0, validate_indices_, &set_st));
+ OP_REQUIRES_OK(ctx, set_st.IndicesValid());
- // Output shape is same as input except for last dimension, which reduces to
- // the set size of values along that dimension.
+ // Output shape is same as input except for last dimension, which reduces
+ // to the set size of values along that dimension.
ShapeArray output_shape;
OP_REQUIRES_OK(ctx, GroupShape(set_st.shape(), &output_shape));
const auto output_strides = Strides(output_shape);
@@ -484,8 +481,10 @@ void SetOperationOp<T>::ComputeDenseToDense(OpKernelContext* ctx) const {
template <typename T>
void SetOperationOp<T>::ComputeDenseToSparse(OpKernelContext* ctx) const {
const Tensor& set1_t = ctx->input(0);
- const sparse::SparseTensor set2_st =
- SparseTensorFromContext(ctx, 1, validate_indices_);
+ sparse::SparseTensor set2_st;
+ OP_REQUIRES_OK(ctx,
+ SparseTensorFromContext(ctx, 1, validate_indices_, &set2_st));
+ OP_REQUIRES_OK(ctx, set2_st.IndicesValid());
// The following should stay in sync with `_dense_to_sparse_shape` shape
// assertions in python/ops/set_ops.py, and `SetShapeFn` for
// `DenseToSparseSetOperation` in ops/set_ops.cc.
@@ -597,10 +596,15 @@ const std::vector<int64> GROUP_ITER_END;
// with the same first n-1 dimensions in set1 and set2.
template <typename T>
void SetOperationOp<T>::ComputeSparseToSparse(OpKernelContext* ctx) const {
- const sparse::SparseTensor set1_st =
- SparseTensorFromContext(ctx, 0, validate_indices_);
- const sparse::SparseTensor set2_st =
- SparseTensorFromContext(ctx, 3, validate_indices_);
+ sparse::SparseTensor set1_st;
+ OP_REQUIRES_OK(ctx,
+ SparseTensorFromContext(ctx, 0, validate_indices_, &set1_st));
+ OP_REQUIRES_OK(ctx, set1_st.IndicesValid());
+
+ sparse::SparseTensor set2_st;
+ OP_REQUIRES_OK(ctx,
+ SparseTensorFromContext(ctx, 3, validate_indices_, &set2_st));
+  OP_REQUIRES_OK(ctx, set2_st.IndicesValid());
+
// The following should stay in sync with `_sparse_to_sparse_shape` shape
// assertions in python/ops/set_ops.py, and `SetShapeFn` for
// `SparseToSparseSetOperation` in ops/set_ops.cc.
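
The old helper pushed failures into the context via ctx->SetStatus and still returned a possibly-invalid tensor; the Status return now forces every call site to stop on error. A simplified sketch of what the OP_REQUIRES_OK(ctx, expr) idiom expands to (the real macro in tensorflow/core/framework/op_kernel.h also records the file and line):

do {
  ::tensorflow::Status _s(
      SparseTensorFromContext(ctx, 0, validate_indices_, &set_st));
  if (!TF_PREDICT_TRUE(_s.ok())) {
    ctx->CtxFailureWithWarning(_s);  // records the error on the context
    return;                          // abandons Compute() early
  }
} while (0);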
diff --git a/tensorflow/core/kernels/sparse_concat_op.cc b/tensorflow/core/kernels/sparse_concat_op.cc
index f813794374..3b2a0cb0f3 100644
--- a/tensorflow/core/kernels/sparse_concat_op.cc
+++ b/tensorflow/core/kernels/sparse_concat_op.cc
@@ -124,9 +124,12 @@ class SparseConcatOp : public OpKernel {
std::vector<sparse::SparseTensor> sp_inputs;
for (int i = 0; i < N; ++i) {
const TensorShape current_shape(shapes[i].vec<int64>());
- sp_inputs.emplace_back(tensor::DeepCopy(inds[i]),
- tensor::DeepCopy(vals[i]), current_shape,
- std_order);
+ sparse::SparseTensor tensor;
+ OP_REQUIRES_OK(context,
+ sparse::SparseTensor::Create(
+ tensor::DeepCopy(inds[i]), tensor::DeepCopy(vals[i]),
+ current_shape, std_order, &tensor));
+ sp_inputs.push_back(std::move(tensor));
sp_inputs[i].Reorder<T>(concat_order);
}
diff --git a/tensorflow/core/kernels/sparse_reduce_op.cc b/tensorflow/core/kernels/sparse_reduce_op.cc
index 9e60791f97..a465564739 100644
--- a/tensorflow/core/kernels/sparse_reduce_op.cc
+++ b/tensorflow/core/kernels/sparse_reduce_op.cc
@@ -172,8 +172,10 @@ class SparseReduceOp : public OpKernel {
// making deep copies here. Remove this if/when we change Reorder()'s
// semantics.
const auto shape_vec = shape_t->vec<int64>();
- SparseTensor sp(tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
- TensorShape(shape_vec));
+ SparseTensor sp;
+ OP_REQUIRES_OK(ctx, SparseTensor::Create(
+ tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
+ TensorShape(shape_vec), &sp));
ReduceDetails reduction = SparseTensorReduceHelper(
sp, reduction_axes_t->flat<int32>(), keep_dims_);
@@ -260,8 +262,10 @@ class SparseReduceSparseOp : public OpKernel {
OP_REQUIRES_OK(ctx, ValidateInputs(shape_t, reduction_axes_t));
- SparseTensor sp(tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
- TensorShape(shape_t->vec<int64>()));
+ SparseTensor sp;
+ OP_REQUIRES_OK(ctx, SparseTensor::Create(tensor::DeepCopy(*indices_t),
+ tensor::DeepCopy(*values_t),
+ TensorShape(shape_t->vec<int64>()), &sp));
ReduceDetails reduction = SparseTensorReduceHelper(
sp, reduction_axes_t->flat<int32>(), keep_dims_);
diff --git a/tensorflow/core/kernels/sparse_reorder_op.cc b/tensorflow/core/kernels/sparse_reorder_op.cc
index d1373fe0ef..6f9065827f 100644
--- a/tensorflow/core/kernels/sparse_reorder_op.cc
+++ b/tensorflow/core/kernels/sparse_reorder_op.cc
@@ -60,16 +60,21 @@ class SparseReorderOp : public OpKernel {
std::iota(std_order.begin(), std_order.end(), 0);
// Check if the sparse tensor is already ordered correctly
- sparse::SparseTensor input_sp(input_ind, input_val, input_shape, std_order);
+ sparse::SparseTensor input_sp;
+ OP_REQUIRES_OK(
+ context, sparse::SparseTensor::Create(input_ind, input_val, input_shape,
+ std_order, &input_sp));
if (input_sp.IndicesValid().ok()) {
context->set_output(0, input_sp.indices());
context->set_output(1, input_sp.values());
} else {
// Deep-copy the input Tensors, then reorder in-place
- sparse::SparseTensor reordered_sp(tensor::DeepCopy(input_ind),
- tensor::DeepCopy(input_val),
- input_shape);
+ sparse::SparseTensor reordered_sp;
+ OP_REQUIRES_OK(context,
+ sparse::SparseTensor::Create(tensor::DeepCopy(input_ind),
+ tensor::DeepCopy(input_val),
+ input_shape, &reordered_sp));
reordered_sp.Reorder<T>(std_order);
context->set_output(0, reordered_sp.indices());
context->set_output(1, reordered_sp.values());
diff --git a/tensorflow/core/kernels/sparse_slice_grad_op.cc b/tensorflow/core/kernels/sparse_slice_grad_op.cc
index 90a39ed818..f92b6414ff 100644
--- a/tensorflow/core/kernels/sparse_slice_grad_op.cc
+++ b/tensorflow/core/kernels/sparse_slice_grad_op.cc
@@ -18,7 +18,6 @@ limitations under the License.
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
-#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
diff --git a/tensorflow/core/kernels/sparse_slice_op.cc b/tensorflow/core/kernels/sparse_slice_op.cc
index 10dc208ab6..6aaf4fd88f 100644
--- a/tensorflow/core/kernels/sparse_slice_op.cc
+++ b/tensorflow/core/kernels/sparse_slice_op.cc
@@ -66,8 +66,11 @@ class SparseSliceOp : public OpKernel {
"Expected size to be a vector of length ", input_dims,
" but got length ", input_size.NumElements()));
- sparse::SparseTensor sparse_tensor(input_indices, input_values,
- TensorShape(input_shape.vec<int64>()));
+ sparse::SparseTensor sparse_tensor;
+ OP_REQUIRES_OK(context,
+ sparse::SparseTensor::Create(
+ input_indices, input_values,
+ TensorShape(input_shape.vec<int64>()), &sparse_tensor));
const gtl::ArraySlice<int64> start(input_start.flat<int64>().data(),
input_dims);
diff --git a/tensorflow/core/kernels/sparse_softmax_op.cc b/tensorflow/core/kernels/sparse_softmax_op.cc
index 444a5f657a..dc3119bba4 100644
--- a/tensorflow/core/kernels/sparse_softmax_op.cc
+++ b/tensorflow/core/kernels/sparse_softmax_op.cc
@@ -69,8 +69,11 @@ class SparseSoftmaxOp : public OpKernel {
const int nnz = static_cast<int>(indices_t->dim_size(0));
const int rank = static_cast<int>(indices_t->dim_size(1));
- SparseTensor st(tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
- TensorShape(shape_t->flat<int64>()));
+ SparseTensor st;
+ OP_REQUIRES_OK(
+ context, SparseTensor::Create(
+ tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
+ TensorShape(shape_t->flat<int64>()), &st));
Tensor *output_values = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({nnz}),
diff --git a/tensorflow/core/kernels/sparse_split_op.cc b/tensorflow/core/kernels/sparse_split_op.cc
index 67dcf05a6c..3d02be47cb 100644
--- a/tensorflow/core/kernels/sparse_split_op.cc
+++ b/tensorflow/core/kernels/sparse_split_op.cc
@@ -63,10 +63,16 @@ class SparseSplitOp : public OpKernel {
input_shape.vec<int64>()(split_dim), "), got ",
num_split_));
- sparse::SparseTensor sparse_tensor(input_indices, input_values,
- TensorShape(input_shape.vec<int64>()));
- const std::vector<sparse::SparseTensor> outputs =
- sparse::SparseTensor::Split<T>(sparse_tensor, split_dim, num_split_);
+ sparse::SparseTensor sparse_tensor;
+ OP_REQUIRES_OK(context,
+ sparse::SparseTensor::Create(
+ input_indices, input_values,
+ TensorShape(input_shape.vec<int64>()), &sparse_tensor));
+
+ std::vector<sparse::SparseTensor> outputs;
+ OP_REQUIRES_OK(context,
+ sparse::SparseTensor::Split<T>(sparse_tensor, split_dim,
+ num_split_, &outputs));
for (int slice_index = 0; slice_index < num_split_; ++slice_index) {
context->set_output(slice_index, outputs[slice_index].indices());
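
SparseTensor::Split likewise moves to the Status-returning style, delivering the slices through an output vector. A small worked example of the expected result, assuming the documented tf.sparse_split semantics (near-equal slices along split_dim, with indices re-based within each slice):

// Input SparseTensor: dense_shape [4, 2],
//   indices = {{0,0}, {1,1}, {2,0}, {3,1}}, values = {a, b, c, d}.
// After Split<T>(sparse_tensor, /*split_dim=*/0, /*num_split=*/2, &outputs):
//   outputs[0]: dense_shape [2, 2], indices {{0,0}, {1,1}}, values {a, b}
//   outputs[1]: dense_shape [2, 2], indices {{0,0}, {1,1}}, values {c, d}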
diff --git a/tensorflow/core/kernels/sparse_tensors_map_ops.cc b/tensorflow/core/kernels/sparse_tensors_map_ops.cc
index 2aadd92475..74fa3a15f0 100644
--- a/tensorflow/core/kernels/sparse_tensors_map_ops.cc
+++ b/tensorflow/core/kernels/sparse_tensors_map_ops.cc
@@ -93,8 +93,9 @@ class SparseTensorsMap : public ResourceBase {
const Tensor* ix = sp_iter->second.indices.AccessTensor(ctx);
const Tensor* values = sp_iter->second.values.AccessTensor(ctx);
const auto& shape = sp_iter->second.shape;
- sparse_tensors->emplace_back(*ix, *values, shape);
-
+ SparseTensor tensor;
+ TF_RETURN_IF_ERROR(SparseTensor::Create(*ix, *values, shape, &tensor));
+ sparse_tensors->push_back(std::move(tensor));
sp_tensors_.erase(sp_iter);
}
}
@@ -195,7 +196,9 @@ class AddSparseToTensorsMapOp : public SparseTensorAccessingOp {
TensorShapeUtils::MakeShape(input_shape->vec<int64>().data(),
input_shape->NumElements(),
&input_shape_object));
- SparseTensor st(*input_indices, *input_values, input_shape_object);
+ SparseTensor st;
+ OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
+ input_shape_object, &st));
int64 handle;
OP_REQUIRES_OK(context, map->AddSparseTensor(context, st, &handle));
@@ -253,8 +256,10 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp {
TensorShape tensor_input_shape(input_shape->vec<int64>());
gtl::InlinedVector<int64, 8> std_order(rank);
std::iota(std_order.begin(), std_order.end(), 0);
- SparseTensor input_st(*input_indices, *input_values, tensor_input_shape,
- std_order);
+ SparseTensor input_st;
+ OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
+ tensor_input_shape, std_order,
+ &input_st));
auto input_shape_t = input_shape->vec<int64>();
const int64 N = input_shape_t(0);
@@ -300,7 +305,10 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp {
output_values_t(i) = values(i);
}
- SparseTensor st_i(output_indices, output_values, output_shape);
+ SparseTensor st_i;
+ OP_REQUIRES_OK(context,
+ SparseTensor::Create(output_indices, output_values,
+ output_shape, &st_i));
int64 handle;
OP_REQUIRES_OK(context, map->AddSparseTensor(context, st_i, &handle));
sparse_handles_t(b) = handle;
@@ -311,7 +319,9 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp {
if (visited.size() < N) {
Tensor empty_indices(DT_INT64, {0, rank - 1});
Tensor empty_values(DataTypeToEnum<T>::value, {0});
- SparseTensor empty_st(empty_indices, empty_values, output_shape);
+ SparseTensor empty_st;
+ OP_REQUIRES_OK(context, SparseTensor::Create(empty_indices, empty_values,
+ output_shape, &empty_st));
for (int64 b = 0; b < N; ++b) {
// We skipped this batch entry.
@@ -466,13 +476,15 @@ class TakeManySparseFromTensorsMapOp : public SparseTensorAccessingOp {
std::vector<SparseTensor> tensors_to_concat;
tensors_to_concat.reserve(N);
for (int i = 0; i < N; ++i) {
- tensors_to_concat.emplace_back(std::move(indices_to_concat[i]),
- std::move(values_to_concat[i]),
- preconcat_shape, std_order);
+ SparseTensor tensor;
+ OP_REQUIRES_OK(context,
+ SparseTensor::Create(std::move(indices_to_concat[i]),
+ std::move(values_to_concat[i]),
+ preconcat_shape, std_order, &tensor));
+ tensors_to_concat.push_back(std::move(tensor));
}
- SparseTensor output(SparseTensor::Concat<T>(tensors_to_concat));
-
+ auto output = SparseTensor::Concat<T>(tensors_to_concat);
Tensor final_output_shape(DT_INT64, TensorShape({output.dims()}));
std::copy_n(output.shape().data(), output.dims(),
diff --git a/tensorflow/core/kernels/sparse_to_dense_op.cc b/tensorflow/core/kernels/sparse_to_dense_op.cc
index ba3da21a43..f79a4d0494 100644
--- a/tensorflow/core/kernels/sparse_to_dense_op.cc
+++ b/tensorflow/core/kernels/sparse_to_dense_op.cc
@@ -119,8 +119,10 @@ class SparseToDense : public OpKernel {
// Assume SparseTensor is lexicographically sorted.
gtl::InlinedVector<int64, 8> order(output->shape().dims());
std::iota(order.begin(), order.end(), 0);
- sparse::SparseTensor st(indices_shaped, sparse_values_b, output->shape(),
- order);
+ sparse::SparseTensor st;
+ OP_REQUIRES_OK(c,
+ sparse::SparseTensor::Create(indices_shaped, sparse_values_b,
+ output->shape(), order, &st));
if (validate_indices_) {
OP_REQUIRES_OK(c, st.IndicesValid());
diff --git a/tensorflow/core/kernels/tensor_array_ops.cc b/tensorflow/core/kernels/tensor_array_ops.cc
index 37803ec775..5aa5d20b1a 100644
--- a/tensorflow/core/kernels/tensor_array_ops.cc
+++ b/tensorflow/core/kernels/tensor_array_ops.cc
@@ -735,6 +735,7 @@ class TensorArrayPackOrGatherOp : public OpKernel {
TensorArrayPackOrGatherOp<CPUDevice, type, false /* LEGACY_PACK */>);
TF_CALL_POD_STRING_TYPES(REGISTER_GATHER_AND_PACK);
+TF_CALL_variant(REGISTER_GATHER_AND_PACK);
REGISTER_GATHER_AND_PACK(quint8);
REGISTER_GATHER_AND_PACK(qint8);
REGISTER_GATHER_AND_PACK(qint32);
diff --git a/tensorflow/core/kernels/unary_ops_composition.cc b/tensorflow/core/kernels/unary_ops_composition.cc
new file mode 100644
index 0000000000..0c2cb1b39f
--- /dev/null
+++ b/tensorflow/core/kernels/unary_ops_composition.cc
@@ -0,0 +1,432 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// See docs in ../ops/math_ops.cc.
+
+#define EIGEN_USE_THREADS
+
+#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/kernels/cwise_ops.h"
+#include "tensorflow/core/kernels/cwise_ops_common.h"
+#include "tensorflow/core/kernels/relu_op_functor.h"
+
+namespace tensorflow {
+
+template <typename T>
+class UnaryOpsComposition; // forward declare kernel
+
+template <typename T>
+struct UnaryOpsCompositionSupport;
+
+template <typename T>
+struct UnaryOpsCompositionBase {
+ using InputBuffer = typename TTypes<T>::ConstFlat;
+ using OutputBuffer = typename TTypes<T>::Flat;
+
+ using ComputeFn = void (*)(const InputBuffer&, OutputBuffer*);
+
+ struct ComputeFnRegistration {
+ ComputeFn compute_fn;
+ int cost;
+ };
+
+ bool HasComputeFn(const string& name) {
+ return compute_fns.find(name) != compute_fns.end();
+ }
+
+ protected:
+ void RegisterComputeFn(const string& name, ComputeFn compute_fn, int cost) {
+ VLOG(5) << "Register compute fn: name=" << name << " cost=" << cost;
+ compute_fns[name] = {compute_fn, cost};
+ }
+
+ private:
+ friend class UnaryOpsComposition<T>;
+
+ Status ExportComputeFns(const std::vector<string>& op_names,
+ std::vector<ComputeFn>* fns, int* cost) {
+ for (const string& op_name : op_names) {
+ auto it = compute_fns.find(op_name);
+ if (it == compute_fns.end())
+ return errors::InvalidArgument(
+ "Do not have a compute function registered for op: ", op_name);
+
+ const ComputeFnRegistration& reg = it->second;
+ fns->push_back(reg.compute_fn);
+ *cost += reg.cost;
+ }
+
+ return Status::OK();
+ }
+
+ std::unordered_map<string, ComputeFnRegistration> compute_fns;
+};
+
+template <typename T>
+class UnaryOpsComposition : public OpKernel {
+ public:
+ using Kernel = UnaryOpsComposition<T>;
+
+ using Scalar = T;
+ using Packet = typename Eigen::internal::packet_traits<T>::type;
+
+ using Support = UnaryOpsCompositionSupport<T>;
+
+ using InputBuffer = typename Support::InputBuffer;
+ using OutputBuffer = typename Support::OutputBuffer;
+ using ComputeFn = typename Support::ComputeFn;
+
+ explicit UnaryOpsComposition(OpKernelConstruction* context)
+ : OpKernel(context) {
+ OP_REQUIRES_OK(context, context->GetAttr("op_names", &op_names_));
+
+ OP_REQUIRES(context, !op_names_.empty(),
+ errors::InvalidArgument(
+ "Unary op composition must have at least one op"));
+
+ OP_REQUIRES_OK(context,
+ support_.ExportComputeFns(op_names_, &fns_, &cost_));
+
+ VLOG(2) << "Composed unary op: [" << str_util::Join(op_names_, ", ")
+ << "]; cost=" << cost_;
+ }
+
+ void Compute(OpKernelContext* ctx) override {
+ const Tensor& in = ctx->input(0);
+ Tensor* out = nullptr;
+ OP_REQUIRES_OK(
+ ctx, ctx->forward_input_or_allocate_output({0}, 0, in.shape(), &out));
+
+ InputBuffer in_flat = in.flat<T>();
+ OutputBuffer out_flat = out->flat<T>();
+
+ const std::size_t num_fns = fns_.size();
+ auto compute_fn = [this, &in_flat, &out_flat, &num_fns](int64 begin,
+ int64 end) {
+ int64 len = end - begin;
+ const InputBuffer in_slice(in_flat.data() + begin, len);
+ const InputBuffer scratch_slice(out_flat.data() + begin, len);
+ OutputBuffer out_slice(out_flat.data() + begin, len);
+
+ fns_[0](in_slice, &out_slice);
+ for (int i = 1; i < num_fns; ++i) {
+ fns_[i](scratch_slice, &out_slice);
+ }
+ };
+
+ const CPUDevice& device = ctx->eigen_device<CPUDevice>();
+ const int kOverheadCycles = static_cast<int>(num_fns) * 10;
+ Eigen::TensorOpCost cost(/*bytes_loaded=*/sizeof(T) * num_fns,
+ /*bytes_stored=*/sizeof(T) * num_fns,
+ kOverheadCycles + cost_);
+ device.parallelFor(in.NumElements(), cost, AlignBlockSize,
+ std::move(compute_fn));
+ }
+
+ private:
+ static const int kPacketSize = Eigen::internal::unpacket_traits<Packet>::size;
+
+ static inline int64 AlignBlockSize(int64 block_size) {
+    // Align the block size to the packet size, accounting for unrolling above.
+ if (block_size >= 16 * kPacketSize) {
+ return (block_size + 4 * kPacketSize - 1) & ~(4 * kPacketSize - 1);
+ }
+ // Aligning to 4 * PacketSize would increase block size by more than 25%.
+ return (block_size + kPacketSize - 1) & ~(kPacketSize - 1);
+ }
+
+ Support support_;
+
+ std::vector<string> op_names_;
+ std::vector<ComputeFn> fns_;
+ int cost_ = 0;
+};
+
+// Register compute functions for UnaryOp functors.
+#define REGISTER_COMPUTE_FN_HELPER(name, functor) \
+ static_assert(std::is_same<functor::in_type, functor::out_type>::value, \
+ "Functor must have same input and output types"); \
+ \
+ static inline void Compute##name(const InputBuffer& in, OutputBuffer* out) { \
+ *out = in.unaryExpr(functor::func()); \
+ } \
+ static inline int Cost##name() { \
+ return Eigen::internal::functor_traits<functor::func>::Cost; \
+ }
+
+// Register compute function for the Relu/Relu6/Elu/Selu.
+#define REGISTER_RELU_HELPER() \
+ template <typename T> \
+ using functor_traits = Eigen::internal::functor_traits<T>; \
+ \
+ static inline void ComputeRelu(const InputBuffer& in, OutputBuffer* out) { \
+ auto relu = functor::Relu<Eigen::DefaultDevice, T>(); \
+ relu(Eigen::DefaultDevice(), in, *out); \
+ } \
+ \
+ static inline int CostRelu() { \
+ return functor_traits<Eigen::internal::scalar_max_op<T>>::Cost; \
+ } \
+ \
+ static inline void ComputeRelu6(const InputBuffer& in, OutputBuffer* out) { \
+ auto relu6 = functor::Relu6<Eigen::DefaultDevice, T>(); \
+ relu6(Eigen::DefaultDevice(), in, *out); \
+ } \
+ \
+ static inline int CostRelu6() { \
+ return functor_traits<Eigen::internal::scalar_max_op<T>>::Cost + \
+ functor_traits<Eigen::internal::scalar_min_op<T>>::Cost; \
+ } \
+ static inline void ComputeElu(const InputBuffer& in, OutputBuffer* out) { \
+ auto elu = functor::Elu<Eigen::DefaultDevice, T>(); \
+ elu(Eigen::DefaultDevice(), in, *out); \
+ } \
+ \
+ static inline int CostElu() { \
+ return functor_traits<Eigen::internal::scalar_exp_op<T>>::Cost + \
+ Eigen::NumTraits<T>::MulCost; \
+ } \
+ static inline void ComputeSelu(const InputBuffer& in, OutputBuffer* out) { \
+ auto selu = functor::Selu<Eigen::DefaultDevice, T>(); \
+ selu(Eigen::DefaultDevice(), in, *out); \
+ } \
+ \
+ static inline int CostSelu() { \
+ return 2 * (functor_traits<Eigen::internal::scalar_exp_op<T>>::Cost + \
+ Eigen::NumTraits<T>::MulCost); \
+ }
+
+#define REGISTER_COMPUTE_FN(func) \
+ RegisterComputeFn(#func, Compute##func, Cost##func());
+
+template <>
+struct UnaryOpsCompositionSupport<float> : UnaryOpsCompositionBase<float> {
+ using T = float;
+
+ UnaryOpsCompositionSupport() {
+ // UnaryOp functors.
+ REGISTER_COMPUTE_FN(Abs);
+ REGISTER_COMPUTE_FN(Acos);
+ REGISTER_COMPUTE_FN(Acosh);
+ REGISTER_COMPUTE_FN(Asin);
+ REGISTER_COMPUTE_FN(Asinh);
+ REGISTER_COMPUTE_FN(Atan);
+ REGISTER_COMPUTE_FN(Atanh);
+ REGISTER_COMPUTE_FN(Ceil);
+ REGISTER_COMPUTE_FN(Cos);
+ REGISTER_COMPUTE_FN(Cosh);
+ REGISTER_COMPUTE_FN(Expm1);
+ REGISTER_COMPUTE_FN(Exp);
+ REGISTER_COMPUTE_FN(Floor);
+ REGISTER_COMPUTE_FN(Inv);
+ REGISTER_COMPUTE_FN(Log);
+ REGISTER_COMPUTE_FN(Log1p);
+ REGISTER_COMPUTE_FN(Neg);
+ REGISTER_COMPUTE_FN(Reciprocal);
+ REGISTER_COMPUTE_FN(Rint);
+ REGISTER_COMPUTE_FN(Round);
+ REGISTER_COMPUTE_FN(Rsqrt);
+ REGISTER_COMPUTE_FN(Sigmoid);
+ REGISTER_COMPUTE_FN(Sin);
+ REGISTER_COMPUTE_FN(Sinh);
+ REGISTER_COMPUTE_FN(Sqrt);
+ REGISTER_COMPUTE_FN(Square);
+ REGISTER_COMPUTE_FN(Tan);
+ REGISTER_COMPUTE_FN(Tanh);
+
+ // Additional compute functions not defined via UnaryOp functors.
+ REGISTER_COMPUTE_FN(Elu);
+ REGISTER_COMPUTE_FN(Relu);
+ REGISTER_COMPUTE_FN(Relu6);
+ REGISTER_COMPUTE_FN(Selu);
+ }
+
+ REGISTER_RELU_HELPER();
+
+ // clang-format off
+ REGISTER_COMPUTE_FN_HELPER(Abs, functor::abs<T>);
+ REGISTER_COMPUTE_FN_HELPER(Acos, functor::acos<T>);
+ REGISTER_COMPUTE_FN_HELPER(Acosh, functor::acosh<T>);
+ REGISTER_COMPUTE_FN_HELPER(Asin, functor::asin<T>);
+ REGISTER_COMPUTE_FN_HELPER(Asinh, functor::asinh<T>);
+ REGISTER_COMPUTE_FN_HELPER(Atan, functor::atan<T>);
+ REGISTER_COMPUTE_FN_HELPER(Atanh, functor::atanh<T>);
+ REGISTER_COMPUTE_FN_HELPER(Ceil, functor::ceil<T>);
+ REGISTER_COMPUTE_FN_HELPER(Cos, functor::cos<T>);
+ REGISTER_COMPUTE_FN_HELPER(Cosh, functor::cosh<T>);
+ REGISTER_COMPUTE_FN_HELPER(Expm1, functor::expm1<T>);
+ REGISTER_COMPUTE_FN_HELPER(Exp, functor::exp<T>);
+ REGISTER_COMPUTE_FN_HELPER(Floor, functor::floor<T>);
+ REGISTER_COMPUTE_FN_HELPER(Inv, functor::inverse<T>);
+ REGISTER_COMPUTE_FN_HELPER(Log, functor::log<T>);
+ REGISTER_COMPUTE_FN_HELPER(Log1p, functor::log1p<T>);
+ REGISTER_COMPUTE_FN_HELPER(Neg, functor::neg<T>);
+ REGISTER_COMPUTE_FN_HELPER(Reciprocal, functor::inverse<T>);
+ REGISTER_COMPUTE_FN_HELPER(Rint, functor::rint<T>);
+ REGISTER_COMPUTE_FN_HELPER(Round, functor::round<T>);
+ REGISTER_COMPUTE_FN_HELPER(Rsqrt, functor::rsqrt<T>);
+ REGISTER_COMPUTE_FN_HELPER(Sigmoid, functor::sigmoid<T>);
+ REGISTER_COMPUTE_FN_HELPER(Sin, functor::sin<T>);
+ REGISTER_COMPUTE_FN_HELPER(Sinh, functor::sinh<T>);
+ REGISTER_COMPUTE_FN_HELPER(Sqrt, functor::sqrt<T>);
+ REGISTER_COMPUTE_FN_HELPER(Square, functor::square<T>);
+ REGISTER_COMPUTE_FN_HELPER(Tan, functor::tan<T>);
+ REGISTER_COMPUTE_FN_HELPER(Tanh, functor::tanh<T>);
+ // clang-format on
+};
+
+template <>
+struct UnaryOpsCompositionSupport<Eigen::half>
+ : UnaryOpsCompositionBase<Eigen::half> {
+ using T = Eigen::half;
+
+ UnaryOpsCompositionSupport() {
+ REGISTER_COMPUTE_FN(Abs);
+ REGISTER_COMPUTE_FN(Ceil);
+ REGISTER_COMPUTE_FN(Cos);
+ REGISTER_COMPUTE_FN(Expm1);
+ REGISTER_COMPUTE_FN(Exp);
+ REGISTER_COMPUTE_FN(Floor);
+ REGISTER_COMPUTE_FN(Inv);
+ REGISTER_COMPUTE_FN(Log);
+ REGISTER_COMPUTE_FN(Log1p);
+ REGISTER_COMPUTE_FN(Neg);
+ REGISTER_COMPUTE_FN(Reciprocal);
+ REGISTER_COMPUTE_FN(Round);
+ REGISTER_COMPUTE_FN(Rsqrt);
+ REGISTER_COMPUTE_FN(Sigmoid);
+ REGISTER_COMPUTE_FN(Sin);
+ REGISTER_COMPUTE_FN(Sqrt);
+ REGISTER_COMPUTE_FN(Square);
+ REGISTER_COMPUTE_FN(Tanh);
+ // Additional compute functions not defined via UnaryOp functors.
+ REGISTER_COMPUTE_FN(Elu);
+ REGISTER_COMPUTE_FN(Relu);
+ REGISTER_COMPUTE_FN(Relu6);
+ REGISTER_COMPUTE_FN(Selu);
+ }
+
+ REGISTER_RELU_HELPER();
+
+ // clang-format off
+ REGISTER_COMPUTE_FN_HELPER(Abs, functor::abs<T>);
+ REGISTER_COMPUTE_FN_HELPER(Ceil, functor::ceil<T>);
+ REGISTER_COMPUTE_FN_HELPER(Cos, functor::cos<T>);
+ REGISTER_COMPUTE_FN_HELPER(Expm1, functor::expm1<T>);
+ REGISTER_COMPUTE_FN_HELPER(Exp, functor::exp<T>);
+ REGISTER_COMPUTE_FN_HELPER(Floor, functor::floor<T>);
+ REGISTER_COMPUTE_FN_HELPER(Inv, functor::inverse<T>);
+ REGISTER_COMPUTE_FN_HELPER(Log, functor::log<T>);
+ REGISTER_COMPUTE_FN_HELPER(Log1p, functor::log1p<T>);
+ REGISTER_COMPUTE_FN_HELPER(Neg, functor::neg<T>);
+ REGISTER_COMPUTE_FN_HELPER(Reciprocal, functor::inverse<T>);
+ REGISTER_COMPUTE_FN_HELPER(Round, functor::round<T>);
+ REGISTER_COMPUTE_FN_HELPER(Rsqrt, functor::rsqrt<T>);
+ REGISTER_COMPUTE_FN_HELPER(Sigmoid, functor::sigmoid<T>);
+ REGISTER_COMPUTE_FN_HELPER(Sin, functor::sin<T>);
+ REGISTER_COMPUTE_FN_HELPER(Sqrt, functor::sqrt<T>);
+ REGISTER_COMPUTE_FN_HELPER(Square, functor::square<T>);
+ REGISTER_COMPUTE_FN_HELPER(Tanh, functor::tanh<T>);
+ // clang-format on
+};
+
+template <>
+struct UnaryOpsCompositionSupport<double> : UnaryOpsCompositionBase<double> {
+ using T = double;
+
+ UnaryOpsCompositionSupport() {
+ REGISTER_COMPUTE_FN(Abs);
+ REGISTER_COMPUTE_FN(Acos);
+ REGISTER_COMPUTE_FN(Acosh);
+ REGISTER_COMPUTE_FN(Asin);
+ REGISTER_COMPUTE_FN(Asinh);
+ REGISTER_COMPUTE_FN(Atan);
+ REGISTER_COMPUTE_FN(Atanh);
+ REGISTER_COMPUTE_FN(Ceil);
+ REGISTER_COMPUTE_FN(Cos);
+ REGISTER_COMPUTE_FN(Cosh);
+ REGISTER_COMPUTE_FN(Expm1);
+ REGISTER_COMPUTE_FN(Exp);
+ REGISTER_COMPUTE_FN(Floor);
+ REGISTER_COMPUTE_FN(Inv);
+ REGISTER_COMPUTE_FN(Log);
+ REGISTER_COMPUTE_FN(Log1p);
+ REGISTER_COMPUTE_FN(Neg);
+ REGISTER_COMPUTE_FN(Reciprocal);
+ REGISTER_COMPUTE_FN(Rint);
+ REGISTER_COMPUTE_FN(Round);
+ REGISTER_COMPUTE_FN(Rsqrt);
+ REGISTER_COMPUTE_FN(Sigmoid);
+ REGISTER_COMPUTE_FN(Sin);
+ REGISTER_COMPUTE_FN(Sinh);
+ REGISTER_COMPUTE_FN(Sqrt);
+ REGISTER_COMPUTE_FN(Square);
+ REGISTER_COMPUTE_FN(Tan);
+ REGISTER_COMPUTE_FN(Tanh);
+ // Additional compute functions not defined via UnaryOp functors.
+ REGISTER_COMPUTE_FN(Elu);
+ REGISTER_COMPUTE_FN(Relu);
+ REGISTER_COMPUTE_FN(Relu6);
+ REGISTER_COMPUTE_FN(Selu);
+ }
+
+ REGISTER_RELU_HELPER();
+
+ // clang-format off
+ REGISTER_COMPUTE_FN_HELPER(Abs, functor::abs<T>);
+ REGISTER_COMPUTE_FN_HELPER(Acos, functor::acos<T>);
+ REGISTER_COMPUTE_FN_HELPER(Acosh, functor::acosh<T>);
+ REGISTER_COMPUTE_FN_HELPER(Asin, functor::asin<T>);
+ REGISTER_COMPUTE_FN_HELPER(Asinh, functor::asinh<T>);
+ REGISTER_COMPUTE_FN_HELPER(Atan, functor::atan<T>);
+ REGISTER_COMPUTE_FN_HELPER(Atanh, functor::atanh<T>);
+ REGISTER_COMPUTE_FN_HELPER(Ceil, functor::ceil<T>);
+ REGISTER_COMPUTE_FN_HELPER(Cos, functor::cos<T>);
+ REGISTER_COMPUTE_FN_HELPER(Cosh, functor::cosh<T>);
+ REGISTER_COMPUTE_FN_HELPER(Expm1, functor::expm1<T>);
+ REGISTER_COMPUTE_FN_HELPER(Exp, functor::exp<T>);
+ REGISTER_COMPUTE_FN_HELPER(Floor, functor::floor<T>);
+ REGISTER_COMPUTE_FN_HELPER(Inv, functor::inverse<T>);
+ REGISTER_COMPUTE_FN_HELPER(Log, functor::log<T>);
+ REGISTER_COMPUTE_FN_HELPER(Log1p, functor::log1p<T>);
+ REGISTER_COMPUTE_FN_HELPER(Neg, functor::neg<T>);
+ REGISTER_COMPUTE_FN_HELPER(Reciprocal, functor::inverse<T>);
+ REGISTER_COMPUTE_FN_HELPER(Rint, functor::rint<T>);
+ REGISTER_COMPUTE_FN_HELPER(Round, functor::round<T>);
+ REGISTER_COMPUTE_FN_HELPER(Rsqrt, functor::rsqrt<T>);
+ REGISTER_COMPUTE_FN_HELPER(Sigmoid, functor::sigmoid<T>);
+ REGISTER_COMPUTE_FN_HELPER(Sin, functor::sin<T>);
+ REGISTER_COMPUTE_FN_HELPER(Sinh, functor::sinh<T>);
+ REGISTER_COMPUTE_FN_HELPER(Sqrt, functor::sqrt<T>);
+ REGISTER_COMPUTE_FN_HELPER(Square, functor::square<T>);
+ REGISTER_COMPUTE_FN_HELPER(Tan, functor::tan<T>);
+ REGISTER_COMPUTE_FN_HELPER(Tanh, functor::tanh<T>);
+ // clang-format on
+};
+
+// Register the CPU kernels.
+#define REGISTER_CPU(T) \
+ REGISTER_KERNEL_BUILDER( \
+ Name("_UnaryOpsComposition").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
+ UnaryOpsComposition<T>);
+
+REGISTER_CPU(float);
+REGISTER_CPU(Eigen::half);
+REGISTER_CPU(double);
+
+#undef REGISTER_CPU
+
+} // namespace tensorflow
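
Two numeric details in the kernel above are easy to misread: AlignBlockSize rounds a block up to four packets only when the block is large enough that the padding stays under roughly 25%, and the per-element TensorOpCost scales with the number of fused functions. A worked example, assuming float on an AVX build where kPacketSize == 8:

// Large block: >= 16 * 8 = 128 elements, round up to a multiple of 4 * 8 = 32.
//   AlignBlockSize(150) = (150 + 31) & ~31 = 181 & ~31 = 160
// Small block: round up to a single packet (multiple of 8) only.
//   AlignBlockSize(20)  = (20 + 7) & ~7   = 27 & ~7   = 24
// Cost for a 3-function composition over float:
//   bytes_loaded = bytes_stored = 4 * 3 = 12, compute = 3 * 10 + cost_.

Note also that scratch_slice in Compute aliases out_flat, so after the first function writes out_slice, each subsequent function reads its input from the output buffer in place; no intermediate tensor is materialized.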
diff --git a/tensorflow/core/kernels/unary_ops_composition_test.cc b/tensorflow/core/kernels/unary_ops_composition_test.cc
new file mode 100644
index 0000000000..4be3555609
--- /dev/null
+++ b/tensorflow/core/kernels/unary_ops_composition_test.cc
@@ -0,0 +1,179 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <cmath>
+
+#include "tensorflow/cc/ops/standard_ops.h"
+#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
+#include "tensorflow/core/framework/fake_input.h"
+#include "tensorflow/core/framework/node_def_builder.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/graph/node_builder.h"
+#include "tensorflow/core/kernels/ops_testutil.h"
+#include "tensorflow/core/kernels/ops_util.h"
+#include "tensorflow/core/platform/test.h"
+#include "tensorflow/core/platform/test_benchmark.h"
+
+namespace tensorflow {
+namespace {
+
+class UnaryOpsCompositionTest : public OpsTestBase {
+ protected:
+ template <typename T>
+  void RunComposedOp(const std::vector<string>& op_names, T input, T expected) {
+ TF_ASSERT_OK(NodeDefBuilder("unary_op_composition", "_UnaryOpsComposition")
+ .Input(FakeInput(DataTypeToEnum<T>::v()))
+ .Attr("T", DataTypeToEnum<T>::v())
+ .Attr("op_names", op_names)
+ .Finalize(node_def()));
+ TF_ASSERT_OK(InitOp());
+
+ TensorShape shape({});
+ AddInputFromArray<T>(shape, {input});
+
+ TF_ASSERT_OK(RunOpKernel());
+
+ Tensor expected_tensor(allocator(), DataTypeToEnum<T>::value, shape);
+ test::FillValues<T>(&expected_tensor, {expected});
+ test::ExpectClose(expected_tensor, *GetOutput(0));
+ }
+};
+
+TEST_F(UnaryOpsCompositionTest, Compose_Sqrt_Sqrt_F) {
+ RunComposedOp<float>({"Sqrt", "Sqrt"}, 81.0, 3.0);
+}
+
+TEST_F(UnaryOpsCompositionTest, Compose_Sqrt_Sqrt_D) {
+ RunComposedOp<double>({"Sqrt", "Sqrt"}, 81.0, 3.0);
+}
+
+TEST_F(UnaryOpsCompositionTest, Compose_Sqrt_Sin_F) {
+ RunComposedOp<float>({"Sqrt", "Sin"}, 81.0, std::sin(9.0f));
+}
+
+TEST_F(UnaryOpsCompositionTest, Compose_Cos_Acos_F) {
+ RunComposedOp<float>({"Cos", "Acos"}, 0.5, std::acos(std::cos(0.5f)));
+}
+
+TEST_F(UnaryOpsCompositionTest, Compose_Tanh_Relu_F) {
+ RunComposedOp<float>({"Tanh", "Relu"}, 0.5, std::max(0.0f, std::tanh(0.5f)));
+}
+
+TEST_F(UnaryOpsCompositionTest, Compose_Tanh_Relu_D) {
+ RunComposedOp<double>({"Tanh", "Relu"}, 0.5, std::max(0.0, std::tanh(0.5)));
+}
+
+TEST_F(UnaryOpsCompositionTest, Compose_Tanh_Relu6_F) {
+ RunComposedOp<float>({"Relu6"}, 11.0f, 6.0f);
+}
+
+// Performance benchmarks below.
+
+string Function(int i) {
+ std::vector<string> ops = {"Tanh", "Relu", "Sigmoid", "Sqrt", "Log", "Exp"};
+ return ops[i % ops.size()];
+}
+
+// Unary ops chained together as a separate graph nodes.
+static Graph* UnaryOpsChain(int tensor_size, int repeat_graph,
+ int num_functions) {
+ Graph* g = new Graph(OpRegistry::Global());
+
+ Tensor t(DT_FLOAT, TensorShape({tensor_size}));
+ t.flat<float>() = t.flat<float>().setRandom();
+
+ for (int i = 0; i < repeat_graph; ++i) {
+ Node* node = test::graph::Constant(g, t);
+ for (int j = 0; j < num_functions; ++j) {
+ TF_CHECK_OK(NodeBuilder(g->NewName("n"), Function(j))
+ .Input(node)
+ .Attr("T", DT_FLOAT)
+ .Finalize(g, &node));
+ }
+ }
+
+ return g;
+}
+
+#define BM_UnaryOpsChain(N, R, F, type) \
+ static void BM_UnaryOpsChain##_##type##_##N##_##R##_##F(int iters) { \
+ testing::ItemsProcessed(static_cast<int64>(iters) * N * R * F); \
+ test::Benchmark(#type, UnaryOpsChain(N, R, F)).Run(iters); \
+ } \
+ BENCHMARK(BM_UnaryOpsChain##_##type##_##N##_##R##_##F);
+
+// Unary ops fused together.
+static Graph* UnaryOpsCompo(int tensor_size, int repeat_graph,
+ int num_functions) {
+ Graph* g = new Graph(OpRegistry::Global());
+
+ Tensor t(DT_FLOAT, TensorShape({tensor_size}));
+ t.flat<float>() = t.flat<float>().setRandom();
+
+ std::vector<string> functions;
+ for (int j = 0; j < num_functions; ++j) {
+ functions.push_back(Function(j));
+ }
+
+ for (int i = 0; i < repeat_graph; ++i) {
+ Node* node = test::graph::Constant(g, t);
+ TF_CHECK_OK(NodeBuilder(g->NewName("n"), "_UnaryOpsComposition")
+ .Input(node)
+ .Attr("T", DT_FLOAT)
+ .Attr("op_names", functions)
+ .Finalize(g, &node));
+ }
+
+ return g;
+}
+
+#define BM_UnaryOpsCompo(N, R, F, type) \
+ static void BM_UnaryOpsCompo##_##type##_##N##_##R##_##F(int iters) { \
+ testing::ItemsProcessed(static_cast<int64>(iters) * N * R * F); \
+ test::Benchmark(#type, UnaryOpsCompo(N, R, F)).Run(iters); \
+ } \
+ BENCHMARK(BM_UnaryOpsCompo##_##type##_##N##_##R##_##F);
+
+// BenchmarkName(tensor_size, repeat_graph, num_ops, type)
+
+BM_UnaryOpsChain(1000, 25, 2, cpu);
+BM_UnaryOpsCompo(1000, 25, 2, cpu);
+
+BM_UnaryOpsChain(1000, 25, 5, cpu);
+BM_UnaryOpsCompo(1000, 25, 5, cpu);
+
+BM_UnaryOpsChain(1000, 25, 10, cpu);
+BM_UnaryOpsCompo(1000, 25, 10, cpu);
+
+BM_UnaryOpsChain(100000, 25, 2, cpu);
+BM_UnaryOpsCompo(100000, 25, 2, cpu);
+
+BM_UnaryOpsChain(100000, 25, 5, cpu);
+BM_UnaryOpsCompo(100000, 25, 5, cpu);
+
+BM_UnaryOpsChain(100000, 25, 10, cpu);
+BM_UnaryOpsCompo(100000, 25, 10, cpu);
+
+BM_UnaryOpsChain(1000000, 25, 2, cpu);
+BM_UnaryOpsCompo(1000000, 25, 2, cpu);
+
+BM_UnaryOpsChain(1000000, 25, 5, cpu);
+BM_UnaryOpsCompo(1000000, 25, 5, cpu);
+
+BM_UnaryOpsChain(1000000, 25, 10, cpu);
+BM_UnaryOpsCompo(1000000, 25, 10, cpu);
+
+} // namespace
+}  // namespace tensorflow
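
For readability, here is what one instantiation of the benchmark macro above expands to after token pasting, taking BM_UnaryOpsCompo(1000, 25, 2, cpu) as the example:

static void BM_UnaryOpsCompo_cpu_1000_25_2(int iters) {
  testing::ItemsProcessed(static_cast<int64>(iters) * 1000 * 25 * 2);
  test::Benchmark("cpu", UnaryOpsCompo(1000, 25, 2)).Run(iters);
}
BENCHMARK(BM_UnaryOpsCompo_cpu_1000_25_2);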
diff --git a/tensorflow/core/kernels/variable_ops.cc b/tensorflow/core/kernels/variable_ops.cc
index 7fd5809ca4..eadea18f76 100644
--- a/tensorflow/core/kernels/variable_ops.cc
+++ b/tensorflow/core/kernels/variable_ops.cc
@@ -73,9 +73,6 @@ void VariableOp::Compute(OpKernelContext* ctx) {
// here is valid because it owns a ref on var.
ctx->set_output_ref(0, var->mu(), var->tensor());
if (ctx->track_allocations() && var->tensor()->IsInitialized()) {
- AllocatorAttributes attr;
- attr.set_gpu_compatible(true);
- attr.set_nic_compatible(true);
ctx->record_persistent_memory_allocation(var->tensor()->AllocatedBytes());
}
var->Unref();
diff --git a/tensorflow/core/lib/bfloat16/bfloat16.h b/tensorflow/core/lib/bfloat16/bfloat16.h
index 2c0576ff10..1c130ba300 100644
--- a/tensorflow/core/lib/bfloat16/bfloat16.h
+++ b/tensorflow/core/lib/bfloat16/bfloat16.h
@@ -354,6 +354,18 @@ struct bfloat16 {
return x;
}
+ static bfloat16 highest() {
+ bfloat16 x;
+ x.value = 0x7F7F; // 0x1.FEp127
+ return x;
+ }
+
+ static bfloat16 lowest() {
+ bfloat16 x;
+ x.value = 0xFF7F; // -0x1.FEp127
+ return x;
+ }
+
uint16_t value;
// A value that represents "not a number".
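
A quick way to sanity-check the two bit patterns added above: bfloat16 is the high 16 bits of an IEEE float32, so shifting the pattern left by 16 and reinterpreting yields the equivalent float. A standalone sketch (not TF code):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const uint16_t kHighest = 0x7F7F;  // sign 0, exponent 0xFE, mantissa 0x7F
  const uint32_t f32_bits = static_cast<uint32_t>(kHighest) << 16;
  float f;
  std::memcpy(&f, &f32_bits, sizeof(f));  // type-pun without UB
  std::printf("%a\n", f);                 // 0x1.fep+127, about 3.3895e+38
  return 0;
}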
diff --git a/tensorflow/core/lib/db/sqlite_test.cc b/tensorflow/core/lib/db/sqlite_test.cc
index c099160b0c..1590055960 100644
--- a/tensorflow/core/lib/db/sqlite_test.cc
+++ b/tensorflow/core/lib/db/sqlite_test.cc
@@ -73,6 +73,7 @@ TEST_F(SqliteTest, InsertAndSelectDouble) {
EXPECT_EQ(1, stmt.ColumnInt(1));
}
+#ifdef SQLITE_ENABLE_JSON1
TEST_F(SqliteTest, Json1Extension) {
string s1 = "{\"key\": 42}";
string s2 = "{\"key\": \"value\"}";
@@ -85,6 +86,7 @@ TEST_F(SqliteTest, Json1Extension) {
EXPECT_EQ(42, stmt.ColumnInt(0));
EXPECT_EQ("value", stmt.ColumnString(1));
}
+#endif  // SQLITE_ENABLE_JSON1
TEST_F(SqliteTest, NulCharsInString) {
string s; // XXX: Want to write {2, '\0'} but not sure why not.
diff --git a/tensorflow/core/lib/gtl/manual_constructor_test.cc b/tensorflow/core/lib/gtl/manual_constructor_test.cc
index 4e832ce8d8..35cbc78b66 100644
--- a/tensorflow/core/lib/gtl/manual_constructor_test.cc
+++ b/tensorflow/core/lib/gtl/manual_constructor_test.cc
@@ -95,9 +95,6 @@ TEST(ManualConstructorTest, Alignment) {
#ifdef ARCH_K8
EXPECT_EQ(reinterpret_cast<intptr_t>(test2.b.get()) % 16, 0);
#endif
-#ifdef ARCH_PIII
- EXPECT_EQ(reinterpret_cast<intptr_t>(test2.b.get()) % 4, 0);
-#endif
}
TEST(ManualConstructorTest, DefaultInitialize) {
diff --git a/tensorflow/core/ops/boosted_trees_ops.cc b/tensorflow/core/ops/boosted_trees_ops.cc
index 88d6eaf819..01452b3e85 100644
--- a/tensorflow/core/ops/boosted_trees_ops.cc
+++ b/tensorflow/core/ops/boosted_trees_ops.cc
@@ -203,6 +203,30 @@ REGISTER_OP("BoostedTreesPredict")
return Status::OK();
});
+REGISTER_OP("BoostedTreesExampleDebugOutputs")
+ .Input("tree_ensemble_handle: resource")
+ .Input("bucketized_features: num_bucketized_features * int32")
+ .Attr("num_bucketized_features: int >= 1") // Inferred.
+ .Attr("logits_dimension: int")
+ .Output("examples_debug_outputs_serialized: string")
+ .SetShapeFn([](shape_inference::InferenceContext* c) {
+ shape_inference::ShapeHandle feature_shape;
+ int num_bucketized_features;
+ TF_RETURN_IF_ERROR(
+ c->GetAttr("num_bucketized_features", &num_bucketized_features));
+ shape_inference::ShapeHandle unused_input;
+ for (int i = 0; i < num_bucketized_features; ++i) {
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(i + 1), 1, &feature_shape));
+ // Check that the shapes of all bucketized features are the same.
+ TF_RETURN_IF_ERROR(c->Merge(c->input(1), feature_shape, &unused_input));
+ }
+
+ // Multi-class will be supported by modifying the proto.
+ auto batch_size = c->MakeShape({c->Dim(feature_shape, 0)});
+ c->set_output(0, batch_size);
+ return Status::OK();
+ });
+
REGISTER_OP("BoostedTreesSerializeEnsemble")
.Input("tree_ensemble_handle: resource")
.Output("stamp_token: int64")
@@ -307,4 +331,27 @@ REGISTER_OP("BoostedTreesUpdateEnsemble")
return Status::OK();
});
+REGISTER_OP("BoostedTreesCenterBias")
+ .Input("tree_ensemble_handle: resource")
+ .Input("mean_gradients: float")
+ .Input("mean_hessians: float")
+ // Regularization-related.
+ .Input("l1: float")
+ .Input("l2: float")
+ .Output("continue_centering: bool")
+ .SetShapeFn([](shape_inference::InferenceContext* c) {
+ shape_inference::ShapeHandle gradients_shape;
+ shape_inference::ShapeHandle hessians_shape;
+ shape_inference::ShapeHandle unused_shape;
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &gradients_shape));
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 2, &hessians_shape));
+ TF_RETURN_IF_ERROR(
+ c->Merge(gradients_shape, hessians_shape, &unused_shape));
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused_shape));
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &unused_shape));
+
+ c->set_output(0, c->Scalar());
+ return Status::OK();
+ });
+
} // namespace tensorflow
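
Both new ops above implement shape inference with the same three InferenceContext calls: WithRank asserts an input's rank, Merge requires two shapes to agree, and Scalar/MakeShape builds the output. A minimal sketch with a hypothetical op name, using only calls that appear in this diff:

REGISTER_OP("ExampleRowCount")  // hypothetical op, for illustration only
    .Input("matrix: float")
    .Output("rows: int64")
    .SetShapeFn([](shape_inference::InferenceContext* c) {
      shape_inference::ShapeHandle m;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &m));  // must be rank 2
      c->set_output(0, c->Scalar());  // a single row count
      return Status::OK();
    });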
diff --git a/tensorflow/core/ops/compat/ops_history.v1.pbtxt b/tensorflow/core/ops/compat/ops_history.v1.pbtxt
index ee4faa5033..d94fa2cad7 100644
--- a/tensorflow/core/ops/compat/ops_history.v1.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history.v1.pbtxt
@@ -7681,66 +7681,6 @@ op {
}
}
op {
- name: "AvgPool"
- input_arg {
- name: "value"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "ksize"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "strides"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_BFLOAT16
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
-}
-op {
name: "AvgPool3D"
input_arg {
name: "input"
@@ -8430,70 +8370,6 @@ op {
}
}
op {
- name: "AvgPoolGrad"
- input_arg {
- name: "orig_input_shape"
- type: DT_INT32
- }
- input_arg {
- name: "grad"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "ksize"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "strides"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_BFLOAT16
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
-}
-op {
name: "Barrier"
output_arg {
name: "handle"
@@ -10555,61 +10431,6 @@ op {
}
}
op {
- name: "BiasAdd"
- input_arg {
- name: "value"
- type_attr: "T"
- }
- input_arg {
- name: "bias"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_UINT8
- type: DT_INT16
- type: DT_INT8
- type: DT_COMPLEX64
- type: DT_INT64
- type: DT_QINT8
- type: DT_QUINT8
- type: DT_QINT32
- type: DT_BFLOAT16
- type: DT_UINT16
- type: DT_COMPLEX128
- type: DT_HALF
- type: DT_UINT32
- type: DT_UINT64
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
-}
-op {
name: "BiasAddGrad"
input_arg {
name: "out_backprop"
@@ -10802,57 +10623,6 @@ op {
}
}
op {
- name: "BiasAddGrad"
- input_arg {
- name: "out_backprop"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_UINT8
- type: DT_INT16
- type: DT_INT8
- type: DT_COMPLEX64
- type: DT_INT64
- type: DT_QINT8
- type: DT_QUINT8
- type: DT_QINT32
- type: DT_BFLOAT16
- type: DT_UINT16
- type: DT_COMPLEX128
- type: DT_HALF
- type: DT_UINT32
- type: DT_UINT64
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
-}
-op {
name: "BiasAddV1"
input_arg {
name: "value"
@@ -11527,6 +11297,34 @@ op {
}
}
op {
+ name: "BoostedTreesCenterBias"
+ input_arg {
+ name: "tree_ensemble_handle"
+ type: DT_RESOURCE
+ }
+ input_arg {
+ name: "mean_gradients"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "mean_hessians"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "l1"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "l2"
+ type: DT_FLOAT
+ }
+ output_arg {
+ name: "continue_centering"
+ type: DT_BOOL
+ }
+ is_stateful: true
+}
+op {
name: "BoostedTreesCreateEnsemble"
input_arg {
name: "tree_ensemble_handle"
@@ -11581,6 +11379,33 @@ op {
is_stateful: true
}
op {
+ name: "BoostedTreesExampleDebugOutputs"
+ input_arg {
+ name: "tree_ensemble_handle"
+ type: DT_RESOURCE
+ }
+ input_arg {
+ name: "bucketized_features"
+ type: DT_INT32
+ number_attr: "num_bucketized_features"
+ }
+ output_arg {
+ name: "examples_debug_outputs_serialized"
+ type: DT_STRING
+ }
+ attr {
+ name: "num_bucketized_features"
+ type: "int"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "logits_dimension"
+ type: "int"
+ }
+ is_stateful: true
+}
+op {
name: "BoostedTreesGetEnsembleStates"
input_arg {
name: "tree_ensemble_handle"
@@ -13402,144 +13227,6 @@ op {
}
}
op {
- name: "Conv2D"
- input_arg {
- name: "input"
- type_attr: "T"
- }
- input_arg {
- name: "filter"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_BFLOAT16
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
- attr {
- name: "strides"
- type: "list(int)"
- }
- attr {
- name: "use_cudnn_on_gpu"
- type: "bool"
- default_value {
- b: true
- }
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "dilations"
- type: "list(int)"
- default_value {
- list {
- i: 1
- i: 1
- i: 1
- i: 1
- }
- }
- }
-}
-op {
- name: "Conv2DBackpropFilter"
- input_arg {
- name: "input"
- type_attr: "T"
- }
- input_arg {
- name: "filter_sizes"
- type: DT_INT32
- }
- input_arg {
- name: "out_backprop"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_FLOAT
- }
- }
- }
- attr {
- name: "strides"
- type: "list(int)"
- }
- attr {
- name: "use_cudnn_on_gpu"
- type: "bool"
- default_value {
- b: true
- }
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- }
- }
- }
-}
-op {
name: "Conv2DBackpropFilter"
input_arg {
name: "input"
@@ -13563,7 +13250,6 @@ op {
allowed_values {
list {
type: DT_HALF
- type: DT_BFLOAT16
type: DT_FLOAT
}
}
@@ -13602,18 +13288,6 @@ op {
}
}
}
- attr {
- name: "dilations"
- type: "list(int)"
- default_value {
- list {
- i: 1
- i: 1
- i: 1
- i: 1
- }
- }
- }
}
op {
name: "Conv2DBackpropFilter"
@@ -13641,7 +13315,6 @@ op {
type: DT_HALF
type: DT_BFLOAT16
type: DT_FLOAT
- type: DT_DOUBLE
}
}
}
@@ -13753,8 +13426,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -13988,85 +13659,6 @@ op {
}
}
op {
- name: "Conv2DBackpropInput"
- input_arg {
- name: "input_sizes"
- type: DT_INT32
- }
- input_arg {
- name: "filter"
- type_attr: "T"
- }
- input_arg {
- name: "out_backprop"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_BFLOAT16
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
- attr {
- name: "strides"
- type: "list(int)"
- }
- attr {
- name: "use_cudnn_on_gpu"
- type: "bool"
- default_value {
- b: true
- }
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "dilations"
- type: "list(int)"
- default_value {
- list {
- i: 1
- i: 1
- i: 1
- i: 1
- }
- }
- }
-}
-op {
name: "Conv3D"
input_arg {
name: "input"
@@ -18797,117 +18389,6 @@ op {
}
}
op {
- name: "DepthwiseConv2dNative"
- input_arg {
- name: "input"
- type_attr: "T"
- }
- input_arg {
- name: "filter"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_BFLOAT16
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
- attr {
- name: "strides"
- type: "list(int)"
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "dilations"
- type: "list(int)"
- default_value {
- list {
- i: 1
- i: 1
- i: 1
- i: 1
- }
- }
- }
-}
-op {
- name: "DepthwiseConv2dNativeBackpropFilter"
- input_arg {
- name: "input"
- type_attr: "T"
- }
- input_arg {
- name: "filter_sizes"
- type: DT_INT32
- }
- input_arg {
- name: "out_backprop"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
- attr {
- name: "strides"
- type: "list(int)"
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
-}
-op {
name: "DepthwiseConv2dNativeBackpropFilter"
input_arg {
name: "input"
@@ -18949,19 +18430,6 @@ op {
}
}
}
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- }
- }
- }
}
op {
name: "DepthwiseConv2dNativeBackpropFilter"
@@ -18986,7 +18454,6 @@ op {
type: "type"
allowed_values {
list {
- type: DT_BFLOAT16
type: DT_FLOAT
type: DT_DOUBLE
}
@@ -19019,18 +18486,6 @@ op {
}
}
}
- attr {
- name: "dilations"
- type: "list(int)"
- default_value {
- list {
- i: 1
- i: 1
- i: 1
- i: 1
- }
- }
- }
}
op {
name: "DepthwiseConv2dNativeBackpropFilter"
@@ -19055,7 +18510,6 @@ op {
type: "type"
allowed_values {
list {
- type: DT_HALF
type: DT_BFLOAT16
type: DT_FLOAT
type: DT_DOUBLE
@@ -19156,8 +18610,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -19413,78 +18865,6 @@ op {
}
}
op {
- name: "DepthwiseConv2dNativeBackpropInput"
- input_arg {
- name: "input_sizes"
- type: DT_INT32
- }
- input_arg {
- name: "filter"
- type_attr: "T"
- }
- input_arg {
- name: "out_backprop"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_BFLOAT16
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
- attr {
- name: "strides"
- type: "list(int)"
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "dilations"
- type: "list(int)"
- default_value {
- list {
- i: 1
- i: 1
- i: 1
- i: 1
- }
- }
- }
-}
-op {
name: "Dequantize"
input_arg {
name: "input"
@@ -24723,6 +24103,60 @@ op {
}
}
op {
+ name: "FusedPadConv2D"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "paddings"
+ type: DT_INT32
+ }
+ input_arg {
+ name: "filter"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ }
+ }
+ }
+ attr {
+ name: "mode"
+ type: "string"
+ allowed_values {
+ list {
+ s: "REFLECT"
+ s: "SYMMETRIC"
+ }
+ }
+ }
+ attr {
+ name: "strides"
+ type: "list(int)"
+ }
+ attr {
+ name: "padding"
+ type: "string"
+ allowed_values {
+ list {
+ s: "SAME"
+ s: "VALID"
+ }
+ }
+ }
+}
+op {
name: "FusedResizeAndPadConv2D"
input_arg {
name: "input"
@@ -24786,6 +24220,71 @@ op {
}
}
op {
+ name: "FusedResizeAndPadConv2D"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "size"
+ type: DT_INT32
+ }
+ input_arg {
+ name: "paddings"
+ type: DT_INT32
+ }
+ input_arg {
+ name: "filter"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ }
+ }
+ }
+ attr {
+ name: "resize_align_corners"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ attr {
+ name: "mode"
+ type: "string"
+ allowed_values {
+ list {
+ s: "REFLECT"
+ s: "SYMMETRIC"
+ }
+ }
+ }
+ attr {
+ name: "strides"
+ type: "list(int)"
+ }
+ attr {
+ name: "padding"
+ type: "string"
+ allowed_values {
+ list {
+ s: "SAME"
+ s: "VALID"
+ }
+ }
+ }
+}
+op {
name: "Gather"
input_arg {
name: "params"
@@ -26102,29 +25601,6 @@ op {
}
}
op {
- name: "IdentityDataset"
- input_arg {
- name: "input_dataset"
- type: DT_VARIANT
- }
- output_arg {
- name: "handle"
- type: DT_VARIANT
- }
- attr {
- name: "output_types"
- type: "list(type)"
- has_minimum: true
- minimum: 1
- }
- attr {
- name: "output_shapes"
- type: "list(shape)"
- has_minimum: true
- minimum: 1
- }
-}
-op {
name: "IdentityN"
input_arg {
name: "input"
@@ -26290,6 +25766,43 @@ op {
}
}
op {
+ name: "If"
+ input_arg {
+ name: "cond"
+ type_attr: "Tcond"
+ }
+ input_arg {
+ name: "input"
+ type_list_attr: "Tin"
+ }
+ output_arg {
+ name: "output"
+ type_list_attr: "Tout"
+ }
+ attr {
+ name: "Tcond"
+ type: "type"
+ }
+ attr {
+ name: "Tin"
+ type: "list(type)"
+ has_minimum: true
+ }
+ attr {
+ name: "Tout"
+ type: "list(type)"
+ has_minimum: true
+ }
+ attr {
+ name: "then_branch"
+ type: "func"
+ }
+ attr {
+ name: "else_branch"
+ type: "func"
+ }
+}
+op {
name: "Igamma"
input_arg {
name: "a"
@@ -27658,6 +27171,36 @@ op {
is_stateful: true
}
op {
+ name: "IteratorFromStringHandleV2"
+ input_arg {
+ name: "string_handle"
+ type: DT_STRING
+ }
+ output_arg {
+ name: "resource_handle"
+ type: DT_RESOURCE
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ default_value {
+ list {
+ }
+ }
+ has_minimum: true
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ default_value {
+ list {
+ }
+ }
+ has_minimum: true
+ }
+ is_stateful: true
+}
+op {
name: "IteratorGetNext"
input_arg {
name: "iterator"
@@ -27718,6 +27261,34 @@ op {
is_stateful: true
}
op {
+ name: "IteratorV2"
+ output_arg {
+ name: "handle"
+ type: DT_RESOURCE
+ }
+ attr {
+ name: "shared_name"
+ type: "string"
+ }
+ attr {
+ name: "container"
+ type: "string"
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ has_minimum: true
+ minimum: 1
+ }
+ is_stateful: true
+}
+op {
name: "L2Loss"
input_arg {
name: "t"
@@ -31988,85 +31559,6 @@ op {
}
}
op {
- name: "MaxPoolGrad"
- input_arg {
- name: "orig_input"
- type_attr: "T"
- }
- input_arg {
- name: "orig_output"
- type_attr: "T"
- }
- input_arg {
- name: "grad"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "ksize"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "strides"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "T"
- type: "type"
- default_value {
- type: DT_FLOAT
- }
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_UINT8
- type: DT_INT16
- type: DT_INT8
- type: DT_INT64
- type: DT_BFLOAT16
- type: DT_UINT16
- type: DT_HALF
- type: DT_UINT32
- type: DT_UINT64
- }
- }
- }
-}
-op {
name: "MaxPoolGradGrad"
input_arg {
name: "orig_input"
@@ -32359,82 +31851,6 @@ op {
}
}
op {
- name: "MaxPoolGradGrad"
- input_arg {
- name: "orig_input"
- type_attr: "T"
- }
- input_arg {
- name: "orig_output"
- type_attr: "T"
- }
- input_arg {
- name: "grad"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "ksize"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "strides"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_UINT8
- type: DT_INT16
- type: DT_INT8
- type: DT_INT64
- type: DT_BFLOAT16
- type: DT_UINT16
- type: DT_HALF
- type: DT_UINT32
- type: DT_UINT64
- }
- }
- }
-}
-op {
name: "MaxPoolGradGradV2"
input_arg {
name: "orig_input"
@@ -32711,78 +32127,6 @@ op {
}
}
op {
- name: "MaxPoolGradGradV2"
- input_arg {
- name: "orig_input"
- type_attr: "T"
- }
- input_arg {
- name: "orig_output"
- type_attr: "T"
- }
- input_arg {
- name: "grad"
- type_attr: "T"
- }
- input_arg {
- name: "ksize"
- type: DT_INT32
- }
- input_arg {
- name: "strides"
- type: DT_INT32
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_UINT8
- type: DT_INT16
- type: DT_INT8
- type: DT_INT64
- type: DT_BFLOAT16
- type: DT_UINT16
- type: DT_HALF
- type: DT_UINT32
- type: DT_UINT64
- }
- }
- }
-}
-op {
name: "MaxPoolGradGradWithArgmax"
input_arg {
name: "input"
@@ -33351,81 +32695,6 @@ op {
}
}
op {
- name: "MaxPoolGradV2"
- input_arg {
- name: "orig_input"
- type_attr: "T"
- }
- input_arg {
- name: "orig_output"
- type_attr: "T"
- }
- input_arg {
- name: "grad"
- type_attr: "T"
- }
- input_arg {
- name: "ksize"
- type: DT_INT32
- }
- input_arg {
- name: "strides"
- type: DT_INT32
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "T"
- type: "type"
- default_value {
- type: DT_FLOAT
- }
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_UINT8
- type: DT_INT16
- type: DT_INT8
- type: DT_INT64
- type: DT_BFLOAT16
- type: DT_UINT16
- type: DT_HALF
- type: DT_UINT32
- type: DT_UINT64
- }
- }
- }
-}
-op {
name: "MaxPoolGradWithArgmax"
input_arg {
name: "input"
@@ -36110,6 +35379,33 @@ op {
}
}
op {
+ name: "NonMaxSuppressionWithOverlaps"
+ input_arg {
+ name: "overlaps"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "scores"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "max_output_size"
+ type: DT_INT32
+ }
+ input_arg {
+ name: "overlap_threshold"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "score_threshold"
+ type: DT_FLOAT
+ }
+ output_arg {
+ name: "selected_indices"
+ type: DT_INT32
+ }
+}
+op {
name: "NotEqual"
input_arg {
name: "x"
@@ -58674,6 +57970,17 @@ op {
}
}
op {
+ name: "SinkDataset"
+ input_arg {
+ name: "input_dataset"
+ type: DT_VARIANT
+ }
+ output_arg {
+ name: "handle"
+ type: DT_VARIANT
+ }
+}
+op {
name: "Size"
input_arg {
name: "input"
@@ -58864,7 +58171,11 @@ op {
type: DT_INT64
}
input_arg {
- name: "stride"
+ name: "window_shift"
+ type: DT_INT64
+ }
+ input_arg {
+ name: "window_stride"
type: DT_INT64
}
output_arg {
@@ -66693,6 +66004,54 @@ op {
}
}
op {
+ name: "SparseSliceGrad"
+ input_arg {
+ name: "backprop_val_grad"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "input_indices"
+ type: DT_INT64
+ }
+ input_arg {
+ name: "input_start"
+ type: DT_INT64
+ }
+ input_arg {
+ name: "output_indices"
+ type: DT_INT64
+ }
+ output_arg {
+ name: "val_grad"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_COMPLEX64
+ type: DT_INT64
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_BFLOAT16
+ type: DT_UINT16
+ type: DT_COMPLEX128
+ type: DT_HALF
+ type: DT_UINT32
+ type: DT_UINT64
+ }
+ }
+ }
+}
+op {
name: "SparseSoftmax"
input_arg {
name: "sp_indices"
@@ -68594,6 +67953,32 @@ op {
is_stateful: true
}
op {
+ name: "StatefulPartitionedCall"
+ input_arg {
+ name: "args"
+ type_list_attr: "Tin"
+ }
+ output_arg {
+ name: "output"
+ type_list_attr: "Tout"
+ }
+ attr {
+ name: "Tin"
+ type: "list(type)"
+ has_minimum: true
+ }
+ attr {
+ name: "Tout"
+ type: "list(type)"
+ has_minimum: true
+ }
+ attr {
+ name: "f"
+ type: "func"
+ }
+ is_stateful: true
+}
+op {
name: "StatelessMultinomial"
input_arg {
name: "logits"
@@ -74570,6 +73955,33 @@ op {
is_stateful: true
}
op {
+ name: "WindowDataset"
+ input_arg {
+ name: "input_dataset"
+ type: DT_VARIANT
+ }
+ input_arg {
+ name: "window_size"
+ type: DT_INT64
+ }
+ output_arg {
+ name: "handle"
+ type: DT_VARIANT
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ has_minimum: true
+ minimum: 1
+ }
+}
+op {
name: "WriteAudioSummary"
input_arg {
name: "writer"
diff --git a/tensorflow/core/ops/dataset_ops.cc b/tensorflow/core/ops/dataset_ops.cc
index 9dca5f53ce..8c83a09597 100644
--- a/tensorflow/core/ops/dataset_ops.cc
+++ b/tensorflow/core/ops/dataset_ops.cc
@@ -362,6 +362,19 @@ REGISTER_OP("FilterDataset")
.Attr("output_shapes: list(shape) >= 1")
.SetShapeFn(shape_inference::ScalarShape);
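+// Combines consecutive elements of the input dataset into windows of
+// window_size elements each.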
+REGISTER_OP("WindowDataset")
+ .Input("input_dataset: variant")
+ .Input("window_size: int64")
+ .Output("handle: variant")
+ .Attr("output_types: list(type) >= 1")
+ .Attr("output_shapes: list(shape) >= 1")
+ .SetShapeFn([](shape_inference::InferenceContext* c) {
+ shape_inference::ShapeHandle unused;
+      // window_size should be a scalar.
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
+ return shape_inference::ScalarShape(c);
+ });
+
REGISTER_OP("BatchDataset")
.Input("input_dataset: variant")
.Input("batch_size: int64")
@@ -391,19 +404,20 @@ REGISTER_OP("BatchDatasetV2")
return shape_inference::ScalarShape(c);
});
-// TODO(mrry): move SlideDataset to contrib in the future.
REGISTER_OP("SlideDataset")
.Input("input_dataset: variant")
.Input("window_size: int64")
- .Input("stride: int64")
+ .Input("window_shift: int64")
+ .Input("window_stride: int64")
.Output("handle: variant")
.Attr("output_types: list(type) >= 1")
.Attr("output_shapes: list(shape) >= 1")
.SetShapeFn([](shape_inference::InferenceContext* c) {
shape_inference::ShapeHandle unused;
- // window_size and stride should be scalars.
+ // window_size, window_shift, and window_stride should be scalars.
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
return shape_inference::ScalarShape(c);
});
@@ -631,6 +645,14 @@ REGISTER_OP("Iterator")
.Attr("output_shapes: list(shape) >= 1")
.SetShapeFn(shape_inference::ScalarShape);
+REGISTER_OP("IteratorV2")
+ .Output("handle: resource")
+ .Attr("shared_name: string")
+ .Attr("container: string")
+ .Attr("output_types: list(type) >= 1")
+ .Attr("output_shapes: list(shape) >= 1")
+ .SetShapeFn(shape_inference::ScalarShape);
+
REGISTER_OP("AnonymousIterator")
.Output("handle: resource")
.Attr("output_types: list(type) >= 1")
@@ -708,6 +730,13 @@ REGISTER_OP("IteratorFromStringHandle")
.Attr("output_shapes: list(shape) >= 0 = []")
.SetShapeFn(shape_inference::ScalarShape);
+REGISTER_OP("IteratorFromStringHandleV2")
+ .Input("string_handle: string")
+ .Output("resource_handle: resource")
+ .Attr("output_types: list(type) >= 0 = []")
+ .Attr("output_shapes: list(shape) >= 0 = []")
+ .SetShapeFn(shape_inference::ScalarShape);
+
REGISTER_OP("SerializeIterator")
.Input("resource_handle: resource")
.Output("serialized: variant")
@@ -770,11 +799,9 @@ REGISTER_OP("DatasetToGraph")
.Output("graph: string")
.SetShapeFn(shape_inference::ScalarShape);
-REGISTER_OP("IdentityDataset")
+REGISTER_OP("SinkDataset")
.Input("input_dataset: variant")
.Output("handle: variant")
- .Attr("output_types: list(type) >= 1")
- .Attr("output_shapes: list(shape) >= 1")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("OptimizeDataset")
diff --git a/tensorflow/core/ops/debug_ops.cc b/tensorflow/core/ops/debug_ops.cc
index 5aebdca1ea..2d9b4360de 100644
--- a/tensorflow/core/ops/debug_ops.cc
+++ b/tensorflow/core/ops/debug_ops.cc
@@ -20,7 +20,7 @@ limitations under the License.
namespace tensorflow {
-// EXPERIMENTAL: tfdbg debugger-inserted ops.
+// TensorFlow Debugger-inserted ops.
// These ops are used only internally by tfdbg. There is no API for users to
// directly create them. Users can create them indirectly by using
// RunOptions.debug_options during Session::Run() call. See tfdbg documentation
diff --git a/tensorflow/core/ops/functional_ops.cc b/tensorflow/core/ops/functional_ops.cc
index 88553dff93..5f262db2ce 100644
--- a/tensorflow/core/ops/functional_ops.cc
+++ b/tensorflow/core/ops/functional_ops.cc
@@ -31,11 +31,23 @@ REGISTER_OP("SymbolicGradient")
if (c->num_inputs() < c->num_outputs()) {
return errors::InvalidArgument("len(inputs) < len(outputs)");
}
+ std::vector<DataType> types;
+ TF_RETURN_IF_ERROR(c->GetAttr("Tin", &types));
// Say, (u, v) = f(x, y, z), _symbolic_gradient(f) is a function of
// (x, y, z, du, dv) -> (dx, dy, dz). Therefore, shapes of its
// outputs (dx, dy, dz) are the same as (x, y, z).
for (int i = 0; i < c->num_outputs(); ++i) {
- c->set_output(i, c->input(i));
+ if (types[i] == DT_RESOURCE) {
+ const std::vector<shape_inference::ShapeAndType>* handle_type =
+ c->input_handle_shapes_and_types(i);
+ if (handle_type != nullptr) {
+ c->set_output(i, handle_type->at(0).shape);
+ } else {
+ c->set_output(i, c->UnknownShape());
+ }
+ } else {
+ c->set_output(i, c->input(i));
+ }
}
return Status::OK();
});
@@ -83,7 +95,7 @@ REGISTER_OP("If")
.Output("output: Tout")
.Attr("Tcond: type")
.Attr("Tin: list(type) >= 0")
- .Attr("Tout: list(type)")
+ .Attr("Tout: list(type) >= 0")
.Attr("then_branch: func")
.Attr("else_branch: func")
.SetShapeFn(shape_inference::UnknownShape);
@@ -145,7 +157,6 @@ REGISTER_OP("For")
.Attr("body: func")
.SetShapeFn(shape_inference::UnknownShape);
-// TODO(b/73826847, b/37549631) Mark as stateful.
REGISTER_OP("PartitionedCall")
.Input("args: Tin")
.Output("output: Tout")
@@ -154,6 +165,15 @@ REGISTER_OP("PartitionedCall")
.Attr("f: func")
.SetShapeFn(shape_inference::UnknownShape);
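+// Identical to PartitionedCall, but marked stateful.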
+REGISTER_OP("StatefulPartitionedCall")
+ .Input("args: Tin")
+ .Output("output: Tout")
+ .Attr("Tin: list(type) >= 0")
+ .Attr("Tout: list(type) >= 0")
+ .Attr("f: func")
+ .SetIsStateful()
+ .SetShapeFn(shape_inference::UnknownShape);
+
// This op is used as a placeholder in If branch functions. It doesn't provide a
// valid output when run, so must either be removed (e.g. replaced with a
// function input) or guaranteed not to be used (e.g. if mirroring an
diff --git a/tensorflow/core/ops/image_ops.cc b/tensorflow/core/ops/image_ops.cc
index 87f4991134..50ced1ff73 100644
--- a/tensorflow/core/ops/image_ops.cc
+++ b/tensorflow/core/ops/image_ops.cc
@@ -709,4 +709,36 @@ REGISTER_OP("NonMaxSuppressionV3")
return Status::OK();
});
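+// Variant of NonMaxSuppression that consumes a precomputed
+// [num_boxes, num_boxes] overlaps matrix in place of box coordinates.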
+REGISTER_OP("NonMaxSuppressionWithOverlaps")
+ .Input("overlaps: float")
+ .Input("scores: float")
+ .Input("max_output_size: int32")
+ .Input("overlap_threshold: float")
+ .Input("score_threshold: float")
+ .Output("selected_indices: int32")
+ .SetShapeFn([](InferenceContext* c) {
+ // Get inputs and validate ranks.
+ ShapeHandle overlaps;
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &overlaps));
+ ShapeHandle scores;
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &scores));
+ ShapeHandle max_output_size;
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &max_output_size));
+ ShapeHandle overlap_threshold;
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &overlap_threshold));
+ ShapeHandle score_threshold;
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &score_threshold));
+      // The overlaps input is a 2-D float Tensor of shape
+      // [num_boxes, num_boxes].
+ DimensionHandle unused;
+      // overlaps[0] and scores[0] are both num_boxes.
+ TF_RETURN_IF_ERROR(
+ c->Merge(c->Dim(overlaps, 0), c->Dim(scores, 0), &unused));
+      // overlaps must be square, so overlaps[1] is also num_boxes.
+ TF_RETURN_IF_ERROR(
+ c->Merge(c->Dim(overlaps, 0), c->Dim(overlaps, 1), &unused));
+
+ c->set_output(0, c->Vector(c->UnknownDim()));
+ return Status::OK();
+ });
+
} // namespace tensorflow
diff --git a/tensorflow/core/ops/math_ops.cc b/tensorflow/core/ops/math_ops.cc
index fd59622b27..c229bd5a41 100644
--- a/tensorflow/core/ops/math_ops.cc
+++ b/tensorflow/core/ops/math_ops.cc
@@ -243,6 +243,17 @@ REGISTER_OP("BesselI0e").UNARY_REAL();
REGISTER_OP("BesselI1e").UNARY_REAL();
+REGISTER_OP("_UnaryOpsComposition")
+ .Input("x: T")
+ .Output("y: T")
+ .Attr("T: {float, half, double}")
+ .Attr("op_names: list(string)")
+ .SetShapeFn(shape_inference::UnchangedShape)
+ .Doc(R"doc(
+*NOTE*: Do not invoke this operator directly in Python. Graph rewrite pass is
+expected to create these operators.
+)doc");
+
#undef UNARY
#undef UNARY_REAL
#undef UNARY_COMPLEX
diff --git a/tensorflow/core/ops/nn_ops.cc b/tensorflow/core/ops/nn_ops.cc
index f1bbfac5e6..f947d4c30d 100644
--- a/tensorflow/core/ops/nn_ops.cc
+++ b/tensorflow/core/ops/nn_ops.cc
@@ -432,7 +432,7 @@ REGISTER_OP("FusedResizeAndPadConv2D")
.Input("paddings: int32")
.Input("filter: T")
.Output("output: T")
- .Attr("T: {float}")
+ .Attr("T: {half, float, double}")
.Attr("resize_align_corners: bool = false")
.Attr(GetMirrorPadModeAttrString())
.Attr("strides: list(int)")
@@ -446,7 +446,7 @@ REGISTER_OP("FusedPadConv2D")
.Input("paddings: int32")
.Input("filter: T")
.Output("output: T")
- .Attr("T: {float}")
+ .Attr("T: {half, float, double}")
.Attr(GetMirrorPadModeAttrString())
.Attr("strides: list(int)")
.Attr(GetPaddingAttrString())
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index e18771c389..4f24ab480f 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -2490,8 +2490,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -2674,8 +2672,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -3989,8 +3985,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -4040,8 +4034,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -4340,6 +4332,34 @@ op {
}
}
op {
+ name: "BoostedTreesCenterBias"
+ input_arg {
+ name: "tree_ensemble_handle"
+ type: DT_RESOURCE
+ }
+ input_arg {
+ name: "mean_gradients"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "mean_hessians"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "l1"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "l2"
+ type: DT_FLOAT
+ }
+ output_arg {
+ name: "continue_centering"
+ type: DT_BOOL
+ }
+ is_stateful: true
+}
+op {
name: "BoostedTreesCreateEnsemble"
input_arg {
name: "tree_ensemble_handle"
@@ -4394,6 +4414,33 @@ op {
is_stateful: true
}
op {
+ name: "BoostedTreesExampleDebugOutputs"
+ input_arg {
+ name: "tree_ensemble_handle"
+ type: DT_RESOURCE
+ }
+ input_arg {
+ name: "bucketized_features"
+ type: DT_INT32
+ number_attr: "num_bucketized_features"
+ }
+ output_arg {
+ name: "examples_debug_outputs_serialized"
+ type: DT_STRING
+ }
+ attr {
+ name: "num_bucketized_features"
+ type: "int"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "logits_dimension"
+ type: "int"
+ }
+ is_stateful: true
+}
+op {
name: "BoostedTreesGetEnsembleStates"
input_arg {
name: "tree_ensemble_handle"
@@ -5675,8 +5722,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -5754,8 +5799,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -5833,8 +5876,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -8537,8 +8578,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -8609,8 +8648,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -8681,8 +8718,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -11430,7 +11465,9 @@ op {
type: "type"
allowed_values {
list {
+ type: DT_HALF
type: DT_FLOAT
+ type: DT_DOUBLE
}
}
}
@@ -11486,7 +11523,9 @@ op {
type: "type"
allowed_values {
list {
+ type: DT_HALF
type: DT_FLOAT
+ type: DT_DOUBLE
}
}
}
@@ -12315,29 +12354,6 @@ op {
}
}
op {
- name: "IdentityDataset"
- input_arg {
- name: "input_dataset"
- type: DT_VARIANT
- }
- output_arg {
- name: "handle"
- type: DT_VARIANT
- }
- attr {
- name: "output_types"
- type: "list(type)"
- has_minimum: true
- minimum: 1
- }
- attr {
- name: "output_shapes"
- type: "list(shape)"
- has_minimum: true
- minimum: 1
- }
-}
-op {
name: "IdentityN"
input_arg {
name: "input"
@@ -12430,7 +12446,6 @@ op {
name: "Tout"
type: "list(type)"
has_minimum: true
- minimum: 1
}
attr {
name: "then_branch"
@@ -13210,6 +13225,36 @@ op {
is_stateful: true
}
op {
+ name: "IteratorFromStringHandleV2"
+ input_arg {
+ name: "string_handle"
+ type: DT_STRING
+ }
+ output_arg {
+ name: "resource_handle"
+ type: DT_RESOURCE
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ default_value {
+ list {
+ }
+ }
+ has_minimum: true
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ default_value {
+ list {
+ }
+ }
+ has_minimum: true
+ }
+ is_stateful: true
+}
+op {
name: "IteratorGetNext"
input_arg {
name: "iterator"
@@ -13270,6 +13315,34 @@ op {
is_stateful: true
}
op {
+ name: "IteratorV2"
+ output_arg {
+ name: "handle"
+ type: DT_RESOURCE
+ }
+ attr {
+ name: "shared_name"
+ type: "string"
+ }
+ attr {
+ name: "container"
+ type: "string"
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ has_minimum: true
+ minimum: 1
+ }
+ is_stateful: true
+}
+op {
name: "L2Loss"
input_arg {
name: "t"
@@ -15416,8 +15489,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -15495,8 +15566,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -15567,8 +15636,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -15710,8 +15777,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -16931,6 +16996,33 @@ op {
}
}
op {
+ name: "NonMaxSuppressionWithOverlaps"
+ input_arg {
+ name: "overlaps"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "scores"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "max_output_size"
+ type: DT_INT32
+ }
+ input_arg {
+ name: "overlap_threshold"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "score_threshold"
+ type: DT_FLOAT
+ }
+ output_arg {
+ name: "selected_indices"
+ type: DT_INT32
+ }
+}
+op {
name: "NotEqual"
input_arg {
name: "x"
@@ -27312,6 +27404,17 @@ op {
}
}
op {
+ name: "SinkDataset"
+ input_arg {
+ name: "input_dataset"
+ type: DT_VARIANT
+ }
+ output_arg {
+ name: "handle"
+ type: DT_VARIANT
+ }
+}
+op {
name: "Size"
input_arg {
name: "input"
@@ -27475,7 +27578,11 @@ op {
type: DT_INT64
}
input_arg {
- name: "stride"
+ name: "window_shift"
+ type: DT_INT64
+ }
+ input_arg {
+ name: "window_stride"
type: DT_INT64
}
output_arg {
@@ -30111,6 +30218,54 @@ op {
}
}
op {
+ name: "SparseSliceGrad"
+ input_arg {
+ name: "backprop_val_grad"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "input_indices"
+ type: DT_INT64
+ }
+ input_arg {
+ name: "input_start"
+ type: DT_INT64
+ }
+ input_arg {
+ name: "output_indices"
+ type: DT_INT64
+ }
+ output_arg {
+ name: "val_grad"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_COMPLEX64
+ type: DT_INT64
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_BFLOAT16
+ type: DT_UINT16
+ type: DT_COMPLEX128
+ type: DT_HALF
+ type: DT_UINT32
+ type: DT_UINT64
+ }
+ }
+ }
+}
+op {
name: "SparseSoftmax"
input_arg {
name: "sp_indices"
@@ -31143,6 +31298,32 @@ op {
is_stateful: true
}
op {
+ name: "StatefulPartitionedCall"
+ input_arg {
+ name: "args"
+ type_list_attr: "Tin"
+ }
+ output_arg {
+ name: "output"
+ type_list_attr: "Tout"
+ }
+ attr {
+ name: "Tin"
+ type: "list(type)"
+ has_minimum: true
+ }
+ attr {
+ name: "Tout"
+ type: "list(type)"
+ has_minimum: true
+ }
+ attr {
+ name: "f"
+ type: "func"
+ }
+ is_stateful: true
+}
+op {
name: "StatelessMultinomial"
input_arg {
name: "logits"
@@ -35043,6 +35224,33 @@ op {
is_stateful: true
}
op {
+ name: "WindowDataset"
+ input_arg {
+ name: "input_dataset"
+ type: DT_VARIANT
+ }
+ input_arg {
+ name: "window_size"
+ type: DT_INT64
+ }
+ output_arg {
+ name: "handle"
+ type: DT_VARIANT
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ has_minimum: true
+ minimum: 1
+ }
+}
+op {
name: "WriteAudioSummary"
input_arg {
name: "writer"
diff --git a/tensorflow/core/platform/cloud/gcs_file_system.cc b/tensorflow/core/platform/cloud/gcs_file_system.cc
index ec77861480..aa35e8a116 100644
--- a/tensorflow/core/platform/cloud/gcs_file_system.cc
+++ b/tensorflow/core/platform/cloud/gcs_file_system.cc
@@ -631,6 +631,9 @@ GcsFileSystem::GcsFileSystem()
// Setting either to 0 disables the cache; set both for good measure.
block_size = max_bytes = 0;
}
+ VLOG(1) << "GCS cache max size = " << max_bytes << " ; "
+ << "block size = " << block_size << " ; "
+ << "max staleness = " << max_staleness;
file_block_cache_ = MakeFileBlockCache(block_size, max_bytes, max_staleness);
// Apply overrides for the stat cache max age and max entries, if provided.
uint64 stat_cache_max_age = kStatCacheDefaultMaxAge;
@@ -1557,6 +1560,7 @@ Status GcsFileSystem::CreateHttpRequest(std::unique_ptr<HttpRequest>* request) {
return Status::OK();
}
-REGISTER_FILE_SYSTEM("gs", RetryingGcsFileSystem);
-
} // namespace tensorflow
+
+// Initialize gcs_file_system
+REGISTER_FILE_SYSTEM("gs", ::tensorflow::RetryingGcsFileSystem);
diff --git a/tensorflow/core/platform/default/build_config.bzl b/tensorflow/core/platform/default/build_config.bzl
index 66ccd81e41..28891320c4 100644
--- a/tensorflow/core/platform/default/build_config.bzl
+++ b/tensorflow/core/platform/default/build_config.bzl
@@ -620,10 +620,10 @@ def tf_additional_core_deps():
],
"//conditions:default": [],
}) + select({
- "//tensorflow:with_s3_support_windows_override": [],
- "//tensorflow:with_s3_support_android_override": [],
- "//tensorflow:with_s3_support_ios_override": [],
- "//tensorflow:with_s3_support": [
+ "//tensorflow:with_aws_support_windows_override": [],
+ "//tensorflow:with_aws_support_android_override": [],
+ "//tensorflow:with_aws_support_ios_override": [],
+ "//tensorflow:with_aws_support": [
"//tensorflow/core/platform/s3:s3_file_system",
],
"//conditions:default": [],
diff --git a/tensorflow/core/platform/default/build_config/BUILD b/tensorflow/core/platform/default/build_config/BUILD
index c17e4810d5..da1f66dc67 100644
--- a/tensorflow/core/platform/default/build_config/BUILD
+++ b/tensorflow/core/platform/default/build_config/BUILD
@@ -146,7 +146,6 @@ cc_library(
"@farmhash_archive//:farmhash",
"@fft2d",
"@highwayhash//:sip_hash",
- "@png_archive//:png",
],
)
@@ -161,7 +160,7 @@ cc_library(
"@farmhash_archive//:farmhash",
"@fft2d",
"@highwayhash//:sip_hash",
- "@png_archive//:png",
+ "@zlib_archive//:zlib",
],
)
@@ -187,6 +186,15 @@ cc_library(
)
cc_library(
+ name = "png",
+ copts = tf_copts(),
+ deps = [
+ "@png_archive//:png",
+ "@zlib_archive//:zlib",
+ ],
+)
+
+cc_library(
name = "protos_cc_impl",
copts = tf_copts(),
deps = [
diff --git a/tensorflow/core/platform/env.h b/tensorflow/core/platform/env.h
index 9192f7ba10..e17ecc8c52 100644
--- a/tensorflow/core/platform/env.h
+++ b/tensorflow/core/platform/env.h
@@ -450,6 +450,6 @@ struct Register {
::tensorflow::register_file_system::Register<factory>(env, scheme)
#define REGISTER_FILE_SYSTEM(scheme, factory) \
- REGISTER_FILE_SYSTEM_ENV(Env::Default(), scheme, factory);
+ REGISTER_FILE_SYSTEM_ENV(::tensorflow::Env::Default(), scheme, factory);
#endif // TENSORFLOW_CORE_PLATFORM_ENV_H_
diff --git a/tensorflow/core/platform/numa.h b/tensorflow/core/platform/numa.h
new file mode 100644
index 0000000000..b1f08e4c4c
--- /dev/null
+++ b/tensorflow/core/platform/numa.h
@@ -0,0 +1,62 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CORE_PLATFORM_NUMA_H_
+#define TENSORFLOW_CORE_PLATFORM_NUMA_H_
+
+#include "tensorflow/core/platform/platform.h"
+#include "tensorflow/core/platform/types.h"
+
+namespace tensorflow {
+namespace port {
+
+// Returns true iff NUMA functions are supported.
+bool NUMAEnabled();
+
+// Returns the number of NUMA nodes present with respect to CPU operations.
+// Typically this will be the number of sockets where some RAM has greater
+// affinity with one socket than another.
+int NUMANumNodes();
+
+static const int kNUMANoAffinity = -1;
+
+// If possible sets affinity of the current thread to the specified NUMA node.
+// If node == kNUMANoAffinity removes affinity to any particular node.
+void NUMASetThreadNodeAffinity(int node);
+
+// Returns NUMA node affinity of the current thread, kNUMANoAffinity if none.
+int NUMAGetThreadNodeAffinity();
+
+// Like AlignedMalloc, but allocates memory with affinity to the specified NUMA
+// node.
+//
+// Notes:
+// 1. node must be >= 0 and < NUMANumNodes.
+// 2. minimum_alignment must be a factor of the system page size; the memory
+// returned will be page-aligned.
+// 3. This function is likely significantly slower than AlignedMalloc
+// and should not be used for lots of small allocations. It makes more
+// sense as a backing allocator for BFCAllocator, PoolAllocator, or similar.
+void* NUMAMalloc(int node, size_t size, int minimum_alignment);
+
+// Memory allocated by NUMAMalloc must be freed via NUMAFree.
+void NUMAFree(void* ptr, size_t size);
+
+// Returns NUMA node affinity of memory address, kNUMANoAffinity if none.
+int NUMAGetMemAffinity(const void* ptr);
+
+} // namespace port
+} // namespace tensorflow
+#endif // TENSORFLOW_CORE_PLATFORM_NUMA_H_
diff --git a/tensorflow/core/platform/numa_test.cc b/tensorflow/core/platform/numa_test.cc
new file mode 100644
index 0000000000..8b39ecd59c
--- /dev/null
+++ b/tensorflow/core/platform/numa_test.cc
@@ -0,0 +1,61 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/platform/numa.h"
+
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+namespace internal {
+
+TEST(Numa, NumNodes) {
+ if (port::NUMAEnabled()) {
+ EXPECT_GE(port::NUMANumNodes(), 1);
+ }
+}
+
+TEST(Numa, Malloc) {
+ if (port::NUMAEnabled()) {
+ int num_nodes = port::NUMANumNodes();
+ for (int request_node = 0; request_node < num_nodes; ++request_node) {
+ void* ptr = port::NUMAMalloc(request_node, 8, 0);
+ EXPECT_NE(ptr, nullptr);
+ // Affinity cannot be tested until page is touched, so save a value.
+ *(reinterpret_cast<int*>(ptr)) = 0;
+ int affinity_node = port::NUMAGetMemAffinity(ptr);
+ EXPECT_EQ(affinity_node, request_node);
+ port::NUMAFree(ptr, 8);
+ }
+ }
+}
+
+TEST(Numa, SetNodeAffinity) {
+ // NOTE(tucker): This test is not reliable when executed under tap because
+  // the virtual machine may not have access to all of the available NUMA
+ // nodes. Not sure what to do about that.
+ EXPECT_EQ(-1, port::NUMAGetThreadNodeAffinity());
+ if (port::NUMAEnabled()) {
+ int num_nodes = port::NUMANumNodes();
+ for (int request_node = 0; request_node < num_nodes; ++request_node) {
+ port::NUMASetThreadNodeAffinity(request_node);
+ int affinity_node = port::NUMAGetThreadNodeAffinity();
+ EXPECT_EQ(affinity_node, request_node);
+ }
+ }
+}
+
+} // namespace internal
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/posix/port.cc b/tensorflow/core/platform/posix/port.cc
index 708f32ba80..1939cf72fb 100644
--- a/tensorflow/core/platform/posix/port.cc
+++ b/tensorflow/core/platform/posix/port.cc
@@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
+#include "tensorflow/core/platform/numa.h"
#include "tensorflow/core/platform/snappy.h"
#include "tensorflow/core/platform/types.h"
@@ -79,6 +80,19 @@ int NumHyperthreadsPerCore() {
return (ht_per_core > 0) ? ht_per_core : 1;
}
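+// NUMA support is not yet implemented for POSIX; these stubs report a single
+// node and no thread affinity.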
+bool NUMAEnabled() {
+ // Not yet implemented: coming soon.
+ return false;
+}
+
+int NUMANumNodes() { return 1; }
+
+void NUMASetThreadNodeAffinity(int node) {}
+
+int NUMAGetThreadNodeAffinity() {
+ return kNUMANoAffinity;
+}
+
void* AlignedMalloc(size_t size, int minimum_alignment) {
#if defined(__ANDROID__)
return memalign(minimum_alignment, size);
@@ -128,6 +142,16 @@ void Free(void* ptr) {
#endif
}
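+// NUMA-aware allocation is likewise not yet implemented: fall back to the
+// plain aligned allocator and report no memory affinity.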
+void* NUMAMalloc(int node, size_t size, int minimum_alignment) {
+ return AlignedMalloc(size, minimum_alignment);
+}
+
+void NUMAFree(void* ptr, size_t size) { Free(ptr); }
+
+int NUMAGetMemAffinity(const void* addr) {
+ return kNUMANoAffinity;
+}
+
void MallocExtension_ReleaseToSystem(std::size_t num_bytes) {
// No-op.
}
diff --git a/tensorflow/core/platform/profile_utils/cpu_utils.cc b/tensorflow/core/platform/profile_utils/cpu_utils.cc
index 02de7d1362..b0136b52f4 100644
--- a/tensorflow/core/platform/profile_utils/cpu_utils.cc
+++ b/tensorflow/core/platform/profile_utils/cpu_utils.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/core/platform/profile_utils/cpu_utils.h"
+#include <fstream>
#include <limits>
#include <mutex>
@@ -67,22 +68,32 @@ static ICpuUtilsHelper* cpu_utils_helper_instance_ = nullptr;
#if defined(__ANDROID__)
return GetCpuUtilsHelperSingletonInstance().CalculateCpuFrequency();
#elif defined(__linux__)
- double bogomips;
- FILE* fp = popen("grep '^bogomips' /proc/cpuinfo | head -1", "r");
- if (fp == nullptr) {
- return INVALID_FREQUENCY;
- }
- const int retval_of_bogomips = fscanf(fp, "bogomips : %lf", &bogomips);
- if (retval_of_bogomips <= 0) {
+ // Read the contents of /proc/cpuinfo.
+ std::ifstream cpuinfo("/proc/cpuinfo");
+ if (!cpuinfo) {
+ LOG(WARNING) << "Failed to open /proc/cpuinfo";
return INVALID_FREQUENCY;
}
- pclose(fp);
- const double freq_ghz = bogomips / 1000.0 / 2.0;
- if (retval_of_bogomips != 1 || freq_ghz < 0.01) {
- LOG(WARNING) << "Failed to get CPU frequency: " << freq_ghz << " Hz";
- return INVALID_FREQUENCY;
+ string line;
+ while (std::getline(cpuinfo, line)) {
+ double bogomips;
+ const int retval_of_bogomips =
+ sscanf(line.c_str(), "bogomips : %lf", &bogomips);
+ if (retval_of_bogomips > 0) {
+ const double freq_ghz = bogomips / 1000.0 / 2.0;
+ if (retval_of_bogomips != 1 || freq_ghz < 0.01) {
+ LOG(WARNING) << "Failed to get CPU frequency: " << freq_ghz << " Hz";
+ return INVALID_FREQUENCY;
+ }
+ const int64 freq_n =
+ static_cast<int64>(freq_ghz * 1000.0 * 1000.0 * 1000.0);
+ LOG(INFO) << "CPU Frequency: " << freq_n << " Hz";
+ return freq_n;
+ }
}
- return static_cast<int64>(freq_ghz * 1000.0 * 1000.0 * 1000.0);
+ LOG(WARNING) << "Failed to find bogomips in /proc/cpuinfo; cannot determine "
+ "CPU frequency";
+ return INVALID_FREQUENCY;
#elif defined(__APPLE__)
int64 freq_hz;
FILE* fp =
diff --git a/tensorflow/core/platform/s3/BUILD b/tensorflow/core/platform/s3/BUILD
index 21038cfeb1..41184b6fd9 100644
--- a/tensorflow/core/platform/s3/BUILD
+++ b/tensorflow/core/platform/s3/BUILD
@@ -16,10 +16,10 @@ load(
tf_cc_binary(
name = "s3_file_system.so",
srcs = [
+ "aws_crypto.cc",
+ "aws_crypto.h",
"aws_logging.cc",
"aws_logging.h",
- "s3_crypto.cc",
- "s3_crypto.h",
"s3_file_system.cc",
"s3_file_system.h",
],
@@ -40,16 +40,14 @@ tf_cc_binary(
)
cc_library(
- name = "s3_crypto",
+ name = "aws_crypto",
srcs = [
- "s3_crypto.cc",
+ "aws_crypto.cc",
],
hdrs = [
- "s3_crypto.h",
+ "aws_crypto.h",
],
deps = [
- "//tensorflow/core:lib",
- "//tensorflow/core:lib_internal",
"@aws",
"@boringssl//:crypto",
],
@@ -81,8 +79,8 @@ cc_library(
"s3_file_system.h",
],
deps = [
+ ":aws_crypto",
":aws_logging",
- ":s3_crypto",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"@aws",
diff --git a/tensorflow/core/platform/s3/aws_crypto.cc b/tensorflow/core/platform/s3/aws_crypto.cc
new file mode 100644
index 0000000000..90e46d6c1d
--- /dev/null
+++ b/tensorflow/core/platform/s3/aws_crypto.cc
@@ -0,0 +1,113 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/core/platform/s3/aws_crypto.h"
+#include <openssl/hmac.h>
+#include <openssl/sha.h>
+
+#include <aws/core/utils/crypto/HashResult.h>
+#include <aws/s3/S3Client.h>
+
+namespace tensorflow {
+
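+// SHA-256 HMAC implementation backed by BoringSSL, exposed to the AWS SDK
+// through AWSSHA256HmacFactory below.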
+class AWSSha256HMACOpenSSLImpl : public Aws::Utils::Crypto::HMAC {
+ public:
+ AWSSha256HMACOpenSSLImpl() {}
+
+ virtual ~AWSSha256HMACOpenSSLImpl() = default;
+
+ virtual Aws::Utils::Crypto::HashResult Calculate(
+ const Aws::Utils::ByteBuffer& toSign,
+ const Aws::Utils::ByteBuffer& secret) override {
+ unsigned int length = SHA256_DIGEST_LENGTH;
+ Aws::Utils::ByteBuffer digest(length);
+ memset(digest.GetUnderlyingData(), 0, length);
+
+ HMAC_CTX ctx;
+ HMAC_CTX_init(&ctx);
+
+ HMAC_Init_ex(&ctx, secret.GetUnderlyingData(),
+ static_cast<int>(secret.GetLength()), EVP_sha256(), NULL);
+ HMAC_Update(&ctx, toSign.GetUnderlyingData(), toSign.GetLength());
+ HMAC_Final(&ctx, digest.GetUnderlyingData(), &length);
+ HMAC_CTX_cleanup(&ctx);
+
+ return Aws::Utils::Crypto::HashResult(std::move(digest));
+ }
+};
+
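+// SHA-256 hash implementation backed by BoringSSL, exposed to the AWS SDK
+// through AWSSHA256Factory below.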
+class AWSSha256OpenSSLImpl : public Aws::Utils::Crypto::Hash {
+ public:
+ AWSSha256OpenSSLImpl() {}
+
+ virtual ~AWSSha256OpenSSLImpl() = default;
+
+ virtual Aws::Utils::Crypto::HashResult Calculate(
+ const Aws::String& str) override {
+ SHA256_CTX sha256;
+ SHA256_Init(&sha256);
+ SHA256_Update(&sha256, str.data(), str.size());
+
+ Aws::Utils::ByteBuffer hash(SHA256_DIGEST_LENGTH);
+ SHA256_Final(hash.GetUnderlyingData(), &sha256);
+
+ return Aws::Utils::Crypto::HashResult(std::move(hash));
+ }
+
+ virtual Aws::Utils::Crypto::HashResult Calculate(
+ Aws::IStream& stream) override {
+ SHA256_CTX sha256;
+ SHA256_Init(&sha256);
+
+ auto currentPos = stream.tellg();
+ if (currentPos == std::streampos(std::streamoff(-1))) {
+ currentPos = 0;
+ stream.clear();
+ }
+
+ stream.seekg(0, stream.beg);
+
+ char streamBuffer
+ [Aws::Utils::Crypto::Hash::INTERNAL_HASH_STREAM_BUFFER_SIZE];
+ while (stream.good()) {
+ stream.read(streamBuffer,
+ Aws::Utils::Crypto::Hash::INTERNAL_HASH_STREAM_BUFFER_SIZE);
+ auto bytesRead = stream.gcount();
+
+ if (bytesRead > 0) {
+ SHA256_Update(&sha256, streamBuffer, static_cast<size_t>(bytesRead));
+ }
+ }
+
+ stream.clear();
+ stream.seekg(currentPos, stream.beg);
+
+ Aws::Utils::ByteBuffer hash(SHA256_DIGEST_LENGTH);
+ SHA256_Final(hash.GetUnderlyingData(), &sha256);
+
+ return Aws::Utils::Crypto::HashResult(std::move(hash));
+ }
+};
+
+std::shared_ptr<Aws::Utils::Crypto::Hash>
+AWSSHA256Factory::CreateImplementation() const {
+ return Aws::MakeShared<AWSSha256OpenSSLImpl>(AWSCryptoAllocationTag);
+}
+
+std::shared_ptr<Aws::Utils::Crypto::HMAC>
+AWSSHA256HmacFactory::CreateImplementation() const {
+ return Aws::MakeShared<AWSSha256HMACOpenSSLImpl>(AWSCryptoAllocationTag);
+}
+
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/s3/aws_crypto.h b/tensorflow/core/platform/s3/aws_crypto.h
new file mode 100644
index 0000000000..f05771b904
--- /dev/null
+++ b/tensorflow/core/platform/s3/aws_crypto.h
@@ -0,0 +1,35 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <aws/core/Aws.h>
+#include <aws/core/utils/crypto/Factories.h>
+#include <aws/core/utils/crypto/HMAC.h>
+#include <aws/core/utils/crypto/Hash.h>
+
+namespace tensorflow {
+static const char* AWSCryptoAllocationTag = "AWSCryptoAllocation";
+
+class AWSSHA256Factory : public Aws::Utils::Crypto::HashFactory {
+ public:
+ std::shared_ptr<Aws::Utils::Crypto::Hash> CreateImplementation()
+ const override;
+};
+
+class AWSSHA256HmacFactory : public Aws::Utils::Crypto::HMACFactory {
+ public:
+ std::shared_ptr<Aws::Utils::Crypto::HMAC> CreateImplementation()
+ const override;
+};
+
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/s3/s3_file_system.cc b/tensorflow/core/platform/s3/s3_file_system.cc
index 6da679dc75..bdc8f808df 100644
--- a/tensorflow/core/platform/s3/s3_file_system.cc
+++ b/tensorflow/core/platform/s3/s3_file_system.cc
@@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/mutex.h"
+#include "tensorflow/core/platform/s3/aws_crypto.h"
#include "tensorflow/core/platform/s3/aws_logging.h"
-#include "tensorflow/core/platform/s3/s3_crypto.h"
#include <aws/core/Aws.h>
#include <aws/core/config/AWSProfileConfigLoader.h>
@@ -300,10 +300,10 @@ std::shared_ptr<Aws::S3::S3Client> S3FileSystem::GetS3Client() {
Aws::SDKOptions options;
options.cryptoOptions.sha256Factory_create_fn = []() {
- return Aws::MakeShared<S3SHA256Factory>(S3CryptoAllocationTag);
+ return Aws::MakeShared<AWSSHA256Factory>(AWSCryptoAllocationTag);
};
options.cryptoOptions.sha256HMACFactory_create_fn = []() {
- return Aws::MakeShared<S3SHA256HmacFactory>(S3CryptoAllocationTag);
+ return Aws::MakeShared<AWSSHA256HmacFactory>(AWSCryptoAllocationTag);
};
Aws::InitAPI(options);
diff --git a/tensorflow/contrib/lite/java/src/main/native/duration_utils_jni.cc b/tensorflow/core/platform/vmodule_benchmark_test.cc
index 0e08a04370..0f9e75bf9c 100644
--- a/tensorflow/contrib/lite/java/src/main/native/duration_utils_jni.cc
+++ b/tensorflow/core/platform/vmodule_benchmark_test.cc
@@ -13,26 +13,16 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <jni.h>
-#include <time.h>
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/test_benchmark.h"
-namespace tflite {
+namespace tensorflow {
-// Gets the elapsed wall-clock timespec.
-timespec getCurrentTime() {
- timespec time;
- clock_gettime(CLOCK_MONOTONIC, &time);
- return time;
+static void BM_DisabledVlog(int iters) {
+ for (int i = 0; i < iters; ++i) {
+ VLOG(1) << "Testing VLOG(1)!";
+ }
}
+BENCHMARK(BM_DisabledVlog);
-// Computes the time diff from two timespecs. Returns '-1' if 'stop' is earlier
-// than 'start'.
-jlong timespec_diff_nanoseconds(struct timespec* start, struct timespec* stop) {
- jlong result = stop->tv_sec - start->tv_sec;
- if (result < 0) return -1;
- result = 1000000000 * result + (stop->tv_nsec - start->tv_nsec);
- if (result < 0) return -1;
- return result;
-}
-
-} // namespace tflite
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/vmodule_test.cc b/tensorflow/core/platform/vmodule_test.cc
new file mode 100644
index 0000000000..47b4b2e0e7
--- /dev/null
+++ b/tensorflow/core/platform/vmodule_test.cc
@@ -0,0 +1,117 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Test that popens a child process with the VLOG-ing environment variable set
+// for the logging framework, and observes VLOG_IS_ON and VLOG macro output.
+
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/platform.h"
+#include "tensorflow/core/platform/test.h"
+
+#include <string.h>
+
+namespace tensorflow {
+namespace {
+
+int RealMain(const char* argv0, bool do_vlog) {
+ if (do_vlog) {
+#if !defined(PLATFORM_GOOGLE)
+ // Note, we only test this when !defined(PLATFORM_GOOGLE) because
+ // VmoduleActivated doesn't exist in that implementation.
+ //
+ // Also, we call this internal API to simulate what would happen if
+ // differently-named translation units attempted to VLOG, so we don't need
+ // to create dummy translation unit files.
+ bool ok = internal::LogMessage::VmoduleActivated("vmodule_test.cc", 7) &&
+ internal::LogMessage::VmoduleActivated("shoobadooba.h", 3);
+ if (!ok) {
+ fprintf(stderr, "vmodule activated levels not as expected.\n");
+ return EXIT_FAILURE;
+ }
+#endif
+
+ // Print info on which VLOG levels are activated.
+ fprintf(stderr, "VLOG_IS_ON(8)? %d\n", VLOG_IS_ON(8));
+ fprintf(stderr, "VLOG_IS_ON(7)? %d\n", VLOG_IS_ON(7));
+ fprintf(stderr, "VLOG_IS_ON(6)? %d\n", VLOG_IS_ON(6));
+ // Do some VLOG-ing.
+ VLOG(8) << "VLOG(8)";
+ VLOG(7) << "VLOG(7)";
+ VLOG(6) << "VLOG(6)";
+ LOG(INFO) << "INFO";
+ return EXIT_SUCCESS;
+ }
+
+ // Popen the child process.
+ std::string command = std::string(argv0);
+#if defined(PLATFORM_GOOGLE)
+ command = command + " do_vlog --vmodule=vmodule_test=7 --alsologtostderr";
+#else
+ command =
+ "TF_CPP_VMODULE=vmodule_test=7,shoobadooba=3 " + command + " do_vlog";
+#endif
+ command += " 2>&1";
+ fprintf(stderr, "Running: \"%s\"\n", command.c_str());
+ FILE* f = popen(command.c_str(), "r");
+ if (f == nullptr) {
+ fprintf(stderr, "Failed to popen child: %s\n", strerror(errno));
+ return EXIT_FAILURE;
+ }
+
+ // Read data from the child's stdout.
+ constexpr int kBufferSizeBytes = 4096;
+ char buffer[kBufferSizeBytes];
+ size_t result = fread(buffer, sizeof(buffer[0]), kBufferSizeBytes - 1, f);
+ if (result == 0) {
+ fprintf(stderr, "Failed to read from child stdout: %zu %s\n", result,
+ strerror(errno));
+ return EXIT_FAILURE;
+ }
+ buffer[result] = '\0';
+ int status = pclose(f);
+ if (status == -1) {
+ fprintf(stderr, "Failed to close popen child: %s\n", strerror(errno));
+ return EXIT_FAILURE;
+ }
+
+ // Check output is as expected.
+ const char kExpected[] =
+ "VLOG_IS_ON(8)? 0\nVLOG_IS_ON(7)? 1\nVLOG_IS_ON(6)? 1\n";
+ if (strstr(buffer, kExpected) == nullptr) {
+ fprintf(stderr, "error: unexpected output from child: \"%.*s\"\n",
+ kBufferSizeBytes, buffer);
+ return EXIT_FAILURE;
+ }
+ bool ok = strstr(buffer, "VLOG(7)\n") != nullptr &&
+ strstr(buffer, "VLOG(6)\n") != nullptr &&
+ strstr(buffer, "VLOG(8)\n") == nullptr;
+ if (!ok) {
+ fprintf(stderr, "error: VLOG output not as expected: \"%.*s\"\n",
+ kBufferSizeBytes, buffer);
+ return EXIT_FAILURE;
+ }
+
+ // Success!
+ return EXIT_SUCCESS;
+}
+
+} // namespace
+} // namespace tensorflow
+
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ bool do_vlog = argc >= 2 && strcmp(argv[1], "do_vlog") == 0;
+ return tensorflow::RealMain(argv[0], do_vlog);
+}
diff --git a/tensorflow/core/protobuf/config.proto b/tensorflow/core/protobuf/config.proto
index d83215d5c2..77639461d9 100644
--- a/tensorflow/core/protobuf/config.proto
+++ b/tensorflow/core/protobuf/config.proto
@@ -143,6 +143,10 @@ message GPUOptions {
// multiple processes are sharing a single GPU while individually using less
// than 1.0 per process memory fraction.
bool use_unified_memory = 2;
+
+ // If > 1, the number of device-to-device copy streams to create
+ // for each GPUDevice.
+ int32 num_dev_to_dev_copy_streams = 3;
}
// Everything inside experimental is subject to change and is not subject
@@ -385,6 +389,9 @@ message ConfigProto {
message Experimental {
// Task name for group resolution.
string collective_group_leader = 1;
+ // Whether the client will format templated errors. For example, the string:
+ // "The node was defined on ^^node:Foo:${file}:${line}^^".
+ bool client_handles_error_formatting = 2;
};
Experimental experimental = 16;
@@ -490,5 +497,67 @@ message CallableOptions {
// in the callable.
repeated TensorConnection tensor_connection = 5;
- // Next: 6
+ // The Tensor objects fed in the callable and fetched from the callable
+ // are expected to be backed by host (CPU) memory by default.
+ //
+ // The options below allow changing that - feeding tensors backed by
+ // device memory, or returning tensors that are backed by device memory.
+ //
+ // The maps below map the name of a feed/fetch tensor (which appears in
+ // 'feed' or 'fetch' fields above), to the fully qualified name of the device
+ // owning the memory backing the contents of the tensor.
+ //
+ // For example, creating a callable with the following options:
+ //
+ // CallableOptions {
+ // feed: "a:0"
+ // feed: "b:0"
+ //
+ // fetch: "x:0"
+ // fetch: "y:0"
+ //
+ // feed_devices: {
+ // "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
+ // }
+ //
+ // fetch_devices: {
+ // "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
+ // }
+ // }
+ //
+ // means that the Callable expects:
+ // - The first argument ("a:0") is a Tensor backed by GPU memory.
+ // - The second argument ("b:0") is a Tensor backed by host memory.
+ // and of its return values:
+ // - The first output ("x:0") will be backed by host memory.
+ // - The second output ("y:0") will be backed by GPU memory.
+ //
+ // FEEDS:
+ // It is the responsibility of the caller to ensure that the memory of the fed
+ // tensors will be correctly initialized and synchronized before it is
+ // accessed by operations executed during the call to Session::RunCallable().
+ //
+ // This is typically ensured by using the TensorFlow memory allocators
+ // (Device::GetAllocator()) to create the Tensor to be fed.
+ //
+ // Alternatively, for CUDA-enabled GPU devices, this typically means that the
+ // operation that produced the contents of the tensor has completed, i.e., the
+ // CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
+ // cuStreamSynchronize()).
+ map<string, string> feed_devices = 6;
+ map<string, string> fetch_devices = 7;
+
+ // By default, RunCallable() will synchronize the GPU stream before returning
+ // fetched tensors on a GPU device, to ensure that the values in those tensors
+ // have been produced. This simplifies interacting with the tensors, but
+ // potentially incurs a performance hit.
+ //
+  // If this option is set to true, the caller is responsible for ensuring
+ // that the values in the fetched tensors have been produced before they are
+ // used. The caller can do this by invoking `Device::Sync()` on the underlying
+ // device(s), or by feeding the tensors back to the same Session using
+ // `feed_devices` with the same corresponding device name.
+ bool fetch_skip_sync = 8;
+
+ // Next: 9
}
diff --git a/tensorflow/core/protobuf/debug.proto b/tensorflow/core/protobuf/debug.proto
index 499900f965..811cf406b9 100644
--- a/tensorflow/core/protobuf/debug.proto
+++ b/tensorflow/core/protobuf/debug.proto
@@ -7,7 +7,7 @@ option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf";
-// EXPERIMENTAL. Option for watching a node.
+// Option for watching a node in TensorFlow Debugger (tfdbg).
message DebugTensorWatch {
// Name of the node to watch.
string node_name = 1;
@@ -51,7 +51,7 @@ message DebugTensorWatch {
bool tolerate_debug_op_creation_failures = 5;
}
-// EXPERIMENTAL. Options for initializing DebuggerState.
+// Options for initializing DebuggerState in TensorFlow Debugger (tfdbg).
message DebugOptions {
// Debugging options
repeated DebugTensorWatch debug_tensor_watch_opts = 4;
diff --git a/tensorflow/core/protobuf/eager_service.proto b/tensorflow/core/protobuf/eager_service.proto
index 50294b8a42..5b05a1b3ee 100644
--- a/tensorflow/core/protobuf/eager_service.proto
+++ b/tensorflow/core/protobuf/eager_service.proto
@@ -7,6 +7,7 @@ import "tensorflow/core/framework/device_attributes.proto";
import "tensorflow/core/framework/function.proto";
import "tensorflow/core/framework/versions.proto";
import "tensorflow/core/protobuf/tensorflow_server.proto";
+import "tensorflow/core/framework/tensor_shape.proto";
message RemoteTensorHandle {
// The ID of the operation that produced this tensor.
@@ -45,6 +46,10 @@ message QueueItem {
}
}
+message QueueResponse {
+ repeated TensorShapeProto shape = 1;
+}
+
message CreateContextRequest {
// Identifies the full cluster, and this particular worker's position within.
ServerDef server_def = 1;
@@ -84,6 +89,8 @@ message EnqueueRequest {
}
message EnqueueResponse {
+ // A single operation response for every item in the request.
+ repeated QueueResponse queue_response = 1;
}
message WaitQueueDoneRequest {
diff --git a/tensorflow/core/protobuf/tensorflow_server.proto b/tensorflow/core/protobuf/tensorflow_server.proto
index be25804a1b..2bf48d50e1 100644
--- a/tensorflow/core/protobuf/tensorflow_server.proto
+++ b/tensorflow/core/protobuf/tensorflow_server.proto
@@ -46,6 +46,6 @@ message ServerDef {
// The protocol to be used by this server.
//
- // Acceptable values include: "grpc".
+ // Acceptable values include: "grpc", "grpc+verbs".
string protocol = 5;
}
diff --git a/tensorflow/core/util/device_name_utils.cc b/tensorflow/core/util/device_name_utils.cc
index 90c3fed2e8..8c24076aa9 100644
--- a/tensorflow/core/util/device_name_utils.cc
+++ b/tensorflow/core/util/device_name_utils.cc
@@ -184,16 +184,65 @@ bool DeviceNameUtils::ParseFullName(StringPiece fullname, ParsedName* p) {
return true;
}
+namespace {
+
+void CompleteName(const DeviceNameUtils::ParsedName& parsed_basename,
+ DeviceNameUtils::ParsedName* parsed_name) {
+ if (!parsed_name->has_job) {
+ parsed_name->job = parsed_basename.job;
+ parsed_name->has_job = true;
+ }
+ if (!parsed_name->has_replica) {
+ parsed_name->replica = parsed_basename.replica;
+ parsed_name->has_replica = true;
+ }
+ if (!parsed_name->has_task) {
+ parsed_name->task = parsed_basename.task;
+ parsed_name->has_task = true;
+ }
+ if (!parsed_name->has_type) {
+ parsed_name->type = parsed_basename.type;
+ parsed_name->has_type = true;
+ }
+ if (!parsed_name->has_id) {
+ parsed_name->id = parsed_basename.id;
+ parsed_name->has_id = true;
+ }
+}
+
+} // namespace
+
/* static */
-string DeviceNameUtils::CanonicalizeDeviceName(StringPiece fullname) {
+Status DeviceNameUtils::CanonicalizeDeviceName(StringPiece fullname,
+ StringPiece basename,
+ string* canonical_name) {
+ *canonical_name = "";
+ ParsedName parsed_basename;
+ if (!ParseFullName(basename, &parsed_basename)) {
+ return errors::InvalidArgument("Could not parse basename: ", basename,
+ " into a device specification.");
+ }
+ if (!(parsed_basename.has_job && parsed_basename.has_replica &&
+ parsed_basename.has_task && parsed_basename.has_type &&
+ parsed_basename.has_id)) {
+ return errors::InvalidArgument("Basename: ", basename,
+ " should be fully "
+ "specified.");
+ }
ParsedName parsed_name;
if (ParseLocalName(fullname, &parsed_name)) {
- return ParsedNameToString(parsed_name);
+ CompleteName(parsed_basename, &parsed_name);
+ *canonical_name = ParsedNameToString(parsed_name);
+ return Status::OK();
}
if (ParseFullName(fullname, &parsed_name)) {
- return ParsedNameToString(parsed_name);
+ CompleteName(parsed_basename, &parsed_name);
+ *canonical_name = ParsedNameToString(parsed_name);
+ return Status::OK();
}
- return "";
+ return errors::InvalidArgument("Could not parse ", fullname,
+ " into a device "
+ "specification.");
}
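A short usage sketch of the new signature (the job/replica/task values are illustrative only):

```
string canonical;
Status s = DeviceNameUtils::CanonicalizeDeviceName(
    "CPU:0", "/job:worker/replica:0/task:3/device:CPU:0", &canonical);
// On success, canonical == "/job:worker/replica:0/task:3/device:CPU:0";
// the job, replica, and task fields were filled in from the basename.
```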
/* static */
diff --git a/tensorflow/core/util/device_name_utils.h b/tensorflow/core/util/device_name_utils.h
index 0ae28df997..4071a70836 100644
--- a/tensorflow/core/util/device_name_utils.h
+++ b/tensorflow/core/util/device_name_utils.h
@@ -88,10 +88,14 @@ class DeviceNameUtils {
// Parses "fullname" into "*parsed". Returns true iff succeeds.
static bool ParseFullName(StringPiece fullname, ParsedName* parsed);
- // Canonicalizes "fullname". Accepts both legacy, newer and local versions of
- // the device spec. Returns the newer version of the device spec. If we were
- // unable to interpret / parse "fullname" returns "".
- static string CanonicalizeDeviceName(StringPiece fullname);
+  // Canonicalizes "fullname" into "*canonical_name". Uses a fully specified
+  // basename to fill in fields that are missing. Accepts legacy, newer, and
+  // local versions of the device spec. Returns the newer version of the
+  // device spec. If "fullname" cannot be interpreted / parsed, returns an
+  // error and sets *canonical_name to "".
+ static Status CanonicalizeDeviceName(StringPiece fullname,
+ StringPiece basename,
+ string* canonical_name);
// Returns true if "name" specifies any non-trivial constraint on the device.
static bool HasSomeDetails(const ParsedName& name) {
diff --git a/tensorflow/core/util/device_name_utils_test.cc b/tensorflow/core/util/device_name_utils_test.cc
index ff9c108f10..dafb3b20b9 100644
--- a/tensorflow/core/util/device_name_utils_test.cc
+++ b/tensorflow/core/util/device_name_utils_test.cc
@@ -467,18 +467,41 @@ TEST(DeviceNameUtilsTest, GetNamesForDeviceMappings) {
}
TEST(DeviceNameUtilsTest, CanonicalizeDeviceName) {
- EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1",
- DeviceNameUtils::CanonicalizeDeviceName(
- "/job:foo/replica:10/task:0/device:CPU:1"));
- EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1",
- DeviceNameUtils::CanonicalizeDeviceName(
- "/job:foo/task:0/replica:10/device:CPU:1"));
- EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1",
- DeviceNameUtils::CanonicalizeDeviceName(
- "/job:foo/task:0/replica:10/cpu:1"));
- EXPECT_EQ("/device:CPU:0", DeviceNameUtils::CanonicalizeDeviceName("CPU:0"));
- EXPECT_EQ("", DeviceNameUtils::CanonicalizeDeviceName(
- "/job:foo/task:0/replica/cpu:1"));
+ string canonical_name;
+ {
+ // Good basename.
+ string basename = "/job:foo/replica:10/task:0/device:CPU:0";
+ TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName(
+ "/job:foo/replica:10/task:0/device:CPU:1", basename, &canonical_name));
+ EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1", canonical_name);
+ TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName(
+ "/job:foo/task:0/replica:10/device:CPU:1", basename, &canonical_name));
+ EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1", canonical_name);
+ TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName(
+ "/job:foo/task:0/replica:10/cpu:1", basename, &canonical_name));
+ EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1", canonical_name);
+ TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName("CPU:0", basename,
+ &canonical_name));
+ EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:0", canonical_name);
+ Status s = DeviceNameUtils::CanonicalizeDeviceName(
+ "/job:foo/task:0/replica/cpu:1", basename, &canonical_name);
+ EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
+ EXPECT_EQ("", canonical_name);
+ }
+
+ {
+ // Try out malformed basenames.
+ string fullname = "/device:CPU:0";
+
+ Status s = DeviceNameUtils::CanonicalizeDeviceName(
+ fullname, "/device:CPU:0", &canonical_name);
+ EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
+ EXPECT_EQ("", canonical_name);
+ s = DeviceNameUtils::CanonicalizeDeviceName(
+ fullname, "/job:foo/task:0/replica/cpu:1", &canonical_name);
+ EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
+ EXPECT_EQ("", canonical_name);
+ }
}
static void BM_ParseFullName(int iters) {
diff --git a/tensorflow/core/util/mkl_util.h b/tensorflow/core/util/mkl_util.h
index 96944f27cd..bb447e0393 100644
--- a/tensorflow/core/util/mkl_util.h
+++ b/tensorflow/core/util/mkl_util.h
@@ -1487,6 +1487,8 @@ inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
return memory::desc(md);
}
+template <typename T>
+inline primitive FindOrCreateReorder(const memory* from, const memory* to);
/*
* Class to represent all the resources corresponding to a tensor in TensorFlow
* that are required to execute an operation (such as Convolution).
@@ -1733,6 +1735,24 @@ class MklDnnData {
return false;
}
+  /// TODO: this is a faster path with the reorder primitive cache compared
+  /// with CheckReorderToOpMem(..., std::vector<primitive>* net); the slow
+  /// path will be removed in the future.
+ inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd) {
+ CHECK_NOTNULL(user_memory_);
+ if (IsReorderNeeded(op_pd)) {
+ // TODO(nhasabni): can we remove dynamic memory allocation?
+      // Primitive reuse doesn't allow two identical reorder primitives in
+      // one stream, so submit it immediately.
+ reorder_memory_ = new memory(op_pd);
+ std::vector<primitive> net;
+ net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
+ stream(stream::kind::eager).submit(net).wait();
+ return true;
+ }
+ return false;
+ }
+
/// Overloaded version of above function that accepts memory buffer
/// where output of reorder needs to be stored.
///
@@ -1758,6 +1778,26 @@ class MklDnnData {
return false;
}
+  /// TODO: this is a faster path with the reorder primitive cache compared
+  /// with CheckReorderToOpMem(..., std::vector<primitive>* net); the slow
+  /// path will be removed in the future.
+ inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
+ void* reorder_data_handle) {
+ CHECK_NOTNULL(reorder_data_handle);
+ CHECK_NOTNULL(user_memory_);
+ if (IsReorderNeeded(op_pd)) {
+ // TODO(nhasabni): can we remove dynamic memory allocation?
+      // Primitive reuse doesn't allow two identical reorder primitives in
+      // one stream, so submit it immediately.
+ std::vector<primitive> net;
+ reorder_memory_ = new memory(op_pd, reorder_data_handle);
+ net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
+ stream(stream::kind::eager).submit(net).wait();
+ return true;
+ }
+ return false;
+ }
+
/// Another overloaded version of CheckReorderToOpMem that accepts Tensor
/// where output of reorder needs to be stored.
///
@@ -1776,6 +1816,15 @@ class MklDnnData {
return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net);
}
+  /// TODO: this is a faster path with the reorder primitive cache compared
+  /// with CheckReorderToOpMem(..., std::vector<primitive>* net); the slow
+  /// path will be removed in the future.
+ inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
+ Tensor* reorder_tensor) {
+ CHECK_NOTNULL(reorder_tensor);
+ return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor));
+ }
+
/// Function to handle output reorder
///
/// This function performs very similar functionality as input reordering
@@ -1812,6 +1861,20 @@ class MklDnnData {
CHECK_NOTNULL(reorder_memory_);
net->push_back(CreateReorder(reorder_memory_, user_memory_));
}
+
+  /// TODO: this is a faster path with the reorder primitive cache compared
+  /// with InsertReorderToUserMem(std::vector<primitive>* net); the slow
+  /// path will be removed in the future.
+ inline void InsertReorderToUserMem() {
+ CHECK_NOTNULL(user_memory_);
+ CHECK_NOTNULL(reorder_memory_);
+    // Primitive reuse doesn't allow two identical reorder primitives in
+    // one stream, so submit it immediately.
+ std::vector<primitive> net;
+ net.push_back(FindOrCreateReorder<T>(reorder_memory_, user_memory_));
+ stream(stream::kind::eager).submit(net).wait();
+ }
+
};
/// Base class for operations with reuse of primitives
@@ -1851,7 +1914,7 @@ class MklPrimitiveFactory {
}
private:
- static inline std::unordered_map<std::string, MklPrimitive*> &GetHashMap() {
+ static inline std::unordered_map<std::string, MklPrimitive*>& GetHashMap() {
static thread_local std::unordered_map<std::string, MklPrimitive*> map_;
return map_;
}
@@ -1894,6 +1957,109 @@ class FactoryKeyCreator {
}
};
+class MklReorderPrimitive : public MklPrimitive {
+ public:
+ explicit MklReorderPrimitive(const memory* from, const memory* to) {
+ Setup(from, to);
+ }
+ ~MklReorderPrimitive() {}
+
+ std::shared_ptr<primitive> GetPrimitive() {
+ return context_.reorder_prim;
+ }
+
+ void SetMemory(const memory* from, const memory* to) {
+ context_.src_mem->set_data_handle(from->get_data_handle());
+ context_.dst_mem->set_data_handle(to->get_data_handle());
+ }
+
+ private:
+ struct ReorderContext {
+ std::shared_ptr<mkldnn::memory> src_mem;
+ std::shared_ptr<mkldnn::memory> dst_mem;
+ std::shared_ptr<primitive> reorder_prim;
+    ReorderContext()
+        : src_mem(nullptr), dst_mem(nullptr), reorder_prim(nullptr) {}
+ } context_;
+
+ engine cpu_engine_ = engine(engine::cpu, 0);
+
+ void Setup(const memory* from, const memory* to) {
+ context_.src_mem.reset(new memory(
+ {from->get_primitive_desc().desc(), cpu_engine_}, DummyData));
+ context_.dst_mem.reset(new memory(
+ {to->get_primitive_desc().desc(), cpu_engine_}, DummyData));
+ context_.reorder_prim = std::make_shared<mkldnn::reorder>(
+ reorder(*context_.src_mem, *context_.dst_mem));
+ }
+};
+
+template <typename T>
+class MklReorderPrimitiveFactory : public MklPrimitiveFactory<T> {
+ public:
+ static MklReorderPrimitive* Get(const memory* from,
+ const memory* to) {
+ auto reorderPrim = static_cast<MklReorderPrimitive*>(
+ MklReorderPrimitiveFactory<T>::GetInstance().GetReorder(from, to));
+ if (reorderPrim == nullptr) {
+ reorderPrim = new MklReorderPrimitive(from, to);
+ MklReorderPrimitiveFactory<T>::GetInstance().SetReorder(
+ from, to, reorderPrim);
+ }
+ reorderPrim->SetMemory(from, to);
+ return reorderPrim;
+ }
+
+  static MklReorderPrimitiveFactory& GetInstance() {
+ static MklReorderPrimitiveFactory instance_;
+ return instance_;
+ }
+
+ private:
+  MklReorderPrimitiveFactory() {}
+  ~MklReorderPrimitiveFactory() {}
+
+ static std::string CreateKey(const memory* from, const memory* to) {
+ std::string prefix = "reorder";
+ FactoryKeyCreator key_creator;
+ auto const &from_desc = from->get_primitive_desc().desc().data;
+ auto const &to_desc = to->get_primitive_desc().desc().data;
+ memory::dims from_dims(from_desc.dims, &from_desc.dims[from_desc.ndims]);
+ memory::dims to_dims(to_desc.dims, &to_desc.dims[to_desc.ndims]);
+ key_creator.AddAsKey(prefix);
+ key_creator.AddAsKey(static_cast<int>(from_desc.format));
+ key_creator.AddAsKey(static_cast<int>(from_desc.data_type));
+ key_creator.AddAsKey(from_dims);
+ key_creator.AddAsKey(static_cast<int>(to_desc.format));
+ key_creator.AddAsKey(static_cast<int>(to_desc.data_type));
+ key_creator.AddAsKey(to_dims);
+ return key_creator.GetKey();
+ }
+
+ MklPrimitive* GetReorder(const memory* from, const memory* to) {
+ std::string key = CreateKey(from, to);
+ return this->GetOp(key);
+ }
+
+ void SetReorder(const memory* from, const memory* to, MklPrimitive* op) {
+ std::string key = CreateKey(from, to);
+ this->SetOp(key, op);
+ }
+};
+
+ /// Function to find (or create) a reorder from the memory pointed to by
+ /// `from` to the memory pointed to by `to`; it creates the primitive or
+ /// fetches it from the cache pool if available. Returns the primitive.
+ template <typename T>
+ inline primitive FindOrCreateReorder(const memory* from, const memory* to) {
+ CHECK_NOTNULL(from);
+ CHECK_NOTNULL(to);
+ MklReorderPrimitive *reorder_prim =
+ MklReorderPrimitiveFactory<T>::Get(from, to);
+ return *reorder_prim->GetPrimitive();
+ }
+
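As a rough sketch of this cached fast path (assuming two valid `mkldnn::memory` objects `src` and `dst` with differing descriptors; the names are hypothetical):

```
// Look up (or create and cache) the reorder primitive for this pair of
// memory descriptors, then execute it eagerly, mirroring the pattern used
// by the CheckReorderToOpMem() fast paths above.
std::vector<primitive> net;
net.push_back(FindOrCreateReorder<float>(&src, &dst));
stream(stream::kind::eager).submit(net).wait();
```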
#endif // INTEL_MKL_DNN
} // namespace tensorflow
diff --git a/tensorflow/core/util/saved_tensor_slice_util.h b/tensorflow/core/util/saved_tensor_slice_util.h
index ee43945a39..90672a10a8 100644
--- a/tensorflow/core/util/saved_tensor_slice_util.h
+++ b/tensorflow/core/util/saved_tensor_slice_util.h
@@ -123,6 +123,7 @@ TENSOR_PROTO_EXTRACT_TYPE(int8, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(int16, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(qint8, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(quint8, int, int32);
+TENSOR_PROTO_EXTRACT_TYPE(quint16, int, int32);
#undef TENSOR_PROTO_EXTRACT_TYPE_COMPLEX
#undef TENSOR_PROTO_EXTRACT_TYPE_HELPER
diff --git a/tensorflow/core/util/sparse/dim_comparator.h b/tensorflow/core/util/sparse/dim_comparator.h
index b773b33008..0782e7e1a8 100644
--- a/tensorflow/core/util/sparse/dim_comparator.h
+++ b/tensorflow/core/util/sparse/dim_comparator.h
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_UTIL_SPARSE_DIM_COMPARATOR_H_
-#define TENSORFLOW_UTIL_SPARSE_DIM_COMPARATOR_H_
+#ifndef TENSORFLOW_CORE_UTIL_SPARSE_DIM_COMPARATOR_H_
+#define TENSORFLOW_CORE_UTIL_SPARSE_DIM_COMPARATOR_H_
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/kernels/bounds_check.h"
@@ -49,11 +49,11 @@ class DimComparator {
DimComparator(const TTypes<int64>::Matrix& ix, const VarDimArray& order,
const VarDimArray& shape)
: ix_(ix), order_(order), dims_(shape.size()) {
- CHECK_GT(order.size(), size_t{0}) << "Must order using at least one index";
- CHECK_LE(order.size(), shape.size()) << "Can only sort up to dims";
+ DCHECK_GT(order.size(), size_t{0}) << "Must order using at least one index";
+ DCHECK_LE(order.size(), shape.size()) << "Can only sort up to dims";
for (size_t d = 0; d < order.size(); ++d) {
- CHECK_GE(order[d], 0);
- CHECK_LT(order[d], shape.size());
+ DCHECK_GE(order[d], 0);
+ DCHECK_LT(order[d], shape.size());
}
}
@@ -97,7 +97,7 @@ class FixedDimComparator : DimComparator {
FixedDimComparator(const TTypes<int64>::Matrix& ix, const VarDimArray& order,
const VarDimArray& shape)
: DimComparator(ix, order, shape) {
- CHECK_EQ(order.size(), ORDER_DIM);
+ DCHECK_EQ(order.size(), ORDER_DIM);
}
inline bool operator()(const int64 i, const int64 j) const {
bool value = false;
@@ -116,4 +116,4 @@ class FixedDimComparator : DimComparator {
} // namespace sparse
} // namespace tensorflow
-#endif // TENSORFLOW_UTIL_SPARSE_DIM_COMPARATOR_H_
+#endif // TENSORFLOW_CORE_UTIL_SPARSE_DIM_COMPARATOR_H_
diff --git a/tensorflow/core/util/sparse/group_iterator.h b/tensorflow/core/util/sparse/group_iterator.h
index fb70318078..3fa8cb6116 100644
--- a/tensorflow/core/util/sparse/group_iterator.h
+++ b/tensorflow/core/util/sparse/group_iterator.h
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_UTIL_SPARSE_GROUP_ITERATOR_H_
-#define TENSORFLOW_UTIL_SPARSE_GROUP_ITERATOR_H_
+#ifndef TENSORFLOW_CORE_UTIL_SPARSE_GROUP_ITERATOR_H_
+#define TENSORFLOW_CORE_UTIL_SPARSE_GROUP_ITERATOR_H_
#include <vector>
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
@@ -143,4 +143,4 @@ typename TTypes<T>::UnalignedVec Group::values() const {
} // namespace sparse
} // namespace tensorflow
-#endif // TENSORFLOW_UTIL_SPARSE_GROUP_ITERATOR_H_
+#endif // TENSORFLOW_CORE_UTIL_SPARSE_GROUP_ITERATOR_H_
diff --git a/tensorflow/core/util/sparse/sparse_tensor.h b/tensorflow/core/util/sparse/sparse_tensor.h
index 258ee418c1..0f04b65f60 100644
--- a/tensorflow/core/util/sparse/sparse_tensor.h
+++ b/tensorflow/core/util/sparse/sparse_tensor.h
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_UTIL_SPARSE_SPARSE_TENSOR_H_
-#define TENSORFLOW_UTIL_SPARSE_SPARSE_TENSOR_H_
+#ifndef TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_
+#define TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_
#include <limits>
#include <numeric>
@@ -26,8 +26,10 @@ limitations under the License.
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/bounds_check.h"
+#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/sparse/dim_comparator.h"
@@ -41,32 +43,88 @@ class SparseTensor {
typedef typename gtl::ArraySlice<int64> VarDimArray;
typedef typename gtl::InlinedVector<int64, 8> ShapeArray;
+ static Status Create(Tensor ix, Tensor vals, const VarDimArray shape,
+ const VarDimArray order, SparseTensor* result) {
+ if (ix.dtype() != DT_INT64) {
+ return Status(
+ error::INVALID_ARGUMENT,
+ strings::StrCat("indices must be type int64 but got: ", ix.dtype()));
+ }
+ if (!TensorShapeUtils::IsVector(vals.shape())) {
+ return Status(error::INVALID_ARGUMENT,
+ strings::StrCat("vals must be a vec, but got: ",
+ vals.shape().DebugString()));
+ }
+ if (ix.shape().dim_size(0) != vals.shape().dim_size(0)) {
+ return Status(error::INVALID_ARGUMENT,
+ strings::StrCat("indices and values rows (indexing "
+ "dimension) must match. (indices = ",
+ ix.shape().dim_size(0), ", values = ",
+ vals.shape().dim_size(0), ")"));
+ }
+ int dims;
+ TF_RETURN_IF_ERROR(GetDimsFromIx(ix, &dims));
+ if (order.size() != dims) {
+ return Status(error::INVALID_ARGUMENT,
+ "Order length must be SparseTensor rank.");
+ }
+ if (shape.size() != dims) {
+ return Status(error::INVALID_ARGUMENT,
+ "Shape rank must be SparseTensor rank.");
+ }
+
+ *result = SparseTensor(ix, vals, shape, order);
+    return Status::OK();
+ }
+
+ static Status Create(Tensor ix, Tensor vals, const TensorShape& shape,
+ SparseTensor* result) {
+ return Create(ix, vals, TensorShapeToVector(shape),
+ UndefinedOrder(TensorShapeToVector(shape)), result);
+ }
+
+ static Status Create(Tensor ix, Tensor vals, const VarDimArray shape,
+ SparseTensor* result) {
+ return Create(ix, vals, shape, UndefinedOrder(shape), result);
+ }
+
+ static Status Create(Tensor ix, Tensor vals, const TensorShape& shape,
+ const VarDimArray order, SparseTensor* result) {
+ return Create(ix, vals, TensorShapeToVector(shape), order, result);
+ }
+
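A minimal sketch of the new factory in use, inside a function returning `Status` (shapes and dtypes are illustrative):

```
Tensor ix(DT_INT64, TensorShape({3, 2}));   // 3 nonzeros in a rank-2 tensor
Tensor vals(DT_FLOAT, TensorShape({3}));    // fill ix/vals before use
sparse::SparseTensor st;
TF_RETURN_IF_ERROR(
    sparse::SparseTensor::Create(ix, vals, TensorShape({10, 10}), &st));
// Invalid inputs now surface as an InvalidArgument Status rather than a
// CHECK failure in the constructor.
```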
+ SparseTensor() : dims_(0) {}
+
+ // DEPRECATED: use Create() functions instead of constructors directly.
SparseTensor(Tensor ix, Tensor vals, const TensorShape& shape)
: SparseTensor(ix, vals, TensorShapeToVector(shape),
UndefinedOrder(TensorShapeToVector(shape))) {}
+ // DEPRECATED: use Create() functions instead of constructors directly.
SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape)
: SparseTensor(ix, vals, shape, UndefinedOrder(shape)) {}
+ // DEPRECATED: use Create() functions instead of constructors directly.
SparseTensor(Tensor ix, Tensor vals, const TensorShape& shape,
const VarDimArray order)
: SparseTensor(ix, vals, TensorShapeToVector(shape), order) {}
+ // DEPRECATED: use Create() functions instead of constructors directly.
SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape,
const VarDimArray order)
: ix_(ix),
vals_(vals),
shape_(shape.begin(), shape.end()),
order_(order.begin(), order.end()),
- dims_(GetDimsFromIx(ix)) {
- CHECK_EQ(ix.dtype(), DT_INT64)
+ dims_(UnsafeGetDimsFromIx(ix)) {
+ DCHECK_EQ(ix.dtype(), DT_INT64)
<< "indices must be type int64 but got: " << ix.dtype();
- CHECK(TensorShapeUtils::IsVector(vals.shape()))
+ DCHECK(TensorShapeUtils::IsVector(vals.shape()))
<< "vals must be a vec, but got: " << vals.shape().DebugString();
- CHECK_EQ(ix.shape().dim_size(0), vals.shape().dim_size(0))
+ DCHECK_EQ(ix.shape().dim_size(0), vals.shape().dim_size(0))
<< "indices and values rows (indexing dimension) must match.";
- CHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank.";
- CHECK_EQ(shape.size(), dims_) << "Shape rank must be SparseTensor rank.";
+ DCHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank.";
+ DCHECK_EQ(shape.size(), dims_) << "Shape rank must be SparseTensor rank.";
}
SparseTensor(const SparseTensor& other)
@@ -81,6 +139,16 @@ class SparseTensor {
vals_ = other.vals_;
shape_ = other.shape_;
order_ = other.order_;
+ dims_ = other.dims_;
+ return *this;
+ }
+
+ SparseTensor& operator=(SparseTensor&& other) {
+ ix_ = std::move(other.ix_);
+ vals_ = std::move(other.vals_);
+ shape_ = std::move(other.shape_);
+ order_ = std::move(other.order_);
+ dims_ = std::move(other.dims_);
return *this;
}
@@ -126,11 +194,11 @@ class SparseTensor {
//
// See the README.md in this directory for more usage information.
GroupIterable group(const VarDimArray& group_ix) const {
- CHECK_LE(group_ix.size(), dims_);
+ DCHECK_LE(group_ix.size(), dims_);
for (std::size_t di = 0; di < group_ix.size(); ++di) {
- CHECK_GE(group_ix[di], 0) << "Group dimension out of range";
- CHECK_LT(group_ix[di], dims_) << "Group dimension out of range";
- CHECK_EQ(group_ix[di], order_[di])
+ DCHECK_GE(group_ix[di], 0) << "Group dimension out of range";
+ DCHECK_LT(group_ix[di], dims_) << "Group dimension out of range";
+ DCHECK_EQ(group_ix[di], order_[di])
<< "Group dimension does not match sorted order";
}
return GroupIterable(ix_, vals_, dims_, group_ix);
@@ -166,9 +234,16 @@ class SparseTensor {
// isn't an integer multiple of split_dim, we add one extra dimension for
// each slice.
template <typename T>
+ static Status Split(const SparseTensor& tensor, const int split_dim,
+ const int num_split, std::vector<SparseTensor>* result);
+
+ // DEPRECATED: use the form of Split() that takes an output pointer and
+ // returns a status instead.
+ template <typename T>
static std::vector<SparseTensor> Split(const SparseTensor& tensor,
const int split_dim,
- const int num_split);
+ const int num_split,
+ Status* status = nullptr);
// Slice() will slice the input SparseTensor into a SparseTensor based on
// specified start and size. Both start and size are 1-D array with each
@@ -189,9 +264,18 @@ class SparseTensor {
}
private:
- static int GetDimsFromIx(const Tensor& ix) {
- CHECK(TensorShapeUtils::IsMatrix(ix.shape()))
- << "indices must be a matrix, but got: " << ix.shape().DebugString();
+ static Status GetDimsFromIx(const Tensor& ix, int* result) {
+ if (!TensorShapeUtils::IsMatrix(ix.shape())) {
+ return Status(error::INVALID_ARGUMENT,
+ strings::StrCat("indices must be a matrix, but got: ",
+ ix.shape().DebugString()));
+ }
+ *result = UnsafeGetDimsFromIx(ix);
+    return Status::OK();
+ }
+
+ static int UnsafeGetDimsFromIx(const Tensor& ix) {
+ DCHECK(TensorShapeUtils::IsMatrix(ix.shape()));
return ix.dim_size(1);
}
@@ -251,8 +335,8 @@ class SparseTensor {
// Helper for Split() that returns the slice index.
static inline int GetSliceIndex(const int dim, const int split_size,
const int residual) {
- CHECK_GT(split_size, 0);
- CHECK_GE(dim, 0);
+ DCHECK_GT(split_size, 0);
+ DCHECK_GE(dim, 0);
if (residual == 0) return dim / split_size;
const int offset = residual * (split_size + 1);
if (dim < offset) {
@@ -265,8 +349,8 @@ class SparseTensor {
// Helper for Split() that returns the dimension in the slice.
static inline int GetDimensionInSlice(const int dim, const int split_size,
const int residual) {
- CHECK_GT(split_size, 0);
- CHECK_GE(dim, 0);
+ DCHECK_GT(split_size, 0);
+ DCHECK_GE(dim, 0);
if (residual == 0) return dim % split_size;
const int offset = residual * (split_size + 1);
if (dim < offset) {
@@ -279,8 +363,8 @@ class SparseTensor {
// Helper for Split() that returns the shape given a slice index.
static inline int GetSliceShape(const int slice_index, const int split_size,
const int residual) {
- CHECK_GT(split_size, 0);
- CHECK_GE(slice_index, 0);
+ DCHECK_GT(split_size, 0);
+ DCHECK_GE(slice_index, 0);
if (residual == 0) return split_size;
if (slice_index < residual) {
return split_size + 1;
@@ -293,7 +377,7 @@ class SparseTensor {
Tensor vals_;
ShapeArray shape_;
ShapeArray order_;
- const int dims_;
+ int dims_;
};
// This operation updates the indices and values Tensor rows, so it is
@@ -301,9 +385,9 @@ class SparseTensor {
// temporary space.
template <typename T>
void SparseTensor::Reorder(const VarDimArray& order) {
- CHECK_EQ(DataTypeToEnum<T>::v(), dtype())
+ DCHECK_EQ(DataTypeToEnum<T>::v(), dtype())
<< "Reorder requested with the wrong datatype";
- CHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank";
+ DCHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank";
auto ix_t = ix_.matrix<int64>();
auto vals_t = vals_.vec<T>();
@@ -360,13 +444,13 @@ void SparseTensor::Reorder(const VarDimArray& order) {
template <typename T>
bool SparseTensor::ValidateAndInitializeToDense(Tensor* out, bool initialize) {
- CHECK_EQ(DataTypeToEnum<T>::v(), dtype())
+ DCHECK_EQ(DataTypeToEnum<T>::v(), dtype())
<< "ToDense requested with the wrong datatype";
- CHECK_EQ(out->shape().dims(), dims_)
+ DCHECK_EQ(out->shape().dims(), dims_)
<< "Incompatible dimensions between SparseTensor and output";
- CHECK_EQ(out->dtype(), DataTypeToEnum<T>::v())
+ DCHECK_EQ(out->dtype(), DataTypeToEnum<T>::v())
<< "Output must be type: " << DataTypeToEnum<T>::v()
<< " but got: " << out->dtype();
@@ -422,9 +506,9 @@ bool SparseTensor::ToDense(Tensor* out, bool initialize) {
template <typename T>
SparseTensor SparseTensor::Concat(
const gtl::ArraySlice<SparseTensor>& tensors) {
- CHECK_GE(tensors.size(), size_t{1}) << "Cannot concat 0 SparseTensors";
+ DCHECK_GE(tensors.size(), size_t{1}) << "Cannot concat 0 SparseTensors";
const int dims = tensors[0].dims_;
- CHECK_GE(dims, 1) << "Cannot concat 0-dimensional SparseTensors";
+ DCHECK_GE(dims, 1) << "Cannot concat 0-dimensional SparseTensors";
auto order_0 = tensors[0].order();
const int primary_dim = order_0[0];
ShapeArray final_order(order_0.begin(), order_0.end());
@@ -434,17 +518,17 @@ SparseTensor SparseTensor::Concat(
bool fully_ordered = true;
for (const SparseTensor& st : tensors) {
- CHECK_EQ(st.dims_, dims) << "All SparseTensors must have the same rank.";
- CHECK_EQ(DataTypeToEnum<T>::v(), st.dtype())
+ DCHECK_EQ(st.dims_, dims) << "All SparseTensors must have the same rank.";
+ DCHECK_EQ(DataTypeToEnum<T>::v(), st.dtype())
<< "Concat requested with the wrong data type";
- CHECK_GE(st.order()[0], 0) << "SparseTensor must be ordered";
- CHECK_EQ(st.order()[0], primary_dim)
+ DCHECK_GE(st.order()[0], 0) << "SparseTensor must be ordered";
+ DCHECK_EQ(st.order()[0], primary_dim)
<< "All SparseTensors' order[0] must match. This is the concat dim.";
if (st.order() != final_order) fully_ordered = false;
const VarDimArray& st_shape = st.shape();
for (int d = 0; d < dims - 1; ++d) {
const int cdim = (d < primary_dim) ? d : d + 1;
- CHECK_EQ(final_shape[cdim], st_shape[cdim])
+ DCHECK_EQ(final_shape[cdim], st_shape[cdim])
<< "All SparseTensors' shapes must match except on the concat dim. "
<< "Concat dim: " << primary_dim
<< ", mismatched shape at dim: " << cdim
@@ -494,7 +578,8 @@ SparseTensor SparseTensor::Concat(
template <typename T>
std::vector<SparseTensor> SparseTensor::Split(const SparseTensor& input_tensor,
const int split_dim,
- const int num_split) {
+ const int num_split,
+ Status* status /* = nullptr */) {
std::vector<Tensor> output_indices;
std::vector<Tensor> output_values;
std::vector<TensorShape> output_shapes;
@@ -514,12 +599,18 @@ std::vector<SparseTensor> SparseTensor::Split(const SparseTensor& input_tensor,
const int split_dim_size = input_tensor.shape()[split_dim];
const int split_size = split_dim_size / num_split;
- CHECK(num_split > 0 && num_split <= split_dim_size) << "num_split must be in "
- "the interval (0, "
- << split_dim_size << "]";
- CHECK(split_dim >= 0 && split_dim < num_dim) << "num_dim must be in "
- "the interval [0, "
- << num_dim << ")";
+  if (!(num_split > 0 && num_split <= split_dim_size)) {
+    if (status != nullptr) {
+      *status = Status(error::INVALID_ARGUMENT,
+                       strings::StrCat("num_split must be in the interval (0, ",
+                                       split_dim_size, "]"));
+    }
+    return {};
+  }
+  if (!(split_dim >= 0 && split_dim < num_dim)) {
+    if (status != nullptr) {
+      *status = Status(error::INVALID_ARGUMENT,
+                       strings::StrCat("split_dim must be in the interval [0, ",
+                                       num_dim, ")"));
+    }
+    return {};
+  }
const int residual = split_dim_size % num_split;
for (int i = 0; i < input_tensor.indices().dim_size(0); ++i) {
@@ -559,13 +650,28 @@ std::vector<SparseTensor> SparseTensor::Split(const SparseTensor& input_tensor,
std::vector<SparseTensor> output_tensors;
output_tensors.reserve(num_split);
for (int i = 0; i < num_split; ++i) {
- output_tensors.emplace_back(output_indices[i], output_values[i],
- output_shapes[i]);
+ SparseTensor tensor;
+ Status create_status =
+ Create(output_indices[i], output_values[i], output_shapes[i], &tensor);
+    if (!create_status.ok()) {
+      if (status != nullptr) {
+        *status = create_status;
+      }
+      return {};
+    }
+ output_tensors.push_back(std::move(tensor));
}
return output_tensors;
}
template <typename T>
+Status SparseTensor::Split(const SparseTensor& input_tensor,
+ const int split_dim, const int num_split,
+ std::vector<SparseTensor>* result) {
+ Status status;
+ *result = Split<T>(input_tensor, split_dim, num_split, &status);
+ return status;
+}
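Callers of the new overload can propagate failures instead of crashing, e.g. (a sketch, assuming a valid `st` inside a function returning `Status`):

```
std::vector<sparse::SparseTensor> parts;
TF_RETURN_IF_ERROR(sparse::SparseTensor::Split<int64>(
    st, /*split_dim=*/0, /*num_split=*/2, &parts));
```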
+
+template <typename T>
SparseTensor SparseTensor::Slice(const SparseTensor& input_tensor,
const gtl::ArraySlice<int64>& start,
const gtl::ArraySlice<int64>& size) {
@@ -643,4 +749,4 @@ SparseTensor SparseTensor::Slice(const SparseTensor& input_tensor,
} // namespace sparse
} // namespace tensorflow
-#endif // TENSORFLOW_UTIL_SPARSE_SPARSE_TENSOR_H_
+#endif // TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_
diff --git a/tensorflow/core/util/sparse/sparse_tensor_test.cc b/tensorflow/core/util/sparse/sparse_tensor_test.cc
index 85de032085..5578e42625 100644
--- a/tensorflow/core/util/sparse/sparse_tensor_test.cc
+++ b/tensorflow/core/util/sparse/sparse_tensor_test.cc
@@ -94,9 +94,12 @@ TEST(SparseTensorTest, SparseTensorInvalidIndicesType) {
const int NDIM = 3;
Tensor ix(DT_INT32, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
+ SparseTensor result;
- EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2}),
- "indices must be type int64");
+ EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
+ &result)
+ .code(),
+ error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidIndicesShape) {
@@ -104,9 +107,12 @@ TEST(SparseTensorTest, SparseTensorInvalidIndicesShape) {
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM, 1}));
Tensor vals(DT_STRING, TensorShape({N}));
+ SparseTensor result;
- EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2}),
- "indices must be a matrix");
+ EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
+ &result)
+ .code(),
+ error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidValues) {
@@ -114,9 +120,12 @@ TEST(SparseTensorTest, SparseTensorInvalidValues) {
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N, 1}));
+ SparseTensor result;
- EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2}),
- "vals must be a vec");
+ EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
+ &result)
+ .code(),
+ error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidN) {
@@ -124,9 +133,12 @@ TEST(SparseTensorTest, SparseTensorInvalidN) {
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N - 1}));
+ SparseTensor result;
- EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2}),
- "indices and values rows .* must match");
+ EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
+ &result)
+ .code(),
+ error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidOrder) {
@@ -134,18 +146,24 @@ TEST(SparseTensorTest, SparseTensorInvalidOrder) {
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
+ SparseTensor result;
- EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1}),
- "Order length must be SparseTensor rank");
+ EXPECT_EQ(
+ SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1}, &result)
+ .code(),
+ error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidShape) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
+ SparseTensor result;
- EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10}), {0, 1, 2}),
- "Shape rank must be SparseTensor rank");
+ EXPECT_EQ(
+ SparseTensor::Create(ix, vals, TensorShape({10, 10}), {0, 1, 2}, &result)
+ .code(),
+ error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorConstruction) {
@@ -169,7 +187,8 @@ TEST(SparseTensorTest, SparseTensorConstruction) {
TensorShape shape({10, 10, 10});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Status st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("indices[2] = [2,0,0] is out of order",
@@ -210,7 +229,8 @@ TEST(SparseTensorTest, EmptySparseTensorAllowed) {
std::vector<int64> shape{10, 10, 10};
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(st.order(), order);
@@ -227,7 +247,8 @@ TEST(SparseTensorTest, SortingWorksCorrectly) {
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
TensorShape shape({1000, 1000, 1000, 1000});
- SparseTensor st(ix, vals, shape);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, &st));
auto ix_t = ix.matrix<int64>();
@@ -266,7 +287,8 @@ TEST(SparseTensorTest, ValidateIndicesFindsInvalid) {
TensorShape shape({10, 10, 10});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
st.Reorder<string>(order);
Status st_indices_valid = st.IndicesValid();
@@ -302,7 +324,8 @@ TEST(SparseTensorTest, SparseTensorCheckBoundaries) {
TensorShape shape({10, 10, 10});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
EXPECT_FALSE(st.IndicesValid().ok());
st.Reorder<string>(order);
@@ -351,7 +374,8 @@ TEST(SparseTensorTest, SparseTensorToDenseTensor) {
TensorShape shape({4, 4, 5});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Tensor dense(DT_STRING, TensorShape({4, 4, 5}));
st.ToDense<string>(&dense);
@@ -390,7 +414,8 @@ TEST(SparseTensorTest, SparseTensorToLargerDenseTensor) {
TensorShape shape({4, 4, 5});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Tensor dense(DT_STRING, TensorShape({10, 10, 10}));
st.ToDense<string>(&dense);
@@ -433,7 +458,8 @@ TEST(SparseTensorTest, SparseTensorGroup) {
TensorShape shape({10, 10, 10});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
st.Reorder<int32>(order);
std::vector<std::vector<int64> > groups;
@@ -521,7 +547,8 @@ TEST(SparseTensorTest, Concat) {
TensorShape shape({10, 10, 10});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
EXPECT_FALSE(st.IndicesValid().ok());
st.Reorder<string>(order);
TF_EXPECT_OK(st.IndicesValid());
@@ -551,7 +578,9 @@ TEST(SparseTensorTest, Concat) {
// Concat works if non-primary ix is out of order, but output order
// is not defined
- SparseTensor st_ooo(ix, vals, shape, {0, 2, 1}); // non-primary ix OOO
+ SparseTensor st_ooo;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, {0, 2, 1},
+ &st_ooo)); // non-primary ix OOO
SparseTensor conc_ooo = SparseTensor::Concat<string>({st, st, st, st_ooo});
std::vector<int64> expected_ooo{-1, -1, -1};
EXPECT_EQ(conc_ooo.order(), expected_ooo);
@@ -584,9 +613,11 @@ TEST(SparseTensorTest, Split) {
vals.vec<int64>()(2) = 3;
vals.vec<int64>()(3) = 4;
- SparseTensor st(ids, vals, TensorShape({4, 3}));
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({4, 3}), &st));
- std::vector<SparseTensor> st_list = SparseTensor::Split<int64>(st, 0, 2);
+ std::vector<SparseTensor> st_list;
+ TF_ASSERT_OK(SparseTensor::Split<int64>(st, 0, 2, &st_list));
EXPECT_EQ(st_list.size(), 2);
auto expected_shape = gtl::InlinedVector<int64, 8>{2, 3};
@@ -633,7 +664,8 @@ TEST(SparseTensorTest, Slice) {
vals.vec<int64>()(2) = 3;
vals.vec<int64>()(3) = 4;
- SparseTensor st(ids, vals, TensorShape({4, 3}));
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({4, 3}), &st));
std::vector<int64> start(2, 0);
std::vector<int64> size(2);
@@ -662,7 +694,8 @@ TEST(SparseTensorTest, Dim0SparseTensorToDenseTensor) {
vals.scalar<int32>()() = 5;
TensorShape shape({});
- SparseTensor st(ix, vals, shape);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, &st));
Tensor dense(DT_INT32, TensorShape({}));
st.ToDense<int32>(&dense);
@@ -699,7 +732,8 @@ static void BM_SparseReorderFloat(int iters, int N32, int NDIM32) {
ix_t(i, d) = rnd.Rand64() % 1000;
}
}
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
testing::StartTiming();
st.Reorder<float>(reorder);
@@ -740,7 +774,8 @@ static void BM_SparseReorderString(int iters, int N32, int NDIM32) {
ix_t(i, d) = rnd.Rand64() % 1000;
}
}
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
testing::StartTiming();
st.Reorder<string>(reorder);
diff --git a/tensorflow/core/util/status_util.h b/tensorflow/core/util/status_util.h
new file mode 100644
index 0000000000..ea92f61dce
--- /dev/null
+++ b/tensorflow/core/util/status_util.h
@@ -0,0 +1,36 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CORE_UTIL_STATUS_UTIL_H_
+#define TENSORFLOW_CORE_UTIL_STATUS_UTIL_H_
+
+#include "tensorflow/core/graph/graph.h"
+#include "tensorflow/core/lib/strings/strcat.h"
+
+namespace tensorflow {
+
+// Creates a tag to be used in an exception error message. This can be parsed by
+// the Python layer and replaced with information about the node.
+//
+// For example, error_format_tag(node, "${file}") returns
+// "^^node:NODE_NAME:${file}^^", which would be rewritten by the Python layer
+// as e.g. "file/where/node/was/created.py".
+inline string error_format_tag(const Node& node, const string& format) {
+ return strings::StrCat("^^node:", node.name(), ":", format, "^^");
+}
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_CORE_UTIL_STATUS_UTIL_H_
diff --git a/tensorflow/core/util/status_util_test.cc b/tensorflow/core/util/status_util_test.cc
new file mode 100644
index 0000000000..1f06004db2
--- /dev/null
+++ b/tensorflow/core/util/status_util_test.cc
@@ -0,0 +1,36 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/util/status_util.h"
+
+#include "tensorflow/core/graph/graph_constructor.h"
+#include "tensorflow/core/graph/node_builder.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+namespace {
+
+TEST(TestStatusUtil, ErrorFormatTagForNode) {
+ Graph graph(OpRegistry::Global());
+ Node* node;
+ TF_CHECK_OK(NodeBuilder("Foo", "NoOp").Finalize(&graph, &node));
+ EXPECT_EQ(error_format_tag(*node, "${line}"), "^^node:Foo:${line}^^");
+ EXPECT_EQ(error_format_tag(*node, "${file}:${line}"),
+ "^^node:Foo:${file}:${line}^^");
+}
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/core/util/tensor_format.cc b/tensorflow/core/util/tensor_format.cc
index 33ab87aa78..a5f7ecf0d1 100644
--- a/tensorflow/core/util/tensor_format.cc
+++ b/tensorflow/core/util/tensor_format.cc
@@ -18,7 +18,7 @@ limitations under the License.
namespace tensorflow {
string GetConvnetDataFormatAttrString() {
- return "data_format: { 'NHWC', 'NCHW', 'HWNC', 'HWCN' } = 'NHWC' ";
+ return "data_format: { 'NHWC', 'NCHW' } = 'NHWC' ";
}
string GetConvnet3dDataFormatAttrString() {
diff --git a/tensorflow/docs_src/api_guides/python/spectral_ops.md b/tensorflow/docs_src/api_guides/python/spectral_ops.md
index 022c471ef1..dd13802f00 100644
--- a/tensorflow/docs_src/api_guides/python/spectral_ops.md
+++ b/tensorflow/docs_src/api_guides/python/spectral_ops.md
@@ -23,3 +23,4 @@ that you can use to transform Tensors of real and complex signals.
## Discrete Cosine Transforms
* @{tf.spectral.dct}
+* @{tf.spectral.idct}
diff --git a/tensorflow/docs_src/deploy/s3.md b/tensorflow/docs_src/deploy/s3.md
index 9ef9674338..7028249e94 100644
--- a/tensorflow/docs_src/deploy/s3.md
+++ b/tensorflow/docs_src/deploy/s3.md
@@ -90,4 +90,4 @@ S3 was invented by Amazon, but the S3 API has spread in popularity and has sever
* [Amazon S3](https://aws.amazon.com/s3/)
* [Google Storage](https://cloud.google.com/storage/docs/interoperability)
-* [Minio](https://www.minio.io/kubernetes.html)(Standalone mode only)
+* [Minio](https://www.minio.io/kubernetes.html)
diff --git a/tensorflow/docs_src/extend/index.md b/tensorflow/docs_src/extend/index.md
index 1ab0340ad9..d48340a777 100644
--- a/tensorflow/docs_src/extend/index.md
+++ b/tensorflow/docs_src/extend/index.md
@@ -17,7 +17,8 @@ TensorFlow:
Python is currently the only language supported by TensorFlow's API stability
promises. However, TensorFlow also provides functionality in C++, Go, Java and
-[JavaScript](https://js.tensorflow.org),
+[JavaScript](https://js.tensorflow.org) (including
+[Node.js](https://github.com/tensorflow/tfjs-node)),
plus community support for [Haskell](https://github.com/tensorflow/haskell) and
[Rust](https://github.com/tensorflow/rust). If you'd like to create or
develop TensorFlow features in a language other than these languages, read the
diff --git a/tensorflow/docs_src/extend/new_data_formats.md b/tensorflow/docs_src/extend/new_data_formats.md
index d1d1f69766..abbf47910e 100644
--- a/tensorflow/docs_src/extend/new_data_formats.md
+++ b/tensorflow/docs_src/extend/new_data_formats.md
@@ -77,18 +77,24 @@ can be used as a starting point for your implementation:
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
-namespace tensorflow {
+namespace myproject {
namespace {
-class MyReaderDatasetOp : public DatasetOpKernel {
+using ::tensorflow::DT_STRING;
+using ::tensorflow::PartialTensorShape;
+using ::tensorflow::Status;
+
+class MyReaderDatasetOp : public tensorflow::DatasetOpKernel {
public:
- MyReaderDatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) {
+ MyReaderDatasetOp(tensorflow::OpKernelConstruction* ctx)
+ : DatasetOpKernel(ctx) {
// Parse and validate any attrs that define the dataset using
// `ctx->GetAttr()`, and store them in member variables.
}
- void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
+ void MakeDataset(tensorflow::OpKernelContext* ctx,
+ tensorflow::DatasetBase** output) override {
    // Parse and validate any input tensors that define the dataset using
// `ctx->input()` or the utility function
// `ParseScalarArgument<T>(ctx, &arg)`.
@@ -99,14 +105,14 @@ class MyReaderDatasetOp : public DatasetOpKernel {
}
private:
- class Dataset : public GraphDatasetBase {
+ class Dataset : public tensorflow::GraphDatasetBase {
public:
- Dataset(OpKernelContext* ctx) : GraphDatasetBase(ctx) {}
+ Dataset(tensorflow::OpKernelContext* ctx) : GraphDatasetBase(ctx) {}
- std::unique_ptr<IteratorBase> MakeIteratorInternal(
+ std::unique_ptr<tensorflow::IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
- return std::unique_ptr<IteratorBase>(
- new Iterator({this, strings::StrCat(prefix, "::MyReader")}));
+ return std::unique_ptr<tensorflow::IteratorBase>(new Iterator(
+ {this, tensorflow::strings::StrCat(prefix, "::MyReader")}));
}
// Record structure: Each record is represented by a scalar string tensor.
@@ -114,8 +120,8 @@ class MyReaderDatasetOp : public DatasetOpKernel {
// Dataset elements can have a fixed number of components of different
// types and shapes; replace the following two methods to customize this
// aspect of the dataset.
- const DataTypeVector& output_dtypes() const override {
- static DataTypeVector* dtypes = new DataTypeVector({DT_STRING});
+ const tensorflow::DataTypeVector& output_dtypes() const override {
+ static auto* const dtypes = new tensorflow::DataTypeVector({DT_STRING});
return *dtypes;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
@@ -132,16 +138,16 @@ class MyReaderDatasetOp : public DatasetOpKernel {
// Implement this method if you want to be able to save and restore
// instances of this dataset (and any iterators over it).
Status AsGraphDefInternal(DatasetGraphDefBuilder* b,
- Node** output) const override {
+ tensorflow::Node** output) const override {
// Construct nodes to represent any of the input tensors from this
// object's member variables using `b->AddScalar()` and `b->AddVector()`.
- std::vector<Node*> input_tensors;
+ std::vector<tensorflow::Node*> input_tensors;
TF_RETURN_IF_ERROR(b->AddDataset(this, input_tensors, output));
return Status::OK();
}
private:
- class Iterator : public DatasetIterator<Dataset> {
+ class Iterator : public tensorflow::DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params), i_(0) {}
@@ -158,15 +164,15 @@ class MyReaderDatasetOp : public DatasetOpKernel {
// return `Status::OK()`.
// 3. If an error occurs, return an error status using one of the helper
// functions from "tensorflow/core/lib/core/errors.h".
- Status GetNextInternal(IteratorContext* ctx,
- std::vector<Tensor>* out_tensors,
+ Status GetNextInternal(tensorflow::IteratorContext* ctx,
+ std::vector<tensorflow::Tensor>* out_tensors,
bool* end_of_sequence) override {
// NOTE: `GetNextInternal()` may be called concurrently, so it is
// recommended that you protect the iterator state with a mutex.
- mutex_lock l(mu_);
+ tensorflow::mutex_lock l(mu_);
if (i_ < 10) {
// Create a scalar string tensor and add it to the output.
- Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
+ tensorflow::Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
record_tensor.scalar<string>()() = "MyReader!";
out_tensors->emplace_back(std::move(record_tensor));
++i_;
@@ -183,20 +189,20 @@ class MyReaderDatasetOp : public DatasetOpKernel {
//
// Implement these two methods if you want to be able to save and restore
// instances of this iterator.
- Status SaveInternal(IteratorStateWriter* writer) override {
- mutex_lock l(mu_);
+ Status SaveInternal(tensorflow::IteratorStateWriter* writer) override {
+ tensorflow::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(full_name("i"), i_));
return Status::OK();
}
- Status RestoreInternal(IteratorContext* ctx,
- IteratorStateReader* reader) override {
- mutex_lock l(mu_);
+ Status RestoreInternal(tensorflow::IteratorContext* ctx,
+ tensorflow::IteratorStateReader* reader) override {
+ tensorflow::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name("i"), &i_));
return Status::OK();
}
private:
- mutex mu_;
+ tensorflow::mutex mu_;
int64 i_ GUARDED_BY(mu_);
};
};
@@ -211,14 +217,14 @@ class MyReaderDatasetOp : public DatasetOpKernel {
REGISTER_OP("MyReaderDataset")
.Output("handle: variant")
.SetIsStateful()
- .SetShapeFn(shape_inference::ScalarShape);
+ .SetShapeFn(tensorflow::shape_inference::ScalarShape);
// Register the kernel implementation for MyReaderDataset.
-REGISTER_KERNEL_BUILDER(Name("MyReaderDataset").Device(DEVICE_CPU),
+REGISTER_KERNEL_BUILDER(Name("MyReaderDataset").Device(tensorflow::DEVICE_CPU),
MyReaderDatasetOp);
} // namespace
-} // namespace tensorflow
+} // namespace myproject
```
The last step is to build the C++ code and add a Python wrapper. The easiest way
diff --git a/tensorflow/docs_src/get_started/eager.md b/tensorflow/docs_src/get_started/eager.md
deleted file mode 100644
index ddf239485a..0000000000
--- a/tensorflow/docs_src/get_started/eager.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Custom Training Walkthrough
-
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/r1.9.0/samples/core/get_started/eager.ipynb)
diff --git a/tensorflow/docs_src/get_started/index.md b/tensorflow/docs_src/get_started/index.md
deleted file mode 100644
index bd2a80d9ef..0000000000
--- a/tensorflow/docs_src/get_started/index.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Get Started
-
-If you are new to machine learning, we recommend taking the following online
-course prior to diving into TensorFlow documentation:
-
- * [Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course/),
- which introduces machine learning concepts and encourages experimentation
- with existing TensorFlow code.
-
-TensorFlow is a tool for machine learning. While it contains a wide range of
-functionality, TensorFlow is mainly designed for deep neural network models.
-
-The easiest way to get started with TensorFlow is by using Eager Execution.
-
- * @{$get_started/eager}, is for anyone new to machine learning or TensorFlow.
-
-TensorFlow provides many APIs. The remainder of this section focuses on the
-Estimator API which provide scalable, high-performance models. See the
-@{$estimators} guide.
-
-For more advanced users:
-
- * The @{$low_level_intro$Low Level Introduction} demonstrates how to use
- TensorFlow outside of the Estimator framework, for debugging and
- experimentation.
- * The @{$guide$Programmer's Guide} details major
- TensorFlow components.
- * The @{$tutorials$Tutorials} provide walkthroughs of a variety of
- TensorFlow models.
diff --git a/tensorflow/docs_src/get_started/leftnav_files b/tensorflow/docs_src/get_started/leftnav_files
deleted file mode 100644
index 5c400a67f0..0000000000
--- a/tensorflow/docs_src/get_started/leftnav_files
+++ /dev/null
@@ -1,10 +0,0 @@
-### Learn and use ML
-basic_classification.md
-basic_text_classification.md
-basic_regression.md
-overfit_and_underfit.md
-save_and_restore_models.md
-next_steps.md
-
-### Research and experimentation
-eager.md
diff --git a/tensorflow/docs_src/guide/autograph.md b/tensorflow/docs_src/guide/autograph.md
new file mode 100644
index 0000000000..823e1c6d6b
--- /dev/null
+++ b/tensorflow/docs_src/guide/autograph.md
@@ -0,0 +1,3 @@
+# AutoGraph: Easy control flow for graphs
+
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/guide/autograph.ipynb)
diff --git a/tensorflow/docs_src/guide/datasets_for_estimators.md b/tensorflow/docs_src/guide/datasets_for_estimators.md
index b04af78cd8..b55a5731a4 100644
--- a/tensorflow/docs_src/guide/datasets_for_estimators.md
+++ b/tensorflow/docs_src/guide/datasets_for_estimators.md
@@ -76,9 +76,9 @@ Let's walk through the `train_input_fn()`.
The function starts by using the @{tf.data.Dataset.from_tensor_slices} function
to create a @{tf.data.Dataset} representing slices of the array. The array is
sliced across the first dimension. For example, an array containing the
-@{$tutorials/layers$mnist training data} has a shape of `(60000, 28, 28)`.
-Passing this to `from_tensor_slices` returns a `Dataset` object containing
-60000 slices, each one a 28x28 image.
+MNIST training data has a shape of `(60000, 28, 28)`. Passing this to
+`from_tensor_slices` returns a `Dataset` object containing 60000 slices, each one
+a 28x28 image.
The code that returns this `Dataset` is as follows:
diff --git a/tensorflow/docs_src/guide/debugger.md b/tensorflow/docs_src/guide/debugger.md
index dc4db58857..8d78fe6fbd 100644
--- a/tensorflow/docs_src/guide/debugger.md
+++ b/tensorflow/docs_src/guide/debugger.md
@@ -463,7 +463,6 @@ predict_results = classifier.predict(predict_input_fn, hooks=hooks)
```
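The `hooks` list used above is typically built with tfdbg's
`LocalCLIDebugHook`, for example:

```python
from tensorflow.python import debug as tf_debug

# Estimator methods (train/evaluate/predict) accept a list of hooks;
# this one drops each Session.run() call into the tfdbg command-line UI.
hooks = [tf_debug.LocalCLIDebugHook()]
```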
[debug_tflearn_iris.py](https://www.tensorflow.org/code/tensorflow/python/debug/examples/debug_tflearn_iris.py),
-based on [tf-learn's iris tutorial](https://www.tensorflow.org/versions/r1.8/get_started/tflearn),
contains a full example of how to use the tfdbg with `Estimator`s.
To run this example, do:
diff --git a/tensorflow/docs_src/guide/eager.md b/tensorflow/docs_src/guide/eager.md
index b2bc3273b4..e98206eef9 100644
--- a/tensorflow/docs_src/guide/eager.md
+++ b/tensorflow/docs_src/guide/eager.md
@@ -316,9 +316,8 @@ for (batch, (images, labels)) in enumerate(dataset):
The following example creates a multi-layer model that classifies the standard
-[MNIST handwritten digits](https://www.tensorflow.org/tutorials/layers). It
-demonstrates the optimizer and layer APIs to build trainable graphs in an eager
-execution environment.
+MNIST handwritten digits. It demonstrates the optimizer and layer APIs to build
+trainable graphs in an eager execution environment.
### Train a model
@@ -422,7 +421,7 @@ class Model(tf.keras.Model):
super(Model, self).__init__()
self.W = tfe.Variable(5., name='weight')
self.B = tfe.Variable(10., name='bias')
- def predict(self, inputs):
+ def call(self, inputs):
return inputs * self.W + self.B
# A toy dataset of points around 3 * x + 2
@@ -433,7 +432,7 @@ training_outputs = training_inputs * 3 + 2 + noise
# The loss function to be optimized
def loss(model, inputs, targets):
- error = model.predict(inputs) - targets
+ error = model(inputs) - targets
return tf.reduce_mean(tf.square(error))
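# Because `call` (rather than `predict`) is defined, the model instance is
# itself callable: model(inputs) dispatches to call(). That is why `loss`
# above can write model(inputs). For example, with the weights above:
#
#   model = Model()
#   model(tf.constant(2.))  # => 2 * 5 + 10 = 20.0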
def grad(model, inputs, targets):
diff --git a/tensorflow/docs_src/guide/feature_columns.md b/tensorflow/docs_src/guide/feature_columns.md
index 1013ec910c..41080e050b 100644
--- a/tensorflow/docs_src/guide/feature_columns.md
+++ b/tensorflow/docs_src/guide/feature_columns.md
@@ -561,9 +561,9 @@ For more examples on feature columns, view the following:
* The @{$low_level_intro#feature_columns$Low Level Introduction} demonstrates how
  to experiment directly with `feature_columns` using TensorFlow's low level APIs.
-* The @{$wide$wide} and @{$wide_and_deep$Wide & Deep} Tutorials solve a
- binary classification problem using `feature_columns` on a variety of input
- data types.
+* The [Estimator wide and deep learning tutorial](https://github.com/tensorflow/models/tree/master/official/wide_deep)
+ solves a binary classification problem using `feature_columns` on a variety of
+ input data types.
To learn more about embeddings, see the following:
diff --git a/tensorflow/docs_src/guide/graph_viz.md b/tensorflow/docs_src/guide/graph_viz.md
index f581ae56da..a8876da5a5 100644
--- a/tensorflow/docs_src/guide/graph_viz.md
+++ b/tensorflow/docs_src/guide/graph_viz.md
@@ -248,7 +248,8 @@ The images below show the CIFAR-10 model with tensor shape information:
Often it is useful to collect runtime metadata for a run, such as total memory
usage, total compute time, and tensor shapes for nodes. The code example below
is a snippet from the train and test section of a modification of the
-@{$layers$simple MNIST tutorial}, in which we have recorded summaries and
+[Estimators MNIST tutorial](../tutorials/estimators/cnn.md), in which we have
+recorded summaries and
runtime statistics. See the
@{$summaries_and_tensorboard#serializing-the-data$Summaries Tutorial}
for details on how to record summaries.
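In TF 1.x, collecting that metadata looks roughly like this (the log
directory is illustrative):

```python
import tensorflow as tf

x = tf.random_normal([1000, 1000])
y = tf.matmul(x, x)

with tf.Session() as sess:
  writer = tf.summary.FileWriter('/tmp/logdir', sess.graph)
  run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  run_metadata = tf.RunMetadata()
  sess.run(y, options=run_options, run_metadata=run_metadata)
  # Attach the collected compute-time and memory stats to a named step
  # so TensorBoard can display them per node.
  writer.add_run_metadata(run_metadata, 'step1')
  writer.close()
```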
diff --git a/tensorflow/docs_src/guide/graphs.md b/tensorflow/docs_src/guide/graphs.md
index e6246ef148..492f97c191 100644
--- a/tensorflow/docs_src/guide/graphs.md
+++ b/tensorflow/docs_src/guide/graphs.md
@@ -486,7 +486,7 @@ subgraph inside.
![](../images/mnist_deep.png)
For more information about visualizing your TensorFlow application with
-TensorBoard, see the [TensorBoard tutorial](../get_started/summaries_and_tensorboard.md).
+TensorBoard, see the [TensorBoard guide](./summaries_and_tensorboard.md).
## Programming with multiple graphs
diff --git a/tensorflow/docs_src/guide/index.md b/tensorflow/docs_src/guide/index.md
index eefdb9ceae..f78dfc9a89 100644
--- a/tensorflow/docs_src/guide/index.md
+++ b/tensorflow/docs_src/guide/index.md
@@ -16,15 +16,12 @@ works. The units are as follows:
## Estimators
-* @{$estimators} provides an introduction.
-* @{$premade_estimators}, introduces Estimators for machine learning.
-* @{$custom_estimators}, which demonstrates how to build and train models you
- design yourself.
-* @{$feature_columns}, which shows how an Estimator can handle a variety of input
- data types without changes to the model.
-* @{$datasets_for_estimators} describes using tf.data with estimators.
-* @{$checkpoints}, which explains how to save training progress and resume where
- you left off.
+* @{$estimators}, learn how to use Estimators for machine learning.
+* @{$premade_estimators}, the basics of premade Estimators.
+* @{$checkpoints}, save training progress and resume where you left off.
+* @{$feature_columns}, handle a variety of input data types without changes to the model.
+* @{$datasets_for_estimators}, use `tf.data` to input data.
+* @{$custom_estimators}, write your own Estimator.
## Accelerators
diff --git a/tensorflow/docs_src/guide/keras.md b/tensorflow/docs_src/guide/keras.md
index 83172dab7f..1d846df104 100644
--- a/tensorflow/docs_src/guide/keras.md
+++ b/tensorflow/docs_src/guide/keras.md
@@ -35,7 +35,7 @@ from tensorflow import keras
* The `tf.keras` version in the latest TensorFlow release might not be the same
as the latest `keras` version from PyPI. Check `tf.keras.__version__`.
* When [saving a model's weights](#weights_only), `tf.keras` defaults to the
- [checkpoint format](../get_started/checkpoints.md). Pass `save_format='h5'` to
+ [checkpoint format](./checkpoints.md). Pass `save_format='h5'` to
use HDF5.
## Build a simple model
@@ -221,7 +221,7 @@ To *evaluate* the inference-mode loss and metrics for the data provided:
```python
model.evaluate(x, y, batch_size=32)
-model.evaluate(dataset, steps=30
+model.evaluate(dataset, steps=30)
```
And to *predict* the output of the last layer in inference for the data provided,
@@ -442,7 +442,7 @@ model.load_weights('my_model')
```
By default, this saves the model's weights in the
-[TensorFlow checkpoint](../get_started/checkpoints.md) file format. Weights can
+[TensorFlow checkpoint](./checkpoints.md) file format. Weights can
also be saved to the Keras HDF5 format (the default for the multi-backend
implementation of Keras):
@@ -581,15 +581,6 @@ model.compile(loss='binary_crossentropy', optimizer=optimizer)
model.summary()
```
-Convert the Keras model to a `tf.estimator.Estimator` instance:
-
-```python
-keras_estimator = keras.estimator.model_to_estimator(
- keras_model=model,
- config=config,
- model_dir='/tmp/model_dir')
-```
-
Define an *input pipeline*. The `input_fn` returns a `tf.data.Dataset` object
used to distribute the data across multiple devices—with each device processing
a slice of the input batch.
@@ -615,6 +606,15 @@ strategy = tf.contrib.distribute.MirroredStrategy()
config = tf.estimator.RunConfig(train_distribute=strategy)
```
+Convert the Keras model to a `tf.estimator.Estimator` instance:
+
+```python
+keras_estimator = keras.estimator.model_to_estimator(
+ keras_model=model,
+ config=config,
+ model_dir='/tmp/model_dir')
+```
+
Finally, train the `Estimator` instance by providing the `input_fn` and `steps`
arguments:
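For reference, the pieces described above fit together roughly as follows;
the array shapes and step counts are illustrative:

```python
import numpy as np
import tensorflow as tf

def input_fn():
  x = np.random.random((1024, 10))
  y = np.random.randint(2, size=(1024, 1))
  x = tf.cast(x, tf.float32)
  dataset = tf.data.Dataset.from_tensor_slices((x, y))
  dataset = dataset.repeat(10)
  dataset = dataset.batch(32)
  return dataset

# `keras_estimator` is the Estimator created from the model above.
keras_estimator.train(input_fn=input_fn, steps=10)
```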
diff --git a/tensorflow/docs_src/guide/leftnav_files b/tensorflow/docs_src/guide/leftnav_files
index 357a2a1cb9..c4e235b41a 100644
--- a/tensorflow/docs_src/guide/leftnav_files
+++ b/tensorflow/docs_src/guide/leftnav_files
@@ -8,10 +8,10 @@ datasets.md
### Estimators
estimators.md: Introduction to Estimators
premade_estimators.md
-custom_estimators.md
+checkpoints.md
feature_columns.md
datasets_for_estimators.md
-checkpoints.md
+custom_estimators.md
### Accelerators
using_gpu.md
@@ -23,6 +23,7 @@ tensors.md
variables.md
graphs.md
saved_model.md
+autograph.md: Control flow
### ML Concepts
embedding.md
diff --git a/tensorflow/docs_src/guide/saved_model.md b/tensorflow/docs_src/guide/saved_model.md
index 27ef7bb0da..acc3d3ca0b 100644
--- a/tensorflow/docs_src/guide/saved_model.md
+++ b/tensorflow/docs_src/guide/saved_model.md
@@ -794,11 +794,12 @@ Here's the syntax:
```
usage: saved_model_cli run [-h] --dir DIR --tag_set TAG_SET --signature_def
SIGNATURE_DEF_KEY [--inputs INPUTS]
- [--input_exprs INPUT_EXPRS] [--outdir OUTDIR]
+ [--input_exprs INPUT_EXPRS]
+ [--input_examples INPUT_EXAMPLES] [--outdir OUTDIR]
[--overwrite] [--tf_debug]
```
-The `run` command provides the following two ways to pass inputs to the model:
+The `run` command provides the following three ways to pass inputs to the model:
* `--inputs` option enables you to pass numpy ndarrays in files.
* `--input_exprs` option enables you to pass Python expressions.
@@ -847,7 +848,7 @@ dictionary is stored in the pickle file and the value corresponding to
the *variable_name* will be used.
-#### `--inputs_exprs`
+#### `--input_exprs`
To pass inputs through Python expressions, specify the `--input_exprs` option.
This can be useful for when you don't have data
@@ -869,7 +870,7 @@ example:
(Note that the `numpy` module is already available to you as `np`.)
-#### `--inputs_examples`
+#### `--input_examples`
To pass `tf.train.Example` as inputs, specify the `--input_examples` option.
For each input key, it takes a list of dictionaries, where each dictionary is an
diff --git a/tensorflow/docs_src/guide/tensorboard_histograms.md b/tensorflow/docs_src/guide/tensorboard_histograms.md
index 918deda190..af8f2cadd1 100644
--- a/tensorflow/docs_src/guide/tensorboard_histograms.md
+++ b/tensorflow/docs_src/guide/tensorboard_histograms.md
@@ -13,8 +13,8 @@ TensorFlow has an op
which is perfect for this purpose. As is usually the case with TensorBoard, we
will ingest data using a summary op; in this case,
['tf.summary.histogram'](https://www.tensorflow.org/api_docs/python/tf/summary/histogram).
-For a primer on how summaries work, please see the general
-[TensorBoard tutorial](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
+For a primer on how summaries work, please see the
+[TensorBoard guide](./summaries_and_tensorboard.md).
Here is a code snippet that will generate some histogram summaries containing
normally distributed data, where the mean of the distribution increases over
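A condensed sketch of such a snippet might look like this (the log directory
is illustrative):

```python
import tensorflow as tf

k = tf.placeholder(tf.float32)

# A normal distribution whose mean shifts as k increases.
moving_normal = tf.random_normal(shape=[1000], mean=(5 * k), stddev=1)
tf.summary.histogram("normal/moving_mean", moving_normal)

summaries = tf.summary.merge_all()
sess = tf.Session()
writer = tf.summary.FileWriter("/tmp/histogram_example")

# Sweep k from 0 to 1, writing one histogram summary per step.
for step in range(400):
  summ = sess.run(summaries, feed_dict={k: step / 400.0})
  writer.add_summary(summ, global_step=step)
```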
diff --git a/tensorflow/docs_src/guide/version_compat.md b/tensorflow/docs_src/guide/version_compat.md
index 72e427c5f8..d2e5e41190 100644
--- a/tensorflow/docs_src/guide/version_compat.md
+++ b/tensorflow/docs_src/guide/version_compat.md
@@ -301,8 +301,10 @@ existing producer scripts will not suddenly use the new functionality.
#### Change an op's functionality
1. Add a new similar op named `SomethingV2` or similar and go through the
- process of adding it and switching existing Python wrappers to use it, which
- may take three weeks if forward compatibility is desired.
+ process of adding it and switching existing Python wrappers to use it.
+ To ensure forward compatibility use the checks suggested in
+ [compat.py](https://www.tensorflow.org/code/tensorflow/python/compat/compat.py)
+ when changing the Python wrappers.
2. Remove the old op (Can only take place with a major version change due to
backward compatibility).
3. Increase `min_consumer` to rule out consumers with the old op, add back the
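For step 1, the checks in compat.py take roughly this form; `gen_ops` and the
cutoff date below are hypothetical placeholders:

```python
from tensorflow.python.compat import compat

def something(x):
  # Emit the new op only after the forward-compatibility window has
  # passed; until then, graphs keep using the original op so older
  # consumers can still run them. (gen_ops and the date are placeholders.)
  if compat.forward_compatible(2018, 8, 1):
    return gen_ops.something_v2(x)
  return gen_ops.something(x)
```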
diff --git a/tensorflow/docs_src/install/index.md b/tensorflow/docs_src/install/index.md
index c2e5a991d4..55481cc400 100644
--- a/tensorflow/docs_src/install/index.md
+++ b/tensorflow/docs_src/install/index.md
@@ -1,36 +1,39 @@
-# Installing TensorFlow
+# Install TensorFlow
-We've built and tested TensorFlow on the following 64-bit laptop/desktop
-operating systems:
+Note: Run the [TensorFlow tutorials](../tutorials) in a pre-configured
+[Colab notebook environment](https://colab.research.google.com/notebooks/welcome.ipynb){: .external},
+without installation.
+
+TensorFlow is built and tested on the following 64-bit operating systems:
* macOS 10.12.6 (Sierra) or later.
* Ubuntu 16.04 or later
* Windows 7 or later.
* Raspbian 9.0 or later.
-Although you might be able to install TensorFlow on other laptop or desktop
-systems, we only support (and only fix issues in) the preceding configurations.
+While TensorFlow may work on other systems, we only support—and fix issues in—the
+systems listed above.
The following guides explain how to install a version of TensorFlow
that enables you to write applications in Python:
- * @{$install_linux$Installing TensorFlow on Ubuntu}
- * @{$install_mac$Installing TensorFlow on macOS}
- * @{$install_windows$Installing TensorFlow on Windows}
- * @{$install_raspbian$Installing TensorFlow on a Raspberry Pi}
- * @{$install_sources$Installing TensorFlow from Sources}
+ * @{$install_linux$Install TensorFlow on Ubuntu}
+ * @{$install_mac$Install TensorFlow on macOS}
+ * @{$install_windows$Install TensorFlow on Windows}
+ * @{$install_raspbian$Install TensorFlow on a Raspberry Pi}
+ * @{$install_sources$Install TensorFlow from source code}
Many aspects of the Python TensorFlow API changed from version 0.n to 1.0.
The following guide explains how to migrate older TensorFlow applications
to Version 1.0:
- * @{$migration$Transitioning to TensorFlow 1.0}
+ * @{$migration$Transition to TensorFlow 1.0}
The following guides explain how to install TensorFlow libraries for use in
other programming languages. These APIs are aimed at deploying TensorFlow
models in applications and are not as extensive as the Python APIs.
- * @{$install_java$Installing TensorFlow for Java}
- * @{$install_c$Installing TensorFlow for C}
- * @{$install_go$Installing TensorFlow for Go}
+ * @{$install_java$Install TensorFlow for Java}
+ * @{$install_c$Install TensorFlow for C}
+ * @{$install_go$Install TensorFlow for Go}
diff --git a/tensorflow/docs_src/install/install_c.md b/tensorflow/docs_src/install/install_c.md
index 2901848745..4e1c32f972 100644
--- a/tensorflow/docs_src/install/install_c.md
+++ b/tensorflow/docs_src/install/install_c.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow for C
+# Install TensorFlow for C
TensorFlow provides a C API defined in
[`c_api.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/c/c_api.h),
diff --git a/tensorflow/docs_src/install/install_go.md b/tensorflow/docs_src/install/install_go.md
index 2c126df5aa..162a820f22 100644
--- a/tensorflow/docs_src/install/install_go.md
+++ b/tensorflow/docs_src/install/install_go.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow for Go
+# Install TensorFlow for Go
TensorFlow provides APIs for use in Go programs. These APIs are particularly
well-suited to loading models created in Python and executing them within
diff --git a/tensorflow/docs_src/install/install_java.md b/tensorflow/docs_src/install/install_java.md
index 692dfc9cef..c196bb9b31 100644
--- a/tensorflow/docs_src/install/install_java.md
+++ b/tensorflow/docs_src/install/install_java.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow for Java
+# Install TensorFlow for Java
TensorFlow provides APIs for use in Java programs. These APIs are particularly
well-suited to loading models created in Python and executing them within a
diff --git a/tensorflow/docs_src/install/install_linux.md b/tensorflow/docs_src/install/install_linux.md
index c573acaf45..7534d0fac1 100644
--- a/tensorflow/docs_src/install/install_linux.md
+++ b/tensorflow/docs_src/install/install_linux.md
@@ -1,38 +1,38 @@
-# Installing TensorFlow on Ubuntu
+# Install TensorFlow on Ubuntu
This guide explains how to install TensorFlow on Ubuntu Linux. While these
-instructions may work on other Linux variants, they are tested and supported with
-the following system requirements:
-
-* 64-bit desktops or laptops
-* Ubuntu 16.04 or higher
+instructions may work on other Linux variants, they are tested and supported
+with the following system requirements:
+* 64-bit desktops or laptops
+* Ubuntu 16.04 or higher
## Choose which TensorFlow to install
The following TensorFlow variants are available for installation:
-* __TensorFlow with CPU support only__. If your system does not have a
- NVIDIA®&nbsp;GPU, you must install this version. This version of TensorFlow is
- usually easier to install, so even if you have an NVIDIA GPU, we recommend
- installing this version first.
-* __TensorFlow with GPU support__. TensorFlow programs usually run much faster on
- a GPU instead of a CPU. If you run performance-critical applications and your
- system has an NVIDIA®&nbsp;GPU that meets the prerequisites, you should install
- this version. See [TensorFlow GPU support](#NVIDIARequirements) for details.
-
+* __TensorFlow with CPU support only__. If your system does not have an
+ NVIDIA®&nbsp;GPU, you must install this version. This version of TensorFlow
+ is usually easier to install, so even if you have an NVIDIA GPU, we
+ recommend installing this version first.
+* __TensorFlow with GPU support__. TensorFlow programs usually run much faster
+ on a GPU instead of a CPU. If you run performance-critical applications and
+ your system has an NVIDIA®&nbsp;GPU that meets the prerequisites, you should
+ install this version. See [TensorFlow GPU support](#NVIDIARequirements) for
+ details.
## How to install TensorFlow
There are a few options to install TensorFlow on your machine:
-* [Use pip in a virtual environment](#InstallingVirtualenv) *(recommended)*
-* [Use pip in your system environment](#InstallingNativePip)
-* [Configure a Docker container](#InstallingDocker)
-* [Use pip in Anaconda](#InstallingAnaconda)
-* [Install TensorFlow from source](/install/install_sources)
+* [Use pip in a virtual environment](#InstallingVirtualenv) *(recommended)*
+* [Use pip in your system environment](#InstallingNativePip)
+* [Configure a Docker container](#InstallingDocker)
+* [Use pip in Anaconda](#InstallingAnaconda)
+* [Install TensorFlow from source](/install/install_sources)
<a name="InstallingVirtualenv"></a>
+
### Use `pip` in a virtual environment
Key Point: Using a virtual environment is the recommended install method.
@@ -41,8 +41,8 @@ The [Virtualenv](https://virtualenv.pypa.io/en/stable/) tool creates virtual
Python environments that are isolated from other Python development on the same
machine. In this scenario, you install TensorFlow and its dependencies within a
virtual environment that is available when *activated*. Virtualenv provides a
-reliable way to install and run TensorFlow while avoiding conflicts with the rest
-of the system.
+reliable way to install and run TensorFlow while avoiding conflicts with the
+rest of the system.
##### 1. Install Python, `pip`, and `virtualenv`.
@@ -62,7 +62,7 @@ To install these packages on Ubuntu:
</pre>
We *recommend* using `pip` version 8.1 or higher. If using a release before
-version 8.1, upgrade `pip`:
+version 8.1, upgrade `pip`:
<pre class="prettyprint lang-bsh">
<code class="devsite-terminal">sudo pip install -U pip</code>
@@ -112,10 +112,10 @@ affecting packages outside the `virtualenv`.
Choose one of the available TensorFlow packages for installation:
-* `tensorflow` —Current release for CPU
-* `tensorflow-gpu` —Current release with GPU support
-* `tf-nightly` —Nightly build for CPU
-* `tf-nightly-gpu` —Nightly build with GPU support
+* `tensorflow` —Current release for CPU
+* `tensorflow-gpu` —Current release with GPU support
+* `tf-nightly` —Nightly build for CPU
+* `tf-nightly-gpu` —Nightly build with GPU support
Within an active Virtualenv environment, use `pip` to install the package:
@@ -160,14 +160,14 @@ To uninstall TensorFlow, remove the Virtualenv directory you created in step 2:
<code class="devsite-terminal">rm -r ~/tensorflow/<var>venv</var></code>
</pre>
-
<a name="InstallingNativePip"></a>
+
### Use `pip` in your system environment
Use `pip` to install the TensorFlow package directly on your system without
using a container or virtual environment for isolation. This method is
-recommended for system administrators that want a TensorFlow installation that is
-available to everyone on a multi-user system.
+recommended for system administrators that want a TensorFlow installation that
+is available to everyone on a multi-user system.
Since a system install is not isolated, it could interfere with other
Python-based installations. But if you understand `pip` and your Python
@@ -195,7 +195,7 @@ To install these packages on Ubuntu:
</pre>
We *recommend* using `pip` version 8.1 or higher. If using a release before
-version 8.1, upgrade `pip`:
+version 8.1, upgrade `pip`:
<pre class="prettyprint lang-bsh">
<code class="devsite-terminal">sudo pip install -U pip</code>
@@ -212,10 +212,10 @@ installed, use `easy_install` to install `pip`:
Choose one of the available TensorFlow packages for installation:
-* `tensorflow` —Current release for CPU
-* `tensorflow-gpu` —Current release with GPU support
-* `tf-nightly` —Nightly build for CPU
-* `tf-nightly-gpu` —Nightly build with GPU support
+* `tensorflow` —Current release for CPU
+* `tensorflow-gpu` —Current release with GPU support
+* `tf-nightly` —Nightly build for CPU
+* `tf-nightly-gpu` —Nightly build with GPU support
And use `pip` to install the package for Python 2 or 3:
@@ -260,37 +260,36 @@ To uninstall TensorFlow on your system, use one of following commands:
</pre>
<a name="InstallingDocker"></a>
+
### Configure a Docker container
-Docker completely isolates the TensorFlow installation
-from pre-existing packages on your machine. The Docker container contains
-TensorFlow and all its dependencies. Note that the Docker image can be quite
-large (hundreds of MBs). You might choose the Docker installation if you are
-incorporating TensorFlow into a larger application architecture that already
-uses Docker.
+Docker completely isolates the TensorFlow installation from pre-existing
+packages on your machine. The Docker container contains TensorFlow and all its
+dependencies. Note that the Docker image can be quite large (hundreds of MBs).
+You might choose the Docker installation if you are incorporating TensorFlow
+into a larger application architecture that already uses Docker.
Take the following steps to install TensorFlow through Docker:
- 1. Install Docker on your machine as described in the
- [Docker documentation](http://docs.docker.com/engine/installation/).
- 2. Optionally, create a Linux group called <code>docker</code> to allow
- launching containers without sudo as described in the
- [Docker documentation](https://docs.docker.com/engine/installation/linux/linux-postinstall/).
- (If you don't do this step, you'll have to use sudo each time
- you invoke Docker.)
- 3. To install a version of TensorFlow that supports GPUs, you must first
- install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker), which
- is stored in github.
- 4. Launch a Docker container that contains one of the
- [TensorFlow binary images](https://hub.docker.com/r/tensorflow/tensorflow/tags/).
+1. Install Docker on your machine as described in the
+ [Docker documentation](http://docs.docker.com/engine/installation/).
+2. Optionally, create a Linux group called <code>docker</code> to allow
+ launching containers without sudo as described in the
+ [Docker documentation](https://docs.docker.com/engine/installation/linux/linux-postinstall/).
+ (If you don't do this step, you'll have to use sudo each time you invoke
+ Docker.)
+3. To install a version of TensorFlow that supports GPUs, you must first
+ install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker), which is
+   hosted on GitHub.
+4. Launch a Docker container that contains one of the
+ [TensorFlow binary images](https://hub.docker.com/r/tensorflow/tensorflow/tags/).
The remainder of this section explains how to launch a Docker container.
-
#### CPU-only
-To launch a Docker container with CPU-only support (that is, without
-GPU support), enter a command of the following format:
+To launch a Docker container with CPU-only support (that is, without GPU
+support), enter a command of the following format:
<pre>
$ docker run -it <i>-p hostPort:containerPort TensorFlowCPUImage</i>
@@ -298,29 +297,31 @@ $ docker run -it <i>-p hostPort:containerPort TensorFlowCPUImage</i>
where:
- * <tt><i>-p hostPort:containerPort</i></tt> is optional.
- If you plan to run TensorFlow programs from the shell, omit this option.
- If you plan to run TensorFlow programs as Jupyter notebooks, set both
- <tt><i>hostPort</i></tt> and <tt><i>containerPort</i></tt>
- to <tt>8888</tt>. If you'd like to run TensorBoard inside the container,
- add a second `-p` flag, setting both <i>hostPort</i> and <i>containerPort</i>
- to 6006.
- * <tt><i>TensorFlowCPUImage</i></tt> is required. It identifies the Docker
+* <tt><i>-p hostPort:containerPort</i></tt> is optional. If you plan to run
+ TensorFlow programs from the shell, omit this option. If you plan to run
+ TensorFlow programs as Jupyter notebooks, set both <tt><i>hostPort</i></tt>
+ and <tt><i>containerPort</i></tt> to <tt>8888</tt>. If you'd like to run
+ TensorBoard inside the container, add a second `-p` flag, setting both
+ <i>hostPort</i> and <i>containerPort</i> to 6006.
+* <tt><i>TensorFlowCPUImage</i></tt> is required. It identifies the Docker
container. Specify one of the following values:
- * <tt>tensorflow/tensorflow</tt>, which is the TensorFlow CPU binary image.
- * <tt>tensorflow/tensorflow:latest-devel</tt>, which is the latest
- TensorFlow CPU Binary image plus source code.
- * <tt>tensorflow/tensorflow:<i>version</i></tt>, which is the
- specified version (for example, 1.1.0rc1) of TensorFlow CPU binary image.
- * <tt>tensorflow/tensorflow:<i>version</i>-devel</tt>, which is
- the specified version (for example, 1.1.0rc1) of the TensorFlow GPU
- binary image plus source code.
+
+ * <tt>tensorflow/tensorflow</tt>, which is the TensorFlow CPU binary
+ image.
+ * <tt>tensorflow/tensorflow:latest-devel</tt>, which is the latest
+      TensorFlow CPU binary image plus source code.
+ * <tt>tensorflow/tensorflow:<i>version</i></tt>, which is the specified
+ version (for example, 1.1.0rc1) of TensorFlow CPU binary image.
+ * <tt>tensorflow/tensorflow:<i>version</i>-devel</tt>, which is the
+ specified version (for example, 1.1.0rc1) of the TensorFlow GPU binary
+ image plus source code.
TensorFlow images are available at
[dockerhub](https://hub.docker.com/r/tensorflow/tensorflow/).
-For example, the following command launches the latest TensorFlow CPU binary image
-in a Docker container from which you can run TensorFlow programs in a shell:
+For example, the following command launches the latest TensorFlow CPU binary
+image in a Docker container from which you can run TensorFlow programs in a
+shell:
<pre>
$ <b>docker run -it tensorflow/tensorflow bash</b>
@@ -336,10 +337,11 @@ $ <b>docker run -it -p 8888:8888 tensorflow/tensorflow</b>
Docker will download the TensorFlow binary image the first time you launch it.
-
#### GPU support
-To launch a Docker container with NVidia GPU support, enter a command of the following format (this [does not require any local CUDA installation](https://github.com/nvidia/nvidia-docker/wiki/CUDA#requirements)):
+To launch a Docker container with NVIDIA GPU support, enter a command of the
+following format (this
+[does not require any local CUDA installation](https://github.com/nvidia/nvidia-docker/wiki/CUDA#requirements)):
<pre>
$ <b>nvidia-docker run -it</b> <i>-p hostPort:containerPort TensorFlowGPUImage</i>
@@ -347,34 +349,34 @@ $ <b>nvidia-docker run -it</b> <i>-p hostPort:containerPort TensorFlowGPUImage</
where:
- * <tt><i>-p hostPort:containerPort</i></tt> is optional. If you plan
- to run TensorFlow programs from the shell, omit this option. If you plan
- to run TensorFlow programs as Jupyter notebooks, set both
- <tt><i>hostPort</i></tt> and <code><em>containerPort</em></code> to `8888`.
- * <i>TensorFlowGPUImage</i> specifies the Docker container. You must
- specify one of the following values:
- * <tt>tensorflow/tensorflow:latest-gpu</tt>, which is the latest
- TensorFlow GPU binary image.
- * <tt>tensorflow/tensorflow:latest-devel-gpu</tt>, which is
- the latest TensorFlow GPU Binary image plus source code.
- * <tt>tensorflow/tensorflow:<i>version</i>-gpu</tt>, which is the
- specified version (for example, 0.12.1) of the TensorFlow GPU
- binary image.
- * <tt>tensorflow/tensorflow:<i>version</i>-devel-gpu</tt>, which is
- the specified version (for example, 0.12.1) of the TensorFlow GPU
- binary image plus source code.
-
-We recommend installing one of the `latest` versions. For example, the
-following command launches the latest TensorFlow GPU binary image in a
-Docker container from which you can run TensorFlow programs in a shell:
+* <tt><i>-p hostPort:containerPort</i></tt> is optional. If you plan to run
+ TensorFlow programs from the shell, omit this option. If you plan to run
+ TensorFlow programs as Jupyter notebooks, set both <tt><i>hostPort</i></tt>
+ and <code><em>containerPort</em></code> to `8888`.
+* <i>TensorFlowGPUImage</i> specifies the Docker container. You must specify
+ one of the following values:
+ * <tt>tensorflow/tensorflow:latest-gpu</tt>, which is the latest
+ TensorFlow GPU binary image.
+ * <tt>tensorflow/tensorflow:latest-devel-gpu</tt>, which is the latest
+        TensorFlow GPU binary image plus source code.
+ * <tt>tensorflow/tensorflow:<i>version</i>-gpu</tt>, which is the
+ specified version (for example, 0.12.1) of the TensorFlow GPU binary
+ image.
+ * <tt>tensorflow/tensorflow:<i>version</i>-devel-gpu</tt>, which is the
+ specified version (for example, 0.12.1) of the TensorFlow GPU binary
+ image plus source code.
+
+We recommend installing one of the `latest` versions. For example, the following
+command launches the latest TensorFlow GPU binary image in a Docker container
+from which you can run TensorFlow programs in a shell:
<pre>
$ <b>nvidia-docker run -it tensorflow/tensorflow:latest-gpu bash</b>
</pre>
-The following command also launches the latest TensorFlow GPU binary image
-in a Docker container. In this Docker container, you can run TensorFlow
-programs in a Jupyter notebook:
+The following command also launches the latest TensorFlow GPU binary image in a
+Docker container. In this Docker container, you can run TensorFlow programs in a
+Jupyter notebook:
<pre>
$ <b>nvidia-docker run -it -p 8888:8888 tensorflow/tensorflow:latest-gpu</b>
@@ -390,14 +392,12 @@ Docker will download the TensorFlow binary image the first time you launch it.
For more details see the
[TensorFlow docker readme](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/docker).
-
#### Next Steps
-You should now
-[validate your installation](#ValidateYourInstallation).
-
+You should now [validate your installation](#ValidateYourInstallation).
<a name="InstallingAnaconda"></a>
+
### Use `pip` in Anaconda
Anaconda provides the `conda` utility to create a virtual environment. However,
@@ -410,61 +410,59 @@ not tested on new TensorFlow releases.
Take the following steps to install TensorFlow in an Anaconda environment:
- 1. Follow the instructions on the
- [Anaconda download site](https://www.continuum.io/downloads)
- to download and install Anaconda.
+1. Follow the instructions on the
+ [Anaconda download site](https://www.continuum.io/downloads) to download and
+ install Anaconda.
- 2. Create a conda environment named <tt>tensorflow</tt> to run a version
- of Python by invoking the following command:
+2. Create a conda environment named <tt>tensorflow</tt> to run a version of
+ Python by invoking the following command:
<pre>$ <b>conda create -n tensorflow pip python=2.7 # or python=3.3, etc.</b></pre>
- 3. Activate the conda environment by issuing the following command:
+3. Activate the conda environment by issuing the following command:
<pre>$ <b>source activate tensorflow</b>
(tensorflow)$ # Your prompt should change </pre>
- 4. Issue a command of the following format to install
- TensorFlow inside your conda environment:
+4. Issue a command of the following format to install TensorFlow inside your
+ conda environment:
<pre>(tensorflow)$ <b>pip install --ignore-installed --upgrade</b> <i>tfBinaryURL</i></pre>
- where <code><em>tfBinaryURL</em></code> is the
- [URL of the TensorFlow Python package](#the_url_of_the_tensorflow_python_package).
- For example, the following command installs the CPU-only version of
- TensorFlow for Python 3.4:
+ where <code><em>tfBinaryURL</em></code> is the
+ [URL of the TensorFlow Python package](#the_url_of_the_tensorflow_python_package).
+ For example, the following command installs the CPU-only version of
+ TensorFlow for Python 3.4:
<pre>
(tensorflow)$ <b>pip install --ignore-installed --upgrade \
https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp34-cp34m-linux_x86_64.whl</b></pre>
<a name="ValidateYourInstallation"></a>
+
## Validate your installation
To validate your TensorFlow installation, do the following:
- 1. Ensure that your environment is prepared to run TensorFlow programs.
- 2. Run a short TensorFlow program.
-
+1. Ensure that your environment is prepared to run TensorFlow programs.
+2. Run a short TensorFlow program.
### Prepare your environment
-If you installed on native pip, Virtualenv, or Anaconda, then
-do the following:
+If you installed with native pip, Virtualenv, or Anaconda, then do the following:
- 1. Start a terminal.
- 2. If you installed with Virtualenv or Anaconda, activate your container.
- 3. If you installed TensorFlow source code, navigate to any
- directory *except* one containing TensorFlow source code.
+1. Start a terminal.
+2. If you installed with Virtualenv or Anaconda, activate your environment.
+3. If you built TensorFlow from source code, navigate to any directory *except*
+ one containing TensorFlow source code.
-If you installed through Docker, start a Docker container
-from which you can run bash. For example:
+If you installed through Docker, start a Docker container from which you can run
+bash. For example:
<pre>
$ <b>docker run -it tensorflow/tensorflow bash</b>
</pre>
-
### Run a short TensorFlow program
Invoke python from your shell as follows:
@@ -486,94 +484,71 @@ TensorFlow programs:
<pre>Hello, TensorFlow!</pre>
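The short program that produces this greeting is the usual TF 1.x smoke test,
along these lines:

```python
import tensorflow as tf

hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))
```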
-If the system outputs an error message instead of a greeting, see [Common
-installation problems](#common_installation_problems).
+If the system outputs an error message instead of a greeting, see
+[Common installation problems](#common_installation_problems).
-To learn more, see [Get Started with TensorFlow](https://www.tensorflow.org/get_started).
+To learn more, see the [TensorFlow tutorials](../tutorials/).
<a name="NVIDIARequirements"></a>
-## TensorFlow GPU support
-
-To install TensorFlow with GPU support, configure the following NVIDIA® software
-on your system:
-
-* [CUDA Toolkit 9.0](http://nvidia.com/cuda). For details, see
- [NVIDIA's documentation](http://docs.nvidia.com/cuda/cuda-installation-guide-linux/).
- Append the relevant CUDA pathnames to the `LD_LIBRARY_PATH` environmental
- variable as described in the NVIDIA documentation.
-* [cuDNN SDK v7](http://developer.nvidia.com/cudnn). For details, see
- [NVIDIA's documentation](http://docs.nvidia.com/deeplearning/sdk/cudnn-install/).
- Create the `CUDA_HOME` environment variable as described in the NVIDIA
- documentation.
-* A GPU card with CUDA Compute Capability 3.0 or higher for building TensorFlow
- from source. To use the TensorFlow binaries, version 3.5 or higher is required.
- See the [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus) for a
- list of supported GPU cards.
-* [GPU drivers](http://nvidia.com/drivers) that support your version of the CUDA
- Toolkit.
-* The `libcupti-dev` library is the NVIDIA CUDA Profile Tools Interface. This
- library provides advanced profiling support. To install this library,
- use the following command for CUDA Toolkit >= 8.0:
-
-<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">sudo apt-get install cuda-command-line-tools</code>
-</pre>
-
-Add this path to the `LD_LIBRARY_PATH` environmental variable:
-
-<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+${LD_LIBRARY_PATH}:}/usr/local/cuda/extras/CUPTI/lib64</code>
-</pre>
-
-* *OPTIONAL*: For optimized performance during inference, install
- *NVIDIA&nbsp;TensorRT&nbsp;3.0*. To install the minimal amount of TensorRT
- runtime components required to use with the pre-built `tensorflow-gpu` package:
-<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1404/x86_64/nvinfer-runtime-trt-repo-ubuntu1404-3.0.4-ga-cuda9.0_1.0-1_amd64.deb</code>
- <code class="devsite-terminal">sudo dpkg -i nvinfer-runtime-trt-repo-ubuntu1404-3.0.4-ga-cuda9.0_1.0-1_amd64.deb</code>
- <code class="devsite-terminal">sudo apt-get update</code>
- <code class="devsite-terminal">sudo apt-get install -y --allow-downgrades libnvinfer-dev libcudnn7-dev=7.0.5.15-1+cuda9.0 libcudnn7=7.0.5.15-1+cuda9.0</code>
-</pre>
-
-Note: For compatibility with the pre-built `tensorflow-gpu` package, use the
-Ubuntu *14.04* package of TensorRT (shown above). Use this even when installing
-on an Ubuntu 16.04 system.
-
-To build the TensorFlow-TensorRT integration module from source instead of using
-the pre-built binaries, see the
-[module documentation](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/tensorrt#using-tensorrt-in-tensorflow).
-For detailed TensorRT installation instructions, see
-[NVIDIA's TensorRT documentation](http://docs.nvidia.com/deeplearning/sdk/tensorrt-install-guide/index.html).
-
-To avoid cuDNN version conflicts during later system upgrades, hold the cuDNN
-version at 7.0.5:
-
-<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">sudo apt-mark hold libcudnn7 libcudnn7-dev</code>
-</pre>
-
-To allow upgrades, remove the this hold:
-
-<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">sudo apt-mark unhold libcudnn7 libcudnn7-dev</code>
-</pre>
-
-If you have an earlier version of the preceding packages, upgrade to the
-specified versions. If upgrading is not possible, you can still run TensorFlow
-with GPU support by @{$install_sources}.
+## TensorFlow GPU support
+Note: Due to the number of libraries required, using [Docker](#InstallingDocker)
+is recommended over installing directly on the host system.
+
+The following NVIDIA® <i>hardware</i> must be installed on your system:
+
+* GPU card with CUDA Compute Capability 3.5 or higher. See
+ [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus) for a list of
+ supported GPU cards.
+
+The following NVIDIA® <i>software</i> must be installed on your system:
+
+* [GPU drivers](http://nvidia.com/driver). CUDA 9.0 requires 384.x or higher.
+* [CUDA Toolkit 9.0](http://nvidia.com/cuda).
+* [cuDNN SDK](http://developer.nvidia.com/cudnn) (>= 7.0). Version 7.1 is
+ recommended.
+* [CUPTI](http://docs.nvidia.com/cuda/cupti/) ships with the CUDA Toolkit, but
+ you also need to append its path to the `LD_LIBRARY_PATH` environment
+  variable:
+  `export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/extras/CUPTI/lib64`
+* *OPTIONAL*: [NCCL 2.2](https://developer.nvidia.com/nccl) to use TensorFlow
+ with multiple GPUs.
+* *OPTIONAL*:
+ [TensorRT](http://docs.nvidia.com/deeplearning/sdk/tensorrt-install-guide/index.html)
+ which can improve latency and throughput for inference for some models.
+
+To use a GPU with CUDA Compute Capability 3.0, or different versions of the
+preceding NVIDIA libraries, see
+@{$install_sources$installing TensorFlow from Sources}. If using Ubuntu 16.04
+and possibly other Debian-based Linux distros, `apt-get` can be used with the
+NVIDIA repository to simplify installation.
+
+```bash
+# Adds NVIDIA package repository.
+sudo apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/7fa2af80.pub
+wget http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/cuda-repo-ubuntu1604_9.1.85-1_amd64.deb
+wget http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb
+sudo dpkg -i cuda-repo-ubuntu1604_9.1.85-1_amd64.deb
+sudo dpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb
+sudo apt-get update
+# Includes optional NCCL 2.x.
+sudo apt-get install cuda-9-0 cuda-cublas-9-0 cuda-cufft-9-0 cuda-curand-9-0 \
+  cuda-cusolver-9-0 cuda-cusparse-9-0 libcudnn7=7.1.4.18-1+cuda9.0 \
+  libnccl2=2.2.13-1+cuda9.0 cuda-command-line-tools-9-0
+# Optionally install the TensorRT runtime; this must come after the CUDA install above.
+sudo apt-get update
+sudo apt-get install libnvinfer4=4.1.2-1+cuda9.0
+```
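After installing, one way to confirm that TensorFlow can see the GPU is a
quick check from Python; exact output varies by machine:

```python
import tensorflow as tf
from tensorflow.python.client import device_lib

# True when a CUDA-enabled GPU is registered with this build.
print(tf.test.is_gpu_available())
# A working setup lists a /device:GPU:0 entry here.
print(device_lib.list_local_devices())
```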
## Common installation problems
We are relying on Stack Overflow to document TensorFlow installation problems
-and their remedies. The following table contains links to Stack Overflow
-answers for some common installation problems.
-If you encounter an error message or other
-installation problem not listed in the following table, search for it
-on Stack Overflow. If Stack Overflow doesn't show the error message,
-ask a new question about it on Stack Overflow and specify
-the `tensorflow` tag.
+and their remedies. The following table contains links to Stack Overflow answers
+for some common installation problems. If you encounter an error message or
+other installation problem not listed in the following table, search for it on
+Stack Overflow. If Stack Overflow doesn't show the error message, ask a new
+question about it on Stack Overflow and specify the `tensorflow` tag.
<table>
<tr> <th>Link to GitHub or Stack&nbsp;Overflow</th> <th>Error Message</th> </tr>
@@ -657,20 +632,19 @@ the `tensorflow` tag.
</table>
-
<a name="TF_PYTHON_URL"></a>
+
## The URL of the TensorFlow Python package
A few installation mechanisms require the URL of the TensorFlow Python package.
The value you specify depends on three factors:
- * operating system
- * Python version
- * CPU only vs. GPU support
+* operating system
+* Python version
+* CPU only vs. GPU support
This section documents the relevant values for Linux installations.
-
### Python 2.7
CPU only:
@@ -679,7 +653,6 @@ CPU only:
https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp27-none-linux_x86_64.whl
</pre>
-
GPU support:
<pre>
@@ -689,7 +662,6 @@ https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0rc0-cp27
Note that GPU support requires the NVIDIA hardware and software described in
[NVIDIA requirements to run TensorFlow with GPU support](#NVIDIARequirements).
-
### Python 3.4
CPU only:
@@ -698,7 +670,6 @@ CPU only:
https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp34-cp34m-linux_x86_64.whl
</pre>
-
GPU support:
<pre>
@@ -708,7 +679,6 @@ https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0rc0-cp34
Note that GPU support requires the NVIDIA hardware and software described in
[NVIDIA requirements to run TensorFlow with GPU support](#NVIDIARequirements).
-
### Python 3.5
CPU only:
@@ -717,14 +687,12 @@ CPU only:
https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp35-cp35m-linux_x86_64.whl
</pre>
-
GPU support:
<pre>
https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0rc0-cp35-cp35m-linux_x86_64.whl
</pre>
-
Note that GPU support requires the NVIDIA hardware and software described in
[NVIDIA requirements to run TensorFlow with GPU support](#NVIDIARequirements).
@@ -736,13 +704,11 @@ CPU only:
https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp36-cp36m-linux_x86_64.whl
</pre>
-
GPU support:
<pre>
https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0rc0-cp36-cp36m-linux_x86_64.whl
</pre>
-
Note that GPU support requires the NVIDIA hardware and software described in
[NVIDIA requirements to run TensorFlow with GPU support](#NVIDIARequirements).
diff --git a/tensorflow/docs_src/install/install_mac.md b/tensorflow/docs_src/install/install_mac.md
index 584f1e2e35..3372e9e1e0 100644
--- a/tensorflow/docs_src/install/install_mac.md
+++ b/tensorflow/docs_src/install/install_mac.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow on macOS
+# Install TensorFlow on macOS
This guide explains how to install TensorFlow on macOS. Although these
instructions might also work on other macOS variants, we have only
@@ -403,8 +403,7 @@ writing TensorFlow programs:
If the system outputs an error message instead of a greeting, see
[Common installation problems](#common_installation_problems).
-To learn more, see [Get Started with TensorFlow](https://www.tensorflow.org/get_started).
-
+To learn more, see the [TensorFlow tutorials](../tutorials/).
## Common installation problems
diff --git a/tensorflow/docs_src/install/install_raspbian.md b/tensorflow/docs_src/install/install_raspbian.md
index 0caab6d335..58a5285c78 100644
--- a/tensorflow/docs_src/install/install_raspbian.md
+++ b/tensorflow/docs_src/install/install_raspbian.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow on Raspbian
+# Install TensorFlow on Raspbian
This guide explains how to install TensorFlow on a Raspberry Pi running
Raspbian. Although these instructions might also work on other Pi variants, we
@@ -230,7 +230,7 @@ problems, despite the log message.
If the system outputs an error message instead of a greeting, see [Common
installation problems](#common_installation_problems).
-To learn more, see [Get Started with TensorFlow](https://www.tensorflow.org/get_started).
+To learn more, see the [TensorFlow tutorials](../tutorials/).
## Common installation problems
diff --git a/tensorflow/docs_src/install/install_sources.md b/tensorflow/docs_src/install/install_sources.md
index e55520ceaa..502f4de7a6 100644
--- a/tensorflow/docs_src/install/install_sources.md
+++ b/tensorflow/docs_src/install/install_sources.md
@@ -1,28 +1,27 @@
-# Installing TensorFlow from Sources
+# Install TensorFlow from Sources
-This guide explains how to build TensorFlow sources into a TensorFlow
-binary and how to install that TensorFlow binary. Note that we provide
-well-tested, pre-built TensorFlow binaries for Ubuntu, macOS, and Windows
-systems. In addition, there are pre-built TensorFlow
-[docker images](https://hub.docker.com/r/tensorflow/tensorflow/).
-So, don't build a TensorFlow binary yourself unless you are very
-comfortable building complex packages from source and dealing with
-the inevitable aftermath should things not go exactly as documented.
+This guide explains how to build TensorFlow sources into a TensorFlow binary and
+how to install that TensorFlow binary. Note that we provide well-tested,
+pre-built TensorFlow binaries for Ubuntu, macOS, and Windows systems. In
+addition, there are pre-built TensorFlow
+[docker images](https://hub.docker.com/r/tensorflow/tensorflow/). So, don't
+build a TensorFlow binary yourself unless you are very comfortable building
+complex packages from source and dealing with the inevitable aftermath should
+things not go exactly as documented.
-If the last paragraph didn't scare you off, welcome. This guide explains
-how to build TensorFlow on 64-bit desktops and laptops running either of
-the following operating systems:
+If the last paragraph didn't scare you off, welcome. This guide explains how to
+build TensorFlow on 64-bit desktops and laptops running either of the following
+operating systems:
* Ubuntu
* macOS X
-Note: Some users have successfully built and installed TensorFlow from
-sources on non-supported systems. Please remember that we do not fix
-issues stemming from these attempts.
+Note: Some users have successfully built and installed TensorFlow from sources
+on non-supported systems. Please remember that we do not fix issues stemming
+from these attempts.
-We **do not support** building TensorFlow on Windows. That said, if you'd
-like to try to build TensorFlow on Windows anyway, use either of the
-following:
+We **do not support** building TensorFlow on Windows. That said, if you'd like
+to try to build TensorFlow on Windows anyway, use either of the following:
* [Bazel on Windows](https://bazel.build/versions/master/docs/windows.html)
* [TensorFlow CMake build](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/cmake)
@@ -32,38 +31,33 @@ instructions. Older CPUs may not be able to execute these binaries.
## Determine which TensorFlow to install
-You must choose one of the following types of TensorFlow to build and
-install:
-
-* **TensorFlow with CPU support only**. If your system does not have a
- NVIDIA® GPU, build and install this version. Note that this version of
- TensorFlow is typically easier to build and install, so even if you
- have an NVIDIA GPU, we recommend building and installing this version
- first.
-* **TensorFlow with GPU support**. TensorFlow programs typically run
- significantly faster on a GPU than on a CPU. Therefore, if your system
- has a NVIDIA GPU and you need to run performance-critical applications,
- you should ultimately build and install this version.
- Beyond the NVIDIA GPU itself, your system must also fulfill the NVIDIA
- software requirements described in one of the following documents:
+You must choose one of the following types of TensorFlow to build and install:
- * @{$install_linux#NVIDIARequirements$Installing TensorFlow on Ubuntu}
- * @{$install_mac#NVIDIARequirements$Installing TensorFlow on macOS}
+* **TensorFlow with CPU support only**. If your system does not have an NVIDIA®
+ GPU, build and install this version. Note that this version of TensorFlow is
+ typically easier to build and install, so even if you have an NVIDIA GPU, we
+ recommend building and installing this version first.
+* **TensorFlow with GPU support**. TensorFlow programs typically run
+  significantly faster on a GPU than on a CPU. Therefore, if your system has an
+ NVIDIA GPU and you need to run performance-critical applications, you should
+ ultimately build and install this version. Beyond the NVIDIA GPU itself,
+ your system must also fulfill the NVIDIA software requirements described in
+ one of the following documents:
+    * @{$install_linux#NVIDIARequirements$Installing TensorFlow on Ubuntu}
+    * @{$install_mac#NVIDIARequirements$Installing TensorFlow on macOS}
## Clone the TensorFlow repository
-Start the process of building TensorFlow by cloning a TensorFlow
-repository.
+Start the process of building TensorFlow by cloning a TensorFlow repository.
To clone **the latest** TensorFlow repository, issue the following command:
<pre>$ <b>git clone https://github.com/tensorflow/tensorflow</b> </pre>
-The preceding <code>git clone</code> command creates a subdirectory
-named `tensorflow`. After cloning, you may optionally build a
-**specific branch** (such as a release branch) by invoking the
-following commands:
+The preceding <code>git clone</code> command creates a subdirectory named
+`tensorflow`. After cloning, you may optionally build a **specific branch**
+(such as a release branch) by invoking the following commands:
<pre>
$ <b>cd tensorflow</b>
@@ -75,38 +69,34 @@ issue the following command:
<pre>$ <b>git checkout r1.0</b></pre>
-Next, you must prepare your environment for
-[Linux](#PrepareLinux)
-or
+Next, you must prepare your environment for [Linux](#PrepareLinux) or
[macOS](#PrepareMac)
-
<a name="PrepareLinux"></a>
-## Prepare environment for Linux
-Before building TensorFlow on Linux, install the following build
-tools on your system:
+## Prepare environment for Linux
- * bazel
- * TensorFlow Python dependencies
- * optionally, NVIDIA packages to support TensorFlow for GPU.
+Before building TensorFlow on Linux, install the following build tools on your
+system:
+* bazel
+* TensorFlow Python dependencies
+* optionally, NVIDIA packages to support TensorFlow for GPU.
### Install Bazel
If bazel is not installed on your system, install it now by following
[these directions](https://bazel.build/versions/master/docs/install.html).
-
### Install TensorFlow Python dependencies
To install TensorFlow, you must install the following packages:
- * `numpy`, which is a numerical processing package that TensorFlow requires.
- * `dev`, which enables adding extensions to Python.
- * `pip`, which enables you to install and manage certain Python packages.
- * `wheel`, which enables you to manage Python compressed packages in
- the wheel (.whl) format.
+* `numpy`, which is a numerical processing package that TensorFlow requires.
+* `dev`, which enables adding extensions to Python.
+* `pip`, which enables you to install and manage certain Python packages.
+* `wheel`, which enables you to manage Python compressed packages in the wheel
+ (.whl) format.
To install these packages for Python 2.7, issue the following command:
@@ -120,68 +110,70 @@ To install these packages for Python 3.n, issue the following command:
$ <b>sudo apt-get install python3-numpy python3-dev python3-pip python3-wheel</b>
</pre>
-
### Optional: install TensorFlow for GPU prerequisites
If you are building TensorFlow without GPU support, skip this section.
-The following NVIDIA <i>hardware</i> must be installed on your system:
-
- * GPU card with CUDA Compute Capability 3.0 or higher. See
- [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus)
- for a list of supported GPU cards.
-
-The following NVIDIA <i>software</i> must be installed on your system:
-
- * [CUDA Toolkit](http://nvidia.com/cuda) (>= 8.0). We recommend version 9.0.
- For details, see
- [NVIDIA's documentation](http://docs.nvidia.com/cuda/cuda-installation-guide-linux/).
- Ensure that you append the relevant CUDA pathnames to the
- `LD_LIBRARY_PATH` environment variable as described in the
- NVIDIA documentation.
- * [GPU drivers](http://nvidia.com/driver) supporting your version of the CUDA
- Toolkit.
- * [cuDNN SDK](http://developer.nvidia.com/cudnn) (>= 6.0). We recommend version 7.0. For details, see
- [NVIDIA's documentation](http://docs.nvidia.com/deeplearning/sdk/cudnn-install/).
- * [CUPTI](http://docs.nvidia.com/cuda/cupti/) ships with the CUDA Toolkit, but
- you also need to append its path to the `LD_LIBRARY_PATH` environment
- variable:
+The following NVIDIA® <i>hardware</i> must be installed on your system:
- <pre> $ <b>export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/extras/CUPTI/lib64</b> </pre>
+* GPU card with CUDA Compute Capability 3.5 or higher. See
+ [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus) for a list of
+ supported GPU cards.
+
+The following NVIDIA® <i>software</i> must be installed on your system:
+
+* [GPU drivers](http://nvidia.com/driver). CUDA 9.0 requires 384.x or higher.
+* [CUDA Toolkit](http://nvidia.com/cuda) (>= 8.0). We recommend version 9.0.
+* [cuDNN SDK](http://developer.nvidia.com/cudnn) (>= 6.0). We recommend
+ version 7.1.x.
+* [CUPTI](http://docs.nvidia.com/cuda/cupti/) ships with the CUDA Toolkit, but
+ you also need to append its path to the `LD_LIBRARY_PATH` environment
+  variable:
+  `export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/extras/CUPTI/lib64`
+* *OPTIONAL*: [NCCL 2.2](https://developer.nvidia.com/nccl) to use TensorFlow
+ with multiple GPUs.
+* *OPTIONAL*:
+ [TensorRT](http://docs.nvidia.com/deeplearning/sdk/tensorrt-install-guide/index.html)
+ which can improve latency and throughput for inference for some models.
+
+While it is possible to install the NVIDIA libraries via `apt-get` from the
+NVIDIA repository, the libraries and headers are installed in locations that
+make it difficult to configure and debug build issues. Downloading and
+installing the libraries manually or using Docker
+([latest-devel-gpu](https://hub.docker.com/r/tensorflow/tensorflow/tags/)) is
+recommended.
### Next
After preparing the environment, you must now
[configure the installation](#ConfigureInstallation).
-
<a name="PrepareMac"></a>
+
## Prepare environment for macOS
Before building TensorFlow, you must install the following on your system:
- * bazel
- * TensorFlow Python dependencies.
- * optionally, NVIDIA packages to support TensorFlow for GPU.
-
+* bazel
+* TensorFlow Python dependencies.
+* optionally, NVIDIA packages to support TensorFlow for GPU.
### Install bazel
If bazel is not installed on your system, install it now by following
[these directions](https://bazel.build/versions/master/docs/install.html#mac-os-x).
-
### Install python dependencies
To build TensorFlow, you must install the following packages:
- * six
- * numpy, which is a numerical processing package that TensorFlow requires.
- * wheel, which enables you to manage Python compressed packages
- in the wheel (.whl) format.
+* six
+* numpy, which is a numerical processing package that TensorFlow requires.
+* wheel, which enables you to manage Python compressed packages in the wheel
+ (.whl) format.
-You may install the python dependencies using pip. If you don't have pip
-on your machine, we recommend using homebrew to install Python and pip as
+You may install the python dependencies using pip. If you don't have pip on your
+machine, we recommend using homebrew to install Python and pip as
[documented here](http://docs.python-guide.org/en/latest/starting/install/osx/).
If you follow these instructions, you will not need to disable SIP.
@@ -192,22 +184,23 @@ After installing pip, invoke the following commands:
Note: These are just the minimum requirements to _build_ TensorFlow. Installing
the pip package will download additional packages required to _run_ it. If you
plan on executing tasks directly with `bazel`, without the pip installation,
-you may need to install additional python packages. For example, you should
-`pip install mock enum34` before running TensorFlow's tests with bazel.
+you may need to install additional python packages. For example, you should
+`pip install mock enum34` before running TensorFlow's tests with bazel.
<a name="ConfigureInstallation"></a>
+
## Configure the installation
-The root of the source tree contains a bash script named
-<code>configure</code>. This script asks you to identify the pathname of all
-relevant TensorFlow dependencies and specify other build configuration options
-such as compiler flags. You must run this script *prior* to
-creating the pip package and installing TensorFlow.
+The root of the source tree contains a bash script named <code>configure</code>.
+This script asks you to identify the pathname of all relevant TensorFlow
+dependencies and specify other build configuration options such as compiler
+flags. You must run this script *prior* to creating the pip package and
+installing TensorFlow.
-If you wish to build TensorFlow with GPU, `configure` will ask
-you to specify the version numbers of CUDA and cuDNN. If several
-versions of CUDA or cuDNN are installed on your system, explicitly select
-the desired version instead of relying on the default.
+If you wish to build TensorFlow with GPU, `configure` will ask you to specify
+the version numbers of CUDA and cuDNN. If several versions of CUDA or cuDNN are
+installed on your system, explicitly select the desired version instead of
+relying on the default.
One of the questions that `configure` will ask is as follows:
@@ -215,17 +208,17 @@ One of the questions that `configure` will ask is as follows:
Please specify optimization flags to use during compilation when bazel option "--config=opt" is specified [Default is -march=native]
</pre>
-This question refers to a later phase in which you'll use bazel to [build the
-pip package](#build-the-pip-package) or the [C/Java libraries](#BuildCorJava).
-We recommend accepting the default (`-march=native`), which will optimize the
-generated code for your local machine's CPU type. However, if you are building
-TensorFlow on one CPU type but will run TensorFlow on a different CPU type, then
-consider specifying a more specific optimization
-flag as described in [the gcc
-documentation](https://gcc.gnu.org/onlinedocs/gcc-4.5.3/gcc/i386-and-x86_002d64-Options.html).
+This question refers to a later phase in which you'll use bazel to
+[build the pip package](#build-the-pip-package) or the
+[C/Java libraries](#BuildCorJava). We recommend accepting the default
+(`-march=native`), which will optimize the generated code for your local
+machine's CPU type. However, if you are building TensorFlow on one CPU type but
+will run TensorFlow on a different CPU type, then consider specifying a more
+specific optimization flag as described in
+[the gcc documentation](https://gcc.gnu.org/onlinedocs/gcc-4.5.3/gcc/i386-and-x86_002d64-Options.html).
-Here is an example execution of the `configure` script. Note that your
-own input will likely differ from our sample input:
+Here is an example execution of the `configure` script. Note that your own input
+will likely differ from our sample input:
<pre>
$ <b>cd tensorflow</b> # cd to the top-level directory created
@@ -262,26 +255,26 @@ Please specify the location where cuDNN 7 library is installed. Refer to README.
Please specify a list of comma-separated CUDA compute capabilities you want to build with.
You can find the compute capability of your device at: https://developer.nvidia.com/cuda-gpus.
Please note that each additional compute capability significantly increases your build time and binary size.
-[Default is: "3.5,5.2"]: <b>3.0</b>
+[Default is: "3.5,7.0"]: <b>6.0,7.0</b>
Do you wish to build TensorFlow with MPI support? [y/N]
MPI support will not be enabled for TensorFlow
Configuration finished
</pre>
-If you told `configure` to build for GPU support, then `configure`
-will create a canonical set of symbolic links to the CUDA libraries
-on your system. Therefore, every time you change the CUDA library paths,
-you must rerun the `configure` script before re-invoking
-the <code>bazel build</code> command.
-Note the following:
+If you told `configure` to build for GPU support, then `configure` will create a
+canonical set of symbolic links to the CUDA libraries on your system. Therefore,
+every time you change the CUDA library paths, you must rerun the `configure`
+script before re-invoking the <code>bazel build</code> command.
- * Although it is possible to build both CUDA and non-CUDA configs
- under the same source tree, we recommend running `bazel clean` when
- switching between these two configurations in the same source tree.
- * If you don't run the `configure` script *before* running the
- `bazel build` command, the `bazel build` command will fail.
+Note the following:
+* Although it is possible to build both CUDA and non-CUDA configs under the
+ same source tree, we recommend running `bazel clean` when switching between
+  these two configurations in the same source tree (see the example below).
+* If you don't run the `configure` script *before* running the `bazel build`
+ command, the `bazel build` command will fail.
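+
+For example, a typical sequence when switching from a CUDA build back to a
+CPU-only build (a sketch; the build commands themselves are covered in the next
+section):
+
+<pre>
+$ bazel clean
+$ ./configure
+$ bazel build --config=opt //tensorflow/tools/pip_package:build_pip_package
+</pre>
+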
## Build the pip package
@@ -289,44 +282,53 @@ Note: If you're only interested in building the libraries for the TensorFlow C
or Java APIs, see [Build the C or Java libraries](#BuildCorJava), you do not
need to build the pip package in that case.
-To build a pip package for TensorFlow with CPU-only support,
-you would typically invoke the following command:
+### CPU-only support
+
+To build a pip package for TensorFlow with CPU-only support:
+
+<pre>
+$ bazel build --config=opt //tensorflow/tools/pip_package:build_pip_package
+</pre>
+
+To build a pip package for TensorFlow with CPU-only support for the Intel®
+MKL-DNN:
<pre>
-$ <b>bazel build --config=opt //tensorflow/tools/pip_package:build_pip_package</b>
+$ bazel build --config=mkl --config=opt //tensorflow/tools/pip_package:build_pip_package
</pre>
-To build a pip package for TensorFlow with GPU support,
-invoke the following command:
+### GPU support
-<pre>$ <b>bazel build --config=opt --config=cuda //tensorflow/tools/pip_package:build_pip_package</b> </pre>
+To build a pip package for TensorFlow with GPU support:
-**NOTE on gcc 5 or later:** the binary pip packages available on the
-TensorFlow website are built with gcc 4, which uses the older ABI. To
-make your build compatible with the older ABI, you need to add
-`--cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"` to your `bazel build` command.
-ABI compatibility allows custom ops built against the TensorFlow pip package
-to continue to work against your built package.
+<pre>
+$ bazel build --config=opt --config=cuda //tensorflow/tools/pip_package:build_pip_package
+</pre>
-<b>Tip:</b> By default, building TensorFlow from sources consumes
-a lot of RAM. If RAM is an issue on your system, you may limit RAM usage
-by specifying <code>--local_resources 2048,.5,1.0</code> while
-invoking `bazel`.
+**NOTE on gcc 5 or later:** the binary pip packages available on the TensorFlow
+website are built with gcc 4, which uses the older ABI. To make your build
+compatible with the older ABI, you need to add
+`--cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"` to your `bazel build` command. ABI
+compatibility allows custom ops built against the TensorFlow pip package to
+continue to work against your built package.
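+
+For example, adding the flag to the GPU build command above:
+
+<pre>
+$ bazel build --config=opt --config=cuda --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" //tensorflow/tools/pip_package:build_pip_package
+</pre>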
-The <code>bazel build</code> command builds a script named
-`build_pip_package`. Running this script as follows will build
-a `.whl` file within the `/tmp/tensorflow_pkg` directory:
+<b>Tip:</b> By default, building TensorFlow from sources consumes a lot of RAM.
+If RAM is an issue on your system, you may limit RAM usage by specifying
+<code>--local_resources 2048,.5,1.0</code> while invoking `bazel`.
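+
+For example, to cap the build at 2048 MB of RAM, half a CPU core, and standard
+I/O capability:
+
+<pre>
+$ bazel build --config=opt --local_resources 2048,.5,1.0 //tensorflow/tools/pip_package:build_pip_package
+</pre>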
+
+The <code>bazel build</code> command builds a script named `build_pip_package`.
+Running this script as follows will build a `.whl` file within the
+`/tmp/tensorflow_pkg` directory:
<pre>
$ <b>bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg</b>
</pre>
-
## Install the pip package
-Invoke `pip install` to install that pip package.
-The filename of the `.whl` file depends on your platform.
-For example, the following command will install the pip package
+Invoke `pip install` to install that pip package. The filename of the `.whl`
+file depends on your platform. For example, the following command will install
+the pip package
for TensorFlow 1.9.0rc0 on Linux:
@@ -362,28 +364,31 @@ TensorFlow programs:
<pre>Hello, TensorFlow!</pre>
-To learn more, see [Get Started with TensorFlow](https://www.tensorflow.org/get_started).
+To learn more, see the [TensorFlow tutorials](../tutorials/).
-If the system outputs an error message instead of a greeting, see [Common
-installation problems](#common_installation_problems).
+If the system outputs an error message instead of a greeting, see
+[Common installation problems](#common_installation_problems).
## Common build and installation problems
The build and installation problems you encounter typically depend on the
-operating system. See the "Common installation problems" section
-of one of the following guides:
-
- * @{$install_linux#common_installation_problems$Installing TensorFlow on Linux}
- * @{$install_mac#common_installation_problems$Installing TensorFlow on Mac OS}
- * @{$install_windows#common_installation_problems$Installing TensorFlow on Windows}
-
-Beyond the errors documented in those two guides, the following table
-notes additional errors specific to building TensorFlow. Note that we
-are relying on Stack Overflow as the repository for build and installation
-problems. If you encounter an error message not listed in the preceding
-two guides or in the following table, search for it on Stack Overflow. If
-Stack Overflow doesn't show the error message, ask a new question on
-Stack Overflow and specify the `tensorflow` tag.
+operating system. See the "Common installation problems" section of one of the
+following guides:
+
+* @{$install_linux#common_installation_problems$Installing TensorFlow on Linux}
+* @{$install_mac#common_installation_problems$Installing TensorFlow on Mac OS}
+* @{$install_windows#common_installation_problems$Installing TensorFlow on Windows}
+
+Beyond the errors documented in those guides, the following table notes
+additional errors specific to building TensorFlow. Note that we are relying on
+Stack Overflow as the repository for build and installation problems. If you
+encounter an error message not listed in the preceding guides or in the
+following table, search for it on Stack Overflow. If Stack Overflow doesn't show
+the error message, ask a new question on Stack Overflow and specify the
+`tensorflow` tag.
<table>
<tr> <th>Stack Overflow Link</th> <th>Error Message</th> </tr>
@@ -430,6 +435,7 @@ Stack Overflow and specify the `tensorflow` tag.
</table>
## Tested source configurations
+
**Linux**
<table>
<tr><th>Version:</th><th>CPU/GPU:</th><th>Python Version:</th><th>Compiler:</th><th>Build Tools:</th><th>cuDNN:</th><th>CUDA:</th></tr>
@@ -498,6 +504,7 @@ Stack Overflow and specify the `tensorflow` tag.
</table>
<a name="BuildCorJava"></a>
+
## Build the C or Java libraries
The instructions above are tailored to building the TensorFlow Python packages.
@@ -506,10 +513,12 @@ If you're interested in building the libraries for the TensorFlow C API, do the
following:
1. Follow the steps up to [Configure the installation](#ConfigureInstallation)
-2. Build the C libraries following instructions in the [README](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/lib_package/README.md).
+2. Build the C libraries following instructions in the
+ [README](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/lib_package/README.md).
-If you're interested inv building the libraries for the TensorFlow Java API,
-do the following:
+If you're interested in building the libraries for the TensorFlow Java API, do
+the following:
1. Follow the steps up to [Configure the installation](#ConfigureInstallation)
-2. Build the Java library following instructions in the [README](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/lib_package/README.md).
+2. Build the Java library following instructions in the
+ [README](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/lib_package/README.md).
diff --git a/tensorflow/docs_src/install/install_windows.md b/tensorflow/docs_src/install/install_windows.md
index 7fe94f0bc3..e9061bf3c1 100644
--- a/tensorflow/docs_src/install/install_windows.md
+++ b/tensorflow/docs_src/install/install_windows.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow on Windows
+# Install TensorFlow on Windows
This guide explains how to install TensorFlow on Windows. Although these
instructions might also work on other Windows variants, we have only
@@ -157,7 +157,7 @@ TensorFlow programs:
If the system outputs an error message instead of a greeting, see [Common
installation problems](#common_installation_problems).
-To learn more, see [Get Started with TensorFlow](https://www.tensorflow.org/get_started).
+To learn more, see the [TensorFlow tutorials](../tutorials/).
## Common installation problems
diff --git a/tensorflow/docs_src/install/migration.md b/tensorflow/docs_src/install/migration.md
index d6c31f96bd..19315ace2d 100644
--- a/tensorflow/docs_src/install/migration.md
+++ b/tensorflow/docs_src/install/migration.md
@@ -1,5 +1,4 @@
-
-# Transitioning to TensorFlow 1.0
+# Transition to TensorFlow 1.0
The APIs in TensorFlow 1.0 have changed in ways that are not all backwards
diff --git a/tensorflow/docs_src/javascript/index.md b/tensorflow/docs_src/javascript/index.md
deleted file mode 100644
index ad63eeb255..0000000000
--- a/tensorflow/docs_src/javascript/index.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# JavaScript
-
-You may develop TensorFlow programs in JavaScript, training and deploying
-models right in your browser. For details, see
-[js.tensorflow.org](https://js.tensorflow.org).
diff --git a/tensorflow/docs_src/javascript/leftnav_files b/tensorflow/docs_src/javascript/leftnav_files
deleted file mode 100644
index fc0ab8a543..0000000000
--- a/tensorflow/docs_src/javascript/leftnav_files
+++ /dev/null
@@ -1 +0,0 @@
-index.md
diff --git a/tensorflow/docs_src/mobile/leftnav_files b/tensorflow/docs_src/mobile/leftnav_files
index 585470d5f0..97340ef7e1 100644
--- a/tensorflow/docs_src/mobile/leftnav_files
+++ b/tensorflow/docs_src/mobile/leftnav_files
@@ -4,6 +4,7 @@ tflite/index.md
tflite/devguide.md
tflite/demo_android.md
tflite/demo_ios.md
+tflite/performance.md
### TensorFlow Mobile
mobile_intro.md
diff --git a/tensorflow/docs_src/mobile/mobile_intro.md b/tensorflow/docs_src/mobile/mobile_intro.md
index 241f01d460..baad443308 100644
--- a/tensorflow/docs_src/mobile/mobile_intro.md
+++ b/tensorflow/docs_src/mobile/mobile_intro.md
@@ -38,7 +38,8 @@ speech-driven interface, and many of these require on-device processing. Most of
the time a user isn’t giving commands, and so streaming audio continuously to a
remote server would be a waste of bandwidth, since it would mostly be silence or
background noises. To solve this problem it’s common to have a small neural
-network running on-device @{$tutorials/audio_recognition$listening out for a particular keyword}.
+network running on-device
+[listening out for a particular keyword](../tutorials/sequences/audio_recognition).
Once that keyword has been spotted, the rest of the
conversation can be transmitted over to the server for further processing if
more computing power is needed.
diff --git a/tensorflow/docs_src/mobile/tflite/demo_android.md b/tensorflow/docs_src/mobile/tflite/demo_android.md
index 6f9893f8f1..fdf0bcf3c1 100644
--- a/tensorflow/docs_src/mobile/tflite/demo_android.md
+++ b/tensorflow/docs_src/mobile/tflite/demo_android.md
@@ -1,7 +1,7 @@
# Android Demo App
An example Android application using TensorFlow Lite is available
-[on GitHub](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/java/demo/app).
+[on GitHub](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/java/demo).
The demo is a sample camera app that classifies images continuously
using either a quantized Mobilenet model or a floating point Inception-v3 model.
To run the demo, a device running Android 5.0 (API 21) or higher is required.
diff --git a/tensorflow/docs_src/mobile/tflite/devguide.md b/tensorflow/docs_src/mobile/tflite/devguide.md
index 4133bc172a..b168d6c183 100644
--- a/tensorflow/docs_src/mobile/tflite/devguide.md
+++ b/tensorflow/docs_src/mobile/tflite/devguide.md
@@ -54,10 +54,11 @@ both floating point and quantized inference.
### Train a custom model
A developer may choose to train a custom model using Tensorflow (see the
-@{$tutorials} for examples of building and training models). If you have already
-written a model, the first step is to export this to a @{tf.GraphDef} file. This
-is required because some formats do not store the model structure outside the
-code, and we must communicate with other parts of the framework. See
+[TensorFlow tutorials](../../tutorials/) for examples of building and training
+models). If you have already written a model, the first step is to export this
+to a @{tf.GraphDef} file. This is required because some formats do not store the
+model structure outside the code, and we must communicate with other parts of the
+framework. See
[Exporting the Inference Graph](https://github.com/tensorflow/models/blob/master/research/slim/README.md)
to create a .pb file for the custom model.
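+
+A minimal sketch of one way to produce such a file from an existing session
+(`tf.train.write_graph` is the standard utility; the output path and filename
+here are placeholders):
+
+```python
+import tensorflow as tf
+
+with tf.Session() as sess:
+    # ... build or load the model into the default graph ...
+    tf.train.write_graph(sess.graph_def, '/tmp', 'model.pb', as_text=False)
+```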
diff --git a/tensorflow/docs_src/mobile/tflite/performance.md b/tensorflow/docs_src/mobile/tflite/performance.md
new file mode 100644
index 0000000000..79bacaaa1b
--- /dev/null
+++ b/tensorflow/docs_src/mobile/tflite/performance.md
@@ -0,0 +1,174 @@
+# Performance
+
+This document lists TensorFlow Lite performance benchmarks when running
+well-known models on some Android and iOS devices.
+
+These performance benchmark numbers were generated with the
+[Android TFLite benchmark binary](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark)
+and the [iOS benchmark app](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark/ios).
+
+## Android performance benchmarks
+
+For Android benchmarks, the CPU affinity is set to use big cores on the device to
+reduce variance (see [details](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark#reducing-variance-between-runs-on-android)).
+
+It assumes that the models were downloaded and unzipped to the
+`/data/local/tmp/tflite_models` directory. The benchmark binary is built
+using [these instructions](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark#on-android)
+and is assumed to be in the `/data/local/tmp` directory.
+
+To run the benchmark:
+
+```
+adb shell taskset ${CPU_MASK} /data/local/tmp/benchmark_model \
+ --num_threads=1 \
+ --graph=/data/local/tmp/tflite_models/${GRAPH} \
+ --warmup_runs=1 \
+ --num_runs=50 \
+ --use_nnapi=false
+```
+
+Here, `${GRAPH}` is the name of the model and `${CPU_MASK}` is the CPU affinity
+chosen according to the following table:
+
+Device   | CPU_MASK
+-------- | --------
+Pixel 2  | f0
+Pixel XL | 0c
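+
+For example, for the float Mobilenet model on a Pixel 2 (the mask comes from
+the table above; the `.tflite` filename inside the downloaded archive is an
+assumption):
+
+```
+export GRAPH=mobilenet_v1_1.0_224.tflite
+export CPU_MASK=f0
+```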
+
+
+<table>
+ <thead>
+ <tr>
+ <th>Model Name</th>
+ <th>Device </th>
+ <th>Mean inference time (std dev)</th>
+ </tr>
+ </thead>
+ <tr>
+ <td rowspan = 2>
+ <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz">Mobilenet_1.0_224(float)</a>
+ </td>
+ <td>Pixel 2 </td>
+ <td>166.5 ms (2.6 ms)</td>
+ </tr>
+ <tr>
+    <td>Pixel XL</td>
+ <td>122.9 ms (1.8 ms) </td>
+ </tr>
+ <tr>
+ <td rowspan = 2>
+ <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz">Mobilenet_1.0_224 (quant)</a>
+ </td>
+ <td>Pixel 2 </td>
+ <td>69.5 ms (0.9 ms)</td>
+ </tr>
+ <tr>
+    <td>Pixel XL</td>
+ <td>78.9 ms (2.2 ms) </td>
+ </tr>
+ <tr>
+ <td rowspan = 2>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz">NASNet mobile</a>
+ </td>
+ <td>Pixel 2 </td>
+ <td>273.8 ms (3.5 ms)</td>
+ </tr>
+ <tr>
+    <td>Pixel XL</td>
+ <td>210.8 ms (4.2 ms)</td>
+ </tr>
+ <tr>
+ <td rowspan = 2>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz">SqueezeNet</a>
+ </td>
+ <td>Pixel 2 </td>
+ <td>234.0 ms (2.1 ms)</td>
+ </tr>
+ <tr>
+    <td>Pixel XL</td>
+ <td>158.0 ms (2.1 ms)</td>
+ </tr>
+ <tr>
+ <td rowspan = 2>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz">Inception_ResNet_V2</a>
+ </td>
+ <td>Pixel 2 </td>
+ <td>2846.0 ms (15.0 ms)</td>
+ </tr>
+ <tr>
+    <td>Pixel XL</td>
+ <td>1973.0 ms (15.0 ms) </td>
+ </tr>
+ <tr>
+ <td rowspan = 2>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz">Inception_V4</a>
+ </td>
+ <td>Pixel 2 </td>
+ <td>3180.0 ms (11.7 ms)</td>
+ </tr>
+ <tr>
+    <td>Pixel XL</td>
+ <td>2262.0 ms (21.0 ms) </td>
+ </tr>
+
+ </table>
+
+## iOS benchmarks
+
+To run iOS benchmarks, the [benchmark
+app](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark/ios)
+was modified to include the appropriate model, and `benchmark_params.json` was
+updated to set `num_threads` to 1.
+
+<table>
+ <thead>
+ <tr>
+ <th>Model Name</th>
+ <th>Device </th>
+ <th>Mean inference time (std dev)</th>
+ </tr>
+ </thead>
+ <tr>
+ <td>
+ <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz">Mobilenet_1.0_224(float)</a>
+ </td>
+ <td>iPhone 8 </td>
+ <td>32.2 ms (0.8 ms)</td>
+ </tr>
+ <tr>
+ <td>
+      <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz">Mobilenet_1.0_224 (quant)</a>
+ </td>
+ <td>iPhone 8 </td>
+ <td>24.4 ms (0.8 ms)</td>
+ </tr>
+ <tr>
+ <td>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz">NASNet mobile</a>
+ </td>
+ <td>iPhone 8 </td>
+ <td>60.3 ms (0.6 ms)</td>
+ </tr>
+ <tr>
+ <td>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz">SqueezeNet</a>
+ </td>
+ <td>iPhone 8 </td>
+      <td>44.3 ms (0.7 ms)</td>
+ </tr>
+ <tr>
+ <td>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz">Inception_ResNet_V2</a>
+ </td>
+ <td>iPhone 8</td>
+ <td>562.4 ms (18.2 ms)</td>
+ </tr>
+ <tr>
+ <td>
+ <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz">Inception_V4</a>
+ </td>
+ <td>iPhone 8 </td>
+ <td>661.0 ms (29.2 ms)</td>
+ </tr>
+ </table>
diff --git a/tensorflow/docs_src/performance/xla/operation_semantics.md b/tensorflow/docs_src/performance/xla/operation_semantics.md
index f7e116bf0f..68c427a316 100644
--- a/tensorflow/docs_src/performance/xla/operation_semantics.md
+++ b/tensorflow/docs_src/performance/xla/operation_semantics.md
@@ -1308,12 +1308,10 @@ See also
: : : parameters of type T and M of :
: : : arbitrary type :
| `dimensions` | `int64` array | array of map dimensions |
-| `static_operands` | sequence of M `XlaOp`s | M arrays of arbitrary type |
Applies a scalar function over the given `operands` arrays, producing an array
of the same dimensions where each element is the result of the mapped function
-applied to the corresponding elements in the input arrays with `static_operands`
-given as additional input to `computation`.
+applied to the corresponding elements in the input arrays.
The mapped function is an arbitrary computation with the restriction that it has
N inputs of scalar type `T` and a single output with type `S`. The output has
@@ -2012,13 +2010,42 @@ Slice(b, {2, 1}, {4, 3}) produces:
See also
[`XlaBuilder::Sort`](https://www.tensorflow.org/code/tensorflow/compiler/xla/client/xla_client/xla_builder.h).
-Sorts the elements in the operand.
+There are two versions of the Sort instruction: a single-operand and a
+two-operand version.
<b>`Sort(operand)`</b>
-Arguments | Type | Semantics
---------- | ------- | -------------------
-`operand` | `XlaOp` | The operand to sort
+Arguments | Type | Semantics
+----------- | ------- | --------------------
+`operand` | `XlaOp` | The operand to sort.
+`dimension` | `int64` | The dimension along which to sort.
+
+Sorts the elements in the operand in ascending order along the provided
+dimension. For example, for a rank-2 (matrix) operand, a `dimension` value of 0
+will sort each column independently, and a `dimension` value of 1 will sort each
+row independently. If the operand's elements have floating point type, and the
+operand contains NaN elements, the order of elements in the output is
+implementation-defined.
+
+<b>`Sort(key, value)`</b>
+
+Sorts both the key and the value operands. The keys are sorted as in the
+single-operand version. The values are sorted according to the order of their
+corresponding keys. For example, if the inputs are `keys = [3, 1]` and
+`values = [42, 50]`, then the output of the sort is the tuple
+`{[1, 3], [50, 42]}`.
+
+The sort is not guaranteed to be stable, that is, if the keys array contains
+duplicates, the order of their corresponding values may not be preserved.
+
+Arguments | Type | Semantics
+----------- | ------- | -------------------
+`keys` | `XlaOp` | The sort keys.
+`values` | `XlaOp` | The values to sort.
+`dimension` | `int64` | The dimension along which to sort.
+
+The `keys` and `values` must have the same dimensions, but may have different
+element types.
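+
+For example (a worked sketch of these semantics, not output from an actual
+implementation): with
+
+```
+keys   = [[3, 1],
+          [1, 2]]
+values = [[10, 20],
+          [30, 40]]
+```
+
+and `dimension = 0`, each column is sorted independently by its keys, producing
+the tuple `{[[1, 1], [3, 2]], [[30, 20], [10, 40]]}`.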
## Transpose
diff --git a/tensorflow/docs_src/get_started/_index.yaml b/tensorflow/docs_src/tutorials/_index.yaml
index 277fc852fb..c74fe58089 100644
--- a/tensorflow/docs_src/get_started/_index.yaml
+++ b/tensorflow/docs_src/tutorials/_index.yaml
@@ -66,9 +66,7 @@ landing_page:
}
</style>
<div class="devsite-landing-row-item-description">
- <a href="#">
- <h3 class="hide-from-toc">Learn and use ML</h3>
- </a>
+ <h3 class="hide-from-toc">Learn and use ML</h3>
<div class="devsite-landing-row-item-description-content">
<p>
The high-level Keras API provides building blocks to create and
@@ -77,11 +75,11 @@ landing_page:
<a href="/guide/keras">TensorFlow Keras guide</a>.
</p>
<ol style="padding-left:20px;">
- <li><a href="/get_started/basic_classification">Basic classification</a></li>
- <li><a href="/get_started/basic_text_classification">Text classification</a></li>
- <li><a href="/get_started/basic_regression">Regression</a></li>
- <li><a href="/get_started/overfit_and_underfit">Overfitting and underfitting</a></li>
- <li><a href="/get_started/save_and_restore_models">Save and load</a></li>
+ <li><a href="./keras/basic_classification">Basic classification</a></li>
+ <li><a href="./keras/basic_text_classification">Text classification</a></li>
+ <li><a href="./keras/basic_regression">Regression</a></li>
+ <li><a href="./keras/overfit_and_underfit">Overfitting and underfitting</a></li>
+ <li><a href="./keras/save_and_restore_models">Save and load</a></li>
</ol>
</div>
<div class="devsite-landing-row-item-buttons" style="margin-top:0;">
@@ -111,15 +109,13 @@ landing_page:
model.evaluate(x_test, y_test)
</pre>
{% dynamic if request.tld != 'cn' %}
- <a class="colab-button" target="_blank" href="https://colab.sandbox.google.com/github/tensorflow/models/blob/master/samples/core/get_started/_index.ipynb">Run in a <span>Notebook</span></a>
+ <a class="colab-button" target="_blank" href="https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/_index.ipynb">Run in a <span>Notebook</span></a>
{% dynamic endif %}
- items:
- custom_html: >
<div class="devsite-landing-row-item-description" style="border-right: 2px solid #eee;">
- <a href="https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/notebooks">
- <h3 class="hide-from-toc">Research and experimentation</h3>
- </a>
+ <h3 class="hide-from-toc">Research and experimentation</h3>
<div class="devsite-landing-row-item-description-content">
<p>
Eager execution provides an imperative, define-by-run interface for advanced operations. Write custom layers, forward passes, and training loops with auto‑differentiation. Start with
@@ -128,38 +124,38 @@ landing_page:
<ol style="padding-left:20px;">
<li>
{% dynamic if request.tld == 'cn' %}
- <a href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/1_basics.ipynb" class="external">Eager execution basics</a>
+ <a href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/eager_basics.ipynb" class="external">Eager execution basics</a>
{% dynamic else %}
- <a href="https://colab.sandbox.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/1_basics.ipynb" class="external">Eager execution basics</a>
+ <a href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/eager_basics.ipynb" class="external">Eager execution basics</a>
{% dynamic endif %}
</li>
<li>
{% dynamic if request.tld == 'cn' %}
- <a href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/2_gradients.ipynb" class="external">Automatic differentiation and gradient tapes</a>
+ <a href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/automatic_differentiation.ipynb" class="external">Automatic differentiation and gradient tape</a>
{% dynamic else %}
- <a href="https://colab.sandbox.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/2_gradients.ipynb" class="external">Automatic differentiation and gradient tapes</a>
+ <a href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/automatic_differentiation.ipynb" class="external">Automatic differentiation and gradient tape</a>
{% dynamic endif %}
</li>
<li>
{% dynamic if request.tld == 'cn' %}
- <a href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/3_training_models.ipynb" class="external">Variables, models, and training</a>
+ <a href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb" class="external">Custom training: basics</a>
{% dynamic else %}
- <a href="https://colab.sandbox.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/3_training_models.ipynb" class="external">Variables, models, and training</a>
+ <a href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb" class="external">Custom training: basics</a>
{% dynamic endif %}
</li>
<li>
{% dynamic if request.tld == 'cn' %}
- <a href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/4_high_level.ipynb" class="external">Custom layers</a>
+ <a href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_layers.ipynb" class="external">Custom layers</a>
{% dynamic else %}
- <a href="https://colab.sandbox.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/4_high_level.ipynb" class="external">Custom layers</a>
+ <a href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_layers.ipynb" class="external">Custom layers</a>
{% dynamic endif %}
</li>
- <li><a href="/get_started/eager">Custom training walkthrough</a></li>
+ <li><a href="./eager/custom_training_walkthrough">Custom training: walkthrough</a></li>
<li>
{% dynamic if request.tld == 'cn' %}
<a href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb" class="external">Example: Neural machine translation w/ attention</a>
{% dynamic else %}
- <a href="https://colab.sandbox.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb" class="external">Example: Neural machine translation w/ attention</a>
+ <a href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb" class="external">Example: Neural machine translation w/ attention</a>
{% dynamic endif %}
</li>
</ol>
@@ -170,19 +166,20 @@ landing_page:
</div>
- custom_html: >
<div class="devsite-landing-row-item-description">
- <a href="#">
- <h3 class="hide-from-toc">ML at production scale</h3>
- </a>
+ <h3 class="hide-from-toc">ML at production scale</h3>
<div class="devsite-landing-row-item-description-content">
<p>
Estimators can train large models on multiple machines in a
- production environment. Try the examples below and read the
+ production environment. TensorFlow provides a collection of
+ pre-made Estimators to implement common ML algorithms. See the
<a href="/guide/estimators">Estimators guide</a>.
</p>
<ol style="padding-left: 20px;">
- <li><a href="/tutorials/text_classification_with_tf_hub">How to build a simple text classifier with TF-Hub</a></li>
- <li><a href="https://github.com/tensorflow/models/tree/master/official/boosted_trees">Classifying Higgs boson processes</a></li>
- <li><a href="/tutorials/wide_and_deep">Wide and deep learning using estimators</a></li>
+ <li><a href="/tutorials/estimators/linear">Build a linear model with Estimators</a></li>
+ <li><a href="https://github.com/tensorflow/models/tree/master/official/wide_deep" class="external">Wide and deep learning with Estimators</a></li>
+ <li><a href="https://github.com/tensorflow/models/tree/master/official/boosted_trees" class="external">Boosted trees</a></li>
+ <li><a href="/hub/tutorials/text_classification_with_tf_hub">How to build a simple text classifier with TF-Hub</a></li>
+ <li><a href="/tutorials/estimators/cnn">Build a Convolutional Neural Network using Estimators</a></li>
</ol>
</div>
<div class="devsite-landing-row-item-buttons">
@@ -193,7 +190,7 @@ landing_page:
- description: >
<h2 class="hide-from-toc">Google Colab&#58; An easy way to learn and use TensorFlow</h2>
<p>
- <a href="https://colab.sandbox.google.com/notebooks/welcome.ipynb" class="external">Colaboratory</a>
+ <a href="https://colab.research.google.com/notebooks/welcome.ipynb" class="external">Colaboratory</a>
is a Google research project created to help disseminate machine learning
education and research. It's a Jupyter notebook environment that requires
no setup to use and runs entirely in the cloud.
diff --git a/tensorflow/docs_src/tutorials/_toc.yaml b/tensorflow/docs_src/tutorials/_toc.yaml
new file mode 100644
index 0000000000..d33869af6e
--- /dev/null
+++ b/tensorflow/docs_src/tutorials/_toc.yaml
@@ -0,0 +1,103 @@
+toc:
+- title: Get started with TensorFlow
+ path: /tutorials/
+
+- title: Learn and use ML
+ style: accordion
+ section:
+ - title: Overview
+ path: /tutorials/keras/
+ - title: Basic classification
+ path: /tutorials/keras/basic_classification
+ - title: Text classification
+ path: /tutorials/keras/basic_text_classification
+ - title: Regression
+ path: /tutorials/keras/basic_regression
+ - title: Overfitting and underfitting
+ path: /tutorials/keras/overfit_and_underfit
+ - title: Save and restore models
+ path: /tutorials/keras/save_and_restore_models
+
+- title: Research and experimentation
+ style: accordion
+ section:
+ - title: Overview
+ path: /tutorials/eager/
+ - title: Eager execution
+ path: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/eager_basics.ipynb
+ status: external
+ - title: Automatic differentiation
+ path: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/automatic_differentiation.ipynb
+ status: external
+ - title: "Custom training: basics"
+ path: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb
+ status: external
+ - title: Custom layers
+ path: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_layers.ipynb
+ status: external
+ - title: "Custom training: walkthrough"
+ path: /tutorials/eager/custom_training_walkthrough
+ - title: Translation with attention
+ path: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb
+ status: external
+
+- title: ML at production scale
+ style: accordion
+ section:
+ - title: Linear model with Estimators
+ path: /tutorials/estimators/linear
+ - title: Wide and deep learning
+ path: https://github.com/tensorflow/models/tree/master/official/wide_deep
+ status: external
+ - title: Boosted trees
+ path: https://github.com/tensorflow/models/tree/master/official/boosted_trees
+ status: external
+ - title: Text classifier with TF-Hub
+ path: /hub/tutorials/text_classification_with_tf_hub
+ - title: Build a CNN using Estimators
+ path: /tutorials/estimators/cnn
+
+- title: Images
+ style: accordion
+ section:
+ - title: Image recognition
+ path: /tutorials/images/image_recognition
+ - title: Image retraining
+ path: /hub/tutorials/image_retraining
+ - title: Advanced CNN
+ path: /tutorials/images/deep_cnn
+
+- title: Sequences
+ style: accordion
+ section:
+ - title: Recurrent neural network
+ path: /tutorials/sequences/recurrent
+ - title: Drawing classification
+ path: /tutorials/sequences/recurrent_quickdraw
+ - title: Simple audio recognition
+ path: /tutorials/sequences/audio_recognition
+ - title: Neural machine translation
+ path: https://github.com/tensorflow/nmt
+ status: external
+
+- title: Data representation
+ style: accordion
+ section:
+ - title: Vector representations of words
+ path: /tutorials/representation/word2vec
+ - title: Kernel methods
+ path: /tutorials/representation/kernel_methods
+ - title: Large-scale linear models
+ path: /tutorials/representation/linear
+
+- title: Non-ML
+ style: accordion
+ section:
+ - title: Mandelbrot set
+ path: /tutorials/non-ml/mandelbrot
+ - title: Partial differential equations
+ path: /tutorials/non-ml/pdes
+
+- break: True
+- title: Next steps
+ path: /tutorials/next_steps
diff --git a/tensorflow/docs_src/tutorials/eager/custom_training_walkthrough.md b/tensorflow/docs_src/tutorials/eager/custom_training_walkthrough.md
new file mode 100644
index 0000000000..b564a27ecf
--- /dev/null
+++ b/tensorflow/docs_src/tutorials/eager/custom_training_walkthrough.md
@@ -0,0 +1,3 @@
+# Custom training: walkthrough
+
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/eager/custom_training_walkthrough.ipynb)
diff --git a/tensorflow/docs_src/tutorials/eager/index.md b/tensorflow/docs_src/tutorials/eager/index.md
new file mode 100644
index 0000000000..5445e0c343
--- /dev/null
+++ b/tensorflow/docs_src/tutorials/eager/index.md
@@ -0,0 +1,13 @@
+# Research and experimentation
+
+Eager execution provides an imperative, define-by-run interface for advanced
+operations. Write custom layers, forward passes, and training loops with
+auto&nbsp;differentiation. Start with these notebooks, then read the
+[eager execution guide](../../guide/eager).
+
+1. <span>[Eager execution](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/eager_intro.ipynb){:.external}</span>
+2. <span>[Automatic differentiation and gradient tape](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/automatic_differentiation.ipynb){:.external}</span>
+3. <span>[Custom training: basics](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb){:.external}</span>
+4. <span>[Custom layers](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_layers.ipynb){:.external}</span>
+5. [Custom training: walkthrough](/tutorials/eager/custom_training_walkthrough)
+6. <span>[Advanced example: Neural machine translation with attention](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb){:.external}</span>
diff --git a/tensorflow/docs_src/tutorials/layers.md b/tensorflow/docs_src/tutorials/estimators/cnn.md
index 212e337637..12a215b50c 100644
--- a/tensorflow/docs_src/tutorials/layers.md
+++ b/tensorflow/docs_src/tutorials/estimators/cnn.md
@@ -1,4 +1,4 @@
-# A Guide to TF Layers: Building a Convolutional Neural Network
+# Build a Convolutional Neural Network using Estimators
The TensorFlow @{tf.layers$`layers` module} provides a high-level API that makes
it easy to construct a neural network. It provides methods that facilitate the
@@ -470,51 +470,18 @@ as the loss metric. The following code calculates cross entropy when the model
runs in either `TRAIN` or `EVAL` mode:
```python
-onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
-loss = tf.losses.softmax_cross_entropy(
- onehot_labels=onehot_labels, logits=logits)
+loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
```
Let's take a closer look at what's happening above.
-Our `labels` tensor contains a list of predictions for our examples, e.g. `[1,
-9, ...]`. In order to calculate cross-entropy, first we need to convert `labels`
-to the corresponding
-[one-hot encoding](https://www.quora.com/What-is-one-hot-encoding-and-when-is-it-used-in-data-science):
+Our `labels` tensor contains a list of prediction indices for our examples,
+e.g. `[1, 9, ...]`. `logits` contains the linear outputs of our last layer.
-```none
-[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
- ...]
-```
-
-We use the @{tf.one_hot} function
-to perform this conversion. `tf.one_hot()` has two required arguments:
-
-* `indices`. The locations in the one-hot tensor that will have "on
- values"—i.e., the locations of `1` values in the tensor shown above.
-* `depth`. The depth of the one-hot tensor—i.e., the number of target classes.
- Here, the depth is `10`.
+`tf.losses.sparse_softmax_cross_entropy` calculates the softmax cross-entropy
+(aka categorical cross-entropy, or negative log-likelihood) from these two
+inputs in an efficient, numerically stable way.
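+
+As a quick sketch of the equivalence with the one-hot approach it replaces
+(the label and logit values here are illustrative):
+
+```python
+import tensorflow as tf
+
+labels = tf.constant([1, 9])        # integer class indices
+logits = tf.random_normal([2, 10])  # linear outputs of the last layer
+
+# Sparse version: consumes the integer indices directly.
+sparse_loss = tf.losses.sparse_softmax_cross_entropy(
+    labels=labels, logits=logits)
+
+# Dense version: requires a one-hot conversion first.
+onehot_labels = tf.one_hot(indices=labels, depth=10)
+dense_loss = tf.losses.softmax_cross_entropy(
+    onehot_labels=onehot_labels, logits=logits)
+
+# Both evaluate to the same scalar loss.
+```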
-The following code creates the one-hot tensor for our labels, `onehot_labels`:
-
-```python
-onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
-```
-
-Because `labels` contains a series of values from 0–9, `indices` is just our
-`labels` tensor, with values cast to integers. The `depth` is `10` because we
-have 10 possible target classes, one for each digit.
-
-Next, we compute cross-entropy of `onehot_labels` and the softmax of the
-predictions from our logits layer. `tf.losses.softmax_cross_entropy()` takes
-`onehot_labels` and `logits` as arguments, performs softmax activation on
-`logits`, calculates cross-entropy, and returns our `loss` as a scalar `Tensor`:
-
-```python
-loss = tf.losses.softmax_cross_entropy(
- onehot_labels=onehot_labels, logits=logits)
-```
### Configure the Training Op
diff --git a/tensorflow/docs_src/tutorials/estimators/linear.md b/tensorflow/docs_src/tutorials/estimators/linear.md
new file mode 100644
index 0000000000..067a33ac03
--- /dev/null
+++ b/tensorflow/docs_src/tutorials/estimators/linear.md
@@ -0,0 +1,3 @@
+# Build a linear model with Estimators
+
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/estimators/linear.ipynb)
diff --git a/tensorflow/docs_src/tutorials/image_retraining.md b/tensorflow/docs_src/tutorials/image_retraining.md
deleted file mode 100644
index 27784eef9c..0000000000
--- a/tensorflow/docs_src/tutorials/image_retraining.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# How to Retrain Inception's Final Layer for New Categories
-
-**NOTE: This tutorial has moved to**
-https://github.com/tensorflow/hub/tree/master/docs/tutorials/image_retraining.md
diff --git a/tensorflow/docs_src/tutorials/deep_cnn.md b/tensorflow/docs_src/tutorials/images/deep_cnn.md
index 44a32d9d1d..27963575f5 100644
--- a/tensorflow/docs_src/tutorials/deep_cnn.md
+++ b/tensorflow/docs_src/tutorials/images/deep_cnn.md
@@ -1,7 +1,4 @@
-# Convolutional Neural Networks
-
-> **NOTE:** This tutorial is intended for *advanced* users of TensorFlow
-and assumes expertise and experience in machine learning.
+# Advanced Convolutional Neural Networks
## Overview
@@ -83,21 +80,21 @@ for details. It consists of 1,068,298 learnable parameters and requires about
## Code Organization
The code for this tutorial resides in
-[`models/tutorials/image/cifar10/`](https://www.tensorflow.org/code/tensorflow_models/tutorials/image/cifar10/).
+[`models/tutorials/image/cifar10/`](https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10/).
File | Purpose
--- | ---
-[`cifar10_input.py`](https://www.tensorflow.org/code/tensorflow_models/tutorials/image/cifar10/cifar10_input.py) | Reads the native CIFAR-10 binary file format.
-[`cifar10.py`](https://www.tensorflow.org/code/tensorflow_models/tutorials/image/cifar10/cifar10.py) | Builds the CIFAR-10 model.
-[`cifar10_train.py`](https://www.tensorflow.org/code/tensorflow_models/tutorials/image/cifar10/cifar10_train.py) | Trains a CIFAR-10 model on a CPU or GPU.
-[`cifar10_multi_gpu_train.py`](https://www.tensorflow.org/code/tensorflow_models/tutorials/image/cifar10/cifar10_multi_gpu_train.py) | Trains a CIFAR-10 model on multiple GPUs.
-[`cifar10_eval.py`](https://www.tensorflow.org/code/tensorflow_models/tutorials/image/cifar10/cifar10_eval.py) | Evaluates the predictive performance of a CIFAR-10 model.
+[`cifar10_input.py`](https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10/cifar10_input.py) | Reads the native CIFAR-10 binary file format.
+[`cifar10.py`](https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10/cifar10.py) | Builds the CIFAR-10 model.
+[`cifar10_train.py`](https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10/cifar10_train.py) | Trains a CIFAR-10 model on a CPU or GPU.
+[`cifar10_multi_gpu_train.py`](https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10/cifar10_multi_gpu_train.py) | Trains a CIFAR-10 model on multiple GPUs.
+[`cifar10_eval.py`](https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10/cifar10_eval.py) | Evaluates the predictive performance of a CIFAR-10 model.
## CIFAR-10 Model
The CIFAR-10 network is largely contained in
-[`cifar10.py`](https://www.tensorflow.org/code/tensorflow_models/tutorials/image/cifar10/cifar10.py).
+[`cifar10.py`](https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10/cifar10.py).
The complete training
graph contains roughly 765 operations. We find that we can make the code most
reusable by constructing the graph with the following modules:
@@ -438,9 +435,6 @@ with a batch size of 64 and compare the training speed.
## Next Steps
-[Congratulations!](https://www.youtube.com/watch?v=9bZkp7q19f0) You have
-completed the CIFAR-10 tutorial.
-
If you are now interested in developing and training your own image
classification system, we recommend forking this tutorial and replacing
components to address your image classification problem.
diff --git a/tensorflow/docs_src/tutorials/image_recognition.md b/tensorflow/docs_src/tutorials/images/image_recognition.md
index 332bcf54f0..d545de73df 100644
--- a/tensorflow/docs_src/tutorials/image_recognition.md
+++ b/tensorflow/docs_src/tutorials/images/image_recognition.md
@@ -434,7 +434,6 @@ should be able to transfer some of that understanding to solving related
problems. One way to perform transfer learning is to remove the final
classification layer of the network and extract
the [next-to-last layer of the CNN](https://arxiv.org/abs/1310.1531), in this case a 2048 dimensional vector.
-There's a guide to doing this @{$image_retraining$in the how-to section}.
## Resources for Learning More
@@ -450,7 +449,7 @@ covering them.
To find out more about implementing convolutional neural networks, you can jump
to the TensorFlow @{$deep_cnn$deep convolutional networks tutorial},
-or start a bit more gently with our @{$layers$MNIST starter tutorial}.
+or start a bit more gently with our [Estimator MNIST tutorial](../estimators/cnn.md).
Finally, if you want to get up to speed on research in this area, you can
read the recent work of all the papers referenced in this tutorial.
diff --git a/tensorflow/docs_src/tutorials/index.md b/tensorflow/docs_src/tutorials/index.md
deleted file mode 100644
index 6bd3a3a897..0000000000
--- a/tensorflow/docs_src/tutorials/index.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# Tutorials
-
-
-This section contains tutorials demonstrating how to do specific tasks
-in TensorFlow. If you are new to TensorFlow, we recommend reading
-[Get Started with TensorFlow](/get_started/).
-
-## Images
-
-These tutorials cover different aspects of image recognition:
-
- * @{$layers$MNIST}, which introduces convolutional neural networks (CNNs) and
- demonstrates how to build a CNN in TensorFlow.
- * @{$image_recognition}, which introduces the field of image recognition and
- uses a pre-trained model (Inception) for recognizing images.
- * @{$image_retraining}, which has a wonderfully self-explanatory title.
- * @{$deep_cnn}, which demonstrates how to build a small CNN for recognizing
- images. This tutorial is aimed at advanced TensorFlow users.
-
-
-## Sequences
-
-These tutorials focus on machine learning problems dealing with sequence data.
-
- * @{$recurrent}, which demonstrates how to use a
- recurrent neural network to predict the next word in a sentence.
- * @{$seq2seq}, which demonstrates how to use a
- sequence-to-sequence model to translate text from English to French.
- * @{$recurrent_quickdraw}
- builds a classification model for drawings, directly from the sequence of
- pen strokes.
- * @{$audio_recognition}, which shows how to
- build a basic speech recognition network.
-
-## Data representation
-
-These tutorials demonstrate various data representations that can be used in
-TensorFlow.
-
- * @{$wide}, uses
- @{tf.feature_column$feature columns} to feed a variety of data types
- to linear model, to solve a classification problem.
- * @{$wide_and_deep}, builds on the
- above linear model tutorial, adding a deep feed-forward neural network
- component and a DNN-compatible data representation.
- * @{$word2vec}, which demonstrates how to
- create an embedding for words.
- * @{$kernel_methods},
- which shows how to improve the quality of a linear model by using explicit
- kernel mappings.
-
-## Non Machine Learning
-
-Although TensorFlow specializes in machine learning, the core of TensorFlow is
-a powerful numeric computation system which you can also use to solve other
-kinds of math problems. For example:
-
- * @{$mandelbrot}
- * @{$pdes}
diff --git a/tensorflow/docs_src/get_started/basic_classification.md b/tensorflow/docs_src/tutorials/keras/basic_classification.md
index 91bbd85b24..e028af99b9 100644
--- a/tensorflow/docs_src/get_started/basic_classification.md
+++ b/tensorflow/docs_src/tutorials/keras/basic_classification.md
@@ -1,3 +1,3 @@
# Basic Classification
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/basic_classification.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/basic_classification.ipynb)
diff --git a/tensorflow/docs_src/get_started/basic_regression.md b/tensorflow/docs_src/tutorials/keras/basic_regression.md
index a535f22f5a..8721b7aca1 100644
--- a/tensorflow/docs_src/get_started/basic_regression.md
+++ b/tensorflow/docs_src/tutorials/keras/basic_regression.md
@@ -1,3 +1,3 @@
# Basic Regression
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/basic_regression.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/basic_regression.ipynb)
diff --git a/tensorflow/docs_src/get_started/basic_text_classification.md b/tensorflow/docs_src/tutorials/keras/basic_text_classification.md
index 7c5d4f7896..c2a16bdd20 100644
--- a/tensorflow/docs_src/get_started/basic_text_classification.md
+++ b/tensorflow/docs_src/tutorials/keras/basic_text_classification.md
@@ -1,3 +1,3 @@
# Basic Text Classification
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/basic_text_classification.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/basic_text_classification.ipynb)
diff --git a/tensorflow/docs_src/tutorials/keras/index.md b/tensorflow/docs_src/tutorials/keras/index.md
new file mode 100644
index 0000000000..9d42281c8f
--- /dev/null
+++ b/tensorflow/docs_src/tutorials/keras/index.md
@@ -0,0 +1,22 @@
+# Learn and use machine learning
+
+This notebook collection is inspired by the book
+*[Deep Learning with Python](https://books.google.com/books?id=Yo3CAQAACAAJ)*.
+These tutorials use `tf.keras`, TensorFlow's high-level Python API for building
+and training deep learning models. To learn more about using Keras with
+TensorFlow, see the [TensorFlow Keras Guide](../../guide/keras).
+
+Publisher's note: *Deep Learning with Python* introduces the field of deep
+learning using the Python language and the powerful Keras library. Written by
+Keras creator and Google AI researcher François Chollet, this book builds your
+understanding through intuitive explanations and practical examples.
+
+To learn about machine learning fundamentals and concepts, consider taking the
+[Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course/).
+Additional TensorFlow and machine learning resources are listed in [next steps](../next_steps).
+
+1. [Basic classification](./basic_classification)
+2. [Text classification](./basic_text_classification)
+3. [Regression](./basic_regression)
+4. [Overfitting and underfitting](./overfit_and_underfit)
+5. [Save and restore models](./save_and_restore_models)
diff --git a/tensorflow/docs_src/get_started/overfit_and_underfit.md b/tensorflow/docs_src/tutorials/keras/overfit_and_underfit.md
index e5b5ae7b5a..f07f3addd8 100644
--- a/tensorflow/docs_src/get_started/overfit_and_underfit.md
+++ b/tensorflow/docs_src/tutorials/keras/overfit_and_underfit.md
@@ -1,3 +1,3 @@
# Overfitting and Underfitting
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/overfit_and_underfit.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/overfit_and_underfit.ipynb)
diff --git a/tensorflow/docs_src/get_started/save_and_restore_models.md b/tensorflow/docs_src/tutorials/keras/save_and_restore_models.md
index 44b3772945..a799b379a0 100644
--- a/tensorflow/docs_src/get_started/save_and_restore_models.md
+++ b/tensorflow/docs_src/tutorials/keras/save_and_restore_models.md
@@ -1,3 +1,3 @@
# Save and restore Models
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/save_and_restore_models.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/save_and_restore_models.ipynb)
diff --git a/tensorflow/docs_src/tutorials/leftnav_files b/tensorflow/docs_src/tutorials/leftnav_files
deleted file mode 100644
index 888052428f..0000000000
--- a/tensorflow/docs_src/tutorials/leftnav_files
+++ /dev/null
@@ -1,23 +0,0 @@
-index.md
-
-### Images
-layers.md: MNIST
-image_recognition.md: Image Recognition
-image_retraining.md: Image Retraining
-deep_cnn.md
-
-### Sequences
-recurrent.md
-seq2seq.md: Neural Machine Translation
-recurrent_quickdraw.md: Drawing Classification
-audio_recognition.md
-
-### Data Representation
-wide.md: Linear Models
-wide_and_deep.md: Wide & Deep Learning
-word2vec.md
-kernel_methods.md: Kernel Methods
-
-### Non-ML
-mandelbrot.md
-pdes.md
diff --git a/tensorflow/docs_src/get_started/next_steps.md b/tensorflow/docs_src/tutorials/next_steps.md
index 6318a39c6c..01c9f7204a 100644
--- a/tensorflow/docs_src/get_started/next_steps.md
+++ b/tensorflow/docs_src/tutorials/next_steps.md
@@ -1,4 +1,4 @@
-# Next Steps
+# Next steps
## Learn more about TensorFlow
diff --git a/tensorflow/docs_src/tutorials/mandelbrot.md b/tensorflow/docs_src/tutorials/non-ml/mandelbrot.md
index 1c0a548129..1c0a548129 100755..100644
--- a/tensorflow/docs_src/tutorials/mandelbrot.md
+++ b/tensorflow/docs_src/tutorials/non-ml/mandelbrot.md
diff --git a/tensorflow/docs_src/tutorials/pdes.md b/tensorflow/docs_src/tutorials/non-ml/pdes.md
index 425e8d7084..b5a0fa834a 100755..100644
--- a/tensorflow/docs_src/tutorials/pdes.md
+++ b/tensorflow/docs_src/tutorials/non-ml/pdes.md
@@ -135,7 +135,6 @@ for i in range(1000):
DisplayArray(U.eval(), rng=[-0.1, 0.1])
```
-![jpeg](../images/pde_output_2.jpg)
+![jpeg](../../images/pde_output_2.jpg)
Look! Ripples!
-
diff --git a/tensorflow/docs_src/tutorials/kernel_methods.md b/tensorflow/docs_src/tutorials/representation/kernel_methods.md
index 205e2a2d2c..f3c232c511 100644
--- a/tensorflow/docs_src/tutorials/kernel_methods.md
+++ b/tensorflow/docs_src/tutorials/representation/kernel_methods.md
@@ -27,7 +27,7 @@ TensorFlow will provide support for sparse features at a later release.
This tutorial uses [tf.contrib.learn](https://www.tensorflow.org/code/tensorflow/contrib/learn/python/learn)
(TensorFlow's high-level Machine Learning API) Estimators for our ML models.
-If you are not familiar with this API, [tf.estimator Quickstart](https://www.tensorflow.org/get_started/estimator)
+If you are not familiar with this API, the [Estimator guide](../../guide/estimators.md)
is a good place to start. We will use the MNIST dataset. The tutorial consists
of the following steps:
diff --git a/tensorflow/docs_src/tutorials/linear.md b/tensorflow/docs_src/tutorials/representation/linear.md
index 3f247ade26..1b418cf065 100644
--- a/tensorflow/docs_src/tutorials/linear.md
+++ b/tensorflow/docs_src/tutorials/representation/linear.md
@@ -11,8 +11,9 @@ those tools. It explains:
deep learning to get the advantages of both.
Read this overview to decide whether the Estimator's linear model tools might
-be useful to you. Then do the @{$wide$Linear Models tutorial} to
-give it a try. This overview uses code samples from the tutorial, but the
+be useful to you. Then work through the
+[Estimator wide and deep learning tutorial](https://github.com/tensorflow/models/tree/master/official/wide_deep)
+to give it a try. This overview uses code samples from the tutorial, but the
tutorial walks through the code in greater detail.
To understand this overview it will help to have some familiarity
@@ -176,7 +177,7 @@ the name of a `FeatureColumn`. Each key's value is a tensor containing the
values of that feature for all data instances. See
@{$premade_estimators#input_fn} for a
more comprehensive look at input functions, and `input_fn` in the
-[linear models tutorial code](https://github.com/tensorflow/models/tree/master/official/wide_deep/wide_deep.py)
+[wide and deep learning tutorial](https://github.com/tensorflow/models/tree/master/official/wide_deep)
for an example implementation of an input function.
The input function is passed to the `train()` and `evaluate()` calls that
@@ -234,4 +235,5 @@ e = tf.estimator.DNNLinearCombinedClassifier(
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
```
-For more information, see the @{$wide_and_deep$Wide and Deep Learning tutorial}.
+For more information, see the
+[wide and deep learning tutorial](https://github.com/tensorflow/models/tree/master/official/wide_deep).
diff --git a/tensorflow/docs_src/tutorials/word2vec.md b/tensorflow/docs_src/tutorials/representation/word2vec.md
index 3fe7352bd2..0a1c41c84a 100644
--- a/tensorflow/docs_src/tutorials/word2vec.md
+++ b/tensorflow/docs_src/tutorials/representation/word2vec.md
@@ -23,7 +23,7 @@ straight in, feel free to look at the minimalistic implementation in
This basic example contains the code needed to download some data, train on it a
bit and visualize the result. Once you get comfortable with reading and running
the basic version, you can graduate to
-[models/tutorials/embedding/word2vec.py](https://www.tensorflow.org/code/tensorflow_models/tutorials/embedding/word2vec.py)
+[models/tutorials/embedding/word2vec.py](https://github.com/tensorflow/models/tree/master/tutorials/embedding/word2vec.py)
which is a more serious implementation that showcases some more advanced
TensorFlow principles about how to efficiently use threads to move data into a
text model, how to checkpoint during training, etc.
@@ -341,7 +341,7 @@ t-SNE.
Et voila! As expected, words that are similar end up clustering nearby each
other. For a more heavyweight implementation of word2vec that showcases more of
the advanced features of TensorFlow, see the implementation in
-[models/tutorials/embedding/word2vec.py](https://www.tensorflow.org/code/tensorflow_models/tutorials/embedding/word2vec.py).
+[models/tutorials/embedding/word2vec.py](https://github.com/tensorflow/models/tree/master/tutorials/embedding/word2vec.py).
## Evaluating Embeddings: Analogical Reasoning
@@ -357,7 +357,7 @@ Download the dataset for this task from
To see how we do this evaluation, have a look at the `build_eval_graph()` and
`eval()` functions in
-[models/tutorials/embedding/word2vec.py](https://www.tensorflow.org/code/tensorflow_models/tutorials/embedding/word2vec.py).
+[models/tutorials/embedding/word2vec.py](https://github.com/tensorflow/models/tree/master/tutorials/embedding/word2vec.py).
The choice of hyperparameters can strongly influence the accuracy on this task.
To achieve state-of-the-art performance on this task requires training over a
@@ -385,13 +385,13 @@ your model is seriously bottlenecked on input data, you may want to implement a
custom data reader for your problem, as described in
@{$new_data_formats$New Data Formats}. For the case of Skip-Gram
modeling, we've actually already done this for you as an example in
-[models/tutorials/embedding/word2vec.py](https://www.tensorflow.org/code/tensorflow_models/tutorials/embedding/word2vec.py).
+[models/tutorials/embedding/word2vec.py](https://github.com/tensorflow/models/tree/master/tutorials/embedding/word2vec.py).
If your model is no longer I/O bound but you want still more performance, you
can take things further by writing your own TensorFlow Ops, as described in
@{$adding_an_op$Adding a New Op}. Again we've provided an
example of this for the Skip-Gram case
-[models/tutorials/embedding/word2vec_optimized.py](https://www.tensorflow.org/code/tensorflow_models/tutorials/embedding/word2vec_optimized.py).
+[models/tutorials/embedding/word2vec_optimized.py](https://github.com/tensorflow/models/tree/master/tutorials/embedding/word2vec_optimized.py).
Feel free to benchmark these against each other to measure performance
improvements at each stage.
diff --git a/tensorflow/docs_src/tutorials/seq2seq.md b/tensorflow/docs_src/tutorials/seq2seq.md
deleted file mode 100644
index 8928ba4f7d..0000000000
--- a/tensorflow/docs_src/tutorials/seq2seq.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Sequence-to-Sequence Models
-
-Please check out the
-[tensorflow neural machine translation tutorial](https://github.com/tensorflow/nmt)
-for building sequence-to-sequence models with the latest TensorFlow API.
diff --git a/tensorflow/docs_src/tutorials/audio_recognition.md b/tensorflow/docs_src/tutorials/sequences/audio_recognition.md
index d7a8da6f96..d7a8da6f96 100644
--- a/tensorflow/docs_src/tutorials/audio_recognition.md
+++ b/tensorflow/docs_src/tutorials/sequences/audio_recognition.md
diff --git a/tensorflow/docs_src/tutorials/recurrent.md b/tensorflow/docs_src/tutorials/sequences/recurrent.md
index 14da2c8785..715cc7856a 100644
--- a/tensorflow/docs_src/tutorials/recurrent.md
+++ b/tensorflow/docs_src/tutorials/sequences/recurrent.md
@@ -2,8 +2,8 @@
## Introduction
-Take a look at [this great article](https://colah.github.io/posts/2015-08-Understanding-LSTMs/)
-for an introduction to recurrent neural networks and LSTMs in particular.
+See [Understanding LSTM Networks](https://colah.github.io/posts/2015-08-Understanding-LSTMs/){:.external}
+for an introduction to recurrent neural networks and LSTMs.
## Language Modeling
diff --git a/tensorflow/docs_src/tutorials/recurrent_quickdraw.md b/tensorflow/docs_src/tutorials/sequences/recurrent_quickdraw.md
index 1afd861738..37bce5b76d 100644
--- a/tensorflow/docs_src/tutorials/recurrent_quickdraw.md
+++ b/tensorflow/docs_src/tutorials/sequences/recurrent_quickdraw.md
@@ -13,7 +13,7 @@ In this tutorial we'll show how to build an RNN-based recognizer for this
problem. The model will use a combination of convolutional layers, LSTM layers,
and a softmax output layer to classify the drawings:
-<center> ![RNN model structure](../images/quickdraw_model.png) </center>
+<center> ![RNN model structure](../../images/quickdraw_model.png) </center>
The figure above shows the structure of the model that we will build in this
tutorial. The input is a drawing that is encoded as a sequence of strokes of
@@ -208,7 +208,7 @@ This data is then reformatted into a tensor of shape `[num_training_samples,
max_length, 3]`. Then we determine the bounding box of the original drawing in
screen coordinates and normalize the size such that the drawing has unit height.
-<center> ![Size normalization](../images/quickdraw_sizenormalization.png) </center>
+<center> ![Size normalization](../../images/quickdraw_sizenormalization.png) </center>
Finally, we compute the differences between consecutive points and store these
as a `VarLenFeature` in a
diff --git a/tensorflow/docs_src/tutorials/wide.md b/tensorflow/docs_src/tutorials/wide.md
deleted file mode 100644
index 27ce75a30d..0000000000
--- a/tensorflow/docs_src/tutorials/wide.md
+++ /dev/null
@@ -1,461 +0,0 @@
-# TensorFlow Linear Model Tutorial
-
-In this tutorial, we will use the tf.estimator API in TensorFlow to solve a
-binary classification problem: Given census data about a person such as age,
-education, marital status, and occupation (the features), we will try to predict
-whether or not the person earns more than 50,000 dollars a year (the target
-label). We will train a **logistic regression** model, and given an individual's
-information our model will output a number between 0 and 1, which can be
-interpreted as the probability that the individual has an annual income of over
-50,000 dollars.
-
-## Setup
-
-To try the code for this tutorial:
-
-1. @{$install$Install TensorFlow} if you haven't already.
-
-2. Download [the tutorial code](https://github.com/tensorflow/models/tree/master/official/wide_deep/).
-
-3. Execute the data download script we provide:
-
- $ python data_download.py
-
-4. Execute the tutorial code with the following command to train the linear
-model described in this tutorial:
-
- $ python wide_deep.py --model_type=wide
-
-Read on to find out how this code builds its linear model.
-
-## Reading The Census Data
-
-The dataset we'll be using is the
-[Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/Census+Income).
-We have provided
-[data_download.py](https://github.com/tensorflow/models/tree/master/official/wide_deep/data_download.py)
-which downloads the data and performs some additional cleanup.
-
-Since the task is a binary classification problem, we'll construct a label
-column named "label" whose value is 1 if the income is over 50K, and 0
-otherwise. For reference, see `input_fn` in
-[wide_deep.py](https://github.com/tensorflow/models/tree/master/official/wide_deep/wide_deep.py).
-
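-As a minimal illustration of that rule (a hypothetical pandas snippet, not the
-tutorial's own code, which builds the label inside `input_fn`):
-
-```python
-import pandas as pd
-
-# A stand-in DataFrame for the raw census rows.
-df = pd.DataFrame({'income_bracket': ['>50K', '<=50K', '>50K']})
-# 1 if the income is over 50K, and 0 otherwise.
-df['label'] = (df['income_bracket'] == '>50K').astype(int)
-print(df['label'].tolist())  # [1, 0, 1]
-```
-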
-Next, let's take a look at the dataframe and see which columns we can use to
-predict the target label. The columns can be grouped into two types—categorical
-and continuous columns:
-
-* A column is called **categorical** if its value can only be one of the
- categories in a finite set. For example, the relationship status of a person
- (wife, husband, unmarried, etc.) or the education level (high school,
- college, etc.) are categorical columns.
-* A column is called **continuous** if its value can be any numerical value in
- a continuous range. For example, the capital gain of a person (e.g. $14,084)
- is a continuous column.
-
-Here's a list of columns available in the Census Income dataset:
-
-| Column Name    | Type        | Description                                  |
-| -------------- | ----------- | -------------------------------------------- |
-| age            | Continuous  | The age of the individual                    |
-| workclass      | Categorical | The type of employer the individual has (government, military, private, etc.). |
-| fnlwgt         | Continuous  | The number of people the census takers believe that observation represents (sample weight). Final weight will not be used. |
-| education      | Categorical | The highest level of education achieved for that individual. |
-| education_num  | Continuous  | The highest level of education in numerical form. |
-| marital_status | Categorical | Marital status of the individual.            |
-| occupation     | Categorical | The occupation of the individual.            |
-| relationship   | Categorical | Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried. |
-| race           | Categorical | Amer-Indian-Eskimo, Asian-Pac-Islander, Black, White, Other. |
-| gender         | Categorical | Female, Male.                                |
-| capital_gain   | Continuous  | Capital gains recorded.                      |
-| capital_loss   | Continuous  | Capital losses recorded.                     |
-| hours_per_week | Continuous  | Hours worked per week.                       |
-| native_country | Categorical | Country of origin of the individual.         |
-| income_bracket | Categorical | ">50K" or "<=50K", meaning whether the person makes more than $50,000 annually. |
-
-## Converting Data into Tensors
-
-When building a tf.estimator model, the input data is specified by means of an
-Input Builder function. This builder function will not be called until it is
-later passed to tf.estimator.Estimator methods such as `train` and `evaluate`.
-The purpose of this function is to construct the input data, which is
-represented in the form of @{tf.Tensor}s or @{tf.SparseTensor}s.
-In more detail, the input builder function returns the following as a pair:
-
-1. `features`: A dict from feature column names to `Tensors` or
- `SparseTensors`.
-2. `labels`: A `Tensor` containing the label column.
-
-The keys of `features` will be used to construct columns in the next
-section. Because we want to call the `train` and `evaluate` methods with
-different data, we define a method that returns an input function based on the
-given data. Note that the returned input function will be called while
-constructing the TensorFlow graph, not while running the graph. What it
-returns is a representation of the input data as the fundamental unit of
-TensorFlow computation: a `Tensor` (or `SparseTensor`).
-
-Each continuous column in the train or test data will be converted into a
-`Tensor`, which in general is a good format to represent dense data. For
-categorical data, we must represent the data as a `SparseTensor`. This data
-format is good for representing sparse data. Our `input_fn` uses the `tf.data`
-API, which makes it easy to apply transformations to our dataset:
-
-```python
-def input_fn(data_file, num_epochs, shuffle, batch_size):
- """Generate an input function for the Estimator."""
- assert tf.gfile.Exists(data_file), (
- '%s not found. Please make sure you have either run data_download.py or '
- 'set both arguments --train_data and --test_data.' % data_file)
-
- def parse_csv(value):
- print('Parsing', data_file)
- columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS)
- features = dict(zip(_CSV_COLUMNS, columns))
- labels = features.pop('income_bracket')
- return features, tf.equal(labels, '>50K')
-
- # Extract lines from input files using the Dataset API.
- dataset = tf.data.TextLineDataset(data_file)
-
- if shuffle:
- dataset = dataset.shuffle(buffer_size=_SHUFFLE_BUFFER)
-
- dataset = dataset.map(parse_csv, num_parallel_calls=5)
-
- # We call repeat after shuffling, rather than before, to prevent separate
- # epochs from blending together.
- dataset = dataset.repeat(num_epochs)
- dataset = dataset.batch(batch_size)
-
- iterator = dataset.make_one_shot_iterator()
- features, labels = iterator.get_next()
- return features, labels
-```
-
-## Selecting and Engineering Features for the Model
-
-Selecting and crafting the right set of feature columns is key to learning an
-effective model. A **feature column** can be either one of the raw columns in
-the original dataframe (let's call them **base feature columns**), or any new
-columns created based on some transformations defined over one or multiple base
-columns (let's call them **derived feature columns**). Basically, a "feature
-column" is an abstraction covering any raw or derived variable that can be used
-to predict the target label.
-
-### Base Categorical Feature Columns
-
-To define a feature column for a categorical feature, we can create a
-`CategoricalColumn` using the tf.feature_column API. If you know the set of all
-possible feature values of a column and there are only a few of them, you can
-use `categorical_column_with_vocabulary_list`. Each key in the list will get
-assigned an auto-incremental ID starting from 0. For example, for the
-`relationship` column we can assign the feature string "Husband" to an integer
-ID of 0 and "Not-in-family" to 1, etc., by doing:
-
-```python
-relationship = tf.feature_column.categorical_column_with_vocabulary_list(
- 'relationship', [
- 'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried',
- 'Other-relative'])
-```
-
-What if we don't know the set of possible values in advance? Not a problem. We
-can use `categorical_column_with_hash_bucket` instead:
-
-```python
-occupation = tf.feature_column.categorical_column_with_hash_bucket(
- 'occupation', hash_bucket_size=1000)
-```
-
-What will happen is that each possible value in the feature column `occupation`
-will be hashed to an integer ID as it is encountered in training. See an example
-illustration below:
-
-ID | Feature
---- | -------------
-... |
-9 | `"Machine-op-inspct"`
-... |
-103 | `"Farming-fishing"`
-... |
-375 | `"Protective-serv"`
-... |
-
-No matter which way we choose to define a categorical column, each feature
-string will be mapped to an integer ID by looking up a fixed mapping or by
-hashing.
-Note that hashing collisions are possible, but may not significantly impact the
-model quality. Under the hood, the `LinearModel` class is responsible for
-managing the mapping and creating `tf.Variable` to store the model parameters
-(also known as model weights) for each feature ID. The model parameters will be
-learned through the model training process we'll go through later.
-
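-As a rough sketch of the bucketing idea (using `zlib.crc32` as a stand-in;
-TensorFlow uses its own fingerprint hash internally, so the actual IDs will
-differ):
-
-```python
-import zlib
-
-def toy_hash_bucket(value, hash_bucket_size=1000):
-  # Map an arbitrary feature string to a stable ID in [0, hash_bucket_size).
-  return zlib.crc32(value.encode('utf-8')) % hash_bucket_size
-
-for occupation_value in ['Machine-op-inspct', 'Farming-fishing']:
-  print(occupation_value, toy_hash_bucket(occupation_value))
-```
-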
-We'll use a similar trick to define the other categorical features:
-
-```python
-education = tf.feature_column.categorical_column_with_vocabulary_list(
- 'education', [
- 'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college',
- 'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school',
- '5th-6th', '10th', '1st-4th', 'Preschool', '12th'])
-
-marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
- 'marital_status', [
- 'Married-civ-spouse', 'Divorced', 'Married-spouse-absent',
- 'Never-married', 'Separated', 'Married-AF-spouse', 'Widowed'])
-
-relationship = tf.feature_column.categorical_column_with_vocabulary_list(
- 'relationship', [
- 'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried',
- 'Other-relative'])
-
-workclass = tf.feature_column.categorical_column_with_vocabulary_list(
- 'workclass', [
- 'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov',
- 'Local-gov', '?', 'Self-emp-inc', 'Without-pay', 'Never-worked'])
-
-# To show an example of hashing:
-occupation = tf.feature_column.categorical_column_with_hash_bucket(
- 'occupation', hash_bucket_size=1000)
-```
-
-### Base Continuous Feature Columns
-
-Similarly, we can define a `NumericColumn` for each continuous feature column
-that we want to use in the model:
-
-```python
-age = tf.feature_column.numeric_column('age')
-education_num = tf.feature_column.numeric_column('education_num')
-capital_gain = tf.feature_column.numeric_column('capital_gain')
-capital_loss = tf.feature_column.numeric_column('capital_loss')
-hours_per_week = tf.feature_column.numeric_column('hours_per_week')
-```
-
-### Making Continuous Features Categorical through Bucketization
-
-Sometimes the relationship between a continuous feature and the label is not
-linear. As a hypothetical example, a person's income may grow with age in the
-early stage of one's career, then the growth may slow at some point, and finally
-the income decreases after retirement. In this scenario, using the raw `age` as
-a real-valued feature column might not be a good choice because the model can
-only learn one of the three cases:
-
-1. Income always increases at some rate as age grows (positive correlation),
-1. Income always decreases at some rate as age grows (negative correlation), or
-1. Income stays the same no matter at what age (no correlation)
-
-If we want to learn the fine-grained correlation between income and each age
-group separately, we can leverage **bucketization**. Bucketization is a process
-of dividing the entire range of a continuous feature into a set of consecutive
-bins/buckets, and then converting the original numerical feature into a bucket
-ID (as a categorical feature) depending on which bucket that value falls into.
-So, we can define a `bucketized_column` over `age` as:
-
-```python
-age_buckets = tf.feature_column.bucketized_column(
- age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
-```
-
-where `boundaries` is a list of bucket boundaries. In this case, there are
-10 boundaries, resulting in 11 age group buckets (from age 17 and below, 18-24,
-25-29, ..., to 65 and over).
-
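-To make the bucket arithmetic concrete, here is a small standalone check (pure
-Python, not part of the tutorial code) that reproduces the bucket IDs a
-`bucketized_column` assigns:
-
-```python
-import bisect
-
-boundaries = [18, 25, 30, 35, 40, 45, 50, 55, 60, 65]
-# bisect_right is the bucket ID: age 17 -> 0, 18-24 -> 1, ..., 65+ -> 10.
-for age in [17, 18, 40, 70]:
-  print(age, bisect.bisect_right(boundaries, age))
-```
-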
-### Intersecting Multiple Columns with CrossedColumn
-
-Using each base feature column separately may not be enough to explain the data.
-For example, the correlation between education and the label (earning > 50,000
-dollars) may be different for different occupations. Therefore, if we only learn
-a single model weight for `education="Bachelors"` and `education="Masters"`, we
-won't be able to capture every single education-occupation combination (e.g.
-distinguishing between `education="Bachelors" AND occupation="Exec-managerial"`
-and `education="Bachelors" AND occupation="Craft-repair"`). To learn the
-differences between different feature combinations, we can add **crossed feature
-columns** to the model.
-
-```python
-education_x_occupation = tf.feature_column.crossed_column(
- ['education', 'occupation'], hash_bucket_size=1000)
-```
-
-We can also create a `CrossedColumn` over more than two columns. Each
-constituent column can be either a base feature column that is categorical
-(`SparseColumn`), a bucketized real-valued feature column (`BucketizedColumn`),
-or even another `CrossedColumn`. Here's an example:
-
-```python
-age_buckets_x_education_x_occupation = tf.feature_column.crossed_column(
- [age_buckets, 'education', 'occupation'], hash_bucket_size=1000)
-```
-
-## Defining The Logistic Regression Model
-
-After processing the input data and defining all the feature columns, we're now
-ready to put them all together and build a Logistic Regression model. In the
-previous section we've seen several types of base and derived feature columns,
-including:
-
-* `CategoricalColumn`
-* `NumericColumn`
-* `BucketizedColumn`
-* `CrossedColumn`
-
-All of these are subclasses of the abstract `FeatureColumn` class, and can be
-added to the `feature_columns` field of a model:
-
-```python
-base_columns = [
- education, marital_status, relationship, workclass, occupation,
- age_buckets,
-]
-crossed_columns = [
- tf.feature_column.crossed_column(
- ['education', 'occupation'], hash_bucket_size=1000),
- tf.feature_column.crossed_column(
- [age_buckets, 'education', 'occupation'], hash_bucket_size=1000),
-]
-
-model_dir = tempfile.mkdtemp()
-model = tf.estimator.LinearClassifier(
- model_dir=model_dir, feature_columns=base_columns + crossed_columns)
-```
-
-The model also automatically learns a bias term, which controls the prediction
-one would make without observing any features (see the section "How Logistic
-Regression Works" for more explanations). The learned model files will be stored
-in `model_dir`.
-
-## Training and Evaluating Our Model
-
-After adding all the features to the model, let's look at how to actually
-train it. Training a model is just a single command using the
-tf.estimator API:
-
-```python
-model.train(input_fn=lambda: input_fn(train_data, num_epochs, True, batch_size))
-```
-
-After the model is trained, we can evaluate how good our model is at predicting
-the labels of the holdout data:
-
-```python
-results = model.evaluate(input_fn=lambda: input_fn(
- test_data, 1, False, batch_size))
-for key in sorted(results):
- print('%s: %s' % (key, results[key]))
-```
-
-The first line of the final output should be something like
-`accuracy: 0.83557522`, which means the accuracy is 83.6%. Feel free to try more
-features and transformations and see if you can do even better!
-
-After the model is evaluated, we can use it to predict whether an individual
-has an annual income of over 50,000 dollars given that individual's
-information:
-
-```python
-pred_iter = model.predict(input_fn=lambda: input_fn(FLAGS.test_data, 1, False, 1))
-for pred in pred_iter:
-  print(pred['classes'])
-```
-
-The prediction output will look like `[b'1']` or `[b'0']`, indicating whether
-or not the corresponding individual has an annual income of over 50,000 dollars.
-
-If you'd like to see a working end-to-end example, you can download our
-[example code](https://github.com/tensorflow/models/tree/master/official/wide_deep/wide_deep.py)
-and set the `model_type` flag to `wide`.
-
-## Adding Regularization to Prevent Overfitting
-
-Regularization is a technique used to avoid **overfitting**. Overfitting happens
-when your model does well on the data it is trained on, but worse on test data
-that the model has not seen before, such as live traffic. Overfitting generally
-occurs when a model is excessively complex, such as having too many parameters
-relative to the amount of observed training data. Regularization allows you
-to control your model's complexity and makes the model more generalizable to
-unseen data.
-
-In the Linear Model library, you can add L1 and L2 regularization to the model
-as follows:
-
-```python
-model = tf.estimator.LinearClassifier(
- model_dir=model_dir, feature_columns=base_columns + crossed_columns,
- optimizer=tf.train.FtrlOptimizer(
- learning_rate=0.1,
- l1_regularization_strength=1.0,
- l2_regularization_strength=1.0))
-```
-
-One important difference between L1 and L2 regularization is that L1
-regularization tends to make model weights stay at zero, creating sparser
-models, whereas L2 regularization also tries to make the model weights closer to
-zero but not necessarily zero. Therefore, if you increase the strength of L1
-regularization, you will have a smaller model size because many of the model
-weights will be zero. This is often desirable when the feature space is very
-large but sparse, and when there are resource constraints that prevent you from
-serving a model that is too large.
-
-In practice, you should try various combinations of L1 and L2 regularization
-strengths to find the parameters that best control overfitting and give you a
-desirable model size.
-
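-As a minimal sketch of such a search (assuming the imports, feature columns,
-`input_fn`, and data variables from the snippets above), you could loop over a
-small grid:
-
-```python
-import tempfile
-
-# Hypothetical grid; tune the ranges for your problem.
-for l1 in [0.0, 0.1, 1.0]:
-  for l2 in [0.0, 0.1, 1.0]:
-    model = tf.estimator.LinearClassifier(
-        model_dir=tempfile.mkdtemp(),
-        feature_columns=base_columns + crossed_columns,
-        optimizer=tf.train.FtrlOptimizer(
-            learning_rate=0.1,
-            l1_regularization_strength=l1,
-            l2_regularization_strength=l2))
-    model.train(input_fn=lambda: input_fn(train_data, num_epochs, True, batch_size))
-    results = model.evaluate(input_fn=lambda: input_fn(
-        test_data, 1, False, batch_size))
-    print('l1=%g l2=%g accuracy=%s' % (l1, l2, results['accuracy']))
-```
-
-Picking the strengths on a held-out validation set, rather than the test set,
-avoids overfitting your hyperparameters to the final evaluation data.
-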
-## How Logistic Regression Works
-
-Finally, let's take a minute to talk about what the Logistic Regression model
-actually looks like in case you're not already familiar with it. We'll denote
-the label as \\(Y\\), and the set of observed features as a feature vector
-\\(\mathbf{x}=[x_1, x_2, ..., x_d]\\). We define \\(Y=1\\) if an individual
-earned > 50,000 dollars and \\(Y=0\\) otherwise. In Logistic Regression, the
-probability of the label being positive (\\(Y=1\\)) given the features
-\\(\mathbf{x}\\) is given as:
-
-$$ P(Y=1|\mathbf{x}) = \frac{1}{1+\exp(-(\mathbf{w}^T\mathbf{x}+b))}$$
-
-where \\(\mathbf{w}=[w_1, w_2, ..., w_d]\\) are the model weights for the
-features \\(\mathbf{x}=[x_1, x_2, ..., x_d]\\). \\(b\\) is a constant that is
-often called the **bias** of the model. The equation consists of two parts, a
-linear model and a logistic function (a quick numeric check follows the two
-bullets below):
-
-* **Linear Model**: First, we can see that \\(\mathbf{w}^T\mathbf{x}+b = b +
- w_1x_1 + ... +w_dx_d\\) is a linear model where the output is a linear
- function of the input features \\(\mathbf{x}\\). The bias \\(b\\) is the
- prediction one would make without observing any features. The model weight
- \\(w_i\\) reflects how the feature \\(x_i\\) is correlated with the positive
- label. If \\(x_i\\) is positively correlated with the positive label, the
- weight \\(w_i\\) increases, and the probability \\(P(Y=1|\mathbf{x})\\) will
- be closer to 1. On the other hand, if \\(x_i\\) is negatively correlated
- with the positive label, then the weight \\(w_i\\) decreases and the
- probability \\(P(Y=1|\mathbf{x})\\) will be closer to 0.
-
-* **Logistic Function**: Second, we can see that there's a logistic function
- (also known as the sigmoid function) \\(S(t) = 1/(1+\exp(-t))\\) being
- applied to the linear model. The logistic function is used to convert the
- output of the linear model \\(\mathbf{w}^T\mathbf{x}+b\\) from any real
- number into the range of \\([0, 1]\\), which can be interpreted as a
- probability.
-
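-Here is that quick numeric check of the two parts, with made-up weights and
-features (the values are illustrative only):
-
-```python
-import numpy as np
-
-w = np.array([0.5, -0.25, 1.0])  # hypothetical model weights
-x = np.array([1.0, 2.0, 0.5])    # hypothetical feature vector
-b = -0.3                         # hypothetical bias
-
-logit = np.dot(w, x) + b             # linear model: w^T x + b = 0.2
-prob = 1.0 / (1.0 + np.exp(-logit))  # logistic function squashes into (0, 1)
-print(prob)  # ~0.55, interpreted as P(Y=1|x)
-```
-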
-Model training is an optimization problem: The goal is to find a set of model
-weights (i.e. model parameters) to minimize a **loss function** defined over the
-training data, such as logistic loss for Logistic Regression models. The loss
-function measures the discrepancy between the ground-truth label and the model's
-prediction. If the prediction is very close to the ground-truth label, the loss
-value will be low; if the prediction is very far from the label, the loss
-value will be high.
-
-## Learn More
-
-If you're interested in learning more, check out our
-@{$wide_and_deep$Wide & Deep Learning Tutorial} where we'll show you how to
-combine the strengths of linear models and deep neural networks by jointly
-training them using the tf.estimator API.
diff --git a/tensorflow/docs_src/tutorials/wide_and_deep.md b/tensorflow/docs_src/tutorials/wide_and_deep.md
deleted file mode 100644
index 44677a810b..0000000000
--- a/tensorflow/docs_src/tutorials/wide_and_deep.md
+++ /dev/null
@@ -1,243 +0,0 @@
-# TensorFlow Wide & Deep Learning Tutorial
-
-In the previous @{$wide$TensorFlow Linear Model Tutorial}, we trained a logistic
-regression model to predict the probability that the individual has an annual
-income of over 50,000 dollars using the
-[Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/Census+Income).
-TensorFlow is great for training deep neural networks too, and you might be
-wondering which one you should choose. Well, why not both? Would it be possible
-to combine the strengths of both in one model?
-
-In this tutorial, we'll introduce how to use the tf.estimator API to jointly
-train a wide linear model and a deep feed-forward neural network. This approach
-combines the strengths of memorization and generalization. It's useful for
-generic large-scale regression and classification problems with sparse input
-features (e.g., categorical features with a large number of possible feature
-values). If you're interested in learning more about how Wide & Deep Learning
-works, please check out our [research paper](https://arxiv.org/abs/1606.07792).
-
-![Wide & Deep Spectrum of Models](https://www.tensorflow.org/images/wide_n_deep.svg "Wide & Deep")
-
-The figure above shows a comparison of a wide model (logistic regression with
-sparse features and transformations), a deep model (feed-forward neural network
-with an embedding layer and several hidden layers), and a Wide & Deep model
-(joint training of both). At a high level, there are only 3 steps to configure a
-wide, deep, or Wide & Deep model using the tf.estimator API:
-
-1. Select features for the wide part: Choose the sparse base columns and
- crossed columns you want to use.
-1. Select features for the deep part: Choose the continuous columns, the
- embedding dimension for each categorical column, and the hidden layer sizes.
-1. Put them all together in a Wide & Deep model
- (`DNNLinearCombinedClassifier`).
-
-And that's it! Let's go through a simple example.
-
-## Setup
-
-To try the code for this tutorial:
-
-1. @{$install$Install TensorFlow} if you haven't already.
-
-2. Download [the tutorial code](https://github.com/tensorflow/models/tree/master/official/wide_deep/).
-
-3. Execute the data download script we provide:
-
- $ python data_download.py
-
-4. Execute the tutorial code with the following command to train the wide and
-deep model described in this tutorial:
-
- $ python wide_deep.py
-
-Read on to find out how this code builds its model.
-
-
-## Define Base Feature Columns
-
-First, let's define the base categorical and continuous feature columns that
-we'll use. These base columns will be the building blocks used by both the wide
-part and the deep part of the model.
-
-```python
-import tensorflow as tf
-
-# Continuous columns
-age = tf.feature_column.numeric_column('age')
-education_num = tf.feature_column.numeric_column('education_num')
-capital_gain = tf.feature_column.numeric_column('capital_gain')
-capital_loss = tf.feature_column.numeric_column('capital_loss')
-hours_per_week = tf.feature_column.numeric_column('hours_per_week')
-
-education = tf.feature_column.categorical_column_with_vocabulary_list(
- 'education', [
- 'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college',
- 'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school',
- '5th-6th', '10th', '1st-4th', 'Preschool', '12th'])
-
-marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
- 'marital_status', [
- 'Married-civ-spouse', 'Divorced', 'Married-spouse-absent',
- 'Never-married', 'Separated', 'Married-AF-spouse', 'Widowed'])
-
-relationship = tf.feature_column.categorical_column_with_vocabulary_list(
- 'relationship', [
- 'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried',
- 'Other-relative'])
-
-workclass = tf.feature_column.categorical_column_with_vocabulary_list(
- 'workclass', [
- 'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov',
- 'Local-gov', '?', 'Self-emp-inc', 'Without-pay', 'Never-worked'])
-
-# To show an example of hashing:
-occupation = tf.feature_column.categorical_column_with_hash_bucket(
- 'occupation', hash_bucket_size=1000)
-
-# Transformations.
-age_buckets = tf.feature_column.bucketized_column(
- age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
-```
-
-## The Wide Model: Linear Model with Crossed Feature Columns
-
-The wide model is a linear model with a wide set of sparse and crossed feature
-columns:
-
-```python
-base_columns = [
- education, marital_status, relationship, workclass, occupation,
- age_buckets,
-]
-
-crossed_columns = [
- tf.feature_column.crossed_column(
- ['education', 'occupation'], hash_bucket_size=1000),
- tf.feature_column.crossed_column(
- [age_buckets, 'education', 'occupation'], hash_bucket_size=1000),
-]
-```
-
-You can also see the @{$wide$TensorFlow Linear Model Tutorial} for more details.
-
-Wide models with crossed feature columns can memorize sparse interactions
-between features effectively. That being said, one limitation of crossed feature
-columns is that they do not generalize to feature combinations that have not
-appeared in the training data. Let's add a deep model with embeddings to fix
-that.
-
-## The Deep Model: Neural Network with Embeddings
-
-The deep model is a feed-forward neural network, as shown in the previous
-figure. Each of the sparse, high-dimensional categorical features is first
-converted into a low-dimensional and dense real-valued vector, often referred to
-as an embedding vector. These low-dimensional dense embedding vectors are
-concatenated with the continuous features, and then fed into the hidden layers
-of a neural network in the forward pass. The embedding values are initialized
-randomly, and are trained along with all other model parameters to minimize the
-training loss. If you're interested in learning more about embeddings, check out
-the TensorFlow tutorial on @{$word2vec$Vector Representations of Words} or
-[Word embedding](https://en.wikipedia.org/wiki/Word_embedding) on Wikipedia.
-
-Another way to represent categorical columns to feed into a neural network is
-via a one-hot or multi-hot representation. This is often appropriate for
-categorical columns with only a few possible values. As an example of a one-hot
-representation, for the relationship column, `"Husband"` can be represented as
-[1, 0, 0, 0, 0, 0], and `"Not-in-family"` as [0, 1, 0, 0, 0, 0], etc. This is a
-fixed representation, whereas embeddings are more flexible and calculated at
-training time.
-
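-As a throwaway sketch of this encoding (plain Python, independent of the
-tutorial code):
-
-```python
-relationship_values = ['Husband', 'Not-in-family', 'Wife', 'Own-child',
-                       'Unmarried', 'Other-relative']
-
-def one_hot(value):
-  # Fixed-length vector with a single 1 at the value's vocabulary index.
-  return [1 if v == value else 0 for v in relationship_values]
-
-print(one_hot('Husband'))        # [1, 0, 0, 0, 0, 0]
-print(one_hot('Not-in-family'))  # [0, 1, 0, 0, 0, 0]
-```
-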
-We'll configure the embeddings for the categorical columns using
-`embedding_column`, and concatenate them with the continuous columns.
-We also use `indicator_column` to create multi-hot representations of some
-categorical columns.
-
-```python
-deep_columns = [
- age,
- education_num,
- capital_gain,
- capital_loss,
- hours_per_week,
- tf.feature_column.indicator_column(workclass),
- tf.feature_column.indicator_column(education),
- tf.feature_column.indicator_column(marital_status),
- tf.feature_column.indicator_column(relationship),
- # To show an example of embedding
- tf.feature_column.embedding_column(occupation, dimension=8),
-]
-```
-
-The higher the `dimension` of the embedding is, the more degrees of freedom the
-model will have to learn the representations of the features. For simplicity, we
-set the dimension to 8 for all feature columns here. Empirically, a more
-informed decision for the number of dimensions is to start with a value on the
-order of \\(\log_2(n)\\) or \\(k\sqrt[4]{n}\\), where \\(n\\) is the number of
-unique features in a feature column and \\(k\\) is a small constant (usually
-smaller than 10).
-
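-For example, with the 1000 hash buckets used for `occupation` above and a
-hypothetical \\(k = 5\\), the two rules of thumb suggest:
-
-```python
-import math
-
-n, k = 1000, 5  # n: unique features in the column; k: small constant (assumed)
-print(math.log2(n))   # ~10.0, so an embedding dimension around 10
-print(k * n ** 0.25)  # ~28.1, so an embedding dimension around 28
-```
-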
-Through dense embeddings, deep models can generalize better and make predictions
-on feature pairs that were previously unseen in the training data. However, it
-is difficult to learn effective low-dimensional representations for feature
-columns when the underlying interaction matrix between two feature columns is
-sparse and high-rank. In such cases, the interaction between most feature pairs
-should be zero except for a few, but dense embeddings will lead to nonzero
-predictions for all feature pairs, and thus can over-generalize. On the other
-hand, linear models with crossed features can memorize these “exception rules”
-effectively with fewer model parameters.
-
-Now, let's see how to jointly train wide and deep models and allow them to
-complement each other’s strengths and weaknesses.
-
-## Combining Wide and Deep Models into One
-
-The wide model and the deep model are combined by summing their final output
-log odds to form the prediction, then feeding the prediction to a logistic loss
-function. All the graph definition and variable allocations have already been
-handled for you under the hood, so you simply need to create a
-`DNNLinearCombinedClassifier`:
-
-```python
-model = tf.estimator.DNNLinearCombinedClassifier(
- model_dir='/tmp/census_model',
- linear_feature_columns=base_columns + crossed_columns,
- dnn_feature_columns=deep_columns,
- dnn_hidden_units=[100, 50])
-```
-
-## Training and Evaluating The Model
-
-Before we train the model, let's read in the Census dataset as we did in the
-@{$wide$TensorFlow Linear Model tutorial}. See `data_download.py` as well as
-`input_fn` within
-[`wide_deep.py`](https://github.com/tensorflow/models/tree/master/official/wide_deep/wide_deep.py).
-
-After reading in the data, you can train and evaluate the model:
-
-```python
-# Train and evaluate the model every `FLAGS.epochs_per_eval` epochs.
-for n in range(FLAGS.train_epochs // FLAGS.epochs_per_eval):
- model.train(input_fn=lambda: input_fn(
- FLAGS.train_data, FLAGS.epochs_per_eval, True, FLAGS.batch_size))
-
- results = model.evaluate(input_fn=lambda: input_fn(
- FLAGS.test_data, 1, False, FLAGS.batch_size))
-
- # Display evaluation metrics
- print('Results at epoch', (n + 1) * FLAGS.epochs_per_eval)
- print('-' * 30)
-
- for key in sorted(results):
- print('%s: %s' % (key, results[key]))
-```
-
-The final output accuracy should be somewhere around 85.5%. If you'd like to
-see a working end-to-end example, you can download our
-[example code](https://github.com/tensorflow/models/tree/master/official/wide_deep/wide_deep.py).
-
-Note that this tutorial is just a quick example on a small dataset to get you
-familiar with the API. Wide & Deep Learning will be even more powerful if you
-try it on a large dataset with many sparse feature columns that have a large
-number of possible feature values. Again, feel free to take a look at our
-[research paper](https://arxiv.org/abs/1606.07792) for more ideas about how to
-apply Wide & Deep Learning in real-world large-scale machine learning problems.
diff --git a/tensorflow/examples/speech_commands/BUILD b/tensorflow/examples/speech_commands/BUILD
index 13bca34a86..7a44e2ee4f 100644
--- a/tensorflow/examples/speech_commands/BUILD
+++ b/tensorflow/examples/speech_commands/BUILD
@@ -56,6 +56,7 @@ tf_py_test(
srcs = ["input_data_test.py"],
additional_deps = [
":input_data",
+ ":models",
"//tensorflow/python:client_testlib",
],
)
diff --git a/tensorflow/examples/speech_commands/freeze.py b/tensorflow/examples/speech_commands/freeze.py
index c8671d9c41..7657b23c60 100644
--- a/tensorflow/examples/speech_commands/freeze.py
+++ b/tensorflow/examples/speech_commands/freeze.py
@@ -54,7 +54,7 @@ FLAGS = None
def create_inference_graph(wanted_words, sample_rate, clip_duration_ms,
clip_stride_ms, window_size_ms, window_stride_ms,
- dct_coefficient_count, model_architecture):
+ feature_bin_count, model_architecture, preprocess):
"""Creates an audio model with the nodes needed for inference.
Uses the supplied arguments to create a model, and inserts the input and
@@ -67,14 +67,19 @@ def create_inference_graph(wanted_words, sample_rate, clip_duration_ms,
clip_stride_ms: How often to run recognition. Useful for models with cache.
window_size_ms: Time slice duration to estimate frequencies from.
window_stride_ms: How far apart time slices should be.
- dct_coefficient_count: Number of frequency bands to analyze.
+ feature_bin_count: Number of frequency bands to analyze.
model_architecture: Name of the kind of model to generate.
+ preprocess: How the spectrogram is processed to produce features, for
+ example 'mfcc' or 'average'.
+
+ Raises:
+ Exception: If the preprocessing mode isn't recognized.
"""
words_list = input_data.prepare_words_list(wanted_words.split(','))
model_settings = models.prepare_model_settings(
len(words_list), sample_rate, clip_duration_ms, window_size_ms,
- window_stride_ms, dct_coefficient_count)
+ window_stride_ms, feature_bin_count, preprocess)
runtime_settings = {'clip_stride_ms': clip_stride_ms}
wav_data_placeholder = tf.placeholder(tf.string, [], name='wav_data')
@@ -88,15 +93,25 @@ def create_inference_graph(wanted_words, sample_rate, clip_duration_ms,
window_size=model_settings['window_size_samples'],
stride=model_settings['window_stride_samples'],
magnitude_squared=True)
- fingerprint_input = contrib_audio.mfcc(
- spectrogram,
- decoded_sample_data.sample_rate,
- dct_coefficient_count=dct_coefficient_count)
- fingerprint_frequency_size = model_settings['dct_coefficient_count']
- fingerprint_time_size = model_settings['spectrogram_length']
- reshaped_input = tf.reshape(fingerprint_input, [
- -1, fingerprint_time_size * fingerprint_frequency_size
- ])
+
+ if preprocess == 'average':
+ fingerprint_input = tf.nn.pool(
+ tf.expand_dims(spectrogram, -1),
+ window_shape=[1, model_settings['average_window_width']],
+ strides=[1, model_settings['average_window_width']],
+ pooling_type='AVG',
+ padding='SAME')
+ elif preprocess == 'mfcc':
+ fingerprint_input = contrib_audio.mfcc(
+ spectrogram,
+ sample_rate,
+ dct_coefficient_count=model_settings['fingerprint_width'])
+ else:
+ raise Exception('Unknown preprocess mode "%s" (should be "mfcc" or'
+ ' "average")' % (preprocess))
+
+ fingerprint_size = model_settings['fingerprint_size']
+ reshaped_input = tf.reshape(fingerprint_input, [-1, fingerprint_size])
logits = models.create_model(
reshaped_input, model_settings, model_architecture, is_training=False,
@@ -110,10 +125,12 @@ def main(_):
# Create the model and load its weights.
sess = tf.InteractiveSession()
- create_inference_graph(FLAGS.wanted_words, FLAGS.sample_rate,
- FLAGS.clip_duration_ms, FLAGS.clip_stride_ms,
- FLAGS.window_size_ms, FLAGS.window_stride_ms,
- FLAGS.dct_coefficient_count, FLAGS.model_architecture)
+ create_inference_graph(
+ FLAGS.wanted_words, FLAGS.sample_rate, FLAGS.clip_duration_ms,
+ FLAGS.clip_stride_ms, FLAGS.window_size_ms, FLAGS.window_stride_ms,
+ FLAGS.feature_bin_count, FLAGS.model_architecture, FLAGS.preprocess)
+ if FLAGS.quantize:
+ tf.contrib.quantize.create_training_graph(quant_delay=0)
models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
# Turn all the variables into inline constants inside the graph and save it.
@@ -155,10 +172,11 @@ if __name__ == '__main__':
default=10.0,
help='How long the stride is between spectrogram timeslices',)
parser.add_argument(
- '--dct_coefficient_count',
+ '--feature_bin_count',
type=int,
default=40,
- help='How many bins to use for the MFCC fingerprint',)
+ help='How many bins to use for the MFCC fingerprint',
+ )
parser.add_argument(
'--start_checkpoint',
type=str,
@@ -176,5 +194,15 @@ if __name__ == '__main__':
help='Words to use (others will be added to an unknown label)',)
parser.add_argument(
'--output_file', type=str, help='Where to save the frozen graph.')
+ parser.add_argument(
+ '--quantize',
+ type=bool,
+ default=False,
+ help='Whether to train the model for eight-bit deployment')
+ parser.add_argument(
+ '--preprocess',
+ type=str,
+ default='mfcc',
+ help='Spectrogram processing mode. Can be "mfcc" or "average"')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
diff --git a/tensorflow/examples/speech_commands/freeze_test.py b/tensorflow/examples/speech_commands/freeze_test.py
index 97c6eac675..c8de6c2152 100644
--- a/tensorflow/examples/speech_commands/freeze_test.py
+++ b/tensorflow/examples/speech_commands/freeze_test.py
@@ -24,14 +24,62 @@ from tensorflow.python.platform import test
class FreezeTest(test.TestCase):
- def testCreateInferenceGraph(self):
+ def testCreateInferenceGraphWithMfcc(self):
with self.test_session() as sess:
- freeze.create_inference_graph('a,b,c,d', 16000, 1000.0, 30.0, 30.0, 10.0,
- 40, 'conv')
+ freeze.create_inference_graph(
+ wanted_words='a,b,c,d',
+ sample_rate=16000,
+ clip_duration_ms=1000.0,
+ clip_stride_ms=30.0,
+ window_size_ms=30.0,
+ window_stride_ms=10.0,
+ feature_bin_count=40,
+ model_architecture='conv',
+ preprocess='mfcc')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
+ ops = [node.op for node in sess.graph_def.node]
+ self.assertEqual(1, ops.count('Mfcc'))
+
+ def testCreateInferenceGraphWithoutMfcc(self):
+ with self.test_session() as sess:
+ freeze.create_inference_graph(
+ wanted_words='a,b,c,d',
+ sample_rate=16000,
+ clip_duration_ms=1000.0,
+ clip_stride_ms=30.0,
+ window_size_ms=30.0,
+ window_stride_ms=10.0,
+ feature_bin_count=40,
+ model_architecture='conv',
+ preprocess='average')
+ self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
+ self.assertIsNotNone(
+ sess.graph.get_tensor_by_name('decoded_sample_data:0'))
+ self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
+ ops = [node.op for node in sess.graph_def.node]
+ self.assertEqual(0, ops.count('Mfcc'))
+
+ def testFeatureBinCount(self):
+ with self.test_session() as sess:
+ freeze.create_inference_graph(
+ wanted_words='a,b,c,d',
+ sample_rate=16000,
+ clip_duration_ms=1000.0,
+ clip_stride_ms=30.0,
+ window_size_ms=30.0,
+ window_stride_ms=10.0,
+ feature_bin_count=80,
+ model_architecture='conv',
+ preprocess='average')
+ self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
+ self.assertIsNotNone(
+ sess.graph.get_tensor_by_name('decoded_sample_data:0'))
+ self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
+ ops = [node.op for node in sess.graph_def.node]
+ self.assertEqual(0, ops.count('Mfcc'))
if __name__ == '__main__':
diff --git a/tensorflow/examples/speech_commands/generate_streaming_test_wav.py b/tensorflow/examples/speech_commands/generate_streaming_test_wav.py
index 053206ae2f..9858906927 100644
--- a/tensorflow/examples/speech_commands/generate_streaming_test_wav.py
+++ b/tensorflow/examples/speech_commands/generate_streaming_test_wav.py
@@ -87,11 +87,12 @@ def main(_):
words_list = input_data.prepare_words_list(FLAGS.wanted_words.split(','))
model_settings = models.prepare_model_settings(
len(words_list), FLAGS.sample_rate, FLAGS.clip_duration_ms,
- FLAGS.window_size_ms, FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)
+ FLAGS.window_size_ms, FLAGS.window_stride_ms, FLAGS.feature_bin_count,
+ 'mfcc')
audio_processor = input_data.AudioProcessor(
'', FLAGS.data_dir, FLAGS.silence_percentage, 10,
FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
- FLAGS.testing_percentage, model_settings)
+ FLAGS.testing_percentage, model_settings, FLAGS.data_dir)
output_audio_sample_count = FLAGS.sample_rate * FLAGS.test_duration_seconds
output_audio = np.zeros((output_audio_sample_count,), dtype=np.float32)
@@ -242,10 +243,11 @@ if __name__ == '__main__':
default=10.0,
help='How long the stride is between spectrogram timeslices',)
parser.add_argument(
- '--dct_coefficient_count',
+ '--feature_bin_count',
type=int,
default=40,
- help='How many bins to use for the MFCC fingerprint',)
+ help='How many bins to use for the MFCC fingerprint',
+ )
parser.add_argument(
'--wanted_words',
type=str,
diff --git a/tensorflow/examples/speech_commands/input_data.py b/tensorflow/examples/speech_commands/input_data.py
index 63dd18457f..30f2cfa9fe 100644
--- a/tensorflow/examples/speech_commands/input_data.py
+++ b/tensorflow/examples/speech_commands/input_data.py
@@ -153,14 +153,14 @@ class AudioProcessor(object):
def __init__(self, data_url, data_dir, silence_percentage, unknown_percentage,
wanted_words, validation_percentage, testing_percentage,
- model_settings):
+ model_settings, summaries_dir):
self.data_dir = data_dir
self.maybe_download_and_extract_dataset(data_url, data_dir)
self.prepare_data_index(silence_percentage, unknown_percentage,
wanted_words, validation_percentage,
testing_percentage)
self.prepare_background_data()
- self.prepare_processing_graph(model_settings)
+ self.prepare_processing_graph(model_settings, summaries_dir)
def maybe_download_and_extract_dataset(self, data_url, dest_directory):
"""Download and extract data set tar file.
@@ -325,7 +325,7 @@ class AudioProcessor(object):
if not self.background_data:
raise Exception('No background wav files were found in ' + search_path)
- def prepare_processing_graph(self, model_settings):
+ def prepare_processing_graph(self, model_settings, summaries_dir):
"""Builds a TensorFlow graph to apply the input distortions.
Creates a graph that loads a WAVE file, decodes it, scales the volume,
@@ -341,48 +341,88 @@ class AudioProcessor(object):
- time_shift_offset_placeholder_: How much to move the clip in time.
- background_data_placeholder_: PCM sample data for background noise.
- background_volume_placeholder_: Loudness of mixed-in background.
- - mfcc_: Output 2D fingerprint of processed audio.
+ - output_: Output 2D fingerprint of processed audio.
Args:
model_settings: Information about the current model being trained.
+ summaries_dir: Path to save training summary information to.
+
+ Raises:
+ ValueError: If the preprocessing mode isn't recognized.
"""
- desired_samples = model_settings['desired_samples']
- self.wav_filename_placeholder_ = tf.placeholder(tf.string, [])
- wav_loader = io_ops.read_file(self.wav_filename_placeholder_)
- wav_decoder = contrib_audio.decode_wav(
- wav_loader, desired_channels=1, desired_samples=desired_samples)
- # Allow the audio sample's volume to be adjusted.
- self.foreground_volume_placeholder_ = tf.placeholder(tf.float32, [])
- scaled_foreground = tf.multiply(wav_decoder.audio,
- self.foreground_volume_placeholder_)
- # Shift the sample's start position, and pad any gaps with zeros.
- self.time_shift_padding_placeholder_ = tf.placeholder(tf.int32, [2, 2])
- self.time_shift_offset_placeholder_ = tf.placeholder(tf.int32, [2])
- padded_foreground = tf.pad(
- scaled_foreground,
- self.time_shift_padding_placeholder_,
- mode='CONSTANT')
- sliced_foreground = tf.slice(padded_foreground,
- self.time_shift_offset_placeholder_,
- [desired_samples, -1])
- # Mix in background noise.
- self.background_data_placeholder_ = tf.placeholder(tf.float32,
- [desired_samples, 1])
- self.background_volume_placeholder_ = tf.placeholder(tf.float32, [])
- background_mul = tf.multiply(self.background_data_placeholder_,
- self.background_volume_placeholder_)
- background_add = tf.add(background_mul, sliced_foreground)
- background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)
- # Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.
- spectrogram = contrib_audio.audio_spectrogram(
- background_clamp,
- window_size=model_settings['window_size_samples'],
- stride=model_settings['window_stride_samples'],
- magnitude_squared=True)
- self.mfcc_ = contrib_audio.mfcc(
- spectrogram,
- wav_decoder.sample_rate,
- dct_coefficient_count=model_settings['dct_coefficient_count'])
+ with tf.get_default_graph().name_scope('data'):
+ desired_samples = model_settings['desired_samples']
+ self.wav_filename_placeholder_ = tf.placeholder(
+ tf.string, [], name='wav_filename')
+ wav_loader = io_ops.read_file(self.wav_filename_placeholder_)
+ wav_decoder = contrib_audio.decode_wav(
+ wav_loader, desired_channels=1, desired_samples=desired_samples)
+ # Allow the audio sample's volume to be adjusted.
+ self.foreground_volume_placeholder_ = tf.placeholder(
+ tf.float32, [], name='foreground_volume')
+ scaled_foreground = tf.multiply(wav_decoder.audio,
+ self.foreground_volume_placeholder_)
+ # Shift the sample's start position, and pad any gaps with zeros.
+ self.time_shift_padding_placeholder_ = tf.placeholder(
+ tf.int32, [2, 2], name='time_shift_padding')
+ self.time_shift_offset_placeholder_ = tf.placeholder(
+ tf.int32, [2], name='time_shift_offset')
+ padded_foreground = tf.pad(
+ scaled_foreground,
+ self.time_shift_padding_placeholder_,
+ mode='CONSTANT')
+ sliced_foreground = tf.slice(padded_foreground,
+ self.time_shift_offset_placeholder_,
+ [desired_samples, -1])
+ # Mix in background noise.
+ self.background_data_placeholder_ = tf.placeholder(
+ tf.float32, [desired_samples, 1], name='background_data')
+ self.background_volume_placeholder_ = tf.placeholder(
+ tf.float32, [], name='background_volume')
+ background_mul = tf.multiply(self.background_data_placeholder_,
+ self.background_volume_placeholder_)
+ background_add = tf.add(background_mul, sliced_foreground)
+ background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)
+ # Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.
+ spectrogram = contrib_audio.audio_spectrogram(
+ background_clamp,
+ window_size=model_settings['window_size_samples'],
+ stride=model_settings['window_stride_samples'],
+ magnitude_squared=True)
+ tf.summary.image(
+ 'spectrogram', tf.expand_dims(spectrogram, -1), max_outputs=1)
+ # The number of buckets in each FFT row in the spectrogram will depend on
+ # how many input samples there are in each window. This can be quite
+      # large, with a 160-sample window producing 129 buckets for example. We
+ # don't need this level of detail for classification, so we often want to
+ # shrink them down to produce a smaller result. That's what this section
+ # implements. One method is to use average pooling to merge adjacent
+ # buckets, but a more sophisticated approach is to apply the MFCC
+ # algorithm to shrink the representation.
+ if model_settings['preprocess'] == 'average':
+ self.output_ = tf.nn.pool(
+ tf.expand_dims(spectrogram, -1),
+ window_shape=[1, model_settings['average_window_width']],
+ strides=[1, model_settings['average_window_width']],
+ pooling_type='AVG',
+ padding='SAME')
+ tf.summary.image('shrunk_spectrogram', self.output_, max_outputs=1)
+ elif model_settings['preprocess'] == 'mfcc':
+ self.output_ = contrib_audio.mfcc(
+ spectrogram,
+ wav_decoder.sample_rate,
+ dct_coefficient_count=model_settings['fingerprint_width'])
+ tf.summary.image(
+ 'mfcc', tf.expand_dims(self.output_, -1), max_outputs=1)
+ else:
+ raise ValueError('Unknown preprocess mode "%s" (should be "mfcc" or'
+ ' "average")' % (model_settings['preprocess']))
+
+ # Merge all the summaries and write them out to /tmp/retrain_logs (by
+ # default)
+ self.merged_summaries_ = tf.summary.merge_all(scope='data')
+ self.summary_writer_ = tf.summary.FileWriter(summaries_dir + '/data',
+ tf.get_default_graph())
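For reference, here is a minimal sketch of how the placeholders defined above are meant to be fed, mirroring what `get_data` does later in this change. `processor` (an initialized `AudioProcessor`), `sess` (an active `tf.Session`), and `'sample.wav'` are illustrative assumptions, not part of this commit:

```python
import numpy as np

# Assumed: `processor` is an AudioProcessor built with these settings and
# `sess` is a tf.Session; 'sample.wav' stands in for a real clip.
desired_samples = 16000  # one second at 16 kHz
feed = {
    processor.wav_filename_placeholder_: 'sample.wav',
    processor.foreground_volume_placeholder_: 1.0,
    processor.time_shift_padding_placeholder_: [[0, 0], [0, 0]],
    processor.time_shift_offset_placeholder_: [0, 0],
    processor.background_data_placeholder_: np.zeros([desired_samples, 1]),
    processor.background_volume_placeholder_: 0.0,  # no background mixed in
}
# Running the merged 'data' summaries alongside output_ also records the
# spectrogram/MFCC images added above.
summary, fingerprint = sess.run(
    [processor.merged_summaries_, processor.output_], feed_dict=feed)
```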
def set_size(self, mode):
"""Calculates the number of samples in the dataset partition.
@@ -418,6 +458,9 @@ class AudioProcessor(object):
Returns:
List of sample data for the transformed samples, and list of label indexes
+
+ Raises:
+ ValueError: If background samples are too short.
"""
# Pick one of the partitions to choose samples from.
candidates = self.data_index[mode]
@@ -460,6 +503,11 @@ class AudioProcessor(object):
if use_background or sample['label'] == SILENCE_LABEL:
background_index = np.random.randint(len(self.background_data))
background_samples = self.background_data[background_index]
+ if len(background_samples) <= model_settings['desired_samples']:
+ raise ValueError(
+ 'Background sample is too short! Need more than %d'
+ ' samples but only %d were found' %
+ (model_settings['desired_samples'], len(background_samples)))
background_offset = np.random.randint(
0, len(background_samples) - model_settings['desired_samples'])
background_clipped = background_samples[background_offset:(
@@ -482,7 +530,10 @@ class AudioProcessor(object):
else:
input_dict[self.foreground_volume_placeholder_] = 1
# Run the graph to produce the output audio.
- data[i - offset, :] = sess.run(self.mfcc_, feed_dict=input_dict).flatten()
+ summary, data_tensor = sess.run(
+ [self.merged_summaries_, self.output_], feed_dict=input_dict)
+ self.summary_writer_.add_summary(summary)
+ data[i - offset, :] = data_tensor.flatten()
label_index = self.word_to_index[sample['label']]
labels[i - offset] = label_index
return data, labels
diff --git a/tensorflow/examples/speech_commands/input_data_test.py b/tensorflow/examples/speech_commands/input_data_test.py
index 13f294d39d..2e551be9a2 100644
--- a/tensorflow/examples/speech_commands/input_data_test.py
+++ b/tensorflow/examples/speech_commands/input_data_test.py
@@ -25,6 +25,7 @@ import tensorflow as tf
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
from tensorflow.examples.speech_commands import input_data
+from tensorflow.examples.speech_commands import models
from tensorflow.python.platform import test
@@ -32,7 +33,7 @@ class InputDataTest(test.TestCase):
def _getWavData(self):
with self.test_session() as sess:
- sample_data = tf.zeros([1000, 2])
+ sample_data = tf.zeros([32000, 2])
wav_encoder = contrib_audio.encode_wav(sample_data, 16000)
wav_data = sess.run(wav_encoder)
return wav_data
@@ -57,9 +58,31 @@ class InputDataTest(test.TestCase):
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
- "dct_coefficient_count": 40,
+ "fingerprint_width": 40,
+ "preprocess": "mfcc",
}
+ def _runGetDataTest(self, preprocess, window_length_ms):
+ tmp_dir = self.get_temp_dir()
+ wav_dir = os.path.join(tmp_dir, "wavs")
+ os.mkdir(wav_dir)
+ self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
+ background_dir = os.path.join(wav_dir, "_background_noise_")
+ os.mkdir(background_dir)
+ wav_data = self._getWavData()
+ for i in range(10):
+ file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
+ self._saveTestWavFile(file_path, wav_data)
+ model_settings = models.prepare_model_settings(
+ 4, 16000, 1000, window_length_ms, 20, 40, preprocess)
+ with self.test_session() as sess:
+ audio_processor = input_data.AudioProcessor(
+ "", wav_dir, 10, 10, ["a", "b"], 10, 10, model_settings, tmp_dir)
+ result_data, result_labels = audio_processor.get_data(
+ 10, 0, model_settings, 0.3, 0.1, 100, "training", sess)
+ self.assertEqual(10, len(result_data))
+ self.assertEqual(10, len(result_labels))
+
def testPrepareWordsList(self):
words_list = ["a", "b"]
self.assertGreater(
@@ -76,8 +99,9 @@ class InputDataTest(test.TestCase):
def testPrepareDataIndex(self):
tmp_dir = self.get_temp_dir()
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
- audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"],
- 10, 10, self._model_settings())
+ audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10,
+ ["a", "b"], 10, 10,
+ self._model_settings(), tmp_dir)
self.assertLess(0, audio_processor.set_size("training"))
self.assertTrue("training" in audio_processor.data_index)
self.assertTrue("validation" in audio_processor.data_index)
@@ -90,7 +114,7 @@ class InputDataTest(test.TestCase):
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 0)
with self.assertRaises(Exception) as e:
_ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"], 10, 10,
- self._model_settings())
+ self._model_settings(), tmp_dir)
self.assertTrue("No .wavs found" in str(e.exception))
def testPrepareDataIndexMissing(self):
@@ -98,7 +122,7 @@ class InputDataTest(test.TestCase):
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
with self.assertRaises(Exception) as e:
_ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b", "d"], 10,
- 10, self._model_settings())
+ 10, self._model_settings(), tmp_dir)
self.assertTrue("Expected to find" in str(e.exception))
def testPrepareBackgroundData(self):
@@ -110,8 +134,9 @@ class InputDataTest(test.TestCase):
file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
- audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"],
- 10, 10, self._model_settings())
+ audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10,
+ ["a", "b"], 10, 10,
+ self._model_settings(), tmp_dir)
self.assertEqual(10, len(audio_processor.background_data))
def testLoadWavFile(self):
@@ -148,44 +173,27 @@ class InputDataTest(test.TestCase):
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
- "dct_coefficient_count": 40,
+ "fingerprint_width": 40,
+ "preprocess": "mfcc",
}
audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"],
- 10, 10, model_settings)
+ 10, 10, model_settings, tmp_dir)
self.assertIsNotNone(audio_processor.wav_filename_placeholder_)
self.assertIsNotNone(audio_processor.foreground_volume_placeholder_)
self.assertIsNotNone(audio_processor.time_shift_padding_placeholder_)
self.assertIsNotNone(audio_processor.time_shift_offset_placeholder_)
self.assertIsNotNone(audio_processor.background_data_placeholder_)
self.assertIsNotNone(audio_processor.background_volume_placeholder_)
- self.assertIsNotNone(audio_processor.mfcc_)
+ self.assertIsNotNone(audio_processor.output_)
- def testGetData(self):
- tmp_dir = self.get_temp_dir()
- wav_dir = os.path.join(tmp_dir, "wavs")
- os.mkdir(wav_dir)
- self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
- background_dir = os.path.join(wav_dir, "_background_noise_")
- os.mkdir(background_dir)
- wav_data = self._getWavData()
- for i in range(10):
- file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
- self._saveTestWavFile(file_path, wav_data)
- model_settings = {
- "desired_samples": 160,
- "fingerprint_size": 40,
- "label_count": 4,
- "window_size_samples": 100,
- "window_stride_samples": 100,
- "dct_coefficient_count": 40,
- }
- audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"],
- 10, 10, model_settings)
- with self.test_session() as sess:
- result_data, result_labels = audio_processor.get_data(
- 10, 0, model_settings, 0.3, 0.1, 100, "training", sess)
- self.assertEqual(10, len(result_data))
- self.assertEqual(10, len(result_labels))
+ def testGetDataAverage(self):
+ self._runGetDataTest("average", 10)
+
+ def testGetDataAverageLongWindow(self):
+ self._runGetDataTest("average", 30)
+
+ def testGetDataMfcc(self):
+ self._runGetDataTest("mfcc", 30)
def testGetUnprocessedData(self):
tmp_dir = self.get_temp_dir()
@@ -198,10 +206,11 @@ class InputDataTest(test.TestCase):
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
- "dct_coefficient_count": 40,
+ "fingerprint_width": 40,
+ "preprocess": "mfcc",
}
audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"],
- 10, 10, model_settings)
+ 10, 10, model_settings, tmp_dir)
result_data, result_labels = audio_processor.get_unprocessed_data(
10, model_settings, "training")
self.assertEqual(10, len(result_data))
diff --git a/tensorflow/examples/speech_commands/models.py b/tensorflow/examples/speech_commands/models.py
index ab611f414a..65ae3b1511 100644
--- a/tensorflow/examples/speech_commands/models.py
+++ b/tensorflow/examples/speech_commands/models.py
@@ -24,9 +24,21 @@ import math
import tensorflow as tf
+def _next_power_of_two(x):
+ """Calculates the smallest enclosing power of two for an input.
+
+ Args:
+ x: Positive float or integer number.
+
+ Returns:
+ Next largest power of two integer.
+ """
+ return 1 if x == 0 else 2**(int(x) - 1).bit_length()
+
+
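A few spot checks make the bit-twiddling in `_next_power_of_two` concrete; the values follow directly from `bit_length`:

```python
# (int(x) - 1).bit_length() counts the bits needed for x - 1, so 2**that
# is the smallest power of two >= x; the x == 0 guard pins that case to 1.
assert _next_power_of_two(1) == 1      # (0).bit_length() == 0 -> 2**0
assert _next_power_of_two(160) == 256  # 159 needs 8 bits -> 2**8
assert _next_power_of_two(512) == 512  # exact powers map to themselves
```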
def prepare_model_settings(label_count, sample_rate, clip_duration_ms,
- window_size_ms, window_stride_ms,
- dct_coefficient_count):
+ window_size_ms, window_stride_ms, feature_bin_count,
+ preprocess):
"""Calculates common settings needed for all models.
Args:
@@ -35,10 +47,14 @@ def prepare_model_settings(label_count, sample_rate, clip_duration_ms,
clip_duration_ms: Length of each audio clip to be analyzed.
window_size_ms: Duration of frequency analysis window.
window_stride_ms: How far to move in time between frequency windows.
- dct_coefficient_count: Number of frequency bins to use for analysis.
+ feature_bin_count: Number of frequency bins to use for analysis.
+ preprocess: How the spectrogram is processed to produce features.
Returns:
Dictionary containing common settings.
+
+ Raises:
+ ValueError: If the preprocessing mode isn't recognized.
"""
desired_samples = int(sample_rate * clip_duration_ms / 1000)
window_size_samples = int(sample_rate * window_size_ms / 1000)
@@ -48,16 +64,28 @@ def prepare_model_settings(label_count, sample_rate, clip_duration_ms,
spectrogram_length = 0
else:
spectrogram_length = 1 + int(length_minus_window / window_stride_samples)
- fingerprint_size = dct_coefficient_count * spectrogram_length
+ if preprocess == 'average':
+ fft_bin_count = 1 + (_next_power_of_two(window_size_samples) / 2)
+ average_window_width = int(math.floor(fft_bin_count / feature_bin_count))
+ fingerprint_width = int(math.ceil(fft_bin_count / average_window_width))
+ elif preprocess == 'mfcc':
+ average_window_width = -1
+ fingerprint_width = feature_bin_count
+ else:
+ raise ValueError('Unknown preprocess mode "%s" (should be "mfcc" or'
+ ' "average")' % (preprocess))
+ fingerprint_size = fingerprint_width * spectrogram_length
return {
'desired_samples': desired_samples,
'window_size_samples': window_size_samples,
'window_stride_samples': window_stride_samples,
'spectrogram_length': spectrogram_length,
- 'dct_coefficient_count': dct_coefficient_count,
+ 'fingerprint_width': fingerprint_width,
'fingerprint_size': fingerprint_size,
'label_count': label_count,
'sample_rate': sample_rate,
+ 'preprocess': preprocess,
+ 'average_window_width': average_window_width,
  }
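To make the 'average' branch concrete, this is the arithmetic for one plausible configuration (16 kHz audio, a 20 ms window, 40 feature bins; Python 3 division semantics assumed):

```python
import math

window_size_samples = int(16000 * 20 / 1000)                        # 320
fft_bin_count = 1 + (_next_power_of_two(window_size_samples) / 2)   # 257.0
average_window_width = int(math.floor(fft_bin_count / 40))          # 6
fingerprint_width = int(math.ceil(fft_bin_count / average_window_width))  # 43
```

Note that under Python 2's classic integer division `fft_bin_count / average_window_width` truncates to 42 before the ceil, so the resulting width can differ slightly between interpreters.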
@@ -106,10 +134,14 @@ def create_model(fingerprint_input, model_settings, model_architecture,
elif model_architecture == 'low_latency_svdf':
return create_low_latency_svdf_model(fingerprint_input, model_settings,
is_training, runtime_settings)
+ elif model_architecture == 'tiny_conv':
+ return create_tiny_conv_model(fingerprint_input, model_settings,
+ is_training)
else:
raise Exception('model_architecture argument "' + model_architecture +
'" not recognized, should be one of "single_fc", "conv",' +
- ' "low_latency_conv, or "low_latency_svdf"')
+                    ' "low_latency_conv", "low_latency_svdf",' +
+ ' or "tiny_conv"')
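A short usage sketch of the new architecture name, consistent with the tests added later in this change (the placeholder name is illustrative):

```python
import tensorflow as tf

settings = prepare_model_settings(
    label_count=4, sample_rate=16000, clip_duration_ms=1000,
    window_size_ms=20, window_stride_ms=10, feature_bin_count=40,
    preprocess='mfcc')
fingerprint_input = tf.placeholder(
    tf.float32, [None, settings['fingerprint_size']], name='fingerprint_input')
logits, dropout_prob = create_model(
    fingerprint_input, settings, 'tiny_conv', is_training=True)
```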
def load_variables_from_checkpoint(sess, start_checkpoint):
@@ -152,9 +184,12 @@ def create_single_fc_model(fingerprint_input, model_settings, is_training):
dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
fingerprint_size = model_settings['fingerprint_size']
label_count = model_settings['label_count']
- weights = tf.Variable(
- tf.truncated_normal([fingerprint_size, label_count], stddev=0.001))
- bias = tf.Variable(tf.zeros([label_count]))
+ weights = tf.get_variable(
+ name='weights',
+ initializer=tf.truncated_normal_initializer(stddev=0.001),
+ shape=[fingerprint_size, label_count])
+ bias = tf.get_variable(
+ name='bias', initializer=tf.zeros_initializer, shape=[label_count])
logits = tf.matmul(fingerprint_input, weights) + bias
if is_training:
return logits, dropout_prob
@@ -212,18 +247,21 @@ def create_conv_model(fingerprint_input, model_settings, is_training):
"""
if is_training:
dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
- input_frequency_size = model_settings['dct_coefficient_count']
+ input_frequency_size = model_settings['fingerprint_width']
input_time_size = model_settings['spectrogram_length']
fingerprint_4d = tf.reshape(fingerprint_input,
[-1, input_time_size, input_frequency_size, 1])
first_filter_width = 8
first_filter_height = 20
first_filter_count = 64
- first_weights = tf.Variable(
- tf.truncated_normal(
- [first_filter_height, first_filter_width, 1, first_filter_count],
- stddev=0.01))
- first_bias = tf.Variable(tf.zeros([first_filter_count]))
+ first_weights = tf.get_variable(
+ name='first_weights',
+ initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[first_filter_height, first_filter_width, 1, first_filter_count])
+ first_bias = tf.get_variable(
+ name='first_bias',
+ initializer=tf.zeros_initializer,
+ shape=[first_filter_count])
first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [1, 1, 1, 1],
'SAME') + first_bias
first_relu = tf.nn.relu(first_conv)
@@ -235,14 +273,17 @@ def create_conv_model(fingerprint_input, model_settings, is_training):
second_filter_width = 4
second_filter_height = 10
second_filter_count = 64
- second_weights = tf.Variable(
- tf.truncated_normal(
- [
- second_filter_height, second_filter_width, first_filter_count,
- second_filter_count
- ],
- stddev=0.01))
- second_bias = tf.Variable(tf.zeros([second_filter_count]))
+ second_weights = tf.get_variable(
+ name='second_weights',
+ initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[
+ second_filter_height, second_filter_width, first_filter_count,
+ second_filter_count
+ ])
+ second_bias = tf.get_variable(
+ name='second_bias',
+ initializer=tf.zeros_initializer,
+ shape=[second_filter_count])
second_conv = tf.nn.conv2d(max_pool, second_weights, [1, 1, 1, 1],
'SAME') + second_bias
second_relu = tf.nn.relu(second_conv)
@@ -259,10 +300,14 @@ def create_conv_model(fingerprint_input, model_settings, is_training):
flattened_second_conv = tf.reshape(second_dropout,
[-1, second_conv_element_count])
label_count = model_settings['label_count']
- final_fc_weights = tf.Variable(
- tf.truncated_normal(
- [second_conv_element_count, label_count], stddev=0.01))
- final_fc_bias = tf.Variable(tf.zeros([label_count]))
+ final_fc_weights = tf.get_variable(
+ name='final_fc_weights',
+      initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[second_conv_element_count, label_count])
+ final_fc_bias = tf.get_variable(
+ name='final_fc_bias',
+ initializer=tf.zeros_initializer,
+ shape=[label_count])
final_fc = tf.matmul(flattened_second_conv, final_fc_weights) + final_fc_bias
if is_training:
return final_fc, dropout_prob
@@ -318,7 +363,7 @@ def create_low_latency_conv_model(fingerprint_input, model_settings,
"""
if is_training:
dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
- input_frequency_size = model_settings['dct_coefficient_count']
+ input_frequency_size = model_settings['fingerprint_width']
input_time_size = model_settings['spectrogram_length']
fingerprint_4d = tf.reshape(fingerprint_input,
[-1, input_time_size, input_frequency_size, 1])
@@ -327,11 +372,14 @@ def create_low_latency_conv_model(fingerprint_input, model_settings,
first_filter_count = 186
first_filter_stride_x = 1
first_filter_stride_y = 1
- first_weights = tf.Variable(
- tf.truncated_normal(
- [first_filter_height, first_filter_width, 1, first_filter_count],
- stddev=0.01))
- first_bias = tf.Variable(tf.zeros([first_filter_count]))
+ first_weights = tf.get_variable(
+ name='first_weights',
+ initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[first_filter_height, first_filter_width, 1, first_filter_count])
+ first_bias = tf.get_variable(
+ name='first_bias',
+ initializer=tf.zeros_initializer,
+ shape=[first_filter_count])
first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [
1, first_filter_stride_y, first_filter_stride_x, 1
], 'VALID') + first_bias
@@ -351,30 +399,42 @@ def create_low_latency_conv_model(fingerprint_input, model_settings,
flattened_first_conv = tf.reshape(first_dropout,
[-1, first_conv_element_count])
first_fc_output_channels = 128
- first_fc_weights = tf.Variable(
- tf.truncated_normal(
- [first_conv_element_count, first_fc_output_channels], stddev=0.01))
- first_fc_bias = tf.Variable(tf.zeros([first_fc_output_channels]))
+ first_fc_weights = tf.get_variable(
+ name='first_fc_weights',
+ initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[first_conv_element_count, first_fc_output_channels])
+ first_fc_bias = tf.get_variable(
+ name='first_fc_bias',
+ initializer=tf.zeros_initializer,
+ shape=[first_fc_output_channels])
first_fc = tf.matmul(flattened_first_conv, first_fc_weights) + first_fc_bias
if is_training:
second_fc_input = tf.nn.dropout(first_fc, dropout_prob)
else:
second_fc_input = first_fc
second_fc_output_channels = 128
- second_fc_weights = tf.Variable(
- tf.truncated_normal(
- [first_fc_output_channels, second_fc_output_channels], stddev=0.01))
- second_fc_bias = tf.Variable(tf.zeros([second_fc_output_channels]))
+ second_fc_weights = tf.get_variable(
+ name='second_fc_weights',
+ initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[first_fc_output_channels, second_fc_output_channels])
+ second_fc_bias = tf.get_variable(
+ name='second_fc_bias',
+ initializer=tf.zeros_initializer,
+ shape=[second_fc_output_channels])
second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
if is_training:
final_fc_input = tf.nn.dropout(second_fc, dropout_prob)
else:
final_fc_input = second_fc
label_count = model_settings['label_count']
- final_fc_weights = tf.Variable(
- tf.truncated_normal(
- [second_fc_output_channels, label_count], stddev=0.01))
- final_fc_bias = tf.Variable(tf.zeros([label_count]))
+ final_fc_weights = tf.get_variable(
+ name='final_fc_weights',
+ initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[second_fc_output_channels, label_count])
+ final_fc_bias = tf.get_variable(
+ name='final_fc_bias',
+ initializer=tf.zeros_initializer,
+ shape=[label_count])
final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
if is_training:
return final_fc, dropout_prob
@@ -422,7 +482,7 @@ def create_low_latency_svdf_model(fingerprint_input, model_settings,
Args:
fingerprint_input: TensorFlow node that will output audio feature vectors.
The node is expected to produce a 2D Tensor of shape:
- [batch, model_settings['dct_coefficient_count'] *
+ [batch, model_settings['fingerprint_width'] *
model_settings['spectrogram_length']]
with the features corresponding to the same time slot arranged contiguously,
and the oldest slot at index [:, 0], and newest at [:, -1].
@@ -440,7 +500,7 @@ def create_low_latency_svdf_model(fingerprint_input, model_settings,
if is_training:
dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
- input_frequency_size = model_settings['dct_coefficient_count']
+ input_frequency_size = model_settings['fingerprint_width']
input_time_size = model_settings['spectrogram_length']
# Validation.
@@ -462,8 +522,11 @@ def create_low_latency_svdf_model(fingerprint_input, model_settings,
num_filters = rank * num_units
# Create the runtime memory: [num_filters, batch, input_time_size]
batch = 1
- memory = tf.Variable(tf.zeros([num_filters, batch, input_time_size]),
- trainable=False, name='runtime-memory')
+ memory = tf.get_variable(
+ initializer=tf.zeros_initializer,
+ shape=[num_filters, batch, input_time_size],
+ trainable=False,
+ name='runtime-memory')
# Determine the number of new frames in the input, such that we only operate
# on those. For training we do not use the memory, and thus use all frames
# provided in the input.
@@ -483,8 +546,10 @@ def create_low_latency_svdf_model(fingerprint_input, model_settings,
new_fingerprint_input = tf.expand_dims(new_fingerprint_input, 2)
# Create the frequency filters.
- weights_frequency = tf.Variable(
- tf.truncated_normal([input_frequency_size, num_filters], stddev=0.01))
+ weights_frequency = tf.get_variable(
+ name='weights_frequency',
+ initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[input_frequency_size, num_filters])
# Expand to add input channels dimensions.
# weights_frequency: [input_frequency_size, 1, num_filters]
weights_frequency = tf.expand_dims(weights_frequency, 1)
@@ -506,8 +571,10 @@ def create_low_latency_svdf_model(fingerprint_input, model_settings,
activations_time = new_memory
# Create the time filters.
- weights_time = tf.Variable(
- tf.truncated_normal([num_filters, input_time_size], stddev=0.01))
+ weights_time = tf.get_variable(
+ name='weights_time',
+ initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[num_filters, input_time_size])
# Apply the time filter on the outputs of the feature filters.
# weights_time: [num_filters, input_time_size, 1]
# outputs: [num_filters, batch, 1]
@@ -524,7 +591,8 @@ def create_low_latency_svdf_model(fingerprint_input, model_settings,
units_output = tf.transpose(units_output)
  # Apply bias.
- bias = tf.Variable(tf.zeros([num_units]))
+ bias = tf.get_variable(
+ name='bias', initializer=tf.zeros_initializer, shape=[num_units])
first_bias = tf.nn.bias_add(units_output, bias)
# Relu.
@@ -536,31 +604,135 @@ def create_low_latency_svdf_model(fingerprint_input, model_settings,
first_dropout = first_relu
first_fc_output_channels = 256
- first_fc_weights = tf.Variable(
- tf.truncated_normal([num_units, first_fc_output_channels], stddev=0.01))
- first_fc_bias = tf.Variable(tf.zeros([first_fc_output_channels]))
+ first_fc_weights = tf.get_variable(
+ name='first_fc_weights',
+ initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[num_units, first_fc_output_channels])
+ first_fc_bias = tf.get_variable(
+ name='first_fc_bias',
+ initializer=tf.zeros_initializer,
+ shape=[first_fc_output_channels])
first_fc = tf.matmul(first_dropout, first_fc_weights) + first_fc_bias
if is_training:
second_fc_input = tf.nn.dropout(first_fc, dropout_prob)
else:
second_fc_input = first_fc
second_fc_output_channels = 256
- second_fc_weights = tf.Variable(
- tf.truncated_normal(
- [first_fc_output_channels, second_fc_output_channels], stddev=0.01))
- second_fc_bias = tf.Variable(tf.zeros([second_fc_output_channels]))
+ second_fc_weights = tf.get_variable(
+ name='second_fc_weights',
+ initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[first_fc_output_channels, second_fc_output_channels])
+ second_fc_bias = tf.get_variable(
+ name='second_fc_bias',
+ initializer=tf.zeros_initializer,
+ shape=[second_fc_output_channels])
second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
if is_training:
final_fc_input = tf.nn.dropout(second_fc, dropout_prob)
else:
final_fc_input = second_fc
label_count = model_settings['label_count']
- final_fc_weights = tf.Variable(
- tf.truncated_normal(
- [second_fc_output_channels, label_count], stddev=0.01))
- final_fc_bias = tf.Variable(tf.zeros([label_count]))
+ final_fc_weights = tf.get_variable(
+ name='final_fc_weights',
+      initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[second_fc_output_channels, label_count])
+ final_fc_bias = tf.get_variable(
+ name='final_fc_bias',
+ initializer=tf.zeros_initializer,
+ shape=[label_count])
final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
if is_training:
return final_fc, dropout_prob
else:
return final_fc
+
+
+def create_tiny_conv_model(fingerprint_input, model_settings, is_training):
+ """Builds a convolutional model aimed at microcontrollers.
+
+ Devices like DSPs and microcontrollers can have very small amounts of
+ memory and limited processing power. This model is designed to use less
+  than 20KB of working RAM and to fit within 32KB of read-only (flash) memory.
+
+ Here's the layout of the graph:
+
+ (fingerprint_input)
+ v
+ [Conv2D]<-(weights)
+ v
+ [BiasAdd]<-(bias)
+ v
+ [Relu]
+ v
+ [MatMul]<-(weights)
+ v
+ [BiasAdd]<-(bias)
+ v
+
+ This doesn't produce particularly accurate results, but it's designed to be
+ used as the first stage of a pipeline, running on a low-energy piece of
+  hardware that can always be on, waking higher-power chips only when a
+  possible utterance has been found, so that more accurate analysis can be
+  done.
+
+ During training, a dropout node is introduced after the relu, controlled by a
+ placeholder.
+
+ Args:
+ fingerprint_input: TensorFlow node that will output audio feature vectors.
+ model_settings: Dictionary of information about the model.
+ is_training: Whether the model is going to be used for training.
+
+ Returns:
+ TensorFlow node outputting logits results, and optionally a dropout
+ placeholder.
+ """
+ if is_training:
+ dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
+ input_frequency_size = model_settings['fingerprint_width']
+ input_time_size = model_settings['spectrogram_length']
+ fingerprint_4d = tf.reshape(fingerprint_input,
+ [-1, input_time_size, input_frequency_size, 1])
+ first_filter_width = 8
+ first_filter_height = 10
+ first_filter_count = 8
+ first_weights = tf.get_variable(
+ name='first_weights',
+ initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[first_filter_height, first_filter_width, 1, first_filter_count])
+ first_bias = tf.get_variable(
+ name='first_bias',
+ initializer=tf.zeros_initializer,
+ shape=[first_filter_count])
+ first_conv_stride_x = 2
+ first_conv_stride_y = 2
+ first_conv = tf.nn.conv2d(fingerprint_4d, first_weights,
+ [1, first_conv_stride_y, first_conv_stride_x, 1],
+ 'SAME') + first_bias
+ first_relu = tf.nn.relu(first_conv)
+ if is_training:
+ first_dropout = tf.nn.dropout(first_relu, dropout_prob)
+ else:
+ first_dropout = first_relu
+ first_dropout_shape = first_dropout.get_shape()
+ first_dropout_output_width = first_dropout_shape[2]
+ first_dropout_output_height = first_dropout_shape[1]
+ first_dropout_element_count = int(
+ first_dropout_output_width * first_dropout_output_height *
+ first_filter_count)
+ flattened_first_dropout = tf.reshape(first_dropout,
+ [-1, first_dropout_element_count])
+ label_count = model_settings['label_count']
+ final_fc_weights = tf.get_variable(
+ name='final_fc_weights',
+ initializer=tf.truncated_normal_initializer(stddev=0.01),
+ shape=[first_dropout_element_count, label_count])
+ final_fc_bias = tf.get_variable(
+ name='final_fc_bias',
+ initializer=tf.zeros_initializer,
+ shape=[label_count])
+ final_fc = (
+ tf.matmul(flattened_first_dropout, final_fc_weights) + final_fc_bias)
+ if is_training:
+ return final_fc, dropout_prob
+ else:
+ return final_fc
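A rough shape and parameter walk-through for `tiny_conv`, under the same assumed settings as above (99 spectrogram frames of 40 MFCC bins, 4 labels); these are illustrative back-of-the-envelope figures, not measurements:

```python
import math

input_time_size = 1 + (16000 - 320) // 160             # 99 frames
input_frequency_size = 40                              # fingerprint_width
first_filter_count = 8
conv_out_h = math.ceil(input_time_size / 2)            # stride 2, SAME -> 50
conv_out_w = math.ceil(input_frequency_size / 2)       # -> 20
flattened = conv_out_h * conv_out_w * first_filter_count   # 8000
label_count = 4
conv_params = 10 * 8 * 1 * first_filter_count + first_filter_count  # 648
fc_params = flattened * label_count + label_count                   # 32004
total = conv_params + fc_params
# ~32.7K parameters, i.e. roughly 32KB at one byte per weight, which is
# in the same ballpark as the flash budget mentioned in the docstring.
print(total)
```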
diff --git a/tensorflow/examples/speech_commands/models_test.py b/tensorflow/examples/speech_commands/models_test.py
index 80c795367f..0c373967ed 100644
--- a/tensorflow/examples/speech_commands/models_test.py
+++ b/tensorflow/examples/speech_commands/models_test.py
@@ -26,12 +26,29 @@ from tensorflow.python.platform import test
class ModelsTest(test.TestCase):
+ def _modelSettings(self):
+ return models.prepare_model_settings(
+ label_count=10,
+ sample_rate=16000,
+ clip_duration_ms=1000,
+ window_size_ms=20,
+ window_stride_ms=10,
+ feature_bin_count=40,
+ preprocess="mfcc")
+
def testPrepareModelSettings(self):
self.assertIsNotNone(
- models.prepare_model_settings(10, 16000, 1000, 20, 10, 40))
+ models.prepare_model_settings(
+ label_count=10,
+ sample_rate=16000,
+ clip_duration_ms=1000,
+ window_size_ms=20,
+ window_stride_ms=10,
+ feature_bin_count=40,
+ preprocess="mfcc"))
def testCreateModelConvTraining(self):
- model_settings = models.prepare_model_settings(10, 16000, 1000, 20, 10, 40)
+ model_settings = self._modelSettings()
with self.test_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_prob = models.create_model(fingerprint_input,
@@ -42,7 +59,7 @@ class ModelsTest(test.TestCase):
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))
def testCreateModelConvInference(self):
- model_settings = models.prepare_model_settings(10, 16000, 1000, 20, 10, 40)
+ model_settings = self._modelSettings()
with self.test_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits = models.create_model(fingerprint_input, model_settings, "conv",
@@ -51,7 +68,7 @@ class ModelsTest(test.TestCase):
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
def testCreateModelLowLatencyConvTraining(self):
- model_settings = models.prepare_model_settings(10, 16000, 1000, 20, 10, 40)
+ model_settings = self._modelSettings()
with self.test_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_prob = models.create_model(
@@ -62,7 +79,7 @@ class ModelsTest(test.TestCase):
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))
def testCreateModelFullyConnectedTraining(self):
- model_settings = models.prepare_model_settings(10, 16000, 1000, 20, 10, 40)
+ model_settings = self._modelSettings()
with self.test_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_prob = models.create_model(
@@ -73,7 +90,7 @@ class ModelsTest(test.TestCase):
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))
def testCreateModelBadArchitecture(self):
- model_settings = models.prepare_model_settings(10, 16000, 1000, 20, 10, 40)
+ model_settings = self._modelSettings()
with self.test_session():
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
with self.assertRaises(Exception) as e:
@@ -81,6 +98,17 @@ class ModelsTest(test.TestCase):
"bad_architecture", True)
self.assertTrue("not recognized" in str(e.exception))
+ def testCreateModelTinyConvTraining(self):
+ model_settings = self._modelSettings()
+ with self.test_session() as sess:
+ fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
+ logits, dropout_prob = models.create_model(
+ fingerprint_input, model_settings, "tiny_conv", True)
+ self.assertIsNotNone(logits)
+ self.assertIsNotNone(dropout_prob)
+ self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
+ self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/examples/speech_commands/train.py b/tensorflow/examples/speech_commands/train.py
index fc28eb0631..eca34f8812 100644
--- a/tensorflow/examples/speech_commands/train.py
+++ b/tensorflow/examples/speech_commands/train.py
@@ -98,12 +98,12 @@ def main(_):
model_settings = models.prepare_model_settings(
len(input_data.prepare_words_list(FLAGS.wanted_words.split(','))),
FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
- FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)
+ FLAGS.window_stride_ms, FLAGS.feature_bin_count, FLAGS.preprocess)
audio_processor = input_data.AudioProcessor(
- FLAGS.data_url, FLAGS.data_dir, FLAGS.silence_percentage,
- FLAGS.unknown_percentage,
+ FLAGS.data_url, FLAGS.data_dir,
+ FLAGS.silence_percentage, FLAGS.unknown_percentage,
FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
- FLAGS.testing_percentage, model_settings)
+ FLAGS.testing_percentage, model_settings, FLAGS.summaries_dir)
fingerprint_size = model_settings['fingerprint_size']
label_count = model_settings['label_count']
time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)
@@ -122,8 +122,25 @@ def main(_):
'lists, but are %d and %d long instead' % (len(training_steps_list),
len(learning_rates_list)))
- fingerprint_input = tf.placeholder(
+ input_placeholder = tf.placeholder(
tf.float32, [None, fingerprint_size], name='fingerprint_input')
+ if FLAGS.quantize:
+ # TODO(petewarden): These values have been derived from the observed ranges
+ # of spectrogram and MFCC inputs. If the preprocessing pipeline changes,
+ # they may need to be updated.
+ if FLAGS.preprocess == 'average':
+ fingerprint_min = 0.0
+ fingerprint_max = 2048.0
+ elif FLAGS.preprocess == 'mfcc':
+ fingerprint_min = -247.0
+ fingerprint_max = 30.0
+ else:
+ raise Exception('Unknown preprocess mode "%s" (should be "mfcc" or'
+ ' "average")' % (FLAGS.preprocess))
+ fingerprint_input = tf.fake_quant_with_min_max_args(
+ input_placeholder, fingerprint_min, fingerprint_max)
+ else:
+ fingerprint_input = input_placeholder
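A minimal sketch (assuming TF 1.x) of what the fake-quantization node does to values: inputs are clamped to `[min, max]` and snapped onto the 256-level grid used for eight-bit inference:

```python
import tensorflow as tf

x = tf.constant([-300.0, 0.0, 29.9, 100.0])
# Using the MFCC range chosen above; out-of-range values are clamped and
# in-range values are rounded to the nearest of 256 levels.
q = tf.fake_quant_with_min_max_args(x, min=-247.0, max=30.0)
with tf.Session() as sess:
  print(sess.run(q))
```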
logits, dropout_prob = models.create_model(
fingerprint_input,
@@ -146,7 +163,8 @@ def main(_):
with tf.name_scope('cross_entropy'):
cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy(
labels=ground_truth_input, logits=logits)
- tf.summary.scalar('cross_entropy', cross_entropy_mean)
+ if FLAGS.quantize:
+ tf.contrib.quantize.create_training_graph(quant_delay=0)
with tf.name_scope('train'), tf.control_dependencies(control_dependencies):
learning_rate_input = tf.placeholder(
tf.float32, [], name='learning_rate_input')
@@ -157,7 +175,9 @@ def main(_):
confusion_matrix = tf.confusion_matrix(
ground_truth_input, predicted_indices, num_classes=label_count)
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
- tf.summary.scalar('accuracy', evaluation_step)
+ with tf.get_default_graph().name_scope('eval'):
+ tf.summary.scalar('cross_entropy', cross_entropy_mean)
+ tf.summary.scalar('accuracy', evaluation_step)
global_step = tf.train.get_or_create_global_step()
increment_global_step = tf.assign(global_step, global_step + 1)
@@ -165,7 +185,7 @@ def main(_):
saver = tf.train.Saver(tf.global_variables())
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
- merged_summaries = tf.summary.merge_all()
+ merged_summaries = tf.summary.merge_all(scope='eval')
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')
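The `scope='eval'` filter matters here: it keeps the per-input 'data' summaries created in input_data.py out of the training merge, so each `sess.run` of `merged_summaries` stays cheap. A small sketch of the filtering behavior, assuming TF 1.x semantics where `merge_all` matches collected summaries by name scope:

```python
import tensorflow as tf

with tf.get_default_graph().name_scope('eval'):
  tf.summary.scalar('accuracy', tf.constant(0.5))
with tf.get_default_graph().name_scope('data'):
  tf.summary.scalar('volume', tf.constant(1.0))

eval_only = tf.summary.merge_all(scope='eval')  # picks up 'eval/*' only
```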
@@ -207,8 +227,11 @@ def main(_):
# Run the graph with this batch of training data.
train_summary, train_accuracy, cross_entropy_value, _, _ = sess.run(
[
- merged_summaries, evaluation_step, cross_entropy_mean, train_step,
- increment_global_step
+ merged_summaries,
+ evaluation_step,
+ cross_entropy_mean,
+ train_step,
+ increment_global_step,
],
feed_dict={
fingerprint_input: train_fingerprints,
@@ -364,10 +387,11 @@ if __name__ == '__main__':
default=10.0,
      help='How far to move in time between spectrogram timeslices.',)
parser.add_argument(
- '--dct_coefficient_count',
+ '--feature_bin_count',
type=int,
default=40,
- help='How many bins to use for the MFCC fingerprint',)
+ help='How many bins to use for the MFCC fingerprint',
+ )
parser.add_argument(
'--how_many_training_steps',
type=str,
@@ -423,6 +447,16 @@ if __name__ == '__main__':
type=bool,
default=False,
help='Whether to check for invalid numbers during processing')
+ parser.add_argument(
+ '--quantize',
+ type=bool,
+ default=False,
+ help='Whether to train the model for eight-bit deployment')
+ parser.add_argument(
+ '--preprocess',
+ type=str,
+ default='mfcc',
+ help='Spectrogram processing mode. Can be "mfcc" or "average"')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
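Taken together, the renamed and new flags above mean an eight-bit, pooled-spectrogram run can be requested with, for example, `python train.py --preprocess=average --feature_bin_count=40 --quantize=True` (an illustrative invocation; all three flags are defined in this file).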
diff --git a/tensorflow/go/attrs_test.go b/tensorflow/go/attrs_test.go
index 35b0cb352e..ea8af221ae 100644
--- a/tensorflow/go/attrs_test.go
+++ b/tensorflow/go/attrs_test.go
@@ -28,7 +28,7 @@ func TestOperationAttrs(t *testing.T) {
i := 0
makeConst := func(v interface{}) Output {
op, err := Const(g, fmt.Sprintf("const/%d/%+v", i, v), v)
- i += 1
+ i++
if err != nil {
t.Fatal(err)
}
@@ -71,6 +71,7 @@ func TestOperationAttrs(t *testing.T) {
"boundaries": []float32(nil),
},
},
+ /* TODO(ashankar): debug this issue and add it back later.
{
Name: "list(type),list(shape)",
Type: "InfeedEnqueueTuple",
@@ -111,6 +112,7 @@ func TestOperationAttrs(t *testing.T) {
"device_ordinal": int64(0),
},
},
+ */
{
Name: "list(int),int",
Type: "StringToHashBucketStrong",
diff --git a/tensorflow/go/graph.go b/tensorflow/go/graph.go
index 08943a527c..32a77550ee 100644
--- a/tensorflow/go/graph.go
+++ b/tensorflow/go/graph.go
@@ -177,7 +177,14 @@ type OpSpec struct {
// being added.
ControlDependencies []*Operation
- // Other possible fields: Device, ColocateWith.
+ // The device on which the operation should be executed.
+ // If omitted, an appropriate device will automatically be selected.
+ //
+	// For example, if set to "/device:GPU:0", then the operation will
+ // execute on GPU #0.
+ Device string
+
+ // Other possible fields: ColocateWith.
}
// AddOperation adds an operation to g.
@@ -225,6 +232,11 @@ func (g *Graph) AddOperation(args OpSpec) (*Operation, error) {
return nil, fmt.Errorf("%v (memory will be leaked)", err)
}
}
+ if len(args.Device) > 0 {
+ cdevice := C.CString(args.Device)
+ C.TF_SetDevice(cdesc, cdevice)
+ C.free(unsafe.Pointer(cdevice))
+ }
c := C.TF_FinishOperation(cdesc, status.c)
if err := status.Err(); err != nil {
return nil, err
diff --git a/tensorflow/go/op/scope.go b/tensorflow/go/op/scope.go
index 13de4294dc..ac39808d83 100644
--- a/tensorflow/go/op/scope.go
+++ b/tensorflow/go/op/scope.go
@@ -37,6 +37,7 @@ type Scope struct {
namemap map[string]int
namespace string
controlDependencies []*tf.Operation
+ device string
err *scopeErr
}
@@ -82,6 +83,7 @@ func (s *Scope) AddOperation(args tf.OpSpec) *tf.Operation {
args.Name = s.namespace + "/" + args.Name
}
args.ControlDependencies = append(args.ControlDependencies, s.controlDependencies...)
+ args.Device = s.device
op, err := s.graph.AddOperation(args)
if err != nil {
s.UpdateErr(args.Type, err)
@@ -98,10 +100,12 @@ func (s *Scope) SubScope(namespace string) *Scope {
namespace = s.namespace + "/" + namespace
}
return &Scope{
- graph: s.graph,
- namemap: make(map[string]int),
- namespace: namespace,
- err: s.err,
+ graph: s.graph,
+ namemap: make(map[string]int),
+ namespace: namespace,
+ controlDependencies: s.controlDependencies,
+ device: s.device,
+ err: s.err,
}
}
@@ -123,6 +127,25 @@ func (s *Scope) WithControlDependencies(ops ...*tf.Operation) *Scope {
namemap: s.namemap,
namespace: s.namespace,
controlDependencies: deps,
+ device: s.device,
+ err: s.err,
+ }
+}
+
+// WithDevice returns a new Scope which will cause all operations added to the
+// graph to execute on devices that match the provided device specification.
+//
+// For example, WithDevice("/device:GPU:0") will cause operations added to
+// the graph to execute on GPU #0.
+//
+// An empty string removes any device restrictions.
+func (s *Scope) WithDevice(device string) *Scope {
+ return &Scope{
+ graph: s.graph,
+ namemap: s.namemap,
+ namespace: s.namespace,
+ controlDependencies: s.controlDependencies,
+ device: device,
err: s.err,
}
}
diff --git a/tensorflow/go/op/scope_test.go b/tensorflow/go/op/scope_test.go
index b58a61de98..be7b0ad892 100644
--- a/tensorflow/go/op/scope_test.go
+++ b/tensorflow/go/op/scope_test.go
@@ -112,6 +112,21 @@ func TestControlDependencies(t *testing.T) {
}
}
+func TestDevice(t *testing.T) {
+ s := NewScope()
+ matrix := Const(s, [][]float32{{3.0}})
+ s = s.WithDevice("/device:GPU:0")
+ square := MatMul(s.SubScope("square"), matrix, matrix)
+ s = s.WithDevice("")
+ cube := MatMul(s.SubScope("cube"), square, matrix)
+ if got, want := square.Op.Device(), "/device:GPU:0"; got != want {
+ t.Errorf("Got %q, want %q", got, want)
+ }
+ if got, want := cube.Op.Device(), ""; got != want {
+ t.Errorf("Got %q, want %q", got, want)
+ }
+}
+
func TestScopeFinalize(t *testing.T) {
var (
root = NewScope()
diff --git a/tensorflow/go/op/wrappers.go b/tensorflow/go/op/wrappers.go
index 7f1f0970a6..f49e1cecaf 100644
--- a/tensorflow/go/op/wrappers.go
+++ b/tensorflow/go/op/wrappers.go
@@ -3045,30 +3045,176 @@ func UnravelIndex(scope *Scope, indices tf.Output, dims tf.Output) (output tf.Ou
return op.Output(0)
}
-// Computes gradients for SparseSegmentSqrtN.
+// Subtracts `v` from specified rows of `x`.
//
-// Returns tensor "output" with same shape as grad, except for dimension 0 whose
-// value is output_dim0.
+// Computes y = x; y[i, :] -= v; return y.
//
// Arguments:
-// grad: gradient propagated to the SparseSegmentSqrtN op.
-// indices: indices passed to the corresponding SparseSegmentSqrtN op.
-// segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.
-// output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.
-func SparseSegmentSqrtNGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
+// x: A `Tensor` of type T.
+// i: A vector. Indices into the left-most dimension of `x`.
+// v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
+//
+// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
+func InplaceSub(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SparseSegmentSqrtNGrad",
+ Type: "InplaceSub",
Input: []tf.Input{
- grad, indices, segment_ids, output_dim0,
+ x, i, v,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
+// Updates specified rows with values in `v`.
+//
+// Computes `x[i, :] = v; return x`.
+//
+// Arguments:
+// x: A tensor of type `T`.
+// i: A vector. Indices into the left-most dimension of `x`.
+// v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
+//
+// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
+func InplaceUpdate(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "InplaceUpdate",
+ Input: []tf.Input{
+ x, i, v,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// Makes a copy of `x`.
+//
+// Arguments:
+// x: The source tensor of type `T`.
+//
+// Returns y: A `Tensor` of type `T`. A copy of `x`. Guaranteed that `y`
+// is not an alias of `x`.
+func DeepCopy(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "DeepCopy",
+ Input: []tf.Input{
+ x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// PackAttr is an optional argument to Pack.
+type PackAttr func(optionalAttr)
+
+// PackAxis sets the optional axis attribute to value.
+//
+// value: Dimension along which to pack. Negative values wrap around, so the
+// valid range is `[-(R+1), R+1)`.
+// If not specified, defaults to 0
+func PackAxis(value int64) PackAttr {
+ return func(m optionalAttr) {
+ m["axis"] = value
+ }
+}
+
+// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
+//
+// Packs the `N` tensors in `values` into a tensor with rank one higher than each
+// tensor in `values`, by packing them along the `axis` dimension.
+// Given a list of tensors of shape `(A, B, C)`;
+//
+// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
+// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
+// Etc.
+//
+// For example:
+//
+// ```
+// # 'x' is [1, 4]
+// # 'y' is [2, 5]
+// # 'z' is [3, 6]
+// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
+// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
+// ```
+//
+// This is the opposite of `unpack`.
+//
+// Arguments:
+// values: Must be of same shape and type.
+//
+// Returns The packed tensor.
+func Pack(scope *Scope, values []tf.Output, optional ...PackAttr) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "Pack",
+ Input: []tf.Input{
+ tf.OutputList(values),
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// Concatenates a list of `N` tensors along the first dimension.
+//
+// The input tensors are all required to have size 1 in the first dimension.
+//
+// For example:
+//
+// ```
+// # 'x' is [[1, 4]]
+// # 'y' is [[2, 5]]
+// # 'z' is [[3, 6]]
+// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
+// ```
+//
+// The difference between concat and parallel_concat is that concat requires all
+// of the inputs to be computed before the operation will begin, but doesn't
+// require that the input shapes be known during graph construction. Parallel
+// concat will copy pieces of the input into the output as they become
+// available; in some situations this can provide a performance benefit.
+//
+// Arguments:
+// values: Tensors to be concatenated. All must have size 1 in the first dimension
+// and same shape.
+// shape: the final shape of the result; should be equal to the shapes of any input
+// but with the number of input values in the first dimension.
+//
+// Returns The concatenated tensor.
+func ParallelConcat(scope *Scope, values []tf.Output, shape tf.Shape) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"shape": shape}
+ opspec := tf.OpSpec{
+ Type: "ParallelConcat",
+ Input: []tf.Input{
+ tf.OutputList(values),
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Computes the mean along sparse segments of a tensor.
//
// Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
@@ -3121,6 +3267,57 @@ func StackPopV2(scope *Scope, handle tf.Output, elem_type tf.DataType) (elem tf.
return op.Output(0)
}
+// Computes the sum along sparse segments of a tensor.
+//
+// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
+// missing, the `output` tensor at that position will be zeroed.
+//
+// Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
+// segments.
+//
+// For example:
+//
+// ```python
+// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
+//
+// tf.sparse_segment_sum_with_num_segments(
+// c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
+// # => [[0 0 0 0]
+// # [0 0 0 0]
+// # [0 0 0 0]]
+//
+// tf.sparse_segment_sum_with_num_segments(c,
+// tf.constant([0, 1]),
+//                                         tf.constant([0, 2]),
+//                                         num_segments=4)
+// # => [[ 1 2 3 4]
+// # [ 0 0 0 0]
+// # [-1 -2 -3 -4]
+// # [ 0 0 0 0]]
+// ```
+//
+// Arguments:
+//
+// indices: A 1-D tensor. Has same rank as `segment_ids`.
+// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
+// num_segments: Should equal the number of distinct segment IDs.
+//
+// Returns Has same shape as data, except for dimension 0 which
+// has size `num_segments`.
+func SparseSegmentSumWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "SparseSegmentSumWithNumSegments",
+ Input: []tf.Input{
+ data, indices, segment_ids, num_segments,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// PreventGradientAttr is an optional argument to PreventGradient.
type PreventGradientAttr func(optionalAttr)
@@ -6071,53 +6268,6 @@ func MutexV2(scope *Scope, optional ...MutexV2Attr) (resource tf.Output) {
return op.Output(0)
}
-// AvgPool3DAttr is an optional argument to AvgPool3D.
-type AvgPool3DAttr func(optionalAttr)
-
-// AvgPool3DDataFormat sets the optional data_format attribute to value.
-//
-// value: The data format of the input and output data. With the
-// default format "NDHWC", the data is stored in the order of:
-// [batch, in_depth, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCDHW", the data storage order is:
-// [batch, in_channels, in_depth, in_height, in_width].
-// If not specified, defaults to "NDHWC"
-func AvgPool3DDataFormat(value string) AvgPool3DAttr {
- return func(m optionalAttr) {
- m["data_format"] = value
- }
-}
-
-// Performs 3D average pooling on the input.
-//
-// Arguments:
-// input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
-// ksize: 1-D tensor of length 5. The size of the window for each dimension of
-// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
-// strides: 1-D tensor of length 5. The stride of the sliding window for each
-// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
-// padding: The type of padding algorithm to use.
-//
-// Returns The average pooled output tensor.
-func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DAttr) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "AvgPool3D",
- Input: []tf.Input{
- input,
- },
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
// Returns element-wise remainder of division. This emulates C semantics in that
//
// the result here is consistent with a truncating divide. E.g.
@@ -7677,6 +7827,124 @@ func AccumulateNV2(scope *Scope, inputs []tf.Output, shape tf.Shape) (sum tf.Out
return op.Output(0)
}
+// RandomShuffleAttr is an optional argument to RandomShuffle.
+type RandomShuffleAttr func(optionalAttr)
+
+// RandomShuffleSeed sets the optional seed attribute to value.
+//
+// value: If either `seed` or `seed2` are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func RandomShuffleSeed(value int64) RandomShuffleAttr {
+ return func(m optionalAttr) {
+ m["seed"] = value
+ }
+}
+
+// RandomShuffleSeed2 sets the optional seed2 attribute to value.
+//
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func RandomShuffleSeed2(value int64) RandomShuffleAttr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
+ }
+}
+
+// Randomly shuffles a tensor along its first dimension.
+//
+// The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
+// to one and only one `output[i]`. For example, a mapping that might occur for a
+// 3x2 tensor is:
+//
+// ```
+// [[1, 2], [[5, 6],
+// [3, 4], ==> [1, 2],
+// [5, 6]] [3, 4]]
+// ```
+//
+// Arguments:
+// value: The tensor to be shuffled.
+//
+// Returns A tensor of same shape and type as `value`, shuffled along its first
+// dimension.
+func RandomShuffle(scope *Scope, value tf.Output, optional ...RandomShuffleAttr) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "RandomShuffle",
+ Input: []tf.Input{
+ value,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// OrderedMapIncompleteSizeAttr is an optional argument to OrderedMapIncompleteSize.
+type OrderedMapIncompleteSizeAttr func(optionalAttr)
+
+// OrderedMapIncompleteSizeCapacity sets the optional capacity attribute to value.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func OrderedMapIncompleteSizeCapacity(value int64) OrderedMapIncompleteSizeAttr {
+ return func(m optionalAttr) {
+ m["capacity"] = value
+ }
+}
+
+// OrderedMapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func OrderedMapIncompleteSizeMemoryLimit(value int64) OrderedMapIncompleteSizeAttr {
+ return func(m optionalAttr) {
+ m["memory_limit"] = value
+ }
+}
+
+// OrderedMapIncompleteSizeContainer sets the optional container attribute to value.
+// If not specified, defaults to ""
+func OrderedMapIncompleteSizeContainer(value string) OrderedMapIncompleteSizeAttr {
+ return func(m optionalAttr) {
+ m["container"] = value
+ }
+}
+
+// OrderedMapIncompleteSizeSharedName sets the optional shared_name attribute to value.
+// If not specified, defaults to ""
+func OrderedMapIncompleteSizeSharedName(value string) OrderedMapIncompleteSizeAttr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// Op returns the number of incomplete elements in the underlying container.
+func OrderedMapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapIncompleteSizeAttr) (size tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"dtypes": dtypes}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "OrderedMapIncompleteSize",
+
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// DepthwiseConv2dNativeBackpropFilterAttr is an optional argument to DepthwiseConv2dNativeBackpropFilter.
type DepthwiseConv2dNativeBackpropFilterAttr func(optionalAttr)
@@ -7996,27 +8264,6 @@ func CollectiveBcastSend(scope *Scope, input tf.Output, group_size int64, group_
return op.Output(0)
}
-// Makes a copy of `x`.
-//
-// Arguments:
-// x: The source tensor of type `T`.
-//
-// Returns y: A `Tensor` of type `T`. A copy of `x`. Guaranteed that `y`
-// is not an alias of `x`.
-func DeepCopy(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "DeepCopy",
- Input: []tf.Input{
- x,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
// Split a `SparseTensor` into `num_split` tensors along one dimension.
//
// If the `shape[split_dim]` is not an integer multiple of `num_split`. Slices
@@ -11210,7 +11457,7 @@ func SampleDistortedBoundingBoxAspectRatioRange(value []float32) SampleDistorted
// SampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
//
// value: The cropped area of the image must contain a fraction of the
-// supplied image within this range.
+// supplied image within this range.
// If not specified, defaults to <f:0.05 f:1 >
func SampleDistortedBoundingBoxAreaRange(value []float32) SampleDistortedBoundingBoxAttr {
return func(m optionalAttr) {
@@ -13013,122 +13260,6 @@ func Conv3DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_ba
return op.Output(0)
}
-// ResourceApplyProximalAdagradAttr is an optional argument to ResourceApplyProximalAdagrad.
-type ResourceApplyProximalAdagradAttr func(optionalAttr)
-
-// ResourceApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
-//
-// value: If True, updating of the var and accum tensors will be protected by
-// a lock; otherwise the behavior is undefined, but may exhibit less contention.
-// If not specified, defaults to false
-func ResourceApplyProximalAdagradUseLocking(value bool) ResourceApplyProximalAdagradAttr {
- return func(m optionalAttr) {
- m["use_locking"] = value
- }
-}
-
-// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
-//
-// accum += grad * grad
-// prox_v = var - lr * grad * (1 / sqrt(accum))
-// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
-//
-// Arguments:
-// var_: Should be from a Variable().
-// accum: Should be from a Variable().
-// lr: Scaling factor. Must be a scalar.
-// l1: L1 regularization. Must be a scalar.
-// l2: L2 regularization. Must be a scalar.
-// grad: The gradient.
-//
-// Returns the created operation.
-func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, optional ...ResourceApplyProximalAdagradAttr) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "ResourceApplyProximalAdagrad",
- Input: []tf.Input{
- var_, accum, lr, l1, l2, grad,
- },
- Attrs: attrs,
- }
- return scope.AddOperation(opspec)
-}
-
-// MutableHashTableOfTensorsV2Attr is an optional argument to MutableHashTableOfTensorsV2.
-type MutableHashTableOfTensorsV2Attr func(optionalAttr)
-
-// MutableHashTableOfTensorsV2Container sets the optional container attribute to value.
-//
-// value: If non-empty, this table is placed in the given container.
-// Otherwise, a default container is used.
-// If not specified, defaults to ""
-func MutableHashTableOfTensorsV2Container(value string) MutableHashTableOfTensorsV2Attr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
-
-// MutableHashTableOfTensorsV2SharedName sets the optional shared_name attribute to value.
-//
-// value: If non-empty, this table is shared under the given name across
-// multiple sessions.
-// If not specified, defaults to ""
-func MutableHashTableOfTensorsV2SharedName(value string) MutableHashTableOfTensorsV2Attr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// MutableHashTableOfTensorsV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
-// If not specified, defaults to false
-func MutableHashTableOfTensorsV2UseNodeNameSharing(value bool) MutableHashTableOfTensorsV2Attr {
- return func(m optionalAttr) {
- m["use_node_name_sharing"] = value
- }
-}
-
-// MutableHashTableOfTensorsV2ValueShape sets the optional value_shape attribute to value.
-// If not specified, defaults to <>
-func MutableHashTableOfTensorsV2ValueShape(value tf.Shape) MutableHashTableOfTensorsV2Attr {
- return func(m optionalAttr) {
- m["value_shape"] = value
- }
-}
-
-// Creates an empty hash table.
-//
-// This op creates a mutable hash table, specifying the type of its keys and
-// values. Each value must be a vector. Data can be inserted into the table using
-// the insert operations. It does not support the initialization operation.
-//
-// Arguments:
-// key_dtype: Type of the table keys.
-// value_dtype: Type of the table values.
-//
-// Returns Handle to a table.
-func MutableHashTableOfTensorsV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableOfTensorsV2Attr) (table_handle tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "MutableHashTableOfTensorsV2",
-
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
// Subtracts sparse updates from the variable referenced by `resource`.
//
// This operation computes
@@ -13874,6 +14005,83 @@ func Minimum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
return op.Output(0)
}
+// MfccAttr is an optional argument to Mfcc.
+type MfccAttr func(optionalAttr)
+
+// MfccUpperFrequencyLimit sets the optional upper_frequency_limit attribute to value.
+//
+// value: The highest frequency to use when calculating the
+// cepstrum.
+// If not specified, defaults to 4000
+func MfccUpperFrequencyLimit(value float32) MfccAttr {
+ return func(m optionalAttr) {
+ m["upper_frequency_limit"] = value
+ }
+}
+
+// MfccLowerFrequencyLimit sets the optional lower_frequency_limit attribute to value.
+//
+// value: The lowest frequency to use when calculating the
+// cepstrum.
+// If not specified, defaults to 20
+func MfccLowerFrequencyLimit(value float32) MfccAttr {
+ return func(m optionalAttr) {
+ m["lower_frequency_limit"] = value
+ }
+}
+
+// MfccFilterbankChannelCount sets the optional filterbank_channel_count attribute to value.
+//
+// value: Resolution of the Mel bank used internally.
+// If not specified, defaults to 40
+func MfccFilterbankChannelCount(value int64) MfccAttr {
+ return func(m optionalAttr) {
+ m["filterbank_channel_count"] = value
+ }
+}
+
+// MfccDctCoefficientCount sets the optional dct_coefficient_count attribute to value.
+//
+// value: How many output channels to produce per time slice.
+// If not specified, defaults to 13
+func MfccDctCoefficientCount(value int64) MfccAttr {
+ return func(m optionalAttr) {
+ m["dct_coefficient_count"] = value
+ }
+}
+
+// Transforms a spectrogram into a form that's useful for speech recognition.
+//
+// Mel Frequency Cepstral Coefficients are a way of representing audio data that's
+// been effective as an input feature for machine learning. They are created by
+// taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
+// higher frequencies that are less significant to the human ear. They have a long
+// history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
+// is a good resource to learn more.
+//
+// Arguments:
+// spectrogram: Typically produced by the Spectrogram op, with magnitude_squared
+// set to true.
+// sample_rate: The number of samples per second in the source audio.
+func Mfcc(scope *Scope, spectrogram tf.Output, sample_rate tf.Output, optional ...MfccAttr) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "Mfcc",
+ Input: []tf.Input{
+ spectrogram, sample_rate,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
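A sketch of the pipeline the comment above describes, assuming the DecodeWav wrapper from this package and an AudioSpectrogram wrapper with the usual (window_size, stride) signature; the window, stride, and coefficient count are illustrative:

```
package sketch

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// mfccFeatures wires decoded audio -> spectrogram (magnitude_squared=true)
// -> MFCC features, as suggested by the Mfcc documentation.
func mfccFeatures(s *op.Scope, wavData tf.Output) tf.Output {
	audio, sampleRate := op.DecodeWav(s, wavData)
	spectrogram := op.AudioSpectrogram(s, audio, 1024, 512,
		op.AudioSpectrogramMagnitudeSquared(true))
	return op.Mfcc(s, spectrogram, sampleRate,
		op.MfccDctCoefficientCount(13))
}
```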
// AudioSummaryAttr is an optional argument to AudioSummary.
type AudioSummaryAttr func(optionalAttr)
@@ -14292,65 +14500,6 @@ func TensorArraySplitV2(scope *Scope, handle tf.Output, value tf.Output, lengths
return op.Output(0)
}
-// PackAttr is an optional argument to Pack.
-type PackAttr func(optionalAttr)
-
-// PackAxis sets the optional axis attribute to value.
-//
-// value: Dimension along which to pack. Negative values wrap around, so the
-// valid range is `[-(R+1), R+1)`.
-// If not specified, defaults to 0
-func PackAxis(value int64) PackAttr {
- return func(m optionalAttr) {
- m["axis"] = value
- }
-}
-
-// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
-//
-// Packs the `N` tensors in `values` into a tensor with rank one higher than each
-// tensor in `values`, by packing them along the `axis` dimension.
-// Given a list of tensors of shape `(A, B, C)`;
-//
-// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
-// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
-// Etc.
-//
-// For example:
-//
-// ```
-// # 'x' is [1, 4]
-// # 'y' is [2, 5]
-// # 'z' is [3, 6]
-// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
-// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
-// ```
-//
-// This is the opposite of `unpack`.
-//
-// Arguments:
-// values: Must be of same shape and type.
-//
-// Returns The packed tensor.
-func Pack(scope *Scope, values []tf.Output, optional ...PackAttr) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "Pack",
- Input: []tf.Input{
- tf.OutputList(values),
- },
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
// Reorders a SparseTensor into the canonical, row-major ordering.
//
// Note that by convention, all sparse ops preserve the canonical ordering along
@@ -15010,30 +15159,6 @@ func Sigmoid(scope *Scope, x tf.Output) (y tf.Output) {
return op.Output(0)
}
-// Updates specified rows with values in `v`.
-//
-// Computes `x[i, :] = v; return x`.
-//
-// Arguments:
-// x: A tensor of type `T`.
-// i: A vector. Indices into the left-most dimension of `x`.
-// v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
-//
-// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
-func InplaceUpdate(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "InplaceUpdate",
- Input: []tf.Input{
- x, i, v,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
// FusedBatchNormAttr is an optional argument to FusedBatchNorm.
type FusedBatchNormAttr func(optionalAttr)
@@ -17765,6 +17890,187 @@ func SparseCross(scope *Scope, indices []tf.Output, values []tf.Output, shapes [
return op.Output(0), op.Output(1), op.Output(2)
}
+// ResourceApplyProximalAdagradAttr is an optional argument to ResourceApplyProximalAdagrad.
+type ResourceApplyProximalAdagradAttr func(optionalAttr)
+
+// ResourceApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
+//
+// value: If True, updating of the var and accum tensors will be protected by
+// a lock; otherwise the behavior is undefined, but may exhibit less contention.
+// If not specified, defaults to false
+func ResourceApplyProximalAdagradUseLocking(value bool) ResourceApplyProximalAdagradAttr {
+ return func(m optionalAttr) {
+ m["use_locking"] = value
+ }
+}
+
+// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
+//
+// accum += grad * grad
+// prox_v = var - lr * grad * (1 / sqrt(accum))
+// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
+//
+// Arguments:
+// var_: Should be from a Variable().
+// accum: Should be from a Variable().
+// lr: Scaling factor. Must be a scalar.
+// l1: L1 regularization. Must be a scalar.
+// l2: L2 regularization. Must be a scalar.
+// grad: The gradient.
+//
+// Returns the created operation.
+func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, optional ...ResourceApplyProximalAdagradAttr) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "ResourceApplyProximalAdagrad",
+ Input: []tf.Input{
+ var_, accum, lr, l1, l2, grad,
+ },
+ Attrs: attrs,
+ }
+ return scope.AddOperation(opspec)
+}
+
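The update rule documented above, transcribed as a scalar Go sketch (plain arithmetic for a single parameter, not the actual kernel):

```
package sketch

import "math"

// proximalAdagradStep mirrors the documented FOBOS-with-Adagrad update:
//   accum += grad * grad
//   prox_v = var - lr * grad * (1 / sqrt(accum))
//   var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1, 0}
func proximalAdagradStep(v, accum, lr, l1, l2, grad float64) (newVar, newAccum float64) {
	newAccum = accum + grad*grad
	proxV := v - lr*grad/math.Sqrt(newAccum)
	sign := 1.0
	if proxV < 0 {
		sign = -1.0
	}
	newVar = sign / (1 + lr*l2) * math.Max(math.Abs(proxV)-lr*l1, 0)
	return newVar, newAccum
}
```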
+// MutableHashTableOfTensorsV2Attr is an optional argument to MutableHashTableOfTensorsV2.
+type MutableHashTableOfTensorsV2Attr func(optionalAttr)
+
+// MutableHashTableOfTensorsV2Container sets the optional container attribute to value.
+//
+// value: If non-empty, this table is placed in the given container.
+// Otherwise, a default container is used.
+// If not specified, defaults to ""
+func MutableHashTableOfTensorsV2Container(value string) MutableHashTableOfTensorsV2Attr {
+ return func(m optionalAttr) {
+ m["container"] = value
+ }
+}
+
+// MutableHashTableOfTensorsV2SharedName sets the optional shared_name attribute to value.
+//
+// value: If non-empty, this table is shared under the given name across
+// multiple sessions.
+// If not specified, defaults to ""
+func MutableHashTableOfTensorsV2SharedName(value string) MutableHashTableOfTensorsV2Attr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// MutableHashTableOfTensorsV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
+// If not specified, defaults to false
+func MutableHashTableOfTensorsV2UseNodeNameSharing(value bool) MutableHashTableOfTensorsV2Attr {
+ return func(m optionalAttr) {
+ m["use_node_name_sharing"] = value
+ }
+}
+
+// MutableHashTableOfTensorsV2ValueShape sets the optional value_shape attribute to value.
+// If not specified, defaults to <>
+func MutableHashTableOfTensorsV2ValueShape(value tf.Shape) MutableHashTableOfTensorsV2Attr {
+ return func(m optionalAttr) {
+ m["value_shape"] = value
+ }
+}
+
+// Creates an empty hash table.
+//
+// This op creates a mutable hash table, specifying the type of its keys and
+// values. Each value must be a vector. Data can be inserted into the table using
+// the insert operations. It does not support the initialization operation.
+//
+// Arguments:
+// key_dtype: Type of the table keys.
+// value_dtype: Type of the table values.
+//
+// Returns Handle to a table.
+func MutableHashTableOfTensorsV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableOfTensorsV2Attr) (table_handle tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "MutableHashTableOfTensorsV2",
+
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
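A usage sketch for the wrapper above, creating a string-to-float32-vector table shared across sessions; tf.MakeShape comes from the tensorflow/go package, and populating or querying the table (e.g. via the lookup-table insert/find wrappers) would happen elsewhere:

```
package sketch

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// sharedVectorTable returns a handle to a mutable table mapping string keys
// to length-4 float32 vectors, shared under the given name.
func sharedVectorTable(s *op.Scope, name string) tf.Output {
	return op.MutableHashTableOfTensorsV2(s, tf.String, tf.Float,
		op.MutableHashTableOfTensorsV2SharedName(name),
		op.MutableHashTableOfTensorsV2ValueShape(tf.MakeShape(4)))
}
```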
+// Computes the gradient of the sigmoid of `x` wrt its input.
+//
+// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
+// `dy` is the corresponding input gradient.
+func SigmoidGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "SigmoidGrad",
+ Input: []tf.Input{
+ y, dy,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
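The identity in the comment above, checked as a plain scalar Go sketch:

```
package sketch

import "math"

// sigmoidGradScalar computes grad = dy * y * (1 - y) with y = sigmoid(x),
// the same expression SigmoidGrad evaluates elementwise.
func sigmoidGradScalar(x, dy float64) float64 {
	y := 1 / (1 + math.Exp(-x))
	return dy * y * (1 - y)
}
```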
+// Convert one or more images from HSV to RGB.
+//
+// Outputs a tensor of the same shape as the `images` tensor, containing the RGB
+// value of the pixels. The output is only well defined if the values in
+// `images` are in `[0,1]`.
+//
+// See `rgb_to_hsv` for a description of the HSV encoding.
+//
+// Arguments:
+// images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
+//
+// Returns `images` converted to RGB.
+func HSVToRGB(scope *Scope, images tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "HSVToRGB",
+ Input: []tf.Input{
+ images,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.
+//
+// Arguments:
+// tree_ensemble_handle: Handle to the tree ensemble.
+//
+// Returns the stamp token of the tree ensemble resource, the number of trees
+// in the ensemble, the number of trees that were finished successfully, the
+// number of layers we attempted to build (but not necessarily succeeded), and
+// a rank-2 tensor containing the start and end ids of the nodes in the latest
+// layer.
+func BoostedTreesGetEnsembleStates(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, num_trees tf.Output, num_finalized_trees tf.Output, num_attempted_layers tf.Output, last_layer_nodes_range tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "BoostedTreesGetEnsembleStates",
+ Input: []tf.Input{
+ tree_ensemble_handle,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
+}
+
// Returns the element-wise min of two SparseTensors.
//
// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
@@ -17969,9 +18275,8 @@ func SparseFillEmptyRowsGrad(scope *Scope, reverse_index_map tf.Output, grad_val
}
// Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
-// if < 0, `scale * features` otherwise.
//
-// Assumes weights to have zero mean and variance 1.0 / fan_in.
+// if < 0, `scale * features` otherwise.
//
// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
func Selu(scope *Scope, features tf.Output) (activations tf.Output) {
@@ -19204,119 +19509,25 @@ func RandomUniformInt(scope *Scope, shape tf.Output, minval tf.Output, maxval tf
return op.Output(0)
}
-// RandomShuffleAttr is an optional argument to RandomShuffle.
-type RandomShuffleAttr func(optionalAttr)
-
-// RandomShuffleSeed sets the optional seed attribute to value.
-//
-// value: If either `seed` or `seed2` are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func RandomShuffleSeed(value int64) RandomShuffleAttr {
- return func(m optionalAttr) {
- m["seed"] = value
- }
-}
-
-// RandomShuffleSeed2 sets the optional seed2 attribute to value.
-//
-// value: A second seed to avoid seed collision.
-// If not specified, defaults to 0
-func RandomShuffleSeed2(value int64) RandomShuffleAttr {
- return func(m optionalAttr) {
- m["seed2"] = value
- }
-}
-
-// Randomly shuffles a tensor along its first dimension.
-//
-// The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
-// to one and only one `output[i]`. For example, a mapping that might occur for a
-// 3x2 tensor is:
+// Computes gradients for SparseSegmentSqrtN.
//
-// ```
-// [[1, 2], [[5, 6],
-// [3, 4], ==> [1, 2],
-// [5, 6]] [3, 4]]
-// ```
+// Returns tensor "output" with same shape as grad, except for dimension 0 whose
+// value is output_dim0.
//
// Arguments:
-// value: The tensor to be shuffled.
-//
-// Returns A tensor of same shape and type as `value`, shuffled along its first
-// dimension.
-func RandomShuffle(scope *Scope, value tf.Output, optional ...RandomShuffleAttr) (output tf.Output) {
+// grad: gradient propagated to the SparseSegmentSqrtN op.
+// indices: indices passed to the corresponding SparseSegmentSqrtN op.
+// segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.
+// output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.
+func SparseSegmentSqrtNGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "RandomShuffle",
+ Type: "SparseSegmentSqrtNGrad",
Input: []tf.Input{
- value,
+ grad, indices, segment_ids, output_dim0,
},
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// OrderedMapIncompleteSizeAttr is an optional argument to OrderedMapIncompleteSize.
-type OrderedMapIncompleteSizeAttr func(optionalAttr)
-
-// OrderedMapIncompleteSizeCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
-//
-// REQUIRES: value >= 0
-func OrderedMapIncompleteSizeCapacity(value int64) OrderedMapIncompleteSizeAttr {
- return func(m optionalAttr) {
- m["capacity"] = value
- }
-}
-
-// OrderedMapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
-//
-// REQUIRES: value >= 0
-func OrderedMapIncompleteSizeMemoryLimit(value int64) OrderedMapIncompleteSizeAttr {
- return func(m optionalAttr) {
- m["memory_limit"] = value
- }
-}
-
-// OrderedMapIncompleteSizeContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func OrderedMapIncompleteSizeContainer(value string) OrderedMapIncompleteSizeAttr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
-
-// OrderedMapIncompleteSizeSharedName sets the optional shared_name attribute to value.
-// If not specified, defaults to ""
-func OrderedMapIncompleteSizeSharedName(value string) OrderedMapIncompleteSizeAttr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// Op returns the number of incomplete elements in the underlying container.
-func OrderedMapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapIncompleteSizeAttr) (size tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"dtypes": dtypes}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "OrderedMapIncompleteSize",
-
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
@@ -20357,83 +20568,6 @@ func QuantizedAdd(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x
return op.Output(0), op.Output(1), op.Output(2)
}
-// MfccAttr is an optional argument to Mfcc.
-type MfccAttr func(optionalAttr)
-
-// MfccUpperFrequencyLimit sets the optional upper_frequency_limit attribute to value.
-//
-// value: The highest frequency to use when calculating the
-// ceptstrum.
-// If not specified, defaults to 4000
-func MfccUpperFrequencyLimit(value float32) MfccAttr {
- return func(m optionalAttr) {
- m["upper_frequency_limit"] = value
- }
-}
-
-// MfccLowerFrequencyLimit sets the optional lower_frequency_limit attribute to value.
-//
-// value: The lowest frequency to use when calculating the
-// ceptstrum.
-// If not specified, defaults to 20
-func MfccLowerFrequencyLimit(value float32) MfccAttr {
- return func(m optionalAttr) {
- m["lower_frequency_limit"] = value
- }
-}
-
-// MfccFilterbankChannelCount sets the optional filterbank_channel_count attribute to value.
-//
-// value: Resolution of the Mel bank used internally.
-// If not specified, defaults to 40
-func MfccFilterbankChannelCount(value int64) MfccAttr {
- return func(m optionalAttr) {
- m["filterbank_channel_count"] = value
- }
-}
-
-// MfccDctCoefficientCount sets the optional dct_coefficient_count attribute to value.
-//
-// value: How many output channels to produce per time slice.
-// If not specified, defaults to 13
-func MfccDctCoefficientCount(value int64) MfccAttr {
- return func(m optionalAttr) {
- m["dct_coefficient_count"] = value
- }
-}
-
-// Transforms a spectrogram into a form that's useful for speech recognition.
-//
-// Mel Frequency Cepstral Coefficients are a way of representing audio data that's
-// been effective as an input feature for machine learning. They are created by
-// taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
-// higher frequencies that are less significant to the human ear. They have a long
-// history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
-// is a good resource to learn more.
-//
-// Arguments:
-// spectrogram: Typically produced by the Spectrogram op, with magnitude_squared
-// set to true.
-// sample_rate: How many samples per second the source audio used.
-func Mfcc(scope *Scope, spectrogram tf.Output, sample_rate tf.Output, optional ...MfccAttr) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "Mfcc",
- Input: []tf.Input{
- spectrogram, sample_rate,
- },
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
// Given a quantized tensor described by (input, input_min, input_max), outputs a
//
// range that covers the actual values present in that tensor. This op is
@@ -21656,7 +21790,7 @@ func ImageSummaryBadColor(value tf.Tensor) ImageSummaryAttr {
// generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
//
// The `bad_color` argument is the color to use in the generated images for
-// non-finite input values. It is a `uint8` 1-D tensor of length `channels`.
+// non-finite input values. It is a `uint8` 1-D tensor of length `channels`.
// Each element must be in the range `[0, 255]` (It represents the value of a
// pixel in the output image). Non-finite values in the input tensor are
// replaced by this tensor in the output image. The default value is the color
@@ -23914,71 +24048,6 @@ func DecodeGif(scope *Scope, contents tf.Output) (image tf.Output) {
return op.Output(0)
}
-// Computes the gradient of the sigmoid of `x` wrt its input.
-//
-// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
-// `dy` is the corresponding input gradient.
-func SigmoidGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "SigmoidGrad",
- Input: []tf.Input{
- y, dy,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Convert one or more images from HSV to RGB.
-//
-// Outputs a tensor of the same shape as the `images` tensor, containing the RGB
-// value of the pixels. The output is only well defined if the value in `images`
-// are in `[0,1]`.
-//
-// See `rgb_to_hsv` for a description of the HSV encoding.
-//
-// Arguments:
-// images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
-//
-// Returns `images` converted to RGB.
-func HSVToRGB(scope *Scope, images tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "HSVToRGB",
- Input: []tf.Input{
- images,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.
-//
-// Arguments:
-// tree_ensemble_handle: Handle to the tree ensemble.
-//
-// Returns Stamp token of the tree ensemble resource.The number of trees in the tree ensemble resource.The number of trees that were finished successfully.The number of layers we attempted to build (but not necessarily succeeded).Rank size 2 tensor that contains start and end ids of the nodes in the latest
-// layer.
-func BoostedTreesGetEnsembleStates(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, num_trees tf.Output, num_finalized_trees tf.Output, num_attempted_layers tf.Output, last_layer_nodes_range tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "BoostedTreesGetEnsembleStates",
- Input: []tf.Input{
- tree_ensemble_handle,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
-}
-
// Gets the next output from the given iterator.
//
// This operation is a synchronous version of IteratorGetNext. It should only be used
@@ -24049,7 +24118,7 @@ func SampleDistortedBoundingBoxV2AspectRatioRange(value []float32) SampleDistort
// SampleDistortedBoundingBoxV2AreaRange sets the optional area_range attribute to value.
//
// value: The cropped area of the image must contain a fraction of the
-// supplied image within this range.
+// supplied image within this range.
// If not specified, defaults to <f:0.05 f:1 >
func SampleDistortedBoundingBoxV2AreaRange(value []float32) SampleDistortedBoundingBoxV2Attr {
return func(m optionalAttr) {
@@ -24745,7 +24814,8 @@ type DecodeProtoV2Attr func(optionalAttr)
// If not specified, defaults to "local://"
func DecodeProtoV2DescriptorSource(value string) DecodeProtoV2Attr {
return func(m optionalAttr) {
- m["descriptor_source"] = value }
+ m["descriptor_source"] = value
+ }
}
// DecodeProtoV2MessageFormat sets the optional message_format attribute to value.
@@ -25778,57 +25848,6 @@ func CacheDataset(scope *Scope, input_dataset tf.Output, filename tf.Output, out
return op.Output(0)
}
-// Computes the sum along sparse segments of a tensor.
-//
-// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
-// misisng, the `output` tensor at that position will be zeroed.
-//
-// Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
-// segments.
-//
-// For example:
-//
-// ```python
-// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
-//
-// tf.sparse_segment_sum_with_num_segments(
-// c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
-// # => [[0 0 0 0]
-// # [0 0 0 0]
-// # [0 0 0 0]]
-//
-// tf.sparse_segment_sum_with_num_segments(c,
-// tf.constant([0, 1]),
-// tf.constant([0, 2],
-// num_segments=4))
-// # => [[ 1 2 3 4]
-// # [ 0 0 0 0]
-// # [-1 -2 -3 -4]
-// # [ 0 0 0 0]]
-// ```
-//
-// Arguments:
-//
-// indices: A 1-D tensor. Has same rank as `segment_ids`.
-// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
-// num_segments: Should equal the number of distinct segment IDs.
-//
-// Returns Has same shape as data, except for dimension 0 which
-// has size `num_segments`.
-func SparseSegmentSumWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "SparseSegmentSumWithNumSegments",
- Input: []tf.Input{
- data, indices, segment_ids, num_segments,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
// Creates a dataset that executes a SQL query and emits rows of the result set.
//
// Arguments:
@@ -26443,6 +26462,53 @@ func Cross(scope *Scope, a tf.Output, b tf.Output) (product tf.Output) {
return op.Output(0)
}
+// AvgPool3DAttr is an optional argument to AvgPool3D.
+type AvgPool3DAttr func(optionalAttr)
+
+// AvgPool3DDataFormat sets the optional data_format attribute to value.
+//
+// value: The data format of the input and output data. With the
+// default format "NDHWC", the data is stored in the order of:
+// [batch, in_depth, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCDHW", the data storage order is:
+// [batch, in_channels, in_depth, in_height, in_width].
+// If not specified, defaults to "NDHWC"
+func AvgPool3DDataFormat(value string) AvgPool3DAttr {
+ return func(m optionalAttr) {
+ m["data_format"] = value
+ }
+}
+
+// Performs 3D average pooling on the input.
+//
+// Arguments:
+// input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
+// ksize: 1-D tensor of length 5. The size of the window for each dimension of
+// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
+// strides: 1-D tensor of length 5. The stride of the sliding window for each
+// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+// padding: The type of padding algorithm to use.
+//
+// Returns The average pooled output tensor.
+func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DAttr) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "AvgPool3D",
+ Input: []tf.Input{
+ input,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
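A sketch of a 2x2x2 average pool with stride 2 over an NDHWC input, following the ksize/strides constraints documented above (ksize[0] = ksize[4] = 1, strides[0] = strides[4] = 1):

```
package sketch

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// avgPool3D halves depth, rows, and cols of a [batch, d, h, w, c] tensor.
func avgPool3D(s *op.Scope, input tf.Output) tf.Output {
	return op.AvgPool3D(s, input,
		[]int64{1, 2, 2, 2, 1}, // window per dimension
		[]int64{1, 2, 2, 2, 1}, // stride per dimension
		"VALID",
		op.AvgPool3DDataFormat("NDHWC"))
}
```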
// Performs a padding as a preprocess during a convolution.
//
// Similar to FusedResizeAndPadConv2d, this op allows for an optimized
@@ -30644,69 +30710,3 @@ func DecodeWav(scope *Scope, contents tf.Output, optional ...DecodeWavAttr) (aud
op := scope.AddOperation(opspec)
return op.Output(0), op.Output(1)
}
-
-// Concatenates a list of `N` tensors along the first dimension.
-//
-// The input tensors are all required to have size 1 in the first dimension.
-//
-// For example:
-//
-// ```
-// # 'x' is [[1, 4]]
-// # 'y' is [[2, 5]]
-// # 'z' is [[3, 6]]
-// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
-// ```
-//
-// The difference between concat and parallel_concat is that concat requires all
-// of the inputs be computed before the operation will begin but doesn't require
-// that the input shapes be known during graph construction. Parallel concat
-// will copy pieces of the input into the output as they become available, in
-// some situations this can provide a performance benefit.
-//
-// Arguments:
-// values: Tensors to be concatenated. All must have size 1 in the first dimension
-// and same shape.
-// shape: the final shape of the result; should be equal to the shapes of any input
-// but with the number of input values in the first dimension.
-//
-// Returns The concatenated tensor.
-func ParallelConcat(scope *Scope, values []tf.Output, shape tf.Shape) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"shape": shape}
- opspec := tf.OpSpec{
- Type: "ParallelConcat",
- Input: []tf.Input{
- tf.OutputList(values),
- },
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Subtracts `v` into specified rows of `x`.
-//
-// Computes y = x; y[i, :] -= v; return y.
-//
-// Arguments:
-// x: A `Tensor` of type T.
-// i: A vector. Indices into the left-most dimension of `x`.
-// v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
-//
-// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
-func InplaceSub(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "InplaceSub",
- Input: []tf.Input{
- x, i, v,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
diff --git a/tensorflow/go/operation.go b/tensorflow/go/operation.go
index 25ec718703..d6a37e0a86 100644
--- a/tensorflow/go/operation.go
+++ b/tensorflow/go/operation.go
@@ -45,6 +45,12 @@ func (op *Operation) NumOutputs() int {
return int(C.TF_OperationNumOutputs(op.c))
}
+// Device returns a specification of the device on which this operation
+// will be executed, or the empty string if there is no such specification.
+func (op *Operation) Device() string {
+ return C.GoString(C.TF_OperationDevice(op.c))
+}
+
// OutputListSize returns the size of the list of Outputs that is produced by a
// named output of op.
//
diff --git a/tensorflow/go/operation_test.go b/tensorflow/go/operation_test.go
index 06b65bdfb7..4af9e33ad0 100644
--- a/tensorflow/go/operation_test.go
+++ b/tensorflow/go/operation_test.go
@@ -228,6 +228,29 @@ func TestOperationConsumers(t *testing.T) {
}
}
+func TestOperationDevice(t *testing.T) {
+ graph := NewGraph()
+ v, err := NewTensor(float32(1.0))
+ if err != nil {
+ t.Fatal(err)
+ }
+ op, err := graph.AddOperation(OpSpec{
+ Type: "Const",
+ Name: "Const",
+ Attrs: map[string]interface{}{
+ "dtype": v.DataType(),
+ "value": v,
+ },
+ Device: "/device:GPU:0",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := op.Device(), "/device:GPU:0"; got != want {
+ t.Errorf("Got %q, want %q", got, want)
+ }
+}
+
func forceGC() {
var mem runtime.MemStats
runtime.ReadMemStats(&mem)
diff --git a/tensorflow/java/maven/hadoop/pom.xml b/tensorflow/java/maven/hadoop/pom.xml
index 0642be06fa..7391dfb965 100644
--- a/tensorflow/java/maven/hadoop/pom.xml
+++ b/tensorflow/java/maven/hadoop/pom.xml
@@ -1,12 +1,30 @@
-<project
- xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <!-- Placeholder pom which is replaced by TensorFlow ecosystem Hadoop pom during build -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
- <description>TensorFlow TFRecord InputFormat/OutputFormat for Apache Hadoop</description>
+ <groupId>org.tensorflow</groupId>
<artifactId>hadoop</artifactId>
<packaging>jar</packaging>
+ <version>1.9.0</version>
+ <name>tensorflow-hadoop</name>
+ <url>https://www.tensorflow.org</url>
+ <description>TensorFlow TFRecord InputFormat/OutputFormat for Apache Hadoop</description>
+
+ <properties>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <maven.compiler.source>1.6</maven.compiler.source>
+ <maven.compiler.target>1.6</maven.compiler.target>
+ <hadoop.version>2.6.0</hadoop.version>
+ <protobuf.version>3.3.1</protobuf.version>
+ <junit.version>4.11</junit.version>
+ </properties>
+
+ <licenses>
+ <license>
+ <name>Apache License Version 2.0</name>
+ <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+ </license>
+ </licenses>
<scm>
<url>https://github.com/tensorflow/ecosystem.git</url>
@@ -14,11 +32,161 @@
<developerConnection>scm:git:https://github.com/tensorflow/ecosystem.git</developerConnection>
</scm>
- <url>https://github.com/tensorflow/ecosystem/</url>
- <parent>
- <groupId>org.tensorflow</groupId>
- <artifactId>parentpom</artifactId>
- <version>1.9.0-rc0</version>
- <relativePath>../</relativePath>
- </parent>
-</project> \ No newline at end of file
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-gpg-plugin</artifactId>
+ <version>1.5</version>
+ <executions>
+ <execution>
+ <id>sign-artifacts</id>
+ <phase>verify</phase>
+ <goals>
+ <goal>sign</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-source-plugin</artifactId>
+ <version>2.2.1</version>
+ <executions>
+ <execution>
+ <id>attach-sources</id>
+ <goals>
+ <goal>jar-no-fork</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <version>2.9.1</version>
+ <executions>
+ <execution>
+ <id>attach-javadocs</id>
+ <goals>
+ <goal>jar</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.tensorflow</groupId>
+ <artifactId>proto</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ <version>${hadoop.version}</version>
+ <exclusions>
+ <exclusion>
+ <groupId>com.google.protobuf</groupId>
+ <artifactId>protobuf-java</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-core</artifactId>
+ <version>${hadoop.version}</version>
+ <exclusions>
+ <exclusion>
+ <groupId>com.google.protobuf</groupId>
+ <artifactId>protobuf-java</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>com.google.protobuf</groupId>
+ <artifactId>protobuf-java</artifactId>
+ <version>${protobuf.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>${junit.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+ <version>${hadoop.version}</version>
+ <type>test-jar</type>
+ <optional>true</optional>
+ <scope>test</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>com.google.protobuf</groupId>
+ <artifactId>protobuf-java</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ </dependencies>
+
+ <!-- Two profiles are used:
+ ossrh - deploys to ossrh/maven central
+ bintray - deploys to bintray/jcenter. -->
+ <profiles>
+ <profile>
+ <id>ossrh</id>
+ <distributionManagement>
+ <!-- Sonatype requirements from http://central.sonatype.org/pages/apache-maven.html -->
+ <snapshotRepository>
+ <id>ossrh</id>
+ <url>https://oss.sonatype.org/content/repositories/snapshots</url>
+ </snapshotRepository>
+ <repository>
+ <id>ossrh</id>
+ <url>https://oss.sonatype.org/service/local/staging/deploy/maven2/</url>
+ </repository>
+ </distributionManagement>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-gpg-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ <profile>
+ <id>bintray</id>
+ <distributionManagement>
+ <!-- https://blog.bintray.com/2015/09/17/publishing-your-maven-project-to-bintray/ -->
+ <repository>
+ <id>bintray</id>
+ <url>https://api.bintray.com/maven/google/tensorflow/tensorflow/;publish=0</url>
+ </repository>
+ </distributionManagement>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-gpg-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+
+ <developers>
+ <developer>
+ <name>TensorFlowers</name>
+ <organization>TensorFlow</organization>
+ <organizationUrl>http://www.tensorflow.org</organizationUrl>
+ </developer>
+ </developers>
+</project>
diff --git a/tensorflow/java/maven/libtensorflow/pom.xml b/tensorflow/java/maven/libtensorflow/pom.xml
index a7fa9ea5cc..d44bdf8f81 100644
--- a/tensorflow/java/maven/libtensorflow/pom.xml
+++ b/tensorflow/java/maven/libtensorflow/pom.xml
@@ -6,7 +6,7 @@
<parent>
<groupId>org.tensorflow</groupId>
<artifactId>parentpom</artifactId>
- <version>1.9.0-rc1</version>
+ <version>1.9.0</version>
<relativePath>../</relativePath>
</parent>
<artifactId>libtensorflow</artifactId>
diff --git a/tensorflow/java/maven/libtensorflow_jni/pom.xml b/tensorflow/java/maven/libtensorflow_jni/pom.xml
index 83aae29f1e..e8925c6fb1 100644
--- a/tensorflow/java/maven/libtensorflow_jni/pom.xml
+++ b/tensorflow/java/maven/libtensorflow_jni/pom.xml
@@ -6,7 +6,7 @@
<parent>
<groupId>org.tensorflow</groupId>
<artifactId>parentpom</artifactId>
- <version>1.9.0-rc1</version>
+ <version>1.9.0</version>
<relativePath>../</relativePath>
</parent>
<artifactId>libtensorflow_jni</artifactId>
diff --git a/tensorflow/java/maven/libtensorflow_jni_gpu/pom.xml b/tensorflow/java/maven/libtensorflow_jni_gpu/pom.xml
index 50bd8ee5f9..3bf4a2590c 100644
--- a/tensorflow/java/maven/libtensorflow_jni_gpu/pom.xml
+++ b/tensorflow/java/maven/libtensorflow_jni_gpu/pom.xml
@@ -6,7 +6,7 @@
<parent>
<groupId>org.tensorflow</groupId>
<artifactId>parentpom</artifactId>
- <version>1.9.0-rc1</version>
+ <version>1.9.0</version>
<relativePath>../</relativePath>
</parent>
<artifactId>libtensorflow_jni_gpu</artifactId>
diff --git a/tensorflow/java/maven/pom.xml b/tensorflow/java/maven/pom.xml
index b4746794ea..b96dcf2888 100644
--- a/tensorflow/java/maven/pom.xml
+++ b/tensorflow/java/maven/pom.xml
@@ -6,7 +6,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.tensorflow</groupId>
<artifactId>parentpom</artifactId>
- <version>1.9.0-rc1</version>
+ <version>1.9.0</version>
<packaging>pom</packaging>
<url>https://www.tensorflow.org</url>
diff --git a/tensorflow/java/maven/proto/pom.xml b/tensorflow/java/maven/proto/pom.xml
index 618a2a124c..5581d864d7 100644
--- a/tensorflow/java/maven/proto/pom.xml
+++ b/tensorflow/java/maven/proto/pom.xml
@@ -6,7 +6,7 @@
<parent>
<groupId>org.tensorflow</groupId>
<artifactId>parentpom</artifactId>
- <version>1.9.0-rc1</version>
+ <version>1.9.0</version>
<relativePath>../</relativePath>
</parent>
<artifactId>proto</artifactId>
diff --git a/tensorflow/java/maven/run_inside_container.sh b/tensorflow/java/maven/run_inside_container.sh
index 2e771064e4..2240d6b7b9 100644
--- a/tensorflow/java/maven/run_inside_container.sh
+++ b/tensorflow/java/maven/run_inside_container.sh
@@ -203,7 +203,10 @@ download_tf_ecosystem() {
cd "${ECOSYSTEM_DIR}"
git clone "${TF_ECOSYSTEM_URL}"
cd ecosystem
- git checkout r${TF_VERSION}
+ # TF_VERSION is a semver string (<major>.<minor>.<patch>[-suffix])
+ # but the branch is just (r<major>.<minor>).
+ RELEASE_BRANCH=$(echo "${TF_VERSION}" | sed -e 's/\([0-9]\+\.[0-9]\+\)\.[0-9]\+.*/\1/')
+ git checkout r${RELEASE_BRANCH}
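The same semver-to-branch mapping as the sed expression above, sketched in Go for clarity: "1.9.0" and "1.9.0-rc1" both map to branch "r1.9".

```
package sketch

import "regexp"

var semverToBranch = regexp.MustCompile(`^([0-9]+\.[0-9]+)\.[0-9]+.*$`)

// releaseBranch strips the patch version and any suffix, then prefixes "r".
func releaseBranch(tfVersion string) string {
	return "r" + semverToBranch.ReplaceAllString(tfVersion, "$1")
}
```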
# Copy the TensorFlow Hadoop source
cp -r "${ECOSYSTEM_DIR}/ecosystem/hadoop/src" "${HADOOP_DIR}"
diff --git a/tensorflow/java/maven/spark-connector/pom.xml b/tensorflow/java/maven/spark-connector/pom.xml
index 19c752d08b..64956be02c 100644
--- a/tensorflow/java/maven/spark-connector/pom.xml
+++ b/tensorflow/java/maven/spark-connector/pom.xml
@@ -1,12 +1,23 @@
-<project
- xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <!-- Placeholder pom which is replaced by TensorFlow ecosystem Spark pom during build -->
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
- <description>TensorFlow TFRecord connector for Apache Spark DataFrames</description>
- <artifactId>spark-connector</artifactId>
+ <groupId>org.tensorflow</groupId>
+ <artifactId>spark-connector_2.11</artifactId>
<packaging>jar</packaging>
+ <version>1.9.0</version>
+ <name>spark-tensorflow-connector</name>
+ <url>https://www.tensorflow.org</url>
+ <description>TensorFlow TFRecord connector for Apache Spark DataFrames</description>
+
+ <licenses>
+ <license>
+ <name>The Apache Software License, Version 2.0</name>
+ <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+ <distribution>repo</distribution>
+ </license>
+ </licenses>
<scm>
<url>https://github.com/tensorflow/ecosystem.git</url>
@@ -14,11 +25,325 @@
<developerConnection>scm:git:https://github.com/tensorflow/ecosystem.git</developerConnection>
</scm>
- <url>https://github.com/tensorflow/ecosystem/</url>
- <parent>
- <groupId>org.tensorflow</groupId>
- <artifactId>parentpom</artifactId>
- <version>1.9.0-rc0</version>
- <relativePath>../</relativePath>
- </parent>
-</project> \ No newline at end of file
+ <properties>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <scala.maven.version>3.2.2</scala.maven.version>
+ <scala.binary.version>2.11</scala.binary.version>
+ <scalatest.maven.version>1.0</scalatest.maven.version>
+ <scala.test.version>2.2.6</scala.test.version>
+ <maven.compiler.version>3.0</maven.compiler.version>
+ <java.version>1.8</java.version>
+ <spark.version>2.3.0</spark.version>
+ <yarn.api.version>2.7.3</yarn.api.version>
+ <junit.version>4.11</junit.version>
+ </properties>
+
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <inherited>true</inherited>
+ <groupId>net.alchim31.maven</groupId>
+ <artifactId>scala-maven-plugin</artifactId>
+ <version>${scala.maven.version}</version>
+ <executions>
+ <execution>
+ <id>compile</id>
+ <goals>
+ <goal>add-source</goal>
+ <goal>compile</goal>
+ </goals>
+ <configuration>
+ <jvmArgs>
+ <jvmArg>-Xms256m</jvmArg>
+ <jvmArg>-Xmx512m</jvmArg>
+ </jvmArgs>
+ <args>
+ <arg>-g:vars</arg>
+ <arg>-deprecation</arg>
+ <arg>-feature</arg>
+ <arg>-unchecked</arg>
+ <arg>-Xfatal-warnings</arg>
+ <arg>-language:implicitConversions</arg>
+ <arg>-language:existentials</arg>
+ </args>
+ </configuration>
+ </execution>
+ <execution>
+ <id>test</id>
+ <goals>
+ <goal>add-source</goal>
+ <goal>testCompile</goal>
+ </goals>
+ </execution>
+ <execution>
+ <id>attach-javadocs</id>
+ <goals>
+ <goal>doc-jar</goal>
+ </goals>
+ </execution>
+ </executions>
+ <configuration>
+ <recompileMode>incremental</recompileMode>
+ <useZincServer>true</useZincServer>
+ <scalaVersion>${scala.binary.version}</scalaVersion>
+ <checkMultipleScalaVersions>false</checkMultipleScalaVersions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <inherited>true</inherited>
+ <groupId>org.scalatest</groupId>
+ <artifactId>scalatest-maven-plugin</artifactId>
+ <version>${scalatest.maven.version}</version>
+ <executions>
+ <execution>
+ <id>scalaTest</id>
+ <phase>test</phase>
+ <goals>
+ <goal>test</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <!-- Shade protobuf dependency. -->
+ <plugin>
+ <artifactId>maven-shade-plugin</artifactId>
+ <version>3.1.0</version>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ <configuration>
+ <minimizeJar>true</minimizeJar>
+ <artifactSet>
+ <includes>
+ <include>com.google.protobuf:protobuf-java</include>
+ <include>org.tensorflow:hadoop</include>
+ <include>org.tensorflow:proto</include>
+ </includes>
+ </artifactSet>
+ <filters>
+ <filter>
+ <!-- Remove the source to keep the result smaller. -->
+ <artifact>com.google.protobuf:protobuf-java</artifact>
+ <excludes>
+ <exclude>**/*.java</exclude>
+ </excludes>
+ </filter>
+ </filters>
+ <relocations>
+ <relocation>
+ <pattern>com.google.protobuf</pattern>
+ <shadedPattern>
+ org.tensorflow.spark.shaded.com.google.protobuf
+ </shadedPattern>
+ </relocation>
+ </relocations>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <!-- GPG signed components: http://central.sonatype.org/pages/apache-maven.html#gpg-signed-components -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-gpg-plugin</artifactId>
+ <version>1.5</version>
+ <executions>
+ <execution>
+ <id>sign-artifacts</id>
+ <phase>verify</phase>
+ <goals>
+ <goal>sign</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>net.alchim31.maven</groupId>
+ <artifactId>scala-maven-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-shade-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.scalatest</groupId>
+ <artifactId>scalatest-maven-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>${maven.compiler.version}</version>
+ <configuration>
+ <source>${java.version}</source>
+ <target>${java.version}</target>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-source-plugin</artifactId>
+ <version>2.2.1</version>
+ <executions>
+ <execution>
+ <id>attach-sources</id>
+ <goals>
+ <goal>jar-no-fork</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <version>2.9.1</version>
+ <executions>
+ <execution>
+ <id>attach-javadocs</id>
+ <goals>
+ <goal>jar</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+ <profiles>
+ <profile>
+ <id>test</id>
+ <activation>
+ <activeByDefault>true</activeByDefault>
+ <property>
+ <name>!NEVERSETME</name>
+ </property>
+ </activation>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>net.alchim31.maven</groupId>
+ <artifactId>scala-maven-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>org.scalatest</groupId>
+ <artifactId>scalatest_${scala.binary.version}</artifactId>
+ <version>${scala.test.version}</version>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>org.scalatest</groupId>
+ <artifactId>scalatest_${scala.binary.version}</artifactId>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+ </profile>
+
+ <!-- Two profiles are used:
+ ossrh - deploys to ossrh/maven central
+ bintray - deploys to bintray/jcenter. -->
+ <profile>
+ <id>ossrh</id>
+ <distributionManagement>
+ <!-- Sonatype requirements from http://central.sonatype.org/pages/apache-maven.html -->
+ <snapshotRepository>
+ <id>ossrh</id>
+ <url>https://oss.sonatype.org/content/repositories/snapshots</url>
+ </snapshotRepository>
+ <repository>
+ <id>ossrh</id>
+ <url>https://oss.sonatype.org/service/local/staging/deploy/maven2/</url>
+ </repository>
+ </distributionManagement>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-gpg-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ <profile>
+ <id>bintray</id>
+ <distributionManagement>
+ <!-- https://blog.bintray.com/2015/09/17/publishing-your-maven-project-to-bintray/ -->
+ <repository>
+ <id>bintray</id>
+ <url>https://api.bintray.com/maven/google/tensorflow/tensorflow/;publish=0</url>
+ </repository>
+ </distributionManagement>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-gpg-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+
+ <developers>
+ <developer>
+ <name>TensorFlowers</name>
+ <organization>TensorFlow</organization>
+ <organizationUrl>http://www.tensorflow.org</organizationUrl>
+ </developer>
+ </developers>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.tensorflow</groupId>
+ <artifactId>hadoop</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.spark</groupId>
+ <artifactId>spark-core_${scala.binary.version}</artifactId>
+ <version>${spark.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.spark</groupId>
+ <artifactId>spark-sql_${scala.binary.version}</artifactId>
+ <version>${spark.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.spark</groupId>
+ <artifactId>spark-mllib_${scala.binary.version}</artifactId>
+ <version>${spark.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-api</artifactId>
+ <version>${yarn.api.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.spark</groupId>
+ <artifactId>spark-mllib_${scala.binary.version}</artifactId>
+ <version>${spark.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>${junit.version}</version>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+</project>
diff --git a/tensorflow/java/maven/tensorflow/pom.xml b/tensorflow/java/maven/tensorflow/pom.xml
index 157c4b8e82..92e15aa2c7 100644
--- a/tensorflow/java/maven/tensorflow/pom.xml
+++ b/tensorflow/java/maven/tensorflow/pom.xml
@@ -6,7 +6,7 @@
<parent>
<groupId>org.tensorflow</groupId>
<artifactId>parentpom</artifactId>
- <version>1.9.0-rc1</version>
+ <version>1.9.0</version>
<relativePath>../</relativePath>
</parent>
<artifactId>tensorflow</artifactId>
diff --git a/tensorflow/java/src/gen/cc/java_defs.h b/tensorflow/java/src/gen/cc/java_defs.h
index f5f54bf4d3..d9d6f8adc8 100644
--- a/tensorflow/java/src/gen/cc/java_defs.h
+++ b/tensorflow/java/src/gen/cc/java_defs.h
@@ -16,9 +16,9 @@ limitations under the License.
#ifndef TENSORFLOW_JAVA_SRC_GEN_CC_JAVA_DEFS_H_
#define TENSORFLOW_JAVA_SRC_GEN_CC_JAVA_DEFS_H_
-#include <string>
#include <list>
#include <map>
+#include <string>
#include <utility>
namespace tensorflow {
diff --git a/tensorflow/java/src/gen/cc/op_generator.cc b/tensorflow/java/src/gen/cc/op_generator.cc
index 2df69ee299..d5bd99bdd9 100644
--- a/tensorflow/java/src/gen/cc/op_generator.cc
+++ b/tensorflow/java/src/gen/cc/op_generator.cc
@@ -36,20 +36,21 @@ namespace java {
namespace {
constexpr const char kLicense[] =
- "/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n"
- "\n"
- "Licensed under the Apache License, Version 2.0 (the \"License\");\n"
- "you may not use this file except in compliance with the License.\n"
- "You may obtain a copy of the License at\n"
- "\n"
- " http://www.apache.org/licenses/LICENSE-2.0\n"
- "\n"
- "Unless required by applicable law or agreed to in writing, software\n"
- "distributed under the License is distributed on an \"AS IS\" BASIS,\n"
- "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n"
- "See the License for the specific language governing permissions and\n"
- "limitations under the License.\n"
- "=======================================================================*/\n";
+ "/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n"
+ "\n"
+ "Licensed under the Apache License, Version 2.0 (the \"License\");\n"
+ "you may not use this file except in compliance with the License.\n"
+ "You may obtain a copy of the License at\n"
+ "\n"
+ " http://www.apache.org/licenses/LICENSE-2.0\n"
+ "\n"
+ "Unless required by applicable law or agreed to in writing, software\n"
+ "distributed under the License is distributed on an \"AS IS\" BASIS,\n"
+ "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n"
+ "See the License for the specific language governing permissions and\n"
+ "limitations under the License.\n"
+ "=======================================================================*/"
+ "\n";
// There are three different modes to render an op class, depending on the
// number and type of outputs it has:
diff --git a/tensorflow/java/src/gen/cc/op_generator.h b/tensorflow/java/src/gen/cc/op_generator.h
index 759d800ecf..05decd6b54 100644
--- a/tensorflow/java/src/gen/cc/op_generator.h
+++ b/tensorflow/java/src/gen/cc/op_generator.h
@@ -19,10 +19,10 @@ limitations under the License.
#include <string>
#include <vector>
-#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/platform/env.h"
#include "tensorflow/java/src/gen/cc/op_specs.h"
namespace tensorflow {
diff --git a/tensorflow/java/src/gen/cc/op_specs.cc b/tensorflow/java/src/gen/cc/op_specs.cc
index 63e99fbb04..941ab2699c 100644
--- a/tensorflow/java/src/gen/cc/op_specs.cc
+++ b/tensorflow/java/src/gen/cc/op_specs.cc
@@ -14,9 +14,9 @@ limitations under the License.
==============================================================================*/
#include <map>
-#include <vector>
#include <string>
#include <utility>
+#include <vector>
#include "re2/re2.h"
#include "tensorflow/core/framework/op.h"
@@ -50,7 +50,7 @@ class TypeResolver {
// For example, if the argument's datatype is DT_STRING, this method will
// return "java.lang.String", so the argument can become "Operand<String>"
// in the Ops API
- Type TypeOf(const OpDef_ArgDef& arg_def, bool *iterable_out);
+ Type TypeOf(const OpDef_ArgDef& arg_def, bool* iterable_out);
// Returns types of an input attribute
//
@@ -62,7 +62,7 @@ class TypeResolver {
// <java.lang.Float, float>, so the attribute can be used as a "Float" object
// in the Ops API and cast to a "float" when passing through the JNI layer.
std::pair<Type, Type> TypesOf(const OpDef_AttrDef& attr_def,
- bool *iterable_out);
+ bool* iterable_out);
// Returns true if the type of this attribute has already been resolved
bool IsAttributeVisited(const string& attr_name) {
@@ -89,8 +89,7 @@ class TypeResolver {
}
};
-Type TypeResolver::TypeOf(const OpDef_ArgDef& arg_def,
- bool* iterable_out) {
+Type TypeResolver::TypeOf(const OpDef_ArgDef& arg_def, bool* iterable_out) {
*iterable_out = false;
if (!arg_def.number_attr().empty()) {
// when number_attr is set, argument has to be a list of tensors
@@ -154,13 +153,13 @@ Type TypeResolver::TypeOf(const OpDef_ArgDef& arg_def,
} else {
LOG(FATAL) << "Cannot resolve data type of argument \"" << arg_def.name()
- << "\" in operation \"" << op_def_.name() << "\"";
+ << "\" in operation \"" << op_def_.name() << "\"";
}
return type;
}
std::pair<Type, Type> TypeResolver::TypesOf(const OpDef_AttrDef& attr_def,
- bool* iterable_out) {
+ bool* iterable_out) {
std::pair<Type, Type> types = MakeTypePair(Type::Wildcard());
*iterable_out = false;
StringPiece attr_type = attr_def.type();
@@ -185,7 +184,7 @@ std::pair<Type, Type> TypeResolver::TypesOf(const OpDef_AttrDef& attr_def,
} else if (attr_type == "tensor") {
types = MakeTypePair(Type::Class("Tensor", "org.tensorflow")
- .add_parameter(Type::Wildcard()));
+ .add_parameter(Type::Wildcard()));
} else if (attr_type == "type") {
Type type = *iterable_out ? Type::Wildcard() : NextGeneric();
@@ -196,7 +195,7 @@ std::pair<Type, Type> TypeResolver::TypesOf(const OpDef_AttrDef& attr_def,
} else {
LOG(FATAL) << "Cannot resolve data type for attribute \"" << attr_type
- << "\" in operation \"" << op_def_.name() << "\"";
+ << "\" in operation \"" << op_def_.name() << "\"";
}
visited_attrs_.insert(std::make_pair(attr_def.name(), types.first));
return types;
@@ -219,47 +218,43 @@ string SnakeToCamelCase(const string& str, bool upper = false) {
return result;
}
-bool FindAndCut(re2::StringPiece* input, const RE2& expr,
- re2::StringPiece* before_match, re2::StringPiece* ret_match = nullptr) {
- re2::StringPiece match;
- if (!expr.Match(*input, 0, input->size(), RE2::UNANCHORED, &match, 1)) {
- return false;
- }
- before_match->set(input->data(), match.begin() - input->begin());
- input->remove_prefix(match.end() - before_match->begin());
- if (ret_match != nullptr) {
- *ret_match = match;
- }
+bool FindAndCut(string* input, const RE2& expr, string* before_match,
+ string* ret_match = nullptr) {
+ string match;
+ if (!RE2::PartialMatch(*input, expr, &match)) return false;
+ *before_match = input->substr(0, input->find(match));
+ *input = input->substr(before_match->size() + match.size());
+ if (ret_match != nullptr) *ret_match = match;
return true;
}
-string ParseDocumentation(re2::StringPiece input) {
+string ParseDocumentation(const string& inp) {
std::stringstream javadoc_text;
// TODO(karllessard) This is a very minimalist utility method for converting
// markdown syntax, as found in ops descriptions, to Javadoc/html tags. Check
// for alternatives to increase the level of support for markups.
std::vector<string> markups_subexpr;
- markups_subexpr.push_back("\n+\\*\\s+"); // lists
- markups_subexpr.push_back("\n{2,}"); // paragraphs
+ markups_subexpr.push_back("\n+\\*\\s+"); // lists
+ markups_subexpr.push_back("\n{2,}"); // paragraphs
markups_subexpr.push_back("`{3,}\\s*[^\\s\n]*\\s*\n"); // code blocks
- markups_subexpr.push_back("`+"); // inlined code and code blocks
+ markups_subexpr.push_back("`+"); // inlined code and code blocks
markups_subexpr.push_back("\\*{1,2}\\b"); // text emphasis
- markups_subexpr.push_back("\\["); // hyperlinks
- const RE2 markup_expr(str_util::Join(markups_subexpr, "|"));
+ markups_subexpr.push_back("\\["); // hyperlinks
+ const RE2 markup_expr("(" + str_util::Join(markups_subexpr, "|") + ")");
bool in_list = false;
+ string input = inp;
while (true) {
- re2::StringPiece text;
- re2::StringPiece markup;
+ string text, markup;
if (!FindAndCut(&input, markup_expr, &text, &markup)) {
javadoc_text << input;
break; // end of loop
}
javadoc_text << text;
- if (markup.starts_with("\n")) {
+ if (str_util::StartsWith(markup, "\n")) {
javadoc_text << "\n";
- if (markup.contains("*")) {
+ if (str_util::StrContains(markup, "*")) {
// new list item
javadoc_text << (in_list ? "</li>\n" : "<ul>\n") << "<li>\n";
in_list = true;
@@ -267,18 +262,18 @@ string ParseDocumentation(re2::StringPiece input) {
// end of list
javadoc_text << "</li>\n</ul>\n";
in_list = false;
- } else if (!input.starts_with("```")) {
+ } else if (!str_util::StartsWith(input, "```")) {
// new paragraph (not required if a <pre> block follows)
javadoc_text << "<p>\n";
}
- } else if (markup.starts_with("```")) {
+ } else if (str_util::StartsWith(markup, "```")) {
// code blocks
- if (FindAndCut(&input, "```\\s*\n*", &text)) {
+ if (FindAndCut(&input, "(```\\s*\n*)", &text)) {
javadoc_text << "<pre>{@code\n" << text << "}</pre>\n";
} else {
javadoc_text << markup;
}
- } else if (markup.starts_with("`")) {
+ } else if (str_util::StartsWith(markup, "`")) {
// inlined code
- if (FindAndCut(&input, markup, &text)) {
+ if (FindAndCut(&input, "(" + markup + ")", &text)) {
javadoc_text << "{@code " << text << "}";
@@ -287,26 +282,28 @@ string ParseDocumentation(re2::StringPiece input) {
}
} else if (markup == "**") {
// text emphasis (strong)
- if (FindAndCut(&input, "\\b\\*{2}", &text)) {
+ if (FindAndCut(&input, "(\\b\\*{2})", &text)) {
javadoc_text << "<b>" << ParseDocumentation(text) << "</b>";
} else {
javadoc_text << markup;
}
} else if (markup == "*") {
// text emphasis (normal)
- if (FindAndCut(&input, "\\b\\*{1}", &text)) {
+ if (FindAndCut(&input, "(\\b\\*{1})", &text)) {
javadoc_text << "<i>" << ParseDocumentation(text) << "</i>";
} else {
javadoc_text << markup;
}
- } else if (markup.starts_with("[")) {
+ } else if (str_util::StartsWith(markup, "[")) {
// hyperlinks
string label;
string link;
- if (RE2::Consume(&input, "([^\\[]+)\\]\\((http.+)\\)", &label, &link)) {
+ if (RE2::PartialMatch(input, "([^\\[]+)\\]\\((http.+)\\)", &label,
+ &link) &&
+ str_util::StartsWith(input, label + "](" + link + ")")) {
+ input = input.substr(label.size() + link.size() + 3);
javadoc_text << "<a href=\"" << link << "\">"
- << ParseDocumentation(label)
- << "</a>";
+ << ParseDocumentation(label) << "</a>";
} else {
javadoc_text << markup;
}
@@ -319,57 +316,56 @@ string ParseDocumentation(re2::StringPiece input) {
}
ArgumentSpec CreateInput(const OpDef_ArgDef& input_def,
- const ApiDef::Arg& input_api_def, TypeResolver* type_resolver) {
+ const ApiDef::Arg& input_api_def,
+ TypeResolver* type_resolver) {
bool iterable = false;
Type type = type_resolver->TypeOf(input_def, &iterable);
- Type var_type = Type::Interface("Operand", "org.tensorflow")
- .add_parameter(type);
+ Type var_type =
+ Type::Interface("Operand", "org.tensorflow").add_parameter(type);
if (iterable) {
var_type = Type::IterableOf(var_type);
}
- return ArgumentSpec(input_api_def.name(),
+ return ArgumentSpec(
+ input_api_def.name(),
Variable::Create(SnakeToCamelCase(input_api_def.rename_to()), var_type),
- type,
- ParseDocumentation(input_api_def.description()),
- iterable);
+ type, ParseDocumentation(input_api_def.description()), iterable);
}
AttributeSpec CreateAttribute(const OpDef_AttrDef& attr_def,
- const ApiDef::Attr& attr_api_def, TypeResolver* type_resolver) {
+ const ApiDef::Attr& attr_api_def,
+ TypeResolver* type_resolver) {
bool iterable = false;
std::pair<Type, Type> types = type_resolver->TypesOf(attr_def, &iterable);
- Type var_type = types.first.kind() == Type::GENERIC ?
- Type::Class("Class").add_parameter(types.first) : types.first;
+ Type var_type = types.first.kind() == Type::GENERIC
+ ? Type::Class("Class").add_parameter(types.first)
+ : types.first;
if (iterable) {
var_type = Type::ListOf(var_type);
}
- return AttributeSpec(attr_api_def.name(),
+ return AttributeSpec(
+ attr_api_def.name(),
Variable::Create(SnakeToCamelCase(attr_api_def.rename_to()), var_type),
- types.first,
- types.second,
- ParseDocumentation(attr_api_def.description()),
- iterable,
- attr_api_def.has_default_value());
+ types.first, types.second, ParseDocumentation(attr_api_def.description()),
+ iterable, attr_api_def.has_default_value());
}
ArgumentSpec CreateOutput(const OpDef_ArgDef& output_def,
- const ApiDef::Arg& output_api, TypeResolver* type_resolver) {
+ const ApiDef::Arg& output_api,
+ TypeResolver* type_resolver) {
bool iterable = false;
Type type = type_resolver->TypeOf(output_def, &iterable);
- Type var_type = Type::Class("Output", "org.tensorflow")
- .add_parameter(type);
+ Type var_type = Type::Class("Output", "org.tensorflow").add_parameter(type);
if (iterable) {
var_type = Type::ListOf(var_type);
}
- return ArgumentSpec(output_api.name(),
+ return ArgumentSpec(
+ output_api.name(),
Variable::Create(SnakeToCamelCase(output_api.rename_to()), var_type),
- type,
- ParseDocumentation(output_api.description()),
- iterable);
+ type, ParseDocumentation(output_api.description()), iterable);
}
EndpointSpec CreateEndpoint(const OpDef& op_def, const ApiDef& api_def,
- const ApiDef_Endpoint& endpoint_def) {
+ const ApiDef_Endpoint& endpoint_def) {
std::vector<string> name_tokens = str_util::Split(endpoint_def.name(), ".");
string package;
string name;
@@ -377,27 +373,25 @@ EndpointSpec CreateEndpoint(const OpDef& op_def, const ApiDef& api_def,
package = name_tokens.at(0);
name = name_tokens.at(1);
} else {
- package = kDefaultEndpointPackage;
+ package = "core"; // generate unclassified ops in the 'core' package
name = name_tokens.at(0);
}
- return EndpointSpec(package,
- name,
- Javadoc::Create(ParseDocumentation(api_def.summary()))
- .details(ParseDocumentation(api_def.description())));
+ return EndpointSpec(package, name,
+ Javadoc::Create(ParseDocumentation(api_def.summary()))
+ .details(ParseDocumentation(api_def.description())));
}
} // namespace
OpSpec OpSpec::Create(const OpDef& op_def, const ApiDef& api_def) {
- OpSpec op(api_def.graph_op_name(),
- api_def.visibility() == ApiDef::HIDDEN,
- op_def.deprecation().explanation());
+ OpSpec op(api_def.graph_op_name(), api_def.visibility() == ApiDef::HIDDEN,
+ op_def.deprecation().explanation());
TypeResolver type_resolver(op_def);
for (const string& next_input_name : api_def.arg_order()) {
for (int i = 0; i < op_def.input_arg().size(); ++i) {
if (op_def.input_arg(i).name() == next_input_name) {
op.inputs_.push_back(CreateInput(op_def.input_arg(i), api_def.in_arg(i),
- &type_resolver));
+ &type_resolver));
break;
}
}
@@ -406,8 +400,8 @@ OpSpec OpSpec::Create(const OpDef& op_def, const ApiDef& api_def) {
// do not parse attributes already visited, they have probably been inferred
// before as an input argument type
if (!type_resolver.IsAttributeVisited(op_def.attr(i).name())) {
- AttributeSpec attr = CreateAttribute(op_def.attr(i), api_def.attr(i),
- &type_resolver);
+ AttributeSpec attr =
+ CreateAttribute(op_def.attr(i), api_def.attr(i), &type_resolver);
// attributes with a default value are optional
if (attr.has_default_value() && attr.type().kind() != Type::GENERIC) {
op.optional_attributes_.push_back(attr);
@@ -417,8 +411,8 @@ OpSpec OpSpec::Create(const OpDef& op_def, const ApiDef& api_def) {
}
}
for (int i = 0; i < op_def.output_arg().size(); ++i) {
- op.outputs_.push_back(CreateOutput(op_def.output_arg(i), api_def.out_arg(i),
- &type_resolver));
+ op.outputs_.push_back(
+ CreateOutput(op_def.output_arg(i), api_def.out_arg(i), &type_resolver));
}
for (const auto& endpoint_def : api_def.endpoint()) {
op.endpoints_.push_back(CreateEndpoint(op_def, api_def, endpoint_def));
diff --git a/tensorflow/java/src/gen/cc/op_specs.h b/tensorflow/java/src/gen/cc/op_specs.h
index 3b53c730df..30ecb8ce53 100644
--- a/tensorflow/java/src/gen/cc/op_specs.h
+++ b/tensorflow/java/src/gen/cc/op_specs.h
@@ -19,9 +19,9 @@ limitations under the License.
#include <string>
#include <vector>
-#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
+#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/java/src/gen/cc/java_defs.h"
namespace tensorflow {
@@ -38,9 +38,8 @@ class EndpointSpec {
// javadoc: the endpoint class documentation
// TODO(annarev): hardcode deprecated to false until deprecation is possible
EndpointSpec(const string& package, const string& name,
- const Javadoc& javadoc)
- : package_(package), name_(name), javadoc_(javadoc),
- deprecated_(false) {}
+ const Javadoc& javadoc)
+ : package_(package), name_(name), javadoc_(javadoc), deprecated_(false) {}
const string& package() const { return package_; }
const string& name() const { return name_; }
@@ -63,10 +62,13 @@ class ArgumentSpec {
// type: the tensor type of this argument
// description: a description of this argument, in javadoc
// iterable: true if this argument is a list
- ArgumentSpec(const string& op_def_name, const Variable& var,
- const Type& type, const string& description, bool iterable)
- : op_def_name_(op_def_name), var_(var), type_(type),
- description_(description), iterable_(iterable) {}
+ ArgumentSpec(const string& op_def_name, const Variable& var, const Type& type,
+ const string& description, bool iterable)
+ : op_def_name_(op_def_name),
+ var_(var),
+ type_(type),
+ description_(description),
+ iterable_(iterable) {}
const string& op_def_name() const { return op_def_name_; }
const Variable& var() const { return var_; }
@@ -94,11 +96,16 @@ class AttributeSpec {
// iterable: true if this attribute is a list
// has_default_value: true if this attribute has a default value if not set
AttributeSpec(const string& op_def_name, const Variable& var,
- const Type& type, const Type& jni_type, const string& description,
- bool iterable, bool has_default_value)
- : op_def_name_(op_def_name), var_(var), type_(type),
- description_(description), iterable_(iterable),
- jni_type_(jni_type), has_default_value_(has_default_value) {}
+ const Type& type, const Type& jni_type,
+ const string& description, bool iterable,
+ bool has_default_value)
+ : op_def_name_(op_def_name),
+ var_(var),
+ type_(type),
+ description_(description),
+ iterable_(iterable),
+ jni_type_(jni_type),
+ has_default_value_(has_default_value) {}
const string& op_def_name() const { return op_def_name_; }
const Variable& var() const { return var_; }
@@ -147,9 +154,10 @@ class OpSpec {
// hidden: true if this op should not be visible through the Graph Ops API
// deprecation_explanation: message to show if all endpoints are deprecated
explicit OpSpec(const string& graph_op_name, bool hidden,
- const string& deprecation_explanation)
- : graph_op_name_(graph_op_name), hidden_(hidden),
- deprecation_explanation_(deprecation_explanation) {}
+ const string& deprecation_explanation)
+ : graph_op_name_(graph_op_name),
+ hidden_(hidden),
+ deprecation_explanation_(deprecation_explanation) {}
const string graph_op_name_;
const bool hidden_;
diff --git a/tensorflow/java/src/gen/java/org/tensorflow/processor/OperatorProcessor.java b/tensorflow/java/src/gen/java/org/tensorflow/processor/OperatorProcessor.java
index 3524160d87..796d6a62dc 100644
--- a/tensorflow/java/src/gen/java/org/tensorflow/processor/OperatorProcessor.java
+++ b/tensorflow/java/src/gen/java/org/tensorflow/processor/OperatorProcessor.java
@@ -15,6 +15,18 @@ limitations under the License.
package org.tensorflow.processor;
+import com.google.common.base.CaseFormat;
+import com.google.common.base.Strings;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
+import com.squareup.javapoet.ClassName;
+import com.squareup.javapoet.FieldSpec;
+import com.squareup.javapoet.JavaFile;
+import com.squareup.javapoet.MethodSpec;
+import com.squareup.javapoet.ParameterSpec;
+import com.squareup.javapoet.TypeName;
+import com.squareup.javapoet.TypeSpec;
+import com.squareup.javapoet.TypeVariableName;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
@@ -23,7 +35,6 @@ import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-
import javax.annotation.processing.AbstractProcessor;
import javax.annotation.processing.Filer;
import javax.annotation.processing.Messager;
@@ -44,19 +55,6 @@ import javax.lang.model.util.ElementFilter;
import javax.lang.model.util.Elements;
import javax.tools.Diagnostic.Kind;
-import com.google.common.base.CaseFormat;
-import com.google.common.base.Strings;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Multimap;
-import com.squareup.javapoet.ClassName;
-import com.squareup.javapoet.FieldSpec;
-import com.squareup.javapoet.JavaFile;
-import com.squareup.javapoet.MethodSpec;
-import com.squareup.javapoet.ParameterSpec;
-import com.squareup.javapoet.TypeName;
-import com.squareup.javapoet.TypeSpec;
-import com.squareup.javapoet.TypeVariableName;
-
/**
* A compile-time Processor that aggregates classes annotated with {@link
* org.tensorflow.op.annotation.Operator} and generates the {@code Ops} convenience API. Please
@@ -115,10 +113,12 @@ public final class OperatorProcessor extends AbstractProcessor {
// generated our code, flag the location of each such class.
if (hasRun) {
for (Element e : annotated) {
- error(e, "The Operator processor has already processed @Operator annotated sources\n" +
- "and written out an Ops API. It cannot process additional @Operator sources.\n" +
- "One reason this can happen is if other annotation processors generate\n" +
- "new @Operator source files.");
+ error(
+ e,
+ "The Operator processor has already processed @Operator annotated sources\n"
+ + "and written out an Ops API. It cannot process additional @Operator sources.\n"
+ + "One reason this can happen is if other annotation processors generate\n"
+ + "new @Operator source files.");
}
return true;
}
@@ -146,9 +146,11 @@ public final class OperatorProcessor extends AbstractProcessor {
return Collections.singleton("org.tensorflow.op.annotation.Operator");
}
- private static final Pattern JAVADOC_TAG_PATTERN = Pattern.compile("@(?:param|return|throws|exception|see)\\s+.*");
+ private static final Pattern JAVADOC_TAG_PATTERN =
+ Pattern.compile("@(?:param|return|throws|exception|see)\\s+.*");
private static final TypeName T_OPS = ClassName.get("org.tensorflow.op", "Ops");
- private static final TypeName T_OPERATOR = ClassName.get("org.tensorflow.op.annotation", "Operator");
+ private static final TypeName T_OPERATOR =
+ ClassName.get("org.tensorflow.op.annotation", "Operator");
private static final TypeName T_SCOPE = ClassName.get("org.tensorflow.op", "Scope");
private static final TypeName T_GRAPH = ClassName.get("org.tensorflow", "Graph");
private static final TypeName T_STRING = ClassName.get(String.class);
@@ -167,20 +169,17 @@ public final class OperatorProcessor extends AbstractProcessor {
private void write(TypeSpec spec) {
try {
- JavaFile.builder("org.tensorflow.op", spec)
- .skipJavaLangImports(true)
- .build()
- .writeTo(filer);
+ JavaFile.builder("org.tensorflow.op", spec).skipJavaLangImports(true).build().writeTo(filer);
} catch (IOException e) {
throw new AssertionError(e);
}
}
private void writeApi(Multimap<String, MethodSpec> groupedMethods) {
- Map<String, ClassName> groups = new HashMap<String, ClassName>();
-
+ Map<String, ClassName> groups = new HashMap<>();
+
// Generate an API class for each group collected other than the default one (= empty string)
- for (Map.Entry<String, Collection<MethodSpec>> entry: groupedMethods.asMap().entrySet()) {
+ for (Map.Entry<String, Collection<MethodSpec>> entry : groupedMethods.asMap().entrySet()) {
if (!entry.getKey().isEmpty()) {
TypeSpec groupClass = buildGroupClass(entry.getKey(), entry.getValue());
write(groupClass);
@@ -193,12 +192,17 @@ public final class OperatorProcessor extends AbstractProcessor {
}
private boolean collectOpsMethods(
- RoundEnvironment roundEnv, Multimap<String, MethodSpec> groupedMethods, TypeElement annotation) {
+ RoundEnvironment roundEnv,
+ Multimap<String, MethodSpec> groupedMethods,
+ TypeElement annotation) {
boolean result = true;
for (Element e : roundEnv.getElementsAnnotatedWith(annotation)) {
// @Operator can only apply to types, so e must be a TypeElement.
if (!(e instanceof TypeElement)) {
- error(e, "@Operator can only be applied to classes, but this is a %s", e.getKind().toString());
+ error(
+ e,
+ "@Operator can only be applied to classes, but this is a %s",
+ e.getKind().toString());
result = false;
continue;
}
@@ -210,38 +214,42 @@ public final class OperatorProcessor extends AbstractProcessor {
}
return result;
}
-
- private void collectOpMethods(Multimap<String, MethodSpec> groupedMethods, TypeElement opClass, TypeElement annotation) {
+
+ private void collectOpMethods(
+ Multimap<String, MethodSpec> groupedMethods, TypeElement opClass, TypeElement annotation) {
AnnotationMirror am = getAnnotationMirror(opClass, annotation);
String groupName = getAnnotationElementValueAsString("group", am);
String methodName = getAnnotationElementValueAsString("name", am);
ClassName opClassName = ClassName.get(opClass);
if (Strings.isNullOrEmpty(methodName)) {
- methodName = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_CAMEL, opClassName.simpleName());
+ methodName = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_CAMEL, opClassName.simpleName());
}
- // Build a method for each @Operator found in the class path. There should be one method per operation factory called
+ // Build a method for each @Operator found in the class path. There should be one method per
+ // operation factory called
// "create", which takes in parameter a scope and, optionally, a list of arguments
for (ExecutableElement opMethod : ElementFilter.methodsIn(opClass.getEnclosedElements())) {
- if (opMethod.getModifiers().contains(Modifier.STATIC) && opMethod.getSimpleName().contentEquals("create")) {
+ if (opMethod.getModifiers().contains(Modifier.STATIC)
+ && opMethod.getSimpleName().contentEquals("create")) {
MethodSpec method = buildOpMethod(methodName, opClassName, opMethod);
groupedMethods.put(groupName, method);
}
}
}
- private MethodSpec buildOpMethod(String methodName, ClassName opClassName, ExecutableElement factoryMethod) {
+ private MethodSpec buildOpMethod(
+ String methodName, ClassName opClassName, ExecutableElement factoryMethod) {
MethodSpec.Builder builder =
MethodSpec.methodBuilder(methodName)
- .addModifiers(Modifier.PUBLIC)
- .returns(TypeName.get(factoryMethod.getReturnType()))
- .varargs(factoryMethod.isVarArgs())
- .addJavadoc("$L", buildOpMethodJavadoc(opClassName, factoryMethod));
+ .addModifiers(Modifier.PUBLIC)
+ .returns(TypeName.get(factoryMethod.getReturnType()))
+ .varargs(factoryMethod.isVarArgs())
+ .addJavadoc("$L", buildOpMethodJavadoc(opClassName, factoryMethod));
- for (TypeParameterElement tp: factoryMethod.getTypeParameters()) {
+ for (TypeParameterElement tp : factoryMethod.getTypeParameters()) {
TypeVariableName tvn = TypeVariableName.get((TypeVariable) tp.asType());
builder.addTypeVariable(tvn);
}
- for (TypeMirror thrownType: factoryMethod.getThrownTypes()) {
+ for (TypeMirror thrownType : factoryMethod.getThrownTypes()) {
builder.addException(TypeName.get(thrownType));
}
StringBuilder call = new StringBuilder("return $T.create(scope");
@@ -259,13 +267,17 @@ public final class OperatorProcessor extends AbstractProcessor {
call.append(")");
builder.addStatement(call.toString(), opClassName);
return builder.build();
- }
-
+ }
+
private String buildOpMethodJavadoc(ClassName opClassName, ExecutableElement factoryMethod) {
StringBuilder javadoc = new StringBuilder();
- javadoc.append("Adds an {@link ").append(opClassName.simpleName()).append("} operation to the graph\n\n");
+ javadoc
+ .append("Adds an {@link ")
+ .append(opClassName.simpleName())
+ .append("} operation to the graph\n\n");
- // Add all javadoc tags found in the operator factory method but the first one, which should be in all cases the
+ // Add all javadoc tags found in the operator factory method except the first one, which
+ // should in all cases be the
// 'scope' parameter that is implicitly passed by this API
Matcher tagMatcher = JAVADOC_TAG_PATTERN.matcher(elements.getDocComment(factoryMethod));
boolean firstParam = true;
@@ -277,136 +289,144 @@ public final class OperatorProcessor extends AbstractProcessor {
} else {
javadoc.append(tag).append('\n');
}
- }
+ }
javadoc.append("@see {@link ").append(opClassName).append("}\n");
return javadoc.toString();
}
-
+
private static TypeSpec buildGroupClass(String group, Collection<MethodSpec> methods) {
MethodSpec.Builder ctorBuilder =
MethodSpec.constructorBuilder()
- .addParameter(T_SCOPE, "scope")
- .addStatement("this.scope = scope");
-
+ .addParameter(T_SCOPE, "scope")
+ .addStatement("this.scope = scope");
+
TypeSpec.Builder builder =
TypeSpec.classBuilder(CaseFormat.LOWER_CAMEL.to(CaseFormat.UPPER_CAMEL, group) + "Ops")
- .addModifiers(Modifier.PUBLIC, Modifier.FINAL)
- .addJavadoc("An API for adding {@code $L} operations to a {@link $T Graph}\n\n" +
- "@see {@link $T}\n", group, T_GRAPH, T_OPS)
- .addMethods(methods)
- .addMethod(ctorBuilder.build());
+ .addModifiers(Modifier.PUBLIC, Modifier.FINAL)
+ .addJavadoc(
+ "An API for adding {@code $L} operations to a {@link $T Graph}\n\n"
+ + "@see {@link $T}\n",
+ group,
+ T_GRAPH,
+ T_OPS)
+ .addMethods(methods)
+ .addMethod(ctorBuilder.build());
builder.addField(
- FieldSpec.builder(T_SCOPE, "scope")
- .addModifiers(Modifier.PRIVATE, Modifier.FINAL)
- .build());
+ FieldSpec.builder(T_SCOPE, "scope").addModifiers(Modifier.PRIVATE, Modifier.FINAL).build());
return builder.build();
}
- private static TypeSpec buildTopClass(Map<String, ClassName> groupToClass, Collection<MethodSpec> methods) {
+ private static TypeSpec buildTopClass(
+ Map<String, ClassName> groupToClass, Collection<MethodSpec> methods) {
MethodSpec.Builder ctorBuilder =
MethodSpec.constructorBuilder()
- .addModifiers(Modifier.PRIVATE)
- .addParameter(T_SCOPE, "scope")
- .addStatement("this.scope = scope", T_SCOPE);
+ .addModifiers(Modifier.PRIVATE)
+ .addParameter(T_SCOPE, "scope")
+ .addStatement("this.scope = scope", T_SCOPE);
- for (Map.Entry<String, ClassName> entry: groupToClass.entrySet()) {
+ for (Map.Entry<String, ClassName> entry : groupToClass.entrySet()) {
ctorBuilder.addStatement("$L = new $T(scope)", entry.getKey(), entry.getValue());
}
TypeSpec.Builder opsBuilder =
TypeSpec.classBuilder("Ops")
- .addModifiers(Modifier.PUBLIC, Modifier.FINAL)
- .addJavadoc("An API for building a {@link $T} with operation wrappers\n<p>\n" +
- "Any operation wrapper found in the classpath properly annotated as an {@link $T @Operator} is exposed\n" +
- "by this API or one of its subgroup.\n<p>Example usage:\n<pre>{@code\n" +
- "try (Graph g = new Graph()) {\n" +
- " Ops ops = new Ops(g);\n" +
- " // Operations are typed classes with convenience\n" +
- " // builders in Ops.\n" +
- " Constant three = ops.constant(3);\n" +
- " // Single-result operations implement the Operand\n" +
- " // interface, so this works too.\n" +
- " Operand four = ops.constant(4);\n" +
- " // Most builders are found within a group, and accept\n" +
- " // Operand types as operands\n" +
- " Operand nine = ops.math().add(four, ops.constant(5));\n" +
- " // Multi-result operations however offer methods to\n" +
- " // select a particular result for use.\n" +
- " Operand result = \n" +
- " ops.math().add(ops.array().unique(s, a).y(), b);\n" +
- " // Optional attributes\n" +
- " ops.math().matMul(a, b, MatMul.transposeA(true));\n" +
- " // Naming operators\n" +
- " ops.withName(“foo”).constant(5); // name “foo”\n" +
- " // Names can exist in a hierarchy\n" +
- " Ops sub = ops.withSubScope(“sub”);\n" +
- " sub.withName(“bar”).constant(4); // “sub/bar”\n" +
- "}\n" +
- "}</pre>\n", T_GRAPH, T_OPERATOR)
- .addMethods(methods)
- .addMethod(ctorBuilder.build());
+ .addModifiers(Modifier.PUBLIC, Modifier.FINAL)
+ .addJavadoc(
+ "An API for building a {@link $T} with operation wrappers\n<p>\n"
+ + "Any operation wrapper found in the classpath properly annotated as an"
+ + "{@link $T @Operator} is exposed\n"
+ + "by this API or one of its subgroup.\n<p>Example usage:\n<pre>{@code\n"
+ + "try (Graph g = new Graph()) {\n"
+ + " Ops ops = new Ops(g);\n"
+ + " // Operations are typed classes with convenience\n"
+ + " // builders in Ops.\n"
+ + " Constant three = ops.constant(3);\n"
+ + " // Single-result operations implement the Operand\n"
+ + " // interface, so this works too.\n"
+ + " Operand four = ops.constant(4);\n"
+ + " // Most builders are found within a group, and accept\n"
+ + " // Operand types as operands\n"
+ + " Operand nine = ops.math().add(four, ops.constant(5));\n"
+ + " // Multi-result operations however offer methods to\n"
+ + " // select a particular result for use.\n"
+ + " Operand result = \n"
+ + " ops.math().add(ops.array().unique(s, a).y(), b);\n"
+ + " // Optional attributes\n"
+ + " ops.math().matMul(a, b, MatMul.transposeA(true));\n"
+ + " // Naming operators\n"
+ + " ops.withName(“foo”).constant(5); // name “foo”\n"
+ + " // Names can exist in a hierarchy\n"
+ + " Ops sub = ops.withSubScope(“sub”);\n"
+ + " sub.withName(“bar”).constant(4); // “sub/bar”\n"
+ + "}\n"
+ + "}</pre>\n",
+ T_GRAPH,
+ T_OPERATOR)
+ .addMethods(methods)
+ .addMethod(ctorBuilder.build());
opsBuilder.addMethod(
MethodSpec.methodBuilder("withSubScope")
- .addModifiers(Modifier.PUBLIC)
- .addParameter(T_STRING, "childScopeName")
- .returns(T_OPS)
- .addStatement("return new $T(scope.withSubScope(childScopeName))", T_OPS)
- .addJavadoc(
- "Returns an API that adds operations to the graph with the provided name prefix.\n\n" +
- "@see {@link $T#withSubScope(String)}\n", T_SCOPE)
- .build());
+ .addModifiers(Modifier.PUBLIC)
+ .addParameter(T_STRING, "childScopeName")
+ .returns(T_OPS)
+ .addStatement("return new $T(scope.withSubScope(childScopeName))", T_OPS)
+ .addJavadoc(
+ "Returns an API that adds operations to the graph with the provided name prefix.\n"
+ + "\n@see {@link $T#withSubScope(String)}\n",
+ T_SCOPE)
+ .build());
opsBuilder.addMethod(
MethodSpec.methodBuilder("withName")
- .addModifiers(Modifier.PUBLIC)
- .addParameter(T_STRING, "opName")
- .returns(T_OPS)
- .addStatement("return new Ops(scope.withName(opName))")
- .addJavadoc(
- "Returns an API that uses the provided name for an op.\n\n" +
- "@see {@link $T#withName(String)}\n", T_SCOPE)
- .build());
+ .addModifiers(Modifier.PUBLIC)
+ .addParameter(T_STRING, "opName")
+ .returns(T_OPS)
+ .addStatement("return new Ops(scope.withName(opName))")
+ .addJavadoc(
+ "Returns an API that uses the provided name for an op.\n\n"
+ + "@see {@link $T#withName(String)}\n",
+ T_SCOPE)
+ .build());
opsBuilder.addField(
- FieldSpec.builder(T_SCOPE, "scope")
- .addModifiers(Modifier.PRIVATE, Modifier.FINAL)
- .build());
+ FieldSpec.builder(T_SCOPE, "scope").addModifiers(Modifier.PRIVATE, Modifier.FINAL).build());
opsBuilder.addMethod(
MethodSpec.methodBuilder("scope")
- .addModifiers(Modifier.PUBLIC, Modifier.FINAL)
- .returns(T_SCOPE)
- .addStatement("return scope")
- .addJavadoc("Returns the current {@link $T scope} of this API\n", T_SCOPE)
- .build());
+ .addModifiers(Modifier.PUBLIC, Modifier.FINAL)
+ .returns(T_SCOPE)
+ .addStatement("return scope")
+ .addJavadoc("Returns the current {@link $T scope} of this API\n", T_SCOPE)
+ .build());
- for (Map.Entry<String, ClassName> entry: groupToClass.entrySet()) {
+ for (Map.Entry<String, ClassName> entry : groupToClass.entrySet()) {
opsBuilder.addField(
FieldSpec.builder(entry.getValue(), entry.getKey())
- .addModifiers(Modifier.PUBLIC, Modifier.FINAL)
- .build());
-
+ .addModifiers(Modifier.PUBLIC, Modifier.FINAL)
+ .build());
+
opsBuilder.addMethod(
MethodSpec.methodBuilder(entry.getKey())
- .addModifiers(Modifier.PUBLIC, Modifier.FINAL)
- .returns(entry.getValue())
- .addStatement("return $L", entry.getKey())
- .addJavadoc("Returns an API for adding {@code $L} operations to the graph\n", entry.getKey())
- .build());
+ .addModifiers(Modifier.PUBLIC, Modifier.FINAL)
+ .returns(entry.getValue())
+ .addStatement("return $L", entry.getKey())
+ .addJavadoc(
+ "Returns an API for adding {@code $L} operations to the graph\n", entry.getKey())
+ .build());
}
opsBuilder.addMethod(
MethodSpec.methodBuilder("create")
- .addModifiers(Modifier.PUBLIC, Modifier.STATIC)
- .addParameter(T_GRAPH, "graph")
- .returns(T_OPS)
- .addStatement("return new Ops(new $T(graph))", T_SCOPE)
- .addJavadoc("Creates an API for adding operations to the provided {@code graph}\n")
- .build());
+ .addModifiers(Modifier.PUBLIC, Modifier.STATIC)
+ .addParameter(T_GRAPH, "graph")
+ .returns(T_OPS)
+ .addStatement("return new Ops(new $T(graph))", T_SCOPE)
+ .addJavadoc("Creates an API for adding operations to the provided {@code graph}\n")
+ .build());
return opsBuilder.build();
}
@@ -417,12 +437,16 @@ public final class OperatorProcessor extends AbstractProcessor {
return am;
}
}
- throw new IllegalArgumentException("Annotation " + annotation.getSimpleName() + " not present on element "
- + element.getSimpleName());
+ throw new IllegalArgumentException(
+ "Annotation "
+ + annotation.getSimpleName()
+ + " not present on element "
+ + element.getSimpleName());
}
-
+
private static String getAnnotationElementValueAsString(String elementName, AnnotationMirror am) {
- for (Map.Entry<? extends ExecutableElement, ? extends AnnotationValue> entry : am.getElementValues().entrySet()) {
+ for (Map.Entry<? extends ExecutableElement, ? extends AnnotationValue> entry :
+ am.getElementValues().entrySet()) {
if (entry.getKey().getSimpleName().contentEquals(elementName)) {
return entry.getValue().getValue().toString();
}
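For orientation on what this processor consumes: a minimal, hypothetical @Operator-annotated wrapper (names invented here; real wrappers are generated under org.tensorflow.op.core). The processor collects classes like this and emits one convenience method in the generated Ops API per static "create" factory whose first parameter is a Scope:

import org.tensorflow.Operand;
import org.tensorflow.op.Scope;
import org.tensorflow.op.annotation.Operator;

// Hypothetical wrapper: @Operator marks it for collection; group = "math"
// routes the generated convenience method to Ops.math().
@Operator(group = "math")
public final class MyOp {
  /**
   * Factory picked up by the processor.
   *
   * @param scope current scope (implicitly supplied by the generated Ops API)
   * @param x the operand to transform
   */
  public static MyOp create(Scope scope, Operand<Float> x) {
    return new MyOp(); // real wrappers build the underlying graph op here
  }

  private MyOp() {}
}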
diff --git a/tensorflow/java/src/main/java/org/tensorflow/Graph.java b/tensorflow/java/src/main/java/org/tensorflow/Graph.java
index d4fd3db5f7..7d19696749 100644
--- a/tensorflow/java/src/main/java/org/tensorflow/Graph.java
+++ b/tensorflow/java/src/main/java/org/tensorflow/Graph.java
@@ -143,6 +143,82 @@ public final class Graph implements AutoCloseable {
}
}
+ /**
+ * Adds operations to compute the partial derivatives of the sum of {@code y}s w.r.t. {@code x}s,
+ * i.e., {@code d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...}
+ * <p>
+ * {@code dx} are used as initial gradients (which represent the symbolic partial derivatives of some loss function
+ * {@code L} w.r.t. {@code y}). {@code dx} must be null or have the same size as {@code y}.
+ * <p>
+ * If {@code dx} is null, the implementation will use {@link org.tensorflow.op.core.OnesLike OnesLike} values for all
+ * shapes in {@code y}.
+ *
+ * @param y output of the function to derive
+ * @param x inputs of the function for which partial derivatives are computed
+ * @param dx if not null, the partial derivatives of some loss function {@code L} w.r.t. {@code y}
+ * @return the partial derivatives {@code dy} with the size of {@code x}
+ */
+ public Output<?>[] addGradients(Output<?>[] y, Output<?>[] x, Output<?>[] dx) {
+ Output<?>[] dy = new Output<?>[x.length];
+ final long[] yHandles = new long[y.length];
+ final int[] yIndices = new int[y.length];
+ final long[] xHandles = new long[x.length];
+ final int[] xIndices = new int[x.length];
+ long[] dxHandles = null;
+ int[] dxIndices = null;
+
+ try (Reference ref = ref()) {
+ for (int i = 0; i < y.length; ++i) {
+ yHandles[i] = y[i].op().getUnsafeNativeHandle();
+ yIndices[i] = y[i].index();
+ }
+ for (int i = 0; i < x.length; ++i) {
+ xHandles[i] = x[i].op().getUnsafeNativeHandle();
+ xIndices[i] = x[i].index();
+ }
+ if (dx != null && dx.length > 0) {
+ dxHandles = new long[dx.length];
+ dxIndices = new int[dx.length];
+
+ for (int i = 0; i < dx.length; ++i) {
+ dxHandles[i] = dx[i].op().getUnsafeNativeHandle();
+ dxIndices[i] = dx[i].index();
+ }
+ }
+ // Gradient outputs are returned in two contiguous arrays concatenated into one. The first holds the native handles
+ // of the gradient operations while the second holds the index of their output
+ // e.g. given xHandles = [x0Handle, x1Handle, ...] and xIndices = [x0Index, x1Index, ..], we obtain
+ // dy = [dy0Handle, dy1Handle, ..., dy0Index, dy1Index, ...]
+ long[] dyHandlesAndIndices =
+ addGradients(ref.nativeHandle(), yHandles, yIndices, xHandles, xIndices, dxHandles, dxIndices);
+ int ndy = dyHandlesAndIndices.length >> 1;
+ if (ndy != dy.length) {
+ throw new IllegalStateException(String.valueOf(ndy) + " gradients were added to the graph when " + dy.length
+ + " were expected");
+ }
+ for (int i = 0, j = ndy; i < ndy; ++i, ++j) {
+ Operation op = new Operation(this, dyHandlesAndIndices[i]);
+ dy[i] = new Output<>(op, (int) dyHandlesAndIndices[j]);
+ }
+ }
+ return dy;
+ }
+
+ /**
+ * Adds operations to compute the partial derivatives of the sum of {@code y}s w.r.t. {@code x}s,
+ * i.e., {@code dy/dx_1, dy/dx_2...}
+ * <p>
+ * This is a simplified version of {@link #addGradients(Output[], Output[], Output[])} where {@code y} is
+ * a single output and {@code dx} is null.
+ *
+ * @param y output of the function to derive
+ * @param x inputs of the function for which partial derivatives are computed
+ * @return the partial derivatives {@code dy} with the size of {@code x}
+ */
+ public Output<?>[] addGradients(Output<?> y, Output<?>[] x) {
+ return addGradients(new Output<?>[]{y}, x, null);
+ }
+
private final Object nativeHandleLock = new Object();
private long nativeHandle;
private int refcount = 0;
@@ -254,6 +330,9 @@ public final class Graph implements AutoCloseable {
private static native byte[] toGraphDef(long handle);
+ private static native long[] addGradients(long handle, long[] inputHandles, int[] inputIndices,
+ long[] outputHandles, int[] outputIndices, long[] gradInputHandles, int[] gradInputIndices);
+
static {
TensorFlow.init();
}
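A minimal sketch (not part of the patch) of driving the new addGradients entry points, assuming the org.tensorflow classes above and the stock "Placeholder" and "Square" kernels:

import org.tensorflow.DataType;
import org.tensorflow.Graph;
import org.tensorflow.Output;

public final class AddGradientsSketch {
  public static void main(String[] args) {
    try (Graph g = new Graph()) {
      // Build y = x * x directly with opBuilder, for brevity.
      Output<Float> x = g.opBuilder("Placeholder", "x")
          .setAttr("dtype", DataType.FLOAT)
          .build().output(0);
      Output<Float> y = g.opBuilder("Square", "y")
          .addInput(x)
          .build().output(0);
      // Single-y overload added above; dx is null, so OnesLike seeds backprop.
      Output<?>[] dydx = g.addGradients(y, new Output<?>[] {x});
      System.out.println("added " + dydx.length + " gradient output(s)");
    }
  }
}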
diff --git a/tensorflow/java/src/main/java/org/tensorflow/Input.java b/tensorflow/java/src/main/java/org/tensorflow/Input.java
new file mode 100644
index 0000000000..13bc463e7d
--- /dev/null
+++ b/tensorflow/java/src/main/java/org/tensorflow/Input.java
@@ -0,0 +1,48 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+package org.tensorflow;
+
+/**
+ * Interface implemented by operands of a TensorFlow operation.
+ *
+ * <p>Example usage:
+ *
+ * <pre>{@code
+ * // The "decodeJpeg" operation can be used as input to the "cast" operation
+ * Input decodeJpeg = ops.image().decodeJpeg(...);
+ * ops.math().cast(decodeJpeg, DataType.FLOAT);
+ *
+ * // The output "y" of the "unique" operation can be used as input to the "cast" operation
+ * Output y = ops.array().unique(...).y();
+ * ops.math().cast(y, DataType.FLOAT);
+ *
+ * // The "split" operation can be used as input list to the "concat" operation
+ * Iterable<? extends Input> split = ops.array().split(...);
+ * ops.array().concat(0, split);
+ * }</pre>
+ */
+public interface Input<T> {
+
+ /**
+ * Returns the symbolic handle of a tensor.
+ *
+ * <p>Inputs to TensorFlow operations are outputs of another TensorFlow operation. This method is
+ * used to obtain a symbolic handle that represents the computation of the input.
+ *
+ * @see OperationBuilder#addInput(Output)
+ */
+ Output<T> asOutput();
+}
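As a complement to the interface docs above: a hedged sketch of a class implementing the new Input<T> interface (hypothetical wrapper; only asOutput() is required):

import org.tensorflow.Input;
import org.tensorflow.Output;

// Hypothetical single-result wrapper: exposing its one Output<T> through
// asOutput() lets instances be passed wherever an operand is expected.
final class SquareResult<T> implements Input<T> {
  private final Output<T> output;

  SquareResult(Output<T> output) {
    this.output = output;
  }

  @Override
  public Output<T> asOutput() {
    return output;
  }
}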
diff --git a/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java b/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java
index c8b9126f03..49594e6b47 100644
--- a/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java
+++ b/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java
@@ -25,18 +25,86 @@ package org.tensorflow;
* protocol buffer</a>).
*/
public class SavedModelBundle implements AutoCloseable {
+ /** Options for loading a SavedModel. */
+ public static final class Loader {
+ /** Load a <code>SavedModelBundle</code> with the configured options. */
+ public SavedModelBundle load() {
+ return SavedModelBundle.load(exportDir, tags, configProto, runOptions);
+ }
+
+ /**
+ * Sets options to use when executing model initialization operations.
+ *
+ * @param options Serialized <a
+ * href="https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto">RunOptions
+ * protocol buffer</a>.
+ */
+ public Loader withRunOptions(byte[] options) {
+ this.runOptions = options;
+ return this;
+ }
+
+ /**
+ * Sets the configuration of the <code>Session</code> object created when loading the model.
+ *
+ * @param configProto Serialized <a
+ * href="https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto">ConfigProto
+ * protocol buffer</a>.
+ */
+ public Loader withConfigProto(byte[] configProto) {
+ this.configProto = configProto;
+ return this;
+ }
+
+ /**
+ * Sets the set of tags that identify the specific graph in the saved model to load.
+ *
+ * @param tags the tags identifying the specific MetaGraphDef to load.
+ */
+ public Loader withTags(String... tags) {
+ this.tags = tags;
+ return this;
+ }
+
+ private Loader(String exportDir) {
+ this.exportDir = exportDir;
+ }
+
+ private String exportDir = null;
+ private String[] tags = null;
+ private byte[] configProto = null;
+ private byte[] runOptions = null;
+ }
/**
* Load a saved model from an export directory. The model that is being loaded should be created
* using the <a href="https://www.tensorflow.org/api_docs/python/tf/saved_model">Saved Model
* API</a>.
*
+ * <p>This method is a shorthand for:
+ *
+ * <pre>{@code
+ * SavedModelBundle.loader(exportDir).withTags(tags).load();
+ * }</pre>
+ *
* @param exportDir the directory path containing a saved model.
* @param tags the tags identifying the specific metagraphdef to load.
* @return a bundle containing the graph and associated session.
*/
public static SavedModelBundle load(String exportDir, String... tags) {
- return load(exportDir, tags, null);
+ return loader(exportDir).withTags(tags).load();
+ }
+
+ /**
+ * Load a saved model.
+ *
+ * <p>Returns a <code>Loader</code> object that can set configuration options before actually
+ * loading the model.
+ *
+ * @param exportDir the directory path containing a saved model.
+ */
+ public static Loader loader(String exportDir) {
+ return new Loader(exportDir);
}
/**
@@ -95,7 +163,8 @@ public class SavedModelBundle implements AutoCloseable {
return new SavedModelBundle(graph, session, metaGraphDef);
}
- private static native SavedModelBundle load(String exportDir, String[] tags, byte[] runOptions);
+ private static native SavedModelBundle load(
+ String exportDir, String[] tags, byte[] config, byte[] runOptions);
static {
TensorFlow.init();
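A short sketch of the new fluent loader next to the existing shorthand (the path, tag, and config bytes here are placeholders, not from the patch):

import org.tensorflow.SavedModelBundle;

public final class LoaderSketch {
  public static void main(String[] args) {
    // Existing shorthand, unchanged behavior:
    try (SavedModelBundle b1 = SavedModelBundle.load("/tmp/model", "serve")) {
      System.out.println("graph loaded: " + (b1.graph() != null));
    }
    // Fluent form added above, for when a ConfigProto or RunOptions is needed:
    byte[] config = new byte[0]; // placeholder for a serialized ConfigProto
    try (SavedModelBundle b2 = SavedModelBundle.loader("/tmp/model")
            .withTags("serve")
            .withConfigProto(config)
            .load()) {
      System.out.println("loaded with custom session config");
    }
  }
}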
diff --git a/tensorflow/java/src/main/java/org/tensorflow/op/core/Gradients.java b/tensorflow/java/src/main/java/org/tensorflow/op/core/Gradients.java
new file mode 100644
index 0000000000..f4671c8af9
--- /dev/null
+++ b/tensorflow/java/src/main/java/org/tensorflow/op/core/Gradients.java
@@ -0,0 +1,153 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+package org.tensorflow.op.core;
+
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+import org.tensorflow.Operand;
+import org.tensorflow.Output;
+import org.tensorflow.op.Op;
+import org.tensorflow.op.Operands;
+import org.tensorflow.op.Scope;
+import org.tensorflow.op.annotation.Operator;
+
+/**
+ * Adds operations to compute the partial derivatives of the sum of {@code y}s w.r.t. {@code x}s,
+ * i.e., {@code d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...}
+ * <p>
+ * If {@code Options.dx()} values are set, they are used as the initial symbolic partial derivatives of some loss
+ * function {@code L} w.r.t. {@code y}. {@code Options.dx()} must have the same size as {@code y}.
+ * <p>
+ * If {@code Options.dx()} is not set, the implementation will use {@code OnesLike} values for all
+ * shapes in {@code y}.
+ * <p>
+ * The partial derivatives are returned in output {@code dy}, with the size of {@code x}.
+ * <p>
+ * Example of usage:
+ * <pre>{@code
+ * Gradients gradients = Gradients.create(scope, Arrays.asList(loss), Arrays.asList(w, b));
+ *
+ * Constant<Float> alpha = ops.constant(1.0f, Float.class);
+ * ApplyGradientDescent.create(scope, w, alpha, gradients.<Float>dy(0));
+ * ApplyGradientDescent.create(scope, b, alpha, gradients.<Float>dy(1));
+ * }</pre>
+ */
+@Operator
+public class Gradients implements Op, Iterable<Operand<?>> {
+
+ /**
+ * Optional attributes for {@link Gradients}
+ */
+ public static class Options {
+
+ /**
+ * @param dx partial derivatives of some loss function {@code L} w.r.t. {@code y}
+ * @return this option builder
+ */
+ public Options dx(Iterable<Operand<?>> dx) {
+ this.dx = dx;
+ return this;
+ }
+
+ private Iterable<Operand<?>> dx;
+
+ private Options() {
+ }
+ }
+
+ /**
+ * Adds gradients computation ops to the graph according to scope.
+ *
+ * @param scope current graph scope
+ * @param y outputs of the function to derive
+ * @param x inputs of the function for which partial derivatives are computed
+ * @param options carries optional attributes values
+ * @return a new instance of {@code Gradients}
+ */
+ public static Gradients create(Scope scope, Iterable<Operand<?>> y, Iterable<Operand<?>> x, Options... options) {
+ Output<?>[] dx = null;
+ if (options != null) {
+ for (Options opts : options) {
+ if (opts.dx != null) {
+ dx = Operands.asOutputs(opts.dx);
+ }
+ }
+ }
+ Output<?>[] gradOutputs = scope.graph().addGradients(Operands.asOutputs(y), Operands.asOutputs(x), dx);
+ return new Gradients(Arrays.asList(gradOutputs));
+ }
+
+ /**
+ * Adds gradients computation ops to the graph according to scope.
+ *
+ * This is a simplified version of {@link #create(Scope, Iterable, Iterable, Options...)} where {@code y} is
+ * a single output.
+ *
+ * @param scope current graph scope
+ * @param y output of the function to derive
+ * @param x inputs of the function for which partial derivatives are computed
+ * @param options carries optional attributes values
+ * @return a new instance of {@code Gradients}
+ */
+ @SuppressWarnings({"unchecked", "rawtypes"})
+ public static Gradients create(Scope scope, Operand<?> y, Iterable<Operand<?>> x, Options... options) {
+ return create(scope, (Iterable) Arrays.asList(y), x, options);
+ }
+
+ /**
+ * @param dx partial derivatives of some loss function {@code L} w.r.t. {@code y}
+ * @return builder to add more options to this operation
+ */
+ public static Options dx(Iterable<Operand<?>> dx) {
+ return new Options().dx(dx);
+ }
+
+ @Override
+ @SuppressWarnings({"rawtypes", "unchecked"})
+ public Iterator<Operand<?>> iterator() {
+ return (Iterator) dy.iterator();
+ }
+
+ /**
+ * Partial derivatives of {@code y}s w.r.t. {@code x}s, with the size of {@code x}
+ */
+ public List<Output<?>> dy() {
+ return dy;
+ }
+
+ /**
+ * Returns a symbolic handle to one of the gradient operation outputs
+ * <p>
+ * Warning: Does not check that the type of the tensor matches T. It is recommended to call
+ * this method with an explicit type parameter rather than letting it be inferred, e.g. {@code
+ * gradients.<Integer>dy(0)}
+ *
+ * @param <T> The expected element type of the tensors produced by this output.
+ * @param index The index of the output among the gradients added by this operation
+ */
+ @SuppressWarnings("unchecked")
+ public <T> Output<T> dy(int index) {
+ return (Output<T>) dy.get(index);
+ }
+
+ private List<Output<?>> dy;
+
+ private Gradients(List<Output<?>> dy) {
+ this.dy = dy;
+ }
+}
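The class javadoc above covers the common case; as a complement, a hedged sketch of seeding backprop through the optional dx attribute (loss, w, b, and initialGrad are placeholders for operands built elsewhere in a surrounding graph):

import java.util.Arrays;

import org.tensorflow.Operand;
import org.tensorflow.op.Scope;
import org.tensorflow.op.core.Gradients;

public final class GradientsDxSketch {
  // Sketch only: dx replaces the OnesLike default and must have the same size as y.
  static Gradients withCustomDx(
      Scope scope, Operand<?> loss, Operand<?> w, Operand<?> b, Operand<?> initialGrad) {
    return Gradients.create(
        scope,
        Arrays.<Operand<?>>asList(loss),
        Arrays.<Operand<?>>asList(w, b),
        Gradients.dx(Arrays.<Operand<?>>asList(initialGrad)));
  }
}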
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFBool.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFBool.java
new file mode 100644
index 0000000000..ab34f6aa12
--- /dev/null
+++ b/tensorflow/java/src/main/java/org/tensorflow/types/TFBool.java
@@ -0,0 +1,30 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// GENERATED FILE. To update, edit tftypes.pl instead.
+
+package org.tensorflow.types;
+
+import org.tensorflow.DataType;
+
+/** Represents a boolean. */
+public class TFBool implements TFType {
+ private TFBool() {}
+ static {
+ Types.typeCodes.put(TFBool.class, DataType.BOOL);
+ }
+ static {
+ Types.scalars.put(TFBool.class, false);
+ }
+}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFDouble.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFDouble.java
new file mode 100644
index 0000000000..49e5d9f2f3
--- /dev/null
+++ b/tensorflow/java/src/main/java/org/tensorflow/types/TFDouble.java
@@ -0,0 +1,30 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// GENERATED FILE. To update, edit tftypes.pl instead.
+
+package org.tensorflow.types;
+
+import org.tensorflow.DataType;
+
+/** Represents a 64-bit double precision floating point number. */
+public class TFDouble implements TFType {
+ private TFDouble() {}
+ static {
+ Types.typeCodes.put(TFDouble.class, DataType.DOUBLE);
+ }
+ static {
+ Types.scalars.put(TFDouble.class, 0.0);
+ }
+}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFFloat.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFFloat.java
new file mode 100644
index 0000000000..8426ee41f0
--- /dev/null
+++ b/tensorflow/java/src/main/java/org/tensorflow/types/TFFloat.java
@@ -0,0 +1,30 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// GENERATED FILE. To update, edit tftypes.pl instead.
+
+package org.tensorflow.types;
+
+import org.tensorflow.DataType;
+
+/** Represents a 32-bit single precision floating point number. */
+public class TFFloat implements TFType {
+ private TFFloat() {}
+ static {
+ Types.typeCodes.put(TFFloat.class, DataType.FLOAT);
+ }
+ static {
+ Types.scalars.put(TFFloat.class, 0f);
+ }
+}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFInt32.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFInt32.java
new file mode 100644
index 0000000000..3947b6ad09
--- /dev/null
+++ b/tensorflow/java/src/main/java/org/tensorflow/types/TFInt32.java
@@ -0,0 +1,30 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// GENERATED FILE. To update, edit tftypes.pl instead.
+
+package org.tensorflow.types;
+
+import org.tensorflow.DataType;
+
+/** Represents a 32-bit signed integer. */
+public class TFInt32 implements TFType {
+ private TFInt32() {}
+ static {
+ Types.typeCodes.put(TFInt32.class, DataType.INT32);
+ }
+ static {
+ Types.scalars.put(TFInt32.class, 0);
+ }
+}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFInt64.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFInt64.java
new file mode 100644
index 0000000000..ccdded8693
--- /dev/null
+++ b/tensorflow/java/src/main/java/org/tensorflow/types/TFInt64.java
@@ -0,0 +1,30 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// GENERATED FILE. To update, edit tftypes.pl instead.
+
+package org.tensorflow.types;
+
+import org.tensorflow.DataType;
+
+/** Represents a 64-bit signed integer. */
+public class TFInt64 implements TFType {
+ private TFInt64() {}
+ static {
+ Types.typeCodes.put(TFInt64.class, DataType.INT64);
+ }
+ static {
+ Types.scalars.put(TFInt64.class, 0L);
+ }
+}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFString.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFString.java
new file mode 100644
index 0000000000..e7327e8c57
--- /dev/null
+++ b/tensorflow/java/src/main/java/org/tensorflow/types/TFString.java
@@ -0,0 +1,27 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// GENERATED FILE. To update, edit tftypes.pl instead.
+
+package org.tensorflow.types;
+
+import org.tensorflow.DataType;
+
+/** Represents an arbitrary sequence of bytes. */
+public class TFString implements TFType {
+ private TFString() {}
+ static {
+ Types.typeCodes.put(TFString.class, DataType.STRING);
+ }
+}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFType.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFType.java
new file mode 100644
index 0000000000..562953ac9d
--- /dev/null
+++ b/tensorflow/java/src/main/java/org/tensorflow/types/TFType.java
@@ -0,0 +1,20 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+package org.tensorflow.types;
+
+/**
+ * A marker interface for classes representing TensorFlow types.
+ */
+public interface TFType {}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFUInt8.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFUInt8.java
new file mode 100644
index 0000000000..d7305ca5a8
--- /dev/null
+++ b/tensorflow/java/src/main/java/org/tensorflow/types/TFUInt8.java
@@ -0,0 +1,30 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// GENERATED FILE. To update, edit tftypes.pl instead.
+
+package org.tensorflow.types;
+
+import org.tensorflow.DataType;
+
+/** Represents an 8-bit unsigned integer. */
+public class TFUInt8 implements TFType {
+ private TFUInt8() {}
+ static {
+ Types.typeCodes.put(TFUInt8.class, DataType.UINT8);
+ }
+ static {
+ Types.scalars.put(TFUInt8.class, (byte)0);
+ }
+}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/Types.java b/tensorflow/java/src/main/java/org/tensorflow/types/Types.java
new file mode 100644
index 0000000000..976cd9fd34
--- /dev/null
+++ b/tensorflow/java/src/main/java/org/tensorflow/types/Types.java
@@ -0,0 +1,52 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+package org.tensorflow.types;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.tensorflow.DataType;
+
+/**
+ * Utility class for managing the representation of TensorFlow types as Java
+ * types. For each TensorFlow type (e.g., int32), there is a corresponding Java
+ * type (e.g., TFInt32) that represents it at compile time and a corresponding
+ * class object (e.g., TFInt32.class) that represents it at run time. There is
+ * also an enumeration value in DataType that can be used to represent the
+ * type, though that should rarely be required.
+ */
+public class Types {
+
+ private Types() {} // not instantiable
+
+ static final Map<Class<?>, DataType> typeCodes = new HashMap<>();
+
+ /** Returns the DataType value corresponding to a TensorFlow type class. */
+ public static DataType dataType(Class<? extends TFType> c) {
+ DataType dtype = typeCodes.get(c);
+ if (dtype == null) {
+ throw new IllegalArgumentException("" + c + " is not a TensorFlow type.");
+ }
+ return dtype;
+ }
+
+ static final Map<Class<?>, Object> scalars = new HashMap<>();
+
+ /** Returns the zero value of type described by {@code c}, or null if
+ * the type (e.g., string) is not numeric and therefore has no zero value.
+ */
+ public static Object zeroValue(Class<? extends TFType> c) {
+ return scalars.get(c);
+ }
+}
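Editor's note: a minimal usage sketch of the registries above (not part of the patch; `TypesDemo` is a hypothetical class). One subtlety worth flagging: registration happens in static initializer blocks, so a `TF*` class must actually be initialized before `Types` can resolve it, and a bare class literal such as `TFInt32.class` does not trigger initialization on its own.

```java
import org.tensorflow.DataType;
import org.tensorflow.types.TFInt32;
import org.tensorflow.types.TFString;
import org.tensorflow.types.Types;

public class TypesDemo {
  public static void main(String[] args) throws ClassNotFoundException {
    // Force class initialization so the static registration blocks run.
    Class.forName("org.tensorflow.types.TFInt32");
    Class.forName("org.tensorflow.types.TFString");
    DataType dt = Types.dataType(TFInt32.class);   // DataType.INT32
    Object zero = Types.zeroValue(TFInt32.class);  // Integer 0
    Object none = Types.zeroValue(TFString.class); // null: string has no zero
    System.out.println(dt + " " + zero + " " + none);
  }
}
```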
diff --git a/tensorflow/java/src/main/native/graph_jni.cc b/tensorflow/java/src/main/native/graph_jni.cc
index 0fef155275..dac6a345e9 100644
--- a/tensorflow/java/src/main/native/graph_jni.cc
+++ b/tensorflow/java/src/main/native/graph_jni.cc
@@ -16,7 +16,9 @@ limitations under the License.
#include "tensorflow/java/src/main/native/graph_jni.h"
#include <limits>
+#include <memory>
#include "tensorflow/c/c_api.h"
+#include "tensorflow/java/src/main/native/utils_jni.h"
#include "tensorflow/java/src/main/native/exception_jni.h"
namespace {
@@ -130,3 +132,55 @@ Java_org_tensorflow_Graph_toGraphDef(JNIEnv* env, jclass clazz, jlong handle) {
TF_DeleteBuffer(buf);
return ret;
}
+
+JNIEXPORT jlongArray JNICALL
+Java_org_tensorflow_Graph_addGradients(JNIEnv* env, jclass clazz, jlong handle,
+ jlongArray y_handles, jintArray y_indices,
+ jlongArray x_handles, jintArray x_indices,
+ jlongArray dx_handles, jintArray dx_indices) {
+
+ TF_Graph* g = requireHandle(env, handle);
+ if (g == nullptr) return nullptr;
+
+ const jint ny = env->GetArrayLength(y_handles);
+ const jint nx = env->GetArrayLength(x_handles);
+
+ std::unique_ptr<TF_Output[]> y(new TF_Output[ny]);
+ std::unique_ptr<TF_Output[]> x(new TF_Output[nx]);
+ std::unique_ptr<TF_Output[]> dx(nullptr);
+ std::unique_ptr<TF_Output[]> dy(new TF_Output[nx]);
+
+ resolveOutputs(env, "y", y_handles, y_indices, y.get(), ny);
+ resolveOutputs(env, "x", x_handles, x_indices, x.get(), nx);
+ if (dx_handles != nullptr) {
+ if (env->GetArrayLength(dx_handles) != ny) {
+ throwException(env, kIllegalArgumentException,
+ "expected %d, got %d dx handles", ny,
+ env->GetArrayLength(dx_handles));
+ }
+ dx.reset(new TF_Output[ny]);
+ resolveOutputs(env, "dx", dx_handles, dx_indices, dx.get(), ny);
+ }
+ if (env->ExceptionCheck()) return nullptr;
+
+ TF_Status* status = TF_NewStatus();
+ TF_AddGradients(g, y.get(), ny, x.get(), nx, dx.get(), status, dy.get());
+
+ if (!throwExceptionIfNotOK(env, status)) {
+ TF_DeleteStatus(status);
+ return nullptr;
+ }
+ TF_DeleteStatus(status);
+
+  // The returned array packs op handles (first half) and output indices
+  // (second half).
+ jlongArray dy_handles_and_indices = env->NewLongArray(nx << 1);
+ jlong* dy_elems = env->GetLongArrayElements(dy_handles_and_indices, nullptr);
+ for (int i = 0, j = nx; i < nx; ++i, ++j) {
+ TF_Output dy_output = dy.get()[i];
+ dy_elems[i] = reinterpret_cast<jlong>(dy_output.oper);
+ dy_elems[j] = static_cast<jlong>(dy_output.index);
+ }
+ env->ReleaseLongArrayElements(dy_handles_and_indices, dy_elems, 0);
+
+ return dy_handles_and_indices;
+}
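Editor's note: the Java-side `Graph.addGradients` wrapper is not part of this hunk, so as a hypothetical sketch (names illustrative only), the packed array is meant to be split back into its two halves, handles first, indices second:

```java
// Unpack the [op handles..., output indices...] array returned by the
// native addGradients call above; names here are illustrative, not the
// actual wrapper code.
static void unpackGradients(long[] packed) {
  int n = packed.length / 2;  // first half: op handles, second half: indices
  for (int i = 0; i < n; ++i) {
    long opHandle = packed[i];             // native TF_Operation* as a jlong
    int outputIndex = (int) packed[n + i]; // output index within that op
    // wrap (opHandle, outputIndex) into an Output<?> here
  }
}
```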
diff --git a/tensorflow/java/src/main/native/graph_jni.h b/tensorflow/java/src/main/native/graph_jni.h
index dd2e038332..4f87e8d5a7 100644
--- a/tensorflow/java/src/main/native/graph_jni.h
+++ b/tensorflow/java/src/main/native/graph_jni.h
@@ -73,6 +73,15 @@ JNIEXPORT jbyteArray JNICALL Java_org_tensorflow_Graph_toGraphDef(JNIEnv *,
jclass,
jlong);
+/*
+ * Class: org_tensorflow_Graph
+ * Method:    addGradients
+ * Signature: (J[J[I[J[I[J[I)[J
+ */
+JNIEXPORT jlongArray JNICALL Java_org_tensorflow_Graph_addGradients(JNIEnv *,
+ jclass, jlong, jlongArray, jintArray, jlongArray, jintArray, jlongArray,
+ jintArray);
+
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
diff --git a/tensorflow/java/src/main/native/saved_model_bundle_jni.cc b/tensorflow/java/src/main/native/saved_model_bundle_jni.cc
index de6382a79c..68999fb2da 100644
--- a/tensorflow/java/src/main/native/saved_model_bundle_jni.cc
+++ b/tensorflow/java/src/main/native/saved_model_bundle_jni.cc
@@ -22,12 +22,25 @@ limitations under the License.
JNIEXPORT jobject JNICALL Java_org_tensorflow_SavedModelBundle_load(
JNIEnv* env, jclass clazz, jstring export_dir, jobjectArray tags,
- jbyteArray run_options) {
+ jbyteArray config, jbyteArray run_options) {
TF_Status* status = TF_NewStatus();
jobject bundle = nullptr;
// allocate parameters for TF_LoadSessionFromSavedModel
TF_SessionOptions* opts = TF_NewSessionOptions();
+ if (config != nullptr) {
+ size_t sz = env->GetArrayLength(config);
+ if (sz > 0) {
+ jbyte* config_data = env->GetByteArrayElements(config, nullptr);
+ TF_SetConfig(opts, static_cast<void*>(config_data), sz, status);
+ env->ReleaseByteArrayElements(config, config_data, JNI_ABORT);
+ if (!throwExceptionIfNotOK(env, status)) {
+ TF_DeleteSessionOptions(opts);
+ TF_DeleteStatus(status);
+ return nullptr;
+ }
+ }
+ }
TF_Buffer* crun_options = nullptr;
if (run_options != nullptr) {
size_t sz = env->GetArrayLength(run_options);
diff --git a/tensorflow/java/src/main/native/saved_model_bundle_jni.h b/tensorflow/java/src/main/native/saved_model_bundle_jni.h
index 6cce6a81bd..a4b05d0409 100644
--- a/tensorflow/java/src/main/native/saved_model_bundle_jni.h
+++ b/tensorflow/java/src/main/native/saved_model_bundle_jni.h
@@ -26,10 +26,10 @@ extern "C" {
* Class: org_tensorflow_SavedModelBundle
* Method: load
* Signature:
- * (Ljava/lang/String;[Ljava/lang/String;[B)Lorg/tensorflow/SavedModelBundle;
+ * (Ljava/lang/String;[Ljava/lang/String;[B[B)Lorg/tensorflow/SavedModelBundle;
*/
JNIEXPORT jobject JNICALL Java_org_tensorflow_SavedModelBundle_load(
- JNIEnv *, jclass, jstring, jobjectArray, jbyteArray);
+ JNIEnv *, jclass, jstring, jobjectArray, jbyteArray, jbyteArray);
#ifdef __cplusplus
} // extern "C"
diff --git a/tensorflow/java/src/main/native/session_jni.cc b/tensorflow/java/src/main/native/session_jni.cc
index 2cd542d3c9..8b11525785 100644
--- a/tensorflow/java/src/main/native/session_jni.cc
+++ b/tensorflow/java/src/main/native/session_jni.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include <memory>
#include "tensorflow/c/c_api.h"
+#include "tensorflow/java/src/main/native/utils_jni.h"
#include "tensorflow/java/src/main/native/exception_jni.h"
#include "tensorflow/java/src/main/native/session_jni.h"
@@ -55,37 +56,6 @@ void resolveHandles(JNIEnv* env, const char* type, jlongArray src_array,
env->ReleaseLongArrayElements(src_array, src_start, JNI_ABORT);
}
-void resolveOutputs(JNIEnv* env, const char* type, jlongArray src_op,
- jintArray src_index, TF_Output* dst, jint n) {
- if (env->ExceptionCheck()) return;
- jint len = env->GetArrayLength(src_op);
- if (len != n) {
- throwException(env, kIllegalArgumentException,
- "expected %d, got %d %s Operations", n, len, type);
- return;
- }
- len = env->GetArrayLength(src_index);
- if (len != n) {
- throwException(env, kIllegalArgumentException,
- "expected %d, got %d %s Operation output indices", n, len,
- type);
- return;
- }
- jlong* op_handles = env->GetLongArrayElements(src_op, nullptr);
- jint* indices = env->GetIntArrayElements(src_index, nullptr);
- for (int i = 0; i < n; ++i) {
- if (op_handles[i] == 0) {
- throwException(env, kNullPointerException, "invalid %s (#%d of %d)", type,
- i, n);
- break;
- }
- dst[i] = TF_Output{reinterpret_cast<TF_Operation*>(op_handles[i]),
- static_cast<int>(indices[i])};
- }
- env->ReleaseIntArrayElements(src_index, indices, JNI_ABORT);
- env->ReleaseLongArrayElements(src_op, op_handles, JNI_ABORT);
-}
-
void TF_MaybeDeleteBuffer(TF_Buffer* buf) {
if (buf == nullptr) return;
TF_DeleteBuffer(buf);
@@ -116,20 +86,22 @@ JNIEXPORT jlong JNICALL Java_org_tensorflow_Session_allocate2(
TF_Graph* graph = reinterpret_cast<TF_Graph*>(graph_handle);
TF_Status* status = TF_NewStatus();
TF_SessionOptions* opts = TF_NewSessionOptions();
- const char* ctarget = nullptr;
jbyte* cconfig = nullptr;
- if (target != nullptr) {
- ctarget = env->GetStringUTFChars(target, nullptr);
- }
if (config != nullptr) {
cconfig = env->GetByteArrayElements(config, nullptr);
TF_SetConfig(opts, cconfig,
static_cast<size_t>(env->GetArrayLength(config)), status);
if (!throwExceptionIfNotOK(env, status)) {
env->ReleaseByteArrayElements(config, cconfig, JNI_ABORT);
+ TF_DeleteSessionOptions(opts);
+ TF_DeleteStatus(status);
return 0;
}
}
+ const char* ctarget = nullptr;
+ if (target != nullptr) {
+ ctarget = env->GetStringUTFChars(target, nullptr);
+ }
TF_Session* session = TF_NewSession(graph, opts, status);
if (config != nullptr) {
env->ReleaseByteArrayElements(config, cconfig, JNI_ABORT);
diff --git a/tensorflow/java/src/main/native/utils_jni.cc b/tensorflow/java/src/main/native/utils_jni.cc
new file mode 100644
index 0000000000..069ac05a1c
--- /dev/null
+++ b/tensorflow/java/src/main/native/utils_jni.cc
@@ -0,0 +1,53 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/java/src/main/native/utils_jni.h"
+
+#include "tensorflow/java/src/main/native/exception_jni.h"
+
+void resolveOutputs(JNIEnv* env, const char* type, jlongArray src_op,
+ jintArray src_index, TF_Output* dst, jint n) {
+ if (env->ExceptionCheck()) return;
+ jint len = env->GetArrayLength(src_op);
+ if (len != n) {
+ throwException(env, kIllegalArgumentException,
+ "expected %d, got %d %s Operations", n, len, type);
+ return;
+ }
+ len = env->GetArrayLength(src_index);
+ if (len != n) {
+ throwException(env, kIllegalArgumentException,
+ "expected %d, got %d %s Operation output indices", n, len,
+ type);
+ return;
+ }
+ jlong* op_handles = env->GetLongArrayElements(src_op, nullptr);
+ jint* indices = env->GetIntArrayElements(src_index, nullptr);
+ for (int i = 0; i < n; ++i) {
+ if (op_handles[i] == 0) {
+ throwException(env, kNullPointerException, "invalid %s (#%d of %d)", type,
+ i, n);
+ break;
+ }
+ dst[i] = TF_Output{reinterpret_cast<TF_Operation*>(op_handles[i]),
+ static_cast<int>(indices[i])};
+ }
+ env->ReleaseIntArrayElements(src_index, indices, JNI_ABORT);
+ env->ReleaseLongArrayElements(src_op, op_handles, JNI_ABORT);
+}
+
+
+
+
diff --git a/tensorflow/java/src/main/native/utils_jni.h b/tensorflow/java/src/main/native/utils_jni.h
new file mode 100644
index 0000000000..352298e7de
--- /dev/null
+++ b/tensorflow/java/src/main/native/utils_jni.h
@@ -0,0 +1,33 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_JAVA_UTILS_JNI_H_
+#define TENSORFLOW_JAVA_UTILS_JNI_H_
+
+#include <jni.h>
+
+#include "tensorflow/c/c_api.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif // __cplusplus
+
+void resolveOutputs(JNIEnv* env, const char* type, jlongArray src_op,
+ jintArray src_index, TF_Output* dst, jint n);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+#endif /* TENSORFLOW_JAVA_UTILS_JNI_H_ */
diff --git a/tensorflow/java/src/test/java/org/tensorflow/GraphTest.java b/tensorflow/java/src/test/java/org/tensorflow/GraphTest.java
index c540299bdc..c2e52c22c6 100644
--- a/tensorflow/java/src/test/java/org/tensorflow/GraphTest.java
+++ b/tensorflow/java/src/test/java/org/tensorflow/GraphTest.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertTrue;
import java.util.HashSet;
import java.util.Iterator;
+
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@@ -129,4 +130,106 @@ public class GraphTest {
// expected exception.
}
}
+
+ @Test
+ public void addGradientsToGraph() {
+ try (Graph g = new Graph();
+ Session s = new Session(g)) {
+
+ Output<Float> x1 = TestUtil.placeholder(g, "x1", Float.class);
+ Output<Float> x2 = TestUtil.placeholder(g, "x2", Float.class);
+ Output<Float> y0 = TestUtil.square(g, "y0", x1);
+ Output<Float> y1 = TestUtil.square(g, "y1", y0);
+ Output<Float> y2 = TestUtil.addN(g, y0, x2);
+
+ Output<?>[] grads0 = g.addGradients(y1, toArray(x1));
+ assertNotNull(grads0);
+ assertEquals(1, grads0.length);
+ assertEquals(DataType.FLOAT, grads0[0].dataType());
+
+ Output<?>[] grads1 = g.addGradients(y2, toArray(x1, x2));
+ assertNotNull(grads1);
+ assertEquals(2, grads1.length);
+ assertEquals(DataType.FLOAT, grads1[0].dataType());
+ assertEquals(DataType.FLOAT, grads1[1].dataType());
+
+ try (Tensor<Float> c1 = Tensors.create(3.0f);
+ Tensor<Float> c2 = Tensors.create(2.0f);
+ TestUtil.AutoCloseableList<Tensor<?>> outputs = new TestUtil.AutoCloseableList<>(
+ s.runner()
+ .feed(x1, c1)
+ .feed(x2, c2)
+ .fetch(grads0[0])
+ .fetch(grads1[0])
+ .fetch(grads1[1])
+ .run())) {
+
+ assertEquals(3, outputs.size());
+ assertEquals(108.0f, outputs.get(0).floatValue(), 0.0f);
+ assertEquals(6.0f, outputs.get(1).floatValue(), 0.0f);
+ assertEquals(1.0f, outputs.get(2).floatValue(), 0.0f);
+ }
+ }
+ }
+
+ @Test
+ public void addGradientSumsToGraph() {
+ try (Graph g = new Graph();
+ Session s = new Session(g)) {
+
+ Output<Float> x = TestUtil.placeholder(g, "x", Float.class);
+ Output<Float> y0 = TestUtil.square(g, "y0", x);
+ Output<Float> y1 = TestUtil.square(g, "y1", y0);
+
+ Output<?>[] grad = g.addGradients(toArray(y0, y1), toArray(x), null);
+ assertNotNull(grad);
+ assertEquals(1, grad.length);
+ assertEquals(DataType.FLOAT, grad[0].dataType());
+
+ try (Tensor<Float> c = Tensors.create(3.0f);
+ Tensor<?> output = s.runner()
+ .feed(x, c)
+ .fetch(grad[0])
+ .run()
+ .get(0)) {
+
+ assertEquals(114.0f, output.floatValue(), 0.0f);
+ }
+ }
+ }
+
+ @Test
+ public void addGradientsWithInitialValuesToGraph() {
+ try (Graph g = new Graph();
+ Session s = new Session(g)) {
+
+ Output<Float> x = TestUtil.placeholder(g, "x", Float.class);
+ Output<Float> y0 = TestUtil.square(g, "y0", x);
+ Output<Float> y1 = TestUtil.square(g, "y1", y0);
+
+ Output<?>[] grad0 = g.addGradients(y1, toArray(y0));
+ assertNotNull(grad0);
+ assertEquals(1, grad0.length);
+ assertEquals(DataType.FLOAT, grad0[0].dataType());
+
+ Output<?>[] grad1 = g.addGradients(toArray(y0), toArray(x), toArray(grad0[0]));
+ assertNotNull(grad1);
+ assertEquals(1, grad1.length);
+ assertEquals(DataType.FLOAT, grad1[0].dataType());
+
+ try (Tensor<Float> c = Tensors.create(3.0f);
+ Tensor<?> output = s.runner()
+ .feed(x, c)
+ .fetch(grad1[0])
+ .run()
+ .get(0)) {
+
+ assertEquals(108.0f, output.floatValue(), 0.0f);
+ }
+ }
+ }
+
+ private static Output<?>[] toArray(Output<?>... outputs) {
+ return outputs;
+ }
}
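Editor's note: a quick verification that the expected values in these tests are self-consistent (editorial check only, evaluated at x = x1 = 3):

```latex
% addGradientsToGraph:
y_1 = (x_1^2)^2 = x_1^4 \Rightarrow \partial y_1/\partial x_1 = 4x_1^3 = 108
y_2 = x_1^2 + x_2 \Rightarrow \partial y_2/\partial x_1 = 2x_1 = 6, \quad
                              \partial y_2/\partial x_2 = 1
% addGradientSumsToGraph:
\partial (y_0 + y_1)/\partial x = 2x + 4x^3 = 6 + 108 = 114
% addGradientsWithInitialValuesToGraph:
\partial y_1/\partial y_0 = 2y_0 = 18, \quad
18 \cdot \partial y_0/\partial x = 18 \cdot 2x = 108
```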
diff --git a/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java b/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java
index 7922f3329c..7d936867a7 100644
--- a/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java
+++ b/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java
@@ -47,7 +47,61 @@ public class SavedModelBundleTest {
fail("not expected");
} catch (org.tensorflow.TensorFlowException e) {
// expected exception
- assertTrue(e.getMessage().contains("SavedModel not found"));
+ assertTrue(e.getMessage().contains("Could not find SavedModel"));
}
}
+
+ @Test
+ public void loader() {
+ try (SavedModelBundle bundle = SavedModelBundle.loader(SAVED_MODEL_PATH)
+ .withTags("serve")
+ .withConfigProto(sillyConfigProto())
+ .withRunOptions(sillyRunOptions())
+ .load()) {
+ assertNotNull(bundle.session());
+ assertNotNull(bundle.graph());
+ assertNotNull(bundle.metaGraphDef());
+ }
+ }
+
+ private static byte[] sillyRunOptions() {
+ // Ideally this would use the generated Java sources for protocol buffers
+ // and end up with something like the snippet below. However, generating
+ // the Java files for the .proto files in tensorflow/core:protos_all is
+    // a bit cumbersome in Bazel until the proto_library rule is set up.
+ //
+ // See https://github.com/bazelbuild/bazel/issues/52#issuecomment-194341866
+ // https://github.com/bazelbuild/rules_go/pull/121#issuecomment-251515362
+ // https://github.com/bazelbuild/rules_go/pull/121#issuecomment-251692558
+ //
+ // For this test, for now, the use of specific bytes suffices.
+ return new byte[] {0x08, 0x03};
+ /*
+ return org.tensorflow.framework.RunOptions.newBuilder()
+ .setTraceLevel(RunOptions.TraceLevel.FULL_TRACE)
+ .build()
+ .toByteArray();
+ */
+ }
+
+ public static byte[] sillyConfigProto() {
+ // Ideally this would use the generated Java sources for protocol buffers
+ // and end up with something like the snippet below. However, generating
+ // the Java files for the .proto files in tensorflow/core:protos_all is
+    // a bit cumbersome in Bazel until the proto_library rule is set up.
+ //
+ // See https://github.com/bazelbuild/bazel/issues/52#issuecomment-194341866
+ // https://github.com/bazelbuild/rules_go/pull/121#issuecomment-251515362
+ // https://github.com/bazelbuild/rules_go/pull/121#issuecomment-251692558
+ //
+ // For this test, for now, the use of specific bytes suffices.
+ return new byte[] {0x10, 0x01, 0x28, 0x01};
+ /*
+ return org.tensorflow.framework.ConfigProto.newBuilder()
+ .setInterOpParallelismThreads(1)
+ .setIntraOpParallelismThreads(1)
+ .build()
+ .toByteArray();
+ */
+ }
}
diff --git a/tensorflow/java/src/test/java/org/tensorflow/SessionTest.java b/tensorflow/java/src/test/java/org/tensorflow/SessionTest.java
index e8cc76c2a6..7d5980bcde 100644
--- a/tensorflow/java/src/test/java/org/tensorflow/SessionTest.java
+++ b/tensorflow/java/src/test/java/org/tensorflow/SessionTest.java
@@ -20,8 +20,6 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import java.util.ArrayList;
-import java.util.Collection;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@@ -36,8 +34,8 @@ public class SessionTest {
Session s = new Session(g)) {
TestUtil.transpose_A_times_X(g, new int[][] {{2}, {3}});
try (Tensor<Integer> x = Tensors.create(new int[][] {{5}, {7}});
- AutoCloseableList<Tensor<?>> outputs =
- new AutoCloseableList<Tensor<?>>(s.runner().feed("X", x).fetch("Y").run())) {
+ TestUtil.AutoCloseableList<Tensor<?>> outputs =
+ new TestUtil.AutoCloseableList<Tensor<?>>(s.runner().feed("X", x).fetch("Y").run())) {
assertEquals(1, outputs.size());
final int[][] expected = {{31}};
assertArrayEquals(expected, outputs.get(0).copyTo(new int[1][1]));
@@ -53,8 +51,8 @@ public class SessionTest {
Output<Integer> feed = g.operation("X").output(0);
Output<Integer> fetch = g.operation("Y").output(0);
try (Tensor<Integer> x = Tensors.create(new int[][] {{5}, {7}});
- AutoCloseableList<Tensor<?>> outputs =
- new AutoCloseableList<Tensor<?>>(s.runner().feed(feed, x).fetch(fetch).run())) {
+ TestUtil.AutoCloseableList<Tensor<?>> outputs =
+ new TestUtil.AutoCloseableList<Tensor<?>>(s.runner().feed(feed, x).fetch(fetch).run())) {
assertEquals(1, outputs.size());
final int[][] expected = {{31}};
assertArrayEquals(expected, outputs.get(0).copyTo(new int[1][1]));
@@ -112,7 +110,7 @@ public class SessionTest {
.setOptions(fullTraceRunOptions())
.runAndFetchMetadata();
// Sanity check on outputs.
- AutoCloseableList<Tensor<?>> outputs = new AutoCloseableList<Tensor<?>>(result.outputs);
+ TestUtil.AutoCloseableList<Tensor<?>> outputs = new TestUtil.AutoCloseableList<Tensor<?>>(result.outputs);
assertEquals(1, outputs.size());
final int[][] expected = {{31}};
assertArrayEquals(expected, outputs.get(0).copyTo(new int[1][1]));
@@ -135,8 +133,8 @@ public class SessionTest {
Session s = new Session(g)) {
TestUtil.constant(g, "c1", 2718);
TestUtil.constant(g, "c2", 31415);
- AutoCloseableList<Tensor<?>> outputs =
- new AutoCloseableList<Tensor<?>>(s.runner().fetch("c2").fetch("c1").run());
+ TestUtil.AutoCloseableList<Tensor<?>> outputs =
+ new TestUtil.AutoCloseableList<Tensor<?>>(s.runner().fetch("c2").fetch("c1").run());
assertEquals(2, outputs.size());
assertEquals(31415, outputs.get(0).intValue());
assertEquals(2718, outputs.get(1).intValue());
@@ -164,28 +162,6 @@ public class SessionTest {
Session s = new Session(g, singleThreadConfigProto())) {}
}
- private static final class AutoCloseableList<E extends AutoCloseable> extends ArrayList<E>
- implements AutoCloseable {
- AutoCloseableList(Collection<? extends E> c) {
- super(c);
- }
-
- @Override
- public void close() {
- Exception toThrow = null;
- for (AutoCloseable c : this) {
- try {
- c.close();
- } catch (Exception e) {
- toThrow = e;
- }
- }
- if (toThrow != null) {
- throw new RuntimeException(toThrow);
- }
- }
- }
-
private static byte[] fullTraceRunOptions() {
// Ideally this would use the generated Java sources for protocol buffers
// and end up with something like the snippet below. However, generating
diff --git a/tensorflow/java/src/test/java/org/tensorflow/TestUtil.java b/tensorflow/java/src/test/java/org/tensorflow/TestUtil.java
index c973b5a3d8..4e84886416 100644
--- a/tensorflow/java/src/test/java/org/tensorflow/TestUtil.java
+++ b/tensorflow/java/src/test/java/org/tensorflow/TestUtil.java
@@ -16,9 +16,34 @@ limitations under the License.
package org.tensorflow;
import java.lang.reflect.Array;
+import java.util.ArrayList;
+import java.util.Collection;
/** Static utility functions. */
public class TestUtil {
+
+ public static final class AutoCloseableList<E extends AutoCloseable> extends ArrayList<E>
+ implements AutoCloseable {
+ AutoCloseableList(Collection<? extends E> c) {
+ super(c);
+ }
+
+ @Override
+ public void close() {
+ Exception toThrow = null;
+ for (AutoCloseable c : this) {
+ try {
+ c.close();
+ } catch (Exception e) {
+ toThrow = e;
+ }
+ }
+ if (toThrow != null) {
+ throw new RuntimeException(toThrow);
+ }
+ }
+ }
+
public static <T> Output<T> constant(Graph g, String name, Object value) {
try (Tensor<?> t = Tensor.create(value)) {
return g.opBuilder("Const", name)
@@ -36,7 +61,7 @@ public class TestUtil {
.<T>output(0);
}
- public static Output<?> addN(Graph g, Output<?>... inputs) {
+ public static <T> Output<T> addN(Graph g, Output<?>... inputs) {
return g.opBuilder("AddN", "AddN").addInputList(inputs).build().output(0);
}
@@ -58,6 +83,13 @@ public class TestUtil {
.setAttr("num_split", numSplit)
.build();
}
+
+ public static <T> Output<T> square(Graph g, String name, Output<T> value) {
+ return g.opBuilder("Square", name)
+ .addInput(value)
+ .build()
+ .<T>output(0);
+ }
public static void transpose_A_times_X(Graph g, int[][] a) {
Output<Integer> aa = constant(g, "A", a);
diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD
index 5d9a5130a0..51e6d5aabf 100644
--- a/tensorflow/python/BUILD
+++ b/tensorflow/python/BUILD
@@ -73,7 +73,7 @@ py_library(
visibility = [
"//tensorflow:__pkg__",
"//tensorflow/python/tools:__pkg__",
- "//tensorflow/tools/api/generator:__pkg__",
+ "//tensorflow/python/tools/api/generator:__pkg__",
],
deps = [
":array_ops",
@@ -127,12 +127,14 @@ py_library(
":util",
":weights_broadcast_ops",
"//tensorflow/core:protos_all_py",
+ "//tensorflow/python/compat",
"//tensorflow/python/data",
"//tensorflow/python/feature_column:feature_column_py",
"//tensorflow/python/keras",
"//tensorflow/python/ops/distributions",
"//tensorflow/python/ops/linalg",
"//tensorflow/python/ops/losses",
+ "//tensorflow/python/ops/parallel_for",
"//tensorflow/python/profiler",
"//tensorflow/python/saved_model",
"//third_party/py/numpy",
@@ -279,6 +281,9 @@ cc_library(
name = "ndarray_tensor_bridge",
srcs = ["lib/core/ndarray_tensor_bridge.cc"],
hdrs = ["lib/core/ndarray_tensor_bridge.h"],
+ visibility = visibility + [
+ "//learning/deepmind/courier:__subpackages__",
+ ],
deps = [
":bfloat16_lib",
":numpy_lib",
@@ -695,6 +700,17 @@ py_library(
)
py_library(
+ name = "error_interpolation",
+ srcs = [
+ "framework/error_interpolation.py",
+ ],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":util",
+ ],
+)
+
+py_library(
name = "function",
srcs = ["framework/function.py"],
srcs_version = "PY2AND3",
@@ -808,6 +824,7 @@ py_library(
":platform",
":registry",
":tensor_shape",
+ ":traceable_stack",
":util",
":versions",
"//tensorflow/core:protos_all_py",
@@ -873,6 +890,17 @@ py_library(
],
)
+# This target is maintained separately from :util to provide separate visibility
+# for legacy users who were granted visibility when the functions were private
+# members of ops.Graph.
+py_library(
+ name = "tf_stack",
+ srcs = ["util/tf_stack.py"],
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+ deps = [],
+)
+
py_library(
name = "tensor_shape",
srcs = ["framework/tensor_shape.py"],
@@ -908,6 +936,16 @@ py_library(
)
py_library(
+ name = "traceable_stack",
+ srcs = ["framework/traceable_stack.py"],
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+ deps = [
+ ":util",
+ ],
+)
+
+py_library(
name = "versions",
srcs = ["framework/versions.py"],
srcs_version = "PY2AND3",
@@ -997,6 +1035,19 @@ py_test(
)
py_test(
+ name = "framework_error_interpolation_test",
+ size = "small",
+ srcs = ["framework/error_interpolation_test.py"],
+ main = "framework/error_interpolation_test.py",
+ srcs_version = "PY2AND3",
+ deps = [
+ ":client_testlib",
+ ":constant_op",
+ ":error_interpolation",
+ ],
+)
+
+py_test(
name = "framework_subscribe_test",
size = "small",
srcs = ["framework/subscribe_test.py"],
@@ -1181,6 +1232,21 @@ py_test(
],
)
+py_test(
+ name = "framework_traceable_stack_test",
+ size = "small",
+ srcs = ["framework/traceable_stack_test.py"],
+ main = "framework/traceable_stack_test.py",
+ srcs_version = "PY2AND3",
+ deps = [
+ ":framework_test_lib",
+ ":platform_test",
+ ":test_ops",
+ ":traceable_stack",
+ ":util",
+ ],
+)
+
tf_gen_op_wrapper_py(
name = "test_ops",
out = "framework/test_ops.py",
@@ -1961,6 +2027,8 @@ py_library(
":math_ops",
":platform",
":resource_variable_ops",
+ ":sparse_ops",
+ ":tensor_shape",
":variables",
],
)
@@ -3269,6 +3337,9 @@ py_library(
],
),
srcs_version = "PY2AND3",
+ visibility = visibility + [
+ "//tensorflow:__pkg__",
+ ],
deps = [
"//third_party/py/numpy",
"@org_python_pypi_backports_weakref",
@@ -3725,6 +3796,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":c_api_util",
+ ":error_interpolation",
":errors",
":framework",
":framework_for_generated_wrappers",
@@ -3925,7 +3997,7 @@ tf_cuda_library(
tf_py_test(
name = "session_test",
- size = "small",
+ size = "medium",
srcs = ["client/session_test.py"],
additional_deps = [
":array_ops",
@@ -4050,6 +4122,7 @@ cuda_py_test(
":math_ops",
"//tensorflow/core:protos_all_py",
],
+ tags = ["no_windows_gpu"],
)
py_test(
diff --git a/tensorflow/python/client/session.py b/tensorflow/python/client/session.py
index 35aa37ac6d..e037925961 100644
--- a/tensorflow/python/client/session.py
+++ b/tensorflow/python/client/session.py
@@ -361,7 +361,7 @@ class _ListFetchMapper(_FetchMapper):
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
# Return a value of the original type of the fetches.
- if self._fetch_type == list:
+ if issubclass(self._fetch_type, list):
return results
elif self._fetch_type == tuple:
return tuple(results)
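Editor's note: the switch from `==` to `issubclass` matters when a caller fetches with a `list` subclass; a minimal sketch of the difference (`FetchList` is hypothetical):

```python
class FetchList(list):  # hypothetical list subclass a caller might pass
  pass

fetches = FetchList([0, 1])
print(type(fetches) == list)            # False: the old check missed this
print(issubclass(type(fetches), list))  # True: the new check accepts it
```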
@@ -1291,7 +1291,7 @@ class BaseSession(SessionInterface):
raise type(e)(node_def, op, message)
def _extend_graph(self):
- with self._graph._lock: # pylint: disable=protected-access
+ with self._graph._session_run_lock(): # pylint: disable=protected-access
tf_session.ExtendSession(self._session)
# The threshold to run garbage collection to delete dead tensors.
diff --git a/tensorflow/python/client/session_test.py b/tensorflow/python/client/session_test.py
index e49d067105..b72e029d1c 100644
--- a/tensorflow/python/client/session_test.py
+++ b/tensorflow/python/client/session_test.py
@@ -18,6 +18,7 @@ from __future__ import division
from __future__ import print_function
import collections
+import random
import os
import sys
import threading
@@ -1040,40 +1041,72 @@ class SessionTest(test_util.TensorFlowTestCase):
for t in threads:
t.join()
- def testParallelRunAndBuild(self):
+ @staticmethod
+ def _build_graph():
+ time.sleep(random.random() * 0.1)
+ # Do some graph construction. Try to exercise non-trivial paths.
+ graph = ops.get_default_graph()
+ gdef = None
+ for _ in range(10):
+ x = array_ops.placeholder(dtype=dtypes.float32)
+ with ops.colocate_with(x):
+ y = array_ops.placeholder(dtype=dtypes.float32)
+ with ops.device('/cpu:0'):
+ z = control_flow_ops.while_loop(
+ lambda x, y: x < 10, lambda x, y: (x + 1, x * y), [x, y])
+ with graph._attr_scope({'_a': attr_value_pb2.AttrValue(b=False)}):
+ gradients_impl.gradients(z, [x, y])
+ if gdef is None:
+ gdef = graph.as_graph_def()
+ else:
+ importer.import_graph_def(gdef, name='import')
+
+ def testParallelRunAndSingleBuild(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
stop = threading.Event()
def run_loop():
while not stop.is_set():
+ time.sleep(random.random() * 0.1)
self.assertEqual(sess.run(c), 5.0)
- threads = [self.checkedThread(target=run_loop) for _ in range(100)]
+ threads = [self.checkedThread(target=run_loop) for _ in range(10)]
for t in threads:
t.start()
- # Do some graph construction. Try to exercise non-trivial paths.
- graph = ops.get_default_graph()
- gdef = None
- for _ in range(10):
- x = array_ops.placeholder(dtype=dtypes.float32)
- with ops.colocate_with(x):
- y = array_ops.placeholder(dtype=dtypes.float32)
- with ops.device('/cpu:0'):
- z = control_flow_ops.while_loop(
- lambda x, y: x < 10, lambda x, y: (x + 1, x * y), [x, y])
- with graph._attr_scope({'_a': attr_value_pb2.AttrValue(b=False)}):
- gradients_impl.gradients(z, [x, y])
- if gdef is None:
- gdef = graph.as_graph_def()
- else:
- importer.import_graph_def(gdef, name='import')
+ SessionTest._build_graph()
stop.set()
for t in threads:
t.join()
+ def testParallelRunAndParallelBuild(self):
+ with session.Session() as sess:
+ c = constant_op.constant(5.0)
+ stop = threading.Event()
+
+ def run_loop():
+ while not stop.is_set():
+ time.sleep(random.random() * 0.1)
+ self.assertEqual(sess.run(c), 5.0)
+
+ run_threads = [self.checkedThread(target=run_loop) for _ in range(10)]
+ for t in run_threads:
+ t.start()
+
+ build_threads = [self.checkedThread(target=SessionTest._build_graph)
+ for _ in range(10)]
+ for t in build_threads:
+ t.start()
+ for t in build_threads:
+ t.join()
+
+ # Let the run_threads run until the build threads are finished.
+ stop.set()
+ for t in run_threads:
+ t.join()
+
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
diff --git a/tensorflow/python/compat/BUILD b/tensorflow/python/compat/BUILD
new file mode 100644
index 0000000000..58ceafca06
--- /dev/null
+++ b/tensorflow/python/compat/BUILD
@@ -0,0 +1,22 @@
+licenses(["notice"]) # Apache 2.0
+
+exports_files(["LICENSE"])
+
+load("//tensorflow:tensorflow.bzl", "tf_py_test")
+
+py_library(
+ name = "compat",
+ srcs = ["compat.py"],
+ srcs_version = "PY2AND3",
+ visibility = ["//tensorflow:internal"],
+)
+
+tf_py_test(
+ name = "compat_test",
+ size = "small",
+ srcs = ["compat_test.py"],
+ additional_deps = [
+ ":compat",
+ "//tensorflow/python:client_testlib",
+ ],
+)
diff --git a/tensorflow/python/compat/compat.py b/tensorflow/python/compat/compat.py
new file mode 100644
index 0000000000..68a6421c2c
--- /dev/null
+++ b/tensorflow/python/compat/compat.py
@@ -0,0 +1,125 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Utilities for API compatibility between TensorFlow release versions.
+
+See
+@{$guide/version_compat#backward_and_partial_forward_compatibility}
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import datetime
+from tensorflow.python.util import tf_contextlib
+
+_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2018, 8, 1)
+
+
+def forward_compatible(year, month, day):
+ """Return true if the forward compatibility window has expired.
+
+ Forward-compatibility refers to scenarios where the producer of a TensorFlow
+ model (a GraphDef or SavedModel) is compiled against a version of the
+ TensorFlow library newer than what the consumer was compiled against. The
+ "producer" is typically a Python program that constructs and trains a model
+ while the "consumer" is typically another program that loads and serves the
+ model.
+
+  TensorFlow supports a 3-week forward-compatibility window for programs
+  compiled from source at HEAD.
+
+ For example, consider the case where a new operation `MyNewAwesomeAdd` is
+ created with the intent of replacing the implementation of an existing Python
+ wrapper - `tf.add`. The Python wrapper implementation should change from
+ something like:
+
+ ```python
+ def add(inputs, name=None):
+ return gen_math_ops.add(inputs, name)
+ ```
+
+ to:
+
+ ```python
+ from tensorflow.python.compat import compat
+
+ def add(inputs, name=None):
+ if compat.forward_compatible(year, month, day):
+ # Can use the awesome new implementation.
+ return gen_math_ops.my_new_awesome_add(inputs, name)
+    # To maintain forward compatibility, use the old implementation.
+ return gen_math_ops.add(inputs, name)
+ ```
+
+ Where `year`, `month`, and `day` specify the date beyond which binaries
+ that consume a model are expected to have been updated to include the
+ new operations. This date is typically at least 3 weeks beyond the date
+ the code that adds the new operation is committed.
+
+ Args:
+ year: A year (e.g., 2018).
+ month: A month (1 <= month <= 12) in year.
+    day: A day (1 <= day <= 31, depending on the month) in month.
+
+ Returns:
+    True if the caller can expect that serialized TensorFlow graphs it produces
+    can be consumed by programs built from TensorFlow library source code
+    newer than (year, month, day).
+ """
+ return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
+
+
+@tf_contextlib.contextmanager
+def forward_compatibility_horizon(year, month, day):
+ """Context manager for testing forward compatibility of generated graphs.
+
+ To ensure forward compatibility of generated graphs (see `forward_compatible`)
+ with older binaries, new features can be gated with:
+
+ ```python
+  if compat.forward_compatible(year=2018, month=8, day=1):
+ generate_graph_with_new_features()
+ else:
+ generate_graph_so_older_binaries_can_consume_it()
+ ```
+
+  However, when adding new features, one may want to unit test them before
+ the forward compatibility window expires. This context manager enables
+ such tests. For example:
+
+ ```python
+ from tensorflow.python.compat import compat
+
+ def testMyNewFeature(self):
+    with compat.forward_compatibility_horizon(2018, 8, 2):
+ # Test that generate_graph_with_new_features() has an effect
+ ```
+
+  Args:
+ year: A year (e.g. 2018).
+ month: A month (1 <= month <= 12) in year.
+    day: A day (1 <= day <= 31, depending on the month) in month.
+
+ Yields:
+ Nothing.
+ """
+ global _FORWARD_COMPATIBILITY_HORIZON
+ try:
+ old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
+ _FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
+ yield
+ finally:
+ _FORWARD_COMPATIBILITY_HORIZON = old_compat_date
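Editor's note: a small sketch (hypothetical dates) of the strict comparison this module relies on; the horizon date itself is not yet considered forward compatible, which is exactly what `test_basic` below asserts:

```python
import datetime

horizon = datetime.date(2018, 8, 1)  # stand-in for _FORWARD_COMPATIBILITY_HORIZON
print(horizon > datetime.date(2018, 7, 31))  # True: window expired, new op OK
print(horizon > datetime.date(2018, 8, 1))   # False: not forward compatible yet
```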
diff --git a/tensorflow/python/compat/compat_test.py b/tensorflow/python/compat/compat_test.py
new file mode 100644
index 0000000000..946abbb300
--- /dev/null
+++ b/tensorflow/python/compat/compat_test.py
@@ -0,0 +1,70 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for forward and backwards compatibility utilties."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import datetime
+from tensorflow.python.compat import compat
+from tensorflow.python.platform import test
+
+
+class CompatTest(test.TestCase):
+
+ def _compatibility_date(self):
+ date = compat._FORWARD_COMPATIBILITY_HORIZON # pylint: disable=protected-access
+ return (date.year, date.month, date.day)
+
+ def _n_days_after(self, n):
+ date = compat._FORWARD_COMPATIBILITY_HORIZON + datetime.timedelta(days=n) # pylint: disable=protected-access
+ return (date.year, date.month, date.day)
+
+ def test_basic(self):
+ compatibility_date = self._compatibility_date()
+ one_day_before = self._n_days_after(-1)
+ self.assertTrue(compat.forward_compatible(*one_day_before))
+ self.assertFalse(compat.forward_compatible(*compatibility_date))
+
+ def test_decorator(self):
+ compatibility_date = self._compatibility_date()
+ one_day_after = self._n_days_after(1)
+ with compat.forward_compatibility_horizon(*one_day_after):
+ self.assertTrue(compat.forward_compatible(*compatibility_date))
+ self.assertFalse(compat.forward_compatible(*one_day_after))
+
+ # After exiting context manager, value should be reset.
+ self.assertFalse(compat.forward_compatible(*compatibility_date))
+
+ def test_decorator_with_failure(self):
+ compatibility_date = self._compatibility_date()
+ one_day_after = self._n_days_after(1)
+
+ class DummyError(Exception):
+ pass
+
+ try:
+ with compat.forward_compatibility_horizon(*one_day_after):
+ raise DummyError()
+ except DummyError:
+ pass # silence DummyError
+
+ # After exiting context manager, value should be reset.
+ self.assertFalse(compat.forward_compatible(*compatibility_date))
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/python/data/kernel_tests/BUILD b/tensorflow/python/data/kernel_tests/BUILD
index 3bde62fa1d..38505c0a01 100644
--- a/tensorflow/python/data/kernel_tests/BUILD
+++ b/tensorflow/python/data/kernel_tests/BUILD
@@ -349,6 +349,7 @@ tf_py_test(
"//tensorflow/python:sparse_tensor",
"//tensorflow/python:tensor_shape",
"//tensorflow/python:training",
+ "//tensorflow/python/compat:compat",
],
grpc_enabled = True,
)
diff --git a/tensorflow/python/data/kernel_tests/batch_dataset_op_test.py b/tensorflow/python/data/kernel_tests/batch_dataset_op_test.py
index 50bb0837b7..89de55dd4f 100644
--- a/tensorflow/python/data/kernel_tests/batch_dataset_op_test.py
+++ b/tensorflow/python/data/kernel_tests/batch_dataset_op_test.py
@@ -18,9 +18,12 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import time
+
from absl.testing import parameterized
import numpy as np
+from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
@@ -275,7 +278,7 @@ class PaddedBatchDatasetTest(test.TestCase, parameterized.TestCase):
result = sess.run(get_next)
padded_len = padded_shapes[0]
if padded_len is None or padded_len == -1:
- padded_len = np.max(result)
+ padded_len = np.max(result) if result.size > 0 else 0
self.assertEqual((batch_size, padded_len), result.shape)
for j in range(batch_size):
seq_len = seq_lens[(i * batch_size) + j]
@@ -285,7 +288,7 @@ class PaddedBatchDatasetTest(test.TestCase, parameterized.TestCase):
if not drop_remainder and len(seq_lens) % batch_size > 0:
result = sess.run(get_next)
- padded_len = np.max(result)
+ padded_len = np.max(result) if result.size > 0 else 0
self.assertEqual((len(seq_lens) % batch_size, padded_len),
result.shape)
for j in range(len(seq_lens) % batch_size):
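Editor's note: the `result.size > 0` guard added above is needed because NumPy reductions reject empty arrays; a quick illustration:

```python
import numpy as np

print(np.max(np.array([1, 2, 3])))  # 3
try:
  np.max(np.array([]))  # the empty-batch case the guard protects against
except ValueError as e:
  print(e)  # "zero-size array to reduction operation maximum ..."
```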
@@ -461,5 +464,55 @@ class PaddedBatchDatasetTest(test.TestCase, parameterized.TestCase):
5, padded_shapes=shape_as_tensor)
+class BatchDatasetBenchmark(test.Benchmark):
+
+ def benchmarkBatchSparse(self):
+ non_zeros_per_row_values = [0, 1, 5, 10, 100]
+ batch_size_values = [1, 32, 64, 128, 1024]
+
+ sparse_placeholder = array_ops.sparse_placeholder(dtype=dtypes.int64)
+ batch_size_placeholder = array_ops.placeholder(dtype=dtypes.int64, shape=[])
+
+ dataset = dataset_ops.Dataset.from_tensors(sparse_placeholder).repeat(
+ ).batch(batch_size_placeholder)
+ iterator = dataset.make_initializable_iterator()
+ next_element = iterator.get_next()
+
+ for non_zeros_per_row in non_zeros_per_row_values:
+
+ sparse_value = sparse_tensor.SparseTensorValue(
+ indices=np.arange(non_zeros_per_row, dtype=np.int64)[:, np.newaxis],
+ values=np.arange(non_zeros_per_row, dtype=np.int64),
+ dense_shape=[1000])
+
+ for batch_size in batch_size_values:
+
+ with session.Session() as sess:
+ sess.run(iterator.initializer, feed_dict={
+ sparse_placeholder: sparse_value,
+ batch_size_placeholder: batch_size})
+ # Run five steps to warm up the session caches before taking the
+ # first measurement.
+ for _ in range(5):
+ sess.run(next_element.indices.op)
+ deltas = []
+ for _ in range(100):
+ start = time.time()
+ for _ in range(100):
+ sess.run(next_element.indices.op)
+ end = time.time()
+ deltas.append(end - start)
+
+ median_wall_time = np.median(deltas) / 100.0
+
+ print('Batch sparse dataset non-zeros per row: %d batch_size: %d '
+ 'wall time: %f'
+ % (non_zeros_per_row, batch_size, median_wall_time))
+ self.report_benchmark(
+ iters=10000, wall_time=median_wall_time,
+ name='benchmark_batch_sparse_dataset_nnz_%d_batch_size_%d' % (
+ non_zeros_per_row, batch_size))
+
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/python/data/kernel_tests/iterator_ops_test.py b/tensorflow/python/data/kernel_tests/iterator_ops_test.py
index 820c167b6b..b434fa7334 100644
--- a/tensorflow/python/data/kernel_tests/iterator_ops_test.py
+++ b/tensorflow/python/data/kernel_tests/iterator_ops_test.py
@@ -25,6 +25,7 @@ import numpy as np
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
+from tensorflow.python.compat import compat as forward_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
@@ -415,6 +416,69 @@ class IteratorTest(test.TestCase):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_4_handle})
+ def testIteratorStringHandleFuture(self):
+ with forward_compat.forward_compatibility_horizon(2018, 8, 4):
+ dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
+ dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
+
+ iterator_3 = dataset_3.make_one_shot_iterator()
+ iterator_4 = dataset_4.make_one_shot_iterator()
+
+ handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
+ feedable_iterator = iterator_ops.Iterator.from_string_handle(
+ handle_placeholder, dataset_3.output_types, dataset_3.output_shapes)
+ next_element = feedable_iterator.get_next()
+
+ self.assertEqual(dataset_3.output_types, feedable_iterator.output_types)
+ self.assertEqual(dataset_4.output_types, feedable_iterator.output_types)
+ self.assertEqual([], feedable_iterator.output_shapes)
+
+ with self.test_session() as sess:
+ iterator_3_handle = sess.run(iterator_3.string_handle())
+ iterator_4_handle = sess.run(iterator_4.string_handle())
+
+ self.assertEqual(
+ 10,
+ sess.run(
+ next_element,
+ feed_dict={handle_placeholder: iterator_4_handle}))
+ self.assertEqual(
+ 1,
+ sess.run(
+ next_element,
+ feed_dict={handle_placeholder: iterator_3_handle}))
+ self.assertEqual(
+ 20,
+ sess.run(
+ next_element,
+ feed_dict={handle_placeholder: iterator_4_handle}))
+ self.assertEqual(
+ 2,
+ sess.run(
+ next_element,
+ feed_dict={handle_placeholder: iterator_3_handle}))
+ self.assertEqual(
+ 30,
+ sess.run(
+ next_element,
+ feed_dict={handle_placeholder: iterator_4_handle}))
+ self.assertEqual(
+ 3,
+ sess.run(
+ next_element,
+ feed_dict={handle_placeholder: iterator_3_handle}))
+ self.assertEqual(
+ 40,
+ sess.run(
+ next_element,
+ feed_dict={handle_placeholder: iterator_4_handle}))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(
+ next_element, feed_dict={handle_placeholder: iterator_3_handle})
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(
+ next_element, feed_dict={handle_placeholder: iterator_4_handle})
+
def testIteratorStringHandleReuseTensorObject(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
one_shot_iterator = dataset.make_one_shot_iterator()
diff --git a/tensorflow/python/data/kernel_tests/map_dataset_op_test.py b/tensorflow/python/data/kernel_tests/map_dataset_op_test.py
index 0ecd821e9e..637bde9ae4 100644
--- a/tensorflow/python/data/kernel_tests/map_dataset_op_test.py
+++ b/tensorflow/python/data/kernel_tests/map_dataset_op_test.py
@@ -666,6 +666,13 @@ class MapDatasetTest(test.TestCase):
"currently support nested datasets as outputs."):
_ = dataset.map(dataset_ops.Dataset.from_tensor_slices)
+ def testReturnValueError(self):
+ dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
+ with self.assertRaisesRegexp(
+ TypeError, r"Unsupported return value from function passed to "
+ r"Dataset.map\(\): None."):
+ _ = dataset.map(lambda x: None)
+
class MapDatasetBenchmark(test.Benchmark):
diff --git a/tensorflow/python/data/ops/BUILD b/tensorflow/python/data/ops/BUILD
index fa2e86eab1..f15eb6310f 100644
--- a/tensorflow/python/data/ops/BUILD
+++ b/tensorflow/python/data/ops/BUILD
@@ -40,6 +40,7 @@ py_library(
"//tensorflow/python:dtypes",
"//tensorflow/python:framework_ops",
"//tensorflow/python:tensor_shape",
+ "//tensorflow/python/compat",
"//tensorflow/python/data/util:convert",
],
)
@@ -54,6 +55,7 @@ py_library(
"//tensorflow/python:framework_ops",
"//tensorflow/python:resource_variable_ops",
"//tensorflow/python:tensor_shape",
+ "//tensorflow/python/compat",
"//tensorflow/python/data/util:nest",
"//tensorflow/python/data/util:sparse",
"//tensorflow/python/eager:context",
diff --git a/tensorflow/python/data/ops/dataset_ops.py b/tensorflow/python/data/ops/dataset_ops.py
index 7cb6627615..88de4b588c 100644
--- a/tensorflow/python/data/ops/dataset_ops.py
+++ b/tensorflow/python/data/ops/dataset_ops.py
@@ -24,6 +24,7 @@ import warnings
import numpy as np
import six
+from tensorflow.python.compat import compat
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import random_seed
@@ -107,8 +108,12 @@ class Dataset(object):
"execution is enabled.")
if shared_name is None:
shared_name = ""
- iterator_resource = gen_dataset_ops.iterator(
- container="", shared_name=shared_name, **flat_structure(self))
+ if compat.forward_compatible(2018, 8, 3):
+ iterator_resource = gen_dataset_ops.iterator_v2(
+ container="", shared_name=shared_name, **flat_structure(self))
+ else:
+ iterator_resource = gen_dataset_ops.iterator(
+ container="", shared_name=shared_name, **flat_structure(self))
with ops.colocate_with(iterator_resource):
initializer = gen_dataset_ops.make_iterator(self._as_variant_tensor(),
iterator_resource)
@@ -888,7 +893,83 @@ class Dataset(object):
drop_remainder)
def map(self, map_func, num_parallel_calls=None):
- """Maps `map_func` across this dataset.
+ """Maps `map_func` across the elements of this dataset.
+
+ This transformation applies `map_func` to each element of this dataset, and
+ returns a new dataset containing the transformed elements, in the same
+ order as they appeared in the input.
+
+ For example:
+
+ ```python
+ # NOTE: The following examples use `{ ... }` to represent the
+ # contents of a dataset.
+ a = { 1, 2, 3, 4, 5 }
+
+ a.map(lambda x: x + 1) = { 2, 3, 4, 5, 6 }
+ ```
+
+ The input signature of `map_func` is determined by the structure of each
+ element in this dataset. For example:
+
+ ```python
+ # Each element is a `tf.Tensor` object.
+ a = { 1, 2, 3, 4, 5 }
+ # `map_func` takes a single argument of type `tf.Tensor` with the same
+ # shape and dtype.
+ result = a.map(lambda x: ...)
+
+ # Each element is a tuple containing two `tf.Tensor` objects.
+ b = { (1, "foo"), (2, "bar"), (3, "baz") }
+ # `map_func` takes two arguments of type `tf.Tensor`.
+ result = b.map(lambda x_int, y_str: ...)
+
+ # Each element is a dictionary mapping strings to `tf.Tensor` objects.
+ c = { {"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}, {"a": 3, "b": "baz"} }
+ # `map_func` takes a single argument of type `dict` with the same keys as
+ # the elements.
+ result = c.map(lambda d: ...)
+ ```
+
+ The value or values returned by `map_func` determine the structure of each
+ element in the returned dataset.
+
+ ```python
+ # `map_func` returns a scalar `tf.Tensor` of type `tf.float32`.
+ def f(...):
+ return tf.constant(37.0)
+ result = dataset.map(f)
+ result.output_classes == tf.Tensor
+ result.output_types == tf.float32
+ result.output_shapes == [] # scalar
+
+ # `map_func` returns two `tf.Tensor` objects.
+ def g(...):
+ return tf.constant(37.0), tf.constant(["Foo", "Bar", "Baz"])
+ result = dataset.map(g)
+ result.output_classes == (tf.Tensor, tf.Tensor)
+ result.output_types == (tf.float32, tf.string)
+ result.output_shapes == ([], [3])
+
+ # Python primitives, lists, and NumPy arrays are implicitly converted to
+ # `tf.Tensor`.
+ def h(...):
+ return 37.0, ["Foo", "Bar", "Baz"], np.array([1.0, 2.0] dtype=np.float64)
+ result = dataset.map(h)
+ result.output_classes == (tf.Tensor, tf.Tensor, tf.Tensor)
+ result.output_types == (tf.float32, tf.string, tf.float64)
+ result.output_shapes == ([], [3], [2])
+
+ # `map_func` can return nested structures.
+ def i(...):
+ return {"a": 37.0, "b": [42, 16]}, "foo"
+ result.output_classes == ({"a": tf.Tensor, "b": tf.Tensor}, tf.Tensor)
+ result.output_types == ({"a": tf.float32, "b": tf.int32}, tf.string)
+ result.output_shapes == ({"a": [], "b": [2]}, [])
+ ```
+
+ In addition to `tf.Tensor` objects, `map_func` can accept and return
+ `tf.SparseTensor` objects.
+
Args:
map_func: A function mapping a nested structure of tensors (having
@@ -1168,10 +1249,29 @@ class _NestedDatasetComponent(object):
custom component types.
"""
- def __init__(self, dataset):
- self._output_classes = dataset.output_classes
- self._output_shapes = dataset.output_shapes
- self._output_types = dataset.output_types
+ def __init__(self,
+ dataset=None,
+ output_shapes=None,
+ output_types=None,
+ output_classes=None):
+ if dataset is None:
+ if (output_classes is None or output_shapes is None or
+ output_types is None):
+ raise ValueError(
+ "Either `dataset`, or all of `output_classes`, "
+ "`output_shapes`, and `output_types` must be specified.")
+ self._output_classes = output_classes
+ self._output_shapes = output_shapes
+ self._output_types = output_types
+ else:
+ if not (output_classes is None and output_shapes is None and
+ output_types is None):
+ raise ValueError(
+ "Either `dataset`, or all of `output_classes`, "
+ "`output_shapes`, and `output_types` must be specified.")
+ self._output_classes = dataset.output_classes
+ self._output_shapes = dataset.output_shapes
+ self._output_types = dataset.output_types
@property
def output_classes(self):
@@ -1330,7 +1430,11 @@ class StructuredFunctionWrapper(object):
flat_shapes.append(component)
flat_types.append(component)
else:
- t = ops.convert_to_tensor(t)
+ try:
+ t = ops.convert_to_tensor(t)
+ except (ValueError, TypeError):
+ raise TypeError("Unsupported return value from function passed to "
+ "%s: %s." % (transformation_name, t))
flat_ret.append(t)
flat_classes.append(ops.Tensor)
flat_shapes.append(t.get_shape())
@@ -1406,11 +1510,30 @@ def flat_structure(dataset):
A dictionary of keyword arguments that can be passed to many Dataset op
constructors.
"""
+ output_classes = []
+ output_shapes = []
+ output_types = []
+ for output_class, output_shape, output_type in zip(
+ nest.flatten(dataset.output_classes), nest.flatten(dataset.output_shapes),
+ nest.flatten(dataset.output_types)):
+ if isinstance(output_class, _NestedDatasetComponent):
+ output_classes.append(output_class.output_classes)
+ output_shapes.append(output_shape.output_shapes)
+ output_types.append(output_type.output_types)
+ else:
+ output_classes.append(output_class)
+ output_shapes.append(output_shape)
+ output_types.append(output_type)
+
+ output_classes = nest.pack_sequence_as(dataset.output_classes, output_classes)
+ output_shapes = nest.pack_sequence_as(dataset.output_shapes, output_shapes)
+ output_types = nest.pack_sequence_as(dataset.output_types, output_types)
+
return {
- "output_shapes": nest.flatten(sparse.as_dense_shapes(
- dataset.output_shapes, dataset.output_classes)),
- "output_types": nest.flatten(sparse.as_dense_types(
- dataset.output_types, dataset.output_classes)),
+ "output_shapes":
+ nest.flatten(sparse.as_dense_shapes(output_shapes, output_classes)),
+ "output_types":
+ nest.flatten(sparse.as_dense_types(output_types, output_classes)),
}
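The `compat.forward_compatible(2018, 8, 3)` checks above gate the switch to the new `IteratorV2` kernel: the predicate stays `False` until the forward-compatibility window for that date has passed, so graphs serialized in the meantime remain loadable by older binaries. A minimal sketch of the idiom (the op names are only stand-ins):

```python
from tensorflow.python.compat import compat

def iterator_op_name():
  # False until the compatibility horizon passes 2018-08-03; after that,
  # every consumer of newly serialized graphs can be assumed to know the
  # new op, so it is safe to emit it.
  if compat.forward_compatible(2018, 8, 3):
    return "IteratorV2"
  return "Iterator"

print(iterator_op_name())
```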
diff --git a/tensorflow/python/data/ops/iterator_ops.py b/tensorflow/python/data/ops/iterator_ops.py
index b6dba4e3ca..35de2f2841 100644
--- a/tensorflow/python/data/ops/iterator_ops.py
+++ b/tensorflow/python/data/ops/iterator_ops.py
@@ -20,6 +20,7 @@ from __future__ import print_function
import threading
import warnings
+from tensorflow.python.compat import compat
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.eager import context
@@ -172,13 +173,32 @@ class Iterator(object):
nest.assert_same_structure(output_types, output_shapes)
if shared_name is None:
shared_name = ""
- iterator_resource = gen_dataset_ops.iterator(
- container="",
- shared_name=shared_name,
- output_types=nest.flatten(
- sparse.as_dense_types(output_types, output_classes)),
- output_shapes=nest.flatten(
- sparse.as_dense_shapes(output_shapes, output_classes)))
+ if compat.forward_compatible(2018, 8, 3):
+ if not ops.get_default_graph()._graph_device_function_stack: # pylint: disable=protected-access
+ with ops.device("/cpu:0"):
+ iterator_resource = gen_dataset_ops.iterator_v2(
+ container="",
+ shared_name=shared_name,
+ output_types=nest.flatten(
+ sparse.as_dense_types(output_types, output_classes)),
+ output_shapes=nest.flatten(
+ sparse.as_dense_shapes(output_shapes, output_classes)))
+ else:
+ iterator_resource = gen_dataset_ops.iterator_v2(
+ container="",
+ shared_name=shared_name,
+ output_types=nest.flatten(
+ sparse.as_dense_types(output_types, output_classes)),
+ output_shapes=nest.flatten(
+ sparse.as_dense_shapes(output_shapes, output_classes)))
+ else:
+ iterator_resource = gen_dataset_ops.iterator(
+ container="",
+ shared_name=shared_name,
+ output_types=nest.flatten(
+ sparse.as_dense_types(output_types, output_classes)),
+ output_shapes=nest.flatten(
+ sparse.as_dense_shapes(output_shapes, output_classes)))
return Iterator(iterator_resource, None, output_types, output_shapes,
output_classes)
@@ -242,12 +262,29 @@ class Iterator(object):
output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
nest.assert_same_structure(output_types, output_shapes)
string_handle = ops.convert_to_tensor(string_handle, dtype=dtypes.string)
- iterator_resource = gen_dataset_ops.iterator_from_string_handle(
- string_handle,
- output_types=nest.flatten(
- sparse.as_dense_types(output_types, output_classes)),
- output_shapes=nest.flatten(
- sparse.as_dense_shapes(output_shapes, output_classes)))
+ if compat.forward_compatible(2018, 8, 3):
+ if not ops.get_default_graph()._graph_device_function_stack: # pylint: disable=protected-access
+ with ops.device("/cpu:0"):
+ iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
+ string_handle,
+ output_types=nest.flatten(
+ sparse.as_dense_types(output_types, output_classes)),
+ output_shapes=nest.flatten(
+ sparse.as_dense_shapes(output_shapes, output_classes)))
+ else:
+ iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
+ string_handle,
+ output_types=nest.flatten(
+ sparse.as_dense_types(output_types, output_classes)),
+ output_shapes=nest.flatten(
+ sparse.as_dense_shapes(output_shapes, output_classes)))
+ else:
+ iterator_resource = gen_dataset_ops.iterator_from_string_handle(
+ string_handle,
+ output_types=nest.flatten(
+ sparse.as_dense_types(output_types, output_classes)),
+ output_shapes=nest.flatten(
+ sparse.as_dense_shapes(output_shapes, output_classes)))
return Iterator(iterator_resource, None, output_types, output_shapes,
output_classes)
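With the change above, `Iterator.from_structure` and `Iterator.from_string_handle` pin the iterator resource to `/cpu:0` whenever the caller has not pushed an explicit device scope. A sketch of the string-handle pattern this affects, assuming the TF 1.x session API:

```python
import tensorflow as tf

train_ds = tf.data.Dataset.range(100)
val_ds = tf.data.Dataset.range(10)

# A feedable iterator: the string handle fed at run time selects which
# concrete iterator is advanced by get_next().
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
    handle, train_ds.output_types, train_ds.output_shapes)
next_element = iterator.get_next()

train_iterator = train_ds.make_one_shot_iterator()
val_iterator = val_ds.make_one_shot_iterator()

with tf.Session() as sess:
  train_handle = sess.run(train_iterator.string_handle())
  val_handle = sess.run(val_iterator.string_handle())
  print(sess.run(next_element, feed_dict={handle: train_handle}))  # => 0
  print(sess.run(next_element, feed_dict={handle: val_handle}))    # => 0
```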
diff --git a/tensorflow/python/debug/BUILD b/tensorflow/python/debug/BUILD
index 6941cacf23..27b8ebd362 100644
--- a/tensorflow/python/debug/BUILD
+++ b/tensorflow/python/debug/BUILD
@@ -404,6 +404,7 @@ py_library(
deps = [
":debug_errors",
":debug_fibonacci",
+ ":debug_keras",
":debug_mnist",
":debug_tflearn_iris",
],
@@ -454,6 +455,17 @@ py_binary(
],
)
+py_binary(
+ name = "debug_keras",
+ srcs = ["examples/debug_keras.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":debug_py",
+ "//tensorflow:tensorflow_py",
+ "//third_party/py/numpy",
+ ],
+)
+
py_test(
name = "common_test",
size = "small",
@@ -791,6 +803,7 @@ cuda_py_test(
"//tensorflow/python:platform_test",
"//tensorflow/python:variables",
],
+ tags = ["no_windows_gpu"],
)
py_test(
@@ -1086,6 +1099,7 @@ py_test(
"//tensorflow/python:state_ops",
"//tensorflow/python:training",
"//tensorflow/python:variables",
+ "//third_party/py/numpy",
],
)
@@ -1096,6 +1110,7 @@ sh_test(
data = [
":debug_errors",
":debug_fibonacci",
+ ":debug_keras",
":debug_mnist",
":debug_tflearn_iris",
":offline_analyzer",
diff --git a/tensorflow/python/debug/examples/debug_keras.py b/tensorflow/python/debug/examples/debug_keras.py
new file mode 100644
index 0000000000..3272d85ade
--- /dev/null
+++ b/tensorflow/python/debug/examples/debug_keras.py
@@ -0,0 +1,89 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""tfdbg example: debugging tf.keras models training on tf.data.Dataset."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import sys
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python import debug as tf_debug
+
+
+def main(_):
+ # Create a dummy dataset.
+ num_examples = 8
+ steps_per_epoch = 2
+ input_dims = 3
+ output_dims = 1
+ xs = np.zeros([num_examples, input_dims])
+ ys = np.zeros([num_examples, output_dims])
+ dataset = tf.data.Dataset.from_tensor_slices(
+ (xs, ys)).repeat(num_examples).batch(int(num_examples / steps_per_epoch))
+
+ sess = tf.Session()
+ if FLAGS.debug:
+ # Use the command-line interface (CLI) of tfdbg.
+ sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type=FLAGS.ui_type)
+ elif FLAGS.tensorboard_debug_address:
+ # Use the TensorBoard Debugger Plugin (GUI of tfdbg).
+ sess = tf_debug.TensorBoardDebugWrapperSession(
+ sess, FLAGS.tensorboard_debug_address)
+ tf.keras.backend.set_session(sess)
+
+ # Create a dummy model.
+ model = tf.keras.Sequential([
+ tf.keras.layers.Dense(1, input_shape=[input_dims])])
+ model.compile(loss="mse", optimizer="sgd")
+
+ # Train the model using the dummy dataset created above.
+ model.fit(dataset, epochs=FLAGS.epochs, steps_per_epoch=steps_per_epoch)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.register("type", "bool", lambda v: v.lower() == "true")
+ parser.add_argument(
+ "--debug",
+ type="bool",
+ nargs="?",
+ const=True,
+ default=False,
+ help="Use debugger to track down bad values during training. "
+ "Mutually exclusive with the --tensorboard_debug_address flag.")
+ parser.add_argument(
+ "--ui_type",
+ type=str,
+ default="curses",
+ help="Command-line user interface type (curses | readline).")
+ parser.add_argument(
+ "--tensorboard_debug_address",
+ type=str,
+ default=None,
+ help="Connect to the TensorBoard Debugger Plugin backend specified by "
+ "the gRPC address (e.g., localhost:1234). Mutually exclusive with the "
+ "--debug flag.")
+ parser.add_argument(
+ "--epochs",
+ type=int,
+ default=2,
+ help="Number of epochs to train the model for.")
+ FLAGS, unparsed = parser.parse_known_args()
+ tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
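The core of the example is the session-wrapping idiom: Keras runs against whatever session the backend holds, so substituting a tfdbg wrapper is enough to intercept every `run()` issued by `model.fit()`. Reduced to its essentials (a sketch; any compiled Keras model would do):

```python
import tensorflow as tf
from tensorflow.python import debug as tf_debug

sess = tf.Session()
# Every Session.run() now passes through the tfdbg CLI wrapper.
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
tf.keras.backend.set_session(sess)

model = tf.keras.Sequential(
    [tf.keras.layers.Dense(1, input_shape=[3])])
model.compile(loss="mse", optimizer="sgd")
# model.fit(...) will now drop into the debugger on each run call.
```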
diff --git a/tensorflow/python/debug/examples/examples_test.sh b/tensorflow/python/debug/examples/examples_test.sh
index e9c45a7e6e..2d35b2d8bb 100755
--- a/tensorflow/python/debug/examples/examples_test.sh
+++ b/tensorflow/python/debug/examples/examples_test.sh
@@ -48,12 +48,14 @@ if [[ -z "${PYTHON_BIN_PATH}" ]]; then
DEBUG_ERRORS_BIN="$TEST_SRCDIR/org_tensorflow/tensorflow/python/debug/debug_errors"
DEBUG_MNIST_BIN="$TEST_SRCDIR/org_tensorflow/tensorflow/python/debug/debug_mnist"
DEBUG_TFLEARN_IRIS_BIN="$TEST_SRCDIR/org_tensorflow/tensorflow/python/debug/debug_tflearn_iris"
+ DEBUG_KERAS_BIN="$TEST_SRCDIR/org_tensorflow/tensorflow/python/debug/debug_keras"
OFFLINE_ANALYZER_BIN="$TEST_SRCDIR/org_tensorflow/tensorflow/python/debug/offline_analyzer"
else
DEBUG_FIBONACCI_BIN="${PYTHON_BIN_PATH} -m tensorflow.python.debug.examples.debug_fibonacci"
DEBUG_ERRORS_BIN="${PYTHON_BIN_PATH} -m tensorflow.python.debug.examples.debug_errors"
DEBUG_MNIST_BIN="${PYTHON_BIN_PATH} -m tensorflow.python.debug.examples.debug_mnist"
DEBUG_TFLEARN_IRIS_BIN="${PYTHON_BIN_PATH} -m tensorflow.python.debug.examples.debug_tflearn_iris"
+ DEBUG_KERAS_BIN="${PYTHON_BIN_PATH} -m tensorflow.python.debug.examples.debug_keras"
OFFLINE_ANALYZER_BIN="${PYTHON_BIN_PATH} -m tensorflow.python.debug.cli.offline_analyzer"
fi
@@ -96,6 +98,11 @@ if [[ -d "${CUSTOM_DUMP_ROOT}" ]]; then
exit 1
fi
+# Test debugging of tf.keras.
+cat << EOF | "${DEBUG_KERAS_BIN}" --debug --ui_type=readline
+run -f has_inf_or_nan
+EOF
+
# Test offline_analyzer.
echo
echo "Testing offline_analyzer"
diff --git a/tensorflow/python/debug/wrappers/framework.py b/tensorflow/python/debug/wrappers/framework.py
index c530204bbf..b9524ce649 100644
--- a/tensorflow/python/debug/wrappers/framework.py
+++ b/tensorflow/python/debug/wrappers/framework.py
@@ -392,6 +392,9 @@ class BaseDebugWrapperSession(session.SessionInterface):
self._default_session_context_manager = None
+ # A cache for callables created from CallableOptions.
+ self._cached_callables_from_options = dict()
+
@property
def graph(self):
return self._sess.graph
@@ -414,7 +417,8 @@ class BaseDebugWrapperSession(session.SessionInterface):
options=None,
run_metadata=None,
callable_runner=None,
- callable_runner_args=None):
+ callable_runner_args=None,
+ callable_options=None):
"""Wrapper around Session.run() that inserts tensor watch options.
Args:
@@ -424,7 +428,12 @@ class BaseDebugWrapperSession(session.SessionInterface):
run_metadata: Same as the `run_metadata` arg to regular `Session.run()`.
callable_runner: A `callable` returned by `Session.make_callable()`.
If not `None`, `fetches` and `feed_dict` must both be `None`.
- callable_runner_args: An optional list of arguments to `callable_runner`.
+ Mutually exclusive with `callable_options`.
+ callable_runner_args: An optional list of arguments to `callable_runner`,
+ or feed values for the callable created from `callable_options`.
+ callable_options: An instance of `config_pb2.CallableOptions`, to be
+ used with `Session._make_callable_from_options()`. Mutually exclusive
+ with `callable_runner`.
Returns:
Simply forwards the output of the wrapped `Session.run()` call.
@@ -433,13 +442,17 @@ class BaseDebugWrapperSession(session.SessionInterface):
ValueError: On invalid `OnRunStartAction` value. Or if `callable_runner`
is not `None` and either or both of `fetches` and `feed_dict` is `None`.
"""
- if not callable_runner:
+ if callable_runner and callable_options:
+ raise ValueError(
+ "callable_runner and callable_options are mutually exclusive, but "
+ "are both specified in this call to BaseDebugWrapperSession.run().")
+
+ if not (callable_runner or callable_options):
self.increment_run_call_count()
- else:
- if fetches or feed_dict:
- raise ValueError(
- "callable_runner and fetches/feed_dict are mutually exclusive, but "
- "are used simultaneously.")
+ elif callable_runner and (fetches or feed_dict):
+ raise ValueError(
+ "callable_runner and fetches/feed_dict are mutually exclusive, "
+ "but are used simultaneously.")
empty_fetches = not nest.flatten(fetches)
if empty_fetches:
@@ -449,6 +462,11 @@ class BaseDebugWrapperSession(session.SessionInterface):
if self._is_disabled_thread() or empty_fetches:
if callable_runner:
return callable_runner(*callable_runner_args)
+ elif callable_options:
+ # pylint:disable=protected-access
+ return self._sess._make_callable_from_options(
+ callable_options)(*callable_runner_args)
+ # pylint:enable=protected-access
else:
return self._sess.run(fetches,
feed_dict=feed_dict,
@@ -464,19 +482,30 @@ class BaseDebugWrapperSession(session.SessionInterface):
if run_start_resp.action == OnRunStartAction.DEBUG_RUN:
# Decorate RunOption to fill in debugger tensor watch specifications.
- decorated_run_options = options or config_pb2.RunOptions()
+ decorated_run_options = None
+ if callable_options:
+ callable_options_id = id(callable_options)
+ if callable_options_id not in self._cached_callables_from_options:
+ # Make a copy of callable_options to avoid mutating it.
+ new_callable_options = config_pb2.CallableOptions()
+ new_callable_options.CopyFrom(callable_options)
+ decorated_run_options = new_callable_options.run_options
+ else:
+ decorated_run_options = options or config_pb2.RunOptions()
+
run_metadata = run_metadata or config_pb2.RunMetadata()
- self._decorate_run_options_for_debug(
- decorated_run_options,
- run_start_resp.debug_urls,
- debug_ops=run_start_resp.debug_ops,
- node_name_regex_whitelist=run_start_resp.node_name_regex_whitelist,
- op_type_regex_whitelist=run_start_resp.op_type_regex_whitelist,
- tensor_dtype_regex_whitelist=(
- run_start_resp.tensor_dtype_regex_whitelist),
- tolerate_debug_op_creation_failures=(
- run_start_resp.tolerate_debug_op_creation_failures))
+ if decorated_run_options:
+ self._decorate_run_options_for_debug(
+ decorated_run_options,
+ run_start_resp.debug_urls,
+ debug_ops=run_start_resp.debug_ops,
+ node_name_regex_whitelist=run_start_resp.node_name_regex_whitelist,
+ op_type_regex_whitelist=run_start_resp.op_type_regex_whitelist,
+ tensor_dtype_regex_whitelist=(
+ run_start_resp.tensor_dtype_regex_whitelist),
+ tolerate_debug_op_creation_failures=(
+ run_start_resp.tolerate_debug_op_creation_failures))
# Invoke the run() method of the wrapped Session. Catch any TensorFlow
# runtime errors.
@@ -486,6 +515,19 @@ class BaseDebugWrapperSession(session.SessionInterface):
retvals = callable_runner(*callable_runner_args,
options=decorated_run_options,
run_metadata=run_metadata)
+ elif callable_options:
+ # pylint:disable=protected-access
+ if callable_options_id in self._cached_callables_from_options:
+ callable_object = self._cached_callables_from_options[
+ callable_options_id]
+ else:
+ callable_object = self._sess._make_callable_from_options(
+ new_callable_options)
+ self._cached_callables_from_options[
+ callable_options_id] = callable_object
+ # pylint:enable=protected-access
+ retvals = callable_object(
+ *callable_runner_args, run_metadata=run_metadata)
else:
retvals = self._sess.run(fetches,
feed_dict=feed_dict,
@@ -590,7 +632,14 @@ class BaseDebugWrapperSession(session.SessionInterface):
run_metadata=kwargs.get("run_metadata", None),
callable_runner=runner,
callable_runner_args=runner_args)
+ return wrapped_runner
+ def _make_callable_from_options(self, callable_options):
+ def wrapped_runner(*feed_values, **kwargs):
+ return self.run(None,
+ run_metadata=kwargs.get("run_metadata", None),
+ callable_options=callable_options,
+ callable_runner_args=feed_values)
return wrapped_runner
@property
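The caching added to `BaseDebugWrapperSession` memoizes callables by `id(callable_options)`, so an options proto reused across steps is compiled into a callable exactly once. The scheme in isolation (a sketch; it assumes the options object outlives its cache entry, since `id()` values can be recycled after garbage collection):

```python
class CallableCache(object):
  """Memoizes session callables keyed on the identity of their options."""

  def __init__(self, sess):
    self._sess = sess
    self._cache = {}

  def get(self, callable_options):
    key = id(callable_options)
    if key not in self._cache:
      # pylint: disable=protected-access
      self._cache[key] = self._sess._make_callable_from_options(
          callable_options)
    return self._cache[key]
```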
diff --git a/tensorflow/python/debug/wrappers/grpc_wrapper.py b/tensorflow/python/debug/wrappers/grpc_wrapper.py
index 1f9c8fa5a9..85944fa611 100644
--- a/tensorflow/python/debug/wrappers/grpc_wrapper.py
+++ b/tensorflow/python/debug/wrappers/grpc_wrapper.py
@@ -215,7 +215,8 @@ class TensorBoardDebugWrapperSession(GrpcDebugWrapperSession):
options=None,
run_metadata=None,
callable_runner=None,
- callable_runner_args=None):
+ callable_runner_args=None,
+ callable_options=None):
if self._send_traceback_and_source_code:
self._sent_graph_version = publish_traceback(
self._grpc_debug_server_urls, self.graph, feed_dict, fetches,
@@ -226,4 +227,5 @@ class TensorBoardDebugWrapperSession(GrpcDebugWrapperSession):
options=options,
run_metadata=run_metadata,
callable_runner=callable_runner,
- callable_runner_args=callable_runner_args)
+ callable_runner_args=callable_runner_args,
+ callable_options=callable_options)
diff --git a/tensorflow/python/debug/wrappers/local_cli_wrapper.py b/tensorflow/python/debug/wrappers/local_cli_wrapper.py
index 4e551ab995..668ffb57f1 100644
--- a/tensorflow/python/debug/wrappers/local_cli_wrapper.py
+++ b/tensorflow/python/debug/wrappers/local_cli_wrapper.py
@@ -596,7 +596,7 @@ class LocalCLIDebugWrapperSession(framework.BaseDebugWrapperSession):
# Register tab completion for the filter names.
curses_cli.register_tab_comp_context(["run", "r"],
list(self._tensor_filters.keys()))
- if self._feed_dict:
+ if self._feed_dict and hasattr(self._feed_dict, "keys"):
# Register tab completion for feed_dict keys.
feed_keys = [common.get_graph_element_name(key)
for key in self._feed_dict.keys()]
diff --git a/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py b/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py
index b06fa26a93..05c9eaa4d2 100644
--- a/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py
+++ b/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py
@@ -21,7 +21,10 @@ import os
import shutil
import tempfile
+import numpy as np
+
from tensorflow.core.protobuf import config_pb2
+from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
@@ -149,7 +152,13 @@ class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
dtypes.float32, shape=([5, 5]), name="sparse_placeholder")
self.sparse_add = sparse_ops.sparse_add(self.sparse_ph, self.sparse_ph)
- self.sess = session.Session()
+ rewriter_config = rewriter_config_pb2.RewriterConfig(
+ disable_model_pruning=True,
+ arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
+ dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
+ graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
+ config_proto = config_pb2.ConfigProto(graph_options=graph_options)
+ self.sess = session.Session(config=config_proto)
# Initialize variable.
self.sess.run(variables.global_variables_initializer())
@@ -393,6 +402,113 @@ class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
self.assertAllClose(42.0, tensor_runner(41.0, 1.0))
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
+ def testDebuggingMakeCallableFromOptionsWithZeroFeedWorks(self):
+ variable_1 = variables.Variable(
+ 10.5, dtype=dtypes.float32, name="variable_1")
+ a = math_ops.add(variable_1, variable_1, "callable_a")
+ math_ops.add(a, a, "callable_b")
+ self.sess.run(variable_1.initializer)
+
+ wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
+ [["run"]] * 3, self.sess, dump_root=self._tmp_dir)
+ callable_options = config_pb2.CallableOptions()
+ callable_options.fetch.append("callable_b")
+ sess_callable = wrapped_sess._make_callable_from_options(callable_options)
+
+ for _ in range(2):
+ callable_output = sess_callable()
+ self.assertAllClose(np.array(42.0, dtype=np.float32), callable_output[0])
+
+ debug_dumps = wrapped_sess.observers["debug_dumps"]
+ self.assertEqual(2, len(debug_dumps))
+ for debug_dump in debug_dumps:
+ node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
+ self.assertItemsEqual(
+ ["callable_a", "callable_b", "variable_1", "variable_1/read"],
+ node_names)
+
+ def testDebuggingMakeCallableFromOptionsWithOneFeedWorks(self):
+ ph1 = array_ops.placeholder(dtypes.float32, name="callable_ph1")
+ a = math_ops.add(ph1, ph1, "callable_a")
+ math_ops.add(a, a, "callable_b")
+
+ wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
+ [["run"]] * 3, self.sess, dump_root=self._tmp_dir)
+ callable_options = config_pb2.CallableOptions()
+ callable_options.feed.append("callable_ph1")
+ callable_options.fetch.append("callable_b")
+ sess_callable = wrapped_sess._make_callable_from_options(callable_options)
+
+ ph1_value = np.array([10.5, -10.5], dtype=np.float32)
+
+ for _ in range(2):
+ callable_output = sess_callable(ph1_value)
+ self.assertAllClose(
+ np.array([42.0, -42.0], dtype=np.float32), callable_output[0])
+
+ debug_dumps = wrapped_sess.observers["debug_dumps"]
+ self.assertEqual(2, len(debug_dumps))
+ for debug_dump in debug_dumps:
+ node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
+ self.assertItemsEqual(["callable_a", "callable_b"], node_names)
+
+ def testDebuggingMakeCallableFromOptionsWithTwoFeedsWorks(self):
+ ph1 = array_ops.placeholder(dtypes.float32, name="callable_ph1")
+ ph2 = array_ops.placeholder(dtypes.float32, name="callable_ph2")
+ a = math_ops.add(ph1, ph2, "callable_a")
+ math_ops.add(a, a, "callable_b")
+
+ wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
+ [["run"]] * 3, self.sess, dump_root=self._tmp_dir)
+ callable_options = config_pb2.CallableOptions()
+ callable_options.feed.append("callable_ph1")
+ callable_options.feed.append("callable_ph2")
+ callable_options.fetch.append("callable_b")
+ sess_callable = wrapped_sess._make_callable_from_options(callable_options)
+
+ ph1_value = np.array(5.0, dtype=np.float32)
+ ph2_value = np.array(16.0, dtype=np.float32)
+
+ for _ in range(2):
+ callable_output = sess_callable(ph1_value, ph2_value)
+ self.assertAllClose(np.array(42.0, dtype=np.float32), callable_output[0])
+
+ debug_dumps = wrapped_sess.observers["debug_dumps"]
+ self.assertEqual(2, len(debug_dumps))
+ for debug_dump in debug_dumps:
+ node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
+ self.assertItemsEqual(["callable_a", "callable_b"], node_names)
+
+ def testDebugMakeCallableFromOptionsWithCustomOptionsAndMetadataWorks(self):
+ variable_1 = variables.Variable(
+ 10.5, dtype=dtypes.float32, name="variable_1")
+ a = math_ops.add(variable_1, variable_1, "callable_a")
+ math_ops.add(a, a, "callable_b")
+ self.sess.run(variable_1.initializer)
+
+ wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
+ [["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
+ callable_options = config_pb2.CallableOptions()
+ callable_options.fetch.append("callable_b")
+ callable_options.run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
+
+ sess_callable = wrapped_sess._make_callable_from_options(callable_options)
+
+ run_metadata = config_pb2.RunMetadata()
+ # Call the callable with a custom run_metadata.
+ callable_output = sess_callable(run_metadata=run_metadata)
+ # Verify that step_stats is populated in the custom run_metadata.
+ self.assertTrue(run_metadata.step_stats)
+ self.assertAllClose(np.array(42.0, dtype=np.float32), callable_output[0])
+
+ debug_dumps = wrapped_sess.observers["debug_dumps"]
+ self.assertEqual(1, len(debug_dumps))
+ debug_dump = debug_dumps[0]
+ node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
+ self.assertItemsEqual(
+ ["callable_a", "callable_b", "variable_1", "variable_1/read"],
+ node_names)
+
def testRuntimeErrorShouldBeCaught(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
diff --git a/tensorflow/python/eager/backprop.py b/tensorflow/python/eager/backprop.py
index bd97b181ff..9e0bbce4a1 100644
--- a/tensorflow/python/eager/backprop.py
+++ b/tensorflow/python/eager/backprop.py
@@ -605,7 +605,9 @@ def _zeros(shape, dtype):
# TODO(apassos): need to save enough information about variant tensors to do
# a zeros
return None
- cache_key = shape, dtype, device
+ # pylint: disable=protected-access
+ cache_key = shape, dtype, device, context.context()._eager_context.mode
+ # pylint: enable=protected-access
cached = _zeros_cache.get(cache_key)
if cached is None:
cached = _fast_fill(0, shape, dtype)
@@ -711,10 +713,15 @@ class GradientTape(object):
if self._recording:
self._pop_tape()
- def _push_tape(self):
+ def _push_tape(self, existing_tape=False):
if self._recording:
raise ValueError("Tape is already recording.")
- self._tape = tape.push_new_tape(persistent=self._persistent)
+ if existing_tape:
+ if self._tape is None:
+ raise ValueError("There is no existing tape.")
+ tape.push_tape(self._tape)
+ else:
+ self._tape = tape.push_new_tape(persistent=self._persistent)
self._recording = True
def _pop_tape(self):
@@ -762,7 +769,7 @@ class GradientTape(object):
try:
yield
finally:
- self._push_tape()
+ self._push_tape(existing_tape=True)
def reset(self):
"""Clears all information stored in this tape.
diff --git a/tensorflow/python/eager/backprop_test.py b/tensorflow/python/eager/backprop_test.py
index e129c2756a..bdda200ff6 100644
--- a/tensorflow/python/eager/backprop_test.py
+++ b/tensorflow/python/eager/backprop_test.py
@@ -223,11 +223,23 @@ class BackpropTest(test.TestCase):
def testTapeStopRecording(self):
with backprop.GradientTape() as t:
- x = constant_op.constant(1.0)
+ x = resource_variable_ops.ResourceVariable(1.0)
with t.stop_recording():
y = x * x
self.assertEqual(t.gradient(y, x), None)
+ def testTapeStopStartRecording(self):
+ with backprop.GradientTape(persistent=True) as t:
+ x = resource_variable_ops.ResourceVariable(1.0)
+ x2 = x * 2 # This should be differentiated through.
+ with t.stop_recording():
+ y = x2 * x2
+ z = x2 * x2
+ self.assertEqual(t.gradient(y, x2), None)
+
+ # If the x*2 was not differentiated through, this would be 2.0, not 4.0
+ self.assertEqual(t.gradient(z, x2).numpy(), 4.0)
+
def testTapeReset(self):
with backprop.GradientTape() as t:
v = resource_variable_ops.ResourceVariable(1.0)
@@ -900,6 +912,33 @@ class BackpropTest(test.TestCase):
'did you forget to return a value from fn?'):
val_and_grads_fn(x, y)
+ def testZerosCacheDoesntLeakAcrossModes(self):
+ with ops.Graph().as_default():
+ t = random_ops.random_normal(shape=[100, 2])
+ x = random_ops.random_normal(shape=[100, 4])
+ dy = random_ops.random_normal(shape=[100, 4])
+ with backprop.GradientTape() as gradient_tape:
+ gradient_tape.watch(x)
+ x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
+ y1 = x1 ** 2.
+ y = array_ops.concat([y1, t], axis=1)
+
+ dx = gradient_tape.gradient(y, x, output_gradients=dy)
+ with self.test_session() as sess:
+ sess.run(variables.global_variables_initializer())
+ sess.run(dx)
+
+ t = random_ops.random_normal(shape=[100, 2])
+ x = random_ops.random_normal(shape=[100, 4])
+ dy = random_ops.random_normal(shape=[100, 4])
+ with backprop.GradientTape() as gradient_tape:
+ gradient_tape.watch(x)
+ x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
+ y1 = x1 ** 2.
+ y = array_ops.concat([y1, t], axis=1)
+
+ dx = gradient_tape.gradient(y, x, output_gradients=dy)
+
if __name__ == '__main__':
test.main()
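The one-line change to `_zeros` adds the execution mode to the cache key; without it, a zeros tensor created eagerly could be handed out while building a graph, which is the leak `testZerosCacheDoesntLeakAcrossModes` pins down. The shape of the fix, as a standalone sketch with NumPy standing in for `_fast_fill`:

```python
import numpy as np

_zeros_cache = {}

def cached_zeros(shape, dtype, device, mode):
  # `mode` partitions the cache: an eagerly created tensor must never be
  # reused as a graph-mode tensor, and vice versa.
  key = (shape, dtype, device, mode)
  if key not in _zeros_cache:
    _zeros_cache[key] = np.zeros(shape, dtype=dtype)
  return _zeros_cache[key]

print(cached_zeros((2, 2), np.float32, "/cpu:0", "eager"))
```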
diff --git a/tensorflow/python/eager/function.py b/tensorflow/python/eager/function.py
index fc68e945c0..a6906f9efd 100644
--- a/tensorflow/python/eager/function.py
+++ b/tensorflow/python/eager/function.py
@@ -21,6 +21,7 @@ from __future__ import print_function
import collections
import functools
+import threading
import numpy as np
@@ -36,6 +37,7 @@ from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import compat
@@ -47,8 +49,11 @@ def capture_value(tensor_map, value, dtype, name):
"""Capture a value from outside the function, to pass in as an extra arg."""
captured_value = tensor_map.get(ops.tensor_id(value), None)
if captured_value is None:
- captured_value = graph_placeholder(
- dtype=dtype or value.dtype, shape=value.shape, name=name)
+ # Note: setting ops.control_dependencies(None) ensures we always put
+ # capturing placeholders outside of any control flow context.
+ with ops.control_dependencies(None):
+ captured_value = graph_placeholder(
+ dtype=dtype or value.dtype, shape=value.shape, name=name)
if captured_value.dtype == dtypes_module.resource:
if ops._USE_C_SHAPES: # pylint: disable=protected-access
if isinstance(value, ops.EagerTensor):
@@ -133,7 +138,7 @@ class CapturingGraph(ops.Graph):
inputs[i] = self.capture(inp)
return super(CapturingGraph, self).create_op(
op_type, inputs, dtypes, input_types, name, attrs, op_def,
- compute_shapes, compute_device)
+ compute_device=compute_device)
# pylint: disable=invalid-name
@@ -228,11 +233,20 @@ def _register(fn):
context.context().add_function(fn)
+_xla_compile_attr = "_XlaCompile"
+
+
# TODO(apassos) get rid of this by splitting framework.function._DefinedFunction
# so it doesn't have the definition-generating logic and is just a container for
# an already-defined function.
class _EagerDefinedFunction(object):
- """Function object with the interface of tf _DefinedFunction."""
+ """Callable with the interface of `framework.function._DefinedFunction.`
+
+ `_EagerDefinedFunction` encapsulates a function definition and its properties,
+ and it provides a method for calling the encapsulated function. Some Ops
+ take functions as attributes, which have type `func`; an instance of this
+ class may be provided as the value of these `func` attributes.
+ """
def __init__(self, name, graph, operations, inputs, outputs, attrs):
"""Initializes an eager defined function.
@@ -263,6 +277,7 @@ class _EagerDefinedFunction(object):
# It might be worth creating a convenient way to re-use status.
pywrap_tensorflow.TF_FunctionSetAttrValueProto(
fn, compat.as_str(name), serialized)
+ self._xla_compile = _xla_compile_attr in attrs
# TODO(apassos) avoid creating a FunctionDef (specially to grab the
# signature, but also in general it's nice not to depend on it.
@@ -274,12 +289,92 @@ class _EagerDefinedFunction(object):
if context.executing_eagerly():
_register(fn)
self.definition = function_def
- self.name = function_def.signature.name
+ self.name = compat.as_bytes(function_def.signature.name)
self.signature = function_def.signature
+ self._num_outputs = len(self.signature.output_arg)
+ self._output_types = [o.type for o in self.signature.output_arg]
self.grad_func_name = None
self.python_grad_func = None
self._c_func = c_api_util.ScopedTFFunction(fn)
self._grad_func = None
+ self._graph = graph
+ self._stateful_ops = tuple(op for op in operations if op.op_def.is_stateful)
+
+ def add_to_graph(self, g):
+ # pylint: disable=protected-access
+ if self.name not in g._functions:
+ g._add_function(self)
+ for f in self._graph._functions.values():
+ if f.name not in g._functions:
+ g._add_function(f)
+ # pylint: enable=protected-access
+
+ @property
+ def stateful_ops(self):
+ return self._stateful_ops
+
+ def call(self, ctx, args, output_shapes):
+ """Calls this function with `args` as inputs.
+
+ Function execution respects device annotations only if the function will
+ not be compiled with XLA.
+
+ Args:
+ ctx: a `Context` object.
+ args: a list of arguments to supply this function with.
+ output_shapes: shapes to which outputs should be set; ignored when
+ executing eagerly.
+
+ Returns:
+ The outputs of the function call.
+ """
+
+ executing_eagerly = ctx.executing_eagerly()
+
+ xla_compile = self._xla_compile or (executing_eagerly and
+ ctx.device_spec.device_type == "TPU")
+
+ if xla_compile:
+ # XLA compilation relies upon a custom kernel creator to run functions.
+ signature = self.signature
+ if executing_eagerly:
+ outputs = execute.execute(
+ str(signature.name),
+ num_outputs=self._num_outputs,
+ inputs=args,
+ attrs=None,
+ ctx=ctx)
+ else:
+ g = ops.get_default_graph()
+ self.add_to_graph(g)
+ op = g.create_op(
+ signature.name,
+ [ops.internal_convert_to_tensor(x, ctx=ctx) for x in args],
+ tuple(dtypes_module.DType(x.type) for x in signature.output_arg),
+ op_def=signature,
+ name="FunctionCall",
+ compute_shapes=False)
+ outputs = op.outputs
+ if not outputs:
+ return op
+ outputs = [outputs] if isinstance(
+ outputs, (ops.Tensor, type(None))) else list(outputs)
+ else:
+ # TODO(akshayka): Either remove this if the FunctionLibraryRuntime
+ # creates `PartitionedCallOp` kernels by default, or remove the previous
+ # branch if a TPU kernel is registered for `PartitionedCall`.
+ outputs = functional_ops.partitioned_call(
+ args=args,
+ f=self,
+ tout=self._output_types,
+ executing_eagerly=executing_eagerly)
+
+ if executing_eagerly:
+ return outputs
+ else:
+ for i, shape in enumerate(output_shapes):
+ outputs[i].set_shape(shape)
+ return outputs
def _map_sequence_obj_to_idx(sequence):
@@ -303,8 +398,12 @@ def _flatten(sequence):
return outputs
+# TODO(akshayka): Perhaps rename to something more appropriate.
class GraphModeFunction(object):
- """Callable object representing a graph-mode function.
+ """Callable object encapsulating a function definition and its gradient.
+
+ `GraphModeFunction` is a callable that encapsulates a function definition and
+ is differentiable under `tf.GradientTape` objects.
"""
def __init__(self,
@@ -371,7 +470,7 @@ class GraphModeFunction(object):
def _construct_backprop_function(self):
"""Constructs the backprop function object for this function."""
- with self._graph.as_default(), context.graph_mode():
+ with self._graph.as_default():
c_known_ops = set()
c_captured_tensors = set()
@@ -385,7 +484,7 @@ class GraphModeFunction(object):
grad_ys=self._out_grad_placeholders)
for op in self._graph.get_operations()[existing_op_len:]:
if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
- raise ValueError("tfe.defun cannot capture variables created without "
+ raise ValueError("defun cannot capture variables created without "
"using tf.get_variable. Op: %s" % op)
c_known_ops.add(op)
for i in op.inputs:
@@ -427,35 +526,10 @@ class GraphModeFunction(object):
The call output.
"""
all_args = args + self._extra_inputs
- signature = self._forward_fdef.signature
ctx = context.context()
- if ctx.executing_eagerly():
- outputs = execute.execute(
- str(signature.name),
- num_outputs=len(signature.output_arg),
- inputs=all_args,
- attrs=None,
- ctx=ctx)
- if not outputs:
- return None
- else:
- g = ops.get_default_graph()
- g._add_function(self._forward_fdef) # pylint: disable=protected-access
- op = g.create_op(
- signature.name,
- [ops.internal_convert_to_tensor(x, ctx=ctx) for x in all_args],
- tuple(dtypes_module.DType(x.type) for x in signature.output_arg),
- op_def=signature,
- name="FunctionCall",
- compute_shapes=False)
- outputs = op.outputs
- if not outputs:
- return op
- outputs = [outputs] if isinstance(outputs, ops.Tensor) else list(outputs)
-
- shapes = [shape for shape in self._output_shapes if shape is not None]
- for i, shape in enumerate(shapes):
- outputs[i].set_shape(shape)
+ outputs = self._forward_fdef.call(ctx, all_args, self._output_shapes)
+ if isinstance(outputs, ops.Operation) or outputs is None:
+ return outputs
# `real_outputs` are the actual outputs of the inference graph function;
# `side_outputs` are the intermediate Tensors that were added as outputs to
@@ -467,7 +541,7 @@ class GraphModeFunction(object):
return self._backward_function(*(list(args) + side_outputs)) # pylint: disable=not-callable
tape.record_operation(
- signature.name,
+ self._forward_fdef.signature.name,
real_outputs,
(args + self._extra_inputs),
backward_function)
@@ -509,13 +583,6 @@ class GraphModeFunction(object):
"""Returns the name of the function in Eager-compatible format."""
return self._function_def.name.encode("utf-8")
- def add_to_graph(self, g):
- if self._function_def.name not in g._functions: # pylint: disable=protected-access
- g._add_function(self._function_def) # pylint: disable=protected-access
- for f in self._graph._functions.values(): # pylint: disable=protected-access
- if f.name not in g._functions: # pylint: disable=protected-access
- g._add_function(f) # pylint: disable=protected-access
-
def __call__(self, *args):
"""Executes the passed function in eager mode."""
for v in self._variables:
@@ -530,34 +597,9 @@ class GraphModeFunction(object):
return self._backprop_call(tensor_inputs)
ctx = context.context()
- if ctx.executing_eagerly():
- result = execute.execute(
- str(self._func_name),
- num_outputs=self._num_outputs,
- inputs=tensor_inputs + self._extra_inputs,
- attrs=None,
- ctx=ctx)
- else:
- g = ops.get_default_graph()
- self.add_to_graph(g)
- signature = self._function_def.definition.signature
- args = list(tensor_inputs) + self._extra_inputs
- op = g.create_op(
- signature.name,
- [ops.internal_convert_to_tensor(x, ctx=ctx) for x in args],
- tuple(dtypes_module.DType(x.type) for x in signature.output_arg),
- op_def=signature,
- name="FunctionCall",
- compute_shapes=False)
- result = op.outputs
- if not result:
- return op
-
- shapes = [shape for shape in self._output_shapes if shape is not None]
- for i, shape in enumerate(shapes):
- result[i].set_shape(shape)
-
- return self._build_call_outputs(result)
+ args = tensor_inputs + self._extra_inputs
+ outputs = self._function_def.call(ctx, args, self._output_shapes)
+ return self._build_call_outputs(outputs)
def _build_call_outputs(self, result):
"""Maps the fdef output list to actual output structure.
@@ -568,7 +610,8 @@ class GraphModeFunction(object):
The actual call output.
"""
if self._python_func_outputs is None:
- return None
+ return result
+
# Use `nest.flatten` instead of `_flatten` in order to preserve any
# IndexedSlices in `self._python_func_outputs`.
outputs_list = nest.flatten(self._python_func_outputs)
@@ -614,55 +657,58 @@ def _deterministic_dict_values(kwds):
def _trace_and_define_function(name, func, compiled, args, kwds):
"""Defines and returns graph-mode version of func."""
graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
- with context.graph_mode():
- captures = {}
- tmp_graph = CapturingGraph(captures)
- # Inherit the graph key, since this is used for matching variables in
- # optimizers.
- tmp_graph._graph_key = graph_key # pylint: disable=protected-access
- # Copy the graph collections to ensure summaries and other things work. This
- # lets the function access (but not mutate) collections of the containing
- # graph, such as the global step and the summary writer collections.
- curr_graph = ops.get_default_graph()
- for collection in curr_graph.collections:
- tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
- collection)
- with tmp_graph.as_default(), AutomaticControlDependencies() as a:
- func_args = _get_defun_inputs(args)
- func_kwds = _get_defun_inputs(kwds)
-
- def convert(x):
- if x is None:
- return None
- x = ops.convert_to_tensor_or_indexed_slices(x)
- x = a.mark_as_return(x)
- return x
+ captures = {}
+ tmp_graph = CapturingGraph(captures)
+ # Inherit the graph key, since this is used for matching variables in
+ # optimizers.
+ tmp_graph._graph_key = graph_key # pylint: disable=protected-access
+ # Copy the graph collections to ensure summaries and other things work. This
+ # lets the function access (but not mutate) collections of the containing
+ # graph, such as the global step and the summary writer collections.
+ curr_graph = ops.get_default_graph()
+ for collection in curr_graph.collections:
+ tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
+ collection)
+ if context.executing_eagerly():
+ tmp_graph.seed = context.global_seed()
+ else:
+ tmp_graph.seed = curr_graph.seed
+ with tmp_graph.as_default(), AutomaticControlDependencies() as a:
+ func_args = _get_defun_inputs(args)
+ func_kwds = _get_defun_inputs(kwds)
- this_tape = tape.push_new_tape()
- try:
- func_outputs = func(*func_args, **func_kwds)
- func_outputs = nest.map_structure(convert, func_outputs)
- finally:
- tape.pop_tape(this_tape)
- variables = this_tape.watched_variables()
-
- # Returning a closed-over tensor as an output does not trigger a
- # call to convert_to_tensor, so we manually capture all such tensors.
- outputs_list = _flatten(func_outputs)
- func_def_outputs = [
- tmp_graph.capture(x) for x in outputs_list
- if x is not None
- ]
-
- ids = list(sorted(captures.keys()))
- if ids:
- extra_inputs, extra_placeholders = zip(* [captures[x] for x in ids])
- else:
- extra_inputs = []
- extra_placeholders = []
- output_shapes = tuple(
- x.shape if isinstance(x, ops.Tensor) else None
- for x in outputs_list)
+ def convert(x):
+ if x is None:
+ return None
+ x = ops.convert_to_tensor_or_indexed_slices(x)
+ x = a.mark_as_return(x)
+ return x
+
+ this_tape = tape.push_new_tape()
+ try:
+ func_outputs = func(*func_args, **func_kwds)
+ func_outputs = nest.map_structure(convert, func_outputs)
+ finally:
+ tape.pop_tape(this_tape)
+ variables = this_tape.watched_variables()
+
+ # Returning a closed-over tensor as an output does not trigger a
+ # call to convert_to_tensor, so we manually capture all such tensors.
+ outputs_list = _flatten(func_outputs)
+ func_def_outputs = [
+ tmp_graph.capture(x) for x in outputs_list
+ if x is not None
+ ]
+
+ ids = list(sorted(captures.keys()))
+ if ids:
+ extra_inputs, extra_placeholders = zip(* [captures[x] for x in ids])
+ else:
+ extra_inputs = []
+ extra_placeholders = []
+ output_shapes = tuple(
+ x.shape if isinstance(x, ops.Tensor) else None
+ for x in func_def_outputs)
func_kwds_values = _deterministic_dict_values(func_kwds)
flat_inputs = [
@@ -683,7 +729,7 @@ def _trace_and_define_function(name, func, compiled, args, kwds):
attrs = {}
if compiled:
- attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=True)
+ attrs[_xla_compile_attr] = attr_value_pb2.AttrValue(b=True)
return GraphModeFunction(
fname, all_inputs, extra_inputs, tmp_graph, operations, func_def_outputs,
@@ -728,6 +774,11 @@ class _PolymorphicFunction(object):
See the documentation for `defun` for more information on the semantics of
defined functions.
+
+ `_PolymorphicFunction` is thread-compatible: minimal usage of defuns
+ (defining and calling) is thread-safe, but users who call other methods or
+ invoke the underlying `python_function` themselves must provide external
+ synchronization.
"""
def __init__(self, python_function, name, compiled=False):
@@ -745,6 +796,8 @@ class _PolymorphicFunction(object):
self._arguments_to_functions = {}
self._variables = []
+ self._lock = threading.Lock()
+
def __get__(self, instance, owner):
"""Makes it possible to defun instance methods."""
del owner
@@ -779,22 +832,30 @@ class _PolymorphicFunction(object):
kwd_values = _deterministic_dict_values(kwds)
inputs = args + kwd_values
signature = tuple(_cache_key(x) for x in inputs)
-
- if signature not in self._arguments_to_functions:
- graph_function = _trace_and_define_function(
- self._name, self._python_function, self._compiled, args, kwds)
- self._arguments_to_functions[signature] = graph_function
- self._variables.extend(
- [v for v in graph_function.variables if v not in self._variables])
- return graph_function, inputs
- else:
- return self._arguments_to_functions[signature], inputs
+ # The graph, or whether we're executing eagerly, should be a part of the
+ # signature so we don't improperly capture tensors such as variables.
+ signature += tuple([context.executing_eagerly() or ops.get_default_graph()])
+
+ with self._lock:
+ if signature not in self._arguments_to_functions:
+ graph_function = _trace_and_define_function(
+ self._name, self._python_function, self._compiled, args, kwds)
+ self._arguments_to_functions[signature] = graph_function
+ self._variables.extend(
+ [v for v in graph_function.variables if v not in self._variables])
+ return graph_function, inputs
+ else:
+ return self._arguments_to_functions[signature], inputs
def __call__(self, *args, **kwds):
"""Calls a graph function specialized for this input signature."""
graph_function, inputs = self._maybe_define_function(*args, **kwds)
return graph_function(*inputs)
+ def call_python_function(self, *args, **kwargs):
+ """Directly calls the wrapped python function."""
+ return self._python_function(*args, **kwargs)
+
@property
def variables(self):
"""Returns a list of variables used in any of the defined functions."""
@@ -832,6 +893,11 @@ def defun(func=None, compiled=False):
be hashable Python objects or lists thereof. Additionally, it must return zero
or more @{tf.Tensor} objects.
+ Executing a graph generated by `defun` respects device annotations (i.e.,
+ all `with tf.device` directives present in a Python function will also be
+ present in its corresponding graph), but it is not yet possible to execute the
+ generated graphs across multiple machines.
+
_Example Usage_
```python
@@ -1242,7 +1308,7 @@ class AutomaticControlDependencies(object):
# Ensures the merge always runs
ops_which_must_run.add(new_merge[0].op)
if inp in last_op_using_resource_tensor:
- # Ensures the switch exectutes after the previous op using the resource.
+ # Ensures the switch executes after the previous op using the resource.
switch_op._add_control_input(last_op_using_resource_tensor[inp]) # pylint: disable=protected-access
# Ensure the next op outside the cond happens after the merge.
last_op_using_resource_tensor[inp] = new_merge[0].op
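The `capture_value` change wraps placeholder creation in `ops.control_dependencies(None)`, which clears the active control-dependency and control-flow context so capturing placeholders always land at the top level of the function graph rather than inside a `cond` or `while` branch. The idiom in isolation (a sketch using the public API):

```python
import tensorflow as tf

def top_level_placeholder(value):
  # control_dependencies(None) resets the context, so the placeholder is
  # created outside whatever control flow construct is currently active.
  with tf.control_dependencies(None):
    return tf.placeholder(dtype=value.dtype, shape=value.shape)
```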
diff --git a/tensorflow/python/eager/function_test.py b/tensorflow/python/eager/function_test.py
index a5df3ef530..13c4ee7f15 100644
--- a/tensorflow/python/eager/function_test.py
+++ b/tensorflow/python/eager/function_test.py
@@ -19,15 +19,18 @@ from __future__ import print_function
import collections
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import tape
-from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
from tensorflow.python.framework import function as tf_function
from tensorflow.python.framework import ops
+from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional
@@ -37,10 +40,14 @@ from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
-from tensorflow.python.training import gradient_descent
+from tensorflow.python.platform import test
+from tensorflow.python.training import momentum
+from tensorflow.python.training import training_ops
+from tensorflow.python.util import compat
@test_util.with_c_shapes
@@ -103,6 +110,19 @@ class FunctionTest(test.TestCase):
grads, = gradients_impl.gradients(node, v)
v.initializer.run()
self.assertAllEqual(grads.eval(), 2.0)
+ self.assertEqual(grads.shape, v.shape)
+
+ def testGraphEagerIsolation(self):
+
+ @function.defun
+ def f():
+ v = resource_variable_ops.ResourceVariable(1.0)
+ return v.read_value()
+
+ self.assertAllEqual(f(), 1.0)
+
+ with ops.Graph().as_default():
+ self.assertEqual(f().shape, ())
def testBasicDefunOpGraphMode(self):
matmul = function.defun(math_ops.matmul)
@@ -118,6 +138,18 @@ class FunctionTest(test.TestCase):
out = sq_op(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
+ def testRandomSeed(self):
+
+ @function.defun
+ def f():
+ return random_ops.random_normal(())
+
+ random_seed.set_random_seed(1)
+ x = f()
+ self.assertNotEqual(x, f())
+ random_seed.set_random_seed(1)
+ self.assertAllEqual(f(), x)
+
def testNestedInputsDefunOpGraphMode(self):
matmul = function.defun(math_ops.matmul)
@@ -180,6 +212,15 @@ class FunctionTest(test.TestCase):
self.assertEqual(fn_op.output_shapes, None)
self.assertAllEqual(fn_op(x, x), None)
+ def testDefunCapturedInt32(self):
+ x = constant_op.constant(1, dtype=dtypes.int32)
+
+ @function.defun
+ def add_int32s():
+ return x + x
+
+ self.assertEqual(2, int(add_int32s()))
+
def testDefunReadVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@@ -191,13 +232,14 @@ class FunctionTest(test.TestCase):
def testDefunAssignAddVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
+ x = constant_op.constant(2.0)
@function.defun
- def f():
- v.assign_add(2.0)
+ def test_assign_add():
+ v.assign_add(x)
return v.read_value()
- self.assertEqual(3.0, float(f()))
+ self.assertEqual(3.0, float(test_assign_add()))
def testDefunShapeInferenceWithCapturedResourceVariable(self):
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
@@ -210,6 +252,21 @@ class FunctionTest(test.TestCase):
compiled = function.defun(f)
compiled()
+ def testVariableInLoopInFunction(self):
+
+ @function.defun
+ def test_function():
+
+ def loop_test(_):
+ return False
+
+ def loop_body(_):
+ return variable_scope.get_variable('a', shape=())
+
+ return control_flow_ops.while_loop(loop_test, loop_body, [0.0])
+
+ self.assertEqual(test_function().shape, [])
+
def testDefunShapeInferenceWithCapturedResourceVariableInGraphMode(self):
with context.graph_mode():
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
@@ -412,24 +469,33 @@ class FunctionTest(test.TestCase):
self.assertAllEqual(f(constant_op.constant(1.0)), 2.0)
- def testGradientOfGatherWithDefun(self):
+ def testGatherResourceWithDefun(self):
with ops.device('cpu:0'):
v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
- def sum_gather():
- return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
+ def sum_gather():
+ return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
+
+ defined = function.defun(sum_gather)
+ self.assertAllEqual(sum_gather(), defined())
+
+ def testGradientOfGatherWithDefun(self):
+ v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
+
+ def sum_gather():
+ return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
- grad_fn = backprop.implicit_grad(sum_gather)
- gradient = grad_fn()
- defun_grad_fn = backprop.implicit_grad(function.defun(sum_gather))
- defun_gradient = defun_grad_fn()
- self.assertEqual(len(gradient), len(defun_gradient))
+ grad_fn = backprop.implicit_grad(sum_gather)
+ gradient = grad_fn()
+ defun_grad_fn = backprop.implicit_grad(function.defun(sum_gather))
+ defun_gradient = defun_grad_fn()
+ self.assertEqual(len(gradient), len(defun_gradient))
- gradient = gradient[0][0]
- defun_gradient = defun_gradient[0][0]
- self.assertAllEqual(gradient.values, defun_gradient.values)
- self.assertAllEqual(gradient.indices, defun_gradient.indices)
- self.assertAllEqual(gradient.dense_shape, defun_gradient.dense_shape)
+ gradient = gradient[0][0]
+ defun_gradient = defun_gradient[0][0]
+ self.assertAllEqual(gradient.values, defun_gradient.values)
+ self.assertAllEqual(gradient.indices, defun_gradient.indices)
+ self.assertAllEqual(gradient.dense_shape, defun_gradient.dense_shape)
def testReturningIndexedSlicesWithDefun(self):
@@ -493,6 +559,66 @@ class FunctionTest(test.TestCase):
y = f(x, x).cpu()
self.assertAllEqual(y, [2.])
+ @test_util.run_in_graph_and_eager_modes
+ def testFunctionWithResourcesOnDifferentDevices(self):
+ if not context.context().num_gpus():
+ self.skipTest('No GPUs found.')
+
+ with ops.device('/cpu:0'):
+ v_cpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
+
+ with ops.device('/gpu:0'):
+ v_gpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
+
+ def sum_gather():
+ cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu, [1, 2]))
+ gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
+ return cpu_result, gpu_result
+
+ defined = function.defun(sum_gather)
+ if not context.executing_eagerly():
+ self.evaluate(variables.global_variables_initializer())
+ expected = self.evaluate(sum_gather())
+ self.assertAllEqual(expected, self.evaluate(defined()))
+
+ @test_util.run_in_graph_and_eager_modes
+ def testOpInFunctionWithConflictingResourceInputs(self):
+ if not context.context().num_gpus():
+ self.skipTest('No GPUs found.')
+
+ with ops.device('/cpu:0'):
+ v_cpu = resource_variable_ops.ResourceVariable(
+ [0.0, 1.0, 2.0], name='cpu')
+ v_also_cpu = resource_variable_ops.ResourceVariable(
+ [0.0, 1.0, 2.0], name='also_cpu')
+
+ with ops.device('/gpu:0'):
+ v_gpu = resource_variable_ops.ResourceVariable(
+ [0.0, 1.0, 2.0], name='gpu')
+
+ @function.defun
+ def resource_apply_adam():
+ training_ops.resource_apply_adam(
+ v_cpu.handle,
+ v_gpu.handle,
+ v_also_cpu.handle,
+ 1.0, # beta1_power
+ 1.0, # beta2_power
+ 1.0, # learning_rate
+ 1.0, # beta1
+ 1.0, # beta2
+ 1.0, # epsilon
+ [1.0, 1.0, 1.0], # grad
+ False) # use_locking
+ return None
+
+ with self.assertRaisesRegexp(
+ errors.InvalidArgumentError, 'Could not colocate node with its '
+ 'resource and reference inputs.*'):
+ if not context.executing_eagerly():
+ self.evaluate(variables.global_variables_initializer())
+ self.evaluate(resource_apply_adam())
+
def testFunctionHandlesInputsOnDifferentDevices(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
@@ -542,17 +668,17 @@ class FunctionTest(test.TestCase):
def testNestedDifferentiableFunction(self):
@function.defun
- def foo(a, b):
+ def inner_fn(a, b):
return a * math_ops.add(a, b)
@function.defun
- def bar(x):
- return foo(x, 1.0)
+ def outer_fn(x):
+ return inner_fn(x, 1.0)
x = constant_op.constant(5.0)
with backprop.GradientTape() as tp:
tp.watch(x)
- result = bar(x)
+ result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@@ -602,15 +728,15 @@ class FunctionTest(test.TestCase):
self.assertAllEqual(3, add_one(constant_op.constant(2)))
def testVariableCaptureInNestedFunctions(self):
- v = resource_variable_ops.ResourceVariable(1)
+ v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int32)
@function.defun
- def read():
+ def inner_read():
return v.read_value()
@function.defun
def outer():
- return read()
+ return inner_read()
self.assertEqual(1, int(outer()))
@@ -701,6 +827,27 @@ class FunctionTest(test.TestCase):
y = model(x)
self.assertAllEqual([[[[4.0]]]], y.numpy())
+ @test_util.run_in_graph_and_eager_modes(
+ config=config_pb2.ConfigProto(device_count={'CPU': 3}))
+ def testDeviceAnnotationsRespected(self):
+ @function.defun
+ def multi_device_fn():
+ with ops.device('/cpu:0'):
+ s1 = iterator_ops.Iterator.from_structure(
+ (dtypes.float32,)).string_handle()
+ with ops.device('/cpu:1'):
+ s2 = iterator_ops.Iterator.from_structure(
+ (dtypes.float32,)).string_handle()
+ with ops.device('/cpu:2'):
+ s3 = iterator_ops.Iterator.from_structure(
+ (dtypes.float32,)).string_handle()
+ return s1, s2, s3
+
+ outputs = multi_device_fn()
+ self.assertTrue(compat.as_bytes('CPU:0') in self.evaluate(outputs[0]))
+ self.assertTrue(compat.as_bytes('CPU:1') in self.evaluate(outputs[1]))
+ self.assertTrue(compat.as_bytes('CPU:2') in self.evaluate(outputs[2]))
+
def testVariablesAreTracked(self):
v = resource_variable_ops.ResourceVariable(1.0)
@@ -801,6 +948,25 @@ class FunctionTest(test.TestCase):
out = foo.two(t)
self.assertEqual(float(out), 1.0)
+ def testPythonCallWithSideEffects(self):
+ state = []
+
+ @function.defun
+ def side_effecting_function():
+ state.append(0)
+
+ side_effecting_function()
+ self.assertAllEqual(state, [0])
+
+ # The second invocation should call the graph function, which shouldn't
+ # trigger the list append.
+ side_effecting_function()
+ self.assertAllEqual(state, [0])
+
+ # Whereas calling the python function directly should create a side-effect.
+ side_effecting_function.call_python_function()
+ self.assertAllEqual(state, [0, 0])
+
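A minimal standalone sketch of the trace-once behavior this test relies on, assuming eager execution (names here are illustrative): function.defun runs the Python body only while tracing, then replays the captured graph on later calls with the same signature.

    from tensorflow.python.eager import function

    trace_count = []

    @function.defun
    def traced():
      trace_count.append(1)  # Python side effect; runs only during tracing.
      return 1.0

    traced()
    traced()
    assert len(trace_count) == 1  # the second call replayed the graph function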
@test_util.with_c_shapes
class AutomaticControlDependenciesTest(test.TestCase):
@@ -988,7 +1154,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
def loss(v):
return v**2
- optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
+ optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
@@ -1005,7 +1171,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
def loss():
return v**2
- optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
+ optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
@@ -1017,4 +1183,6 @@ class AutomaticControlDependenciesTest(test.TestCase):
if __name__ == '__main__':
+ ops.enable_eager_execution(
+ config=config_pb2.ConfigProto(device_count={'CPU': 3}))
test.main()
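For reference, a standalone sketch of the eager setup now performed in this test's main(), assuming TF 1.x: three virtual CPU devices are registered so the multi-device tests above have somewhere to place ops.

    import tensorflow as tf
    from tensorflow.core.protobuf import config_pb2

    tf.enable_eager_execution(
        config=config_pb2.ConfigProto(device_count={'CPU': 3}))
    assert tf.executing_eagerly()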
diff --git a/tensorflow/python/eager/graph_callable.py b/tensorflow/python/eager/graph_callable.py
index 760a148552..2c6f04d8ad 100644
--- a/tensorflow/python/eager/graph_callable.py
+++ b/tensorflow/python/eager/graph_callable.py
@@ -110,13 +110,25 @@ class _VariableCapturingScope(object):
"""
# TODO(apassos) ignoring the regularizer and partitioner here; figure out
# how to deal with these.
- def _custom_getter(getter=None, name=None, shape=None, dtype=dtypes.float32, # pylint: disable=missing-docstring
- initializer=None, regularizer=None, reuse=None,
- trainable=True, collections=None, caching_device=None, # pylint: disable=redefined-outer-name
- partitioner=None, validate_shape=True,
- use_resource=None):
+ def _custom_getter( # pylint: disable=missing-docstring
+ getter=None,
+ name=None,
+ shape=None,
+ dtype=dtypes.float32,
+ initializer=None,
+ regularizer=None,
+ reuse=None,
+ trainable=None,
+ collections=None,
+ caching_device=None, # pylint: disable=redefined-outer-name
+ partitioner=None,
+ validate_shape=True,
+ use_resource=None,
+ aggregation=variable_scope.VariableAggregation.NONE,
+ synchronization=variable_scope.VariableSynchronization.AUTO):
del getter, regularizer, partitioner, validate_shape, use_resource, dtype
- del collections, initializer, trainable, reuse, caching_device, shape,
+ del collections, initializer, trainable, reuse, caching_device, shape
+ del aggregation, synchronization
assert name in self.variables
v = self.variables[name]
return v.variable
@@ -136,13 +148,24 @@ class _VariableCapturingScope(object):
"""
# TODO(apassos) ignoring the regularizer and partitioner here; figure out
# how to deal with these.
- def _custom_getter(getter=None, name=None, shape=None, dtype=dtypes.float32, # pylint: disable=missing-docstring
- initializer=None, regularizer=None, reuse=None,
- trainable=True, collections=None, caching_device=None, # pylint: disable=redefined-outer-name
- partitioner=None, validate_shape=True,
- use_resource=None):
+ def _custom_getter( # pylint: disable=missing-docstring
+ getter=None,
+ name=None,
+ shape=None,
+ dtype=dtypes.float32,
+ initializer=None,
+ regularizer=None,
+ reuse=None,
+ trainable=None,
+ collections=None,
+ caching_device=None, # pylint: disable=redefined-outer-name
+ partitioner=None,
+ validate_shape=True,
+ use_resource=None,
+ aggregation=variable_scope.VariableAggregation.NONE,
+ synchronization=variable_scope.VariableSynchronization.AUTO):
del getter, regularizer, collections, caching_device, partitioner
- del use_resource, validate_shape
+ del use_resource, validate_shape, aggregation, synchronization
if name in self.tf_variables:
if reuse:
return self.tf_variables[name].initialized_value()
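A hedged sketch of what this signature change means for user code, assuming TF 1.x variable scopes: custom getters should now accept (or forward) the new synchronization and aggregation keyword arguments, which a **kwargs signature handles for free.

    import tensorflow as tf

    def passthrough_getter(getter, name, *args, **kwargs):
      # 'synchronization' and 'aggregation' arrive via kwargs on newer
      # versions; forwarding them keeps the getter compatible.
      return getter(name, *args, **kwargs)

    with tf.variable_scope('scope', custom_getter=passthrough_getter):
      v = tf.get_variable('v', shape=[2], dtype=tf.float32)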
diff --git a/tensorflow/python/eager/pywrap_tensor.cc b/tensorflow/python/eager/pywrap_tensor.cc
index ea604647fa..cefd5b1206 100644
--- a/tensorflow/python/eager/pywrap_tensor.cc
+++ b/tensorflow/python/eager/pywrap_tensor.cc
@@ -620,10 +620,6 @@ static PyType_Slot EagerTensor_Type_slots[] = {
{Py_tp_init, reinterpret_cast<void*>(EagerTensor_init)},
{0, nullptr},
};
-
-PyType_Spec EagerTensor_Type_spec = {"EagerTensor", sizeof(EagerTensor), 0,
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE,
- EagerTensor_Type_slots};
#else
// TODO(agarwal): support active_trace.
static PyTypeObject _EagerTensorType = {
@@ -754,6 +750,34 @@ PyObject* TFE_Py_InitEagerTensor(PyObject* base_class) {
#if PY_MAJOR_VERSION >= 3
PyObject* bases = PyTuple_New(1);
PyTuple_SET_ITEM(bases, 0, base_class);
+
+ tensorflow::Safe_PyObjectPtr base_class_module(
+ PyObject_GetAttrString(base_class, "__module__"));
+ const char* module = nullptr;
+ if (PyErr_Occurred()) {
+ PyErr_Clear();
+ module = "__builtin__";
+ } else {
+ module = PyBytes_AsString(base_class_module.get());
+ if (module == nullptr) {
+ PyErr_Clear();
+ module = PyUnicode_AsUTF8(base_class_module.get());
+ if (module == nullptr) {
+ PyErr_Clear();
+ module = "__builtin__";
+ }
+ }
+ }
+
+ // NOTE: The c_str() pointer from this string must outlive this function,
+ // hence the string is static.
+ static tensorflow::string fully_qualified_name =
+ tensorflow::strings::StrCat(module, ".EagerTensor");
+
+ static PyType_Spec EagerTensor_Type_spec = {
+ fully_qualified_name.c_str(), sizeof(EagerTensor), 0,
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE, EagerTensor_Type_slots};
+
EagerTensorType = reinterpret_cast<PyTypeObject*>(
PyType_FromSpecWithBases(&EagerTensor_Type_spec, bases));
if (PyErr_Occurred()) {
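The user-visible effect of registering the heap type under a fully-qualified name, sketched in Python (the exact module path shown is an assumption for illustration, not taken from this diff):

    import tensorflow as tf

    tf.enable_eager_execution()
    t = tf.constant(1.0)
    # Previously: <class 'EagerTensor'>; now something like
    # <class 'tensorflow.python.framework.ops.EagerTensor'>.
    print(type(t))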
diff --git a/tensorflow/python/eager/pywrap_tfe_src.cc b/tensorflow/python/eager/pywrap_tfe_src.cc
index b797a3f82d..4d28e98961 100644
--- a/tensorflow/python/eager/pywrap_tfe_src.cc
+++ b/tensorflow/python/eager/pywrap_tfe_src.cc
@@ -948,7 +948,7 @@ class GradientTape
: id(id), variable(variable) {}
};
struct CompareById {
- bool operator()(const IdAndVariable& lhs, const IdAndVariable& rhs) {
+ bool operator()(const IdAndVariable& lhs, const IdAndVariable& rhs) const {
return lhs.id < rhs.id;
}
};
@@ -1173,14 +1173,14 @@ static tensorflow::eager::TapeTensor TapeTensorFromTensor(PyObject* tensor) {
if (EagerTensor_CheckExact(tensor)) {
TFE_TensorHandle* t = EagerTensor_Handle(tensor);
tensorflow::int64 id = EagerTensor_id(tensor);
- const tensorflow::Tensor* tensor = nullptr;
- const tensorflow::Status status = t->handle->Tensor(&tensor);
+ tensorflow::TensorShape tensor_shape;
+ const tensorflow::Status status = t->handle->Shape(&tensor_shape);
+
if (MaybeRaiseExceptionFromStatus(status, nullptr)) {
return tensorflow::eager::TapeTensor{id, t->handle->dtype,
tensorflow::TensorShape({})};
} else {
- return tensorflow::eager::TapeTensor{id, t->handle->dtype,
- tensor->shape()};
+ return tensorflow::eager::TapeTensor{id, t->handle->dtype, tensor_shape};
}
}
tensorflow::int64 id = FastTensorId(tensor);
@@ -1898,14 +1898,39 @@ PyObject* RecordGradient(PyObject* op_name, PyObject* inputs, PyObject* attrs,
void MaybeWatchVariable(PyObject* input) {
DCHECK(CheckResourceVariable(input));
- DCHECK(PyObject_HasAttrString(input, "trainable"));
+ DCHECK(PyObject_HasAttrString(input, "_trainable"));
tensorflow::Safe_PyObjectPtr trainable(
- PyObject_GetAttrString(input, "trainable"));
+ PyObject_GetAttrString(input, "_trainable"));
if (trainable.get() == Py_False) return;
TFE_Py_TapeSetWatchVariable(input);
}
+bool CastTensor(const FastPathOpExecInfo& op_exec_info,
+ const TF_DataType& desired_dtype,
+ tensorflow::Safe_TFE_TensorHandlePtr* handle,
+ TF_Status* status) {
+ TF_DataType input_dtype = TFE_TensorHandleDataType(handle->get());
+ TF_DataType output_dtype = input_dtype;
+
+ if (desired_dtype >= 0 && desired_dtype != input_dtype) {
+ *handle = tensorflow::make_safe(
+ tensorflow::EagerCast(op_exec_info.ctx, handle->get(), input_dtype,
+ static_cast<TF_DataType>(desired_dtype), status));
+ if (!status->status.ok()) return false;
+ output_dtype = desired_dtype;
+ }
+
+ if (output_dtype != TF_INT32) {
+ // Note that this is a shallow copy and will share the underlying buffer
+ // if copying to the same device.
+ *handle = tensorflow::make_safe(TFE_TensorHandleCopyToDevice(
+ handle->get(), op_exec_info.ctx, op_exec_info.device_name, status));
+ if (!status->status.ok()) return false;
+ }
+ return true;
+}
+
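A rough Python analogue of the new CastTensor helper, under stated assumptions (the real helper is C++ and operates on TFE handles; names here are illustrative): cast to the desired dtype first, then copy to the op's device unless the result is int32, which stays on the host.

    import tensorflow as tf

    def cast_for_fast_path(t, desired_dtype, device_name):
      if desired_dtype is not None and t.dtype != desired_dtype:
        t = tf.cast(t, desired_dtype)
      if t.dtype != tf.int32:
        # Shallow copy; shares the buffer when already on device_name.
        with tf.device(device_name):
          t = tf.identity(t)
      return t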
bool ReadVariableOp(const FastPathOpExecInfo& parent_op_exec_info,
PyObject* input, tensorflow::Safe_PyObjectPtr* output,
TF_Status* status) {
@@ -1938,9 +1963,31 @@ bool ReadVariableOp(const FastPathOpExecInfo& parent_op_exec_info,
TFE_Execute(op, &output_handle, &num_retvals, status);
if (MaybeRaiseExceptionFromTFStatus(status, nullptr)) return false;
- // Always create the py object (and correctly DECREF it) from the returned
- // value, else the data will leak.
- output->reset(EagerTensorFromHandle(output_handle));
+ if (!PyObject_HasAttrString(input, "_read_dtype")) {
+ // Always create the py object (and correctly DECREF it) from the returned
+ // value, else the data will leak.
+ output->reset(EagerTensorFromHandle(output_handle));
+ } else {
+ // This is a _MixedPrecisionVariable which potentially does casting when
+ // being read.
+ tensorflow::Safe_PyObjectPtr read_dtype(
+ PyObject_GetAttrString(input, "_read_dtype"));
+ int desired_dtype = -1;
+ if (!ParseTypeValue("_read_dtype", read_dtype.get(), status,
+ &desired_dtype)) {
+ return false;
+ }
+
+ auto safe_output_handle = tensorflow::make_safe(output_handle);
+ // Ownership was transferred to safe_output_handle; clear the raw pointer
+ // so it cannot be used or released twice.
+ output_handle = nullptr;
+ if (!CastTensor(parent_op_exec_info,
+ static_cast<TF_DataType>(desired_dtype),
+ &safe_output_handle, status)) {
+ return false;
+ }
+ output->reset(EagerTensorFromHandle(safe_output_handle.release()));
+ }
// TODO(nareshmodi): Should we run post exec callbacks here?
if (parent_op_exec_info.run_gradient_callback) {
@@ -2010,27 +2057,13 @@ bool ConvertToTensor(
}
}
- TF_DataType handle_dtype = TFE_TensorHandleDataType(handle.get());
- if (desired_dtype >= 0 && desired_dtype != handle_dtype) {
- handle = tensorflow::make_safe(
- tensorflow::EagerCast(op_exec_info.ctx, handle.get(), handle_dtype,
- static_cast<TF_DataType>(desired_dtype), status));
- if (!status->status.ok()) return false;
-
- handle_dtype = TFE_TensorHandleDataType(handle.get());
- }
-
- if (handle_dtype != TF_INT32) {
- // Note that this is a shallow copy and will share the underlying buffer
- // if copying to the same device.
- handle = tensorflow::make_safe(TFE_TensorHandleCopyToDevice(
- handle.get(), op_exec_info.ctx, op_exec_info.device_name, status));
- if (!status->status.ok()) return false;
+ if (!CastTensor(op_exec_info, static_cast<TF_DataType>(desired_dtype),
+ &handle, status)) {
+ return false;
}
-
+ TF_DataType output_dtype = TFE_TensorHandleDataType(handle.get());
output_handle->reset(EagerTensorFromHandle(handle.release()));
-
- dtype_setter(handle_dtype);
+ dtype_setter(output_dtype);
return true;
}
diff --git a/tensorflow/python/eager/pywrap_tfe_test.py b/tensorflow/python/eager/pywrap_tfe_test.py
index faaae40b3f..fd8ab695b8 100644
--- a/tensorflow/python/eager/pywrap_tfe_test.py
+++ b/tensorflow/python/eager/pywrap_tfe_test.py
@@ -23,6 +23,7 @@ from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
@@ -71,6 +72,25 @@ class Tests(test.TestCase):
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
+ def testFastpathExecute_MixedPrecisionVariableMatMulCorrectResponse(self):
+ ctx = context.context()
+ a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
+ a_2_by_2_fp16 = math_ops.cast(a_2_by_2, dtype=dtypes.float16)
+ m = resource_variable_ops.ResourceVariable(a_2_by_2)
+ m = resource_variable_ops._MixedPrecisionVariable(
+ m, read_dtype=dtypes.float16)
+ x = pywrap_tensorflow.TFE_Py_FastPathExecute(
+ ctx._handle, ctx.device_name, "MatMul", None, None, m, m, "transpose_a",
+ False, "transpose_b", False)
+ y = pywrap_tensorflow.TFE_Py_FastPathExecute(
+ ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2_fp16,
+ a_2_by_2_fp16, "transpose_a", False, "transpose_b", False)
+
+ self.assertEqual(x.dtype, dtypes.float16)
+ self.assertAllEqual(x, y)
+
+ @test_util.assert_no_new_tensors
+ @test_util.assert_no_garbage_created
def testFastpathExecute_TapeWrite(self):
ctx = context.context()
with backprop.GradientTape(persistent=True) as tape:
@@ -98,6 +118,29 @@ class Tests(test.TestCase):
self.assertAllEqual(dz_dy.numpy(),
constant_op.constant(4.0, shape=[2, 2]).numpy())
+ @test_util.assert_no_new_tensors
+ @test_util.assert_no_garbage_created
+ def testFastpathExecute_MixedPrecisionVariableTapeWrite(self):
+ ctx = context.context()
+ with backprop.GradientTape(persistent=True) as tape:
+ a_2_by_2 = constant_op.constant(
+ [[1.0, 2.0], [3.0, 4.0]], dtype=dtypes.float32)
+ a_2_by_2_fp16 = math_ops.cast(a_2_by_2, dtype=dtypes.float16)
+ m1 = resource_variable_ops.ResourceVariable(a_2_by_2)
+ m2 = resource_variable_ops._MixedPrecisionVariable(
+ m1, read_dtype=dtypes.float16)
+ tape.watch(m2)
+ z = pywrap_tensorflow.TFE_Py_FastPathExecute(
+ ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2_fp16, m2,
+ "transpose_a", False, "transpose_b", False)
+ dz_dy = tape.gradient(z, [m2])[0]
+ self.assertEqual(dz_dy.dtype, dtypes.float16)
+
+ expected_grads = math_ops.matmul(
+ array_ops.transpose(a_2_by_2_fp16),
+ constant_op.constant(1., shape=[2, 2], dtype=dtypes.float16)).numpy()
+ self.assertAllEqual(dz_dy.numpy(), expected_grads)
+
# Tests homogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
diff --git a/tensorflow/python/eager/tensor_test.py b/tensorflow/python/eager/tensor_test.py
index 626a4eb1ee..871136e2c8 100644
--- a/tensorflow/python/eager/tensor_test.py
+++ b/tensorflow/python/eager/tensor_test.py
@@ -278,7 +278,7 @@ class TFETensorUtilTest(test_util.TensorFlowTestCase):
with self.assertRaisesRegexp(
TypeError,
- r"tensors argument must be a list or a tuple. Got \"EagerTensor\""):
+ r"tensors argument must be a list or a tuple. Got.*EagerTensor"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice(t1, -2)
def testNegativeSliceDim(self):
diff --git a/tensorflow/python/eager/test.py b/tensorflow/python/eager/test.py
index f6a46e7eb3..33ee797678 100644
--- a/tensorflow/python/eager/test.py
+++ b/tensorflow/python/eager/test.py
@@ -23,6 +23,7 @@ from tensorflow.python.platform import test as _test
from tensorflow.python.platform.test import * # pylint: disable=wildcard-import
+# TODO(akshayka): Do away with this file.
def main(argv=None):
_ops.enable_eager_execution()
_test.main(argv)
diff --git a/tensorflow/python/estimator/BUILD b/tensorflow/python/estimator/BUILD
index 8ee38d35cc..6c415b1bf2 100644
--- a/tensorflow/python/estimator/BUILD
+++ b/tensorflow/python/estimator/BUILD
@@ -707,6 +707,14 @@ py_library(
)
py_library(
+ name = "expect_h5py_installed",
+ # This is a dummy rule used as an h5py dependency in open-source.
+ # We expect h5py to already be installed on the system, e.g. via
+ # `pip install h5py'
+ visibility = ["//visibility:public"],
+)
+
+py_library(
name = "expect_six_installed",
# This is a dummy rule used as a numpy dependency in open-source.
# We expect six to already be installed on the system, e.g. via
diff --git a/tensorflow/python/estimator/api/BUILD b/tensorflow/python/estimator/api/BUILD
index aa5a29e6dd..a75fa7d0ae 100644
--- a/tensorflow/python/estimator/api/BUILD
+++ b/tensorflow/python/estimator/api/BUILD
@@ -6,13 +6,14 @@ package(
licenses(["notice"]) # Apache 2.0
-load("//tensorflow/tools/api/generator:api_gen.bzl", "gen_api_init_files")
-load("//tensorflow/tools/api/generator:api_gen.bzl", "ESTIMATOR_API_INIT_FILES")
+load("//tensorflow/python/tools/api/generator:api_gen.bzl", "gen_api_init_files")
+load("//tensorflow/python/tools/api/generator:api_gen.bzl", "ESTIMATOR_API_INIT_FILES")
gen_api_init_files(
name = "estimator_python_api_gen",
api_name = "estimator",
output_files = ESTIMATOR_API_INIT_FILES,
+ output_package = "tensorflow.python.estimator.api",
package = "tensorflow.python.estimator",
package_dep = "//tensorflow/python/estimator:estimator_py",
)
diff --git a/tensorflow/python/estimator/canned/baseline_test.py b/tensorflow/python/estimator/canned/baseline_test.py
index 7bf2e62da9..e46a3a156d 100644
--- a/tensorflow/python/estimator/canned/baseline_test.py
+++ b/tensorflow/python/estimator/canned/baseline_test.py
@@ -154,6 +154,8 @@ class BaselineRegressorEvaluationTest(test.TestCase):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -176,6 +178,8 @@ class BaselineRegressorEvaluationTest(test.TestCase):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -204,6 +208,8 @@ class BaselineRegressorEvaluationTest(test.TestCase):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -229,7 +235,9 @@ class BaselineRegressorEvaluationTest(test.TestCase):
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
- ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
+ metric_keys.MetricKeys.PREDICTION_MEAN,
+ metric_keys.MetricKeys.LABEL_MEAN, ops.GraphKeys.GLOBAL_STEP),
+ eval_metrics.keys())
# Logit is bias which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
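A small end-to-end sketch of what these new expectations cover, assuming TF 1.x estimators (the metric key strings are assumptions based on MetricKeys): evaluation now also reports the prediction and label means.

    import tensorflow as tf

    def input_fn():
      return {'x': tf.constant([[18.], [20.]])}, tf.constant([[10.], [10.]])

    regressor = tf.estimator.BaselineRegressor()
    regressor.train(input_fn=input_fn, steps=5)
    metrics = regressor.evaluate(input_fn=input_fn, steps=1)
    assert 'prediction/mean' in metrics  # new in this change
    assert 'label/mean' in metrics       # new in this change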
diff --git a/tensorflow/python/estimator/canned/boosted_trees.py b/tensorflow/python/estimator/canned/boosted_trees.py
index 8afef1b65a..3292e2724d 100644
--- a/tensorflow/python/estimator/canned/boosted_trees.py
+++ b/tensorflow/python/estimator/canned/boosted_trees.py
@@ -17,7 +17,9 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import abc
import collections
+import functools
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn
@@ -44,12 +46,13 @@ from tensorflow.python.util.tf_export import estimator_export
# TODO(nponomareva): Reveal pruning params here.
_TreeHParams = collections.namedtuple('TreeHParams', [
'n_trees', 'max_depth', 'learning_rate', 'l1', 'l2', 'tree_complexity',
- 'min_node_weight'
+ 'min_node_weight', 'center_bias'
])
_HOLD_FOR_MULTI_CLASS_SUPPORT = object()
_HOLD_FOR_MULTI_DIM_SUPPORT = object()
_DUMMY_NUM_BUCKETS = -1
+_DUMMY_NODE_ID = -1
def _get_transformed_features(features, sorted_feature_columns):
@@ -279,7 +282,9 @@ class _CacheTrainingStatesUsingHashTable(object):
"""Returns cached_tree_ids, cached_node_ids, cached_logits."""
cached_tree_ids, cached_node_ids, cached_logits = array_ops.split(
lookup_ops.lookup_table_find_v2(
- self._table_ref, self._example_ids, default_value=[0.0, 0.0, 0.0]),
+ self._table_ref,
+ self._example_ids,
+ default_value=[0.0, _DUMMY_NODE_ID, 0.0]),
[1, 1, self._logits_dimension],
axis=1)
cached_tree_ids = array_ops.squeeze(
@@ -330,7 +335,7 @@ class _CacheTrainingStatesUsingVariables(object):
array_ops.zeros([batch_size], dtype=dtypes.int32),
name='tree_ids_cache')
self._node_ids = _local_variable(
- array_ops.zeros([batch_size], dtype=dtypes.int32),
+ _DUMMY_NODE_ID * array_ops.ones([batch_size], dtype=dtypes.int32),
name='node_ids_cache')
self._logits = _local_variable(
array_ops.zeros([batch_size, logits_dimension], dtype=dtypes.float32),
@@ -380,6 +385,249 @@ class _StopAtAttemptsHook(session_run_hook.SessionRunHook):
run_context.request_stop()
+def _get_max_splits(tree_hparams):
+ """Calculates the max possible number of splits based on tree params."""
+ # Maximum number of splits possible in the whole tree: 2^max_depth - 1.
+ max_splits = (1 << tree_hparams.max_depth) - 1
+ return max_splits
+
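A quick worked check of the formula, assuming max_depth counts layers of split nodes: a depth-2 tree has one root split plus at most two second-layer splits, so (1 << 2) - 1 = 3.

    def max_splits_for_depth(max_depth):
      return (1 << max_depth) - 1  # same arithmetic as _get_max_splits

    assert max_splits_for_depth(2) == 3  # 1 root + 2 second-layer splits
    assert max_splits_for_depth(3) == 7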
+
+class _EnsembleGrower(object):
+ """Abstract base class for different types of ensemble growers.
+
+ Use it to obtain training ops for growing the ensemble and centering the
+ bias; the behavior depends on the implementation (for example, in-memory
+ vs. accumulator-based distributed training):
+ grower = _SomeEnsembleGrower(tree_ensemble, tree_hparams) # concrete subclass
+ grow_op = grower.grow_tree(stats_summaries_list, feature_ids_list,
+ last_layer_nodes_range)
+ training_ops.append(grow_op)
+ """
+
+ def __init__(self, tree_ensemble, tree_hparams):
+ """Initializes a grower object.
+
+ Args:
+ tree_ensemble: A TreeEnsemble variable.
+ tree_hparams: A TreeHParams collections.namedtuple of hyperparameters.
+ """
+ self._tree_ensemble = tree_ensemble
+ self._tree_hparams = tree_hparams
+
+ @abc.abstractmethod
+ def center_bias(self, center_bias_var, gradients, hessians):
+ """Centers bias, if ready, based on statistics.
+
+ Args:
+ center_bias_var: A variable that will be updated when bias centering
+ has finished.
+ gradients: A rank 2 tensor of gradients.
+ hessians: A rank 2 tensor of hessians.
+
+ Returns:
+ An operation for centering bias.
+ """
+
+ @abc.abstractmethod
+ def grow_tree(self, stats_summaries_list, feature_ids_list,
+ last_layer_nodes_range):
+ """Grows a tree, if ready, based on provided statistics.
+
+ Args:
+ stats_summaries_list: List of stats summary tensors, representing sums of
+ gradients and hessians for each feature bucket.
+ feature_ids_list: a list of lists of feature ids for each bucket size.
+ last_layer_nodes_range: A tensor representing ids of the nodes in the
+ current layer, to be split.
+
+ Returns:
+ An op for growing a tree.
+ """
+
+ # ============= Helper methods ===========
+
+ def _center_bias_fn(self, center_bias_var, mean_gradients, mean_hessians):
+ """Updates the ensembles and cache (if needed) with logits prior."""
+ continue_centering = boosted_trees_ops.center_bias(
+ self._tree_ensemble.resource_handle,
+ mean_gradients=mean_gradients,
+ mean_hessians=mean_hessians,
+ l1=self._tree_hparams.l1,
+ l2=self._tree_hparams.l2)
+ return center_bias_var.assign(continue_centering)
+
+ def _grow_tree_from_stats_summaries(self, stats_summaries_list,
+ feature_ids_list, last_layer_nodes_range):
+ """Updates ensemble based on the best gains from stats summaries."""
+ node_ids_per_feature = []
+ gains_list = []
+ thresholds_list = []
+ left_node_contribs_list = []
+ right_node_contribs_list = []
+ all_feature_ids = []
+ assert len(stats_summaries_list) == len(feature_ids_list)
+
+ max_splits = _get_max_splits(self._tree_hparams)
+
+ for i, feature_ids in enumerate(feature_ids_list):
+ (numeric_node_ids_per_feature, numeric_gains_list,
+ numeric_thresholds_list, numeric_left_node_contribs_list,
+ numeric_right_node_contribs_list) = (
+ boosted_trees_ops.calculate_best_gains_per_feature(
+ node_id_range=last_layer_nodes_range,
+ stats_summary_list=stats_summaries_list[i],
+ l1=self._tree_hparams.l1,
+ l2=self._tree_hparams.l2,
+ tree_complexity=self._tree_hparams.tree_complexity,
+ min_node_weight=self._tree_hparams.min_node_weight,
+ max_splits=max_splits))
+
+ all_feature_ids += feature_ids
+ node_ids_per_feature += numeric_node_ids_per_feature
+ gains_list += numeric_gains_list
+ thresholds_list += numeric_thresholds_list
+ left_node_contribs_list += numeric_left_node_contribs_list
+ right_node_contribs_list += numeric_right_node_contribs_list
+
+ grow_op = boosted_trees_ops.update_ensemble(
+ # Confirm whether local_tree_ensemble or tree_ensemble should be used.
+ self._tree_ensemble.resource_handle,
+ feature_ids=all_feature_ids,
+ node_ids=node_ids_per_feature,
+ gains=gains_list,
+ thresholds=thresholds_list,
+ left_node_contribs=left_node_contribs_list,
+ right_node_contribs=right_node_contribs_list,
+ learning_rate=self._tree_hparams.learning_rate,
+ max_depth=self._tree_hparams.max_depth,
+ pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING)
+ return grow_op
+
+
+class _InMemoryEnsembleGrower(_EnsembleGrower):
+ """A base class for ensemble growers."""
+
+ def __init__(self, tree_ensemble, tree_hparams):
+
+ super(_InMemoryEnsembleGrower, self).__init__(
+ tree_ensemble=tree_ensemble, tree_hparams=tree_hparams)
+
+ def center_bias(self, center_bias_var, gradients, hessians):
+ # For in memory, we already have a full batch of gradients and hessians,
+ # so just take a mean and proceed with centering.
+ mean_gradients = array_ops.expand_dims(
+ math_ops.reduce_mean(gradients, 0), 0)
+ mean_hessians = array_ops.expand_dims(math_ops.reduce_mean(hessians, 0), 0)
+ return self._center_bias_fn(center_bias_var, mean_gradients, mean_hessians)
+
+ def grow_tree(self, stats_summaries_list, feature_ids_list,
+ last_layer_nodes_range):
+ # For in memory, we already have full data in one batch, so we can grow the
+ # tree immediately.
+ return self._grow_tree_from_stats_summaries(
+ stats_summaries_list, feature_ids_list, last_layer_nodes_range)
+
+
+class _AccumulatorEnsembleGrower(_EnsembleGrower):
+ """A base class for ensemble growers."""
+
+ def __init__(self, tree_ensemble, tree_hparams, stamp_token,
+ n_batches_per_layer, bucket_size_list, is_chief):
+ super(_AccumulatorEnsembleGrower, self).__init__(
+ tree_ensemble=tree_ensemble, tree_hparams=tree_hparams)
+ self._stamp_token = stamp_token
+ self._n_batches_per_layer = n_batches_per_layer
+ self._bucket_size_list = bucket_size_list
+ self._is_chief = is_chief
+
+ def center_bias(self, center_bias_var, gradients, hessians):
+ # When not training in memory, we need to accumulate enough batches
+ # before proceeding with bias centering.
+
+ # Create an accumulator.
+ bias_dependencies = []
+ bias_accumulator = data_flow_ops.ConditionalAccumulator(
+ dtype=dtypes.float32,
+ # The stats consist of the means of grads and hessians only.
+ # TODO(nponomareva): this will change for multiclass.
+ shape=[2, 1],
+ shared_name='bias_accumulator')
+
+ grads_and_hess = array_ops.stack([gradients, hessians], axis=0)
+ grads_and_hess = math_ops.reduce_mean(grads_and_hess, axis=1)
+
+ apply_grad = bias_accumulator.apply_grad(grads_and_hess, self._stamp_token)
+ bias_dependencies.append(apply_grad)
+
+ # Center bias if enough batches were processed.
+ with ops.control_dependencies(bias_dependencies):
+ if not self._is_chief:
+ return control_flow_ops.no_op()
+
+ def center_bias_from_accumulator():
+ accumulated = array_ops.unstack(bias_accumulator.take_grad(1), axis=0)
+ return self._center_bias_fn(center_bias_var,
+ array_ops.expand_dims(accumulated[0], 0),
+ array_ops.expand_dims(accumulated[1], 0))
+
+ center_bias_op = control_flow_ops.cond(
+ math_ops.greater_equal(bias_accumulator.num_accumulated(),
+ self._n_batches_per_layer),
+ center_bias_from_accumulator,
+ control_flow_ops.no_op,
+ name='wait_until_n_batches_for_bias_accumulated')
+ return center_bias_op
+
+ def grow_tree(self, stats_summaries_list, feature_ids_list,
+ last_layer_nodes_range):
+ # When not training in memory, we need to accumulate enough batches
+ # before proceeding with building a tree layer.
+ max_splits = _get_max_splits(self._tree_hparams)
+
+ # Prepare accumulators.
+ accumulators = []
+ dependencies = []
+ for i, feature_ids in enumerate(feature_ids_list):
+ stats_summaries = stats_summaries_list[i]
+ accumulator = data_flow_ops.ConditionalAccumulator(
+ dtype=dtypes.float32,
+ # The stats consist of grads and hessians (the last dimension).
+ shape=[len(feature_ids), max_splits, self._bucket_size_list[i], 2],
+ shared_name='numeric_stats_summary_accumulator_' + str(i))
+ accumulators.append(accumulator)
+
+ apply_grad = accumulator.apply_grad(
+ array_ops.stack(stats_summaries, axis=0), self._stamp_token)
+ dependencies.append(apply_grad)
+
+ # Grow the tree if enough batches have been accumulated.
+ with ops.control_dependencies(dependencies):
+ if not self._is_chief:
+ return control_flow_ops.no_op()
+
+ min_accumulated = math_ops.reduce_min(
+ array_ops.stack([acc.num_accumulated() for acc in accumulators]))
+
+ def grow_tree_from_accumulated_summaries_fn():
+ """Updates tree with the best layer from accumulated summaries."""
+ # Take out the accumulated summaries from the accumulator and grow.
+ stats_summaries_list = [
+ array_ops.unstack(accumulator.take_grad(1), axis=0)
+ for accumulator in accumulators
+ ]
+ grow_op = self._grow_tree_from_stats_summaries(
+ stats_summaries_list, feature_ids_list, last_layer_nodes_range)
+ return grow_op
+
+ grow_model = control_flow_ops.cond(
+ math_ops.greater_equal(min_accumulated, self._n_batches_per_layer),
+ grow_tree_from_accumulated_summaries_fn,
+ control_flow_ops.no_op,
+ name='wait_until_n_batches_accumulated')
+ return grow_model
+
+
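A minimal standalone sketch of the accumulate-then-act pattern both grower methods use, assuming TF 1.x graph mode with illustrative shapes: workers apply per-batch stats into a ConditionalAccumulator, and the chief acts only once enough batches have arrived.

    import tensorflow as tf

    acc = tf.ConditionalAccumulator(
        dtype=tf.float32, shape=[2], shared_name='stats_acc')
    apply_op = acc.apply_grad([0.5, 0.5], local_step=0)

    def act_on_accumulated():
      # take_grad(1) returns the mean of everything accumulated so far.
      return tf.reduce_sum(acc.take_grad(1))

    with tf.control_dependencies([apply_op]):
      maybe_act = tf.cond(
          tf.greater_equal(acc.num_accumulated(), 3),
          act_on_accumulated,
          lambda: tf.constant(0.0),
          name='wait_until_n_batches_accumulated')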
def _bt_model_fn(
features,
labels,
@@ -425,8 +673,8 @@ def _bt_model_fn(
ValueError: mode or params are invalid, or features has the wrong type.
"""
is_single_machine = (config.num_worker_replicas <= 1)
-
sorted_feature_columns = sorted(feature_columns, key=lambda tc: tc.name)
+ center_bias = tree_hparams.center_bias
if train_in_memory:
assert n_batches_per_layer == 1, (
'When train_in_memory is enabled, input_fn should return the entire '
@@ -437,11 +685,6 @@ def _bt_model_fn(
raise ValueError('train_in_memory is supported only for '
'non-distributed training.')
worker_device = control_flow_ops.no_op().device
- # maximum number of splits possible in the whole tree =2^(D-1)-1
- # TODO(youngheek): perhaps storage could be optimized by storing stats with
- # the dimension max_splits_per_layer, instead of max_splits (for the entire
- # tree).
- max_splits = (1 << tree_hparams.max_depth) - 1
train_op = []
with ops.name_scope(name) as name:
# Prepare.
@@ -469,6 +712,9 @@ def _bt_model_fn(
# Create Ensemble resources.
tree_ensemble = boosted_trees_ops.TreeEnsemble(name=name)
+ # Variable that determines whether bias centering is needed.
+ center_bias_var = variable_scope.variable(
+ initial_value=center_bias, name='center_bias_needed', trainable=False)
# Create logits.
if mode != model_fn.ModeKeys.TRAIN:
logits = boosted_trees_ops.predict(
@@ -489,6 +735,7 @@ def _bt_model_fn(
# TODO(soroush): Do partial updates if this becomes a bottleneck.
ensemble_reload = local_tree_ensemble.deserialize(
*tree_ensemble.serialize())
+
if training_state_cache:
cached_tree_ids, cached_node_ids, cached_logits = (
training_state_cache.lookup())
@@ -497,9 +744,10 @@ def _bt_model_fn(
batch_size = array_ops.shape(labels)[0]
cached_tree_ids, cached_node_ids, cached_logits = (
array_ops.zeros([batch_size], dtype=dtypes.int32),
- array_ops.zeros([batch_size], dtype=dtypes.int32),
+ _DUMMY_NODE_ID * array_ops.ones([batch_size], dtype=dtypes.int32),
array_ops.zeros(
[batch_size, head.logits_dimension], dtype=dtypes.float32))
+
with ops.control_dependencies([ensemble_reload]):
(stamp_token, num_trees, num_finalized_trees, num_attempted_layers,
last_layer_nodes_range) = local_tree_ensemble.get_states()
@@ -513,13 +761,20 @@ def _bt_model_fn(
cached_node_ids=cached_node_ids,
bucketized_features=input_feature_list,
logits_dimension=head.logits_dimension)
+
logits = cached_logits + partial_logits
# Create training graph.
def _train_op_fn(loss):
"""Run one training iteration."""
if training_state_cache:
- train_op.append(training_state_cache.insert(tree_ids, node_ids, logits))
+ # Only cache logits once bias centering (if enabled) has finished.
+ train_op.append(
+ control_flow_ops.cond(
+ center_bias_var, control_flow_ops.no_op,
+ lambda: training_state_cache.insert(tree_ids, node_ids, logits))
+ )
+
if closed_form_grad_and_hess_fn:
gradients, hessians = closed_form_grad_and_hess_fn(logits, labels)
else:
@@ -527,6 +782,11 @@ def _bt_model_fn(
hessians = gradients_impl.gradients(
gradients, logits, name='Hessians')[0]
+ # TODO(youngheek): perhaps storage could be optimized by storing stats
+ # with the dimension max_splits_per_layer, instead of max_splits (for the
+ # entire tree).
+ max_splits = _get_max_splits(tree_hparams)
+
stats_summaries_list = []
for i, feature_ids in enumerate(feature_ids_list):
num_buckets = bucket_size_list[i]
@@ -543,103 +803,28 @@ def _bt_model_fn(
]
stats_summaries_list.append(summaries)
- accumulators = []
-
- def grow_tree_from_stats_summaries(stats_summaries_list,
- feature_ids_list):
- """Updates ensemble based on the best gains from stats summaries."""
- node_ids_per_feature = []
- gains_list = []
- thresholds_list = []
- left_node_contribs_list = []
- right_node_contribs_list = []
- all_feature_ids = []
-
- assert len(stats_summaries_list) == len(feature_ids_list)
-
- for i, feature_ids in enumerate(feature_ids_list):
- (numeric_node_ids_per_feature, numeric_gains_list,
- numeric_thresholds_list, numeric_left_node_contribs_list,
- numeric_right_node_contribs_list) = (
- boosted_trees_ops.calculate_best_gains_per_feature(
- node_id_range=last_layer_nodes_range,
- stats_summary_list=stats_summaries_list[i],
- l1=tree_hparams.l1,
- l2=tree_hparams.l2,
- tree_complexity=tree_hparams.tree_complexity,
- min_node_weight=tree_hparams.min_node_weight,
- max_splits=max_splits))
-
- all_feature_ids += feature_ids
- node_ids_per_feature += numeric_node_ids_per_feature
- gains_list += numeric_gains_list
- thresholds_list += numeric_thresholds_list
- left_node_contribs_list += numeric_left_node_contribs_list
- right_node_contribs_list += numeric_right_node_contribs_list
-
- grow_op = boosted_trees_ops.update_ensemble(
- # Confirm if local_tree_ensemble or tree_ensemble should be used.
- tree_ensemble.resource_handle,
- feature_ids=all_feature_ids,
- node_ids=node_ids_per_feature,
- gains=gains_list,
- thresholds=thresholds_list,
- left_node_contribs=left_node_contribs_list,
- right_node_contribs=right_node_contribs_list,
- learning_rate=tree_hparams.learning_rate,
- max_depth=tree_hparams.max_depth,
- pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING)
- return grow_op
-
if train_in_memory and is_single_machine:
- train_op.append(distribute_lib.increment_var(global_step))
- train_op.append(
- grow_tree_from_stats_summaries(stats_summaries_list,
- feature_ids_list))
+ grower = _InMemoryEnsembleGrower(tree_ensemble, tree_hparams)
else:
- dependencies = []
-
- for i, feature_ids in enumerate(feature_ids_list):
- stats_summaries = stats_summaries_list[i]
- accumulator = data_flow_ops.ConditionalAccumulator(
- dtype=dtypes.float32,
- # The stats consist of grads and hessians (the last dimension).
- shape=[len(feature_ids), max_splits, bucket_size_list[i], 2],
- shared_name='numeric_stats_summary_accumulator_' + str(i))
- accumulators.append(accumulator)
-
- apply_grad = accumulator.apply_grad(
- array_ops.stack(stats_summaries, axis=0), stamp_token)
- dependencies.append(apply_grad)
-
- def grow_tree_from_accumulated_summaries_fn():
- """Updates the tree with the best layer from accumulated summaries."""
- # Take out the accumulated summaries from the accumulator and grow.
- stats_summaries_list = []
-
- stats_summaries_list = [
- array_ops.unstack(accumulator.take_grad(1), axis=0)
- for accumulator in accumulators
- ]
-
- grow_op = grow_tree_from_stats_summaries(stats_summaries_list,
- feature_ids_list)
- return grow_op
-
- with ops.control_dependencies(dependencies):
- train_op.append(distribute_lib.increment_var(global_step))
- if config.is_chief:
- min_accumulated = math_ops.reduce_min(
- array_ops.stack(
- [acc.num_accumulated() for acc in accumulators]))
-
- train_op.append(
- control_flow_ops.cond(
- math_ops.greater_equal(min_accumulated,
- n_batches_per_layer),
- grow_tree_from_accumulated_summaries_fn,
- control_flow_ops.no_op,
- name='wait_until_n_batches_accumulated'))
+ grower = _AccumulatorEnsembleGrower(tree_ensemble, tree_hparams,
+ stamp_token, n_batches_per_layer,
+ bucket_size_list, config.is_chief)
+
+ update_model = control_flow_ops.cond(
+ center_bias_var,
+ functools.partial(
+ grower.center_bias,
+ center_bias_var,
+ gradients,
+ hessians,
+ ),
+ functools.partial(grower.grow_tree, stats_summaries_list,
+ feature_ids_list, last_layer_nodes_range))
+ train_op.append(update_model)
+
+ with ops.control_dependencies([update_model]):
+ increment_global = distribute_lib.increment_var(global_step)
+ train_op.append(increment_global)
return control_flow_ops.group(train_op, name='train_op')
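The cond/functools.partial gating above, reduced to a standalone TF 1.x sketch (the phase bodies are stand-ins, not the real grower methods): while center_bias_var is True the bias-centering branch runs; afterwards the tree-growing branch runs.

    import functools
    import tensorflow as tf

    center_bias_var = tf.Variable(True, trainable=False)

    def run_phase(name, value):
      # Stand-in for grower.center_bias / grower.grow_tree; tf.cond needs
      # both branches to return matching structures.
      return tf.identity(value, name=name)

    update_model = tf.cond(
        center_bias_var,
        functools.partial(run_phase, 'center_bias', 1.0),
        functools.partial(run_phase, 'grow_tree', 0.0))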
@@ -739,7 +924,8 @@ class BoostedTreesClassifier(estimator.Estimator):
l2_regularization=0.,
tree_complexity=0.,
min_node_weight=0.,
- config=None):
+ config=None,
+ center_bias=False):
"""Initializes a `BoostedTreesClassifier` instance.
Example:
@@ -807,6 +993,13 @@ class BoostedTreesClassifier(estimator.Estimator):
split to be considered. The value will be compared with
sum(leaf_hessian)/(batch_size * n_batches_per_layer).
config: `RunConfig` object to configure the runtime settings.
+ center_bias: Whether bias centering needs to occur. Bias centering refers
+ to the first node in the very first tree returning the prediction that
+ is aligned with the original labels distribution. For example, for
+ regression problems, the first node will return the mean of the labels.
+ For binary classification problems, it will return a logit for a prior
+ probability of label 1.
+
Raises:
ValueError: when wrong arguments are given or unsupported functionalities
@@ -821,7 +1014,7 @@ class BoostedTreesClassifier(estimator.Estimator):
# HParams for the model.
tree_hparams = _TreeHParams(n_trees, max_depth, learning_rate,
l1_regularization, l2_regularization,
- tree_complexity, min_node_weight)
+ tree_complexity, min_node_weight, center_bias)
def _model_fn(features, labels, mode, config):
return _bt_model_fn( # pylint: disable=protected-access
@@ -864,7 +1057,8 @@ class BoostedTreesRegressor(estimator.Estimator):
l2_regularization=0.,
tree_complexity=0.,
min_node_weight=0.,
- config=None):
+ config=None,
+ center_bias=False):
"""Initializes a `BoostedTreesRegressor` instance.
Example:
@@ -925,6 +1119,12 @@ class BoostedTreesRegressor(estimator.Estimator):
split to be considered. The value will be compared with
sum(leaf_hessian)/(batch_size * n_batches_per_layer).
config: `RunConfig` object to configure the runtime settings.
+ center_bias: Whether bias centering needs to occur. Bias centering refers
+ to the first node in the very first tree returning the prediction that
+ is aligned with the original labels distribution. For example, for
+ regression problems, the first node will return the mean of the labels.
+ For binary classification problems, it will return a logit for a prior
+ probability of label 1.
Raises:
ValueError: when wrong arguments are given or unsupported functionalities
@@ -938,7 +1138,7 @@ class BoostedTreesRegressor(estimator.Estimator):
# HParams for the model.
tree_hparams = _TreeHParams(n_trees, max_depth, learning_rate,
l1_regularization, l2_regularization,
- tree_complexity, min_node_weight)
+ tree_complexity, min_node_weight, center_bias)
def _model_fn(features, labels, mode, config):
return _bt_model_fn( # pylint: disable=protected-access
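Hypothetical usage of the new flag (feature column and training data are placeholders, not taken from this diff): with center_bias=True the first training iterations fit a single-leaf tree holding the label prior before any splits are made.

    import tensorflow as tf

    fc = tf.feature_column.bucketized_column(
        tf.feature_column.numeric_column('f0'), boundaries=[0., 1., 2.])

    est = tf.estimator.BoostedTreesClassifier(
        feature_columns=[fc],
        n_batches_per_layer=1,
        n_trees=2,
        max_depth=2,
        center_bias=True)  # flag introduced by this change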
diff --git a/tensorflow/python/estimator/canned/boosted_trees_test.py b/tensorflow/python/estimator/canned/boosted_trees_test.py
index 33e9e69b04..f807641057 100644
--- a/tensorflow/python/estimator/canned/boosted_trees_test.py
+++ b/tensorflow/python/estimator/canned/boosted_trees_test.py
@@ -554,14 +554,6 @@ class ModelFnTests(test_util.TensorFlowTestCase):
feature_column.numeric_column('f_%d' % i, dtype=dtypes.float32),
BUCKET_BOUNDARIES) for i in range(NUM_FEATURES)
}
- self._tree_hparams = boosted_trees._TreeHParams( # pylint:disable=protected-access
- n_trees=2,
- max_depth=2,
- learning_rate=0.1,
- l1=0.,
- l2=0.01,
- tree_complexity=0.,
- min_node_weight=0.)
def _get_expected_ensembles_for_classification(self):
first_round = """
@@ -790,6 +782,245 @@ class ModelFnTests(test_util.TensorFlowTestCase):
"""
return (first_round, second_round, third_round)
+ def _get_expected_ensembles_for_classification_with_bias(self):
+ first_round = """
+ trees {
+ nodes {
+ leaf {
+ scalar: -0.405086
+ }
+ }
+ }
+ tree_weights: 1.0
+ tree_metadata {
+ }
+ """
+ second_round = """
+ trees {
+ nodes {
+ bucketized_split {
+ feature_id: 2
+ threshold: 2
+ left_id: 1
+ right_id: 2
+ }
+ metadata {
+ gain: 0.407711
+ original_leaf {
+ scalar: -0.405086
+ }
+ }
+ }
+ nodes {
+ leaf {
+ scalar: -0.556054
+ }
+ }
+ nodes {
+ leaf {
+ scalar: -0.301233
+ }
+ }
+ }
+ tree_weights: 1.0
+ tree_metadata {
+ num_layers_grown: 1
+ is_finalized: false
+ }
+ growing_metadata {
+ num_trees_attempted: 1
+ num_layers_attempted: 1
+ last_layer_node_start: 1
+ last_layer_node_end: 3
+ }
+ """
+ third_round = """
+ trees {
+ nodes {
+ bucketized_split {
+ feature_id: 2
+ threshold: 2
+ left_id: 1
+ right_id: 2
+ }
+ metadata {
+ gain: 0.407711
+ original_leaf {
+ scalar: -0.405086
+ }
+ }
+ }
+ nodes {
+ bucketized_split {
+ feature_id: 0
+ threshold: 3
+ left_id: 3
+ right_id: 4
+ }
+ metadata {
+ original_leaf {
+ scalar: -0.556054
+ }
+ }
+ }
+ nodes {
+ bucketized_split {
+ feature_id: 0
+ threshold: 0
+ left_id: 5
+ right_id: 6
+ }
+ metadata {
+ gain: 0.09876
+ original_leaf {
+ scalar: -0.301233
+ }
+ }
+ }
+ nodes {
+ leaf {
+ scalar: -0.698072
+ }
+ }
+ nodes {
+ leaf {
+ scalar: -0.556054
+ }
+ }
+ nodes {
+ leaf {
+ scalar: -0.106016
+ }
+ }
+ nodes {
+ leaf {
+ scalar: -0.27349
+ }
+ }
+ }
+ trees {
+ nodes {
+ leaf {
+ }
+ }
+ }
+ tree_weights: 1.0
+ tree_weights: 1.0
+ tree_metadata {
+ num_layers_grown: 2
+ is_finalized: true
+ }
+ tree_metadata {
+ }
+ growing_metadata {
+ num_trees_attempted: 1
+ num_layers_attempted: 2
+ last_layer_node_end: 1
+ }
+ """
+ forth_round = """
+ trees {
+ nodes {
+ bucketized_split {
+ feature_id: 2
+ threshold: 2
+ left_id: 1
+ right_id: 2
+ }
+ metadata {
+ gain: 0.4077113
+ original_leaf {
+ scalar: -0.405086
+ }
+ }
+ }
+ nodes {
+ bucketized_split {
+ threshold: 3
+ left_id: 3
+ right_id: 4
+ }
+ metadata {
+ original_leaf {
+ scalar: -0.556054
+ }
+ }
+ }
+ nodes {
+ bucketized_split {
+ threshold: 0
+ left_id: 5
+ right_id: 6
+ }
+ metadata {
+ gain: 0.09876
+ original_leaf {
+ scalar: -0.301233
+ }
+ }
+ }
+ nodes {
+ leaf {
+ scalar: -0.698072
+ }
+ }
+ nodes {
+ leaf {
+ scalar: -0.556054
+ }
+ }
+ nodes {
+ leaf {
+ scalar: -0.106016
+ }
+ }
+ nodes {
+ leaf {
+ scalar: -0.27349
+ }
+ }
+ }
+ trees {
+ nodes {
+ bucketized_split {
+ feature_id: 2
+ threshold: 2
+ left_id: 1
+ right_id: 2
+ }
+ metadata {
+ gain: 0.289927
+ }
+ }
+ nodes {
+ leaf {
+ scalar: -0.134588
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 0.083838
+ }
+ }
+ }
+ tree_weights: 1.0
+ tree_weights: 1.0
+ tree_metadata {
+ num_layers_grown: 2
+ is_finalized: true
+ }
+ tree_metadata {
+ num_layers_grown: 1
+ }
+ growing_metadata {
+ num_trees_attempted: 2
+ num_layers_attempted: 3
+ last_layer_node_start: 1
+ last_layer_node_end: 3
+ }
+ """
+ return (first_round, second_round, third_round, fourth_round)
+
def _get_expected_ensembles_for_regression(self):
first_round = """
trees {
@@ -1017,17 +1248,275 @@ class ModelFnTests(test_util.TensorFlowTestCase):
"""
return (first_round, second_round, third_round)
- def _get_train_op_and_ensemble(self, head, config, is_classification,
- train_in_memory):
+ def _get_expected_ensembles_for_regression_with_bias(self):
+ first_round = """
+ trees {
+ nodes {
+ leaf {
+ scalar: 1.799974
+ }
+ }
+ }
+ tree_weights: 1.0
+ tree_metadata {
+ }
+ """
+ second_round = """
+ trees {
+ nodes {
+ bucketized_split {
+ feature_id: 1
+ threshold: 1
+ left_id: 1
+ right_id: 2
+ }
+ metadata {
+ gain: 1.190442
+ original_leaf {
+ scalar: 1.799974
+ }
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 1.862786
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 1.706149
+ }
+ }
+ }
+ tree_weights: 1.0
+ tree_metadata {
+ num_layers_grown: 1
+ is_finalized: false
+ }
+ growing_metadata {
+ num_trees_attempted: 1
+ num_layers_attempted: 1
+ last_layer_node_start: 1
+ last_layer_node_end: 3
+ }
+ """
+ third_round = """
+ trees {
+ nodes {
+ bucketized_split {
+ feature_id: 1
+ threshold: 1
+ left_id: 1
+ right_id: 2
+ }
+ metadata {
+ gain: 1.190442
+ original_leaf {
+ scalar: 1.799974
+ }
+ }
+ }
+ nodes {
+ bucketized_split {
+ feature_id: 0
+ threshold: 1
+ left_id: 3
+ right_id: 4
+ }
+ metadata {
+ gain: 2.683594
+ original_leaf {
+ scalar: 1.862786
+ }
+ }
+ }
+ nodes {
+ bucketized_split {
+ feature_id: 0
+ threshold: 0
+ left_id: 5
+ right_id: 6
+ }
+ metadata {
+ gain: 0.322693
+ original_leaf {
+ scalar: 1.706149
+ }
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 2.024487
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 1.710319
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 1.559208
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 1.686037
+ }
+ }
+ }
+ trees {
+ nodes {
+ leaf {
+ scalar: 0.0
+ }
+ }
+ }
+ tree_weights: 1.0
+ tree_weights: 1.0
+ tree_metadata {
+ num_layers_grown: 2
+ is_finalized: true
+ }
+ tree_metadata {
+ num_layers_grown: 0
+ is_finalized: false
+ }
+ growing_metadata {
+ num_trees_attempted: 1
+ num_layers_attempted: 2
+ last_layer_node_start: 0
+ last_layer_node_end: 1
+ }
+ """
+ forth_round = """
+ trees {
+ nodes {
+ bucketized_split {
+ feature_id: 1
+ threshold: 1
+ left_id: 1
+ right_id: 2
+ }
+ metadata {
+ gain: 1.190442
+ original_leaf {
+ scalar: 1.799974
+ }
+ }
+ }
+ nodes {
+ bucketized_split {
+ threshold: 1
+ left_id: 3
+ right_id: 4
+ }
+ metadata {
+ gain: 2.683594
+ original_leaf {
+ scalar: 1.8627863
+ }
+ }
+ }
+ nodes {
+ bucketized_split {
+ left_id: 5
+ right_id: 6
+ }
+ metadata {
+ gain: 0.322693
+ original_leaf {
+ scalar: 1.706149
+ }
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 2.024487
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 1.710319
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 1.5592078
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 1.686037
+ }
+ }
+ }
+ trees {
+ nodes {
+ bucketized_split {
+ feature_id: 1
+ left_id: 1
+ right_id: 2
+ }
+ metadata {
+ gain: 0.972589
+ }
+ }
+ nodes {
+ leaf {
+ scalar: -0.137592
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 0.034926
+ }
+ }
+ }
+ tree_weights: 1.0
+ tree_weights: 1.0
+ tree_metadata {
+ num_layers_grown: 2
+ is_finalized: true
+ }
+ tree_metadata {
+ num_layers_grown: 1
+ }
+ growing_metadata {
+ num_trees_attempted: 2
+ num_layers_attempted: 3
+ last_layer_node_start: 1
+ last_layer_node_end: 3
+ }
+ """
+ return (first_round, second_round, third_round, fourth_round)
+
+ def _get_train_op_and_ensemble(self,
+ head,
+ config,
+ is_classification,
+ train_in_memory,
+ center_bias=False):
"""Calls bt_model_fn() and returns the train_op and ensemble_serialzed."""
features, labels = _make_train_input_fn(is_classification)()
+
+ tree_hparams = boosted_trees._TreeHParams( # pylint:disable=protected-access
+ n_trees=2,
+ max_depth=2,
+ learning_rate=0.1,
+ l1=0.,
+ l2=0.01,
+ tree_complexity=0.,
+ min_node_weight=0.,
+ center_bias=center_bias)
+
estimator_spec = boosted_trees._bt_model_fn( # pylint:disable=protected-access
features=features,
labels=labels,
mode=model_fn.ModeKeys.TRAIN,
head=head,
feature_columns=self._feature_columns,
- tree_hparams=self._tree_hparams,
+ tree_hparams=tree_hparams,
example_id_column_name=EXAMPLE_ID_COLUMN,
n_batches_per_layer=1,
config=config,
@@ -1076,6 +1565,49 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ensemble_proto.ParseFromString(serialized)
self.assertProtoEquals(expected_third, ensemble_proto)
+ def testTrainClassifierWithCenterBiasInMemory(self):
+ ops.reset_default_graph()
+
+ # When bias centering is on, we expect the very first node to hold the
+ # bias-centered prediction (a logit for the prior label distribution).
+ expected_first, expected_second, expected_third, expected_fourth = (
+ self._get_expected_ensembles_for_classification_with_bias())
+
+ with self.test_session() as sess:
+ with sess.graph.as_default():
+ train_op, ensemble_serialized = self._get_train_op_and_ensemble(
+ boosted_trees._create_classification_head(n_classes=2),
+ run_config.RunConfig(),
+ is_classification=True,
+ train_in_memory=True,
+ center_bias=True)
+
+ # 4 iterations to center bias.
+ for _ in range(4):
+ _, serialized = sess.run([train_op, ensemble_serialized])
+
+ # Validate the trained ensemble.
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_first, ensemble_proto)
+
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_second, ensemble_proto)
+
+ # Third round training and validation.
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_third, ensemble_proto)
+
+ # Fourth round training and validation.
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+
+ self.assertProtoEquals(expected_fourth, ensemble_proto)
+
def testTrainClassifierNonInMemory(self):
ops.reset_default_graph()
expected_first, expected_second, expected_third = (
@@ -1106,6 +1638,47 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ensemble_proto.ParseFromString(serialized)
self.assertProtoEquals(expected_third, ensemble_proto)
+ def testTrainClassifierWithCenterBiasNonInMemory(self):
+ ops.reset_default_graph()
+
+ # When bias centering is on, we expect the very first node to hold the
+ # bias-centered prediction (a logit for the prior label distribution).
+ expected_first, expected_second, expected_third, expected_fourth = (
+ self._get_expected_ensembles_for_classification_with_bias())
+
+ with self.test_session() as sess:
+ with sess.graph.as_default():
+ train_op, ensemble_serialized = self._get_train_op_and_ensemble(
+ boosted_trees._create_classification_head(n_classes=2),
+ run_config.RunConfig(),
+ is_classification=True,
+ train_in_memory=False,
+ center_bias=True)
+ # 4 iterations to center bias.
+ for _ in range(4):
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ # Validate the trained ensemble.
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_first, ensemble_proto)
+
+ # Run one more time and validate the trained ensemble.
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_second, ensemble_proto)
+
+ # Third round training and validation.
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_third, ensemble_proto)
+
+ # Fourth round training and validation.
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_fourth, ensemble_proto)
+
def testTrainRegressorInMemory(self):
ops.reset_default_graph()
expected_first, expected_second, expected_third = (
@@ -1136,6 +1709,46 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ensemble_proto.ParseFromString(serialized)
self.assertProtoEquals(expected_third, ensemble_proto)
+ def testTrainRegressorInMemoryWithCenterBias(self):
+ ops.reset_default_graph()
+ expected_first, expected_second, expected_third, expected_fourth = (
+ self._get_expected_ensembles_for_regression_with_bias())
+ with self.test_session() as sess:
+ # Train with train_in_memory mode.
+ with sess.graph.as_default():
+ train_op, ensemble_serialized = self._get_train_op_and_ensemble(
+ boosted_trees._create_regression_head(label_dimension=1),
+ run_config.RunConfig(),
+ is_classification=False,
+ train_in_memory=True,
+ center_bias=True)
+ # 3 iterations to center bias.
+ for _ in range(3):
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ # Validate the trained ensemble.
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+
+ self.assertProtoEquals(expected_first, ensemble_proto)
+
+ # Run one more time and validate the trained ensemble.
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_second, ensemble_proto)
+
+ # Third round training and validation.
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_third, ensemble_proto)
+
+ # Fourth round training and validation.
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_fourth, ensemble_proto)
+
def testTrainRegressorNonInMemory(self):
ops.reset_default_graph()
expected_first, expected_second, expected_third = (
@@ -1166,6 +1779,46 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ensemble_proto.ParseFromString(serialized)
self.assertProtoEquals(expected_third, ensemble_proto)
+ def testTrainRegressorNotInMemoryWithCenterBias(self):
+ ops.reset_default_graph()
+ expected_first, expected_second, expected_third, expected_fourth = (
+ self._get_expected_ensembles_for_regression_with_bias())
+ with self.test_session() as sess:
+ # Train without train_in_memory mode.
+ with sess.graph.as_default():
+ train_op, ensemble_serialized = self._get_train_op_and_ensemble(
+ boosted_trees._create_regression_head(label_dimension=1),
+ run_config.RunConfig(),
+ is_classification=False,
+ train_in_memory=False,
+ center_bias=True)
+ # 3 iterations to center the bias (because we are using regularization).
+ for _ in range(3):
+ _, serialized = sess.run([train_op, ensemble_serialized])
+
+ # Validate the trained ensemble.
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_first, ensemble_proto)
+
+ # Run one more time and validate the trained ensemble.
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_second, ensemble_proto)
+
+ # Third round training and validation.
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_third, ensemble_proto)
+
+ # Fourth round training and validation.
+ _, serialized = sess.run([train_op, ensemble_serialized])
+ ensemble_proto = boosted_trees_pb2.TreeEnsemble()
+ ensemble_proto.ParseFromString(serialized)
+ self.assertProtoEquals(expected_fourth, ensemble_proto)
+
if __name__ == '__main__':
googletest.main()
diff --git a/tensorflow/python/estimator/canned/dnn.py b/tensorflow/python/estimator/canned/dnn.py
index 90889e3e5d..c08cf61220 100644
--- a/tensorflow/python/estimator/canned/dnn.py
+++ b/tensorflow/python/estimator/canned/dnn.py
@@ -26,6 +26,7 @@ from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.layers import core as core_layers
+from tensorflow.python.layers import normalization
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
@@ -45,7 +46,7 @@ def _add_hidden_layer_summary(value, tag):
def _dnn_logit_fn_builder(units, hidden_units, feature_columns, activation_fn,
- dropout, input_layer_partitioner):
+ dropout, input_layer_partitioner, batch_norm):
"""Function builder for a dnn logit_fn.
Args:
@@ -58,6 +59,7 @@ def _dnn_logit_fn_builder(units, hidden_units, feature_columns, activation_fn,
dropout: When not `None`, the probability we will drop out a given
coordinate.
input_layer_partitioner: Partitioner for input layer.
+ batch_norm: Whether to use batch normalization after each hidden layer.
Returns:
A logit_fn (see below).
@@ -83,6 +85,7 @@ def _dnn_logit_fn_builder(units, hidden_units, feature_columns, activation_fn,
A `Tensor` representing the logits, or a list of `Tensor`'s representing
multiple logits in the MultiHead case.
"""
+ is_training = mode == model_fn.ModeKeys.TRAIN
with variable_scope.variable_scope(
'input_from_feature_columns',
values=tuple(six.itervalues(features)),
@@ -98,8 +101,20 @@ def _dnn_logit_fn_builder(units, hidden_units, feature_columns, activation_fn,
activation=activation_fn,
kernel_initializer=init_ops.glorot_uniform_initializer(),
name=hidden_layer_scope)
- if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
+ if dropout is not None and is_training:
net = core_layers.dropout(net, rate=dropout, training=True)
+ if batch_norm:
+ # TODO(hjm): In future, if this becomes popular, we can enable
+ # customization of the batch normalization params by accepting a
+ # list of `BatchNormalization` instances as `batch_norm`.
+ net = normalization.batch_normalization(
+ net,
+ # The default momentum 0.99 actually crashes on certain
+ # problems, so here we use 0.999, which is the default of
+ # tf.contrib.layers.batch_norm.
+ momentum=0.999,
+ training=is_training,
+ name='batchnorm_%d' % layer_id)
_add_hidden_layer_summary(net, hidden_layer_scope.name)
with variable_scope.variable_scope('logits', values=(net,)) as logits_scope:
@@ -127,7 +142,8 @@ def _dnn_model_fn(features,
dropout=None,
input_layer_partitioner=None,
config=None,
- tpu_estimator_spec=False):
+ tpu_estimator_spec=False,
+ batch_norm=False):
"""Deep Neural Net model_fn.
Args:
@@ -150,6 +166,7 @@ def _dnn_model_fn(features,
config: `RunConfig` object to configure the runtime settings.
tpu_estimator_spec: Whether to return a `_TPUEstimatorSpec` or
`model_fn.EstimatorSpec` instance.
+ batch_norm: Whether to use batch normalization after each hidden layer.
Returns:
An `EstimatorSpec` instance.
@@ -182,7 +199,8 @@ def _dnn_model_fn(features,
feature_columns=feature_columns,
activation_fn=activation_fn,
dropout=dropout,
- input_layer_partitioner=input_layer_partitioner)
+ input_layer_partitioner=input_layer_partitioner,
+ batch_norm=batch_norm)
logits = logit_fn(features=features, mode=mode)
if tpu_estimator_spec:
@@ -230,6 +248,17 @@ class DNNClassifier(estimator.Estimator):
l1_regularization_strength=0.001
))
+ # Or estimator using an optimizer with a learning rate decay.
+ estimator = DNNClassifier(
+ feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],
+ hidden_units=[1024, 512, 256],
+ optimizer=lambda: tf.train.AdamOptimizer(
+ learning_rate=tf.train.exponential_decay(
+ learning_rate=0.1,
+ global_step=tf.train.get_global_step(),
+ decay_steps=10000,
+ decay_rate=0.96)))
+
# Or estimator with warm-starting from a previous checkpoint.
estimator = DNNClassifier(
feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],
@@ -288,6 +317,7 @@ class DNNClassifier(estimator.Estimator):
config=None,
warm_start_from=None,
loss_reduction=losses.Reduction.SUM,
+ batch_norm=False,
):
"""Initializes a `DNNClassifier` instance.
@@ -317,8 +347,9 @@ class DNNClassifier(estimator.Estimator):
encoded as integer values in {0, 1,..., n_classes-1} for `n_classes` > 2.
Also there will be errors if the vocabulary is not provided and labels are
strings.
- optimizer: An instance of `tf.Optimizer` used to train the model. Defaults
- to Adagrad optimizer.
+ optimizer: An instance of `tf.Optimizer` used to train the model. Can also
+ be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or
+ callable. Defaults to Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
@@ -333,6 +364,7 @@ class DNNClassifier(estimator.Estimator):
names are unchanged.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM`.
+ batch_norm: Whether to use batch normalization after each hidden layer.
"""
head = head_lib._binary_logistic_or_multi_class_head( # pylint: disable=protected-access
n_classes, weight_column, label_vocabulary, loss_reduction)
@@ -349,7 +381,8 @@ class DNNClassifier(estimator.Estimator):
activation_fn=activation_fn,
dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
- config=config)
+ config=config,
+ batch_norm=batch_norm)
super(DNNClassifier, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
@@ -385,6 +418,17 @@ class DNNRegressor(estimator.Estimator):
l1_regularization_strength=0.001
))
+ # Or estimator using an optimizer with a learning rate decay.
+ estimator = DNNRegressor(
+ feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],
+ hidden_units=[1024, 512, 256],
+ optimizer=lambda: tf.train.AdamOptimizer(
+ learning_rate=tf.train.exponential_decay(
+ learning_rate=0.1,
+ global_step=tf.train.get_global_step(),
+ decay_steps=10000,
+ decay_rate=0.96)))
+
# Or estimator with warm-starting from a previous checkpoint.
estimator = DNNRegressor(
feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],
@@ -442,6 +486,7 @@ class DNNRegressor(estimator.Estimator):
config=None,
warm_start_from=None,
loss_reduction=losses.Reduction.SUM,
+ batch_norm=False,
):
"""Initializes a `DNNRegressor` instance.
@@ -465,8 +510,9 @@ class DNNRegressor(estimator.Estimator):
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
- optimizer: An instance of `tf.Optimizer` used to train the model. Defaults
- to Adagrad optimizer.
+ optimizer: An instance of `tf.Optimizer` used to train the model. Can also
+ be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or
+ callable. Defaults to Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
@@ -481,6 +527,7 @@ class DNNRegressor(estimator.Estimator):
names are unchanged.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM`.
+ batch_norm: Whether to use batch normalization after each hidden layer.
"""
def _model_fn(features, labels, mode, config):
@@ -498,7 +545,8 @@ class DNNRegressor(estimator.Estimator):
activation_fn=activation_fn,
dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
- config=config)
+ config=config,
+ batch_norm=batch_norm)
super(DNNRegressor, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
diff --git a/tensorflow/python/estimator/canned/dnn_linear_combined.py b/tensorflow/python/estimator/canned/dnn_linear_combined.py
index 3d1ad1365b..efa7812452 100644
--- a/tensorflow/python/estimator/canned/dnn_linear_combined.py
+++ b/tensorflow/python/estimator/canned/dnn_linear_combined.py
@@ -88,7 +88,9 @@ def _dnn_linear_combined_model_fn(features,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
input_layer_partitioner=None,
- config=None):
+ config=None,
+ batch_norm=False,
+ linear_sparse_combiner='sum'):
"""Deep Neural Net and Linear combined model_fn.
Args:
@@ -115,7 +117,10 @@ def _dnn_linear_combined_model_fn(features,
coordinate.
input_layer_partitioner: Partitioner for input layer.
config: `RunConfig` object to configure the runtime settings.
-
+ batch_norm: Whether to use batch normalization after each hidden layer.
+ linear_sparse_combiner: A string specifying how to reduce the linear model
+ if a categorical column is multivalent. One of "mean", "sqrtn", and
+ "sum".
Returns:
An `EstimatorSpec` instance.
@@ -164,7 +169,8 @@ def _dnn_linear_combined_model_fn(features,
feature_columns=dnn_feature_columns,
activation_fn=dnn_activation_fn,
dropout=dnn_dropout,
- input_layer_partitioner=input_layer_partitioner)
+ input_layer_partitioner=input_layer_partitioner,
+ batch_norm=batch_norm)
dnn_logits = dnn_logit_fn(features=features, mode=mode)
linear_parent_scope = 'linear'
@@ -182,7 +188,8 @@ def _dnn_linear_combined_model_fn(features,
partitioner=input_layer_partitioner) as scope:
logit_fn = linear._linear_logit_fn_builder( # pylint: disable=protected-access
units=head.logits_dimension,
- feature_columns=linear_feature_columns)
+ feature_columns=linear_feature_columns,
+ sparse_combiner=linear_sparse_combiner)
linear_logits = logit_fn(features=features)
_add_layer_summary(linear_logits, scope.name)
@@ -257,12 +264,19 @@ class DNNLinearCombinedClassifier(estimator.Estimator):
# warm-start settings
warm_start_from="/path/to/checkpoint/dir")
- # To apply L1 and L2 regularization, you can set optimizers as follows:
+ # To apply L1 and L2 regularization, you can set dnn_optimizer to:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
- # It is same for FtrlOptimizer.
+ # To apply learning rate decay, you can set dnn_optimizer to a callable:
+ lambda: tf.train.AdamOptimizer(
+ learning_rate=tf.train.exponential_decay(
+ learning_rate=0.1,
+ global_step=tf.train.get_global_step(),
+ decay_steps=10000,
+ decay_rate=0.96))
+ # It is the same for linear_optimizer.
# Input builders
def input_fn_train: # returns x, y
@@ -314,7 +328,9 @@ class DNNLinearCombinedClassifier(estimator.Estimator):
input_layer_partitioner=None,
config=None,
warm_start_from=None,
- loss_reduction=losses.Reduction.SUM):
+ loss_reduction=losses.Reduction.SUM,
+ batch_norm=False,
+ linear_sparse_combiner='sum'):
"""Initializes a DNNLinearCombinedClassifier instance.
Args:
@@ -325,12 +341,16 @@ class DNNLinearCombinedClassifier(estimator.Estimator):
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
- the linear part of the model. Defaults to FTRL optimizer.
+ the linear part of the model. Can also be a string (one of 'Adagrad',
+ 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to FTRL
+ optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
- the deep part of the model. Defaults to Adagrad optimizer.
+ the deep part of the model. Can also be a string (one of 'Adagrad',
+ 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to Adagrad
+ optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
@@ -363,6 +383,12 @@ class DNNLinearCombinedClassifier(estimator.Estimator):
names are unchanged.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM`.
+ batch_norm: Whether to use batch normalization after each hidden layer.
+ linear_sparse_combiner: A string specifying how to reduce the linear model
+ if a categorical column is multivalent. One of "mean", "sqrtn", and
+ "sum" -- these are effectively different ways to do example-level
+ normalization, which can be useful for bag-of-words features. For more
+ details, see @{tf.feature_column.linear_model$linear_model}.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
@@ -402,7 +428,9 @@ class DNNLinearCombinedClassifier(estimator.Estimator):
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner,
- config=config)
+ config=config,
+ batch_norm=batch_norm,
+ linear_sparse_combiner=linear_sparse_combiner)
super(DNNLinearCombinedClassifier, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
@@ -441,12 +469,19 @@ class DNNLinearCombinedRegressor(estimator.Estimator):
# warm-start settings
warm_start_from="/path/to/checkpoint/dir")
- # To apply L1 and L2 regularization, you can set optimizers as follows:
+ # To apply L1 and L2 regularization, you can set dnn_optimizer to:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
- # It is same for FtrlOptimizer.
+ # To apply learning rate decay, you can set dnn_optimizer to a callable:
+ lambda: tf.train.AdamOptimizer(
+ learning_rate=tf.train.exponential_decay(
+ learning_rate=0.1,
+ global_step=tf.train.get_global_step(),
+ decay_steps=10000,
+ decay_rate=0.96))
+ # It is the same for linear_optimizer.
# Input builders
def input_fn_train: # returns x, y
@@ -497,7 +532,9 @@ class DNNLinearCombinedRegressor(estimator.Estimator):
input_layer_partitioner=None,
config=None,
warm_start_from=None,
- loss_reduction=losses.Reduction.SUM):
+ loss_reduction=losses.Reduction.SUM,
+ batch_norm=False,
+ linear_sparse_combiner='sum'):
"""Initializes a DNNLinearCombinedRegressor instance.
Args:
@@ -508,12 +545,16 @@ class DNNLinearCombinedRegressor(estimator.Estimator):
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
- the linear part of the model. Defaults to FTRL optimizer.
+ the linear part of the model. Can also be a string (one of 'Adagrad',
+ 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to FTRL
+ optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
- the deep part of the model. Defaults to Adagrad optimizer.
+ the deep part of the model. Can also be a string (one of 'Adagrad',
+ 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to Adagrad
+ optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
@@ -540,6 +581,12 @@ class DNNLinearCombinedRegressor(estimator.Estimator):
names are unchanged.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM`.
+ batch_norm: Whether to use batch normalization after each hidden layer.
+ linear_sparse_combiner: A string specifying how to reduce the linear model
+ if a categorical column is multivalent. One of "mean", "sqrtn", and
+ "sum" -- these are effectively different ways to do example-level
+ normalization, which can be useful for bag-of-words features. For more
+ details, see @{tf.feature_column.linear_model$linear_model}.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
@@ -570,7 +617,9 @@ class DNNLinearCombinedRegressor(estimator.Estimator):
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner,
- config=config)
+ config=config,
+ batch_norm=batch_norm,
+ linear_sparse_combiner=linear_sparse_combiner)
super(DNNLinearCombinedRegressor, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
diff --git a/tensorflow/python/estimator/canned/dnn_linear_combined_test.py b/tensorflow/python/estimator/canned/dnn_linear_combined_test.py
index d275695eb3..d16318659b 100644
--- a/tensorflow/python/estimator/canned/dnn_linear_combined_test.py
+++ b/tensorflow/python/estimator/canned/dnn_linear_combined_test.py
@@ -100,7 +100,8 @@ def _linear_regressor_fn(feature_columns,
weight_column=None,
optimizer='Ftrl',
config=None,
- partitioner=None):
+ partitioner=None,
+ sparse_combiner='sum'):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
linear_feature_columns=feature_columns,
@@ -108,7 +109,8 @@ def _linear_regressor_fn(feature_columns,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=partitioner,
- config=config)
+ config=config,
+ linear_sparse_combiner=sparse_combiner)
class LinearOnlyRegressorPartitionerTest(
@@ -163,7 +165,8 @@ def _linear_classifier_fn(feature_columns,
label_vocabulary=None,
optimizer='Ftrl',
config=None,
- partitioner=None):
+ partitioner=None,
+ sparse_combiner='sum'):
return dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=feature_columns,
@@ -172,7 +175,8 @@ def _linear_classifier_fn(feature_columns,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
input_layer_partitioner=partitioner,
- config=config)
+ config=config,
+ linear_sparse_combiner=sparse_combiner)
class LinearOnlyClassifierTrainingTest(
diff --git a/tensorflow/python/estimator/canned/dnn_testing_utils.py b/tensorflow/python/estimator/canned/dnn_testing_utils.py
index 06a648777f..de226ed0ef 100644
--- a/tensorflow/python/estimator/canned/dnn_testing_utils.py
+++ b/tensorflow/python/estimator/canned/dnn_testing_utils.py
@@ -65,6 +65,11 @@ from tensorflow.python.training import training_util
LEARNING_RATE_NAME = 'dnn/regression_head/dnn/learning_rate'
HIDDEN_WEIGHTS_NAME_PATTERN = 'dnn/hiddenlayer_%d/kernel'
HIDDEN_BIASES_NAME_PATTERN = 'dnn/hiddenlayer_%d/bias'
+BATCH_NORM_BETA_NAME_PATTERN = 'dnn/hiddenlayer_%d/batchnorm_%d/beta'
+BATCH_NORM_GAMMA_NAME_PATTERN = 'dnn/hiddenlayer_%d/batchnorm_%d/gamma'
+BATCH_NORM_MEAN_NAME_PATTERN = 'dnn/hiddenlayer_%d/batchnorm_%d/moving_mean'
+BATCH_NORM_VARIANCE_NAME_PATTERN = (
+ 'dnn/hiddenlayer_%d/batchnorm_%d/moving_variance')
LOGITS_WEIGHTS_NAME = 'dnn/logits/kernel'
LOGITS_BIASES_NAME = 'dnn/logits/bias'
OCCUPATION_EMBEDDING_NAME = ('dnn/input_from_feature_columns/input_layer/'
@@ -89,7 +94,10 @@ def assert_close(expected, actual, rtol=1e-04, message='', name='assert_close'):
name=scope)
-def create_checkpoint(weights_and_biases, global_step, model_dir):
+def create_checkpoint(weights_and_biases,
+ global_step,
+ model_dir,
+ batch_norm_vars=None):
"""Create checkpoint file with provided model weights.
Args:
@@ -98,12 +106,20 @@ def create_checkpoint(weights_and_biases, global_step, model_dir):
model_dir: Directory into which checkpoint is saved.
"""
weights, biases = zip(*weights_and_biases)
+ if batch_norm_vars:
+ assert len(batch_norm_vars) == len(weights_and_biases) - 1
+ (bn_betas, bn_gammas, bn_means, bn_variances) = zip(*batch_norm_vars)
model_weights = {}
# Hidden layer weights.
for i in range(0, len(weights) - 1):
model_weights[HIDDEN_WEIGHTS_NAME_PATTERN % i] = weights[i]
model_weights[HIDDEN_BIASES_NAME_PATTERN % i] = biases[i]
+ if batch_norm_vars:
+ model_weights[BATCH_NORM_BETA_NAME_PATTERN % (i, i)] = bn_betas[i]
+ model_weights[BATCH_NORM_GAMMA_NAME_PATTERN % (i, i)] = bn_gammas[i]
+ model_weights[BATCH_NORM_MEAN_NAME_PATTERN % (i, i)] = bn_means[i]
+ model_weights[BATCH_NORM_VARIANCE_NAME_PATTERN % (i, i)] = bn_variances[i]
# Output layer weights.
model_weights[LOGITS_WEIGHTS_NAME] = weights[-1]
@@ -503,8 +519,13 @@ class BaseDNNLogitFnTest(object):
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
- def _test_logits(self, mode, hidden_units, logits_dimension, inputs,
- expected_logits):
+ def _test_logits(self,
+ mode,
+ hidden_units,
+ logits_dimension,
+ inputs,
+ expected_logits,
+ batch_norm=False):
"""Tests that the expected logits are calculated."""
with ops.Graph().as_default():
# Global step needed for MonitoredSession, which is in turn used to
@@ -525,7 +546,8 @@ class BaseDNNLogitFnTest(object):
],
activation_fn=nn.relu,
dropout=None,
- input_layer_partitioner=input_layer_partitioner)
+ input_layer_partitioner=input_layer_partitioner,
+ batch_norm=batch_norm)
logits = logit_fn(
features={'age': constant_op.constant(inputs)}, mode=mode)
with monitored_session.MonitoredTrainingSession(
@@ -556,6 +578,69 @@ class BaseDNNLogitFnTest(object):
inputs=[[10.]],
expected_logits=[[-2.08]])
+ def test_one_dim_logits_with_batch_norm(self):
+ """Tests one-dimensional logits.
+
+ input_layer = [[10], [20]]
+ hidden_layer_0 = [[relu(0.6*10 +1), relu(0.5*10 -1)],
+ [relu(0.6*20 +1), relu(0.5*20 -1)]] = [[7, 4], [13, 9]]
+
+ batch_norm_0, training (epsilon = 0.001):
+ mean1 = 1/2*(7+13) = 10,
+ variance1 = 1/2*(3^2+3^2) = 9
+ x11 = (7-10)/sqrt(9+0.001) = -0.999944449,
+ x21 = (13-10)/sqrt(9+0.001) = 0.999944449,
+
+ mean2 = 1/2*(4+9) = 6.5,
+ variance2 = 1/2*(2.5^2+2.5^2) = 6.25
+ x12 = (4-6.5)/sqrt(6.25+0.001) = -0.99992001,
+ x22 = (9-6.5)/sqrt(6.25+0.001) = 0.99992001,
+
+ logits = [[-1*(-0.999944449) + 2*(-0.99992001) + 0.3],
+ [-1*0.999944449 + 2*0.99992001 + 0.3]]
+ = [[-0.699895571],[1.299895571]]
+
+ batch_norm_0, not training (epsilon = 0.001):
+ moving_mean1 = 0, moving_variance1 = 1
+ x11 = (7-0)/sqrt(1+0.001) = 6.996502623,
+ x21 = (13-0)/sqrt(1+0.001) = 12.993504871,
+ moving_mean2 = 0, moving_variance2 = 1
+ x12 = (4-0)/sqrt(1+0.001) = 3.998001499,
+ x22 = (9-0)/sqrt(1+0.001) = 8.995503372,
+
+ logits = [[-1*6.996502623 + 2*3.998001499 + 0.3],
+ [-1*12.993504871 + 2*8.995503372 + 0.3]]
+ = [[1.299500375],[5.297501873]]
+ """
+ base_global_step = 100
+ create_checkpoint(
+ (
+ ([[.6, .5]], [1., -1.]),
+ ([[-1.], [2.]], [.3]),
+ ),
+ base_global_step,
+ self._model_dir,
+ batch_norm_vars=([[0, 0], # beta.
+ [1, 1], # gamma.
+ [0, 0], # moving mean.
+ [1, 1], # moving variance.
+ ],))
+ self._test_logits(
+ model_fn.ModeKeys.TRAIN,
+ hidden_units=[2],
+ logits_dimension=1,
+ inputs=[[10.], [20.]],
+ expected_logits=[[-0.699895571], [1.299895571]],
+ batch_norm=True)
+ for mode in [model_fn.ModeKeys.EVAL, model_fn.ModeKeys.PREDICT]:
+ self._test_logits(
+ mode,
+ hidden_units=[2],
+ logits_dimension=1,
+ inputs=[[10.], [20.]],
+ expected_logits=[[1.299500375], [5.297501873]],
+ batch_norm=True)
+
def test_multi_dim_logits(self):
"""Tests multi-dimensional logits.
@@ -706,7 +791,8 @@ class BaseDNNLogitFnTest(object):
],
activation_fn=nn.relu,
dropout=None,
- input_layer_partitioner=input_layer_partitioner)
+ input_layer_partitioner=input_layer_partitioner,
+ batch_norm=False)
logits = logit_fn(
features={
'age': constant_op.constant(inputs[0]),
@@ -1185,6 +1271,8 @@ class BaseDNNRegressorEvaluateTest(object):
self.assertAllClose({
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
+ metric_keys.MetricKeys.PREDICTION_MEAN: -2.08,
+ metric_keys.MetricKeys.LABEL_MEAN: 1.0,
ops.GraphKeys.GLOBAL_STEP: global_step
}, dnn_regressor.evaluate(input_fn=_input_fn, steps=1))
@@ -1215,6 +1303,8 @@ class BaseDNNRegressorEvaluateTest(object):
self.assertAllClose({
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / label_dimension,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 0.39 / 3.0,
+ metric_keys.MetricKeys.LABEL_MEAN: 0.5 / 3.0,
ops.GraphKeys.GLOBAL_STEP: global_step
}, dnn_regressor.evaluate(input_fn=_input_fn, steps=1))
diff --git a/tensorflow/python/estimator/canned/head.py b/tensorflow/python/estimator/canned/head.py
index b74ef1015c..da9a64c2bc 100644
--- a/tensorflow/python/estimator/canned/head.py
+++ b/tensorflow/python/estimator/canned/head.py
@@ -1398,15 +1398,21 @@ class _RegressionHeadWithMeanSquaredErrorLoss(_Head):
weights=weights,
processed_labels=labels)
- def _eval_metric_ops(self, weights, unreduced_loss, regularization_loss):
+ def _eval_metric_ops(self, predicted_value, labels, weights, unreduced_loss,
+ regularization_loss):
"""Returns the Eval metric ops."""
keys = metric_keys.MetricKeys
# Estimator already adds a metric for loss.
eval_metric_ops = {
_summary_key(self._name, keys.LOSS_MEAN):
- metrics_lib.mean(
- values=unreduced_loss,
- weights=weights)
+ metrics_lib.mean(values=unreduced_loss, weights=weights),
+ _summary_key(self._name, keys.PREDICTION_MEAN):
+ _predictions_mean(
+ predictions=predicted_value,
+ weights=weights,
+ name=keys.PREDICTION_MEAN),
+ _summary_key(self._name, keys.LABEL_MEAN):
+ metrics_lib.mean(values=labels, weights=weights)
}
if regularization_loss is not None:
regularization_loss_key = _summary_key(
@@ -1489,13 +1495,13 @@ class _RegressionHeadWithMeanSquaredErrorLoss(_Head):
predictions=predictions,
loss=regularized_training_loss,
eval_metrics=_create_eval_metrics_tuple(
- self._eval_metric_ops,
- {
+ self._eval_metric_ops, {
+ 'predicted_value': predicted_value,
+ 'labels': labels,
'weights': weights,
'unreduced_loss': unreduced_loss,
'regularization_loss': regularization_loss,
- }
- ))
+ }))
# Train.
if optimizer is not None:
diff --git a/tensorflow/python/estimator/canned/head_test.py b/tensorflow/python/estimator/canned/head_test.py
index 08ce5ca8e8..bd2e0ae943 100644
--- a/tensorflow/python/estimator/canned/head_test.py
+++ b/tensorflow/python/estimator/canned/head_test.py
@@ -3103,8 +3103,10 @@ class RegressionHead(test.TestCase):
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertEqual(dtypes.float32, spec.loss.dtype)
- self.assertItemsEqual(
- (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
+ self.assertItemsEqual((metric_keys.MetricKeys.LOSS_MEAN,
+ metric_keys.MetricKeys.PREDICTION_MEAN,
+ metric_keys.MetricKeys.LABEL_MEAN),
+ spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
@@ -3140,6 +3142,9 @@ class RegressionHead(test.TestCase):
expected_metric_keys = [
'{}/some_regression_head'.format(metric_keys.MetricKeys.LOSS_MEAN),
+ '{}/some_regression_head'.format(
+ metric_keys.MetricKeys.PREDICTION_MEAN),
+ '{}/some_regression_head'.format(metric_keys.MetricKeys.LABEL_MEAN),
]
self.assertItemsEqual(expected_metric_keys, spec.eval_metric_ops.keys())
@@ -3170,6 +3175,8 @@ class RegressionHead(test.TestCase):
expected_metrics = {
keys.LOSS_MEAN: expected_unregularized_loss,
keys.LOSS_REGULARIZATION: expected_regularization_loss,
+ keys.PREDICTION_MEAN: (45 + 41) / 2.0,
+ keys.LABEL_MEAN: (43 + 44) / 2.0,
}
# Assert predictions, loss, and metrics.
@@ -3471,8 +3478,10 @@ class RegressionHead(test.TestCase):
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertEqual(dtypes.float32, spec.loss.dtype)
- self.assertItemsEqual(
- (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
+ self.assertItemsEqual((metric_keys.MetricKeys.LOSS_MEAN,
+ metric_keys.MetricKeys.PREDICTION_MEAN,
+ metric_keys.MetricKeys.LABEL_MEAN),
+ spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
@@ -3700,8 +3709,10 @@ class RegressionHead(test.TestCase):
self.assertItemsEqual((prediction_key,), spec.predictions.keys())
self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
self.assertEqual(dtypes.float32, spec.loss.dtype)
- self.assertItemsEqual(
- (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
+ self.assertItemsEqual((metric_keys.MetricKeys.LOSS_MEAN,
+ metric_keys.MetricKeys.PREDICTION_MEAN,
+ metric_keys.MetricKeys.LABEL_MEAN),
+ spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
@@ -3832,7 +3843,13 @@ class RegressionHead(test.TestCase):
# losses = [1*(35-45)^2, .1*(42-41)^2, 1.5*(45-44)^2] = [100, .1, 1.5]
# loss = sum(losses) = 100+.1+1.5 = 101.6
# loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923
- expected_metrics = {metric_keys.MetricKeys.LOSS_MEAN: 39.076923}
+ expected_metrics = {
+ metric_keys.MetricKeys.LOSS_MEAN:
+ 39.076923,
+ metric_keys.MetricKeys.PREDICTION_MEAN:
+ (45 + 41 * 0.1 + 44 * 1.5) / 2.6,
+ metric_keys.MetricKeys.LABEL_MEAN: (35 + 42 * 0.1 + 45 * 1.5) / 2.6,
+ }
# Assert spec contains expected tensors.
self.assertEqual(dtypes.float32, spec.loss.dtype)
diff --git a/tensorflow/python/estimator/canned/linear.py b/tensorflow/python/estimator/canned/linear.py
index ac59e786c4..58a7160348 100644
--- a/tensorflow/python/estimator/canned/linear.py
+++ b/tensorflow/python/estimator/canned/linear.py
@@ -66,13 +66,15 @@ def _compute_fraction_of_zero(cols_to_vars):
return nn.zero_fraction(array_ops.concat(all_weight_vars, axis=0))
-def _linear_logit_fn_builder(units, feature_columns):
+def _linear_logit_fn_builder(units, feature_columns, sparse_combiner='sum'):
"""Function builder for a linear logit_fn.
Args:
units: An int indicating the dimension of the logit layer.
feature_columns: An iterable containing all the feature columns used by
the model.
+ sparse_combiner: A string specifying how to reduce if a categorical column
+ is multivalent. One of "mean", "sqrtn", and "sum".
Returns:
A logit_fn (see below).
@@ -95,6 +97,7 @@ def _linear_logit_fn_builder(units, feature_columns):
features=features,
feature_columns=feature_columns,
units=units,
+ sparse_combiner=sparse_combiner,
cols_to_vars=cols_to_vars)
bias = cols_to_vars.pop('bias')
if units > 1:
@@ -111,7 +114,7 @@ def _linear_logit_fn_builder(units, feature_columns):
def _linear_model_fn(features, labels, mode, head, feature_columns, optimizer,
- partitioner, config):
+ partitioner, config, sparse_combiner='sum'):
"""A model_fn for linear models that use a gradient-based optimizer.
Args:
@@ -126,6 +129,8 @@ def _linear_model_fn(features, labels, mode, head, feature_columns, optimizer,
optimizer to use for training. If `None`, will use a FTRL optimizer.
partitioner: Partitioner for variables.
config: `RunConfig` object to configure the runtime settings.
+ sparse_combiner: A string specifying how to reduce if a categorical column
+ is multivalent. One of "mean", "sqrtn", and "sum".
Returns:
An `EstimatorSpec` instance.
@@ -153,7 +158,8 @@ def _linear_model_fn(features, labels, mode, head, feature_columns, optimizer,
partitioner=partitioner):
logit_fn = _linear_logit_fn_builder(
- units=head.logits_dimension, feature_columns=feature_columns)
+ units=head.logits_dimension, feature_columns=feature_columns,
+ sparse_combiner=sparse_combiner)
logits = logit_fn(features=features)
return head.create_estimator_spec(
@@ -193,6 +199,17 @@ class LinearClassifier(estimator.Estimator):
l1_regularization_strength=0.001
))
+ # Or estimator using an optimizer with a learning rate decay.
+ estimator = LinearClassifier(
+ feature_columns=[categorical_column_a,
+ categorical_feature_a_x_categorical_feature_b],
+ optimizer=lambda: tf.train.FtrlOptimizer(
+ learning_rate=tf.train.exponential_decay(
+ learning_rate=0.1,
+ global_step=tf.train.get_global_step(),
+ decay_steps=10000,
+ decay_rate=0.96)))
+
# Or estimator with warm-starting from a previous checkpoint.
estimator = LinearClassifier(
feature_columns=[categorical_column_a,
@@ -244,7 +261,8 @@ class LinearClassifier(estimator.Estimator):
config=None,
partitioner=None,
warm_start_from=None,
- loss_reduction=losses.Reduction.SUM):
+ loss_reduction=losses.Reduction.SUM,
+ sparse_combiner='sum'):
"""Construct a `LinearClassifier` estimator object.
Args:
@@ -272,8 +290,9 @@ class LinearClassifier(estimator.Estimator):
encoded as integer values in {0, 1,..., n_classes-1} for `n_classes` > 2.
Also there will be errors if the vocabulary is not provided and labels are
strings.
- optimizer: An instance of `tf.Optimizer` used to train the model. Defaults
- to FTRL optimizer.
+ optimizer: An instance of `tf.Optimizer` used to train the model. Can also
+ be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or
+ callable. Defaults to FTRL optimizer.
config: `RunConfig` object to configure the runtime settings.
partitioner: Optional. Partitioner for input layer.
warm_start_from: A string filepath to a checkpoint to warm-start from, or
@@ -283,6 +302,11 @@ class LinearClassifier(estimator.Estimator):
and Tensor names are unchanged.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM`.
+ sparse_combiner: A string specifying how to reduce if a categorical column
+ is multivalent. One of "mean", "sqrtn", and "sum" -- these are
+ effectively different ways to do example-level normalization, which can
+ be useful for bag-of-words features. For more details, see
+ @{tf.feature_column.linear_model$linear_model}.
Returns:
A `LinearClassifier` estimator.
@@ -311,7 +335,8 @@ class LinearClassifier(estimator.Estimator):
feature_columns=tuple(feature_columns or []),
optimizer=optimizer,
partitioner=partitioner,
- config=config)
+ config=config,
+ sparse_combiner=sparse_combiner)
super(LinearClassifier, self).__init__(
model_fn=_model_fn,
@@ -335,10 +360,31 @@ class LinearRegressor(estimator.Estimator):
categorical_feature_a_x_categorical_feature_b = crossed_column(...)
+ # Estimator using the default optimizer.
estimator = LinearRegressor(
feature_columns=[categorical_column_a,
categorical_feature_a_x_categorical_feature_b])
+ # Or estimator using the FTRL optimizer with regularization.
+ estimator = LinearRegressor(
+ feature_columns=[categorical_column_a,
+ categorical_feature_a_x_categorical_feature_b],
+ optimizer=tf.train.FtrlOptimizer(
+ learning_rate=0.1,
+ l1_regularization_strength=0.001
+ ))
+
+ # Or estimator using an optimizer with a learning rate decay.
+ estimator = LinearRegressor(
+ feature_columns=[categorical_column_a,
+ categorical_feature_a_x_categorical_feature_b],
+ optimizer=lambda: tf.train.FtrlOptimizer(
+ learning_rate=tf.train.exponential_decay(
+ learning_rate=0.1,
+ global_step=tf.train.get_global_step(),
+ decay_steps=10000,
+ decay_rate=0.96)))
+
# Or estimator with warm-starting from a previous checkpoint.
estimator = LinearRegressor(
feature_columns=[categorical_column_a,
@@ -389,7 +435,8 @@ class LinearRegressor(estimator.Estimator):
config=None,
partitioner=None,
warm_start_from=None,
- loss_reduction=losses.Reduction.SUM):
+ loss_reduction=losses.Reduction.SUM,
+ sparse_combiner='sum'):
"""Initializes a `LinearRegressor` instance.
Args:
@@ -409,8 +456,9 @@ class LinearRegressor(estimator.Estimator):
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
- optimizer: An instance of `tf.Optimizer` used to train the model. Defaults
- to FTRL optimizer.
+ optimizer: An instance of `tf.Optimizer` used to train the model. Can also
+ be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or
+ callable. Defaults to FTRL optimizer.
config: `RunConfig` object to configure the runtime settings.
partitioner: Optional. Partitioner for input layer.
warm_start_from: A string filepath to a checkpoint to warm-start from, or
@@ -420,6 +468,11 @@ class LinearRegressor(estimator.Estimator):
and Tensor names are unchanged.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM`.
+ sparse_combiner: A string specifying how to reduce if a categorical column
+ is multivalent. One of "mean", "sqrtn", and "sum" -- these are
+ effectively different ways to do example-level normalization, which can
+ be useful for bag-of-words features. For more details, see
+ @{tf.feature_column.linear_model$linear_model}.
"""
head = head_lib._regression_head( # pylint: disable=protected-access
label_dimension=label_dimension, weight_column=weight_column,
@@ -435,7 +488,8 @@ class LinearRegressor(estimator.Estimator):
feature_columns=tuple(feature_columns or []),
optimizer=optimizer,
partitioner=partitioner,
- config=config)
+ config=config,
+ sparse_combiner=sparse_combiner)
super(LinearRegressor, self).__init__(
model_fn=_model_fn,
diff --git a/tensorflow/python/estimator/canned/linear_testing_utils.py b/tensorflow/python/estimator/canned/linear_testing_utils.py
index 0e6436b421..c3934c7a80 100644
--- a/tensorflow/python/estimator/canned/linear_testing_utils.py
+++ b/tensorflow/python/estimator/canned/linear_testing_utils.py
@@ -29,6 +29,7 @@ import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session as tf_session
+from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator.canned import linear
@@ -260,6 +261,8 @@ class BaseLinearRegressorEvaluationTest(object):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -285,6 +288,8 @@ class BaseLinearRegressorEvaluationTest(object):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -315,6 +320,8 @@ class BaseLinearRegressorEvaluationTest(object):
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
+ metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
+ metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
@@ -345,7 +352,9 @@ class BaseLinearRegressorEvaluationTest(object):
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
- ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
+ metric_keys.MetricKeys.PREDICTION_MEAN,
+ metric_keys.MetricKeys.LABEL_MEAN, ops.GraphKeys.GLOBAL_STEP),
+ eval_metrics.keys())
# Logit is
# [2., 4., 5.] * [1.0, 2.0] + [7.0, 8.0] = [39, 50] + [7.0, 8.0]
@@ -382,7 +391,9 @@ class BaseLinearRegressorEvaluationTest(object):
eval_metrics = est.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
- ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
+ metric_keys.MetricKeys.PREDICTION_MEAN,
+ metric_keys.MetricKeys.LABEL_MEAN, ops.GraphKeys.GLOBAL_STEP),
+ eval_metrics.keys())
# Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
# [213.0, 421.0], while label is [213., 421.]. Loss = 0.
@@ -484,6 +495,69 @@ class BaseLinearRegressorPredictTest(object):
# x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
self.assertAllClose([[80.2]], predicted_scores)
+ def testSparseCombiner(self):
+ w_a = 2.0
+ w_b = 3.0
+ w_c = 5.0
+ bias = 5.0
+ with ops.Graph().as_default():
+ variables_lib.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)
+ variables_lib.Variable([bias], name=BIAS_NAME)
+ variables_lib.Variable(1, name=ops.GraphKeys.GLOBAL_STEP,
+ dtype=dtypes.int64)
+ save_variables_to_ckpt(self._model_dir)
+
+ def _input_fn():
+ return dataset_ops.Dataset.from_tensors({
+ 'language': sparse_tensor.SparseTensor(
+ values=['a', 'c', 'b', 'c'],
+ indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
+ dense_shape=[2, 2]),
+ })
+
+ feature_columns = (
+ feature_column_lib.categorical_column_with_vocabulary_list(
+ 'language', vocabulary_list=['a', 'b', 'c']),)
+
+ # Check prediction for each sparse_combiner.
+ # With sparse_combiner = 'sum', we have
+ # logits_1 = w_a + w_c + bias
+ # = 2.0 + 5.0 + 5.0 = 12.0
+ # logits_2 = w_b + w_c + bias
+ # = 3.0 + 5.0 + 5.0 = 13.0
+ linear_regressor = self._linear_regressor_fn(
+ feature_columns=feature_columns,
+ model_dir=self._model_dir)
+ predictions = linear_regressor.predict(input_fn=_input_fn)
+ predicted_scores = list([x['predictions'] for x in predictions])
+ self.assertAllClose([[12.0], [13.0]], predicted_scores)
+
+ # With sparse_combiner = 'mean', we have
+ # logits_1 = 1/2 * (w_a + w_c) + bias
+ # = 1/2 * (2.0 + 5.0) + 5.0 = 8.5
+ # logits_2 = 1/2 * (w_b + w_c) + bias
+ # = 1/2 * (3.0 + 5.0) + 5.0 = 9.0
+ linear_regressor = self._linear_regressor_fn(
+ feature_columns=feature_columns,
+ model_dir=self._model_dir,
+ sparse_combiner='mean')
+ predictions = linear_regressor.predict(input_fn=_input_fn)
+ predicted_scores = list([x['predictions'] for x in predictions])
+ self.assertAllClose([[8.5], [9.0]], predicted_scores)
+
+ # With sparse_combiner = 'sqrtn', we have
+ # logits_1 = sqrt(2)/2 * (w_a + w_c) + bias
+ # = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974
+ # logits_2 = sqrt(2)/2 * (w_b + w_c) + bias
+ # = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685
+ linear_regressor = self._linear_regressor_fn(
+ feature_columns=feature_columns,
+ model_dir=self._model_dir,
+ sparse_combiner='sqrtn')
+ predictions = linear_regressor.predict(input_fn=_input_fn)
+ predicted_scores = list([x['predictions'] for x in predictions])
+ self.assertAllClose([[9.94974], [10.65685]], predicted_scores)
+
class BaseLinearRegressorIntegrationTest(object):
@@ -1636,6 +1710,69 @@ class BaseLinearClassifierPredictTest(object):
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
+ def testSparseCombiner(self):
+ w_a = 2.0
+ w_b = 3.0
+ w_c = 5.0
+ bias = 5.0
+ with ops.Graph().as_default():
+ variables_lib.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)
+ variables_lib.Variable([bias], name=BIAS_NAME)
+ variables_lib.Variable(1, name=ops.GraphKeys.GLOBAL_STEP,
+ dtype=dtypes.int64)
+ save_variables_to_ckpt(self._model_dir)
+
+ def _input_fn():
+ return dataset_ops.Dataset.from_tensors({
+ 'language': sparse_tensor.SparseTensor(
+ values=['a', 'c', 'b', 'c'],
+ indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
+ dense_shape=[2, 2]),
+ })
+
+ feature_columns = (
+ feature_column_lib.categorical_column_with_vocabulary_list(
+ 'language', vocabulary_list=['a', 'b', 'c']),)
+
+ # Check prediction for each sparse_combiner.
+ # With sparse_combiner = 'sum', we have
+ # logits_1 = w_a + w_c + bias
+ # = 2.0 + 5.0 + 5.0 = 12.0
+ # logits_2 = w_b + w_c + bias
+ # = 3.0 + 5.0 + 5.0 = 13.0
+ linear_classifier = self._linear_classifier_fn(
+ feature_columns=feature_columns,
+ model_dir=self._model_dir)
+ predictions = linear_classifier.predict(input_fn=_input_fn)
+ predicted_scores = list([x['logits'] for x in predictions])
+ self.assertAllClose([[12.0], [13.0]], predicted_scores)
+
+ # With sparse_combiner = 'mean', we have
+ # logits_1 = 1/2 * (w_a + w_c) + bias
+ # = 1/2 * (2.0 + 5.0) + 5.0 = 8.5
+ # logits_2 = 1/2 * (w_b + w_c) + bias
+ # = 1/2 * (3.0 + 5.0) + 5.0 = 9.0
+ linear_classifier = self._linear_classifier_fn(
+ feature_columns=feature_columns,
+ model_dir=self._model_dir,
+ sparse_combiner='mean')
+ predictions = linear_classifier.predict(input_fn=_input_fn)
+ predicted_scores = list([x['logits'] for x in predictions])
+ self.assertAllClose([[8.5], [9.0]], predicted_scores)
+
+ # With sparse_combiner = 'sqrtn', we have
+ # logits_1 = sqrt(2)/2 * (w_a + w_c) + bias
+ # = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974
+ # logits_2 = sqrt(2)/2 * (w_b + w_c) + bias
+ # = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685
+ linear_classifier = self._linear_classifier_fn(
+ feature_columns=feature_columns,
+ model_dir=self._model_dir,
+ sparse_combiner='sqrtn')
+ predictions = linear_classifier.predict(input_fn=_input_fn)
+ predicted_scores = list([x['logits'] for x in predictions])
+ self.assertAllClose([[9.94974], [10.65685]], predicted_scores)
+
class BaseLinearClassifierIntegrationTest(object):
diff --git a/tensorflow/python/estimator/canned/optimizers.py b/tensorflow/python/estimator/canned/optimizers.py
index f72c5ca5cb..8f51cc3a80 100644
--- a/tensorflow/python/estimator/canned/optimizers.py
+++ b/tensorflow/python/estimator/canned/optimizers.py
@@ -72,6 +72,8 @@ def get_optimizer_instance(opt, learning_rate=None):
raise ValueError(
'Unsupported optimizer name: {}. Supported names are: {}'.format(
opt, tuple(sorted(six.iterkeys(_OPTIMIZER_CLS_NAMES)))))
+ if callable(opt):
+ opt = opt()
if not isinstance(opt, optimizer_lib.Optimizer):
raise ValueError(
'The given object is not an Optimizer instance. Given: {}'.format(opt))
diff --git a/tensorflow/python/estimator/canned/optimizers_test.py b/tensorflow/python/estimator/canned/optimizers_test.py
index ee28756155..eadabdbc49 100644
--- a/tensorflow/python/estimator/canned/optimizers_test.py
+++ b/tensorflow/python/estimator/canned/optimizers_test.py
@@ -28,6 +28,13 @@ from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import rmsprop
+class _TestOptimizer(optimizer_lib.Optimizer):
+
+ def __init__(self):
+ super(_TestOptimizer, self).__init__(
+ use_locking=False, name='TestOptimizer')
+
+
class GetOptimizerInstance(test.TestCase):
def test_unsupported_name(self):
@@ -66,12 +73,6 @@ class GetOptimizerInstance(test.TestCase):
self.assertAlmostEqual(0.1, opt._learning_rate)
def test_object(self):
- class _TestOptimizer(optimizer_lib.Optimizer):
-
- def __init__(self):
- super(_TestOptimizer, self).__init__(
- use_locking=False, name='TestOptimizer')
-
opt = optimizers.get_optimizer_instance(_TestOptimizer())
self.assertIsInstance(opt, _TestOptimizer)
@@ -80,6 +81,23 @@ class GetOptimizerInstance(test.TestCase):
ValueError, 'The given object is not an Optimizer instance'):
optimizers.get_optimizer_instance((1, 2, 3))
+ def test_callable(self):
+ def _optimizer_fn():
+ return _TestOptimizer()
+ opt = optimizers.get_optimizer_instance(_optimizer_fn)
+ self.assertIsInstance(opt, _TestOptimizer)
+
+ def test_lambda(self):
+ opt = optimizers.get_optimizer_instance(lambda: _TestOptimizer()) # pylint: disable=unnecessary-lambda
+ self.assertIsInstance(opt, _TestOptimizer)
+
+ def test_callable_returns_invalid(self):
+ def _optimizer_fn():
+ return (1, 2, 3)
+ with self.assertRaisesRegexp(
+ ValueError, 'The given object is not an Optimizer instance'):
+ optimizers.get_optimizer_instance(_optimizer_fn)
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/python/estimator/estimator.py b/tensorflow/python/estimator/estimator.py
index 8df75d9eee..253716b43e 100644
--- a/tensorflow/python/estimator/estimator.py
+++ b/tensorflow/python/estimator/estimator.py
@@ -38,6 +38,7 @@ from tensorflow.python.estimator import run_config
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.estimator.export import export as export_helpers
from tensorflow.python.estimator.export import export_output
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
@@ -575,7 +576,9 @@ class Estimator(object):
allowed_overrides = set([
'_call_input_fn', '_create_global_step',
'_convert_train_steps_to_hooks', '_convert_eval_steps_to_hooks',
- '_tf_api_names', '_estimator_api_names', '_estimator_api_constants',
+ '_tf_api_names', '_tf_api_names_v1', '_estimator_api_names',
+ '_estimator_api_names_v1', '_estimator_api_constants',
+ '_estimator_api_constants_v1',
'_validate_features_in_predict_input',
'_call_model_fn', '_add_meta_graph_for_mode'
])
@@ -848,7 +851,8 @@ class Estimator(object):
strip_default_attrs,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
- export_tags=None):
+ export_tags=None,
+ check_variables=True):
# pylint: disable=line-too-long
"""Loads variables and adds them along with a MetaGraphDef for saving.
@@ -869,6 +873,10 @@ class Estimator(object):
mode: tf.estimator.ModeKeys value indicating which mode will be exported.
export_tags: The set of tags with which to save `MetaGraphDef`. If None,
a default set will be selected to matched the passed mode.
+ check_variables: bool, whether to check that the checkpoint has all variables.
+
+ Raises:
+ ValueError: if `save_variables` is `True` and `check_variables` is `False`.
"""
# pylint: enable=line-too-long
if export_tags is None:
@@ -909,16 +917,20 @@ class Estimator(object):
# SavedModel for restore later.
graph_saver = estimator_spec.scaffold.saver or saver.Saver(sharded=True)
- try:
- graph_saver.restore(session, checkpoint_path)
- except errors.NotFoundError as e:
- msg = ('Could not load all requested variables from the checkpoint. '
- 'Please make sure your model_fn does not expect variables '
- 'that were not saved in the checkpoint.\n\n'
- 'Encountered error with mode `{}` while restoring checkpoint '
- 'from: `{}`. Full Traceback:\n\n{}').format(
- mode, checkpoint_path, e)
- raise ValueError(msg)
+ if save_variables and not check_variables:
+ raise ValueError('If `save_variables` is `True`, `check_variables` '
+ 'must not be `False`.')
+ if check_variables:
+ try:
+ graph_saver.restore(session, checkpoint_path)
+ except errors.NotFoundError as e:
+ msg = ('Could not load all requested variables from checkpoint. '
+ 'Please make sure your model_fn does not expect variables '
+ 'that were not saved in the checkpoint.\n\n'
+ 'Encountered error with mode `{}` while restoring '
+ 'checkpoint from: `{}`. Full Traceback:\n\n{}').format(
+ mode, checkpoint_path, e)
+ raise ValueError(msg)
# We add the train op explicitly for now, so that we don't have to
# change the Builder public interface. Note that this is a no-op
@@ -1174,25 +1186,73 @@ class Estimator(object):
Loss from training
"""
self._distribution.configure(self._session_config)
+
+ # TODO(sourabhbajaj): Remove this hack once we migrate the other strategies
+ # to use the new API
+ is_tpu_strategy = self._distribution.__class__.__name__ == 'TPUStrategy'
+
worker_hooks = []
with ops.Graph().as_default() as g:
with self._distribution.scope():
random_seed.set_random_seed(self._config.tf_random_seed)
- features, labels, input_hooks = (
- self._get_features_and_labels_from_input_fn(
- input_fn, model_fn_lib.ModeKeys.TRAIN))
- worker_hooks.extend(input_hooks)
- global_step_tensor = self._create_and_assert_global_step(g)
- # we want to add to the global collection in the main thread not the
- # tower threads.
- ops.add_to_collection(training_util.GLOBAL_STEP_READ_KEY,
- self._distribution.read_var(global_step_tensor))
- grouped_estimator_spec = self._distribution.call_for_each_tower(
- self._call_model_fn,
- features,
- labels, # although this will be None it seems
- model_fn_lib.ModeKeys.TRAIN,
- self.config)
+
+ if is_tpu_strategy:
+ # Create the iterator for run_on_dataset function
+ # TODO(sourabhbajaj): refactor this out to call a function on the
+ # strategy
+ dataset = self._distribution.distribute_dataset(
+ lambda: self._call_input_fn(input_fn, # pylint: disable=g-long-lambda
+ model_fn_lib.ModeKeys.TRAIN))
+ iterator = dataset.make_initializable_iterator()
+ worker_hooks.append(
+ estimator_util._DatasetInitializerHook(iterator)) # pylint: disable=protected-access
+
+ global_step_tensor = self._create_and_assert_global_step(g)
+ # we want to add to the global collection in the main thread not the
+ # tower threads.
+ ops.add_to_collection(training_util.GLOBAL_STEP_READ_KEY,
+ self._distribution.read_var(global_step_tensor))
+
+ # Create a step_fn from the train_op of grouped_estimator_spec
+ def step_fn(ctx, inputs):
+ """A single step that is passed to run_on_dataset."""
+ features, labels = inputs
+ estimator_spec = self._distribution.call_for_each_tower(
+ self._call_model_fn,
+ features,
+ labels,
+ model_fn_lib.ModeKeys.TRAIN,
+ self.config)
+ ctx.last_step_outputs = estimator_spec.loss
+ ctx.non_tensor_outputs = {'estimator_spec': estimator_spec}
+ with ops.control_dependencies([estimator_spec.train_op]):
+ return array_ops.identity(estimator_spec.loss)
+
+ # Create new train_op post graph rewrites
+ # TODO(sourabhbajaj): Make sure train_steps and tpu_iterations
+ # work correctly. Currently hardcoded at 2
+ initial_training_loss = constant_op.constant(1e7)
+ distributed_train_op, tpu_result, ctx = \
+ self._distribution._run_steps_on_dataset( # pylint: disable=protected-access
+ step_fn, iterator, iterations=2,
+ initial_loop_values=initial_training_loss)
+ grouped_estimator_spec = ctx.non_tensor_outputs['estimator_spec']
+ else:
+ features, labels, input_hooks = (
+ self._get_features_and_labels_from_input_fn(
+ input_fn, model_fn_lib.ModeKeys.TRAIN))
+ worker_hooks.extend(input_hooks)
+ global_step_tensor = self._create_and_assert_global_step(g)
+ # we want to add to the global collection in the main thread not the
+ # tower threads.
+ ops.add_to_collection(training_util.GLOBAL_STEP_READ_KEY,
+ self._distribution.read_var(global_step_tensor))
+ grouped_estimator_spec = self._distribution.call_for_each_tower(
+ self._call_model_fn,
+ features,
+ labels, # although this will be None it seems
+ model_fn_lib.ModeKeys.TRAIN,
+ self.config)
# TODO(anjalisridhar): Figure out how to resolve the following scaffold
# parameters: init_feed_dict, init_fn.
@@ -1278,13 +1338,28 @@ class Estimator(object):
training_chief_hooks = get_hooks_from_the_first_device(
grouped_estimator_spec.training_chief_hooks)
+ # TODO(sourabhbajaj): Merge the two code paths once we can
+ # handle per device variables correctly in reduce and can output
+ # the loss scaler.
+ if is_tpu_strategy:
+ loss = self._distribution.unwrap(
+ self._distribution.reduce(distribute_lib.get_loss_reduction(),
+ tpu_result)[0])[0]
+ worker_hooks.append(
+ estimator_util.StrategyInitFinalizeHook(
+ self._distribution.get_initialization_ops,
+ self._distribution.get_finalize_ops))
+ else:
+ loss = self._distribution.unwrap(
+ self._distribution.reduce(distribute_lib.get_loss_reduction(),
+ grouped_estimator_spec.loss,
+ destinations='/device:CPU:0'))[0]
+ distributed_train_op = grouped_estimator_spec.train_op
+
estimator_spec = model_fn_lib.EstimatorSpec(
mode=grouped_estimator_spec.mode,
- loss=self._distribution.unwrap(
- self._distribution.reduce(distribute_lib.get_loss_reduction(),
- grouped_estimator_spec.loss,
- destinations='/device:CPU:0'))[0],
- train_op=self._distribution.group(grouped_estimator_spec.train_op),
+ loss=loss,
+ train_op=self._distribution.group(distributed_train_op),
training_hooks=training_hooks,
training_chief_hooks=training_chief_hooks,
scaffold=scaffold)
diff --git a/tensorflow/python/estimator/estimator_test.py b/tensorflow/python/estimator/estimator_test.py
index 733c7fb95d..495d019f26 100644
--- a/tensorflow/python/estimator/estimator_test.py
+++ b/tensorflow/python/estimator/estimator_test.py
@@ -38,6 +38,7 @@ from tensorflow.python.estimator.export import export_output
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
@@ -1296,6 +1297,31 @@ class EstimatorEvaluateTest(test.TestCase):
dummy_input_fn, steps=1, checkpoint_path=est1.latest_checkpoint())
self.assertEqual(5, scores['global_step'])
+ def test_wrong_shape_throws_reasonable_error(self):
+ """Make sure we are helpful when model_fns change. See b/110263146."""
+ def _get_model_fn(val=1):
+ def _model_fn(features, labels, mode):
+ del features, labels # unused
+ variables.Variable(val, name='weight')
+ return model_fn_lib.EstimatorSpec(
+ mode=mode,
+ predictions=constant_op.constant([[1.]]),
+ loss=constant_op.constant(0.),
+ train_op=state_ops.assign_add(training.get_global_step(), 1))
+ return _model_fn
+
+ model_fn_1 = _get_model_fn()
+ model_fn_2 = _get_model_fn(val=[1])
+
+ est1 = estimator.Estimator(model_fn=model_fn_1)
+ est1.train(dummy_input_fn, steps=5)
+ est2 = estimator.Estimator(
+ model_fn=model_fn_2, model_dir=est1.model_dir)
+
+ expected_msg = 'Restoring from checkpoint failed.*a mismatch between'
+ with self.assertRaisesRegexp(errors.InvalidArgumentError, expected_msg):
+ est2.train(dummy_input_fn, steps=1)
+
def test_scaffold_is_used(self):
def _model_fn_scaffold(features, labels, mode):
@@ -2278,6 +2304,43 @@ class EstimatorExportTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, err_regex):
est._export_all_saved_models(export_dir_base, input_receiver_fn_map)
+ def test_export_all_saved_models_metric_operation(self):
+ """Ensures metrics ops.Operations can be expoerted (b/109740581)."""
+
+ def _model_fn(features, labels, mode):
+ del features, labels # Unused
+ metrics = {'metrics': (constant_op.constant([0]),
+ control_flow_ops.no_op())}
+ return model_fn_lib.EstimatorSpec(
+ mode,
+ predictions=constant_op.constant(10.),
+ loss=constant_op.constant(1.),
+ train_op=state_ops.assign_add(training.get_global_step(), 1),
+ eval_metric_ops=metrics)
+
+ tmpdir = tempfile.mkdtemp()
+ est = estimator.Estimator(model_fn=_model_fn)
+ est.train(input_fn=dummy_input_fn, steps=1)
+
+ # Perform the export.
+ export_dir_base = os.path.join(
+ compat.as_bytes(tmpdir), compat.as_bytes('metric_operation_export'))
+
+ input_receiver_fn_map = {
+ model_fn_lib.ModeKeys.EVAL: _get_supervised_input_receiver_fn()}
+
+ export_dir = est._export_all_saved_models(
+ export_dir_base, input_receiver_fn_map)
+
+ # Restore, to validate that the export was well-formed.
+ with ops.Graph().as_default() as graph:
+ with session.Session(graph=graph) as sess:
+ meta_graph = loader.load(sess, [tag_constants.EVAL], export_dir)
+ sig_outputs = meta_graph.signature_def[
+ model_fn_lib.ModeKeys.EVAL].outputs
+ self.assertEqual(
+ sig_outputs['metrics/update_op'].name, 'metric_op_wrapper:0')
+
def test_export_savedmodel_with_saveables_proto_roundtrip(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
diff --git a/tensorflow/python/estimator/export/export_output.py b/tensorflow/python/estimator/export/export_output.py
index 6c26d29985..20382a58d8 100644
--- a/tensorflow/python/estimator/export/export_output.py
+++ b/tensorflow/python/estimator/export/export_output.py
@@ -23,6 +23,7 @@ import abc
import six
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import signature_def_utils
@@ -338,8 +339,16 @@ class _SupervisedOutput(ExportOutput):
raise ValueError(
'{} update_op must be a Tensor or Operation; got {}.'.format(
key, metric_op))
+
+ # We must wrap any ops in a Tensor before export, as the SignatureDef
+ # proto expects tensors only. See b/109740581.
+ metric_op_tensor = metric_op
+ if isinstance(metric_op, ops.Operation):
+ with ops.control_dependencies([metric_op]):
+ metric_op_tensor = constant_op.constant([], name='metric_op_wrapper')
+
outputs[val_name] = metric_val
- outputs[op_name] = metric_op
+ outputs[op_name] = metric_op_tensor
return outputs
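The wrapping trick above is self-contained and can be sketched in isolation; a minimal graph-mode illustration (the names match the patch, the snippet itself is illustrative):

```python
import tensorflow as tf

update_op = tf.no_op(name='my_update')  # an Operation, not a Tensor

# SignatureDef protos can only reference tensors, so export an empty
# constant that control-depends on the op: fetching the constant forces
# the op to run first, preserving the update semantics.
with tf.control_dependencies([update_op]):
  metric_op_tensor = tf.constant([], name='metric_op_wrapper')
```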
diff --git a/tensorflow/python/estimator/export/export_output_test.py b/tensorflow/python/estimator/export/export_output_test.py
index b21ba91b0f..d94c764fd7 100644
--- a/tensorflow/python/estimator/export/export_output_test.py
+++ b/tensorflow/python/estimator/export/export_output_test.py
@@ -24,8 +24,10 @@ from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
@@ -335,5 +337,18 @@ class SupervisedOutputTest(test.TestCase):
self.assertTrue("predictions/output1" in sig_def.outputs)
self.assertTrue("features" in sig_def.inputs)
+ def test_metric_op_is_operation(self):
+ """Tests that ops.Operation is wrapped by a tensor for metric_ops."""
+ loss = {"my_loss": constant_op.constant([0])}
+ predictions = {u"output1": constant_op.constant(["foo"])}
+ metrics = {"metrics": (constant_op.constant([0]), control_flow_ops.no_op())}
+
+ outputter = MockSupervisedOutput(loss, predictions, metrics)
+ self.assertEqual(outputter.metrics["metrics/value"], metrics["metrics"][0])
+ self.assertEqual(
+ outputter.metrics["metrics/update_op"].name, "metric_op_wrapper:0")
+ self.assertTrue(
+ isinstance(outputter.metrics["metrics/update_op"], ops.Tensor))
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/python/estimator/inputs/pandas_io.py b/tensorflow/python/estimator/inputs/pandas_io.py
index 57f8e5fd6a..616bcb410f 100644
--- a/tensorflow/python/estimator/inputs/pandas_io.py
+++ b/tensorflow/python/estimator/inputs/pandas_io.py
@@ -18,6 +18,8 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import six
+import uuid
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
@@ -35,6 +37,22 @@ except ImportError:
HAS_PANDAS = False
+def _get_unique_target_key(features, target_column_name):
+ """Returns a key that does not exist in the input DataFrame `features`.
+
+ Args:
+ features: DataFrame
+ target_column_name: Name of the target column, as a `str`.
+
+ Returns:
+ A unique key that can be used to insert the target into
+ features.
+ """
+ if target_column_name in features:
+ target_column_name += '_' + str(uuid.uuid4())
+ return target_column_name
+
+
@estimator_export('estimator.inputs.pandas_input_fn')
def pandas_input_fn(x,
y=None,
@@ -50,7 +68,7 @@ def pandas_input_fn(x,
Args:
x: pandas `DataFrame` object.
- y: pandas `Series` object. `None` if absent.
+ y: pandas `Series` object or `DataFrame`. `None` if absent.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
@@ -60,7 +78,8 @@ def pandas_input_fn(x,
num_threads: Integer, number of threads used for reading and enqueueing. In
order to have a predictable and repeatable order of reading and enqueueing,
such as in prediction and evaluation mode, `num_threads` should be 1.
- target_column: str, name to give the target column `y`.
+ target_column: str, name to give the target column `y`. This parameter
+ is ignored when `y` is a `DataFrame`; the column names of `y` are used instead.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
@@ -79,6 +98,9 @@ def pandas_input_fn(x,
'(it is recommended to set it as True for training); '
'got {}'.format(shuffle))
+ if not isinstance(target_column, six.string_types):
+ raise TypeError('target_column must be a string type')
+
x = x.copy()
if y is not None:
if target_column in x:
@@ -88,7 +110,13 @@ def pandas_input_fn(x,
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
- x[target_column] = y
+ if isinstance(y, pd.DataFrame):
+ y_columns = [(column, _get_unique_target_key(x, column))
+ for column in list(y)]
+ target_column = [v for _, v in y_columns]
+ x[target_column] = y
+ else:
+ x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
@@ -118,7 +146,12 @@ def pandas_input_fn(x,
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
- target = features.pop(target_column)
+ if isinstance(target_column, list):
+ keys = [k for k, _ in y_columns]
+ values = [features.pop(column) for column in target_column]
+ target = {k: v for k, v in zip(keys, values)}
+ else:
+ target = features.pop(target_column)
return features, target
return features
return input_fn
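A hedged usage sketch of the new `DataFrame`-target support (data and column names illustrative):

```python
import numpy as np
import pandas as pd
import tensorflow as tf

x = pd.DataFrame({'a': np.arange(4), 'b': np.arange(4)})
# Passing y as a DataFrame makes the input_fn return a dict of targets
# keyed by y's column names, via the _get_unique_target_key machinery.
y = pd.DataFrame({'a_target': np.arange(4), 'b_target': np.arange(4)})

input_fn = tf.estimator.inputs.pandas_input_fn(
    x, y, batch_size=2, shuffle=False, num_epochs=1)
features, targets = input_fn()
# targets == {'a_target': <Tensor>, 'b_target': <Tensor>}
```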
diff --git a/tensorflow/python/estimator/inputs/pandas_io_test.py b/tensorflow/python/estimator/inputs/pandas_io_test.py
index dcecf6dd61..6f13bc95d2 100644
--- a/tensorflow/python/estimator/inputs/pandas_io_test.py
+++ b/tensorflow/python/estimator/inputs/pandas_io_test.py
@@ -47,6 +47,16 @@ class PandasIoTest(test.TestCase):
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
+ def makeTestDataFrameWithYAsDataFrame(self):
+ index = np.arange(100, 104)
+ a = np.arange(4)
+ b = np.arange(32, 36)
+ a_label = np.arange(10, 14)
+ b_label = np.arange(50, 54)
+ x = pd.DataFrame({'a': a, 'b': b}, index=index)
+ y = pd.DataFrame({'a_target': a_label, 'b_target': b_label}, index=index)
+ return x, y
+
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
@@ -65,6 +75,19 @@ class PandasIoTest(test.TestCase):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
+ def testPandasInputFn_RaisesWhenTargetColumnIsAList(self):
+ if not HAS_PANDAS:
+ return
+
+ x, y = self.makeTestDataFrame()
+
+ with self.assertRaisesRegexp(TypeError,
+ 'target_column must be a string type'):
+ pandas_io.pandas_input_fn(x, y, batch_size=2,
+ shuffle=False,
+ num_epochs=1,
+ target_column=['one', 'two'])
+
def testPandasInputFn_NonBoolShuffle(self):
if not HAS_PANDAS:
return
@@ -90,6 +113,53 @@ class PandasIoTest(test.TestCase):
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
+ def testPandasInputFnWhenYIsDataFrame_ProducesExpectedOutput(self):
+ if not HAS_PANDAS:
+ return
+ with self.test_session() as session:
+ x, y = self.makeTestDataFrameWithYAsDataFrame()
+ input_fn = pandas_io.pandas_input_fn(
+ x, y, batch_size=2, shuffle=False, num_epochs=1)
+
+ features, targets = self.callInputFnOnce(input_fn, session)
+
+ self.assertAllEqual(features['a'], [0, 1])
+ self.assertAllEqual(features['b'], [32, 33])
+ self.assertAllEqual(targets['a_target'], [10, 11])
+ self.assertAllEqual(targets['b_target'], [50, 51])
+
+ def testPandasInputFnYIsDataFrame_HandlesOverlappingColumns(self):
+ if not HAS_PANDAS:
+ return
+ with self.test_session() as session:
+ x, y = self.makeTestDataFrameWithYAsDataFrame()
+ y = y.rename(columns={'a_target': 'a', 'b_target': 'b'})
+ input_fn = pandas_io.pandas_input_fn(
+ x, y, batch_size=2, shuffle=False, num_epochs=1)
+
+ features, targets = self.callInputFnOnce(input_fn, session)
+
+ self.assertAllEqual(features['a'], [0, 1])
+ self.assertAllEqual(features['b'], [32, 33])
+ self.assertAllEqual(targets['a'], [10, 11])
+ self.assertAllEqual(targets['b'], [50, 51])
+
+ def testPandasInputFnYIsDataFrame_HandlesOverlappingColumnsInTargets(self):
+ if not HAS_PANDAS:
+ return
+ with self.test_session() as session:
+ x, y = self.makeTestDataFrameWithYAsDataFrame()
+ y = y.rename(columns={'a_target': 'a', 'b_target': 'a_n'})
+ input_fn = pandas_io.pandas_input_fn(
+ x, y, batch_size=2, shuffle=False, num_epochs=1)
+
+ features, targets = self.callInputFnOnce(input_fn, session)
+
+ self.assertAllEqual(features['a'], [0, 1])
+ self.assertAllEqual(features['b'], [32, 33])
+ self.assertAllEqual(targets['a'], [10, 11])
+ self.assertAllEqual(targets['a_n'], [50, 51])
+
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
diff --git a/tensorflow/python/estimator/keras.py b/tensorflow/python/estimator/keras.py
index 408752d360..076359b503 100644
--- a/tensorflow/python/estimator/keras.py
+++ b/tensorflow/python/estimator/keras.py
@@ -39,12 +39,13 @@ from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_module
-from tensorflow.python.ops import variables as variables_module
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
+from tensorflow.python.training.checkpointable import base as checkpointable
+from tensorflow.python.training.checkpointable import data_structures
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
@@ -69,16 +70,22 @@ def _convert_tensor(x):
return x
-def _any_variable_initialized():
- """Check if any variable has been initialized in the Keras model.
+def _any_weight_initialized(keras_model):
+ """Check if any weights has been initialized in the Keras model.
+
+ Args:
+ keras_model: An instance of compiled keras model.
Returns:
- boolean, True if at least one variable has been initialized, else False.
+ boolean, True if at least one weight has been initialized, else False.
+ Currently Keras initializes all weights at get_session().
"""
- variables = variables_module.global_variables()
- for v in variables:
- if getattr(v, '_keras_initialized', False):
- return True
+ if keras_model is None:
+ return False
+ for layer in keras_model.layers:
+ for weight in layer.weights:
+ if hasattr(weight, '_keras_initialized'):
+ return True
return False
@@ -122,8 +129,8 @@ def _create_ordered_io(keras_model, estimator_io, is_input=True):
'It needs to match one '
'of the following: %s' % ('input' if is_input else 'output', key,
', '.join(keras_io_names)))
- tensors = [_convert_tensor(estimator_io[io_name])
- for io_name in keras_io_names]
+ tensors = [_convert_tensor(estimator_io[io_name])
+ for io_name in keras_io_names]
return tensors
else:
# Plain array.
@@ -241,8 +248,17 @@ def _in_place_subclassed_model_state_restoration(model):
# Restore layers and build attributes
if (hasattr(model, '_original_attributes_cache') and
model._original_attributes_cache is not None):
- model._layers = []
+ # Models have sticky attribute assignment, so we want to be careful to add
+ # back the previous attributes and track Layers by their original names
+ # without adding dependencies on "utility" attributes which Models exempt
+ # when they're constructed.
+ model._layers = data_structures.NoDependency([])
for name, value in model._original_attributes_cache.items():
+ if not isinstance(value, checkpointable.CheckpointableBase):
+ # If this value is not already checkpointable, it's probably that way
+ # for a reason; we don't want to start tracking data structures that the
+ # original Model didn't.
+ value = data_structures.NoDependency(value)
setattr(model, name, value)
model._original_attributes_cache = None
else:
@@ -509,7 +525,7 @@ def model_to_estimator(keras_model=None,
keras_model_fn, model_dir=model_dir, config=config)
# Check if we need to call get_weights:
- if _any_variable_initialized():
+ if _any_weight_initialized(keras_model):
keras_weights = keras_model.get_weights()
# Warn if config passed to estimator tries to update GPUOptions. If a
# session has already been created, the GPUOptions passed to the first
diff --git a/tensorflow/python/estimator/keras_test.py b/tensorflow/python/estimator/keras_test.py
index 5e094ae92b..7a3c5a9bf1 100644
--- a/tensorflow/python/estimator/keras_test.py
+++ b/tensorflow/python/estimator/keras_test.py
@@ -32,7 +32,6 @@ from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
-from tensorflow.python.keras.applications import mobilenet
from tensorflow.python.keras.optimizers import SGD
from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow.python.platform import gfile
@@ -60,9 +59,9 @@ def simple_sequential_model():
return model
-def simple_functional_model():
+def simple_functional_model(activation='relu'):
a = keras.layers.Input(shape=_INPUT_SIZE)
- b = keras.layers.Dense(16, activation='relu')(a)
+ b = keras.layers.Dense(16, activation=activation)(a)
b = keras.layers.Dropout(0.1)(b)
b = keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)
model = keras.models.Model(inputs=[a], outputs=[b])
@@ -204,6 +203,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
+ @test_util.run_in_graph_and_eager_modes
def test_train_with_tf_optimizer(self):
for model_type in ['sequential', 'functional']:
keras_model, (_, _), (
@@ -231,6 +231,7 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
+ @test_util.run_in_graph_and_eager_modes
def test_train_with_subclassed_model(self):
keras_model, (_, _), (
_, _), train_input_fn, eval_input_fn = get_resource_for_simple_model(
@@ -472,21 +473,25 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
est_keras.train(input_fn=invald_output_name_input_fn, steps=100)
def test_custom_objects(self):
- keras_mobile = mobilenet.MobileNet(weights=None)
- keras_mobile.compile(loss='categorical_crossentropy', optimizer='adam')
+
+ def relu6(x):
+ return keras.backend.relu(x, max_value=6)
+
+ keras_model = simple_functional_model(activation=relu6)
+ keras_model.compile(loss='categorical_crossentropy', optimizer='adam')
custom_objects = {
- 'relu6': mobilenet.relu6,
- 'DepthwiseConv2D': mobilenet.DepthwiseConv2D
+ 'relu6': relu6
}
+
with self.assertRaisesRegexp(ValueError, 'relu6'):
with self.test_session():
keras_lib.model_to_estimator(
- keras_model=keras_mobile,
+ keras_model=keras_model,
model_dir=tempfile.mkdtemp(dir=self._base_dir))
with self.test_session():
keras_lib.model_to_estimator(
- keras_model=keras_mobile,
+ keras_model=keras_model,
model_dir=tempfile.mkdtemp(dir=self._base_dir),
custom_objects=custom_objects)
diff --git a/tensorflow/python/estimator/run_config.py b/tensorflow/python/estimator/run_config.py
index 3d60c63b68..b495c4884d 100644
--- a/tensorflow/python/estimator/run_config.py
+++ b/tensorflow/python/estimator/run_config.py
@@ -48,7 +48,8 @@ _DEFAULT_REPLACEABLE_LIST = [
'keep_checkpoint_every_n_hours',
'log_step_count_steps',
'train_distribute',
- 'device_fn'
+ 'device_fn',
+ 'protocol'
]
_SAVE_CKPT_ERR = (
@@ -288,6 +289,10 @@ def _validate_properties(run_config):
message='device_fn must be callable with exactly'
' one argument "op".')
+ _validate('protocol',
+ lambda protocol: protocol in (None, "grpc", "grpc+verbs"),
+ message='protocol should be grpc or grpc+verbs')
+
class TaskType(object):
MASTER = 'master'
@@ -312,7 +317,8 @@ class RunConfig(object):
keep_checkpoint_every_n_hours=10000,
log_step_count_steps=100,
train_distribute=None,
- device_fn=None):
+ device_fn=None,
+ protocol=None):
"""Constructs a RunConfig.
All distributed training related properties `cluster_spec`, `is_chief`,
@@ -436,7 +442,7 @@ class RunConfig(object):
the feature.
log_step_count_steps: The frequency, in number of global steps, that the
global step/sec and the loss will be logged during training.
- train_distribute: an optional instance of
+ train_distribute: An optional instance of
`tf.contrib.distribute.DistributionStrategy`. If specified,
then Estimator will distribute the user's model during training,
according to the policy specified by that strategy.
@@ -444,6 +450,8 @@ class RunConfig(object):
`Operation` and returns the device string. If `None`, defaults to
the device function returned by `tf.train.replica_device_setter`
with round-robin strategy.
+ protocol: An optional argument which specifies the protocol used when
+ starting the server. `None` means default to grpc.
Raises:
ValueError: If both `save_checkpoints_steps` and `save_checkpoints_secs`
@@ -481,11 +489,21 @@ class RunConfig(object):
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
log_step_count_steps=log_step_count_steps,
train_distribute=train_distribute,
- device_fn=device_fn)
+ device_fn=device_fn,
+ protocol=protocol)
self._init_distributed_setting_from_environment_var(tf_config)
- # Get session_config only for distributed mode (cluster_spec is present).
+ self._maybe_overwrite_session_config_for_distributed_training()
+
+ def _maybe_overwrite_session_config_for_distributed_training(self):
+ """Overwrites the session_config for distributed training.
+
+ The default overwrite is optimized for between-graph training. Subclasses
+ should override this method if necessary.
+ """
+ # Get session_config only for between-graph distributed mode (cluster_spec
+ # is present).
if not self._session_config and self._cluster_spec:
RunConfig._replace(
self,
@@ -745,6 +763,11 @@ class RunConfig(object):
"""
return self._train_distribute
+ @property
+ def protocol(self):
+ """Returns the optional protocol value."""
+ return self._protocol
+
def replace(self, **kwargs):
"""Returns a new instance of `RunConfig` replacing specified properties.
@@ -760,7 +783,8 @@ class RunConfig(object):
- `keep_checkpoint_every_n_hours`,
- `log_step_count_steps`,
- `train_distribute`,
- - `device_fn`.
+ - `device_fn`,
+ - `protocol`.
In addition, either `save_checkpoints_steps` or `save_checkpoints_secs`
can be set (should not be both).
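A brief usage sketch of the new option; per the validation above, `protocol` must be `None` (defaulting to grpc), `'grpc'`, or `'grpc+verbs'` (`my_model_fn` is assumed to be defined elsewhere):

```python
import tensorflow as tf

# After this change, RunConfig threads the protocol through to the
# tf.train.Server started for distributed training.
config = tf.estimator.RunConfig(protocol='grpc+verbs')
estimator = tf.estimator.Estimator(model_fn=my_model_fn, config=config)
```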
diff --git a/tensorflow/python/estimator/training.py b/tensorflow/python/estimator/training.py
index 37b123217a..a01b2300dd 100644
--- a/tensorflow/python/estimator/training.py
+++ b/tensorflow/python/estimator/training.py
@@ -278,10 +278,7 @@ def train_and_evaluate(estimator, train_spec, eval_spec):
supported distributed training configuration is between-graph replication.
Overfitting: In order to avoid overfitting, it is recommended to set up the
- training `input_fn` to shuffle the training data properly. It is also
- recommended to train the model a little longer, say multiple epochs, before
- performing evaluation, as the input pipeline starts from scratch for each
- training. It is particularly important for local training and evaluation.
+ training `input_fn` to shuffle the training data properly.
Stop condition: In order to support both distributed and non-distributed
configuration reliably, the only supported stop condition for model
@@ -315,10 +312,10 @@ def train_and_evaluate(estimator, train_spec, eval_spec):
# hidden_units=[1024, 512, 256])
# Input pipeline for train and evaluate.
- def train_input_fn: # returns x, y
+ def train_input_fn(): # returns x, y
# please shuffle the data.
pass
- def eval_input_fn_eval: # returns x, y
+ def eval_input_fn(): # returns x, y
pass
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=1000)
@@ -735,7 +732,8 @@ class _TrainingExecutor(object):
job_name=config.task_type,
task_index=config.task_id,
config=session_config,
- start=False)
+ start=False,
+ protocol=config.protocol)
server.start()
return server
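For reference, `tf.train.Server` already accepts this parameter directly; the patch simply forwards the config value (cluster spec illustrative):

```python
import tensorflow as tf

cluster = tf.train.ClusterSpec({'worker': ['localhost:2222']})
server = tf.train.Server(cluster, job_name='worker', task_index=0,
                         protocol='grpc+verbs', start=False)
server.start()
```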
diff --git a/tensorflow/python/estimator/training_test.py b/tensorflow/python/estimator/training_test.py
index 6bee7cbe83..dc106c7d3b 100644
--- a/tensorflow/python/estimator/training_test.py
+++ b/tensorflow/python/estimator/training_test.py
@@ -472,6 +472,7 @@ class _TrainingExecutorTrainingTest(object):
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
+ protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
@@ -502,6 +503,7 @@ class _TrainingExecutorTrainingTest(object):
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
+ protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
@@ -729,6 +731,7 @@ class TrainingExecutorRunMasterTest(test.TestCase):
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
+ protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
@@ -1481,6 +1484,7 @@ class TrainingExecutorRunPsTest(test.TestCase):
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
+ protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
diff --git a/tensorflow/python/estimator/util.py b/tensorflow/python/estimator/util.py
index 924ca309ff..d4a75478d5 100644
--- a/tensorflow/python/estimator/util.py
+++ b/tensorflow/python/estimator/util.py
@@ -22,6 +22,7 @@ from __future__ import print_function
import os
import time
+from tensorflow.core.protobuf import config_pb2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training
@@ -129,3 +130,24 @@ class _DatasetInitializerHook(training.SessionRunHook):
def after_create_session(self, session, coord):
del coord
session.run(self._initializer)
+
+
+class StrategyInitFinalizeHook(training.SessionRunHook):
+ """Creates a SessionRunHook that initializes and shutsdown devices."""
+
+ def __init__(self, initialization_fn, finalize_fn):
+ self._initialization_fn = initialization_fn
+ self._finalize_fn = finalize_fn
+
+ def begin(self):
+ self._init_ops = self._initialization_fn()
+ self._finalize_ops = self._finalize_fn()
+
+ def after_create_session(self, session, coord):
+ logging.info('Initialize system.')
+ session.run(self._init_ops,
+ options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
+
+ def end(self, session):
+ logging.info('Finalize system.')
+ session.run(self._finalize_ops)
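A hedged wiring sketch for this hook, assuming a strategy object that exposes the two op-producing callables the estimator change passes in (`strategy` and `my_input_fn` are hypothetical):

```python
# Hypothetical usage; `strategy` stands in for a TPU-style distribution
# strategy exposing get_initialization_ops / get_finalize_ops, each of
# which returns a list of ops, as in the estimator change above.
hook = StrategyInitFinalizeHook(strategy.get_initialization_ops,
                                strategy.get_finalize_ops)
estimator.train(input_fn=my_input_fn, hooks=[hook])
```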
diff --git a/tensorflow/python/feature_column/BUILD b/tensorflow/python/feature_column/BUILD
index 295d4ca094..80707030e6 100644
--- a/tensorflow/python/feature_column/BUILD
+++ b/tensorflow/python/feature_column/BUILD
@@ -48,6 +48,39 @@ py_library(
],
)
+py_library(
+ name = "feature_column_v2",
+ srcs = ["feature_column_v2.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:check_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:dtypes",
+ "//tensorflow/python:embedding_ops",
+ "//tensorflow/python:framework_ops",
+ "//tensorflow/python:init_ops",
+ "//tensorflow/python:lookup_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:parsing_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:resource_variable_ops",
+ "//tensorflow/python:sparse_ops",
+ "//tensorflow/python:sparse_tensor",
+ "//tensorflow/python:string_ops",
+ "//tensorflow/python:template",
+ "//tensorflow/python:tensor_shape",
+ "//tensorflow/python:training",
+ "//tensorflow/python:util",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
+ "//tensorflow/python/keras",
+ "//third_party/py/numpy",
+ "@six_archive//:six",
+ ],
+)
+
filegroup(
name = "vocabulary_testdata",
srcs = [
@@ -92,3 +125,38 @@ py_test(
"//tensorflow/python/estimator:numpy_io",
],
)
+
+py_test(
+ name = "feature_column_v2_test",
+ srcs = ["feature_column_v2_test.py"],
+ data = [":vocabulary_testdata"],
+ srcs_version = "PY2AND3",
+ tags = [
+ "no_cuda_on_cpu_tap",
+ "no_pip",
+ ],
+ deps = [
+ ":feature_column_py",
+ ":feature_column_v2",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:constant_op",
+ "//tensorflow/python:dtypes",
+ "//tensorflow/python:errors",
+ "//tensorflow/python:framework_ops",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:lookup_ops",
+ "//tensorflow/python:parsing_ops",
+ "//tensorflow/python:partitioned_variables",
+ "//tensorflow/python:session",
+ "//tensorflow/python:sparse_tensor",
+ "//tensorflow/python:training",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
+ "//tensorflow/python/eager:backprop",
+ "//tensorflow/python/eager:context",
+ "//tensorflow/python/estimator:numpy_io",
+ "//third_party/py/numpy",
+ ],
+)
diff --git a/tensorflow/python/feature_column/feature_column.py b/tensorflow/python/feature_column/feature_column.py
index 40219e4b34..d091d2fe0a 100644
--- a/tensorflow/python/feature_column/feature_column.py
+++ b/tensorflow/python/feature_column/feature_column.py
@@ -2158,7 +2158,7 @@ def _create_categorical_column_weighted_sum(column,
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
- return _safe_embedding_lookup_sparse(
+ return embedding_ops.safe_embedding_lookup_sparse(
weight,
id_tensor,
sparse_weights=weight_tensor,
@@ -2594,7 +2594,7 @@ class _EmbeddingColumn(
})
# Return embedding lookup result.
- return _safe_embedding_lookup_sparse(
+ return embedding_ops.safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
@@ -2736,7 +2736,7 @@ class _SharedEmbeddingColumn(
})
# Return embedding lookup result.
- return _safe_embedding_lookup_sparse(
+ return embedding_ops.safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
@@ -3228,161 +3228,6 @@ def _collect_leaf_level_keys(cross):
return leaf_level_keys
-# TODO(zakaria): Move this to embedding_ops and make it public.
-def _safe_embedding_lookup_sparse(embedding_weights,
- sparse_ids,
- sparse_weights=None,
- combiner='mean',
- default_id=None,
- name=None,
- partition_strategy='div',
- max_norm=None):
- """Lookup embedding results, accounting for invalid IDs and empty features.
-
- The partitioned embedding in `embedding_weights` must all be the same shape
- except for the first dimension. The first dimension is allowed to vary as the
- vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
- may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
- partitioner.
-
- Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
- with non-positive weight. For an entry with no features, the embedding vector
- for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
-
- The ids and weights may be multi-dimensional. Embeddings are always aggregated
- along the last dimension.
-
- Args:
- embedding_weights: A list of `P` float `Tensor`s or values representing
- partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable`
- created by partitioning along dimension 0. The total unpartitioned
- shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
- vocab size and `e_1, ..., e_m` are the embedding dimensions.
- sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
- ids. `d_0` is typically batch size.
- sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
- float weights corresponding to `sparse_ids`, or `None` if all weights
- are be assumed to be 1.0.
- combiner: A string specifying how to combine embedding results for each
- entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
- the default.
- default_id: The id to use for an entry with no features.
- name: A name for this operation (optional).
- partition_strategy: A string specifying the partitioning strategy.
- Currently `"div"` and `"mod"` are supported. Default is `"div"`.
- max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
- combining.
-
-
- Returns:
- Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
-
- Raises:
- ValueError: if `embedding_weights` is empty.
- """
- if embedding_weights is None:
- raise ValueError('Missing embedding_weights %s.' % embedding_weights)
- if isinstance(embedding_weights, variables.PartitionedVariable):
- embedding_weights = list(embedding_weights) # get underlying Variables.
- if not isinstance(embedding_weights, list):
- embedding_weights = [embedding_weights]
- if len(embedding_weights) < 1:
- raise ValueError('Missing embedding_weights %s.' % embedding_weights)
-
- dtype = sparse_weights.dtype if sparse_weights is not None else None
- embedding_weights = [
- ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
- ]
-
- with ops.name_scope(name, 'embedding_lookup',
- embedding_weights + [sparse_ids,
- sparse_weights]) as scope:
- # Reshape higher-rank sparse ids and weights to linear segment ids.
- original_shape = sparse_ids.dense_shape
- original_rank_dim = sparse_ids.dense_shape.get_shape()[0]
- original_rank = (
- array_ops.size(original_shape)
- if original_rank_dim.value is None
- else original_rank_dim.value)
- sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
- math_ops.reduce_prod(
- array_ops.slice(original_shape, [0], [original_rank - 1])),
- array_ops.gather(original_shape, original_rank - 1)])
- if sparse_weights is not None:
- sparse_weights = sparse_tensor_lib.SparseTensor(
- sparse_ids.indices,
- sparse_weights.values, sparse_ids.dense_shape)
-
- # Prune invalid ids and weights.
- sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
- if combiner != 'sum':
- sparse_ids, sparse_weights = _prune_invalid_weights(
- sparse_ids, sparse_weights)
-
- # Fill in dummy values for empty features, if necessary.
- sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
- default_id or
- 0)
- if sparse_weights is not None:
- sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
-
- result = embedding_ops.embedding_lookup_sparse(
- embedding_weights,
- sparse_ids,
- sparse_weights,
- combiner=combiner,
- partition_strategy=partition_strategy,
- name=None if default_id is None else scope,
- max_norm=max_norm)
-
- if default_id is None:
- # Broadcast is_row_empty to the same shape as embedding_lookup_result,
- # for use in Select.
- is_row_empty = array_ops.tile(
- array_ops.reshape(is_row_empty, [-1, 1]),
- array_ops.stack([1, array_ops.shape(result)[1]]))
-
- result = array_ops.where(is_row_empty,
- array_ops.zeros_like(result),
- result,
- name=scope)
-
- # Reshape back from linear ids back into higher-dimensional dense result.
- final_result = array_ops.reshape(
- result,
- array_ops.concat([
- array_ops.slice(
- math_ops.cast(original_shape, dtypes.int32), [0],
- [original_rank - 1]),
- array_ops.slice(array_ops.shape(result), [1], [-1])
- ], 0))
- final_result.set_shape(tensor_shape.unknown_shape(
- (original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
- return final_result
-
-
-def _prune_invalid_ids(sparse_ids, sparse_weights):
- """Prune invalid IDs (< 0) from the input ids and weights."""
- is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
- if sparse_weights is not None:
- is_id_valid = math_ops.logical_and(
- is_id_valid,
- array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
- sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
- if sparse_weights is not None:
- sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
- return sparse_ids, sparse_weights
-
-
-def _prune_invalid_weights(sparse_ids, sparse_weights):
- """Prune invalid weights (< 0) from the input ids and weights."""
- if sparse_weights is not None:
- is_weights_valid = math_ops.greater(sparse_weights.values, 0)
- sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
- sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
- return sparse_ids, sparse_weights
-
-
class _IndicatorColumn(_DenseColumn, _SequenceDenseColumn,
collections.namedtuple('_IndicatorColumn',
['categorical_column'])):
@@ -3419,10 +3264,14 @@ class _IndicatorColumn(_DenseColumn, _SequenceDenseColumn,
sp_ids=id_tensor,
sp_values=weight_tensor,
vocab_size=int(self._variable_shape[-1]))
- # Remove (?, -1) index
+ # Remove (?, -1) index.
weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],
weighted_column.dense_shape)
- return sparse_ops.sparse_tensor_to_dense(weighted_column)
+ # Use scatter_nd to merge duplicated indices if they exist,
+ # instead of sparse_tensor_to_dense.
+ return array_ops.scatter_nd(weighted_column.indices,
+ weighted_column.values,
+ weighted_column.dense_shape)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
id_tensor, default_value=-1)
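The reason `scatter_nd` fixes the duplicated-id case (as exercised by the test change below) is that it accumulates values at repeated indices; a small standalone illustration:

```python
import tensorflow as tf

indices = tf.constant([[0, 0], [0, 1], [0, 0]])  # [0, 0] appears twice
values = tf.constant([2., 4., 1.])
dense = tf.scatter_nd(indices, values, shape=[1, 2])
# dense evaluates to [[3., 4.]]: the two updates at [0, 0] are summed,
# whereas sparse_tensor_to_dense would reject the duplicate index.
```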
diff --git a/tensorflow/python/feature_column/feature_column_test.py b/tensorflow/python/feature_column/feature_column_test.py
index 511205451c..5bb47bfa47 100644
--- a/tensorflow/python/feature_column/feature_column_test.py
+++ b/tensorflow/python/feature_column/feature_column_test.py
@@ -4580,12 +4580,12 @@ class IndicatorColumnTest(test.TestCase):
weights = fc.weighted_categorical_column(ids, 'weights')
indicator = fc.indicator_column(weights)
features = {
- 'ids': constant_op.constant([['c', 'b', 'a']]),
- 'weights': constant_op.constant([[2., 4., 6.]])
+ 'ids': constant_op.constant([['c', 'b', 'a', 'c']]),
+ 'weights': constant_op.constant([[2., 4., 6., 1.]])
}
indicator_tensor = _transform_features(features, [indicator])[indicator]
with _initialized_session():
- self.assertAllEqual([[6., 4., 2.]], indicator_tensor.eval())
+ self.assertAllEqual([[6., 4., 3.]], indicator_tensor.eval())
def test_transform_with_missing_value_in_weighted_column(self):
# Github issue 12583
diff --git a/tensorflow/python/feature_column/feature_column_v2.py b/tensorflow/python/feature_column/feature_column_v2.py
new file mode 100644
index 0000000000..b4dd23f58d
--- /dev/null
+++ b/tensorflow/python/feature_column/feature_column_v2.py
@@ -0,0 +1,3600 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""This API defines FeatureColumn abstraction.
+
+FeatureColumns provide a high level abstraction for ingesting and representing
+features. FeatureColumns are also the primary way of encoding features for
+canned @{tf.estimator.Estimator}s.
+
+When using FeatureColumns with `Estimators`, the type of feature column you
+should choose depends on (1) the feature type and (2) the model type.
+
+1. Feature type:
+
+ * Continuous features can be represented by `numeric_column`.
+ * Categorical features can be represented by any `categorical_column_with_*`
+ column:
+ - `categorical_column_with_vocabulary_list`
+ - `categorical_column_with_vocabulary_file`
+ - `categorical_column_with_hash_bucket`
+ - `categorical_column_with_identity`
+ - `weighted_categorical_column`
+
+2. Model type:
+
+ * Deep neural network models (`DNNClassifier`, `DNNRegressor`).
+
+ Continuous features can be directly fed into deep neural network models.
+
+ age_column = numeric_column("age")
+
+ To feed sparse features into DNN models, wrap the column with
+ `embedding_column` or `indicator_column`. `indicator_column` is recommended
+ for features with only a few possible values. For features with many
+ possible values, `embedding_column` is recommended to reduce the size of
+ your model.
+
+ embedded_dept_column = embedding_column(
+ categorical_column_with_vocabulary_list(
+ "department", ["math", "philosophy", ...]), dimension=10)
+
+ * Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
+
+ Sparse features can be fed directly into linear models. They behave like an
+ indicator column but with an efficient implementation.
+
+ dept_column = categorical_column_with_vocabulary_list("department",
+ ["math", "philosophy", "english"])
+
+ It is recommended that continuous features be bucketized before being
+ fed into linear models.
+
+ bucketized_age_column = bucketized_column(
+ source_column=age_column,
+ boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
+
+ Sparse features can be crossed (also known as conjoined or combined) in
+ order to form non-linearities, and then fed into linear models.
+
+ cross_dept_age_column = crossed_column(
+ columns=["department", bucketized_age_column],
+ hash_bucket_size=1000)
+
+Example of building canned `Estimator`s using FeatureColumns:
+
+ ```python
+ # Define features and transformations
+ deep_feature_columns = [age_column, embedded_dept_column]
+ wide_feature_columns = [dept_column, bucketized_age_column,
+ cross_dept_age_column]
+
+ # Build deep model
+ estimator = DNNClassifier(
+ feature_columns=deep_feature_columns,
+ hidden_units=[500, 250, 50])
+ estimator.train(...)
+
+ # Or build a wide model
+ estimator = LinearClassifier(
+ feature_columns=wide_feature_columns)
+ estimator.train(...)
+
+ # Or build a wide and deep model!
+ estimator = DNNLinearCombinedClassifier(
+ linear_feature_columns=wide_feature_columns,
+ dnn_feature_columns=deep_feature_columns,
+ dnn_hidden_units=[500, 250, 50])
+ estimator.train(...)
+ ```
+
+
+FeatureColumns can also be transformed into a generic input layer for
+custom models using `input_layer`.
+
+Example of building a model using FeatureColumns; this can be used in a
+`model_fn` which is given to the @{tf.estimator.Estimator}:
+
+ ```python
+ # Building model via layers
+
+ deep_feature_columns = [age_column, embedded_dept_column]
+ columns_to_tensor = parse_feature_columns_from_examples(
+ serialized=my_data,
+ feature_columns=deep_feature_columns)
+ first_layer = input_layer(
+ features=columns_to_tensor,
+ feature_columns=deep_feature_columns)
+ second_layer = fully_connected(first_layer, ...)
+ ```
+
+NOTE: Functions prefixed with "_" indicate experimental or private parts of
+the API subject to change, and should not be relied upon!
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import abc
+import collections
+import math
+
+import numpy as np
+import six
+
+
+from tensorflow.python.eager import context
+from tensorflow.python.feature_column import feature_column as fc_old
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.keras.engine import training
+from tensorflow.python.layers import base
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import check_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import embedding_ops
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import lookup_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn_ops
+from tensorflow.python.ops import parsing_ops
+from tensorflow.python.ops import resource_variable_ops
+from tensorflow.python.ops import sparse_ops
+from tensorflow.python.ops import string_ops
+from tensorflow.python.ops import template
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import gfile
+from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.training import checkpoint_utils
+from tensorflow.python.util import nest
+
+
+def _internal_input_layer(features,
+ feature_columns,
+ weight_collections=None,
+ trainable=True,
+ cols_to_vars=None,
+ scope=None):
+ """See input_layer. `scope` is a name or variable scope to use."""
+
+ feature_columns = fc_old._normalize_feature_columns(feature_columns) # pylint: disable=protected-access
+ for column in feature_columns:
+ if not isinstance(column, fc_old._DenseColumn): # pylint: disable=protected-access
+ raise ValueError(
+ 'Items of feature_columns must be a _DenseColumn. '
+ 'You can wrap a categorical column with an '
+ 'embedding_column or indicator_column. Given: {}'.format(column))
+ weight_collections = list(weight_collections or [])
+ if ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections:
+ weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
+ if ops.GraphKeys.MODEL_VARIABLES not in weight_collections:
+ weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)
+
+ # A non-None `scope` allows for variable reuse when, e.g., this function
+ # is wrapped by a `make_template`.
+ with variable_scope.variable_scope(
+ scope, default_name='input_layer', values=features.values()):
+ builder = fc_old._LazyBuilder(features) # pylint: disable=protected-access
+ output_tensors = []
+ ordered_columns = []
+ for column in sorted(feature_columns, key=lambda x: x.name):
+ ordered_columns.append(column)
+ with variable_scope.variable_scope(
+ None, default_name=column._var_scope_name): # pylint: disable=protected-access
+ tensor = column._get_dense_tensor( # pylint: disable=protected-access
+ builder,
+ weight_collections=weight_collections,
+ trainable=trainable)
+ num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access
+ batch_size = array_ops.shape(tensor)[0]
+ output_tensors.append(
+ array_ops.reshape(tensor, shape=(batch_size, num_elements)))
+ if cols_to_vars is not None:
+ # Retrieve any variables created (some _DenseColumn's don't create
+ # variables, in which case an empty list is returned).
+ cols_to_vars[column] = ops.get_collection(
+ ops.GraphKeys.GLOBAL_VARIABLES,
+ scope=variable_scope.get_variable_scope().name)
+ _verify_static_batch_size_equality(output_tensors, ordered_columns)
+ return array_ops.concat(output_tensors, 1)
+
+
+def input_layer(features,
+ feature_columns,
+ weight_collections=None,
+ trainable=True,
+ cols_to_vars=None):
+ """Returns a dense `Tensor` as input layer based on given `feature_columns`.
+
+ Generally a single example in training data is described with FeatureColumns.
+ At the first layer of the model, this column oriented data should be converted
+ to a single `Tensor`.
+
+ Example:
+
+ ```python
+ price = numeric_column('price')
+ keywords_embedded = embedding_column(
+ categorical_column_with_hash_bucket("keywords", 10K), dimensions=16)
+ columns = [price, keywords_embedded, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ dense_tensor = input_layer(features, columns)
+ for units in [128, 64, 32]:
+ dense_tensor = tf.layers.dense(dense_tensor, units, tf.nn.relu)
+ prediction = tf.layers.dense(dense_tensor, 1)
+ ```
+
+ Args:
+ features: A mapping from key to tensors. `_FeatureColumn`s look up via these
+ keys. For example `numeric_column('price')` will look at 'price' key in
+ this dict. Values can be a `SparseTensor` or a `Tensor`, depending on the
+ corresponding `_FeatureColumn`.
+ feature_columns: An iterable containing the FeatureColumns to use as inputs
+ to your model. All items should be instances of classes derived from
+ `_DenseColumn` such as `numeric_column`, `embedding_column`,
+ `bucketized_column`, `indicator_column`. If you have categorical features,
+ you can wrap them with an `embedding_column` or `indicator_column`.
+ weight_collections: A list of collection names to which the Variable will be
+ added. Note that variables will also be added to collections
+ `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
+ trainable: If `True` also add the variable to the graph collection
+ `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
+ cols_to_vars: If not `None`, must be a dictionary that will be filled with a
+ mapping from `_FeatureColumn` to list of `Variable`s. For example, after
+ the call, we might have cols_to_vars =
+ {_EmbeddingColumn(
+ categorical_column=_HashedCategoricalColumn(
+ key='sparse_feature', hash_bucket_size=5, dtype=tf.string),
+ dimension=10): [<tf.Variable 'some_variable:0' shape=(5, 10),
+ <tf.Variable 'some_variable:1' shape=(5, 10)]}
+ If a column creates no variables, its value will be an empty list.
+
+ Returns:
+ A `Tensor` which represents input layer of a model. Its shape
+ is (batch_size, first_layer_dimension) and its dtype is `float32`.
+ first_layer_dimension is determined based on given `feature_columns`.
+
+ Raises:
+ ValueError: if an item in `feature_columns` is not a `_DenseColumn`.
+ """
+ return _internal_input_layer(features, feature_columns, weight_collections,
+ trainable, cols_to_vars)
+
+
+# TODO(akshayka): InputLayer should be a subclass of Layer, and it
+# should implement the logic in input_layer using Layer's build-and-call
+# paradigm; input_layer should create an instance of InputLayer and
+# return the result of invoking its apply method, just as functional layers do.
+class InputLayer(object):
+ """An object-oriented version of `input_layer` that reuses variables."""
+
+ def __init__(self,
+ feature_columns,
+ weight_collections=None,
+ trainable=True,
+ cols_to_vars=None):
+ """See `input_layer`."""
+
+ self._feature_columns = feature_columns
+ self._weight_collections = weight_collections
+ self._trainable = trainable
+ self._cols_to_vars = cols_to_vars
+ self._input_layer_template = template.make_template(
+ 'feature_column_input_layer',
+ _internal_input_layer,
+ create_scope_now_=True)
+ self._scope = self._input_layer_template.variable_scope
+
+ def __call__(self, features):
+ return self._input_layer_template(
+ features=features,
+ feature_columns=self._feature_columns,
+ weight_collections=self._weight_collections,
+ trainable=self._trainable,
+ cols_to_vars=None,
+ scope=self._scope)
+
+ @property
+ def non_trainable_variables(self):
+ return self._input_layer_template.non_trainable_variables
+
+ @property
+ def non_trainable_weights(self):
+ return self._input_layer_template.non_trainable_weights
+
+ @property
+ def trainable_variables(self):
+ return self._input_layer_template.trainable_variables
+
+ @property
+ def trainable_weights(self):
+ return self._input_layer_template.trainable_weights
+
+ @property
+ def variables(self):
+ return self._input_layer_template.variables
+
+ @property
+ def weights(self):
+ return self._input_layer_template.weights
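Since `InputLayer` is new in this file, a hedged usage sketch may help (the column definitions are illustrative, and `train_features`/`eval_features` are assumed to be feature dicts); the template ensures both calls share the same variables:

```python
import tensorflow as tf

# Hypothetical usage of the object-oriented input_layer above.
keywords = tf.feature_column.categorical_column_with_hash_bucket(
    'keywords', hash_bucket_size=100)
columns = [tf.feature_column.embedding_column(keywords, dimension=8)]
input_layer = InputLayer(feature_columns=columns)

train_dense = input_layer(train_features)  # creates embedding variables
eval_dense = input_layer(eval_features)    # reuses the same variables
```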
+
+
+def linear_model(features,
+ feature_columns,
+ units=1,
+ sparse_combiner='sum',
+ weight_collections=None,
+ trainable=True,
+ cols_to_vars=None):
+ """Returns a linear prediction `Tensor` based on given `feature_columns`.
+
+ This function generates a weighted sum based on output dimension `units`.
+ Weighted sum refers to logits in classification problems. It refers to the
+ prediction itself for linear regression problems.
+
+ Note on supported columns: `linear_model` treats categorical columns as
+ `indicator_column`s. To be specific, assume the input as `SparseTensor` looks
+ like:
+
+ ```python
+ shape = [2, 2]
+ {
+ [0, 0]: "a"
+ [1, 0]: "b"
+ [1, 1]: "c"
+ }
+ ```
+ `linear_model` assigns weights for the presence of "a", "b", "c" implicitly,
+ just like `indicator_column`, while `input_layer` explicitly requires wrapping
+ each of categorical columns with an `embedding_column` or an
+ `indicator_column`.
+
+ Example of usage:
+
+ ```python
+ price = numeric_column('price')
+ price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])
+ keywords = categorical_column_with_hash_bucket("keywords", 10K)
+ keywords_price = crossed_column('keywords', price_buckets, ...)
+ columns = [price_buckets, keywords, keywords_price ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ prediction = linear_model(features, columns)
+ ```
+
+ Args:
+ features: A mapping from key to tensors. `_FeatureColumn`s look up via these
+ keys. For example `numeric_column('price')` will look at 'price' key in
+ this dict. Values are `Tensor` or `SparseTensor` depending on
+ corresponding `_FeatureColumn`.
+ feature_columns: An iterable containing the FeatureColumns to use as inputs
+ to your model. All items should be instances of classes derived from
+ `_FeatureColumn`s.
+ units: An integer, dimensionality of the output space. Default value is 1.
+ sparse_combiner: A string specifying how to reduce if a categorical column
+ is multivalent. Except for `numeric_column`, almost all columns passed to
+ `linear_model` are treated as categorical columns. It combines each
+ categorical column independently. Currently "mean", "sqrtn" and "sum" are
+ supported, with "sum" the default for linear model. "sqrtn" often achieves
+ good accuracy, in particular with bag-of-words columns.
+ * "sum": do not normalize features in the column
+ * "mean": do l1 normalization on features in the column
+ * "sqrtn": do l2 normalization on features in the column
+ For example, for two features represented as the categorical columns:
+
+ ```python
+ # Feature 1
+
+ shape = [2, 2]
+ {
+ [0, 0]: "a"
+ [0, 1]: "b"
+ [1, 0]: "c"
+ }
+
+ # Feature 2
+
+ shape = [2, 3]
+ {
+ [0, 0]: "d"
+ [1, 0]: "e"
+ [1, 1]: "f"
+ [1, 2]: "g"
+ }
+ ```
+ with `sparse_combiner` as "mean", the linear model outputs are conceptually:
+ ```
+ y_0 = 1.0 / 2.0 * (w_a + w_b) + w_c + b_0
+ y_1 = w_d + 1.0 / 3.0 * (w_e + w_f + w_g) + b_1
+ ```
+ where `y_i` is the output, `b_i` is the bias, and `w_x` is the weight
+ assigned to the presence of `x` in the input features.
+ weight_collections: A list of collection names to which the Variable will be
+ added. Note that variables will also be added to collections
+ `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
+ trainable: If `True` also add the variable to the graph collection
+ `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
+ cols_to_vars: If not `None`, must be a dictionary that will be filled with a
+ mapping from `_FeatureColumn` to associated list of `Variable`s. For
+ example, after the call, we might have cols_to_vars = {
+ _NumericColumn(
+ key='numeric_feature1', shape=(1,):
+ [<tf.Variable 'linear_model/price2/weights:0' shape=(1, 1)>],
+ 'bias': [<tf.Variable 'linear_model/bias_weights:0' shape=(1,)>],
+ _NumericColumn(
+ key='numeric_feature2', shape=(2,)):
+ [<tf.Variable 'linear_model/price1/weights:0' shape=(2, 1)>]}
+ If a column creates no variables, its value will be an empty list. Note
+ that cols_to_vars will also contain a string key 'bias' that maps to a
+ list of Variables.
+
+ Returns:
+ A `Tensor` which represents predictions/logits of a linear model. Its shape
+ is (batch_size, units) and its dtype is `float32`.
+
+ Raises:
+ ValueError: if an item in `feature_columns` is neither a `_DenseColumn`
+ nor `_CategoricalColumn`.
+ """
+ with variable_scope.variable_scope(None, 'linear_model') as vs:
+ model_name = _strip_leading_slashes(vs.name)
+ linear_model_layer = _LinearModel(
+ feature_columns=feature_columns,
+ units=units,
+ sparse_combiner=sparse_combiner,
+ weight_collections=weight_collections,
+ trainable=trainable,
+ name=model_name)
+ retval = linear_model_layer(features) # pylint: disable=not-callable
+ if cols_to_vars is not None:
+ cols_to_vars.update(linear_model_layer.cols_to_vars())
+ return retval
+
+
+def _add_to_collections(var, weight_collections):
+ """Adds a var to the list of weight_collections provided.
+
+ Handles the case for partitioned and non-partitioned variables.
+
+ Args:
+ var: A variable or Partitioned Variable.
+ weight_collections: List of collections to add variable to.
+ """
+ for weight_collection in weight_collections:
+ # The layer self.add_variable call already adds it to GLOBAL_VARIABLES.
+ if weight_collection == ops.GraphKeys.GLOBAL_VARIABLES:
+ continue
+ # TODO(rohanj): Explore adding a _get_variable_list method on `Variable`
+ # so that we don't have to do this check.
+ if isinstance(var, variables.PartitionedVariable):
+ for constituent_var in list(var):
+ ops.add_to_collection(weight_collection, constituent_var)
+ else:
+ ops.add_to_collection(weight_collection, var)
+
+
+class _FCLinearWrapper(base.Layer):
+ """Wraps a _FeatureColumn in a layer for use in a linear model.
+
+ See `linear_model` above.
+ """
+
+ def __init__(self,
+ feature_column,
+ units=1,
+ sparse_combiner='sum',
+ weight_collections=None,
+ trainable=True,
+ name=None,
+ **kwargs):
+ super(_FCLinearWrapper, self).__init__(
+ trainable=trainable, name=name, **kwargs)
+ self._feature_column = feature_column
+ self._units = units
+ self._sparse_combiner = sparse_combiner
+ self._weight_collections = weight_collections
+
+ def build(self, _):
+ if isinstance(self._feature_column, fc_old._CategoricalColumn): # pylint: disable=protected-access
+ weight = self.add_variable(
+ name='weights',
+ shape=(self._feature_column._num_buckets, self._units), # pylint: disable=protected-access
+ initializer=init_ops.zeros_initializer(),
+ trainable=self.trainable)
+ else:
+ num_elements = self._feature_column._variable_shape.num_elements() # pylint: disable=protected-access
+ weight = self.add_variable(
+ name='weights',
+ shape=[num_elements, self._units],
+ initializer=init_ops.zeros_initializer(),
+ trainable=self.trainable)
+ _add_to_collections(weight, self._weight_collections)
+ self._weight_var = weight
+ self.built = True
+
+ def call(self, builder):
+ weighted_sum = fc_old._create_weighted_sum( # pylint: disable=protected-access
+ column=self._feature_column,
+ builder=builder,
+ units=self._units,
+ sparse_combiner=self._sparse_combiner,
+ weight_collections=self._weight_collections,
+ trainable=self.trainable,
+ weight_var=self._weight_var)
+ return weighted_sum
+
+
+class _BiasLayer(base.Layer):
+ """A layer for the bias term.
+ """
+
+ def __init__(self,
+ units=1,
+ trainable=True,
+ weight_collections=None,
+ name=None,
+ **kwargs):
+ super(_BiasLayer, self).__init__(trainable=trainable, name=name, **kwargs)
+ self._units = units
+ self._weight_collections = weight_collections
+
+ def build(self, _):
+ self._bias_variable = self.add_variable(
+ 'bias_weights',
+ shape=[self._units],
+ initializer=init_ops.zeros_initializer(),
+ trainable=self.trainable)
+ _add_to_collections(self._bias_variable, self._weight_collections)
+ self.built = True
+
+ def call(self, _):
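+ # The input is intentionally ignored; this layer only exposes the bias variable.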
+ return self._bias_variable
+
+
+def _get_expanded_variable_list(variable):
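+ """Returns `variable` as a list, expanding a `PartitionedVariable` into its parts."""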
+ if (isinstance(variable, variables.Variable) or
+ resource_variable_ops.is_resource_variable(variable)):
+ return [variable] # Single variable case.
+ else: # Must be a PartitionedVariable, so convert into a list.
+ return list(variable)
+
+
+def _strip_leading_slashes(name):
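+ """Returns the last component of a scope name, e.g. 'outer/linear_model' -> 'linear_model'."""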
+ return name.rsplit('/', 1)[-1]
+
+
+class _LinearModel(training.Model):
+ """Creates a linear model using feature columns.
+
+ See `linear_model` for details.
+ """
+
+ def __init__(self,
+ feature_columns,
+ units=1,
+ sparse_combiner='sum',
+ weight_collections=None,
+ trainable=True,
+ name=None,
+ **kwargs):
+ super(_LinearModel, self).__init__(name=name, **kwargs)
+ self._feature_columns = fc_old._normalize_feature_columns( # pylint: disable=protected-access
+ feature_columns)
+ self._weight_collections = list(weight_collections or [])
+ if ops.GraphKeys.GLOBAL_VARIABLES not in self._weight_collections:
+ self._weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
+ if ops.GraphKeys.MODEL_VARIABLES not in self._weight_collections:
+ self._weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)
+
+ column_layers = {}
+ for column in sorted(self._feature_columns, key=lambda x: x.name):
+ with variable_scope.variable_scope(
+ None, default_name=column._var_scope_name) as vs: # pylint: disable=protected-access
+ # Having the fully expressed variable scope name ends up doubly
+ # expressing the outer scope (scope with which this method was called)
+ # in the name of the variable that would get created.
+ column_name = _strip_leading_slashes(vs.name)
+ column_layer = _FCLinearWrapper(column, units, sparse_combiner,
+ self._weight_collections, trainable,
+ column_name, **kwargs)
+ column_layers[column_name] = column_layer
+ self._column_layers = self._add_layers(column_layers)
+ self._bias_layer = _BiasLayer(
+ units=units,
+ trainable=trainable,
+ weight_collections=self._weight_collections,
+ name='bias_layer',
+ **kwargs)
+ self._cols_to_vars = {}
+
+ def cols_to_vars(self):
+ """Returns a dict mapping _FeatureColumns to variables.
+
+ See `linear_model` for more information.
+ This is not populated until `call` is invoked, i.e. until the layer is built.
+ """
+ return self._cols_to_vars
+
+ def call(self, features):
+ with variable_scope.variable_scope(self.name):
+ for column in self._feature_columns:
+ if not isinstance(
+ column,
+ (
+ fc_old._DenseColumn, # pylint: disable=protected-access
+ fc_old._CategoricalColumn)): # pylint: disable=protected-access
+ raise ValueError(
+ 'Items of feature_columns must be either a '
+ '_DenseColumn or _CategoricalColumn. Given: {}'.format(column))
+ weighted_sums = []
+ ordered_columns = []
+ builder = fc_old._LazyBuilder(features) # pylint: disable=protected-access
+ for layer in sorted(self._column_layers.values(), key=lambda x: x.name):
+ column = layer._feature_column # pylint: disable=protected-access
+ ordered_columns.append(column)
+ weighted_sum = layer(builder)
+ weighted_sums.append(weighted_sum)
+ self._cols_to_vars[column] = ops.get_collection(
+ ops.GraphKeys.GLOBAL_VARIABLES, scope=layer.scope_name)
+
+ _verify_static_batch_size_equality(weighted_sums, ordered_columns)
+ predictions_no_bias = math_ops.add_n(
+ weighted_sums, name='weighted_sum_no_bias')
+ predictions = nn_ops.bias_add(
+ predictions_no_bias,
+ self._bias_layer( # pylint: disable=not-callable
+ builder,
+ scope=variable_scope.get_variable_scope()), # pylint: disable=not-callable
+ name='weighted_sum')
+ bias = self._bias_layer.variables[0]
+ self._cols_to_vars['bias'] = _get_expanded_variable_list(bias)
+ return predictions
+
+ def _add_layers(self, layers):
+ # "Magic" required for keras.Model classes to track all the variables in
+ # a list of layers.Layer objects.
+ # TODO(ashankar): Figure out API so user code doesn't have to do this.
+ for name, layer in layers.items():
+ setattr(self, 'layer-%s' % name, layer)
+ return layers
+
+
+def _transform_features(features, feature_columns, state_manager):
+ """Returns transformed features based on features columns passed in.
+
+ Note that you most likely will not need to use this function directly. Check
+ `input_layer` and `linear_model` first to see whether they satisfy your use
+ case.
+
+ Example:
+
+ ```python
+ # Define features and transformations
+ crosses_a_x_b = crossed_column(
+ columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000)
+ price_buckets = bucketized_column(
+ source_column=numeric_column("price"), boundaries=[...])
+
+ columns = [crosses_a_x_b, price_buckets]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ transformed = transform_features(features=features, feature_columns=columns)
+
+ assertCountEqual(columns, transformed.keys())
+ ```
+
+ Args:
+ features: A mapping from key to tensors. `FeatureColumn`s look up via these
+ keys. For example `numeric_column('price')` will look at the 'price' key in
+ this dict. Values can be a `SparseTensor` or a `Tensor`, depending on the
+ corresponding `FeatureColumn`.
+ feature_columns: An iterable containing all the `FeatureColumn`s.
+ state_manager: A StateManager object that holds the FeatureColumn state.
+
+ Returns:
+ A `dict` mapping `FeatureColumn` to `Tensor` and `SparseTensor` values.
+ """
+ feature_columns = _normalize_feature_columns(feature_columns)
+ outputs = {}
+ with ops.name_scope(
+ None, default_name='transform_features', values=features.values()):
+ transformation_cache = FeatureTransformationCache(features)
+ for column in sorted(feature_columns, key=lambda x: x.name):
+ with ops.name_scope(None, default_name=column.name):
+ outputs[column] = transformation_cache.get(column, state_manager)
+ return outputs
+
+
+def make_parse_example_spec(feature_columns):
+ """Creates parsing spec dictionary from input feature_columns.
+
+ The returned dictionary can be used as arg 'features' in `tf.parse_example`.
+
+ Typical usage example:
+
+ ```python
+ # Define features and transformations
+ feature_a = categorical_column_with_vocabulary_file(...)
+ feature_b = numeric_column(...)
+ feature_c_bucketized = bucketized_column(numeric_column("feature_c"), ...)
+ feature_a_x_feature_c = crossed_column(
+ columns=["feature_a", feature_c_bucketized], ...)
+
+ feature_columns = set(
+ [feature_b, feature_c_bucketized, feature_a_x_feature_c])
+ features = tf.parse_example(
+ serialized=serialized_examples,
+ features=make_parse_example_spec(feature_columns))
+ ```
+
+ For the above example, make_parse_example_spec would return the dict:
+
+ ```python
+ {
+ "feature_a": parsing_ops.VarLenFeature(tf.string),
+ "feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
+ "feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
+ }
+ ```
+
+ Args:
+ feature_columns: An iterable containing all feature columns. All items
+ should be instances of classes derived from `FeatureColumn`.
+
+ Returns:
+ A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
+ value.
+
+ Raises:
+ ValueError: If any of the given `feature_columns` is not a `FeatureColumn`
+ instance.
+ """
+ result = {}
+ for column in feature_columns:
+ if not isinstance(column, FeatureColumn):
+ raise ValueError('All feature_columns must be FeatureColumn instances. '
+ 'Given: {}'.format(column))
+ config = column.parse_example_spec
+ for key, value in six.iteritems(config):
+ if key in result and value != result[key]:
+ raise ValueError(
+ 'feature_columns contain different parse_spec for key '
+ '{}. Given {} and {}'.format(key, value, result[key]))
+ result.update(config)
+ return result
+
+
+def embedding_column(
+ categorical_column, dimension, combiner='mean', initializer=None,
+ ckpt_to_load_from=None, tensor_name_in_ckpt=None, max_norm=None,
+ trainable=True):
+ """`_DenseColumn` that converts from sparse, categorical input.
+
+ Use this when your inputs are sparse, but you want to convert them to a dense
+ representation (e.g., to feed to a DNN).
+
+ Inputs must be a `_CategoricalColumn` created by any of the
+ `categorical_column_*` functions. Here is an example of using
+ `embedding_column` with `DNNClassifier`:
+
+ ```python
+ video_id = categorical_column_with_identity(
+ key='video_id', num_buckets=1000000, default_value=0)
+ columns = [embedding_column(video_id, 9),...]
+
+ estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
+
+ label_column = ...
+ def input_fn():
+ features = tf.parse_example(
+ ..., features=make_parse_example_spec(columns + [label_column]))
+ labels = features.pop(label_column.name)
+ return features, labels
+
+ estimator.train(input_fn=input_fn, steps=100)
+ ```
+
+ Here is an example using `embedding_column` with model_fn:
+
+ ```python
+ def model_fn(features, ...):
+ video_id = categorical_column_with_identity(
+ key='video_id', num_buckets=1000000, default_value=0)
+ columns = [embedding_column(video_id, 9),...]
+ dense_tensor = input_layer(features, columns)
+ # Form DNN layers, calculate loss, and return EstimatorSpec.
+ ...
+ ```
+
+ Args:
+ categorical_column: A `_CategoricalColumn` created by a
+ `categorical_column_with_*` function. This column produces the sparse IDs
+ that are inputs to the embedding lookup.
+ dimension: An integer specifying dimension of the embedding, must be > 0.
+ combiner: A string specifying how to reduce if there are multiple entries
+ in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
+ 'mean' the default. 'sqrtn' often achieves good accuracy, in particular
+ with bag-of-words columns. Each of these can be thought of as an example-level
+ normalization on the column. For more information, see
+ `tf.embedding_lookup_sparse`.
+ initializer: A variable initializer function to be used in embedding
+ variable initialization. If not specified, defaults to
+ `tf.truncated_normal_initializer` with mean `0.0` and standard deviation
+ `1/sqrt(dimension)`.
+ ckpt_to_load_from: String representing checkpoint name/pattern from which to
+ restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
+ tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
+ which to restore the column weights. Required if `ckpt_to_load_from` is
+ not `None`.
+ max_norm: If not `None`, embedding values are clipped if their l2-norm is
+ larger than this value.
+ trainable: Whether or not the embedding is trainable. Default is True.
+
+ Returns:
+ `_DenseColumn` that converts from sparse input.
+
+ Raises:
+ ValueError: if `dimension` not > 0.
+ ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
+ is specified.
+ ValueError: if `initializer` is specified and is not callable.
+ RuntimeError: If eager execution is enabled.
+ """
+ if (dimension is None) or (dimension < 1):
+ raise ValueError('Invalid dimension {}.'.format(dimension))
+ if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
+ raise ValueError('Must specify both `ckpt_to_load_from` and '
+ '`tensor_name_in_ckpt` or none of them.')
+
+ if (initializer is not None) and (not callable(initializer)):
+ raise ValueError('initializer must be callable if specified. '
+ 'Embedding of column_name: {}'.format(
+ categorical_column.name))
+ if initializer is None:
+ initializer = init_ops.truncated_normal_initializer(
+ mean=0.0, stddev=1 / math.sqrt(dimension))
+
+ return EmbeddingColumn(
+ categorical_column=categorical_column,
+ dimension=dimension,
+ combiner=combiner,
+ initializer=initializer,
+ ckpt_to_load_from=ckpt_to_load_from,
+ tensor_name_in_ckpt=tensor_name_in_ckpt,
+ max_norm=max_norm,
+ trainable=trainable)
+
+
+def shared_embedding_columns(
+ categorical_columns, dimension, combiner='mean', initializer=None,
+ shared_embedding_collection_name=None, ckpt_to_load_from=None,
+ tensor_name_in_ckpt=None, max_norm=None, trainable=True):
+ """List of dense columns that convert from sparse, categorical input.
+
+ This is similar to `embedding_column`, except that it produces a list of
+ embedding columns that share the same embedding weights.
+
+ Use this when your inputs are sparse and of the same type (e.g. watched and
+ impression video IDs that share the same vocabulary), and you want to convert
+ them to a dense representation (e.g., to feed to a DNN).
+
+ Inputs must be a list of categorical columns created by any of the
+ `categorical_column_*` functions. They must all be of the same type and have
+ the same arguments except `key`. E.g. they can be
+ categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
+ all columns could also be weighted_categorical_column.
+
+ Here is an example embedding of two features for a DNNClassifier model:
+
+ ```python
+ watched_video_id = categorical_column_with_vocabulary_file(
+ 'watched_video_id', video_vocabulary_file, video_vocabulary_size)
+ impression_video_id = categorical_column_with_vocabulary_file(
+ 'impression_video_id', video_vocabulary_file, video_vocabulary_size)
+ columns = shared_embedding_columns(
+ [watched_video_id, impression_video_id], dimension=10)
+
+ estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
+
+ label_column = ...
+ def input_fn():
+ features = tf.parse_example(
+ ..., features=make_parse_example_spec(columns + [label_column]))
+ labels = features.pop(label_column.name)
+ return features, labels
+
+ estimator.train(input_fn=input_fn, steps=100)
+ ```
+
+ Here is an example using `shared_embedding_columns` with model_fn:
+
+ ```python
+ def model_fn(features, ...):
+ watched_video_id = categorical_column_with_vocabulary_file(
+ 'watched_video_id', video_vocabulary_file, video_vocabulary_size)
+ impression_video_id = categorical_column_with_vocabulary_file(
+ 'impression_video_id', video_vocabulary_file, video_vocabulary_size)
+ columns = shared_embedding_columns(
+ [watched_video_id, impression_video_id], dimension=10)
+ dense_tensor = input_layer(features, columns)
+ # Form DNN layers, calculate loss, and return EstimatorSpec.
+ ...
+ ```
+
+ Args:
+ categorical_columns: List of categorical columns created by a
+ `categorical_column_with_*` function. These columns produce the sparse IDs
+ that are inputs to the embedding lookup. All columns must be of the same
+ type and have the same arguments except `key`. E.g. they can be
+ categorical_column_with_vocabulary_file with the same vocabulary_file.
+ Some or all columns could also be weighted_categorical_column.
+ dimension: An integer specifying dimension of the embedding, must be > 0.
+ combiner: A string specifying how to reduce if there are multiple entries
+ in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
+ 'mean' the default. 'sqrtn' often achieves good accuracy, in particular
+ with bag-of-words columns. Each of these can be thought of as an example-level
+ normalization on the column. For more information, see
+ `tf.embedding_lookup_sparse`.
+ initializer: A variable initializer function to be used in embedding
+ variable initialization. If not specified, defaults to
+ `tf.truncated_normal_initializer` with mean `0.0` and standard deviation
+ `1/sqrt(dimension)`.
+ shared_embedding_collection_name: Optional collective name of these columns.
+ If not given, a reasonable name will be chosen based on the names of
+ `categorical_columns`.
+ ckpt_to_load_from: String representing checkpoint name/pattern from which to
+ restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
+ tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
+ which to restore the column weights. Required if `ckpt_to_load_from` is
+ not `None`.
+ max_norm: If not `None`, each embedding is clipped if its l2-norm is
+ larger than this value, before combining.
+ trainable: Whether or not the embedding is trainable. Default is True.
+
+ Returns:
+ A list of dense columns that converts from sparse input. The order of
+ results follows the ordering of `categorical_columns`.
+
+ Raises:
+ ValueError: if `dimension` not > 0.
+ ValueError: if any of the given `categorical_columns` is of different type
+ or has different arguments than the others.
+ ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
+ is specified.
+ ValueError: if `initializer` is specified and is not callable.
+ RuntimeError: if eager execution is enabled.
+ """
+ if context.executing_eagerly():
+ raise RuntimeError('shared_embedding_columns are not supported when eager '
+ 'execution is enabled.')
+
+ if (dimension is None) or (dimension < 1):
+ raise ValueError('Invalid dimension {}.'.format(dimension))
+ if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
+ raise ValueError('Must specify both `ckpt_to_load_from` and '
+ '`tensor_name_in_ckpt` or none of them.')
+
+ if (initializer is not None) and (not callable(initializer)):
+ raise ValueError('initializer must be callable if specified.')
+ if initializer is None:
+ initializer = init_ops.truncated_normal_initializer(
+ mean=0.0, stddev=1. / math.sqrt(dimension))
+
+ # Sort the columns so the default collection name is deterministic even if the
+ # user passes columns from an unsorted collection, such as dict.values().
+ sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
+
+ c0 = sorted_columns[0]
+ if not isinstance(c0, CategoricalColumn):
+ raise ValueError(
+ 'All categorical_columns must be subclasses of CategoricalColumn. '
+ 'Given: {}, of type: {}'.format(c0, type(c0)))
+ num_buckets = c0.num_buckets
+ if isinstance(c0, WeightedCategoricalColumn):
+ c0 = c0.categorical_column
+ for c in sorted_columns[1:]:
+ if isinstance(c, WeightedCategoricalColumn):
+ c = c.categorical_column
+ if not isinstance(c, type(c0)):
+ raise ValueError(
+ 'To use shared_embedding_column, all categorical_columns must have '
+ 'the same type, or be weighted_categorical_column of the same type. '
+ 'Given column: {} of type: {} does not match given column: {} of '
+ 'type: {}'.format(c0, type(c0), c, type(c)))
+ if num_buckets != c.num_buckets:
+ raise ValueError(
+ 'To use shared_embedding_column, all categorical_columns must have '
+ 'the same number of buckets. Given column: {} with buckets: {} does '
+ 'not match column: {} with buckets: {}'.format(
+ c0, num_buckets, c, c.num_buckets))
+
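+ # For example, columns named 'aaa' and 'bbb' yield the default collection
+ # name 'aaa_bbb_shared_embedding'.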
+ if not shared_embedding_collection_name:
+ shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
+ shared_embedding_collection_name += '_shared_embedding'
+
+ result = []
+ for column in categorical_columns:
+ result.append(
+ SharedEmbeddingColumn(
+ categorical_column=column,
+ initializer=initializer,
+ dimension=dimension,
+ combiner=combiner,
+ shared_embedding_collection_name=shared_embedding_collection_name,
+ ckpt_to_load_from=ckpt_to_load_from,
+ tensor_name_in_ckpt=tensor_name_in_ckpt,
+ max_norm=max_norm,
+ trainable=trainable))
+
+ return result
+
+
+def numeric_column(key,
+ shape=(1,),
+ default_value=None,
+ dtype=dtypes.float32,
+ normalizer_fn=None):
+ """Represents real valued or numerical features.
+
+ Example:
+
+ ```python
+ price = numeric_column('price')
+ columns = [price, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ dense_tensor = input_layer(features, columns)
+
+ # or
+ bucketized_price = bucketized_column(price, boundaries=[...])
+ columns = [bucketized_price, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ linear_prediction = linear_model(features, columns)
+ ```
+
+ Args:
+ key: A unique string identifying the input feature. It is used as the
+ column name and the dictionary key for feature parsing configs, feature
+ `Tensor` objects, and feature columns.
+ shape: An iterable of integers that specifies the shape of the `Tensor`. A
+ single integer may also be given, meaning a one-dimensional `Tensor` of the
+ given width. The `Tensor` representing the column will have the shape
+ [batch_size] + `shape`.
+ default_value: A single value compatible with `dtype` or an iterable of
+ values compatible with `dtype` which the column takes on during
+ `tf.Example` parsing if data is missing. A default value of `None` will
+ cause `tf.parse_example` to fail if an example does not contain this
+ column. If a single value is provided, the same value will be applied as
+ the default value for every item. If an iterable of values is provided,
+ the shape of the `default_value` should be equal to the given `shape`.
+ dtype: Defines the type of values. Default value is `tf.float32`. Must be a
+ non-quantized, real integer or floating point type.
+ normalizer_fn: If not `None`, a function that can be used to normalize the
+ value of the tensor after `default_value` is applied for parsing. The
+ normalizer function takes the input `Tensor` as its argument and returns
+ the output `Tensor` (e.g. lambda x: (x - 3.0) / 4.2). Note that even
+ though the most common use case of this function is normalization, it can
+ be used for any kind of TensorFlow transformation.
+
+ Returns:
+ A `NumericColumn`.
+
+ Raises:
+ TypeError: if any dimension in shape is not an int
+ ValueError: if any dimension in shape is not a positive integer
+ TypeError: if `default_value` is an iterable but not compatible with `shape`
+ TypeError: if `default_value` is not compatible with `dtype`.
+ ValueError: if `dtype` is not convertible to `tf.float32`.
+ """
+ shape = _check_shape(shape, key)
+ if not (dtype.is_integer or dtype.is_floating):
+ raise ValueError('dtype must be convertible to float. '
+ 'dtype: {}, key: {}'.format(dtype, key))
+ default_value = _check_default_value(shape, default_value, dtype, key)
+
+ if normalizer_fn is not None and not callable(normalizer_fn):
+ raise TypeError(
+ 'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
+
+ _assert_key_is_string(key)
+ return NumericColumn(
+ key,
+ shape=shape,
+ default_value=default_value,
+ dtype=dtype,
+ normalizer_fn=normalizer_fn)
+
+
+def bucketized_column(source_column, boundaries):
+ """Represents discretized dense input.
+
+ Buckets include the left boundary, and exclude the right boundary. Namely,
+ `boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
+ `[1., 2.)`, and `[2., +inf)`.
+
+ For example, if the inputs are
+
+ ```python
+ boundaries = [0, 10, 100]
+ input tensor = [[-5, 10000]
+ [150, 10]
+ [5, 100]]
+ ```
+
+ then the output will be
+
+ ```python
+ output = [[0, 3]
+ [3, 2]
+ [1, 3]]
+ ```
+
+ Example:
+
+ ```python
+ price = numeric_column('price')
+ bucketized_price = bucketized_column(price, boundaries=[...])
+ columns = [bucketized_price, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ linear_prediction = linear_model(features, columns)
+
+ # or
+ columns = [bucketized_price, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ dense_tensor = input_layer(features, columns)
+ ```
+
+ `bucketized_column` can also be crossed with another categorical column using
+ `crossed_column`:
+
+ ```python
+ price = numeric_column('price')
+ # bucketized_column converts numerical feature to a categorical one.
+ bucketized_price = bucketized_column(price, boundaries=[...])
+ # 'keywords' is a string feature.
+ price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50000)
+ columns = [price_x_keywords, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ linear_prediction = linear_model(features, columns)
+ ```
+
+ Args:
+ source_column: A one-dimensional dense column which is generated with
+ `numeric_column`.
+ boundaries: A sorted list or tuple of floats specifying the boundaries.
+
+ Returns:
+ A `BucketizedColumn`.
+
+ Raises:
+ ValueError: If `source_column` is not a numeric column, or if it is not
+ one-dimensional.
+ ValueError: If `boundaries` is not a sorted list or tuple.
+ """
+ if not isinstance(source_column, NumericColumn):
+ raise ValueError(
+ 'source_column must be a column generated with numeric_column(). '
+ 'Given: {}'.format(source_column))
+ if len(source_column.shape) > 1:
+ raise ValueError(
+ 'source_column must be one-dimensional column. '
+ 'Given: {}'.format(source_column))
+ if (not boundaries or
+ not (isinstance(boundaries, list) or isinstance(boundaries, tuple))):
+ raise ValueError('boundaries must be a sorted list.')
+ for i in range(len(boundaries) - 1):
+ if boundaries[i] >= boundaries[i + 1]:
+ raise ValueError('boundaries must be a sorted list.')
+ return BucketizedColumn(source_column, tuple(boundaries))
+
+
+def _assert_string_or_int(dtype, prefix):
+ if (dtype != dtypes.string) and (not dtype.is_integer):
+ raise ValueError(
+ '{} dtype must be string or integer. dtype: {}.'.format(prefix, dtype))
+
+
+def _assert_key_is_string(key):
+ if not isinstance(key, six.string_types):
+ raise ValueError(
+ 'key must be a string. Got: type {}. Given key: {}.'.format(
+ type(key), key))
+
+
+def categorical_column_with_hash_bucket(key,
+ hash_bucket_size,
+ dtype=dtypes.string):
+ """Represents sparse feature where ids are set by hashing.
+
+ Use this when your sparse features are in string or integer format, and you
+ want to distribute your inputs into a finite number of buckets by hashing.
+ output_id = Hash(input_feature_string) % bucket_size for string type input.
+ For int type input, the value is converted to its string representation first
+ and then hashed by the same formula.
+
+ For input dictionary `features`, `features[key]` is either `Tensor` or
+ `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
+ and `''` for string, which will be dropped by this feature column.
+
+ Example:
+
+ ```python
+ keywords = categorical_column_with_hash_bucket("keywords", 10000)
+ columns = [keywords, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ linear_prediction = linear_model(features, columns)
+
+ # or
+ keywords_embedded = embedding_column(keywords, 16)
+ columns = [keywords_embedded, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ dense_tensor = input_layer(features, columns)
+ ```
+
+ Args:
+ key: A unique string identifying the input feature. It is used as the
+ column name and the dictionary key for feature parsing configs, feature
+ `Tensor` objects, and feature columns.
+ hash_bucket_size: An int >= 1. The number of buckets.
+ dtype: The type of features. Only string and integer types are supported.
+
+ Returns:
+ A `HashedCategoricalColumn`.
+
+ Raises:
+ ValueError: `hash_bucket_size` is less than 1.
+ ValueError: `dtype` is neither string nor integer.
+ """
+ if hash_bucket_size is None:
+ raise ValueError('hash_bucket_size must be set. key: {}'.format(key))
+
+ if hash_bucket_size < 1:
+ raise ValueError('hash_bucket_size must be at least 1. '
+ 'hash_bucket_size: {}, key: {}'.format(
+ hash_bucket_size, key))
+
+ _assert_key_is_string(key)
+ _assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
+
+ return HashedCategoricalColumn(key, hash_bucket_size, dtype)
+
+
+def categorical_column_with_vocabulary_file(key,
+ vocabulary_file,
+ vocabulary_size=None,
+ num_oov_buckets=0,
+ default_value=None,
+ dtype=dtypes.string):
+ """A `CategoricalColumn` with a vocabulary file.
+
+ Use this when your inputs are in string or integer format, and you have a
+ vocabulary file that maps each value to an integer ID. By default,
+ out-of-vocabulary values are ignored. Use either (but not both) of
+ `num_oov_buckets` and `default_value` to specify how to include
+ out-of-vocabulary values.
+
+ For input dictionary `features`, `features[key]` is either `Tensor` or
+ `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
+ and `''` for string, which will be dropped by this feature column.
+
+ Example with `num_oov_buckets`:
+ File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state
+ abbreviation. All inputs with values in that file are assigned an ID 0-49,
+ corresponding to their line numbers. All other values are hashed and assigned
+ an ID 50-54.
+
+ ```python
+ states = categorical_column_with_vocabulary_file(
+ key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
+ num_oov_buckets=5)
+ columns = [states, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ linear_prediction = linear_model(features, columns)
+ ```
+
+ Example with `default_value`:
+ File '/us/states.txt' contains 51 lines - the first line is 'XX', and the
+ other 50 each have a 2-character U.S. state abbreviation. Both a literal 'XX'
+ in input, and other values missing from the file, will be assigned ID 0. All
+ others are assigned the corresponding line number 1-50.
+
+ ```python
+ states = categorical_column_with_vocabulary_file(
+ key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
+ default_value=0)
+ columns = [states, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ linear_prediction = linear_model(features, columns)
+ ```
+
+ And to make an embedding with either:
+
+ ```python
+ columns = [embedding_column(states, 3),...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ dense_tensor = input_layer(features, columns)
+ ```
+
+ Args:
+ key: A unique string identifying the input feature. It is used as the
+ column name and the dictionary key for feature parsing configs, feature
+ `Tensor` objects, and feature columns.
+ vocabulary_file: The vocabulary file name.
+ vocabulary_size: Number of elements in the vocabulary. This must be no
+ greater than the length of `vocabulary_file`; if it is less, later values
+ are ignored. If `None`, it is set to the length of `vocabulary_file`.
+ num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
+ buckets. All out-of-vocabulary inputs will be assigned IDs in the range
+ `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
+ the input value. A positive `num_oov_buckets` cannot be specified with
+ `default_value`.
+ default_value: The integer ID value to return for out-of-vocabulary feature
+ values, defaults to `-1`. This cannot be specified with a positive
+ `num_oov_buckets`.
+ dtype: The type of features. Only string and integer types are supported.
+
+ Returns:
+ A `CategoricalColumn` with a vocabulary file.
+
+ Raises:
+ ValueError: `vocabulary_file` is missing or cannot be opened.
+ ValueError: `vocabulary_size` is missing or < 1.
+ ValueError: `num_oov_buckets` is a negative integer.
+ ValueError: `num_oov_buckets` and `default_value` are both specified.
+ ValueError: `dtype` is neither string nor integer.
+ """
+ if not vocabulary_file:
+ raise ValueError('Missing vocabulary_file in {}.'.format(key))
+
+ if vocabulary_size is None:
+ if not gfile.Exists(vocabulary_file):
+ raise ValueError('vocabulary_file in {} does not exist.'.format(key))
+
+ with gfile.GFile(vocabulary_file) as f:
+ vocabulary_size = sum(1 for _ in f)
+ logging.info(
+ 'vocabulary_size = %d in %s is inferred from the number of elements '
+ 'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)
+
+ # `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.
+ if vocabulary_size < 1:
+ raise ValueError('Invalid vocabulary_size in {}.'.format(key))
+ if num_oov_buckets:
+ if default_value is not None:
+ raise ValueError(
+ 'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
+ key))
+ if num_oov_buckets < 0:
+ raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
+ num_oov_buckets, key))
+ _assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
+ _assert_key_is_string(key)
+ return VocabularyFileCategoricalColumn(
+ key=key,
+ vocabulary_file=vocabulary_file,
+ vocabulary_size=vocabulary_size,
+ num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets,
+ default_value=-1 if default_value is None else default_value,
+ dtype=dtype)
+
+
+def categorical_column_with_vocabulary_list(
+ key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0):
+ """A `_CategoricalColumn` with in-memory vocabulary.
+
+ Use this when your inputs are in string or integer format, and you have an
+ in-memory vocabulary mapping each value to an integer ID. By default,
+ out-of-vocabulary values are ignored. Use either (but not both) of
+ `num_oov_buckets` and `default_value` to specify how to include
+ out-of-vocabulary values.
+
+ For input dictionary `features`, `features[key]` is either `Tensor` or
+ `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
+ and `''` for string, which will be dropped by this feature column.
+
+ Example with `num_oov_buckets`:
+ In the following example, each input in `vocabulary_list` is assigned an ID
+ 0-3 corresponding to its index (e.g., input 'B' produces output 2). All other
+ inputs are hashed and assigned an ID 4-5.
+
+ ```python
+ colors = categorical_column_with_vocabulary_list(
+ key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
+ num_oov_buckets=2)
+ columns = [colors, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ linear_prediction = linear_model(features, columns)
+ ```
+
+ Example with `default_value`:
+ In the following example, each input in `vocabulary_list` is assigned an ID
+ 0-4 corresponding to its index (e.g., input 'B' produces output 3). All other
+ inputs are assigned `default_value` 0.
+
+
+ ```python
+ colors = categorical_column_with_vocabulary_list(
+ key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
+ columns = [colors, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ linear_prediction = linear_model(features, columns)
+ ```
+
+ And to make an embedding with either:
+
+ ```python
+ columns = [embedding_column(colors, 3),...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ dense_tensor = input_layer(features, columns)
+ ```
+
+ Args:
+ key: A unique string identifying the input feature. It is used as the
+ column name and the dictionary key for feature parsing configs, feature
+ `Tensor` objects, and feature columns.
+ vocabulary_list: An ordered iterable defining the vocabulary. Each feature
+ is mapped to the index of its value (if present) in `vocabulary_list`.
+ Must be castable to `dtype`.
+ dtype: The type of features. Only string and integer types are supported.
+ If `None`, it will be inferred from `vocabulary_list`.
+ default_value: The integer ID value to return for out-of-vocabulary feature
+ values, defaults to `-1`. This cannot be specified with a positive
+ `num_oov_buckets`.
+ num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
+ buckets. All out-of-vocabulary inputs will be assigned IDs in the range
+ `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
+ hash of the input value. A positive `num_oov_buckets` cannot be specified
+ with `default_value`.
+
+ Returns:
+ A `CategoricalColumn` with in-memory vocabulary.
+
+ Raises:
+ ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
+ ValueError: `num_oov_buckets` is a negative integer.
+ ValueError: `num_oov_buckets` and `default_value` are both specified.
+ ValueError: if `dtype` is not integer or string.
+ """
+ if (vocabulary_list is None) or (len(vocabulary_list) < 1):
+ raise ValueError(
+ 'vocabulary_list {} must be non-empty, column_name: {}'.format(
+ vocabulary_list, key))
+ if len(set(vocabulary_list)) != len(vocabulary_list):
+ raise ValueError(
+ 'Duplicate keys in vocabulary_list {}, column_name: {}'.format(
+ vocabulary_list, key))
+ vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)
+ if num_oov_buckets:
+ if default_value != -1:
+ raise ValueError(
+ 'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
+ key))
+ if num_oov_buckets < 0:
+ raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
+ num_oov_buckets, key))
+ _assert_string_or_int(
+ vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key))
+ if dtype is None:
+ dtype = vocabulary_dtype
+ elif dtype.is_integer != vocabulary_dtype.is_integer:
+ raise ValueError(
+ 'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(
+ dtype, vocabulary_dtype, key))
+ _assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
+ _assert_key_is_string(key)
+
+ return VocabularyListCategoricalColumn(
+ key=key,
+ vocabulary_list=tuple(vocabulary_list),
+ dtype=dtype,
+ default_value=default_value,
+ num_oov_buckets=num_oov_buckets)
+
+
+def categorical_column_with_identity(key, num_buckets, default_value=None):
+ """A `CategoricalColumn` that returns identity values.
+
+ Use this when your inputs are integers in the range `[0, num_buckets)`, and
+ you want to use the input value itself as the categorical ID. Values outside
+ this range will result in `default_value` if specified, otherwise it will
+ fail.
+
+ Typically, this is used for contiguous ranges of integer indexes, but
+ it doesn't have to be. This might be inefficient, however, if many IDs
+ are unused. Consider `categorical_column_with_hash_bucket` in that case.
+
+ For input dictionary `features`, `features[key]` is either `Tensor` or
+ `SparseTensor`. If `Tensor`, missing values can be represented by `-1`, which
+ will be dropped by this feature column.
+
+ In the following examples, each input in the range `[0, 1000000)` is assigned
+ its own value as the categorical ID. All other inputs are assigned
+ `default_value` 0. Note that a literal 0 in inputs will result in the same
+ default ID.
+
+ Linear model:
+
+ ```python
+ video_id = categorical_column_with_identity(
+ key='video_id', num_buckets=1000000, default_value=0)
+ columns = [video_id, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ linear_prediction = linear_model(features, columns)
+ ```
+
+ Embedding for a DNN model:
+
+ ```python
+ columns = [embedding_column(video_id, 9),...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ dense_tensor = input_layer(features, columns)
+ ```
+
+ Args:
+ key: A unique string identifying the input feature. It is used as the
+ column name and the dictionary key for feature parsing configs, feature
+ `Tensor` objects, and feature columns.
+ num_buckets: Range of inputs and outputs is `[0, num_buckets)`.
+ default_value: If `None`, this column's graph operations will fail for
+ out-of-range inputs. Otherwise, this value must be in the range
+ `[0, num_buckets)`, and will replace out-of-range inputs.
+
+ Returns:
+ A `CategoricalColumn` that returns identity values.
+
+ Raises:
+ ValueError: if `num_buckets` is less than one.
+ ValueError: if `default_value` is not in range `[0, num_buckets)`.
+ """
+ if num_buckets < 1:
+ raise ValueError(
+ 'num_buckets {} < 1, column_name {}'.format(num_buckets, key))
+ if (default_value is not None) and (
+ (default_value < 0) or (default_value >= num_buckets)):
+ raise ValueError(
+ 'default_value {} not in range [0, {}), column_name {}'.format(
+ default_value, num_buckets, key))
+ _assert_key_is_string(key)
+ return IdentityCategoricalColumn(
+ key=key, number_buckets=num_buckets, default_value=default_value)
+
+
+def indicator_column(categorical_column):
+ """Represents multi-hot representation of given categorical column.
+
+ - For a DNN model, `indicator_column` can be used to wrap any
+ `categorical_column_*` (e.g., to feed to a DNN). Consider using
+ `embedding_column` if the number of buckets/unique values is large.
+
+ - For Wide (aka linear) model, `indicator_column` is the internal
+ representation for categorical column when passing categorical column
+ directly (as any element in feature_columns) to `linear_model`. See
+ `linear_model` for details.
+
+ ```python
+ name = indicator_column(categorical_column_with_vocabulary_list(
+ 'name', ['bob', 'george', 'wanda']))
+ columns = [name, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ dense_tensor = input_layer(features, columns)
+
+ dense_tensor == [[1, 0, 0]] # If "name" bytes_list is ["bob"]
+ dense_tensor == [[1, 0, 1]] # If "name" bytes_list is ["bob", "wanda"]
+ dense_tensor == [[2, 0, 0]] # If "name" bytes_list is ["bob", "bob"]
+ ```
+
+ Args:
+ categorical_column: A `CategoricalColumn` which is created by
+ `categorical_column_with_*` or `crossed_column` functions.
+
+ Returns:
+ An `IndicatorColumn`.
+ """
+ return IndicatorColumn(categorical_column)
+
+
+def weighted_categorical_column(
+ categorical_column, weight_feature_key, dtype=dtypes.float32):
+ """Applies weight values to a `_CategoricalColumn`.
+
+ Use this when each of your sparse inputs has both an ID and a value. For
+ example, if you're representing text documents as a collection of word
+ frequencies, you can provide 2 parallel sparse input features ('terms' and
+ 'frequencies' below).
+
+ Example:
+
+ Input `tf.Example` objects:
+
+ ```proto
+ [
+ features {
+ feature {
+ key: "terms"
+ value {bytes_list {value: "very" value: "model"}}
+ }
+ feature {
+ key: "frequencies"
+ value {float_list {value: 0.3 value: 0.1}}
+ }
+ },
+ features {
+ feature {
+ key: "terms"
+ value {bytes_list {value: "when" value: "course" value: "human"}}
+ }
+ feature {
+ key: "frequencies"
+ value {float_list {value: 0.4 value: 0.1 value: 0.2}}
+ }
+ }
+ ]
+ ```
+
+ ```python
+ categorical_column = categorical_column_with_hash_bucket(
+ column_name='terms', hash_bucket_size=1000)
+ weighted_column = weighted_categorical_column(
+ categorical_column=categorical_column, weight_feature_key='frequencies')
+ columns = [weighted_column, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ linear_prediction = linear_model(features, columns)
+ ```
+
+ This assumes the input dictionary contains a `SparseTensor` for key
+ 'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must have
+ the same indices and dense shape.
+
+ Args:
+ categorical_column: A `_CategoricalColumn` created by
+ `categorical_column_with_*` functions.
+ weight_feature_key: String key for weight values.
+ dtype: Type of weights, such as `tf.float32`. Only float and integer weights
+ are supported.
+
+ Returns:
+ A `CategoricalColumn` composed of two sparse features: one represents the
+ ids, the other represents the weight (value) of each id in that example.
+
+ Raises:
+ ValueError: if `dtype` is not convertible to float.
+ """
+ if (dtype is None) or not (dtype.is_integer or dtype.is_floating):
+ raise ValueError('dtype {} is not convertible to float.'.format(dtype))
+ return WeightedCategoricalColumn(
+ categorical_column=categorical_column,
+ weight_feature_key=weight_feature_key,
+ dtype=dtype)
+
+
+def crossed_column(keys, hash_bucket_size, hash_key=None):
+ """Returns a column for performing crosses of categorical features.
+
+ Crossed features will be hashed according to `hash_bucket_size`. Conceptually,
+ the transformation can be thought of as:
+ Hash(cartesian product of features) % `hash_bucket_size`
+
+ For example, if the input features are:
+
+ * SparseTensor referred by first key:
+
+ ```python
+ shape = [2, 2]
+ {
+ [0, 0]: "a"
+ [1, 0]: "b"
+ [1, 1]: "c"
+ }
+ ```
+
+ * SparseTensor referred by second key:
+
+ ```python
+ shape = [2, 1]
+ {
+ [0, 0]: "d"
+ [1, 0]: "e"
+ }
+ ```
+
+ then the crossed feature will look like:
+
+ ```python
+ shape = [2, 2]
+ {
+ [0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
+ [1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
+ [1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
+ }
+ ```
+
+ Here is an example to create a linear model with crosses of string features:
+
+ ```python
+ keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50000)
+ columns = [keywords_x_doc_terms, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ linear_prediction = linear_model(features, columns)
+ ```
+
+ You could also use vocabulary lookup before crossing:
+
+ ```python
+ keywords = categorical_column_with_vocabulary_file(
+ 'keywords', '/path/to/vocabulary/file', vocabulary_size=1000)
+ keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
+ columns = [keywords_x_doc_terms, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ linear_prediction = linear_model(features, columns)
+ ```
+
+ If an input feature is of numeric type, you can use
+ `categorical_column_with_identity`, or `bucketized_column`, as in the example:
+
+ ```python
+ # vertical_id is an integer categorical feature.
+ vertical_id = categorical_column_with_identity('vertical_id', 10000)
+ price = numeric_column('price')
+ # bucketized_column converts numerical feature to a categorical one.
+ bucketized_price = bucketized_column(price, boundaries=[...])
+ vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50000)
+ columns = [vertical_id_x_price, ...]
+ features = tf.parse_example(..., features=make_parse_example_spec(columns))
+ linear_prediction = linear_model(features, columns)
+ ```
+
+ To use a crossed column in a DNN model, you need to wrap it in an embedding
+ column, as in this example:
+
+ ```python
+ vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50000)
+ vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
+ dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
+ ```
+
+ Args:
+ keys: An iterable identifying the features to be crossed. Each element can
+ be either:
+ * string: Will use the corresponding feature which must be of string type.
+ * `CategoricalColumn`: Will use the transformed tensor produced by this
+ column. Does not support hashed categorical column.
+ hash_bucket_size: An int >= 1. The number of buckets.
+ hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
+ function to combine the crosses fingerprints on SparseCrossOp (optional).
+
+ Returns:
+ A `CrossedColumn`.
+
+ Raises:
+ ValueError: If `len(keys) < 2`.
+ ValueError: If any of the keys is neither a string nor `CategoricalColumn`.
+ ValueError: If any of the keys is `HashedCategoricalColumn`.
+ ValueError: If `hash_bucket_size < 1`.
+ """
+ if not hash_bucket_size or hash_bucket_size < 1:
+ raise ValueError('hash_bucket_size must be at least 1. '
+ 'hash_bucket_size: {}'.format(hash_bucket_size))
+ if not keys or len(keys) < 2:
+ raise ValueError(
+ 'keys must be a list with length > 1. Given: {}'.format(keys))
+ for key in keys:
+ if (not isinstance(key, six.string_types) and
+ not isinstance(key, CategoricalColumn)):
+ raise ValueError(
+ 'Unsupported key type. All keys must be either strings, or '
+ 'categorical columns other than HashedCategoricalColumn. '
+ 'Given: {}'.format(key))
+ if isinstance(key, HashedCategoricalColumn):
+ raise ValueError(
+ 'categorical_column_with_hash_bucket is not supported for crossing. '
+ 'Hashing before crossing will increase probability of collision. '
+ 'Instead, use the feature name as a string. Given: {}'.format(key))
+ return CrossedColumn(
+ keys=tuple(keys), hash_bucket_size=hash_bucket_size, hash_key=hash_key)
+
+
+class StateManager(object):
+ """Manages the state associated with FeatureColumns.
+
+ Some `FeatureColumn`s create variables or resources to assist their
+ computation. The `StateManager` is responsible for creating and storing these
+ objects since `FeatureColumn`s are supposed to be stateless configuration
+ only.
+ """
+
+ def get_variable(self,
+ feature_column,
+ name,
+ shape,
+ dtype=None,
+ initializer=None):
+ """Creates a new variable or returns an existing one.
+
+ Args:
+ feature_column: A `FeatureColumn` object this variable corresponds to.
+ name: variable name.
+ shape: variable shape.
+ dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
+ initializer: initializer instance (callable).
+
+ Returns:
+ The variable.
+ """
+ raise NotImplementedError('StateManager.get_variable')
+
+ def get_resource(self, feature_column, name, resource_creator):
+ """Creates a new resource or returns an existing one.
+
+ Resources can be things such as lookup tables.
+
+ Args:
+ feature_column: A `FeatureColumn` object this variable corresponds to.
+ name: Name of the resource.
+ resource_creator: A callable that can create the resource.
+
+ Returns:
+ The resource.
+ """
+ raise NotImplementedError('StateManager.get_resource')
+
+
+class FeatureColumn(object):
+ """Represents a feature column abstraction.
+
+ WARNING: Do not subclass this layer unless you know what you are doing:
+ the API is subject to future changes.
+
+ To distinguish between the concept of a feature family and a specific binary
+ feature within a family, we refer to a feature family like "country" as a
+ feature column. For example, we can have a feature in a `tf.Example` format:
+ {key: "country", value: [ "US" ]}
+ In this example the value of the feature is "US" and "country" refers to the
+ column of the feature.
+
+ This class is an abstract class. Users should not create instances of this.
+ """
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractproperty
+ def name(self):
+ """Returns string. Used for naming."""
+ pass
+
+ @abc.abstractmethod
+ def transform_feature(self, transformation_cache, state_manager):
+ """Returns intermediate representation (usually a `Tensor`).
+
+ Uses `transformation_cache` to create an intermediate representation
+ (usually a `Tensor`) that other feature columns can use.
+
+ Example usage of `transformation_cache`:
+ Let's say a Feature column depends on raw feature ('raw') and another
+ `FeatureColumn` (input_fc). To access corresponding `Tensor`s,
+ transformation_cache will be used as follows:
+
+ ```python
+ raw_tensor = transformation_cache.get('raw', state_manager)
+ fc_tensor = transformation_cache.get(input_fc, state_manager)
+ ```
+
+ Args:
+ transformation_cache: A `FeatureTransformationCache` object to access
+ features.
+ state_manager: A `StateManager` to create / access resources such as
+ lookup tables.
+
+ Returns:
+ Transformed feature `Tensor`.
+ """
+ pass
+
+ @abc.abstractproperty
+ def parse_example_spec(self):
+ """Returns a `tf.Example` parsing spec as dict.
+
+ It is used to produce the parsing spec for `tf.parse_example`. The returned
+ spec is a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`,
+ and other supported objects. Please check the documentation of
+ @{tf.parse_example} for all supported spec objects.
+
+ Let's say a Feature column depends on raw feature ('raw') and another
+ `FeatureColumn` (input_fc). One possible implementation of
+ parse_example_spec is as follows:
+
+ ```python
+ spec = {'raw': tf.FixedLenFeature(...)}
+ spec.update(input_fc.parse_example_spec)
+ return spec
+ ```
+ """
+ pass
+
+ def create_state(self, state_manager):
+ """Uses the `state_manager` to create state for the FeatureColumn.
+
+ Args:
+ state_manager: A `StateManager` to create / access resources such as
+ lookup tables and variables.
+ """
+ pass
+
+
+class DenseColumn(FeatureColumn):
+ """Represents a column which can be represented as `Tensor`.
+
+ Some examples of this type are: numeric_column, embedding_column,
+ indicator_column.
+ """
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractproperty
+ def variable_shape(self):
+ """`TensorShape` of `get_dense_tensor`, without batch dimension."""
+ pass
+
+ @abc.abstractmethod
+ def get_dense_tensor(self, transformation_cache, state_manager):
+ """Returns a `Tensor`.
+
+ The output of this function will be used by model-builder-functions. For
+ example the pseudo code of `input_layer` will be like:
+
+ ```python
+ def input_layer(features, feature_columns, ...):
+ outputs = [fc.get_dense_tensor(...) for fc in feature_columns]
+ return tf.concat(outputs, -1)
+ ```
+
+ Args:
+ transformation_cache: A `FeatureTransformationCache` object to access
+ features.
+ state_manager: A `StateManager` to create / access resources such as
+ lookup tables.
+
+ Returns:
+ `Tensor` of shape [batch_size] + `variable_shape`.
+ """
+ pass
+
+
+def _create_weighted_sum(column,
+ transformation_cache,
+ state_manager,
+ units,
+ sparse_combiner,
+ weight_collections,
+ trainable,
+ weight_var=None):
+ """Creates a weighted sum for a dense/categorical column for linear_model."""
+ if isinstance(column, CategoricalColumn):
+ return _create_categorical_column_weighted_sum(
+ column=column,
+ transformation_cache=transformation_cache,
+ state_manager=state_manager,
+ units=units,
+ sparse_combiner=sparse_combiner,
+ weight_collections=weight_collections,
+ trainable=trainable,
+ weight_var=weight_var)
+ else:
+ return _create_dense_column_weighted_sum(
+ column=column,
+ transformation_cache=transformation_cache,
+ state_manager=state_manager,
+ units=units,
+ weight_collections=weight_collections,
+ trainable=trainable,
+ weight_var=weight_var)
+
+
+def _create_dense_column_weighted_sum(column,
+ transformation_cache,
+ state_manager,
+ units,
+ weight_collections,
+ trainable,
+ weight_var=None):
+ """Create a weighted sum of a dense column for linear_model."""
+ tensor = column.get_dense_tensor(transformation_cache, state_manager)
+ num_elements = column.variable_shape.num_elements()
+ batch_size = array_ops.shape(tensor)[0]
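+ # Flatten all non-batch dimensions so a single matmul computes the weighted sum.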
+ tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
+ if weight_var is not None:
+ weight = weight_var
+ else:
+ weight = variable_scope.get_variable(
+ name='weights',
+ shape=[num_elements, units],
+ initializer=init_ops.zeros_initializer(),
+ trainable=trainable,
+ collections=weight_collections)
+ return math_ops.matmul(tensor, weight, name='weighted_sum')
+
+
+class CategoricalColumn(FeatureColumn):
+ """Represents a categorical feature.
+
+ A categorical feature is typically handled with a @{tf.SparseTensor} of IDs.
+ """
+ __metaclass__ = abc.ABCMeta
+
+ IdWeightPair = collections.namedtuple( # pylint: disable=invalid-name
+ 'IdWeightPair', ('id_tensor', 'weight_tensor'))
+
+ @abc.abstractproperty
+ def num_buckets(self):
+ """Returns number of buckets in this sparse feature."""
+ pass
+
+ @abc.abstractmethod
+ def get_sparse_tensors(self, transformation_cache, state_manager):
+ """Returns an IdWeightPair.
+
+ `IdWeightPair` is a pair of `SparseTensor`s which represents ids and
+ weights.
+
+ `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
+ `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
+ `SparseTensor` of `float` or `None` to indicate all weights should be
+ taken to be 1. If specified, `weight_tensor` must have exactly the same
+ shape and indices as `id_tensor`. The expected `SparseTensor` is the same
+ as the parsing output of a `VarLenFeature`, which is a ragged matrix.
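+
+ For example, an implementation without explicit weights would typically
+ return something like (illustrative sketch):
+
+ ```python
+ return CategoricalColumn.IdWeightPair(id_tensor, None)
+ ```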
+
+ Args:
+ transformation_cache: A `FeatureTransformationCache` object to access
+ features.
+ state_manager: A `StateManager` to create / access resources such as
+ lookup tables.
+ """
+ pass
+
+
+def _create_categorical_column_weighted_sum(column,
+ transformation_cache,
+ state_manager,
+ units,
+ sparse_combiner,
+ weight_collections,
+ trainable,
+ weight_var=None):
+ # pylint: disable=g-doc-return-or-yield,g-doc-args
+ """Create a weighted sum of a categorical column for linear_model.
+
+ Note to maintainers: as an implementation detail, the weighted sum is
+ computed via embedding_lookup_sparse for efficiency. Mathematically, the
+ two are equivalent.
+
+ Conceptually, a categorical column can be treated as a multi-hot vector.
+ Say:
+
+ ```python
+ x = [0 0 1] # categorical column input
+ w = [a b c] # weights
+ ```
+ The weighted sum is `c` in this case, which is the same as `w[2]`.
+
+ Another example is
+
+ ```python
+ x = [0 1 1] # categorical column input
+ w = [a b c] # weights
+ ```
+ The weighted sum is `b + c` in this case, which is the same as `w[1] + w[2]`.
+
+ In both cases, we can implement the weighted sum via embedding_lookup_sparse
+ with sparse_combiner = "sum".
+ """
+
+ sparse_tensors = column.get_sparse_tensors(transformation_cache,
+ state_manager)
+ id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [
+ array_ops.shape(sparse_tensors.id_tensor)[0], -1
+ ])
+ weight_tensor = sparse_tensors.weight_tensor
+ if weight_tensor is not None:
+ weight_tensor = sparse_ops.sparse_reshape(
+ weight_tensor, [array_ops.shape(weight_tensor)[0], -1])
+
+ if weight_var is not None:
+ weight = weight_var
+ else:
+ weight = variable_scope.get_variable(
+ name='weights',
+ shape=(column.num_buckets, units),
+ initializer=init_ops.zeros_initializer(),
+ trainable=trainable,
+ collections=weight_collections)
+ return _safe_embedding_lookup_sparse(
+ weight,
+ id_tensor,
+ sparse_weights=weight_tensor,
+ combiner=sparse_combiner,
+ name='weighted_sum')
+
+
+class SequenceDenseColumn(FeatureColumn):
+ """Represents dense sequence data."""
+
+ __metaclass__ = abc.ABCMeta
+
+ TensorSequenceLengthPair = collections.namedtuple( # pylint: disable=invalid-name
+ 'TensorSequenceLengthPair', ('dense_tensor', 'sequence_length'))
+
+ @abc.abstractmethod
+ def get_sequence_dense_tensor(self, transformation_cache, state_manager):
+ """Returns a `TensorSequenceLengthPair`.
+
+ Args:
+ transformation_cache: A `FeatureTransformationCache` object to access
+ features.
+ state_manager: A `StateManager` to create / access resources such as
+ lookup tables.
+ """
+ pass
+
+
+class FeatureTransformationCache(object):
+ """Handles caching of transformations while building the model.
+
+ `FeatureColumn` specifies how to digest an input column to the network. Some
+ feature columns require data transformations. This class caches those
+ transformations.
+
+  Some features may be used in more than one place. For example, one can use a
+  bucketized feature by itself and also in a cross with it. In that case we
+ should create only one bucketization op instead of creating ops for each
+ feature column separately. To handle re-use of transformed columns,
+ `FeatureTransformationCache` caches all previously transformed columns.
+
+ Example:
+ We're trying to use the following `FeatureColumn`s:
+
+ ```python
+ bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
+  keywords = fc.categorical_column_with_hash_bucket("keywords", ...)
+ age_X_keywords = fc.crossed_column([bucketized_age, "keywords"])
+ ... = linear_model(features,
+                     [bucketized_age, keywords, age_X_keywords])
+ ```
+
+  If we transform each column independently, then we'll get duplication of
+  bucketization (one inside the cross, one for the bucketized column itself).
+ The `FeatureTransformationCache` eliminates this duplication.
+ """
+
+ def __init__(self, features):
+ """Creates a `FeatureTransformationCache`.
+
+ Args:
+      features: A mapping from feature keys to objects that are `Tensor` or
+ `SparseTensor`, or can be converted to same via
+ `sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
+ signifies a base feature (not-transformed). A `FeatureColumn` key
+ means that this `Tensor` is the output of an existing `FeatureColumn`
+ which can be reused.
+ """
+ self._features = features.copy()
+ self._feature_tensors = {}
+
+ def get(self, key, state_manager):
+ """Returns a `Tensor` for the given key.
+
+ A `str` key is used to access a base feature (not-transformed). When a
+ `FeatureColumn` is passed, the transformed feature is returned if it
+ already exists, otherwise the given `FeatureColumn` is asked to provide its
+ transformed output, which is then cached.
+
+ Args:
+ key: a `str` or a `FeatureColumn`.
+ state_manager: A StateManager object that holds the FeatureColumn state.
+
+ Returns:
+ The transformed `Tensor` corresponding to the `key`.
+
+ Raises:
+ ValueError: if key is not found or a transformed `Tensor` cannot be
+ computed.
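+
+    Example (an illustrative sketch; assumes this module's `numeric_column`
+    and that base features need no state manager):
+
+    ```python
+    cache = FeatureTransformationCache({'price': [[1.], [5.]]})
+    price = cache.get('price', None)        # Raw feature lookup.
+    column = numeric_column('price')
+    output = cache.get(column, None)        # Calls column.transform_feature.
+    output_again = cache.get(column, None)  # Served from the cache.
+    ```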
+ """
+ if key in self._feature_tensors:
+ # FeatureColumn is already transformed or converted.
+ return self._feature_tensors[key]
+
+ if key in self._features:
+ feature_tensor = self._get_raw_feature_as_tensor(key)
+ self._feature_tensors[key] = feature_tensor
+ return feature_tensor
+
+ if isinstance(key, six.string_types):
+ raise ValueError('Feature {} is not in features dictionary.'.format(key))
+
+ if not isinstance(key, FeatureColumn):
+ raise TypeError('"key" must be either a "str" or "FeatureColumn". '
+ 'Provided: {}'.format(key))
+
+ column = key
+ logging.debug('Transforming feature_column %s.', column)
+ transformed = column.transform_feature(self, state_manager)
+ if transformed is None:
+ raise ValueError('Column {} is not supported.'.format(column.name))
+ self._feature_tensors[column] = transformed
+ return transformed
+
+ def _get_raw_feature_as_tensor(self, key):
+ """Gets the raw_feature (keyed by `key`) as `tensor`.
+
+    The raw feature is converted to a (sparse) tensor, possibly expanding its
+    rank.
+
+    For both `Tensor` and `SparseTensor`, the rank is expanded to 2 if it is 1.
+    Tensors of statically unknown (dynamic) rank are also supported. A rank-0
+    raw feature is not supported and raises an error.
+
+ Args:
+ key: A `str` key to access the raw feature.
+
+ Returns:
+ A `Tensor` or `SparseTensor`.
+
+ Raises:
+ ValueError: if the raw feature has rank 0.
+ """
+ raw_feature = self._features[key]
+ feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
+ raw_feature)
+
+ def expand_dims(input_tensor):
+ # Input_tensor must have rank 1.
+ if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
+ return sparse_ops.sparse_reshape(
+ input_tensor, [array_ops.shape(input_tensor)[0], -1])
+ else:
+ return array_ops.expand_dims(input_tensor, -1)
+
+ rank = feature_tensor.get_shape().ndims
+ if rank is not None:
+ if rank == 0:
+ raise ValueError(
+            'Feature (key: {}) cannot have rank 0. Given: {}'.format(
+ key, feature_tensor))
+ return feature_tensor if rank != 1 else expand_dims(feature_tensor)
+
+ # Handle dynamic rank.
+ with ops.control_dependencies([
+ check_ops.assert_positive(
+ array_ops.rank(feature_tensor),
+ message='Feature (key: {}) cannot have rank 0. Given: {}'.format(
+ key, feature_tensor))]):
+ return control_flow_ops.cond(
+ math_ops.equal(1, array_ops.rank(feature_tensor)),
+ lambda: expand_dims(feature_tensor),
+ lambda: feature_tensor)
+
+
+# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
+def _shape_offsets(shape):
+ """Returns moving offset for each dimension given shape."""
+ offsets = []
+ for dim in reversed(shape):
+ if offsets:
+ offsets.append(dim * offsets[-1])
+ else:
+ offsets.append(dim)
+ offsets.reverse()
+ return offsets
+
+
+# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
+def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):
+ """Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.
+
+ If `input_tensor` is already a `SparseTensor`, just return it.
+
+ Args:
+ input_tensor: A string or integer `Tensor`.
+    ignore_value: Entries in `input_tensor` equal to this value will be
+      absent from the resulting `SparseTensor`. If `None`, the default value of
+      `input_tensor`'s dtype will be used ('' for `str`, -1 for `int`).
+
+ Returns:
+ A `SparseTensor` with the same shape as `input_tensor`.
+
+ Raises:
+ ValueError: when `input_tensor`'s rank is `None`.
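+
+  Example (an illustrative sketch, relying on the default ignore_value for
+  strings):
+
+  ```python
+  sp = _to_sparse_input_and_drop_ignore_values([['a', ''], ['b', 'c']])
+  # sp is a SparseTensor with values ['a', 'b', 'c']; the '' cell is dropped.
+  ```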
+ """
+ input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
+ input_tensor)
+ if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
+ return input_tensor
+ with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
+ if ignore_value is None:
+ if input_tensor.dtype == dtypes.string:
+        # Exception: TF strings are converted to numpy objects by default.
+ ignore_value = ''
+ elif input_tensor.dtype.is_integer:
+ ignore_value = -1 # -1 has a special meaning of missing feature
+ else:
+ # NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
+ # constructing a new numpy object of the given type, which yields the
+ # default value for that type.
+ ignore_value = input_tensor.dtype.as_numpy_dtype()
+ ignore_value = math_ops.cast(
+ ignore_value, input_tensor.dtype, name='ignore_value')
+ indices = array_ops.where(
+ math_ops.not_equal(input_tensor, ignore_value), name='indices')
+ return sparse_tensor_lib.SparseTensor(
+ indices=indices,
+ values=array_ops.gather_nd(input_tensor, indices, name='values'),
+ dense_shape=array_ops.shape(
+ input_tensor, out_type=dtypes.int64, name='dense_shape'))
+
+
+def _normalize_feature_columns(feature_columns):
+ """Normalizes the `feature_columns` input.
+
+  This method converts `feature_columns` to a list as best it can. In
+  addition, it verifies the type and other properties of `feature_columns`, as
+  required by downstream libraries.
+
+ Args:
+ feature_columns: The raw feature columns, usually passed by users.
+
+ Returns:
+ The normalized feature column list.
+
+ Raises:
+ ValueError: for any invalid inputs, such as empty, duplicated names, etc.
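+
+  For example (a sketch, assuming this module's `numeric_column`): a single
+  column is wrapped into a one-element list, and an iterator is materialized:
+
+  ```python
+  _normalize_feature_columns(numeric_column('price'))          # -> [column]
+  _normalize_feature_columns(iter([numeric_column('price')]))  # -> [column]
+  ```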
+ """
+ if isinstance(feature_columns, FeatureColumn):
+ feature_columns = [feature_columns]
+
+ if isinstance(feature_columns, collections.Iterator):
+ feature_columns = list(feature_columns)
+
+ if isinstance(feature_columns, dict):
+ raise ValueError('Expected feature_columns to be iterable, found dict.')
+
+ for column in feature_columns:
+ if not isinstance(column, FeatureColumn):
+ raise ValueError('Items of feature_columns must be a FeatureColumn. '
+ 'Given (type {}): {}.'.format(type(column), column))
+ if not feature_columns:
+ raise ValueError('feature_columns must not be empty.')
+ name_to_column = dict()
+ for column in feature_columns:
+ if column.name in name_to_column:
+ raise ValueError('Duplicate feature column name found for columns: {} '
+                       'and {}. This usually means that these columns refer to '
+                       'the same base feature. Either one must be discarded or '
+                       'a duplicated but renamed item must be inserted in the '
+                       'features dict.'.format(column,
+ name_to_column[column.name]))
+ name_to_column[column.name] = column
+
+ return feature_columns
+
+
+class NumericColumn(
+ DenseColumn,
+ collections.namedtuple(
+ 'NumericColumn',
+ ('key', 'shape', 'default_value', 'dtype', 'normalizer_fn'))):
+ """see `numeric_column`."""
+
+ @property
+ def name(self):
+ """See `FeatureColumn` base class."""
+ return self.key
+
+ @property
+ def parse_example_spec(self):
+ """See `FeatureColumn` base class."""
+ return {
+ self.key:
+ parsing_ops.FixedLenFeature(self.shape, self.dtype,
+ self.default_value)
+ }
+
+ def transform_feature(self, transformation_cache, state_manager):
+ """See `FeatureColumn` base class.
+
+ In this case, we apply the `normalizer_fn` to the input tensor.
+
+ Args:
+ transformation_cache: A `FeatureTransformationCache` object to access
+ features.
+ state_manager: A `StateManager` to create / access resources such as
+ lookup tables.
+
+ Returns:
+ Normalized input tensor.
+ Raises:
+ ValueError: If a SparseTensor is passed in.
+ """
+ input_tensor = transformation_cache.get(self.key, state_manager)
+ if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
+ raise ValueError(
+          'The corresponding Tensor of a numeric column must be a Tensor. '
+ 'SparseTensor is not supported. key: {}'.format(self.key))
+ if self.normalizer_fn is not None:
+ input_tensor = self.normalizer_fn(input_tensor)
+ return math_ops.to_float(input_tensor)
+
+ @property
+ def variable_shape(self):
+ """See `DenseColumn` base class."""
+ return tensor_shape.TensorShape(self.shape)
+
+ def get_dense_tensor(self, transformation_cache, state_manager):
+ """Returns dense `Tensor` representing numeric feature.
+
+ Args:
+ transformation_cache: A `FeatureTransformationCache` object to access
+ features.
+ state_manager: A `StateManager` to create / access resources such as
+ lookup tables.
+
+ Returns:
+ Dense `Tensor` created within `transform_feature`.
+ """
+    # Feature has already been transformed. Return the intermediate
+    # representation created by transform_feature.
+ return transformation_cache.get(self, state_manager)
+
+
+class BucketizedColumn(DenseColumn, CategoricalColumn,
+ collections.namedtuple('BucketizedColumn',
+ ('source_column', 'boundaries'))):
+ """See `bucketized_column`."""
+
+ @property
+ def name(self):
+ """See `FeatureColumn` base class."""
+ return '{}_bucketized'.format(self.source_column.name)
+
+ @property
+ def parse_example_spec(self):
+ """See `FeatureColumn` base class."""
+ return self.source_column.parse_example_spec
+
+ def transform_feature(self, transformation_cache, state_manager):
+ """Returns bucketized categorical `source_column` tensor."""
+ source_tensor = transformation_cache.get(self.source_column, state_manager)
+ return math_ops._bucketize( # pylint: disable=protected-access
+ source_tensor,
+ boundaries=self.boundaries)
+
+ @property
+ def variable_shape(self):
+ """See `DenseColumn` base class."""
+ return tensor_shape.TensorShape(
+ tuple(self.source_column.shape) + (len(self.boundaries) + 1,))
+
+ def get_dense_tensor(self, transformation_cache, state_manager):
+ """Returns one hot encoded dense `Tensor`."""
+ input_tensor = transformation_cache.get(self, state_manager)
+ return array_ops.one_hot(
+ indices=math_ops.to_int64(input_tensor),
+ depth=len(self.boundaries) + 1,
+ on_value=1.,
+ off_value=0.)
+
+ @property
+ def num_buckets(self):
+ """See `CategoricalColumn` base class."""
+ # By construction, source_column is always one-dimensional.
+ return (len(self.boundaries) + 1) * self.source_column.shape[0]
+
+ def get_sparse_tensors(self, transformation_cache, state_manager):
+ """Converts dense inputs to SparseTensor so downstream code can use it."""
+ input_tensor = transformation_cache.get(self, state_manager)
+ batch_size = array_ops.shape(input_tensor)[0]
+ # By construction, source_column is always one-dimensional.
+ source_dimension = self.source_column.shape[0]
+
+ i1 = array_ops.reshape(
+ array_ops.tile(
+ array_ops.expand_dims(math_ops.range(0, batch_size), 1),
+ [1, source_dimension]),
+ (-1,))
+ i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
+ # Flatten the bucket indices and unique them across dimensions
+ # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
+ bucket_indices = (
+ array_ops.reshape(input_tensor, (-1,)) +
+ (len(self.boundaries) + 1) * i2)
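+    # Illustrative sketch: with 2 boundaries (3 buckets per dimension) and
+    # source_dimension 2, a bucketized row [1, 2] flattens to bucket ids
+    # [1, 5], since ids in the 2nd column are offset by 3.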
+
+ indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
+ dense_shape = math_ops.to_int64(array_ops.stack(
+ [batch_size, source_dimension]))
+ sparse_tensor = sparse_tensor_lib.SparseTensor(
+ indices=indices,
+ values=bucket_indices,
+ dense_shape=dense_shape)
+ return CategoricalColumn.IdWeightPair(sparse_tensor, None)
+
+
+class EmbeddingColumn(
+ DenseColumn, SequenceDenseColumn,
+ collections.namedtuple(
+ 'EmbeddingColumn',
+ ('categorical_column', 'dimension', 'combiner', 'initializer',
+ 'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable'))):
+ """See `embedding_column`."""
+
+ @property
+ def name(self):
+ """See `FeatureColumn` base class."""
+ return '{}_embedding'.format(self.categorical_column.name)
+
+ @property
+ def parse_example_spec(self):
+ """See `FeatureColumn` base class."""
+ return self.categorical_column.parse_example_spec
+
+ def transform_feature(self, transformation_cache, state_manager):
+ """Transforms underlying `categorical_column`."""
+ return transformation_cache.get(self.categorical_column, state_manager)
+
+ @property
+ def variable_shape(self):
+ """See `DenseColumn` base class."""
+ return tensor_shape.vector(self.dimension)
+
+ def _get_dense_tensor_internal(self, transformation_cache, state_manager):
+ """Private method that follows the signature of _get_dense_tensor."""
+ # Get sparse IDs and weights.
+ sparse_tensors = self.categorical_column.get_sparse_tensors(
+ transformation_cache, state_manager)
+ sparse_ids = sparse_tensors.id_tensor
+ sparse_weights = sparse_tensors.weight_tensor
+
+ embedding_shape = (self.categorical_column.num_buckets, self.dimension)
+ embedding_weights = state_manager.get_variable(
+ self,
+ name='embedding_weights',
+ shape=embedding_shape,
+ dtype=dtypes.float32,
+ initializer=self.initializer)
+
+ if self.ckpt_to_load_from is not None:
+ to_restore = embedding_weights
+ if isinstance(to_restore, variables.PartitionedVariable):
+ to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
+ checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
+ self.tensor_name_in_ckpt: to_restore
+ })
+
+ # Return embedding lookup result.
+ return _safe_embedding_lookup_sparse(
+ embedding_weights=embedding_weights,
+ sparse_ids=sparse_ids,
+ sparse_weights=sparse_weights,
+ combiner=self.combiner,
+ name='%s_weights' % self.name,
+ max_norm=self.max_norm)
+
+ def get_dense_tensor(self, transformation_cache, state_manager):
+ """Returns tensor after doing the embedding lookup.
+
+ Args:
+ transformation_cache: A `FeatureTransformationCache` object to access
+ features.
+ state_manager: A `StateManager` to create / access resources such as
+ lookup tables.
+
+ Returns:
+ Embedding lookup tensor.
+
+ Raises:
+ ValueError: `categorical_column` is SequenceCategoricalColumn.
+ """
+ if isinstance(self.categorical_column, SequenceCategoricalColumn):
+ raise ValueError(
+ 'In embedding_column: {}. '
+ 'categorical_column must not be of type SequenceCategoricalColumn. '
+ 'Suggested fix A: If you wish to use input_layer, use a '
+ 'non-sequence categorical_column_with_*. '
+ 'Suggested fix B: If you wish to create sequence input, use '
+ 'sequence_input_layer instead of input_layer. '
+ 'Given (type {}): {}'.format(self.name, type(self.categorical_column),
+ self.categorical_column))
+ return self._get_dense_tensor_internal(transformation_cache, state_manager)
+
+ def get_sequence_dense_tensor(self, transformation_cache, state_manager):
+ """See `SequenceDenseColumn` base class."""
+ if not isinstance(self.categorical_column, SequenceCategoricalColumn):
+ raise ValueError(
+ 'In embedding_column: {}. '
+ 'categorical_column must be of type SequenceCategoricalColumn '
+ 'to use sequence_input_layer. '
+ 'Suggested fix: Use one of sequence_categorical_column_with_*. '
+ 'Given (type {}): {}'.format(self.name, type(self.categorical_column),
+ self.categorical_column))
+    dense_tensor = self._get_dense_tensor_internal(transformation_cache,
+                                                   state_manager)
+ sparse_tensors = self.categorical_column.get_sparse_tensors(
+ transformation_cache, state_manager)
+ sequence_length = _sequence_length_from_sparse_tensor(
+ sparse_tensors.id_tensor)
+ return SequenceDenseColumn.TensorSequenceLengthPair(
+ dense_tensor=dense_tensor, sequence_length=sequence_length)
+
+
+def _get_graph_for_variable(var):
+ if isinstance(var, variables.PartitionedVariable):
+ return list(var)[0].graph
+ else:
+ return var.graph
+
+
+class SharedEmbeddingColumn(
+ DenseColumn, SequenceDenseColumn,
+ collections.namedtuple(
+ 'SharedEmbeddingColumn',
+ ('categorical_column', 'dimension', 'combiner', 'initializer',
+ 'shared_embedding_collection_name', 'ckpt_to_load_from',
+ 'tensor_name_in_ckpt', 'max_norm', 'trainable'))):
+ """See `embedding_column`."""
+
+ @property
+ def name(self):
+ """See `FeatureColumn` base class."""
+ return '{}_shared_embedding'.format(self.categorical_column.name)
+
+ @property
+ def shared_collection_name(self):
+ """Returns the shared name of this column.
+
+    A group of columns share an embedding. Each of those columns has the same
+    `shared_collection_name`, by which the group can be referred to
+    collectively.
+ """
+ return self.shared_embedding_collection_name
+
+ @property
+ def parse_example_spec(self):
+ """See `FeatureColumn` base class."""
+ return self.categorical_column.parse_example_spec
+
+ def transform_feature(self, transformation_cache, state_manager):
+ """See `FeatureColumn` base class."""
+ return transformation_cache.get(self.categorical_column, state_manager)
+
+ @property
+ def variable_shape(self):
+ """See `DenseColumn` base class."""
+ return tensor_shape.vector(self.dimension)
+
+ def _get_dense_tensor_internal(self, transformation_cache, state_manager):
+ """Private method that follows the signature of _get_dense_tensor."""
+ # This method is called from a variable_scope with name _var_scope_name,
+ # which is shared among all shared embeddings. Open a name_scope here, so
+ # that the ops for different columns have distinct names.
+ with ops.name_scope(None, default_name=self.name):
+ # Get sparse IDs and weights.
+ sparse_tensors = self.categorical_column.get_sparse_tensors(
+ transformation_cache, state_manager)
+ sparse_ids = sparse_tensors.id_tensor
+ sparse_weights = sparse_tensors.weight_tensor
+
+ embedding_shape = (self.categorical_column.num_buckets, self.dimension)
+ embedding_weights = state_manager.get_variable(
+ self,
+ name='embedding_weights',
+ shape=embedding_shape,
+ dtype=dtypes.float32,
+ initializer=self.initializer)
+
+ if self.ckpt_to_load_from is not None:
+ to_restore = embedding_weights
+ if isinstance(to_restore, variables.PartitionedVariable):
+ to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
+ checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
+ self.tensor_name_in_ckpt: to_restore
+ })
+
+ # Return embedding lookup result.
+ return _safe_embedding_lookup_sparse(
+ embedding_weights=embedding_weights,
+ sparse_ids=sparse_ids,
+ sparse_weights=sparse_weights,
+ combiner=self.combiner,
+ name='%s_weights' % self.name,
+ max_norm=self.max_norm)
+
+ def get_dense_tensor(self, transformation_cache, state_manager):
+ """Returns the embedding lookup result."""
+ if isinstance(self.categorical_column, SequenceCategoricalColumn):
+ raise ValueError(
+ 'In embedding_column: {}. '
+ 'categorical_column must not be of type SequenceCategoricalColumn. '
+ 'Suggested fix A: If you wish to use input_layer, use a '
+ 'non-sequence categorical_column_with_*. '
+ 'Suggested fix B: If you wish to create sequence input, use '
+ 'sequence_input_layer instead of input_layer. '
+ 'Given (type {}): {}'.format(self.name, type(self.categorical_column),
+ self.categorical_column))
+ return self._get_dense_tensor_internal(transformation_cache, state_manager)
+
+ def get_sequence_dense_tensor(self, transformation_cache, state_manager):
+ """See `SequenceDenseColumn` base class."""
+ if not isinstance(self.categorical_column, SequenceCategoricalColumn):
+ raise ValueError(
+ 'In embedding_column: {}. '
+ 'categorical_column must be of type SequenceCategoricalColumn '
+ 'to use sequence_input_layer. '
+ 'Suggested fix: Use one of sequence_categorical_column_with_*. '
+ 'Given (type {}): {}'.format(self.name, type(self.categorical_column),
+ self.categorical_column))
+    dense_tensor = self._get_dense_tensor_internal(transformation_cache,
+                                                   state_manager)
+ sparse_tensors = self.categorical_column.get_sparse_tensors(
+ transformation_cache, state_manager)
+ sequence_length = _sequence_length_from_sparse_tensor(
+ sparse_tensors.id_tensor)
+ return SequenceDenseColumn.TensorSequenceLengthPair(
+ dense_tensor=dense_tensor, sequence_length=sequence_length)
+
+
+def _create_tuple(shape, value):
+ """Returns a tuple with given shape and filled with value."""
+ if shape:
+ return tuple([_create_tuple(shape[1:], value) for _ in range(shape[0])])
+ return value
+
+
+def _as_tuple(value):
+ if not nest.is_sequence(value):
+ return value
+ return tuple([_as_tuple(v) for v in value])
+
+
+def _check_shape(shape, key):
+ """Returns shape if it's valid, raises error otherwise."""
+ assert shape is not None
+ if not nest.is_sequence(shape):
+ shape = [shape]
+ shape = tuple(shape)
+ for dimension in shape:
+ if not isinstance(dimension, int):
+ raise TypeError('shape dimensions must be integer. '
+ 'shape: {}, key: {}'.format(shape, key))
+ if dimension < 1:
+ raise ValueError('shape dimensions must be greater than 0. '
+ 'shape: {}, key: {}'.format(shape, key))
+ return shape
+
+
+def _is_shape_and_default_value_compatible(default_value, shape):
+ """Verifies compatibility of shape and default_value."""
+  # Invalid conditions:
+  #  * default_value is a sequence and shape is empty
+  #  * default_value is a scalar and shape is not empty
+ if nest.is_sequence(default_value) != bool(shape):
+ return False
+ if not shape:
+ return True
+ if len(default_value) != shape[0]:
+ return False
+ for i in range(shape[0]):
+ if not _is_shape_and_default_value_compatible(default_value[i], shape[1:]):
+ return False
+ return True
+
+
+def _check_default_value(shape, default_value, dtype, key):
+ """Returns default value as tuple if it's valid, otherwise raises errors.
+
+ This function verifies that `default_value` is compatible with both `shape`
+ and `dtype`. If it is not compatible, it raises an error. If it is compatible,
+ it casts default_value to a tuple and returns it. `key` is used only
+  for error messages.
+
+ Args:
+    shape: An iterable of integers that specifies the shape of the `Tensor`.
+ default_value: If a single value is provided, the same value will be applied
+ as the default value for every item. If an iterable of values is
+ provided, the shape of the `default_value` should be equal to the given
+ `shape`.
+ dtype: defines the type of values. Default value is `tf.float32`. Must be a
+ non-quantized, real integer or floating point type.
+ key: Column name, used only for error messages.
+
+ Returns:
+ A tuple which will be used as default value.
+
+  Raises:
+    ValueError: if `default_value` is an iterable but not compatible with
+      `shape`.
+    TypeError: if `default_value` is not compatible with `dtype`.
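+
+  For example (an illustrative sketch):
+
+  ```python
+  _check_default_value((2,), [1, 2], dtypes.int64, 'price')     # -> (1, 2)
+  _check_default_value((2,), [1.5, 2.], dtypes.int64, 'price')  # TypeError
+  ```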
+ """
+ if default_value is None:
+ return None
+
+ if isinstance(default_value, int):
+ return _create_tuple(shape, default_value)
+
+ if isinstance(default_value, float) and dtype.is_floating:
+ return _create_tuple(shape, default_value)
+
+ if callable(getattr(default_value, 'tolist', None)): # Handles numpy arrays
+ default_value = default_value.tolist()
+
+ if nest.is_sequence(default_value):
+ if not _is_shape_and_default_value_compatible(default_value, shape):
+ raise ValueError(
+ 'The shape of default_value must be equal to given shape. '
+ 'default_value: {}, shape: {}, key: {}'.format(
+ default_value, shape, key))
+ # Check if the values in the list are all integers or are convertible to
+ # floats.
+ is_list_all_int = all(
+ isinstance(v, int) for v in nest.flatten(default_value))
+ is_list_has_float = any(
+ isinstance(v, float) for v in nest.flatten(default_value))
+ if is_list_all_int:
+ return _as_tuple(default_value)
+ if is_list_has_float and dtype.is_floating:
+ return _as_tuple(default_value)
+ raise TypeError('default_value must be compatible with dtype. '
+ 'default_value: {}, dtype: {}, key: {}'.format(
+ default_value, dtype, key))
+
+
+class HashedCategoricalColumn(
+ CategoricalColumn,
+ collections.namedtuple('HashedCategoricalColumn',
+ ('key', 'hash_bucket_size', 'dtype'))):
+ """see `categorical_column_with_hash_bucket`."""
+
+ @property
+ def name(self):
+ """See `FeatureColumn` base class."""
+ return self.key
+
+ @property
+ def parse_example_spec(self):
+ """See `FeatureColumn` base class."""
+ return {self.key: parsing_ops.VarLenFeature(self.dtype)}
+
+ def transform_feature(self, transformation_cache, state_manager):
+ """Hashes the values in the feature_column."""
+ input_tensor = _to_sparse_input_and_drop_ignore_values(
+ transformation_cache.get(self.key, state_manager))
+ if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
+ raise ValueError('SparseColumn input must be a SparseTensor.')
+
+ _assert_string_or_int(
+ input_tensor.dtype,
+ prefix='column_name: {} input_tensor'.format(self.key))
+
+ if self.dtype.is_integer != input_tensor.dtype.is_integer:
+ raise ValueError(
+ 'Column dtype and SparseTensors dtype must be compatible. '
+ 'key: {}, column dtype: {}, tensor dtype: {}'.format(
+ self.key, self.dtype, input_tensor.dtype))
+
+ if self.dtype == dtypes.string:
+ sparse_values = input_tensor.values
+ else:
+ sparse_values = string_ops.as_string(input_tensor.values)
+
+ sparse_id_values = string_ops.string_to_hash_bucket_fast(
+ sparse_values, self.hash_bucket_size, name='lookup')
+ return sparse_tensor_lib.SparseTensor(
+ input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
+
+ @property
+ def num_buckets(self):
+ """Returns number of buckets in this sparse feature."""
+ return self.hash_bucket_size
+
+ def get_sparse_tensors(self, transformation_cache, state_manager):
+ """See `CategoricalColumn` base class."""
+ return CategoricalColumn.IdWeightPair(
+ transformation_cache.get(self, state_manager), None)
+
+
+class VocabularyFileCategoricalColumn(
+ CategoricalColumn,
+ collections.namedtuple('VocabularyFileCategoricalColumn',
+ ('key', 'vocabulary_file', 'vocabulary_size',
+ 'num_oov_buckets', 'dtype', 'default_value'))):
+ """See `categorical_column_with_vocabulary_file`."""
+
+ @property
+ def name(self):
+ """See `FeatureColumn` base class."""
+ return self.key
+
+ @property
+ def parse_example_spec(self):
+ """See `FeatureColumn` base class."""
+ return {self.key: parsing_ops.VarLenFeature(self.dtype)}
+
+ def transform_feature(self, transformation_cache, state_manager):
+ """Creates a lookup table for the vocabulary."""
+ input_tensor = _to_sparse_input_and_drop_ignore_values(
+ transformation_cache.get(self.key, state_manager))
+
+ if self.dtype.is_integer != input_tensor.dtype.is_integer:
+ raise ValueError(
+ 'Column dtype and SparseTensors dtype must be compatible. '
+ 'key: {}, column dtype: {}, tensor dtype: {}'.format(
+ self.key, self.dtype, input_tensor.dtype))
+
+ _assert_string_or_int(
+ input_tensor.dtype,
+ prefix='column_name: {} input_tensor'.format(self.key))
+
+ key_dtype = self.dtype
+ if input_tensor.dtype.is_integer:
+ # `index_table_from_file` requires 64-bit integer keys.
+ key_dtype = dtypes.int64
+ input_tensor = math_ops.to_int64(input_tensor)
+
+ # TODO(rohanj): Use state manager to manage the index table creation.
+ return lookup_ops.index_table_from_file(
+ vocabulary_file=self.vocabulary_file,
+ num_oov_buckets=self.num_oov_buckets,
+ vocab_size=self.vocabulary_size,
+ default_value=self.default_value,
+ key_dtype=key_dtype,
+ name='{}_lookup'.format(self.key)).lookup(input_tensor)
+
+ @property
+ def num_buckets(self):
+ """Returns number of buckets in this sparse feature."""
+ return self.vocabulary_size + self.num_oov_buckets
+
+ def get_sparse_tensors(self, transformation_cache, state_manager):
+ """See `CategoricalColumn` base class."""
+ return CategoricalColumn.IdWeightPair(
+ transformation_cache.get(self, state_manager), None)
+
+
+class VocabularyListCategoricalColumn(
+ CategoricalColumn,
+ collections.namedtuple(
+ 'VocabularyListCategoricalColumn',
+ ('key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets'))
+):
+ """See `categorical_column_with_vocabulary_list`."""
+
+ @property
+ def name(self):
+ """See `FeatureColumn` base class."""
+ return self.key
+
+ @property
+ def parse_example_spec(self):
+ """See `FeatureColumn` base class."""
+ return {self.key: parsing_ops.VarLenFeature(self.dtype)}
+
+ def transform_feature(self, transformation_cache, state_manager):
+ """Creates a lookup table for the vocabulary list."""
+ input_tensor = _to_sparse_input_and_drop_ignore_values(
+ transformation_cache.get(self.key, state_manager))
+
+ if self.dtype.is_integer != input_tensor.dtype.is_integer:
+ raise ValueError(
+ 'Column dtype and SparseTensors dtype must be compatible. '
+ 'key: {}, column dtype: {}, tensor dtype: {}'.format(
+ self.key, self.dtype, input_tensor.dtype))
+
+ _assert_string_or_int(
+ input_tensor.dtype,
+ prefix='column_name: {} input_tensor'.format(self.key))
+
+ key_dtype = self.dtype
+ if input_tensor.dtype.is_integer:
+ # `index_table_from_tensor` requires 64-bit integer keys.
+ key_dtype = dtypes.int64
+ input_tensor = math_ops.to_int64(input_tensor)
+
+ # TODO(rohanj): Use state manager to manage the index table creation.
+ return lookup_ops.index_table_from_tensor(
+ vocabulary_list=tuple(self.vocabulary_list),
+ default_value=self.default_value,
+ num_oov_buckets=self.num_oov_buckets,
+ dtype=key_dtype,
+ name='{}_lookup'.format(self.key)).lookup(input_tensor)
+
+ @property
+ def num_buckets(self):
+ """Returns number of buckets in this sparse feature."""
+ return len(self.vocabulary_list) + self.num_oov_buckets
+
+ def get_sparse_tensors(self, transformation_cache, state_manager):
+ """See `CategoricalColumn` base class."""
+ return CategoricalColumn.IdWeightPair(
+ transformation_cache.get(self, state_manager), None)
+
+
+class IdentityCategoricalColumn(
+ CategoricalColumn,
+ collections.namedtuple('IdentityCategoricalColumn',
+ ('key', 'number_buckets', 'default_value'))):
+ """See `categorical_column_with_identity`."""
+
+ @property
+ def name(self):
+ """See `FeatureColumn` base class."""
+ return self.key
+
+ @property
+ def parse_example_spec(self):
+ """See `FeatureColumn` base class."""
+ return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}
+
+ def transform_feature(self, transformation_cache, state_manager):
+ """Returns a SparseTensor with identity values."""
+ input_tensor = _to_sparse_input_and_drop_ignore_values(
+ transformation_cache.get(self.key, state_manager))
+
+ if not input_tensor.dtype.is_integer:
+ raise ValueError(
+ 'Invalid input, not integer. key: {} dtype: {}'.format(
+ self.key, input_tensor.dtype))
+
+ values = math_ops.to_int64(input_tensor.values, name='values')
+ num_buckets = math_ops.to_int64(self.num_buckets, name='num_buckets')
+ zero = math_ops.to_int64(0, name='zero')
+ if self.default_value is None:
+ # Fail if values are out-of-range.
+ assert_less = check_ops.assert_less(
+ values, num_buckets, data=(values, num_buckets),
+ name='assert_less_than_num_buckets')
+ assert_greater = check_ops.assert_greater_equal(
+ values, zero, data=(values,),
+ name='assert_greater_or_equal_0')
+ with ops.control_dependencies((assert_less, assert_greater)):
+ values = array_ops.identity(values)
+ else:
+ # Assign default for out-of-range values.
+ values = array_ops.where(
+ math_ops.logical_or(
+ values < zero, values >= num_buckets, name='out_of_range'),
+ array_ops.fill(
+ dims=array_ops.shape(values),
+ value=math_ops.to_int64(self.default_value),
+ name='default_values'),
+ values)
+
+ return sparse_tensor_lib.SparseTensor(
+ indices=input_tensor.indices,
+ values=values,
+ dense_shape=input_tensor.dense_shape)
+
+ @property
+ def num_buckets(self):
+ """Returns number of buckets in this sparse feature."""
+ return self.number_buckets
+
+ def get_sparse_tensors(self, transformation_cache, state_manager):
+ """See `CategoricalColumn` base class."""
+ return CategoricalColumn.IdWeightPair(
+ transformation_cache.get(self, state_manager), None)
+
+
+class WeightedCategoricalColumn(
+ CategoricalColumn,
+ collections.namedtuple(
+ 'WeightedCategoricalColumn',
+ ('categorical_column', 'weight_feature_key', 'dtype'))):
+ """See `weighted_categorical_column`."""
+
+ @property
+ def name(self):
+ """See `FeatureColumn` base class."""
+ return '{}_weighted_by_{}'.format(
+ self.categorical_column.name, self.weight_feature_key)
+
+ @property
+ def parse_example_spec(self):
+ """See `FeatureColumn` base class."""
+ config = self.categorical_column.parse_example_spec
+ if self.weight_feature_key in config:
+ raise ValueError('Parse config {} already exists for {}.'.format(
+ config[self.weight_feature_key], self.weight_feature_key))
+ config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
+ return config
+
+ @property
+ def num_buckets(self):
+ """See `DenseColumn` base class."""
+ return self.categorical_column.num_buckets
+
+ def transform_feature(self, transformation_cache, state_manager):
+ """Applies weights to tensor generated from `categorical_column`'."""
+ weight_tensor = transformation_cache.get(self.weight_feature_key,
+ state_manager)
+ if weight_tensor is None:
+ raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
+ weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
+ weight_tensor)
+ if self.dtype != weight_tensor.dtype.base_dtype:
+ raise ValueError('Bad dtype, expected {}, but got {}.'.format(
+ self.dtype, weight_tensor.dtype))
+ if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
+ # The weight tensor can be a regular Tensor. In this case, sparsify it.
+ weight_tensor = _to_sparse_input_and_drop_ignore_values(
+ weight_tensor, ignore_value=0.0)
+ if not weight_tensor.dtype.is_floating:
+ weight_tensor = math_ops.to_float(weight_tensor)
+ return (transformation_cache.get(self.categorical_column, state_manager),
+ weight_tensor)
+
+ def get_sparse_tensors(self, transformation_cache, state_manager):
+ """See `CategoricalColumn` base class."""
+ tensors = transformation_cache.get(self, state_manager)
+ return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
+
+
+class CrossedColumn(
+ CategoricalColumn,
+ collections.namedtuple('CrossedColumn',
+ ('keys', 'hash_bucket_size', 'hash_key'))):
+ """See `crossed_column`."""
+
+ @property
+ def name(self):
+ """See `FeatureColumn` base class."""
+ feature_names = []
+ for key in _collect_leaf_level_keys(self):
+ if isinstance(key, FeatureColumn):
+ feature_names.append(key.name)
+ else: # key must be a string
+ feature_names.append(key)
+ return '_X_'.join(sorted(feature_names))
+
+ @property
+ def parse_example_spec(self):
+ """See `FeatureColumn` base class."""
+ config = {}
+ for key in self.keys:
+ if isinstance(key, FeatureColumn):
+ config.update(key.parse_example_spec)
+ else: # key must be a string
+ config.update({key: parsing_ops.VarLenFeature(dtypes.string)})
+ return config
+
+ def transform_feature(self, transformation_cache, state_manager):
+ """Generates a hashed sparse cross from the input tensors."""
+ feature_tensors = []
+ for key in _collect_leaf_level_keys(self):
+ if isinstance(key, six.string_types):
+ feature_tensors.append(transformation_cache.get(key, state_manager))
+ elif isinstance(key, CategoricalColumn):
+ ids_and_weights = key.get_sparse_tensors(transformation_cache,
+ state_manager)
+ if ids_and_weights.weight_tensor is not None:
+ raise ValueError(
+ 'crossed_column does not support weight_tensor, but the given '
+ 'column populates weight_tensor. '
+ 'Given column: {}'.format(key.name))
+ feature_tensors.append(ids_and_weights.id_tensor)
+ else:
+ raise ValueError('Unsupported column type. Given: {}'.format(key))
+ return sparse_ops.sparse_cross_hashed(
+ inputs=feature_tensors,
+ num_buckets=self.hash_bucket_size,
+ hash_key=self.hash_key)
+
+ @property
+ def num_buckets(self):
+ """Returns number of buckets in this sparse feature."""
+ return self.hash_bucket_size
+
+ def get_sparse_tensors(self, transformation_cache, state_manager):
+ """See `CategoricalColumn` base class."""
+ return CategoricalColumn.IdWeightPair(
+ transformation_cache.get(self, state_manager), None)
+
+
+def _collect_leaf_level_keys(cross):
+ """Collects base keys by expanding all nested crosses.
+
+ Args:
+ cross: A `CrossedColumn`.
+
+ Returns:
+ A list of strings or `CategoricalColumn` instances.
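+
+  For example (a sketch): for a cross of `['a', crossed_column(['b', 'c'],
+  ...)]`, the leaf-level keys are `['a', 'b', 'c']`.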
+ """
+ leaf_level_keys = []
+ for k in cross.keys:
+ if isinstance(k, CrossedColumn):
+ leaf_level_keys.extend(_collect_leaf_level_keys(k))
+ else:
+ leaf_level_keys.append(k)
+ return leaf_level_keys
+
+
+# TODO(zakaria): Move this to embedding_ops and make it public.
+def _safe_embedding_lookup_sparse(embedding_weights,
+ sparse_ids,
+ sparse_weights=None,
+ combiner='mean',
+ default_id=None,
+ name=None,
+ partition_strategy='div',
+ max_norm=None):
+ """Lookup embedding results, accounting for invalid IDs and empty features.
+
+ The partitioned embedding in `embedding_weights` must all be the same shape
+ except for the first dimension. The first dimension is allowed to vary as the
+ vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
+ may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
+ partitioner.
+
+  Invalid IDs (< 0) are pruned from input IDs and weights, as are any IDs
+  with non-positive weight. For an entry with no features, the embedding vector
+  for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
+
+ The ids and weights may be multi-dimensional. Embeddings are always aggregated
+ along the last dimension.
+
+ Args:
+ embedding_weights: A list of `P` float `Tensor`s or values representing
+ partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable`
+ created by partitioning along dimension 0. The total unpartitioned
+ shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
+ vocab size and `e_1, ..., e_m` are the embedding dimensions.
+ sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
+ ids. `d_0` is typically batch size.
+ sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
+ float weights corresponding to `sparse_ids`, or `None` if all weights
+      are assumed to be 1.0.
+ combiner: A string specifying how to combine embedding results for each
+ entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
+ the default.
+ default_id: The id to use for an entry with no features.
+ name: A name for this operation (optional).
+ partition_strategy: A string specifying the partitioning strategy.
+ Currently `"div"` and `"mod"` are supported. Default is `"div"`.
+ max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
+ combining.
+
+ Returns:
+ Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
+
+ Raises:
+ ValueError: if `embedding_weights` is empty.
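+
+  Example (an illustrative sketch of the pruning and empty-row behavior):
+
+  ```python
+  weights = [[1., 2.], [3., 4.], [5., 6.]]  # Vocab size 3, dimension 2.
+  ids = sparse_tensor_lib.SparseTensor(
+      indices=[[0, 0], [0, 1]], values=math_ops.to_int64([0, -1]),
+      dense_shape=[2, 2])
+  result = _safe_embedding_lookup_sparse(weights, ids)
+  # Row 0: the invalid id -1 is pruned, leaving the embedding for id 0.
+  # Row 1: no features, so the 0-vector is returned (default_id is None).
+  ```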
+ """
+ if embedding_weights is None:
+ raise ValueError('Missing embedding_weights %s.' % embedding_weights)
+ if isinstance(embedding_weights, variables.PartitionedVariable):
+ embedding_weights = list(embedding_weights) # get underlying Variables.
+ if not isinstance(embedding_weights, list):
+ embedding_weights = [embedding_weights]
+ if len(embedding_weights) < 1:
+ raise ValueError('Missing embedding_weights %s.' % embedding_weights)
+
+ dtype = sparse_weights.dtype if sparse_weights is not None else None
+ embedding_weights = [
+ ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
+ ]
+
+ with ops.name_scope(name, 'embedding_lookup',
+ embedding_weights + [sparse_ids,
+ sparse_weights]) as scope:
+ # Reshape higher-rank sparse ids and weights to linear segment ids.
+ original_shape = sparse_ids.dense_shape
+ original_rank_dim = sparse_ids.dense_shape.get_shape()[0]
+ original_rank = (
+ array_ops.size(original_shape)
+ if original_rank_dim.value is None
+ else original_rank_dim.value)
+ sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
+ math_ops.reduce_prod(
+ array_ops.slice(original_shape, [0], [original_rank - 1])),
+ array_ops.gather(original_shape, original_rank - 1)])
+ if sparse_weights is not None:
+ sparse_weights = sparse_tensor_lib.SparseTensor(
+ sparse_ids.indices,
+ sparse_weights.values, sparse_ids.dense_shape)
+
+ # Prune invalid ids and weights.
+ sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
+ if combiner != 'sum':
+ sparse_ids, sparse_weights = _prune_invalid_weights(
+ sparse_ids, sparse_weights)
+
+ # Fill in dummy values for empty features, if necessary.
+ sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
+ default_id or
+ 0)
+ if sparse_weights is not None:
+ sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
+
+ result = embedding_ops.embedding_lookup_sparse(
+ embedding_weights,
+ sparse_ids,
+ sparse_weights,
+ combiner=combiner,
+ partition_strategy=partition_strategy,
+ name=None if default_id is None else scope,
+ max_norm=max_norm)
+
+ if default_id is None:
+ # Broadcast is_row_empty to the same shape as embedding_lookup_result,
+ # for use in Select.
+ is_row_empty = array_ops.tile(
+ array_ops.reshape(is_row_empty, [-1, 1]),
+ array_ops.stack([1, array_ops.shape(result)[1]]))
+
+ result = array_ops.where(is_row_empty,
+ array_ops.zeros_like(result),
+ result,
+ name=scope)
+
+ # Reshape back from linear ids back into higher-dimensional dense result.
+ final_result = array_ops.reshape(
+ result,
+ array_ops.concat([
+ array_ops.slice(
+ math_ops.cast(original_shape, dtypes.int32), [0],
+ [original_rank - 1]),
+ array_ops.slice(array_ops.shape(result), [1], [-1])
+ ], 0))
+ final_result.set_shape(tensor_shape.unknown_shape(
+ (original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
+ return final_result
+
+
+def _prune_invalid_ids(sparse_ids, sparse_weights):
+ """Prune invalid IDs (< 0) from the input ids and weights."""
+ is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
+ if sparse_weights is not None:
+ is_id_valid = math_ops.logical_and(
+ is_id_valid,
+ array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
+ sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
+ if sparse_weights is not None:
+ sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
+ return sparse_ids, sparse_weights
+
+
+def _prune_invalid_weights(sparse_ids, sparse_weights):
+ """Prune invalid weights (< 0) from the input ids and weights."""
+ if sparse_weights is not None:
+ is_weights_valid = math_ops.greater(sparse_weights.values, 0)
+ sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
+ sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
+ return sparse_ids, sparse_weights
+
+
+class IndicatorColumn(DenseColumn, SequenceDenseColumn,
+ collections.namedtuple('IndicatorColumn',
+                                            ('categorical_column',))):
+ """Represents a one-hot column for use in deep networks.
+
+ Args:
+ categorical_column: A `CategoricalColumn` which is created by
+ `categorical_column_with_*` function.
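+
+  Example (a sketch, assuming this module's `indicator_column` and
+  `categorical_column_with_identity` constructors):
+
+  ```python
+  ids = categorical_column_with_identity('ids', num_buckets=4)
+  indicator = indicator_column(ids)
+  # A row with ids [1, 1, 3] produces the multi-hot output [0., 2., 0., 1.].
+  ```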
+ """
+
+ @property
+ def name(self):
+ """See `FeatureColumn` base class."""
+ return '{}_indicator'.format(self.categorical_column.name)
+
+ def transform_feature(self, transformation_cache, state_manager):
+ """Returns dense `Tensor` representing feature.
+
+ Args:
+ transformation_cache: A `FeatureTransformationCache` object to access
+ features.
+ state_manager: A `StateManager` to create / access resources such as
+ lookup tables.
+
+ Returns:
+ Transformed feature `Tensor`.
+
+ Raises:
+ ValueError: if input rank is not known at graph building time.
+ """
+ id_weight_pair = self.categorical_column.get_sparse_tensors(
+ transformation_cache, state_manager)
+ id_tensor = id_weight_pair.id_tensor
+ weight_tensor = id_weight_pair.weight_tensor
+
+ # If the underlying column is weighted, return the input as a dense tensor.
+ if weight_tensor is not None:
+ weighted_column = sparse_ops.sparse_merge(
+ sp_ids=id_tensor,
+ sp_values=weight_tensor,
+ vocab_size=int(self.variable_shape[-1]))
+ # Remove (?, -1) index
+ weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],
+ weighted_column.dense_shape)
+ return sparse_ops.sparse_tensor_to_dense(weighted_column)
+
+ dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
+ id_tensor, default_value=-1)
+
+    # The one-hot tensor must be float for tf.concat, since all other inputs
+    # to input_layer are float32.
+ one_hot_id_tensor = array_ops.one_hot(
+ dense_id_tensor,
+ depth=self.variable_shape[-1],
+ on_value=1.0,
+ off_value=0.0)
+
+ # Reduce to get a multi-hot per example.
+ return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])
+
+ @property
+ def parse_example_spec(self):
+ """See `FeatureColumn` base class."""
+ return self.categorical_column.parse_example_spec
+
+ @property
+ def variable_shape(self):
+ """Returns a `TensorShape` representing the shape of the dense `Tensor`."""
+ return tensor_shape.TensorShape([1, self.categorical_column.num_buckets])
+
+ def get_dense_tensor(self, transformation_cache, state_manager):
+ """Returns dense `Tensor` representing feature.
+
+ Args:
+ transformation_cache: A `FeatureTransformationCache` object to access
+ features.
+ state_manager: A `StateManager` to create / access resources such as
+ lookup tables.
+
+ Returns:
+ Dense `Tensor` created within `transform_feature`.
+
+ Raises:
+ ValueError: If `categorical_column` is a `SequenceCategoricalColumn`.
+ """
+ if isinstance(self.categorical_column, SequenceCategoricalColumn):
+ raise ValueError(
+ 'In indicator_column: {}. '
+ 'categorical_column must not be of type SequenceCategoricalColumn. '
+ 'Suggested fix A: If you wish to use input_layer, use a '
+ 'non-sequence categorical_column_with_*. '
+ 'Suggested fix B: If you wish to create sequence input, use '
+ 'sequence_input_layer instead of input_layer. '
+ 'Given (type {}): {}'.format(self.name, type(self.categorical_column),
+ self.categorical_column))
+    # Feature has already been transformed. Return the intermediate
+    # representation created by transform_feature.
+ return transformation_cache.get(self, state_manager)
+
+ def get_sequence_dense_tensor(self, transformation_cache, state_manager):
+ """See `SequenceDenseColumn` base class."""
+ if not isinstance(self.categorical_column, SequenceCategoricalColumn):
+ raise ValueError(
+ 'In indicator_column: {}. '
+ 'categorical_column must be of type SequenceCategoricalColumn '
+ 'to use sequence_input_layer. '
+ 'Suggested fix: Use one of sequence_categorical_column_with_*. '
+ 'Given (type {}): {}'.format(self.name, type(self.categorical_column),
+ self.categorical_column))
+    # Feature has already been transformed. Return the intermediate
+    # representation created by transform_feature.
+ dense_tensor = transformation_cache.get(self, state_manager)
+ sparse_tensors = self.categorical_column.get_sparse_tensors(
+ transformation_cache, state_manager)
+ sequence_length = _sequence_length_from_sparse_tensor(
+ sparse_tensors.id_tensor)
+ return SequenceDenseColumn.TensorSequenceLengthPair(
+ dense_tensor=dense_tensor, sequence_length=sequence_length)
+
+
+def _verify_static_batch_size_equality(tensors, columns):
+  # batch_size is a tf.Dimension object.
+ expected_batch_size = None
+ for i in range(0, len(tensors)):
+ if tensors[i].shape[0].value is not None:
+ if expected_batch_size is None:
+        batch_size_column_index = i
+ expected_batch_size = tensors[i].shape[0]
+ elif not expected_batch_size.is_compatible_with(tensors[i].shape[0]):
+ raise ValueError(
+            'Batch size (first dimension) of each feature must be the same. '
+            'Batch size of columns ({}, {}): ({}, {})'.format(
+                columns[batch_size_column_index].name, columns[i].name,
+ expected_batch_size, tensors[i].shape[0]))
+
+
+def _sequence_length_from_sparse_tensor(sp_tensor, num_elements=1):
+ """Returns a [batch_size] Tensor with per-example sequence length."""
+ with ops.name_scope(None, 'sequence_length') as name_scope:
+ row_ids = sp_tensor.indices[:, 0]
+ column_ids = sp_tensor.indices[:, 1]
+ column_ids += array_ops.ones_like(column_ids)
+ seq_length = math_ops.to_int64(
+ math_ops.segment_max(column_ids, segment_ids=row_ids) / num_elements)
+ # If the last n rows do not have ids, seq_length will have shape
+ # [batch_size - n]. Pad the remaining values with zeros.
+ n_pad = array_ops.shape(sp_tensor)[:1] - array_ops.shape(seq_length)[:1]
+ padding = array_ops.zeros(n_pad, dtype=seq_length.dtype)
+ return array_ops.concat([seq_length, padding], axis=0, name=name_scope)
+
+
+class SequenceCategoricalColumn(FeatureColumn,
+ collections.namedtuple(
+ 'SequenceCategoricalColumn',
+                                    ('categorical_column',))):
+ """Represents sequences of categorical data."""
+
+ @property
+ def name(self):
+ """See `FeatureColumn` base class."""
+ return self.categorical_column.name
+
+ @property
+ def parse_example_spec(self):
+ """See `FeatureColumn` base class."""
+ return self.categorical_column.parse_example_spec
+
+ def transform_feature(self, transformation_cache, state_manager):
+ """See `FeatureColumn` base class."""
+ return self.categorical_column.transform_feature(transformation_cache,
+ state_manager)
+
+ @property
+ def num_buckets(self):
+ """Returns number of buckets in this sparse feature."""
+ return self.categorical_column.num_buckets
+
+ def get_sequence_sparse_tensors(self, transformation_cache, state_manager):
+ """Returns an IdWeightPair.
+
+ `IdWeightPair` is a pair of `SparseTensor`s which represents ids and
+ weights.
+
+ `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
+ `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
+ `SparseTensor` of `float` or `None` to indicate all weights should be
+    taken to be 1. If specified, `weight_tensor` must have exactly the same
+    shape and indices as `id_tensor`. The expected `SparseTensor` is the same
+    as the parsing output of a `VarLenFeature`, which is a ragged matrix.
+
+ Args:
+ transformation_cache: A `FeatureTransformationCache` object to access
+ features.
+ state_manager: A `StateManager` to create / access resources such as
+ lookup tables.
+ """
+ sparse_tensors = self.categorical_column.get_sparse_tensors(
+ transformation_cache, state_manager)
+ id_tensor = sparse_tensors.id_tensor
+ weight_tensor = sparse_tensors.weight_tensor
+ # Expands final dimension, so that embeddings are not combined during
+ # embedding lookup.
+ check_id_rank = check_ops.assert_equal(
+ array_ops.rank(id_tensor), 2,
+ data=[
+ 'Column {} expected ID tensor of rank 2. '.format(self.name),
+ 'id_tensor shape: ', array_ops.shape(id_tensor)])
+ with ops.control_dependencies([check_id_rank]):
+ id_tensor = sparse_ops.sparse_reshape(
+ id_tensor,
+ shape=array_ops.concat([id_tensor.dense_shape, [1]], axis=0))
+ if weight_tensor is not None:
+ check_weight_rank = check_ops.assert_equal(
+ array_ops.rank(weight_tensor), 2,
+ data=[
+ 'Column {} expected weight tensor of rank 2.'.format(self.name),
+ 'weight_tensor shape:', array_ops.shape(weight_tensor)])
+ with ops.control_dependencies([check_weight_rank]):
+ weight_tensor = sparse_ops.sparse_reshape(
+ weight_tensor,
+ shape=array_ops.concat([weight_tensor.dense_shape, [1]], axis=0))
+ return CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)
diff --git a/tensorflow/python/feature_column/feature_column_v2_test.py b/tensorflow/python/feature_column/feature_column_v2_test.py
new file mode 100644
index 0000000000..80a9d5d40e
--- /dev/null
+++ b/tensorflow/python/feature_column/feature_column_v2_test.py
@@ -0,0 +1,6583 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for feature_column."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import copy
+
+import numpy as np
+
+from tensorflow.core.example import example_pb2
+from tensorflow.core.example import feature_pb2
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.core.protobuf import rewriter_config_pb2
+from tensorflow.python.client import session
+from tensorflow.python.eager import backprop
+from tensorflow.python.eager import context
+from tensorflow.python.estimator.inputs import numpy_io
+from tensorflow.python.feature_column import feature_column as fc_old
+from tensorflow.python.feature_column import feature_column_v2 as fc
+from tensorflow.python.feature_column.feature_column_v2 import FeatureColumn
+from tensorflow.python.feature_column.feature_column_v2 import FeatureTransformationCache
+from tensorflow.python.feature_column.feature_column_v2 import InputLayer
+from tensorflow.python.feature_column.feature_column_v2 import StateManager
+from tensorflow.python.feature_column.feature_column_v2 import _LinearModel
+from tensorflow.python.feature_column.feature_column_v2 import _transform_features
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.framework import test_util
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import lookup_ops
+from tensorflow.python.ops import parsing_ops
+from tensorflow.python.ops import partitioned_variables
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables as variables_lib
+from tensorflow.python.platform import test
+from tensorflow.python.training import coordinator
+from tensorflow.python.training import queue_runner_impl
+
+
+def _initialized_session(config=None):
+ sess = session.Session(config=config)
+ sess.run(variables_lib.global_variables_initializer())
+ sess.run(lookup_ops.tables_initializer())
+ return sess
+
+
+class LazyColumnTest(test.TestCase):
+
+ def test_transformations_called_once(self):
+
+ class TransformCounter(FeatureColumn):
+
+ def __init__(self):
+ self.num_transform = 0
+
+ @property
+ def name(self):
+ return 'TransformCounter'
+
+ def transform_feature(self, transformation_cache, state_manager):
+ self.num_transform += 1 # Count transform calls.
+ return transformation_cache.get('a', state_manager)
+
+ @property
+ def parse_example_spec(self):
+ pass
+
+ transformation_cache = FeatureTransformationCache(
+ features={'a': [[2], [3.]]})
+ column = TransformCounter()
+ self.assertEqual(0, column.num_transform)
+ transformation_cache.get(column, None)
+ self.assertEqual(1, column.num_transform)
+ transformation_cache.get(column, None)
+ self.assertEqual(1, column.num_transform)
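+ # The second get() call hit the cache: FeatureTransformationCache memoizes
+ # each column's output, so transform_feature runs at most once per column.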
+
+ def test_returns_transform_output(self):
+
+ class Transformer(FeatureColumn):
+
+ @property
+ def name(self):
+ return 'Transformer'
+
+ def transform_feature(self, transformation_cache, state_manager):
+ return 'Output'
+
+ @property
+ def parse_example_spec(self):
+ pass
+
+ transformation_cache = FeatureTransformationCache(
+ features={'a': [[2], [3.]]})
+ column = Transformer()
+ self.assertEqual('Output', transformation_cache.get(column, None))
+ self.assertEqual('Output', transformation_cache.get(column, None))
+
+ def test_does_not_pollute_given_features_dict(self):
+
+ class Transformer(FeatureColumn):
+
+ @property
+ def name(self):
+ return 'Transformer'
+
+ def transform_feature(self, transformation_cache, state_manager):
+ return 'Output'
+
+ @property
+ def parse_example_spec(self):
+ pass
+
+ features = {'a': [[2], [3.]]}
+ transformation_cache = FeatureTransformationCache(features=features)
+ transformation_cache.get(Transformer(), None)
+ self.assertEqual(['a'], list(features.keys()))
+
+ def test_error_if_feature_is_not_found(self):
+ transformation_cache = FeatureTransformationCache(
+ features={'a': [[2], [3.]]})
+ with self.assertRaisesRegexp(ValueError,
+ 'bbb is not in features dictionary'):
+ transformation_cache.get('bbb', None)
+ with self.assertRaisesRegexp(ValueError,
+ 'bbb is not in features dictionary'):
+ transformation_cache.get(u'bbb', None)
+
+ def test_not_supported_feature_column(self):
+
+ class NotAProperColumn(FeatureColumn):
+
+ @property
+ def name(self):
+ return 'NotAProperColumn'
+
+ def transform_feature(self, transformation_cache, state_manager):
+ # A proper column should return a transformed tensor; this one returns
+ # None, which makes it unsupported.
+ pass
+
+ @property
+ def parse_example_spec(self):
+ pass
+
+ transformation_cache = FeatureTransformationCache(
+ features={'a': [[2], [3.]]})
+ with self.assertRaisesRegexp(ValueError,
+ 'NotAProperColumn is not supported'):
+ transformation_cache.get(NotAProperColumn(), None)
+
+ def test_key_should_be_string_or_feature_column(self):
+
+ class NotAFeatureColumn(object):
+ pass
+
+ transformation_cache = FeatureTransformationCache(
+ features={'a': [[2], [3.]]})
+ with self.assertRaisesRegexp(
+ TypeError, '"key" must be either a "str" or "FeatureColumn".'):
+ transformation_cache.get(NotAFeatureColumn(), None)
+
+
+class NumericColumnTest(test.TestCase):
+
+ def test_defaults(self):
+ a = fc.numeric_column('aaa')
+ self.assertEqual('aaa', a.key)
+ self.assertEqual('aaa', a.name)
+ self.assertEqual((1,), a.shape)
+ self.assertIsNone(a.default_value)
+ self.assertEqual(dtypes.float32, a.dtype)
+ self.assertIsNone(a.normalizer_fn)
+
+ def test_key_should_be_string(self):
+ with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+ fc.numeric_column(key=('aaa',))
+
+ def test_shape_saved_as_tuple(self):
+ a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]])
+ self.assertEqual((1, 2), a.shape)
+
+ def test_default_value_saved_as_tuple(self):
+ a = fc.numeric_column('aaa', default_value=4.)
+ self.assertEqual((4.,), a.default_value)
+ a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]])
+ self.assertEqual(((3., 2.),), a.default_value)
+
+ def test_shape_and_default_value_compatibility(self):
+ fc.numeric_column('aaa', shape=[2], default_value=[1, 2.])
+ with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
+ fc.numeric_column('aaa', shape=[2], default_value=[1, 2, 3.])
+ fc.numeric_column(
+ 'aaa', shape=[3, 2], default_value=[[2, 3], [1, 2], [2, 3.]])
+ with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
+ fc.numeric_column(
+ 'aaa', shape=[3, 1], default_value=[[2, 3], [1, 2], [2, 3.]])
+ with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
+ fc.numeric_column(
+ 'aaa', shape=[3, 3], default_value=[[2, 3], [1, 2], [2, 3.]])
+
+ def test_default_value_type_check(self):
+ fc.numeric_column(
+ 'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.float32)
+ fc.numeric_column(
+ 'aaa', shape=[2], default_value=[1, 2], dtype=dtypes.int32)
+ with self.assertRaisesRegexp(TypeError, 'must be compatible with dtype'):
+ fc.numeric_column(
+ 'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.int32)
+ with self.assertRaisesRegexp(TypeError,
+ 'default_value must be compatible with dtype'):
+ fc.numeric_column('aaa', default_value=['string'])
+
+ def test_shape_must_be_positive_integer(self):
+ with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
+ fc.numeric_column(
+ 'aaa', shape=[
+ 1.0,
+ ])
+
+ with self.assertRaisesRegexp(ValueError,
+ 'shape dimensions must be greater than 0'):
+ fc.numeric_column(
+ 'aaa', shape=[
+ 0,
+ ])
+
+ def test_dtype_is_convertible_to_float(self):
+ with self.assertRaisesRegexp(ValueError,
+ 'dtype must be convertible to float'):
+ fc.numeric_column('aaa', dtype=dtypes.string)
+
+ def test_scalar_default_value_fills_the_shape(self):
+ a = fc.numeric_column('aaa', shape=[2, 3], default_value=2.)
+ self.assertEqual(((2., 2., 2.), (2., 2., 2.)), a.default_value)
+
+ def test_parse_spec(self):
+ a = fc.numeric_column('aaa', shape=[2, 3], dtype=dtypes.int32)
+ self.assertEqual({
+ 'aaa': parsing_ops.FixedLenFeature((2, 3), dtype=dtypes.int32)
+ }, a.parse_example_spec)
+
+ def test_parse_example_no_default_value(self):
+ price = fc.numeric_column('price', shape=[2])
+ data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'price':
+ feature_pb2.Feature(float_list=feature_pb2.FloatList(
+ value=[20., 110.]))
+ }))
+ features = parsing_ops.parse_example(
+ serialized=[data.SerializeToString()],
+ features=fc.make_parse_example_spec([price]))
+ self.assertIn('price', features)
+ with self.test_session():
+ self.assertAllEqual([[20., 110.]], features['price'].eval())
+
+ def test_parse_example_with_default_value(self):
+ price = fc.numeric_column('price', shape=[2], default_value=11.)
+ data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'price':
+ feature_pb2.Feature(float_list=feature_pb2.FloatList(
+ value=[20., 110.]))
+ }))
+ no_data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'something_else':
+ feature_pb2.Feature(float_list=feature_pb2.FloatList(
+ value=[20., 110.]))
+ }))
+ features = parsing_ops.parse_example(
+ serialized=[data.SerializeToString(),
+ no_data.SerializeToString()],
+ features=fc.make_parse_example_spec([price]))
+ self.assertIn('price', features)
+ with self.test_session():
+ self.assertAllEqual([[20., 110.], [11., 11.]], features['price'].eval())
+
+ def test_normalizer_fn_must_be_callable(self):
+ with self.assertRaisesRegexp(TypeError, 'must be a callable'):
+ fc.numeric_column('price', normalizer_fn='NotACallable')
+
+ def test_normalizer_fn_transform_feature(self):
+
+ def _increment_two(input_tensor):
+ return input_tensor + 2.
+
+ price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
+ output = _transform_features({'price': [[1., 2.], [5., 6.]]}, [price], None)
+ with self.test_session():
+ self.assertAllEqual([[3., 4.], [7., 8.]], output[price].eval())
+
+ def test_get_dense_tensor(self):
+
+ def _increment_two(input_tensor):
+ return input_tensor + 2.
+
+ price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
+ transformation_cache = FeatureTransformationCache({
+ 'price': [[1., 2.], [5., 6.]]
+ })
+ self.assertEqual(
+ transformation_cache.get(price, None),
+ price.get_dense_tensor(transformation_cache, None))
+
+ def test_sparse_tensor_not_supported(self):
+ price = fc.numeric_column('price')
+ transformation_cache = FeatureTransformationCache({
+ 'price':
+ sparse_tensor.SparseTensor(
+ indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
+ })
+ with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
+ price.transform_feature(transformation_cache, None)
+
+ def test_deep_copy(self):
+ a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3., 2.]])
+ a_copy = copy.deepcopy(a)
+ self.assertEqual(a_copy.name, 'aaa')
+ self.assertEqual(a_copy.shape, (1, 2))
+ self.assertEqual(a_copy.default_value, ((3., 2.),))
+
+ def test_numpy_default_value(self):
+ a = fc.numeric_column(
+ 'aaa', shape=[1, 2], default_value=np.array([[3., 2.]]))
+ self.assertEqual(a.default_value, ((3., 2.),))
+
+ def test_linear_model(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default():
+ features = {'price': [[1.], [5.]]}
+ predictions = fc.linear_model(features, [price])
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ self.assertAllClose([[0.]], price_var.eval())
+ self.assertAllClose([[0.], [0.]], predictions.eval())
+ sess.run(price_var.assign([[10.]]))
+ self.assertAllClose([[10.], [50.]], predictions.eval())
+
+ def test_keras_linear_model(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default():
+ features = {'price': [[1.], [5.]]}
+ predictions = get_keras_linear_model_predictions(features, [price])
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ self.assertAllClose([[0.]], price_var.eval())
+ self.assertAllClose([[0.], [0.]], predictions.eval())
+ sess.run(price_var.assign([[10.]]))
+ self.assertAllClose([[10.], [50.]], predictions.eval())
+
+
+class BucketizedColumnTest(test.TestCase):
+
+ def test_invalid_source_column_type(self):
+ a = fc.categorical_column_with_hash_bucket('aaa', hash_bucket_size=10)
+ with self.assertRaisesRegexp(
+ ValueError,
+ 'source_column must be a column generated with numeric_column'):
+ fc.bucketized_column(a, boundaries=[0, 1])
+
+ def test_invalid_source_column_shape(self):
+ a = fc.numeric_column('aaa', shape=[2, 3])
+ with self.assertRaisesRegexp(
+ ValueError, 'source_column must be one-dimensional column'):
+ fc.bucketized_column(a, boundaries=[0, 1])
+
+ def test_invalid_boundaries(self):
+ a = fc.numeric_column('aaa')
+ with self.assertRaisesRegexp(
+ ValueError, 'boundaries must be a sorted list'):
+ fc.bucketized_column(a, boundaries=None)
+ with self.assertRaisesRegexp(
+ ValueError, 'boundaries must be a sorted list'):
+ fc.bucketized_column(a, boundaries=1.)
+ with self.assertRaisesRegexp(
+ ValueError, 'boundaries must be a sorted list'):
+ fc.bucketized_column(a, boundaries=[1, 0])
+ with self.assertRaisesRegexp(
+ ValueError, 'boundaries must be a sorted list'):
+ fc.bucketized_column(a, boundaries=[1, 1])
+
+ def test_name(self):
+ a = fc.numeric_column('aaa', dtype=dtypes.int32)
+ b = fc.bucketized_column(a, boundaries=[0, 1])
+ self.assertEqual('aaa_bucketized', b.name)
+
+ def test_parse_spec(self):
+ a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
+ b = fc.bucketized_column(a, boundaries=[0, 1])
+ self.assertEqual({
+ 'aaa': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32)
+ }, b.parse_example_spec)
+
+ def test_variable_shape(self):
+ a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
+ b = fc.bucketized_column(a, boundaries=[0, 1])
+ # Column 'aaa' has shape [2] times three buckets -> variable_shape=[2, 3].
+ self.assertAllEqual((2, 3), b.variable_shape)
+
+ def test_num_buckets(self):
+ a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
+ b = fc.bucketized_column(a, boundaries=[0, 1])
+ # Column 'aaa' has shape [2] times three buckets -> num_buckets=6.
+ self.assertEqual(6, b.num_buckets)
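+ # In other words, num_buckets = source dimensions * buckets per value
+ # = 2 * (len(boundaries) + 1) = 2 * 3 = 6.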
+
+ def test_parse_example(self):
+ price = fc.numeric_column('price', shape=[2])
+ bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
+ data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'price':
+ feature_pb2.Feature(float_list=feature_pb2.FloatList(
+ value=[20., 110.]))
+ }))
+ features = parsing_ops.parse_example(
+ serialized=[data.SerializeToString()],
+ features=fc.make_parse_example_spec([bucketized_price]))
+ self.assertIn('price', features)
+ with self.test_session():
+ self.assertAllEqual([[20., 110.]], features['price'].eval())
+
+ def test_transform_feature(self):
+ price = fc.numeric_column('price', shape=[2])
+ bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
+ with ops.Graph().as_default():
+ transformed_tensor = _transform_features({
+ 'price': [[-1., 1.], [5., 6.]]
+ }, [bucketized_price], None)
+ with _initialized_session():
+ self.assertAllEqual([[0, 1], [3, 4]],
+ transformed_tensor[bucketized_price].eval())
+
+ def test_get_dense_tensor_one_input_value(self):
+ """Tests _get_dense_tensor() for input with shape=[1]."""
+ price = fc.numeric_column('price', shape=[1])
+ bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
+ with ops.Graph().as_default():
+ transformation_cache = FeatureTransformationCache({
+ 'price': [[-1.], [1.], [5.], [6.]]
+ })
+ with _initialized_session():
+ bucketized_price_tensor = bucketized_price.get_dense_tensor(
+ transformation_cache, None)
+ self.assertAllClose(
+ # One-hot tensor.
+ [[[1., 0., 0., 0., 0.]],
+ [[0., 1., 0., 0., 0.]],
+ [[0., 0., 0., 1., 0.]],
+ [[0., 0., 0., 0., 1.]]],
+ bucketized_price_tensor.eval())
+
+ def test_get_dense_tensor_two_input_values(self):
+ """Tests _get_dense_tensor() for input with shape=[2]."""
+ price = fc.numeric_column('price', shape=[2])
+ bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
+ with ops.Graph().as_default():
+ transformation_cache = FeatureTransformationCache({
+ 'price': [[-1., 1.], [5., 6.]]
+ })
+ with _initialized_session():
+ bucketized_price_tensor = bucketized_price.get_dense_tensor(
+ transformation_cache, None)
+ self.assertAllClose(
+ # One-hot tensor.
+ [[[1., 0., 0., 0., 0.], [0., 1., 0., 0., 0.]],
+ [[0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.]]],
+ bucketized_price_tensor.eval())
+
+ def test_get_sparse_tensors_one_input_value(self):
+ """Tests _get_sparse_tensors() for input with shape=[1]."""
+ price = fc.numeric_column('price', shape=[1])
+ bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
+ with ops.Graph().as_default():
+ transformation_cache = FeatureTransformationCache({
+ 'price': [[-1.], [1.], [5.], [6.]]
+ })
+ with _initialized_session() as sess:
+ id_weight_pair = bucketized_price.get_sparse_tensors(
+ transformation_cache, None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ id_tensor_value = sess.run(id_weight_pair.id_tensor)
+ self.assertAllEqual(
+ [[0, 0], [1, 0], [2, 0], [3, 0]], id_tensor_value.indices)
+ self.assertAllEqual([0, 1, 3, 4], id_tensor_value.values)
+ self.assertAllEqual([4, 1], id_tensor_value.dense_shape)
+
+ def test_get_sparse_tensors_two_input_values(self):
+ """Tests _get_sparse_tensors() for input with shape=[2]."""
+ price = fc.numeric_column('price', shape=[2])
+ bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
+ with ops.Graph().as_default():
+ transformation_cache = FeatureTransformationCache({
+ 'price': [[-1., 1.], [5., 6.]]
+ })
+ with _initialized_session() as sess:
+ id_weight_pair = bucketized_price.get_sparse_tensors(
+ transformation_cache, None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ id_tensor_value = sess.run(id_weight_pair.id_tensor)
+ self.assertAllEqual(
+ [[0, 0], [0, 1], [1, 0], [1, 1]], id_tensor_value.indices)
+ # Values 0-4 correspond to the first column of the input price.
+ # Values 5-9 correspond to the second column of the input price.
+ self.assertAllEqual([0, 6, 3, 9], id_tensor_value.values)
+ self.assertAllEqual([2, 2], id_tensor_value.dense_shape)
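+ # The sparse ids follow id = bucket_index + column_index * buckets per
+ # value (here len(boundaries) + 1 = 5): -1. -> 0, 1. -> 1 + 5 = 6,
+ # 5. -> 3, 6. -> 4 + 5 = 9, matching the [0, 6, 3, 9] assertion above.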
+
+ def test_sparse_tensor_input_not_supported(self):
+ price = fc.numeric_column('price')
+ bucketized_price = fc.bucketized_column(price, boundaries=[0, 1])
+ transformation_cache = FeatureTransformationCache({
+ 'price':
+ sparse_tensor.SparseTensor(
+ indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
+ })
+ with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
+ bucketized_price.transform_feature(transformation_cache, None)
+
+ def test_deep_copy(self):
+ a = fc.numeric_column('aaa', shape=[2])
+ a_bucketized = fc.bucketized_column(a, boundaries=[0, 1])
+ a_bucketized_copy = copy.deepcopy(a_bucketized)
+ self.assertEqual(a_bucketized_copy.name, 'aaa_bucketized')
+ self.assertAllEqual(a_bucketized_copy.variable_shape, (2, 3))
+ self.assertEqual(a_bucketized_copy.boundaries, (0, 1))
+
+ def test_linear_model_one_input_value(self):
+ """Tests linear_model() for input with shape=[1]."""
+ price = fc_old.numeric_column('price', shape=[1])
+ bucketized_price = fc_old.bucketized_column(price, boundaries=[0, 2, 4, 6])
+ with ops.Graph().as_default():
+ features = {'price': [[-1.], [1.], [5.], [6.]]}
+ predictions = fc.linear_model(features, [bucketized_price])
+ bias = get_linear_model_bias()
+ bucketized_price_var = get_linear_model_column_var(bucketized_price)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ # One weight variable per bucket, all initialized to zero.
+ self.assertAllClose(
+ [[0.], [0.], [0.], [0.], [0.]], bucketized_price_var.eval())
+ self.assertAllClose([[0.], [0.], [0.], [0.]], predictions.eval())
+ sess.run(bucketized_price_var.assign(
+ [[10.], [20.], [30.], [40.], [50.]]))
+ # price -1. is in the 0th bucket, whose weight is 10.
+ # price 1. is in the 1st bucket, whose weight is 20.
+ # price 5. is in the 3rd bucket, whose weight is 40.
+ # price 6. is in the 4th bucket, whose weight is 50.
+ self.assertAllClose([[10.], [20.], [40.], [50.]], predictions.eval())
+ sess.run(bias.assign([1.]))
+ self.assertAllClose([[11.], [21.], [41.], [51.]], predictions.eval())
+
+ def test_linear_model_two_input_values(self):
+ """Tests linear_model() for input with shape=[2]."""
+ price = fc_old.numeric_column('price', shape=[2])
+ bucketized_price = fc_old.bucketized_column(price, boundaries=[0, 2, 4, 6])
+ with ops.Graph().as_default():
+ features = {'price': [[-1., 1.], [5., 6.]]}
+ predictions = fc.linear_model(features, [bucketized_price])
+ bias = get_linear_model_bias()
+ bucketized_price_var = get_linear_model_column_var(bucketized_price)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ # One weight per bucket per input column, all initialized to zero.
+ self.assertAllClose(
+ [[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
+ bucketized_price_var.eval())
+ self.assertAllClose([[0.], [0.]], predictions.eval())
+ sess.run(bucketized_price_var.assign(
+ [[10.], [20.], [30.], [40.], [50.],
+ [60.], [70.], [80.], [90.], [100.]]))
+ # 1st example:
+ # price -1. is in the 0th bucket, whose weight is 10.
+ # price 1. is in the 6th bucket, whose weight is 70.
+ # 2nd example:
+ # price 5. is in the 3rd bucket, whose weight is 40.
+ # price 6. is in the 9th bucket, whose weight is 100.
+ self.assertAllClose([[80.], [140.]], predictions.eval())
+ sess.run(bias.assign([1.]))
+ self.assertAllClose([[81.], [141.]], predictions.eval())
+
+ def test_keras_linear_model_one_input_value(self):
+ """Tests _LinearModel for input with shape=[1]."""
+ price = fc_old.numeric_column('price', shape=[1])
+ bucketized_price = fc_old.bucketized_column(price, boundaries=[0, 2, 4, 6])
+ with ops.Graph().as_default():
+ features = {'price': [[-1.], [1.], [5.], [6.]]}
+ predictions = get_keras_linear_model_predictions(features,
+ [bucketized_price])
+ bias = get_linear_model_bias()
+ bucketized_price_var = get_linear_model_column_var(bucketized_price)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ # One weight variable per bucket, all initialized to zero.
+ self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
+ bucketized_price_var.eval())
+ self.assertAllClose([[0.], [0.], [0.], [0.]], predictions.eval())
+ sess.run(
+ bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.]]))
+ # price -1. is in the 0th bucket, whose weight is 10.
+ # price 1. is in the 1st bucket, whose weight is 20.
+ # price 5. is in the 3rd bucket, whose weight is 40.
+ # price 6. is in the 4th bucket, whose weight is 50.
+ self.assertAllClose([[10.], [20.], [40.], [50.]], predictions.eval())
+ sess.run(bias.assign([1.]))
+ self.assertAllClose([[11.], [21.], [41.], [51.]], predictions.eval())
+
+ def test_keras_linear_model_two_input_values(self):
+ """Tests _LinearModel for input with shape=[2]."""
+ price = fc_old.numeric_column('price', shape=[2])
+ bucketized_price = fc_old.bucketized_column(price, boundaries=[0, 2, 4, 6])
+ with ops.Graph().as_default():
+ features = {'price': [[-1., 1.], [5., 6.]]}
+ predictions = get_keras_linear_model_predictions(features,
+ [bucketized_price])
+ bias = get_linear_model_bias()
+ bucketized_price_var = get_linear_model_column_var(bucketized_price)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ # One weight per bucket per input column, all initialized to zero.
+ self.assertAllClose(
+ [[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
+ bucketized_price_var.eval())
+ self.assertAllClose([[0.], [0.]], predictions.eval())
+ sess.run(
+ bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.],
+ [60.], [70.], [80.], [90.], [100.]]))
+ # 1st example:
+ # price -1. is in the 0th bucket, whose weight is 10.
+ # price 1. is in the 6th bucket, whose weight is 70.
+ # 2nd example:
+ # price 5. is in the 3rd bucket, whose weight is 40.
+ # price 6. is in the 9th bucket, whose weight is 100.
+ self.assertAllClose([[80.], [140.]], predictions.eval())
+ sess.run(bias.assign([1.]))
+ self.assertAllClose([[81.], [141.]], predictions.eval())
+
+
+class HashedCategoricalColumnTest(test.TestCase):
+
+ def test_defaults(self):
+ a = fc.categorical_column_with_hash_bucket('aaa', 10)
+ self.assertEqual('aaa', a.name)
+ self.assertEqual('aaa', a.key)
+ self.assertEqual(10, a.hash_bucket_size)
+ self.assertEqual(dtypes.string, a.dtype)
+
+ def test_key_should_be_string(self):
+ with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+ fc.categorical_column_with_hash_bucket(('key',), 10)
+
+ def test_bucket_size_should_be_given(self):
+ with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be set.'):
+ fc.categorical_column_with_hash_bucket('aaa', None)
+
+ def test_bucket_size_should_be_positive(self):
+ with self.assertRaisesRegexp(ValueError,
+ 'hash_bucket_size must be at least 1'):
+ fc.categorical_column_with_hash_bucket('aaa', 0)
+
+ def test_dtype_should_be_string_or_integer(self):
+ fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.string)
+ fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
+ with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
+ fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.float32)
+
+ def test_deep_copy(self):
+ original = fc.categorical_column_with_hash_bucket('aaa', 10)
+ for column in (original, copy.deepcopy(original)):
+ self.assertEqual('aaa', column.name)
+ self.assertEqual(10, column.hash_bucket_size)
+ self.assertEqual(10, column.num_buckets)
+ self.assertEqual(dtypes.string, column.dtype)
+
+ def test_parse_spec_string(self):
+ a = fc.categorical_column_with_hash_bucket('aaa', 10)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.string)
+ }, a.parse_example_spec)
+
+ def test_parse_spec_int(self):
+ a = fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int32)
+ }, a.parse_example_spec)
+
+ def test_parse_example(self):
+ a = fc.categorical_column_with_hash_bucket('aaa', 10)
+ data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'aaa':
+ feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
+ value=[b'omar', b'stringer']))
+ }))
+ features = parsing_ops.parse_example(
+ serialized=[data.SerializeToString()],
+ features=fc.make_parse_example_spec([a]))
+ self.assertIn('aaa', features)
+ with self.test_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=[[0, 0], [0, 1]],
+ values=np.array([b'omar', b'stringer'], dtype=np.object_),
+ dense_shape=[1, 2]),
+ features['aaa'].eval())
+
+ def test_strings_should_be_hashed(self):
+ hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'],
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ outputs = _transform_features({'wire': wire_tensor}, [hashed_sparse], None)
+ output = outputs[hashed_sparse]
+ # Check exact hashed output. If hashing changes, this test will break.
+ expected_values = [6, 4, 1]
+ with self.test_session():
+ self.assertEqual(dtypes.int64, output.values.dtype)
+ self.assertAllEqual(expected_values, output.values.eval())
+ self.assertAllEqual(wire_tensor.indices.eval(), output.indices.eval())
+ self.assertAllEqual(wire_tensor.dense_shape.eval(),
+ output.dense_shape.eval())
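+ # The expected buckets above assume the stable fingerprint hash that
+ # categorical_column_with_hash_bucket uses (string_to_hash_bucket_fast in
+ # this version); they are implementation constants, not semantic values.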
+
+ def test_tensor_dtype_should_be_string_or_integer(self):
+ string_fc = fc.categorical_column_with_hash_bucket(
+ 'a_string', 10, dtype=dtypes.string)
+ int_fc = fc.categorical_column_with_hash_bucket(
+ 'a_int', 10, dtype=dtypes.int32)
+ float_fc = fc.categorical_column_with_hash_bucket(
+ 'a_float', 10, dtype=dtypes.string)
+ int_tensor = sparse_tensor.SparseTensor(
+ values=[101],
+ indices=[[0, 0]],
+ dense_shape=[1, 1])
+ string_tensor = sparse_tensor.SparseTensor(
+ values=['101'],
+ indices=[[0, 0]],
+ dense_shape=[1, 1])
+ float_tensor = sparse_tensor.SparseTensor(
+ values=[101.],
+ indices=[[0, 0]],
+ dense_shape=[1, 1])
+ transformation_cache = FeatureTransformationCache({
+ 'a_int': int_tensor,
+ 'a_string': string_tensor,
+ 'a_float': float_tensor
+ })
+ transformation_cache.get(string_fc, None)
+ transformation_cache.get(int_fc, None)
+ with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
+ transformation_cache.get(float_fc, None)
+
+ def test_dtype_should_match_with_tensor(self):
+ hashed_sparse = fc.categorical_column_with_hash_bucket(
+ 'wire', 10, dtype=dtypes.int64)
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
+ transformation_cache = FeatureTransformationCache({'wire': wire_tensor})
+ with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+ transformation_cache.get(hashed_sparse, None)
+
+ def test_ints_should_be_hashed(self):
+ hashed_sparse = fc.categorical_column_with_hash_bucket(
+ 'wire', 10, dtype=dtypes.int64)
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=[101, 201, 301],
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ transformation_cache = FeatureTransformationCache({'wire': wire_tensor})
+ output = transformation_cache.get(hashed_sparse, None)
+ # Check exact hashed output. If hashing changes, this test will break.
+ expected_values = [3, 7, 5]
+ with self.test_session():
+ self.assertAllEqual(expected_values, output.values.eval())
+
+ def test_int32_64_is_compatible(self):
+ hashed_sparse = fc.categorical_column_with_hash_bucket(
+ 'wire', 10, dtype=dtypes.int64)
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=constant_op.constant([101, 201, 301], dtype=dtypes.int32),
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ transformation_cache = FeatureTransformationCache({'wire': wire_tensor})
+ output = transformation_cache.get(hashed_sparse, None)
+ # Check exact hashed output. If hashing changes, this test will break.
+ expected_values = [3, 7, 5]
+ with self.test_session():
+ self.assertAllEqual(expected_values, output.values.eval())
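+ # int32 and int64 inputs produce identical buckets because integer values
+ # are stringified before hashing; only the decimal representation matters,
+ # not the integer width.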
+
+ def test_get_sparse_tensors(self):
+ hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
+ transformation_cache = FeatureTransformationCache({
+ 'wire':
+ sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'],
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ })
+ id_weight_pair = hashed_sparse.get_sparse_tensors(transformation_cache,
+ None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ self.assertEqual(
+ transformation_cache.get(hashed_sparse, None), id_weight_pair.id_tensor)
+
+ def DISABLED_test_get_sparse_tensors_weight_collections(self):
+ column = fc.categorical_column_with_hash_bucket('aaa', 10)
+ inputs = sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'],
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ column._get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }),
+ weight_collections=('my_weights',))
+
+ self.assertItemsEqual(
+ [], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
+ self.assertItemsEqual([], ops.get_collection('my_weights'))
+
+ def test_get_sparse_tensors_dense_input(self):
+ hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
+ transformation_cache = FeatureTransformationCache({
+ 'wire': (('omar', ''), ('stringer', 'marlo'))
+ })
+ id_weight_pair = hashed_sparse.get_sparse_tensors(transformation_cache,
+ None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ self.assertEqual(
+ transformation_cache.get(hashed_sparse, None), id_weight_pair.id_tensor)
+
+ def test_linear_model(self):
+ wire_column = fc_old.categorical_column_with_hash_bucket('wire', 4)
+ self.assertEqual(4, wire_column._num_buckets)
+ with ops.Graph().as_default():
+ predictions = fc.linear_model({
+ wire_column.name: sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ }, (wire_column,))
+ bias = get_linear_model_bias()
+ wire_var = get_linear_model_column_var(wire_column)
+ with _initialized_session():
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
+ # 'marlo' -> 3: wire_var[3] = 4
+ # 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
+ self.assertAllClose(((4.,), (6.,)), predictions.eval())
+
+ def test_keras_linear_model(self):
+ wire_column = fc_old.categorical_column_with_hash_bucket('wire', 4)
+ self.assertEqual(4, wire_column._num_buckets)
+ with ops.Graph().as_default():
+ predictions = get_keras_linear_model_predictions({
+ wire_column.name:
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ }, (wire_column,))
+ bias = get_linear_model_bias()
+ wire_var = get_linear_model_column_var(wire_column)
+ with _initialized_session():
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
+ # 'marlo' -> 3: wire_var[3] = 4
+ # 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
+ self.assertAllClose(((4.,), (6.,)), predictions.eval())
+
+
+class CrossedColumnTest(test.TestCase):
+
+ def test_keys_empty(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'keys must be a list with length > 1'):
+ fc.crossed_column([], 10)
+
+ def test_keys_length_one(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'keys must be a list with length > 1'):
+ fc.crossed_column(['a'], 10)
+
+ def test_key_type_unsupported(self):
+ with self.assertRaisesRegexp(ValueError, 'Unsupported key type'):
+ fc.crossed_column(['a', fc.numeric_column('c')], 10)
+
+ with self.assertRaisesRegexp(
+ ValueError, 'categorical_column_with_hash_bucket is not supported'):
+ fc.crossed_column(
+ ['a', fc.categorical_column_with_hash_bucket('c', 10)], 10)
+
+ def test_hash_bucket_size_negative(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'hash_bucket_size must be > 1'):
+ fc.crossed_column(['a', 'c'], -1)
+
+ def test_hash_bucket_size_zero(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'hash_bucket_size must be > 1'):
+ fc.crossed_column(['a', 'c'], 0)
+
+ def test_hash_bucket_size_none(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'hash_bucket_size must be > 1'):
+ fc.crossed_column(['a', 'c'], None)
+
+ def test_name(self):
+ a = fc.numeric_column('a', dtype=dtypes.int32)
+ b = fc.bucketized_column(a, boundaries=[0, 1])
+ crossed1 = fc.crossed_column(['d1', 'd2'], 10)
+
+ crossed2 = fc.crossed_column([b, 'c', crossed1], 10)
+ self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
+
+ def test_name_ordered_alphabetically(self):
+ """Tests that the name does not depend on the order of given columns."""
+ a = fc.numeric_column('a', dtype=dtypes.int32)
+ b = fc.bucketized_column(a, boundaries=[0, 1])
+ crossed1 = fc.crossed_column(['d1', 'd2'], 10)
+
+ crossed2 = fc.crossed_column([crossed1, 'c', b], 10)
+ self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
+
+ def test_name_leaf_keys_ordered_alphabetically(self):
+ """Tests that the name does not depend on the order of given columns."""
+ a = fc.numeric_column('a', dtype=dtypes.int32)
+ b = fc.bucketized_column(a, boundaries=[0, 1])
+ crossed1 = fc.crossed_column(['d2', 'c'], 10)
+
+ crossed2 = fc.crossed_column([crossed1, 'd1', b], 10)
+ self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
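+ # The three name tests above pin down the naming rule: the crossed column's
+ # name is the sorted set of leaf keys joined by '_X_', independent of the
+ # order (or nesting) in which the keys were supplied.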
+
+ def test_parse_spec(self):
+ a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
+ b = fc.bucketized_column(a, boundaries=[0, 1])
+ crossed = fc.crossed_column([b, 'c'], 10)
+ self.assertEqual({
+ 'a': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32),
+ 'c': parsing_ops.VarLenFeature(dtypes.string),
+ }, crossed.parse_example_spec)
+
+ def test_num_buckets(self):
+ a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
+ b = fc.bucketized_column(a, boundaries=[0, 1])
+ crossed = fc.crossed_column([b, 'c'], 15)
+ self.assertEqual(15, crossed.num_buckets)
+
+ def test_deep_copy(self):
+ a = fc.numeric_column('a', dtype=dtypes.int32)
+ b = fc.bucketized_column(a, boundaries=[0, 1])
+ crossed1 = fc.crossed_column(['d1', 'd2'], 10)
+ crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
+ crossed2_copy = copy.deepcopy(crossed2)
+ self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2_copy.name)
+ self.assertEqual(15, crossed2_copy.hash_bucket_size)
+ self.assertEqual(5, crossed2_copy.hash_key)
+
+ def test_parse_example(self):
+ price = fc.numeric_column('price', shape=[2])
+ bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
+ price_cross_wire = fc.crossed_column([bucketized_price, 'wire'], 10)
+ data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'price':
+ feature_pb2.Feature(float_list=feature_pb2.FloatList(
+ value=[20., 110.])),
+ 'wire':
+ feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
+ value=[b'omar', b'stringer'])),
+ }))
+ features = parsing_ops.parse_example(
+ serialized=[data.SerializeToString()],
+ features=fc.make_parse_example_spec([price_cross_wire]))
+ self.assertIn('price', features)
+ self.assertIn('wire', features)
+ with self.test_session():
+ self.assertAllEqual([[20., 110.]], features['price'].eval())
+ wire_sparse = features['wire']
+ self.assertAllEqual([[0, 0], [0, 1]], wire_sparse.indices.eval())
+ # Use byte constants to pass the open-source test.
+ self.assertAllEqual([b'omar', b'stringer'], wire_sparse.values.eval())
+ self.assertAllEqual([1, 2], wire_sparse.dense_shape.eval())
+
+ def test_transform_feature(self):
+ price = fc.numeric_column('price', shape=[2])
+ bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
+ hash_bucket_size = 10
+ price_cross_wire = fc.crossed_column(
+ [bucketized_price, 'wire'], hash_bucket_size)
+ features = {
+ 'price': constant_op.constant([[1., 2.], [5., 6.]]),
+ 'wire': sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'],
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2]),
+ }
+ outputs = _transform_features(features, [price_cross_wire], None)
+ output = outputs[price_cross_wire]
+ with self.test_session() as sess:
+ output_val = sess.run(output)
+ self.assertAllEqual(
+ [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [1, 3]], output_val.indices)
+ for val in output_val.values:
+ self.assertIn(val, list(range(hash_bucket_size)))
+ self.assertAllEqual([2, 4], output_val.dense_shape)
+
+ def test_get_sparse_tensors(self):
+ a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
+ b = fc.bucketized_column(a, boundaries=(0, 1))
+ crossed1 = fc.crossed_column(['d1', 'd2'], 10)
+ crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
+ with ops.Graph().as_default():
+ transformation_cache = FeatureTransformationCache({
+ 'a':
+ constant_op.constant(((-1., .5), (.5, 1.))),
+ 'c':
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=['cA', 'cB', 'cC'],
+ dense_shape=(2, 2)),
+ 'd1':
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=['d1A', 'd1B', 'd1C'],
+ dense_shape=(2, 2)),
+ 'd2':
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=['d2A', 'd2B', 'd2C'],
+ dense_shape=(2, 2)),
+ })
+ id_weight_pair = crossed2.get_sparse_tensors(transformation_cache, None)
+ with _initialized_session():
+ id_tensor_eval = id_weight_pair.id_tensor.eval()
+ self.assertAllEqual(
+ ((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5),
+ (1, 6), (1, 7), (1, 8), (1, 9), (1, 10), (1, 11), (1, 12), (1, 13),
+ (1, 14), (1, 15)),
+ id_tensor_eval.indices)
+ # Check exact hashed output. If hashing changes, this test will break.
+ # All values are within [0, hash_bucket_size).
+ expected_values = (
+ 6, 14, 0, 13, 8, 8, 10, 12, 2, 0, 1, 9, 8, 12, 2, 0, 10, 11)
+ self.assertAllEqual(expected_values, id_tensor_eval.values)
+ self.assertAllEqual((2, 16), id_tensor_eval.dense_shape)
+
+ def test_get_sparse_tensors_simple(self):
+ """Same as test_get_sparse_tensors, but with simpler values."""
+ a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
+ b = fc.bucketized_column(a, boundaries=(0, 1))
+ crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
+ with ops.Graph().as_default():
+ transformation_cache = FeatureTransformationCache({
+ 'a':
+ constant_op.constant(((-1., .5), (.5, 1.))),
+ 'c':
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=['cA', 'cB', 'cC'],
+ dense_shape=(2, 2)),
+ })
+ id_weight_pair = crossed.get_sparse_tensors(transformation_cache, None)
+ with _initialized_session():
+ id_tensor_eval = id_weight_pair.id_tensor.eval()
+ self.assertAllEqual(
+ ((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3)),
+ id_tensor_eval.indices)
+ # Check exact hashed output. If hashing changes, this test will break.
+ # All values are within [0, hash_bucket_size).
+ expected_values = (1, 0, 1, 3, 4, 2)
+ self.assertAllEqual(expected_values, id_tensor_eval.values)
+ self.assertAllEqual((2, 4), id_tensor_eval.dense_shape)
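+ # Per example, the cross is the cartesian product of ids across columns,
+ # hashed into hash_bucket_size buckets: row 0 has 2 bucketized ids x 1 'c'
+ # value = 2 crossed ids; row 1 has 2 x 2 = 4, hence dense_shape (2, 4).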
+
+ def test_linear_model(self):
+ """Tests linear_model.
+
+ Uses data from test_get_sparse_tensors_simple.
+ """
+ a = fc_old.numeric_column('a', dtype=dtypes.int32, shape=(2,))
+ b = fc_old.bucketized_column(a, boundaries=(0, 1))
+ crossed = fc_old.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
+ with ops.Graph().as_default():
+ predictions = fc.linear_model({
+ 'a': constant_op.constant(((-1., .5), (.5, 1.))),
+ 'c': sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=['cA', 'cB', 'cC'],
+ dense_shape=(2, 2)),
+ }, (crossed,))
+ bias = get_linear_model_bias()
+ crossed_var = get_linear_model_column_var(crossed)
+ with _initialized_session() as sess:
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(
+ ((0.,), (0.,), (0.,), (0.,), (0.,)), crossed_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
+ # Expected ids after cross = (1, 0, 1, 3, 4, 2)
+ self.assertAllClose(((3.,), (14.,)), predictions.eval())
+ sess.run(bias.assign((.1,)))
+ self.assertAllClose(((3.1,), (14.1,)), predictions.eval())
+
+ def test_linear_model_with_weights(self):
+
+ class _TestColumnWithWeights(fc_old._CategoricalColumn):
+ """Produces sparse IDs and sparse weights."""
+
+ @property
+ def name(self):
+ return 'test_column'
+
+ @property
+ def _parse_example_spec(self):
+ return {
+ self.name: parsing_ops.VarLenFeature(dtypes.int32),
+ '{}_weights'.format(self.name): parsing_ops.VarLenFeature(
+ dtypes.float32),
+ }
+
+ @property
+ def _num_buckets(self):
+ return 5
+
+ def _transform_feature(self, inputs):
+ return (inputs.get(self.name),
+ inputs.get('{}_weights'.format(self.name)))
+
+ def _get_sparse_tensors(self, inputs, weight_collections=None,
+ trainable=None):
+ """Populates both id_tensor and weight_tensor."""
+ ids_and_weights = inputs.get(self)
+ return fc_old._CategoricalColumn.IdWeightPair(
+ id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])
+
+ t = _TestColumnWithWeights()
+ crossed = fc_old.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
+ with ops.Graph().as_default():
+ with self.assertRaisesRegexp(
+ ValueError,
+ 'crossed_column does not support weight_tensor.*{}'.format(t.name)):
+ fc.linear_model({
+ t.name: sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=[0, 1, 2],
+ dense_shape=(2, 2)),
+ '{}_weights'.format(t.name): sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=[1., 10., 2.],
+ dense_shape=(2, 2)),
+ 'c': sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=['cA', 'cB', 'cC'],
+ dense_shape=(2, 2)),
+ }, (crossed,))
+
+ def test_keras_linear_model(self):
+ """Tests _LinearModel.
+
+ Uses data from test_get_sparse_tensors_simple.
+ """
+ a = fc_old.numeric_column('a', dtype=dtypes.int32, shape=(2,))
+ b = fc_old.bucketized_column(a, boundaries=(0, 1))
+ crossed = fc_old.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
+ with ops.Graph().as_default():
+ predictions = get_keras_linear_model_predictions({
+ 'a':
+ constant_op.constant(((-1., .5), (.5, 1.))),
+ 'c':
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=['cA', 'cB', 'cC'],
+ dense_shape=(2, 2)),
+ }, (crossed,))
+ bias = get_linear_model_bias()
+ crossed_var = get_linear_model_column_var(crossed)
+ with _initialized_session() as sess:
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
+ crossed_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
+ # Expected ids after cross = (1, 0, 1, 3, 4, 2)
+ self.assertAllClose(((3.,), (14.,)), predictions.eval())
+ sess.run(bias.assign((.1,)))
+ self.assertAllClose(((3.1,), (14.1,)), predictions.eval())
+
+ def test_keras_linear_model_with_weights(self):
+
+ class _TestColumnWithWeights(fc_old._CategoricalColumn):
+ """Produces sparse IDs and sparse weights."""
+
+ @property
+ def name(self):
+ return 'test_column'
+
+ @property
+ def _parse_example_spec(self):
+ return {
+ self.name:
+ parsing_ops.VarLenFeature(dtypes.int32),
+ '{}_weights'.format(self.name):
+ parsing_ops.VarLenFeature(dtypes.float32),
+ }
+
+ @property
+ def _num_buckets(self):
+ return 5
+
+ def _transform_feature(self, inputs):
+ return (inputs.get(self.name),
+ inputs.get('{}_weights'.format(self.name)))
+
+ def _get_sparse_tensors(self,
+ inputs,
+ weight_collections=None,
+ trainable=None):
+ """Populates both id_tensor and weight_tensor."""
+ ids_and_weights = inputs.get(self)
+ return fc_old._CategoricalColumn.IdWeightPair(
+ id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])
+
+ t = _TestColumnWithWeights()
+ crossed = fc_old.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
+ with ops.Graph().as_default():
+ with self.assertRaisesRegexp(
+ ValueError,
+ 'crossed_column does not support weight_tensor.*{}'.format(t.name)):
+ get_keras_linear_model_predictions({
+ t.name:
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=[0, 1, 2],
+ dense_shape=(2, 2)),
+ '{}_weights'.format(t.name):
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=[1., 10., 2.],
+ dense_shape=(2, 2)),
+ 'c':
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=['cA', 'cB', 'cC'],
+ dense_shape=(2, 2)),
+ }, (crossed,))
+
+
+def get_linear_model_bias(name='linear_model'):
+ with variable_scope.variable_scope(name, reuse=True):
+ return variable_scope.get_variable('bias_weights')
+
+
+def get_linear_model_column_var(column, name='linear_model'):
+ return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
+ name + '/' + column.name)[0]
+
+
+def get_keras_linear_model_predictions(features,
+ feature_columns,
+ units=1,
+ sparse_combiner='sum',
+ weight_collections=None,
+ trainable=True,
+ cols_to_vars=None):
+ keras_linear_model = _LinearModel(
+ feature_columns,
+ units,
+ sparse_combiner,
+ weight_collections,
+ trainable,
+ name='linear_model')
+ retval = keras_linear_model(features) # pylint: disable=not-callable
+ if cols_to_vars is not None:
+ cols_to_vars.update(keras_linear_model.cols_to_vars())
+ return retval
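+# The helper above is a thin test shim: it instantiates the Keras-style
+# _LinearModel under the same scope name ('linear_model') that fc.linear_model
+# uses, so get_linear_model_bias/get_linear_model_column_var work for both.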
+
+
+class LinearModelTest(test.TestCase):
+
+ def test_raises_if_empty_feature_columns(self):
+ with self.assertRaisesRegexp(ValueError,
+ 'feature_columns must not be empty'):
+ fc.linear_model(features={}, feature_columns=[])
+
+ def test_should_be_feature_column(self):
+ with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'):
+ fc.linear_model(features={'a': [[0]]}, feature_columns='NotSupported')
+
+ def test_should_be_dense_or_categorical_column(self):
+
+ class NotSupportedColumn(fc_old._FeatureColumn):
+
+ @property
+ def name(self):
+ return 'NotSupportedColumn'
+
+ def _transform_feature(self, cache):
+ pass
+
+ @property
+ def _parse_example_spec(self):
+ pass
+
+ with self.assertRaisesRegexp(
+ ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
+ fc.linear_model(
+ features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])
+
+ def test_does_not_support_dict_columns(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'Expected feature_columns to be iterable, found dict.'):
+ fc.linear_model(
+ features={'a': [[0]]},
+ feature_columns={'a': fc_old.numeric_column('a')})
+
+ def test_raises_if_duplicate_name(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'Duplicate feature column name found for columns'):
+ fc.linear_model(
+ features={'a': [[0]]},
+ feature_columns=[
+ fc_old.numeric_column('a'),
+ fc_old.numeric_column('a')
+ ])
+
+ def test_dense_bias(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default():
+ features = {'price': [[1.], [5.]]}
+ predictions = fc.linear_model(features, [price])
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ sess.run(price_var.assign([[10.]]))
+ sess.run(bias.assign([5.]))
+ self.assertAllClose([[15.], [55.]], predictions.eval())
+
+ def test_sparse_bias(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default():
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'], # hashed to [2, 0, 3]
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ features = {'wire_cast': wire_tensor}
+ predictions = fc.linear_model(features, [wire_cast])
+ bias = get_linear_model_bias()
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ self.assertAllClose([[0.], [0.], [0.], [0.]], wire_cast_var.eval())
+ sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
+ sess.run(bias.assign([5.]))
+ self.assertAllClose([[1005.], [10015.]], predictions.eval())
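+ # Arithmetic check: row 0 has id 2 -> 1000 + bias 5 = 1005; row 1 has ids
+ # 0 and 3 -> 10 + 10000 + 5 = 10015 (sparse_combiner defaults to 'sum').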
+
+ def test_dense_and_sparse_bias(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default():
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'], # hashed to [2, 0, 3]
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
+ predictions = fc.linear_model(features, [wire_cast, price])
+ bias = get_linear_model_bias()
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
+ sess.run(bias.assign([5.]))
+ sess.run(price_var.assign([[10.]]))
+ self.assertAllClose([[1015.], [10065.]], predictions.eval())
+
+ def test_dense_and_sparse_column(self):
+ """When the column is both dense and sparse, uses sparse tensors."""
+
+ class _DenseAndSparseColumn(fc_old._DenseColumn, fc_old._CategoricalColumn):
+
+ @property
+ def name(self):
+ return 'dense_and_sparse_column'
+
+ @property
+ def _parse_example_spec(self):
+ return {self.name: parsing_ops.VarLenFeature(self.dtype)}
+
+ def _transform_feature(self, inputs):
+ return inputs.get(self.name)
+
+ @property
+ def _variable_shape(self):
+ raise ValueError('Should not use this method.')
+
+ def _get_dense_tensor(self, inputs, weight_collections=None,
+ trainable=None):
+ raise ValueError('Should not use this method.')
+
+ @property
+ def _num_buckets(self):
+ return 4
+
+ def _get_sparse_tensors(self, inputs, weight_collections=None,
+ trainable=None):
+ sp_tensor = sparse_tensor.SparseTensor(
+ indices=[[0, 0], [1, 0], [1, 1]],
+ values=[2, 0, 3],
+ dense_shape=[2, 2])
+ return fc_old._CategoricalColumn.IdWeightPair(sp_tensor, None)
+
+ dense_and_sparse_column = _DenseAndSparseColumn()
+ with ops.Graph().as_default():
+ sp_tensor = sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'],
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ features = {dense_and_sparse_column.name: sp_tensor}
+ predictions = fc.linear_model(features, [dense_and_sparse_column])
+ bias = get_linear_model_bias()
+ dense_and_sparse_column_var = get_linear_model_column_var(
+ dense_and_sparse_column)
+ with _initialized_session() as sess:
+ sess.run(dense_and_sparse_column_var.assign(
+ [[10.], [100.], [1000.], [10000.]]))
+ sess.run(bias.assign([5.]))
+ self.assertAllClose([[1005.], [10015.]], predictions.eval())
+
+ def test_dense_multi_output(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default():
+ features = {'price': [[1.], [5.]]}
+ predictions = fc.linear_model(features, [price], units=3)
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ self.assertAllClose(np.zeros((3,)), bias.eval())
+ self.assertAllClose(np.zeros((1, 3)), price_var.eval())
+ sess.run(price_var.assign([[10., 100., 1000.]]))
+ sess.run(bias.assign([5., 6., 7.]))
+ self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
+ predictions.eval())
+
+ def test_sparse_multi_output(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default():
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'], # hashed to [2, 0, 3]
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ features = {'wire_cast': wire_tensor}
+ predictions = fc.linear_model(features, [wire_cast], units=3)
+ bias = get_linear_model_bias()
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ with _initialized_session() as sess:
+ self.assertAllClose(np.zeros((3,)), bias.eval())
+ self.assertAllClose(np.zeros((4, 3)), wire_cast_var.eval())
+ sess.run(
+ wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.], [
+ 1000., 1100., 1200.
+ ], [10000., 11000., 12000.]]))
+ sess.run(bias.assign([5., 6., 7.]))
+ self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
+ predictions.eval())
+
+ def test_dense_multi_dimension(self):
+ price = fc_old.numeric_column('price', shape=2)
+ with ops.Graph().as_default():
+ features = {'price': [[1., 2.], [5., 6.]]}
+ predictions = fc.linear_model(features, [price])
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ self.assertAllClose([[0.], [0.]], price_var.eval())
+ sess.run(price_var.assign([[10.], [100.]]))
+ self.assertAllClose([[210.], [650.]], predictions.eval())
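+ # A multi-dimension dense column contributes a dot product:
+ # [1, 2] . [10, 100] = 210 and [5, 6] . [10, 100] = 650
+ # (bias stays at its zero initialization here).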
+
+ def test_sparse_multi_rank(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default():
+ wire_tensor = array_ops.sparse_placeholder(dtypes.string)
+ wire_value = sparse_tensor.SparseTensorValue(
+ values=['omar', 'stringer', 'marlo', 'omar'], # hashed to [2, 0, 3, 2]
+ indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
+ dense_shape=[2, 2, 2])
+ features = {'wire_cast': wire_tensor}
+ predictions = fc.linear_model(features, [wire_cast])
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ with _initialized_session() as sess:
+ self.assertAllClose(np.zeros((4, 1)), wire_cast_var.eval())
+ self.assertAllClose(
+ np.zeros((2, 1)),
+ predictions.eval(feed_dict={wire_tensor: wire_value}))
+ sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
+ self.assertAllClose(
+ [[1010.], [11000.]],
+ predictions.eval(feed_dict={wire_tensor: wire_value}))
+
+ def test_sparse_combiner(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default():
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'], # hashed to [2, 0, 3]
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ features = {'wire_cast': wire_tensor}
+ predictions = fc.linear_model(
+ features, [wire_cast], sparse_combiner='mean')
+ bias = get_linear_model_bias()
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ with _initialized_session() as sess:
+ sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
+ sess.run(bias.assign([5.]))
+ self.assertAllClose([[1005.], [5010.]], predictions.eval())
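+ # With 'mean', each example's weights are averaged rather than summed:
+ # row 0: 1000 + 5 = 1005; row 1: (10 + 10000) / 2 + 5 = 5010.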
+
+ def test_sparse_combiner_with_negative_weights(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ wire_cast_weights = fc_old.weighted_categorical_column(wire_cast, 'weights')
+
+ with ops.Graph().as_default():
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'], # hashed to [2, 0, 3]
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ features = {
+ 'wire_cast': wire_tensor,
+ 'weights': constant_op.constant([[1., 1., -1.0]])
+ }
+ predictions = fc.linear_model(
+ features, [wire_cast_weights], sparse_combiner='sum')
+ bias = get_linear_model_bias()
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ with _initialized_session() as sess:
+ sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
+ sess.run(bias.assign([5.]))
+ self.assertAllClose([[1005.], [-9985.]], predictions.eval())
+
+ def test_dense_multi_dimension_multi_output(self):
+ price = fc_old.numeric_column('price', shape=2)
+ with ops.Graph().as_default():
+ features = {'price': [[1., 2.], [5., 6.]]}
+ predictions = fc.linear_model(features, [price], units=3)
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ self.assertAllClose(np.zeros((3,)), bias.eval())
+ self.assertAllClose(np.zeros((2, 3)), price_var.eval())
+ sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
+ sess.run(bias.assign([2., 3., 4.]))
+ self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
+ predictions.eval())
+
+ def test_raises_if_shape_mismatch(self):
+ price = fc_old.numeric_column('price', shape=2)
+ with ops.Graph().as_default():
+ features = {'price': [[1.], [5.]]}
+ with self.assertRaisesRegexp(
+ Exception,
+ r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
+ fc.linear_model(features, [price])
+
+ def test_dense_reshaping(self):
+ price = fc_old.numeric_column('price', shape=[1, 2])
+ with ops.Graph().as_default():
+ features = {'price': [[[1., 2.]], [[5., 6.]]]}
+ predictions = fc.linear_model(features, [price])
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ self.assertAllClose([[0.], [0.]], price_var.eval())
+ self.assertAllClose([[0.], [0.]], predictions.eval())
+ sess.run(price_var.assign([[10.], [100.]]))
+ self.assertAllClose([[210.], [650.]], predictions.eval())
+
+ def test_dense_multi_column(self):
+ price1 = fc_old.numeric_column('price1', shape=2)
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {
+ 'price1': [[1., 2.], [5., 6.]],
+ 'price2': [[3.], [4.]]
+ }
+ predictions = fc.linear_model(features, [price1, price2])
+ bias = get_linear_model_bias()
+ price1_var = get_linear_model_column_var(price1)
+ price2_var = get_linear_model_column_var(price2)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ self.assertAllClose([[0.], [0.]], price1_var.eval())
+ self.assertAllClose([[0.]], price2_var.eval())
+ self.assertAllClose([[0.], [0.]], predictions.eval())
+ sess.run(price1_var.assign([[10.], [100.]]))
+ sess.run(price2_var.assign([[1000.]]))
+ sess.run(bias.assign([7.]))
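+        # Row 0: 1*10. + 2*100. + 3*1000. + 7. = 3217.
+        # Row 1: 5*10. + 6*100. + 4*1000. + 7. = 4657.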
+ self.assertAllClose([[3217.], [4657.]], predictions.eval())
+
+ def test_fills_cols_to_vars(self):
+ price1 = fc_old.numeric_column('price1', shape=2)
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
+ cols_to_vars = {}
+ fc.linear_model(features, [price1, price2], cols_to_vars=cols_to_vars)
+ bias = get_linear_model_bias()
+ price1_var = get_linear_model_column_var(price1)
+ price2_var = get_linear_model_column_var(price2)
+ self.assertAllEqual(cols_to_vars['bias'], [bias])
+ self.assertAllEqual(cols_to_vars[price1], [price1_var])
+ self.assertAllEqual(cols_to_vars[price2], [price2_var])
+
+ def test_fills_cols_to_vars_partitioned_variables(self):
+ price1 = fc_old.numeric_column('price1', shape=2)
+ price2 = fc_old.numeric_column('price2', shape=3)
+ with ops.Graph().as_default():
+ features = {
+ 'price1': [[1., 2.], [6., 7.]],
+ 'price2': [[3., 4., 5.], [8., 9., 10.]]
+ }
+ cols_to_vars = {}
+ with variable_scope.variable_scope(
+ 'linear',
+ partitioner=partitioned_variables.fixed_size_partitioner(2, axis=0)):
+ fc.linear_model(features, [price1, price2], cols_to_vars=cols_to_vars)
+ with _initialized_session():
+ self.assertEqual([0.], cols_to_vars['bias'][0].eval())
+ # Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables.
+ self.assertAllEqual([[0.]], cols_to_vars[price1][0].eval())
+ self.assertAllEqual([[0.]], cols_to_vars[price1][1].eval())
+ # Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and
+ # a [1, 1] Variable.
+ self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0].eval())
+ self.assertAllEqual([[0.]], cols_to_vars[price2][1].eval())
+
+ def test_dense_collection(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default() as g:
+ features = {'price': [[1.], [5.]]}
+ fc.linear_model(features, [price], weight_collections=['my-vars'])
+ my_vars = g.get_collection('my-vars')
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ self.assertIn(bias, my_vars)
+ self.assertIn(price_var, my_vars)
+
+ def test_sparse_collection(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default() as g:
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
+ features = {'wire_cast': wire_tensor}
+ fc.linear_model(
+ features, [wire_cast], weight_collections=['my-vars'])
+ my_vars = g.get_collection('my-vars')
+ bias = get_linear_model_bias()
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ self.assertIn(bias, my_vars)
+ self.assertIn(wire_cast_var, my_vars)
+
+ def test_dense_trainable_default(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default() as g:
+ features = {'price': [[1.], [5.]]}
+ fc.linear_model(features, [price])
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ self.assertIn(bias, trainable_vars)
+ self.assertIn(price_var, trainable_vars)
+
+ def test_sparse_trainable_default(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default() as g:
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
+ features = {'wire_cast': wire_tensor}
+ fc.linear_model(features, [wire_cast])
+ trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ bias = get_linear_model_bias()
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ self.assertIn(bias, trainable_vars)
+ self.assertIn(wire_cast_var, trainable_vars)
+
+ def test_dense_trainable_false(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default() as g:
+ features = {'price': [[1.], [5.]]}
+ fc.linear_model(features, [price], trainable=False)
+ trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ self.assertEqual([], trainable_vars)
+
+ def test_sparse_trainable_false(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default() as g:
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
+ features = {'wire_cast': wire_tensor}
+ fc.linear_model(features, [wire_cast], trainable=False)
+ trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ self.assertEqual([], trainable_vars)
+
+ def test_column_order(self):
+ price_a = fc_old.numeric_column('price_a')
+ price_b = fc_old.numeric_column('price_b')
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default() as g:
+ features = {
+ 'price_a': [[1.]],
+ 'price_b': [[3.]],
+ 'wire_cast':
+ sparse_tensor.SparseTensor(
+ values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
+ }
+ fc.linear_model(
+ features, [price_a, wire_cast, price_b],
+ weight_collections=['my-vars'])
+ my_vars = g.get_collection('my-vars')
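+      # Variables are created in name-sorted order, regardless of the order
+      # in which the columns were passed to linear_model.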
+ self.assertIn('price_a', my_vars[0].name)
+ self.assertIn('price_b', my_vars[1].name)
+ self.assertIn('wire_cast', my_vars[2].name)
+
+ with ops.Graph().as_default() as g:
+ features = {
+ 'price_a': [[1.]],
+ 'price_b': [[3.]],
+ 'wire_cast':
+ sparse_tensor.SparseTensor(
+ values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
+ }
+ fc.linear_model(
+ features, [wire_cast, price_b, price_a],
+ weight_collections=['my-vars'])
+ my_vars = g.get_collection('my-vars')
+ self.assertIn('price_a', my_vars[0].name)
+ self.assertIn('price_b', my_vars[1].name)
+ self.assertIn('wire_cast', my_vars[2].name)
+
+ def test_static_batch_size_mismatch(self):
+ price1 = fc_old.numeric_column('price1')
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {
+ 'price1': [[1.], [5.], [7.]], # batchsize = 3
+ 'price2': [[3.], [4.]] # batchsize = 2
+ }
+ with self.assertRaisesRegexp(
+ ValueError,
+          r'Batch size \(first dimension\) of each feature must be same.'):
+ fc.linear_model(features, [price1, price2])
+
+ def test_subset_of_static_batch_size_mismatch(self):
+ price1 = fc_old.numeric_column('price1')
+ price2 = fc_old.numeric_column('price2')
+ price3 = fc_old.numeric_column('price3')
+ with ops.Graph().as_default():
+ features = {
+          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize unknown
+ 'price2': [[3.], [4.]], # batchsize = 2
+ 'price3': [[3.], [4.], [5.]] # batchsize = 3
+ }
+ with self.assertRaisesRegexp(
+ ValueError,
+          r'Batch size \(first dimension\) of each feature must be same.'):
+ fc.linear_model(features, [price1, price2, price3])
+
+ def test_runtime_batch_size_mismatch(self):
+ price1 = fc_old.numeric_column('price1')
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {
+ 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
+ 'price2': [[3.], [4.]] # batchsize = 2
+ }
+ predictions = fc.linear_model(features, [price1, price2])
+ with _initialized_session() as sess:
+ with self.assertRaisesRegexp(errors.OpError,
+ 'must have the same size and shape'):
+ sess.run(
+ predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
+
+ def test_runtime_batch_size_matches(self):
+ price1 = fc_old.numeric_column('price1')
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {
+ 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
+ 'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
+ }
+ predictions = fc.linear_model(features, [price1, price2])
+ with _initialized_session() as sess:
+ sess.run(
+ predictions,
+ feed_dict={
+ features['price1']: [[1.], [5.]],
+ features['price2']: [[1.], [5.]],
+ })
+
+ def test_with_numpy_input_fn(self):
+ price = fc_old.numeric_column('price')
+ price_buckets = fc_old.bucketized_column(
+        price, boundaries=[0., 10., 100.])
+ body_style = fc_old.categorical_column_with_vocabulary_list(
+ 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
+
+ input_fn = numpy_io.numpy_input_fn(
+ x={
+ 'price': np.array([-1., 2., 13., 104.]),
+ 'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
+ },
+ batch_size=2,
+ shuffle=False)
+ features = input_fn()
+ net = fc.linear_model(features, [price_buckets, body_style])
+    # linear_model returns a single logit per example (units defaults to 1).
+ with _initialized_session() as sess:
+ coord = coordinator.Coordinator()
+ threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
+
+ bias = get_linear_model_bias()
+ price_buckets_var = get_linear_model_column_var(price_buckets)
+ body_style_var = get_linear_model_column_var(body_style)
+
+ sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
+ sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
+ sess.run(bias.assign([5.]))
+
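+      # First batch rows: price -1. -> bucket 0 (10.) and 'sedan' (-1000.);
+      # price 2. -> bucket 1 (100.) and 'hardtop' (-10.).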
+ self.assertAllClose([[10 - 1000 + 5.], [100 - 10 + 5.]], sess.run(net))
+
+ coord.request_stop()
+ coord.join(threads)
+
+ def test_with_1d_sparse_tensor(self):
+ price = fc_old.numeric_column('price')
+ price_buckets = fc_old.bucketized_column(
+        price, boundaries=[0., 10., 100.])
+ body_style = fc_old.categorical_column_with_vocabulary_list(
+ 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
+
+    # Provides a 1-dim dense tensor and a 1-dim sparse tensor.
+ features = {
+        'price': constant_op.constant([-1., 12.]),
+ 'body-style': sparse_tensor.SparseTensor(
+ indices=((0,), (1,)),
+ values=('sedan', 'hardtop'),
+ dense_shape=(2,)),
+ }
+ self.assertEqual(1, features['price'].shape.ndims)
+ self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
+
+ net = fc.linear_model(features, [price_buckets, body_style])
+ with _initialized_session() as sess:
+ bias = get_linear_model_bias()
+ price_buckets_var = get_linear_model_column_var(price_buckets)
+ body_style_var = get_linear_model_column_var(body_style)
+
+ sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
+ sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
+ sess.run(bias.assign([5.]))
+
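+      # Row 0: price -1. -> bucket 0 (10.), 'sedan' -> -1000.
+      # Row 1: price 12. -> bucket 2 (1000.), 'hardtop' -> -10.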
+ self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], sess.run(net))
+
+ def test_with_1d_unknown_shape_sparse_tensor(self):
+ price = fc_old.numeric_column('price')
+ price_buckets = fc_old.bucketized_column(
+        price, boundaries=[0., 10., 100.])
+ body_style = fc_old.categorical_column_with_vocabulary_list(
+ 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
+ country = fc_old.categorical_column_with_vocabulary_list(
+ 'country', vocabulary_list=['US', 'JP', 'CA'])
+
+    # Provides dense and sparse placeholders with unknown shapes.
+ features = {
+ 'price': array_ops.placeholder(dtypes.float32),
+ 'body-style': array_ops.sparse_placeholder(dtypes.string),
+ 'country': array_ops.placeholder(dtypes.string),
+ }
+ self.assertIsNone(features['price'].shape.ndims)
+ self.assertIsNone(features['body-style'].get_shape().ndims)
+
+ price_data = np.array([-1., 12.])
+ body_style_data = sparse_tensor.SparseTensorValue(
+ indices=((0,), (1,)),
+ values=('sedan', 'hardtop'),
+ dense_shape=(2,))
+ country_data = np.array(['US', 'CA'])
+
+ net = fc.linear_model(features, [price_buckets, body_style, country])
+ bias = get_linear_model_bias()
+ price_buckets_var = get_linear_model_column_var(price_buckets)
+ body_style_var = get_linear_model_column_var(body_style)
+ with _initialized_session() as sess:
+ sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
+ sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
+ sess.run(bias.assign([5.]))
+
+ self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
+ sess.run(
+ net,
+ feed_dict={
+ features['price']: price_data,
+ features['body-style']: body_style_data,
+ features['country']: country_data
+ }))
+
+ def test_with_rank_0_feature(self):
+ price = fc_old.numeric_column('price')
+ features = {
+ 'price': constant_op.constant(0),
+ }
+ self.assertEqual(0, features['price'].shape.ndims)
+
+ # Static rank 0 should fail
+ with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
+ fc.linear_model(features, [price])
+
+ # Dynamic rank 0 should fail
+ features = {
+ 'price': array_ops.placeholder(dtypes.float32),
+ }
+ net = fc.linear_model(features, [price])
+ self.assertEqual(1, net.shape[1])
+ with _initialized_session() as sess:
+ with self.assertRaisesOpError('Feature .* cannot have rank 0'):
+ sess.run(net, feed_dict={features['price']: np.array(1)})
+
+ def test_multiple_linear_models(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default():
+ features1 = {'price': [[1.], [5.]]}
+ features2 = {'price': [[2.], [10.]]}
+ predictions1 = fc.linear_model(features1, [price])
+ predictions2 = fc.linear_model(features2, [price])
+ bias1 = get_linear_model_bias(name='linear_model')
+ bias2 = get_linear_model_bias(name='linear_model_1')
+ price_var1 = get_linear_model_column_var(price, name='linear_model')
+ price_var2 = get_linear_model_column_var(price, name='linear_model_1')
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias1.eval())
+ sess.run(price_var1.assign([[10.]]))
+ sess.run(bias1.assign([5.]))
+ self.assertAllClose([[15.], [55.]], predictions1.eval())
+ self.assertAllClose([0.], bias2.eval())
+ sess.run(price_var2.assign([[10.]]))
+ sess.run(bias2.assign([5.]))
+ self.assertAllClose([[25.], [105.]], predictions2.eval())
+
+
+class _LinearModelTest(test.TestCase):
+
+ def test_raises_if_empty_feature_columns(self):
+ with self.assertRaisesRegexp(ValueError,
+ 'feature_columns must not be empty'):
+ get_keras_linear_model_predictions(features={}, feature_columns=[])
+
+ def test_should_be_feature_column(self):
+ with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'):
+ get_keras_linear_model_predictions(
+ features={'a': [[0]]}, feature_columns='NotSupported')
+
+ def test_should_be_dense_or_categorical_column(self):
+
+ class NotSupportedColumn(fc_old._FeatureColumn):
+
+ @property
+ def name(self):
+ return 'NotSupportedColumn'
+
+ def _transform_feature(self, cache):
+ pass
+
+ @property
+ def _parse_example_spec(self):
+ pass
+
+ with self.assertRaisesRegexp(
+ ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
+ get_keras_linear_model_predictions(
+ features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])
+
+ def test_does_not_support_dict_columns(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'Expected feature_columns to be iterable, found dict.'):
+ fc.linear_model(
+ features={'a': [[0]]},
+ feature_columns={'a': fc_old.numeric_column('a')})
+
+ def test_raises_if_duplicate_name(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'Duplicate feature column name found for columns'):
+ get_keras_linear_model_predictions(
+ features={'a': [[0]]},
+ feature_columns=[
+ fc_old.numeric_column('a'),
+ fc_old.numeric_column('a')
+ ])
+
+ def test_dense_bias(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default():
+ features = {'price': [[1.], [5.]]}
+ predictions = get_keras_linear_model_predictions(features, [price])
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ sess.run(price_var.assign([[10.]]))
+ sess.run(bias.assign([5.]))
+ self.assertAllClose([[15.], [55.]], predictions.eval())
+
+ def test_sparse_bias(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default():
+ wire_tensor = sparse_tensor.SparseTensor(
+          values=['omar', 'stringer', 'marlo'],  # hashed to [2, 0, 3]
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ features = {'wire_cast': wire_tensor}
+ predictions = get_keras_linear_model_predictions(features, [wire_cast])
+ bias = get_linear_model_bias()
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ self.assertAllClose([[0.], [0.], [0.], [0.]], wire_cast_var.eval())
+ sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
+ sess.run(bias.assign([5.]))
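+        # Row 1 sums ids 0 and 3: 10. + 10000. + 5. = 10015.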
+ self.assertAllClose([[1005.], [10015.]], predictions.eval())
+
+ def test_dense_and_sparse_bias(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default():
+ wire_tensor = sparse_tensor.SparseTensor(
+          values=['omar', 'stringer', 'marlo'],  # hashed to [2, 0, 3]
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
+ predictions = get_keras_linear_model_predictions(features,
+ [wire_cast, price])
+ bias = get_linear_model_bias()
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
+ sess.run(bias.assign([5.]))
+ sess.run(price_var.assign([[10.]]))
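+        # Row 1: wire ids 0 and 3 (10. + 10000.) + price 5. * 10. + 5. = 10065.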
+ self.assertAllClose([[1015.], [10065.]], predictions.eval())
+
+ def test_dense_and_sparse_column(self):
+ """When the column is both dense and sparse, uses sparse tensors."""
+
+ class _DenseAndSparseColumn(fc_old._DenseColumn, fc_old._CategoricalColumn):
+
+ @property
+ def name(self):
+ return 'dense_and_sparse_column'
+
+ @property
+ def _parse_example_spec(self):
+ return {self.name: parsing_ops.VarLenFeature(self.dtype)}
+
+ def _transform_feature(self, inputs):
+ return inputs.get(self.name)
+
+ @property
+ def _variable_shape(self):
+ raise ValueError('Should not use this method.')
+
+ def _get_dense_tensor(self,
+ inputs,
+ weight_collections=None,
+ trainable=None):
+ raise ValueError('Should not use this method.')
+
+ @property
+ def _num_buckets(self):
+ return 4
+
+ def _get_sparse_tensors(self,
+ inputs,
+ weight_collections=None,
+ trainable=None):
+ sp_tensor = sparse_tensor.SparseTensor(
+ indices=[[0, 0], [1, 0], [1, 1]],
+ values=[2, 0, 3],
+ dense_shape=[2, 2])
+ return fc_old._CategoricalColumn.IdWeightPair(sp_tensor, None)
+
+ dense_and_sparse_column = _DenseAndSparseColumn()
+ with ops.Graph().as_default():
+ sp_tensor = sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'],
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ features = {dense_and_sparse_column.name: sp_tensor}
+ predictions = get_keras_linear_model_predictions(
+ features, [dense_and_sparse_column])
+ bias = get_linear_model_bias()
+ dense_and_sparse_column_var = get_linear_model_column_var(
+ dense_and_sparse_column)
+ with _initialized_session() as sess:
+ sess.run(
+ dense_and_sparse_column_var.assign([[10.], [100.], [1000.],
+ [10000.]]))
+ sess.run(bias.assign([5.]))
+ self.assertAllClose([[1005.], [10015.]], predictions.eval())
+
+ def test_dense_multi_output(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default():
+ features = {'price': [[1.], [5.]]}
+ predictions = get_keras_linear_model_predictions(
+ features, [price], units=3)
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ self.assertAllClose(np.zeros((3,)), bias.eval())
+ self.assertAllClose(np.zeros((1, 3)), price_var.eval())
+ sess.run(price_var.assign([[10., 100., 1000.]]))
+ sess.run(bias.assign([5., 6., 7.]))
+ self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
+ predictions.eval())
+
+ def test_sparse_multi_output(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default():
+ wire_tensor = sparse_tensor.SparseTensor(
+          values=['omar', 'stringer', 'marlo'],  # hashed to [2, 0, 3]
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ features = {'wire_cast': wire_tensor}
+ predictions = get_keras_linear_model_predictions(
+ features, [wire_cast], units=3)
+ bias = get_linear_model_bias()
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ with _initialized_session() as sess:
+ self.assertAllClose(np.zeros((3,)), bias.eval())
+ self.assertAllClose(np.zeros((4, 3)), wire_cast_var.eval())
+ sess.run(
+ wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.],
+                                  [1000., 1100., 1200.],
+                                  [10000., 11000., 12000.]]))
+ sess.run(bias.assign([5., 6., 7.]))
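+        # Row 1 sums the weight rows of ids 0 and 3, then adds the bias.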
+ self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
+ predictions.eval())
+
+ def test_dense_multi_dimension(self):
+ price = fc_old.numeric_column('price', shape=2)
+ with ops.Graph().as_default():
+ features = {'price': [[1., 2.], [5., 6.]]}
+ predictions = get_keras_linear_model_predictions(features, [price])
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ self.assertAllClose([[0.], [0.]], price_var.eval())
+ sess.run(price_var.assign([[10.], [100.]]))
+ self.assertAllClose([[210.], [650.]], predictions.eval())
+
+ def test_sparse_multi_rank(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default():
+ wire_tensor = array_ops.sparse_placeholder(dtypes.string)
+ wire_value = sparse_tensor.SparseTensorValue(
+          values=['omar', 'stringer', 'marlo', 'omar'],  # hashed to [2, 0, 3, 2]
+ indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
+ dense_shape=[2, 2, 2])
+ features = {'wire_cast': wire_tensor}
+ predictions = get_keras_linear_model_predictions(features, [wire_cast])
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ with _initialized_session() as sess:
+ self.assertAllClose(np.zeros((4, 1)), wire_cast_var.eval())
+ self.assertAllClose(
+ np.zeros((2, 1)),
+ predictions.eval(feed_dict={wire_tensor: wire_value}))
+ sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
+ self.assertAllClose(
+ [[1010.], [11000.]],
+ predictions.eval(feed_dict={wire_tensor: wire_value}))
+
+ def test_sparse_combiner(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default():
+ wire_tensor = sparse_tensor.SparseTensor(
+          values=['omar', 'stringer', 'marlo'],  # hashed to [2, 0, 3]
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ features = {'wire_cast': wire_tensor}
+ predictions = get_keras_linear_model_predictions(
+ features, [wire_cast], sparse_combiner='mean')
+ bias = get_linear_model_bias()
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ with _initialized_session() as sess:
+ sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
+ sess.run(bias.assign([5.]))
+ self.assertAllClose([[1005.], [5010.]], predictions.eval())
+
+ def test_dense_multi_dimension_multi_output(self):
+ price = fc_old.numeric_column('price', shape=2)
+ with ops.Graph().as_default():
+ features = {'price': [[1., 2.], [5., 6.]]}
+ predictions = get_keras_linear_model_predictions(
+ features, [price], units=3)
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ self.assertAllClose(np.zeros((3,)), bias.eval())
+ self.assertAllClose(np.zeros((2, 3)), price_var.eval())
+ sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
+ sess.run(bias.assign([2., 3., 4.]))
+ self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
+ predictions.eval())
+
+ def test_raises_if_shape_mismatch(self):
+ price = fc_old.numeric_column('price', shape=2)
+ with ops.Graph().as_default():
+ features = {'price': [[1.], [5.]]}
+ with self.assertRaisesRegexp(
+ Exception,
+ r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
+ get_keras_linear_model_predictions(features, [price])
+
+ def test_dense_reshaping(self):
+ price = fc_old.numeric_column('price', shape=[1, 2])
+ with ops.Graph().as_default():
+ features = {'price': [[[1., 2.]], [[5., 6.]]]}
+ predictions = get_keras_linear_model_predictions(features, [price])
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ self.assertAllClose([[0.], [0.]], price_var.eval())
+ self.assertAllClose([[0.], [0.]], predictions.eval())
+ sess.run(price_var.assign([[10.], [100.]]))
+ self.assertAllClose([[210.], [650.]], predictions.eval())
+
+ def test_dense_multi_column(self):
+ price1 = fc_old.numeric_column('price1', shape=2)
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
+ predictions = get_keras_linear_model_predictions(features,
+ [price1, price2])
+ bias = get_linear_model_bias()
+ price1_var = get_linear_model_column_var(price1)
+ price2_var = get_linear_model_column_var(price2)
+ with _initialized_session() as sess:
+ self.assertAllClose([0.], bias.eval())
+ self.assertAllClose([[0.], [0.]], price1_var.eval())
+ self.assertAllClose([[0.]], price2_var.eval())
+ self.assertAllClose([[0.], [0.]], predictions.eval())
+ sess.run(price1_var.assign([[10.], [100.]]))
+ sess.run(price2_var.assign([[1000.]]))
+ sess.run(bias.assign([7.]))
+ self.assertAllClose([[3217.], [4657.]], predictions.eval())
+
+ def test_fills_cols_to_vars(self):
+ price1 = fc_old.numeric_column('price1', shape=2)
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
+ cols_to_vars = {}
+ get_keras_linear_model_predictions(
+ features, [price1, price2], cols_to_vars=cols_to_vars)
+ bias = get_linear_model_bias()
+ price1_var = get_linear_model_column_var(price1)
+ price2_var = get_linear_model_column_var(price2)
+ self.assertAllEqual(cols_to_vars['bias'], [bias])
+ self.assertAllEqual(cols_to_vars[price1], [price1_var])
+ self.assertAllEqual(cols_to_vars[price2], [price2_var])
+
+ def test_fills_cols_to_vars_partitioned_variables(self):
+ price1 = fc_old.numeric_column('price1', shape=2)
+ price2 = fc_old.numeric_column('price2', shape=3)
+ with ops.Graph().as_default():
+ features = {
+ 'price1': [[1., 2.], [6., 7.]],
+ 'price2': [[3., 4., 5.], [8., 9., 10.]]
+ }
+ cols_to_vars = {}
+ with variable_scope.variable_scope(
+ 'linear',
+ partitioner=partitioned_variables.fixed_size_partitioner(2, axis=0)):
+ get_keras_linear_model_predictions(
+ features, [price1, price2], cols_to_vars=cols_to_vars)
+ with _initialized_session():
+ self.assertEqual([0.], cols_to_vars['bias'][0].eval())
+ # Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables.
+ self.assertAllEqual([[0.]], cols_to_vars[price1][0].eval())
+ self.assertAllEqual([[0.]], cols_to_vars[price1][1].eval())
+ # Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and
+ # a [1, 1] Variable.
+ self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0].eval())
+ self.assertAllEqual([[0.]], cols_to_vars[price2][1].eval())
+
+ def test_dense_collection(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default() as g:
+ features = {'price': [[1.], [5.]]}
+ get_keras_linear_model_predictions(
+ features, [price], weight_collections=['my-vars'])
+ my_vars = g.get_collection('my-vars')
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ self.assertIn(bias, my_vars)
+ self.assertIn(price_var, my_vars)
+
+ def test_sparse_collection(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default() as g:
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
+ features = {'wire_cast': wire_tensor}
+ get_keras_linear_model_predictions(
+ features, [wire_cast], weight_collections=['my-vars'])
+ my_vars = g.get_collection('my-vars')
+ bias = get_linear_model_bias()
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ self.assertIn(bias, my_vars)
+ self.assertIn(wire_cast_var, my_vars)
+
+ def test_dense_trainable_default(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default() as g:
+ features = {'price': [[1.], [5.]]}
+ get_keras_linear_model_predictions(features, [price])
+ bias = get_linear_model_bias()
+ price_var = get_linear_model_column_var(price)
+ trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ self.assertIn(bias, trainable_vars)
+ self.assertIn(price_var, trainable_vars)
+
+ def test_sparse_trainable_default(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default() as g:
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
+ features = {'wire_cast': wire_tensor}
+ get_keras_linear_model_predictions(features, [wire_cast])
+ trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ bias = get_linear_model_bias()
+ wire_cast_var = get_linear_model_column_var(wire_cast)
+ self.assertIn(bias, trainable_vars)
+ self.assertIn(wire_cast_var, trainable_vars)
+
+ def test_dense_trainable_false(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default() as g:
+ features = {'price': [[1.], [5.]]}
+ get_keras_linear_model_predictions(features, [price], trainable=False)
+ trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ self.assertEqual([], trainable_vars)
+
+ def test_sparse_trainable_false(self):
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default() as g:
+ wire_tensor = sparse_tensor.SparseTensor(
+ values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
+ features = {'wire_cast': wire_tensor}
+ get_keras_linear_model_predictions(features, [wire_cast], trainable=False)
+ trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ self.assertEqual([], trainable_vars)
+
+ def test_column_order(self):
+ price_a = fc_old.numeric_column('price_a')
+ price_b = fc_old.numeric_column('price_b')
+ wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ with ops.Graph().as_default() as g:
+ features = {
+ 'price_a': [[1.]],
+ 'price_b': [[3.]],
+ 'wire_cast':
+ sparse_tensor.SparseTensor(
+ values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
+ }
+ get_keras_linear_model_predictions(
+ features, [price_a, wire_cast, price_b],
+ weight_collections=['my-vars'])
+ my_vars = g.get_collection('my-vars')
+ self.assertIn('price_a', my_vars[0].name)
+ self.assertIn('price_b', my_vars[1].name)
+ self.assertIn('wire_cast', my_vars[2].name)
+
+ with ops.Graph().as_default() as g:
+ features = {
+ 'price_a': [[1.]],
+ 'price_b': [[3.]],
+ 'wire_cast':
+ sparse_tensor.SparseTensor(
+ values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
+ }
+ get_keras_linear_model_predictions(
+ features, [wire_cast, price_b, price_a],
+ weight_collections=['my-vars'])
+ my_vars = g.get_collection('my-vars')
+ self.assertIn('price_a', my_vars[0].name)
+ self.assertIn('price_b', my_vars[1].name)
+ self.assertIn('wire_cast', my_vars[2].name)
+
+ def test_static_batch_size_mismatch(self):
+ price1 = fc_old.numeric_column('price1')
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {
+ 'price1': [[1.], [5.], [7.]], # batchsize = 3
+ 'price2': [[3.], [4.]] # batchsize = 2
+ }
+ with self.assertRaisesRegexp(
+ ValueError,
+          r'Batch size \(first dimension\) of each feature must be same.'):
+ get_keras_linear_model_predictions(features, [price1, price2])
+
+ def test_subset_of_static_batch_size_mismatch(self):
+ price1 = fc_old.numeric_column('price1')
+ price2 = fc_old.numeric_column('price2')
+ price3 = fc_old.numeric_column('price3')
+ with ops.Graph().as_default():
+ features = {
+          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize unknown
+ 'price2': [[3.], [4.]], # batchsize = 2
+ 'price3': [[3.], [4.], [5.]] # batchsize = 3
+ }
+ with self.assertRaisesRegexp(
+ ValueError,
+          r'Batch size \(first dimension\) of each feature must be same.'):
+ get_keras_linear_model_predictions(features, [price1, price2, price3])
+
+ def test_runtime_batch_size_mismatch(self):
+ price1 = fc_old.numeric_column('price1')
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {
+ 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
+ 'price2': [[3.], [4.]] # batchsize = 2
+ }
+ predictions = get_keras_linear_model_predictions(features,
+ [price1, price2])
+ with _initialized_session() as sess:
+ with self.assertRaisesRegexp(errors.OpError,
+ 'must have the same size and shape'):
+ sess.run(
+ predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
+
+ def test_runtime_batch_size_matches(self):
+ price1 = fc_old.numeric_column('price1')
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {
+ 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
+ 'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
+ }
+ predictions = get_keras_linear_model_predictions(features,
+ [price1, price2])
+ with _initialized_session() as sess:
+ sess.run(
+ predictions,
+ feed_dict={
+ features['price1']: [[1.], [5.]],
+ features['price2']: [[1.], [5.]],
+ })
+
+ def test_with_numpy_input_fn(self):
+ price = fc_old.numeric_column('price')
+ price_buckets = fc_old.bucketized_column(
+        price, boundaries=[0., 10., 100.])
+ body_style = fc_old.categorical_column_with_vocabulary_list(
+ 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
+
+ input_fn = numpy_io.numpy_input_fn(
+ x={
+ 'price': np.array([-1., 2., 13., 104.]),
+ 'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
+ },
+ batch_size=2,
+ shuffle=False)
+ features = input_fn()
+ net = get_keras_linear_model_predictions(features,
+ [price_buckets, body_style])
+    # get_keras_linear_model_predictions returns a single logit per example.
+ with _initialized_session() as sess:
+ coord = coordinator.Coordinator()
+ threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
+
+ bias = get_linear_model_bias()
+ price_buckets_var = get_linear_model_column_var(price_buckets)
+ body_style_var = get_linear_model_column_var(body_style)
+
+ sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
+ sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
+ sess.run(bias.assign([5.]))
+
+ self.assertAllClose([[10 - 1000 + 5.], [100 - 10 + 5.]], sess.run(net))
+
+ coord.request_stop()
+ coord.join(threads)
+
+ def test_with_1d_sparse_tensor(self):
+ price = fc_old.numeric_column('price')
+ price_buckets = fc_old.bucketized_column(
+        price, boundaries=[0., 10., 100.])
+ body_style = fc_old.categorical_column_with_vocabulary_list(
+ 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
+
+    # Provides a 1-dim dense tensor and a 1-dim sparse tensor.
+ features = {
+        'price': constant_op.constant([-1., 12.]),
+ 'body-style':
+ sparse_tensor.SparseTensor(
+ indices=((0,), (1,)),
+ values=('sedan', 'hardtop'),
+ dense_shape=(2,)),
+ }
+ self.assertEqual(1, features['price'].shape.ndims)
+ self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
+
+ net = get_keras_linear_model_predictions(features,
+ [price_buckets, body_style])
+ with _initialized_session() as sess:
+ bias = get_linear_model_bias()
+ price_buckets_var = get_linear_model_column_var(price_buckets)
+ body_style_var = get_linear_model_column_var(body_style)
+
+ sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
+ sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
+ sess.run(bias.assign([5.]))
+
+ self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], sess.run(net))
+
+ def test_with_1d_unknown_shape_sparse_tensor(self):
+ price = fc_old.numeric_column('price')
+ price_buckets = fc_old.bucketized_column(
+        price, boundaries=[0., 10., 100.])
+ body_style = fc_old.categorical_column_with_vocabulary_list(
+ 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
+ country = fc_old.categorical_column_with_vocabulary_list(
+ 'country', vocabulary_list=['US', 'JP', 'CA'])
+
+    # Provides dense and sparse placeholders with unknown shapes.
+ features = {
+ 'price': array_ops.placeholder(dtypes.float32),
+ 'body-style': array_ops.sparse_placeholder(dtypes.string),
+ 'country': array_ops.placeholder(dtypes.string),
+ }
+ self.assertIsNone(features['price'].shape.ndims)
+ self.assertIsNone(features['body-style'].get_shape().ndims)
+
+ price_data = np.array([-1., 12.])
+ body_style_data = sparse_tensor.SparseTensorValue(
+ indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
+ country_data = np.array(['US', 'CA'])
+
+ net = get_keras_linear_model_predictions(
+ features, [price_buckets, body_style, country])
+ bias = get_linear_model_bias()
+ price_buckets_var = get_linear_model_column_var(price_buckets)
+ body_style_var = get_linear_model_column_var(body_style)
+ with _initialized_session() as sess:
+ sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
+ sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
+ sess.run(bias.assign([5.]))
+
+ self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
+ sess.run(
+ net,
+ feed_dict={
+ features['price']: price_data,
+ features['body-style']: body_style_data,
+ features['country']: country_data
+ }))
+
+ def test_with_rank_0_feature(self):
+ price = fc_old.numeric_column('price')
+ features = {
+ 'price': constant_op.constant(0),
+ }
+ self.assertEqual(0, features['price'].shape.ndims)
+
+ # Static rank 0 should fail
+ with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
+ get_keras_linear_model_predictions(features, [price])
+
+ # Dynamic rank 0 should fail
+ features = {
+ 'price': array_ops.placeholder(dtypes.float32),
+ }
+ net = get_keras_linear_model_predictions(features, [price])
+ self.assertEqual(1, net.shape[1])
+ with _initialized_session() as sess:
+ with self.assertRaisesOpError('Feature .* cannot have rank 0'):
+ sess.run(net, feed_dict={features['price']: np.array(1)})
+
+
+class InputLayerTest(test.TestCase):
+
+ @test_util.run_in_graph_and_eager_modes()
+ def test_retrieving_input(self):
+ features = {'a': [0.]}
+ input_layer = InputLayer(fc_old.numeric_column('a'))
+ inputs = self.evaluate(input_layer(features))
+ self.assertAllClose([[0.]], inputs)
+
+ def test_reuses_variables(self):
+ with context.eager_mode():
+ sparse_input = sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (2, 0)),
+ values=(0, 1, 2),
+ dense_shape=(3, 3))
+
+ # Create feature columns (categorical and embedding).
+ categorical_column = fc_old.categorical_column_with_identity(
+ key='a', num_buckets=3)
+ embedding_dimension = 2
+ def _embedding_column_initializer(shape, dtype, partition_info):
+ del shape # unused
+ del dtype # unused
+ del partition_info # unused
+ embedding_values = (
+ (1, 0), # id 0
+ (0, 1), # id 1
+ (1, 1)) # id 2
+ return embedding_values
+
+ embedding_column = fc_old.embedding_column(
+ categorical_column,
+ dimension=embedding_dimension,
+ initializer=_embedding_column_initializer)
+
+ input_layer = InputLayer([embedding_column])
+ features = {'a': sparse_input}
+
+ inputs = input_layer(features)
+ variables = input_layer.variables
+
+ # Sanity check: test that the inputs are correct.
+ self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)
+
+ # Check that only one variable was created.
+ self.assertEqual(1, len(variables))
+
+ # Check that invoking input_layer on the same features does not create
+ # additional variables
+ _ = input_layer(features)
+ self.assertEqual(1, len(variables))
+ self.assertEqual(variables[0], input_layer.variables[0])
+
+ def test_feature_column_input_layer_gradient(self):
+ with context.eager_mode():
+ sparse_input = sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (2, 0)),
+ values=(0, 1, 2),
+ dense_shape=(3, 3))
+
+ # Create feature columns (categorical and embedding).
+ categorical_column = fc_old.categorical_column_with_identity(
+ key='a', num_buckets=3)
+ embedding_dimension = 2
+
+ def _embedding_column_initializer(shape, dtype, partition_info):
+ del shape # unused
+ del dtype # unused
+ del partition_info # unused
+ embedding_values = (
+ (1, 0), # id 0
+ (0, 1), # id 1
+ (1, 1)) # id 2
+ return embedding_values
+
+ embedding_column = fc_old.embedding_column(
+ categorical_column,
+ dimension=embedding_dimension,
+ initializer=_embedding_column_initializer)
+
+ input_layer = InputLayer([embedding_column])
+ features = {'a': sparse_input}
+
+ def scale_matrix():
+ matrix = input_layer(features)
+ return 2 * matrix
+
+ # Sanity check: Verify that scale_matrix returns the correct output.
+ self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())
+
+ # Check that the returned gradient is correct.
+ grad_function = backprop.implicit_grad(scale_matrix)
+ grads_and_vars = grad_function()
+ indexed_slice = grads_and_vars[0][0]
+      gradient = indexed_slice.values
+
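+      # Doubling the lookup output makes every gradient entry 2, delivered as
+      # IndexedSlices over the looked-up ids [0, 1, 2].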
+ self.assertAllEqual([0, 1, 2], indexed_slice.indices)
+ self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)
+
+
+class FunctionalInputLayerTest(test.TestCase):
+
+ def test_raises_if_empty_feature_columns(self):
+ with self.assertRaisesRegexp(ValueError,
+ 'feature_columns must not be empty'):
+ fc.input_layer(features={}, feature_columns=[])
+
+ def test_should_be_dense_column(self):
+ with self.assertRaisesRegexp(ValueError, 'must be a _DenseColumn'):
+ fc.input_layer(
+ features={'a': [[0]]},
+ feature_columns=[
+ fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
+ ])
+
+ def test_does_not_support_dict_columns(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'Expected feature_columns to be iterable, found dict.'):
+ fc.input_layer(
+ features={'a': [[0]]},
+ feature_columns={'a': fc_old.numeric_column('a')})
+
+ def test_bare_column(self):
+ with ops.Graph().as_default():
+      features = {'a': [0.]}
+ net = fc.input_layer(features, fc_old.numeric_column('a'))
+ with _initialized_session():
+ self.assertAllClose([[0.]], net.eval())
+
+ def test_column_generator(self):
+ with ops.Graph().as_default():
+      features = {'a': [0.], 'b': [1.]}
+ columns = (fc_old.numeric_column(key) for key in features)
+ net = fc.input_layer(features, columns)
+ with _initialized_session():
+ self.assertAllClose([[0., 1.]], net.eval())
+
+ def test_raises_if_duplicate_name(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'Duplicate feature column name found for columns'):
+ fc.input_layer(
+ features={'a': [[0]]},
+ feature_columns=[
+ fc_old.numeric_column('a'),
+ fc_old.numeric_column('a')
+ ])
+
+ def test_one_column(self):
+ price = fc_old.numeric_column('price')
+ with ops.Graph().as_default():
+ features = {'price': [[1.], [5.]]}
+ net = fc.input_layer(features, [price])
+ with _initialized_session():
+ self.assertAllClose([[1.], [5.]], net.eval())
+
+ def test_multi_dimension(self):
+ price = fc_old.numeric_column('price', shape=2)
+ with ops.Graph().as_default():
+ features = {'price': [[1., 2.], [5., 6.]]}
+ net = fc.input_layer(features, [price])
+ with _initialized_session():
+ self.assertAllClose([[1., 2.], [5., 6.]], net.eval())
+
+ def test_raises_if_shape_mismatch(self):
+ price = fc_old.numeric_column('price', shape=2)
+ with ops.Graph().as_default():
+ features = {'price': [[1.], [5.]]}
+ with self.assertRaisesRegexp(
+ Exception,
+ r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
+ fc.input_layer(features, [price])
+
+ def test_reshaping(self):
+ price = fc_old.numeric_column('price', shape=[1, 2])
+ with ops.Graph().as_default():
+ features = {'price': [[[1., 2.]], [[5., 6.]]]}
+ net = fc.input_layer(features, [price])
+ with _initialized_session():
+ self.assertAllClose([[1., 2.], [5., 6.]], net.eval())
+
+ def test_multi_column(self):
+ price1 = fc_old.numeric_column('price1', shape=2)
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {
+ 'price1': [[1., 2.], [5., 6.]],
+ 'price2': [[3.], [4.]]
+ }
+ net = fc.input_layer(features, [price1, price2])
+ with _initialized_session():
+ self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], net.eval())
+
+ def test_fills_cols_to_vars(self):
+ # Provide three _DenseColumn's to input_layer: a _NumericColumn, a
+ # _BucketizedColumn, and an _EmbeddingColumn. Only the _EmbeddingColumn
+ # creates a Variable.
+ price1 = fc_old.numeric_column('price1')
+ dense_feature = fc_old.numeric_column('dense_feature')
+ dense_feature_bucketized = fc_old.bucketized_column(
+ dense_feature, boundaries=[0.])
+ some_sparse_column = fc_old.categorical_column_with_hash_bucket(
+ 'sparse_feature', hash_bucket_size=5)
+ some_embedding_column = fc_old.embedding_column(
+ some_sparse_column, dimension=10)
+ with ops.Graph().as_default():
+ features = {
+ 'price1': [[3.], [4.]],
+ 'dense_feature': [[-1.], [4.]],
+ 'sparse_feature': [['a'], ['x']],
+ }
+ cols_to_vars = {}
+ all_cols = [price1, dense_feature_bucketized, some_embedding_column]
+ fc.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
+ self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
+ self.assertEqual(0, len(cols_to_vars[price1]))
+ self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
+ self.assertEqual(1, len(cols_to_vars[some_embedding_column]))
+ self.assertIsInstance(cols_to_vars[some_embedding_column][0],
+ variables_lib.Variable)
+ self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [5, 10])
+
+ def test_fills_cols_to_vars_partitioned_variables(self):
+ price1 = fc_old.numeric_column('price1')
+ dense_feature = fc_old.numeric_column('dense_feature')
+ dense_feature_bucketized = fc_old.bucketized_column(
+ dense_feature, boundaries=[0.])
+ some_sparse_column = fc_old.categorical_column_with_hash_bucket(
+ 'sparse_feature', hash_bucket_size=5)
+ some_embedding_column = fc_old.embedding_column(
+ some_sparse_column, dimension=10)
+ with ops.Graph().as_default():
+ features = {
+ 'price1': [[3.], [4.]],
+ 'dense_feature': [[-1.], [4.]],
+ 'sparse_feature': [['a'], ['x']],
+ }
+ cols_to_vars = {}
+ all_cols = [price1, dense_feature_bucketized, some_embedding_column]
+ with variable_scope.variable_scope(
+ 'input_from_feature_columns',
+ partitioner=partitioned_variables.fixed_size_partitioner(3, axis=0)):
+ fc.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
+ self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
+ self.assertEqual(0, len(cols_to_vars[price1]))
+ self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
+ self.assertEqual(3, len(cols_to_vars[some_embedding_column]))
+ self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [2, 10])
+ self.assertAllEqual(cols_to_vars[some_embedding_column][1].shape, [2, 10])
+ self.assertAllEqual(cols_to_vars[some_embedding_column][2].shape, [1, 10])
+
+ def test_column_order(self):
+ price_a = fc_old.numeric_column('price_a')
+ price_b = fc_old.numeric_column('price_b')
+ with ops.Graph().as_default():
+ features = {
+ 'price_a': [[1.]],
+ 'price_b': [[3.]],
+ }
+ net1 = fc.input_layer(features, [price_a, price_b])
+ net2 = fc.input_layer(features, [price_b, price_a])
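+      # input_layer concatenates columns in name-sorted order, so both
+      # orderings place price_a before price_b.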
+ with _initialized_session():
+ self.assertAllClose([[1., 3.]], net1.eval())
+ self.assertAllClose([[1., 3.]], net2.eval())
+
+ def test_fails_for_categorical_column(self):
+ animal = fc_old.categorical_column_with_identity('animal', num_buckets=4)
+ with ops.Graph().as_default():
+ features = {
+ 'animal':
+ sparse_tensor.SparseTensor(
+ indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
+ }
+ with self.assertRaisesRegexp(Exception, 'must be a _DenseColumn'):
+ fc.input_layer(features, [animal])
+
+ def test_static_batch_size_mismatch(self):
+ price1 = fc_old.numeric_column('price1')
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {
+ 'price1': [[1.], [5.], [7.]], # batchsize = 3
+ 'price2': [[3.], [4.]] # batchsize = 2
+ }
+ with self.assertRaisesRegexp(
+ ValueError,
+          r'Batch size \(first dimension\) of each feature must be same.'):
+ fc.input_layer(features, [price1, price2])
+
+ def test_subset_of_static_batch_size_mismatch(self):
+ price1 = fc_old.numeric_column('price1')
+ price2 = fc_old.numeric_column('price2')
+ price3 = fc_old.numeric_column('price3')
+ with ops.Graph().as_default():
+ features = {
+          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize unknown
+ 'price2': [[3.], [4.]], # batchsize = 2
+ 'price3': [[3.], [4.], [5.]] # batchsize = 3
+ }
+ with self.assertRaisesRegexp(
+ ValueError,
+          r'Batch size \(first dimension\) of each feature must be same.'):
+ fc.input_layer(features, [price1, price2, price3])
+
+ def test_runtime_batch_size_mismatch(self):
+ price1 = fc_old.numeric_column('price1')
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {
+ 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
+ 'price2': [[3.], [4.]] # batchsize = 2
+ }
+ net = fc.input_layer(features, [price1, price2])
+ with _initialized_session() as sess:
+ with self.assertRaisesRegexp(errors.OpError,
+ 'Dimensions of inputs should match'):
+ sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})
+
+ def test_runtime_batch_size_matches(self):
+ price1 = fc_old.numeric_column('price1')
+ price2 = fc_old.numeric_column('price2')
+ with ops.Graph().as_default():
+ features = {
+ 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
+ 'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
+ }
+ net = fc.input_layer(features, [price1, price2])
+ with _initialized_session() as sess:
+ sess.run(
+ net,
+ feed_dict={
+ features['price1']: [[1.], [5.]],
+ features['price2']: [[1.], [5.]],
+ })
+
+ def test_multiple_layers_with_same_embedding_column(self):
+ some_sparse_column = fc_old.categorical_column_with_hash_bucket(
+ 'sparse_feature', hash_bucket_size=5)
+ some_embedding_column = fc_old.embedding_column(
+ some_sparse_column, dimension=10)
+
+ with ops.Graph().as_default():
+ features = {
+ 'sparse_feature': [['a'], ['x']],
+ }
+ all_cols = [some_embedding_column]
+ fc.input_layer(features, all_cols)
+ fc.input_layer(features, all_cols)
+ # Make sure that 2 variables get created in this case.
+ self.assertEqual(2, len(
+ ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
+ expected_var_names = [
+ 'input_layer/sparse_feature_embedding/embedding_weights:0',
+ 'input_layer_1/sparse_feature_embedding/embedding_weights:0'
+ ]
+ self.assertItemsEqual(
+ expected_var_names,
+ [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
+
+ def test_multiple_layers_with_same_shared_embedding_column(self):
+ categorical_column_a = fc_old.categorical_column_with_identity(
+ key='aaa', num_buckets=3)
+ categorical_column_b = fc_old.categorical_column_with_identity(
+ key='bbb', num_buckets=3)
+ embedding_dimension = 2
+ embedding_column_b, embedding_column_a = fc_old.shared_embedding_columns(
+ [categorical_column_b, categorical_column_a],
+ dimension=embedding_dimension)
+
+ with ops.Graph().as_default():
+ features = {
+ 'aaa':
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 1, 0),
+ dense_shape=(2, 2)),
+ 'bbb':
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(1, 2, 1),
+ dense_shape=(2, 2)),
+ }
+ all_cols = [embedding_column_a, embedding_column_b]
+ fc.input_layer(features, all_cols)
+ fc.input_layer(features, all_cols)
+ # Make sure that only 1 variable gets created in this case.
+ self.assertEqual(1, len(
+ ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
+ self.assertItemsEqual(
+ ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
+ [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
+
+ def test_multiple_layers_with_same_shared_embedding_column_diff_graphs(self):
+ categorical_column_a = fc_old.categorical_column_with_identity(
+ key='aaa', num_buckets=3)
+ categorical_column_b = fc_old.categorical_column_with_identity(
+ key='bbb', num_buckets=3)
+ embedding_dimension = 2
+ embedding_column_b, embedding_column_a = fc_old.shared_embedding_columns(
+ [categorical_column_b, categorical_column_a],
+ dimension=embedding_dimension)
+ all_cols = [embedding_column_a, embedding_column_b]
+
+ with ops.Graph().as_default():
+ features = {
+ 'aaa':
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 1, 0),
+ dense_shape=(2, 2)),
+ 'bbb':
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(1, 2, 1),
+ dense_shape=(2, 2)),
+ }
+ fc.input_layer(features, all_cols)
+ # Make sure that only 1 variable gets created in this case.
+ self.assertEqual(1, len(
+ ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
+
+ with ops.Graph().as_default():
+ features1 = {
+ 'aaa':
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 1, 0),
+ dense_shape=(2, 2)),
+ 'bbb':
+ sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(1, 2, 1),
+ dense_shape=(2, 2)),
+ }
+
+ fc.input_layer(features1, all_cols)
+ # Make sure that only 1 variable gets created in this case.
+ self.assertEqual(1, len(
+ ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
+ self.assertItemsEqual(
+ ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
+ [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
+
+ def test_with_numpy_input_fn(self):
+ embedding_values = (
+ (1., 2., 3., 4., 5.), # id 0
+ (6., 7., 8., 9., 10.), # id 1
+ (11., 12., 13., 14., 15.) # id 2
+ )
+ def _initializer(shape, dtype, partition_info):
+ del shape, dtype, partition_info
+ return embedding_values
+
+ # price has 1 dimension in input_layer
+ price = fc_old.numeric_column('price')
+ body_style = fc_old.categorical_column_with_vocabulary_list(
+ 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
+ # one_hot_body_style has 3 dims in input_layer.
+ one_hot_body_style = fc_old.indicator_column(body_style)
+ # embedded_body_style has 5 dims in input_layer.
+ embedded_body_style = fc_old.embedding_column(
+ body_style, dimension=5, initializer=_initializer)
+
+ input_fn = numpy_io.numpy_input_fn(
+ x={
+ 'price': np.array([11., 12., 13., 14.]),
+ 'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
+ },
+ batch_size=2,
+ shuffle=False)
+ features = input_fn()
+ net = fc.input_layer(features,
+ [price, one_hot_body_style, embedded_body_style])
+ self.assertEqual(1 + 3 + 5, net.shape[1])
+ with _initialized_session() as sess:
+ coord = coordinator.Coordinator()
+ threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
+
+ # Each row is formed by concatenating `embedded_body_style`,
+ # `one_hot_body_style`, and `price` in order.
+ self.assertAllEqual(
+ [[11., 12., 13., 14., 15., 0., 0., 1., 11.],
+ [1., 2., 3., 4., 5., 1., 0., 0., 12]],
+ sess.run(net))
+
+ coord.request_stop()
+ coord.join(threads)
+
+ def test_with_1d_sparse_tensor(self):
+ embedding_values = (
+ (1., 2., 3., 4., 5.), # id 0
+ (6., 7., 8., 9., 10.), # id 1
+ (11., 12., 13., 14., 15.) # id 2
+ )
+ def _initializer(shape, dtype, partition_info):
+ del shape, dtype, partition_info
+ return embedding_values
+
+ # price has 1 dimension in input_layer
+ price = fc_old.numeric_column('price')
+
+ # one_hot_body_style has 3 dims in input_layer.
+ body_style = fc_old.categorical_column_with_vocabulary_list(
+ 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
+ one_hot_body_style = fc_old.indicator_column(body_style)
+
+    # embedded_country has 5 dims in input_layer.
+ country = fc_old.categorical_column_with_vocabulary_list(
+ 'country', vocabulary_list=['US', 'JP', 'CA'])
+ embedded_country = fc_old.embedding_column(
+ country, dimension=5, initializer=_initializer)
+
+    # Provides 1-dim dense and sparse tensors.
+ features = {
+      'price': constant_op.constant([11., 12.]),
+ 'body-style': sparse_tensor.SparseTensor(
+ indices=((0,), (1,)),
+ values=('sedan', 'hardtop'),
+ dense_shape=(2,)),
+ # This is dense tensor for the categorical_column.
+ 'country': constant_op.constant(['CA', 'US']),
+ }
+ self.assertEqual(1, features['price'].shape.ndims)
+ self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
+ self.assertEqual(1, features['country'].shape.ndims)
+
+ net = fc.input_layer(features,
+ [price, one_hot_body_style, embedded_country])
+ self.assertEqual(1 + 3 + 5, net.shape[1])
+ with _initialized_session() as sess:
+
+      # Each row is formed by concatenating `one_hot_body_style`,
+      # `embedded_country`, and `price` in order (input_layer concatenates
+      # the columns sorted by name).
+ self.assertAllEqual(
+ [[0., 0., 1., 11., 12., 13., 14., 15., 11.],
+ [1., 0., 0., 1., 2., 3., 4., 5., 12.]],
+ sess.run(net))
+
+ def test_with_1d_unknown_shape_sparse_tensor(self):
+ embedding_values = (
+ (1., 2.), # id 0
+ (6., 7.), # id 1
+ (11., 12.) # id 2
+ )
+ def _initializer(shape, dtype, partition_info):
+ del shape, dtype, partition_info
+ return embedding_values
+
+ # price has 1 dimension in input_layer
+ price = fc_old.numeric_column('price')
+
+ # one_hot_body_style has 3 dims in input_layer.
+ body_style = fc_old.categorical_column_with_vocabulary_list(
+ 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
+ one_hot_body_style = fc_old.indicator_column(body_style)
+
+    # embedded_country has 2 dims in input_layer.
+ country = fc_old.categorical_column_with_vocabulary_list(
+ 'country', vocabulary_list=['US', 'JP', 'CA'])
+ embedded_country = fc_old.embedding_column(
+ country, dimension=2, initializer=_initializer)
+
+ # Provides 1-dim tensor and dense tensor.
+ features = {
+ 'price': array_ops.placeholder(dtypes.float32),
+ 'body-style': array_ops.sparse_placeholder(dtypes.string),
+        # This is a dense tensor for the categorical_column.
+ 'country': array_ops.placeholder(dtypes.string),
+ }
+ self.assertIsNone(features['price'].shape.ndims)
+ self.assertIsNone(features['body-style'].get_shape().ndims)
+ self.assertIsNone(features['country'].shape.ndims)
+
+ price_data = np.array([11., 12.])
+ body_style_data = sparse_tensor.SparseTensorValue(
+ indices=((0,), (1,)),
+ values=('sedan', 'hardtop'),
+ dense_shape=(2,))
+ country_data = np.array([['US'], ['CA']])
+
+ net = fc.input_layer(features,
+ [price, one_hot_body_style, embedded_country])
+ self.assertEqual(1 + 3 + 2, net.shape[1])
+ with _initialized_session() as sess:
+
+      # Each row is formed by concatenating `one_hot_body_style`,
+      # `embedded_country`, and `price` in order.
+ self.assertAllEqual(
+ [[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
+ sess.run(
+ net,
+ feed_dict={
+ features['price']: price_data,
+ features['body-style']: body_style_data,
+ features['country']: country_data
+ }))
+
+ def test_with_rank_0_feature(self):
+ # price has 1 dimension in input_layer
+ price = fc_old.numeric_column('price')
+ features = {
+ 'price': constant_op.constant(0),
+ }
+ self.assertEqual(0, features['price'].shape.ndims)
+
+ # Static rank 0 should fail
+ with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
+ fc.input_layer(features, [price])
+
+ # Dynamic rank 0 should fail
+ features = {
+ 'price': array_ops.placeholder(dtypes.float32),
+ }
+ net = fc.input_layer(features, [price])
+ self.assertEqual(1, net.shape[1])
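+    # With an unknown-rank placeholder the graph still builds; the rank-0
+    # check is deferred to run time as an assert op, hence the OpError below.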
+ with _initialized_session() as sess:
+ with self.assertRaisesOpError('Feature .* cannot have rank 0'):
+ sess.run(net, feed_dict={features['price']: np.array(1)})
+
+
+class MakeParseExampleSpecTest(test.TestCase):
+
+ class _TestFeatureColumn(FeatureColumn,
+                           collections.namedtuple('_TestFeatureColumn',
+                                                  ('parse_spec',))):
+
+ @property
+ def name(self):
+      return '_TestFeatureColumn'
+
+ def transform_feature(self, transformation_cache, state_manager):
+ pass
+
+ @property
+ def parse_example_spec(self):
+ return self.parse_spec
+
+ def test_no_feature_columns(self):
+ actual = fc.make_parse_example_spec([])
+ self.assertDictEqual({}, actual)
+
+ def test_invalid_type(self):
+ key1 = 'key1'
+ parse_spec1 = parsing_ops.FixedLenFeature(
+ shape=(2,), dtype=dtypes.float32, default_value=0.)
+ with self.assertRaisesRegexp(
+ ValueError,
+ 'All feature_columns must be FeatureColumn instances.*invalid_column'):
+ fc.make_parse_example_spec(
+ (self._TestFeatureColumn({key1: parse_spec1}), 'invalid_column'))
+
+ def test_one_feature_column(self):
+ key1 = 'key1'
+ parse_spec1 = parsing_ops.FixedLenFeature(
+ shape=(2,), dtype=dtypes.float32, default_value=0.)
+ actual = fc.make_parse_example_spec(
+ (self._TestFeatureColumn({key1: parse_spec1}),))
+ self.assertDictEqual({key1: parse_spec1}, actual)
+
+ def test_two_feature_columns(self):
+ key1 = 'key1'
+ parse_spec1 = parsing_ops.FixedLenFeature(
+ shape=(2,), dtype=dtypes.float32, default_value=0.)
+ key2 = 'key2'
+ parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
+ actual = fc.make_parse_example_spec(
+ (self._TestFeatureColumn({key1: parse_spec1}),
+ self._TestFeatureColumn({key2: parse_spec2})))
+ self.assertDictEqual({key1: parse_spec1, key2: parse_spec2}, actual)
+
+ def test_equal_keys_different_parse_spec(self):
+ key1 = 'key1'
+ parse_spec1 = parsing_ops.FixedLenFeature(
+ shape=(2,), dtype=dtypes.float32, default_value=0.)
+ parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
+ with self.assertRaisesRegexp(
+ ValueError,
+ 'feature_columns contain different parse_spec for key key1'):
+ fc.make_parse_example_spec(
+ (self._TestFeatureColumn({key1: parse_spec1}),
+ self._TestFeatureColumn({key1: parse_spec2})))
+
+ def test_equal_keys_equal_parse_spec(self):
+ key1 = 'key1'
+ parse_spec1 = parsing_ops.FixedLenFeature(
+ shape=(2,), dtype=dtypes.float32, default_value=0.)
+ actual = fc.make_parse_example_spec(
+ (self._TestFeatureColumn({key1: parse_spec1}),
+ self._TestFeatureColumn({key1: parse_spec1})))
+ self.assertDictEqual({key1: parse_spec1}, actual)
+
+ def test_multiple_features_dict(self):
+    """parse_spec for one column is a dict with length > 1."""
+ key1 = 'key1'
+ parse_spec1 = parsing_ops.FixedLenFeature(
+ shape=(2,), dtype=dtypes.float32, default_value=0.)
+ key2 = 'key2'
+ parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
+ key3 = 'key3'
+ parse_spec3 = parsing_ops.VarLenFeature(dtype=dtypes.int32)
+ actual = fc.make_parse_example_spec(
+ (self._TestFeatureColumn({key1: parse_spec1}),
+ self._TestFeatureColumn({key2: parse_spec2, key3: parse_spec3})))
+ self.assertDictEqual(
+ {key1: parse_spec1, key2: parse_spec2, key3: parse_spec3}, actual)
+
+
+def _assert_sparse_tensor_value(test_case, expected, actual):
+ test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
+ test_case.assertAllEqual(expected.indices, actual.indices)
+
+ test_case.assertEqual(
+ np.array(expected.values).dtype, np.array(actual.values).dtype)
+ test_case.assertAllEqual(expected.values, actual.values)
+
+ test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
+ test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
+
+
+class VocabularyFileCategoricalColumnTest(test.TestCase):
+
+ def setUp(self):
+ super(VocabularyFileCategoricalColumnTest, self).setUp()
+
+ # Contains ints, Golden State Warriors jersey numbers: 30, 35, 11, 23, 22
+ self._warriors_vocabulary_file_name = test.test_src_dir_path(
+ 'python/feature_column/testdata/warriors_vocabulary.txt')
+ self._warriors_vocabulary_size = 5
+
+ # Contains strings, character names from 'The Wire': omar, stringer, marlo
+ self._wire_vocabulary_file_name = test.test_src_dir_path(
+ 'python/feature_column/testdata/wire_vocabulary.txt')
+ self._wire_vocabulary_size = 3
+
+ def test_defaults(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
+ self.assertEqual('aaa', column.name)
+ self.assertEqual('aaa', column.key)
+ self.assertEqual(3, column.num_buckets)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.string)
+ }, column.parse_example_spec)
+
+ def test_key_should_be_string(self):
+ with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+ fc.categorical_column_with_vocabulary_file(
+ key=('aaa',), vocabulary_file='path_to_file', vocabulary_size=3)
+
+ def test_all_constructor_args(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa', vocabulary_file='path_to_file', vocabulary_size=3,
+ num_oov_buckets=4, dtype=dtypes.int32)
+ self.assertEqual(7, column.num_buckets)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int32)
+ }, column.parse_example_spec)
+
+ def test_deep_copy(self):
+ original = fc.categorical_column_with_vocabulary_file(
+ key='aaa', vocabulary_file='path_to_file', vocabulary_size=3,
+ num_oov_buckets=4, dtype=dtypes.int32)
+ for column in (original, copy.deepcopy(original)):
+ self.assertEqual('aaa', column.name)
+ self.assertEqual(7, column.num_buckets)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int32)
+ }, column.parse_example_spec)
+
+ def test_vocabulary_file_none(self):
+ with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
+ fc.categorical_column_with_vocabulary_file(
+ key='aaa', vocabulary_file=None, vocabulary_size=3)
+
+ def test_vocabulary_file_empty_string(self):
+ with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
+ fc.categorical_column_with_vocabulary_file(
+ key='aaa', vocabulary_file='', vocabulary_size=3)
+
+ def test_invalid_vocabulary_file(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa', vocabulary_file='file_does_not_exist', vocabulary_size=10)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
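+    # Constructing the column and its lookup does not read the file; the
+    # missing file is only detected when the table initializer runs below.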
+ column.get_sparse_tensors(FeatureTransformationCache({'aaa': inputs}), None)
+ with self.assertRaisesRegexp(errors.OpError, 'file_does_not_exist'):
+ with self.test_session():
+ lookup_ops.tables_initializer().run()
+
+ def test_invalid_vocabulary_size(self):
+ with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
+ fc.categorical_column_with_vocabulary_file(
+ key='aaa', vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=-1)
+ with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
+ fc.categorical_column_with_vocabulary_file(
+ key='aaa', vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=0)
+
+ def test_too_large_vocabulary_size(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=self._wire_vocabulary_size + 1)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
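+    # The declared vocabulary_size is checked against the actual file length
+    # at table initialization time, so the error surfaces only there.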
+ column.get_sparse_tensors(FeatureTransformationCache({'aaa': inputs}), None)
+ with self.assertRaisesRegexp(errors.OpError, 'Invalid vocab_size'):
+ with self.test_session():
+ lookup_ops.tables_initializer().run()
+
+ def test_invalid_num_oov_buckets(self):
+ with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
+ fc.categorical_column_with_vocabulary_file(
+ key='aaa', vocabulary_file='path', vocabulary_size=3,
+ num_oov_buckets=-1)
+
+ def test_invalid_dtype(self):
+ with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
+ fc.categorical_column_with_vocabulary_file(
+ key='aaa', vocabulary_file='path', vocabulary_size=3,
+ dtype=dtypes.float64)
+
+ def test_invalid_buckets_and_default_value(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'both num_oov_buckets and default_value'):
+ fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=self._wire_vocabulary_size,
+ num_oov_buckets=100,
+ default_value=2)
+
+ def test_invalid_input_dtype_int32(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=self._wire_vocabulary_size,
+ dtype=dtypes.string)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(12, 24, 36),
+ dense_shape=(2, 2))
+ with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+ column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+
+ def test_invalid_input_dtype_string(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._warriors_vocabulary_file_name,
+ vocabulary_size=self._warriors_vocabulary_size,
+ dtype=dtypes.int32)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('omar', 'stringer', 'marlo'),
+ dense_shape=(2, 2))
+ with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+ column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+
+ def test_parse_example(self):
+ a = fc.categorical_column_with_vocabulary_file(
+ key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
+ data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'aaa':
+ feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
+ value=[b'omar', b'stringer']))
+ }))
+ features = parsing_ops.parse_example(
+ serialized=[data.SerializeToString()],
+ features=fc.make_parse_example_spec([a]))
+ self.assertIn('aaa', features)
+ with self.test_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=[[0, 0], [0, 1]],
+ values=np.array([b'omar', b'stringer'], dtype=np.object_),
+ dense_shape=[1, 2]),
+ features['aaa'].eval())
+
+ def test_get_sparse_tensors(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=self._wire_vocabulary_size)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((2, -1, 0), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_none_vocabulary_size(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa', vocabulary_file=self._wire_vocabulary_file_name)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array(
+ (2, -1, 0), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_transform_feature(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=self._wire_vocabulary_size)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ id_tensor = _transform_features({'aaa': inputs}, [column], None)[column]
+ with _initialized_session():
+ _assert_sparse_tensor_value(self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array(
+ (2, -1, 0), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_tensor.eval())
+
+ def DISABLED_test_get_sparse_tensors_weight_collections(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=self._wire_vocabulary_size)
+ inputs = sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'],
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }),
+ weight_collections=('my_weights',))
+
+ self.assertItemsEqual(
+ [], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
+ self.assertItemsEqual([], ops.get_collection('my_weights'))
+
+ def test_get_sparse_tensors_dense_input(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=self._wire_vocabulary_size)
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': (('marlo', ''), ('skywalker', 'omar'))
+ }), None)
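+    # For string dense input, empty strings are treated as missing and are
+    # dropped in the dense-to-sparse conversion, so only three ids remain.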
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=np.array((2, -1, 0), dtype=np.int64),
+ dense_shape=(2, 2)),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_default_value_in_vocabulary(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=self._wire_vocabulary_size,
+ default_value=2)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((2, 2, 0), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_with_oov_buckets(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=self._wire_vocabulary_size,
+ num_oov_buckets=100)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1), (1, 2)),
+ values=('marlo', 'skywalker', 'omar', 'heisenberg'),
+ dense_shape=(2, 3))
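+    # In-vocabulary values map to ids in [0, 3); OOV values ('skywalker',
+    # 'heisenberg') are hashed into the 100 OOV buckets, giving ids in
+    # [3, 103). The exact ids 33 and 62 depend on the hash function.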
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((2, 33, 0, 62), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_small_vocabulary_size(self):
+    # 'marlo' is the last entry in our vocabulary file, so by setting
+    # `vocabulary_size` to one less than the number of entries in the file,
+    # we take 'marlo' out of the vocabulary.
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=self._wire_vocabulary_size - 1)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
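+    # With neither num_oov_buckets nor default_value set, out-of-vocabulary
+    # values (now including 'marlo') map to the default id -1.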
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((-1, -1, 0), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_int32(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._warriors_vocabulary_file_name,
+ vocabulary_size=self._warriors_vocabulary_size,
+ dtype=dtypes.int32)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1), (2, 2)),
+ values=(11, 100, 30, 22),
+ dense_shape=(3, 3))
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((2, -1, 0, 4), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_int32_dense_input(self):
+ default_value = -100
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._warriors_vocabulary_file_name,
+ vocabulary_size=self._warriors_vocabulary_size,
+ dtype=dtypes.int32,
+ default_value=default_value)
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': ((11, -1, -1), (100, 30, -1), (-1, -1, 22))
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1), (2, 2)),
+ values=np.array((2, default_value, 0, 4), dtype=np.int64),
+ dense_shape=(3, 3)),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_int32_with_oov_buckets(self):
+ column = fc.categorical_column_with_vocabulary_file(
+ key='aaa',
+ vocabulary_file=self._warriors_vocabulary_file_name,
+ vocabulary_size=self._warriors_vocabulary_size,
+ dtype=dtypes.int32,
+ num_oov_buckets=100)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1), (2, 2)),
+ values=(11, 100, 30, 22),
+ dense_shape=(3, 3))
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((2, 60, 0, 4), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_linear_model(self):
+ wire_column = fc_old.categorical_column_with_vocabulary_file(
+ key='wire',
+ vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=self._wire_vocabulary_size,
+ num_oov_buckets=1)
+ self.assertEqual(4, wire_column._num_buckets)
+ with ops.Graph().as_default():
+ predictions = fc.linear_model({
+ wire_column.name: sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ }, (wire_column,))
+ bias = get_linear_model_bias()
+ wire_var = get_linear_model_column_var(wire_column)
+ with _initialized_session():
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
+ # 'marlo' -> 2: wire_var[2] = 3
+ # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
+ self.assertAllClose(((3.,), (5.,)), predictions.eval())
+
+ def test_keras_linear_model(self):
+ wire_column = fc_old.categorical_column_with_vocabulary_file(
+ key='wire',
+ vocabulary_file=self._wire_vocabulary_file_name,
+ vocabulary_size=self._wire_vocabulary_size,
+ num_oov_buckets=1)
+ self.assertEqual(4, wire_column._num_buckets)
+ with ops.Graph().as_default():
+ predictions = get_keras_linear_model_predictions({
+ wire_column.name:
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ }, (wire_column,))
+ bias = get_linear_model_bias()
+ wire_var = get_linear_model_column_var(wire_column)
+ with _initialized_session():
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
+ # 'marlo' -> 2: wire_var[2] = 3
+ # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
+ self.assertAllClose(((3.,), (5.,)), predictions.eval())
+
+
+class VocabularyListCategoricalColumnTest(test.TestCase):
+
+ def test_defaults_string(self):
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
+ self.assertEqual('aaa', column.name)
+ self.assertEqual('aaa', column.key)
+ self.assertEqual(3, column.num_buckets)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.string)
+ }, column.parse_example_spec)
+
+ def test_key_should_be_string(self):
+ with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+ fc.categorical_column_with_vocabulary_list(
+ key=('aaa',), vocabulary_list=('omar', 'stringer', 'marlo'))
+
+ def test_defaults_int(self):
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=(12, 24, 36))
+ self.assertEqual('aaa', column.name)
+ self.assertEqual('aaa', column.key)
+ self.assertEqual(3, column.num_buckets)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int64)
+ }, column.parse_example_spec)
+
+ def test_all_constructor_args(self):
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32,
+ default_value=-99)
+ self.assertEqual(3, column.num_buckets)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int32)
+ }, column.parse_example_spec)
+
+ def test_deep_copy(self):
+ original = fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32)
+ for column in (original, copy.deepcopy(original)):
+ self.assertEqual('aaa', column.name)
+ self.assertEqual(3, column.num_buckets)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int32)
+ }, column.parse_example_spec)
+
+ def test_invalid_dtype(self):
+ with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
+ fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'),
+ dtype=dtypes.float32)
+
+ def test_invalid_mapping_dtype(self):
+ with self.assertRaisesRegexp(
+ ValueError, r'vocabulary dtype must be string or integer'):
+ fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=(12., 24., 36.))
+
+ def test_mismatched_int_dtype(self):
+ with self.assertRaisesRegexp(
+ ValueError, r'dtype.*and vocabulary dtype.*do not match'):
+ fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'),
+ dtype=dtypes.int32)
+
+ def test_mismatched_string_dtype(self):
+ with self.assertRaisesRegexp(
+ ValueError, r'dtype.*and vocabulary dtype.*do not match'):
+ fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.string)
+
+ def test_none_mapping(self):
+ with self.assertRaisesRegexp(
+ ValueError, r'vocabulary_list.*must be non-empty'):
+ fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=None)
+
+ def test_empty_mapping(self):
+ with self.assertRaisesRegexp(
+ ValueError, r'vocabulary_list.*must be non-empty'):
+ fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=tuple([]))
+
+ def test_duplicate_mapping(self):
+ with self.assertRaisesRegexp(ValueError, 'Duplicate keys'):
+ fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=(12, 24, 12))
+
+ def test_invalid_num_oov_buckets(self):
+ with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
+ fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=(12, 24, 36),
+ num_oov_buckets=-1)
+
+ def test_invalid_buckets_and_default_value(self):
+ with self.assertRaisesRegexp(
+ ValueError, 'both num_oov_buckets and default_value'):
+ fc.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=(12, 24, 36),
+ num_oov_buckets=100,
+ default_value=2)
+
+ def test_invalid_input_dtype_int32(self):
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=('omar', 'stringer', 'marlo'))
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(12, 24, 36),
+ dense_shape=(2, 2))
+ with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+ column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+
+ def test_invalid_input_dtype_string(self):
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=(12, 24, 36))
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('omar', 'stringer', 'marlo'),
+ dense_shape=(2, 2))
+ with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+ column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+
+ def test_parse_example_string(self):
+ a = fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
+ data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'aaa':
+ feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
+ value=[b'omar', b'stringer']))
+ }))
+ features = parsing_ops.parse_example(
+ serialized=[data.SerializeToString()],
+ features=fc.make_parse_example_spec([a]))
+ self.assertIn('aaa', features)
+ with self.test_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=[[0, 0], [0, 1]],
+ values=np.array([b'omar', b'stringer'], dtype=np.object_),
+ dense_shape=[1, 2]),
+ features['aaa'].eval())
+
+ def test_parse_example_int(self):
+ a = fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=(11, 21, 31))
+ data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'aaa':
+ feature_pb2.Feature(int64_list=feature_pb2.Int64List(
+ value=[11, 21]))
+ }))
+ features = parsing_ops.parse_example(
+ serialized=[data.SerializeToString()],
+ features=fc.make_parse_example_spec([a]))
+ self.assertIn('aaa', features)
+ with self.test_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=[[0, 0], [0, 1]],
+ values=[11, 21],
+ dense_shape=[1, 2]),
+ features['aaa'].eval())
+
+ def test_get_sparse_tensors(self):
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=('omar', 'stringer', 'marlo'))
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((2, -1, 0), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_transform_feature(self):
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=('omar', 'stringer', 'marlo'))
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ id_tensor = _transform_features({'aaa': inputs}, [column], None)[column]
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((2, -1, 0), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_tensor.eval())
+
+ def DISABLED_test_get_sparse_tensors_weight_collections(self):
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=('omar', 'stringer', 'marlo'))
+ inputs = sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'],
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }),
+ weight_collections=('my_weights',))
+
+ self.assertItemsEqual(
+ [], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
+ self.assertItemsEqual([], ops.get_collection('my_weights'))
+
+ def test_get_sparse_tensors_dense_input(self):
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=('omar', 'stringer', 'marlo'))
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': (('marlo', ''), ('skywalker', 'omar'))
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=np.array((2, -1, 0), dtype=np.int64),
+ dense_shape=(2, 2)),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_default_value_in_vocabulary(self):
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=('omar', 'stringer', 'marlo'),
+ default_value=2)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((2, 2, 0), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_with_oov_buckets(self):
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=('omar', 'stringer', 'marlo'),
+ num_oov_buckets=100)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1), (1, 2)),
+ values=('marlo', 'skywalker', 'omar', 'heisenberg'),
+ dense_shape=(2, 3))
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((2, 33, 0, 62), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_int32(self):
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
+ dtype=dtypes.int32)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1), (2, 2)),
+ values=np.array((11, 100, 30, 22), dtype=np.int32),
+ dense_shape=(3, 3))
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((2, -1, 0, 4), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_int32_dense_input(self):
+ default_value = -100
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
+ dtype=dtypes.int32,
+ default_value=default_value)
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa':
+ np.array(
+ ((11, -1, -1), (100, 30, -1), (-1, -1, 22)), dtype=np.int32)
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1), (2, 2)),
+ values=np.array((2, default_value, 0, 4), dtype=np.int64),
+ dense_shape=(3, 3)),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_int32_with_oov_buckets(self):
+ column = fc.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
+ dtype=dtypes.int32,
+ num_oov_buckets=100)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1), (2, 2)),
+ values=(11, 100, 30, 22),
+ dense_shape=(3, 3))
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((2, 60, 0, 4), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_linear_model(self):
+ wire_column = fc_old.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=('omar', 'stringer', 'marlo'),
+ num_oov_buckets=1)
+ self.assertEqual(4, wire_column._num_buckets)
+ with ops.Graph().as_default():
+ predictions = fc.linear_model({
+ wire_column.name: sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ }, (wire_column,))
+ bias = get_linear_model_bias()
+ wire_var = get_linear_model_column_var(wire_column)
+ with _initialized_session():
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
+ # 'marlo' -> 2: wire_var[2] = 3
+ # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
+ self.assertAllClose(((3.,), (5.,)), predictions.eval())
+
+ def test_keras_linear_model(self):
+ wire_column = fc_old.categorical_column_with_vocabulary_list(
+ key='aaa',
+ vocabulary_list=('omar', 'stringer', 'marlo'),
+ num_oov_buckets=1)
+ self.assertEqual(4, wire_column._num_buckets)
+ with ops.Graph().as_default():
+ predictions = get_keras_linear_model_predictions({
+ wire_column.name:
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ }, (wire_column,))
+ bias = get_linear_model_bias()
+ wire_var = get_linear_model_column_var(wire_column)
+ with _initialized_session():
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
+ # 'marlo' -> 2: wire_var[2] = 3
+ # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
+ self.assertAllClose(((3.,), (5.,)), predictions.eval())
+
+
+class IdentityCategoricalColumnTest(test.TestCase):
+
+ def test_constructor(self):
+ column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
+ self.assertEqual('aaa', column.name)
+ self.assertEqual('aaa', column.key)
+ self.assertEqual(3, column.num_buckets)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int64)
+ }, column.parse_example_spec)
+
+ def test_key_should_be_string(self):
+ with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+ fc.categorical_column_with_identity(key=('aaa',), num_buckets=3)
+
+ def test_deep_copy(self):
+ original = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
+ for column in (original, copy.deepcopy(original)):
+ self.assertEqual('aaa', column.name)
+ self.assertEqual(3, column.num_buckets)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int64)
+ }, column.parse_example_spec)
+
+ def test_invalid_num_buckets_zero(self):
+ with self.assertRaisesRegexp(ValueError, 'num_buckets 0 < 1'):
+ fc.categorical_column_with_identity(key='aaa', num_buckets=0)
+
+ def test_invalid_num_buckets_negative(self):
+ with self.assertRaisesRegexp(ValueError, 'num_buckets -1 < 1'):
+ fc.categorical_column_with_identity(key='aaa', num_buckets=-1)
+
+ def test_invalid_default_value_too_small(self):
+ with self.assertRaisesRegexp(ValueError, 'default_value -1 not in range'):
+ fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3, default_value=-1)
+
+ def test_invalid_default_value_too_big(self):
+ with self.assertRaisesRegexp(ValueError, 'default_value 3 not in range'):
+ fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3, default_value=3)
+
+ def test_invalid_input_dtype(self):
+ column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('omar', 'stringer', 'marlo'),
+ dense_shape=(2, 2))
+ with self.assertRaisesRegexp(ValueError, 'Invalid input, not integer'):
+ column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+
+ def test_parse_example(self):
+ a = fc.categorical_column_with_identity(key='aaa', num_buckets=30)
+ data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'aaa':
+ feature_pb2.Feature(int64_list=feature_pb2.Int64List(
+ value=[11, 21]))
+ }))
+ features = parsing_ops.parse_example(
+ serialized=[data.SerializeToString()],
+ features=fc.make_parse_example_spec([a]))
+ self.assertIn('aaa', features)
+ with self.test_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=[[0, 0], [0, 1]],
+ values=np.array([11, 21], dtype=np.int64),
+ dense_shape=[1, 2]),
+ features['aaa'].eval())
+
+ def test_get_sparse_tensors(self):
+ column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 1, 0),
+ dense_shape=(2, 2))
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((0, 1, 0), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_transform_feature(self):
+ column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 1, 0),
+ dense_shape=(2, 2))
+ id_tensor = _transform_features({'aaa': inputs}, [column], None)[column]
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((0, 1, 0), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_tensor.eval())
+
+ def DISABLED_test_get_sparse_tensors_weight_collections(self):
+ column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 1, 0),
+ dense_shape=(2, 2))
+ column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }),
+ weight_collections=('my_weights',))
+
+ self.assertItemsEqual(
+ [], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
+ self.assertItemsEqual([], ops.get_collection('my_weights'))
+
+ def test_get_sparse_tensors_dense_input(self):
+ column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': ((0, -1), (1, 0))
+ }), None)
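+    # For integer dense input, -1 entries are treated as missing and are
+    # dropped in the dense-to-sparse conversion.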
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=np.array((0, 1, 0), dtype=np.int64),
+ dense_shape=(2, 2)),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_with_inputs_too_small(self):
+ column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(1, -1, 0),
+ dense_shape=(2, 2))
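+    # Identity columns validate the id range with assert ops, so the failure
+    # fires only when the tensor is evaluated.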
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ with self.assertRaisesRegexp(
+ errors.OpError, 'assert_greater_or_equal_0'):
+ id_weight_pair.id_tensor.eval()
+
+ def test_get_sparse_tensors_with_inputs_too_big(self):
+ column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(1, 99, 0),
+ dense_shape=(2, 2))
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ with self.assertRaisesRegexp(
+ errors.OpError, 'assert_less_than_num_buckets'):
+ id_weight_pair.id_tensor.eval()
+
+ def test_get_sparse_tensors_with_default_value(self):
+ column = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=4, default_value=3)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(1, -1, 99),
+ dense_shape=(2, 2))
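+    # With default_value set, the out-of-range ids (-1 and 99) are replaced
+    # by 3 instead of tripping the range asserts.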
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array((1, 3, 3), dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_weight_pair.id_tensor.eval())
+
+ def test_get_sparse_tensors_with_default_value_and_placeholder_inputs(self):
+ column = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=4, default_value=3)
+ input_indices = array_ops.placeholder(dtype=dtypes.int64)
+ input_values = array_ops.placeholder(dtype=dtypes.int32)
+ input_shape = array_ops.placeholder(dtype=dtypes.int64)
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=input_indices,
+ values=input_values,
+ dense_shape=input_shape)
+ id_weight_pair = column.get_sparse_tensors(
+ FeatureTransformationCache({
+ 'aaa': inputs
+ }), None)
+ self.assertIsNone(id_weight_pair.weight_tensor)
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=np.array(((0, 0), (1, 0), (1, 1)), dtype=np.int64),
+ values=np.array((1, 3, 3), dtype=np.int64),
+ dense_shape=np.array((2, 2), dtype=np.int64)),
+ id_weight_pair.id_tensor.eval(feed_dict={
+ input_indices: ((0, 0), (1, 0), (1, 1)),
+ input_values: (1, -1, 99),
+ input_shape: (2, 2),
+ }))
+
+ def test_linear_model(self):
+ column = fc_old.categorical_column_with_identity(key='aaa', num_buckets=3)
+ self.assertEqual(3, column.num_buckets)
+ with ops.Graph().as_default():
+ predictions = fc.linear_model({
+ column.name: sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 2, 1),
+ dense_shape=(2, 2))
+ }, (column,))
+ bias = get_linear_model_bias()
+ weight_var = get_linear_model_column_var(column)
+ with _initialized_session():
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ weight_var.assign(((1.,), (2.,), (3.,))).eval()
+ # weight_var[0] = 1
+ # weight_var[2] + weight_var[1] = 3+2 = 5
+ self.assertAllClose(((1.,), (5.,)), predictions.eval())
+
+ def test_keras_linear_model(self):
+ column = fc_old.categorical_column_with_identity(key='aaa', num_buckets=3)
+ self.assertEqual(3, column.num_buckets)
+ with ops.Graph().as_default():
+ predictions = get_keras_linear_model_predictions({
+ column.name:
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 2, 1),
+ dense_shape=(2, 2))
+ }, (column,))
+ bias = get_linear_model_bias()
+ weight_var = get_linear_model_column_var(column)
+ with _initialized_session():
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ weight_var.assign(((1.,), (2.,), (3.,))).eval()
+ # weight_var[0] = 1
+ # weight_var[2] + weight_var[1] = 3+2 = 5
+ self.assertAllClose(((1.,), (5.,)), predictions.eval())
+
+
+class TransformFeaturesTest(test.TestCase):
+
+  # Most transform tests live in the per-column test classes above.
+  # Here we only test the multi-column case and naming.
+  def test_transform_multi_column(self):
+ bucketized_price = fc.bucketized_column(
+ fc.numeric_column('price'), boundaries=[0, 2, 4, 6])
+ hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
+ with ops.Graph().as_default():
+ features = {
+ 'price': [[-1.], [5.]],
+ 'wire':
+ sparse_tensor.SparseTensor(
+ values=['omar', 'stringer', 'marlo'],
+ indices=[[0, 0], [1, 0], [1, 1]],
+ dense_shape=[2, 2])
+ }
+ transformed = _transform_features(features,
+ [bucketized_price, hashed_sparse], None)
+ with _initialized_session():
+ self.assertIn(bucketized_price.name, transformed[bucketized_price].name)
+ self.assertAllEqual([[0], [3]], transformed[bucketized_price].eval())
+ self.assertIn(hashed_sparse.name, transformed[hashed_sparse].name)
+ self.assertAllEqual([6, 4, 1], transformed[hashed_sparse].values.eval())
+
+ def test_column_order(self):
+    """Tests that columns are transformed in sorted order of their names."""
+
+ class _LoggerColumn(FeatureColumn):
+
+ def __init__(self, name):
+ self._name = name
+
+ @property
+ def name(self):
+ return self._name
+
+ def transform_feature(self, transformation_cache, state_manager):
+ self.call_order = call_logger['count']
+ call_logger['count'] += 1
+ return 'Anything'
+
+ @property
+ def parse_example_spec(self):
+ pass
+
+ with ops.Graph().as_default():
+ column1 = _LoggerColumn('1')
+ column2 = _LoggerColumn('2')
+ call_logger = {'count': 0}
+ _transform_features({}, [column1, column2], None)
+ self.assertEqual(0, column1.call_order)
+ self.assertEqual(1, column2.call_order)
+
+ call_logger = {'count': 0}
+ _transform_features({}, [column2, column1], None)
+ self.assertEqual(0, column1.call_order)
+ self.assertEqual(1, column2.call_order)
+
+
+class IndicatorColumnTest(test.TestCase):
+
+ def test_indicator_column(self):
+ a = fc.categorical_column_with_hash_bucket('a', 4)
+ indicator_a = fc.indicator_column(a)
+ self.assertEqual(indicator_a.categorical_column.name, 'a')
+ self.assertEqual(indicator_a.name, 'a_indicator')
+ self.assertEqual(indicator_a.variable_shape, [1, 4])
+
+ b = fc.categorical_column_with_hash_bucket('b', hash_bucket_size=100)
+ indicator_b = fc.indicator_column(b)
+ self.assertEqual(indicator_b.categorical_column.name, 'b')
+ self.assertEqual(indicator_b.name, 'b_indicator')
+ self.assertEqual(indicator_b.variable_shape, [1, 100])
+
+ def test_1D_shape_succeeds(self):
+ animal = fc.indicator_column(
+ fc.categorical_column_with_hash_bucket('animal', 4))
+ transformation_cache = FeatureTransformationCache({
+ 'animal': ['fox', 'fox']
+ })
+ output = transformation_cache.get(animal, None)
+ with self.test_session():
+ self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]], output.eval())
+
+ def test_2D_shape_succeeds(self):
+    # TODO(ispir/cassandrax): Switch to categorical_column_with_keys when ready.
+ animal = fc.indicator_column(
+ fc.categorical_column_with_hash_bucket('animal', 4))
+ transformation_cache = FeatureTransformationCache({
+ 'animal':
+ sparse_tensor.SparseTensor(
+ indices=[[0, 0], [1, 0]],
+ values=['fox', 'fox'],
+ dense_shape=[2, 1])
+ })
+ output = transformation_cache.get(animal, None)
+ with self.test_session():
+ self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]], output.eval())
+
+ def test_multi_hot(self):
+ animal = fc.indicator_column(
+ fc.categorical_column_with_identity('animal', num_buckets=4))
+
+ transformation_cache = FeatureTransformationCache({
+ 'animal':
+ sparse_tensor.SparseTensor(
+ indices=[[0, 0], [0, 1]], values=[1, 1], dense_shape=[1, 2])
+ })
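+    # The indicator output accumulates counts per id: id 1 occurs twice in
+    # this row, so its slot holds 2. rather than 1.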
+ output = transformation_cache.get(animal, None)
+ with self.test_session():
+ self.assertAllEqual([[0., 2., 0., 0.]], output.eval())
+
+ def test_multi_hot2(self):
+ animal = fc.indicator_column(
+ fc.categorical_column_with_identity('animal', num_buckets=4))
+ transformation_cache = FeatureTransformationCache({
+ 'animal':
+ sparse_tensor.SparseTensor(
+ indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
+ })
+ output = transformation_cache.get(animal, None)
+ with self.test_session():
+ self.assertAllEqual([[0., 1., 1., 0.]], output.eval())
+
+ def test_deep_copy(self):
+ a = fc.categorical_column_with_hash_bucket('a', 4)
+ column = fc.indicator_column(a)
+ column_copy = copy.deepcopy(column)
+ self.assertEqual(column_copy.categorical_column.name, 'a')
+ self.assertEqual(column.name, 'a_indicator')
+ self.assertEqual(column.variable_shape, [1, 4])
+
+ def test_parse_example(self):
+ a = fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
+ a_indicator = fc.indicator_column(a)
+ data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'aaa':
+ feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
+ value=[b'omar', b'stringer']))
+ }))
+ features = parsing_ops.parse_example(
+ serialized=[data.SerializeToString()],
+ features=fc.make_parse_example_spec([a_indicator]))
+ self.assertIn('aaa', features)
+ with self.test_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=[[0, 0], [0, 1]],
+ values=np.array([b'omar', b'stringer'], dtype=np.object_),
+ dense_shape=[1, 2]),
+ features['aaa'].eval())
+
+ def test_transform(self):
+ a = fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
+ a_indicator = fc.indicator_column(a)
+ features = {
+ 'aaa': sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('marlo', 'skywalker', 'omar'),
+ dense_shape=(2, 2))
+ }
+ indicator_tensor = _transform_features(features, [a_indicator],
+ None)[a_indicator]
+ with _initialized_session():
+ self.assertAllEqual([[0, 0, 1], [1, 0, 0]], indicator_tensor.eval())
+
+ def test_transform_with_weighted_column(self):
+ # Github issue 12557
+ ids = fc.categorical_column_with_vocabulary_list(
+ key='ids', vocabulary_list=('a', 'b', 'c'))
+ weights = fc.weighted_categorical_column(ids, 'weights')
+ indicator = fc.indicator_column(weights)
+ features = {
+ 'ids': constant_op.constant([['c', 'b', 'a']]),
+ 'weights': constant_op.constant([[2., 4., 6.]])
+ }
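+    # With a weighted categorical column, the indicator scatters the weights
+    # rather than counts: 'a', 'b', 'c' land in slots 0, 1, 2 with weights
+    # 6., 4., 2. respectively.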
+ indicator_tensor = _transform_features(features, [indicator],
+ None)[indicator]
+ with _initialized_session():
+ self.assertAllEqual([[6., 4., 2.]], indicator_tensor.eval())
+
+ def test_transform_with_missing_value_in_weighted_column(self):
+ # Github issue 12583
+ ids = fc.categorical_column_with_vocabulary_list(
+ key='ids', vocabulary_list=('a', 'b', 'c'))
+ weights = fc.weighted_categorical_column(ids, 'weights')
+ indicator = fc.indicator_column(weights)
+ features = {
+ 'ids': constant_op.constant([['c', 'b', 'unknown']]),
+ 'weights': constant_op.constant([[2., 4., 6.]])
+ }
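+    # 'unknown' is out of vocabulary, so its weight (6.) is dropped entirely
+    # from the indicator output.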
+ indicator_tensor = _transform_features(features, [indicator],
+ None)[indicator]
+ with _initialized_session():
+ self.assertAllEqual([[0., 4., 2.]], indicator_tensor.eval())
+
+ def test_transform_with_missing_value_in_categorical_column(self):
+ # Github issue 12583
+ ids = fc.categorical_column_with_vocabulary_list(
+ key='ids', vocabulary_list=('a', 'b', 'c'))
+ indicator = fc.indicator_column(ids)
+ features = {
+ 'ids': constant_op.constant([['c', 'b', 'unknown']]),
+ }
+ indicator_tensor = _transform_features(features, [indicator],
+ None)[indicator]
+ with _initialized_session():
+ self.assertAllEqual([[0., 1., 1.]], indicator_tensor.eval())
+
+ def test_linear_model(self):
+ animal = fc_old.indicator_column(
+ fc_old.categorical_column_with_identity('animal', num_buckets=4))
+ with ops.Graph().as_default():
+ features = {
+ 'animal':
+ sparse_tensor.SparseTensor(
+ indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
+ }
+
+ predictions = fc.linear_model(features, [animal])
+ weight_var = get_linear_model_column_var(animal)
+ with _initialized_session():
+ # All should be zero-initialized.
+ self.assertAllClose([[0.], [0.], [0.], [0.]], weight_var.eval())
+ self.assertAllClose([[0.]], predictions.eval())
+ weight_var.assign([[1.], [2.], [3.], [4.]]).eval()
+ self.assertAllClose([[2. + 3.]], predictions.eval())
+
+ def test_keras_linear_model(self):
+ animal = fc_old.indicator_column(
+ fc_old.categorical_column_with_identity('animal', num_buckets=4))
+ with ops.Graph().as_default():
+ features = {
+ 'animal':
+ sparse_tensor.SparseTensor(
+ indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
+ }
+
+ predictions = get_keras_linear_model_predictions(features, [animal])
+ weight_var = get_linear_model_column_var(animal)
+ with _initialized_session():
+ # All should be zero-initialized.
+ self.assertAllClose([[0.], [0.], [0.], [0.]], weight_var.eval())
+ self.assertAllClose([[0.]], predictions.eval())
+ weight_var.assign([[1.], [2.], [3.], [4.]]).eval()
+ self.assertAllClose([[2. + 3.]], predictions.eval())
+
+ def test_input_layer(self):
+ animal = fc_old.indicator_column(
+ fc_old.categorical_column_with_identity('animal', num_buckets=4))
+ with ops.Graph().as_default():
+ features = {
+ 'animal':
+ sparse_tensor.SparseTensor(
+ indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
+ }
+ net = fc.input_layer(features, [animal])
+ with _initialized_session():
+ self.assertAllClose([[0., 1., 1., 0.]], net.eval())
+
+
+class _TestStateManager(StateManager):
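+ """Test-only StateManager that creates each variable at most once.
+
+ Variables are cached in a dict keyed first by feature column and then by
+ variable name, so repeated get_variable calls return the same variable.
+ """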
+
+ def __init__(self, trainable=True):
+ # Dict of feature_column to a dict of variables.
+ self._all_variables = {}
+ self._trainable = trainable
+
+ def get_variable(self,
+ feature_column,
+ name,
+ shape,
+ dtype=None,
+ initializer=None):
+ if feature_column not in self._all_variables:
+ self._all_variables[feature_column] = {}
+ var_dict = self._all_variables[feature_column]
+ if name in var_dict:
+ return var_dict[name]
+ else:
+ var = variable_scope.get_variable(
+ name=name,
+ shape=shape,
+ initializer=initializer,
+ trainable=self._trainable)
+ var_dict[name] = var
+ return var
+
+
+class EmbeddingColumnTest(test.TestCase):
+
+ def test_defaults(self):
+ categorical_column = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3)
+ embedding_dimension = 2
+ embedding_column = fc.embedding_column(
+ categorical_column, dimension=embedding_dimension)
+ self.assertIs(categorical_column, embedding_column.categorical_column)
+ self.assertEqual(embedding_dimension, embedding_column.dimension)
+ self.assertEqual('mean', embedding_column.combiner)
+ self.assertIsNone(embedding_column.ckpt_to_load_from)
+ self.assertIsNone(embedding_column.tensor_name_in_ckpt)
+ self.assertIsNone(embedding_column.max_norm)
+ self.assertTrue(embedding_column.trainable)
+ self.assertEqual('aaa_embedding', embedding_column.name)
+ self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int64)
+ }, embedding_column.parse_example_spec)
+
+ def test_all_constructor_args(self):
+ categorical_column = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3)
+ embedding_dimension = 2
+ embedding_column = fc.embedding_column(
+ categorical_column, dimension=embedding_dimension,
+ combiner='my_combiner', initializer=lambda: 'my_initializer',
+ ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor',
+ max_norm=42., trainable=False)
+ self.assertIs(categorical_column, embedding_column.categorical_column)
+ self.assertEqual(embedding_dimension, embedding_column.dimension)
+ self.assertEqual('my_combiner', embedding_column.combiner)
+ self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
+ self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
+ self.assertEqual(42., embedding_column.max_norm)
+ self.assertFalse(embedding_column.trainable)
+ self.assertEqual('aaa_embedding', embedding_column.name)
+ self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int64)
+ }, embedding_column.parse_example_spec)
+
+ def test_deep_copy(self):
+ categorical_column = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3)
+ embedding_dimension = 2
+ original = fc.embedding_column(
+ categorical_column, dimension=embedding_dimension,
+ combiner='my_combiner', initializer=lambda: 'my_initializer',
+ ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor',
+ max_norm=42., trainable=False)
+ for embedding_column in (original, copy.deepcopy(original)):
+ self.assertEqual('aaa', embedding_column.categorical_column.name)
+ self.assertEqual(3, embedding_column.categorical_column.num_buckets)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int64)
+ }, embedding_column.categorical_column.parse_example_spec)
+
+ self.assertEqual(embedding_dimension, embedding_column.dimension)
+ self.assertEqual('my_combiner', embedding_column.combiner)
+ self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
+ self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
+ self.assertEqual(42., embedding_column.max_norm)
+ self.assertFalse(embedding_column.trainable)
+ self.assertEqual('aaa_embedding', embedding_column.name)
+ self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int64)
+ }, embedding_column.parse_example_spec)
+
+ def test_invalid_initializer(self):
+ categorical_column = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3)
+ with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
+ fc.embedding_column(categorical_column, dimension=2, initializer='not_fn')
+
+ def test_parse_example(self):
+ a = fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
+ a_embedded = fc.embedding_column(a, dimension=2)
+ data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'aaa':
+ feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
+ value=[b'omar', b'stringer']))
+ }))
+ features = parsing_ops.parse_example(
+ serialized=[data.SerializeToString()],
+ features=fc.make_parse_example_spec([a_embedded]))
+ self.assertIn('aaa', features)
+ with self.test_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=[[0, 0], [0, 1]],
+ values=np.array([b'omar', b'stringer'], dtype=np.object_),
+ dense_shape=[1, 2]),
+ features['aaa'].eval())
+
+ def test_transform_feature(self):
+ a = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
+ a_embedded = fc.embedding_column(a, dimension=2)
+ features = {
+ 'aaa': sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 1, 0),
+ dense_shape=(2, 2))
+ }
+ outputs = _transform_features(features, [a, a_embedded], None)
+ output_a = outputs[a]
+ output_embedded = outputs[a_embedded]
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self, output_a.eval(), output_embedded.eval())
+
+ def test_get_dense_tensor(self):
+ # Inputs.
+ vocabulary_size = 3
+ sparse_input = sparse_tensor.SparseTensorValue(
+ # example 0, ids [2]
+ # example 1, ids [0, 1]
+ # example 2, ids []
+ # example 3, ids [1]
+ indices=((0, 0), (1, 0), (1, 4), (3, 0)),
+ values=(2, 0, 1, 1),
+ dense_shape=(4, 5))
+
+ # Embedding variable.
+ embedding_dimension = 2
+ embedding_values = (
+ (1., 2.), # id 0
+ (3., 5.), # id 1
+ (7., 11.) # id 2
+ )
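+ # This initializer doubles as an assertion: it verifies that the embedding
+ # variable is created with the expected shape and dtype, then returns the
+ # fixed values above so the lookups below are deterministic.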
+ def _initializer(shape, dtype, partition_info):
+ self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
+ self.assertEqual(dtypes.float32, dtype)
+ self.assertIsNone(partition_info)
+ return embedding_values
+
+ # Expected lookup result, using combiner='mean'.
+ expected_lookups = (
+ # example 0, ids [2], embedding = [7, 11]
+ (7., 11.),
+ # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
+ (2., 3.5),
+ # example 2, ids [], embedding = [0, 0]
+ (0., 0.),
+ # example 3, ids [1], embedding = [3, 5]
+ (3., 5.),
+ )
+
+ # Build columns.
+ categorical_column = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ embedding_column = fc.embedding_column(
+ categorical_column, dimension=embedding_dimension,
+ initializer=_initializer)
+ state_manager = _TestStateManager()
+
+ # Provide sparse input and get dense result.
+ embedding_lookup = embedding_column.get_dense_tensor(
+ FeatureTransformationCache({
+ 'aaa': sparse_input
+ }), state_manager)
+
+ # Assert expected embedding variable and lookups.
+ global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ self.assertItemsEqual(('embedding_weights:0',),
+ tuple([v.name for v in global_vars]))
+ with _initialized_session():
+ self.assertAllEqual(embedding_values, global_vars[0].eval())
+ self.assertAllEqual(expected_lookups, embedding_lookup.eval())
+
+ def test_get_dense_tensor_3d(self):
+ # Inputs.
+ vocabulary_size = 4
+ sparse_input = sparse_tensor.SparseTensorValue(
+ # example 0, ids [2]
+ # example 1, ids [0, 1]
+ # example 2, ids []
+ # example 3, ids [1]
+ indices=((0, 0, 0), (1, 1, 0), (1, 1, 4), (3, 0, 0), (3, 1, 2)),
+ values=(2, 0, 1, 1, 2),
+ dense_shape=(4, 2, 5))
+
+ # Embedding variable.
+ embedding_dimension = 3
+ embedding_values = (
+ (1., 2., 4.), # id 0
+ (3., 5., 1.), # id 1
+ (7., 11., 2.), # id 2
+ (2., 7., 12.) # id 3
+ )
+ def _initializer(shape, dtype, partition_info):
+ self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
+ self.assertEqual(dtypes.float32, dtype)
+ self.assertIsNone(partition_info)
+ return embedding_values
+
+ # Expected lookup result, using combiner='mean'.
+ expected_lookups = (
+ # example 0, ids [[2], []], embedding = [[7, 11, 2], [0, 0, 0]]
+ ((7., 11., 2.), (0., 0., 0.)),
+ # example 1, ids [[], [0, 1]], embedding
+ # = mean([[], [1, 2, 4] + [3, 5, 1]]) = [[0, 0, 0], [2, 3.5, 2.5]]
+ ((0., 0., 0.), (2., 3.5, 2.5)),
+ # example 2, ids [[], []], embedding = [[0, 0, 0], [0, 0, 0]]
+ ((0., 0., 0.), (0., 0., 0.)),
+ # example 3, ids [[1], [2]], embedding = [[3, 5, 1], [7, 11, 2]]
+ ((3., 5., 1.), (7., 11., 2.)),
+ )
+
+ # Build columns.
+ categorical_column = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ embedding_column = fc.embedding_column(
+ categorical_column, dimension=embedding_dimension,
+ initializer=_initializer)
+ state_manager = _TestStateManager()
+
+ # Provide sparse input and get dense result.
+ embedding_lookup = embedding_column.get_dense_tensor(
+ FeatureTransformationCache({
+ 'aaa': sparse_input
+ }), state_manager)
+
+ # Assert expected embedding variable and lookups.
+ global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ self.assertItemsEqual(('embedding_weights:0',),
+ tuple([v.name for v in global_vars]))
+ with _initialized_session():
+ self.assertAllEqual(embedding_values, global_vars[0].eval())
+ self.assertAllEqual(expected_lookups, embedding_lookup.eval())
+
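+ # The DISABLED_ prefix keeps the test runner from collecting this method;
+ # only methods whose names start with 'test' are run.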
+ def DISABLED_test_get_dense_tensor_weight_collections(self):
+ sparse_input = sparse_tensor.SparseTensorValue(
+ # example 0, ids [2]
+ # example 1, ids [0, 1]
+ # example 2, ids []
+ # example 3, ids [1]
+ indices=((0, 0), (1, 0), (1, 4), (3, 0)),
+ values=(2, 0, 1, 1),
+ dense_shape=(4, 5))
+
+ # Build columns.
+ categorical_column = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3)
+ embedding_column = fc.embedding_column(categorical_column, dimension=2)
+
+ # Provide sparse input and get dense result.
+ embedding_column.get_dense_tensor(
+ FeatureTransformationCache({
+ 'aaa': sparse_input
+ }),
+ weight_collections=('my_vars',))
+
+ # Assert expected embedding variable and lookups.
+ global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ self.assertItemsEqual(('embedding_weights:0',),
+ tuple([v.name for v in global_vars]))
+ my_vars = ops.get_collection('my_vars')
+ self.assertItemsEqual(
+ ('embedding_weights:0',), tuple([v.name for v in my_vars]))
+
+ def test_get_dense_tensor_placeholder_inputs(self):
+ # Inputs.
+ vocabulary_size = 3
+ sparse_input = sparse_tensor.SparseTensorValue(
+ # example 0, ids [2]
+ # example 1, ids [0, 1]
+ # example 2, ids []
+ # example 3, ids [1]
+ indices=((0, 0), (1, 0), (1, 4), (3, 0)),
+ values=(2, 0, 1, 1),
+ dense_shape=(4, 5))
+
+ # Embedding variable.
+ embedding_dimension = 2
+ embedding_values = (
+ (1., 2.), # id 0
+ (3., 5.), # id 1
+ (7., 11.) # id 2
+ )
+ def _initializer(shape, dtype, partition_info):
+ self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
+ self.assertEqual(dtypes.float32, dtype)
+ self.assertIsNone(partition_info)
+ return embedding_values
+
+ # Expected lookup result, using combiner='mean'.
+ expected_lookups = (
+ # example 0, ids [2], embedding = [7, 11]
+ (7., 11.),
+ # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
+ (2., 3.5),
+ # example 2, ids [], embedding = [0, 0]
+ (0., 0.),
+ # example 3, ids [1], embedding = [3, 5]
+ (3., 5.),
+ )
+
+ # Build columns.
+ categorical_column = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ embedding_column = fc.embedding_column(
+ categorical_column, dimension=embedding_dimension,
+ initializer=_initializer)
+ state_manager = _TestStateManager()
+
+ # Provide sparse input and get dense result.
+ input_indices = array_ops.placeholder(dtype=dtypes.int64)
+ input_values = array_ops.placeholder(dtype=dtypes.int64)
+ input_shape = array_ops.placeholder(dtype=dtypes.int64)
+ embedding_lookup = embedding_column.get_dense_tensor(
+ FeatureTransformationCache({
+ 'aaa':
+ sparse_tensor.SparseTensorValue(
+ indices=input_indices,
+ values=input_values,
+ dense_shape=input_shape)
+ }), state_manager)
+
+ # Assert expected embedding variable and lookups.
+ global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ self.assertItemsEqual(
+ ('embedding_weights:0',), tuple([v.name for v in global_vars]))
+ with _initialized_session():
+ self.assertAllEqual(embedding_values, global_vars[0].eval())
+ self.assertAllEqual(expected_lookups, embedding_lookup.eval(
+ feed_dict={
+ input_indices: sparse_input.indices,
+ input_values: sparse_input.values,
+ input_shape: sparse_input.dense_shape,
+ }))
+
+ def test_get_dense_tensor_restore_from_ckpt(self):
+ # Inputs.
+ vocabulary_size = 3
+ sparse_input = sparse_tensor.SparseTensorValue(
+ # example 0, ids [2]
+ # example 1, ids [0, 1]
+ # example 2, ids []
+ # example 3, ids [1]
+ indices=((0, 0), (1, 0), (1, 4), (3, 0)),
+ values=(2, 0, 1, 1),
+ dense_shape=(4, 5))
+
+ # Embedding variable. The checkpoint file contains _embedding_values.
+ embedding_dimension = 2
+ embedding_values = (
+ (1., 2.), # id 0
+ (3., 5.), # id 1
+ (7., 11.) # id 2
+ )
+ ckpt_path = test.test_src_dir_path(
+ 'python/feature_column/testdata/embedding.ckpt')
+ ckpt_tensor = 'my_embedding'
+
+ # Expected lookup result, using combiner='mean'.
+ expected_lookups = (
+ # example 0, ids [2], embedding = [7, 11]
+ (7., 11.),
+ # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
+ (2., 3.5),
+ # example 2, ids [], embedding = [0, 0]
+ (0., 0.),
+ # example 3, ids [1], embedding = [3, 5]
+ (3., 5.),
+ )
+
+ # Build columns.
+ categorical_column = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ embedding_column = fc.embedding_column(
+ categorical_column, dimension=embedding_dimension,
+ ckpt_to_load_from=ckpt_path,
+ tensor_name_in_ckpt=ckpt_tensor)
+ state_manager = _TestStateManager()
+
+ # Provide sparse input and get dense result.
+ embedding_lookup = embedding_column.get_dense_tensor(
+ FeatureTransformationCache({
+ 'aaa': sparse_input
+ }), state_manager)
+
+ # Assert expected embedding variable and lookups.
+ global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ self.assertItemsEqual(
+ ('embedding_weights:0',), tuple([v.name for v in global_vars]))
+ with _initialized_session():
+ self.assertAllEqual(embedding_values, global_vars[0].eval())
+ self.assertAllEqual(expected_lookups, embedding_lookup.eval())
+
+ def test_linear_model(self):
+ # Inputs.
+ batch_size = 4
+ vocabulary_size = 3
+ sparse_input = sparse_tensor.SparseTensorValue(
+ # example 0, ids [2]
+ # example 1, ids [0, 1]
+ # example 2, ids []
+ # example 3, ids [1]
+ indices=((0, 0), (1, 0), (1, 4), (3, 0)),
+ values=(2, 0, 1, 1),
+ dense_shape=(batch_size, 5))
+
+ # Embedding variable.
+ embedding_dimension = 2
+ embedding_shape = (vocabulary_size, embedding_dimension)
+ zeros_embedding_values = np.zeros(embedding_shape)
+ def _initializer(shape, dtype, partition_info):
+ self.assertAllEqual(embedding_shape, shape)
+ self.assertEqual(dtypes.float32, dtype)
+ self.assertIsNone(partition_info)
+ return zeros_embedding_values
+
+ # Build columns.
+ categorical_column = fc_old.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ embedding_column = fc_old.embedding_column(
+ categorical_column,
+ dimension=embedding_dimension,
+ initializer=_initializer)
+
+ with ops.Graph().as_default():
+ predictions = fc.linear_model({
+ categorical_column.name: sparse_input
+ }, (embedding_column,))
+ expected_var_names = (
+ 'linear_model/bias_weights:0',
+ 'linear_model/aaa_embedding/weights:0',
+ 'linear_model/aaa_embedding/embedding_weights:0',
+ )
+ self.assertItemsEqual(
+ expected_var_names,
+ [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
+ trainable_vars = {
+ v.name: v for v in ops.get_collection(
+ ops.GraphKeys.TRAINABLE_VARIABLES)
+ }
+ self.assertItemsEqual(expected_var_names, trainable_vars.keys())
+ bias = trainable_vars['linear_model/bias_weights:0']
+ embedding_weights = trainable_vars[
+ 'linear_model/aaa_embedding/embedding_weights:0']
+ linear_weights = trainable_vars[
+ 'linear_model/aaa_embedding/weights:0']
+ with _initialized_session():
+ # Predictions with all zero weights.
+ self.assertAllClose(np.zeros((1,)), bias.eval())
+ self.assertAllClose(zeros_embedding_values, embedding_weights.eval())
+ self.assertAllClose(
+ np.zeros((embedding_dimension, 1)), linear_weights.eval())
+ self.assertAllClose(np.zeros((batch_size, 1)), predictions.eval())
+
+ # Predictions with all non-zero weights.
+ embedding_weights.assign((
+ (1., 2.), # id 0
+ (3., 5.), # id 1
+ (7., 11.) # id 2
+ )).eval()
+ linear_weights.assign(((4.,), (6.,))).eval()
+ # example 0, ids [2], embedding[0] = [7, 11]
+ # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
+ # example 2, ids [], embedding[2] = [0, 0]
+ # example 3, ids [1], embedding[3] = [3, 5]
+ # sum(embeddings * linear_weights)
+ # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
+ self.assertAllClose(((94.,), (29.,), (0.,), (42.,)), predictions.eval())
+
+ def test_keras_linear_model(self):
+ # Inputs.
+ batch_size = 4
+ vocabulary_size = 3
+ sparse_input = sparse_tensor.SparseTensorValue(
+ # example 0, ids [2]
+ # example 1, ids [0, 1]
+ # example 2, ids []
+ # example 3, ids [1]
+ indices=((0, 0), (1, 0), (1, 4), (3, 0)),
+ values=(2, 0, 1, 1),
+ dense_shape=(batch_size, 5))
+
+ # Embedding variable.
+ embedding_dimension = 2
+ embedding_shape = (vocabulary_size, embedding_dimension)
+ zeros_embedding_values = np.zeros(embedding_shape)
+
+ def _initializer(shape, dtype, partition_info):
+ self.assertAllEqual(embedding_shape, shape)
+ self.assertEqual(dtypes.float32, dtype)
+ self.assertIsNone(partition_info)
+ return zeros_embedding_values
+
+ # Build columns.
+ categorical_column = fc_old.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ embedding_column = fc_old.embedding_column(
+ categorical_column,
+ dimension=embedding_dimension,
+ initializer=_initializer)
+
+ with ops.Graph().as_default():
+ predictions = get_keras_linear_model_predictions({
+ categorical_column.name: sparse_input
+ }, (embedding_column,))
+ expected_var_names = (
+ 'linear_model/bias_weights:0',
+ 'linear_model/aaa_embedding/weights:0',
+ 'linear_model/aaa_embedding/embedding_weights:0',
+ )
+ self.assertItemsEqual(
+ expected_var_names,
+ [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
+ trainable_vars = {
+ v.name: v
+ for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ }
+ self.assertItemsEqual(expected_var_names, trainable_vars.keys())
+ bias = trainable_vars['linear_model/bias_weights:0']
+ embedding_weights = trainable_vars[
+ 'linear_model/aaa_embedding/embedding_weights:0']
+ linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']
+ with _initialized_session():
+ # Predictions with all zero weights.
+ self.assertAllClose(np.zeros((1,)), bias.eval())
+ self.assertAllClose(zeros_embedding_values, embedding_weights.eval())
+ self.assertAllClose(
+ np.zeros((embedding_dimension, 1)), linear_weights.eval())
+ self.assertAllClose(np.zeros((batch_size, 1)), predictions.eval())
+
+ # Predictions with all non-zero weights.
+ embedding_weights.assign((
+ (1., 2.), # id 0
+ (3., 5.), # id 1
+ (7., 11.) # id 2
+ )).eval()
+ linear_weights.assign(((4.,), (6.,))).eval()
+ # example 0, ids [2], embedding[0] = [7, 11]
+ # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
+ # example 2, ids [], embedding[2] = [0, 0]
+ # example 3, ids [1], embedding[3] = [3, 5]
+ # sum(embeddings * linear_weights)
+ # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
+ self.assertAllClose(((94.,), (29.,), (0.,), (42.,)), predictions.eval())
+
+ def test_input_layer(self):
+ # Inputs.
+ vocabulary_size = 3
+ sparse_input = sparse_tensor.SparseTensorValue(
+ # example 0, ids [2]
+ # example 1, ids [0, 1]
+ # example 2, ids []
+ # example 3, ids [1]
+ indices=((0, 0), (1, 0), (1, 4), (3, 0)),
+ values=(2, 0, 1, 1),
+ dense_shape=(4, 5))
+
+ # Embedding variable.
+ embedding_dimension = 2
+ embedding_values = (
+ (1., 2.), # id 0
+ (3., 5.), # id 1
+ (7., 11.) # id 2
+ )
+ def _initializer(shape, dtype, partition_info):
+ self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
+ self.assertEqual(dtypes.float32, dtype)
+ self.assertIsNone(partition_info)
+ return embedding_values
+
+ # Expected lookup result, using combiner='mean'.
+ expected_lookups = (
+ # example 0, ids [2], embedding = [7, 11]
+ (7., 11.),
+ # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
+ (2., 3.5),
+ # example 2, ids [], embedding = [0, 0]
+ (0., 0.),
+ # example 3, ids [1], embedding = [3, 5]
+ (3., 5.),
+ )
+
+ # Build columns.
+ categorical_column = fc_old.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ embedding_column = fc_old.embedding_column(
+ categorical_column,
+ dimension=embedding_dimension,
+ initializer=_initializer)
+
+ # Provide sparse input and get dense result.
+ input_layer = fc.input_layer({'aaa': sparse_input}, (embedding_column,))
+
+ # Assert expected embedding variable and lookups.
+ global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ self.assertItemsEqual(
+ ('input_layer/aaa_embedding/embedding_weights:0',),
+ tuple([v.name for v in global_vars]))
+ trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ self.assertItemsEqual(
+ ('input_layer/aaa_embedding/embedding_weights:0',),
+ tuple([v.name for v in trainable_vars]))
+ with _initialized_session():
+ self.assertAllEqual(embedding_values, trainable_vars[0].eval())
+ self.assertAllEqual(expected_lookups, input_layer.eval())
+
+ def test_input_layer_not_trainable(self):
+ # Inputs.
+ vocabulary_size = 3
+ sparse_input = sparse_tensor.SparseTensorValue(
+ # example 0, ids [2]
+ # example 1, ids [0, 1]
+ # example 2, ids []
+ # example 3, ids [1]
+ indices=((0, 0), (1, 0), (1, 4), (3, 0)),
+ values=(2, 0, 1, 1),
+ dense_shape=(4, 5))
+
+ # Embedding variable.
+ embedding_dimension = 2
+ embedding_values = (
+ (1., 2.), # id 0
+ (3., 5.), # id 1
+ (7., 11.) # id 2
+ )
+ def _initializer(shape, dtype, partition_info):
+ self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
+ self.assertEqual(dtypes.float32, dtype)
+ self.assertIsNone(partition_info)
+ return embedding_values
+
+ # Expected lookup result, using combiner='mean'.
+ expected_lookups = (
+ # example 0, ids [2], embedding = [7, 11]
+ (7., 11.),
+ # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
+ (2., 3.5),
+ # example 2, ids [], embedding = [0, 0]
+ (0., 0.),
+ # example 3, ids [1], embedding = [3, 5]
+ (3., 5.),
+ )
+
+ # Build columns.
+ categorical_column = fc_old.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ embedding_column = fc_old.embedding_column(
+ categorical_column,
+ dimension=embedding_dimension,
+ initializer=_initializer,
+ trainable=False)
+
+ # Provide sparse input and get dense result.
+ input_layer = fc.input_layer({'aaa': sparse_input}, (embedding_column,))
+
+ # Assert expected embedding variable and lookups.
+ global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ self.assertItemsEqual(
+ ('input_layer/aaa_embedding/embedding_weights:0',),
+ tuple([v.name for v in global_vars]))
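+ # With trainable=False the embedding variable is still created as a global
+ # variable, but it must not appear in the TRAINABLE_VARIABLES collection.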
+ self.assertItemsEqual(
+ [], ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
+ with _initialized_session():
+ self.assertAllEqual(embedding_values, global_vars[0].eval())
+ self.assertAllEqual(expected_lookups, input_layer.eval())
+
+
+class _TestSharedEmbeddingStateManager(StateManager):
+ """Manages the state for shared embedding columns.
+
+ This can handle multiple groups of shared embedding columns.
+ """
+
+ def __init__(self, trainable=True):
+ # Dict of shared_embedding_collection_name to a dict of variables.
+ self._all_variables = {}
+ self._trainable = trainable
+
+ def get_variable(self,
+ feature_column,
+ name,
+ shape,
+ dtype=None,
+ initializer=None):
+ if not isinstance(feature_column, fc.SharedEmbeddingColumn):
+ raise ValueError(
+ 'SharedEmbeddingStateManager can only handle SharedEmbeddingColumns. '
+ 'Given type: {} '.format(type(feature_column)))
+
+ collection_name = feature_column.shared_collection_name
+ if collection_name not in self._all_variables:
+ self._all_variables[collection_name] = {}
+ var_dict = self._all_variables[collection_name]
+ if name in var_dict:
+ return var_dict[name]
+ else:
+ var = variable_scope.get_variable(
+ name=name,
+ shape=shape,
+ initializer=initializer,
+ trainable=self._trainable)
+ var_dict[name] = var
+ return var
+
+
+class SharedEmbeddingColumnTest(test.TestCase):
+
+ def test_defaults(self):
+ categorical_column_a = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3)
+ categorical_column_b = fc.categorical_column_with_identity(
+ key='bbb', num_buckets=3)
+ embedding_dimension = 2
+ embedding_column_b, embedding_column_a = fc.shared_embedding_columns(
+ [categorical_column_b, categorical_column_a],
+ dimension=embedding_dimension)
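+ # Note: the columns are passed in (b, a) order, but the shared collection
+ # name asserted below appears to be built from the sorted column names
+ # ('aaa_bbb_...'), independent of argument order.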
+ self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
+ self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
+ self.assertEqual(embedding_dimension, embedding_column_a.dimension)
+ self.assertEqual(embedding_dimension, embedding_column_b.dimension)
+ self.assertEqual('mean', embedding_column_a.combiner)
+ self.assertEqual('mean', embedding_column_b.combiner)
+ self.assertIsNone(embedding_column_a.ckpt_to_load_from)
+ self.assertIsNone(embedding_column_b.ckpt_to_load_from)
+ self.assertEqual('aaa_bbb_shared_embedding',
+ embedding_column_a.shared_collection_name)
+ self.assertEqual('aaa_bbb_shared_embedding',
+ embedding_column_b.shared_collection_name)
+ self.assertIsNone(embedding_column_a.tensor_name_in_ckpt)
+ self.assertIsNone(embedding_column_b.tensor_name_in_ckpt)
+ self.assertIsNone(embedding_column_a.max_norm)
+ self.assertIsNone(embedding_column_b.max_norm)
+ self.assertTrue(embedding_column_a.trainable)
+ self.assertTrue(embedding_column_b.trainable)
+ self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
+ self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
+ self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
+ self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int64)
+ }, embedding_column_a.parse_example_spec)
+ self.assertEqual({
+ 'bbb': parsing_ops.VarLenFeature(dtypes.int64)
+ }, embedding_column_b.parse_example_spec)
+
+ def test_all_constructor_args(self):
+ categorical_column_a = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3)
+ categorical_column_b = fc.categorical_column_with_identity(
+ key='bbb', num_buckets=3)
+ embedding_dimension = 2
+ embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
+ [categorical_column_a, categorical_column_b],
+ dimension=embedding_dimension,
+ combiner='my_combiner',
+ initializer=lambda: 'my_initializer',
+ shared_embedding_collection_name='shared_embedding_collection_name',
+ ckpt_to_load_from='my_ckpt',
+ tensor_name_in_ckpt='my_ckpt_tensor',
+ max_norm=42.,
+ trainable=False)
+ self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
+ self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
+ self.assertEqual(embedding_dimension, embedding_column_a.dimension)
+ self.assertEqual(embedding_dimension, embedding_column_b.dimension)
+ self.assertEqual('my_combiner', embedding_column_a.combiner)
+ self.assertEqual('my_combiner', embedding_column_b.combiner)
+ self.assertEqual('shared_embedding_collection_name',
+ embedding_column_a.shared_collection_name)
+ self.assertEqual('shared_embedding_collection_name',
+ embedding_column_b.shared_collection_name)
+ self.assertEqual('my_ckpt', embedding_column_a.ckpt_to_load_from)
+ self.assertEqual('my_ckpt', embedding_column_b.ckpt_to_load_from)
+ self.assertEqual('my_ckpt_tensor', embedding_column_a.tensor_name_in_ckpt)
+ self.assertEqual('my_ckpt_tensor', embedding_column_b.tensor_name_in_ckpt)
+ self.assertEqual(42., embedding_column_a.max_norm)
+ self.assertEqual(42., embedding_column_b.max_norm)
+ self.assertFalse(embedding_column_a.trainable)
+ self.assertFalse(embedding_column_b.trainable)
+ self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
+ self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
+ self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
+ self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int64)
+ }, embedding_column_a.parse_example_spec)
+ self.assertEqual({
+ 'bbb': parsing_ops.VarLenFeature(dtypes.int64)
+ }, embedding_column_b.parse_example_spec)
+
+ def test_deep_copy(self):
+ categorical_column_a = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3)
+ categorical_column_b = fc.categorical_column_with_identity(
+ key='bbb', num_buckets=3)
+ embedding_dimension = 2
+ original_a, _ = fc.shared_embedding_columns(
+ [categorical_column_a, categorical_column_b],
+ dimension=embedding_dimension,
+ combiner='my_combiner',
+ initializer=lambda: 'my_initializer',
+ shared_embedding_collection_name='shared_embedding_collection_name',
+ ckpt_to_load_from='my_ckpt',
+ tensor_name_in_ckpt='my_ckpt_tensor',
+ max_norm=42., trainable=False)
+ for embedding_column_a in (original_a, copy.deepcopy(original_a)):
+ self.assertEqual('aaa', embedding_column_a.categorical_column.name)
+ self.assertEqual(3, embedding_column_a.categorical_column.num_buckets)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int64)
+ }, embedding_column_a.categorical_column.parse_example_spec)
+
+ self.assertEqual(embedding_dimension, embedding_column_a.dimension)
+ self.assertEqual('my_combiner', embedding_column_a.combiner)
+ self.assertEqual('shared_embedding_collection_name',
+ embedding_column_a.shared_collection_name)
+ self.assertEqual('my_ckpt', embedding_column_a.ckpt_to_load_from)
+ self.assertEqual('my_ckpt_tensor', embedding_column_a.tensor_name_in_ckpt)
+ self.assertEqual(42., embedding_column_a.max_norm)
+ self.assertFalse(embedding_column_a.trainable)
+ self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
+ self.assertEqual((embedding_dimension,),
+ embedding_column_a.variable_shape)
+ self.assertEqual({
+ 'aaa': parsing_ops.VarLenFeature(dtypes.int64)
+ }, embedding_column_a.parse_example_spec)
+
+ def test_invalid_initializer(self):
+ categorical_column_a = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3)
+ categorical_column_b = fc.categorical_column_with_identity(
+ key='bbb', num_buckets=3)
+ with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
+ fc.shared_embedding_columns(
+ [categorical_column_a, categorical_column_b], dimension=2,
+ initializer='not_fn')
+
+ def test_incompatible_column_type(self):
+ categorical_column_a = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3)
+ categorical_column_b = fc.categorical_column_with_identity(
+ key='bbb', num_buckets=3)
+ categorical_column_c = fc.categorical_column_with_hash_bucket(
+ key='ccc', hash_bucket_size=3)
+ with self.assertRaisesRegexp(
+ ValueError, 'all categorical_columns must have the same type.*'
+ 'IdentityCategoricalColumn.*HashedCategoricalColumn'):
+ fc.shared_embedding_columns(
+ [categorical_column_a, categorical_column_b, categorical_column_c],
+ dimension=2)
+
+ def test_weighted_categorical_column_ok(self):
+ categorical_column_a = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3)
+ weighted_categorical_column_a = fc.weighted_categorical_column(
+ categorical_column_a, weight_feature_key='aaa_weights')
+ categorical_column_b = fc.categorical_column_with_identity(
+ key='bbb', num_buckets=3)
+ weighted_categorical_column_b = fc.weighted_categorical_column(
+ categorical_column_b, weight_feature_key='bbb_weights')
+ fc.shared_embedding_columns(
+ [weighted_categorical_column_a, categorical_column_b], dimension=2)
+ fc.shared_embedding_columns(
+ [categorical_column_a, weighted_categorical_column_b], dimension=2)
+ fc.shared_embedding_columns(
+ [weighted_categorical_column_a, weighted_categorical_column_b],
+ dimension=2)
+
+ def test_parse_example(self):
+ a = fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
+ b = fc.categorical_column_with_vocabulary_list(
+ key='bbb', vocabulary_list=('omar', 'stringer', 'marlo'))
+ a_embedded, b_embedded = fc.shared_embedding_columns(
+ [a, b], dimension=2)
+ data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'aaa':
+ feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
+ value=[b'omar', b'stringer'])),
+ 'bbb':
+ feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
+ value=[b'stringer', b'marlo'])),
+ }))
+ features = parsing_ops.parse_example(
+ serialized=[data.SerializeToString()],
+ features=fc.make_parse_example_spec([a_embedded, b_embedded]))
+ self.assertIn('aaa', features)
+ self.assertIn('bbb', features)
+ with self.test_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=[[0, 0], [0, 1]],
+ values=np.array([b'omar', b'stringer'], dtype=np.object_),
+ dense_shape=[1, 2]),
+ features['aaa'].eval())
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=[[0, 0], [0, 1]],
+ values=np.array([b'stringer', b'marlo'], dtype=np.object_),
+ dense_shape=[1, 2]),
+ features['bbb'].eval())
+
+ def test_transform_feature(self):
+ a = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
+ b = fc.categorical_column_with_identity(key='bbb', num_buckets=3)
+ a_embedded, b_embedded = fc.shared_embedding_columns(
+ [a, b], dimension=2)
+ features = {
+ 'aaa': sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 1, 0),
+ dense_shape=(2, 2)),
+ 'bbb': sparse_tensor.SparseTensor(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(1, 2, 1),
+ dense_shape=(2, 2)),
+ }
+ outputs = _transform_features(features, [a, a_embedded, b, b_embedded],
+ None)
+ output_a = outputs[a]
+ output_a_embedded = outputs[a_embedded]
+ output_b = outputs[b]
+ output_b_embedded = outputs[b_embedded]
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self, output_a.eval(), output_a_embedded.eval())
+ _assert_sparse_tensor_value(
+ self, output_b.eval(), output_b_embedded.eval())
+
+ def test_get_dense_tensor(self):
+ # Inputs.
+ vocabulary_size = 3
+ # -1 values are ignored.
+ input_a = np.array(
+ [[2, -1, -1], # example 0, ids [2]
+ [0, 1, -1]]) # example 1, ids [0, 1]
+ input_b = np.array(
+ [[0, -1, -1], # example 0, ids [0]
+ [-1, -1, -1]]) # example 1, ids []
+ input_features = {
+ 'aaa': input_a,
+ 'bbb': input_b
+ }
+
+ # Embedding variable.
+ embedding_dimension = 2
+ embedding_values = (
+ (1., 2.), # id 0
+ (3., 5.), # id 1
+ (7., 11.) # id 2
+ )
+ def _initializer(shape, dtype, partition_info):
+ self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
+ self.assertEqual(dtypes.float32, dtype)
+ self.assertIsNone(partition_info)
+ return embedding_values
+
+ # Expected lookup result, using combiner='mean'.
+ expected_lookups_a = (
+ # example 0:
+ (7., 11.), # ids [2], embedding = [7, 11]
+ # example 1:
+ (2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
+ )
+ expected_lookups_b = (
+ # example 0:
+ (1., 2.), # ids [0], embedding = [1, 2]
+ # example 1:
+ (0., 0.), # ids [], embedding = [0, 0]
+ )
+
+ # Build columns.
+ categorical_column_a = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ categorical_column_b = fc.categorical_column_with_identity(
+ key='bbb', num_buckets=vocabulary_size)
+ embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
+ [categorical_column_a, categorical_column_b],
+ dimension=embedding_dimension, initializer=_initializer)
+ state_manager = _TestSharedEmbeddingStateManager()
+
+ # Provide sparse input and get dense result.
+ embedding_lookup_a = embedding_column_a.get_dense_tensor(
+ FeatureTransformationCache(input_features), state_manager)
+ embedding_lookup_b = embedding_column_b.get_dense_tensor(
+ FeatureTransformationCache(input_features), state_manager)
+
+ # Assert expected embedding variable and lookups.
+ global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ self.assertItemsEqual(('embedding_weights:0',),
+ tuple([v.name for v in global_vars]))
+ embedding_var = global_vars[0]
+ with _initialized_session():
+ self.assertAllEqual(embedding_values, embedding_var.eval())
+ self.assertAllEqual(expected_lookups_a, embedding_lookup_a.eval())
+ self.assertAllEqual(expected_lookups_b, embedding_lookup_b.eval())
+
+ def DISABLED_test_get_dense_tensor_weight_collections(self):
+ # Inputs.
+ vocabulary_size = 3
+ # -1 values are ignored.
+ input_a = np.array(
+ [[2, -1, -1], # example 0, ids [2]
+ [0, 1, -1]]) # example 1, ids [0, 1]
+ input_b = np.array(
+ [[0, -1, -1], # example 0, ids [0]
+ [-1, -1, -1]]) # example 1, ids []
+ input_features = {'aaa': input_a, 'bbb': input_b}
+
+ # Embedding variable.
+ embedding_dimension = 2
+ embedding_values = (
+ (1., 2.), # id 0
+ (3., 5.), # id 1
+ (7., 11.) # id 2
+ )
+
+ def _initializer(shape, dtype, partition_info):
+ self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
+ self.assertEqual(dtypes.float32, dtype)
+ self.assertIsNone(partition_info)
+ return embedding_values
+
+ # Build columns.
+ categorical_column_a = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ categorical_column_b = fc.categorical_column_with_identity(
+ key='bbb', num_buckets=vocabulary_size)
+ embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
+ [categorical_column_a, categorical_column_b],
+ dimension=embedding_dimension,
+ initializer=_initializer)
+
+ fc.input_layer(
+ input_features, [embedding_column_a, embedding_column_b],
+ weight_collections=('my_vars',))
+
+ # Assert expected embedding variable and lookups.
+ global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ self.assertItemsEqual(
+ ('input_layer/aaa_bbb_shared_embedding/embedding_weights:0',),
+ tuple(v.name for v in global_vars))
+ my_vars = ops.get_collection('my_vars')
+ self.assertItemsEqual(
+ ('input_layer/aaa_bbb_shared_embedding/embedding_weights:0',),
+ tuple(v.name for v in my_vars))
+
+ def test_get_dense_tensor_placeholder_inputs(self):
+ # Inputs.
+ vocabulary_size = 3
+ # -1 values are ignored.
+ input_a = np.array(
+ [[2, -1, -1], # example 0, ids [2]
+ [0, 1, -1]]) # example 1, ids [0, 1]
+ input_b = np.array(
+ [[0, -1, -1], # example 0, ids [0]
+ [-1, -1, -1]]) # example 1, ids []
+ # Specify shape, because dense input must have rank specified.
+ input_a_placeholder = array_ops.placeholder(
+ dtype=dtypes.int64, shape=[None, 3])
+ input_b_placeholder = array_ops.placeholder(
+ dtype=dtypes.int64, shape=[None, 3])
+ input_features = {
+ 'aaa': input_a_placeholder,
+ 'bbb': input_b_placeholder,
+ }
+ feed_dict = {
+ input_a_placeholder: input_a,
+ input_b_placeholder: input_b,
+ }
+
+ # Embedding variable.
+ embedding_dimension = 2
+ embedding_values = (
+ (1., 2.), # id 0
+ (3., 5.), # id 1
+ (7., 11.) # id 2
+ )
+ def _initializer(shape, dtype, partition_info):
+ self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
+ self.assertEqual(dtypes.float32, dtype)
+ self.assertIsNone(partition_info)
+ return embedding_values
+
+ # Build columns.
+ categorical_column_a = fc.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ categorical_column_b = fc.categorical_column_with_identity(
+ key='bbb', num_buckets=vocabulary_size)
+ embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
+ [categorical_column_a, categorical_column_b],
+ dimension=embedding_dimension, initializer=_initializer)
+ state_manager = _TestSharedEmbeddingStateManager()
+
+ # Provide sparse input and get dense result.
+ embedding_lookup_a = embedding_column_a.get_dense_tensor(
+ FeatureTransformationCache(input_features), state_manager)
+ embedding_lookup_b = embedding_column_b.get_dense_tensor(
+ FeatureTransformationCache(input_features), state_manager)
+
+ with _initialized_session() as sess:
+ sess.run([embedding_lookup_a, embedding_lookup_b], feed_dict=feed_dict)
+
+ def test_linear_model(self):
+ # Inputs.
+ batch_size = 2
+ vocabulary_size = 3
+ # -1 values are ignored.
+ input_a = np.array(
+ [[2, -1, -1], # example 0, ids [2]
+ [0, 1, -1]]) # example 1, ids [0, 1]
+ input_b = np.array(
+ [[0, -1, -1], # example 0, ids [0]
+ [-1, -1, -1]]) # example 1, ids []
+
+ # Embedding variable.
+ embedding_dimension = 2
+ embedding_shape = (vocabulary_size, embedding_dimension)
+ zeros_embedding_values = np.zeros(embedding_shape)
+ def _initializer(shape, dtype, partition_info):
+ self.assertAllEqual(embedding_shape, shape)
+ self.assertEqual(dtypes.float32, dtype)
+ self.assertIsNone(partition_info)
+ return zeros_embedding_values
+
+ # Build columns.
+ categorical_column_a = fc_old.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ categorical_column_b = fc_old.categorical_column_with_identity(
+ key='bbb', num_buckets=vocabulary_size)
+ embedding_column_a, embedding_column_b = fc_old.shared_embedding_columns(
+ [categorical_column_a, categorical_column_b],
+ dimension=embedding_dimension,
+ initializer=_initializer)
+
+ with ops.Graph().as_default():
+ predictions = fc.linear_model({
+ categorical_column_a.name: input_a,
+ categorical_column_b.name: input_b,
+ }, (embedding_column_a, embedding_column_b))
+ # The linear weight variable names do not follow the column names. This is
+ # a rare use case, though, and fixing it would add too much complexity to
+ # the code.
+ expected_var_names = (
+ 'linear_model/bias_weights:0',
+ 'linear_model/aaa_bbb_shared_embedding/weights:0',
+ 'linear_model/aaa_bbb_shared_embedding/embedding_weights:0',
+ 'linear_model/aaa_bbb_shared_embedding_1/weights:0',
+ )
+ self.assertItemsEqual(
+ expected_var_names,
+ [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
+ trainable_vars = {
+ v.name: v for v in ops.get_collection(
+ ops.GraphKeys.TRAINABLE_VARIABLES)
+ }
+ self.assertItemsEqual(expected_var_names, trainable_vars.keys())
+ bias = trainable_vars['linear_model/bias_weights:0']
+ embedding_weights = trainable_vars[
+ 'linear_model/aaa_bbb_shared_embedding/embedding_weights:0']
+ linear_weights_a = trainable_vars[
+ 'linear_model/aaa_bbb_shared_embedding/weights:0']
+ linear_weights_b = trainable_vars[
+ 'linear_model/aaa_bbb_shared_embedding_1/weights:0']
+ with _initialized_session():
+ # Predictions with all zero weights.
+ self.assertAllClose(np.zeros((1,)), bias.eval())
+ self.assertAllClose(zeros_embedding_values, embedding_weights.eval())
+ self.assertAllClose(
+ np.zeros((embedding_dimension, 1)), linear_weights_a.eval())
+ self.assertAllClose(
+ np.zeros((embedding_dimension, 1)), linear_weights_b.eval())
+ self.assertAllClose(np.zeros((batch_size, 1)), predictions.eval())
+
+ # Predictions with all non-zero weights.
+ embedding_weights.assign((
+ (1., 2.), # id 0
+ (3., 5.), # id 1
+ (7., 11.) # id 2
+ )).eval()
+ linear_weights_a.assign(((4.,), (6.,))).eval()
+ # example 0, ids [2], embedding[0] = [7, 11]
+ # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
+ # sum(embeddings * linear_weights)
+ # = [4*7 + 6*11, 4*2 + 6*3.5] = [94, 29]
+ linear_weights_b.assign(((3.,), (5.,))).eval()
+ # example 0, ids [0], embedding[0] = [1, 2]
+ # example 1, ids [], embedding[1] = [0, 0]
+ # sum(embeddings * linear_weights)
+ # = [3*1 + 5*2, 3*0 + 5*0] = [13, 0]
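+ # The prediction sums both columns' contributions: [94 + 13, 29 + 0].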
+ self.assertAllClose([[94. + 13.], [29.]], predictions.eval())
+
+ def test_keras_linear_model(self):
+ # Inputs.
+ batch_size = 2
+ vocabulary_size = 3
+ # -1 values are ignored.
+ input_a = np.array(
+ [[2, -1, -1], # example 0, ids [2]
+ [0, 1, -1]]) # example 1, ids [0, 1]
+ input_b = np.array(
+ [[0, -1, -1], # example 0, ids [0]
+ [-1, -1, -1]]) # example 1, ids []
+
+ # Embedding variable.
+ embedding_dimension = 2
+ embedding_shape = (vocabulary_size, embedding_dimension)
+ zeros_embedding_values = np.zeros(embedding_shape)
+
+ def _initializer(shape, dtype, partition_info):
+ self.assertAllEqual(embedding_shape, shape)
+ self.assertEqual(dtypes.float32, dtype)
+ self.assertIsNone(partition_info)
+ return zeros_embedding_values
+
+ # Build columns.
+ categorical_column_a = fc_old.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ categorical_column_b = fc_old.categorical_column_with_identity(
+ key='bbb', num_buckets=vocabulary_size)
+ embedding_column_a, embedding_column_b = fc_old.shared_embedding_columns(
+ [categorical_column_a, categorical_column_b],
+ dimension=embedding_dimension,
+ initializer=_initializer)
+
+ with ops.Graph().as_default():
+ predictions = get_keras_linear_model_predictions({
+ categorical_column_a.name: input_a,
+ categorical_column_b.name: input_b,
+ }, (embedding_column_a, embedding_column_b))
+ # The linear weight variable names do not follow the column names. This is
+ # a rare use case, though, and fixing it would add too much complexity to
+ # the code.
+ expected_var_names = (
+ 'linear_model/bias_weights:0',
+ 'linear_model/aaa_bbb_shared_embedding/weights:0',
+ 'linear_model/aaa_bbb_shared_embedding/embedding_weights:0',
+ 'linear_model/aaa_bbb_shared_embedding_1/weights:0',
+ )
+ self.assertItemsEqual(
+ expected_var_names,
+ [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
+ trainable_vars = {
+ v.name: v
+ for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ }
+ self.assertItemsEqual(expected_var_names, trainable_vars.keys())
+ bias = trainable_vars['linear_model/bias_weights:0']
+ embedding_weights = trainable_vars[
+ 'linear_model/aaa_bbb_shared_embedding/embedding_weights:0']
+ linear_weights_a = trainable_vars[
+ 'linear_model/aaa_bbb_shared_embedding/weights:0']
+ linear_weights_b = trainable_vars[
+ 'linear_model/aaa_bbb_shared_embedding_1/weights:0']
+ with _initialized_session():
+ # Predictions with all zero weights.
+ self.assertAllClose(np.zeros((1,)), bias.eval())
+ self.assertAllClose(zeros_embedding_values, embedding_weights.eval())
+ self.assertAllClose(
+ np.zeros((embedding_dimension, 1)), linear_weights_a.eval())
+ self.assertAllClose(
+ np.zeros((embedding_dimension, 1)), linear_weights_b.eval())
+ self.assertAllClose(np.zeros((batch_size, 1)), predictions.eval())
+
+ # Predictions with all non-zero weights.
+ embedding_weights.assign((
+ (1., 2.), # id 0
+ (3., 5.), # id 1
+ (7., 11.) # id 2
+ )).eval()
+ linear_weights_a.assign(((4.,), (6.,))).eval()
+ # example 0, ids [2], embedding[0] = [7, 11]
+ # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
+ # sum(embeddings * linear_weights)
+ # = [4*7 + 6*11, 4*2 + 6*3.5] = [94, 29]
+ linear_weights_b.assign(((3.,), (5.,))).eval()
+ # example 0, ids [0], embedding[0] = [1, 2]
+ # example 1, ids [], embedding[1] = [0, 0]
+ # sum(embeddings * linear_weights)
+ # = [3*1 + 5*2, 3*0 + 5*0] = [13, 0]
+ self.assertAllClose([[94. + 13.], [29.]], predictions.eval())
+
+ def _test_input_layer(self, trainable=True):
+ # Inputs.
+ vocabulary_size = 3
+ sparse_input_a = sparse_tensor.SparseTensorValue(
+ # example 0, ids [2]
+ # example 1, ids [0, 1]
+ indices=((0, 0), (1, 0), (1, 4)),
+ values=(2, 0, 1),
+ dense_shape=(2, 5))
+ sparse_input_b = sparse_tensor.SparseTensorValue(
+ # example 0, ids [0]
+ # example 1, ids []
+ indices=((0, 0),),
+ values=(0,),
+ dense_shape=(2, 5))
+
+ # Embedding variable.
+ embedding_dimension = 2
+ embedding_values = (
+ (1., 2.), # id 0
+ (3., 5.), # id 1
+ (7., 11.) # id 2
+ )
+ def _initializer(shape, dtype, partition_info):
+ self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
+ self.assertEqual(dtypes.float32, dtype)
+ self.assertIsNone(partition_info)
+ return embedding_values
+
+ # Expected lookup result, using combiner='mean'.
+ expected_lookups = (
+ # example 0:
+ # A ids [2], embedding = [7, 11]
+ # B ids [0], embedding = [1, 2]
+ (7., 11., 1., 2.),
+ # example 1:
+ # A ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
+ # B ids [], embedding = [0, 0]
+ (2., 3.5, 0., 0.),
+ )
+
+ # Build columns.
+ categorical_column_a = fc_old.categorical_column_with_identity(
+ key='aaa', num_buckets=vocabulary_size)
+ categorical_column_b = fc_old.categorical_column_with_identity(
+ key='bbb', num_buckets=vocabulary_size)
+ embedding_column_a, embedding_column_b = fc_old.shared_embedding_columns(
+ [categorical_column_a, categorical_column_b],
+ dimension=embedding_dimension,
+ initializer=_initializer,
+ trainable=trainable)
+
+ # Provide sparse input and get dense result.
+ input_layer = fc.input_layer(
+ features={'aaa': sparse_input_a, 'bbb': sparse_input_b},
+ feature_columns=(embedding_column_b, embedding_column_a))
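+ # input_layer orders columns by name, so the 'aaa' embedding comes before
+ # the 'bbb' embedding in the output even though the columns were passed in
+ # (b, a) order.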
+
+ # Assert expected embedding variable and lookups.
+ global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ self.assertItemsEqual(
+ ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
+ tuple([v.name for v in global_vars]))
+ trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ if trainable:
+ self.assertItemsEqual(
+ ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
+ tuple([v.name for v in trainable_vars]))
+ else:
+ self.assertItemsEqual([], tuple([v.name for v in trainable_vars]))
+ shared_embedding_vars = global_vars
+ with _initialized_session():
+ self.assertAllEqual(embedding_values, shared_embedding_vars[0].eval())
+ self.assertAllEqual(expected_lookups, input_layer.eval())
+
+ def test_input_layer(self):
+ self._test_input_layer()
+
+ def test_input_layer_no_trainable(self):
+ self._test_input_layer(trainable=False)
+
+
+class WeightedCategoricalColumnTest(test.TestCase):
+
+ def test_defaults(self):
+ column = fc.weighted_categorical_column(
+ categorical_column=fc.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ self.assertEqual('ids_weighted_by_values', column.name)
+ self.assertEqual(3, column.num_buckets)
+ self.assertEqual({
+ 'ids': parsing_ops.VarLenFeature(dtypes.int64),
+ 'values': parsing_ops.VarLenFeature(dtypes.float32)
+ }, column.parse_example_spec)
+
+ def test_deep_copy(self):
+ """Tests deepcopy of categorical_column_with_hash_bucket."""
+ original = fc.weighted_categorical_column(
+ categorical_column=fc.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ for column in (original, copy.deepcopy(original)):
+ self.assertEqual('ids_weighted_by_values', column.name)
+ self.assertEqual(3, column.num_buckets)
+ self.assertEqual({
+ 'ids': parsing_ops.VarLenFeature(dtypes.int64),
+ 'values': parsing_ops.VarLenFeature(dtypes.float32)
+ }, column.parse_example_spec)
+
+ def test_invalid_dtype_none(self):
+ with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
+ fc.weighted_categorical_column(
+ categorical_column=fc.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values',
+ dtype=None)
+
+ def test_invalid_dtype_string(self):
+ with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
+ fc.weighted_categorical_column(
+ categorical_column=fc.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values',
+ dtype=dtypes.string)
+
+ def test_invalid_input_dtype(self):
+ column = fc.weighted_categorical_column(
+ categorical_column=fc.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ strings = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('omar', 'stringer', 'marlo'),
+ dense_shape=(2, 2))
+ with self.assertRaisesRegexp(ValueError, 'Bad dtype'):
+ _transform_features({'ids': strings, 'values': strings}, (column,), None)
+
+ def test_column_name_collision(self):
+ with self.assertRaisesRegexp(ValueError, r'Parse config.*already exists'):
+ fc.weighted_categorical_column(
+ categorical_column=fc.categorical_column_with_identity(
+ key='aaa', num_buckets=3),
+ weight_feature_key='aaa').parse_example_spec()
+
+ def test_missing_weights(self):
+ column = fc.weighted_categorical_column(
+ categorical_column=fc.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=('omar', 'stringer', 'marlo'),
+ dense_shape=(2, 2))
+ with self.assertRaisesRegexp(
+ ValueError, 'values is not in features dictionary'):
+ _transform_features({'ids': inputs}, (column,), None)
+
+ def test_parse_example(self):
+ a = fc.categorical_column_with_vocabulary_list(
+ key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
+ a_weighted = fc.weighted_categorical_column(a, weight_feature_key='weights')
+ data = example_pb2.Example(features=feature_pb2.Features(
+ feature={
+ 'aaa':
+ feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
+ value=[b'omar', b'stringer'])),
+ 'weights':
+ feature_pb2.Feature(float_list=feature_pb2.FloatList(
+ value=[1., 10.]))
+ }))
+ features = parsing_ops.parse_example(
+ serialized=[data.SerializeToString()],
+ features=fc.make_parse_example_spec([a_weighted]))
+ self.assertIn('aaa', features)
+ self.assertIn('weights', features)
+ with self.test_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=[[0, 0], [0, 1]],
+ values=np.array([b'omar', b'stringer'], dtype=np.object_),
+ dense_shape=[1, 2]),
+ features['aaa'].eval())
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=[[0, 0], [0, 1]],
+ values=np.array([1., 10.], dtype=np.float32),
+ dense_shape=[1, 2]),
+ features['weights'].eval())
+
+ def test_transform_features(self):
+ column = fc.weighted_categorical_column(
+ categorical_column=fc.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 1, 0),
+ dense_shape=(2, 2))
+ weights = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0.5, 1.0, 0.1),
+ dense_shape=(2, 2))
+ id_tensor, weight_tensor = _transform_features({
+ 'ids': inputs,
+ 'values': weights,
+ }, (column,), None)[column]
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array(inputs.values, dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_tensor.eval())
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=weights.indices,
+ values=np.array(weights.values, dtype=np.float32),
+ dense_shape=weights.dense_shape),
+ weight_tensor.eval())
+
+ def test_transform_features_dense_input(self):
+ column = fc.weighted_categorical_column(
+ categorical_column=fc.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ weights = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0.5, 1.0, 0.1),
+ dense_shape=(2, 2))
+ id_tensor, weight_tensor = _transform_features({
+ 'ids': ((0, -1), (1, 0)),
+ 'values': weights,
+ }, (column,), None)[column]
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=np.array((0, 1, 0), dtype=np.int64),
+ dense_shape=(2, 2)),
+ id_tensor.eval())
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=weights.indices,
+ values=np.array(weights.values, dtype=np.float32),
+ dense_shape=weights.dense_shape),
+ weight_tensor.eval())
+
+ def test_transform_features_dense_weights(self):
+ column = fc.weighted_categorical_column(
+ categorical_column=fc.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ inputs = sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(2, 1, 0),
+ dense_shape=(2, 2))
+ id_tensor, weight_tensor = _transform_features({
+ 'ids': inputs,
+ 'values': ((.5, 0.), (1., .1)),
+ }, (column,), None)[column]
+ with _initialized_session():
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=inputs.indices,
+ values=np.array(inputs.values, dtype=np.int64),
+ dense_shape=inputs.dense_shape),
+ id_tensor.eval())
+ _assert_sparse_tensor_value(
+ self,
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=np.array((.5, 1., .1), dtype=np.float32),
+ dense_shape=(2, 2)),
+ weight_tensor.eval())
+
+ def test_keras_linear_model(self):
+ column = fc_old.weighted_categorical_column(
+ categorical_column=fc_old.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ with ops.Graph().as_default():
+ predictions = get_keras_linear_model_predictions({
+ 'ids':
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 2, 1),
+ dense_shape=(2, 2)),
+ 'values':
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(.5, 1., .1),
+ dense_shape=(2, 2))
+ }, (column,))
+ bias = get_linear_model_bias()
+ weight_var = get_linear_model_column_var(column)
+ with _initialized_session():
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ weight_var.assign(((1.,), (2.,), (3.,))).eval()
+ # weight_var[0] * weights[0, 0] = 1 * .5 = .5
+ # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
+ # = 3*1 + 2*.1 = 3+.2 = 3.2
+ self.assertAllClose(((.5,), (3.2,)), predictions.eval())
+
+ def test_keras_linear_model_mismatched_shape(self):
+ column = fc_old.weighted_categorical_column(
+ categorical_column=fc_old.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ with ops.Graph().as_default():
+ with self.assertRaisesRegexp(ValueError,
+ r'Dimensions.*are not compatible'):
+ get_keras_linear_model_predictions({
+ 'ids':
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 2, 1),
+ dense_shape=(2, 2)),
+ 'values':
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (0, 1), (1, 0), (1, 1)),
+ values=(.5, 11., 1., .1),
+ dense_shape=(2, 2))
+ }, (column,))
+
+ def test_keras_linear_model_mismatched_dense_values(self):
+ column = fc_old.weighted_categorical_column(
+ categorical_column=fc_old.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ with ops.Graph().as_default():
+ predictions = get_keras_linear_model_predictions(
+ {
+ 'ids':
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 2, 1),
+ dense_shape=(2, 2)),
+ 'values': ((.5,), (1.,))
+ }, (column,),
+ sparse_combiner='mean')
+ # Disable the constant folding optimizer here, since with it enabled the
+ # error message differs between CPU and GPU.
+ config = config_pb2.ConfigProto()
+ config.graph_options.rewrite_options.constant_folding = (
+ rewriter_config_pb2.RewriterConfig.OFF)
+ with _initialized_session(config):
+ with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
+ predictions.eval()
+
+ def test_keras_linear_model_mismatched_dense_shape(self):
+ column = fc_old.weighted_categorical_column(
+ categorical_column=fc_old.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ with ops.Graph().as_default():
+ predictions = get_keras_linear_model_predictions({
+ 'ids':
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 2, 1),
+ dense_shape=(2, 2)),
+ 'values': ((.5,), (1.,), (.1,))
+ }, (column,))
+ bias = get_linear_model_bias()
+ weight_var = get_linear_model_column_var(column)
+ with _initialized_session():
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ weight_var.assign(((1.,), (2.,), (3.,))).eval()
+ # weight_var[0] * weights[0, 0] = 1 * .5 = .5
+ # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
+ # = 3*1 + 2*.1 = 3+.2 = 3.2
+ self.assertAllClose(((.5,), (3.2,)), predictions.eval())
+
+ def test_linear_model(self):
+ column = fc_old.weighted_categorical_column(
+ categorical_column=fc_old.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ with ops.Graph().as_default():
+ predictions = fc.linear_model({
+ 'ids': sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 2, 1),
+ dense_shape=(2, 2)),
+ 'values': sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(.5, 1., .1),
+ dense_shape=(2, 2))
+ }, (column,))
+ bias = get_linear_model_bias()
+ weight_var = get_linear_model_column_var(column)
+ with _initialized_session():
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ weight_var.assign(((1.,), (2.,), (3.,))).eval()
+ # weight_var[0] * weights[0, 0] = 1 * .5 = .5
+ # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
+ # = 3*1 + 2*.1 = 3+.2 = 3.2
+ self.assertAllClose(((.5,), (3.2,)), predictions.eval())
+
+ def test_linear_model_mismatched_shape(self):
+ column = fc_old.weighted_categorical_column(
+ categorical_column=fc_old.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ with ops.Graph().as_default():
+ with self.assertRaisesRegexp(
+ ValueError, r'Dimensions.*are not compatible'):
+ fc.linear_model({
+ 'ids': sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 2, 1),
+ dense_shape=(2, 2)),
+ 'values': sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (0, 1), (1, 0), (1, 1)),
+ values=(.5, 11., 1., .1),
+ dense_shape=(2, 2))
+ }, (column,))
+
+ def test_linear_model_mismatched_dense_values(self):
+ column = fc_old.weighted_categorical_column(
+ categorical_column=fc_old.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ with ops.Graph().as_default():
+ predictions = fc.linear_model(
+ {
+ 'ids':
+ sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 2, 1),
+ dense_shape=(2, 2)),
+ 'values': ((.5,), (1.,))
+ }, (column,),
+ sparse_combiner='mean')
+ # Disable the constant folding optimizer here, since with it enabled the
+ # error message differs between CPU and GPU.
+ config = config_pb2.ConfigProto()
+ config.graph_options.rewrite_options.constant_folding = (
+ rewriter_config_pb2.RewriterConfig.OFF)
+ with _initialized_session(config):
+ with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
+ predictions.eval()
+
+ def test_linear_model_mismatched_dense_shape(self):
+ column = fc_old.weighted_categorical_column(
+ categorical_column=fc_old.categorical_column_with_identity(
+ key='ids', num_buckets=3),
+ weight_feature_key='values')
+ with ops.Graph().as_default():
+ predictions = fc.linear_model({
+ 'ids': sparse_tensor.SparseTensorValue(
+ indices=((0, 0), (1, 0), (1, 1)),
+ values=(0, 2, 1),
+ dense_shape=(2, 2)),
+ 'values': ((.5,), (1.,), (.1,))
+ }, (column,))
+ bias = get_linear_model_bias()
+ weight_var = get_linear_model_column_var(column)
+ with _initialized_session():
+ self.assertAllClose((0.,), bias.eval())
+ self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
+ self.assertAllClose(((0.,), (0.,)), predictions.eval())
+ weight_var.assign(((1.,), (2.,), (3.,))).eval()
+ # weight_var[0] * weights[0, 0] = 1 * .5 = .5
+ # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
+ # = 3*1 + 2*.1 = 3+.2 = 3.2
+ self.assertAllClose(((.5,), (3.2,)), predictions.eval())
+
+ # TODO(ptucker): Add test with embedding of weighted categorical.
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/python/framework/common_shapes.py b/tensorflow/python/framework/common_shapes.py
index 3c5aebbce8..40788e24c4 100644
--- a/tensorflow/python/framework/common_shapes.py
+++ b/tensorflow/python/framework/common_shapes.py
@@ -28,6 +28,18 @@ from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
+def has_fully_defined_shape(tensor):
+ """Returns true if tensor has a fully defined shape."""
+ return isinstance(tensor, ops.EagerTensor) or tensor.shape.is_fully_defined()
+
+
+def rank(tensor):
+ """Return a rank if it is a tensor, else return None."""
+ if isinstance(tensor, ops.Tensor):
+ return tensor._rank() # pylint: disable=protected-access
+ return None
+
+
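+# Example (illustrative sketch, using hypothetical inputs): for a placeholder
+# with a partially known shape, the helpers above behave as follows:
+#
+#   x = array_ops.placeholder(dtypes.float32, shape=[None, 3])
+#   has_fully_defined_shape(x)  # False: the first dimension is unknown
+#   rank(x)                     # 2
+
+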
def scalar_shape(unused_op):
"""Shape function for ops that output a scalar value."""
return [tensor_shape.scalar()]
diff --git a/tensorflow/python/framework/error_interpolation.py b/tensorflow/python/framework/error_interpolation.py
new file mode 100644
index 0000000000..72d5dc99a8
--- /dev/null
+++ b/tensorflow/python/framework/error_interpolation.py
@@ -0,0 +1,170 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Function for interpolating formatted errors from the TensorFlow runtime.
+
+Exposes the function `interpolate` to interpolate messages with tags of the form
+^^type:name:format^^.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import itertools
+import os
+import re
+import string
+
+import six
+
+from tensorflow.python.util import tf_stack
+
+
+_NAME_REGEX = r"[A-Za-z0-9.][A-Za-z0-9_.\-/]*?"
+_FORMAT_REGEX = r"[A-Za-z0-9_.\-/${}:]+"
+_TAG_REGEX = r"\^\^({name}):({name}):({fmt})\^\^".format(
+ name=_NAME_REGEX, fmt=_FORMAT_REGEX)
+_INTERPOLATION_REGEX = r"^(.*?)({tag})".format(tag=_TAG_REGEX)
+_INTERPOLATION_PATTERN = re.compile(_INTERPOLATION_REGEX)
+
+_ParseTag = collections.namedtuple("_ParseTag", ["type", "name", "format"])
+
+_BAD_FILE_SUBSTRINGS = [
+ os.path.join("tensorflow", "python"),
+ "<embedded",
+]
+
+
+def _parse_message(message):
+ """Parses the message.
+
+ Splits the message into separators and tags. Tags are named tuples
+ representing the string ^^type:name:format^^ and they are separated by
+ separators. For example, in
+ "123^^node:Foo:${file}^^456^^node:Bar:${line}^^789", there are two tags and
+ three separators. The separators are the numeric characters.
+
+ Supported tags after node:<node_name>:
+ file: Replaced with the filename in which the node was defined.
+ line: Replaced with the line number at which the node was defined.
+
+ Args:
+ message: String to parse
+
+ Returns:
+ (list of separator strings, list of _ParseTags).
+
+ For example, if message is "123^^node:Foo:${file}^^456" then this function
+ returns (["123", "456"], [_ParseTag("node", "Foo", "${file}")])
+ """
+ seps = []
+ tags = []
+ pos = 0
+ while pos < len(message):
+ match = re.match(_INTERPOLATION_PATTERN, message[pos:])
+ if match:
+ seps.append(match.group(1))
+ tags.append(_ParseTag(match.group(3), match.group(4), match.group(5)))
+ pos += match.end()
+ else:
+ break
+ seps.append(message[pos:])
+ return seps, tags
+
+
+def _get_field_dict_from_traceback(tf_traceback, frame_index):
+ """Convert traceback elements into interpolation dictionary and return."""
+ frame = tf_traceback[frame_index]
+ return {
+ "file": frame[tf_stack.TB_FILENAME],
+ "line": frame[tf_stack.TB_LINENO],
+ }
+
+
+def _find_index_of_defining_frame_for_op(op):
+ """Return index in op._traceback with first 'useful' frame.
+
+ This method reads through the stack stored in op._traceback looking for the
+ innermost frame which (hopefully) belongs to the caller. It accomplishes this
+ by rejecting frames whose filename appears to come from TensorFlow (see
+ error_interpolation._BAD_FILE_SUBSTRINGS for the list of rejected substrings).
+
+ Args:
+ op: the Operation object for which we would like to find the defining
+ location.
+
+ Returns:
+ Integer index into op._traceback where the first non-TF file was found
+ (innermost to outermost), or 0 (for the outermost stack frame) if all files
+ came from TensorFlow.
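+
+ For example (illustrative): if _BAD_FILE_SUBSTRINGS were
+ ["tensorflow/python"] and op._traceback held [user_main.py (outermost),
+ tensorflow/python/ops.py (innermost)], the innermost frame would be
+ rejected and index 0 (the user_main.py frame) returned.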
+ """
+ # pylint: disable=protected-access
+ # Index 0 of tf_traceback is the outermost frame.
+ tf_traceback = tf_stack.convert_stack(op._traceback)
+ size = len(tf_traceback)
+ # pylint: enable=protected-access
+ filenames = [frame[tf_stack.TB_FILENAME] for frame in tf_traceback]
+ # We process the filenames from the innermost frame to outermost.
+ for idx, filename in enumerate(reversed(filenames)):
+ contains_bad_substrings = [ss in filename for ss in _BAD_FILE_SUBSTRINGS]
+ if not any(contains_bad_substrings):
+ return size - idx - 1
+ return 0
+
+
+def interpolate(error_message, graph):
+ """Interpolates an error message.
+
+ The error message can contain tags of the form ^^type:name:format^^ which will
+ be replaced.
+
+ Args:
+ error_message: A string to interpolate.
+ graph: ops.Graph object containing all nodes referenced in the error
+ message.
+
+ Returns:
+ The string with tags of the form ^^type:name:format^^ interpolated.
+ """
+ seps, tags = _parse_message(error_message)
+
+ node_name_to_substitution_dict = {}
+ for name in [t.name for t in tags]:
+ try:
+ op = graph.get_operation_by_name(name)
+ except KeyError:
+ op = None
+
+ if op:
+ frame_index = _find_index_of_defining_frame_for_op(op)
+ # pylint: disable=protected-access
+ field_dict = _get_field_dict_from_traceback(op._traceback, frame_index)
+ # pylint: enable=protected-access
+ else:
+ field_dict = {
+ "file": "<NA>",
+ "line": "<NA>",
+ "func": "<NA>",
+ "code": None,
+ }
+ node_name_to_substitution_dict[name] = field_dict
+
+ subs = [
+ string.Template(tag.format).safe_substitute(
+ node_name_to_substitution_dict[tag.name]) for tag in tags
+ ]
+ return "".join(
+ itertools.chain(*six.moves.zip_longest(seps, subs, fillvalue="")))
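+
+
+# Example usage (illustrative sketch; the node name "Foo" is hypothetical):
+#
+#   g = ops.Graph()
+#   with g.as_default():
+#     constant_op.constant(1, name="Foo")
+#   message = "Error seen at ^^node:Foo:${file}:${line}^^."
+#   interpolate(message, g)
+#   # => "Error seen at <defining file>:<line number>."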
diff --git a/tensorflow/python/framework/error_interpolation_test.py b/tensorflow/python/framework/error_interpolation_test.py
new file mode 100644
index 0000000000..b6615317d1
--- /dev/null
+++ b/tensorflow/python/framework/error_interpolation_test.py
@@ -0,0 +1,138 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for tensorflow.python.framework.errors."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import error_interpolation
+from tensorflow.python.platform import test
+from tensorflow.python.util import tf_stack
+
+
+def _make_frame_with_filename(op, idx, filename):
+ """Return a copy of an existing stack frame with a new filename."""
+ stack_frame = list(op._traceback[idx])
+ stack_frame[tf_stack.TB_FILENAME] = filename
+ return tuple(stack_frame)
+
+
+def _modify_op_stack_with_filenames(op, num_user_frames, user_filename,
+ num_inner_tf_frames):
+ """Replace op._traceback with a new traceback using special filenames."""
+ tf_filename = "%d" + error_interpolation._BAD_FILE_SUBSTRINGS[0]
+ # Use the caller-supplied filename; "%d" is filled with the frame index.
+ user_filename = os.path.join("%d", user_filename)
+
+ num_requested_frames = num_user_frames + num_inner_tf_frames
+ num_actual_frames = len(op._traceback)
+ num_outer_frames = num_actual_frames - num_requested_frames
+ assert num_requested_frames <= num_actual_frames, "Too few real frames."
+
+ # The op's traceback has outermost frame at index 0.
+ stack = []
+ for idx in range(0, num_outer_frames):
+ stack.append(op._traceback[idx])
+ for idx in range(len(stack), len(stack)+num_user_frames):
+ stack.append(_make_frame_with_filename(op, idx, user_filename % idx))
+ for idx in range(len(stack), len(stack)+num_inner_tf_frames):
+ stack.append(_make_frame_with_filename(op, idx, tf_filename % idx))
+ op._traceback = stack
+
+
+class InterpolateTest(test.TestCase):
+
+ def setUp(self):
+ # Add nodes to the graph for retrieval by name later.
+ constant_op.constant(1, name="One")
+ constant_op.constant(2, name="Two")
+ three = constant_op.constant(3, name="Three")
+ self.graph = three.graph
+
+ # Change the list of bad file substrings so that constant_op.py is chosen
+ # as the defining stack frame for constant_op.constant ops.
+ self.old_bad_strings = error_interpolation._BAD_FILE_SUBSTRINGS
+ error_interpolation._BAD_FILE_SUBSTRINGS = [
+ "%sops.py" % os.sep,
+ "%sutil" % os.sep,
+ ]
+
+ def tearDown(self):
+ error_interpolation._BAD_FILE_SUBSTRINGS = self.old_bad_strings
+
+ def testFindIndexOfDefiningFrameForOp(self):
+ local_op = constant_op.constant(42).op
+ user_filename = "hope.py"
+ _modify_op_stack_with_filenames(local_op,
+ num_user_frames=3,
+ user_filename=user_filename,
+ num_inner_tf_frames=5)
+ idx = error_interpolation._find_index_of_defining_frame_for_op(local_op)
+ # Expected frame is 6th from the end because there are 5 inner frames with
+ # TF filenames.
+ expected_frame = len(local_op._traceback) - 6
+ self.assertEqual(expected_frame, idx)
+
+ def testFindIndexOfDefiningFrameForOpReturnsZeroOnError(self):
+ local_op = constant_op.constant(43).op
+ # Truncate stack to known length.
+ local_op._traceback = local_op._traceback[:7]
+ # Ensure all frames look like TF frames.
+ _modify_op_stack_with_filenames(local_op,
+ num_user_frames=0,
+ user_filename="user_file.py",
+ num_inner_tf_frames=7)
+ idx = error_interpolation._find_index_of_defining_frame_for_op(local_op)
+ self.assertEqual(0, idx)
+
+ def testNothingToDo(self):
+ normal_string = "This is just a normal string"
+ interpolated_string = error_interpolation.interpolate(normal_string,
+ self.graph)
+ self.assertEqual(interpolated_string, normal_string)
+
+ def testOneTag(self):
+ one_tag_string = "^^node:Two:${file}^^"
+ interpolated_string = error_interpolation.interpolate(one_tag_string,
+ self.graph)
+ self.assertTrue(interpolated_string.endswith("constant_op.py"),
+ "interpolated_string '%s' did not end with constant_op.py"
+ % interpolated_string)
+
+ def testOneTagWithAFakeNameResultsInPlaceholders(self):
+ one_tag_string = "^^node:MinusOne:${file}^^"
+ interpolated_string = error_interpolation.interpolate(one_tag_string,
+ self.graph)
+ self.assertEqual(interpolated_string, "<NA>")
+
+ def testTwoTagsNoSeps(self):
+ two_tags_no_seps = "^^node:One:${file}^^^^node:Three:${line}^^"
+ interpolated_string = error_interpolation.interpolate(two_tags_no_seps,
+ self.graph)
+ self.assertRegexpMatches(interpolated_string, "constant_op.py[0-9]+")
+
+ def testTwoTagsWithSeps(self):
+ two_tags_with_seps = ";;;^^node:Two:${file}^^,,,^^node:Three:${line}^^;;;"
+ interpolated_string = error_interpolation.interpolate(two_tags_with_seps,
+ self.graph)
+ expected_regex = "^;;;.*constant_op.py,,,[0-9]*;;;$"
+ self.assertRegexpMatches(interpolated_string, expected_regex)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/python/framework/function_test.py b/tensorflow/python/framework/function_test.py
index 15e41ba91f..1707f929b8 100644
--- a/tensorflow/python/framework/function_test.py
+++ b/tensorflow/python/framework/function_test.py
@@ -537,19 +537,25 @@ class FunctionTest(test.TestCase):
def testResourceVarAsImplicitInput(self):
g = ops.Graph()
with g.as_default(), ops.device("cpu:0"):
+ expected_type = dtypes.float32
+ expected_shape = tensor_shape.TensorShape((4, 4))
v = variable_scope.get_variable(
- "var", (4, 4), dtypes.float32, use_resource=True)
+ "var", expected_shape, expected_type, use_resource=True)
@function.Defun()
def Foo():
- return array_ops.identity(v)
+ captured = array_ops.identity(v)
+ self.assertEqual(expected_type, captured.dtype)
+ self.assertEqual(expected_shape, captured.shape)
+ return captured, array_ops.shape(captured)
- y = v.value()
- z = Foo()
+ expected_val = v.value()
+ actual_val, actual_shape = Foo()
with self.test_session(graph=g):
v.initializer.run()
- self.assertAllEqual(y.eval(), z.eval())
+ self.assertAllEqual(expected_val.eval(), actual_val.eval())
+ self.assertAllEqual(expected_shape, actual_shape.eval())
def testDefineErrors(self):
with ops.Graph().as_default():
diff --git a/tensorflow/python/framework/importer.py b/tensorflow/python/framework/importer.py
index 72eb7e0eeb..699d2b70d1 100644
--- a/tensorflow/python/framework/importer.py
+++ b/tensorflow/python/framework/importer.py
@@ -407,11 +407,11 @@ def import_graph_def(graph_def,
_PopulateTFImportGraphDefOptions(options, prefix, input_map,
return_elements)
- # _ProcessNewOps mutates the new operations. _lock ensures a Session.run
- # call cannot occur between creating the TF_Operations in the
+ # _ProcessNewOps mutates the new operations. _mutation_lock ensures a
+ # Session.run call cannot occur between creating the TF_Operations in the
# TF_GraphImportGraphDefWithResults call and mutating them in
# _ProcessNewOps.
- with graph._lock: # pylint: disable=protected-access
+ with graph._mutation_lock(): # pylint: disable=protected-access
with c_api_util.tf_buffer(graph_def.SerializeToString()) as serialized:
try:
results = c_api.TF_GraphImportGraphDefWithResults(
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index 05f9ae21b1..a3b56b0f63 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -20,7 +20,6 @@ from __future__ import print_function
import collections
import copy
-import linecache
import os
import re
import sys
@@ -48,13 +47,16 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
+from tensorflow.python.util import tf_stack
from tensorflow.python.framework import tensor_shape
+from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
+from tensorflow.python.util import lock_util
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.tf_export import tf_export
@@ -705,7 +707,7 @@ class _EagerTensorBase(Tensor):
"""
if self.dtype == dtypes.resource:
raise ValueError("Resource handles are not convertible to numpy.")
- return self.cpu()._numpy() # pylint: disable=protected-access
+ return self._cpu_nograd()._numpy() # pylint: disable=protected-access
# __int__ and __float__ may copy the tensor to CPU and
# only work for scalars; values are cast as per numpy.
@@ -779,8 +781,8 @@ class _EagerTensorBase(Tensor):
def _override_operator(name, func):
setattr(_EagerTensorBase, name, func)
- def _copy(self, ctx=None, device_name=None):
- """Copies tensor to dest device."""
+ def _copy_nograd(self, ctx=None, device_name=None):
+ """Copies tensor to dest device, but doesn't record the operation."""
# pylint: disable=protected-access
# Creates a new tensor on the dest device.
if ctx is None:
@@ -792,7 +794,11 @@ class _EagerTensorBase(Tensor):
new_tensor = self._copy_to_device(context=ctx._handle, device=device_name)
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
+ return new_tensor
+ def _copy(self, ctx=None, device_name=None):
+ """Copies tensor to dest device."""
+ new_tensor = self._copy_nograd(ctx, device_name)
# Record the copy on tape and define backprop copy as well.
if context.executing_eagerly():
self_device = self.device
@@ -823,6 +829,16 @@ class _EagerTensorBase(Tensor):
"""Returns the number of Tensor dimensions."""
return self.shape.ndims
+ def _cpu_nograd(self):
+ """A copy of this Tensor with contents backed by host memory.
+
+ The copy cannot be differentiated through.
+
+ Returns:
+ A CPU-memory backed Tensor object with the same contents as this Tensor.
+ """
+ return self._copy_nograd(context.context(), "CPU:0")
+
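+ # Example (illustrative): fetch a host copy without recording the copy on
+ # the gradient tape:
+ #
+ #   t = constant_op.constant([1.0, 2.0])  # may live on an accelerator
+ #   host_copy = t._cpu_nograd()           # cannot be differentiated through
+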
def cpu(self):
"""A copy of this Tensor with contents backed by host memory."""
return self._copy(context.context(), "CPU:0")
@@ -1698,7 +1714,7 @@ class Operation(object):
self._id_value = self._graph._next_id() # pylint: disable=protected-access
self._original_op = original_op
- self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
+ self._traceback = tf_stack.extract_stack()
self._control_flow_context = self.graph._get_control_flow_context() # pylint: disable=protected-access
# Initialize self._c_op.
@@ -2139,7 +2155,7 @@ class Operation(object):
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
- return self._graph._convert_stack(self._traceback) # pylint: disable=protected-access
+ return tf_stack.convert_stack(self._traceback)
@property
def traceback_with_start_lines(self):
@@ -2148,9 +2164,8 @@ class Operation(object):
Returns:
A list of 5-tuples (filename, lineno, name, code, func_start_lineno).
"""
- return self._graph._convert_stack( # pylint: disable=protected-access
- self._traceback,
- include_func_start_lineno=True)
+ return tf_stack.convert_stack(self._traceback,
+ include_func_start_lineno=True)
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
@@ -2599,6 +2614,9 @@ def _name_from_scope_name(name):
return name[:-1] if (name and name[-1] == "/") else name
+_MUTATION_LOCK_GROUP = 0
+_SESSION_RUN_LOCK_GROUP = 1
+
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
@@ -2648,20 +2666,21 @@ class Graph(object):
def __init__(self):
"""Creates a new, empty Graph."""
- # Protects core state that can be returned via public accessors, as well as
- # synchronizes Session.run calls with methods that create and mutate ops
- # (e.g. Graph.create_op()). This synchronization is necessary because it's
- # illegal to modify an operation after it's been run. Thread-safety is
- # provided on a best-effort basis to support buggy programs, and is not
- # guaranteed by the public `tf.Graph` API.
- #
- # The lock must be reentrant because create_op can be called recursively due
- # to control flow. Without a reentrant lock, many methods would also need a
- # "locked" version or parameter (including generated code).
+ # Protects core state that can be returned via public accessors.
+ # Thread-safety is provided on a best-effort basis to support buggy
+ # programs, and is not guaranteed by the public `tf.Graph` API.
#
# NOTE(mrry): This does not protect the various stacks. A warning will
# be reported if these are used from multiple threads
self._lock = threading.RLock()
+ # The group lock synchronizes Session.run calls with methods that create
+ # and mutate ops (e.g. Graph.create_op()). This synchronization is
+ # necessary because it's illegal to modify an operation after it's been run.
+ # The group lock allows any number of threads to mutate ops concurrently,
+ # but while any modification is in progress, all Session.run calls have to
+ # wait. Similarly, while one or more Session.run calls are in progress, all
+ # ops that mutate the graph have to wait until those calls have finished.
+ self._group_lock = lock_util.GroupLock(num_groups=2)
self._nodes_by_id = dict() # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = dict() # GUARDED_BY(self._lock)
@@ -2706,7 +2725,7 @@ class Graph(object):
self._building_function = False
# Stack of colocate_with ops. After switch_to_thread_local(),
# self._thread_local._colocation_stack is used instead.
- self._graph_colocation_stack = []
+ self._graph_colocation_stack = traceable_stack.TraceableStack()
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = set()
# Set of operations that are dangerous to fetch!
@@ -2746,36 +2765,6 @@ class Graph(object):
"""Temporary hack; can be overridden to force C API usage."""
return _USE_C_API
- def _convert_stack(self, stack, include_func_start_lineno=False):
- """Converts a stack extracted using _extract_stack() to a traceback stack.
-
- Args:
- stack: A list of n 5-tuples,
- (filename, lineno, name, frame_globals, func_start_lineno).
- include_func_start_lineno: True if function start line number should be
- included as the 5th entry in return tuples.
-
- Returns:
- A list of n 4-tuples or 5-tuples
- (filename, lineno, name, code, [optional: func_start_lineno]), where the
- code tuple element is calculated from the corresponding elements of the
- input tuple.
- """
- ret = []
- for (filename, lineno, name, frame_globals, func_start_lineno,
- unused_frame_info) in stack:
- linecache.checkcache(filename)
- line = linecache.getline(filename, lineno, frame_globals)
- if line:
- line = line.strip()
- else:
- line = None
- if include_func_start_lineno:
- ret.append((filename, lineno, name, line, func_start_lineno))
- else:
- ret.append((filename, lineno, name, line))
- return ret
-
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
@@ -2783,63 +2772,23 @@ class Graph(object):
# This step makes a copy of the existing stack, and it also initializes
# self._thread_local._variable_creator_stack if it doesn't exist yet.
old = list(self._variable_creator_stack)
- self._thread_local._variable_creator_stack.append(creator)
+ self._thread_local._variable_creator_stack.append(creator) # pylint: disable=protected-access
try:
yield
finally:
- self._thread_local._variable_creator_stack = old
+ self._thread_local._variable_creator_stack = old # pylint: disable=protected-access
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
if not hasattr(self._thread_local, "_variable_creator_stack"):
- self._thread_local._variable_creator_stack = []
- return list(self._thread_local._variable_creator_stack)
+ self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access
+ return list(self._thread_local._variable_creator_stack) # pylint: disable=protected-access
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
- self._thread_local._variable_creator_stack = variable_creator_stack
-
- def _extract_stack(self):
- """A lightweight, extensible re-implementation of traceback.extract_stack.
-
- NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
- each stack frame using linecache, which results in an abundance of stat()
- calls. This implementation does not retrieve the code, and any consumer
- should apply _convert_stack to the result to obtain a traceback that can
- be formatted etc. using traceback methods.
-
- Derived classes can implement _extract_frame_info() to add extra information
- to the traceback.
-
- Returns:
- A list of 6-tuples
- (filename, lineno, name, frame_globals, func_start_lineno, custom_info)
- corresponding to the call stack of the current thread.
- """
- try:
- raise ZeroDivisionError
- except ZeroDivisionError:
- f = sys.exc_info()[2].tb_frame.f_back
- ret = []
- while f is not None:
- lineno = f.f_lineno
- co = f.f_code
- filename = co.co_filename
- name = co.co_name
- frame_globals = f.f_globals
- func_start_lineno = co.co_firstlineno
- frame_info = self._extract_frame_info(f)
- ret.append((filename, lineno, name, frame_globals, func_start_lineno,
- frame_info))
- f = f.f_back
- ret.reverse()
- return ret
-
- def _extract_frame_info(self, frame): # pylint: disable=unused-argument
- """Extracts custom information from a frame in an op traceback."""
- return None
+ self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access
def _check_not_finalized(self):
"""Check if the graph is finalized.
@@ -3192,9 +3141,9 @@ class Graph(object):
input_ops = set([t.op for t in inputs])
control_inputs = self._control_dependencies_for_inputs(input_ops)
- # _create_op_helper mutates the new Operation. _lock ensures a Session.run
- # call cannot occur between creating and mutating the op.
- with self._lock:
+ # _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
+ # Session.run call cannot occur between creating and mutating the op.
+ with self._mutation_lock():
ret = Operation(
node_def,
self,
@@ -3281,7 +3230,7 @@ class Graph(object):
if self._colocation_stack:
all_colocation_groups = []
- for colocation_op in self._colocation_stack:
+ for colocation_op in self._colocation_stack.peek_objs():
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
# Make this device match the device of the colocated op, to provide
@@ -4054,10 +4003,10 @@ class Graph(object):
if ignore_existing:
current_stack = self._colocation_stack
- self._colocation_stack = []
+ self._colocation_stack = traceable_stack.TraceableStack()
if op is not None:
- self._colocation_stack.append(op)
+ self._colocation_stack.push_obj(op, name=op.name, offset=1)
try:
yield
@@ -4065,7 +4014,7 @@ class Graph(object):
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
- self._colocation_stack.pop()
+ self._colocation_stack.pop_obj()
# Reset the colocation stack if requested.
if ignore_existing:
@@ -4692,11 +4641,15 @@ class Graph(object):
@property
def _colocation_stack(self):
+ """Return thread-local copy of colocation stack."""
if self._stack_state_is_thread_local:
# This may be called from a thread where colocation_stack doesn't yet
# exist.
if not hasattr(self._thread_local, "_colocation_stack"):
- self._thread_local._colocation_stack = self._graph_colocation_stack[:]
+ stack_copy_for_this_thread = self._graph_colocation_stack.copy()
+ # pylint: disable=protected-access
+ self._thread_local._colocation_stack = stack_copy_for_this_thread
+ # pylint: enable=protected-access
return self._thread_local._colocation_stack
else:
return self._graph_colocation_stack
@@ -4727,6 +4680,20 @@ class Graph(object):
else:
self._graph_control_dependencies_stack = control_dependencies
+ def _mutation_lock(self):
+ """Returns a lock to guard code that creates & mutates ops.
+
+ See the comment for self._group_lock for more info.
+ """
+ return self._group_lock.group(_MUTATION_LOCK_GROUP)
+
+ def _session_run_lock(self):
+ """Returns a lock to guard code for Session.run.
+
+ See the comment for self._group_lock for more info.
+ """
+ return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)
+
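+ # Example (illustrative): op creation may proceed concurrently in many
+ # threads, but is mutually exclusive with Session.run:
+ #
+ #   with graph._mutation_lock():
+ #     ...  # create or mutate ops; other mutators may hold this group too
+ #
+ #   with graph._session_run_lock():
+ #     ...  # run the graph; waits until no thread holds the mutation group
+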
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index ec3748b40e..76d4c2017c 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -943,6 +943,7 @@ from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
+from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util.tf_export import tf_export
)");
diff --git a/tensorflow/python/framework/python_op_gen_internal.cc b/tensorflow/python/framework/python_op_gen_internal.cc
index 940bffb906..031b4a384e 100644
--- a/tensorflow/python/framework/python_op_gen_internal.cc
+++ b/tensorflow/python/framework/python_op_gen_internal.cc
@@ -588,10 +588,12 @@ void GenPythonOp::AddExport() {
return;
}
+ // Add @tf_export decorator.
strings::StrAppend(&result_, "@tf_export(");
// Add all endpoint names to tf_export.
bool first_endpoint = true;
+ std::vector<string> deprecated_endpoints;
for (const auto& endpoint : api_def_.endpoint()) {
if (!first_endpoint) {
strings::StrAppend(&result_, ", ");
@@ -601,9 +603,32 @@ void GenPythonOp::AddExport() {
string endpoint_name;
python_op_gen_internal::GenerateLowerCaseOpName(endpoint.name(),
&endpoint_name);
+ if (endpoint.deprecated()) {
+ deprecated_endpoints.push_back(endpoint_name);
+ }
strings::StrAppend(&result_, "'", endpoint_name, "'");
}
strings::StrAppend(&result_, ")\n");
+
+ // If all endpoints are deprecated, add @deprecated decorator.
+ if (!api_def_.deprecation_message().empty()) {
+ const string instructions = api_def_.deprecation_message();
+ strings::StrAppend(&result_, "@deprecated(None, '", instructions, "')\n");
+ }
+ // Add @deprecated_endpoints decorator.
+ if (!deprecated_endpoints.empty()) {
+ strings::StrAppend(&result_, "@deprecated_endpoints(");
+ bool first_endpoint = true;
+ for (auto& endpoint_name : deprecated_endpoints) {
+ if (first_endpoint) {
+ first_endpoint = false;
+ } else {
+ strings::StrAppend(&result_, ", ");
+ }
+ strings::StrAppend(&result_, "'", endpoint_name, "'");
+ }
+ strings::StrAppend(&result_, ")\n");
+ }
}
void GenPythonOp::AddDefLine(const string& function_name,
diff --git a/tensorflow/python/framework/subscribe.py b/tensorflow/python/framework/subscribe.py
index 7797d991da..cee7398974 100644
--- a/tensorflow/python/framework/subscribe.py
+++ b/tensorflow/python/framework/subscribe.py
@@ -47,7 +47,7 @@ def _recursive_apply(tensors, apply_fn):
tensors_type = type(tensors)
if tensors_type is ops.Tensor:
return apply_fn(tensors)
- elif tensors_type is variables.Variable:
+ elif isinstance(tensors, variables.Variable):
return apply_fn(tensors.value())
elif isinstance(tensors, (list, tuple)):
tensors = [_recursive_apply(t, apply_fn) for t in tensors]
diff --git a/tensorflow/python/framework/tensor_util_test.py b/tensorflow/python/framework/tensor_util_test.py
index d6edc13643..395cf43b3f 100644
--- a/tensorflow/python/framework/tensor_util_test.py
+++ b/tensorflow/python/framework/tensor_util_test.py
@@ -50,13 +50,13 @@ class TensorUtilTest(test.TestCase):
def testFloatN(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0])
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
@@ -68,13 +68,13 @@ class TensorUtilTest(test.TestCase):
def testFloatTyped(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=dtypes.float32)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
@@ -86,13 +86,13 @@ class TensorUtilTest(test.TestCase):
def testFloatTypeCoerce(self):
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtypes.float32)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
@@ -105,13 +105,13 @@ class TensorUtilTest(test.TestCase):
arr = np.asarray([10, 20, 30], dtype="int")
t = tensor_util.make_tensor_proto(arr, dtype=dtypes.float32)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
@@ -123,13 +123,13 @@ class TensorUtilTest(test.TestCase):
def testFloatSizes(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[1, 3])
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
@@ -141,13 +141,13 @@ class TensorUtilTest(test.TestCase):
def testFloatSizes2(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[3, 1])
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
@@ -169,13 +169,13 @@ class TensorUtilTest(test.TestCase):
t = tensor_util.make_tensor_proto(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64))
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "@$\000\000\000\000\000\000@4\000\000\000\000\000\000@>\000\000\000\000\000\000"
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000$@\000\000\000\000\000\0004@\000\000\000\000\000\000>@"
@@ -206,13 +206,13 @@ class TensorUtilTest(test.TestCase):
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([5.0, 20.0, 30.0], dtype=np.float32), a)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
@@ -299,16 +299,16 @@ class TensorUtilTest(test.TestCase):
def testIntNDefaultType(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
- tensor_content: "\000\000\000\\n\000\000\000\024\000\000\000\036\000\000\000("
+ tensor_content: "\000\000\000\n\000\000\000\024\000\000\000\036\000\000\000("
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
- tensor_content: "\\n\000\000\000\024\000\000\000\036\000\000\000(\000\000\000"
+ tensor_content: "\n\000\000\000\024\000\000\000\036\000\000\000(\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int32, a.dtype)
@@ -380,16 +380,16 @@ class TensorUtilTest(test.TestCase):
t = tensor_util.make_tensor_proto(
[10, 20, 30], shape=[1, 3], dtype=dtypes.int64)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_INT64
tensor_shape { dim { size: 1 } dim { size: 3 } }
- tensor_content: "\000\000\000\000\000\000\000\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
+ tensor_content: "\000\000\000\000\000\000\000\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_INT64
tensor_shape { dim { size: 1 } dim { size: 3 } }
- tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
+ tensor_content: "\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
@@ -398,16 +398,16 @@ class TensorUtilTest(test.TestCase):
def testLongNpArray(self):
t = tensor_util.make_tensor_proto(np.array([10, 20, 30]))
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_INT64
tensor_shape { dim { size: 3 } }
- tensor_content: "\000\000\000\000\000\000\000\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
+ tensor_content: "\000\000\000\000\000\000\000\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_INT64
tensor_shape { dim { size: 3 } }
- tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
+ tensor_content: "\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
@@ -419,13 +419,13 @@ class TensorUtilTest(test.TestCase):
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint32)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000\000\025\000\000\000\026\000\000\000\027"
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\000\000\026\000\000\000\027\000\000\000"
@@ -435,7 +435,7 @@ class TensorUtilTest(test.TestCase):
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint8)
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_QUINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
@@ -445,7 +445,7 @@ class TensorUtilTest(test.TestCase):
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint8)
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_QINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
@@ -456,13 +456,13 @@ class TensorUtilTest(test.TestCase):
t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint16)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
@@ -473,13 +473,13 @@ class TensorUtilTest(test.TestCase):
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint16)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
""", t)
else:
- self.assertProtoEquals("""
+ self.assertProtoEquals(r"""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py
index 3988238609..2bc2a189fa 100644
--- a/tensorflow/python/framework/test_util.py
+++ b/tensorflow/python/framework/test_util.py
@@ -27,6 +27,7 @@ import random
import re
import tempfile
import threading
+import unittest
import numpy as np
import six
@@ -414,8 +415,28 @@ def assert_no_new_pyobjects_executing_eagerly(f):
f(self, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
+ collection_sizes_before = {
+ collection: len(ops.get_collection(collection))
+ for collection in ops.get_default_graph().collections}
for _ in range(3):
f(self, **kwargs)
+ # Note that gc.get_objects misses anything that isn't subject to garbage
+ # collection (C types). Collections are a common source of leaks, so we
+ # test for collection sizes explicitly.
+ for collection_key in ops.get_default_graph().collections:
+ collection = ops.get_collection(collection_key)
+ size_before = collection_sizes_before.get(collection_key, 0)
+ if len(collection) > size_before:
+ raise AssertionError(
+ ("Collection %s increased in size from "
+ "%d to %d (current items %s).")
+ % (collection_key, size_before, len(collection), collection))
+ # Make sure our collection checks don't show up as leaked memory by
+ # removing references to temporary variables.
+ del collection
+ del collection_key
+ del size_before
+ del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
@@ -625,16 +646,12 @@ def run_in_graph_and_eager_modes(func=None,
"Did you mean to use `run_all_tests_in_graph_and_eager_modes`?")
def decorated(self, **kwargs):
- with context.graph_mode():
- with self.test_session(use_gpu=use_gpu, config=config):
- f(self, **kwargs)
-
- if reset_test:
- # This decorator runs the wrapped test twice.
- # Reset the test environment between runs.
- self.tearDown()
- self._tempdir = None
- self.setUp()
+ try:
+ with context.graph_mode():
+ with self.test_session(use_gpu=use_gpu, config=config):
+ f(self, **kwargs)
+ except unittest.case.SkipTest:
+ pass
def run_eagerly(self, **kwargs):
if not use_gpu:
@@ -649,6 +666,13 @@ def run_in_graph_and_eager_modes(func=None,
assert_no_garbage_created(run_eagerly))
with context.eager_mode():
+ if reset_test:
+ # This decorator runs the wrapped test twice.
+ # Reset the test environment between runs.
+ self.tearDown()
+ self._tempdir = None
+ self.setUp()
+
run_eagerly(self, **kwargs)
return decorated
diff --git a/tensorflow/python/framework/test_util_test.py b/tensorflow/python/framework/test_util_test.py
index 5498376181..122c14c847 100644
--- a/tensorflow/python/framework/test_util_test.py
+++ b/tensorflow/python/framework/test_util_test.py
@@ -616,7 +616,7 @@ class TestUtilTest(test_util.TensorFlowTestCase):
self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
- def testRunInGraphAndEagerModesOnTestCase(self):
+ def test_run_in_eager_and_graph_modes_test_class(self):
msg = "`run_test_in_graph_and_eager_modes` only supports test methods.*"
with self.assertRaisesRegexp(ValueError, msg):
@test_util.run_in_graph_and_eager_modes()
@@ -624,6 +624,47 @@ class TestUtilTest(test_util.TensorFlowTestCase):
pass
del Foo # Make pylint unused happy.
+ def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
+ modes = []
+ def _test(self):
+ if not context.executing_eagerly():
+ self.skipTest("Skipping in graph mode")
+ modes.append("eager" if context.executing_eagerly() else "graph")
+ test_util.run_in_graph_and_eager_modes(_test)(self)
+ self.assertEqual(modes, ["eager"])
+
+ def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
+ modes = []
+ def _test(self):
+ if context.executing_eagerly():
+ self.skipTest("Skipping in eager mode")
+ modes.append("eager" if context.executing_eagerly() else "graph")
+ test_util.run_in_graph_and_eager_modes(_test)(self)
+ self.assertEqual(modes, ["graph"])
+
+ def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
+ modes = []
+ mode_name = lambda: "eager" if context.executing_eagerly() else "graph"
+
+ class ExampleTest(test_util.TensorFlowTestCase):
+
+ def runTest(self):
+ pass
+
+ def setUp(self):
+ modes.append("setup_" + mode_name())
+
+ @test_util.run_in_graph_and_eager_modes
+ def testBody(self):
+ modes.append("run_" + mode_name())
+
+ e = ExampleTest()
+ e.setUp()
+ e.testBody()
+
+ self.assertEqual(modes[0:2], ["setup_graph", "run_graph"])
+ self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
+
class GarbageCollectionTest(test_util.TensorFlowTestCase):
diff --git a/tensorflow/python/framework/traceable_stack.py b/tensorflow/python/framework/traceable_stack.py
new file mode 100644
index 0000000000..1b7c6bd7c5
--- /dev/null
+++ b/tensorflow/python/framework/traceable_stack.py
@@ -0,0 +1,135 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A simple stack that associates filename and line numbers with each object."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.util import tf_stack
+
+
+class TraceableObject(object):
+ """Wrap an object together with its the code definition location."""
+
+ # Return codes for the set_filename_and_line_from_caller() method.
+ SUCCESS, HEURISTIC_USED, FAILURE = (0, 1, 2)
+
+ def __init__(self, obj, name=None, filename=None, lineno=None):
+ self.obj = obj
+ self.name = name
+ self.filename = filename
+ self.lineno = lineno
+
+ def set_filename_and_line_from_caller(self, offset=0):
+ """Set filename and line using the caller's stack frame.
+
+ If the requested stack information is not available, a heuristic may
+ be applied and self.HEURISTIC_USED will be returned. If the heuristic
+ fails then no change will be made to the filename and lineno members
+ (None by default) and self.FAILURE will be returned.
+
+ Args:
+ offset: Integer. If 0, the caller's stack frame is used. If 1,
+ the caller's caller's stack frame is used. Larger values are
+ permissible but if out-of-range (larger than the number of stack
+ frames available) the outermost stack frame will be used.
+
+ Returns:
+ TraceableObject.SUCCESS if appropriate stack information was found,
+ TraceableObject.HEURISTIC_USED if the offset was larger than the stack,
+ and TraceableObject.FAILURE if the stack was empty.
+ """
+ # Offset is defined in "Args" as relative to the caller. We are one frame
+ # beyond the caller.
+ local_offset = offset + 1
+
+ frame_records = tf_stack.extract_stack()
+ if not frame_records:
+ return self.FAILURE
+ if len(frame_records) >= local_offset:
+ # Negative indexing is one-indexed instead of zero-indexed.
+ negative_offset = -(local_offset + 1)
+ self.filename, self.lineno = frame_records[negative_offset][:2]
+ return self.SUCCESS
+ else:
+ # If the offset is too large then we use the largest offset possible,
+ # meaning we use the outermost stack frame at index 0.
+ self.filename, self.lineno = frame_records[0][:2]
+ return self.HEURISTIC_USED
+
+ def copy_metadata(self):
+ """Return a TraceableObject like this one, but without the object."""
+ return self.__class__(None, name=self.name, filename=self.filename,
+ lineno=self.lineno)
+
+
+class TraceableStack(object):
+ """A stack of TraceableObjects."""
+
+ def __init__(self, existing_stack=None):
+ """Constructor.
+
+ Args:
+ existing_stack: [TraceableObject, ...] If provided, this object will
+ set its new stack to a SHALLOW COPY of existing_stack.
+ """
+ self._stack = existing_stack[:] if existing_stack else []
+
+ def push_obj(self, obj, name=None, offset=0):
+ """Add object to the stack and record its filename and line information.
+
+ Args:
+ obj: An object to store on the stack.
+ name: A name for the object, stored on the TraceableObject for later use.
+ offset: Integer. If 0, the caller's stack frame is used. If 1,
+ the caller's caller's stack frame is used.
+
+ Returns:
+ TraceableObject.SUCCESS if appropriate stack information was found,
+ TraceableObject.HEURISTIC_USED if the stack was smaller than expected,
+ and TraceableObject.FAILURE if the stack was empty.
+ """
+ traceable_obj = TraceableObject(obj, name=name)
+ self._stack.append(traceable_obj)
+ # Offset is defined in "Args" as relative to the caller. We are 1 frame
+ # beyond the caller and need to compensate.
+ return traceable_obj.set_filename_and_line_from_caller(offset + 1)
+
+ def pop_obj(self):
+ """Remove last-inserted object and return it, without filename/line info."""
+ return self._stack.pop().obj
+
+ def peek_objs(self):
+ """Return list of stored objects ordered newest to oldest."""
+ return [t_obj.obj for t_obj in reversed(self._stack)]
+
+ def peek_traceable_objs(self):
+ """Return list of stored TraceableObjects ordered newest to oldest."""
+ return list(reversed(self._stack))
+
+ def __len__(self):
+ """Return number of items on the stack, and used for truth-value testing."""
+ return len(self._stack)
+
+ def copy(self):
+ """Return a copy of self referencing the same objects but in a new list.
+
+ This method is implemented to support thread-local stacks.
+
+ Returns:
+ TraceableStack with a new list that holds existing objects.
+ """
+ return TraceableStack(self._stack)
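
For orientation, a minimal usage sketch of the new module (illustrative, not
part of the patch; assumes TensorFlow is importable as shown):

    from tensorflow.python.framework import traceable_stack

    t_stack = traceable_stack.TraceableStack()
    t_stack.push_obj('device:/gpu:0', name='device')  # records this call site
    top = t_stack.peek_traceable_objs()[0]            # newest entry first
    print(top.obj, top.filename, top.lineno)
    assert t_stack.pop_obj() == 'device:/gpu:0'       # returns the bare object
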
diff --git a/tensorflow/python/framework/traceable_stack_test.py b/tensorflow/python/framework/traceable_stack_test.py
new file mode 100644
index 0000000000..3e7876f631
--- /dev/null
+++ b/tensorflow/python/framework/traceable_stack_test.py
@@ -0,0 +1,133 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for tensorflow.python.framework.traceable_stack."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.framework import test_util
+from tensorflow.python.framework import traceable_stack
+from tensorflow.python.platform import googletest
+from tensorflow.python.util import tf_inspect as inspect
+
+_LOCAL_OBJECT = lambda x: x
+_THIS_FILENAME = inspect.getsourcefile(_LOCAL_OBJECT)
+
+
+class TraceableObjectTest(test_util.TensorFlowTestCase):
+
+ def testSetFilenameAndLineFromCallerUsesCallersStack(self):
+ t_obj = traceable_stack.TraceableObject(17)
+
+ # Do not separate placeholder from the set_filename_and_line_from_caller()
+ # call one line below it as it is used to calculate the latter's line
+ # number.
+ placeholder = lambda x: x
+ result = t_obj.set_filename_and_line_from_caller()
+
+ expected_lineno = inspect.getsourcelines(placeholder)[1] + 1
+ self.assertEqual(expected_lineno, t_obj.lineno)
+ self.assertEqual(_THIS_FILENAME, t_obj.filename)
+ self.assertEqual(t_obj.SUCCESS, result)
+
+ def testSetFilenameAndLineFromCallerRespectsOffset(self):
+
+ def call_set_filename_and_line_from_caller(t_obj):
+ # We expect to retrieve the line number from _our_ caller.
+ return t_obj.set_filename_and_line_from_caller(offset=1)
+
+ t_obj = traceable_stack.TraceableObject(None)
+ # Do not separate placeholder from the
+ # call_set_filename_and_line_from_caller() call one line below it as it is
+ # used to calculate the latter's line number.
+ placeholder = lambda x: x
+ result = call_set_filename_and_line_from_caller(t_obj)
+
+ expected_lineno = inspect.getsourcelines(placeholder)[1] + 1
+ self.assertEqual(expected_lineno, t_obj.lineno)
+ self.assertEqual(t_obj.SUCCESS, result)
+
+ def testSetFilenameAndLineFromCallerHandlesRidiculousOffset(self):
+ t_obj = traceable_stack.TraceableObject('The quick brown fox.')
+ # This line shouldn't die.
+ result = t_obj.set_filename_and_line_from_caller(offset=300)
+
+ # We expect a heuristic to be used because we are not currently 300 frames
+ # down on the stack. The filename and lineno of the outermost frame are not
+ # predictable -- in some environments the filename is this test file, but in
+ # other environments it is not (e.g. due to a test runner calling this
+ # file). Therefore we only test that the called function knows it applied a
+ # heuristic for the ridiculous stack offset.
+ self.assertEqual(t_obj.HEURISTIC_USED, result)
+
+
+class TraceableStackTest(test_util.TensorFlowTestCase):
+
+ def testPushPeekPopObj(self):
+ t_stack = traceable_stack.TraceableStack()
+ t_stack.push_obj(42.0)
+ t_stack.push_obj('hope')
+
+ expected_lifo_peek = ['hope', 42.0]
+ self.assertEqual(expected_lifo_peek, t_stack.peek_objs())
+
+ self.assertEqual('hope', t_stack.pop_obj())
+ self.assertEqual(42.0, t_stack.pop_obj())
+
+ def testPushPopPreserveLifoOrdering(self):
+ t_stack = traceable_stack.TraceableStack()
+ t_stack.push_obj(0)
+ t_stack.push_obj(1)
+ t_stack.push_obj(2)
+ t_stack.push_obj(3)
+
+ obj_3 = t_stack.pop_obj()
+ obj_2 = t_stack.pop_obj()
+ obj_1 = t_stack.pop_obj()
+ obj_0 = t_stack.pop_obj()
+
+ self.assertEqual(3, obj_3)
+ self.assertEqual(2, obj_2)
+ self.assertEqual(1, obj_1)
+ self.assertEqual(0, obj_0)
+
+ def testPushObjSetsFilenameAndLineInfoForCaller(self):
+ t_stack = traceable_stack.TraceableStack()
+
+ # We expect that the line number recorded for the 1-object will come from
+ # the call to t_stack.push_obj(1). Do not separate the next two lines!
+ placeholder_1 = lambda x: x
+ t_stack.push_obj(1)
+
+ # We expect that the line number recorded for the 2-object will come from
+ # the call to call_push_obj() and _not_ the call to t_stack.push_obj().
+ def call_push_obj(obj):
+ t_stack.push_obj(obj, offset=1)
+
+ # Do not separate the next two lines!
+ placeholder_2 = lambda x: x
+ call_push_obj(2)
+
+ expected_lineno_1 = inspect.getsourcelines(placeholder_1)[1] + 1
+ expected_lineno_2 = inspect.getsourcelines(placeholder_2)[1] + 1
+
+ t_obj_2, t_obj_1 = t_stack.peek_traceable_objs()
+ self.assertEqual(expected_lineno_2, t_obj_2.lineno)
+ self.assertEqual(expected_lineno_1, t_obj_1.lineno)
+
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tensorflow/python/keras/BUILD b/tensorflow/python/keras/BUILD
index 8b6b28bc77..01f1184766 100755
--- a/tensorflow/python/keras/BUILD
+++ b/tensorflow/python/keras/BUILD
@@ -451,6 +451,7 @@ cuda_py_test(
"//tensorflow/python:client_testlib",
],
shard_count = 2,
+ tags = ["no_windows_gpu"],
)
py_test(
@@ -720,6 +721,7 @@ py_test(
size = "medium",
srcs = ["preprocessing/image_test.py"],
srcs_version = "PY2AND3",
+ tags = ["nomsan"], # TODO(b/110990716) reenable
deps = [
":keras",
"//tensorflow/python:client_testlib",
@@ -791,6 +793,19 @@ py_test(
)
py_test(
+ name = "training_utils_test",
+ size = "medium",
+ srcs = ["engine/training_utils_test.py"],
+ srcs_version = "PY2AND3",
+ tags = ["notsan"],
+ deps = [
+ ":keras",
+ "//tensorflow/python:client_testlib",
+ "//third_party/py/numpy",
+ ],
+)
+
+py_test(
name = "model_subclassing_test",
size = "medium",
srcs = ["model_subclassing_test.py"],
diff --git a/tensorflow/python/keras/applications/mobilenet.py b/tensorflow/python/keras/applications/mobilenet.py
index e56c695a28..7285e03963 100644
--- a/tensorflow/python/keras/applications/mobilenet.py
+++ b/tensorflow/python/keras/applications/mobilenet.py
@@ -72,13 +72,9 @@ from __future__ import print_function
import os
from tensorflow.python.keras import backend as K
-from tensorflow.python.keras import constraints
-from tensorflow.python.keras import initializers
-from tensorflow.python.keras import regularizers
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.python.keras.applications.imagenet_utils import decode_predictions
-from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.layers import Activation
from tensorflow.python.keras.layers import BatchNormalization
from tensorflow.python.keras.layers import Conv2D
@@ -87,10 +83,10 @@ from tensorflow.python.keras.layers import Dropout
from tensorflow.python.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras.layers import GlobalMaxPooling2D
from tensorflow.python.keras.layers import Input
+from tensorflow.python.keras.layers import ReLU
from tensorflow.python.keras.layers import Reshape
from tensorflow.python.keras.layers import ZeroPadding2D
from tensorflow.python.keras.models import Model
-from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.platform import tf_logging as logging
@@ -100,10 +96,6 @@ from tensorflow.python.util.tf_export import tf_export
BASE_WEIGHT_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.6/'
-def relu6(x):
- return K.relu(x, max_value=6)
-
-
@tf_export('keras.applications.mobilenet.preprocess_input')
def preprocess_input(x):
"""Preprocesses a numpy array encoding a batch of images.
@@ -130,12 +122,6 @@ def MobileNet(input_shape=None,
classes=1000):
"""Instantiates the MobileNet architecture.
- To load a MobileNet model via `load_model`, import the custom
- objects `relu6` and pass them to the `custom_objects` parameter.
- E.g.
- model = load_model('mobilenet.h5', custom_objects={
- 'relu6': mobilenet.relu6})
-
Arguments:
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
@@ -412,7 +398,7 @@ def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
strides=strides,
name='conv1')(x)
x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
- return Activation(relu6, name='conv1_relu')(x)
+ return ReLU(6, name='conv1_relu')(x)
def _depthwise_conv_block(inputs,
@@ -479,7 +465,7 @@ def _depthwise_conv_block(inputs,
use_bias=False,
name='conv_dw_%d' % block_id)(x)
x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
- x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
+ x = ReLU(6, name='conv_dw_%d_relu' % block_id)(x)
x = Conv2D(
pointwise_conv_filters, (1, 1),
@@ -489,4 +475,4 @@ def _depthwise_conv_block(inputs,
name='conv_pw_%d' % block_id)(
x)
x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
- return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
+ return ReLU(6, name='conv_pw_%d_relu' % block_id)(x)
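
The removed `relu6` helper is replaced by the built-in `ReLU` layer with
`max_value=6`. A hedged sketch of the equivalence (illustrative values, not
part of the patch):

    import numpy as np

    from tensorflow.python.keras import backend as K
    from tensorflow.python.keras.layers import ReLU

    x = K.constant(np.array([-1.0, 3.0, 9.0]))
    old = K.relu(x, max_value=6)  # what the removed relu6(x) computed
    new = ReLU(6.)(x)             # the built-in layer the patch now uses
    # Both clip activations to [0, 6]; custom_objects={'relu6': ...} is no
    # longer needed when reloading saved MobileNet models.
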
diff --git a/tensorflow/python/keras/backend.py b/tensorflow/python/keras/backend.py
index fed779650e..cb3423598b 100644
--- a/tensorflow/python/keras/backend.py
+++ b/tensorflow/python/keras/backend.py
@@ -963,13 +963,14 @@ def zeros(shape, dtype=None, name=None):
[ 0., 0., 0., 0.]], dtype=float32)
```
"""
- if dtype is None:
- dtype = floatx()
- tf_dtype = dtypes_module.as_dtype(dtype)
- v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)
- if py_all(v.get_shape().as_list()):
- return variable(v, dtype=dtype, name=name)
- return v
+ with ops.init_scope():
+ if dtype is None:
+ dtype = floatx()
+ tf_dtype = dtypes_module.as_dtype(dtype)
+ v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)
+ if py_all(v.get_shape().as_list()):
+ return variable(v, dtype=dtype, name=name)
+ return v
@tf_export('keras.backend.ones')
@@ -996,13 +997,14 @@ def ones(shape, dtype=None, name=None):
[ 1., 1., 1., 1.]], dtype=float32)
```
"""
- if dtype is None:
- dtype = floatx()
- tf_dtype = dtypes_module.as_dtype(dtype)
- v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
- if py_all(v.get_shape().as_list()):
- return variable(v, dtype=dtype, name=name)
- return v
+ with ops.init_scope():
+ if dtype is None:
+ dtype = floatx()
+ tf_dtype = dtypes_module.as_dtype(dtype)
+ v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
+ if py_all(v.get_shape().as_list()):
+ return variable(v, dtype=dtype, name=name)
+ return v
@tf_export('keras.backend.eye')
@@ -2795,10 +2797,15 @@ class Function(object):
if not isinstance(self.fetches, list):
self.fetches = [self.fetches]
# The main use case of `fetches` being passed to a model is the ability
- # to run custom updates (since the outputs of fetches are never returned).
+ # to run custom updates.
# This requires us to wrap fetches in `identity` ops.
self.fetches = [array_ops.identity(x) for x in self.fetches]
self.session_kwargs = session_kwargs
+ # This mapping keeps track of the function that should receive the
+ # output from a fetch in `fetches`: { fetch: function(fetch_output) }
+ # A Callback can use this to register a function with access to the
+ # output values for a fetch it added.
+ self.fetch_callbacks = dict()
if session_kwargs:
raise ValueError('Some keys in session_kwargs are not supported at this '
@@ -2808,6 +2815,7 @@ class Function(object):
self._feed_arrays = None
self._feed_symbols = None
self._symbol_vals = None
+ self._fetches = None
self._session = None
def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):
@@ -2853,8 +2861,14 @@ class Function(object):
self._feed_arrays = feed_arrays
self._feed_symbols = feed_symbols
self._symbol_vals = symbol_vals
+ self._fetches = list(self.fetches)
self._session = session
+ def _call_fetch_callbacks(self, fetches_output):
+ for fetch, output in zip(self._fetches, fetches_output):
+ if fetch in self.fetch_callbacks:
+ self.fetch_callbacks[fetch](output)
+
def __call__(self, inputs):
if not isinstance(inputs, (list, tuple)):
raise TypeError('`inputs` should be a list or tuple.')
@@ -2891,14 +2905,14 @@ class Function(object):
np.asarray(self.feed_dict[key], dtype=key.dtype.base_dtype.name))
# Refresh callable if anything has changed.
- if (self._callable_fn is None or
- feed_arrays != self._feed_arrays or
+ if (self._callable_fn is None or feed_arrays != self._feed_arrays or
symbol_vals != self._symbol_vals or
- feed_symbols != self._feed_symbols or
+ feed_symbols != self._feed_symbols or self.fetches != self._fetches or
session != self._session):
self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)
fetched = self._callable_fn(*array_vals)
+ self._call_fetch_callbacks(fetched[-len(self._fetches):])
return fetched[:len(self.outputs)]
@@ -3161,10 +3175,16 @@ def rnn(step_function,
array_ops.stack(
[1, array_ops.shape(output)[1]]))
output = array_ops.where(tiled_mask_t, output, states[0])
- new_states = [
- array_ops.where(tiled_mask_t, new_states[i], states[i])
- for i in range(len(states))
- ]
+
+ masked_states = []
+ for i in range(len(states)):
+ states_dim = array_ops.shape(new_states[i])[1]
+ stacked_states_dim = array_ops.stack([1, states_dim])
+ tiled_mask = array_ops.tile(mask_t, stacked_states_dim)
+ masked_state = array_ops.where(tiled_mask, new_states[i], states[i])
+ masked_states.append(masked_state)
+ new_states = masked_states
+
output_ta_t = output_ta_t.write(time, output)
return (time + 1, output_ta_t) + tuple(new_states)
else:
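
The masking change above exists because additional states may have a second
dimension different from the output's, so the mask must now be tiled per
state. A hedged NumPy sketch of the per-state logic (names are illustrative,
not from the patch):

    import numpy as np

    mask_t = np.array([[1], [0]], dtype=bool)  # (num_samples, 1)
    for state, new_state in [(np.zeros((2, 3)), np.ones((2, 3))),
                             (np.zeros((2, 6)), np.ones((2, 6)))]:
        # Tile the mask to each state's own width, not the output's.
        tiled_mask = np.tile(mask_t, (1, new_state.shape[1]))
        masked = np.where(tiled_mask, new_state, state)
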
diff --git a/tensorflow/python/keras/backend_test.py b/tensorflow/python/keras/backend_test.py
index 2ba6c8ef15..36478ea089 100644
--- a/tensorflow/python/keras/backend_test.py
+++ b/tensorflow/python/keras/backend_test.py
@@ -276,6 +276,36 @@ class BackendUtilsTest(test.TestCase):
self.assertEqual(
keras.backend.get_session().run(fetches=[x, y]), [30., 40.])
+ def test_function_fetch_callbacks(self):
+
+ class CallbackStub(object):
+
+ def __init__(self):
+ self.times_called = 0
+ self.callback_result = 0
+
+ def _fetch_callback(self, result):
+ self.times_called += 1
+ self.callback_result = result
+
+ with self.test_session():
+ callback = CallbackStub()
+ x_placeholder = keras.backend.placeholder(shape=())
+ y_placeholder = keras.backend.placeholder(shape=())
+
+ callback_op = x_placeholder * y_placeholder
+
+ f = keras.backend.function(
+ inputs=[x_placeholder, y_placeholder],
+ outputs=[x_placeholder + y_placeholder])
+ f.fetches.append(callback_op)
+ f.fetch_callbacks[callback_op] = callback._fetch_callback
+
+ _ = f([10., 20.])
+
+ self.assertEqual(callback.times_called, 1)
+ self.assertEqual(callback.callback_result, 200)
+
class BackendVariableTest(test.TestCase):
@@ -1077,7 +1107,7 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
{'go_backwards': False, 'mask': mask, 'unroll': True},
]
with self.test_session():
- for (i, kwargs) in enumerate(kwargs_list):
+ for i, kwargs in enumerate(kwargs_list):
last_output, outputs, new_states = keras.backend.rnn(rnn_fn, inputs,
initial_states,
**kwargs)
@@ -1124,6 +1154,115 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
for b_s, b_u_s in zip(state_list[2], state_list[3]):
self.assertAllClose(b_s, b_u_s, atol=1e-04)
+ def test_rnn_additional_states(self):
+ # implement a simple RNN
+ num_samples = 4
+ input_dim = 5
+ output_dim = 3
+ timesteps = 6
+
+ input_val = np.random.random(
+ (num_samples, timesteps, input_dim)).astype(np.float32)
+ init_state_val = np.random.random(
+ (num_samples, output_dim)).astype(np.float32)
+ w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32)
+ w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32)
+ np_mask = np.random.randint(2, size=(num_samples, timesteps))
+
+ def rnn_step_fn():
+ w_i = keras.backend.variable(w_i_val)
+ w_o = keras.backend.variable(w_o_val)
+
+ def step_function(x, states):
+ assert len(states) == 2
+ prev_output = states[0]
+ output = keras.backend.dot(x, w_i) + keras.backend.dot(prev_output, w_o)
+ return output, [output,
+ keras.backend.concatenate([output, output], axis=-1)]
+
+ return step_function
+
+ # test default setup
+ last_output_list = [[], [], [], [], [], []]
+ outputs_list = [[], [], [], [], [], []]
+ state_list = [[], [], [], [], [], []]
+ additional_state_list = [[], [], [], [], [], []]
+
+ rnn_fn = rnn_step_fn()
+ inputs = keras.backend.variable(input_val)
+ initial_states = [keras.backend.variable(init_state_val),
+ np.concatenate([init_state_val, init_state_val], axis=-1)]
+ mask = keras.backend.variable(np_mask)
+
+ kwargs_list = [
+ {'go_backwards': False, 'mask': None},
+ {'go_backwards': False, 'mask': None, 'unroll': True},
+ {'go_backwards': True, 'mask': None},
+ {'go_backwards': True, 'mask': None, 'unroll': True},
+ {'go_backwards': False, 'mask': mask},
+ {'go_backwards': False, 'mask': mask, 'unroll': True},
+ ]
+ with self.test_session():
+ for i, kwargs in enumerate(kwargs_list):
+ last_output, outputs, new_states = keras.backend.rnn(rnn_fn, inputs,
+ initial_states,
+ **kwargs)
+ # check static shape inference
+ self.assertEqual(last_output.get_shape().as_list(),
+ [num_samples, output_dim])
+ self.assertEqual(outputs.get_shape().as_list(),
+ [num_samples, timesteps, output_dim])
+ self.assertEqual(new_states[0].get_shape().as_list(),
+ [num_samples, output_dim])
+ self.assertEqual(new_states[1].get_shape().as_list(),
+ [num_samples, 2 * output_dim])
+
+ last_output_list[i].append(keras.backend.eval(last_output))
+ outputs_list[i].append(keras.backend.eval(outputs))
+ self.assertEqual(len(new_states), 2)
+ state_list[i].append(keras.backend.eval(new_states[0]))
+ additional_state_list[i].append(keras.backend.eval(new_states[1]))
+
+ def assert_list_pairwise(z_list, atol=1e-05):
+ for (z1, z2) in zip(z_list[1:], z_list[:-1]):
+ self.assertAllClose(z1, z2, atol=atol)
+
+ assert_list_pairwise(last_output_list[0], atol=1e-04)
+ assert_list_pairwise(outputs_list[0], atol=1e-04)
+ assert_list_pairwise(state_list[0], atol=1e-04)
+ assert_list_pairwise(additional_state_list[0], atol=1e-04)
+ assert_list_pairwise(last_output_list[2], atol=1e-04)
+ assert_list_pairwise(outputs_list[2], atol=1e-04)
+ assert_list_pairwise(state_list[2], atol=1e-04)
+ assert_list_pairwise(additional_state_list[2], atol=1e-04)
+
+ for l, u_l in zip(last_output_list[0], last_output_list[1]):
+ self.assertAllClose(l, u_l, atol=1e-04)
+
+ for o, u_o in zip(outputs_list[0], outputs_list[1]):
+ self.assertAllClose(o, u_o, atol=1e-04)
+
+ for s, u_s in zip(state_list[0], state_list[1]):
+ self.assertAllClose(s, u_s, atol=1e-04)
+
+ for s, u_s in zip(additional_state_list[0], additional_state_list[1]):
+ self.assertAllClose(s, u_s, atol=1e-04)
+
+ for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]):
+ self.assertAllClose(b_l, b_u_l, atol=1e-04)
+
+ for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]):
+ self.assertAllClose(b_o, b_u_o, atol=1e-04)
+
+ for b_s, b_u_s in zip(state_list[2], state_list[3]):
+ self.assertAllClose(b_s, b_u_s, atol=1e-04)
+
+ for s, u_s in zip(additional_state_list[2], additional_state_list[3]):
+ self.assertAllClose(s, u_s, atol=1e-04)
+
def test_normalize_batch_in_training(self):
val = np.random.random((10, 3, 10, 10))
x = keras.backend.variable(val)
diff --git a/tensorflow/python/keras/callbacks.py b/tensorflow/python/keras/callbacks.py
index 9f91368e5b..0857a3279f 100644
--- a/tensorflow/python/keras/callbacks.py
+++ b/tensorflow/python/keras/callbacks.py
@@ -24,17 +24,23 @@ from collections import Iterable
from collections import OrderedDict
import csv
import json
+import math
import os
import time
import numpy as np
import six
+from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend as K
+from tensorflow.python.keras.engine.training_utils import standardize_input_data
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as tf_summary
+from tensorflow.python.training import saver
from tensorflow.python.util.tf_export import tf_export
@@ -496,6 +502,9 @@ class EarlyStopping(Callback):
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
+ baseline: baseline value for the monitored quantity.
+ Training will stop if the model doesn't show improvement over the
+ baseline.
"""
def __init__(self,
@@ -503,13 +512,15 @@ class EarlyStopping(Callback):
min_delta=0,
patience=0,
verbose=0,
- mode='auto'):
+ mode='auto',
+ baseline=None):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
- self.min_delta = min_delta
+ self.baseline = baseline
+ self.min_delta = abs(min_delta)
self.wait = 0
self.stopped_epoch = 0
@@ -537,7 +548,10 @@ class EarlyStopping(Callback):
# Allow instances to be re-used
self.wait = 0
self.stopped_epoch = 0
- self.best = np.Inf if self.monitor_op == np.less else -np.Inf
+ if self.baseline is not None:
+ self.best = self.baseline
+ else:
+ self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(self, epoch, logs=None):
current = logs.get(self.monitor)
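
Usage-wise, the new `baseline` argument makes training stop unless the
monitored quantity improves on the given value within `patience` epochs. A
hedged sketch (assumes a compiled `model` and training arrays; illustrative
threshold):

    stopper = keras.callbacks.EarlyStopping(monitor='acc', baseline=0.9,
                                            patience=3)
    model.fit(x_train, y_train, epochs=20, callbacks=[stopper])
    # Stops once accuracy fails to improve past 0.9 for 3 consecutive epochs.
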
@@ -688,7 +702,9 @@ class TensorBoard(Callback):
write_images: whether to write model weights to visualize as
image in TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding
- layers will be saved.
+ layers will be saved. If set to 0, embeddings won't be computed.
+ Data to be visualized in TensorBoard's Embedding tab must be passed
+ as `embeddings_data`.
embeddings_layer_names: a list of names of layers to keep an eye on. If
None or an empty list, all the embedding layers will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name
@@ -696,6 +712,10 @@ class TensorBoard(Callback):
[details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about the metadata file format. If the same metadata file is
used for all embedding layers, a single string can be passed.
+ embeddings_data: data to be embedded at layers specified in
+ `embeddings_layer_names`. Numpy array (if the model has a single
+ input) or list of Numpy arrays (if the model has multiple inputs).
+ Learn [more about embeddings](https://www.tensorflow.org/programmers_guide/embedding)
"""
# pylint: enable=line-too-long
@@ -706,7 +726,11 @@ class TensorBoard(Callback):
batch_size=32,
write_graph=True,
write_grads=False,
- write_images=False):
+ write_images=False,
+ embeddings_freq=0,
+ embeddings_layer_names=None,
+ embeddings_metadata=None,
+ embeddings_data=None):
super(TensorBoard, self).__init__()
self.log_dir = log_dir
self.histogram_freq = histogram_freq
@@ -715,10 +739,20 @@ class TensorBoard(Callback):
self.write_grads = write_grads
self.write_images = write_images
self.batch_size = batch_size
+ self._current_batch = 0
+ # Abstracted writer class so it can be stubbed out in tests.
+ self._writer_class = tf_summary.FileWriter
+ self.embeddings_freq = embeddings_freq
+ self.embeddings_layer_names = embeddings_layer_names
+ self.embeddings_metadata = embeddings_metadata
+ self.embeddings_data = embeddings_data
def set_model(self, model):
+ """Sets Keras model and creates summary ops."""
+
self.model = model
self.sess = K.get_session()
+ # only make histogram summary op if it hasn't already been made
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
@@ -763,57 +797,171 @@ class TensorBoard(Callback):
tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
if hasattr(layer, 'output'):
- tf_summary.histogram('{}_out'.format(layer.name), layer.output)
+ if isinstance(layer.output, list):
+ for i, output in enumerate(layer.output):
+ tf_summary.histogram('{}_out_{}'.format(layer.name, i), output)
+ else:
+ tf_summary.histogram('{}_out'.format(layer.name), layer.output)
self.merged = tf_summary.merge_all()
if self.write_graph:
- self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
+ self.writer = self._writer_class(self.log_dir, self.sess.graph)
else:
- self.writer = tf_summary.FileWriter(self.log_dir)
+ self.writer = self._writer_class(self.log_dir)
+
+ # If both embeddings_freq and embeddings_data are available, we will
+ # visualize embeddings.
+ if self.embeddings_freq and self.embeddings_data is not None:
+ self.embeddings_data = standardize_input_data(self.embeddings_data,
+ model.input_names)
+
+ # If embeddings_layer_names are not provided, get all of the embedding
+ # layers from the model.
+ embeddings_layer_names = self.embeddings_layer_names
+ if not embeddings_layer_names:
+ embeddings_layer_names = [
+ layer.name
+ for layer in self.model.layers
+ if type(layer).__name__ == 'Embedding'
+ ]
+
+ self.assign_embeddings = []
+ embeddings_vars = {}
+
+ self.batch_id = batch_id = array_ops.placeholder(dtypes.int32)
+ self.step = step = array_ops.placeholder(dtypes.int32)
- def on_epoch_end(self, epoch, logs=None):
- logs = logs or {}
+ for layer in self.model.layers:
+ if layer.name in embeddings_layer_names:
+ embedding_input = self.model.get_layer(layer.name).output
+ embedding_size = np.prod(embedding_input.shape[1:])
+ embedding_input = array_ops.reshape(embedding_input,
+ (step, int(embedding_size)))
+ shape = (self.embeddings_data[0].shape[0], int(embedding_size))
+ embedding = variables.Variable(
+ array_ops.zeros(shape), name=layer.name + '_embedding')
+ embeddings_vars[layer.name] = embedding
+ batch = state_ops.assign(embedding[batch_id:batch_id + step],
+ embedding_input)
+ self.assign_embeddings.append(batch)
+
+ self.saver = saver.Saver(list(embeddings_vars.values()))
+
+ # Create embeddings_metadata dictionary
+ if isinstance(self.embeddings_metadata, str):
+ embeddings_metadata = {
+ layer_name: self.embeddings_metadata
+ for layer_name in embeddings_vars.keys()
+ }
+ else:
+ # If embeddings_metadata is already a dictionary
+ embeddings_metadata = self.embeddings_metadata
+
+ try:
+ from tensorboard.plugins import projector
+ except ImportError:
+ raise ImportError('Failed to import TensorBoard. Please make sure that '
+ 'TensorBoard integration is complete.')
+
+ # TODO(psv): Add integration tests to test embedding visualization
+ # with TensorBoard callback. We are unable to write a unit test for this
+ # because TensorBoard dependency assumes TensorFlow package is installed.
+ config = projector.ProjectorConfig()
+ for layer_name, tensor in embeddings_vars.items():
+ embedding = config.embeddings.add()
+ embedding.tensor_name = tensor.name
+
+ if (embeddings_metadata is not None and
+ layer_name in embeddings_metadata):
+ embedding.metadata_path = embeddings_metadata[layer_name]
+
+ projector.visualize_embeddings(self.writer, config)
+
+ def _fetch_callback(self, summary):
+ self.writer.add_summary(
+ summary,
+ self._epoch + self._current_val_batch / self._validation_batches)
+ self._current_val_batch += 1
- if not self.validation_data and self.histogram_freq:
- raise ValueError('If printing histograms, validation_data must be '
- 'provided, and cannot be a generator.')
- if self.validation_data and self.histogram_freq:
- if epoch % self.histogram_freq == 0:
+ def on_train_begin(self, logs=None):
+ """Checks if histogram summaries can be run."""
+
+ if self.histogram_freq:
+ if 'validation_steps' in self.params:
+ self._validation_batches = self.params['validation_steps']
+ elif self.validation_data:
+ self._validation_batches = math.ceil(
+ self.validation_data[0].shape[0] / self.batch_size)
+ else:
+ raise ValueError('If printing histograms, validation data must be '
+ 'provided.')
+ if self._validation_batches == 0:
+ raise ValueError(
+ 'If printing histograms, validation data must have length > 0.')
- val_data = self.validation_data
- tensors = (
- self.model.inputs + self.model.targets + self.model.sample_weights)
+ def on_epoch_begin(self, epoch, logs=None):
+ """Add histogram op to Model test_function callbacks, reset batch count."""
+
+ # check if histogram summary should be run for this epoch
+ if self.histogram_freq and epoch % self.histogram_freq == 0:
+ self._epoch = epoch
+ self._current_val_batch = 0
+ # add the histogram summary op if it should run this epoch
+ if self.merged not in self.model.test_function.fetches:
+ self.model.test_function.fetches.append(self.merged)
+ self.model.test_function.fetch_callbacks[
+ self.merged] = self._fetch_callback
- if self.model.uses_learning_phase:
- tensors += [K.learning_phase()]
+ def on_epoch_end(self, epoch, logs=None):
+ """Checks if summary ops should run next epoch, logs scalar summaries."""
- assert len(val_data) == len(tensors)
- val_size = val_data[0].shape[0]
+ logs = logs or {}
+
+ # pop the histogram summary op after each epoch
+ if self.histogram_freq:
+ if self.merged in self.model.test_function.fetches:
+ self.model.test_function.fetches.remove(self.merged)
+ if self.merged in self.model.test_function.fetch_callbacks:
+ self.model.test_function.fetch_callbacks.pop(self.merged)
+
+ if self.embeddings_data is None and self.embeddings_freq:
+ raise ValueError('To visualize embeddings, embeddings_data must '
+ 'be provided.')
+
+ if self.embeddings_freq and self.embeddings_data is not None:
+ if epoch % self.embeddings_freq == 0:
+ # We need a second forward-pass here because we're passing
+ # the `embeddings_data` explicitly. This design allows passing
+ # arbitrary data as `embeddings_data`, and follows from the fact
+ # that we need to know the size of the `tf.Variable`s which
+ # hold the embeddings in `set_model`. At this point, however,
+ # the `validation_data` is not yet set.
+
+ embeddings_data = self.embeddings_data
+ n_samples = embeddings_data[0].shape[0]
i = 0
- while i < val_size:
- step = min(self.batch_size, val_size - i)
- batch_val = []
- batch_val.append(val_data[0][i:i + step]
- if val_data[0] is not None else None)
- batch_val.append(val_data[1][i:i + step]
- if val_data[1] is not None else None)
- batch_val.append(val_data[2][i:i + step]
- if val_data[2] is not None else None)
- if self.model.uses_learning_phase:
- # do not slice the learning phase
- batch_val = [x[i:i + step] if x is not None else None
- for x in val_data[:-1]]
- batch_val.append(val_data[-1])
+ while i < n_samples:
+ step = min(self.batch_size, n_samples - i)
+ batch = slice(i, i + step)
+
+ if isinstance(self.model.input, list):
+ feed_dict = {
+ model_input: embeddings_data[idx][batch]
+ for idx, model_input in enumerate(self.model.input)
+ }
else:
- batch_val = [x[i:i + step] if x is not None else None
- for x in val_data]
- feed_dict = {}
- for key, val in zip(tensors, batch_val):
- if val is not None:
- feed_dict[key] = val
- result = self.sess.run([self.merged], feed_dict=feed_dict)
- summary_str = result[0]
- self.writer.add_summary(summary_str, epoch)
+ feed_dict = {self.model.input: embeddings_data[0][batch]}
+
+ feed_dict.update({self.batch_id: i, self.step: step})
+
+ if self.model.uses_learning_phase:
+ feed_dict[K.learning_phase()] = False
+
+ self.sess.run(self.assign_embeddings, feed_dict=feed_dict)
+ self.saver.save(self.sess,
+ os.path.join(self.log_dir, 'keras_embedding.ckpt'),
+ epoch)
+
i += self.batch_size
for name, value in logs.items():
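
A hedged configuration sketch for the restored embedding options (assumes a
compiled `model` and training arrays; paths and filenames are illustrative).
`embeddings_data` is required because the embedding variables are sized in
`set_model`, before any validation data is known:

    tsb = keras.callbacks.TensorBoard(
        log_dir='/tmp/logs',
        embeddings_freq=1,               # write embeddings every epoch
        embeddings_layer_names=None,     # None: watch all Embedding layers
        embeddings_metadata='meta.tsv',  # one file shared by all layers
        embeddings_data=x_train)
    model.fit(x_train, y_train, epochs=2, callbacks=[tsb])
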
diff --git a/tensorflow/python/keras/callbacks_test.py b/tensorflow/python/keras/callbacks_test.py
index 5062a26580..45598cafd3 100644
--- a/tensorflow/python/keras/callbacks_test.py
+++ b/tensorflow/python/keras/callbacks_test.py
@@ -27,6 +27,7 @@ import unittest
import numpy as np
+from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@@ -273,16 +274,43 @@ class KerasCallbacksTest(test.TestCase):
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
- stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
weights = model.get_weights()
+ stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
- assert len(hist.epoch) >= patience
+ assert len(hist.epoch) >= patience
+
+ def test_EarlyStopping_with_baseline(self):
+ with self.test_session():
+ np.random.seed(1337)
+ baseline = 0.5
+ (data, labels), _ = testing_utils.get_test_data(
+ train_samples=100,
+ test_samples=50,
+ input_shape=(1,),
+ num_classes=NUM_CLASSES)
+ model = keras.models.Sequential((keras.layers.Dense(
+ 1, input_dim=1, activation='relu'), keras.layers.Dense(
+ 1, activation='sigmoid'),))
+ model.compile(
+ optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
+
+ stopper = keras.callbacks.EarlyStopping(monitor='acc',
+ baseline=baseline)
+ hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
+ assert len(hist.epoch) == 1
+
+ patience = 3
+ stopper = keras.callbacks.EarlyStopping(monitor='acc',
+ patience=patience,
+ baseline=baseline)
+ hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
+ assert len(hist.epoch) >= patience
def test_RemoteMonitor(self):
if requests is None:
@@ -785,21 +813,6 @@ class KerasCallbacksTest(test.TestCase):
for cb in cbs:
cb.on_train_end()
- # fit generator with validation data generator should raise ValueError if
- # histogram_freq > 0
- cbs = callbacks_factory(histogram_freq=1)
- with self.assertRaises(ValueError):
- model.fit_generator(
- data_generator(True),
- len(x_train),
- epochs=2,
- validation_data=data_generator(False),
- validation_steps=1,
- callbacks=cbs)
-
- for cb in cbs:
- cb.on_train_end()
-
# Make sure file writer cache is clear to avoid failures during cleanup.
writer_cache.FileWriterCache.clear()
@@ -874,6 +887,130 @@ class KerasCallbacksTest(test.TestCase):
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
+ def test_Tensorboard_histogram_summaries_in_test_function(self):
+
+ class FileWriterStub(object):
+
+ def __init__(self, logdir, graph=None):
+ self.logdir = logdir
+ self.graph = graph
+ self.steps_seen = []
+
+ def add_summary(self, summary, global_step):
+ summary_obj = summary_pb2.Summary()
+
+ # ensure a valid Summary proto is being sent
+ if isinstance(summary, bytes):
+ summary_obj.ParseFromString(summary)
+ else:
+ assert isinstance(summary, summary_pb2.Summary)
+ summary_obj = summary
+
+ # keep track of steps seen for the merged_summary op,
+ # which contains the histogram summaries
+ if len(summary_obj.value) > 1:
+ self.steps_seen.append(global_step)
+
+ def flush(self):
+ pass
+
+ def close(self):
+ pass
+
+ np.random.seed(1337)
+ tmpdir = self.get_temp_dir()
+ self.addCleanup(shutil.rmtree, tmpdir)
+ (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
+ train_samples=TRAIN_SAMPLES,
+ test_samples=TEST_SAMPLES,
+ input_shape=(INPUT_DIM,),
+ num_classes=NUM_CLASSES)
+ y_test = keras.utils.to_categorical(y_test)
+ y_train = keras.utils.to_categorical(y_train)
+
+ with self.test_session():
+ model = keras.models.Sequential()
+ model.add(
+ keras.layers.Dense(
+ NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
+ # non_trainable_weights: moving_variance, moving_mean
+ model.add(keras.layers.BatchNormalization())
+ model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
+ model.compile(
+ loss='categorical_crossentropy',
+ optimizer='sgd',
+ metrics=['accuracy'])
+ tsb = keras.callbacks.TensorBoard(
+ log_dir=tmpdir,
+ histogram_freq=1,
+ write_images=True,
+ write_grads=True,
+ batch_size=5)
+ tsb._writer_class = FileWriterStub
+ cbks = [tsb]
+
+ # fit with validation data
+ model.fit(
+ x_train,
+ y_train,
+ batch_size=BATCH_SIZE,
+ validation_data=(x_test, y_test),
+ callbacks=cbks,
+ epochs=3,
+ verbose=0)
+
+ self.assertAllEqual(tsb.writer.steps_seen, [0, 0.5, 1, 1.5, 2, 2.5])
+
+ def test_Tensorboard_histogram_summaries_with_generator(self):
+ np.random.seed(1337)
+ tmpdir = self.get_temp_dir()
+ self.addCleanup(shutil.rmtree, tmpdir)
+
+ def generator():
+ x = np.random.randn(10, 100).astype(np.float32)
+ y = np.random.randn(10, 10).astype(np.float32)
+ while True:
+ yield x, y
+
+ with self.test_session():
+ model = keras.models.Sequential()
+ model.add(keras.layers.Dense(10, input_dim=100, activation='relu'))
+ model.add(keras.layers.Dense(10, activation='softmax'))
+ model.compile(
+ loss='categorical_crossentropy',
+ optimizer='sgd',
+ metrics=['accuracy'])
+ tsb = keras.callbacks.TensorBoard(
+ log_dir=tmpdir,
+ histogram_freq=1,
+ write_images=True,
+ write_grads=True,
+ batch_size=5)
+ cbks = [tsb]
+
+ # fit with validation generator
+ model.fit_generator(
+ generator(),
+ steps_per_epoch=2,
+ epochs=2,
+ validation_data=generator(),
+ validation_steps=2,
+ callbacks=cbks,
+ verbose=0)
+
+ with self.assertRaises(ValueError):
+ # fit with validation generator but no
+ # validation_steps
+ model.fit_generator(
+ generator(),
+ steps_per_epoch=2,
+ epochs=2,
+ validation_data=generator(),
+ callbacks=cbks,
+ verbose=0)
+
+ self.assertTrue(os.path.exists(tmpdir))
+
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
diff --git a/tensorflow/python/keras/datasets/mnist.py b/tensorflow/python/keras/datasets/mnist.py
index 2a1c8d5f51..a96b581960 100644
--- a/tensorflow/python/keras/datasets/mnist.py
+++ b/tensorflow/python/keras/datasets/mnist.py
@@ -50,5 +50,5 @@ def load_data(path='mnist.npz'):
with np.load(path) as f:
x_train, y_train = f['x_train'], f['y_train']
x_test, y_test = f['x_test'], f['y_test']
-
+
return (x_train, y_train), (x_test, y_test)
diff --git a/tensorflow/python/keras/engine/base_layer.py b/tensorflow/python/keras/engine/base_layer.py
index 4814275fd5..e02792208b 100644
--- a/tensorflow/python/keras/engine/base_layer.py
+++ b/tensorflow/python/keras/engine/base_layer.py
@@ -116,6 +116,7 @@ class Layer(checkpointable.CheckpointableBase):
constraints on inputs that can be accepted by the layer.
"""
+ @checkpointable.no_automatic_dependency_tracking
def __init__(self, trainable=True, name=None, dtype=None, **kwargs):
# These properties should be set by the user via keyword arguments.
# note that 'dtype', 'input_shape' and 'batch_input_shape'
@@ -217,7 +218,7 @@ class Layer(checkpointable.CheckpointableBase):
@activity_regularizer.setter
def activity_regularizer(self, regularizer):
"""Optional regularizer function for the output of this layer."""
- self._activity_regularizer = regularizer
+ self._activity_regularizer = self._no_dependency(regularizer)
@property
def trainable_weights(self):
@@ -459,14 +460,18 @@ class Layer(checkpointable.CheckpointableBase):
"""Alias for `add_weight`."""
return self.add_weight(*args, **kwargs)
- def add_weight(self, name, shape,
+ def add_weight(self,
+ name,
+ shape,
dtype=None,
initializer=None,
regularizer=None,
- trainable=True,
+ trainable=None,
constraint=None,
partitioner=None,
use_resource=None,
+ synchronization=vs.VariableSynchronization.AUTO,
+ aggregation=vs.VariableAggregation.NONE,
getter=None):
"""Adds a new variable to the layer, or gets an existing one; returns it.
@@ -481,10 +486,20 @@ class Layer(checkpointable.CheckpointableBase):
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
Note, if the current variable scope is marked as non-trainable
then this parameter is ignored and any added variables are also
- marked as non-trainable.
+ marked as non-trainable. `trainable` defaults to `True` unless
+ `synchronization` is set to `ON_READ`.
constraint: constraint instance (callable).
partitioner: Partitioner to be passed to the `Checkpointable` API.
use_resource: Whether to use `ResourceVariable`.
+ synchronization: Indicates when a distributed variable will be
+ aggregated. Accepted values are constants defined in the class
+ @{tf.VariableSynchronization}. By default the synchronization is set to
+ `AUTO` and the current `DistributionStrategy` chooses
+ when to synchronize. If `synchronization` is set to `ON_READ`,
+ `trainable` must not be set to `True`.
+ aggregation: Indicates how a distributed variable will be aggregated.
+ Accepted values are constants defined in the class
+ @{tf.VariableAggregation}.
getter: Variable getter argument to be passed to the `Checkpointable` API.
Returns:
@@ -495,7 +510,8 @@ class Layer(checkpointable.CheckpointableBase):
Raises:
RuntimeError: If called with partitioned variable regularization and
eager execution is enabled.
- ValueError: When giving unsupported dtype and no initializer.
+ ValueError: When giving an unsupported dtype and no initializer, or when
+ trainable has been set to True with synchronization set to `ON_READ`.
"""
if dtype is None:
dtype = self.dtype or backend.floatx()
@@ -504,6 +520,19 @@ class Layer(checkpointable.CheckpointableBase):
regularizer = regularizers.get(regularizer)
constraint = constraints.get(constraint)
+ if synchronization == vs.VariableSynchronization.ON_READ:
+ if trainable:
+ raise ValueError(
+ 'Synchronization value can be set to '
+ 'VariableSynchronization.ON_READ only for non-trainable variables. '
+ 'You have specified trainable=True and '
+ 'synchronization=VariableSynchronization.ON_READ.')
+ else:
+ # Set trainable to be false when variable is to be synced on read.
+ trainable = False
+ elif trainable is None:
+ trainable = True
+
# Initialize variable when no initializer provided
if initializer is None:
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
@@ -531,7 +560,9 @@ class Layer(checkpointable.CheckpointableBase):
constraint=constraint,
trainable=trainable and self.trainable,
partitioner=partitioner,
- use_resource=use_resource)
+ use_resource=use_resource,
+ synchronization=synchronization,
+ aggregation=aggregation)
if regularizer is not None:
# TODO(fchollet): in the future, this should be handled at the
@@ -654,11 +685,12 @@ class Layer(checkpointable.CheckpointableBase):
# Handle Keras mask propagation from previous layer to current layer.
previous_mask = None
- if (not hasattr(self, '_compute_previous_mask') or
- self._compute_previous_mask):
+ if build_graph and (not hasattr(self, '_compute_previous_mask') or
+ self._compute_previous_mask):
previous_mask = collect_previous_mask(inputs)
if not hasattr(self, '_call_fn_args'):
- self._call_fn_args = function_utils.fn_args(self.call)
+ self._call_fn_args = self._no_dependency(
+ function_utils.fn_args(self.call))
if ('mask' in self._call_fn_args and 'mask' not in kwargs and
not generic_utils.is_all_none(previous_mask)):
# The previous layer generated a mask, and mask was not explicitly pass
@@ -691,9 +723,10 @@ class Layer(checkpointable.CheckpointableBase):
self._dtype = input_list[0].dtype.base_dtype.name
except AttributeError:
pass
- if all(hasattr(x, 'get_shape') for x in input_list):
- input_shapes = nest.map_structure(lambda x: x.get_shape(), inputs)
+ if all(hasattr(x, 'shape') for x in input_list):
+ input_shapes = nest.map_structure(lambda x: x.shape, inputs)
self.build(input_shapes)
+ self.built = True
# Check input assumptions set after layer building, e.g. input shape.
if build_graph or in_deferred_mode:
@@ -709,7 +742,7 @@ class Layer(checkpointable.CheckpointableBase):
# Deferred mode behavior: use `compute_output_shape` to
# infer the number of outputs of the layer and their shapes.
if input_shapes is None:
- input_shapes = nest.map_structure(lambda x: x.get_shape(), inputs)
+ input_shapes = nest.map_structure(lambda x: x.shape, inputs)
output_shapes = self.compute_output_shape(input_shapes)
output_shapes = nest.flatten(output_shapes)
@@ -729,8 +762,6 @@ class Layer(checkpointable.CheckpointableBase):
if in_deferred_mode or build_graph and have_all_keras_metadata(inputs):
inputs, outputs = self._set_connectivity_metadata_(
inputs, outputs, args, kwargs)
-
- self.built = True
if context.executing_eagerly():
return outputs
@@ -1293,7 +1324,7 @@ class Layer(checkpointable.CheckpointableBase):
', but the layer isn\'t built. '
'You can build it manually via: `' + self.name +
'.build(batch_input_shape)`.')
- weight_shapes = [w.get_shape().as_list() for w in self.weights]
+ weight_shapes = [w.shape.as_list() for w in self.weights]
return int(sum([np.prod(w) for w in weight_shapes]))
@property
@@ -1376,7 +1407,7 @@ class Layer(checkpointable.CheckpointableBase):
if (spec.ndim is not None or
spec.min_ndim is not None or
spec.max_ndim is not None):
- if x.get_shape().ndims is None:
+ if x.shape.ndims is None:
raise ValueError('Input ' + str(input_index) + ' of layer ' +
self.name + ' is incompatible with the layer: '
'its rank is undefined, but the layer requires a '
@@ -1384,29 +1415,29 @@ class Layer(checkpointable.CheckpointableBase):
# Check ndim.
if spec.ndim is not None:
- ndim = x.get_shape().ndims
+ ndim = x.shape.ndims
if ndim != spec.ndim:
raise ValueError('Input ' + str(input_index) + ' of layer ' +
self.name + ' is incompatible with the layer: '
'expected ndim=' + str(spec.ndim) + ', found ndim=' +
str(ndim) + '. Full shape received: ' +
- str(x.get_shape().as_list()))
+ str(x.shape.as_list()))
if spec.max_ndim is not None:
- ndim = x.get_shape().ndims
+ ndim = x.shape.ndims
if ndim is not None and ndim > spec.max_ndim:
raise ValueError('Input ' + str(input_index) + ' of layer ' +
self.name + ' is incompatible with the layer: '
'expected max_ndim=' + str(spec.max_ndim) +
', found ndim=' + str(ndim))
if spec.min_ndim is not None:
- ndim = x.get_shape().ndims
+ ndim = x.shape.ndims
if ndim is not None and ndim < spec.min_ndim:
raise ValueError('Input ' + str(input_index) + ' of layer ' +
self.name + ' is incompatible with the layer: '
': expected min_ndim=' + str(spec.min_ndim) +
', found ndim=' + str(ndim) +
'. Full shape received: ' +
- str(x.get_shape().as_list()))
+ str(x.shape.as_list()))
# Check dtype.
if spec.dtype is not None:
if x.dtype != spec.dtype:
@@ -1416,7 +1447,7 @@ class Layer(checkpointable.CheckpointableBase):
', found dtype=' + str(x.dtype))
# Check specific shape axes.
if spec.axes:
- shape = x.get_shape().as_list()
+ shape = x.shape.as_list()
if shape is not None:
for axis, value in spec.axes.items():
if hasattr(value, 'value'):
@@ -1429,7 +1460,7 @@ class Layer(checkpointable.CheckpointableBase):
' but received input with shape ' + str(shape))
# Check shape.
if spec.shape is not None:
- shape = x.get_shape().as_list()
+ shape = x.shape.as_list()
if shape is not None:
for spec_dim, dim in zip(spec.shape, shape):
if spec_dim is not None and dim is not None:
@@ -1704,12 +1735,12 @@ class DeferredTensor(object):
def __str__(self):
return "DeferredTensor('%s', shape=%s, dtype=%s)" % (self.name,
- self.get_shape(),
+ self.shape,
self.dtype.name)
def __repr__(self):
return "<DeferredTensor '%s' shape=%s dtype=%s>" % (self.name,
- self.get_shape(),
+ self.shape,
self.dtype.name)
@@ -1804,11 +1835,13 @@ def make_variable(name,
dtype=dtypes.float32,
initializer=None,
partition_info=None,
- trainable=True,
+ trainable=None,
caching_device=None,
validate_shape=True,
constraint=None,
use_resource=None,
+ synchronization=vs.VariableSynchronization.AUTO,
+ aggregation=vs.VariableAggregation.NONE,
partitioner=None): # pylint: disable=unused-argument
"""Temporary util to create a variable (relies on `variable_scope.variable`).
@@ -1834,11 +1867,21 @@ def make_variable(name,
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
Note, if the current variable scope is marked as non-trainable
then this parameter is ignored and any added variables are also
- marked as non-trainable.
+ marked as non-trainable. `trainable` defaults to `True` unless
+ `synchronization` is set to `ON_READ`.
caching_device: Passed to `vs.variable`.
validate_shape: Passed to `vs.variable`.
constraint: Constraint instance (callable).
use_resource: Whether to use a `ResourceVariable`.
+ synchronization: Indicates when a distributed variable will be
+ aggregated. Accepted values are constants defined in the class
+ @{tf.VariableSynchronization}. By default the synchronization is set to
+ `AUTO` and the current `DistributionStrategy` chooses
+ when to synchronize. If `synchronization` is set to `ON_READ`,
+ `trainable` must not be set to `True`.
+ aggregation: Indicates how a distributed variable will be aggregated.
+ Accepted values are constants defined in the class
+ @{tf.VariableAggregation}.
partitioner: Not handled at this time.
Returns:
@@ -1870,5 +1913,7 @@ def make_variable(name,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
- use_resource=use_resource)
+ use_resource=use_resource,
+ synchronization=synchronization,
+ aggregation=aggregation)
return v
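
A hedged sketch of the new `add_weight` arguments (illustrative layer, not
from the patch; note that `trainable` now defaults to `None` and is resolved
to `False` for `ON_READ`):

    from tensorflow.python.keras.engine import base_layer
    from tensorflow.python.ops import variable_scope as vs

    class CountingLayer(base_layer.Layer):

      def build(self, input_shape):
        # An ON_READ variable must be non-trainable; leaving trainable unset
        # lets add_weight resolve it to False.
        self.count = self.add_weight(
            'count', shape=(),
            synchronization=vs.VariableSynchronization.ON_READ,
            aggregation=vs.VariableAggregation.SUM)
        super(CountingLayer, self).build(input_shape)
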
diff --git a/tensorflow/python/keras/engine/network.py b/tensorflow/python/keras/engine/network.py
index 3edb8033ff..a4d96de74f 100644
--- a/tensorflow/python/keras/engine/network.py
+++ b/tensorflow/python/keras/engine/network.py
@@ -44,6 +44,7 @@ from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.checkpointable import data_structures
+from tensorflow.python.training.checkpointable import layer_utils as checkpointable_layer_utils
from tensorflow.python.training.checkpointable import util as checkpointable_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
@@ -80,6 +81,20 @@ class Network(base_layer.Layer):
# Subclassed network
self._init_subclassed_network(**kwargs)
+ # Several Network methods have "no_automatic_dependency_tracking"
+ # annotations. Since Network does automatic dependency tracking on attribute
+ # assignment, including for common data structures such as lists, by default
+ # we'd have quite a few empty dependencies which users don't care about (or
+ # would need some way to ignore dependencies automatically, which is confusing
+ # when applied to user code). Some attributes, such as _layers, would cause
+ # structural issues (_layers being the place where Layers assigned to tracked
+ # attributes are stored).
+ #
+ # Aside from these aesthetic and structural issues, useless dependencies on
+ # empty lists shouldn't cause issues; adding or removing them will not break
+ # checkpoints, but may cause "all Python objects matched" assertions to fail
+ # (in which case less strict assertions may be substituted if necessary).
+ @checkpointable.no_automatic_dependency_tracking
def _base_init(self, name=None):
# The following are implemented as property functions:
# self.trainable_weights
@@ -134,6 +149,7 @@ class Network(base_layer.Layer):
# restore operations when graph building.
self._in_progress_restore_finalizer = None
+ @checkpointable.no_automatic_dependency_tracking
def _init_graph_network(self, inputs, outputs, name=None):
self._call_convention = base_layer.CallConvention.EXPLICIT_INPUTS_ARGUMENT
# Normalize and set self.inputs, self.outputs.
@@ -292,6 +308,7 @@ class Network(base_layer.Layer):
for layer in self._output_layers:
self.output_names.append(layer.name)
+ @checkpointable.no_automatic_dependency_tracking
def _init_subclassed_network(self, name=None):
self._base_init(name=name)
self._is_graph_network = False
@@ -361,10 +378,31 @@ class Network(base_layer.Layer):
self._track_checkpointable(
layer, name='layer-%d' % layer_index, overwrite=True)
+ def _no_dependency(self, value):
+ """Override to allow `Layer` to disable dependency tracking.
+
+ `CheckpointableBase` defines this method, whose semantics are "if a subclass
+ does dependency tracking, this method exempts `value`." Layer uses
+ `_no_dependency` to exempt some of its attribute assignments (conditional on
+ attribute assignment causing tracking in the subclass).
+
+ Args:
+ value: An object which will be assigned to an object attribute, whose
+ value should not be tracked.
+
+ Returns:
+ A wrapped object which, when assigned to an attribute, will not be
+ tracked (`value` will be stored in the attribute).
+ """
+ return data_structures.NoDependency(value)
+
def __setattr__(self, name, value):
- no_dependency = isinstance(value, checkpointable.NoDependency)
- if no_dependency:
- value = value.value
+ if not getattr(self, '_setattr_tracking', True):
+ super(Network, self).__setattr__(name, value)
+ return
+ no_dependency = isinstance(value, data_structures.NoDependency)
+ value = data_structures.sticky_attribute_assignment(
+ checkpointable=self, value=value, name=name)
if isinstance(value, (
base_layer.Layer,
Network,
@@ -376,7 +414,9 @@ class Network(base_layer.Layer):
'forgot to call `super(YourClass, self).__init__()`.'
' Always start with this line.')
if not is_graph_network:
- if value not in self._layers:
+ # We need to check object identity to avoid de-duplicating empty
+ # container types which compare equal.
+ if not any((layer is value for layer in self._layers)):
self._layers.append(value)
if hasattr(value, '_use_resource_variables'):
# In subclassed models, legacy layers (tf.layers) must always use
@@ -384,12 +424,6 @@ class Network(base_layer.Layer):
value._use_resource_variables = True
if (not no_dependency
and isinstance(value, checkpointable.CheckpointableBase)):
- # Layer (and therefore Network/Model) inherit from CheckpointableBase
- # rather than Checkpointable, which means there is no Checkpointable
- # __setattr__ override (it would be a performance issue for functional
- # layers). Therefore Model tracks Checkpointable objects itself.
- self._track_checkpointable(
- checkpointable=value, name=name, overwrite=True)
if ( # For subclassed models only, users may add extra weights/variables
# simply by assigning them to attributes.
not self._is_graph_network
@@ -492,7 +526,8 @@ class Network(base_layer.Layer):
@property
def layers(self):
- return self._layers
+ return checkpointable_layer_utils.filter_empty_layer_containers(
+ self._layers)
def get_layer(self, name=None, index=None):
"""Retrieves a layer based on either its name (unique) or index.
@@ -665,14 +700,14 @@ class Network(base_layer.Layer):
@property
def trainable_weights(self):
- return layer_utils.gather_trainable_weights(
+ return checkpointable_layer_utils.gather_trainable_weights(
trainable=self.trainable,
sub_layers=self.layers,
extra_variables=self._extra_variables)
@property
def non_trainable_weights(self):
- return layer_utils.gather_non_trainable_weights(
+ return checkpointable_layer_utils.gather_non_trainable_weights(
trainable=self.trainable,
sub_layers=self.layers,
extra_variables=self._extra_variables)
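
With the tracking changes above, attribute assignment on a `Network` now
tracks layers and common containers automatically. A hedged sketch of opting
out via `NoDependency` (illustrative model, not from the patch):

    from tensorflow.python import keras
    from tensorflow.python.training.checkpointable import data_structures

    class MyModel(keras.Model):

      def __init__(self):
        super(MyModel, self).__init__()
        self.dense = keras.layers.Dense(4)               # tracked dependency
        self.history = data_structures.NoDependency([])  # stored, not tracked
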
diff --git a/tensorflow/python/keras/engine/saving.py b/tensorflow/python/keras/engine/saving.py
index b9a2e1f25f..d5ccd44604 100644
--- a/tensorflow/python/keras/engine/saving.py
+++ b/tensorflow/python/keras/engine/saving.py
@@ -351,7 +351,10 @@ def preprocess_weights_for_loading(layer,
weights,
original_keras_version=None,
original_backend=None):
- """Converts layers weights from Keras 1 format to Keras 2.
+ """Preprocess layer weights between different Keras formats.
+
+  Converts layer weights from Keras 1 format to Keras 2, and also converts
+  the weights of CuDNN layers in Keras 2.
Arguments:
layer: Layer instance.
@@ -363,7 +366,18 @@ def preprocess_weights_for_loading(layer,
Returns:
A list of weights values (Numpy arrays).
"""
- if layer.__class__.__name__ == 'Bidirectional':
+ def convert_nested_bidirectional(weights):
+ """Converts layers nested in `Bidirectional` wrapper.
+
+    This function uses `preprocess_weights_for_loading()` for converting nested
+    layers.
+
+ Arguments:
+ weights: List of weights values (Numpy arrays).
+
+ Returns:
+ A list of weights values (Numpy arrays).
+ """
num_weights_per_layer = len(weights) // 2
forward_weights = preprocess_weights_for_loading(
layer.forward_layer, weights[:num_weights_per_layer],
@@ -371,7 +385,69 @@ def preprocess_weights_for_loading(layer,
backward_weights = preprocess_weights_for_loading(
layer.backward_layer, weights[num_weights_per_layer:],
original_keras_version, original_backend)
- weights = forward_weights + backward_weights
+ return forward_weights + backward_weights
+
+ def convert_nested_time_distributed(weights):
+ """Converts layers nested in `TimeDistributed` wrapper.
+
+ This function uses `preprocess_weights_for_loading()` for converting nested
+ layers.
+
+ Arguments:
+ weights: List of weights values (Numpy arrays).
+
+ Returns:
+ A list of weights values (Numpy arrays).
+ """
+ return preprocess_weights_for_loading(
+ layer.layer, weights, original_keras_version, original_backend)
+
+ def convert_nested_model(weights):
+ """Converts layers nested in `Model` or `Sequential`.
+
+ This function uses `preprocess_weights_for_loading()` for converting nested
+ layers.
+
+ Arguments:
+ weights: List of weights values (Numpy arrays).
+
+ Returns:
+ A list of weights values (Numpy arrays).
+ """
+ new_weights = []
+ # trainable weights
+ for sublayer in layer.layers:
+ num_weights = len(sublayer.trainable_weights)
+ if num_weights > 0:
+ new_weights.extend(preprocess_weights_for_loading(
+ layer=sublayer,
+ weights=weights[:num_weights],
+ original_keras_version=original_keras_version,
+ original_backend=original_backend))
+ weights = weights[num_weights:]
+
+ # non-trainable weights
+ for sublayer in layer.layers:
+ num_weights = len([l for l in sublayer.weights
+ if l not in sublayer.trainable_weights])
+ if num_weights > 0:
+ new_weights.extend(preprocess_weights_for_loading(
+ layer=sublayer,
+ weights=weights[:num_weights],
+ original_keras_version=original_keras_version,
+ original_backend=original_backend))
+ weights = weights[num_weights:]
+ return new_weights
+
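For intuition, a sketch (hypothetical weight names) of the flat ordering `convert_nested_model` consumes: all sublayers' trainable weights first, then all sublayers' non-trainable weights:

    l1_trainable, l1_non_trainable = ['l1/kernel', 'l1/bias'], ['l1/mean']
    l2_trainable, l2_non_trainable = ['l2/kernel'], []
    flat = (l1_trainable + l2_trainable             # consumed by the first loop
            + l1_non_trainable + l2_non_trainable)  # then by the second loop
    assert flat == ['l1/kernel', 'l1/bias', 'l2/kernel', 'l1/mean']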
+ # Convert layers nested in Bidirectional/Model/Sequential.
+  # Both transformations should be run for Keras 1->2 conversion
+  # as well as for conversion of CuDNN layers.
+ if layer.__class__.__name__ == 'Bidirectional':
+ weights = convert_nested_bidirectional(weights)
+ if layer.__class__.__name__ == 'TimeDistributed':
+ weights = convert_nested_time_distributed(weights)
+ elif layer.__class__.__name__ in ['Model', 'Sequential']:
+ weights = convert_nested_model(weights)
if original_keras_version == '1':
if layer.__class__.__name__ == 'TimeDistributed':
@@ -446,35 +522,6 @@ def preprocess_weights_for_loading(layer,
recurrent_kernel = np.transpose(recurrent_kernel, (2, 3, 1, 0))
weights = [kernel, recurrent_kernel, bias]
- if layer.__class__.__name__ in ['Model', 'Sequential']:
- new_weights = []
- # trainable weights
- for sublayer in layer.layers:
- num_weights = len(sublayer.trainable_weights)
- if num_weights > 0:
- new_weights.extend(
- preprocess_weights_for_loading(
- layer=sublayer,
- weights=weights[:num_weights],
- original_keras_version=original_keras_version,
- original_backend=original_backend))
- weights = weights[num_weights:]
-
- # non-trainable weights
- for sublayer in layer.layers:
- num_weights = len([
- l for l in sublayer.weights if l not in sublayer.trainable_weights
- ])
- if num_weights > 0:
- new_weights.extend(
- preprocess_weights_for_loading(
- layer=sublayer,
- weights=weights[:num_weights],
- original_keras_version=original_keras_version,
- original_backend=original_backend))
- weights = weights[num_weights:]
- weights = new_weights
-
conv_layers = ['Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose', 'ConvLSTM2D']
if layer.__class__.__name__ in conv_layers:
if original_backend == 'theano':
@@ -486,6 +533,7 @@ def preprocess_weights_for_loading(layer,
if layer.__class__.__name__ == 'ConvLSTM2D':
weights[1] = np.transpose(weights[1], (3, 2, 0, 1))
+ # convert CuDNN layers
return _convert_rnn_weights(layer, weights)
@@ -624,7 +672,7 @@ def _convert_rnn_weights(layer, weights):
kernels = transform_kernels(weights[0], transpose_input(from_cudnn),
n_gates)
recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)
- biases = weights[2].reshape((2, -1) if from_cudnn else -1)
+ biases = np.array(weights[2]).reshape((2, -1) if from_cudnn else -1)
return [kernels, recurrent_kernels, biases]
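The `np.array(...)` wrapper above appears to guard against `weights[2]` arriving as a plain Python list, which has no `reshape`; a quick illustration:

    import numpy as np
    biases = [1., 2., 3., 4.]                  # a list: biases.reshape would fail
    split = np.array(biases).reshape((2, -1))  # works for lists and ndarrays alike
    assert split.shape == (2, 2)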
if bias_shape == (2 * units * n_gates,):
@@ -806,7 +854,16 @@ def load_weights_from_hdf5_group_by_name(f, layers):
str(len(weight_values)) + ' element(s).')
# Set values.
for i in range(len(weight_values)):
- weight_value_tuples.append((symbolic_weights[i], weight_values[i]))
+ if K.int_shape(symbolic_weights[i]) != weight_values[i].shape:
+        raise ValueError('Layer #' + str(k) + ' (named "' + layer.name +
+ '"), weight ' + str(symbolic_weights[i]) +
+ ' has shape {}'.format(K.int_shape(
+ symbolic_weights[i])) +
+ ', but the saved weight has shape ' +
+ str(weight_values[i].shape) + '.')
+ else:
+ weight_value_tuples.append((symbolic_weights[i], weight_values[i]))
K.batch_set_value(weight_value_tuples)
diff --git a/tensorflow/python/keras/engine/saving_test.py b/tensorflow/python/keras/engine/saving_test.py
index 1a0aa60609..030328f2a6 100644
--- a/tensorflow/python/keras/engine/saving_test.py
+++ b/tensorflow/python/keras/engine/saving_test.py
@@ -21,7 +21,6 @@ from __future__ import print_function
import os
import shutil
import tempfile
-
from absl.testing import parameterized
import numpy as np
@@ -31,6 +30,7 @@ from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
+from tensorflow.python.keras.engine import saving
from tensorflow.python.keras.engine import training
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
@@ -248,6 +248,82 @@ class TestWeightSavingAndLoading(test.TestCase, parameterized.TestCase):
self.assertAllClose(y, ref_y)
+ def test_sequential_weight_loading_group_name_with_incorrect_length(self):
+ if h5py is None:
+ return
+
+ temp_dir = self.get_temp_dir()
+ self.addCleanup(shutil.rmtree, temp_dir)
+ h5_path = os.path.join(temp_dir, 'test.h5')
+
+ num_hidden = 5
+ input_dim = 3
+ num_classes = 2
+ with self.test_session():
+ ref_model = keras.models.Sequential()
+ ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
+ name='d1'))
+ ref_model.add(keras.layers.Dense(num_classes, name='d2'))
+ ref_model.compile(loss=keras.losses.MSE,
+ optimizer=keras.optimizers.RMSprop(lr=0.0001),
+ metrics=[keras.metrics.categorical_accuracy])
+
+ f_ref_model = h5py.File(h5_path, 'w')
+ saving.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)
+
+ f_model = h5py.File(h5_path, 'r')
+ model = keras.models.Sequential()
+ model.add(keras.layers.Dense(num_hidden, use_bias=False,
+ input_dim=input_dim, name='d1'))
+ model.add(keras.layers.Dense(num_classes, name='d2'))
+ model.compile(loss=keras.losses.MSE,
+ optimizer=keras.optimizers.RMSprop(lr=0.0001),
+ metrics=[keras.metrics.categorical_accuracy])
+ with self.assertRaisesRegexp(ValueError,
+ r'Layer #0 \(named \"d1\"\) expects 1 '
+ r'weight\(s\), but the saved weights have 2 '
+ r'element\(s\)\.'):
+ saving.load_weights_from_hdf5_group_by_name(f_model, model.layers)
+
+ def test_sequential_weight_loading_group_name_with_incorrect_shape(self):
+ if h5py is None:
+ return
+
+ temp_dir = self.get_temp_dir()
+ self.addCleanup(shutil.rmtree, temp_dir)
+ h5_path = os.path.join(temp_dir, 'test.h5')
+
+ num_hidden = 5
+ input_dim = 3
+ num_classes = 2
+ with self.test_session():
+ ref_model = keras.models.Sequential()
+ ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
+ name='d1'))
+ ref_model.add(keras.layers.Dense(num_classes, name='d2'))
+ ref_model.compile(loss=keras.losses.MSE,
+ optimizer=keras.optimizers.RMSprop(lr=0.0001),
+ metrics=[keras.metrics.categorical_accuracy])
+
+ f_ref_model = h5py.File(h5_path, 'w')
+ saving.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)
+
+ f_model = h5py.File(h5_path, 'r')
+ model = keras.models.Sequential()
+ model.add(keras.layers.Dense(num_hidden + 5, input_dim=input_dim,
+ name='d1'))
+ model.add(keras.layers.Dense(num_classes, name='d2'))
+ model.compile(loss=keras.losses.MSE,
+ optimizer=keras.optimizers.RMSprop(lr=0.0001),
+ metrics=[keras.metrics.categorical_accuracy])
+ with self.assertRaisesRegexp(ValueError,
+ r'Layer #0 \(named "d1"\), weight '
+ r'<tf\.Variable \'d1_1\/kernel:0\' '
+ r'shape=\(3, 10\) dtype=float32> has '
+ r'shape \(3, 10\), but the saved weight has '
+ r'shape \(3, 5\)\.'):
+ saving.load_weights_from_hdf5_group_by_name(f_model, model.layers)
+
class TestWholeModelSaving(test.TestCase):
diff --git a/tensorflow/python/keras/engine/sequential.py b/tensorflow/python/keras/engine/sequential.py
index 89b40b5d38..371504a503 100644
--- a/tensorflow/python/keras/engine/sequential.py
+++ b/tensorflow/python/keras/engine/sequential.py
@@ -29,6 +29,7 @@ from tensorflow.python.keras.engine.input_layer import InputLayer
from tensorflow.python.keras.engine.training import Model
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util.tf_export import tf_export
@@ -108,6 +109,7 @@ class Sequential(Model):
return self._layers[1:]
return self._layers
+ @checkpointable.no_automatic_dependency_tracking
def add(self, layer):
"""Adds a layer instance on top of the layer stack.
@@ -146,8 +148,6 @@ class Sequential(Model):
first_layer = layer.layers[0]
while isinstance(first_layer, (Model, Sequential)):
first_layer = first_layer.layers[0]
- batch_shape = first_layer._batch_input_shape
- dtype = first_layer.dtype
if hasattr(first_layer, '_batch_input_shape'):
batch_shape = first_layer._batch_input_shape
@@ -193,6 +193,7 @@ class Sequential(Model):
else:
self._layers.append(layer)
+ @checkpointable.no_automatic_dependency_tracking
def pop(self):
"""Removes the last layer in the model.
@@ -212,6 +213,7 @@ class Sequential(Model):
self.outputs = [self.layers[-1].output]
self.build()
+ @checkpointable.no_automatic_dependency_tracking
def build(self, input_shape=None):
if input_shape and not self.inputs:
batch_shape = tuple(input_shape)
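A rough sketch of what `no_automatic_dependency_tracking` plausibly does, inferred from the `_setattr_tracking` flag consulted in `Network.__setattr__` above (an assumption, not the library's actual implementation): it turns attribute tracking off for the duration of the wrapped method.

    import functools

    def no_automatic_dependency_tracking(method):
      """Decorator sketch: disable attribute tracking inside `method`."""
      @functools.wraps(method)
      def wrapper(self, *args, **kwargs):
        previous = getattr(self, '_setattr_tracking', True)
        self._setattr_tracking = False  # plain attribute writes, no tracking
        try:
          return method(self, *args, **kwargs)
        finally:
          self._setattr_tracking = previous
      return wrapper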
diff --git a/tensorflow/python/keras/engine/training.py b/tensorflow/python/keras/engine/training.py
index fce6cbdb7a..bd03f4871f 100644
--- a/tensorflow/python/keras/engine/training.py
+++ b/tensorflow/python/keras/engine/training.py
@@ -42,6 +42,7 @@ from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer as tf_optimizer_module
+from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util.tf_export import tf_export
@@ -115,6 +116,7 @@ class Model(Network):
# Create a cache for dataset - uninitialized iterators
self._dataset_iterator_cache = weakref.WeakKeyDictionary()
+ @checkpointable.no_automatic_dependency_tracking
def compile(self,
optimizer,
loss=None,
@@ -178,6 +180,11 @@ class Model(Network):
raise ValueError('Only TF native optimizers are supported in Eager mode.')
self.optimizer = optimizers.get(optimizer)
+ # We've disabled automatic dependency tracking for this method, but do want
+ # to add a checkpoint dependency on the optimizer if it's checkpointable.
+ if isinstance(self.optimizer, checkpointable.CheckpointableBase):
+ self._track_checkpointable(
+ self.optimizer, name='optimizer', overwrite=True)
self.loss = loss
self.metrics = metrics or []
self.loss_weights = loss_weights
@@ -592,7 +599,7 @@ class Model(Network):
# Unconditional updates
updates += self.get_updates_for(None)
# Conditional updates relevant to this model
- updates += self.get_updates_for(self._feed_inputs)
+ updates += self.get_updates_for(self.inputs)
# Stateful metrics updates
updates += self.metrics_updates
# Gets loss and metrics. Updates weights at each call.
@@ -941,6 +948,7 @@ class Model(Network):
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
+ @checkpointable.no_automatic_dependency_tracking
def _set_inputs(self, inputs, training=None):
"""Set model's input and output specs based on the input data received.
@@ -989,6 +997,7 @@ class Model(Network):
else:
self._symbolic_set_inputs(inputs, training=training)
+ @checkpointable.no_automatic_dependency_tracking
def _eager_set_inputs(self, inputs):
"""Set model's input and output specs based on the input data received.
@@ -1041,6 +1050,7 @@ class Model(Network):
'output_%d' % (i + 1) for i in range(len(dummy_output_values))]
self.built = True
+ @checkpointable.no_automatic_dependency_tracking
def _symbolic_set_inputs(self, inputs, outputs=None, training=None):
"""Set model's inputs and output specs based.
diff --git a/tensorflow/python/keras/engine/training_arrays.py b/tensorflow/python/keras/engine/training_arrays.py
index 281ad9bd50..adefffab11 100644
--- a/tensorflow/python/keras/engine/training_arrays.py
+++ b/tensorflow/python/keras/engine/training_arrays.py
@@ -124,6 +124,10 @@ def fit_loop(model,
callback_metrics = copy.copy(out_labels) + [
'val_' + n for n in out_labels
]
+      # The test_function needs to be created before the first epoch starts,
+      # because the TensorBoard callback's on_epoch_begin adds summaries to
+      # the test_function's list of fetches.
+ model._make_test_function()
else:
callback_metrics = copy.copy(out_labels)
@@ -156,7 +160,7 @@ def fit_loop(model,
callbacks.set_model(callback_model)
- callbacks.set_params({
+ callback_params = {
'batch_size': batch_size,
'epochs': epochs,
'steps': steps_per_epoch,
@@ -164,11 +168,17 @@ def fit_loop(model,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics or [],
- })
- callbacks.on_train_begin()
- callback_model.stop_training = False
+ }
+ if validation_steps:
+ callback_params.update({'validation_steps': validation_steps})
+ callbacks.set_params(callback_params)
+
for cbk in callbacks:
cbk.validation_data = val_ins
+  # validation_data must be set before on_train_begin() is called
+  # so that the TensorBoard callback can validate its input
+ callbacks.on_train_begin()
+ callback_model.stop_training = False
# To prevent a slowdown, we find beforehand the arrays that need conversion.
feed = model._feed_inputs + model._feed_targets + model._feed_sample_weights
diff --git a/tensorflow/python/keras/engine/training_eager.py b/tensorflow/python/keras/engine/training_eager.py
index e8838cd3bc..397de42985 100644
--- a/tensorflow/python/keras/engine/training_eager.py
+++ b/tensorflow/python/keras/engine/training_eager.py
@@ -34,7 +34,6 @@ from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import generic_utils
-from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
@@ -194,7 +193,8 @@ def iterator_fit_loop(model,
callbacks=None,
callback_metrics=None,
validation_steps=None,
- do_validation=False):
+ do_validation=False,
+ batch_size=None):
"""Fit function for eager execution when input is given as dataset iterator.
Updates the given epoch logs.
@@ -224,16 +224,23 @@ def iterator_fit_loop(model,
validation_steps: Number of steps to run validation for (only if doing
validation from data tensors). Ignored with default value of `None`.
do_validation: Boolean value indicating whether we should do validation.
+      batch_size: int, val_inputs and val_targets will be evaluated batch by
+          batch with size batch_size if they are arrays.
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
"""
assert isinstance(inputs, iterator_ops.EagerIterator)
+
+  # make sure either (x, y) or (x, y, sample_weights) is provided
+  if (not isinstance(inputs.output_shapes, (list, tuple)) or
+      len(inputs.output_shapes) not in (2, 3)):
+    raise ValueError('Please provide either inputs and targets '
+                     'or inputs, targets, and sample_weights')
+
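Illustrative of the element structures this check accepts (names hypothetical; standard `tf.data` usage assumed): a two-component dataset yields (inputs, targets), a three-component one adds sample weights, and anything else is rejected.

    import numpy as np
    import tensorflow as tf

    x, y, sw = np.ones([8, 4]), np.ones([8, 1]), np.ones([8])
    ok_two = tf.data.Dataset.from_tensor_slices((x, y))        # (inputs, targets)
    ok_three = tf.data.Dataset.from_tensor_slices((x, y, sw))  # plus sample weights
    # A dataset of bare inputs (a 1-tuple) fails the length check above.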
for step_index in range(steps_per_epoch):
- batch_logs = {}
- batch_logs['batch'] = step_index
- batch_logs['size'] = 1
+ batch_logs = {'batch': step_index, 'size': 1}
callbacks.on_batch_begin(step_index, batch_logs)
# Get data from the iterator.
@@ -247,19 +254,21 @@ def iterator_fit_loop(model,
          'batches (in this case, %d batches).' % (steps_per_epoch * epochs))
break
- if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
- raise ValueError('Please provide data as a list or tuple of 2 elements '
- ' - input and target pair. Received %s' % next_element)
- x, y = next_element
+ if len(inputs.output_shapes) == 2:
+ x, y = next_element
+ sample_weights = None
+ else:
+ x, y, sample_weights = next_element
# Validate and standardize data.
x, y, sample_weights = model._standardize_user_data(
- x, y, class_weight=class_weight)
+ x, y, sample_weight=sample_weights, class_weight=class_weight)
x = training_utils.cast_if_floating_dtype(x)
y = training_utils.cast_if_floating_dtype(y)
if sample_weights:
sample_weights = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
+ training_utils.cast_if_floating_dtype(
+ ops.convert_to_tensor(val, dtype=backend.floatx()))
if val is not None else None for val in sample_weights
]
@@ -307,122 +316,8 @@ def iterator_fit_loop(model,
val_targets,
sample_weights=val_sample_weights,
steps=validation_steps,
- verbose=0)
- if not isinstance(val_outs, list):
- val_outs = [val_outs]
- # Same labels assumed.
- for l, o in zip(out_labels, val_outs):
- epoch_logs['val_' + l] = o
-
-
-def batch_fit_loop(model,
- inputs,
- targets,
- epoch_logs,
- index_array,
- out_labels,
- callback_model,
- batch_size,
- sample_weights=None,
- val_inputs=None,
- val_targets=None,
- val_sample_weights=None,
- callbacks=None,
- shuffle=True,
- num_train_samples=None,
- do_validation=False):
- """Fit function for eager execution when input is given as arrays or tensors.
-
- Updates the given epoch logs.
-
- Arguments:
- model: Instance of the `Model`.
- inputs: List of input arrays.
- targets: List of target arrays.
- epoch_logs: Dictionary of logs from every epoch.
- index_array: Index array generated from number of training samples.
- out_labels: Output labels generated from model metric names.
- callback_model: Instance of `Model` to callback.
- batch_size: Integer batch size or None if unknown.
- sample_weights: Optional list of sample weight arrays.
- val_inputs: Input data for validation.
- val_targets: Target data for validation.
- val_sample_weights: Sample weight data for validation.
- callbacks: List of callbacks to be called during training.
- shuffle: Whether to shuffle the data at the beginning of each epoch.
- num_train_samples: Integer number of training samples.
- do_validation: Boolean value indicating whether we should do validation.
- """
- # TODO(psv): Create a dataset iterator instead of manually creating batches
- # here and in batch_test_loop, batch_predict_loop.
- if shuffle == 'batch':
- index_array = model._batch_shuffle(index_array, batch_size)
- elif shuffle:
- np.random.shuffle(index_array)
-
- batches = generic_utils.make_batches(num_train_samples, batch_size)
-
- for batch_index, (batch_start, batch_end) in enumerate(batches):
- batch_ids = index_array[batch_start:batch_end]
- inputs_batch = slice_arrays(inputs, batch_ids, contiguous=not shuffle)
- targets_batch = slice_arrays(targets, batch_ids, contiguous=not shuffle)
- if sample_weights:
- sample_weights_batch = slice_arrays(
- sample_weights, batch_ids, contiguous=not shuffle)
- else:
- sample_weights_batch = None
- batch_logs = {}
- batch_logs['batch'] = batch_index
- batch_logs['size'] = len(batch_ids)
-
- callbacks.on_batch_begin(batch_index, batch_logs)
-
- inputs_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- for val in inputs_batch
- ]
- targets_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- for val in targets_batch
- ]
- if sample_weights:
- sample_weights_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- if val is not None else None for val in sample_weights_batch
- ]
-
- outs, loss, loss_metrics = _process_single_batch(
- model,
- inputs_batch,
- targets_batch,
- sample_weights=sample_weights_batch,
- training=True)
-
- if not isinstance(outs, list):
- outs = [outs]
-
- for l, o in zip(out_labels, outs):
- batch_logs[l] = o
- # Required for eager execution
- metrics_results = _eager_metrics_fn(model, outs, targets_batch)
- batch_logs['loss'] = tensor_util.constant_value(backend.mean(loss))
-
- for k, v in zip(model.metrics_names,
- [backend.mean(loss)] + loss_metrics + metrics_results):
- batch_logs[k] = tensor_util.constant_value(v)
- callbacks.on_batch_end(batch_index, batch_logs)
- if callback_model.stop_training:
- break
-
- if batch_index == len(batches) - 1: # Last batch.
- if do_validation:
- val_outs = test_loop(
- model,
- val_inputs,
- val_targets,
- sample_weights=val_sample_weights,
- batch_size=batch_size,
- verbose=0)
+ verbose=0,
+ batch_size=batch_size)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
@@ -451,6 +346,11 @@ def iterator_test_loop(model, inputs, steps, verbose=0):
expectations of the model.
"""
assert isinstance(inputs, iterator_ops.EagerIterator)
+  # make sure either (x, y) or (x, y, sample_weights) is provided
+  if (not isinstance(inputs.output_shapes, (list, tuple)) or
+      len(inputs.output_shapes) < 2 or len(inputs.output_shapes) > 3):
+    raise ValueError('Please provide either inputs and targets '
+                     'or inputs, targets, and sample_weights')
outs = []
num_samples = 0
if verbose == 1:
@@ -466,10 +366,11 @@ def iterator_test_loop(model, inputs, steps, verbose=0):
'(in this case, %d batches).', steps)
break
- if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
- raise ValueError('Please provide data as a list or tuple of 2 elements '
- ' - input and target pair. Received %s' % next_element)
- x, y = next_element
+ if len(inputs.output_shapes) == 2:
+ x, y = next_element
+ sample_weights = None
+ else:
+ x, y, sample_weights = next_element
# Validate and standardize data.
x, y, sample_weights = model._standardize_user_data(x, y)
@@ -512,94 +413,6 @@ def iterator_test_loop(model, inputs, steps, verbose=0):
return outs
-def batch_test_loop(model,
- inputs,
- targets,
- batch_size,
- sample_weights=None,
- verbose=0):
- """Test function for eager execution when input is given as arrays or tensors.
-
- Arguments:
- model: Model instance that is being evaluated in Eager mode.
- inputs: List of input arrays.
- targets: List of target arrays.
- batch_size: Integer batch size.
- sample_weights: Optional list of sample weight arrays.
- verbose: Verbosity mode.
-
- Returns:
- Scalar loss (if the model has a single output and no metrics)
- or list of scalars (if the model has multiple outputs
- and/or metrics). The attribute `model.metrics_names` will give you
- the display labels for the scalar outputs.
- """
- outs = []
- feed_data = inputs + targets
- if sample_weights:
- feed_data += sample_weights
- num_samples = training_utils.check_num_samples(
- feed_data, batch_size=batch_size)
- if verbose == 1:
- progbar = generic_utils.Progbar(target=num_samples)
- batches = generic_utils.make_batches(num_samples, batch_size)
- index_array = np.arange(num_samples)
- for batch_index, (batch_start, batch_end) in enumerate(batches):
- batch_ids = index_array[batch_start:batch_end]
- inputs_batch = slice_arrays(inputs, batch_ids)
- targets_batch = slice_arrays(targets, batch_ids)
- if sample_weights:
- sample_weights_batch = slice_arrays(sample_weights, batch_ids)
- else:
- sample_weights_batch = None
-
- inputs_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- for val in inputs_batch
- ]
- targets_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- for val in targets_batch
- ]
- if sample_weights:
- sample_weights_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- if val is not None else None for val in sample_weights_batch
- ]
-
- loss_outs, loss, loss_metrics = _model_loss(
- model,
- inputs_batch,
- targets_batch,
- sample_weights=sample_weights_batch,
- training=False)
- metrics_results = _eager_metrics_fn(model, loss_outs, targets_batch)
- batch_outs = []
- for _, v in zip(model.metrics_names,
- [backend.mean(loss)] + loss_metrics + metrics_results):
- batch_outs.append(tensor_util.constant_value(v))
-
- if isinstance(batch_outs, list):
- if batch_index == 0:
- for _ in enumerate(batch_outs):
- outs.append(0.)
- for i, batch_out in enumerate(batch_outs):
- outs[i] += batch_out * len(batch_ids)
- else:
- if batch_index == 0:
- outs.append(0.)
- outs[0] += batch_outs * len(batch_ids)
-
- if verbose == 1:
- progbar.update(batch_end)
-
- for i in range(len(outs)):
- outs[i] /= num_samples
- if len(outs) == 1:
- return outs[0]
- return outs
-
-
def iterator_predict_loop(model, inputs, steps, verbose=0):
"""Predict function for eager execution when input is dataset iterator.
@@ -619,6 +432,12 @@ def iterator_predict_loop(model, inputs, steps, verbose=0):
expectations of the model.
"""
assert isinstance(inputs, iterator_ops.EagerIterator)
+ if not isinstance(inputs.output_shapes,
+ (list, tuple)) or len(inputs.output_shapes) > 2:
+ raise ValueError(
+ 'Please provide data as a list or tuple of 1 or 2 elements '
+ ' - input or input and target pair. Received %s. We do not use the '
+ '`target` value here.' % inputs.output_shapes)
outs = []
if verbose == 1:
progbar = generic_utils.Progbar(target=steps)
@@ -634,12 +453,8 @@ def iterator_predict_loop(model, inputs, steps, verbose=0):
'batches (in this case, %d batches).', steps)
break
- if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
- raise ValueError(
- 'Please provide data as a list or tuple of 2 elements '
- ' - input and target pair. Received %s. We do not use the '
- '`target` value here.' % next_element)
- x, _ = next_element
+    # Expects a tuple whose first element represents the inputs.
+ x = next_element[0]
# Validate and standardize data.
x, _, _ = model._standardize_user_data(x)
@@ -670,99 +485,6 @@ def iterator_predict_loop(model, inputs, steps, verbose=0):
return outs
-def batch_predict_loop(model, inputs, batch_size, verbose=0):
- """Predict function for eager execution when input is arrays or tensors.
-
- Arguments:
- model: Instance of `Model`.
- inputs: List of input arrays.
- batch_size: Integer batch size.
- verbose: Verbosity mode.
-
- Returns:
- Array of predictions (if the model has a single output)
- or list of arrays of predictions (if the model has multiple outputs).
- """
- outs = []
- num_samples = training_utils.check_num_samples(inputs, batch_size)
- if verbose == 1:
- progbar = generic_utils.Progbar(target=num_samples)
- batches = generic_utils.make_batches(num_samples, batch_size)
- index_array = np.arange(num_samples)
- for batch_index, (batch_start, batch_end) in enumerate(batches):
- batch_ids = index_array[batch_start:batch_end]
- inputs_batch = slice_arrays(inputs, batch_ids)
-
- inputs_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- for val in inputs_batch
- ]
-
- if len(inputs_batch) == 1:
- if model._expects_training_arg:
- batch_outs = model.call(inputs_batch[0], training=False)
- else:
- batch_outs = model.call(inputs_batch[0])
- else:
- if model._expects_training_arg:
- batch_outs = model.call(inputs_batch, training=False)
- else:
- batch_outs = model.call(inputs_batch)
-
- if not isinstance(batch_outs, list):
- batch_outs = [batch_outs]
- if batch_index == 0:
- # Pre-allocate the results arrays.
- for batch_out in batch_outs:
- dims = batch_out.shape[1:].dims
- dims_list = [d.value for d in dims]
- shape = (num_samples,) + tuple(dims_list)
- outs.append(np.zeros(shape, dtype=batch_out.dtype.as_numpy_dtype))
- for i, batch_out in enumerate(batch_outs):
- outs[i][batch_start:batch_end] = batch_out
- if verbose == 1:
- progbar.update(batch_end)
-
- if len(outs) == 1:
- return outs[0]
- return outs
-
-
-def slice_arrays(arrays, indices, contiguous=True):
- """Slices batches out of provided arrays (workaround for eager tensors).
-
- Unfortunately eager tensors don't have the same slicing behavior as
- Numpy arrays (they follow the same slicing behavior as symbolic TF tensors),
- hence we cannot use `generic_utils.slice_arrays` directly
- and we have to implement this workaround based on `concat`. This has a
- performance cost.
-
- Arguments:
- arrays: Single array or list of arrays.
- indices: List of indices in the array that should be included in the output
- batch.
- contiguous: Boolean flag indicating whether the indices are contiguous.
-
- Returns:
- Slice of data (either single array or list of arrays).
- """
- if any(tensor_util.is_tensor(x) for x in arrays):
- converted_to_list = False
- if not isinstance(arrays, list):
- converted_to_list = True
- arrays = [arrays]
- if not contiguous:
- entries = [[x[i:i + 1] for i in indices] for x in arrays]
- slices = [array_ops.concat(x, axis=0) for x in entries]
- else:
- slices = [x[indices[0]:indices[-1] + 1] for x in arrays]
- if converted_to_list:
- slices = slices[0]
- return slices
- else:
- return generic_utils.slice_arrays(arrays, indices)
-
-
def _process_single_batch(model,
inputs,
targets,
@@ -935,19 +657,24 @@ def fit_loop(model,
Raises:
ValueError: In case of invalid argument values.
"""
+ # Convert training inputs to an EagerIterator
+ inputs, steps_per_epoch = training_utils.convert_to_iterator(
+ x=inputs,
+ y=targets,
+ sample_weights=sample_weights,
+ batch_size=batch_size,
+ steps_per_epoch=steps_per_epoch,
+ epochs=epochs,
+ shuffle=shuffle)
# Required for eager execution
with backend.learning_phase_scope(1):
do_validation = False
if val_inputs:
do_validation = True
- if (steps_per_epoch is None and verbose and inputs and
- hasattr(inputs[0], 'shape') and hasattr(val_inputs[0], 'shape')):
- print('Train on %d samples, validate on %d samples' %
- (inputs[0].shape[0], val_inputs[0].shape[0]))
num_train_samples = None
out_labels = None
- if steps_per_epoch is None or model._is_compiled:
+ if model._is_compiled:
out_labels = model.metrics_names
if do_validation:
callback_metrics = copy.copy(out_labels) + [
@@ -956,28 +683,10 @@ def fit_loop(model,
else:
callback_metrics = copy.copy(out_labels)
- if steps_per_epoch is None:
- if sample_weights:
- feed_data = inputs + targets + sample_weights
- else:
- feed_data = inputs + targets
- num_train_samples = training_utils.check_num_samples(
- feed_data,
- batch_size=batch_size,
- steps=steps_per_epoch,
- steps_name='steps_per_epoch')
-
- if num_train_samples is not None:
- index_array = np.arange(num_train_samples)
-
model.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [model.history]
if verbose:
- if steps_per_epoch is not None:
- count_mode = 'steps'
- else:
- count_mode = 'samples'
- callbacks += [cbks.ProgbarLogger(count_mode)]
+ callbacks += [cbks.ProgbarLogger('steps')]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self
@@ -989,7 +698,7 @@ def fit_loop(model,
callbacks.set_model(callback_model)
- callbacks.set_params({
+ callback_params = {
'batch_size': batch_size,
'epochs': epochs,
'steps': steps_per_epoch,
@@ -997,9 +706,11 @@ def fit_loop(model,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics or [],
- })
- callbacks.on_train_begin()
- callback_model.stop_training = False
+ }
+ if validation_steps:
+ callback_params.update({'validation_steps': validation_steps})
+ callbacks.set_params(callback_params)
+
for cbk in callbacks:
if not val_inputs:
cbk.validation_data = []
@@ -1009,47 +720,32 @@ def fit_loop(model,
cbk.validation_data = val_inputs + val_targets + val_sample_weights
else:
cbk.validation_data = val_inputs + val_targets
+ # validation_data must be set before on_train_begin() is called
+ # so that TensorboardCallback can validate its input
+ callbacks.on_train_begin()
+ callback_model.stop_training = False
for epoch in range(initial_epoch, epochs):
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
-
- if steps_per_epoch is not None:
- iterator_fit_loop(
- model,
- inputs,
- class_weight,
- steps_per_epoch=steps_per_epoch,
- callback_model=callback_model,
- out_labels=out_labels,
- epoch_logs=epoch_logs,
- val_inputs=val_inputs,
- val_targets=val_targets,
- val_sample_weights=val_sample_weights,
- epochs=epochs,
- verbose=verbose,
- callbacks=callbacks,
- callback_metrics=callback_metrics,
- validation_steps=validation_steps,
- do_validation=do_validation)
- else:
- batch_fit_loop(
- model,
- inputs,
- targets,
- epoch_logs=epoch_logs,
- index_array=index_array,
- out_labels=out_labels,
- callback_model=callback_model,
- batch_size=batch_size,
- sample_weights=sample_weights,
- val_inputs=val_inputs,
- val_targets=val_targets,
- val_sample_weights=val_sample_weights,
- callbacks=callbacks,
- shuffle=shuffle,
- num_train_samples=num_train_samples,
- do_validation=do_validation)
+ iterator_fit_loop(
+ model,
+ inputs,
+ class_weight,
+ steps_per_epoch=steps_per_epoch,
+ callback_model=callback_model,
+ out_labels=out_labels,
+ epoch_logs=epoch_logs,
+ val_inputs=val_inputs,
+ val_targets=val_targets,
+ val_sample_weights=val_sample_weights,
+ epochs=epochs,
+ verbose=verbose,
+ callbacks=callbacks,
+ callback_metrics=callback_metrics,
+ validation_steps=validation_steps,
+ do_validation=do_validation,
+ batch_size=batch_size)
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
@@ -1081,17 +777,14 @@ def test_loop(model, inputs, targets,
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
+ inputs, steps = training_utils.convert_to_iterator(
+ x=inputs,
+ y=targets,
+ sample_weights=sample_weights,
+ batch_size=batch_size,
+ steps_per_epoch=steps)
with backend.learning_phase_scope(0):
- if steps is not None:
- return iterator_test_loop(model, inputs, steps, verbose=verbose)
- else:
- return batch_test_loop(
- model,
- inputs,
- targets,
- batch_size=batch_size,
- sample_weights=sample_weights,
- verbose=verbose)
+ return iterator_test_loop(model, inputs, steps, verbose=verbose)
def predict_loop(model, inputs,
@@ -1115,8 +808,6 @@ def predict_loop(model, inputs,
(if the model has multiple outputs).
"""
with backend.learning_phase_scope(0):
- if steps is not None:
- return iterator_predict_loop(model, inputs, steps, verbose=verbose)
- else:
- return batch_predict_loop(
- model, inputs, batch_size=batch_size, verbose=verbose)
+ inputs, steps = training_utils.convert_to_iterator(
+ x=inputs, batch_size=batch_size, steps_per_epoch=steps)
+ return iterator_predict_loop(model, inputs, steps, verbose=verbose)
diff --git a/tensorflow/python/keras/engine/training_generator.py b/tensorflow/python/keras/engine/training_generator.py
index d81b384f0e..432cf2bddd 100644
--- a/tensorflow/python/keras/engine/training_generator.py
+++ b/tensorflow/python/keras/engine/training_generator.py
@@ -96,14 +96,25 @@ def fit_generator(model,
else:
callback_model = model
callbacks.set_model(callback_model)
- callbacks.set_params({
+
+ callback_params = {
'epochs': epochs,
'steps': steps_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
- })
- callbacks.on_train_begin()
+ }
+ if do_validation:
+    # The test_function needs to be created before the first epoch starts,
+    # because the TensorBoard callback's on_epoch_begin adds summaries to
+    # the test_function's list of fetches.
+ model._make_test_function()
+ # determine the number of validation batches given a generator
+ if validation_steps:
+ callback_params.update({'validation_steps': validation_steps})
+ elif isinstance(validation_data, Sequence):
+ callback_params.update({'validation_steps': len(validation_data)})
+ callbacks.set_params(callback_params)
enqueuer = None
val_enqueuer = None
@@ -149,6 +160,9 @@ def fit_generator(model,
output_generator = generator
callback_model.stop_training = False
+  # validation_data must be set before on_train_begin() is called
+  # so that the TensorBoard callback can validate its input
+ callbacks.on_train_begin()
# Construct epoch logs.
epoch_logs = {}
while epoch < epochs:
diff --git a/tensorflow/python/keras/engine/training_utils.py b/tensorflow/python/keras/engine/training_utils.py
index 728a2b493b..dbbc87daf9 100644
--- a/tensorflow/python/keras/engine/training_utils.py
+++ b/tensorflow/python/keras/engine/training_utils.py
@@ -19,9 +19,11 @@ from __future__ import division
from __future__ import print_function
import copy
+import math
import numpy as np
+from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_util
@@ -31,6 +33,135 @@ from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.ops import math_ops
+def _map_nested(data, func):
+ """Maps each nested element using func."""
+ if isinstance(data, list):
+ return [_map_nested(nested_data, func) for nested_data in data]
+ elif isinstance(data, tuple):
+ return tuple(_map_nested(nested_data, func) for nested_data in data)
+ elif isinstance(data, dict):
+ return {
+ k: _map_nested(nested_data, func) for k, nested_data in data.items()
+ }
+ else:
+ return func(data)
+
+
+def _nested_all(data, cond_func):
+ """Checks if all elements in a nested structure satisfy cond_func."""
+ if isinstance(data, (tuple, list)):
+ return all([_nested_all(nested_data, cond_func) for nested_data in data])
+ elif isinstance(data, dict):
+ return all(
+ [_nested_all(nested_data, cond_func) for nested_data in data.values()])
+ else:
+ return cond_func(data)
+
+
+def _nested_any(data, cond_func):
+  """Checks if any nested elements in a nested structure satisfy cond_func."""
+ if isinstance(data, (tuple, list)):
+ return any([_nested_any(nested_data, cond_func) for nested_data in data])
+ elif isinstance(data, dict):
+ return any(
+ [_nested_any(nested_data, cond_func) for nested_data in data.values()])
+ else:
+ return cond_func(data)
+
+
+def _convert_lists_to_tuples(data):
+ """Converts all lists to tuples, since Datasets expect tuples."""
+ if isinstance(data, (tuple, list)):
+ return tuple(_convert_lists_to_tuples(nested_data) for nested_data in data)
+ elif isinstance(data, dict):
+ return {
+ k: _convert_lists_to_tuples(nested_data)
+ for k, nested_data in data.items()
+ }
+ else:
+ return data
+
+
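The list-to-tuple conversion matters because `Dataset.from_tensor_slices` treats a tuple as separate per-element components, while a list is converted into a single stacked tensor (so its entries must share a shape); a sketch:

    import numpy as np
    import tensorflow as tf

    a, b = np.ones([4, 2]), np.zeros([4, 2])
    ds_tuple = tf.data.Dataset.from_tensor_slices((a, b))  # two components/element
    ds_list = tf.data.Dataset.from_tensor_slices([a, b])   # one stacked tensor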
+def _get_batch_axis_size(data):
+ """Returns batch axis shape for nested data."""
+ if isinstance(data, (tuple, list)):
+ return _get_batch_axis_size(data[0])
+ elif isinstance(data, dict):
+ return _get_batch_axis_size(list(data.values()))
+ else:
+ return int(data.shape[0])
+
+
+def convert_to_iterator(x=None,
+ y=None,
+ sample_weights=None,
+ batch_size=None,
+ steps_per_epoch=None,
+ epochs=1,
+ shuffle=False):
+ """Converts NumPy arrays or EagerTensors to an EagerIterator.
+
+ Combines all provided data into a single EagerIterator.
+
+ Arguments:
+    x: NumPy array or EagerTensor, or list of NumPy arrays or EagerTensors
+        representing inputs to a model.
+    y: Optional. NumPy array or EagerTensor, or list of NumPy arrays or
+        EagerTensors representing targets of a model.
+ sample_weights: Optional NumPy array or EagerTensor representing sample
+ weights.
+ batch_size: Used to batch data and calculate how many steps EagerIterator
+ should take per epoch.
+ steps_per_epoch: If provided, how many steps EagerIterator should take per
+ epoch.
+ epochs: Epochs to repeat iterator for.
+ shuffle: Whether to shuffle data after each epoch.
+
+ Raises:
+ ValueError: if steps_per_epoch cannot be calculated from the data
+ provided.
+
+ Returns:
+ (Iterator, steps_per_epoch).
+
+ """
+ if isinstance(x, iterator_ops.EagerIterator):
+ return x, steps_per_epoch
+
+ if not _nested_any(sample_weights, lambda x: x is None):
+ data = (x, y, sample_weights)
+ elif not _nested_any(y, lambda x: x is None):
+ data = (x, y)
+ else:
+    # Always wrap in a tuple, so we know y and sample_weights weren't set,
+    # even when x has multiple elements.
+ data = (x,)
+
+ data = _convert_lists_to_tuples(data)
+ if steps_per_epoch is None and batch_size is not None:
+ num_samples = _get_batch_axis_size(data)
+ steps_per_epoch = int(math.ceil(num_samples / batch_size))
+
+ if steps_per_epoch is None:
+    raise ValueError('Could not determine steps_per_epoch. '
+                     'Please provide either batch_size or '
+                     'steps_per_epoch.')
+
+  # TODO(omalleyt): For NumPy arrays in graph mode, placeholder ops should be
+  # used instead; this approach is only ideal for eager mode.
+ dataset = dataset_ops.Dataset.from_tensor_slices(data)
+
+ if batch_size is not None:
+ dataset = dataset.batch(batch_size)
+ if shuffle:
+ dataset = dataset.shuffle(buffer_size=10000)
+ dataset = dataset.repeat(epochs)
+ iterator = dataset.make_one_shot_iterator()
+
+ return iterator, steps_per_epoch
+
+
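A short usage sketch, mirroring the tests added below: when only `batch_size` is given, `steps_per_epoch` is derived from the batch axis, e.g. ceil(10 / 4) = 3.

    import numpy as np

    x, y = np.ones([10, 3]), np.ones([10, 1])
    iterator, steps = convert_to_iterator(x=x, y=y, batch_size=4)
    # steps == 3; each iterator.get_next() yields an (x_batch, y_batch) tuple.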
def check_num_samples(ins,
batch_size=None,
steps=None,
@@ -128,8 +259,8 @@ def standardize_input_data(data,
except KeyError as e:
raise ValueError('No data provided for "' + e.args[0] + '". Need data '
'for each key in: ' + str(names))
- elif isinstance(data, list):
- if isinstance(data[0], list):
+ elif isinstance(data, (list, tuple)):
+ if isinstance(data[0], (list, tuple)):
data = [np.asarray(d) for d in data]
elif len(names) == 1 and isinstance(data[0], (float, int)):
data = [np.asarray(data)]
@@ -482,6 +613,9 @@ def standardize_weights(y,
Raises:
ValueError: In case of invalid user-provided arguments.
"""
+ # Iterator may return sample_weight as 1-tuple
+ if isinstance(sample_weight, tuple):
+ sample_weight = sample_weight[0]
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
raise ValueError('"sample_weight_mode '
diff --git a/tensorflow/python/keras/engine/training_utils_test.py b/tensorflow/python/keras/engine/training_utils_test.py
new file mode 100644
index 0000000000..297a1ae494
--- /dev/null
+++ b/tensorflow/python/keras/engine/training_utils_test.py
@@ -0,0 +1,150 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for training utility functions."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import test_util
+from tensorflow.python.keras.engine import training_utils
+from tensorflow.python.platform import test
+
+
+class TrainingUtilTest(test.TestCase):
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_single_numpy(self):
+ batch_size = 2
+ a = np.ones([10, 10])
+ iterator, steps_per_epoch = training_utils.convert_to_iterator(
+ x=a, batch_size=batch_size)
+ self.assertEquals(steps_per_epoch, 5)
+
+ expected_batch = a[:batch_size, :]
+ actual_batch, = iterator.get_next()
+ self.assertAllEqual(expected_batch, actual_batch)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_single_tensor(self):
+ batch_size = 2
+ a = ops.convert_to_tensor(np.ones([10, 10]))
+ iterator, steps_per_epoch = training_utils.convert_to_iterator(
+ x=a, batch_size=batch_size)
+ self.assertEquals(steps_per_epoch, 5)
+
+ expected_batch = a[:batch_size, :]
+ actual_batch, = iterator.get_next()
+ self.assertAllEqual(expected_batch, actual_batch)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_y(self):
+ batch_size = 2
+ a = np.ones([10, 100])
+ b = np.ones([10, 10])
+ iterator, steps_per_epoch = training_utils.convert_to_iterator(
+ x=a, y=b, batch_size=batch_size)
+ self.assertEquals(steps_per_epoch, 5)
+
+ expected_x = a[:batch_size, :]
+ expected_y = b[:batch_size, :]
+ actual_x, actual_y = iterator.get_next()
+ self.assertAllEqual(expected_x, actual_x)
+ self.assertAllEqual(expected_y, actual_y)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_sample_weights(self):
+ batch_size = 2
+ a = ops.convert_to_tensor(np.ones([10, 100]))
+ b = ops.convert_to_tensor(np.ones([10, 10]))
+ sw = ops.convert_to_tensor(np.ones([10]))
+ iterator, steps_per_epoch = training_utils.convert_to_iterator(
+ x=a, y=b, sample_weights=sw, batch_size=batch_size)
+ self.assertEquals(steps_per_epoch, 5)
+
+ expected_x = a[:batch_size, :]
+ expected_y = b[:batch_size, :]
+ expected_sw = sw[:batch_size]
+ actual_x, actual_y, actual_sw = iterator.get_next()
+ self.assertAllEqual(expected_x, actual_x)
+ self.assertAllEqual(expected_y, actual_y)
+ self.assertAllEqual(expected_sw, actual_sw)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_nested(self):
+ batch_size = 2
+ x = {'1': np.ones([10, 100]), '2': [np.zeros([10, 10]), np.ones([10, 20])]}
+ iterator, steps_per_epoch = training_utils.convert_to_iterator(
+ x=x, batch_size=batch_size)
+ self.assertEquals(steps_per_epoch, 5)
+
+ expected_x1 = x['1'][:batch_size, :]
+ expected_x2_0 = x['2'][0][:batch_size, :]
+ expected_x2_1 = x['2'][1][:batch_size, :]
+
+ actual_x, = iterator.get_next()
+ actual_x1 = actual_x['1'][:batch_size, :]
+ actual_x2_0 = actual_x['2'][0][:batch_size, :]
+ actual_x2_1 = actual_x['2'][1][:batch_size, :]
+
+ self.assertAllEqual(expected_x1, actual_x1)
+ self.assertAllEqual(expected_x2_0, actual_x2_0)
+ self.assertAllEqual(expected_x2_1, actual_x2_1)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_epochs(self):
+ batch_size = 2
+ a = np.ones([10, 10])
+ iterator, steps_per_epoch = training_utils.convert_to_iterator(
+ x=a, batch_size=batch_size, epochs=2)
+ self.assertEquals(steps_per_epoch, 5)
+
+ expected_batch = a[:batch_size, :]
+    # loop one step past a whole epoch to exercise the repeated dataset
+ for _ in range(6):
+ actual_batch, = iterator.get_next()
+ self.assertAllEqual(expected_batch, actual_batch)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_insufficient_info(self):
+ # with batch_size and steps_per_epoch not set
+ with self.assertRaises(ValueError):
+ a = np.ones([10, 10])
+ _ = training_utils.convert_to_iterator(x=a)
+
+ def test_nested_all(self):
+ nested_data = {'a': True, 'b': [True, True, (False, True)]}
+ all_true = training_utils._nested_all(nested_data, lambda x: x)
+ self.assertEquals(all_true, False)
+
+ nested_data = {'a': True, 'b': [True, True, (True, True)]}
+ all_true = training_utils._nested_all(nested_data, lambda x: x)
+ self.assertEquals(all_true, True)
+
+ def test_nested_any(self):
+ nested_data = [False, {'a': False, 'b': (False, True)}]
+ any_true = training_utils._nested_any(nested_data, lambda x: x)
+ self.assertEquals(any_true, True)
+
+ nested_data = [False, {'a': False, 'b': (False, False)}]
+ any_true = training_utils._nested_any(nested_data, lambda x: x)
+ self.assertEquals(any_true, False)
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/python/keras/estimator/__init__.py b/tensorflow/python/keras/estimator/__init__.py
index cb86a69990..b244beb5b5 100644
--- a/tensorflow/python/keras/estimator/__init__.py
+++ b/tensorflow/python/keras/estimator/__init__.py
@@ -25,7 +25,7 @@ from tensorflow.python.util.tf_export import tf_export
# everything will work as normal.
try:
- import tensorflow.python.estimator.keras as keras_lib # pylint: disable=g-import-not-at-top
+ from tensorflow.python.estimator import keras as keras_lib # pylint: disable=g-import-not-at-top
model_to_estimator = tf_export('keras.estimator.model_to_estimator')(
keras_lib.model_to_estimator)
except Exception: # pylint: disable=broad-except
diff --git a/tensorflow/python/keras/initializers.py b/tensorflow/python/keras/initializers.py
index b9b2e9ad59..28beb6760d 100644
--- a/tensorflow/python/keras/initializers.py
+++ b/tensorflow/python/keras/initializers.py
@@ -23,6 +23,9 @@ import six
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops.init_ops import Constant
+from tensorflow.python.ops.init_ops import glorot_normal_initializer
+from tensorflow.python.ops.init_ops import glorot_uniform_initializer
+
from tensorflow.python.ops.init_ops import Identity
from tensorflow.python.ops.init_ops import Initializer # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import Ones
@@ -80,52 +83,6 @@ def lecun_uniform(seed=None):
scale=1., mode='fan_in', distribution='uniform', seed=seed)
-@tf_export('keras.initializers.glorot_normal')
-def glorot_normal(seed=None):
- """Glorot normal initializer, also called Xavier normal initializer.
-
- It draws samples from a truncated normal distribution centered on 0
- with `stddev = sqrt(2 / (fan_in + fan_out))`
- where `fan_in` is the number of input units in the weight tensor
- and `fan_out` is the number of output units in the weight tensor.
-
- Arguments:
- seed: A Python integer. Used to seed the random generator.
-
- Returns:
- An initializer.
-
- References:
- Glorot & Bengio, AISTATS 2010
- http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
- """
- return VarianceScaling(
- scale=1., mode='fan_avg', distribution='normal', seed=seed)
-
-
-@tf_export('keras.initializers.glorot_uniform')
-def glorot_uniform(seed=None):
- """Glorot uniform initializer, also called Xavier uniform initializer.
-
- It draws samples from a uniform distribution within [-limit, limit]
- where `limit` is `sqrt(6 / (fan_in + fan_out))`
- where `fan_in` is the number of input units in the weight tensor
- and `fan_out` is the number of output units in the weight tensor.
-
- Arguments:
- seed: A Python integer. Used to seed the random generator.
-
- Returns:
- An initializer.
-
- References:
- Glorot & Bengio, AISTATS 2010
- http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
- """
- return VarianceScaling(
- scale=1., mode='fan_avg', distribution='uniform', seed=seed)
-
-
@tf_export('keras.initializers.he_normal')
def he_normal(seed=None):
"""He normal initializer.
@@ -179,6 +136,8 @@ normal = random_normal = RandomNormal
truncated_normal = TruncatedNormal
identity = Identity
orthogonal = Orthogonal
+glorot_normal = glorot_normal_initializer
+glorot_uniform = glorot_uniform_initializer
# pylint: enable=invalid-name
diff --git a/tensorflow/python/keras/layers/convolutional_recurrent.py b/tensorflow/python/keras/layers/convolutional_recurrent.py
index 84d794cada..e61dd3043d 100644
--- a/tensorflow/python/keras/layers/convolutional_recurrent.py
+++ b/tensorflow/python/keras/layers/convolutional_recurrent.py
@@ -788,7 +788,7 @@ class ConvLSTM2D(ConvRNN2D):
Arguments:
filters: Integer, the dimensionality of the output space
- (i.e. the number output of filters in the convolution).
+ (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
diff --git a/tensorflow/python/keras/layers/core.py b/tensorflow/python/keras/layers/core.py
index 2bf6229ccb..f28cade474 100644
--- a/tensorflow/python/keras/layers/core.py
+++ b/tensorflow/python/keras/layers/core.py
@@ -26,6 +26,7 @@ import warnings
import numpy as np
from tensorflow.python.eager import context
+from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
@@ -929,13 +930,13 @@ class Dense(Layer):
def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
- shape = inputs.get_shape().as_list()
- if len(shape) > 2:
+ rank = common_shapes.rank(inputs)
+ if rank > 2:
# Broadcasting is required for the inputs.
- outputs = standard_ops.tensordot(inputs, self.kernel, [[len(shape) - 1],
- [0]])
+ outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]])
# Reshape the output back to the original ndim of the input.
if not context.executing_eagerly():
+ shape = inputs.get_shape().as_list()
output_shape = shape[:-1] + [self.units]
outputs.set_shape(output_shape)
else:
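For intuition on the rank > 2 branch above: `tensordot` contracts the last axis of the inputs with the first axis of the kernel, which `np.tensordot` mirrors exactly.

    import numpy as np

    inputs = np.ones((2, 5, 3))   # (batch, time, in_dim)
    kernel = np.ones((3, 4))      # (in_dim, units)
    outputs = np.tensordot(inputs, kernel, [[2], [0]])
    assert outputs.shape == (2, 5, 4)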
diff --git a/tensorflow/python/keras/layers/cudnn_recurrent_test.py b/tensorflow/python/keras/layers/cudnn_recurrent_test.py
index f1ee441f5f..8fd970239f 100644
--- a/tensorflow/python/keras/layers/cudnn_recurrent_test.py
+++ b/tensorflow/python/keras/layers/cudnn_recurrent_test.py
@@ -18,6 +18,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import os
+import tempfile
from absl.testing import parameterized
import numpy as np
@@ -217,27 +219,14 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
- # TODO(psv): Add generic cross product helper function for parametrized tests.
@parameterized.named_parameters(
- ('cudnnlstm_to_lstm_unidirectional_impl_1', 'LSTM', False, False, 1),
- ('cudnnlstm_to_lstm_bidirectional_impl_1', 'LSTM', False, True, 1),
- ('lstm_to_cudnnlstm_unidirectional_impl_1', 'LSTM', True, False, 1),
- ('lstm_to_cudnnlstm_bidirectional_impl_1', 'LSTM', True, True, 1),
- ('cudnngru_to_gru_unidirectional_impl_1', 'GRU', False, False, 1),
- ('cudnngru_to_gru_bidirectional_impl_1', 'GRU', False, True, 1),
- ('gru_to_cudnngru_unidirectional_impl_1', 'GRU', True, False, 1),
- ('gru_to_cudnngru_bidirectional_impl_1', 'GRU', True, True, 1),
- ('cudnnlstm_to_lstm_unidirectional_impl_2', 'LSTM', False, False, 2),
- ('cudnnlstm_to_lstm_bidirectional_impl_2', 'LSTM', False, True, 2),
- ('lstm_to_cudnnlstm_unidirectional_impl_2', 'LSTM', True, False, 2),
- ('lstm_to_cudnnlstm_bidirectional_impl_2', 'LSTM', True, True, 2),
- ('cudnngru_to_gru_unidirectional_impl_2', 'GRU', False, False, 2),
- ('cudnngru_to_gru_bidirectional_impl_2', 'GRU', False, True, 2),
- ('gru_to_cudnngru_unidirectional_impl_2', 'GRU', True, False, 2),
- ('gru_to_cudnngru_bidirectional_impl_2', 'GRU', True, True, 2),
- )
+ *testing_utils.generate_combinations_with_testcase_name(
+ rnn_type=['LSTM', 'GRU'], to_cudnn=[True, False],
+ bidirectional=[True, False], implementation=[1, 2],
+ model_nest_level=[1, 2], model_type=['seq', 'func']))
def test_load_weights_between_noncudnn_rnn(self, rnn_type, to_cudnn,
- bidirectional, implementation):
+ bidirectional, implementation,
+ model_nest_level, model_type):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
input_size = 10
@@ -261,14 +250,6 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
cudnn_rnn_layer_class = keras.layers.CuDNNGRU
rnn_layer_kwargs['reset_after'] = True
- def convert_weights(source_layer, target_layer):
- weights = source_layer.get_weights()
- weights = keras.engine.saving.preprocess_weights_for_loading(
- target_layer, weights)
- target_layer.set_weights(weights)
-
- input_layer = keras.layers.InputLayer(input_shape)
-
layer = rnn_layer_class(units, **rnn_layer_kwargs)
if bidirectional:
layer = keras.layers.Bidirectional(layer)
@@ -277,16 +258,94 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
if bidirectional:
cudnn_layer = keras.layers.Bidirectional(cudnn_layer)
- model = keras.models.Sequential([input_layer, layer])
- cudnn_model = keras.models.Sequential([input_layer, cudnn_layer])
+ model = self._make_nested_model(input_shape, layer, model_nest_level,
+ model_type)
+ cudnn_model = self._make_nested_model(input_shape, cudnn_layer,
+ model_nest_level, model_type)
+
+ if to_cudnn:
+ self._convert_model_weights(model, cudnn_model)
+ else:
+ self._convert_model_weights(cudnn_model, model)
+
+ self.assertAllClose(model.predict(inputs), cudnn_model.predict(inputs),
+ atol=1e-4)
+
+ def _make_nested_model(self, input_shape, layer, level=1, model_type='func'):
+ # example: make_nested_seq_model((1,), Dense(10), level=2).summary()
+ def make_nested_seq_model(input_shape, layer, level=1):
+ model = layer
+ for i in range(1, level + 1):
+ layers = [keras.layers.InputLayer(input_shape),
+ model] if (i == 1) else [model]
+ model = keras.models.Sequential(layers)
+ return model
+
+ # example: make_nested_func_model((1,), Dense(10), level=2).summary()
+ def make_nested_func_model(input_shape, layer, level=1):
+ model_input = keras.layers.Input(input_shape)
+ model = layer
+ for _ in range(level):
+ model = keras.models.Model(model_input, model(model_input))
+ return model
+
+ if model_type == 'func':
+ return make_nested_func_model(input_shape, layer, level)
+ elif model_type == 'seq':
+ return make_nested_seq_model(input_shape, layer, level)
+
+ def _convert_model_weights(self, source_model, target_model):
+ _, fname = tempfile.mkstemp('.h5')
+ source_model.save_weights(fname)
+ target_model.load_weights(fname)
+ os.remove(fname)
+
+ @parameterized.named_parameters(
+ *testing_utils.generate_combinations_with_testcase_name(
+ rnn_type=['LSTM', 'GRU'], to_cudnn=[True, False]))
+ def test_load_weights_between_noncudnn_rnn_time_distributed(self, rnn_type,
+ to_cudnn):
+ # Similar to test_load_weights_between_noncudnn_rnn(), but with a
+ # different input rank due to the use of TimeDistributed. Issue: #10356.
+ if test.is_gpu_available(cuda_only=True):
+ with self.test_session(use_gpu=True):
+ input_size = 10
+ steps = 6
+ timesteps = 6
+ input_shape = (timesteps, steps, input_size)
+ units = 2
+ num_samples = 32
+ inputs = np.random.random((num_samples, timesteps, steps, input_size))
+
+ rnn_layer_kwargs = {
+ 'recurrent_activation': 'sigmoid',
+ # ensure biases are non-zero and properly converted
+ 'bias_initializer': 'random_uniform',
+ }
+ if rnn_type == 'LSTM':
+ rnn_layer_class = keras.layers.LSTM
+ cudnn_rnn_layer_class = keras.layers.CuDNNLSTM
+ else:
+ rnn_layer_class = keras.layers.GRU
+ cudnn_rnn_layer_class = keras.layers.CuDNNGRU
+ rnn_layer_kwargs['reset_after'] = True
+
+ layer = rnn_layer_class(units, **rnn_layer_kwargs)
+ layer = keras.layers.TimeDistributed(layer)
+
+ cudnn_layer = cudnn_rnn_layer_class(units)
+ cudnn_layer = keras.layers.TimeDistributed(cudnn_layer)
+
+ model = self._make_nested_model(input_shape, layer)
+ cudnn_model = self._make_nested_model(input_shape, cudnn_layer)
if to_cudnn:
- convert_weights(layer, cudnn_layer)
+ self._convert_model_weights(model, cudnn_model)
else:
- convert_weights(cudnn_layer, layer)
+ self._convert_model_weights(cudnn_model, model)
- self.assertAllClose(
- model.predict(inputs), cudnn_model.predict(inputs), atol=1e-4)
+ self.assertAllClose(model.predict(inputs), cudnn_model.predict(inputs),
+ atol=1e-4)
@test_util.run_in_graph_and_eager_modes
def test_cudnnrnn_bidirectional(self):
diff --git a/tensorflow/python/keras/layers/embeddings.py b/tensorflow/python/keras/layers/embeddings.py
index 910fff720f..629a9ec9a1 100644
--- a/tensorflow/python/keras/layers/embeddings.py
+++ b/tensorflow/python/keras/layers/embeddings.py
@@ -112,6 +112,7 @@ class Embedding(Layer):
self.activity_regularizer = regularizers.get(activity_regularizer)
self.embeddings_constraint = constraints.get(embeddings_constraint)
self.mask_zero = mask_zero
+ self.supports_masking = mask_zero
self.input_length = input_length
@tf_utils.shape_type_conversion
@@ -127,8 +128,8 @@ class Embedding(Layer):
def compute_mask(self, inputs, mask=None):
if not self.mask_zero:
return None
- else:
- return math_ops.not_equal(inputs, 0)
+
+ return math_ops.not_equal(inputs, 0)
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
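
For reference, the mask semantics this enables, as a minimal sketch (public
tf.keras API; illustrative, not part of this patch):

    import tensorflow as tf

    layer = tf.keras.layers.Embedding(input_dim=10, output_dim=4, mask_zero=True)
    ids = tf.constant([[3, 5, 0, 0]])  # two trailing padding ids
    mask = layer.compute_mask(ids)     # boolean tensor: [[True, True, False, False]]
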
diff --git a/tensorflow/python/keras/layers/normalization.py b/tensorflow/python/keras/layers/normalization.py
index d4c213eedd..a7835bc0a2 100644
--- a/tensorflow/python/keras/layers/normalization.py
+++ b/tensorflow/python/keras/layers/normalization.py
@@ -34,6 +34,7 @@ from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.util.tf_export import tf_export
@@ -180,11 +181,6 @@ class BatchNormalization(Layer):
self.renorm_clipping = renorm_clipping
self.renorm_momentum = renorm_momentum
- def _add_tower_local_variable(self, *args, **kwargs):
- tower_context = distribute_lib.get_tower_context()
- with tower_context.tower_local_var_scope('mean'):
- return self.add_weight(*args, **kwargs)
-
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if not input_shape.ndims:
@@ -312,19 +308,23 @@ class BatchNormalization(Layer):
self._scope.set_partitioner(None)
else:
partitioner = None
- self.moving_mean = self._add_tower_local_variable(
+ self.moving_mean = self.add_weight(
name='moving_mean',
shape=param_shape,
dtype=param_dtype,
initializer=self.moving_mean_initializer,
- trainable=False)
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ trainable=False,
+ aggregation=variable_scope.VariableAggregation.MEAN)
- self.moving_variance = self._add_tower_local_variable(
+ self.moving_variance = self.add_weight(
name='moving_variance',
shape=param_shape,
dtype=param_dtype,
initializer=self.moving_variance_initializer,
- trainable=False)
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ trainable=False,
+ aggregation=variable_scope.VariableAggregation.MEAN)
if self.renorm:
# Create variables to maintain the moving mean and standard deviation.
@@ -335,12 +335,14 @@ class BatchNormalization(Layer):
# stack to be cleared. The nested ones use a `lambda` to set the desired
# device and ignore any devices that may be set by the custom getter.
def _renorm_variable(name, shape):
- var = self._add_tower_local_variable(
+ var = self.add_weight(
name=name,
shape=shape,
dtype=param_dtype,
initializer=init_ops.zeros_initializer(),
- trainable=False)
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ trainable=False,
+ aggregation=variable_scope.VariableAggregation.MEAN)
return var
with distribute_lib.get_distribution_strategy().colocate_vars_with(
@@ -368,7 +370,7 @@ class BatchNormalization(Layer):
decay = ops.convert_to_tensor(1.0 - momentum, name='decay')
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
- update_delta = (variable - value) * decay
+ update_delta = (variable - math_ops.cast(value, variable.dtype)) * decay
return state_ops.assign_sub(variable, update_delta, name=scope)
def _fused_batch_norm(self, inputs, training):
@@ -617,6 +619,10 @@ class BatchNormalization(Layer):
else:
mean, variance = self.moving_mean, self.moving_variance
+ mean = math_ops.cast(mean, inputs.dtype)
+ variance = math_ops.cast(variance, inputs.dtype)
+ if offset is not None:
+ offset = math_ops.cast(offset, inputs.dtype)
outputs = nn.batch_normalization(inputs,
_broadcast(mean),
_broadcast(variance),
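
The cast added in `_assign_moving_average` is what the new float16 test below
relies on: `value` may carry the input dtype while the moving statistics stay
float32. One update step rendered in plain NumPy (illustrative only):

    import numpy as np

    momentum = 0.8
    moving_mean = np.float32(0.0)
    batch_mean = np.float16(5.0)   # statistics computed from float16 inputs
    decay = np.float32(1.0 - momentum)
    update_delta = (moving_mean - np.float32(batch_mean)) * decay  # -1.0
    moving_mean -= update_delta    # 0.0 - (-1.0) -> 1.0
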
diff --git a/tensorflow/python/keras/layers/normalization_test.py b/tensorflow/python/keras/layers/normalization_test.py
index b22f3bd152..a97b4cac46 100644
--- a/tensorflow/python/keras/layers/normalization_test.py
+++ b/tensorflow/python/keras/layers/normalization_test.py
@@ -95,6 +95,24 @@ class NormalizationLayersTest(test.TestCase):
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
+ def test_batchnorm_mixed_precision(self):
+ with self.test_session():
+ model = keras.models.Sequential()
+ norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
+ model.add(norm)
+ model.compile(loss='mse', optimizer='sgd')
+
+ # centered on 5.0, stddev 10.0
+ x = np.random.normal(
+ loc=5.0, scale=10.0, size=(1000, 10)).astype(np.float16)
+ model.fit(x, x, epochs=4, verbose=0)
+ out = model.predict(x)
+ out -= keras.backend.eval(norm.beta)
+ out /= keras.backend.eval(norm.gamma)
+
+ np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
+ np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
+
def test_batchnorm_convnet(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
diff --git a/tensorflow/python/keras/layers/recurrent.py b/tensorflow/python/keras/layers/recurrent.py
index 32d25c5a65..534c0eca08 100644
--- a/tensorflow/python/keras/layers/recurrent.py
+++ b/tensorflow/python/keras/layers/recurrent.py
@@ -37,6 +37,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util.tf_export import tf_export
@@ -235,7 +236,8 @@ class RNN(Layer):
"""Base class for recurrent layers.
Arguments:
- cell: A RNN cell instance. A RNN cell is a class that has:
+ cell: An RNN cell instance or a list of RNN cell instances.
+ An RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
@@ -248,9 +250,9 @@ class RNN(Layer):
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
- It is also possible for `cell` to be a list of RNN cell instances,
- in which cases the cells get stacked on after the other in the RNN,
- implementing an efficient stacked RNN.
+ In the case that `cell` is a list of RNN cell instances, the cells
+ will be stacked one after the other in the RNN, implementing an
+ efficient stacked RNN.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
@@ -402,6 +404,8 @@ class RNN(Layer):
'one integer per RNN state).')
super(RNN, self).__init__(**kwargs)
self.cell = cell
+ if isinstance(cell, checkpointable.CheckpointableBase):
+ self._track_checkpointable(self.cell, name='cell')
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
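
A sketch of the list-of-cells usage the updated docstring describes (assumes
`SimpleRNNCell` from the same module; not taken from this patch):

    from tensorflow.python import keras

    cells = [keras.layers.SimpleRNNCell(4), keras.layers.SimpleRNNCell(8)]
    x = keras.Input(shape=(None, 5))
    y = keras.layers.RNN(cells)(x)  # cells run one after the other; 8 output units
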
diff --git a/tensorflow/python/keras/layers/recurrent_test.py b/tensorflow/python/keras/layers/recurrent_test.py
index 802374d2d2..fefb92826b 100644
--- a/tensorflow/python/keras/layers/recurrent_test.py
+++ b/tensorflow/python/keras/layers/recurrent_test.py
@@ -28,6 +28,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
+from tensorflow.python.training.checkpointable import util as checkpointable_util
class RNNTest(test.TestCase):
@@ -556,5 +557,22 @@ class RNNTest(test.TestCase):
[tuple(o.as_list()) for o in output_shape],
expected_output_shape)
+ def test_checkpointable_dependencies(self):
+ rnn = keras.layers.SimpleRNN
+ with self.test_session():
+ x = np.random.random((2, 2, 2))
+ y = np.random.random((2, 2))
+ model = keras.models.Sequential()
+ model.add(rnn(2))
+ model.compile(optimizer='rmsprop', loss='mse')
+ model.fit(x, y, epochs=1, batch_size=1)
+
+ # check whether the model variables are present in the
+ # checkpointable list of objects
+ checkpointed_objects = set(checkpointable_util.list_objects(model))
+ for v in model.variables:
+ self.assertIn(v, checkpointed_objects)
+
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/python/keras/layers/wrappers.py b/tensorflow/python/keras/layers/wrappers.py
index 22e1cf0b36..f0c1e76156 100644
--- a/tensorflow/python/keras/layers/wrappers.py
+++ b/tensorflow/python/keras/layers/wrappers.py
@@ -47,7 +47,6 @@ class Wrapper(Layer):
def __init__(self, layer, **kwargs):
assert isinstance(layer, Layer)
self.layer = layer
- self._track_checkpointable(layer, name='layer')
# Tracks mapping of Wrapper inputs to inner layer inputs. Useful when
# the inner layer has update ops that depend on its inputs (as opposed
# to the inputs to the Wrapper layer).
@@ -168,6 +167,39 @@ class TimeDistributed(Wrapper):
'`Layer` instance. You passed: {input}'.format(input=layer))
super(TimeDistributed, self).__init__(layer, **kwargs)
self.supports_masking = True
+ self._track_checkpointable(layer, name='layer')
+
+ def _get_shape_tuple(self, init_tuple, tensor, start_idx, int_shape=None):
+ """Finds non-specific dimensions in the static shapes.
+
+ The static shapes are replaced with the corresponding dynamic shapes of the
+ tensor.
+
+ Arguments:
+ init_tuple: a tuple, the first part of the output shape
+ tensor: the tensor from which to get the (static and dynamic) shapes
+ as the last part of the output shape
+ start_idx: int, which indicates the first dimension to take from
+ the static shape of the tensor
+ int_shape: an alternative static shape to take as the last part
+ of the output shape
+ Returns:
+ The new int_shape with the first part from init_tuple
+ and the last part from either `int_shape` (if provided)
+ or `tensor.shape`, where every `None` is replaced by
+ the corresponding dimension from `tf.shape(tensor)`.
+ """
+ # replace all None in int_shape by K.shape
+ if int_shape is None:
+ int_shape = K.int_shape(tensor)[start_idx:]
+ if not any(not s for s in int_shape):
+ return init_tuple + tuple(int_shape)
+ shape = K.shape(tensor)
+ int_shape = list(int_shape)
+ for i, s in enumerate(int_shape):
+ if not s:
+ int_shape[i] = shape[start_idx + i]
+ return init_tuple + tuple(int_shape)
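
The same static/dynamic mixing as a standalone graph-mode sketch (hypothetical
shapes, not part of the patch):

    import tensorflow as tf

    x = tf.placeholder(tf.float32, shape=(None, 10, None, 3))
    static = x.get_shape().as_list()[2:]  # [None, 3]
    dynamic = tf.shape(x)
    # Replace each unknown static dimension with its dynamic counterpart.
    mixed = tuple(dynamic[2 + i] if s is None else s
                  for i, s in enumerate(static))
    y = tf.reshape(x, (-1,) + mixed)      # merges the two leading axes
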
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
@@ -175,7 +207,10 @@ class TimeDistributed(Wrapper):
self.input_spec = InputSpec(shape=input_shape)
child_input_shape = [input_shape[0]] + input_shape[2:]
if not self.layer.built:
- self.layer.build(child_input_shape)
+ # The base layer class calls a conversion function on the input shape to
+ # convert it to a TensorShape. The conversion function requires a
+ # tuple which is why we cast the shape.
+ self.layer.build(tuple(child_input_shape))
self.layer.built = True
super(TimeDistributed, self).build()
self.built = True
@@ -221,18 +256,24 @@ class TimeDistributed(Wrapper):
input_length = input_shape[1]
if not input_length:
input_length = array_ops.shape(inputs)[1]
+ inner_input_shape = self._get_shape_tuple((-1,), inputs, 2)
# Shape: (num_samples * timesteps, ...). And track the
# transformation in self._input_map.
input_uid = generic_utils.object_list_uid(inputs)
- inputs = array_ops.reshape(inputs, (-1,) + input_shape[2:])
+ inputs = array_ops.reshape(inputs, inner_input_shape)
self._input_map[input_uid] = inputs
# (num_samples * timesteps, ...)
+ if generic_utils.has_arg(self.layer.call, 'mask') and mask is not None:
+ inner_mask_shape = self._get_shape_tuple((-1,), mask, 2)
+ kwargs['mask'] = K.reshape(mask, inner_mask_shape)
y = self.layer.call(inputs, **kwargs)
if hasattr(y, '_uses_learning_phase'):
uses_learning_phase = y._uses_learning_phase
# Shape: (num_samples, timesteps, ...)
output_shape = self.compute_output_shape(input_shape).as_list()
- y = array_ops.reshape(y, (-1, input_length) + tuple(output_shape[2:]))
+ output_shape = self._get_shape_tuple(
+ (-1, input_length), y, 1, output_shape[2:])
+ y = array_ops.reshape(y, output_shape)
# Apply activity regularizer if any:
if (hasattr(self.layer, 'activity_regularizer') and
@@ -244,6 +285,80 @@ class TimeDistributed(Wrapper):
y._uses_learning_phase = True
return y
+ def compute_mask(self, inputs, mask=None):
+ """Computes an output mask tensor for Embedding layer.
+
+ This is based on the inputs, mask, and the inner layer.
+ If batch size is specified:
+ Simply return the input `mask`. (An rnn-based implementation with
+ more than one rnn inputs is required but not supported in tf.keras yet.)
+ Otherwise we call `compute_mask` of the inner layer at each time step.
+ If the output mask at each time step is not `None`:
+ (E.g., inner layer is Masking or RNN)
+ Concatenate all of them and return the concatenation.
+ If the output mask at each time step is `None` and the input mask is not
+ `None`:(E.g., inner layer is Dense)
+ Reduce the input_mask to 2 dimensions and return it.
+ Otherwise (both the output mask and the input mask are `None`):
+ (E.g., `mask` is not used at all)
+ Return `None`.
+
+ Arguments:
+ inputs: Tensor with shape [batch size, timesteps, ...] indicating the
+ input to TimeDistributed. If static shape information is available for
+ "batch size", `mask` is returned unmodified.
+ mask: Either None (indicating no masking) or a Tensor indicating the
+ input mask for TimeDistributed. The shape can be static or dynamic.
+
+ Returns:
+ Either None (no masking), or a [batch size, timesteps, ...] Tensor with
+ an output mask for the TimeDistributed layer. The shape beyond the
+ first dimension is taken from the input mask (if the computed output
+ mask is None), from `mask` (if `mask` is not None), or from the
+ computed output shape otherwise.
+
+ """
+ # Cases that need to call layer.compute_mask even when input_mask is None:
+ # a Masking layer, or an Embedding layer with mask_zero.
+ input_shape = K.int_shape(inputs)
+ if input_shape[0]:
+ # batch size matters, we currently do not handle mask explicitly
+ return mask
+ inner_mask = mask
+ if inner_mask is not None:
+ inner_mask_shape = self._get_shape_tuple((-1,), mask, 2)
+ inner_mask = K.reshape(inner_mask, inner_mask_shape)
+ input_uid = generic_utils.object_list_uid(inputs)
+ inner_inputs = self._input_map[input_uid]
+ output_mask = self.layer.compute_mask(inner_inputs, inner_mask)
+ if output_mask is None:
+ if mask is None:
+ return None
+ # input_mask is not None, and output_mask is None:
+ # we should return a not-None mask
+ output_mask = mask
+ for _ in range(2, len(K.int_shape(mask))):
+ output_mask = K.any(output_mask, axis=-1)
+ else:
+ # output_mask is not None. We need to reshape it
+ input_length = input_shape[1]
+ if not input_length:
+ input_length = K.shape(inputs)[1]
+ output_mask_int_shape = K.int_shape(output_mask)
+ if output_mask_int_shape is None:
+ # if the output_mask does not have a static shape,
+ # its shape must be the same as mask's
+ if mask is not None:
+ output_mask_int_shape = K.int_shape(mask)
+ else:
+ output_mask_int_shape = K.compute_output_shape(input_shape)[:-1]
+ output_mask_shape = self._get_shape_tuple(
+ (-1, input_length), output_mask, 1, output_mask_int_shape[1:])
+ output_mask = K.reshape(output_mask, output_mask_shape)
+ return output_mask
+
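
Concretely, for a `Masking` layer wrapped in `TimeDistributed`, the inner mask
is computed on the merged (batch * timesteps, ...) input and reshaped back, as
in this sketch (mirrors the masking test added to wrappers_test.py below):

    import numpy as np
    from tensorflow.python import keras

    model = keras.models.Sequential()
    model.add(keras.layers.TimeDistributed(
        keras.layers.Masking(mask_value=0.), input_shape=(None, 4)))
    mask = model.layers[0].compute_mask(model.input)  # shape (batch, timesteps)
    func = keras.backend.function([model.input], [mask])
    x = np.ones((2, 3, 4))
    x[0, 1:, :] = 0.          # pad the last two timesteps of sample 0
    print(func([x])[0])       # [[True, False, False], [True, True, True]]
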
@tf_export('keras.layers.Bidirectional')
class Bidirectional(Wrapper):
@@ -302,6 +417,8 @@ class Bidirectional(Wrapper):
self._num_constants = None
super(Bidirectional, self).__init__(layer, **kwargs)
self.input_spec = layer.input_spec
+ self._track_checkpointable(self.forward_layer, name='forward_layer')
+ self._track_checkpointable(self.backward_layer, name='backward_layer')
@property
def trainable(self):
@@ -411,7 +528,8 @@ class Bidirectional(Wrapper):
else:
return super(Bidirectional, self).__call__(inputs, **kwargs)
- def call(self, inputs,
+ def call(self,
+ inputs,
training=None,
mask=None,
initial_state=None,
diff --git a/tensorflow/python/keras/layers/wrappers_test.py b/tensorflow/python/keras/layers/wrappers_test.py
index c8f0d216e6..0cd774ef0f 100644
--- a/tensorflow/python/keras/layers/wrappers_test.py
+++ b/tensorflow/python/keras/layers/wrappers_test.py
@@ -87,6 +87,8 @@ class TimeDistributedTest(test.TestCase):
# test config
model.get_config()
+ # check whether the model variables are present in the
+ # checkpointable list of objects
checkpointed_objects = set(checkpointable_util.list_objects(model))
for v in model.variables:
self.assertIn(v, checkpointed_objects)
@@ -190,8 +192,8 @@ class TimeDistributedTest(test.TestCase):
x = keras.layers.Input(shape=(3, 2))
layer = keras.layers.TimeDistributed(keras.layers.BatchNormalization())
_ = layer(x)
- assert len(layer.updates) == 2
- assert len(layer.trainable_weights) == 2
+ self.assertEquals(len(layer.updates), 2)
+ self.assertEquals(len(layer.trainable_weights), 2)
layer.trainable = False
assert not layer.updates
assert not layer.trainable_weights
@@ -199,6 +201,62 @@ class TimeDistributedTest(test.TestCase):
assert len(layer.updates) == 2
assert len(layer.trainable_weights) == 2
+ def test_TimeDistributed_with_masked_embedding_and_unspecified_shape(self):
+ with self.test_session():
+ # test with an unspecified shape and an Embedding with mask_zero
+ model = keras.models.Sequential()
+ model.add(keras.layers.TimeDistributed(
+ keras.layers.Embedding(5, 6, mask_zero=True),
+ input_shape=(None, None))) # N by t_1 by t_2 by 6
+ model.add(keras.layers.TimeDistributed(
+ keras.layers.SimpleRNN(7, return_sequences=True)))
+ model.add(keras.layers.TimeDistributed(
+ keras.layers.SimpleRNN(8, return_sequences=False)))
+ model.add(keras.layers.SimpleRNN(1, return_sequences=False))
+ model.compile(optimizer='rmsprop', loss='mse')
+ model_input = np.random.randint(low=1, high=5, size=(10, 3, 4),
+ dtype='int32')
+ for i in range(4):
+ model_input[i, i:, i:] = 0
+ model.fit(model_input,
+ np.random.random((10, 1)), epochs=1, batch_size=10)
+ mask_outputs = [model.layers[0].compute_mask(model.input)]
+ for layer in model.layers[1:]:
+ mask_outputs.append(layer.compute_mask(layer.input, mask_outputs[-1]))
+ func = keras.backend.function([model.input], mask_outputs[:-1])
+ mask_outputs_val = func([model_input])
+ ref_mask_val_0 = model_input > 0 # embedding layer
+ ref_mask_val_1 = ref_mask_val_0 # first RNN layer
+ ref_mask_val_2 = np.any(ref_mask_val_1, axis=-1) # second RNN layer
+ ref_mask_val = [ref_mask_val_0, ref_mask_val_1, ref_mask_val_2]
+ for i in range(3):
+ self.assertAllEqual(mask_outputs_val[i], ref_mask_val[i])
+ self.assertIs(mask_outputs[-1], None) # final layer
+
+ def test_TimeDistributed_with_masking_layer(self):
+ with self.test_session():
+ # test with Masking layer
+ model = keras.models.Sequential()
+ model.add(keras.layers.TimeDistributed(keras.layers.Masking(
+ mask_value=0.,), input_shape=(None, 4)))
+ model.add(keras.layers.TimeDistributed(keras.layers.Dense(5)))
+ model.compile(optimizer='rmsprop', loss='mse')
+ model_input = np.random.randint(low=1, high=5, size=(10, 3, 4))
+ for i in range(4):
+ model_input[i, i:, :] = 0.
+ model.compile(optimizer='rmsprop', loss='mse')
+ model.fit(model_input,
+ np.random.random((10, 3, 5)), epochs=1, batch_size=6)
+ mask_outputs = [model.layers[0].compute_mask(model.input)]
+ mask_outputs += [model.layers[1].compute_mask(model.layers[1].input,
+ mask_outputs[-1])]
+ func = keras.backend.function([model.input], mask_outputs)
+ mask_outputs_val = func([model_input])
+ self.assertEqual((mask_outputs_val[0]).all(),
+ model_input.all())
+ self.assertEqual((mask_outputs_val[1]).all(),
+ model_input.all())
+
class BidirectionalTest(test.TestCase):
@@ -222,6 +280,12 @@ class BidirectionalTest(test.TestCase):
model.compile(optimizer=RMSPropOptimizer(0.01), loss='mse')
model.fit(x, y, epochs=1, batch_size=1)
+ # check whether the model variables are present in the
+ # checkpointable list of objects
+ checkpointed_objects = set(checkpointable_util.list_objects(model))
+ for v in model.variables:
+ self.assertIn(v, checkpointed_objects)
+
# test compute output shape
ref_shape = model.layers[-1].output.get_shape()
shape = model.layers[-1].compute_output_shape(
diff --git a/tensorflow/python/keras/model_subclassing_test.py b/tensorflow/python/keras/model_subclassing_test.py
index b7e16a41dd..3ac4852eff 100644
--- a/tensorflow/python/keras/model_subclassing_test.py
+++ b/tensorflow/python/keras/model_subclassing_test.py
@@ -31,7 +31,7 @@ from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
-from tensorflow.python.training.checkpointable import base as checkpointable
+from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.training.rmsprop import RMSPropOptimizer
try:
@@ -679,8 +679,8 @@ class ModelSubclassingTest(test.TestCase):
def __init__(self):
super(Foo, self).__init__()
self.isdep = keras.layers.Dense(1)
- self.notdep = checkpointable.NoDependency(keras.layers.Dense(2))
- self.notdep_var = checkpointable.NoDependency(
+ self.notdep = data_structures.NoDependency(keras.layers.Dense(2))
+ self.notdep_var = data_structures.NoDependency(
resource_variable_ops.ResourceVariable(1., name='notdep_var'))
m = Foo()
diff --git a/tensorflow/python/keras/models_test.py b/tensorflow/python/keras/models_test.py
index ad3819e6e7..1525104ac9 100644
--- a/tensorflow/python/keras/models_test.py
+++ b/tensorflow/python/keras/models_test.py
@@ -37,6 +37,7 @@ class TestModelCloning(test.TestCase):
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,)))
+ model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(4))
@@ -46,6 +47,8 @@ class TestModelCloning(test.TestCase):
with self.test_session():
# With placeholder creation
new_model = keras.models.clone_model(model)
+ # update ops from batch norm need to be included
+ self.assertEquals(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(val_a, val_out)
@@ -53,6 +56,7 @@ class TestModelCloning(test.TestCase):
input_a = keras.Input(shape=(4,))
new_model = keras.models.clone_model(
model, input_tensors=input_a)
+ self.assertEquals(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(val_a, val_out)
@@ -60,6 +64,7 @@ class TestModelCloning(test.TestCase):
input_a = keras.backend.variable(val_a)
new_model = keras.models.clone_model(
model, input_tensors=input_a)
+ self.assertEquals(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(None, val_out)
@@ -76,6 +81,7 @@ class TestModelCloning(test.TestCase):
x_a = dense_1(input_a)
x_a = keras.layers.Dropout(0.5)(x_a)
+ x_a = keras.layers.BatchNormalization()(x_a)
x_b = dense_1(input_b)
x_a = dense_2(x_a)
outputs = keras.layers.add([x_a, x_b])
@@ -87,6 +93,7 @@ class TestModelCloning(test.TestCase):
with self.test_session():
# With placeholder creation
new_model = keras.models.clone_model(model)
+ self.assertEquals(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch([val_a, val_b], val_out)
@@ -95,6 +102,7 @@ class TestModelCloning(test.TestCase):
input_b = keras.Input(shape=(4,), name='b')
new_model = keras.models.clone_model(
model, input_tensors=[input_a, input_b])
+ self.assertEquals(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch([val_a, val_b], val_out)
@@ -103,6 +111,7 @@ class TestModelCloning(test.TestCase):
input_b = keras.backend.variable(val_b)
new_model = keras.models.clone_model(
model, input_tensors=[input_a, input_b])
+ self.assertEquals(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(None, val_out)
diff --git a/tensorflow/python/keras/optimizers.py b/tensorflow/python/keras/optimizers.py
index 34951791b5..0b440185ca 100644
--- a/tensorflow/python/keras/optimizers.py
+++ b/tensorflow/python/keras/optimizers.py
@@ -19,57 +19,22 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import copy
-
import six
from six.moves import zip # pylint: disable=redefined-builtin
-from tensorflow.python.framework import dtypes as dtypes_module
-from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
-from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training import training_util
-from tensorflow.python.training.checkpointable import tracking as checkpointable
+from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util.tf_export import tf_export
-def clip_norm(g, c, n):
- """Clip a tensor by norm.
-
- Arguments:
- g: gradient tensor to clip.
- c: clipping threshold.
- n: norm of gradient tensor.
-
- Returns:
- Clipped gradient tensor.
- """
- if c > 0:
- condition = n >= c
- then_expression = lambda: math_ops.scalar_mul(c / n, g)
- else_expression = lambda: g
-
- # saving the shape to avoid converting sparse tensor to dense
- if isinstance(g, ops.Tensor):
- g_shape = copy.copy(g.get_shape())
- elif isinstance(g, ops.IndexedSlices):
- g_shape = copy.copy(g.dense_shape)
- if condition.dtype != dtypes_module.bool:
- condition = math_ops.cast(condition, 'bool')
- g = control_flow_ops.cond(condition, then_expression, else_expression)
- if isinstance(g, ops.Tensor):
- g.set_shape(g_shape)
- elif isinstance(g, ops.IndexedSlices):
- g._dense_shape = g_shape # pylint: disable=protected-access
- return g
-
-
@tf_export('keras.optimizers.Optimizer')
class Optimizer(object):
"""Abstract optimizer base class.
@@ -91,6 +56,9 @@ class Optimizer(object):
if k not in allowed_kwargs:
raise TypeError('Unexpected keyword argument '
'passed to optimizer: ' + str(k))
+ # checks that clipnorm >= 0 and clipvalue >= 0
+ if kwargs[k] < 0:
+ raise ValueError('Expected {} >= 0, received: {}'.format(k, kwargs[k]))
self.__dict__.update(kwargs)
self.updates = []
self.weights = []
@@ -119,12 +87,13 @@ class Optimizer(object):
'gradient defined (i.e. are differentiable). '
'Common ops without gradient: '
'K.argmax, K.round, K.eval.')
- if hasattr(self, 'clipnorm') and self.clipnorm > 0:
- norm = K.sqrt(
- sum([math_ops.reduce_sum(math_ops.square(g)) for g in grads]))
- grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
- if hasattr(self, 'clipvalue') and self.clipvalue > 0:
- grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
+ if hasattr(self, 'clipnorm'):
+ grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
+ if hasattr(self, 'clipvalue'):
+ grads = [
+ clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
+ for g in grads
+ ]
return grads
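
Note the behavioral nuance: the removed path clipped every gradient against a
single norm accumulated over all gradients, while `clip_ops.clip_by_norm`
clips each gradient by its own L2 norm. Usage is unchanged (a sketch, not from
this patch):

    from tensorflow.python import keras

    sgd = keras.optimizers.SGD(lr=0.01, clipnorm=1.0)  # per-gradient L2 clip
    adam = keras.optimizers.Adam(clipvalue=0.5)        # elementwise value clip
    # Negative values now raise ValueError at construction time.
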
def set_weights(self, weights):
@@ -719,12 +688,13 @@ class Nadam(Optimizer):
return dict(list(base_config.items()) + list(config.items()))
-class TFOptimizer(Optimizer, checkpointable.Checkpointable):
+class TFOptimizer(Optimizer, checkpointable.CheckpointableBase):
"""Wrapper class for native TensorFlow optimizers.
"""
def __init__(self, optimizer): # pylint: disable=super-init-not-called
self.optimizer = optimizer
+ self._track_checkpointable(optimizer, name='optimizer')
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
diff --git a/tensorflow/python/keras/optimizers_test.py b/tensorflow/python/keras/optimizers_test.py
index 92b0cf3261..55fc3fdcf4 100644
--- a/tensorflow/python/keras/optimizers_test.py
+++ b/tensorflow/python/keras/optimizers_test.py
@@ -145,6 +145,12 @@ class KerasOptimizersTest(test.TestCase):
with self.assertRaises(NotImplementedError):
optimizer.from_config(None)
+ def test_negative_clipvalue_or_clipnorm(self):
+ with self.assertRaises(ValueError):
+ _ = keras.optimizers.SGD(lr=0.01, clipvalue=-0.5)
+ with self.assertRaises(ValueError):
+ _ = keras.optimizers.Adam(clipnorm=-2.0)
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/python/keras/testing_utils.py b/tensorflow/python/keras/testing_utils.py
index e7cb45d5e1..17aba7d86c 100644
--- a/tensorflow/python/keras/testing_utils.py
+++ b/tensorflow/python/keras/testing_utils.py
@@ -18,6 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from collections import OrderedDict
import numpy as np
from tensorflow.python import keras
@@ -183,3 +184,76 @@ def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
# for further checks in the caller function
return actual_output
+
+
+def _combine_named_parameters(**kwargs):
+ """Generate combinations based on its keyword arguments.
+
+ Two sets of returned combinations can be concatenated using +. Their product
+ can be computed using `times()`.
+
+ Args:
+ **kwargs: keyword arguments of form `option=[possibilities, ...]`
+ or `option=the_only_possibility`.
+
+ Returns:
+ a list of dictionaries for each combination. Keys in the dictionaries are
+ the keyword argument names. Each key has one value - one of the
+ corresponding keyword argument values.
+ """
+ if not kwargs:
+ return [OrderedDict()]
+
+ sort_by_key = lambda k: k[0][0]
+ kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
+ first = list(kwargs.items())[0]
+
+ rest = dict(list(kwargs.items())[1:])
+ rest_combined = _combine_named_parameters(**rest)
+
+ key = first[0]
+ values = first[1]
+ if not isinstance(values, list):
+ values = [values]
+
+ combinations = [
+ OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
+ for v in values
+ for combined in rest_combined
+ ]
+ return combinations
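
For illustration, calling the helper directly (hypothetical values):

    combos = _combine_named_parameters(a=[1, 2], b='x')
    # -> [OrderedDict([('a', 1), ('b', 'x')]),
    #     OrderedDict([('a', 2), ('b', 'x')])]
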
+
+
+def generate_combinations_with_testcase_name(**kwargs):
+ """Generate combinations based on its keyword arguments using combine().
+
+ This function calls combine() and appends a testcase name to the list of
+ dictionaries returned. The 'testcase_name' key is a required for named
+ parameterized tests.
+
+ Args:
+ **kwargs: keyword arguments of form `option=[possibilities, ...]`
+ or `option=the_only_possibility`.
+
+ Returns:
+ a list of dictionaries for each combination. Keys in the dictionaries are
+ the keyword argument names. Each key has one value - one of the
+ corresponding keyword argument values.
+ """
+ combinations = _combine_named_parameters(**kwargs)
+ named_combinations = []
+ for combination in combinations:
+ assert isinstance(combination, OrderedDict)
+ name = ''.join([
+ '_{}_{}'.format(
+ ''.join(filter(str.isalnum, key)),
+ ''.join(filter(str.isalnum, str(value))))
+ for key, value in combination.items()
+ ])
+ named_combinations.append(
+ OrderedDict(
+ list(combination.items()) + [('testcase_name',
+ '_test{}'.format(name))]))
+
+ return named_combinations
+
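
A sketch of the generated entries (hypothetical arguments):

    params = generate_combinations_with_testcase_name(
        rnn_type=['LSTM'], to_cudnn=[True, False])
    # Each dictionary gains a name assembled from its alphanumeric key/value
    # pairs, e.g.:
    # {'rnn_type': 'LSTM', 'to_cudnn': True,
    #  'testcase_name': '_test_rnntype_LSTM_tocudnn_True'}
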
diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD
index 8a6614c837..838cf836f1 100644
--- a/tensorflow/python/kernel_tests/BUILD
+++ b/tensorflow/python/kernel_tests/BUILD
@@ -1525,6 +1525,7 @@ cuda_py_test(
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:math_ops",
],
+ tags = ["no_windows_gpu"],
)
cuda_py_test(
@@ -2057,6 +2058,7 @@ cuda_py_test(
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:math_ops",
],
+ tags = ["no_windows_gpu"],
)
tf_py_test(
@@ -2755,6 +2757,7 @@ cuda_py_test(
"//tensorflow/python:embedding_ops",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:init_ops",
"//tensorflow/python:linalg_ops",
"//tensorflow/python:math_ops",
"//tensorflow/python:partitioned_variables",
@@ -2842,6 +2845,7 @@ cuda_py_test(
"//tensorflow/python:math_ops",
],
shard_count = 20,
+ tags = ["nomsan"], # TODO(b/110990716) reenable
)
cuda_py_test(
diff --git a/tensorflow/python/kernel_tests/boosted_trees/prediction_ops_test.py b/tensorflow/python/kernel_tests/boosted_trees/prediction_ops_test.py
index 92cd53a031..4e31b1ea2a 100644
--- a/tensorflow/python/kernel_tests/boosted_trees/prediction_ops_test.py
+++ b/tensorflow/python/kernel_tests/boosted_trees/prediction_ops_test.py
@@ -910,7 +910,7 @@ class PredictionOpsTest(test_util.TensorFlowTestCase):
feature_1_values = [11, 27]
# Example 1: tree 0: 1.14, tree 1: 5.0, tree 2: 5.0 =>
- # logit = 0.1*5.0+0.2*5.0+1*5
+ # logit = 0.1*1.14+0.2*5.0+1*5
# Example 2: tree 0: 1.14, tree 1: 7.0, tree 2: -7 =>
# logit = 0.1*1.14+0.2*7.0-1*7.0
expected_logits = [[6.114], [-5.486]]
@@ -925,5 +925,147 @@ class PredictionOpsTest(test_util.TensorFlowTestCase):
self.assertAllClose(expected_logits, logits)
+class FeatureContribsOpsTest(test_util.TensorFlowTestCase):
+ """Tests feature contribs ops for model understanding."""
+
+ def testContribsMultipleTree(self):
+ """Tests that the contribs work when we have multiple trees."""
+ with self.test_session() as session:
+ tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
+ text_format.Merge(
+ """
+ trees {
+ nodes {
+ bucketized_split {
+ feature_id: 2
+ threshold: 28
+ left_id: 1
+ right_id: 2
+ }
+ metadata {
+ gain: 7.62
+ original_leaf: {scalar: 2.1}
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 1.14
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 8.79
+ }
+ }
+ }
+ trees {
+ nodes {
+ bucketized_split {
+ feature_id: 2
+ threshold: 26
+ left_id: 1
+ right_id: 2
+ }
+ }
+ nodes {
+ bucketized_split {
+ feature_id: 0
+ threshold: 50
+ left_id: 3
+ right_id: 4
+ }
+ metadata {
+ original_leaf: {scalar: 5.5}
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 7.0
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 5.0
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 6.0
+ }
+ }
+ }
+ trees {
+ nodes {
+ bucketized_split {
+ feature_id: 0
+ threshold: 34
+ left_id: 1
+ right_id: 2
+ }
+ }
+ nodes {
+ leaf {
+ scalar: -7.0
+ }
+ }
+ nodes {
+ leaf {
+ scalar: 5.0
+ }
+ }
+ }
+ tree_weights: 0.1
+ tree_weights: 0.2
+ tree_weights: 1.0
+ tree_metadata: {
+ num_layers_grown: 1}
+ tree_metadata: {
+ num_layers_grown: 2}
+ tree_metadata: {
+ num_layers_grown: 1}
+ """, tree_ensemble_config)
+
+ tree_ensemble = boosted_trees_ops.TreeEnsemble(
+ 'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
+ tree_ensemble_handle = tree_ensemble.resource_handle
+ resources.initialize_resources(resources.shared_resources()).run()
+
+ feature_0_values = [36, 32]
+ feature_1_values = [13, -29] # Unused. Feature is not in above ensemble.
+ feature_2_values = [11, 27]
+
+ # Expected logits are computed by traversing the logit path and
+ # subtracting child logits from parent logits.
+ bias = 2.1 * 0.1 # Root node of tree_0.
+ expected_feature_ids = ((2, 2, 0, 0), (2, 2, 0))
+ # example_0 : (bias, 0.1 * 1.14, 0.2 * 5.5 + .114, 0.2 * 5. + .114,
+ # 1.0 * 5.0 + 0.2 * 5. + .114)
+ # example_1 : (bias, 0.1 * 1.14, 0.2 * 7 + .114,
+ # 1.0 * -7. + 0.2 * 7 + .114)
+ expected_logits_paths = ((bias, 0.114, 1.214, 1.114, 6.114),
+ (bias, 0.114, 1.514, -5.486))
+
+ bucketized_features = [
+ feature_0_values, feature_1_values, feature_2_values
+ ]
+
+ debug_op = boosted_trees_ops.example_debug_outputs(
+ tree_ensemble_handle,
+ bucketized_features=bucketized_features,
+ logits_dimension=1)
+
+ serialized_examples_debug_outputs = session.run(debug_op)
+ feature_ids = []
+ logits_paths = []
+ for example in serialized_examples_debug_outputs:
+ example_debug_outputs = boosted_trees_pb2.DebugOutput()
+ example_debug_outputs.ParseFromString(example)
+ feature_ids.append(example_debug_outputs.feature_ids)
+ logits_paths.append(example_debug_outputs.logits_path)
+
+ self.assertAllClose(feature_ids, expected_feature_ids)
+ self.assertAllClose(logits_paths, expected_logits_paths)
+
+
if __name__ == '__main__':
googletest.main()
diff --git a/tensorflow/python/kernel_tests/boosted_trees/training_ops_test.py b/tensorflow/python/kernel_tests/boosted_trees/training_ops_test.py
index 13b804875e..d55240297a 100644
--- a/tensorflow/python/kernel_tests/boosted_trees/training_ops_test.py
+++ b/tensorflow/python/kernel_tests/boosted_trees/training_ops_test.py
@@ -139,6 +139,49 @@ class UpdateTreeEnsembleOpTest(test_util.TensorFlowTestCase):
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
+ def testBiasCenteringOnEmptyEnsemble(self):
+ """Test growing with bias centering on an empty ensemble."""
+ with self.test_session() as session:
+ # Create empty ensemble.
+ tree_ensemble = boosted_trees_ops.TreeEnsemble('ensemble')
+ tree_ensemble_handle = tree_ensemble.resource_handle
+ resources.initialize_resources(resources.shared_resources()).run()
+
+ gradients = np.array([[5.]], dtype=np.float32)
+ hessians = np.array([[24.]], dtype=np.float32)
+
+ # Grow tree ensemble.
+ grow_op = boosted_trees_ops.center_bias(
+ tree_ensemble_handle,
+ mean_gradients=gradients,
+ mean_hessians=hessians,
+ l1=0.0,
+ l2=1.0
+ )
+ session.run(grow_op)
+
+ new_stamp, serialized = session.run(tree_ensemble.serialize())
+
+ tree_ensemble = boosted_trees_pb2.TreeEnsemble()
+ tree_ensemble.ParseFromString(serialized)
+
+ expected_result = """
+ trees {
+ nodes {
+ leaf {
+ scalar: -0.2
+ }
+ }
+ }
+ tree_weights: 1.0
+ tree_metadata {
+ num_layers_grown: 0
+ is_finalized: false
+ }
+ """
+ self.assertEqual(new_stamp, 1)
+ self.assertProtoEquals(expected_result, tree_ensemble)
+
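
The expected leaf value is consistent with a regularized Newton step, assuming
`center_bias` computes leaf = -mean_gradient / (mean_hessian + l2); the formula
is inferred from the numbers above, not taken from the op's implementation:

    # -5.0 / (24.0 + 1.0) == -0.2, the scalar in expected_result above
    leaf = -5.0 / (24.0 + 1.0)
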
def testGrowExistingEnsembleTreeNotFinalized(self):
"""Test growing an existing ensemble with the last tree not finalized."""
with self.test_session() as session:
@@ -666,7 +709,6 @@ class UpdateTreeEnsembleOpTest(test_util.TensorFlowTestCase):
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
-
}
""", tree_ensemble_config)
diff --git a/tensorflow/python/kernel_tests/constant_op_eager_test.py b/tensorflow/python/kernel_tests/constant_op_eager_test.py
index 8e9d75667d..a0d5557b92 100644
--- a/tensorflow/python/kernel_tests/constant_op_eager_test.py
+++ b/tensorflow/python/kernel_tests/constant_op_eager_test.py
@@ -32,6 +32,9 @@ from tensorflow.python.util import compat
# TODO(josh11b): add tests with lists/tuples, Shape.
+# TODO(ashankar): Collapse with tests in constant_op_test.py and use something
+# like the test_util.run_in_graph_and_eager_modes decorator to confirm
+# equivalence between graph and eager execution.
class ConstantTest(test.TestCase):
def _testCpu(self, x):
@@ -280,6 +283,34 @@ class ConstantTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, None):
constant_op.constant([[1, 2], [3], [4, 5]])
+ # TODO(ashankar): This test fails with graph construction since
+ # tensor_util.make_tensor_proto (invoked from constant_op.constant)
+ # does not handle iterables (it relies on numpy conversion).
+ # For consistency, should graph construction handle Python objects
+ # that implement the sequence protocol (but not numpy conversion),
+ # or should eager execution fail on such sequences?
+ def testCustomSequence(self):
+
+ # This is inspired by how many objects in pandas are implemented:
+ # - They implement the Python sequence protocol
+ # - But may raise a KeyError on __getitem__(self, 0)
+ # See https://github.com/tensorflow/tensorflow/issues/20347
+ class MySeq(object):
+
+ def __getitem__(self, key):
+ if key != 1 and key != 3:
+ raise KeyError(key)
+ return key
+
+ def __len__(self):
+ return 2
+
+ def __iter__(self):
+ l = list([1, 3])
+ return l.__iter__()
+
+ self.assertAllEqual([1, 3], self.evaluate(constant_op.constant(MySeq())))
+
class AsTensorTest(test.TestCase):
diff --git a/tensorflow/python/kernel_tests/dct_ops_test.py b/tensorflow/python/kernel_tests/dct_ops_test.py
index 93b2ff4561..97d7e2d8f9 100644
--- a/tensorflow/python/kernel_tests/dct_ops_test.py
+++ b/tensorflow/python/kernel_tests/dct_ops_test.py
@@ -40,50 +40,92 @@ def try_import(name): # pylint: disable=invalid-name
fftpack = try_import("scipy.fftpack")
+def _np_dct2(signals, norm=None):
+ """Computes the DCT-II manually with NumPy."""
+ # X_k = sum_{n=0}^{N-1} x_n * cos(\frac{pi}{N} * (n + 0.5) * k) k=0,...,N-1
+ dct_size = signals.shape[-1]
+ dct = np.zeros_like(signals)
+ for k in range(dct_size):
+ phi = np.cos(np.pi * (np.arange(dct_size) + 0.5) * k / dct_size)
+ dct[..., k] = np.sum(signals * phi, axis=-1)
+ # SciPy's `dct` has a scaling factor of 2.0 which we follow.
+ # https://github.com/scipy/scipy/blob/v0.15.1/scipy/fftpack/src/dct.c.src
+ if norm == "ortho":
+ # The orthonormal scaling includes a factor of 0.5 which we combine with
+ # the overall scaling of 2.0 to cancel.
+ dct[..., 0] *= np.sqrt(1.0 / dct_size)
+ dct[..., 1:] *= np.sqrt(2.0 / dct_size)
+ else:
+ dct *= 2.0
+ return dct
+
+
+def _np_dct3(signals, norm=None):
+ """Computes the DCT-III manually with NumPy."""
+ # SciPy's `dct` has a scaling factor of 2.0 which we follow.
+ # https://github.com/scipy/scipy/blob/v0.15.1/scipy/fftpack/src/dct.c.src
+ dct_size = signals.shape[-1]
+ signals = np.array(signals) # make a copy so we can modify
+ if norm == "ortho":
+ signals[..., 0] *= np.sqrt(4.0 / dct_size)
+ signals[..., 1:] *= np.sqrt(2.0 / dct_size)
+ else:
+ signals *= 2.0
+ dct = np.zeros_like(signals)
+ # X_k = 0.5 * x_0 +
+ # sum_{n=1}^{N-1} x_n * cos(\frac{pi}{N} * n * (k + 0.5)) k=0,...,N-1
+ half_x0 = 0.5 * signals[..., 0]
+ for k in range(dct_size):
+ phi = np.cos(np.pi * np.arange(1, dct_size) * (k + 0.5) / dct_size)
+ dct[..., k] = half_x0 + np.sum(signals[..., 1:] * phi, axis=-1)
+ return dct
+
+
+NP_DCT = {2: _np_dct2, 3: _np_dct3}
+NP_IDCT = {2: _np_dct3, 3: _np_dct2}
+
+
class DCTOpsTest(test.TestCase):
- def _np_dct2(self, signals, norm=None):
- """Computes the DCT-II manually with NumPy."""
- # X_k = sum_{n=0}^{N-1} x_n * cos(\frac{pi}{N} * (n + 0.5) * k) k=0,...,N-1
- dct_size = signals.shape[-1]
- dct = np.zeros_like(signals)
- for k in range(dct_size):
- phi = np.cos(np.pi * (np.arange(dct_size) + 0.5) * k / dct_size)
- dct[..., k] = np.sum(signals * phi, axis=-1)
- # SciPy's `dct` has a scaling factor of 2.0 which we follow.
- # https://github.com/scipy/scipy/blob/v0.15.1/scipy/fftpack/src/dct.c.src
- if norm == "ortho":
- # The orthonormal scaling includes a factor of 0.5 which we combine with
- # the overall scaling of 2.0 to cancel.
- dct[..., 0] *= np.sqrt(1.0 / dct_size)
- dct[..., 1:] *= np.sqrt(2.0 / dct_size)
- else:
- dct *= 2.0
- return dct
-
- def _compare(self, signals, norm, atol=5e-4, rtol=5e-4):
- """Compares the DCT to SciPy (if available) and a NumPy implementation."""
- np_dct = self._np_dct2(signals, norm)
- tf_dct = spectral_ops.dct(signals, type=2, norm=norm).eval()
+ def _compare(self, signals, norm, dct_type, atol=5e-4, rtol=5e-4):
+ """Compares (I)DCT to SciPy (if available) and a NumPy implementation."""
+ np_dct = NP_DCT[dct_type](signals, norm)
+ tf_dct = spectral_ops.dct(signals, type=dct_type, norm=norm).eval()
self.assertAllClose(np_dct, tf_dct, atol=atol, rtol=rtol)
+ np_idct = NP_IDCT[dct_type](signals, norm)
+ tf_idct = spectral_ops.idct(signals, type=dct_type, norm=norm).eval()
+ self.assertAllClose(np_idct, tf_idct, atol=atol, rtol=rtol)
if fftpack:
- scipy_dct = fftpack.dct(signals, type=2, norm=norm)
+ scipy_dct = fftpack.dct(signals, type=dct_type, norm=norm)
self.assertAllClose(scipy_dct, tf_dct, atol=atol, rtol=rtol)
+ scipy_idct = fftpack.idct(signals, type=dct_type, norm=norm)
+ self.assertAllClose(scipy_idct, tf_idct, atol=atol, rtol=rtol)
+ # Verify inverse(forward(s)) == s, up to a normalization factor.
+ tf_idct_dct = spectral_ops.idct(
+ tf_dct, type=dct_type, norm=norm).eval()
+ tf_dct_idct = spectral_ops.dct(
+ tf_idct, type=dct_type, norm=norm).eval()
+ if norm is None:
+ tf_idct_dct *= 0.5 / signals.shape[-1]
+ tf_dct_idct *= 0.5 / signals.shape[-1]
+ self.assertAllClose(signals, tf_idct_dct, atol=atol, rtol=rtol)
+ self.assertAllClose(signals, tf_dct_idct, atol=atol, rtol=rtol)
def test_random(self):
"""Test randomly generated batches of data."""
with spectral_ops_test_util.fft_kernel_label_map():
with self.test_session(use_gpu=True):
- for shape in ([2, 20], [1], [2], [3], [10], [2, 20], [2, 3, 25]):
+ for shape in ([1], [2], [3], [10], [2, 20], [2, 3, 25]):
signals = np.random.rand(*shape).astype(np.float32)
for norm in (None, "ortho"):
- self._compare(signals, norm)
+ self._compare(signals, norm, 2)
+ self._compare(signals, norm, 3)
def test_error(self):
signals = np.random.rand(10)
# Unsupported type.
with self.assertRaises(ValueError):
- spectral_ops.dct(signals, type=3)
+ spectral_ops.dct(signals, type=1)
# Unknown normalization.
with self.assertRaises(ValueError):
spectral_ops.dct(signals, norm="bad")
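
A small NumPy check of the inverse relationship the new assertions exercise,
using the `_np_dct2`/`_np_dct3` helpers above (illustrative only):

    import numpy as np

    signals = np.random.rand(8).astype(np.float32)
    n = signals.shape[-1]
    # Without normalization the round trip scales by 2 * n, as in _compare().
    roundtrip = _np_dct3(_np_dct2(signals), norm=None) * (0.5 / n)
    np.testing.assert_allclose(roundtrip, signals, atol=1e-4)
    # With norm="ortho" the two transforms are exact inverses.
    ortho = _np_dct3(_np_dct2(signals, norm="ortho"), norm="ortho")
    np.testing.assert_allclose(ortho, signals, atol=1e-4)
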
diff --git a/tensorflow/python/kernel_tests/embedding_ops_test.py b/tensorflow/python/kernel_tests/embedding_ops_test.py
index e53ca1dcaa..55d75cb474 100644
--- a/tensorflow/python/kernel_tests/embedding_ops_test.py
+++ b/tensorflow/python/kernel_tests/embedding_ops_test.py
@@ -19,6 +19,7 @@ from __future__ import division
from __future__ import print_function
import itertools
+import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
@@ -31,6 +32,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
@@ -736,6 +738,222 @@ class EmbeddingLookupSparseTest(test.TestCase):
x, sp_ids, sp_weights, combiner="mean")
+class SafeEmbeddingLookupSparseTest(test.TestCase):
+
+ def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
+ assert vocab_size > 0
+ assert embed_dim > 0
+ assert num_shards > 0
+ assert num_shards <= vocab_size
+
+ embedding_weights = partitioned_variables.create_partitioned_variables(
+ shape=[vocab_size, embed_dim],
+ slicing=[num_shards, 1],
+ initializer=init_ops.truncated_normal_initializer(
+ mean=0.0, stddev=1.0 / math.sqrt(vocab_size), dtype=dtypes.float32))
+ for w in embedding_weights:
+ w.initializer.run()
+ embedding_weights = [w.eval() for w in embedding_weights]
+ return embedding_weights
+
+ def _ids_and_weights_2d(self):
+ # Each row demonstrates a test case:
+ # Row 0: multiple valid ids, 1 invalid id, weighted mean
+ # Row 1: all ids are invalid (leaving no valid ids after pruning)
+ # Row 2: no ids to begin with
+ # Row 3: single id
+ # Row 4: all ids have <=0 weight
+ indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [4, 0], [4, 1]]
+ ids = [0, 1, -1, -1, 2, 0, 1]
+ weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
+ shape = [5, 4]
+
+ sparse_ids = sparse_tensor.SparseTensor(
+ constant_op.constant(indices, dtypes.int64),
+ constant_op.constant(ids, dtypes.int64),
+ constant_op.constant(shape, dtypes.int64))
+
+ sparse_weights = sparse_tensor.SparseTensor(
+ constant_op.constant(indices, dtypes.int64),
+ constant_op.constant(weights, dtypes.float32),
+ constant_op.constant(shape, dtypes.int64))
+
+ return sparse_ids, sparse_weights
+
+ def _ids_and_weights_3d(self):
+ # Each (2-D) index demonstrates a test case:
+ # Index 0, 0: multiple valid ids, 1 invalid id, weighted mean
+ # Index 0, 1: all ids are invalid (leaving no valid ids after pruning)
+ # Index 0, 2: no ids to begin with
+ # Index 1, 0: single id
+ # Index 1, 1: all ids have <=0 weight
+ # Index 1, 2: no ids to begin with
+ indices = [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 1, 0], [1, 0, 0], [1, 1, 0],
+ [1, 1, 1]]
+ ids = [0, 1, -1, -1, 2, 0, 1]
+ weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
+ shape = [2, 3, 4]
+
+ sparse_ids = sparse_tensor.SparseTensor(
+ constant_op.constant(indices, dtypes.int64),
+ constant_op.constant(ids, dtypes.int64),
+ constant_op.constant(shape, dtypes.int64))
+
+ sparse_weights = sparse_tensor.SparseTensor(
+ constant_op.constant(indices, dtypes.int64),
+ constant_op.constant(weights, dtypes.float32),
+ constant_op.constant(shape, dtypes.int64))
+
+ return sparse_ids, sparse_weights
+
+ def test_safe_embedding_lookup_sparse_return_zero_vector(self):
+ with self.test_session():
+ embedding_weights = self._random_weights()
+ sparse_ids, sparse_weights = self._ids_and_weights_2d()
+
+ embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
+ embedding_weights, sparse_ids, sparse_weights).eval())
+
+ self.assertAllClose(
+ embedding_lookup_result,
+ [(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
+ 3.0, [0] * 4, [0] * 4, embedding_weights[0][2], [0] * 4])
+
+ def test_safe_embedding_lookup_sparse_return_special_vector(self):
+ with self.test_session():
+ embedding_weights = self._random_weights()
+ sparse_ids, sparse_weights = self._ids_and_weights_2d()
+
+ embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
+ embedding_weights, sparse_ids, sparse_weights, default_id=3).eval())
+
+ self.assertAllClose(
+ embedding_lookup_result,
+ [(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
+ 3.0, embedding_weights[0][3], embedding_weights[0][3],
+ embedding_weights[0][2], embedding_weights[0][3]])
+
+ def test_safe_embedding_lookup_sparse_no_weights(self):
+ with self.test_session():
+ embedding_weights = self._random_weights()
+ sparse_ids, _ = self._ids_and_weights_2d()
+
+ embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
+ embedding_weights, sparse_ids, None).eval())
+
+ self.assertAllClose(
+ embedding_lookup_result,
+ [(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4,
+ [0] * 4, embedding_weights[0][2], (
+ embedding_weights[0][0] + embedding_weights[0][1]) / 2.0])
+
+ def test_safe_embedding_lookup_sparse_partitioned(self):
+ with self.test_session():
+ embedding_weights = self._random_weights(num_shards=3)
+ sparse_ids, _ = self._ids_and_weights_2d()
+
+ embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
+ embedding_weights, sparse_ids, None).eval())
+
+ embedding_weights = list(itertools.chain(*embedding_weights))
+ self.assertAllClose(embedding_lookup_result,
+ [(embedding_weights[0] + embedding_weights[1]) / 2.0,
+ [0] * 4, [0] * 4, embedding_weights[2],
+ (embedding_weights[0] + embedding_weights[1]) / 2.0])
+
+ def test_safe_embedding_lookup_sparse_partitioned_inconsistent_weights(self):
+ with self.test_session():
+ embedding_weights = self._random_weights(num_shards=3)
+ sparse_ids, sparse_weights = self._ids_and_weights_2d()
+
+ embedding_weights[1] = embedding_weights[1].astype(np.float64)
+ self.assertRaises(TypeError, embedding_ops.safe_embedding_lookup_sparse,
+ embedding_weights, sparse_ids)
+ embedding_weights = [
+ constant_op.constant(w, dtype=dtypes.float64)
+ for w in embedding_weights
+ ]
+ self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
+ embedding_weights, sparse_ids, sparse_weights)
+
+ def test_safe_embedding_lookup_sparse_3d_return_zero_vector(self):
+ with self.test_session():
+ embedding_weights = self._random_weights()
+ sparse_ids, sparse_weights = self._ids_and_weights_3d()
+
+ embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
+ embedding_weights, sparse_ids, sparse_weights).eval())
+
+ self.assertAllClose(embedding_lookup_result, [[
+ (1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) / 3.0,
+ [0] * 4, [0] * 4
+ ], [embedding_weights[0][2], [0] * 4, [0] * 4]])
+
+ def test_safe_embedding_lookup_sparse_3d_return_special_vector(self):
+ with self.test_session():
+ embedding_weights = self._random_weights()
+ sparse_ids, sparse_weights = self._ids_and_weights_3d()
+
+ embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
+ embedding_weights, sparse_ids, sparse_weights, default_id=3).eval())
+
+ self.assertAllClose(
+ embedding_lookup_result,
+ [[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
+ 3.0, embedding_weights[0][3], embedding_weights[0][3]], [
+ embedding_weights[0][2], embedding_weights[0][3],
+ embedding_weights[0][3]
+ ]])
+
+ def test_safe_embedding_lookup_sparse_3d_no_weights(self):
+ with self.test_session():
+ embedding_weights = self._random_weights()
+ sparse_ids, _ = self._ids_and_weights_3d()
+
+ embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
+ embedding_weights, sparse_ids, None).eval())
+
+ self.assertAllClose(embedding_lookup_result, [[(
+ embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4, [
+ 0
+ ] * 4], [
+ embedding_weights[0][2],
+ (embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4
+ ]])
+
+ def test_safe_embedding_lookup_sparse_3d_partitioned(self):
+ with self.test_session():
+ embedding_weights = self._random_weights(num_shards=3)
+ sparse_ids, _ = self._ids_and_weights_3d()
+
+ embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
+ embedding_weights, sparse_ids, None).eval())
+
+ embedding_weights = list(itertools.chain(*embedding_weights))
+ self.assertAllClose(embedding_lookup_result, [[
+ (embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4, [0] * 4
+ ], [
+ embedding_weights[2],
+ (embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4
+ ]])
+
+ def test_safe_embedding_lookup_sparse_3d_partitioned_inconsistent_weights(
+ self):
+ with self.test_session():
+ embedding_weights = self._random_weights(num_shards=3)
+ sparse_ids, sparse_weights = self._ids_and_weights_3d()
+
+ embedding_weights[1] = embedding_weights[1].astype(np.float64)
+ self.assertRaises(TypeError, embedding_ops.safe_embedding_lookup_sparse,
+ embedding_weights, sparse_ids)
+ embedding_weights = [
+ constant_op.constant(w, dtype=dtypes.float64)
+ for w in embedding_weights
+ ]
+ self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
+ embedding_weights, sparse_ids, sparse_weights)
+
+
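
A note on the contract these assertions pin down: safe_embedding_lookup_sparse prunes invalid ids (< 0), averages the remaining embeddings by their weights, and fills otherwise-empty rows with zeros or with the default_id embedding. A minimal NumPy sketch of that behavior (illustrative names only, not the op's implementation):

    import numpy as np

    def safe_lookup_row(weights, ids, wts=None, default_id=None):
      # Prune invalid (< 0) ids, then take the weight-normalized mean
      # of the surviving embedding rows.
      ids = np.asarray(ids)
      wts = np.ones(len(ids)) if wts is None else np.asarray(wts, dtype=float)
      keep = ids >= 0
      ids, wts = ids[keep], wts[keep]
      if ids.size == 0:
        # Empty row: zero vector, or the default_id row when one is given.
        dim = weights.shape[1]
        return np.zeros(dim) if default_id is None else weights[default_id]
      return (wts[:, None] * weights[ids]).sum(axis=0) / wts.sum()

    w = np.arange(20.0).reshape(4, 5)        # 4 embeddings of size 5
    row = safe_lookup_row(w, ids=[0, 1], wts=[1.0, 2.0])
    assert np.allclose(row, (1.0 * w[0] + 2.0 * w[1]) / 3.0)
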
class DynamicStitchOpTest(test.TestCase):
def testCint32Cpu(self):
diff --git a/tensorflow/python/kernel_tests/functional_ops_test.py b/tensorflow/python/kernel_tests/functional_ops_test.py
index 1beb0e396e..24800d2b7a 100644
--- a/tensorflow/python/kernel_tests/functional_ops_test.py
+++ b/tensorflow/python/kernel_tests/functional_ops_test.py
@@ -35,6 +35,7 @@ from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
@@ -604,6 +605,25 @@ class FunctionalOpsTest(test.TestCase):
mul = sess.run(remote_op)
self.assertEqual(mul, [6])
+ def testRemoteFunctionSameDeviceDirectSession(self):
+
+ @function.Defun(dtypes.int32, dtypes.int32)
+ def _remote_fn(a, b):
+ return math_ops.multiply(a, b)
+
+ with ops.device("/cpu:0"):
+ a = variables.Variable(2, dtype=dtypes.int32)
+ b = variables.Variable(3, dtype=dtypes.int32)
+
+ with ops.device("/cpu:0"):
+ remote_op = functional_ops.remote_call(
+ args=[a, b], Tout=[dtypes.int32], f=_remote_fn, target="/cpu:0")
+
+ with self.test_session() as sess:
+ sess.run(variables.global_variables_initializer())
+ mul = sess.run(remote_op)
+ self.assertEqual(mul, [6])
+
def testRemoteFunctionCPUGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@@ -652,6 +672,24 @@ class FunctionalOpsTest(test.TestCase):
mul = sess.run(remote_op)
self.assertEqual(mul, 9.0)
+ def testRemoteFunctionGPUCPUStrings(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPU available")
+
+ @function.Defun(dtypes.string)
+ def _remote_fn(inp):
+ return array_ops.identity(inp)
+
+ a = array_ops.constant("a")
+
+ with ops.device("/gpu:0"):
+ remote_op = functional_ops.remote_call(
+ args=[a], Tout=[dtypes.string], f=_remote_fn, target="/cpu:0")
+
+ with self.test_session() as sess:
+ ret = sess.run(remote_op)
+ self.assertAllEqual(ret, [b"a"])
+
def testRemoteFunctionCrossProcess(self):
workers, _ = test_util.create_local_cluster(2, 1)
@@ -1043,6 +1081,56 @@ class PartitionedCallTest(test.TestCase):
self.assertTrue(compat.as_bytes("CPU:1") in outputs[1].eval())
self.assertTrue(compat.as_bytes("CPU:2") in outputs[2].eval())
+ def testAssignAddResourceVariable(self):
+
+ v = resource_variable_ops.ResourceVariable(1.0)
+
+ @function.Defun()
+ def AssignAdd():
+ v.assign_add(1.0)
+
+ op = functional_ops.partitioned_call(
+ args=AssignAdd.captured_inputs, f=AssignAdd)
+ _ = self.evaluate(variables.global_variables_initializer())
+ _ = self.evaluate(op)
+ value = self.evaluate(v.read_value())
+ self.assertEqual(value, 2.0)
+
+ def testFunctionWithResourcesOnDifferentDevices(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPUs available.")
+
+ with ops.device("/cpu:0"):
+ v_cpu_zero = resource_variable_ops.ResourceVariable(
+ [0.0, 1.0, 2.0], name="v_cpu_zero")
+
+ with ops.device("/cpu:1"):
+ v_cpu_one = resource_variable_ops.ResourceVariable(
+ [0.0, 1.0, 2.0], name="v_cpu_one")
+
+ with ops.device("/gpu:0"):
+ v_gpu = resource_variable_ops.ResourceVariable(
+ [0.0, 1.0, 2.0], name="v_gpu")
+
+ def sum_gather():
+ cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu_zero, [1, 2]))
+ also_cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu_one, [1, 2]))
+ gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
+ return cpu_result, also_cpu_result, gpu_result
+
+ defined = function.Defun()(sum_gather)
+ with self.test_session(
+ config=config_pb2.ConfigProto(
+ allow_soft_placement=False,
+ log_device_placement=True,
+ device_count={"CPU": 2})) as sess:
+ sess.run(variables.global_variables_initializer())
+ expected = sess.run(sum_gather())
+ result = sess.run(
+ functional_ops.partitioned_call(
+ args=defined.captured_inputs, f=defined))
+ self.assertAllEqual(expected, result)
+
if __name__ == "__main__":
test.main()
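
All of the new cases above follow one pattern: wrap the body with function.Defun, then dispatch it via functional_ops.remote_call with an explicit target device, or via functional_ops.partitioned_call with the function's captured inputs. A condensed sketch of the CPU-to-CPU case, assuming the same internal modules this test file imports:

    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import dtypes
    from tensorflow.python.framework import function
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import functional_ops
    from tensorflow.python.ops import math_ops

    @function.Defun(dtypes.int32, dtypes.int32)
    def _mul(a, b):
      return math_ops.multiply(a, b)

    a = constant_op.constant(2, dtype=dtypes.int32)
    b = constant_op.constant(3, dtype=dtypes.int32)
    with ops.device("/cpu:0"):
      remote_op = functional_ops.remote_call(
          args=[a, b], Tout=[dtypes.int32], f=_mul, target="/cpu:0")
    # Under a session, sess.run(remote_op) yields [6], as the test asserts.
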
diff --git a/tensorflow/python/kernel_tests/init_ops_test.py b/tensorflow/python/kernel_tests/init_ops_test.py
index 795aa67248..f6097ad489 100644
--- a/tensorflow/python/kernel_tests/init_ops_test.py
+++ b/tensorflow/python/kernel_tests/init_ops_test.py
@@ -364,14 +364,52 @@ class UniformUnitScalingInitializationTest(test.TestCase):
class VarianceScalingInitializationTest(test.TestCase):
+ def testTruncatedNormalDistribution(self):
+ shape = [100, 100]
+ expect_mean = 0.
+ expect_var = 1. / shape[0]
+ init = init_ops.variance_scaling_initializer(
+ distribution='truncated_normal')
+
+ with self.test_session(use_gpu=True), \
+ test.mock.patch.object(
+ random_ops, 'truncated_normal', wraps=random_ops.truncated_normal) \
+ as mock_truncated_normal:
+ x = init(shape).eval()
+ self.assertTrue(mock_truncated_normal.called)
+
+ self.assertNear(np.mean(x), expect_mean, err=1e-2)
+ self.assertNear(np.var(x), expect_var, err=1e-2)
+
def testNormalDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops.variance_scaling_initializer(distribution='normal')
- with self.test_session(use_gpu=True):
+ with self.test_session(use_gpu=True), \
+ test.mock.patch.object(
+ random_ops, 'truncated_normal', wraps=random_ops.truncated_normal) \
+ as mock_truncated_normal:
+ x = init(shape).eval()
+ self.assertTrue(mock_truncated_normal.called)
+
+ self.assertNear(np.mean(x), expect_mean, err=1e-2)
+ self.assertNear(np.var(x), expect_var, err=1e-2)
+
+ def testUntruncatedNormalDistribution(self):
+ shape = [100, 100]
+ expect_mean = 0.
+ expect_var = 1. / shape[0]
+ init = init_ops.variance_scaling_initializer(
+ distribution='untruncated_normal')
+
+ with self.test_session(use_gpu=True), \
+ test.mock.patch.object(
+ random_ops, 'random_normal', wraps=random_ops.random_normal) \
+ as mock_random_normal:
x = init(shape).eval()
+ self.assertTrue(mock_random_normal.called)
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@@ -792,7 +830,7 @@ class ConvolutionOrthogonal1dInitializerTest(test.TestCase):
tol = 1e-3
gain = 3.14
# Check orthogonality/isometry by computing the ratio between
- # the 2-norms of the inputs and ouputs.
+ # the 2-norms of the inputs and outputs.
for kernel_size in [[1], [2], [3], [4], [5], [6]]:
convolution = convolutional.conv1d
inputs = random_ops.random_normal(shape, dtype=dtype)
@@ -887,7 +925,7 @@ class ConvolutionOrthogonal2dInitializerTest(test.TestCase):
tol = 1e-3
gain = 3.14
# Check orthogonality/isometry by computing the ratio between
- # the 2-norms of the inputs and ouputs.
+ # the 2-norms of the inputs and outputs.
for kernel_size in [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]:
convolution = convolutional.conv2d
inputs = random_ops.random_normal(shape, dtype=dtype)
@@ -1012,7 +1050,7 @@ class ConvolutionOrthogonal3dInitializerTest(test.TestCase):
tol = 1e-3
gain = 3.14
# Check orthogonality/isometry by computing the ratio between
- # the 2-norms of the inputs and ouputs.
+ # the 2-norms of the inputs and outputs.
for kernel_size in [[1, 1, 1], [2, 2, 2], [3, 3, 3]]:
convolution = convolutional.conv3d
inputs = random_ops.random_normal(shape, dtype=dtype)
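
The mocking idiom these initializer tests introduce is worth noting: test.mock.patch.object(..., wraps=...) keeps the sampler's behavior intact but records calls, so a test can assert which random op the initializer routed to. A standalone sketch of the same idiom using unittest.mock directly (test.mock is, to our understanding, a thin re-export; the rng class below is a stand-in for random_ops):

    from unittest import mock
    import numpy as np

    class rng(object):                  # stands in for the random_ops module
      normal = staticmethod(np.random.normal)

    def init(shape):                    # stands in for the initializer under test
      return rng.normal(size=shape)

    with mock.patch.object(rng, "normal", wraps=np.random.normal) as m:
      x = init((100, 100))
    assert m.called                      # the expected sampler was routed to
    assert abs(np.var(x) - 1.0) < 0.1    # wraps= leaves behavior unchanged
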
diff --git a/tensorflow/python/kernel_tests/linalg/BUILD b/tensorflow/python/kernel_tests/linalg/BUILD
index 0123adc2c3..487418e694 100644
--- a/tensorflow/python/kernel_tests/linalg/BUILD
+++ b/tensorflow/python/kernel_tests/linalg/BUILD
@@ -107,6 +107,10 @@ cuda_py_test(
"//tensorflow/python:random_ops",
],
shard_count = 5,
+ tags = [
+ "noasan",
+ "optonly",
+ ],
)
cuda_py_test(
@@ -124,7 +128,10 @@ cuda_py_test(
"//tensorflow/python:random_ops",
],
shard_count = 5,
- tags = ["optonly"], # Test is flaky without optimization.
+ tags = [
+ "noasan",
+ "optonly",
+ ],
)
cuda_py_test(
@@ -141,6 +148,10 @@ cuda_py_test(
"//tensorflow/python:platform_test",
],
shard_count = 5,
+ tags = [
+ "noasan",
+ "optonly",
+ ],
)
cuda_py_test(
@@ -178,11 +189,15 @@ cuda_py_test(
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
+ tags = [
+ "noasan",
+ "optonly",
+ ],
)
cuda_py_test(
name = "linear_operator_low_rank_update_test",
- size = "medium",
+ size = "large",
srcs = ["linear_operator_low_rank_update_test.py"],
additional_deps = [
"//tensorflow/python/ops/linalg",
@@ -214,4 +229,8 @@ cuda_py_test(
"//tensorflow/python:platform_test",
],
shard_count = 5,
+ tags = [
+ "noasan",
+ "optonly",
+ ],
)
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_block_diag_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_block_diag_test.py
index 2b80f01b73..3ede2aceaa 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_block_diag_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_block_diag_test.py
@@ -80,7 +80,7 @@ class SquareLinearOperatorBlockDiagTest(
build_info((2, 1, 5, 5), blocks=[(2, 1, 2, 2), (1, 3, 3)]),
]
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
expected_blocks = (
build_info.__dict__["blocks"] if "blocks" in build_info.__dict__
@@ -91,26 +91,19 @@ class SquareLinearOperatorBlockDiagTest(
for block_shape in expected_blocks
]
+ lin_op_matrices = matrices
+
if use_placeholder:
- matrices_ph = [
- array_ops.placeholder(dtype=dtype) for _ in expected_blocks
- ]
- # Evaluate here because (i) you cannot feed a tensor, and (ii)
- # values are random and we want the same value used for both mat and
- # feed_dict.
- matrices = self.evaluate(matrices)
- operator = block_diag.LinearOperatorBlockDiag(
- [linalg.LinearOperatorFullMatrix(
- m_ph, is_square=True) for m_ph in matrices_ph],
- is_square=True)
- feed_dict = {m_ph: m for (m_ph, m) in zip(matrices_ph, matrices)}
- else:
- operator = block_diag.LinearOperatorBlockDiag(
- [linalg.LinearOperatorFullMatrix(
- m, is_square=True) for m in matrices])
- feed_dict = None
- # Should be auto-set.
- self.assertTrue(operator.is_square)
+ lin_op_matrices = [
+ array_ops.placeholder_with_default(
+ matrix, shape=None) for matrix in matrices]
+
+ operator = block_diag.LinearOperatorBlockDiag(
+ [linalg.LinearOperatorFullMatrix(
+ l, is_square=True) for l in lin_op_matrices])
+
+ # Should be auto-set.
+ self.assertTrue(operator.is_square)
# Broadcast the shapes.
expected_shape = list(build_info.shape)
@@ -123,7 +116,7 @@ class SquareLinearOperatorBlockDiagTest(
block_diag_dense.set_shape(
expected_shape[:-2] + [expected_shape[-1], expected_shape[-1]])
- return operator, block_diag_dense, feed_dict
+ return operator, block_diag_dense
def test_is_x_flags(self):
# Matrix with two positive eigenvalues, 1, and 1.
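
The refactor in this file, repeated across the operator tests below, is mechanical: the placeholder + .eval() + feed_dict triple collapses into a single array_ops.placeholder_with_default(value, shape=None), which erases the static shape (so dynamic-shape code paths still get exercised) while requiring no feeds at run time. Schematically, with a full-matrix operator as the example:

    from tensorflow.python.ops import array_ops
    from tensorflow.python.ops import random_ops
    from tensorflow.python.ops.linalg import linalg

    matrix = random_ops.random_normal(shape=[2, 2])
    # One line replaces placeholder + .eval() + feed_dict; shape=None hides
    # the static shape, while sess.run needs no feeds because the default
    # value of the placeholder is the tensor itself.
    lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
    operator = linalg.LinearOperatorFullMatrix(lin_op_matrix)
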
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py
index 5713d16969..7261d4bb3b 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py
@@ -95,7 +95,7 @@ class LinearOperatorCirculantTestSelfAdjointOperator(
# real, the matrix will not be real.
return [dtypes.complex64]
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = build_info.shape
# For this test class, we are creating real spectrums.
# We also want the spectrum to have eigenvalues bounded away from zero.
@@ -107,22 +107,18 @@ class LinearOperatorCirculantTestSelfAdjointOperator(
# zero, so the operator will still be self-adjoint.
spectrum = math_ops.cast(spectrum, dtype)
+ lin_op_spectrum = spectrum
+
if use_placeholder:
- spectrum_ph = array_ops.placeholder(dtypes.complex64)
- # Evaluate here because (i) you cannot feed a tensor, and (ii)
- # it is random and we want the same value used for both mat and feed_dict.
- spectrum = spectrum.eval()
- operator = linalg.LinearOperatorCirculant(
- spectrum_ph, is_self_adjoint=True, input_output_dtype=dtype)
- feed_dict = {spectrum_ph: spectrum}
- else:
- operator = linalg.LinearOperatorCirculant(
- spectrum, is_self_adjoint=True, input_output_dtype=dtype)
- feed_dict = None
+ lin_op_spectrum = array_ops.placeholder_with_default(
+ spectrum, shape=None)
+
+ operator = linalg.LinearOperatorCirculant(
+ lin_op_spectrum, is_self_adjoint=True, input_output_dtype=dtype)
mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
- return operator, mat, feed_dict
+ return operator, mat
def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.test_session():
@@ -149,7 +145,7 @@ class LinearOperatorCirculantTestHermitianSpectrum(
def _dtypes_to_test(self):
return [dtypes.float32, dtypes.complex64]
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = build_info.shape
# For this test class, we are creating Hermitian spectrums.
# We also want the spectrum to have eigenvalues bounded away from zero.
@@ -172,22 +168,18 @@ class LinearOperatorCirculantTestHermitianSpectrum(
spectrum = math_ops.fft(h_c)
+ lin_op_spectrum = spectrum
+
if use_placeholder:
- spectrum_ph = array_ops.placeholder(dtypes.complex64)
- # Evaluate here because (i) you cannot feed a tensor, and (ii)
- # it is random and we want the same value used for both mat and feed_dict.
- spectrum = spectrum.eval()
- operator = linalg.LinearOperatorCirculant(
- spectrum_ph, input_output_dtype=dtype)
- feed_dict = {spectrum_ph: spectrum}
- else:
- operator = linalg.LinearOperatorCirculant(
- spectrum, input_output_dtype=dtype)
- feed_dict = None
+ lin_op_spectrum = array_ops.placeholder_with_default(
+ spectrum, shape=None)
+
+ operator = linalg.LinearOperatorCirculant(
+ lin_op_spectrum, input_output_dtype=dtype)
mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
- return operator, mat, feed_dict
+ return operator, mat
def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.test_session():
@@ -213,7 +205,7 @@ class LinearOperatorCirculantTestNonHermitianSpectrum(
def _dtypes_to_test(self):
return [dtypes.complex64]
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = build_info.shape
# Will be well conditioned enough to get accurate solves.
spectrum = linear_operator_test_util.random_sign_uniform(
@@ -222,22 +214,18 @@ class LinearOperatorCirculantTestNonHermitianSpectrum(
minval=1.,
maxval=2.)
+ lin_op_spectrum = spectrum
+
if use_placeholder:
- spectrum_ph = array_ops.placeholder(dtypes.complex64)
- # Evaluate here because (i) you cannot feed a tensor, and (ii)
- # it is random and we want the same value used for both mat and feed_dict.
- spectrum = spectrum.eval()
- operator = linalg.LinearOperatorCirculant(
- spectrum_ph, input_output_dtype=dtype)
- feed_dict = {spectrum_ph: spectrum}
- else:
- operator = linalg.LinearOperatorCirculant(
- spectrum, input_output_dtype=dtype)
- feed_dict = None
+ lin_op_spectrum = array_ops.placeholder_with_default(
+ spectrum, shape=None)
+
+ operator = linalg.LinearOperatorCirculant(
+ lin_op_spectrum, input_output_dtype=dtype)
mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
- return operator, mat, feed_dict
+ return operator, mat
def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.test_session():
@@ -432,7 +420,7 @@ class LinearOperatorCirculant2DTestHermitianSpectrum(
def _dtypes_to_test(self):
return [dtypes.float32, dtypes.complex64]
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = build_info.shape
# For this test class, we are creating Hermitian spectrums.
# We also want the spectrum to have eigenvalues bounded away from zero.
@@ -455,22 +443,18 @@ class LinearOperatorCirculant2DTestHermitianSpectrum(
spectrum = math_ops.fft2d(h_c)
+ lin_op_spectrum = spectrum
+
if use_placeholder:
- spectrum_ph = array_ops.placeholder(dtypes.complex64)
- # Evaluate here because (i) you cannot feed a tensor, and (ii)
- # it is random and we want the same value used for both mat and feed_dict.
- spectrum = spectrum.eval()
- operator = linalg.LinearOperatorCirculant2D(
- spectrum_ph, input_output_dtype=dtype)
- feed_dict = {spectrum_ph: spectrum}
- else:
- operator = linalg.LinearOperatorCirculant2D(
- spectrum, input_output_dtype=dtype)
- feed_dict = None
+ lin_op_spectrum = array_ops.placeholder_with_default(
+ spectrum, shape=None)
+
+ operator = linalg.LinearOperatorCirculant2D(
+ lin_op_spectrum, input_output_dtype=dtype)
mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)
- return operator, mat, feed_dict
+ return operator, mat
class LinearOperatorCirculant2DTestNonHermitianSpectrum(
@@ -486,7 +470,7 @@ class LinearOperatorCirculant2DTestNonHermitianSpectrum(
def _dtypes_to_test(self):
return [dtypes.complex64]
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = build_info.shape
# Will be well conditioned enough to get accurate solves.
spectrum = linear_operator_test_util.random_sign_uniform(
@@ -495,22 +479,18 @@ class LinearOperatorCirculant2DTestNonHermitianSpectrum(
minval=1.,
maxval=2.)
+ lin_op_spectrum = spectrum
+
if use_placeholder:
- spectrum_ph = array_ops.placeholder(dtypes.complex64)
- # Evaluate here because (i) you cannot feed a tensor, and (ii)
- # it is random and we want the same value used for both mat and feed_dict.
- spectrum = spectrum.eval()
- operator = linalg.LinearOperatorCirculant2D(
- spectrum_ph, input_output_dtype=dtype)
- feed_dict = {spectrum_ph: spectrum}
- else:
- operator = linalg.LinearOperatorCirculant2D(
- spectrum, input_output_dtype=dtype)
- feed_dict = None
+ lin_op_spectrum = array_ops.placeholder_with_default(
+ spectrum, shape=None)
+
+ operator = linalg.LinearOperatorCirculant2D(
+ lin_op_spectrum, input_output_dtype=dtype)
mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)
- return operator, mat, feed_dict
+ return operator, mat
def test_real_hermitian_spectrum_gives_real_symmetric_operator(self):
with self.test_session() as sess:
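
For background on the _spectrum_to_circulant_1d helper these tests rely on: a circulant operator is diagonalized by the DFT, so applying it is elementwise multiplication in frequency space, and column k of the dense matrix is the inverse FFT of spectrum * FFT(e_k). A small NumPy sketch of that correspondence (conventions assumed to match math_ops.fft):

    import numpy as np

    def spectrum_to_circulant_1d(spectrum):
      # Column k of the circulant operator is IFFT(spectrum * FFT(e_k)),
      # i.e. multiplication by C is convolution with IFFT(spectrum).
      n = len(spectrum)
      cols = []
      for k in range(n):
        e_k = np.zeros(n)
        e_k[k] = 1.0
        cols.append(np.fft.ifft(spectrum * np.fft.fft(e_k)))
      return np.stack(cols, axis=-1)

    spec = np.fft.fft([4., 1., 0., 1.])   # Hermitian spectrum of a real kernel
    C = spectrum_to_circulant_1d(spec)
    assert np.allclose(C.imag, 0.)        # Hermitian spectrum -> real operator
    assert np.allclose(C[:, 0].real, [4., 1., 0., 1.])
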
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_composition_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_composition_test.py
index f96b9ccdaa..612a50bcec 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_composition_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_composition_test.py
@@ -44,7 +44,7 @@ class SquareLinearOperatorCompositionTest(
self._rtol[dtypes.float32] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
sess = ops.get_default_session()
shape = list(build_info.shape)
@@ -56,33 +56,23 @@ class SquareLinearOperatorCompositionTest(
for _ in range(num_operators)
]
+ lin_op_matrices = matrices
+
if use_placeholder:
- matrices_ph = [
- array_ops.placeholder(dtype=dtype) for _ in range(num_operators)
- ]
- # Evaluate here because (i) you cannot feed a tensor, and (ii)
- # values are random and we want the same value used for both mat and
- # feed_dict.
- matrices = sess.run(matrices)
- operator = linalg.LinearOperatorComposition(
- [linalg.LinearOperatorFullMatrix(m_ph) for m_ph in matrices_ph],
- is_square=True)
- feed_dict = {m_ph: m for (m_ph, m) in zip(matrices_ph, matrices)}
- else:
- operator = linalg.LinearOperatorComposition(
- [linalg.LinearOperatorFullMatrix(m) for m in matrices])
- feed_dict = None
- # Should be auto-set.
- self.assertTrue(operator.is_square)
-
- # Convert back to Tensor. Needed if use_placeholder, since then we have
- # already evaluated each matrix to a numpy array.
+ lin_op_matrices = [
+ array_ops.placeholder_with_default(
+ matrix, shape=None) for matrix in matrices]
+
+ operator = linalg.LinearOperatorComposition(
+ [linalg.LinearOperatorFullMatrix(l) for l in lin_op_matrices],
+ is_square=True)
+
matmul_order_list = list(reversed(matrices))
- mat = ops.convert_to_tensor(matmul_order_list[0])
+ mat = matmul_order_list[0]
for other_mat in matmul_order_list[1:]:
mat = math_ops.matmul(other_mat, mat)
- return operator, mat, feed_dict
+ return operator, mat
def test_is_x_flags(self):
# Matrix with two positive eigenvalues, 1, and 1.
@@ -148,7 +138,7 @@ class NonSquareLinearOperatorCompositionTest(
self._rtol[dtypes.float32] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
sess = ops.get_default_session()
shape = list(build_info.shape)
@@ -170,30 +160,22 @@ class NonSquareLinearOperatorCompositionTest(
shape_2, dtype=dtype)
]
+ lin_op_matrices = matrices
+
if use_placeholder:
- matrices_ph = [
- array_ops.placeholder(dtype=dtype) for _ in range(num_operators)
- ]
- # Evaluate here because (i) you cannot feed a tensor, and (ii)
- # values are random and we want the same value used for both mat and
- # feed_dict.
- matrices = sess.run(matrices)
- operator = linalg.LinearOperatorComposition(
- [linalg.LinearOperatorFullMatrix(m_ph) for m_ph in matrices_ph])
- feed_dict = {m_ph: m for (m_ph, m) in zip(matrices_ph, matrices)}
- else:
- operator = linalg.LinearOperatorComposition(
- [linalg.LinearOperatorFullMatrix(m) for m in matrices])
- feed_dict = None
-
- # Convert back to Tensor. Needed if use_placeholder, since then we have
- # already evaluated each matrix to a numpy array.
+ lin_op_matrices = [
+ array_ops.placeholder_with_default(
+ matrix, shape=None) for matrix in matrices]
+
+ operator = linalg.LinearOperatorComposition(
+ [linalg.LinearOperatorFullMatrix(l) for l in lin_op_matrices])
+
matmul_order_list = list(reversed(matrices))
- mat = ops.convert_to_tensor(matmul_order_list[0])
+ mat = matmul_order_list[0]
for other_mat in matmul_order_list[1:]:
mat = math_ops.matmul(other_mat, mat)
- return operator, mat, feed_dict
+ return operator, mat
def test_static_shapes(self):
operators = [
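
One detail in _operator_and_matrix here is easy to misread: LinearOperatorComposition([A1, ..., An]) represents the product A1 A2 ... An, so the dense check matrix is accumulated over reversed(matrices), applying the rightmost factor first. In NumPy terms:

    import numpy as np

    mats = [np.random.randn(3, 3) for _ in range(3)]   # A1, A2, A3
    matmul_order_list = list(reversed(mats))
    mat = matmul_order_list[0]                         # start from A3
    for other_mat in matmul_order_list[1:]:
      mat = other_mat @ mat                            # A2 @ A3, then A1 @ (...)
    assert np.allclose(mat, mats[0] @ mats[1] @ mats[2])
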
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py
index 0a0e31c716..83cc8c483f 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py
@@ -34,25 +34,21 @@ class LinearOperatorDiagTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
diag = linear_operator_test_util.random_sign_uniform(
shape[:-1], minval=1., maxval=2., dtype=dtype)
+
+ lin_op_diag = diag
+
if use_placeholder:
- diag_ph = array_ops.placeholder(dtype=dtype)
- # Evaluate the diag here because (i) you cannot feed a tensor, and (ii)
- # diag is random and we want the same value used for both mat and
- # feed_dict.
- diag = diag.eval()
- operator = linalg.LinearOperatorDiag(diag_ph)
- feed_dict = {diag_ph: diag}
- else:
- operator = linalg.LinearOperatorDiag(diag)
- feed_dict = None
+ lin_op_diag = array_ops.placeholder_with_default(diag, shape=None)
+
+ operator = linalg.LinearOperatorDiag(lin_op_diag)
- mat = array_ops.matrix_diag(diag)
+ matrix = array_ops.matrix_diag(diag)
- return operator, mat, feed_dict
+ return operator, matrix
def test_assert_positive_definite_raises_for_zero_eigenvalue(self):
# Matrix with one positive eigenvalue and one zero eigenvalue.
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py
index b3da623b5e..1a40a29ec6 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py
@@ -20,7 +20,6 @@ from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
@@ -36,30 +35,20 @@ class SquareLinearOperatorFullMatrixTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype)
+ lin_op_matrix = matrix
+
if use_placeholder:
- matrix_ph = array_ops.placeholder(dtype=dtype)
- # Evaluate here because (i) you cannot feed a tensor, and (ii)
- # values are random and we want the same value used for both mat and
- # feed_dict.
- matrix = matrix.eval()
- operator = linalg.LinearOperatorFullMatrix(matrix_ph, is_square=True)
- feed_dict = {matrix_ph: matrix}
- else:
- # is_square should be auto-detected here.
- operator = linalg.LinearOperatorFullMatrix(matrix)
- feed_dict = None
+ lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
- # Convert back to Tensor. Needed if use_placeholder, since then we have
- # already evaluated matrix to a numpy array.
- mat = ops.convert_to_tensor(matrix)
+ operator = linalg.LinearOperatorFullMatrix(lin_op_matrix, is_square=True)
- return operator, mat, feed_dict
+ return operator, matrix
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
@@ -136,32 +125,20 @@ class SquareLinearOperatorFullMatrixSymmetricPositiveDefiniteTest(
def _dtypes_to_test(self):
return [dtypes.float32, dtypes.float64]
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype, force_well_conditioned=True)
+ lin_op_matrix = matrix
+
if use_placeholder:
- matrix_ph = array_ops.placeholder(dtype=dtype)
- # Evaluate here because (i) you cannot feed a tensor, and (ii)
- # values are random and we want the same value used for both mat and
- # feed_dict.
- matrix = matrix.eval()
- # is_square is auto-set because of self_adjoint/pd.
- operator = linalg.LinearOperatorFullMatrix(
- matrix_ph, is_self_adjoint=True, is_positive_definite=True)
- feed_dict = {matrix_ph: matrix}
- else:
- operator = linalg.LinearOperatorFullMatrix(
- matrix, is_self_adjoint=True, is_positive_definite=True)
- feed_dict = None
-
- # Convert back to Tensor. Needed if use_placeholder, since then we have
- # already evaluated matrix to a numpy array.
- mat = ops.convert_to_tensor(matrix)
-
- return operator, mat, feed_dict
+ lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
+
+ operator = linalg.LinearOperatorFullMatrix(lin_op_matrix, is_square=True)
+
+ return operator, matrix
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
@@ -210,26 +187,18 @@ class NonSquareLinearOperatorFullMatrixTest(
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_normal(shape, dtype=dtype)
+
+ lin_op_matrix = matrix
+
if use_placeholder:
- matrix_ph = array_ops.placeholder(dtype=dtype)
- # Evaluate here because (i) you cannot feed a tensor, and (ii)
- # values are random and we want the same value used for both mat and
- # feed_dict.
- matrix = matrix.eval()
- operator = linalg.LinearOperatorFullMatrix(matrix_ph)
- feed_dict = {matrix_ph: matrix}
- else:
- operator = linalg.LinearOperatorFullMatrix(matrix)
- feed_dict = None
+ lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
- # Convert back to Tensor. Needed if use_placeholder, since then we have
- # already evaluated matrix to a numpy array.
- mat = ops.convert_to_tensor(matrix)
+ operator = linalg.LinearOperatorFullMatrix(lin_op_matrix, is_square=True)
- return operator, mat, feed_dict
+ return operator, matrix
def test_is_x_flags(self):
matrix = [[3., 2., 1.], [1., 1., 1.]]
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py
index 59f63f949e..35dcf4417c 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py
@@ -43,7 +43,7 @@ class LinearOperatorIdentityTest(
# 16bit.
return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
assert shape[-1] == shape[-2]
@@ -54,13 +54,7 @@ class LinearOperatorIdentityTest(
num_rows, batch_shape=batch_shape, dtype=dtype)
mat = linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
- # Nothing to feed since LinearOperatorIdentity takes no Tensor args.
- if use_placeholder:
- feed_dict = {}
- else:
- feed_dict = None
-
- return operator, mat, feed_dict
+ return operator, mat
def test_assert_positive_definite(self):
with self.test_session():
@@ -261,7 +255,7 @@ class LinearOperatorScaledIdentityTest(
# 16bit.
return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
assert shape[-1] == shape[-2]
@@ -274,24 +268,23 @@ class LinearOperatorScaledIdentityTest(
multiplier = linear_operator_test_util.random_sign_uniform(
shape=batch_shape, minval=1., maxval=2., dtype=dtype)
- operator = linalg_lib.LinearOperatorScaledIdentity(num_rows, multiplier)
- # Nothing to feed since LinearOperatorScaledIdentity takes no Tensor args.
+ lin_op_multiplier = multiplier
+
if use_placeholder:
- multiplier_ph = array_ops.placeholder(dtype=dtype)
- multiplier = multiplier.eval()
- operator = linalg_lib.LinearOperatorScaledIdentity(
- num_rows, multiplier_ph)
- feed_dict = {multiplier_ph: multiplier}
- else:
- feed_dict = None
+ lin_op_multiplier = array_ops.placeholder_with_default(
+ multiplier, shape=None)
+
+ operator = linalg_lib.LinearOperatorScaledIdentity(
+ num_rows, lin_op_multiplier)
multiplier_matrix = array_ops.expand_dims(
array_ops.expand_dims(multiplier, -1), -1)
- mat = multiplier_matrix * linalg_ops.eye(
+ matrix = multiplier_matrix * linalg_ops.eye(
num_rows, batch_shape=batch_shape, dtype=dtype)
- return operator, mat, feed_dict
+ return operator, matrix
def test_assert_positive_definite_does_not_raise_when_positive(self):
with self.test_session():
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py
index 784c730bbc..e26b946151 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py
@@ -101,7 +101,7 @@ class SquareLinearOperatorKroneckerTest(
def _tests_to_skip(self):
return ["det", "solve", "solve_with_broadcast"]
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
expected_factors = build_info.__dict__["factors"]
matrices = [
@@ -110,26 +110,15 @@ class SquareLinearOperatorKroneckerTest(
for block_shape in expected_factors
]
+ lin_op_matrices = matrices
+
if use_placeholder:
- matrices_ph = [
- array_ops.placeholder(dtype=dtype) for _ in expected_factors
- ]
- # Evaluate here because (i) you cannot feed a tensor, and (ii)
- # values are random and we want the same value used for both mat and
- # feed_dict.
- matrices = self.evaluate(matrices)
- operator = kronecker.LinearOperatorKronecker(
- [linalg.LinearOperatorFullMatrix(
- m_ph, is_square=True) for m_ph in matrices_ph],
- is_square=True)
- feed_dict = {m_ph: m for (m_ph, m) in zip(matrices_ph, matrices)}
- else:
- operator = kronecker.LinearOperatorKronecker(
- [linalg.LinearOperatorFullMatrix(
- m, is_square=True) for m in matrices])
- feed_dict = None
- # Should be auto-set.
- self.assertTrue(operator.is_square)
+ lin_op_matrices = [
+ array_ops.placeholder_with_default(m, shape=None) for m in matrices]
+
+ operator = kronecker.LinearOperatorKronecker(
+ [linalg.LinearOperatorFullMatrix(
+ l, is_square=True) for l in lin_op_matrices])
matrices = linear_operator_util.broadcast_matrix_batch_dims(matrices)
@@ -138,7 +127,7 @@ class SquareLinearOperatorKroneckerTest(
if not use_placeholder:
kronecker_dense.set_shape(shape)
- return operator, kronecker_dense, feed_dict
+ return operator, kronecker_dense
def test_is_x_flags(self):
# Matrix with two positive eigenvalues, 1, and 1.
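
For reference, LinearOperatorKronecker([A, B]) represents the Kronecker product of A and B, which is what the kronecker_dense check tensor encodes. The NumPy equivalent of that block structure:

    import numpy as np

    A = np.array([[1., 2.], [3., 4.]])
    B = np.eye(3)
    K = np.kron(A, B)                                  # shape (6, 6)
    # Block (i, j) of K is A[i, j] * B:
    assert np.allclose(K[0:3, 3:6], A[0, 1] * B)
    assert K.shape == (A.shape[0] * B.shape[0], A.shape[1] * B.shape[1])
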
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py
index 8095f6419e..0e38dbd48d 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py
@@ -49,12 +49,6 @@ class BaseLinearOperatorLowRankUpdatetest(object):
_use_v = None
@property
- def _dtypes_to_test(self):
- # TODO(langmore) Test complex types once cholesky works with them.
- # See comment in LinearOperatorLowRankUpdate.__init__.
- return [dtypes.float32, dtypes.float64]
-
- @property
def _operator_build_infos(self):
build_info = linear_operator_test_util.OperatorBuildInfo
# Previously we had a (2, 10, 10) shape at the end. We did this to test the
@@ -68,7 +62,16 @@ class BaseLinearOperatorLowRankUpdatetest(object):
build_info((3, 4, 4)),
build_info((2, 1, 4, 4))]
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _gen_positive_diag(self, dtype, diag_shape):
+ if dtype.is_complex:
+ diag = linear_operator_test_util.random_uniform(
+ diag_shape, minval=1e-4, maxval=1., dtype=dtypes.float32)
+ return math_ops.cast(diag, dtype=dtype)
+
+ return linear_operator_test_util.random_uniform(
+ diag_shape, minval=1e-4, maxval=1., dtype=dtype)
+
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
# Recall A = L + UDV^H
shape = list(build_info.shape)
diag_shape = shape[:-1]
@@ -78,63 +81,46 @@ class BaseLinearOperatorLowRankUpdatetest(object):
# base_operator L will be a symmetric positive definite diagonal linear
# operator, with condition number as high as 1e4.
- base_diag = linear_operator_test_util.random_uniform(
- diag_shape, minval=1e-4, maxval=1., dtype=dtype)
- base_diag_ph = array_ops.placeholder(dtype=dtype)
+ base_diag = self._gen_positive_diag(dtype, diag_shape)
+ lin_op_base_diag = base_diag
# U
u = linear_operator_test_util.random_normal_correlated_columns(
u_perturbation_shape, dtype=dtype)
- u_ph = array_ops.placeholder(dtype=dtype)
+ lin_op_u = u
# V
v = linear_operator_test_util.random_normal_correlated_columns(
u_perturbation_shape, dtype=dtype)
- v_ph = array_ops.placeholder(dtype=dtype)
+ lin_op_v = v
# D
if self._is_diag_update_positive:
- diag_update = linear_operator_test_util.random_uniform(
- diag_update_shape, minval=1e-4, maxval=1., dtype=dtype)
+ diag_update = self._gen_positive_diag(dtype, diag_update_shape)
else:
diag_update = linear_operator_test_util.random_normal(
diag_update_shape, stddev=1e-4, dtype=dtype)
- diag_update_ph = array_ops.placeholder(dtype=dtype)
+ lin_op_diag_update = diag_update
if use_placeholder:
- # Evaluate here because (i) you cannot feed a tensor, and (ii)
- # values are random and we want the same value used for both mat and
- # feed_dict.
- base_diag = base_diag.eval()
- u = u.eval()
- v = v.eval()
- diag_update = diag_update.eval()
-
- # In all cases, set base_operator to be positive definite.
- base_operator = linalg.LinearOperatorDiag(
- base_diag_ph, is_positive_definite=True)
-
- operator = linalg.LinearOperatorLowRankUpdate(
- base_operator,
- u=u_ph,
- v=v_ph if self._use_v else None,
- diag_update=diag_update_ph if self._use_diag_update else None,
- is_diag_update_positive=self._is_diag_update_positive)
- feed_dict = {
- base_diag_ph: base_diag,
- u_ph: u,
- v_ph: v,
- diag_update_ph: diag_update}
- else:
- base_operator = linalg.LinearOperatorDiag(
- base_diag, is_positive_definite=True)
- operator = linalg.LinearOperatorLowRankUpdate(
- base_operator,
- u,
- v=v if self._use_v else None,
- diag_update=diag_update if self._use_diag_update else None,
- is_diag_update_positive=self._is_diag_update_positive)
- feed_dict = None
+ lin_op_base_diag = array_ops.placeholder_with_default(
+ base_diag, shape=None)
+ lin_op_u = array_ops.placeholder_with_default(u, shape=None)
+ lin_op_v = array_ops.placeholder_with_default(v, shape=None)
+ lin_op_diag_update = array_ops.placeholder_with_default(
+ diag_update, shape=None)
+
+ base_operator = linalg.LinearOperatorDiag(
+ lin_op_base_diag,
+ is_positive_definite=True,
+ is_self_adjoint=True)
+
+ operator = linalg.LinearOperatorLowRankUpdate(
+ base_operator,
+ lin_op_u,
+ v=lin_op_v if self._use_v else None,
+ diag_update=lin_op_diag_update if self._use_diag_update else None,
+ is_diag_update_positive=self._is_diag_update_positive)
# The matrix representing L
base_diag_mat = array_ops.matrix_diag(base_diag)
@@ -146,28 +132,28 @@ class BaseLinearOperatorLowRankUpdatetest(object):
if self._use_v and self._use_diag_update:
# In this case, we have L + UDV^H and it isn't symmetric.
expect_use_cholesky = False
- mat = base_diag_mat + math_ops.matmul(
+ matrix = base_diag_mat + math_ops.matmul(
u, math_ops.matmul(diag_update_mat, v, adjoint_b=True))
elif self._use_v:
# In this case, we have L + UDV^H and it isn't symmetric.
expect_use_cholesky = False
- mat = base_diag_mat + math_ops.matmul(u, v, adjoint_b=True)
+ matrix = base_diag_mat + math_ops.matmul(u, v, adjoint_b=True)
elif self._use_diag_update:
# In this case, we have L + UDU^H, which is PD if D > 0, since L > 0.
expect_use_cholesky = self._is_diag_update_positive
- mat = base_diag_mat + math_ops.matmul(
+ matrix = base_diag_mat + math_ops.matmul(
u, math_ops.matmul(diag_update_mat, u, adjoint_b=True))
else:
# In this case, we have L + UU^H, which is PD since L > 0.
expect_use_cholesky = True
- mat = base_diag_mat + math_ops.matmul(u, u, adjoint_b=True)
+ matrix = base_diag_mat + math_ops.matmul(u, u, adjoint_b=True)
if expect_use_cholesky:
self.assertTrue(operator._use_cholesky)
else:
self.assertFalse(operator._use_cholesky)
- return operator, mat, feed_dict
+ return operator, matrix
class LinearOperatorLowRankUpdatetestWithDiagUseCholesky(
@@ -186,6 +172,7 @@ class LinearOperatorLowRankUpdatetestWithDiagUseCholesky(
self._rtol[dtypes.float32] = 1e-5
self._atol[dtypes.float64] = 1e-10
self._rtol[dtypes.float64] = 1e-10
+ self._rtol[dtypes.complex64] = 1e-4
class LinearOperatorLowRankUpdatetestWithDiagCannotUseCholesky(
@@ -205,6 +192,7 @@ class LinearOperatorLowRankUpdatetestWithDiagCannotUseCholesky(
self._rtol[dtypes.float32] = 1e-4
self._atol[dtypes.float64] = 1e-9
self._rtol[dtypes.float64] = 1e-9
+ self._rtol[dtypes.complex64] = 1e-4
class LinearOperatorLowRankUpdatetestNoDiagUseCholesky(
@@ -223,6 +211,7 @@ class LinearOperatorLowRankUpdatetestNoDiagUseCholesky(
self._rtol[dtypes.float32] = 1e-5
self._atol[dtypes.float64] = 1e-10
self._rtol[dtypes.float64] = 1e-10
+ self._rtol[dtypes.complex64] = 1e-4
class LinearOperatorLowRankUpdatetestNoDiagCannotUseCholesky(
@@ -242,6 +231,7 @@ class LinearOperatorLowRankUpdatetestNoDiagCannotUseCholesky(
self._rtol[dtypes.float32] = 1e-4
self._atol[dtypes.float64] = 1e-9
self._rtol[dtypes.float64] = 1e-9
+ self._rtol[dtypes.complex64] = 1e-4
class LinearOperatorLowRankUpdatetestWithDiagNotSquare(
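
The expect_use_cholesky branching above encodes when A = L + U D V^H is guaranteed symmetric positive definite: only with V = U (or V omitted) and D positive (or omitted), given L > 0. A dense NumPy sketch of the Cholesky-eligible case:

    import numpy as np

    n, r = 5, 2
    L = np.diag(np.random.uniform(1e-4, 1., size=n))  # PD diagonal base operator
    U = np.random.randn(n, r)
    D = np.diag(np.random.uniform(1e-4, 1., size=r))  # positive diagonal update

    A = L + U @ D @ U.T            # L + U D U^H with V = U: symmetric PD
    np.linalg.cholesky(A)          # succeeds, matching expect_use_cholesky
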
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py
index a57d2f085e..b389e0cbdf 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py
@@ -17,7 +17,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
@@ -32,34 +31,23 @@ class LinearOperatorLowerTriangularTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
- @property
- def _dtypes_to_test(self):
- # TODO(langmore) Test complex types once supported by
- # matrix_triangular_solve.
- return [dtypes.float32, dtypes.float64]
-
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
# Upper triangle will be nonzero, but ignored.
# Use a diagonal that ensures this matrix is well conditioned.
tril = linear_operator_test_util.random_tril_matrix(
shape, dtype=dtype, force_well_conditioned=True, remove_upper=False)
+ lin_op_tril = tril
+
if use_placeholder:
- tril_ph = array_ops.placeholder(dtype=dtype)
- # Evaluate the tril here because (i) you cannot feed a tensor, and (ii)
- # tril is random and we want the same value used for both mat and
- # feed_dict.
- tril = tril.eval()
- operator = linalg.LinearOperatorLowerTriangular(tril_ph)
- feed_dict = {tril_ph: tril}
- else:
- operator = linalg.LinearOperatorLowerTriangular(tril)
- feed_dict = None
+ lin_op_tril = array_ops.placeholder_with_default(lin_op_tril, shape=None)
+
+ operator = linalg.LinearOperatorLowerTriangular(lin_op_tril)
- mat = array_ops.matrix_band_part(tril, -1, 0)
+ matrix = array_ops.matrix_band_part(tril, -1, 0)
- return operator, mat, feed_dict
+ return operator, matrix
def test_assert_non_singular(self):
# Singular matrix with one positive eigenvalue and one zero eigenvalue.
diff --git a/tensorflow/python/kernel_tests/resource_variable_ops_test.py b/tensorflow/python/kernel_tests/resource_variable_ops_test.py
index 0fb0b8895c..e358293a90 100644
--- a/tensorflow/python/kernel_tests/resource_variable_ops_test.py
+++ b/tensorflow/python/kernel_tests/resource_variable_ops_test.py
@@ -852,5 +852,62 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
state_ops.scatter_update(v, [0, 1], [0, 1, 2])
+class _MixedPrecisionVariableTest(test_util.TensorFlowTestCase):
+
+ @test_util.run_in_graph_and_eager_modes()
+ def test_dense_var_to_tensor_read_dtype_same_as_var_dtype(self):
+ # read_dtype is same as dtype
+ v = resource_variable_ops.ResourceVariable(1.0, dtype=dtypes.float32)
+ v = resource_variable_ops._MixedPrecisionVariable(v, dtypes.float32)
+ if not context.executing_eagerly():
+ v.initializer.run()
+
+ # dtype is not read_dtype, return NotImplemented
+ self.assertEqual(
+ NotImplemented, v._dense_var_to_tensor(dtype=dtypes.float16))
+ self.assertEqual(NotImplemented,
+ v._dense_var_to_tensor(dtype=dtypes.float16, as_ref=True))
+
+ # as_ref is False
+ t = v._dense_var_to_tensor(as_ref=False)
+ self.assertTrue(isinstance(t, ops.Tensor))
+ self.assertEqual(t.dtype, dtypes.float32)
+ self.assertEqual(self.evaluate(t), 1.0)
+
+ t = v._dense_var_to_tensor(dtype=dtypes.float32, as_ref=False)
+ self.assertTrue(isinstance(t, ops.Tensor))
+ self.assertEqual(t.dtype, dtypes.float32)
+ self.assertEqual(self.evaluate(t), 1.0)
+
+ # as_ref is True
+ self.assertEqual(NotImplemented, v._dense_var_to_tensor(as_ref=True))
+ self.assertEqual(NotImplemented,
+ v._dense_var_to_tensor(dtype=dtypes.float32, as_ref=True))
+
+ @test_util.run_in_graph_and_eager_modes()
+ def test_dense_var_to_tensor_read_dtype_different_from_var_dtype(self):
+ # read_dtype is different from dtype
+ v = resource_variable_ops.ResourceVariable(1.0, dtype=dtypes.float32)
+ v = resource_variable_ops._MixedPrecisionVariable(v, dtypes.float16)
+ if not context.executing_eagerly():
+ v.initializer.run()
+
+ # as_ref is False
+ t = v._dense_var_to_tensor(as_ref=False)
+ self.assertTrue(isinstance(t, ops.Tensor))
+ self.assertEqual(t.dtype, dtypes.float16)
+ self.assertEqual(self.evaluate(t), 1.0)
+
+ t = v._dense_var_to_tensor(dtype=dtypes.float16, as_ref=False)
+ self.assertTrue(isinstance(t, ops.Tensor))
+ self.assertEqual(t.dtype, dtypes.float16)
+ self.assertEqual(self.evaluate(t), 1.0)
+
+ # as_ref is True
+ self.assertEqual(NotImplemented, v._dense_var_to_tensor(as_ref=True))
+ self.assertEqual(NotImplemented,
+ v._dense_var_to_tensor(dtype=dtypes.float16, as_ref=True))
+
+
if __name__ == "__main__":
test.main()
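
The contract these tests pin down: a _MixedPrecisionVariable exposes reads in a fixed read_dtype, and its _dense_var_to_tensor hook returns NotImplemented (letting converter dispatch fall through) whenever a ref is requested or the requested dtype differs from read_dtype. A toy sketch of that dispatch pattern, with all names illustrative:

    import numpy as np

    class MixedPrecisionRead(object):
      """Toy stand-in: reads a stored value in a fixed read_dtype."""

      def __init__(self, value, read_dtype):
        self._value = np.asarray(value)
        self._read_dtype = read_dtype

      def to_tensor(self, dtype=None, as_ref=False):
        if as_ref:
          return NotImplemented                  # refs are never supported
        if dtype is not None and dtype != self._read_dtype:
          return NotImplemented                  # let another converter try
        return self._value.astype(self._read_dtype)

    v = MixedPrecisionRead(1.0, np.float16)
    assert v.to_tensor(dtype=np.float16).dtype == np.float16
    assert v.to_tensor(dtype=np.float32) is NotImplemented
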
diff --git a/tensorflow/python/kernel_tests/rnn_test.py b/tensorflow/python/kernel_tests/rnn_test.py
index 957baf8c60..acee180a6c 100644
--- a/tensorflow/python/kernel_tests/rnn_test.py
+++ b/tensorflow/python/kernel_tests/rnn_test.py
@@ -268,6 +268,12 @@ class RNNTest(test.TestCase):
self._assert_cell_builds(rnn_cell_impl.GRUCell, f64, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.LSTMCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.LSTMCell, f64, 5, 7, 3)
+ self._assert_cell_builds(contrib_rnn.IndRNNCell, f32, 5, 7, 3)
+ self._assert_cell_builds(contrib_rnn.IndRNNCell, f64, 5, 7, 3)
+ self._assert_cell_builds(contrib_rnn.IndyGRUCell, f32, 5, 7, 3)
+ self._assert_cell_builds(contrib_rnn.IndyGRUCell, f64, 5, 7, 3)
+ self._assert_cell_builds(contrib_rnn.IndyLSTMCell, f32, 5, 7, 3)
+ self._assert_cell_builds(contrib_rnn.IndyLSTMCell, f64, 5, 7, 3)
######### Benchmarking RNN code
diff --git a/tensorflow/python/kernel_tests/sparse_serialization_ops_test.py b/tensorflow/python/kernel_tests/sparse_serialization_ops_test.py
index 27b39a626f..3847cebc7d 100644
--- a/tensorflow/python/kernel_tests/sparse_serialization_ops_test.py
+++ b/tensorflow/python/kernel_tests/sparse_serialization_ops_test.py
@@ -300,6 +300,51 @@ class SerializeSparseTest(test.TestCase):
sparse_ops.serialize_many_sparse, sparse_ops.deserialize_sparse,
dtypes.variant)
+ def testVariantSerializeDeserializeScalar(self):
+ with self.test_session(use_gpu=False) as sess:
+ indices_value = np.array([[]], dtype=np.int64)
+ values_value = np.array([37], dtype=np.int32)
+ shape_value = np.array([], dtype=np.int64)
+ sparse_tensor = self._SparseTensorPlaceholder()
+ serialized = sparse_ops.serialize_sparse(
+ sparse_tensor, out_type=dtypes.variant)
+ deserialized = sparse_ops.deserialize_sparse(
+ serialized, dtype=dtypes.int32)
+ deserialized_value = sess.run(
+ deserialized,
+ feed_dict={
+ sparse_tensor.indices: indices_value,
+ sparse_tensor.values: values_value,
+ sparse_tensor.dense_shape: shape_value
+ })
+ self.assertAllEqual(deserialized_value.indices, indices_value)
+ self.assertAllEqual(deserialized_value.values, values_value)
+ self.assertAllEqual(deserialized_value.dense_shape, shape_value)
+
+ def testVariantSerializeDeserializeScalarBatch(self):
+ with self.test_session(use_gpu=False) as sess:
+ indices_value = np.array([[]], dtype=np.int64)
+ values_value = np.array([37], dtype=np.int32)
+ shape_value = np.array([], dtype=np.int64)
+ sparse_tensor = self._SparseTensorPlaceholder()
+ serialized = sparse_ops.serialize_sparse(
+ sparse_tensor, out_type=dtypes.variant)
+ stacked = array_ops.stack([serialized, serialized])
+ deserialized = sparse_ops.deserialize_sparse(stacked, dtype=dtypes.int32)
+ deserialized_value = sess.run(
+ deserialized,
+ feed_dict={
+ sparse_tensor.indices: indices_value,
+ sparse_tensor.values: values_value,
+ sparse_tensor.dense_shape: shape_value
+ })
+ self.assertAllEqual(deserialized_value.indices,
+ np.array([[0], [1]], dtype=np.int64))
+ self.assertAllEqual(deserialized_value.values,
+ np.array([37, 37], dtype=np.int32))
+ self.assertAllEqual(deserialized_value.dense_shape,
+ np.array([2], dtype=np.int64))
+
def _testDeserializeFailsWrongTypeHelper(self,
serialize_fn,
deserialize_fn,
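
The batched case relies on the rank convention of deserialize_sparse: deserializing a stack of N serialized tensors prepends a batch dimension, so a scalar SparseTensor (rank 0, dense_shape []) comes back from a stack of two with indices [[0], [1]], values [37, 37], and dense_shape [2]. The shape bookkeeping, spelled out in NumPy:

    import numpy as np

    # One scalar SparseTensor: rank 0, one stored value, so indices is [1, 0].
    indices = np.zeros((1, 0), dtype=np.int64)
    values = np.array([37], dtype=np.int32)
    dense_shape = np.zeros((0,), dtype=np.int64)

    # Deserializing a stack of 2 prepends a batch dimension: rank goes
    # 0 -> 1, and each value gets its batch coordinate as its index.
    batched_indices = np.array([[0], [1]], dtype=np.int64)
    batched_values = np.tile(values, 2)                   # [37, 37]
    batched_shape = np.array([2], dtype=np.int64)
    assert batched_indices.shape == (2, 1)                # [N, rank + 1]
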
diff --git a/tensorflow/python/kernel_tests/variable_scope_test.py b/tensorflow/python/kernel_tests/variable_scope_test.py
index 1e59a8c9bf..ae2a0ab29a 100644
--- a/tensorflow/python/kernel_tests/variable_scope_test.py
+++ b/tensorflow/python/kernel_tests/variable_scope_test.py
@@ -1054,7 +1054,7 @@ class VariableScopeTest(test.TestCase):
"testGetCollection_foo/testGetCollection_a:0"
])
- def testGetTrainableVariables(self):
+ def testGetTrainableVariablesWithGetVariable(self):
with self.test_session():
_ = variable_scope.get_variable("testGetTrainableVariables_a", [])
with variable_scope.variable_scope(
@@ -1062,10 +1062,72 @@ class VariableScopeTest(test.TestCase):
_ = variable_scope.get_variable("testGetTrainableVariables_b", [])
_ = variable_scope.get_variable(
"testGetTrainableVariables_c", [], trainable=False)
+
+ # sync `ON_READ` sets trainable=False
+ _ = variable_scope.get_variable(
+ "testGetTrainableVariables_d", [],
+ synchronization=variable_scope.VariableSynchronization.ON_READ)
self.assertEqual(
[v.name for v in scope.trainable_variables()],
- ["testGetTrainableVariables_foo/"
- "testGetTrainableVariables_b:0"])
+ ["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
+
+ # All other sync values set trainable=True
+ _ = variable_scope.get_variable(
+ "testGetTrainableVariables_e", [],
+ synchronization=variable_scope.VariableSynchronization.ON_WRITE)
+ self.assertEqual([v.name for v in scope.trainable_variables()], [
+ "testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
+ "testGetTrainableVariables_foo/testGetTrainableVariables_e:0"
+ ])
+
+ with self.assertRaisesRegexp(
+ ValueError, "Synchronization value can be set to "
+ "VariableSynchronization.ON_READ only for non-trainable variables. "
+ "You have specified trainable=True and "
+ "synchronization=VariableSynchronization.ON_READ."):
+ _ = variable_scope.get_variable(
+ "testGetTrainableVariables_e", [],
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ trainable=True)
+
+ def testGetTrainableVariablesWithVariable(self):
+ with self.test_session():
+ _ = variable_scope.variable(1.0, name="testGetTrainableVariables_a")
+ with variable_scope.variable_scope(
+ "testGetTrainableVariables_foo") as scope:
+ _ = variable_scope.variable(1.0, name="testGetTrainableVariables_b")
+ _ = variable_scope.variable(
+ 1.0, name="testGetTrainableVariables_c", trainable=False)
+
+ # sync `ON_READ` sets trainable=False
+ _ = variable_scope.variable(
+ 1.0,
+ name="testGetTrainableVariables_d",
+ synchronization=variable_scope.VariableSynchronization.ON_READ)
+ self.assertEqual(
+ [v.name for v in scope.trainable_variables()],
+ ["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
+
+ # All other sync values set trainable=True
+ _ = variable_scope.variable(
+ 1.0,
+ name="testGetTrainableVariables_e",
+ synchronization=variable_scope.VariableSynchronization.ON_WRITE)
+ self.assertEqual([v.name for v in scope.trainable_variables()], [
+ "testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
+ "testGetTrainableVariables_foo/testGetTrainableVariables_e:0"
+ ])
+
+ with self.assertRaisesRegexp(
+ ValueError, "Synchronization value can be set to "
+ "VariableSynchronization.ON_READ only for non-trainable variables. "
+ "You have specified trainable=True and "
+ "synchronization=VariableSynchronization.ON_READ."):
+ _ = variable_scope.variable(
+ 1.0,
+ name="testGetTrainableVariables_e",
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ trainable=True)
def testGetGlobalVariables(self):
with self.test_session():
@@ -1253,6 +1315,31 @@ class VariableScopeWithCustomGetterTest(test.TestCase):
self.assertEqual(v3, v4)
self.assertEqual(3, called[0]) # skipped one in the first new_scope
+ def testSynchronizationAndAggregationWithCustomGetter(self):
+ called = [0]
+ synchronization = variable_scope.VariableSynchronization.AUTO
+ aggregation = variable_scope.VariableAggregation.NONE
+
+ def custom_getter(getter, *args, **kwargs):
+ called[0] += 1
+
+ # Verify synchronization and aggregation kwargs are as expected.
+ self.assertEqual(kwargs["synchronization"], synchronization)
+ self.assertEqual(kwargs["aggregation"], aggregation)
+ return getter(*args, **kwargs)
+
+ with variable_scope.variable_scope("scope", custom_getter=custom_getter):
+ variable_scope.get_variable("v", [1])
+ self.assertEqual(1, called[0])
+
+ with variable_scope.variable_scope("scope", custom_getter=custom_getter):
+ synchronization = variable_scope.VariableSynchronization.ON_READ
+ aggregation = variable_scope.VariableAggregation.MEAN
+ variable_scope.get_variable(
+ "v1", [1], synchronization=synchronization, aggregation=aggregation)
+
+ self.assertEqual(2, called[0])
+
def testCustomGetterWithReuse(self):
# Custom getter can choose to behave differently on reused variables.
def custom_getter(getter, *args, **kwargs):
@@ -1355,6 +1442,23 @@ class VariableScopeWithCustomGetterTest(test.TestCase):
self.assertAllEqual(variable_names, ["forced_name"])
+ called = [False]
+
+ def creator_c(next_creator, **kwargs):
+ called[0] = True
+ self.assertEqual(kwargs["synchronization"],
+ variable_scope.VariableSynchronization.ON_WRITE)
+ self.assertEqual(kwargs["aggregation"],
+ variable_scope.VariableAggregation.MEAN)
+ return next_creator(**kwargs)
+
+ with variable_scope.variable_creator_scope(creator_c):
+ variable_scope.get_variable(
+ "v", [],
+ synchronization=variable_scope.VariableSynchronization.ON_WRITE,
+ aggregation=variable_scope.VariableAggregation.MEAN)
+ self.assertTrue(called[0])
+
class PartitionInfoTest(test.TestCase):
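
The rule both new tests establish, mirrored by the layers/base.py change below: synchronization=ON_READ forces trainable=False and raises if the caller insists on trainable=True, while every other synchronization value leaves the trainable default intact. The validation reduces to roughly this helper (a paraphrase of the logic in this patch, not a public API):

    def resolve_trainable(synchronization, trainable=None):
      """ON_READ implies non-trainable; anything else defaults to trainable."""
      if synchronization == "ON_READ":
        if trainable:
          raise ValueError(
              "Synchronization value can be set to VariableSynchronization."
              "ON_READ only for non-trainable variables.")
        return False
      return True if trainable is None else trainable

    assert resolve_trainable("ON_READ") is False
    assert resolve_trainable("ON_WRITE") is True
    assert resolve_trainable("AUTO", trainable=False) is False
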
diff --git a/tensorflow/python/kernel_tests/variables_test.py b/tensorflow/python/kernel_tests/variables_test.py
index 62d596da91..2b9c62ad6f 100644
--- a/tensorflow/python/kernel_tests/variables_test.py
+++ b/tensorflow/python/kernel_tests/variables_test.py
@@ -642,6 +642,8 @@ class PartitionedVariableTest(test.TestCase):
iterated_partitions = list(partitioned_variable)
self.assertEqual(2, num_partitions)
self.assertEqual([v0, v1], iterated_partitions)
+ self.assertEqual([2], partitioned_variable.get_shape())
+ self.assertEqual([2], partitioned_variable.shape)
self.assertEqual([2], concatenated.get_shape())
self.assertEqual([2], concatenated.shape)
diff --git a/tensorflow/python/layers/base.py b/tensorflow/python/layers/base.py
index b8969a41ab..cf13b52617 100644
--- a/tensorflow/python/layers/base.py
+++ b/tensorflow/python/layers/base.py
@@ -152,10 +152,17 @@ class Layer(base_layer.Layer):
scope, default_name=self._base_name) as captured_scope:
self._scope = captured_scope
- def add_weight(self, name, shape, dtype=None,
- initializer=None, regularizer=None,
- trainable=True, constraint=None,
+ def add_weight(self,
+ name,
+ shape,
+ dtype=None,
+ initializer=None,
+ regularizer=None,
+ trainable=None,
+ constraint=None,
use_resource=None,
+ synchronization=vs.VariableSynchronization.AUTO,
+ aggregation=vs.VariableAggregation.NONE,
partitioner=None):
"""Adds a new variable to the layer, or gets an existing one; returns it.
@@ -170,9 +177,19 @@ class Layer(base_layer.Layer):
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
Note, if the current variable scope is marked as non-trainable
then this parameter is ignored and any added variables are also
- marked as non-trainable.
+ marked as non-trainable. `trainable` defaults to `True` unless
+ `synchronization` is set to `ON_READ`.
constraint: constraint instance (callable).
use_resource: Whether to use `ResourceVariable`.
+ synchronization: Indicates when a distributed variable will be
+ aggregated. Accepted values are constants defined in the class
+ @{tf.VariableSynchronization}. By default the synchronization is set to
+ `AUTO` and the current `DistributionStrategy` chooses
+ when to synchronize. If `synchronization` is set to `ON_READ`,
+ `trainable` must not be set to `True`.
+ aggregation: Indicates how a distributed variable will be aggregated.
+ Accepted values are constants defined in the class
+ @{tf.VariableAggregation}.
partitioner: (optional) partitioner instance (callable). If
provided, when the requested variable is created it will be split
into multiple partitions according to `partitioner`. In this case,
@@ -190,7 +207,21 @@ class Layer(base_layer.Layer):
Raises:
RuntimeError: If called with partitioned variable regularization and
eager execution is enabled.
+ ValueError: When `trainable` has been set to `True` with `synchronization`
+ set to `ON_READ`.
"""
+ if synchronization == vs.VariableSynchronization.ON_READ:
+ if trainable:
+ raise ValueError(
+ 'Synchronization value can be set to '
+ 'VariableSynchronization.ON_READ only for non-trainable variables. '
+ 'You have specified trainable=True and '
+ 'synchronization=VariableSynchronization.ON_READ.')
+ else:
+ # Set trainable to be false when variable is to be synced on read.
+ trainable = False
+ elif trainable is None:
+ trainable = True
def _should_add_regularizer(variable, existing_variable_set):
if isinstance(variable, tf_variables.PartitionedVariable):
@@ -240,6 +271,8 @@ class Layer(base_layer.Layer):
constraint=constraint,
partitioner=partitioner,
use_resource=use_resource,
+ synchronization=synchronization,
+ aggregation=aggregation,
getter=vs.get_variable)
if regularizer:
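A hedged usage sketch of the extended `add_weight` signature, assuming the public `tf.layers` aliases (the layer and weight names are illustrative):

```python
import tensorflow as tf

layer = tf.layers.Layer(name="demo")
# trainable is left unset (None); with synchronization=ON_READ the logic
# above defaults it to False, so the weight is non-trainable.
moving_mean = layer.add_weight(
    "moving_mean", shape=[4],
    initializer=tf.zeros_initializer(),
    synchronization=tf.VariableSynchronization.ON_READ,
    aggregation=tf.VariableAggregation.MEAN)
assert moving_mean in layer.non_trainable_variables
```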
diff --git a/tensorflow/python/layers/base_test.py b/tensorflow/python/layers/base_test.py
index 298e96e711..d2443db665 100644
--- a/tensorflow/python/layers/base_test.py
+++ b/tensorflow/python/layers/base_test.py
@@ -90,12 +90,34 @@ class BaseLayerTest(test.TestCase):
# regularizers only supported in GRAPH mode.
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
- variable = layer.add_variable(
+ _ = layer.add_variable(
'reg_var', [2, 2],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
self.assertEqual(len(layer.losses), 1)
+ # Test that sync `ON_READ` variables are defaulted to be non-trainable.
+ variable_3 = layer.add_variable(
+ 'sync_on_read_var', [2, 2],
+ initializer=init_ops.zeros_initializer(),
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ aggregation=variable_scope.VariableAggregation.SUM)
+ self.assertEqual(layer.non_trainable_variables, [variable_2, variable_3])
+
+ def testInvalidTrainableSynchronizationCombination(self):
+ layer = base_layers.Layer(name='my_layer')
+
+ with self.assertRaisesRegexp(
+ ValueError, 'Synchronization value can be set to '
+ 'VariableSynchronization.ON_READ only for non-trainable variables. '
+ 'You have specified trainable=True and '
+ 'synchronization=VariableSynchronization.ON_READ.'):
+ _ = layer.add_variable(
+ 'v', [2, 2],
+ initializer=init_ops.zeros_initializer(),
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ trainable=True)
+
def testReusePartitionedVaraiblesAndRegularizers(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
partitioner = partitioned_variables.fixed_size_partitioner(3)
@@ -104,7 +126,7 @@ class BaseLayerTest(test.TestCase):
partitioner=partitioner,
reuse=reuse):
layer = base_layers.Layer(name='my_layer')
- variable = layer.add_variable(
+ _ = layer.add_variable(
'reg_part_var', [4, 4],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
diff --git a/tensorflow/python/layers/normalization.py b/tensorflow/python/layers/normalization.py
index ece6667981..f7bc10a6a6 100644
--- a/tensorflow/python/layers/normalization.py
+++ b/tensorflow/python/layers/normalization.py
@@ -44,7 +44,7 @@ class BatchNormalization(keras_layers.BatchNormalization, base.Layer):
normalized, typically the features axis/axes. For instance, after a
`Conv2D` layer with `data_format="channels_first"`, set `axis=1`. If a
list of axes is provided, each axis in `axis` will be normalized
- simultaneously. Default is `-1` which takes uses last axis. Note: when
+ simultaneously. Default is `-1` which uses the last axis. Note: when
using multi-axis batch norm, the `beta`, `gamma`, `moving_mean`, and
`moving_variance` variables are the same rank as the input Tensor, with
dimension size 1 in all reduced (non-axis) dimensions).
diff --git a/tensorflow/python/lib/core/numpy.h b/tensorflow/python/lib/core/numpy.h
index 98354083c7..0098d938a0 100644
--- a/tensorflow/python/lib/core/numpy.h
+++ b/tensorflow/python/lib/core/numpy.h
@@ -31,6 +31,7 @@ limitations under the License.
// Place `<locale>` before <Python.h> to avoid build failure in macOS.
#include <locale>
+
#include <Python.h>
#include "numpy/arrayobject.h"
diff --git a/tensorflow/python/lib/core/py_seq_tensor.cc b/tensorflow/python/lib/core/py_seq_tensor.cc
index 386be35ba2..3b4f12ae31 100644
--- a/tensorflow/python/lib/core/py_seq_tensor.cc
+++ b/tensorflow/python/lib/core/py_seq_tensor.cc
@@ -88,6 +88,41 @@ bool IsPyDimension(PyObject* obj) {
return ret;
}
+// Sets *elem to a NEW reference to an element in seq on success.
+// REQUIRES: PySequence_Check(seq) && PySequence_Length(seq) > 0.
+Status SampleElementFromSequence(PyObject* seq, PyObject** elem) {
+ *elem = PySequence_GetItem(seq, 0);
+ if (*elem != nullptr) return Status::OK();
+ // seq may implement the sequence protocol (i.e., implement __getitem__)
+ // but may legitimately not have a 0-th element (__getitem__(self, 0)
+ // raises a KeyError). For example:
+ // seq = pandas.Series([0, 1, 2], index=[2, 4, 6])
+ //
+ // We don't actually need the element at key 0; any element will do
+ // for inferring the element type. All elements are expected to
+ // have the same type, and this will be validated when converting
+ // to an EagerTensor.
+ PyErr_Clear();
+ Safe_PyObjectPtr iter(PyObject_GetIter(seq));
+ if (PyErr_Occurred()) {
+ return errors::InvalidArgument("Cannot infer dtype of a ",
+ Py_TYPE(seq)->tp_name,
+ " object: ", PyExceptionFetch());
+ }
+ *elem = PyIter_Next(iter.get());
+ if (PyErr_Occurred()) {
+ return errors::InvalidArgument(
+ "Cannot infer dtype of a ", Py_TYPE(seq)->tp_name,
+ " object, as iter(<object>).next() failed: ", PyExceptionFetch());
+ }
+ if (*elem == nullptr) {
+ return errors::InvalidArgument("Cannot infer dtype of a ",
+ Py_TYPE(seq)->tp_name,
+ " object since it is an empty sequence");
+ }
+ return Status::OK();
+}
+
Status InferShapeAndType(PyObject* obj, TensorShape* shape, DataType* dtype) {
std::vector<Safe_PyObjectPtr> refs_to_clean;
while (true) {
@@ -98,7 +133,9 @@ Status InferShapeAndType(PyObject* obj, TensorShape* shape, DataType* dtype) {
auto length = PySequence_Length(obj);
if (length > 0) {
shape->AddDim(length);
- obj = PySequence_GetItem(obj, 0);
+ PyObject* elem = nullptr;
+ TF_RETURN_IF_ERROR(SampleElementFromSequence(obj, &elem));
+ obj = elem;
refs_to_clean.push_back(make_safe(obj));
continue;
} else if (length == 0) {
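The failure mode motivating `SampleElementFromSequence` is easy to reproduce from Python; a small sketch, assuming pandas is installed:

```python
import pandas as pd

s = pd.Series([0, 1, 2], index=[2, 4, 6])
try:
  s[0]  # label-based lookup; 0 is not in the index, so this raises KeyError
except KeyError:
  elem = next(iter(s))  # iteration still yields an element (0 here)
# Any element suffices for dtype inference, which is all the code above needs.
```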
diff --git a/tensorflow/python/lib/core/py_util.cc b/tensorflow/python/lib/core/py_util.cc
index 572693b1cf..2ee898ea1d 100644
--- a/tensorflow/python/lib/core/py_util.cc
+++ b/tensorflow/python/lib/core/py_util.cc
@@ -17,6 +17,7 @@ limitations under the License.
// Place `<locale>` before <Python.h> to avoid build failure in macOS.
#include <locale>
+
#include <Python.h>
#include "tensorflow/core/lib/core/errors.h"
diff --git a/tensorflow/python/ops/boosted_trees_ops.py b/tensorflow/python/ops/boosted_trees_ops.py
index 2a2bcdd9d6..868a4f6b84 100644
--- a/tensorflow/python/ops/boosted_trees_ops.py
+++ b/tensorflow/python/ops/boosted_trees_ops.py
@@ -25,6 +25,8 @@ from tensorflow.python.ops import resources
# Re-exporting ops used by other modules.
# pylint: disable=unused-import
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_gains_per_feature as calculate_best_gains_per_feature
+from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_center_bias as center_bias
+from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_example_debug_outputs as example_debug_outputs
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_stats_summary as make_stats_summary
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_predict as predict
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_training_predict as training_predict
diff --git a/tensorflow/python/ops/collective_ops.py b/tensorflow/python/ops/collective_ops.py
index a05fd15eca..98668facd5 100644
--- a/tensorflow/python/ops/collective_ops.py
+++ b/tensorflow/python/ops/collective_ops.py
@@ -22,7 +22,7 @@ from tensorflow.python.ops import gen_collective_ops
def all_reduce(t, group_size, group_key, instance_key, merge_op, final_op,
- subdiv_offsets=(0)):
+ subdiv_offsets=(0,)):
"""Reduces tensors collectively, across devices.
Args:
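The corrected default matters because `(0)` is not a tuple in Python; only the trailing comma makes one:

```python
assert (0) == 0 and isinstance((0), int)      # parentheses alone do nothing
assert (0,) == tuple([0]) and isinstance((0,), tuple)
```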
diff --git a/tensorflow/python/ops/collective_ops_test.py b/tensorflow/python/ops/collective_ops_test.py
index 8e16cffdf4..9cc64ef9f6 100644
--- a/tensorflow/python/ops/collective_ops_test.py
+++ b/tensorflow/python/ops/collective_ops_test.py
@@ -37,11 +37,11 @@ class CollectiveOpTest(test.TestCase):
with ops.device('/CPU:0'):
in0 = constant_op.constant(t0)
colred0 = collective_ops.all_reduce(in0, 2, group_key, instance_key,
- 'Add', 'Div', [0])
+ 'Add', 'Div')
with ops.device('/CPU:1'):
in1 = constant_op.constant(t1)
colred1 = collective_ops.all_reduce(in1, 2, group_key, instance_key,
- 'Add', 'Div', [0])
+ 'Add', 'Div')
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 1
results = sess.run([colred0, colred1], options=run_options)
diff --git a/tensorflow/python/ops/control_flow_ops.py b/tensorflow/python/ops/control_flow_ops.py
index 837c144467..888075ba2e 100644
--- a/tensorflow/python/ops/control_flow_ops.py
+++ b/tensorflow/python/ops/control_flow_ops.py
@@ -2932,7 +2932,8 @@ class WhileContext(ControlFlowContext):
return original_body_result, exit_vars
- def BuildLoop(self, pred, body, loop_vars, shape_invariants):
+ def BuildLoop(self, pred, body, loop_vars, shape_invariants,
+ return_same_structure):
"""Add the loop termination condition and body to the graph."""
# Keep original_loop_vars to identify which are TensorArrays
@@ -2943,9 +2944,10 @@ class WhileContext(ControlFlowContext):
loop_vars = ops.convert_n_to_tensor_or_indexed_slices(loop_vars)
try:
self.Enter()
- # _BuildLoop calls _update_input in several places. _lock ensures a
- # Session.run call cannot occur between creating and mutating new ops.
- with ops.get_default_graph()._lock: # pylint: disable=protected-access
+ # _BuildLoop calls _update_input in several places. _mutation_lock()
+ # ensures a Session.run call cannot occur between creating and mutating
+ # new ops.
+ with ops.get_default_graph()._mutation_lock(): # pylint: disable=protected-access
original_body_result, exit_vars = self._BuildLoop(
pred, body, original_loop_vars, loop_vars, shape_invariants)
finally:
@@ -2959,7 +2961,11 @@ class WhileContext(ControlFlowContext):
packed_exit_vars = nest.pack_sequence_as(
structure=original_body_result,
flat_sequence=exit_vars_with_tensor_arrays)
- return packed_exit_vars[0] if len(exit_vars) == 1 else packed_exit_vars
+
+ if return_same_structure:
+ return packed_exit_vars
+ else:
+ return packed_exit_vars[0] if len(exit_vars) == 1 else packed_exit_vars
def _FixControlInputsAndContext(self, enters):
graph = ops.get_default_graph()
@@ -2999,7 +3005,8 @@ def while_loop(cond,
back_prop=True,
swap_memory=False,
name=None,
- maximum_iterations=None):
+ maximum_iterations=None,
+ return_same_structure=False):
"""Repeat `body` while the condition `cond` is true.
`cond` is a callable returning a boolean scalar tensor. `body` is a callable
@@ -3075,11 +3082,16 @@ def while_loop(cond,
to run. If provided, the `cond` output is AND-ed with an additional
condition ensuring the number of iterations executed is no greater than
`maximum_iterations`.
+ return_same_structure: If True, output has same structure as `loop_vars`. If
+ eager execution is enabled, this is ignored (and always treated as True).
Returns:
- The output tensors for the loop variables after the loop. When the length
- of `loop_vars` is 1 this is a Tensor, TensorArray or IndexedSlice and when
- the length of `loop_vars` is greater than 1 it returns a list.
+ The output tensors for the loop variables after the loop.
+ If `return_same_structure` is True, the return value has the same
+ structure as `loop_vars`.
+ If `return_same_structure` is False, the return value is a Tensor,
+ TensorArray or IndexedSlice if the length of `loop_vars` is 1, or a list
+ otherwise.
Raises:
TypeError: if `cond` or `body` is not callable.
@@ -3134,6 +3146,7 @@ def while_loop(cond,
happen is that the thread updating `x` can never get ahead of the
counter thread because the thread incrementing `x` depends on the value
of the counter.
+
```python
import tensorflow as tf
@@ -3215,7 +3228,8 @@ def while_loop(cond,
# be encapsulated in the root context.
if loop_context.outer_context is None:
ops.add_to_collection(ops.GraphKeys.WHILE_CONTEXT, loop_context)
- result = loop_context.BuildLoop(cond, body, loop_vars, shape_invariants)
+ result = loop_context.BuildLoop(cond, body, loop_vars, shape_invariants,
+ return_same_structure)
if maximum_iterations is not None:
return result[1]
else:
diff --git a/tensorflow/python/ops/control_flow_ops_test.py b/tensorflow/python/ops/control_flow_ops_test.py
index 43fe045bcb..153548ae92 100644
--- a/tensorflow/python/ops/control_flow_ops_test.py
+++ b/tensorflow/python/ops/control_flow_ops_test.py
@@ -958,6 +958,28 @@ class WhileLoopTestCase(test_util.TensorFlowTestCase):
# Expect a tuple since that is what the body returns.
self.assertEqual(self.evaluate(r), (10,))
+ def testWhileLoopSameReturnShape_False(self):
+ i = constant_op.constant(0)
+ c = lambda i, _: math_ops.less(i, 10)
+
+ # Body returns [tensor, []]
+ b = lambda i, _: [math_ops.add(i, 1), []]
+
+ # Should only return the tensor.
+ r = control_flow_ops.while_loop(c, b, [i, []])
+ self.assertEqual(self.evaluate(r), 10)
+
+ def testWhileLoopSameReturnShape_True(self):
+ i = constant_op.constant(0)
+ c = lambda i, _: math_ops.less(i, 10)
+
+ # Body returns [tensor, []]
+ b = lambda i, _: [math_ops.add(i, 1), []]
+
+ # Should return the original structure.
+ r = control_flow_ops.while_loop(c, b, [i, []], return_same_structure=True)
+ self.assertEqual(self.evaluate(r), [10, []])
+
if __name__ == "__main__":
googletest.main()
diff --git a/tensorflow/python/ops/distributions/distribution.py b/tensorflow/python/ops/distributions/distribution.py
index 41dcd40188..c03ef967e6 100644
--- a/tensorflow/python/ops/distributions/distribution.py
+++ b/tensorflow/python/ops/distributions/distribution.py
@@ -212,7 +212,7 @@ class ReparameterizationType(object):
reparameterized, and straight-through gradients are either partially
unsupported or are not supported at all. In this case, for purposes of
e.g. RL or variational inference, it is generally safest to wrap the
- sample results in a `stop_gradients` call and instead use policy
+ sample results in a `stop_gradients` call and use policy
gradients / surrogate loss instead.
"""
diff --git a/tensorflow/python/ops/distributions/exponential.py b/tensorflow/python/ops/distributions/exponential.py
index 24bc3f3d3e..4325a14449 100644
--- a/tensorflow/python/ops/distributions/exponential.py
+++ b/tensorflow/python/ops/distributions/exponential.py
@@ -103,9 +103,6 @@ class Exponential(gamma.Gamma):
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=name)
- # While the Gamma distribution is not reparameterizable, the exponential
- # distribution is.
- self._reparameterization_type = True
self._parameters = parameters
self._graph_parents += [self._rate]
diff --git a/tensorflow/python/ops/embedding_ops.py b/tensorflow/python/ops/embedding_ops.py
index c7919e4d4c..27c2fa7017 100644
--- a/tensorflow/python/ops/embedding_ops.py
+++ b/tensorflow/python/ops/embedding_ops.py
@@ -23,6 +23,7 @@ from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
# Imports gradient definitions.
@@ -30,6 +31,7 @@ from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-impor
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
+from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
@@ -479,3 +481,158 @@ def embedding_lookup_sparse(params,
assert False, "Unrecognized combiner"
return embeddings
+
+
+@tf_export("nn.safe_embedding_lookup_sparse")
+def safe_embedding_lookup_sparse(embedding_weights,
+ sparse_ids,
+ sparse_weights=None,
+ combiner='mean',
+ default_id=None,
+ name=None,
+ partition_strategy='div',
+ max_norm=None):
+ """Lookup embedding results, accounting for invalid IDs and empty features.
+
+ The partitioned embedding tensors in `embedding_weights` must all have the
+ same shape except for the first dimension, which is allowed to vary because
+ the vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
+ may be a `PartitionedVariable`, as returned by `tf.get_variable()` with a
+ partitioner.
+
+ Invalid IDs (< 0) are pruned from input IDs and weights, as are any IDs
+ with non-positive weight. For an entry with no features, the embedding vector
+ for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
+
+ The ids and weights may be multi-dimensional. Embeddings are always aggregated
+ along the last dimension.
+
+ Args:
+ embedding_weights: A list of `P` float `Tensor`s or values representing
+ partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable`
+ created by partitioning along dimension 0. The total unpartitioned
+ shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
+ vocab size and `e_1, ..., e_m` are the embedding dimensions.
+ sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
+ ids. `d_0` is typically batch size.
+ sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
+ float weights corresponding to `sparse_ids`, or `None` if all weights
+ are assumed to be 1.0.
+ combiner: A string specifying how to combine embedding results for each
+ entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
+ the default.
+ default_id: The id to use for an entry with no features.
+ name: A name for this operation (optional).
+ partition_strategy: A string specifying the partitioning strategy.
+ Currently `"div"` and `"mod"` are supported. Default is `"div"`.
+ max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
+ combining.
+
+
+ Returns:
+ Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
+
+ Raises:
+ ValueError: if `embedding_weights` is empty.
+ """
+ if embedding_weights is None:
+ raise ValueError('Missing embedding_weights %s.' % embedding_weights)
+ if isinstance(embedding_weights, variables.PartitionedVariable):
+ embedding_weights = list(embedding_weights) # get underlying Variables.
+ if not isinstance(embedding_weights, list):
+ embedding_weights = [embedding_weights]
+ if len(embedding_weights) < 1:
+ raise ValueError('Missing embedding_weights %s.' % embedding_weights)
+
+ dtype = sparse_weights.dtype if sparse_weights is not None else None
+ embedding_weights = [
+ ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
+ ]
+
+ with ops.name_scope(name, 'embedding_lookup',
+ embedding_weights + [sparse_ids,
+ sparse_weights]) as scope:
+ # Reshape higher-rank sparse ids and weights to linear segment ids.
+ original_shape = sparse_ids.dense_shape
+ original_rank_dim = sparse_ids.dense_shape.get_shape()[0]
+ original_rank = (
+ array_ops.size(original_shape)
+ if original_rank_dim.value is None
+ else original_rank_dim.value)
+ sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
+ math_ops.reduce_prod(
+ array_ops.slice(original_shape, [0], [original_rank - 1])),
+ array_ops.gather(original_shape, original_rank - 1)])
+ if sparse_weights is not None:
+ sparse_weights = sparse_tensor.SparseTensor(
+ sparse_ids.indices,
+ sparse_weights.values, sparse_ids.dense_shape)
+
+ # Prune invalid ids and weights.
+ sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
+ if combiner != 'sum':
+ sparse_ids, sparse_weights = _prune_invalid_weights(
+ sparse_ids, sparse_weights)
+
+ # Fill in dummy values for empty features, if necessary.
+ sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
+ default_id or
+ 0)
+ if sparse_weights is not None:
+ sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
+
+ result = embedding_lookup_sparse(
+ embedding_weights,
+ sparse_ids,
+ sparse_weights,
+ combiner=combiner,
+ partition_strategy=partition_strategy,
+ name=None if default_id is None else scope,
+ max_norm=max_norm)
+
+ if default_id is None:
+ # Broadcast is_row_empty to the same shape as embedding_lookup_result,
+ # for use in Select.
+ is_row_empty = array_ops.tile(
+ array_ops.reshape(is_row_empty, [-1, 1]),
+ array_ops.stack([1, array_ops.shape(result)[1]]))
+
+ result = array_ops.where(is_row_empty,
+ array_ops.zeros_like(result),
+ result,
+ name=scope)
+
+ # Reshape back from linear ids back into higher-dimensional dense result.
+ final_result = array_ops.reshape(
+ result,
+ array_ops.concat([
+ array_ops.slice(
+ math_ops.cast(original_shape, dtypes.int32), [0],
+ [original_rank - 1]),
+ array_ops.slice(array_ops.shape(result), [1], [-1])
+ ], 0))
+ final_result.set_shape(tensor_shape.unknown_shape(
+ (original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
+ return final_result
+
+
+def _prune_invalid_ids(sparse_ids, sparse_weights):
+ """Prune invalid IDs (< 0) from the input ids and weights."""
+ is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
+ if sparse_weights is not None:
+ is_id_valid = math_ops.logical_and(
+ is_id_valid,
+ array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
+ sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
+ if sparse_weights is not None:
+ sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
+ return sparse_ids, sparse_weights
+
+
+def _prune_invalid_weights(sparse_ids, sparse_weights):
+ """Prune invalid weights (< 0) from the input ids and weights."""
+ if sparse_weights is not None:
+ is_weights_valid = math_ops.greater(sparse_weights.values, 0)
+ sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
+ sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
+ return sparse_ids, sparse_weights
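A hedged usage sketch of the new export in TF 1.x graph mode (vocabulary size, shapes, and values are illustrative):

```python
import tensorflow as tf

weights = tf.get_variable("emb", shape=[4, 2])  # vocab of 4, embedding dim 2
# Row 1 has no features; the id -1 in row 0 is invalid and gets pruned.
sp_ids = tf.SparseTensor(
    indices=[[0, 0], [0, 1], [2, 0]],
    values=tf.constant([1, -1, 3], dtype=tf.int64),
    dense_shape=[3, 2])
emb = tf.nn.safe_embedding_lookup_sparse(weights, sp_ids)
# emb has shape [3, 2]; row 1 comes back as the 0-vector (default_id=None).
```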
diff --git a/tensorflow/python/ops/functional_ops.py b/tensorflow/python/ops/functional_ops.py
index 30413f289a..4ecc74675a 100644
--- a/tensorflow/python/ops/functional_ops.py
+++ b/tensorflow/python/ops/functional_ops.py
@@ -775,7 +775,7 @@ def While(input_, cond, body, name=None, hostmem=None):
a string, non-empty means True and empty means False. If the
tensor is not a scalar, non-emptiness means True and False
otherwise.
- body: . A funcion takes a list of tensors and returns another
+ body: A function that takes a list of tensors and returns another
list of tensors. Both lists have the same types as specified
by T.
name: A name for the operation (optional).
@@ -945,6 +945,61 @@ def For(start,
# pylint: enable=invalid-name,protected-access
-def partitioned_call(args, f):
- return gen_functional_ops.partitioned_call(
- args=args, Tout=[o.type for o in f.definition.signature.output_arg], f=f)
+def partitioned_call(args, f, tout=None, executing_eagerly=None):
+ """Executes a function while respecting device annotations.
+
+ Currently, only those functions that execute within the same address space
+ can be executed.
+
+ Args:
+ args: The arguments of the function, including captured inputs.
+ f: The function to execute; an instance of `_DefinedFunction` or
+ `_EagerDefinedFunction`.
+ tout: a list containing the output dtypes enums; if `None`, inferred from
+ the signature of `f`.
+ executing_eagerly: (Optional) A boolean indicating whether the context is
+ executing eagerly. If `None`, fetched from the global context.
+
+ Returns:
+ The list of `Tensor`s returned by invoking `f(args)`. If the function does
+ not return anything, then returns `None` if eager execution is enabled, or
+ the `Operation` if not.
+ """
+
+ if tout is None:
+ tout = tuple(x.type for x in f.definition.signature.output_arg)
+
+ if executing_eagerly is None:
+ executing_eagerly = context.executing_eagerly()
+
+ if executing_eagerly or len(tout):
+ if f.stateful_ops:
+ outputs = gen_functional_ops.stateful_partitioned_call(
+ args=args, Tout=tout, f=f)
+ else:
+ outputs = gen_functional_ops.partitioned_call(args=args, Tout=tout, f=f)
+ return outputs if outputs else None
+
+ # The generated binding returns an empty list for functions that don't
+ # return any Tensors, hence the need to use `create_op` directly.
+ args = [ops.internal_convert_to_tensor(x) for x in args]
+ tin_attr = attr_value_pb2.AttrValue(
+ list=attr_value_pb2.AttrValue.ListValue(
+ type=[x.dtype.as_datatype_enum for x in args]))
+ tout_attr = attr_value_pb2.AttrValue(
+ list=attr_value_pb2.AttrValue.ListValue(type=tout))
+ func_attr = attr_value_pb2.AttrValue(
+ func=attr_value_pb2.NameAttrList(name=f.name))
+
+ graph = ops.get_default_graph()
+ f.add_to_graph(graph)
+ op_name = "StatefulPartitionedCall" if f.stateful_ops else "PartitionedCall"
+ op = graph.create_op(
+ op_name,
+ args,
+ tout,
+ compute_shapes=False,
+ name="PartitionedFunctionCall",
+ attrs={"Tin": tin_attr, "Tout": tout_attr, "f": func_attr})
+ outputs = op.outputs
+ return outputs if outputs else op
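A hypothetical sketch of driving this internal helper with a `Defun`; `partitioned_call` is not a public API, and the exact wiring below is an assumption:

```python
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.ops import functional_ops

@function.Defun(dtypes.float32)
def Plus1(x):
  return x + 1.0

# tout is inferred from Plus1's output signature; in graph mode with a
# non-empty tout this goes through the generated op binding.
y = functional_ops.partitioned_call(
    args=[constant_op.constant(41.0)], f=Plus1)
```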
diff --git a/tensorflow/python/ops/gradients_impl.py b/tensorflow/python/ops/gradients_impl.py
index 99909ac38e..b64a66be03 100644
--- a/tensorflow/python/ops/gradients_impl.py
+++ b/tensorflow/python/ops/gradients_impl.py
@@ -31,6 +31,7 @@ from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
@@ -54,6 +55,7 @@ from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import spectral_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# This is to avoid a circular dependency with cond_v2_impl.
@@ -113,12 +115,14 @@ ops.register_tensor_conversion_function(ops.IndexedSlices,
_IndexedSlicesToTensor)
-def _MarkReachedOps(from_ops, reached_ops):
+def _MarkReachedOps(from_ops, reached_ops, func_graphs):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: set of Operations.
+ func_graphs: list of function._FuncGraphs. This method will traverse through
+ these functions if they capture from_ops or any reachable ops.
"""
queue = collections.deque()
queue.extend(from_ops)
@@ -128,36 +132,11 @@ def _MarkReachedOps(from_ops, reached_ops):
reached_ops.add(op)
for output in op.outputs:
if _IsBackpropagatable(output):
- queue.extend(output.consumers())
+ queue.extend(_Consumers(output, func_graphs))
-def _GatherInputs(to_ops, reached_ops):
- """List all inputs of to_ops that are in reached_ops.
-
- Args:
- to_ops: list of Operations.
- reached_ops: set of Operations.
-
- Returns:
- The list of all inputs of to_ops that are in reached_ops.
- That list includes all elements of to_ops.
- """
- inputs = []
- queue = collections.deque()
- queue.extend(to_ops)
- while queue:
- op = queue.popleft()
- # We are interested in this op.
- if op in reached_ops:
- inputs.append(op)
- # Clear the boolean so we won't add the inputs again.
- reached_ops.remove(op)
- for inp in op.inputs:
- queue.append(inp.op)
- return inputs
-
-
-def _PendingCount(to_ops, from_ops, colocate_gradients_with_ops):
+def _PendingCount(to_ops, from_ops, colocate_gradients_with_ops, func_graphs,
+ xs):
"""Initialize the pending count for ops between two lists of Operations.
'pending_count[op]' indicates the number of backprop inputs
@@ -167,6 +146,11 @@ def _PendingCount(to_ops, from_ops, colocate_gradients_with_ops):
to_ops: list of Operations.
from_ops: list of Operations.
colocate_gradients_with_ops: Python bool. See docstring of gradients().
+ func_graphs: list of function._FuncGraphs. This method will traverse through
+ these functions if they capture from_ops or any reachable ops. This is
+ useful if to_ops occur in a function and from_ops are in an outer function
+ or graph.
+ xs: list of Tensors.
Returns:
A tuple containing: (1) the subset of to_ops reachable from from_ops by a
@@ -177,7 +161,7 @@ def _PendingCount(to_ops, from_ops, colocate_gradients_with_ops):
"""
# Mark reachable ops from from_ops.
reached_ops = set()
- _MarkReachedOps(from_ops, reached_ops)
+ _MarkReachedOps(from_ops, reached_ops, func_graphs)
# X in reached_ops iff X is reachable from from_ops by a path of zero or more
# backpropagatable tensors.
@@ -196,7 +180,7 @@ def _PendingCount(to_ops, from_ops, colocate_gradients_with_ops):
between_op_list.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops.remove(op)
- for inp in op.inputs:
+ for inp in _Inputs(op, xs):
queue.append(inp.op)
# X in between_ops iff X is on a path of zero or more backpropagatable tensors
# between from_ops and to_ops
@@ -208,7 +192,7 @@ def _PendingCount(to_ops, from_ops, colocate_gradients_with_ops):
# Initialize pending count for between ops.
pending_count = collections.defaultdict(int)
for op in between_op_list:
- for x in op.inputs:
+ for x in _Inputs(op, xs):
if x.op in between_ops:
pending_count[x.op] += 1
@@ -329,7 +313,7 @@ def _VerifyGeneratedGradients(grads, op):
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
-def _StopOps(from_ops, stop_gradient_ops, pending_count):
+def _StopOps(from_ops, stop_gradient_ops, pending_count, xs):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
@@ -345,6 +329,7 @@ def _StopOps(from_ops, stop_gradient_ops, pending_count):
from_ops: list of Operations.
stop_gradient_ops: list of Operations never to backprop through.
pending_count: mapping from operation to number of backprop inputs.
+ xs: list of Tensors.
Returns:
The set of operations.
@@ -352,7 +337,7 @@ def _StopOps(from_ops, stop_gradient_ops, pending_count):
stop_ops = set()
for op in from_ops:
is_stop_op = True
- for inp in op.inputs:
+ for inp in _Inputs(op, xs):
if pending_count[inp.op] > 0:
is_stop_op = False
break
@@ -372,12 +357,19 @@ def _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops): # pyli
yield
-def _SymGrad(op, out_grads):
+def _IsPartitionedCall(op):
+ return op.type == "PartitionedCall" or op.type == "StatefulPartitionedCall"
+
+
+def _SymGrad(op, out_grads, xs):
"""Backprop through a function call node op given its outputs' gradients."""
- f_in = [x for x in op.inputs] + out_grads
- f_types = [x.dtype for x in op.inputs]
+ f_in = [x for x in _Inputs(op, xs)] + out_grads
+ f_types = [x.dtype for x in _Inputs(op, xs)]
f = attr_value_pb2.NameAttrList()
- f.name = op.type
+ if _IsPartitionedCall(op):
+ f.name = op.get_attr("f").name
+ else:
+ f.name = op.type
for k in op.node_def.attr:
f.attr[k].CopyFrom(op.node_def.attr[k])
# TODO(apassos) use a better dtype here
@@ -425,7 +417,7 @@ def _MaybeCompile(scope, op, func, grad_fn):
return grad_fn()
-def _RaiseNoGradWrtInitialLoopValError(op, from_ops):
+def _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs):
"""Raises an error if we backprop through a loop var."""
# Find the nearest 'to_op' reachable from 'op' to provide a more helpful error
# message.
@@ -439,7 +431,7 @@ def _RaiseNoGradWrtInitialLoopValError(op, from_ops):
if curr_op in from_ops:
target_op = curr_op
break
- queue.extend(t.op for t in curr_op.inputs)
+ queue.extend(t.op for t in _Inputs(curr_op, xs))
assert target_op
raise ValueError(
"Cannot compute gradient inside while loop with respect to op '%s'. "
@@ -449,6 +441,68 @@ def _RaiseNoGradWrtInitialLoopValError(op, from_ops):
% target_op.name)
+def _MaybeCaptured(t):
+ """If t is a captured value placeholder, returns the original captured value.
+
+ Args:
+ t: Tensor
+
+ Returns:
+ A tensor, potentially from a different Graph/function._FuncGraph.
+ """
+ # pylint: disable=protected-access
+ if isinstance(t.op.graph, function._FuncGraph) and t.op.type == "Placeholder":
+ for input_t, placeholder_t in t.op.graph._captured.items():
+ if t == placeholder_t:
+ return _MaybeCaptured(input_t)
+ # pylint: enable=protected-access
+ return t
+
+
+# TODO(skyewm): plumbing xs through everywhere is ugly, consider making
+# _GradientsHelper a class with xs as a member variable.
+def _Inputs(op, xs):
+ """Returns the inputs of op, crossing closure boundaries where necessary.
+
+ Args:
+ op: Operation
+ xs: list of Tensors we are differentiating w.r.t.
+
+ Returns:
+ A list of tensors. The tensors may be from multiple
+ Graph/function._FuncGraphs if op is in a function._FuncGraph and has
+ captured inputs.
+ """
+ if isinstance(op.graph, function._FuncGraph): # pylint: disable=protected-access
+ # If we're differentiating w.r.t. `t`, do not attempt to traverse through it
+ # to a captured value. The algorithm needs to "see" `t` in this case, even
+ # if it's a function input for a captured value, whereas usually we'd like
+ # to traverse through these closures as if the captured value was the direct
+ # input to op.
+ return [t if (t in xs) else _MaybeCaptured(t) for t in op.inputs]
+ else:
+ return op.inputs
+
+
+def _Consumers(t, func_graphs):
+ """Returns the consumers of t, crossing closure boundaries where necessary.
+
+ Args:
+ t: Tensor
+ func_graphs: a list of function._FuncGraphs that may have captured t.
+
+ Returns:
+ A list of `Operation`s that consume t. The operations may be from the
+ current graph and/or func_graphs.
+ """
+ consumers = t.consumers()
+ for func in func_graphs:
+ for input_t, placeholder in func._captured.items(): # pylint: disable=protected-access
+ if input_t == t:
+ consumers.extend(_Consumers(placeholder, func_graphs))
+ return consumers
+
+
@tf_export("gradients")
def gradients(ys,
xs,
@@ -534,10 +588,10 @@ def gradients(ys,
RuntimeError: if called in Eager mode.
"""
- # Creating the gradient graph for control flow mutates Operations. _lock
- # ensures a Session.run call cannot occur between creating and mutating new
- # ops.
- with ops.get_default_graph()._lock: # pylint: disable=protected-access
+ # Creating the gradient graph for control flow mutates Operations.
+ # _mutation_lock ensures a Session.run call cannot occur between creating and
+ # mutating new ops.
+ with ops.get_default_graph()._mutation_lock(): # pylint: disable=protected-access
return _GradientsHelper(ys, xs, grad_ys, name, colocate_gradients_with_ops,
gate_gradients, aggregation_method, stop_gradients)
@@ -558,6 +612,14 @@ def _GradientsHelper(ys,
if src_graph is None:
src_graph = ops.get_default_graph()
+ # If src_graph is a _FuncGraph (i.e. a function body), gather it and all
+ # ancestor graphs. This is necessary for correctly handling captured values.
+ func_graphs = []
+ curr_graph = src_graph
+ while isinstance(curr_graph, function._FuncGraph): # pylint: disable=protected-access
+ func_graphs.append(curr_graph)
+ curr_graph = curr_graph._outer_graph # pylint: disable=protected-access
+
ys = _AsList(ys)
xs = _AsList(xs)
stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)
@@ -592,12 +654,13 @@ def _GradientsHelper(ys,
# Initialize the pending count for ops in the connected subgraph from ys
# to the xs.
if len(ys) > 1:
- ys = [array_ops.identity(y) if y.consumers() else y for y in ys]
+ ys = [array_ops.identity(y) if _Consumers(y, func_graphs) else y
+ for y in ys]
to_ops = [t.op for t in ys]
from_ops = [t.op for t in xs]
stop_gradient_ops = [t.op for t in stop_gradients]
reachable_to_ops, pending_count, loop_state = _PendingCount(
- to_ops, from_ops, colocate_gradients_with_ops)
+ to_ops, from_ops, colocate_gradients_with_ops, func_graphs, xs)
# Iterate over the collected ops.
#
@@ -631,7 +694,7 @@ def _GradientsHelper(ys,
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
- stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count)
+ stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count, xs)
while queue:
# generate gradient subgraph for op.
op = queue.popleft()
@@ -645,13 +708,19 @@ def _GradientsHelper(ys,
grad_fn = None
func_call = None
+ is_partitioned_call = _IsPartitionedCall(op)
# pylint: disable=protected-access
- is_func_call = src_graph._is_function(op.type)
+ is_func_call = (
+ src_graph._is_function(op.type) or is_partitioned_call)
# pylint: enable=protected-access
has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
if has_out_grads and (op not in stop_ops):
if is_func_call:
- func_call = src_graph._get_function(op.type) # pylint: disable=protected-access
+ if is_partitioned_call:
+ func_call = src_graph._get_function( # pylint: disable=protected-access
+ compat.as_bytes(op.get_attr("f").name))
+ else:
+ func_call = src_graph._get_function(op.type) # pylint: disable=protected-access
# Note that __defun is not set if the graph is
# imported. If it's set, we prefer to access the original
# defun.
@@ -680,7 +749,7 @@ def _GradientsHelper(ys,
op._control_flow_context.IsWhileContext() and
op._control_flow_context ==
ops.get_default_graph()._get_control_flow_context()):
- _RaiseNoGradWrtInitialLoopValError(op, from_ops)
+ _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs)
# pylint: enable=protected-access
if (grad_fn or is_func_call) and has_out_grads:
@@ -712,7 +781,7 @@ def _GradientsHelper(ys,
# For function call ops, we add a 'SymbolicGradient'
# node to the graph to compute gradients.
in_grads = _MaybeCompile(grad_scope, op, func_call,
- lambda: _SymGrad(op, out_grads))
+ lambda: _SymGrad(op, out_grads, xs))
in_grads = _AsList(in_grads)
_VerifyGeneratedGradients(in_grads, op)
if gate_gradients and len([x for x in in_grads
@@ -727,8 +796,8 @@ def _GradientsHelper(ys,
else:
# If no grad_fn is defined or none of out_grads is available,
# just propagate a list of None backwards.
- in_grads = [None] * len(op.inputs)
- for i, (t_in, in_grad) in enumerate(zip(op.inputs, in_grads)):
+ in_grads = [None] * len(_Inputs(op, xs))
+ for i, (t_in, in_grad) in enumerate(zip(_Inputs(op, xs), in_grads)):
if in_grad is not None:
if (isinstance(in_grad, ops.Tensor) and
t_in.dtype != dtypes.resource):
@@ -746,7 +815,8 @@ def _GradientsHelper(ys,
loop_state.ExitGradWhileContext(op, before=False)
# Update pending count for the inputs of op and enqueue ready ops.
- _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)
+ _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
+ xs)
if loop_state:
loop_state.PostProcessing()
@@ -765,9 +835,10 @@ def _HasAnyNotNoneGrads(grads, op):
return False
-def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):
+def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
+ xs):
"""Update pending count for the inputs of op and enqueue ready ops."""
- for x in op.inputs:
+ for x in _Inputs(op, xs):
pending_count[x.op] -= 1
ready = (pending_count[x.op] == 0)
if loop_state and not ready:
diff --git a/tensorflow/python/ops/gradients_test.py b/tensorflow/python/ops/gradients_test.py
index d81c756f1c..d02fcf4ee2 100644
--- a/tensorflow/python/ops/gradients_test.py
+++ b/tensorflow/python/ops/gradients_test.py
@@ -57,90 +57,8 @@ from tensorflow.python.ops.nn_ops import bias_add
from tensorflow.python.platform import googletest
-def _OpsBetween(to_ops, from_ops):
- """Build the list of operations between two lists of Operations.
-
- Args:
- to_ops: list of Operations.
- from_ops: list of Operations.
-
- Returns:
- The list of operations between "from_ops" and "to_ops", sorted by
- decreasing operation id. This list contains all elements of to_ops.
-
- TODO(touts): Think about returning an empty list if from_ops are not
- reachable from to_ops. Presently it returns to_ops in that case.
- """
- # Ops that are reachable from the output of "input_ops".
- reached_ops = set()
- # We only care to reach up to "output_ops" so we mark the
- # output ops as reached to avoid recursing past them.
- for op in to_ops:
- reached_ops.add(op)
- gradients_impl._MarkReachedOps(from_ops, reached_ops)
- between_ops = gradients_impl._GatherInputs(to_ops, reached_ops)
- between_ops.sort(key=lambda x: -x._id)
- return between_ops
-
-
class GradientsTest(test_util.TensorFlowTestCase):
- def _OpNames(self, op_list):
- return ["%s/%d" % (str(op.name), op._id) for op in op_list]
-
- def _assertOpListEqual(self, ops1, ops2):
- self.assertEquals(self._OpNames(ops1), self._OpNames(ops2))
-
- def testOpsBetweenSimple(self):
- with ops.Graph().as_default():
- t1 = constant(1.0)
- t2 = constant(2.0)
- t3 = array_ops.stack([t1, t2])
- # Full graph
- self._assertOpListEqual([t3.op, t2.op, t1.op],
- _OpsBetween([t3.op], [t1.op, t2.op]))
- # Only t1, t3.
- self._assertOpListEqual([t3.op, t1.op], _OpsBetween([t3.op], [t1.op]))
-
- def testOpsBetweenUnreachable(self):
- with ops.Graph().as_default():
- t1 = constant(1.0)
- t2 = constant(2.0)
- _ = array_ops.stack([t1, t2])
- t4 = constant(1.0)
- t5 = constant(2.0)
- t6 = array_ops.stack([t4, t5])
- # Elements of to_ops are always listed.
- self._assertOpListEqual([t6.op], _OpsBetween([t6.op], [t1.op]))
-
- def testOpsBetweenCut(self):
- with ops.Graph().as_default():
- t1 = constant(1.0)
- t2 = constant(2.0)
- t3 = array_ops.stack([t1, t2])
- t4 = constant([1.0])
- t5 = array_ops.concat([t4, t3], 0)
- t6 = constant([2.0])
- t7 = array_ops.concat([t5, t6], 0)
- self._assertOpListEqual([t7.op, t5.op, t4.op],
- _OpsBetween([t7.op], [t4.op]))
-
- def testOpsBetweenCycle(self):
- with ops.Graph().as_default():
- t1 = constant(1.0)
- t2 = constant(2.0)
- t3 = array_ops.stack([t1, t2])
- t4 = array_ops.concat([t3, t3, t3], 0)
- t5 = constant([1.0])
- t6 = array_ops.concat([t4, t5], 0)
- t7 = array_ops.concat([t6, t3], 0)
- self._assertOpListEqual([t6.op, t4.op, t3.op],
- _OpsBetween([t6.op], [t3.op]))
- self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
- _OpsBetween([t7.op], [t1.op, t5.op]))
- self._assertOpListEqual([t6.op, t5.op, t4.op, t3.op, t2.op],
- _OpsBetween([t6.op], [t2.op, t5.op]))
-
def testGradients(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[32, 100], name="in")
@@ -519,6 +437,96 @@ class FunctionGradientsTest(test_util.TensorFlowTestCase):
grad_func=grad_func, python_grad_func=self._PythonGradient)
f.add_to_graph(ops.Graph())
+ def testGradientWrtCaptured(self):
+ with ops.Graph().as_default():
+ x = constant_op.constant(1.0, name="x")
+
+ @function.Defun()
+ def Foo():
+ y = math_ops.multiply(x, 2.0, name="y")
+ g = gradients_impl.gradients(y, x)
+ return g[0]
+
+ f = Foo()
+ with self.test_session() as sess:
+ self.assertEqual(sess.run(f), 2.0)
+
+ def testGradientOfCaptured(self):
+ with ops.Graph().as_default():
+ x = constant_op.constant(1.0, name="x")
+ y = math_ops.multiply(x, 2.0, name="y")
+
+ @function.Defun()
+ def Foo():
+ g = gradients_impl.gradients(y, x)
+ return g[0]
+
+ f = Foo()
+ with self.test_session() as sess:
+ self.assertEqual(sess.run(f), 2.0)
+
+ def testCapturedResourceVariable(self):
+ with ops.Graph().as_default():
+ var = resource_variable_ops.ResourceVariable(1.0, name="var")
+
+ @function.Defun()
+ def Foo():
+ y = math_ops.multiply(var, 2.0, name="y")
+ g = gradients_impl.gradients(y, var)
+ return g[0]
+
+ f = Foo()
+ with self.test_session() as sess:
+ sess.run(variables.global_variables_initializer())
+ self.assertEqual(sess.run(f), 2.0)
+
+ def testCapturedNested(self):
+ with ops.Graph().as_default():
+ x1 = constant_op.constant(1.0, name="x1")
+ x2 = constant_op.constant(2.0, name="x2")
+ x3 = math_ops.multiply(x1, x2, name="x3")
+
+ @function.Defun()
+ def Outer():
+ outer1 = array_ops.identity(x1, name="outer1")
+
+ @function.Defun()
+ def Inner():
+ inner1 = array_ops.identity(outer1, name="inner1")
+ inner2 = array_ops.identity(x2, name="inner2")
+ inner3 = array_ops.identity(x3, name="inner3")
+ return gradients_impl.gradients([inner1, inner2, inner3, x1],
+ [x1, x2])
+
+ return Inner()
+
+ x1_grad, x2_grad = Outer()
+ with self.test_session() as sess:
+ # 1.0 + None + 2.0 + 1.0 = 4.0
+ self.assertEqual(sess.run(x1_grad), 4.0)
+ # None + 1.0 + 1.0 + None = 2.0
+ self.assertEqual(sess.run(x2_grad), 2.0)
+
+ def testCapturedFromFunction(self):
+ with ops.Graph().as_default():
+ x = constant_op.constant(1.0, name="x")
+
+ @function.Defun()
+ def Outer():
+ y = math_ops.multiply(x, 2.0, name="y")
+
+ @function.Defun()
+ def Inner():
+ z = math_ops.multiply(y, 3.0, name="z")
+ g = gradients_impl.gradients(z, y)
+ return g[0]
+
+ return Inner()
+
+ z_grad = Outer()
+ with self.test_session() as sess:
+ self.assertEqual(sess.run(z_grad), 3.0)
+
class StopGradientTest(test_util.TensorFlowTestCase):
diff --git a/tensorflow/python/ops/image_ops_impl.py b/tensorflow/python/ops/image_ops_impl.py
index 2c7751f792..9440bab9ee 100644
--- a/tensorflow/python/ops/image_ops_impl.py
+++ b/tensorflow/python/ops/image_ops_impl.py
@@ -55,8 +55,10 @@ ops.NotDifferentiable('SampleDistortedBoundingBoxV2')
ops.NotDifferentiable('ExtractGlimpse')
ops.NotDifferentiable('NonMaxSuppression')
ops.NotDifferentiable('NonMaxSuppressionV2')
+ops.NotDifferentiable('NonMaxSuppressionWithOverlaps')
+# pylint: disable=invalid-name
def _assert(cond, ex_type, msg):
"""A polymorphic assert, works with tensors and boolean expressions.
@@ -1070,15 +1072,16 @@ def resize_images(images,
@tf_export('image.resize_image_with_pad')
-def resize_image_with_pad(image, target_height, target_width,
+def resize_image_with_pad(image,
+ target_height,
+ target_width,
method=ResizeMethod.BILINEAR):
- """
- Resizes and pads an image to a target width and height.
+ """Resizes and pads an image to a target width and height.
Resizes an image to a target width and height by keeping
the aspect ratio the same without distortion. If the target
dimensions don't match the image dimensions, the image
- is resized and then padded with zeroes to match requested
+ is resized and then padded with zeroes to match requested
dimensions.
Args:
@@ -1139,10 +1142,10 @@ def resize_image_with_pad(image, target_height, target_width,
ratio = max_(f_width / f_target_width, f_height / f_target_height)
resized_height_float = f_height / ratio
resized_width_float = f_width / ratio
- resized_height = math_ops.cast(math_ops.floor(resized_height_float),
- dtype=dtypes.int32)
- resized_width = math_ops.cast(math_ops.floor(resized_width_float),
- dtype=dtypes.int32)
+ resized_height = math_ops.cast(
+ math_ops.floor(resized_height_float), dtype=dtypes.int32)
+ resized_width = math_ops.cast(
+ math_ops.floor(resized_width_float), dtype=dtypes.int32)
padding_height = (f_target_height - resized_height_float) / 2
padding_width = (f_target_width - resized_width_float) / 2
@@ -1154,13 +1157,13 @@ def resize_image_with_pad(image, target_height, target_width,
# Resize first, then pad to meet requested dimensions
resized = resize_images(image, [resized_height, resized_width], method)
- padded = pad_to_bounding_box(resized, p_height, p_width,
- target_height, target_width)
+ padded = pad_to_bounding_box(resized, p_height, p_width, target_height,
+ target_width)
if padded.get_shape().ndims is None:
raise ValueError('padded contains no shape.')
- _, padded_height, padded_width, _ = _ImageDimensions(padded, rank=4)
+ _ImageDimensions(padded, rank=4)
if not is_batch:
padded = array_ops.squeeze(padded, squeeze_dims=[0])
@@ -1750,6 +1753,22 @@ def is_jpeg(contents, name=None):
return math_ops.equal(substr, b'\xff\xd8\xff', name=name)
+def _is_png(contents, name=None):
+ r"""Convenience function to check if the 'contents' encodes a PNG image.
+
+ Args:
+ contents: 0-D `string`. The encoded image bytes.
+ name: A name for the operation (optional)
+
+ Returns:
+ A scalar boolean tensor indicating if 'contents' may be a PNG image.
+ is_png is susceptible to false positives.
+ """
+ with ops.name_scope(name, 'is_png'):
+ substr = string_ops.substr(contents, 0, 3)
+ return math_ops.equal(substr, b'\211PN', name=name)
+
+
@tf_export('image.decode_image')
def decode_image(contents, channels=None, dtype=dtypes.uint8, name=None):
"""Convenience function for `decode_bmp`, `decode_gif`, `decode_jpeg`,
@@ -1827,8 +1846,8 @@ def decode_image(contents, channels=None, dtype=dtypes.uint8, name=None):
def check_png():
"""Checks if an image is PNG."""
- is_png = math_ops.equal(substr, b'\211PN', name='is_png')
- return control_flow_ops.cond(is_png, _png, check_gif, name='cond_png')
+ return control_flow_ops.cond(
+ _is_png(contents), _png, check_gif, name='cond_png')
def _jpeg():
"""Decodes a jpeg image."""
@@ -2091,6 +2110,50 @@ def non_max_suppression(boxes,
iou_threshold, score_threshold)
+@tf_export('image.non_max_suppression_overlaps')
+def non_max_suppression_with_overlaps(overlaps,
+ scores,
+ max_output_size,
+ overlap_threshold=0.5,
+ score_threshold=float('-inf'),
+ name=None):
+ """Greedily selects a subset of bounding boxes in descending order of score.
+
+ Prunes away boxes that have high overlap with previously selected boxes.
+ N-by-n overlap values are supplied as a square matrix.
+ The output of this operation is a set of integers indexing into the input
+ collection of bounding boxes representing the selected boxes. The bounding
+ box coordinates corresponding to the selected indices can then be obtained
+ using the `tf.gather` operation. For example:
+ selected_indices = tf.image.non_max_suppression_overlaps(
+ overlaps, scores, max_output_size, iou_threshold)
+ selected_boxes = tf.gather(boxes, selected_indices)
+
+ Args:
+ overlaps: A 2-D float `Tensor` of shape `[num_boxes, num_boxes]`.
+ scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single
+ score corresponding to each box (each row of boxes).
+ max_output_size: A scalar integer `Tensor` representing the maximum number
+ of boxes to be selected by non max suppression.
+ overlap_threshold: A float representing the threshold for deciding whether
+ boxes overlap too much with respect to the provided overlap values.
+ score_threshold: A float representing the threshold for deciding when to
+ remove boxes based on score.
+ name: A name for the operation (optional).
+
+ Returns:
+ selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the
+ selected indices from the overlaps tensor, where `M <= max_output_size`.
+ """
+ with ops.name_scope(name, 'non_max_suppression_overlaps'):
+ overlap_threshold = ops.convert_to_tensor(
+ overlap_threshold, name='overlap_threshold')
+ # pylint: disable=protected-access
+ return gen_image_ops._non_max_suppression_v3(
+ overlaps, scores, max_output_size, overlap_threshold, score_threshold)
+ # pylint: enable=protected-access
+
+
_rgb_to_yiq_kernel = [[0.299, 0.59590059,
0.2115], [0.587, -0.27455667, -0.52273617],
[0.114, -0.32134392, 0.31119955]]
diff --git a/tensorflow/python/ops/image_ops_test.py b/tensorflow/python/ops/image_ops_test.py
index 8e40de140d..cf9761803b 100644
--- a/tensorflow/python/ops/image_ops_test.py
+++ b/tensorflow/python/ops/image_ops_test.py
@@ -2731,7 +2731,7 @@ class ResizeImageWithPadTest(test_util.TensorFlowTestCase):
try:
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-except
if err_msg not in str(e):
raise
else:
diff --git a/tensorflow/python/ops/init_ops.py b/tensorflow/python/ops/init_ops.py
index c41e952167..3132f7467f 100644
--- a/tensorflow/python/ops/init_ops.py
+++ b/tensorflow/python/ops/init_ops.py
@@ -43,7 +43,8 @@ from tensorflow.python.ops import linalg_ops_impl
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
-from tensorflow.python.util.deprecation import deprecated
+from tensorflow.python.util.deprecation import (
+ deprecated, deprecated_arg_values)
from tensorflow.python.util.tf_export import tf_export
@@ -409,8 +410,10 @@ class UniformUnitScaling(Initializer):
class VarianceScaling(Initializer):
"""Initializer capable of adapting its scale to the shape of weights tensors.
- With `distribution="normal"`, samples are drawn from a truncated normal
- distribution centered on zero, with `stddev = sqrt(scale / n)`
+ With `distribution="truncated_normal" or "untruncated_normal"`,
+ samples are drawn from a truncated/untruncated normal
+ distribution with a mean of zero and a standard deviation (after truncation,
+ if used) of `stddev = sqrt(scale / n)`
where n is:
- number of input units in the weight tensor, if mode = "fan_in"
- number of output units, if mode = "fan_out"
@@ -433,10 +436,14 @@ class VarianceScaling(Initializer):
"distribution" arguments.
"""
+ @deprecated_arg_values(
+ None,
+ "`normal` is a deprecated alias for `truncated_normal`",
+ distribution="normal")
def __init__(self,
scale=1.0,
mode="fan_in",
- distribution="normal",
+ distribution="truncated_normal",
seed=None,
dtype=dtypes.float32):
if scale <= 0.:
@@ -444,7 +451,8 @@ class VarianceScaling(Initializer):
if mode not in {"fan_in", "fan_out", "fan_avg"}:
raise ValueError("Invalid `mode` argument:", mode)
distribution = distribution.lower()
- if distribution not in {"normal", "uniform"}:
+ if distribution not in {"normal", "uniform",
+ "truncated_normal", "untruncated_normal"}:
raise ValueError("Invalid `distribution` argument:", distribution)
self.scale = scale
self.mode = mode
@@ -466,11 +474,15 @@ class VarianceScaling(Initializer):
scale /= max(1., fan_out)
else:
scale /= max(1., (fan_in + fan_out) / 2.)
- if self.distribution == "normal":
+ if self.distribution == "normal" or self.distribution == "truncated_normal":
# constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
stddev = math.sqrt(scale) / .87962566103423978
return random_ops.truncated_normal(
shape, 0.0, stddev, dtype, seed=self.seed)
+ elif self.distribution == "untruncated_normal":
+ stddev = math.sqrt(scale)
+ return random_ops.random_normal(
+ shape, 0.0, stddev, dtype, seed=self.seed)
else:
limit = math.sqrt(3.0 * scale)
return random_ops.random_uniform(
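The magic constant above is the standard deviation of a unit normal truncated to [-2, 2]; dividing by it restores the requested variance after truncation. A quick check, assuming SciPy is available:

```python
from scipy import stats

c = stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
assert abs(c - 0.87962566103423978) < 1e-12
# truncated_normal(stddev=sqrt(scale) / c) then has std close to sqrt(scale).
```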
@@ -1124,7 +1136,7 @@ convolutional_orthogonal_3d = ConvolutionOrthogonal3D
# pylint: enable=invalid-name
-@tf_export("glorot_uniform_initializer")
+@tf_export("glorot_uniform_initializer", "keras.initializers.glorot_uniform")
def glorot_uniform_initializer(seed=None, dtype=dtypes.float32):
"""The Glorot uniform initializer, also called Xavier uniform initializer.
@@ -1148,7 +1160,7 @@ def glorot_uniform_initializer(seed=None, dtype=dtypes.float32):
scale=1.0, mode="fan_avg", distribution="uniform", seed=seed, dtype=dtype)
-@tf_export("glorot_normal_initializer")
+@tf_export("glorot_normal_initializer", "keras.initializers.glorot_normal")
def glorot_normal_initializer(seed=None, dtype=dtypes.float32):
"""The Glorot normal initializer, also called Xavier normal initializer.
diff --git a/tensorflow/python/ops/linalg/linear_operator.py b/tensorflow/python/ops/linalg/linear_operator.py
index 8cfe964b1c..20c46fbb82 100644
--- a/tensorflow/python/ops/linalg/linear_operator.py
+++ b/tensorflow/python/ops/linalg/linear_operator.py
@@ -42,7 +42,7 @@ __all__ = ["LinearOperator"]
class LinearOperator(object):
"""Base class defining a [batch of] linear operator[s].
- Subclasses of `LinearOperator` provide a access to common methods on a
+ Subclasses of `LinearOperator` provide access to common methods on a
(batch) matrix, without the need to materialize the matrix. This allows:
* Matrix free computations
@@ -69,11 +69,11 @@ class LinearOperator(object):
#### Shape compatibility
- `LinearOperator` sub classes should operate on a [batch] matrix with
+ `LinearOperator` subclasses should operate on a [batch] matrix with
compatible shape. Class docstrings should define what is meant by compatible
- shape. Some sub-classes may not support batching.
+ shape. Some subclasses may not support batching.
- An example is:
+ Examples:
`x` is a batch matrix with compatible shape for `matmul` if
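For concreteness, a small sketch of the `matmul` shape contract the docstring describes, using `LinearOperatorDiag` as the subclass (assuming a 1.x-era `tf.linalg` namespace and a graph-mode session):

    import numpy as np
    import tensorflow as tf

    # A batch of two 3x3 diagonal operators.
    diag = tf.constant(np.random.rand(2, 3).astype(np.float32))
    operator = tf.linalg.LinearOperatorDiag(diag)   # operator shape [2, 3, 3]

    # `x` has compatible shape for `matmul`: inner dimensions match.
    x = tf.constant(np.random.rand(2, 3, 4).astype(np.float32))
    y = operator.matmul(x)                          # shape [2, 3, 4]

    with tf.Session() as sess:
      print(sess.run(tf.shape(y)))                  # [2 3 4]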
diff --git a/tensorflow/python/ops/linalg/linear_operator_diag.py b/tensorflow/python/ops/linalg/linear_operator_diag.py
index 5beaea65a5..ed53decc00 100644
--- a/tensorflow/python/ops/linalg/linear_operator_diag.py
+++ b/tensorflow/python/ops/linalg/linear_operator_diag.py
@@ -231,8 +231,11 @@ class LinearOperatorDiag(linear_operator.LinearOperator):
return math_ops.reduce_prod(self._diag, reduction_indices=[-1])
def _log_abs_determinant(self):
- return math_ops.reduce_sum(
+ log_det = math_ops.reduce_sum(
math_ops.log(math_ops.abs(self._diag)), reduction_indices=[-1])
+ if self.dtype.is_complex:
+ log_det = math_ops.cast(log_det, dtype=self.dtype)
+ return log_det
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
diag_term = math_ops.conj(self._diag) if adjoint else self._diag
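The cast added above is needed because `log(abs(diag))` is real-valued even for a complex operator, while `LinearOperator` methods are expected to return tensors of the operator's dtype. A NumPy sketch of the dtype mismatch being avoided:

    import numpy as np

    diag = np.array([1 + 1j, 2 - 1j], dtype=np.complex64)
    log_abs_det = np.sum(np.log(np.abs(diag)))     # float32, not complex64
    print(log_abs_det.dtype)                       # float32
    print(log_abs_det.astype(np.complex64).dtype)  # complex64, matches operator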
diff --git a/tensorflow/python/ops/linalg/linear_operator_low_rank_update.py b/tensorflow/python/ops/linalg/linear_operator_low_rank_update.py
index 08e5896e10..2b2bf80f27 100644
--- a/tensorflow/python/ops/linalg/linear_operator_low_rank_update.py
+++ b/tensorflow/python/ops/linalg/linear_operator_low_rank_update.py
@@ -18,16 +18,15 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
-from tensorflow.python.ops import check_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_util
+from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
__all__ = [
@@ -153,8 +152,7 @@ class LinearOperatorLowRankUpdate(linear_operator.LinearOperator):
`is_X` matrix property hints, which will trigger the appropriate code path.
Args:
- base_operator: Shape `[B1,...,Bb, M, N]` real `float16`, `float32` or
- `float64` `LinearOperator`. This is `L` above.
+ base_operator: Shape `[B1,...,Bb, M, N]` `LinearOperator`. This is `L` above.
u: Shape `[B1,...,Bb, M, K]` `Tensor` of same `dtype` as `base_operator`.
This is `U` above.
diag_update: Optional shape `[B1,...,Bb, K]` `Tensor` with same `dtype`
@@ -183,23 +181,12 @@ class LinearOperatorLowRankUpdate(linear_operator.LinearOperator):
Raises:
ValueError: If `is_X` flags are set in an inconsistent way.
"""
- # TODO(langmore) support complex types.
- # Complex types are not allowed due to tf.cholesky() requiring float.
- # If complex dtypes are allowed, we update the following
- # 1. is_diag_update_positive should still imply that `diag > 0`, but we need
- # to remind the user that this implies diag is real. This is needed
- # because if diag has non-zero imaginary part, it will not be
- # self-adjoint positive definite.
dtype = base_operator.dtype
- allowed_dtypes = [
- dtypes.float16,
- dtypes.float32,
- dtypes.float64,
- ]
- if dtype not in allowed_dtypes:
- raise TypeError(
- "Argument matrix must have dtype in %s. Found: %s"
- % (allowed_dtypes, dtype))
+
+ if diag_update is not None:
+ if is_diag_update_positive and dtype.is_complex:
+ logging.warn("Note: setting is_diag_update_positive with a complex "
+ "dtype means that diagonal is real and positive.")
if diag_update is None:
if is_diag_update_positive is False:
@@ -271,8 +258,6 @@ class LinearOperatorLowRankUpdate(linear_operator.LinearOperator):
self._set_diag_operators(diag_update, is_diag_update_positive)
self._is_diag_update_positive = is_diag_update_positive
- check_ops.assert_same_float_dtype((base_operator, self.u, self.v,
- self._diag_update))
self._check_shapes()
# Pre-compute the so-called "capacitance" matrix
@@ -407,6 +392,8 @@ class LinearOperatorLowRankUpdate(linear_operator.LinearOperator):
else:
det_c = linalg_ops.matrix_determinant(self._capacitance)
log_abs_det_c = math_ops.log(math_ops.abs(det_c))
+ if self.dtype.is_complex:
+ log_abs_det_c = math_ops.cast(log_abs_det_c, dtype=self.dtype)
return log_abs_det_c + log_abs_det_d + log_abs_det_l
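The three-term sum above is the matrix determinant lemma for `A = L + U D V^H`: `det(A) = det(C) * det(D) * det(L)` with capacitance `C = D^{-1} + V^H L^{-1} U`, hence `log|det(A)| = log|det(C)| + log|det(D)| + log|det(L)|`. A NumPy check with illustrative, unbatched, real-valued shapes:

    import numpy as np

    n, k = 5, 2
    rng = np.random.RandomState(0)
    L = np.diag(rng.rand(n) + 1.0)               # base operator (diagonal here)
    U = rng.rand(n, k)
    V = rng.rand(n, k)
    D = np.diag(rng.rand(k) + 1.0)

    A = L + U.dot(D).dot(V.T)
    C = np.linalg.inv(D) + V.T.dot(np.linalg.inv(L)).dot(U)   # capacitance

    lhs = np.log(abs(np.linalg.det(A)))
    rhs = (np.log(abs(np.linalg.det(C))) + np.log(abs(np.linalg.det(D))) +
           np.log(abs(np.linalg.det(L))))
    print(np.allclose(lhs, rhs))                 # True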
diff --git a/tensorflow/python/ops/linalg/linear_operator_lower_triangular.py b/tensorflow/python/ops/linalg/linear_operator_lower_triangular.py
index fb1eb2fedb..ca6d3f5405 100644
--- a/tensorflow/python/ops/linalg/linear_operator_lower_triangular.py
+++ b/tensorflow/python/ops/linalg/linear_operator_lower_triangular.py
@@ -119,8 +119,7 @@ class LinearOperatorLowerTriangular(linear_operator.LinearOperator):
Args:
tril: Shape `[B1,...,Bb, N, N]` with `b >= 0`, `N >= 0`.
The lower triangular part of `tril` defines this operator. The strictly
- upper triangle is ignored. Allowed dtypes: `float16`, `float32`,
- `float64`.
+ upper triangle is ignored.
is_non_singular: Expect that this operator is non-singular.
This operator is non-singular if and only if its diagonal elements are
all non-zero.
@@ -137,7 +136,6 @@ class LinearOperatorLowerTriangular(linear_operator.LinearOperator):
name: A name for this `LinearOperator`.
Raises:
- TypeError: If `diag.dtype` is not an allowed type.
ValueError: If `is_square` is `False`.
"""
@@ -163,12 +161,12 @@ class LinearOperatorLowerTriangular(linear_operator.LinearOperator):
def _check_tril(self, tril):
"""Static check of the `tril` argument."""
- # TODO(langmore) Add complex types once matrix_triangular_solve works for
- # them.
allowed_dtypes = [
dtypes.float16,
dtypes.float32,
dtypes.float64,
+ dtypes.complex64,
+ dtypes.complex128,
]
dtype = tril.dtype
if dtype not in allowed_dtypes:
diff --git a/tensorflow/python/ops/linalg/linear_operator_test_util.py b/tensorflow/python/ops/linalg/linear_operator_test_util.py
index 1b5bb9470c..78c85db557 100644
--- a/tensorflow/python/ops/linalg/linear_operator_test_util.py
+++ b/tensorflow/python/ops/linalg/linear_operator_test_util.py
@@ -102,7 +102,7 @@ class LinearOperatorDerivedClassTest(test.TestCase):
raise NotImplementedError("operator_build_infos has not been implemented.")
@abc.abstractmethod
- def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
"""Build a batch matrix and an Operator that should have similar behavior.
Every operator acts like a (batch) matrix. This method returns both
@@ -118,9 +118,6 @@ class LinearOperatorDerivedClassTest(test.TestCase):
Returns:
operator: `LinearOperator` subclass instance.
mat: `Tensor` representing operator.
- feed_dict: Dictionary.
- If placholder is True, this must contains everything needed to be fed
- to sess.run calls at runtime to make the operator work.
"""
# Create a matrix as a numpy array with desired shape/dtype.
# Create a LinearOperator that should have the same behavior as the matrix.
@@ -189,12 +186,12 @@ class LinearOperatorDerivedClassTest(test.TestCase):
for dtype in self._dtypes_to_test:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
- operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
+ operator, mat = self._operator_and_matrix(
build_info, dtype, use_placeholder=use_placeholder)
op_dense = operator.to_dense()
if not use_placeholder:
self.assertAllEqual(build_info.shape, op_dense.get_shape())
- op_dense_v, mat_v = sess.run([op_dense, mat], feed_dict=feed_dict)
+ op_dense_v, mat_v = sess.run([op_dense, mat])
self.assertAC(op_dense_v, mat_v)
def test_det(self):
@@ -204,14 +201,13 @@ class LinearOperatorDerivedClassTest(test.TestCase):
for dtype in self._dtypes_to_test:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
- operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
+ operator, mat = self._operator_and_matrix(
build_info, dtype, use_placeholder=use_placeholder)
op_det = operator.determinant()
if not use_placeholder:
self.assertAllEqual(build_info.shape[:-2], op_det.get_shape())
op_det_v, mat_det_v = sess.run(
- [op_det, linalg_ops.matrix_determinant(mat)],
- feed_dict=feed_dict)
+ [op_det, linalg_ops.matrix_determinant(mat)])
self.assertAC(op_det_v, mat_det_v)
def test_log_abs_det(self):
@@ -221,7 +217,7 @@ class LinearOperatorDerivedClassTest(test.TestCase):
for dtype in self._dtypes_to_test:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
- operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
+ operator, mat = self._operator_and_matrix(
build_info, dtype, use_placeholder=use_placeholder)
op_log_abs_det = operator.log_abs_determinant()
_, mat_log_abs_det = linalg.slogdet(mat)
@@ -229,7 +225,7 @@ class LinearOperatorDerivedClassTest(test.TestCase):
self.assertAllEqual(
build_info.shape[:-2], op_log_abs_det.get_shape())
op_log_abs_det_v, mat_log_abs_det_v = sess.run(
- [op_log_abs_det, mat_log_abs_det], feed_dict=feed_dict)
+ [op_log_abs_det, mat_log_abs_det])
self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)
def _test_matmul(self, with_batch):
@@ -246,7 +242,7 @@ class LinearOperatorDerivedClassTest(test.TestCase):
for adjoint_arg in self._adjoint_arg_options:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
- operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
+ operator, mat = self._operator_and_matrix(
build_info, dtype, use_placeholder=use_placeholder)
x = self._make_x(
operator, adjoint=adjoint, with_batch=with_batch)
@@ -264,7 +260,7 @@ class LinearOperatorDerivedClassTest(test.TestCase):
self.assertAllEqual(op_matmul.get_shape(),
mat_matmul.get_shape())
op_matmul_v, mat_matmul_v = sess.run(
- [op_matmul, mat_matmul], feed_dict=feed_dict)
+ [op_matmul, mat_matmul])
self.assertAC(op_matmul_v, mat_matmul_v)
def test_matmul(self):
@@ -289,7 +285,7 @@ class LinearOperatorDerivedClassTest(test.TestCase):
for adjoint_arg in self._adjoint_arg_options:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
- operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
+ operator, mat = self._operator_and_matrix(
build_info, dtype, use_placeholder=use_placeholder)
rhs = self._make_rhs(
operator, adjoint=adjoint, with_batch=with_batch)
@@ -307,8 +303,7 @@ class LinearOperatorDerivedClassTest(test.TestCase):
if not use_placeholder:
self.assertAllEqual(op_solve.get_shape(),
mat_solve.get_shape())
- op_solve_v, mat_solve_v = sess.run(
- [op_solve, mat_solve], feed_dict=feed_dict)
+ op_solve_v, mat_solve_v = sess.run([op_solve, mat_solve])
self.assertAC(op_solve_v, mat_solve_v)
def test_solve(self):
@@ -326,14 +321,13 @@ class LinearOperatorDerivedClassTest(test.TestCase):
for dtype in self._dtypes_to_test:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
- operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
+ operator, mat = self._operator_and_matrix(
build_info, dtype, use_placeholder=use_placeholder)
op_trace = operator.trace()
mat_trace = math_ops.trace(mat)
if not use_placeholder:
self.assertAllEqual(op_trace.get_shape(), mat_trace.get_shape())
- op_trace_v, mat_trace_v = sess.run(
- [op_trace, mat_trace], feed_dict=feed_dict)
+ op_trace_v, mat_trace_v = sess.run([op_trace, mat_trace])
self.assertAC(op_trace_v, mat_trace_v)
def test_add_to_tensor(self):
@@ -343,15 +337,14 @@ class LinearOperatorDerivedClassTest(test.TestCase):
for dtype in self._dtypes_to_test:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
- operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
+ operator, mat = self._operator_and_matrix(
build_info, dtype, use_placeholder=use_placeholder)
op_plus_2mat = operator.add_to_tensor(2 * mat)
if not use_placeholder:
self.assertAllEqual(build_info.shape, op_plus_2mat.get_shape())
- op_plus_2mat_v, mat_v = sess.run(
- [op_plus_2mat, mat], feed_dict=feed_dict)
+ op_plus_2mat_v, mat_v = sess.run([op_plus_2mat, mat])
self.assertAC(op_plus_2mat_v, 3 * mat_v)
@@ -362,7 +355,7 @@ class LinearOperatorDerivedClassTest(test.TestCase):
for dtype in self._dtypes_to_test:
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
- operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
+ operator, mat = self._operator_and_matrix(
build_info, dtype, use_placeholder=use_placeholder)
op_diag_part = operator.diag_part()
mat_diag_part = array_ops.matrix_diag_part(mat)
@@ -372,7 +365,7 @@ class LinearOperatorDerivedClassTest(test.TestCase):
op_diag_part.get_shape())
op_diag_part_, mat_diag_part_ = sess.run(
- [op_diag_part, mat_diag_part], feed_dict=feed_dict)
+ [op_diag_part, mat_diag_part])
self.assertAC(op_diag_part_, mat_diag_part_)
diff --git a/tensorflow/python/ops/linalg_ops.py b/tensorflow/python/ops/linalg_ops.py
index a0dfa543f9..f4a93560be 100644
--- a/tensorflow/python/ops/linalg_ops.py
+++ b/tensorflow/python/ops/linalg_ops.py
@@ -401,7 +401,7 @@ def svd(tensor, full_matrices=False, compute_uv=True, name=None):
import tensorflow as tf
import numpy as np
s, u, v = tf.linalg.svd(a)
- tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_v=True))
+ tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_b=True))
u, s, v_adj = np.linalg.svd(a, full_matrices=False)
np_a_approx = np.dot(u, np.dot(np.diag(s), v_adj))
# tf_a_approx and np_a_approx should be numerically close.
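The fix above replaces `adjoint_v`, which `tf.matmul` does not accept, with its actual `adjoint_b` argument. A runnable version of the corrected docstring example (assuming a 1.x-era graph-mode session):

    import numpy as np
    import tensorflow as tf

    a = np.random.rand(4, 3).astype(np.float32)
    s, u, v = tf.linalg.svd(a)
    # adjoint_b=True multiplies by the conjugate transpose of `v`.
    tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_b=True))

    with tf.Session() as sess:
      print(np.allclose(a, sess.run(tf_a_approx), atol=1e-5))  # True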
diff --git a/tensorflow/python/ops/logging_ops.py b/tensorflow/python/ops/logging_ops.py
index 8276047cb6..df41933f8a 100644
--- a/tensorflow/python/ops/logging_ops.py
+++ b/tensorflow/python/ops/logging_ops.py
@@ -35,9 +35,12 @@ from tensorflow.python.util.tf_export import tf_export
# Assert and Print are special symbols in python, so we must
-# have an upper-case version of them. For users with Python 3 or Python 2.7
-# with `from __future__ import print_function`, we also allow lowercase.
-@tf_export("Print", "print")
+# have an upper-case version of them.
+#
+# For users with Python 3 or Python 2.7
+# with `from __future__ import print_function`, we could also allow lowercase.
+# See https://github.com/tensorflow/tensorflow/issues/18053
+@tf_export("Print")
def Print(input_, data, message=None, first_n=None, summarize=None,
name=None):
"""Prints a list of tensors.
diff --git a/tensorflow/python/ops/losses/losses_impl.py b/tensorflow/python/ops/losses/losses_impl.py
index 9ba91772f5..66633c8b12 100644
--- a/tensorflow/python/ops/losses/losses_impl.py
+++ b/tensorflow/python/ops/losses/losses_impl.py
@@ -878,7 +878,8 @@ def sparse_softmax_cross_entropy(
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Unscaled log probabilities of shape
- `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float32` or `float64`.
+ `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or
+ `float64`.
weights: Coefficients for the loss. This must be scalar or broadcastable to
`labels` (i.e. same rank and each dimension is either 1 or the same).
scope: the scope for the operations performed in computing the loss.
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index cdb6dc8f22..c28dca5137 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -37,11 +37,11 @@ from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_spectral_ops
-from tensorflow.python.platform import tf_logging as logging
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
+from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
@@ -651,6 +651,9 @@ def cast(x, dtype, name=None):
TypeError: If `x` cannot be cast to the `dtype`.
"""
base_type = dtypes.as_dtype(dtype).base_dtype
+ if isinstance(x,
+ (ops.Tensor, _resource_variable_type)) and base_type == x.dtype:
+ return x
with ops.name_scope(name, "Cast", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
values_cast = cast(x.values, base_type, name=name)
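The fast path added above returns the input unchanged when it already has the requested dtype, so no `Cast` node is added to the graph. A small sketch of the observable behavior (graph mode, with this change applied):

    import tensorflow as tf

    x = tf.constant([1.0, 2.0])       # float32
    y = tf.cast(x, tf.float32)        # same dtype: fast path returns x itself
    z = tf.cast(x, tf.int32)          # different dtype: a real Cast op

    print(y is x)                     # True
    print(z.op.type)                  # "Cast"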
@@ -1222,8 +1225,9 @@ def _ReductionDims(x, axis, reduction_indices):
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
- if isinstance(x, ops.Tensor) and x._rank() is not None: # pylint: disable=protected-access
- return constant_op.constant(np.arange(x._rank()), dtype=dtypes.int32) # pylint: disable=protected-access
+ rank = common_shapes.rank(x)
+ if rank is not None:
+ return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
rank = x.dense_shape.get_shape()[0].value # sparse.dense_shape is 1-D.
@@ -1234,8 +1238,8 @@ def _ReductionDims(x, axis, reduction_indices):
def _may_reduce_to_scalar(keepdims, axis, reduction_indices, output):
- """Set a reduction's output's shape to be a scalar if we are certain."""
- if (not output.shape.is_fully_defined()) and (not keepdims) and (
+ """Set a reduction's output shape to be a scalar if we are certain."""
+ if not common_shapes.has_fully_defined_shape(output) and (not keepdims) and (
axis is None) and (reduction_indices is None):
output.set_shape(())
return output
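The helper above lets a full reduction advertise a static scalar shape even when the input's shape is entirely unknown. A brief graph-mode illustration:

    import tensorflow as tf

    x = tf.placeholder(tf.float32, shape=None)   # fully unknown shape
    y = tf.reduce_sum(x)                         # axis=None reduces everything
    print(y.get_shape())                         # (), known statically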
diff --git a/tensorflow/python/ops/math_ops_test.py b/tensorflow/python/ops/math_ops_test.py
index 45e3bd65d2..6b709e5e7f 100644
--- a/tensorflow/python/ops/math_ops_test.py
+++ b/tensorflow/python/ops/math_ops_test.py
@@ -237,8 +237,8 @@ class ApproximateEqualTest(test_util.TensorFlowTestCase):
def testApproximateEqualShape(self):
for dtype in [np.float32, np.double]:
- x = np.array([1, 2], dtype=np.float32)
- y = np.array([[1, 2]], dtype=np.float32)
+ x = np.array([1, 2], dtype=dtype)
+ y = np.array([[1, 2]], dtype=dtype)
# The inputs 'x' and 'y' must have the same shape.
with self.assertRaisesRegexp(
ValueError, "Shapes must be equal rank, but are 1 and 2"):
diff --git a/tensorflow/python/ops/metrics_impl.py b/tensorflow/python/ops/metrics_impl.py
index 5eab12c41d..3aedeb6acd 100644
--- a/tensorflow/python/ops/metrics_impl.py
+++ b/tensorflow/python/ops/metrics_impl.py
@@ -73,15 +73,16 @@ def metric_variable(shape, dtype, validate_shape=True, name=None):
A (non-trainable) variable initialized to zero, or if inside a
`DistributionStrategy` scope a tower-local variable container.
"""
- with distribute_lib.get_tower_context().tower_local_var_scope('sum'):
- # Note that "tower local" implies trainable=False.
- return variable_scope.variable(
- lambda: array_ops.zeros(shape, dtype),
- collections=[
- ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES
- ],
- validate_shape=validate_shape,
- name=name)
+ # Note that synchronization "ON_READ" implies trainable=False.
+ return variable_scope.variable(
+ lambda: array_ops.zeros(shape, dtype),
+ collections=[
+ ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES
+ ],
+ validate_shape=validate_shape,
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ aggregation=variable_scope.VariableAggregation.SUM,
+ name=name)
def _remove_squeezable_dimensions(predictions, labels, weights):
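The rewrite above replaces the deprecated tower-local variable scope with explicit `synchronization`/`aggregation` arguments: an `ON_READ` variable keeps a per-tower copy that is aggregated (here, summed) when read, which is exactly how streaming-metric accumulators behave, and `ON_READ` implies `trainable=False`. A hedged sketch of declaring such a variable directly (assuming a 1.x API where `tf.get_variable` accepts these arguments):

    import tensorflow as tf

    # Per-tower accumulator, summed across towers on read.
    total = tf.get_variable(
        "total",
        shape=[],
        initializer=tf.zeros_initializer(),
        collections=[tf.GraphKeys.LOCAL_VARIABLES,
                     tf.GraphKeys.METRIC_VARIABLES],
        synchronization=tf.VariableSynchronization.ON_READ,
        aggregation=tf.VariableAggregation.SUM)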
diff --git a/tensorflow/python/ops/nn_ops.py b/tensorflow/python/ops/nn_ops.py
index 0c2f5b06c4..41d54a6c2f 100644
--- a/tensorflow/python/ops/nn_ops.py
+++ b/tensorflow/python/ops/nn_ops.py
@@ -2009,7 +2009,8 @@ def sparse_softmax_cross_entropy_with_logits(
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Unscaled log probabilities of shape
- `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float32` or `float64`.
+ `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or
+ `float64`.
name: A name for the operation (optional).
Returns:
@@ -2166,7 +2167,7 @@ def _calc_conv_flops(graph, node):
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
- output_count = np.prod(output_shape.as_list())
+ output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats(
"flops",
(output_count * filter_in_depth * filter_height * filter_width * 2))
@@ -2184,7 +2185,7 @@ def _calc_depthwise_conv_flops(graph, node):
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
- output_count = np.prod(output_shape.as_list())
+ output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@@ -2594,7 +2595,7 @@ def _calc_dilation2d_flops(graph, node):
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
- output_count = np.prod(output_shape.as_list())
+ output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
diff --git a/tensorflow/python/ops/parallel_for/BUILD b/tensorflow/python/ops/parallel_for/BUILD
new file mode 100644
index 0000000000..6c804a50e7
--- /dev/null
+++ b/tensorflow/python/ops/parallel_for/BUILD
@@ -0,0 +1,128 @@
+package(
+ default_visibility = [
+ "//tensorflow:internal",
+ ],
+)
+
+load("//tensorflow:tensorflow.bzl", "cuda_py_test")
+
+licenses(["notice"]) # Apache 2.0
+
+py_library(
+ name = "parallel_for",
+ srcs = [
+ "__init__.py",
+ "control_flow_ops.py",
+ "gradients.py",
+ "pfor.py",
+ ],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":control_flow_ops",
+ ":gradients",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:check_ops",
+ "//tensorflow/python:constant_op",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:dtypes",
+ "//tensorflow/python:framework_ops",
+ "//tensorflow/python:functional_ops",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:sparse_ops",
+ "//tensorflow/python:sparse_tensor",
+ "//tensorflow/python:tensor_array_ops",
+ "//tensorflow/python:tensor_shape",
+ "//tensorflow/python:tensor_util",
+ "//tensorflow/python:util",
+ "@absl_py//absl/flags",
+ ],
+)
+
+py_library(
+ name = "pfor_lib",
+ srcs = ["pfor.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:check_ops",
+ "//tensorflow/python:constant_op",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:data_flow_ops",
+ "//tensorflow/python:dtypes",
+ "//tensorflow/python:framework_ops",
+ "//tensorflow/python:functional_ops",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:nn_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:sparse_ops",
+ "//tensorflow/python:sparse_tensor",
+ "//tensorflow/python:tensor_array_ops",
+ "//tensorflow/python:tensor_shape",
+ "//tensorflow/python:tensor_util",
+ "@absl_py//absl/flags",
+ ],
+)
+
+py_library(
+ name = "control_flow_ops",
+ srcs = ["control_flow_ops.py"],
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+ deps = [
+ ":pfor_lib",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:dtypes",
+ "//tensorflow/python:framework_ops",
+ "//tensorflow/python:tensor_array_ops",
+ "//tensorflow/python:util",
+ ],
+)
+
+cuda_py_test(
+ name = "control_flow_ops_test",
+ srcs = ["control_flow_ops_test.py"],
+ additional_deps = [
+ ":control_flow_ops",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:logging_ops",
+ "//tensorflow/python:parsing_ops",
+ "//tensorflow/python:session",
+ "//tensorflow/python:tensor_array_grad",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python:util",
+ ],
+)
+
+py_library(
+ name = "gradients",
+ srcs = ["gradients.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":control_flow_ops",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:util",
+ ],
+)
+
+cuda_py_test(
+ name = "gradients_test",
+ size = "large",
+ srcs = ["gradients_test.py"],
+ additional_deps = [
+ ":control_flow_ops",
+ ":gradients",
+ "//third_party/py/numpy",
+ "//tensorflow/python:layers",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:random_ops",
+ "//tensorflow/python/ops/losses",
+ ],
+)
diff --git a/tensorflow/python/ops/parallel_for/__init__.py b/tensorflow/python/ops/parallel_for/__init__.py
new file mode 100644
index 0000000000..b49d865968
--- /dev/null
+++ b/tensorflow/python/ops/parallel_for/__init__.py
@@ -0,0 +1,35 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Ops for pfor, for_loop, jacobian."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.ops.parallel_for import * # pylint: disable=wildcard-import
+from tensorflow.python.ops.parallel_for.control_flow_ops import for_loop
+from tensorflow.python.ops.parallel_for.control_flow_ops import pfor
+from tensorflow.python.ops.parallel_for.gradients import batch_jacobian
+from tensorflow.python.ops.parallel_for.gradients import jacobian
+from tensorflow.python.util.all_util import remove_undocumented
+
+_allowed_symbols = [
+ 'pfor',
+ 'for_loop',
+ 'jacobian',
+ 'batch_jacobian',
+]
+
+remove_undocumented(__name__, _allowed_symbols)
diff --git a/tensorflow/python/ops/parallel_for/control_flow_ops.py b/tensorflow/python/ops/parallel_for/control_flow_ops.py
new file mode 100644
index 0000000000..ccf2eb8214
--- /dev/null
+++ b/tensorflow/python/ops/parallel_for/control_flow_ops.py
@@ -0,0 +1,123 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""for_loop and pfor ops."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import tensor_array_ops
+from tensorflow.python.ops.parallel_for.pfor import PFor
+from tensorflow.python.util import nest
+
+
+def for_loop(loop_fn, loop_fn_dtypes, iters):
+ """Runs `loop_fn` `iters` times and stacks the outputs.
+
+ Runs `loop_fn` `iters` times, with input values from 0 to `iters - 1`, and
+ stacks corresponding outputs of the different runs.
+
+ Args:
+ loop_fn: A function that takes an int32 scalar tf.Tensor object representing
+ the iteration number, and returns a possibly nested structure of tensor
+ objects. The shape of these outputs should not depend on the input.
+ loop_fn_dtypes: dtypes for the outputs of loop_fn.
+ iters: Number of iterations for which to run loop_fn.
+
+ Returns:
+ A nested structure of stacked output tensor objects with the same
+ nested structure as the output of `loop_fn`.
+ """
+
+ flat_loop_fn_dtypes = nest.flatten(loop_fn_dtypes)
+
+ def while_body(i, *ta_list):
+ """Body of while loop."""
+ fn_output = nest.flatten(loop_fn(i))
+ if len(fn_output) != len(flat_loop_fn_dtypes):
+ raise ValueError(
+ "Number of expected outputs, %d, does not match the number of "
+ "actual outputs, %d, from loop_fn" % (len(flat_loop_fn_dtypes),
+ len(fn_output)))
+ outputs = []
+ for out, ta in zip(fn_output, ta_list):
+ # TODO(agarwal): support returning Operation objects from loop_fn.
+ assert isinstance(out, ops.Tensor)
+ outputs.append(ta.write(i, array_ops.expand_dims(out, 0)))
+ return tuple([i + 1] + outputs)
+
+ ta_list = control_flow_ops.while_loop(
+ lambda i, *ta: i < iters, while_body, [0] + [
+ tensor_array_ops.TensorArray(dtype, iters)
+ for dtype in flat_loop_fn_dtypes
+ ])[1:]
+
+ # TODO(rachelim): enable this for sparse tensors
+ return nest.pack_sequence_as(loop_fn_dtypes, [ta.concat() for ta in ta_list])
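A hedged usage sketch for `for_loop` as defined above: each iteration returns a scalar, and the per-iteration outputs are stacked along a new leading dimension (graph-mode session assumed):

    import tensorflow as tf
    from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_ops

    # Stacks [0*0, 1*1, 2*2, 3*3] into one tensor via a tf.while_loop.
    out = pfor_ops.for_loop(
        lambda i: tf.square(tf.cast(i, tf.float32)), tf.float32, iters=4)

    with tf.Session() as sess:
      print(sess.run(out))   # [0. 1. 4. 9.]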
+
+
+def pfor(loop_fn, iters):
+ """Equivalent to running `loop_fn` `iters` times and stacking the outputs.
+
+ `pfor` has functionality similar to `for_loop`, i.e. running `loop_fn` `iters`
+ times, with input from 0 to `iters - 1`, and stacking the corresponding
+ output of each iteration. However, the implementation does not use a
+ tf.while_loop. Instead, it adds new operations to the graph that collectively
+ compute the same value as what running `loop_fn` in a loop would compute.
+
+ This is an experimental feature and currently has a lot of limitations:
+ - There should be no data dependency between the different iterations. For
+ example, a future iteration should not depend on a value or side-effect of
+ a previous iteration.
+ - Most stateful kernels are not supported, since they often imply a data
+ dependency between, or an ordering of, the iterations. We do support a
+ limited set of such stateful kernels (like RandomFoo, Variable operations
+ like reads, etc).
+ - Conversion works only on a limited set of kernels for which a converter
+ has been registered.
+ - loop_fn cannot currently contain control flow operations like
+ tf.while_loop or tf.cond.
+ - `loop_fn` should return a nested structure of Tensors or Operations.
+ However, if an Operation is returned, it should have zero outputs.
+ - The shape and dtype of `loop_fn` outputs should not depend on the input
+ to loop_fn.
+
+ Args:
+ loop_fn: A function that takes an int32 scalar tf.Tensor object representing
+ the iteration number, and returns a possibly nested structure of Tensor or
+ Operation objects.
+ iters: Number of iterations for which to run loop_fn.
+
+ Returns:
+ A nested structure of stacked tensor objects with the same nested
+ structure as the output of `loop_fn`.
+ """
+ existing_ops = set(ops.get_default_graph().get_operations())
+ with ops.name_scope("loop_body"):
+ loop_var = array_ops.placeholder(dtypes.int32, shape=[])
+ loop_fn_outputs = loop_fn(loop_var)
+ new_ops = set(ops.get_default_graph().get_operations()) - existing_ops
+ iters = ops.convert_to_tensor(iters)
+ with ops.name_scope("pfor"):
+ converter = PFor(loop_var, iters, new_ops)
+ outputs = []
+ for loop_fn_output in nest.flatten(loop_fn_outputs):
+ outputs.append(converter.convert(loop_fn_output))
+ return nest.pack_sequence_as(loop_fn_outputs, outputs)
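And the corresponding sketch for `pfor`: the same kind of loop body, but vectorized by rewriting the captured ops rather than by running a `tf.while_loop` (this assumes converters are registered in `pfor.py` for every op the body uses; gather, square, and reduce_sum are all exercised by the tests below):

    import tensorflow as tf
    from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_ops

    x = tf.random_uniform([4, 3])

    def loop_fn(i):
      # Sum of squares of row i; all three ops have registered converters.
      return tf.reduce_sum(tf.square(tf.gather(x, i)))

    out = pfor_ops.pfor(loop_fn, iters=4)   # shape [4], one value per row

    with tf.Session() as sess:
      print(sess.run(out))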
diff --git a/tensorflow/python/ops/parallel_for/control_flow_ops_test.py b/tensorflow/python/ops/parallel_for/control_flow_ops_test.py
new file mode 100644
index 0000000000..c0e66cb0b8
--- /dev/null
+++ b/tensorflow/python/ops/parallel_for/control_flow_ops_test.py
@@ -0,0 +1,1404 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for pfor and for_loop."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import time
+
+from absl import flags
+import numpy as np
+
+from tensorflow.core.example import example_pb2
+from tensorflow.core.example import feature_pb2
+from tensorflow.python.client import session
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.ops import gradients as gradient_ops
+from tensorflow.python.ops import logging_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import parsing_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops import rnn
+from tensorflow.python.ops import rnn_cell
+from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
+from tensorflow.python.ops import tensor_array_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
+from tensorflow.python.platform import test
+from tensorflow.python.util import nest
+
+
+class PForTest(test.TestCase):
+
+ def _run_targets(self, targets1, targets2=None, run_init=True):
+ targets1 = nest.flatten(targets1)
+ targets2 = ([] if targets2 is None else nest.flatten(targets2))
+ assert len(targets1) == len(targets2) or not targets2
+ if run_init:
+ init = variables.global_variables_initializer()
+ self.evaluate(init)
+ return self.evaluate(targets1 + targets2)
+
+ def run_and_assert_equal(self, targets1, targets2):
+ outputs = self._run_targets(targets1, targets2)
+ outputs = nest.flatten(outputs) # flatten SparseTensorValues
+ n = len(outputs) // 2
+ for i in range(n):
+ if outputs[i + n].dtype != np.object:
+ self.assertAllClose(outputs[i + n], outputs[i], rtol=1e-4, atol=1e-5)
+ else:
+ self.assertAllEqual(outputs[i + n], outputs[i])
+
+ def _test_loop_fn(self, loop_fn, iters, loop_fn_dtypes=dtypes.float32):
+ t1 = pfor_control_flow_ops.pfor(loop_fn, iters=iters)
+ t2 = pfor_control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, iters=iters)
+ self.run_and_assert_equal(t1, t2)
+
+ def test_op_conversion_fallback_to_while_loop(self):
+ # Note that we use the top_k op for this test. If a converter gets defined
+ # for it, we will need to find another op for which a converter has not been
+ # defined.
+ x = random_ops.random_uniform([3, 2, 4])
+
+ def loop_fn(i):
+ x_i = array_ops.gather(x, i)
+ return nn.top_k(x_i)
+
+ with self.assertRaisesRegexp(ValueError, "No converter defined"):
+ self._test_loop_fn(
+ loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
+ flags.FLAGS.op_conversion_fallback_to_while_loop = True
+ self._test_loop_fn(
+ loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
+ flags.FLAGS.op_conversion_fallback_to_while_loop = False
+
+
+class ArrayTest(PForTest):
+
+ def test_gather(self):
+ x = random_ops.random_uniform([3, 3, 3])
+
+ def loop_fn(i):
+ outputs = []
+ x_i = array_ops.gather(x, i)
+ for y in [x, x_i]:
+ axes = [0, 2, -1] if y == x else [0]
+ for axis in axes:
+ outputs.append(array_ops.gather(y, 2, axis=axis))
+ outputs.append(array_ops.gather(y, i, axis=axis))
+ outputs.append(array_ops.gather(y, [i], axis=axis))
+ outputs.append(array_ops.gather(y, [i, 2], axis=axis))
+ outputs.append(array_ops.gather(y, [[2, i], [i, 1]], axis=axis))
+ return outputs
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 20)
+
+ def test_shape(self):
+ x = random_ops.random_uniform([3, 2, 3])
+
+ def loop_fn(i):
+ x_i = array_ops.gather(x, i)
+ return array_ops.shape(x_i), array_ops.shape(x_i, out_type=dtypes.int64)
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])
+
+ def test_size(self):
+ x = random_ops.random_uniform([3, 2, 3])
+
+ def loop_fn(i):
+ x_i = array_ops.gather(x, i)
+ return array_ops.size(x_i), array_ops.size(x_i, out_type=dtypes.int64)
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])
+
+ def test_rank(self):
+ x = random_ops.random_uniform([3, 2, 3])
+
+ def loop_fn(i):
+ x_i = array_ops.gather(x, i)
+ return array_ops.rank(x_i)
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
+
+ def test_shape_n(self):
+ x = random_ops.random_uniform([3, 2, 3])
+ y = random_ops.random_uniform([3])
+
+ def loop_fn(i):
+ x_i = array_ops.gather(x, i)
+ y_i = array_ops.gather(y, i)
+ return array_ops.shape_n([x_i, x, y, y_i]), array_ops.shape_n(
+ [x_i, x, y, y_i], out_type=dtypes.int64)
+
+ self._test_loop_fn(
+ loop_fn, 3, loop_fn_dtypes=[dtypes.int32] * 4 + [dtypes.int64] * 4)
+
+ def test_reshape(self):
+ x = random_ops.random_uniform([3, 2, 3])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ return array_ops.reshape(x1, [-1]), array_ops.reshape(x1, [1, 3, 1, -1])
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
+
+ def test_expand_dims(self):
+ x = random_ops.random_uniform([3, 2, 3])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ return array_ops.expand_dims(
+ x1, axis=-1), array_ops.expand_dims(
+ x1, axis=1)
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
+
+ def test_slice(self):
+ x = random_ops.random_uniform([3, 2, 3])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ return array_ops.slice(x1, begin=(0, 1), size=(2, 1))
+
+ self._test_loop_fn(loop_fn, 3)
+
+ def test_tile(self):
+ x = random_ops.random_uniform([3, 2, 3])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ return array_ops.tile(x1, [2, 1])
+
+ self._test_loop_fn(loop_fn, 3)
+
+ def test_tile_loop_dependent(self):
+ x = random_ops.random_uniform([3, 2, 3])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ return array_ops.tile(x1, [i, 1])
+
+ with self.assertRaisesRegexp(ValueError, "expected to be loop invariant"):
+ pfor_control_flow_ops.pfor(loop_fn, 2)
+
+ def test_pack(self):
+ x = random_ops.random_uniform([3, 2, 3])
+ y = random_ops.random_uniform([2, 3])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ return array_ops.stack([x1, y], axis=-1)
+
+ self._test_loop_fn(loop_fn, 1)
+
+ def test_unpack(self):
+ x = random_ops.random_uniform([3, 2, 3, 4])
+
+ def loop_fn(i):
+ x_i = array_ops.gather(x, i)
+ return array_ops.unstack(
+ x_i, 4, axis=-1), array_ops.unstack(
+ x_i, 3, axis=1)
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 7)
+
+ def test_pad(self):
+ x = random_ops.random_uniform([3, 2, 3])
+ padding = constant_op.constant([[1, 2], [3, 4]])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ return array_ops.pad(x1, padding, mode="CONSTANT")
+
+ self._test_loop_fn(loop_fn, 3)
+
+ def test_split(self):
+ x = random_ops.random_uniform([3, 2, 3])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ return array_ops.split(x1, 2, axis=0), array_ops.split(x1, 3, axis=-1)
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 5)
+
+ def test_transpose(self):
+ x = random_ops.random_uniform([3, 2, 3, 4])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ return array_ops.transpose(x1, [2, 1, 0])
+
+ self._test_loop_fn(loop_fn, 3)
+
+ def test_zeros_like(self):
+ x = random_ops.random_uniform([3, 2, 3])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ z = array_ops.zeros_like(x1)
+ return z, z + x1
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
+
+ def test_concat_v2(self):
+ x = random_ops.random_uniform([3, 2, 3])
+ y = random_ops.random_uniform([2, 3])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ return array_ops.concat(
+ [x1, x1, y], axis=0), array_ops.concat(
+ [x1, x1, y], axis=-1)
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
+
+ def test_unary_cwise_ops(self):
+ for op in [array_ops.identity, array_ops.stop_gradient]:
+ x = random_ops.random_uniform([3, 5])
+
+ # pylint: disable=cell-var-from-loop
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ y = op(x1) + x1
+ loss = nn.l2_loss(y)
+ return op(x), y, gradient_ops.gradients(loss, x1)
+
+ # pylint: enable=cell-var-from-loop
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
+
+ def test_strided_slice(self):
+ x = random_ops.random_uniform([3, 3, 4, 4, 2, 2, 2])
+
+ def loop_fn(i):
+ x_i = array_ops.gather(x, i)
+ y = x_i[:2, ::2, 1::3, ..., array_ops.newaxis, 1]
+ loss = nn.l2_loss(y)
+ return y, gradient_ops.gradients(loss, x_i)
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
+
+
+class MathTest(PForTest):
+
+ def test_unary_cwise_ops(self):
+ for op in [
+ math_ops.tanh, nn.relu, math_ops.sigmoid, math_ops.negative,
+ math_ops.square
+ ]:
+ x = random_ops.random_uniform([3, 5])
+
+ # pylint: disable=cell-var-from-loop
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ y = op(x1)
+ loss = math_ops.reduce_sum(y * y)
+ return op(x), y, gradient_ops.gradients(loss, x1)
+
+ # pylint: enable=cell-var-from-loop
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
+
+ def test_unary_cwise_no_grad(self):
+ for op in [math_ops.ceil, math_ops.floor, math_ops.logical_not]:
+ x = random_ops.random_uniform([3, 5])
+ if op == math_ops.logical_not:
+ x = x > 0
+
+ # pylint: disable=cell-var-from-loop
+ def loop_fn(i):
+ return op(array_ops.gather(x, i))
+
+ # pylint: enable=cell-var-from-loop
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=x.dtype)
+
+ def test_binary_cwise_ops(self):
+ logical_ops = [
+ math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor
+ ]
+ bool_ops = [
+ math_ops.less, math_ops.less_equal, math_ops.greater,
+ math_ops.greater_equal, math_ops.equal, math_ops.not_equal
+ ]
+ float_ops = [
+ math_ops.add, math_ops.subtract, math_ops.multiply, math_ops.divide,
+ math_ops.maximum, math_ops.minimum
+ ]
+ for op in logical_ops + bool_ops + float_ops:
+ x = random_ops.random_uniform([7, 3, 5])
+ y = random_ops.random_uniform([3, 5])
+ if op in logical_ops:
+ x = x > 0
+ y = y > 0
+
+ # pylint: disable=cell-var-from-loop
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ y1 = array_ops.gather(y, i)
+ return op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)
+
+ # pylint: enable=cell-var-from-loop
+
+ dtype = dtypes.float32 if op in float_ops else dtypes.bool
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtype] * 5)
+
+ def test_addn(self):
+ x = random_ops.random_uniform([2, 3, 5])
+ y = random_ops.random_uniform([3, 5])
+ z = random_ops.random_uniform([3, 5])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ return math_ops.add_n([x1, y, z])
+
+ self._test_loop_fn(loop_fn, 2)
+
+ def test_matmul(self):
+ for tr_a in (True, False):
+ for tr_b in (True, False):
+ for stack_a in (True, False):
+ for stack_b in (True, False):
+ shape_a = (5, 3) if tr_a else (3, 5)
+ if stack_a:
+ shape_a = (2,) + shape_a
+ shape_b = (7, 5) if tr_b else (5, 7)
+ if stack_b:
+ shape_b = (2,) + shape_b
+
+ x = random_ops.random_uniform(shape_a)
+ y = random_ops.random_uniform(shape_b)
+
+ # pylint: disable=cell-var-from-loop
+ def loop_fn(i):
+ a = array_ops.gather(x, i) if stack_a else x
+ b = array_ops.gather(y, i) if stack_b else y
+ return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
+
+ # pylint: enable=cell-var-from-loop
+
+ self._test_loop_fn(loop_fn, 2)
+
+ def test_batch_matmul(self):
+ for tr_a in (True, False):
+ for tr_b in (True, False):
+ for stack_a in (True, False):
+ for stack_b in (True, False):
+ shape_a = (4, 5, 3) if tr_a else (4, 3, 5)
+ if stack_a:
+ shape_a = (2,) + shape_a
+ shape_b = (4, 7, 5) if tr_b else (4, 5, 7)
+ if stack_b:
+ shape_b = (2,) + shape_b
+
+ x = random_ops.random_uniform(shape_a)
+ y = random_ops.random_uniform(shape_b)
+
+ # pylint: disable=cell-var-from-loop
+ def loop_fn(i):
+ a = array_ops.gather(x, i) if stack_a else x
+ b = array_ops.gather(y, i) if stack_b else y
+ return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
+
+ # pylint: enable=cell-var-from-loop
+
+ self._test_loop_fn(loop_fn, 2)
+
+ def test_reduction(self):
+ x = random_ops.random_uniform([2, 3, 4, 5])
+ for op in [
+ math_ops.reduce_sum, math_ops.reduce_prod, math_ops.reduce_max,
+ math_ops.reduce_min
+ ]:
+ for axis in ([1], None, [0, 2]):
+ for keepdims in (True, False):
+
+ # pylint: disable=cell-var-from-loop
+ def loop_fn(i):
+ a = array_ops.gather(x, i)
+ return op(a, axis=axis, keepdims=keepdims)
+
+ # pylint: enable=cell-var-from-loop
+
+ self._test_loop_fn(loop_fn, 2)
+
+ def test_cum_sum(self):
+ x = random_ops.random_uniform([2, 3, 4, 5])
+ for axis in (1, -2):
+ for exclusive in (True, False):
+ for reverse in (True, False):
+
+ # pylint: disable=cell-var-from-loop
+ def loop_fn(i):
+ a = array_ops.gather(x, i)
+ return math_ops.cumsum(
+ a, axis=axis, exclusive=exclusive, reverse=reverse)
+
+ # pylint: enable=cell-var-from-loop
+
+ self._test_loop_fn(loop_fn, 2)
+
+ def test_cum_prod(self):
+ x = random_ops.random_uniform([2, 3, 4, 5])
+ for axis in (1, -2):
+ for exclusive in (True, False):
+ for reverse in (True, False):
+
+ # pylint: disable=cell-var-from-loop
+ def loop_fn(i):
+ a = array_ops.gather(x, i)
+ return math_ops.cumprod(
+ a, axis=axis, exclusive=exclusive, reverse=reverse)
+
+ # pylint: enable=cell-var-from-loop
+
+ self._test_loop_fn(loop_fn, 2)
+
+ def test_bias_add(self):
+ x_shape = [2, 3, 4, 5, 6]
+ x = random_ops.random_uniform(x_shape)
+ for data_format in ("NCHW", "NHWC"):
+ bias_dim = 2 if data_format == "NCHW" else -1
+ bias_shape = x_shape[bias_dim]
+ bias = random_ops.random_uniform([bias_shape])
+
+ # pylint: disable=cell-var-from-loop
+ def loop_fn(i):
+ a = array_ops.gather(x, i)
+ y = nn.bias_add(a, bias, data_format=data_format)
+ loss = math_ops.reduce_sum(y * y)
+ return y, gradient_ops.gradients(loss, bias)
+
+ # pylint: enable=cell-var-from-loop
+
+ self._test_loop_fn(
+ loop_fn, 2, loop_fn_dtypes=[dtypes.float32, dtypes.float32])
+
+ def test_unsorted_segment_sum(self):
+ t = random_ops.random_uniform([3, 3, 2])
+ segment_ids = constant_op.constant([[0, 0, 2], [0, 1, 2], [2, 2, 2]])
+ num_segments = 3
+
+ def loop_fn(i):
+ data = array_ops.gather(t, i)
+ data_0 = array_ops.gather(t, 0)
+ seg_ids = array_ops.gather(segment_ids, i)
+ return (math_ops.unsorted_segment_sum(data, seg_ids, num_segments),
+ math_ops.unsorted_segment_sum(data_0, seg_ids, num_segments))
+
+ self._test_loop_fn(loop_fn, 3, [dtypes.float32] * 2)
+
+ def test_cast(self):
+ x = constant_op.constant([[1], [2]])
+ y = constant_op.constant([[1.0], [2.0]])
+
+ def loop_fn(i):
+ return (math_ops.cast(array_ops.gather(x, i), dtypes.float32),
+ math_ops.cast(array_ops.gather(y, i), dtypes.int32))
+
+ self._test_loop_fn(
+ loop_fn, 2, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
+
+ def test_tanh_axpy(self):
+ a = constant_op.constant(3.)
+ x = random_ops.random_uniform([4, 5])
+ y = random_ops.random_uniform([6, 5])
+ n = x.shape[0]
+
+ def loop_fn(i):
+ return math_ops.tanh(a * array_ops.gather(x, i) + array_ops.gather(y, i))
+
+ self._test_loop_fn(loop_fn, n)
+
+ def test_select(self):
+ cond = constant_op.constant([True, False])
+ a = random_ops.random_uniform([2, 3, 5])
+ b = random_ops.random_uniform([2, 3, 5])
+ for cond_shape in [2], [2, 3], [2, 3, 5]:
+ cond = random_ops.random_uniform(cond_shape) > 0.5
+
+ # pylint: disable=cell-var-from-loop
+ def loop_fn(i):
+ a_i = array_ops.gather(a, i)
+ b_i = array_ops.gather(b, i)
+ cond_i = array_ops.gather(cond, i)
+ return array_ops.where(cond_i, a_i, b_i)
+
+ # pylint: enable=cell-var-from-loop
+
+ self._test_loop_fn(loop_fn, 2)
+
+
+class NNTest(PForTest):
+
+ def test_conv2d(self):
+ x = random_ops.random_uniform([3, 2, 12, 12, 3])
+ filt = random_ops.random_uniform([3, 3, 3, 7])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ return nn.conv2d(
+ x1, filt, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
+
+ self._test_loop_fn(loop_fn, 3)
+
+ def test_conv2d_backprop_input(self):
+ x_shape = [2, 12, 12, 3]
+ filt = random_ops.random_uniform([3, 3, 3, 7])
+ grad = random_ops.random_uniform([3, 2, 5, 5, 7])
+
+ def loop_fn(i):
+ grad1 = array_ops.gather(grad, i)
+ return nn.conv2d_backprop_input(
+ x_shape,
+ filt,
+ grad1,
+ strides=[1, 2, 2, 1],
+ padding="VALID",
+ data_format="NHWC")
+
+ self._test_loop_fn(loop_fn, 3)
+
+ def test_conv2d_backprop_filter(self):
+ x = random_ops.random_uniform([3, 2, 12, 12, 3])
+ x_0 = array_ops.gather(x, 0)
+ filter_sizes = [3, 3, 3, 7]
+ grad = random_ops.random_uniform([3, 2, 5, 5, 7])
+
+ def loop_fn(i):
+ x_i = array_ops.gather(x, i)
+ grad_i = array_ops.gather(grad, i)
+ return [
+ nn.conv2d_backprop_filter(
+ inp,
+ filter_sizes,
+ grad_i,
+ strides=[1, 2, 2, 1],
+ padding="VALID",
+ data_format="NHWC") for inp in [x_i, x_0]
+ ]
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
+
+ def test_avg_pool(self):
+ x = random_ops.random_uniform([3, 2, 12, 12, 3])
+ ksize = [1, 3, 3, 1]
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ output = nn.avg_pool(
+ x1, ksize, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
+ loss = nn.l2_loss(output)
+ return output, gradient_ops.gradients(loss, x1)
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
+
+ def test_max_pool(self):
+ x = random_ops.random_uniform([3, 2, 12, 12, 3])
+ ksize = [1, 3, 3, 1]
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ output = nn.max_pool(
+ x1, ksize, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
+ loss = nn.l2_loss(output)
+ return output, gradient_ops.gradients(loss, x1)
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
+
+ def test_fused_batch_norm(self):
+ data_formats = ["NHWC"]
+ if test.is_gpu_available():
+ data_formats.append("NCHW")
+ for is_training in (True, False):
+ for data_format in data_formats:
+ if data_format == "NCHW":
+ x = random_ops.random_uniform([3, 1, 2, 5, 5])
+ else:
+ x = random_ops.random_uniform([3, 1, 5, 5, 2])
+ scale = random_ops.random_uniform([2])
+ offset = random_ops.random_uniform([2])
+ mean = None if is_training else random_ops.random_uniform([2])
+ variance = None if is_training else random_ops.random_uniform([2])
+
+ # pylint: disable=cell-var-from-loop
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ outputs = nn.fused_batch_norm(
+ x1,
+ scale,
+ offset,
+ mean=mean,
+ variance=variance,
+ epsilon=0.01,
+ data_format=data_format,
+ is_training=is_training)
+ outputs = list(outputs)
+ # We only test the first value of outputs when is_training is False.
+ # It looks like CPU and GPU have different outputs for batch_mean and
+ # batch_variance for this case.
+ if not is_training:
+ outputs[1] = constant_op.constant(0.)
+ outputs[2] = constant_op.constant(0.)
+ loss = nn.l2_loss(outputs[0])
+ gradients = gradient_ops.gradients(loss, [x1, scale, offset])
+ return outputs + gradients
+
+ # pylint: enable=cell-var-from-loop
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 6)
+
+ def test_softmax_cross_entropy_with_logits(self):
+ logits = random_ops.random_uniform([3, 2, 4])
+ labels = random_ops.random_uniform([3, 2, 4])
+ labels /= math_ops.reduce_sum(labels, axis=[2], keepdims=True)
+
+ def loop_fn(i):
+ logits_i = array_ops.gather(logits, i)
+ labels_i = array_ops.gather(labels, i)
+ loss = nn.softmax_cross_entropy_with_logits(
+ labels=labels_i, logits=logits_i)
+ return loss, gradient_ops.gradients(math_ops.reduce_sum(loss), logits_i)
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
+
+
+class RandomTest(PForTest):
+
+ # The random values generated in the two implementations are not guaranteed to
+ # match. So we only check the returned shapes.
+ def run_and_assert_equal(self, targets1, targets2):
+ outputs = self._run_targets(targets1, targets2)
+ n = len(outputs) // 2
+ for i in range(n):
+ self.assertAllEqual(outputs[i].shape, outputs[i + n].shape)
+
+ def test_random_uniform(self):
+
+ def loop_fn(_):
+ return random_ops.random_uniform([3])
+
+ self._test_loop_fn(loop_fn, 5)
+
+ def test_random_uniform_int(self):
+
+ def loop_fn(_):
+ return random_ops.random_uniform([3], maxval=1, dtype=dtypes.int32)
+
+ self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=dtypes.int32)
+
+ def test_random_standard_normal(self):
+
+ def loop_fn(_):
+ return random_ops.random_normal([3])
+
+ self._test_loop_fn(loop_fn, 5)
+
+ def test_truncated_normal(self):
+
+ def loop_fn(_):
+ return random_ops.truncated_normal([3])
+
+ self._test_loop_fn(loop_fn, 5)
+
+ def test_random_gamma(self):
+
+ def loop_fn(_):
+ return random_ops.random_gamma([3], alpha=[0.5])
+
+ self._test_loop_fn(loop_fn, 5)
+
+ def test_random_poisson_v2(self):
+
+ def loop_fn(_):
+ return random_ops.random_poisson(lam=[1.3], shape=[3])
+
+ self._test_loop_fn(loop_fn, 5)
+
+
+class LoggingTest(PForTest):
+
+ def test_print(self):
+ x = random_ops.random_uniform([3, 5])
+
+ def loop_fn(i):
+ x1 = array_ops.gather(x, i)
+ return logging_ops.Print(
+ x1, [x1, "x1", array_ops.shape(x1)], summarize=10)
+
+ self._test_loop_fn(loop_fn, 3)
+
+ def test_assert(self):
+
+ def loop_fn(i):
+ return control_flow_ops.Assert(i < 10, [i, [10], [i + 1]])
+
+ # TODO(agarwal): make this work with for_loop.
+ with session.Session() as sess:
+ sess.run(pfor_control_flow_ops.pfor(loop_fn, 3))
+
+
+class TensorArrayTest(PForTest):
+
+ def test_create_outside_and_read(self):
+
+ ta = tensor_array_ops.TensorArray(
+ dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)
+
+ def loop_fn(i):
+ return ta.read(i), ta.read(0)
+
+ self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
+
+ def test_create_outside_and_gather(self):
+
+ ta = tensor_array_ops.TensorArray(
+ dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)
+
+ def loop_fn(i):
+ return ta.gather([i]), ta.gather([0, 1])
+
+ self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
+
+ def test_create_outside_and_write_and_scatter(self):
+
+ t = tensor_array_ops.TensorArray(dtypes.int32, 10, clear_after_read=False)
+ handle = t.handle
+
+ def loop_fn(i):
+ ta = t.write(i + 2, 2 * i).write(i, 5)
+ ta = ta.scatter([4 + i], [4]).scatter([6 + i, 8 + i], [6 + i, 8 + i])
+ return ta.flow
+
+ t1 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
+ out1 = tensor_array_ops.TensorArray(
+ dtypes.int32, handle=handle, flow=t1[-1]).stack()
+ output1 = self._run_targets(out1)
+
+ t2 = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, iters=2)
+ out2 = tensor_array_ops.TensorArray(
+ dtypes.int32, handle=handle, flow=t2[-1]).stack()
+ output2 = self._run_targets(out2)
+ self.assertAllClose(output2, output1)
+
+ def test_create_inside_and_write(self):
+
+ def loop_fn(i):
+ # TODO(agarwal): switching the order of writes to ta1 does not work.
+ ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).write(0, i).write(
+ 1, 1)
+ ta2 = tensor_array_ops.TensorArray(dtypes.int32, 1).write(0, 1)
+ return ta1.stack(), ta2.stack()
+
+ self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)
+
+ def test_create_inside_and_scatter(self):
+
+ def loop_fn(i):
+ # TODO(agarwal): switching the order of scatter to ta1 does not work.
+ ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).scatter(
+ [0], [[i, 2]]).scatter([1], [[1, 2]])
+ ta2 = tensor_array_ops.TensorArray(dtypes.int32,
+ 2).scatter([0], [3]).scatter([1], [4])
+ return ta1.stack(), ta2.stack()
+
+ self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)
+
+ def test_create_inside_and_read(self):
+
+ def loop_fn(i):
+ ta1 = tensor_array_ops.TensorArray(
+ dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
+ ta2 = tensor_array_ops.TensorArray(
+ dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
+ # TODO(agarwal): ta1.read(i) currently is not supported.
+ return ta1.read(0), ta2.read(0), ta2.read(i)
+
+ self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)
+
+ def test_create_inside_and_gather(self):
+
+ def loop_fn(i):
+ ta1 = tensor_array_ops.TensorArray(
+ dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
+ ta2 = tensor_array_ops.TensorArray(
+ dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
+ # TODO(agarwal): ta1.read(i) currently is not supported.
+ return ta1.gather([0, 1]), ta2.gather([0, 1]), ta2.gather([i])
+
+ self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)
+
+ def test_grad(self):
+ x = random_ops.random_uniform([3, 2])
+ ta = tensor_array_ops.TensorArray(
+ dtypes.float32, 3, clear_after_read=False).unstack(x)
+ y = math_ops.square(ta.stack())
+
+ def loop_fn(i):
+ y_i = array_ops.gather(y, i)
+ grad = gradient_ops.gradients(y_i, x)[0]
+ return array_ops.gather(grad, i)
+
+ t1 = pfor_control_flow_ops.pfor(loop_fn, iters=3)
+ # y = x * x. Hence dy/dx = 2 * x.
+    expected_grad = 2.0 * x
+    with session.Session() as sess:
+      computed_grad, expected_grad = sess.run([t1, expected_grad])
+      self.assertAllClose(expected_grad, computed_grad)
+
+
+class StackTest(PForTest):
+
+ def test_stack_inside_loop_invariant(self):
+
+ def loop_fn(_):
+ s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
+ op1 = data_flow_ops.stack_push_v2(s, 1)
+ with ops.control_dependencies([op1]):
+ op2 = data_flow_ops.stack_push_v2(s, 2)
+ with ops.control_dependencies([op2]):
+ e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
+ with ops.control_dependencies([e2]):
+ e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
+ return e1, e2
+
+ self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
+
+ def test_stack_inside_push_loop_dependent(self):
+
+ def loop_fn(i):
+ s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
+ op1 = data_flow_ops.stack_push_v2(s, i)
+ with ops.control_dependencies([op1]):
+ op2 = data_flow_ops.stack_push_v2(s, 2)
+ with ops.control_dependencies([op2]):
+ e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
+ with ops.control_dependencies([e2]):
+ e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
+ return e1, e2
+
+ self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
+
+ def test_stack_outside_pop(self):
+ s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
+ op = data_flow_ops.stack_push_v2(s, 5)
+ with ops.control_dependencies([op]):
+ op = data_flow_ops.stack_push_v2(s, 6)
+ with ops.control_dependencies([op]):
+ op = data_flow_ops.stack_push_v2(s, 7)
+
+ def loop_fn(_):
+ e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
+ with ops.control_dependencies([e1]):
+ e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
+ return e1, e2
+
+ with ops.control_dependencies([op]):
+ e1, e2 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
+ with ops.control_dependencies([e1, e2]):
+ e3 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
+ v1, v2, v3 = self._run_targets([e1, e2, e3], run_init=False)
+ self.assertAllEqual([7, 7], v1)
+ self.assertAllEqual([6, 6], v2)
+ self.assertAllEqual(5, v3)
+
+ def test_stack_outside_push(self):
+ s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
+
+ def loop_fn(_):
+ return data_flow_ops.stack_push_v2(s, 7)
+
+ with self.assertRaisesRegexp(ValueError, "StackPushV2 not allowed.*"):
+ pfor_control_flow_ops.pfor(loop_fn, iters=2)
+
+
+# TODO(agarwal): test nested while_loops. This currently requires converting a
+# tf.cond.
+class ControlFlowTest(PForTest):
+
+ def test_while_outside_loop(self):
+
+ x = control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
+
+ def loop_fn(i):
+ return x + i
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
+
+ def test_invariant_while(self):
+
+ def loop_fn(_):
+ return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
+
+ def test_invariant_while_with_control_dependency(self):
+
+ def loop_fn(i):
+ with ops.control_dependencies([i]):
+ return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1,
+ [0])
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
+
+ def test_while_with_stateful_ops(self):
+
+ def loop_fn(_):
+ return control_flow_ops.while_loop(
+ lambda j, x: j < 4,
+ lambda j, x: (j + 1, x + random_ops.random_uniform([])), [0, 0.])[0]
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
+
+ def test_while_unstacked_condition(self):
+
+ def loop_fn(i):
+ return control_flow_ops.while_loop(lambda j, x: j < 4,
+ lambda j, x: (j + 1, x + i), [0, 0])
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int32])
+
+ def test_while(self):
+ x = random_ops.random_uniform([3, 5])
+ lengths = constant_op.constant([4, 0, 2])
+
+ def loop_fn(i):
+ x_i = array_ops.gather(x, i)
+ lengths_i = array_ops.gather(lengths, i)
+
+ _, total = control_flow_ops.while_loop(
+ lambda j, _: j < lengths_i,
+ lambda j, t: (j + 1, t + array_ops.gather(x_i, j)), [0, 0.])
+ return total
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])
+
+ def test_while_jacobian(self):
+ x = random_ops.random_uniform([1, 3])
+ y = random_ops.random_uniform([3, 3])
+
+ # out = x @ y @ y @ y @ y, where @ is matmul operator.
+ _, out = control_flow_ops.while_loop(
+ lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
+ [0, x])
+
+ def loop_fn(i):
+ out_i = array_ops.gather(out, i, axis=1)
+ return array_ops.reshape(gradient_ops.gradients(out_i, x)[0], [-1])
+
+ out = pfor_control_flow_ops.pfor(loop_fn, iters=3)
+
+    # The above code does not work with tf.while_loop instead of pfor. So we
+    # manually compute the expected output here.
+    # Note that the gradient of the output w.r.t. x is (y @ y @ y @ y)^T.
+ expected_output = y
+ for _ in range(3):
+ expected_output = math_ops.matmul(expected_output, y)
+ expected_output = array_ops.transpose(expected_output, [1, 0])
+
+ with session.Session() as sess:
+ out, expected = sess.run([out, expected_output])
+ self.assertAllClose(expected, out)
+
+ def test_tensor_array_as_loop_variable(self):
+
+ def loop_fn(i):
+
+ def body(j, ta):
+ ta = ta.write(j, i + j * j)
+ return j + 1, ta
+
+ _, ta = control_flow_ops.while_loop(
+ lambda j, _: j < 4, body,
+ (0, tensor_array_ops.TensorArray(dtypes.int32, size=4)))
+ return ta.stack()
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
+
+ def test_read_tensor_array_partitioned_indices(self):
+    # Note that the tensor array values are pfor loop dependent, and the while
+    # loop termination condition is also dependent on the pfor iteration.
+ def loop_fn(i):
+ ta = tensor_array_ops.TensorArray(dtypes.int32, size=6)
+ ta = ta.unstack(i + list(range(5)))
+
+ def body(j, s):
+ return j + 1, s + ta.read(j)
+
+ _, s = control_flow_ops.while_loop(lambda j, _: j < i,
+ body,
+ (0, 0))
+ return s
+
+ self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
+
+ def test_external_while_loop_grad(self):
+ # Here we test that external while_loops that are extended from inside pfor
+    # (due to gradient calls) are not actually converted. If the loop below
+    # were converted, all pfor iterations would write to the same tensor
+    # array indices.
+ x = constant_op.constant(1.)
+
+ def body(j, ta):
+ ta = ta.write(j, x)
+ return j + 1, ta
+
+ _, ta = control_flow_ops.while_loop(
+ lambda j, _: j < 4, body,
+ (0, tensor_array_ops.TensorArray(dtypes.float32, size=4)))
+ out = ta.stack()
+
+ def loop_fn(i):
+ out_i = array_ops.gather(out, i)
+ return gradient_ops.gradients(out_i, x)[0]
+
+ with session.Session() as sess:
+ # out is [x, x, x]. Hence the gradients should be [1, 1, 1].
+ self.assertAllEqual([1, 1, 1],
+ sess.run(pfor_control_flow_ops.pfor(loop_fn, 3)))
+
+ def test_tensor_array_grad(self):
+ inp = constant_op.constant(np.random.rand(3, 4, 2), dtype=dtypes.float32)
+ ta = tensor_array_ops.TensorArray(dtypes.float32, size=3)
+ ta = ta.unstack(inp)
+
+ def loop_fn(i):
+
+ def body(j, x):
+ value = ta.gather([j])
+ value = array_ops.gather(array_ops.reshape(value, [4, 2]), i)
+ return j + 1, x + value
+
+ _, out = control_flow_ops.while_loop(lambda j, _: j < 3, body,
+ (0, array_ops.zeros([2])))
+ out = math_ops.reduce_prod(out)
+ return out, gradient_ops.gradients(out, inp)[0]
+
+ pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)
+ # Note that tf.while_loop does not work in the setup above. So we manually
+ # construct the equivalent computation of the above loops here.
+ real_out = math_ops.reduce_sum(inp, reduction_indices=[0])
+ real_out = math_ops.reduce_prod(real_out, reduction_indices=[1])
+ # Note that gradients of real_out will accumulate the gradients across the
+ # output value. Hence we do the same aggregation on pfor_out_grad.
+ real_out_grad = gradient_ops.gradients(real_out, inp)[0]
+ sum_pfor_out_grad = math_ops.reduce_sum(
+ pfor_out_grad, reduction_indices=[0])
+
+ with session.Session() as sess:
+ v1, v2, v1_grad, v2_grad = sess.run(
+ [pfor_out, real_out, sum_pfor_out_grad, real_out_grad])
+ self.assertAllClose(v1, v2)
+ self.assertAllClose(v1_grad, v2_grad)
+
+
+def dynamic_lstm_input_fn(batch_size, state_size, max_steps):
+ # We make inputs and sequence_length constant so that multiple session.run
+ # calls produce the same result.
+ inputs = constant_op.constant(
+ np.random.rand(batch_size, max_steps, state_size), dtype=dtypes.float32)
+ sequence_length = np.random.randint(0, size=[batch_size], high=max_steps + 1)
+ sequence_length = constant_op.constant(sequence_length, dtype=dtypes.int32)
+ return inputs, sequence_length
+
+
+def create_dynamic_lstm(cell_fn, batch_size, state_size, max_steps):
+ cell = cell_fn(state_size)
+ inputs, sequence_length = dynamic_lstm_input_fn(batch_size,
+ state_size,
+ max_steps)
+ inputs_ta = tensor_array_ops.TensorArray(
+ dtypes.float32, size=max_steps, element_shape=[batch_size, state_size])
+ inputs_time_major = array_ops.transpose(inputs, [1, 0, 2])
+ inputs_ta = inputs_ta.unstack(inputs_time_major)
+ zeros = array_ops.zeros([state_size])
+
+ def loop_fn(i):
+ sequence_length_i = array_ops.gather(sequence_length, i)
+
+ def body_fn(t, state, ta):
+ inputs_t = array_ops.expand_dims(
+ array_ops.gather(inputs_ta.read(t), i), 0)
+ output, new_state = cell(inputs_t, state)
+ output = array_ops.reshape(output, [-1])
+ # TODO(agarwal): one optimization that dynamic_rnn uses is to avoid the
+ # array_ops.where when t < min(sequence_length). Doing that requires
+ # supporting tf.cond pfor conversion.
+ done = t >= sequence_length_i
+ output = array_ops.where(done, zeros, output)
+ ta = ta.write(t, output)
+ new_state = [array_ops.where(done, s, ns) for s, ns in
+ zip(nest.flatten(state), nest.flatten(new_state))]
+ new_state = nest.pack_sequence_as(state, new_state)
+ return t + 1, new_state, ta
+
+ def condition_fn(t, _, unused):
+ del unused
+ return t < max_steps
+
+ initial_state = cell.zero_state(1, dtypes.float32)
+ _, state, ta = control_flow_ops.while_loop(condition_fn, body_fn, [
+ 0, initial_state,
+ tensor_array_ops.TensorArray(dtypes.float32, max_steps)
+ ])
+
+ new_state = [array_ops.reshape(x, [-1]) for x in nest.flatten(state)]
+ new_state = nest.pack_sequence_as(initial_state, new_state)
+ return ta.stack(), new_state
+
+ pfor_output = pfor_control_flow_ops.pfor(loop_fn, batch_size)
+ tf_output = rnn.dynamic_rnn(
+ cell,
+ inputs,
+ sequence_length=sequence_length,
+ initial_state=cell.zero_state(batch_size, dtypes.float32))
+ return pfor_output, tf_output
+
+
+class RNNTest(PForTest):
+
+ def test_dynamic_rnn(self):
+ pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell,
+ 3, 5, 7)
+ self.run_and_assert_equal(pfor_outputs, tf_outputs)
+
+ def test_dynamic_lstm(self):
+ pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicLSTMCell,
+ 3, 5, 7)
+ self.run_and_assert_equal(pfor_outputs, tf_outputs)
+
+
+# TODO(agarwal): benchmark numbers on GPU for graphs based on while_loop
+# conversion don't look good. Some of it seems like a lot of copies between
+# host and device. Optimize that.
+class Benchmarks(test.Benchmark):
+
+ def _run(self, targets, iters, name=None):
+
+ def _done(t):
+      # Note that we don't use tf.control_dependencies since that does not
+      # guarantee that the computation on the GPU has actually finished. So we
+      # fetch the first element of the output, and assume that this will not be
+      # called on empty tensors.
+ return array_ops.gather(array_ops.reshape(t, [-1]), 0)
+
+ targets = [_done(x) for x in nest.flatten(targets)]
+ sess = session.Session()
+ with sess:
+ init = variables.global_variables_initializer()
+ sess.run(init)
+ sess.run(targets)
+ begin = time.time()
+ for _ in range(iters):
+ sess.run(targets)
+ end = time.time()
+ avg_time_ms = 1000 * (end - begin) / iters
+ self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)
+ return avg_time_ms
+
+ def benchmark_basic_while(self):
+ with ops.Graph().as_default():
+
+ def loop_fn(i):
+ _, s = control_flow_ops.while_loop(
+ lambda t, x: t < i,
+ lambda t, x: (t + 1, x + i),
+ [0, 0])
+ return s
+
+ iters = 50
+ pfor_output = pfor_control_flow_ops.pfor(loop_fn, iters)
+ for_loop_output = pfor_control_flow_ops.for_loop(loop_fn, dtypes.int32,
+ iters)
+ self._run(pfor_output, 100, name="pfor_basic")
+ self._run(for_loop_output, 100, name="for_loop_basic")
+
+ def benchmark_dynamic_rnn(self):
+ with ops.Graph().as_default():
+ pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell,
+ 128, 512, 16)
+ self._run(pfor_outputs, 100, name="pfor_rnn")
+ self._run(tf_outputs, 100, name="tf_rnn")
+
+ def benchmark_dynamic_lstm(self):
+ with ops.Graph().as_default():
+ pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicLSTMCell,
+ 128, 512, 16)
+ self._run(pfor_outputs, 100, name="pfor_lstm")
+ self._run(tf_outputs, 100, name="tf_lstm")
+
+
+class SparseTest(PForTest):
+
+ def test_var_loop_len(self):
+ num_iters = array_ops.placeholder(dtypes.int32)
+
+ def loop_fn(_):
+ return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
+ [3]) # [0, 2, 0]
+
+ pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
+ with self.test_session() as sess:
+ sess.run(pfor, feed_dict={num_iters: 3})
+
+ def test_sparse_result_none_stacked(self):
+ num_iters = 10
+
+ def loop_fn(_):
+ return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
+ [3]) # [0, 2, 0]
+
+ pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
+
+ indices = [[i, j] for i in range(num_iters) for j in range(3)]
+ values = [4, 5, 6] * num_iters
+ dense_shapes = [num_iters, 3]
+ # Expected result: [[4, 5, 6], [4, 5, 6], [4, 5, 6], ...]
+ manual = sparse_tensor.SparseTensor(indices, values, dense_shapes)
+ self.run_and_assert_equal(pfor, manual)
+
+ def test_sparse_result_all_stacked(self):
+ num_iters = 10
+
+ def loop_fn(i):
+ i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
+ indices = array_ops.expand_dims(i, 0)
+ return sparse_tensor.SparseTensor(indices, i, i + 1) # [0, ..., 0, i]
+
+ # Expected result: [[0], [0, 1], [0, 0, 2], [0, 0, 0, 3], ...]
+ pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
+ manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
+ list(range(num_iters)),
+ (num_iters, num_iters))
+ self.run_and_assert_equal(pfor, manual)
+
+ def test_sparse_result_indices_stacked(self):
+ num_iters = 10
+
+ def loop_fn(i):
+ i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
+ indices = array_ops.expand_dims(i, 0)
+ return sparse_tensor.SparseTensor(indices, [1], [num_iters])
+
+    # Expected result: identity matrix of size num_iters x num_iters.
+ pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
+ manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
+ [1] * num_iters, (num_iters, num_iters))
+ self.run_and_assert_equal(pfor, manual)
+
+ def test_sparse_result_values_stacked(self):
+ num_iters = 10
+
+ def loop_fn(i):
+ i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
+ return sparse_tensor.SparseTensor([[0]], i, [num_iters]) # [i, 0, ..., 0]
+
+    # Expected result: [[0, 0, ...], [1, 0, ...], [2, 0, ...], ...]
+ pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
+ manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
+ list(range(num_iters)),
+ (num_iters, num_iters))
+ self.run_and_assert_equal(pfor, manual)
+
+ def test_sparse_result_shapes_stacked(self):
+ num_iters = 10
+
+ def loop_fn(i):
+ i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
+ return sparse_tensor.SparseTensor([[0]], [1], i + 1) # [1, 0, ..., 0]
+
+ # Expected result: [[1, 0, 0, ...], [1, 0, 0, ...], ...]
+ pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
+ manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
+ [1] * num_iters, (num_iters, num_iters))
+ self.run_and_assert_equal(pfor, manual)
+
+ def test_sparse_result_shapes_stacked_2D(self):
+ num_iters = 10
+
+ def loop_fn(i):
+ i = array_ops.expand_dims(math_ops.cast(i + 1, dtypes.int64), 0)
+ shape = array_ops.concat([i, i], 0)
+ return sparse_tensor.SparseTensor([[0, 0]], [1], shape) # [1, 0, ..., 0]
+
+ # Expected result: [[[1, 0, ...], [0, ..., 0], [0, ..., 0], ...], ...]
+ pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
+ manual = sparse_tensor.SparseTensor([[i, 0, 0] for i in range(num_iters)],
+ [1] * num_iters,
+ (num_iters, num_iters, num_iters))
+ self.run_and_assert_equal(pfor, manual)
+
+
+class ParsingTest(PForTest):
+
+ def test_decode_csv(self):
+ csv_tensor = constant_op.constant([["1:2:3"], ["::"], ["7:8:9"]])
+ kwargs = {"record_defaults": [[10], [20], [30]], "field_delim": ":"}
+
+ def loop_fn(i):
+ line = array_ops.gather(csv_tensor, i)
+ return parsing_ops.decode_csv(line, **kwargs)
+
+ self._test_loop_fn(loop_fn, iters=3, loop_fn_dtypes=[dtypes.int32] * 3)
+
+ def test_parse_single_example(self):
+
+ def _int64_feature(*values):
+ return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))
+
+ def _bytes_feature(*values):
+ return feature_pb2.Feature(
+ bytes_list=feature_pb2.BytesList(
+ value=[v.encode("utf-8") for v in values]))
+
+ examples = constant_op.constant([
+ example_pb2.Example(
+ features=feature_pb2.Features(
+ feature={
+ "dense_int": _int64_feature(i),
+ "dense_str": _bytes_feature(str(i)),
+ "sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
+ "sparse_str": _bytes_feature(*["abc"] * i)
+ })).SerializeToString() for i in range(10)
+ ])
+
+ features = {
+ "dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
+ "dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
+ "sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
+ "sparse_str": parsing_ops.VarLenFeature(dtypes.string),
+ }
+
+ def loop_fn(i):
+ example_proto = array_ops.gather(examples, i)
+ f = parsing_ops.parse_single_example(example_proto, features)
+ return f
+
+ pfor = pfor_control_flow_ops.pfor(loop_fn, iters=10)
+ manual = parsing_ops.parse_example(examples, features)
+ self.run_and_assert_equal(pfor, manual)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/python/ops/parallel_for/gradients.py b/tensorflow/python/ops/parallel_for/gradients.py
new file mode 100644
index 0000000000..ee3d5c9b86
--- /dev/null
+++ b/tensorflow/python/ops/parallel_for/gradients.py
@@ -0,0 +1,126 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Jacobian ops."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import check_ops
+from tensorflow.python.ops import gradients as gradient_ops
+from tensorflow.python.ops.parallel_for import control_flow_ops
+from tensorflow.python.util import nest
+
+
+def jacobian(output, inputs, use_pfor=True):
+ """Computes jacobian of `output` w.r.t. `inputs`.
+
+ Args:
+ output: A tensor.
+ inputs: A tensor or a nested structure of tensor objects.
+ use_pfor: If true, uses pfor for computing the jacobian. Else uses
+ tf.while_loop.
+
+ Returns:
+    A tensor or a nested structure of tensors with the same structure as
+    `inputs`. Each entry is the jacobian of `output` w.r.t. the corresponding
+ value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has
+ shape [x_1, ..., x_m], the corresponding jacobian has shape
+ [y_1, ..., y_n, x_1, ..., x_m].
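+
+  For example, as an illustrative sketch (using `tf` aliases for brevity):
+    x = tf.constant([1., 2.])
+    y = x * x
+    jacobian(y, x)  # => [[2., 0.], [0., 4.]]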
+ """
+ flat_inputs = nest.flatten(inputs)
+ output_shape = array_ops.shape(output)
+ output = array_ops.reshape(output, [-1])
+
+ def loop_fn(i):
+ y = array_ops.gather(output, i)
+ return gradient_ops.gradients(y, flat_inputs)
+
+ try:
+ output_size = int(output.shape[0])
+ except TypeError:
+ output_size = array_ops.shape(output)[0]
+
+ if use_pfor:
+ pfor_outputs = control_flow_ops.pfor(loop_fn, output_size)
+ else:
+ pfor_outputs = control_flow_ops.for_loop(
+ loop_fn, [output.dtype] * len(flat_inputs), output_size)
+
+ for i, out in enumerate(pfor_outputs):
+ new_shape = array_ops.concat(
+ [output_shape, array_ops.shape(out)[1:]], axis=0)
+ out = array_ops.reshape(out, new_shape)
+ pfor_outputs[i] = out
+
+ return nest.pack_sequence_as(inputs, pfor_outputs)
+
+
+def batch_jacobian(output, inp, use_pfor=True):
+ """Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`.
+
+ e.g.
+ x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
+ y = x * x
+ jacobian = batch_jacobian(y, x)
+ # => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
+
+ Args:
+    output: A tensor with shape [b, y_1, ..., y_n]. `output[i,...]` should
+      only depend on `inp[i,...]`.
+    inp: A tensor with shape [b, x_1, ..., x_m].
+ use_pfor: If true, uses pfor for computing the Jacobian. Else uses a
+ tf.while_loop.
+
+ Returns:
+    A tensor `t` with shape [b, y_1, ..., y_n, x_1, ..., x_m] where `t[i, ...]`
+ is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked
+ per-example jacobians.
+
+ Raises:
+ ValueError: if first dimension of `output` and `inp` do not match.
+ """
+ output_shape = output.shape
+ if not output_shape[0].is_compatible_with(inp.shape[0]):
+ raise ValueError("Need first dimension of output shape (%s) and inp shape "
+ "(%s) to match." % (output.shape, inp.shape))
+ if output_shape.is_fully_defined():
+ batch_size = int(output_shape[0])
+ output_row_size = output_shape.num_elements() // batch_size
+ else:
+ output_shape = array_ops.shape(output)
+ batch_size = output_shape[0]
+ output_row_size = array_ops.size(output) // batch_size
+ inp_shape = array_ops.shape(inp)
+ # Flatten output to 2-D.
+ with ops.control_dependencies(
+ [check_ops.assert_equal(batch_size, inp_shape[0])]):
+ output = array_ops.reshape(output, [batch_size, output_row_size])
+
+ def loop_fn(i):
+ y = array_ops.gather(output, i, axis=1)
+ return gradient_ops.gradients(y, inp)[0]
+
+ if use_pfor:
+ pfor_output = control_flow_ops.pfor(loop_fn, output_row_size)
+ else:
+ pfor_output = control_flow_ops.for_loop(loop_fn, output.dtype,
+ output_row_size)
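+  # pfor_output has shape [output_row_size, batch_size, ...]: one gradient of a
+  # flattened output column per iteration. Move the batch dimension to the
+  # front before restoring the original output and input shapes.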
+ pfor_output = array_ops.reshape(pfor_output,
+ [output_row_size, batch_size, -1])
+ output = array_ops.transpose(pfor_output, [1, 0, 2])
+ new_shape = array_ops.concat([output_shape, inp_shape[1:]], axis=0)
+ return array_ops.reshape(output, new_shape)
diff --git a/tensorflow/python/ops/parallel_for/gradients_test.py b/tensorflow/python/ops/parallel_for/gradients_test.py
new file mode 100644
index 0000000000..3a6d9149ad
--- /dev/null
+++ b/tensorflow/python/ops/parallel_for/gradients_test.py
@@ -0,0 +1,579 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for jacobian and batch_jacobian ops."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import functools
+import os
+import time
+
+import numpy as np
+
+from tensorflow.python.client import session
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
+from tensorflow.python.framework import ops
+from tensorflow.python.keras.engine import training as keras_training
+from tensorflow.python.layers import layers as tf_layers
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gradients as gradient_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops import rnn
+from tensorflow.python.ops import rnn_cell
+from tensorflow.python.ops import variables
+from tensorflow.python.ops.losses import losses
+from tensorflow.python.ops.parallel_for import control_flow_ops
+from tensorflow.python.ops.parallel_for import gradients
+from tensorflow.python.platform import test
+from tensorflow.python.util import nest
+
+
+class FullyConnectedModel(object):
+
+ def __init__(self, activation_size, num_layers):
+ self._layers = [
+ tf_layers.Dense(activation_size, activation=nn.relu)
+ for _ in range(num_layers)
+ ]
+
+ def __call__(self, inp):
+ activation = inp
+ for layer in self._layers:
+ activation = layer(activation)
+ return activation
+
+
+def fully_connected_model_fn(batch_size, activation_size, num_layers):
+ model = FullyConnectedModel(activation_size, num_layers)
+ inp = random_ops.random_normal([batch_size, activation_size])
+ return inp, model(inp)
+
+
+def lstm_model_fn(batch_size, state_size, steps):
+ inputs = [
+ random_ops.random_normal([batch_size, state_size]) for _ in range(steps)
+ ]
+ cell = rnn_cell.BasicLSTMCell(state_size)
+ init_state = cell.zero_state(batch_size, dtypes.float32)
+ state = init_state
+ for inp in inputs:
+ _, state = cell(inp, state)
+ return init_state.c, state.c
+
+
+def dynamic_lstm_model_fn(batch_size, state_size, max_steps):
+ # We make inputs and sequence_length constant so that multiple session.run
+ # calls produce the same result.
+ inputs = constant_op.constant(
+ np.random.rand(batch_size, max_steps, state_size), dtype=dtypes.float32)
+ sequence_length = constant_op.constant(
+ np.random.randint(0, size=[batch_size], high=max_steps + 1),
+ dtype=dtypes.int32)
+
+ cell = rnn_cell.BasicLSTMCell(state_size)
+ initial_state = cell.zero_state(batch_size, dtypes.float32)
+ return inputs, rnn.dynamic_rnn(
+ cell,
+ inputs,
+ sequence_length=sequence_length,
+ initial_state=initial_state)
+
+
+def create_fc_batch_jacobian(batch_size, activation_size, num_layers):
+ inp, output = fully_connected_model_fn(batch_size, activation_size,
+ num_layers)
+ pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True)
+ while_jacobian = gradients.batch_jacobian(output, inp, use_pfor=False)
+ return pfor_jacobian, while_jacobian
+
+
+def create_lstm_batch_jacobian(batch_size, state_size, steps):
+ inp, output = lstm_model_fn(batch_size, state_size, steps)
+ pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True)
+ while_jacobian = gradients.batch_jacobian(output, inp, use_pfor=False)
+ return pfor_jacobian, while_jacobian
+
+
+def create_dynamic_lstm_batch_jacobian(batch_size, state_size, max_steps):
+ inp, (_, final_state) = dynamic_lstm_model_fn(batch_size, state_size,
+ max_steps)
+ pfor_jacobian = gradients.batch_jacobian(final_state.c, inp, use_pfor=True)
+  # Note that use_pfor=False does not work above given the current limitations
+  # of the while_loop implementation. So we statically unroll the loop in the
+  # jacobian computation.
+ while_gradients = [
+ gradient_ops.gradients(array_ops.gather(final_state.c, i, axis=1), inp)[0]
+ for i in range(state_size)
+ ]
+ return pfor_jacobian, while_gradients
+
+
+def create_lstm_batch_hessian(batch_size, state_size, steps):
+ inp, output = lstm_model_fn(batch_size, state_size, steps)
+ pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True)
+ pfor_jacobian = array_ops.reshape(pfor_jacobian, [batch_size, -1])
+ pfor_hessian = gradients.batch_jacobian(pfor_jacobian, inp, use_pfor=True)
+  # TODO(agarwal): using two nested while_loops doesn't seem to work here.
+  # Hence we use pfor_jacobian for computing while_hessian.
+ while_jacobian = pfor_jacobian
+ while_hessian = gradients.batch_jacobian(while_jacobian, inp, use_pfor=False)
+ return pfor_hessian, while_hessian
+
+
+def create_lstm_hessian(batch_size, state_size, steps):
+ _, output = lstm_model_fn(batch_size, state_size, steps)
+ weights = variables.trainable_variables()
+ pfor_jacobians = gradients.jacobian(output, weights, use_pfor=True)
+ pfor_hessians = [
+ gradients.jacobian(x, weights, use_pfor=True) for x in pfor_jacobians
+ ]
+  # TODO(agarwal): using two nested while_loops doesn't seem to work here.
+  # Hence we use pfor_jacobians for computing while_hessians.
+ while_jacobians = pfor_jacobians
+ while_hessians = [
+ gradients.jacobian(x, weights, use_pfor=False) for x in while_jacobians
+ ]
+ return pfor_hessians, while_hessians
+
+
+def create_fc_per_eg_grad(batch_size, activation_size, num_layers):
+ inp = random_ops.random_normal([batch_size, activation_size])
+ layers = [
+ tf_layers.Dense(activation_size, activation=nn.relu)
+ for _ in range(num_layers)
+ ]
+ projection = tf_layers.Dense(1)
+
+ def model_fn(activation):
+ for layer in layers:
+ activation = layer(activation)
+ activation = projection(activation)
+ activation = nn.l2_loss(activation)
+ return gradient_ops.gradients(activation, variables.trainable_variables())
+
+ def loop_fn(i):
+ return model_fn(array_ops.expand_dims(array_ops.gather(inp, i), 0))
+
+ pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
+ loop_fn_dtypes = [x.dtype for x in variables.trainable_variables()]
+ while_outputs = control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, batch_size)
+ return pfor_outputs, while_outputs
+
+
+def create_lstm_per_eg_grad(batch_size, state_size, steps):
+ inputs = [
+ random_ops.random_normal([batch_size, state_size]) for _ in range(steps)
+ ]
+ cell = rnn_cell.BasicLSTMCell(state_size)
+ init_state = cell.zero_state(batch_size, dtypes.float32)
+
+ def model_fn(inps, init_state):
+ state = init_state
+ for inp in inps:
+ _, state = cell(inp, state)
+ output = nn.l2_loss(state.c)
+ return gradient_ops.gradients(output, variables.trainable_variables())
+
+ def loop_fn(i):
+ loop_inputs = [
+ array_ops.expand_dims(array_ops.gather(x, i), 0) for x in inputs
+ ]
+ loop_init_state = rnn_cell.LSTMStateTuple(
+ *[array_ops.expand_dims(array_ops.gather(x, i), 0) for x in init_state])
+ return model_fn(loop_inputs, loop_init_state)
+
+ pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
+ loop_fn_dtypes = [x.dtype for x in variables.trainable_variables()]
+ while_outputs = control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, batch_size)
+ return pfor_outputs, while_outputs
+
+
+# Importing the code from tensorflow_models seems to cause errors. Hence we
+# duplicate the model definition here.
+# TODO(agarwal): Use the version in tensorflow_models/official instead.
+class Mnist(keras_training.Model):
+
+ def __init__(self, data_format):
+ """Creates a model for classifying a hand-written digit.
+
+ Args:
+ data_format: Either 'channels_first' or 'channels_last'.
+ """
+ super(Mnist, self).__init__()
+ if data_format == "channels_first":
+ self._input_shape = [-1, 1, 28, 28]
+ else:
+ assert data_format == "channels_last"
+ self._input_shape = [-1, 28, 28, 1]
+
+ self.conv1 = tf_layers.Conv2D(
+ 32, 5, padding="same", data_format=data_format, activation=nn.relu)
+ self.conv2 = tf_layers.Conv2D(
+ 64, 5, padding="same", data_format=data_format, activation=nn.relu)
+ self.fc1 = tf_layers.Dense(1024, activation=nn.relu)
+ self.fc2 = tf_layers.Dense(10)
+ self.dropout = tf_layers.Dropout(0.4)
+ self.max_pool2d = tf_layers.MaxPooling2D(
+ (2, 2), (2, 2), padding="same", data_format=data_format)
+
+ def __call__(self, inputs, training):
+ """Add operations to classify a batch of input images.
+
+ Args:
+ inputs: A Tensor representing a batch of input images.
+ training: A boolean. Set to True to add operations required only when
+ training the classifier.
+
+ Returns:
+ A logits Tensor with shape [<batch_size>, 10].
+ """
+ y = array_ops.reshape(inputs, self._input_shape)
+ y = self.conv1(y)
+ y = self.max_pool2d(y)
+ y = self.conv2(y)
+ y = self.max_pool2d(y)
+ y = tf_layers.flatten(y)
+ y = self.fc1(y)
+ y = self.dropout(y, training=training)
+ return self.fc2(y)
+
+
+def create_mnist_per_eg_grad(batch_size, data_format, training):
+ images = random_ops.random_uniform([batch_size, 28, 28])
+ sparse_labels = np.random.randint(
+ low=0, high=10, size=[batch_size]).astype(np.int32)
+ labels = np.zeros((batch_size, 10)).astype(np.float32)
+ labels[np.arange(batch_size), sparse_labels] = 1.
+ model = Mnist(data_format)
+
+ def loop_fn(i):
+ image = array_ops.gather(images, i)
+ label = array_ops.gather(labels, i)
+ logits = array_ops.reshape(model(image, training=training), [-1])
+ loss = losses.softmax_cross_entropy(
+ logits=logits, onehot_labels=label, reduction=losses.Reduction.NONE)
+ return gradient_ops.gradients(loss, variables.trainable_variables())
+
+ pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
+ while_outputs = control_flow_ops.for_loop(
+ loop_fn, [dtypes.float32] * len(variables.trainable_variables()),
+ batch_size)
+ return pfor_outputs, while_outputs
+
+
+def create_mnist_per_eg_jacobian(batch_size, data_format, training):
+ images = random_ops.random_uniform([batch_size, 28, 28])
+ model = Mnist(data_format)
+
+ def loop_fn(i, use_pfor):
+ image = array_ops.gather(images, i)
+ logits = array_ops.reshape(model(image, training=training), [-1])
+ return gradients.jacobian(
+ logits, variables.trainable_variables(), use_pfor=use_pfor)
+
+ pfor_outputs = control_flow_ops.pfor(
+ functools.partial(loop_fn, use_pfor=True),
+ batch_size)
+ while_outputs = control_flow_ops.for_loop(
+ functools.partial(loop_fn, use_pfor=False),
+ [dtypes.float32] * len(variables.trainable_variables()), batch_size)
+ return pfor_outputs, while_outputs
+
+
+def create_fc_per_eg_jacobians(batch_size, activation_size, num_layers):
+ model = FullyConnectedModel(activation_size=activation_size,
+ num_layers=num_layers)
+ inp = random_ops.random_normal([batch_size, activation_size])
+ output = model(inp)
+ jacobians = gradients.jacobian(output, variables.trainable_variables())
+
+ def loop_fn(i, use_pfor):
+ inp_i = array_ops.expand_dims(array_ops.gather(inp, i), 0)
+ output = array_ops.reshape(model(inp_i), [-1])
+ return gradients.jacobian(
+ output, variables.trainable_variables(), use_pfor=use_pfor)
+
+ per_eg_jacobians_pfor = control_flow_ops.pfor(
+ functools.partial(loop_fn, use_pfor=True),
+ batch_size)
+ per_eg_jacobians_while = control_flow_ops.for_loop(
+ functools.partial(loop_fn, use_pfor=False),
+ [dtypes.float32] * len(variables.trainable_variables()), batch_size)
+ return jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while
+
+
+class GradientsTest(test.TestCase):
+
+ def run_and_assert_equal(self, targets1, targets2, atol=1e-4, rtol=1e-4):
+ targets1 = nest.flatten(targets1)
+ targets2 = nest.flatten(targets2)
+ assert len(targets1) == len(targets2)
+ init = variables.global_variables_initializer()
+ self.evaluate(init)
+ outputs = self.evaluate(targets1 + targets2)
+ n = len(outputs) // 2
+ for i in range(n):
+ self.assertAllClose(outputs[i], outputs[i + n], rtol=rtol, atol=atol)
+
+ def test_jacobian_fixed_shape(self):
+ x = random_ops.random_uniform([2, 2])
+ y = math_ops.matmul(x, x, transpose_a=True)
+ jacobian_pfor = gradients.jacobian(y, x, use_pfor=True)
+ jacobian_while = gradients.jacobian(y, x, use_pfor=False)
+ answer = ops.convert_to_tensor([[
+ gradient_ops.gradients(y[0][0], x)[0],
+ gradient_ops.gradients(y[0][1], x)[0]
+ ], [
+ gradient_ops.gradients(y[1][0], x)[0],
+ gradient_ops.gradients(y[1][1], x)[0]
+ ]])
+ self.run_and_assert_equal(answer, jacobian_pfor)
+ self.run_and_assert_equal(answer, jacobian_while)
+
+ def test_jacobian_unknown_shape(self):
+ with self.test_session() as sess:
+ x = array_ops.placeholder(dtypes.float32, shape=[None, None])
+ y = math_ops.matmul(x, x, transpose_a=True)
+ jacobian_pfor = gradients.jacobian(y, x, use_pfor=True)
+ jacobian_while = gradients.jacobian(y, x, use_pfor=False)
+ answer = ops.convert_to_tensor([[
+ gradient_ops.gradients(y[0][0], x)[0],
+ gradient_ops.gradients(y[0][1], x)[0]
+ ], [
+ gradient_ops.gradients(y[1][0], x)[0],
+ gradient_ops.gradients(y[1][1], x)[0]
+ ]])
+ ans, pfor_value, while_value = sess.run(
+ [answer, jacobian_pfor, jacobian_while],
+ feed_dict={x: [[1, 2], [3, 4]]})
+ self.assertAllClose(ans, pfor_value)
+ self.assertAllClose(ans, while_value)
+
+ def test_batch_jacobian_bad_shapes(self):
+ x = random_ops.random_uniform([2, 2])
+ y = random_ops.random_uniform([3, 2])
+ with self.assertRaisesRegexp(ValueError, "Need first dimension of output"):
+ gradients.batch_jacobian(y, x, use_pfor=True)
+
+ def test_batch_jacobian_bad_unknown_shapes(self):
+ with self.test_session() as sess:
+ x = array_ops.placeholder(dtypes.float32)
+ y = array_ops.concat([x, x], axis=0)
+ jacobian = gradients.batch_jacobian(y, x)
+ with self.assertRaisesRegexp(errors.InvalidArgumentError,
+ "assertion failed"):
+ sess.run(jacobian, feed_dict={x: [[1, 2], [3, 4]]})
+
+ def test_batch_jacobian_fixed_shape(self):
+ x = random_ops.random_uniform([2, 3, 5])
+ y = x * x
+ batch_jacobian_pfor = gradients.batch_jacobian(y, x, use_pfor=True)
+ batch_jacobian_while = gradients.batch_jacobian(y, x, use_pfor=False)
+ two_x = 2 * x
+ answer = array_ops.stack(
+ [array_ops.diag(two_x[0]),
+ array_ops.diag(two_x[1])])
+ self.run_and_assert_equal(answer, batch_jacobian_pfor)
+ self.run_and_assert_equal(answer, batch_jacobian_while)
+
+ def test_batch_jacobian_unknown_shape(self):
+ with self.test_session() as sess:
+ x = array_ops.placeholder(dtypes.float32)
+ y = x * x
+ batch_jacobian_pfor = gradients.batch_jacobian(y, x, use_pfor=True)
+ batch_jacobian_while = gradients.batch_jacobian(y, x, use_pfor=False)
+ two_x = 2 * x
+ answer = array_ops.stack(
+ [array_ops.diag(two_x[0]),
+ array_ops.diag(two_x[1])])
+ ans, pfor_value, while_value = sess.run(
+ [answer, batch_jacobian_pfor, batch_jacobian_while],
+ feed_dict={x: [[1, 2], [3, 4]]})
+ self.assertAllClose(ans, pfor_value)
+ self.assertAllClose(ans, while_value)
+
+ def test_fc_batch_jacobian(self):
+ pfor_jacobian, while_jacobian = create_fc_batch_jacobian(8, 4, 2)
+ self.run_and_assert_equal(pfor_jacobian, while_jacobian)
+
+ def test_lstm_batch_jacobian(self):
+ pfor_jacobian, while_jacobian = create_lstm_batch_jacobian(8, 4, 2)
+ self.run_and_assert_equal(pfor_jacobian, while_jacobian)
+
+ def test_dynamic_lstm_batch_jacobian(self):
+ pfor_jacobian, while_gradients = create_dynamic_lstm_batch_jacobian(8, 4, 3)
+ with session.Session() as sess:
+ init = variables.global_variables_initializer()
+ sess.run(init)
+ pfor = sess.run(pfor_jacobian)
+ for i in range(4):
+ while_i = sess.run(while_gradients[i])
+ self.assertAllClose(while_i, pfor[:, i, ...])
+
+ def test_lstm_hessian(self):
+ pfor_hessian, while_hessian = create_lstm_hessian(2, 2, 2)
+ self.run_and_assert_equal(pfor_hessian, while_hessian)
+
+ def test_lstm_batch_hessian(self):
+ pfor_hessian, while_hessian = create_lstm_batch_hessian(2, 2, 2)
+ self.run_and_assert_equal(pfor_hessian, while_hessian)
+
+ def test_fc_per_eg_grad(self):
+ pfor_outputs, while_outputs = create_fc_per_eg_grad(8, 4, 2)
+ self.run_and_assert_equal(pfor_outputs, while_outputs)
+
+ def test_lstm_per_eg_grad(self):
+ pfor_outputs, while_outputs = create_lstm_per_eg_grad(8, 4, 2)
+ self.run_and_assert_equal(pfor_outputs, while_outputs)
+
+ def test_mnist_per_eg_grad(self):
+    # It looks like the CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED
+    # configuration of Winograd can produce low-precision output, causing the
+    # tests to fail. So we disable that here.
+ os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
+ data_format = ("channels_first"
+ if test.is_gpu_available() else "channels_last")
+    # Note that we are setting training=False here so that dropout produces
+    # the same result with pfor and with while_loop.
+ pfor_outputs, while_outputs = create_mnist_per_eg_grad(
+ 4, data_format, training=False)
+ self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3)
+ os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)
+
+ def test_mnist_per_eg_jacobian(self):
+    # It looks like the CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED
+    # configuration of Winograd can produce low-precision output, causing the
+    # tests to fail. So we disable that here.
+ os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
+ data_format = ("channels_first"
+ if test.is_gpu_available() else "channels_last")
+    # Note that we are setting training=False here so that dropout produces
+    # the same result with pfor and with while_loop.
+ pfor_outputs, while_outputs = create_mnist_per_eg_jacobian(
+ 2, data_format, training=False)
+ self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3)
+ os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)
+
+ def test_fc_jacobian(self):
+ jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while = (
+ create_fc_per_eg_jacobians(batch_size=8,
+ activation_size=4,
+ num_layers=2))
+ self.run_and_assert_equal(jacobians, per_eg_jacobians_pfor,
+ rtol=2e-3, atol=1e-3)
+ self.run_and_assert_equal(jacobians, per_eg_jacobians_while,
+ rtol=2e-3, atol=1e-3)
+
+
+class GradientsBenchmarks(test.Benchmark):
+
+ def _run(self, targets, iters, name=None):
+
+ def _done(t):
+ # Note that we don't use tf.control_dependencies since that will not make
+ # sure that the computation on GPU has actually finished. So we fetch the
+ # first element of the output, and assume that this will not be called on
+ # empty tensors.
+ return array_ops.gather(array_ops.reshape(t, [-1]), 0)
+
+ targets = [_done(x) for x in nest.flatten(targets)]
+ sess = session.Session()
+ with sess:
+ init = variables.global_variables_initializer()
+ sess.run(init)
+ sess.run(targets)
+ begin = time.time()
+ for _ in range(iters):
+ sess.run(targets)
+ end = time.time()
+ avg_time_ms = 1000 * (end - begin) / iters
+ self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)
+ return avg_time_ms
+
+ def benchmark_fc_batch_jacobian(self):
+ with ops.Graph().as_default():
+ pfor_jacobian, while_jacobian = create_fc_batch_jacobian(100, 32, 20)
+ self._run(pfor_jacobian, 100, name="fc_batch_jacobian_pfor")
+ self._run(while_jacobian, 20, name="fc_batch_jacobian_while")
+
+ def benchmark_lstm_batch_jacobian(self):
+ with ops.Graph().as_default():
+ pfor_jacobian, while_jacobian = create_lstm_batch_jacobian(100, 32, 8)
+ self._run(pfor_jacobian, 100, name="lstm_batch_jacobian_pfor")
+ self._run(while_jacobian, 20, name="lstm_batch_jacobian_while")
+
+ def benchmark_lstm_hessian(self):
+ with ops.Graph().as_default():
+ pfor_hessian, while_hessian = create_lstm_hessian(2, 2, 10)
+ self._run(pfor_hessian, 20, name="lstm_hessian_pfor")
+      self._run(while_hessian, 3, name="lstm_hessian_while")
+
+ def benchmark_lstm_batch_hessian(self):
+ with ops.Graph().as_default():
+ pfor_hessian, while_hessian = create_lstm_batch_hessian(4, 4, 10)
+ self._run(pfor_hessian, 100, name="lstm_batch_hessian_pfor")
+      self._run(while_hessian, 20, name="lstm_batch_hessian_while")
+
+ def benchmark_fc_per_eg_grad(self):
+ with ops.Graph().as_default():
+ pfor_outputs, while_outputs = create_fc_per_eg_grad(100, 32, 3)
+ self._run(pfor_outputs, 100, name="fc_per_eg_grad_pfor")
+ self._run(while_outputs, 20, name="fc_per_eg_grad_while")
+
+ def benchmark_lstm_per_eg_grad(self):
+ with ops.Graph().as_default():
+ pfor_outputs, while_outputs = create_lstm_per_eg_grad(100, 32, 8)
+ self._run(pfor_outputs, 100, name="lstm_per_eg_grad_pfor")
+ self._run(while_outputs, 20, name="lstm_per_eg_grad_while")
+
+ def benchmark_mnist_per_eg_grad(self):
+ with ops.Graph().as_default():
+ data_format = ("channels_first"
+ if test.is_gpu_available() else "channels_last")
+ pfor_outputs, while_outputs = create_mnist_per_eg_grad(
+ 128, data_format, training=True)
+ self._run(pfor_outputs, 20, name="mnist_per_eg_grad_pfor")
+ self._run(while_outputs, 20, name="mnist_per_eg_grad_while")
+
+ def benchmark_mnist_per_eg_jacobian(self):
+ with ops.Graph().as_default():
+ data_format = ("channels_first"
+ if test.is_gpu_available() else "channels_last")
+ pfor_outputs, while_outputs = create_mnist_per_eg_jacobian(
+ 16, data_format, training=True)
+ self._run(pfor_outputs, 20, name="mnist_per_eg_jacobian_pfor")
+ self._run(while_outputs, 20, name="mnist_per_eg_jacobian_while")
+
+ def benchmark_fc_per_eg_jacobian(self):
+ with ops.Graph().as_default():
+ jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while = (
+ create_fc_per_eg_jacobians(batch_size=128,
+ activation_size=32,
+ num_layers=3))
+ self._run(jacobians, 30, name="fc_jacobians_pfor")
+ self._run(per_eg_jacobians_pfor, 100,
+ name="fc_per_eg_jacobians_pfor")
+ self._run(per_eg_jacobians_while, 10,
+ name="fc_per_eg_jacobians_while")
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/python/ops/parallel_for/pfor.py b/tensorflow/python/ops/parallel_for/pfor.py
new file mode 100644
index 0000000000..77ec3bc0d4
--- /dev/null
+++ b/tensorflow/python/ops/parallel_for/pfor.py
@@ -0,0 +1,2552 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Compiled parallel-for loop."""
+# pylint: disable=missing-docstring
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+
+from absl import flags
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.framework import tensor_util
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import check_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.ops import functional_ops
+from tensorflow.python.ops import gen_parsing_ops
+from tensorflow.python.ops import gen_sparse_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn_ops
+from tensorflow.python.ops import parsing_ops
+from tensorflow.python.ops import sparse_ops
+from tensorflow.python.ops import tensor_array_ops
+from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.util import nest
+
+flags.DEFINE_bool(
+ "op_conversion_fallback_to_while_loop", False,
+ "If true, falls back to using a while loop for ops for "
+ "which a converter is not defined.")
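+# As an illustrative note, the flag can be set on the command line of binaries
+# that import this module, e.g. --op_conversion_fallback_to_while_loop=true.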
+
+
+def _stack(t, length):
+  """Stacks `t` `length` times, e.g. mapping shape [2] to [length, 2]."""
+ ones = array_ops.ones_like(array_ops.shape(t))
+ multiples = array_ops.concat([length, ones], 0)
+ t = array_ops.tile(array_ops.expand_dims(t, 0), multiples)
+ return wrap(t, True)
+
+
+# The following stateful ops can be safely called once, and with the same
+# signature as the unconverted version, if their inputs are loop invariant.
+# TODO(agarwal): implement a strategy for converting Variable reads/writes. The
+# plan is to map each read/write in the loop_fn to a corresponding merged
+# read/write in the converted graph. Writes need to be mergeable (e.g.
+# AssignAdd) to be used in `pfor`. Given a certain read/write order in the
+# loop_fn, doing a one-to-one conversion will simulate executing such
+# instructions in lock-step across all iterations.
+passthrough_stateful_ops = set([
+ "VariableV2",
+ "VarHandleOp",
+ "ReadVariableOp",
+ "StackV2",
+ "TensorArrayWriteV3",
+ "TensorArrayReadV3",
+ "TensorArraySizeV3",
+])
+
+
+def _is_stateful_pfor_op(op):
+ if isinstance(op, WhileOp):
+ return op.is_stateful
+ if op.type == "Const":
+    # Const doesn't have an op_def.
+ return False
+ if op.type in passthrough_stateful_ops:
+ return False
+ assert hasattr(op, "op_def") and op.op_def is not None, op
+ return op.op_def.is_stateful
+
+
+# pylint: disable=protected-access
+class WhileOp(object):
+ """Object for storing state for converting the outputs of a while_loop."""
+
+ def __init__(self, exit_node, pfor_ops):
+ """Initializer.
+
+ Args:
+ exit_node: A tensor output from the while_loop.
+ pfor_ops: list of ops inside the current pfor loop.
+ """
+ self._pfor_ops = set(pfor_ops)
+ self._pfor_op_ids = set([x._id for x in pfor_ops])
+ assert isinstance(exit_node, ops.Tensor)
+ self._while_context = exit_node.op._get_control_flow_context()
+ assert isinstance(self._while_context, control_flow_ops.WhileContext)
+ self._context_name = self._while_context.name
+ self._condition = self._while_context.pivot.op.inputs[0]
+    # Parts of an external while_loop could be created inside a pfor loop.
+    # However, for the purpose here, we declare such loops to be external. Also
+    # note that we check whether the condition was created inside or outside to
+    # determine if the while_loop itself was first created inside or outside.
+ # TODO(agarwal): check that the Enter and Exit of this loop are unstacked.
+ self._is_inside_loop = self.op_is_inside_loop(self._condition.op)
+ if self._is_inside_loop:
+ for e in self._while_context.loop_exits:
+ assert self.op_is_inside_loop(e.op)
+
+ # Note the code below tries to reverse engineer an existing while_loop graph
+ # by assuming the following pattern of nodes.
+ #
+ # NextIteration <---- Body <--- Enter
+ # | ^
+ # V ___| Y
+ # Enter -> Merge -> Switch___
+ # ^ | N
+ # | V
+ # LoopCond Exit
+
+    # Note that elements in the lists below correspond one-to-one with each
+    # other, i.e. these lists are the same size, and the i_th entry corresponds
+    # to different Operations/Tensors of a single cycle as illustrated above.
+ # List of Switch ops (ops.Operation) that feed into an Exit Node.
+ self._exit_switches = []
+ # List of inputs (ops.Tensor) to NextIteration.
+ self._body_outputs = []
+ # List of list of control inputs of the NextIteration nodes.
+ self._next_iter_control_inputs = []
+ # List of Merge ops (ops.Operation).
+ self._enter_merges = []
+ # List of output (ops.Tensor) of Exit nodes.
+ self._outputs = []
+
+ # List of Enter Tensors.
+ # There are two types of Enter nodes:
+ # - The Enter nodes that are used in the `loop_vars` argument to
+ # `while_loop` (see
+ # https://www.tensorflow.org/api_docs/python/tf/while_loop). We collect
+ # these Enter nodes immediately below by tracing backwards from the Exit
+ # nodes via Exit <- Switch <- Merge <- Enter. You can see this chain in the
+    # diagram above. This allows us to have a 1:1 correspondence between
+    # self._outputs and the first elements in self._enters.
+ # - The Enter nodes that are used only by the body. They don't appear in the
+ # `loop_vars` and are not returned from the `while_loop`. In Python code,
+ # they are usually captured by the body lambda. We collect them below by
+ # iterating over all the ops in the graph. They are appended to the end of
+ # self._enters or self._direct_enters, and don't correspond to any outputs
+ # in self._outputs. Note that we keep the resource/variant Enter nodes in
+ # self._direct_enters and the constructed while_loop's body uses them
+ # directly as opposed to passing them as loop variables. This is done
+ # because the while_body cannot partition the resource/variant Tensors, so
+ # it has to leave them unchanged.
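+    # As an illustrative sketch of the two categories (j0 being some initial
+    # loop value):
+    #   c = constant_op.constant(1.)   # captured by the body: body-only Enter
+    #   control_flow_ops.while_loop(
+    #       lambda j: j < 4, lambda j: j + c, [j0])  # j0: loop_vars Enter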
+ self._enters = []
+ self._direct_enters = []
+
+ for e in self._while_context.loop_exits:
+ self._outputs.append(e.op.outputs[0])
+ switch = e.op.inputs[0].op
+ assert switch.type == "Switch", switch
+ self._exit_switches.append(switch)
+ merge = switch.inputs[0].op
+ assert merge.type == "Merge", merge
+ self._enter_merges.append(merge)
+ enter = merge.inputs[0].op
+ assert enter.type == "Enter", enter
+ self._enters.append(enter.outputs[0])
+ next_iter = merge.inputs[1].op
+ assert next_iter.type == "NextIteration", next_iter
+ self._body_outputs.append(next_iter.inputs[0])
+ self._next_iter_control_inputs.append(next_iter.control_inputs)
+
+ # Collect all the Enter nodes that are not part of `loop_vars`, the second
+ # category described above.
+ # Also track whether the loop body has any stateful ops.
+ self._is_stateful = False
+ for op in ops.get_default_graph().get_operations():
+      # TODO(agarwal): make sure this works with the nested case.
+ control_flow_context = op._get_control_flow_context()
+ if control_flow_context is None:
+ continue
+ if control_flow_context.name == self._context_name:
+ self._is_stateful |= _is_stateful_pfor_op(op)
+ if op.type == "Enter":
+ output = op.outputs[0]
+ if output not in self._enters:
+ if output.dtype in (dtypes.resource, dtypes.variant):
+ if output not in self._direct_enters:
+ self._direct_enters.append(output)
+ else:
+ self._enters.append(output)
+
+ def __str__(self):
+ """String representation."""
+ return "while_loop(%s)" % self.name
+
+ @property
+ def inputs(self):
+ """Input to all the Enter nodes."""
+ return [x.op.inputs[0] for x in self._enters + self._direct_enters]
+
+ @property
+ def control_inputs(self):
+ """Control input to all the Enter nodes."""
+ control_inputs = []
+ for x in self._enters + self._direct_enters:
+ control_inputs.extend(x.op.control_inputs)
+ return control_inputs
+
+ @property
+ def outputs(self):
+ """Outputs of all the Exit nodes."""
+ return self._outputs
+
+ @property
+ def name(self):
+ """Context name for the while loop."""
+ return self._context_name
+
+ @property
+ def is_inside_loop(self):
+ """Returns true if the while_loop was created inside the pfor."""
+ return self._is_inside_loop
+
+ def op_is_inside_loop(self, op):
+ """True if op was created inside the pfor loop body."""
+ assert isinstance(op, ops.Operation)
+ # Note that we use self._pfor_op_ids for the check and not self._pfor_ops
+    # since it appears the TensorFlow API could return different Python
+    # objects representing the same Operation node.
+ return op._id in self._pfor_op_ids
+
+ @property
+ def is_stateful(self):
+ return self._is_stateful
+
+ @property
+ def pfor_converter(self):
+ """Return a converter for the while loop."""
+ return self
+
+ def _init_pfor(self, parent_pfor, indices, cond_stacked, inputs,
+ inputs_stacked):
+ """Create a PFor object for converting parts of the while_loop.
+
+ Args:
+ parent_pfor: PFor object being used for converting the while_loop.
+ indices: int32 Tensor of ids for the iterations that are still active
+ (i.e. did not exit the while_loop).
+ cond_stacked: True if the while_loop condition is stacked.
+ inputs: list of input Tensors corresponding 1-to-1 with self._enters. Note
+ that these Tensors are a subset of the loop variables for the generated
+ while_loop.
+ inputs_stacked: List of booleans corresponding 1-to-1 with `inputs`,
+ indicating if the value is stacked or not.
+
+ Returns:
+ A PFor instance. The instance is initialized by adding conversion mappings
+ of nodes that will be external to the conversion that the returned
+ instance will be used for. e.g. Enter nodes as well as Merge and Switch
+ outputs are mapped to converted values.
+ """
+ num_outputs = len(self._outputs)
+ assert len(inputs) == len(self._enters)
+ assert len(inputs_stacked) == len(self._enters)
+ loop_var = parent_pfor.loop_var
+ loop_len = array_ops.size(indices)
+ pfor = PFor(
+ loop_var,
+ loop_len,
+ pfor_ops=self._pfor_ops,
+ all_indices=indices,
+ all_indices_partitioned=cond_stacked)
+ # Map all inputs of Enter nodes in self._direct_enters to their converted
+ # values.
+ for enter in self._direct_enters:
+ enter_input = enter.op.inputs[0]
+ converted_enter, stacked, is_sparse_stacked = parent_pfor._convert_helper(
+ enter_input)
+ # Since these are resources / variants, they should be unstacked.
+ assert not stacked and not is_sparse_stacked, (enter, converted_enter)
+ pfor._add_conversion(enter, wrap(converted_enter, False))
+
+ # Map all Enter nodes to the inputs.
+ for enter, inp, stacked in zip(self._enters, inputs, inputs_stacked):
+ pfor._add_conversion(enter, wrap(inp, stacked))
+ # Map outputs of Switch and Merge.
+ for i in range(num_outputs):
+ wrapped_inp = wrap(inputs[i], inputs_stacked[i])
+ merge = self._enter_merges[i]
+ pfor._add_conversion(merge.outputs[0], wrapped_inp)
+ # Note that second output of Merge is typically not used, except possibly
+ # as a control dependency. To avoid trying to output the correct value, we
+ # employ a hack here. We output a dummy invalid value with an incorrect
+ # dtype. This will allow control dependency to work but if using it as an
+ # input, it should typically lead to errors during graph construction due
+ # to dtype mismatch.
+ # TODO(agarwal): Check in the original graph to see if there are any
+ # consumers of this Tensor that use it as an input.
+ pfor._add_conversion(merge.outputs[1],
+ wrap(constant_op.constant(-1.0), False))
+ switch = self._exit_switches[i]
+ # Don't need to worry about switch.output[0] which will feed to Exit node.
+ pfor._add_conversion(switch.outputs[1], wrapped_inp)
+ return pfor
+
+ def _convert_enter(self, parent_pfor, enter):
+ """Converts an Enter node."""
+ inp, stacked, _ = parent_pfor._convert_helper(enter.op.inputs[0])
+ control_inputs = [
+ parent_pfor._convert_helper(x).t for x in enter.op.control_inputs
+ ]
+ if control_inputs:
+ with ops.control_dependencies(control_inputs):
+ inp = array_ops.identity(inp)
+ return inp, stacked
+
+ def _maybe_stacked(self, cache, inp):
+ """Heuristic to figue out if the coverting inp leads to a stacked value.
+
+
+ Args:
+ cache: map from Tensor to boolean indicating stacked/unstacked.
+ inp: input Tensor.
+
+ Returns:
+ True if `inp` could get stacked. If the function returns False, the
+ converted value should be guaranteed to be unstacked. If returning True,
+ it may or may not be stacked.
+ """
+ if inp in cache:
+ return cache[inp]
+ if not self.op_is_inside_loop(inp.op):
+ return False
+ op = inp.op
+ output = False
+ if op.type in [
+ "Shape",
+ "Rank"
+ "ShapeN",
+ "ZerosLike",
+ "TensorArrayV3",
+ "TensorArraySizeV3",
+ ]:
+ output = False
+ elif _is_stateful_pfor_op(op):
+ # This may be fairly aggressive.
+ output = True
+ elif op.type == "Exit":
+ # This may be fairly aggressive.
+ output = True
+ else:
+ for t in op.inputs:
+ if self._maybe_stacked(cache, t):
+ output = True
+ break
+ cache[inp] = output
+ return output
+
+ def _create_init_values(self, pfor_input):
+ """Create arguments passed to converted while_loop."""
+ with ops.name_scope("while_init"):
+ loop_len_vector = pfor_input.pfor.loop_len_vector
+ loop_len = loop_len_vector[0]
+ num_outputs = len(self._outputs)
+
+ inputs = []
+ maybe_stacked_cache = {}
+ # Convert all the Enters. Need to do this before checking for stacking
+ # below.
+ for i, enter in enumerate(self._enters):
+ inp, stacked = self._convert_enter(pfor_input.pfor, enter)
+ inputs.append(inp)
+ maybe_stacked_cache[enter] = stacked
+ # Since this enter node is part of the `loop_vars`, it corresponds to an
+ # output and its preceding switch. We mark this switch's output with the
+ # same stackedness, to act as the base case for the logic below, where we
+ # walk through the body to figure out which inputs might need to be stacked
+ # and which can safely remain unstacked.
+ if i < num_outputs:
+ maybe_stacked_cache[self._exit_switches[i].outputs[1]] = stacked
+
+ # Shape invariants for init_values corresponding to self._enters.
+ input_shape_invariants = []
+ # TensorArrays for outputs of converted while loop
+ output_tas = []
+ # Shape invariants for output TensorArrays.
+ ta_shape_invariants = []
+ # List of booleans indicating stackedness of inputs, i.e. tensors
+ # corresponding to self._enters.
+ inputs_stacked = []
+ for i, inp in enumerate(inputs):
+ enter = self._enters[i]
+ inp_stacked = self._maybe_stacked(maybe_stacked_cache, enter)
+ # Note that even when an input is unstacked, the body could make it
+ # stacked. We use a heuristic below to figure out if the body may be
+ # making it stacked.
+ if i < num_outputs:
+ body_output = self._body_outputs[i]
+ if enter.op in self._pfor_ops:
+ body_output_stacked = self._maybe_stacked(maybe_stacked_cache,
+ body_output)
+ else:
+ # If constructed outside of pfor loop, then the output would not be
+ # stacked.
+ body_output_stacked = False
+ if body_output_stacked and not inp_stacked:
+ inp = _stack(inp, loop_len_vector).t
+ inputs[i] = inp
+ inp_stacked = True
+ # TODO(agarwal): other attributes for the TensorArray ?
+ output_tas.append(tensor_array_ops.TensorArray(inp.dtype, loop_len))
+ ta_shape_invariants.append(tensor_shape.TensorShape(None))
+
+ inputs_stacked.append(inp_stacked)
+ input_shape_invariants.append(tensor_shape.TensorShape(None))
+
+ # See documentation for __call__ for the structure of init_values.
+ init_values = [True, pfor_input.pfor.all_indices] + inputs + output_tas
+ # TODO(agarwal): try stricter shape invariants
+ shape_invariants = (
+ [tensor_shape.TensorShape(None),
+ tensor_shape.TensorShape(None)
+ ] + input_shape_invariants + ta_shape_invariants)
+
+ return init_values, inputs_stacked, shape_invariants
+
+ def _process_cond_unstacked(self, conditions, indices, inputs, output_tas):
+ """Handles case when condition is unstacked.
+
+ Note that all iterations end together. So we don't need to partition the
+ inputs. When all iterations are done, we write the inputs to the
+ TensorArrays. Note that we only write to index 0 of output_tas. Since all
+ iterations end together, they can all be output together.
+ """
+ not_all_done = array_ops.reshape(conditions, [])
+ new_output_tas = []
+ # pylint: disable=cell-var-from-loop
+ for i, out_ta in enumerate(output_tas):
+ inp = inputs[i]
+ new_output_tas.append(
+ control_flow_ops.cond(not_all_done,
+ lambda: out_ta,
+ lambda: out_ta.write(0, inp)))
+ # pylint: enable=cell-var-from-loop
+ return not_all_done, indices, inputs, new_output_tas
+
+ def _process_cond_stacked(self, conditions, indices, inputs, inputs_stacked,
+ output_tas):
+ num_outputs = len(self._outputs)
+ # Compute if all iterations are done.
+ not_all_done = math_ops.reduce_any(conditions)
+ conditions_int = math_ops.cast(conditions, dtypes.int32)
+ # Partition the indices.
+ done_indices, new_indices = data_flow_ops.dynamic_partition(
+ indices, conditions_int, 2)
+
+ new_inputs = []
+ new_output_tas = []
+ for i, (inp, stacked) in enumerate(zip(inputs, inputs_stacked)):
+ # Partition the inputs.
+ if stacked:
+ done_inp, new_inp = data_flow_ops.dynamic_partition(
+ inp, conditions_int, 2)
+ else:
+ # TODO(agarwal): avoid this stacking. See TODO earlier in
+ # _process_cond_unstacked.
+ done_inp = _stack(inp, [array_ops.size(done_indices)]).t
+ new_inp = inp
+ new_inputs.append(new_inp)
+ # For iterations that are done, write them to TensorArrays.
+ if i < num_outputs:
+ out_ta = output_tas[i]
+ # Note that done_indices can be empty. done_inp should also be empty in
+ # that case.
+ new_output_tas.append(out_ta.scatter(done_indices, done_inp))
+ return not_all_done, new_indices, new_inputs, new_output_tas
+
+ def _process_body(self, pfor_input, inputs_stacked,
+ new_indices, cond_stacked, new_inputs,
+ not_all_done):
+ """Convert the body function."""
+
+ def true_fn(control_inputs, body_pfor, body_output, stacked):
+ """Converts the body function for all but last iteration.
+
+ This essentially converts body_output. Additionally, it needs to handle
+ any control dependencies on the NextIteration node. So it creates another
+ Identity node with the converted dependencies.
+ """
+ converted_control_inp = []
+ for x in control_inputs:
+ for t in x.outputs:
+ converted_control_inp.append(body_pfor._convert_helper(t).t)
+ if stacked:
+ # Note convert always does the stacking.
+ output = body_pfor.convert(body_output)
+ else:
+ output, convert_stacked, _ = body_pfor._convert_helper(body_output)
+ assert convert_stacked == stacked, body_output
+ with ops.control_dependencies(converted_control_inp):
+ return array_ops.identity(output)
+
+ body_pfor = self._init_pfor(pfor_input.pfor, new_indices,
+ cond_stacked, new_inputs,
+ inputs_stacked)
+ new_outputs = []
+
+ for i, (body_output, stacked) in enumerate(
+ zip(self._body_outputs, inputs_stacked)):
+ control_inp = self._next_iter_control_inputs[i]
+ out_dtype = body_output.dtype
+ # Note that we want to run the body only if not all pfor iterations are
+ # done. If all are done, we return empty tensors since these values will
+ # not be used. Notice that the value returned by the loop is based on
+ # TensorArrays and not directly on these returned values.
+ # pylint: disable=cell-var-from-loop
+ new_output = control_flow_ops.cond(
+ not_all_done,
+ lambda: true_fn(control_inp, body_pfor, body_output, stacked),
+ lambda: constant_op.constant([], dtype=out_dtype))
+ # pylint: enable=cell-var-from-loop
+ new_outputs.append(new_output)
+ return new_outputs
+
+ def __call__(self, pfor_input):
+ """Converter for the while_loop.
+
+ The conversion of a while_loop is another while_loop.
+
+ The arguments to this converted while_loop are as follows:
+ not_all_done: Boolean scalar Tensor indicating if all the pfor iterations
+ are done.
+ indices: int32 1-D Tensor storing the id of the iterations that are not
+ done.
+ args: Remaining arguments. These can be divided into 3 categories:
+ - First set of arguments are the tensors that correspond to the initial
+ elements of self._enters. The elements that appear in original while
+ loop's `loop_vars`.
+ - The second set of arguments are the tensors that correspond to the
+ remaining elements of self._enters. These are the tensors that directly
+ enter the original while loop body.
+ - Finally, the last set of arguments are TensorArrays. These TensorArrays
+ correspond to the outputs of the original while_loop, i.e. to the
+ elements in self._outputs. Each TensorArray has `PFor.loop_len`
+ elements, i.e. the number of pfor iterations. At the end, the i'th
+ element of each TensorArray will contain the output computed by the
+ i'th iteration of pfor. Note that elements can be written into these
+ tensor arrays in any order, depending on when the corresponding pfor
+ iteration is done.
+ If the original while_loop had `k` tensors in its `loop_vars` and its body
+ directly captured `m` tensors, the `args` will contain `2 * k + m` values.
+
+ In each iteration, the while_loop body recomputes the condition for all
+ active pfor iterations to see which of them are now done. It then partitions
+ all the inputs and passes them along to the converted body. Values for all
+ the iterations that are done are written to TensorArrays indexed by the pfor
+ iteration number. When all iterations are done, the TensorArrays are stacked
+ to get the final value.
+
+ Args:
+ pfor_input: A PForInput object corresponding to the output of any Exit
+ node from this while loop.
+
+ Returns:
+ List of converted outputs.
+ """
+ # Create init_values that will be passed to the while_loop.
+ init_values, inputs_stacked, shape_invariants = self._create_init_values(
+ pfor_input)
+ # Note that we use a list as a hack since we need the nested function body
+ # to set the value of cond_is_stacked. Python 2.x doesn't support nonlocal
+ # variables.
+ cond_is_stacked = [None]
+
+ def cond(not_all_done, *_):
+ return not_all_done
+
+ def body(not_all_done, indices, *args):
+ # See documentation for __call__ for the structure of *args.
+ num_enters = len(self._enters)
+ inputs = args[:num_enters]
+ output_tas = args[num_enters:]
+ # TODO(agarwal): see which outputs have consumers and only populate the
+ # TensorArrays corresponding to those. Or do those paths get trimmed out
+ # from inside the while_loop body?
+ assert len(inputs) >= len(output_tas)
+ assert len(inputs) == len(inputs_stacked)
+
+ # Convert condition
+ with ops.name_scope("while_cond"):
+ # Note that we set cond_stacked to True here. At this point we don't
+ # know if it could be loop invariant, hence the conservative value is
+ # to assume stacked.
+ cond_pfor = self._init_pfor(pfor_input.pfor, indices,
+ cond_stacked=True,
+ inputs=inputs,
+ inputs_stacked=inputs_stacked)
+ conditions, cond_stacked, _ = cond_pfor._convert_helper(self._condition)
+ cond_is_stacked[0] = cond_stacked
+
+ # Recompute the new condition, write outputs of done iterations, and
+ # partition the inputs if needed.
+ if not cond_stacked:
+ (not_all_done, new_indices,
+ new_inputs, new_output_tas) = self._process_cond_unstacked(
+ conditions, indices, inputs, output_tas)
+ else:
+ (not_all_done, new_indices,
+ new_inputs, new_output_tas) = self._process_cond_stacked(
+ conditions, indices, inputs, inputs_stacked, output_tas)
+
+ # Convert body
+ with ops.name_scope("while_body"):
+ # Compute the outputs from the body.
+ new_outputs = self._process_body(pfor_input, inputs_stacked,
+ new_indices, cond_stacked, new_inputs,
+ not_all_done)
+
+ # Note that the first num_outputs new values of inputs are computed using
+ # the body. Rest of them were direct Enters into the condition/body and
+ # the partitioning done earlier is sufficient to give the new value.
+ num_outputs = len(self._outputs)
+ new_args = ([not_all_done, new_indices] + new_outputs + list(
+ new_inputs[num_outputs:]) + new_output_tas)
+ return tuple(new_args)
+
+ while_outputs = control_flow_ops.while_loop(
+ cond, body, init_values, shape_invariants=shape_invariants)
+ output_tas = while_outputs[-len(self._outputs):]
+ outputs = []
+ assert cond_is_stacked[0] is not None
+ for inp_stacked, ta in zip(inputs_stacked, output_tas):
+ if cond_is_stacked[0]:
+ outputs.append(wrap(ta.stack(), True))
+ else:
+ # Note that if while_loop condition is unstacked, all iterations exit at
+ # the same time and we wrote those outputs in index 0 of the tensor
+ # array.
+ outputs.append(wrap(ta.read(0), inp_stacked))
+ return outputs
+
+
+class _PforInput(object):
+ """Input object passed to registered pfor converters."""
+
+ def __init__(self, pfor, op, inputs):
+ """Creates a _PforInput object.
+
+ Args:
+ pfor: PFor converter object.
+ op: the Operation object that is being converted.
+ inputs: list of WrappedTensor objects representing converted values of the
+ inputs of `op`.
+ """
+ self.pfor = pfor
+ self._op = op
+ self._inputs = inputs
+
+ def stack_inputs(self, stack_indices=None):
+ """Stacks unstacked inputs at `stack_indices`.
+
+ Args:
+ stack_indices: indices of inputs at which stacking is done. If None,
+ stacking is done at all indices.
+ """
+ if stack_indices is None:
+ stack_indices = range(len(self._inputs))
+ length = self.pfor.loop_len_vector
+ for i in stack_indices:
+ inp = self._inputs[i]
+ if not inp.is_stacked:
+ self._inputs[i] = _stack(inp.t, length)
+
+ def expanddim_inputs_for_broadcast(self):
+ """Reshapes stacked inputs to prepare them for broadcast.
+
+ Since stacked inputs have an extra leading dimension, automatic broadcasting
+ rules could incorrectly try to expand dimensions before that leading
+ dimension. To avoid that, we reshape these stacked inputs to the maximum
+ rank they will need to be broadcasted to.
+ """
+ if not self._inputs:
+ return
+
+ # Find max rank
+ def _get_rank(x):
+ rank = array_ops.rank(x.t)
+ if not x.is_stacked:
+ rank += 1
+ return rank
+
+ ranks = [_get_rank(x) for x in self._inputs]
+ max_rank = ranks[0]
+ for rank in ranks[1:]:
+ max_rank = math_ops.maximum(rank, max_rank)
+
+ for i, inp in enumerate(self._inputs):
+ if inp.is_stacked:
+ shape = array_ops.shape(inp.t)
+ rank_diff = array_ops.reshape(max_rank - ranks[i], [1])
+ ones = array_ops.tile([1], rank_diff)
+ new_shape = array_ops.concat([shape[:1], ones, shape[1:]], axis=0)
+ self._inputs[i] = wrap(array_ops.reshape(inp.t, new_shape), True)
+
+ @property
+ def inputs(self):
+ return self._inputs
+
+ @property
+ def num_inputs(self):
+ return len(self._inputs)
+
+ def input(self, index):
+ assert len(self._inputs) > index, (index, self._inputs)
+ return self._inputs[index]
+
+ def stacked_input(self, index):
+ t, is_stacked, _ = self.input(index)
+ if not is_stacked:
+ op_type = self.op_type
+ op_def = getattr(self._op, "op_def", None)
+ if op_def is None:
+ input_name = "at index %d" % index
+ else:
+ input_name = "\"%s\"" % op_def.input_arg[index].name
+ raise ValueError("Input %s of op \"%s\" expected to be not loop invariant"
+ ".\nError while converting op %s"
+ "with converted inputs\n%s" % (input_name, op_type,
+ self._op, self.inputs))
+ return t
+
+ def unstacked_input(self, index):
+ t, is_stacked, _ = self.input(index)
+ if is_stacked:
+ op_type = self.op_type
+ op_def = getattr(self._op, "op_def", None)
+ if op_def is None:
+ input_name = "at index %d" % index
+ else:
+ input_name = "\"%s\"" % op_def.input_arg[index].name
+ raise ValueError("Input %s of op \"%s\" expected to be loop invariant"
+ ".\nError while converting op %s"
+ "with converted inputs\n%s" % (input_name, op_type,
+ self._op, self.inputs))
+ return t
+
+ @property
+ def op(self):
+ return self._op
+
+ @property
+ def op_type(self):
+ return self._op.type
+
+ def get_attr(self, attr):
+ return self._op.get_attr(attr)
+
+ @property
+ def outputs(self):
+ return self._op.outputs
+
+ def output(self, index):
+ assert index < len(self._op.outputs)
+ return self._op.outputs[index]
+
+
+_pfor_converter_registry = {}
+
+
+class RegisterPFor(object):
+ """Utility to register converters for pfor.
+
+ Usage:
+ @RegisterPFor(foo_op_type)
+ def _foo_converter(pfor_input):
+ ...
+
+ The above will register conversion function `_foo_converter` for handling
+ conversion of `foo_op_type`. During conversion, the registered function will
+ be called with a single argument of type `_PforInput` which will contain
+ state needed for the conversion. The registered function should output a
+ list of WrappedTensor objects with the same length as the number of outputs
+ of the op being converted. If the op had zero outputs, then it should return
+ an ops.Operation object.
+ """
+
+ def __init__(self, op_type):
+ """Creates an object to register a converter for op with type `op_type`."""
+ self.op_type = op_type
+
+ def __call__(self, converter):
+ name = self.op_type
+ assert name not in _pfor_converter_registry, "Re-registering %s " % name
+ _pfor_converter_registry[name] = converter
+ return converter
+
+
+class RegisterPForWithArgs(RegisterPFor):
+ """Utility to register converters for pfor.
+
+ Usage:
+ @RegisterPForWithArgs(foo_op_type, foo=value, ....)
+ def _foo_converter(pfor_input, op_type, foo=None, ....):
+ ...
+
+ See RegisterPFor for details on the conversion function.
+ `RegisterPForWithArgs` allows binding extra arguments to the
+ conversion function at registration time.
+ """
+
+ def __init__(self, op_type, *args, **kw_args):
+ super(RegisterPForWithArgs, self).__init__(op_type)
+ self._args = args
+ self._kw_args = kw_args
+
+ def __call__(self, converter):
+
+ def _f(pfor_input):
+ return converter(pfor_input, self.op_type, *self._args, **self._kw_args)
+
+ super(RegisterPForWithArgs, self).__call__(_f)
+ return converter
+
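+ # A minimal usage sketch for the registration utilities above. The op type
+ # "Snapshot" and the converter below are illustrative only and are kept
+ # commented out so nothing extra is registered here:
+ #
+ #   @RegisterPFor("Snapshot")
+ #   def _convert_snapshot(pfor_input):
+ #     inp = pfor_input.input(0)
+ #     # Pass the converted input through, preserving its stackedness.
+ #     return wrap(array_ops.identity(inp.t), inp.is_stacked)
+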
+
+def _create_op(op_type, inputs, op_dtypes, attrs=None):
+ """Utility to create an op."""
+ return ops.get_default_graph().create_op(
+ op_type, inputs, op_dtypes, attrs=attrs, compute_device=True)
+
+
+WrappedTensor = collections.namedtuple("WrappedTensor",
+ ["t", "is_stacked", "is_sparse_stacked"])
+"""Wrapper around the result of a Tensor conversion.
+
+The additional fields are useful for keeping track of the conversion state as
+data flows through the ops in the loop body. For every op whose output is a
+Tensor, its converter should return either a WrappedTensor or a list of
+WrappedTensors.
+
+Args:
+ t: The converted tensor
+ is_stacked: True if the tensor is stacked, i.e. represents the results of all
+ the iterations of the loop, where each row i of the tensor corresponds to
+ that op's output on iteration i of the loop. False if the tensor is not
+ stacked, i.e. represents the result of the op on a single iteration of
+ the loop, where the result does not vary between iterations.
+ is_sparse_stacked: True if the tensor corresponds to a component tensor
+ (indices, values, or dense_shape) of a sparse tensor, and has been logically
+ stacked via a sparse conversion.
+"""
+
+
+def wrap(tensor, is_stacked=True, is_sparse_stacked=False):
+ """Helper to create a WrappedTensor object."""
+ assert isinstance(is_stacked, bool)
+ assert isinstance(is_sparse_stacked, bool)
+ assert isinstance(tensor, ops.Tensor)
+ assert not is_sparse_stacked or is_stacked, ("If the wrapped tensor is "
+ "stacked via a sparse "
+ "conversion, it must also be "
+ "stacked.")
+ return WrappedTensor(tensor, is_stacked, is_sparse_stacked)
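+
+ # For instance (shapes illustrative): when vectorizing 8 iterations of an op
+ # whose per-iteration result has shape [3], a converter would typically
+ # return wrap(t, True) with t of shape [8, 3], while a loop invariant result
+ # of shape [3] would be returned as wrap(t, False).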
+
+
+def _fallback_converter(pfor_input):
+ logging.warn("Using a while_loop for converting %s", pfor_input.op_type)
+ output_dtypes = [x.dtype for x in pfor_input.outputs]
+ iters = pfor_input.pfor.loop_len_vector[0]
+
+ def while_body(i, *ta_list):
+ """Body of while loop."""
+ inputs = [
+ x[i, ...] if stacked else x for x, stacked, _ in pfor_input.inputs
+ ]
+ op_outputs = _create_op(
+ pfor_input.op_type,
+ inputs,
+ output_dtypes,
+ attrs=pfor_input.op.node_def.attr).outputs
+
+ outputs = []
+ for out, ta in zip(op_outputs, ta_list):
+ assert isinstance(out, ops.Tensor)
+ outputs.append(ta.write(i, array_ops.expand_dims(out, 0)))
+ return tuple([i + 1] + outputs)
+
+ ta_list = control_flow_ops.while_loop(
+ lambda i, *ta: i < iters, while_body, [0] + [
+ tensor_array_ops.TensorArray(dtype, iters) for dtype in output_dtypes
+ ])[1:]
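+ # Each TensorArray element was written with a leading dimension of 1 (via
+ # the expand_dims above), so concatenating along axis 0 below produces the
+ # stacked result with leading dimension `iters`.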
+ return tuple([wrap(ta.concat(), True) for ta in ta_list])
+
+
+class PFor(object):
+ """Implementation of rewrite of parallel-for loops.
+
+ This class takes a DAG or a set of DAGs representing the body of a
+ parallel-for loop, and adds new operations to the graph that implement
+ functionality equivalent to running that loop body for a specified number of
+ iterations. This new set of nodes may or may not use a tensorflow loop
+ construct.
+
+ The process of conversion does not delete or change any existing operations.
+ It only adds operations that efficiently implement the equivalent
+ functionality. We refer to the added ops as "converted ops".
+
+ The conversion process uses a simple greedy heuristic. It walks the loop body
+ and tries to express the functionality of running each node in a loop with a
+ new set of nodes. When converting an op several cases are possible:
+ - The op is not inside the loop body. Hence it can be used as is.
+ - The op does not depend on the iteration number and is stateless. In this
+ case, it can be used as is.
+ - The op is not stateful, and depends on iteration number only through control
+ dependencies. In this case, we can create a single op with same inputs and
+ attributes, but with "converted" control dependencies.
+ - The op is not stateful, and all its inputs are loop invariant. In this
+ case, similar to above, we can create a single op with same inputs and
+ attributes, but with "converted" control dependencies.
+ - The op is stateful or at least one of the inputs is not loop invariant. In
+ this case, we run the registered converter for that op to create a set of
+ converted ops. All nodes in the set will have converted control dependencies
+ corresponding to control dependencies of the original op. If the op returned
+ multiple outputs, "converted outputs" could be produced by different ops in
+ this set.
+ """
+
+ def __init__(self,
+ loop_var,
+ loop_len,
+ pfor_ops,
+ all_indices=None,
+ all_indices_partitioned=False):
+ """Creates an object to rewrite a parallel-for loop.
+
+ Args:
+ loop_var: ops.Tensor output of a Placeholder operation. The value should
+ be an int32 scalar representing the loop iteration number.
+ loop_len: A scalar or scalar Tensor representing the number of iterations
+ the loop is run for.
+ pfor_ops: List of all ops inside the loop body.
+ all_indices: If not None, an int32 vector with size `loop_len`
+ representing the iteration ids that are still active. These values
+ should be unique and sorted. However they may not be contiguous. This is
+ typically the case when inside a control flow construct which has
+ partitioned the indices of the iterations that are being converted.
+ all_indices_partitioned: If True, this object is being constructed from a
+ control flow construct where not all the pfor iterations are guaranteed
+ to be active.
+ """
+ assert isinstance(loop_var, ops.Tensor)
+ assert loop_var.op.type == "Placeholder"
+ self._loop_var = loop_var
+ loop_len_value = tensor_util.constant_value(loop_len)
+ if loop_len_value is not None:
+ loop_len = loop_len_value
+ self._loop_len_vector = array_ops.reshape(loop_len, [1])
+ self._all_indices_partitioned = all_indices_partitioned
+ if all_indices_partitioned:
+ assert all_indices is not None
+ self.all_indices = (
+ math_ops.range(loop_len) if all_indices is None else all_indices)
+
+ self._conversion_map = {}
+ self._conversion_map[loop_var] = wrap(self.all_indices, True)
+ self._pfor_ops = set(pfor_ops)
+ self._pfor_op_ids = set([x._id for x in pfor_ops])
+
+ def op_is_inside_loop(self, op):
+ """True if op was created inside the pfor loop body."""
+ assert isinstance(op, ops.Operation)
+ # Note that we use self._pfor_op_ids for the check and not self._pfor_ops
+ # since it appears the TensorFlow API could return different python
+ # objects representing the same Operation node.
+ return op._id in self._pfor_op_ids
+
+ def _convert_sparse(self, y):
+ """Returns the converted value corresponding to SparseTensor y.
+
+ For SparseTensors, instead of stacking the component tensors separately,
+ resulting in component tensors with shapes (N, m, rank), (N, m), and (N,
+ rank) respectively for indices, values, and dense_shape (where N is the loop
+ length and m is the number of sparse tensor values per loop iter), we want
+ to logically stack the SparseTensors, to create a SparseTensor whose
+ components are size (N * m, rank + 1), (N * m, ), and (rank + 1,)
+ respectively.
+
+ Here, we try to get the conversion of each component tensor.
+ If the tensors are stacked via a sparse conversion, return the resulting
+ SparseTensor composed of the converted components. Otherwise, the component
+ tensors are either unstacked or stacked naively. In the latter case, we
+ unstack the component tensors to reform loop_len SparseTensor elements,
+ then correctly batch them.
+
+ The unstacked tensors must have the same rank. Each dimension of each
+ SparseTensor will expand to be the largest among all SparseTensor elements
+ for that dimension. For example, if there are N SparseTensors of rank 3
+ being stacked, with N dense shapes, where the i_th shape is (x_i, y_i, z_i),
+ the new dense shape will be (N, max_i(x_i), max_i(y_i), max_i(z_i)).
+
+ Args:
+ y: A tf.SparseTensor.
+
+ Returns:
+ A tf.SparseTensor that is the converted value corresponding to y.
+ """
+ outputs = [
+ self._convert_helper(t) for t in (y.indices, y.values, y.dense_shape)
+ ]
+ assert all(isinstance(o, WrappedTensor) for o in outputs)
+
+ if all(w.is_sparse_stacked for w in outputs):
+ return sparse_tensor.SparseTensor(*[w.t for w in outputs])
+
+ assert not any(w.is_sparse_stacked for w in outputs), (
+ "Error converting SparseTensor. All components should be logically "
+ "stacked, or none.")
+
+ # If component tensors were not sparsely stacked, they are either unstacked
+ # or stacked without knowledge that they are components of sparse tensors.
+ # In this case, we have to restack them.
+ return self._restack_sparse_tensor_logically(
+ *[self._unwrap_or_tile(w) for w in outputs])
+
+ def _restack_sparse_tensor_logically(self, indices, values, shape):
+ sparse_tensor_rank = indices.get_shape()[-1].value
+ if sparse_tensor_rank is not None:
+ sparse_tensor_rank += 1
+
+ def map_fn(args):
+ res = gen_sparse_ops.serialize_sparse(
+ args[0], args[1], args[2], out_type=dtypes.variant)
+ return res
+
+ # Applies a map function to the component tensors to serialize each
+ # sparse tensor element and batch them all, then deserializes the batch.
+ # TODO(rachelim): Try to do this without map_fn -- add the right offsets
+ # to shape and indices tensors instead.
+ result = functional_ops.map_fn(
+ map_fn, [indices, values, shape], dtype=dtypes.variant)
+ return sparse_ops.deserialize_sparse(
+ result, dtype=values.dtype, rank=sparse_tensor_rank)
+
+ def _unwrap_or_tile(self, wrapped_tensor):
+ """Given a wrapped tensor, unwrap if stacked. Otherwise, tiles it."""
+ output, is_stacked = wrapped_tensor.t, wrapped_tensor.is_stacked
+ if is_stacked:
+ return output
+ else:
+ return _stack(output, self._loop_len_vector).t
+
+ def convert(self, y):
+ """Returns the converted value corresponding to y.
+
+ Args:
+ y: A ops.Tensor or a ops.Operation object. If latter, y should not have
+ any outputs.
+
+ Returns:
+ If y does not need to be converted, it returns y as is. Else it returns
+ the "converted value" corresponding to y.
+ """
+ if isinstance(y, sparse_tensor.SparseTensor):
+ return self._convert_sparse(y)
+ output = self._convert_helper(y)
+ if isinstance(output, WrappedTensor):
+ assert isinstance(y, ops.Tensor)
+ return self._unwrap_or_tile(output)
+ else:
+ assert isinstance(y, ops.Operation)
+ assert not y.outputs
+ assert isinstance(output, ops.Operation)
+ return output
+
+ def _was_converted(self, t):
+ """True if t is not a conversion of itself."""
+ converted_t = self._conversion_map[t]
+ return converted_t.t is not t
+
+ def _add_conversion(self, old_output, new_output):
+ self._conversion_map[old_output] = new_output
+
+ def _convert_helper(self, op_or_tensor):
+ stack = [op_or_tensor]
+ while stack:
+ y = stack[0]
+ if y in self._conversion_map:
+ assert isinstance(self._conversion_map[y],
+ (WrappedTensor, ops.Operation))
+ stack.pop(0)
+ continue
+ if isinstance(y, ops.Operation):
+ assert not y.outputs, (
+ "We only support converting Operation objects with no outputs. "
+ "Got %s", y)
+ y_op = y
+ else:
+ assert isinstance(y, ops.Tensor), y
+ y_op = y.op
+
+ is_while_loop = y_op.type == "Exit"
+ if is_while_loop:
+ while_op = WhileOp(y, pfor_ops=self._pfor_ops)
+ is_inside_loop = while_op.is_inside_loop
+ # If all nodes in the while_loop graph were created inside the pfor, we
+ # treat the whole loop subgraph as a single op (y_op) and try to convert
+ # it. For while_loops that are created completely or partially outside,
+ # we treat them as external and should be able to simply return the Exit
+ # node output as is without needing any conversion. Note that for
+ # while_loops that are partially constructed inside, we assume they will
+ # be loop invariant. If that is not the case, it will create runtime
+ # errors since the converted graph would depend on the self._loop_var
+ # placeholder.
+ if is_inside_loop:
+ y_op = while_op
+ else:
+ is_inside_loop = self.op_is_inside_loop(y_op)
+
+ # If this op was not created inside the loop body, we will return as is.
+ # 1. Convert inputs and control inputs.
+
+ def _add_to_stack(x):
+ if x not in self._conversion_map:
+ stack.insert(0, x)
+ return True
+ else:
+ return False
+
+ if is_inside_loop:
+ added_to_stack = False
+ for inp in y_op.inputs:
+ added_to_stack |= _add_to_stack(inp)
+ for cinp in y_op.control_inputs:
+ if cinp.outputs:
+ for t in cinp.outputs:
+ added_to_stack |= _add_to_stack(t)
+ else:
+ added_to_stack |= _add_to_stack(cinp)
+ if added_to_stack:
+ continue
+
+ converted_inputs = [self._conversion_map[inp] for inp in y_op.inputs]
+ some_input_converted = any(
+ [self._was_converted(x) for x in y_op.inputs])
+ some_input_stacked = any([x.is_stacked for x in converted_inputs])
+
+ converted_control_ops = set()
+ some_control_input_converted = False
+ for cinp in y_op.control_inputs:
+ if cinp.outputs:
+ for t in cinp.outputs:
+ converted_t = self._conversion_map[t]
+ if self._was_converted(t):
+ some_control_input_converted = True
+ converted_control_ops.add(converted_t.t.op)
+ else:
+ converted_cinp = self._conversion_map[cinp]
+ assert isinstance(converted_cinp, ops.Operation)
+ if converted_cinp != cinp:
+ some_control_input_converted = True
+ converted_control_ops.add(converted_cinp)
+ converted_control_ops = list(converted_control_ops)
+ is_stateful = _is_stateful_pfor_op(y_op)
+ else:
+ converted_inputs = []
+ converted_control_ops = []
+ logging.vlog(3, "converting op:%s\ninputs:%s\ncontrol_inputs:%s", y_op,
+ converted_inputs, converted_control_ops)
+
+ # 2. Convert y_op
+ # If converting a while_loop, we let the while_loop convertor deal with
+ # putting the control dependencies appropriately.
+ control_dependencies = [] if is_while_loop else converted_control_ops
+ with ops.control_dependencies(control_dependencies), ops.name_scope(
+ y_op.name + "/pfor/"):
+ # None of the inputs and control inputs were converted.
+ if (not is_inside_loop or
+ (not is_stateful and not some_input_converted and
+ not some_control_input_converted)):
+ if y == y_op:
+ assert not isinstance(y_op, WhileOp)
+ new_outputs = y_op
+ else:
+ new_outputs = [wrap(x, False) for x in y_op.outputs]
+ elif not (is_stateful or is_while_loop or some_input_stacked):
+ # All inputs are unstacked or unconverted but some control inputs are
+ # converted.
+ # TODO(rachelim): Handle the case where some inputs are sparsely
+ # stacked (i.e. any([x.is_sparse_stacked for x in converted_inputs]))
+ new_op = _create_op(y_op.type, [x.t for x in converted_inputs],
+ [x.dtype for x in y_op.outputs],
+ y_op.node_def.attr)
+ if y == y_op:
+ new_outputs = new_op
+ else:
+ new_outputs = [wrap(x, False) for x in new_op.outputs]
+ else:
+ # Either some inputs are not loop invariant or op is stateful.
+ if hasattr(y_op, "pfor_converter"):
+ converter = y_op.pfor_converter
+ else:
+ converter = _pfor_converter_registry.get(y_op.type, None)
+ if converter is None:
+ if flags.FLAGS.op_conversion_fallback_to_while_loop:
+ converter = _fallback_converter
+ else:
+ raise ValueError(
+ "No converter defined for %s\n%s\ninputs: %s. "
+ "\nEither add a converter or set "
+ "--op_conversion_fallback_to_while_loop=True, "
+ "which may run slower" % (y_op.type, y_op, converted_inputs))
+ # TODO(rachelim): Handle the case where some inputs are sparsely
+ # stacked. We should only call the converter if it supports handling
+ # those inputs.
+ new_outputs = converter(_PforInput(self, y_op, converted_inputs))
+ if isinstance(new_outputs, WrappedTensor):
+ new_outputs = [new_outputs]
+ assert isinstance(new_outputs,
+ (list, tuple, ops.Operation)), new_outputs
+ logging.vlog(2, "converted %s %s", y_op, new_outputs)
+
+ # Insert into self._conversion_map
+ if y == y_op:
+ assert isinstance(new_outputs, ops.Operation)
+ self._add_conversion(y_op, new_outputs)
+ else:
+ for old_output, new_output in zip(y_op.outputs, new_outputs):
+ assert isinstance(new_output, WrappedTensor), (new_output, y, y_op)
+ self._add_conversion(old_output, new_output)
+ stack.pop(0)
+
+ return self._conversion_map[op_or_tensor]
+
+ @property
+ def loop_len_vector(self):
+ """Returns a single element vector whose value is number of iterations."""
+ return self._loop_len_vector
+
+ @property
+ def loop_var(self):
+ """Returns placeholder loop variable."""
+ return self._loop_var
+
+ @property
+ def pfor_ops(self):
+ return self._pfor_ops
+
+ @property
+ def all_indices_partitioned(self):
+ """all_indices_partitioned property.
+
+ Returns:
+ True if we are inside a control flow construct and not all pfor iterations
+ may be active.
+ """
+ return self._all_indices_partitioned
+
+# nn_ops
+
+
+def _flatten_first_two_dims(x):
+ """Merges first two dimensions."""
+ old_shape = array_ops.shape(x)
+ new_shape = array_ops.concat([[-1], old_shape[2:]], axis=0)
+ return array_ops.reshape(x, new_shape)
+
+
+def _unflatten_first_dim(x, first_dim):
+ """Splits first dimension into [first_dim, -1]."""
+ old_shape = array_ops.shape(x)
+ new_shape = array_ops.concat([first_dim, [-1], old_shape[1:]], axis=0)
+ return array_ops.reshape(x, new_shape)
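+
+ # Round-trip sketch (shapes illustrative): for x of shape [8, 4, 5],
+ # _flatten_first_two_dims(x) has shape [32, 5], and passing that result to
+ # _unflatten_first_dim(y, [8]) restores shape [8, 4, 5]. Note that first_dim
+ # is a 1-element vector, matching PFor.loop_len_vector.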
+
+
+def _inputs_with_flattening(pfor_input, input_indices):
+ """Stacks and flattens first dim of inputs at indices `input_indices`."""
+ if input_indices is None:
+ input_indices = []
+ pfor_input.stack_inputs(stack_indices=input_indices)
+ inputs = []
+ for i in range(pfor_input.num_inputs):
+ if i in input_indices:
+ inp = pfor_input.stacked_input(i)
+ inp = _flatten_first_two_dims(inp)
+ else:
+ inp = pfor_input.unstacked_input(i)
+ inputs.append(inp)
+ return inputs
+
+
+@RegisterPForWithArgs("Conv2D", dims=[0])
+@RegisterPForWithArgs("AvgPool", dims=[0])
+@RegisterPForWithArgs("MaxPool", dims=[0])
+@RegisterPForWithArgs("MaxPoolGrad", dims=[0, 1, 2])
+@RegisterPForWithArgs("SoftmaxCrossEntropyWithLogits", dims=[0, 1])
+def _convert_flatten_batch(pfor_input, op_type, dims):
+ del op_type
+ inputs = _inputs_with_flattening(pfor_input, dims)
+ outputs = _create_op(
+ pfor_input.op_type,
+ inputs, [x.dtype for x in pfor_input.outputs],
+ attrs=pfor_input.op.node_def.attr).outputs
+ n = pfor_input.pfor.loop_len_vector
+ outputs = [_unflatten_first_dim(x, n) for x in outputs]
+ return [wrap(x, True) for x in outputs]
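+
+ # E.g. for Conv2D (shapes illustrative): a stacked input of shape
+ # [n, b, h, w, c] is flattened to [n * b, h, w, c], the op runs once on the
+ # merged batch, and each output is split back into [n, b, ...] via
+ # _unflatten_first_dim.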
+
+
+_channel_flatten_input_cache = {}
+
+
+def _channel_flatten_input(x, data_format):
+ """Merge the stack dimension with the channel dimension.
+
+ If S is pfor's stacking dimension, then:
+ - for SNCHW, we transpose to NSCHW. If the N dimension has size 1, the
+ transpose should be cheap.
+ - for SNHWC, we transpose to NHWSC.
+ We then merge the S and C dimensions.
+
+ Args:
+ x: ops.Tensor to transform.
+ data_format: "NCHW" or "NHWC".
+
+ Returns:
+ A 3-element tuple with the transformed value, along with the shape for
+ reshape and order for transpose required to transform back.
+ """
+
+ graph = ops.get_default_graph()
+ cache_key = (graph, x, data_format)
+ if cache_key not in _channel_flatten_input_cache:
+ x_shape = array_ops.shape(x)
+ if data_format == b"NCHW":
+ order = [1, 0, 2, 3, 4]
+ shape = array_ops.concat([x_shape[1:2], [-1], x_shape[3:]], axis=0)
+ reverse_order = order
+ else:
+ order = [1, 2, 3, 0, 4]
+ shape = array_ops.concat([x_shape[1:4], [-1]], axis=0)
+ reverse_order = [3, 0, 1, 2, 4]
+ # Move S dimension next to C dimension.
+ x = array_ops.transpose(x, order)
+ reverse_shape = array_ops.shape(x)
+ # Reshape to merge the S and C dimension.
+ x = array_ops.reshape(x, shape)
+ outputs = x, reverse_order, reverse_shape
+ _channel_flatten_input_cache[cache_key] = outputs
+ else:
+ outputs = _channel_flatten_input_cache[cache_key]
+ return outputs
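+
+ # Sketch (shapes illustrative): for data_format "NHWC" and x of shape
+ # [s, n, h, w, c], the transpose above yields [n, h, w, s, c] and the
+ # reshape merges the last two axes into [n, h, w, s * c]. reverse_shape and
+ # reverse_order let callers undo these two steps after running the kernel.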
+
+
+# Note that with training=True, running FusedBatchNorm on individual examples
+# is very different from running FusedBatchNorm on a batch of those examples.
+# This is because, for the latter case, the operation can be considered as first
+# computing the mean and variance over all the examples and then using these
+# to scale all those examples. This creates a data dependency between these
+# different "iterations" since the inputs to the scaling step depends on the
+# statistics coming from all these inputs.
+# As with other kernels, the conversion here effectively runs the kernel
+# independently for each iteration, and returns outputs by stacking outputs from
+# each of those iterations.
+@RegisterPFor("FusedBatchNorm")
+def _convert_fused_batch_norm(pfor_input):
+ is_training = pfor_input.get_attr("is_training")
+ # When BatchNorm is used with training=False, mean and variance are provided
+ # externally and used as is by the op. Thus, we can merge the S and N
+ # dimensions as we do for regular operations.
+ # When BatchNorm is used with training=True, mean and variance are computed
+ # for each channel across the batch dimension (first one). If we merge S and N
+ # dimensions, mean and variances will be computed over a larger set. So, we
+ # merge the S and C dimensions instead.
+ if not is_training:
+ # We return zeros for batch_mean and batch_variance output. Note that CPU
+ # and GPU seem to have different behavior for those two outputs. CPU outputs
+ # zero because these values are not used during inference. GPU outputs
+ # something, probably real means and variances.
+ inputs = _inputs_with_flattening(pfor_input, [0])
+ outputs = _create_op(
+ pfor_input.op_type,
+ inputs, [x.dtype for x in pfor_input.outputs],
+ attrs=pfor_input.op.node_def.attr).outputs
+ y = outputs[0]
+ n = pfor_input.pfor.loop_len_vector
+ y = _unflatten_first_dim(y, n)
+ mean = pfor_input.unstacked_input(3)
+ zeros = array_ops.zeros_like(mean)
+ return [wrap(y, True), wrap(zeros, False), wrap(zeros, False)]
+
+ pfor_input.stack_inputs()
+ data_format = pfor_input.get_attr("data_format")
+ # We merge the first dimension with the "C" dimension, run FusedBatchNorm, and
+ # then transpose back.
+ x = pfor_input.stacked_input(0)
+ x, reverse_order, reverse_shape = _channel_flatten_input(x, data_format)
+ # Note that we stack all the other inputs as well so that they are the same
+ # size as the new size of the channel dimension.
+ inputs = [x] + [
+ array_ops.reshape(pfor_input.stacked_input(i), [-1])
+ for i in range(1, pfor_input.num_inputs)
+ ]
+ outputs = _create_op(
+ pfor_input.op_type,
+ inputs, [x.dtype for x in pfor_input.outputs],
+ attrs=pfor_input.op.node_def.attr).outputs
+ y = outputs[0]
+ y = array_ops.reshape(y, reverse_shape)
+ y = array_ops.transpose(y, reverse_order)
+ n = pfor_input.pfor.loop_len_vector
+ outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
+ outputs = [y] + outputs
+ return [wrap(x, True) for x in outputs]
+
+
+@RegisterPFor("FusedBatchNormGrad")
+def _convert_fused_batch_norm_grad(pfor_input):
+ pfor_input.stack_inputs()
+ data_format = pfor_input.get_attr("data_format")
+ y_backprop = pfor_input.stacked_input(0)
+ y_backprop, _, _ = _channel_flatten_input(y_backprop, data_format)
+ x = pfor_input.stacked_input(1)
+ x, x_reverse_order, x_reverse_shape = _channel_flatten_input(x, data_format)
+ inputs = [y_backprop, x] + [
+ array_ops.reshape(pfor_input.stacked_input(i), [-1])
+ for i in range(2, pfor_input.num_inputs)
+ ]
+ outputs = _create_op(
+ pfor_input.op_type,
+ inputs, [x.dtype for x in pfor_input.outputs],
+ attrs=pfor_input.op.node_def.attr).outputs
+ x_backprop = outputs[0]
+ x_backprop = array_ops.reshape(x_backprop, x_reverse_shape)
+ x_backprop = array_ops.transpose(x_backprop, x_reverse_order)
+ n = pfor_input.pfor.loop_len_vector
+ outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
+ outputs = [x_backprop] + outputs
+ return [wrap(output, True) for output in outputs]
+
+
+@RegisterPForWithArgs("Conv2DBackpropInput", flatten_dims=[2], shape_dim=0)
+@RegisterPForWithArgs("AvgPoolGrad", flatten_dims=[1], shape_dim=0)
+def _convert_flatten_batch_shape_input(pfor_input, op_type, flatten_dims,
+ shape_dim):
+ del op_type
+ inputs = _inputs_with_flattening(pfor_input, flatten_dims)
+ n = pfor_input.pfor.loop_len_vector
+ # Adjust the `input_sizes` input.
+ ones = array_ops.ones(
+ [array_ops.shape(inputs[shape_dim])[0] - 1], dtype=n.dtype)
+ inputs[shape_dim] *= array_ops.concat([n, ones], axis=0)
+ outputs = _create_op(
+ pfor_input.op_type,
+ inputs, [x.dtype for x in pfor_input.outputs],
+ attrs=pfor_input.op.node_def.attr).outputs
+ outputs = [_unflatten_first_dim(x, n) for x in outputs]
+ return [wrap(x, True) for x in outputs]
+
+
+@RegisterPFor("Conv2DBackpropFilter")
+def _convert_conv2d_backprop_filter(pfor_input):
+ pfor_input.stack_inputs(stack_indices=[2])
+ inputs, inputs_stacked, _ = pfor_input.input(0)
+ filter_sizes = pfor_input.unstacked_input(1)
+ grads = pfor_input.stacked_input(2)
+ strides = pfor_input.get_attr("strides")
+ padding = pfor_input.get_attr("padding")
+ use_cudnn_on_gpu = pfor_input.get_attr("use_cudnn_on_gpu")
+ data_format = pfor_input.get_attr("data_format")
+ dilations = pfor_input.get_attr("dilations")
+ if inputs_stacked:
+ # TODO(agarwal): Implement this efficiently.
+ logging.warn("Conv2DBackpropFilter uses a while_loop. Fix that!")
+
+ def while_body(i, ta):
+ inp_i = inputs[i, ...]
+ grad_i = grads[i, ...]
+ output = nn_ops.conv2d_backprop_filter(
+ inp_i,
+ filter_sizes,
+ grad_i,
+ strides=strides,
+ padding=padding,
+ use_cudnn_on_gpu=use_cudnn_on_gpu,
+ data_format=data_format,
+ dilations=dilations)
+ return i + 1, ta.write(i, array_ops.expand_dims(output, 0))
+
+ n = array_ops.reshape(pfor_input.pfor.loop_len_vector, [])
+ _, ta = control_flow_ops.while_loop(
+ lambda i, ta: i < n, while_body,
+ (0, tensor_array_ops.TensorArray(inputs.dtype, n)))
+ output = ta.concat()
+ return wrap(output, True)
+ else:
+ # We merge the stack dimension with the channel dimension of the gradients
+ # and pretend we had a larger filter (see change to filter_sizes below).
+ # Once the filter backprop is computed, we reshape and transpose back
+ # appropriately.
+ grads, _, _ = _channel_flatten_input(grads, data_format)
+ n = pfor_input.pfor.loop_len_vector
+ old_filter_sizes = filter_sizes
+ filter_sizes *= array_ops.concat([[1, 1, 1], n], axis=0)
+ output = nn_ops.conv2d_backprop_filter(
+ inputs,
+ filter_sizes,
+ grads,
+ strides=strides,
+ padding=padding,
+ use_cudnn_on_gpu=use_cudnn_on_gpu,
+ data_format=data_format,
+ dilations=dilations)
+ new_filter_shape = array_ops.concat([old_filter_sizes[:3], n, [-1]], axis=0)
+ output = array_ops.reshape(output, new_filter_shape)
+ output = array_ops.transpose(output, [3, 0, 1, 2, 4])
+ return wrap(output, True)
+
+
+# array_ops
+
+
+@RegisterPForWithArgs("Identity", array_ops.identity)
+@RegisterPForWithArgs("StopGradient", array_ops.stop_gradient)
+def _convert_identity(pfor_input, op_type, op_func):
+ del op_type
+ return wrap(op_func(*[x.t for x in pfor_input.inputs]), True)
+
+
+@RegisterPFor("Reshape")
+def _convert_reshape(pfor_input):
+ t = pfor_input.stacked_input(0)
+ shape = pfor_input.unstacked_input(1)
+ new_dim = array_ops.shape(t)[:1]
+ new_shape = array_ops.concat([new_dim, shape], axis=0)
+ return wrap(array_ops.reshape(t, new_shape), True)
+
+
+@RegisterPFor("ExpandDims")
+def _convert_expanddims(pfor_input):
+ t = pfor_input.stacked_input(0)
+ dim = pfor_input.unstacked_input(1)
+ dim += math_ops.cast(dim >= 0, dtypes.int32)
+ return wrap(array_ops.expand_dims(t, axis=dim), True)
+
+
+@RegisterPFor("Slice")
+def _convert_slice(pfor_input):
+ t = pfor_input.stacked_input(0)
+ begin = pfor_input.unstacked_input(1)
+ size = pfor_input.unstacked_input(2)
+ begin = array_ops.concat([[0], begin], axis=0)
+ size = array_ops.concat([[-1], size], axis=0)
+ return wrap(array_ops.slice(t, begin, size), True)
+
+
+@RegisterPFor("Tile")
+def _convert_tile(pfor_input):
+ t = pfor_input.stacked_input(0)
+ multiples = pfor_input.unstacked_input(1)
+ multiples = array_ops.concat([[1], multiples], 0)
+ return wrap(array_ops.tile(t, multiples), True)
+
+
+@RegisterPFor("Pack")
+def _convert_pack(pfor_input):
+ pfor_input.stack_inputs()
+ axis = pfor_input.get_attr("axis")
+ if axis >= 0:
+ axis += 1
+ return wrap(
+ array_ops.stack([x.t for x in pfor_input.inputs], axis=axis), True)
+
+
+@RegisterPFor("Unpack")
+def _convert_unpack(pfor_input):
+ value = pfor_input.stacked_input(0)
+ axis = pfor_input.get_attr("axis")
+ if axis >= 0:
+ axis += 1
+ num = pfor_input.get_attr("num")
+ return [wrap(x, True) for x in array_ops.unstack(value, axis=axis, num=num)]
+
+
+@RegisterPFor("Pad")
+def _convert_pad(pfor_input):
+ t = pfor_input.stacked_input(0)
+ paddings = pfor_input.unstacked_input(1)
+ paddings = array_ops.concat([[[0, 0]], paddings], 0)
+ return wrap(array_ops.pad(t, paddings, mode="CONSTANT"), True)
+
+
+@RegisterPFor("Split")
+def _convert_split(pfor_input):
+ split_dim = pfor_input.unstacked_input(0)
+ t = pfor_input.stacked_input(1)
+ num_split = pfor_input.get_attr("num_split")
+ split_dim += math_ops.cast(split_dim >= 0, dtypes.int32)
+ return [wrap(x, True) for x in array_ops.split(t, num_split, axis=split_dim)]
+
+
+@RegisterPFor("Transpose")
+def _convert_transpose(pfor_input):
+ t = pfor_input.stacked_input(0)
+ perm = pfor_input.unstacked_input(1)
+ new_perm = array_ops.concat([[0], perm + 1], axis=0)
+ return wrap(array_ops.transpose(t, new_perm), True)
+
+
+@RegisterPFor("ZerosLike")
+def _convert_zeroslike(pfor_input):
+ t = pfor_input.stacked_input(0)
+ shape = array_ops.shape(t)[1:]
+ return wrap(array_ops.zeros(shape, dtype=t.dtype), False)
+
+
+@RegisterPFor("Gather")
+@RegisterPFor("GatherV2")
+def _convert_gather(pfor_input):
+ param, param_stacked, _ = pfor_input.input(0)
+ indices, indices_stacked, _ = pfor_input.input(1)
+ op_type = pfor_input.op_type
+ if op_type == "Gather":
+ validate_indices = pfor_input.get_attr("validate_indices")
+ axis = 0
+ else:
+ validate_indices = None
+ axis = pfor_input.unstacked_input(2)
+ axis_value = tensor_util.constant_value(axis)
+ if axis_value is not None:
+ axis = axis_value
+ if indices_stacked and not param_stacked:
+ if indices == pfor_input.pfor.all_indices and axis == 0:
+ param_shape0 = param.shape[0].value
+ indices_shape0 = indices.shape[0].value
+ if param_shape0 is not None and indices_shape0 == param_shape0:
+ # Note that with loops and conditionals, indices may not be contiguous.
+ # However they will be sorted and unique. So if the shape matches, then
+ # it must be picking up all the rows of param.
+ return wrap(param, True)
+ # TODO(agarwal): use array_ops.slice here.
+ output = array_ops.gather(
+ param, indices, validate_indices=validate_indices, axis=axis)
+ if axis != 0:
+ axis = control_flow_ops.cond(
+ axis < 0, lambda: axis + array_ops.rank(param), lambda: axis)
+ order = array_ops.concat(
+ [[axis],
+ math_ops.range(axis),
+ math_ops.range(axis + 1, array_ops.rank(output))],
+ axis=0)
+ output = control_flow_ops.cond(
+ math_ops.equal(axis, 0), lambda: output,
+ lambda: array_ops.transpose(output, order))
+ return wrap(output, True)
+ if param_stacked:
+ loop_len_vector = pfor_input.pfor.loop_len_vector
+ pfor_input.stack_inputs(stack_indices=[1])
+ indices = pfor_input.stacked_input(1)
+ param_flat = _flatten_first_two_dims(param)
+
+ # Recompute indices to handle stacked param.
+ indices_offset = math_ops.range(
+ loop_len_vector[0]) * array_ops.shape(param)[1]
+ # Reshape indices_offset to allow broadcast addition
+ ones = array_ops.ones([array_ops.rank(indices) - 1], dtype=dtypes.int32)
+ new_shape = array_ops.concat([loop_len_vector, ones], axis=0)
+ indices_offset = array_ops.reshape(indices_offset, new_shape)
+ indices += indices_offset
+
+ # TODO(agarwal): handle axis != 0. May need to transpose param or
+ # array_ops.gather_nd.
+ if isinstance(axis, ops.Tensor):
+ axis_value = tensor_util.constant_value(axis)
+ else:
+ try:
+ axis_value = int(axis)
+ except TypeError:
+ axis_value = None
+ msg = ("Gather, where indices and param are both loop dependent, currently "
+ "requires axis=0")
+ if axis_value is not None and axis_value != 0:
+ raise ValueError("Error while converting %s. %s. Got axis=%d" %
+ (pfor_input.op, msg, axis))
+ with ops.control_dependencies(
+ [check_ops.assert_equal(axis, 0, message=msg)]):
+ output = array_ops.gather(param_flat, indices)
+ return wrap(output, True)
+
+
+@RegisterPFor("ConcatV2")
+def _convert_concatv2(pfor_input):
+ n = pfor_input.num_inputs
+ pfor_input.stack_inputs(stack_indices=range(n - 1))
+ axis = pfor_input.unstacked_input(n - 1)
+ axis += math_ops.cast(axis >= 0, axis.dtype)
+ return wrap(
+ array_ops.concat([x.t for x in pfor_input.inputs[:n - 1]], axis=axis),
+ True)
+
+
+@RegisterPFor("StridedSlice")
+def _convert_strided_slice(pfor_input):
+ inp = pfor_input.stacked_input(0)
+ begin = pfor_input.unstacked_input(1)
+ end = pfor_input.unstacked_input(2)
+ strides = pfor_input.unstacked_input(3)
+ begin_mask = pfor_input.get_attr("begin_mask")
+ end_mask = pfor_input.get_attr("end_mask")
+ ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
+ new_axis_mask = pfor_input.get_attr("new_axis_mask")
+ shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
+
+ begin = array_ops.concat([[0], begin], axis=0)
+ end = array_ops.concat([[0], end], axis=0)
+ strides = array_ops.concat([[1], strides], axis=0)
+ begin_mask = begin_mask << 1 | 1
+ end_mask = end_mask << 1 | 1
+ ellipsis_mask <<= 1
+ new_axis_mask <<= 1
+ shrink_axis_mask <<= 1
+ return wrap(
+ array_ops.strided_slice(
+ inp,
+ begin,
+ end,
+ strides,
+ begin_mask=begin_mask,
+ end_mask=end_mask,
+ ellipsis_mask=ellipsis_mask,
+ new_axis_mask=new_axis_mask,
+ shrink_axis_mask=shrink_axis_mask), True)
+
+
+@RegisterPFor("StridedSliceGrad")
+def _convert_strided_slice_grad(pfor_input):
+ shape = pfor_input.unstacked_input(0)
+ begin = pfor_input.unstacked_input(1)
+ end = pfor_input.unstacked_input(2)
+ strides = pfor_input.unstacked_input(3)
+ dy = pfor_input.stacked_input(4)
+ begin_mask = pfor_input.get_attr("begin_mask")
+ end_mask = pfor_input.get_attr("end_mask")
+ ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
+ new_axis_mask = pfor_input.get_attr("new_axis_mask")
+ shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
+
+ shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
+ begin = array_ops.concat([[0], begin], axis=0)
+ end = array_ops.concat([[0], end], axis=0)
+ strides = array_ops.concat([[1], strides], axis=0)
+ begin_mask = begin_mask << 1 | 1
+ end_mask = end_mask << 1 | 1
+ ellipsis_mask <<= 1
+ new_axis_mask <<= 1
+ shrink_axis_mask <<= 1
+ return wrap(
+ array_ops.strided_slice_grad(
+ shape,
+ begin,
+ end,
+ strides,
+ dy,
+ begin_mask=begin_mask,
+ end_mask=end_mask,
+ ellipsis_mask=ellipsis_mask,
+ new_axis_mask=new_axis_mask,
+ shrink_axis_mask=shrink_axis_mask), True)
+
+
+# math_ops
+
+
+@RegisterPFor("MatMul")
+def _convert_matmul(pfor_input):
+ # TODO(agarwal): Check if tiling is faster than two transposes.
+ a, a_stacked, _ = pfor_input.input(0)
+ b, b_stacked, _ = pfor_input.input(1)
+ tr_a = pfor_input.get_attr("transpose_a")
+ tr_b = pfor_input.get_attr("transpose_b")
+ if a_stacked and b_stacked:
+ output = wrap(math_ops.matmul(a, b, adjoint_a=tr_a, adjoint_b=tr_b), True)
+ return output
+ elif a_stacked:
+ if tr_a:
+ a = array_ops.transpose(a, [0, 2, 1])
+ if a.shape.is_fully_defined():
+ x, y, z = a.shape
+ else:
+ x, y, z = [
+ array_ops.reshape(i, [])
+ for i in array_ops.split(array_ops.shape(a), 3)
+ ]
+ a = array_ops.reshape(a, [x * y, z])
+ prod = math_ops.matmul(a, b, transpose_b=tr_b)
+ return wrap(array_ops.reshape(prod, [x, y, -1]), True)
+ else:
+ assert b_stacked
+ if tr_b:
+ perm = [2, 0, 1]
+ b = array_ops.transpose(b, perm)
+ else:
+ # As an optimization, if one of the first two dimensions is 1, then we can
+ # reshape instead of transpose.
+ # TODO(agarwal): This check can be done inside Transpose kernel.
+ b_shape = array_ops.shape(b)
+ min_dim = math_ops.minimum(b_shape[0], b_shape[1])
+ perm = control_flow_ops.cond(
+ math_ops.equal(min_dim, 1), lambda: [0, 1, 2], lambda: [1, 0, 2])
+ new_shape = array_ops.stack([b_shape[1], b_shape[0], b_shape[2]])
+ b = array_ops.transpose(b, perm)
+ b = array_ops.reshape(b, new_shape)
+
+ if b.shape.is_fully_defined():
+ x, y, z = b.shape
+ else:
+ x, y, z = [
+ array_ops.reshape(i, [])
+ for i in array_ops.split(array_ops.shape(b), 3)
+ ]
+ b = array_ops.reshape(b, [x, y * z])
+ prod = math_ops.matmul(a, b, transpose_a=tr_a)
+ prod = array_ops.reshape(prod, [-1, y, z])
+ prod = array_ops.transpose(prod, [1, 0, 2])
+ return wrap(prod, True)
+
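A hedged NumPy sketch of the `a_stacked` branch above: folding the loop and row dimensions of `a` into one lets a single 2-D matmul replace a per-iteration matmul (the shapes here are illustrative assumptions):

```python
import numpy as np

a = np.random.rand(4, 2, 3)  # (loop_len, m, k), already un-transposed
b = np.random.rand(3, 5)     # (k, n), loop-invariant
prod = (a.reshape(4 * 2, 3) @ b).reshape(4, 2, 5)
expected = np.stack([a[i] @ b for i in range(4)])  # what pfor must compute
assert np.allclose(prod, expected)
```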
+
+@RegisterPFor("BatchMatMul")
+def _convert_batch_mat_mul(pfor_input):
+ # TODO(agarwal): There may be a more efficient way to do this instead of
+ # stacking the inputs.
+ pfor_input.stack_inputs()
+ x = pfor_input.stacked_input(0)
+ y = pfor_input.stacked_input(1)
+ adj_x = pfor_input.get_attr("adj_x")
+ adj_y = pfor_input.get_attr("adj_y")
+
+ x = _flatten_first_two_dims(x)
+ y = _flatten_first_two_dims(y)
+ output = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
+ output = _unflatten_first_dim(output, pfor_input.pfor.loop_len_vector)
+ return wrap(output, True)
+
+
+@RegisterPForWithArgs("Sum", math_ops.reduce_sum)
+@RegisterPForWithArgs("Prod", math_ops.reduce_prod)
+@RegisterPForWithArgs("Max", math_ops.reduce_max)
+@RegisterPForWithArgs("Min", math_ops.reduce_min)
+def _convert_reduction(pfor_input, _, op_func):
+ t = pfor_input.stacked_input(0)
+ indices = pfor_input.unstacked_input(1)
+ # Shift positive indices by one to account for the extra dimension.
+ indices += math_ops.cast(indices >= 0, dtypes.int32)
+ keep_dims = pfor_input.get_attr("keep_dims")
+ return wrap(op_func(t, indices, keepdims=keep_dims), True)
+
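The axis shift above (also used by the cumulative ops below) can be checked with plain integers; a small illustrative sketch:

```python
def shift_axis(axis):
    # After the loop dimension is prepended, non-negative reduction axes move
    # up by one; negative axes still count from the end and are unchanged.
    return axis + (1 if axis >= 0 else 0)

assert shift_axis(0) == 1    # the old leading dim is now dim 1
assert shift_axis(2) == 3
assert shift_axis(-1) == -1  # the last dim is still the last dim
```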
+
+@RegisterPForWithArgs("Cumsum", math_ops.cumsum)
+@RegisterPForWithArgs("Cumprod", math_ops.cumprod)
+def _convert_cumfoo(pfor_input, _, op_func):
+ t = pfor_input.stacked_input(0)
+ axis = pfor_input.unstacked_input(1)
+ # Shift positive indices by one to account for the extra dimension.
+ axis += math_ops.cast(axis >= 0, dtypes.int32)
+ exclusive = pfor_input.get_attr("exclusive")
+ reverse = pfor_input.get_attr("reverse")
+ return wrap(op_func(t, axis, exclusive=exclusive, reverse=reverse), True)
+
+
+@RegisterPFor("BiasAdd")
+def _convert_biasadd(pfor_input):
+ t = pfor_input.stacked_input(0)
+ bias = pfor_input.unstacked_input(1)
+ data_format = pfor_input.get_attr("data_format")
+ if data_format != b"NCHW":
+ return wrap(nn_ops.bias_add(t, bias, data_format=data_format), True)
+ shape = array_ops.shape(t)
+ flattened_shape = array_ops.concat([[-1], shape[2:]], axis=0)
+ t = array_ops.reshape(t, flattened_shape)
+ t = nn_ops.bias_add(t, bias, data_format=b"NCHW")
+ t = array_ops.reshape(t, shape)
+ return wrap(t, True)
+
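A NumPy picture of the NCHW workaround above, under assumed shapes: folding the loop and batch dimensions into one batch lets an ordinary NCHW bias-add broadcast over channels:

```python
import numpy as np

t = np.zeros((2, 3, 4, 5, 6))          # (loop_len, N, C, H, W)
bias = np.arange(4.0)                  # one value per channel C
flat = t.reshape((-1,) + t.shape[2:])  # (loop_len * N, C, H, W)
out = (flat + bias.reshape(1, -1, 1, 1)).reshape(t.shape)  # NCHW bias-add
assert np.allclose(out[1, 2, :, 0, 0], bias)
```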
+
+@RegisterPFor("UnsortedSegmentSum")
+def _convert_unsortedsegmentsum(pfor_input):
+ data, data_stacked, _ = pfor_input.input(0)
+ # TODO(agarwal): handle unstacked?
+ segment_ids = pfor_input.stacked_input(1)
+ # TODO(agarwal): handle stacked?
+ num_segments = pfor_input.unstacked_input(2)
+ if not data_stacked:
+ data = _stack(data, pfor_input.pfor.loop_len_vector).t
+ segment_shape = array_ops.shape(segment_ids)
+ n = segment_shape[0]
+ ones = array_ops.ones_like(segment_shape)[1:]
+ segment_offset = num_segments * math_ops.range(n)
+ segment_offset = array_ops.reshape(segment_offset,
+ array_ops.concat([[n], ones], axis=0))
+ segment_ids += segment_offset
+ num_segments *= n
+ output = math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
+ new_output_shape = array_ops.concat(
+ [[n, -1], array_ops.shape(output)[1:]], axis=0)
+ output = array_ops.reshape(output, new_output_shape)
+ return wrap(output, True)
+
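A NumPy sketch of the id-offsetting trick above: giving iteration i its own block of ids [i * num_segments, (i + 1) * num_segments) turns n independent segment sums into one flat unsorted segment sum (the values here are illustrative):

```python
import numpy as np

n, num_segments = 2, 3
ids = np.array([[0, 2, 0], [1, 1, 2]])       # per-iteration segment ids
offset = (num_segments * np.arange(n)).reshape(n, 1)
flat_ids = (ids + offset).reshape(-1)        # [0 2 0 4 4 5]
data = np.arange(6.0).reshape(n, 3)
out = np.zeros(n * num_segments)
np.add.at(out, flat_ids, data.reshape(-1))   # the flat segment sum
out = out.reshape(n, num_segments)           # one row per pfor iteration
assert np.allclose(out, [[2., 0., 1.], [0., 7., 5.]])
```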
+
+@RegisterPFor("Cast")
+def _convert_cast(pfor_input):
+ inp = pfor_input.stacked_input(0)
+ dtype = pfor_input.get_attr("DstT")
+ return wrap(math_ops.cast(inp, dtype), True)
+
+
+# Note that ops handled here do not have attributes except "T", and hence don't
+# need extra arguments passed to the cwise_op call below.
+@RegisterPForWithArgs("Add", math_ops.add)
+@RegisterPForWithArgs("Ceil", math_ops.ceil)
+@RegisterPForWithArgs("Equal", math_ops.equal)
+@RegisterPForWithArgs("NotEqual", math_ops.not_equal)
+@RegisterPForWithArgs("Floor", math_ops.floor)
+@RegisterPForWithArgs("Greater", math_ops.greater)
+@RegisterPForWithArgs("GreaterEqual", math_ops.greater_equal)
+@RegisterPForWithArgs("Less", math_ops.less)
+@RegisterPForWithArgs("LessEqual", math_ops.less_equal)
+@RegisterPForWithArgs("LogicalOr", math_ops.logical_or)
+@RegisterPForWithArgs("LogicalAnd", math_ops.logical_and)
+@RegisterPForWithArgs("LogicalNot", math_ops.logical_not)
+@RegisterPForWithArgs("LogicalXor", math_ops.logical_xor)
+@RegisterPForWithArgs("Maximum", math_ops.maximum)
+@RegisterPForWithArgs("Minimum", math_ops.minimum)
+@RegisterPForWithArgs("Mul", math_ops.multiply)
+@RegisterPForWithArgs("Neg", math_ops.negative)
+@RegisterPForWithArgs("RealDiv", math_ops.divide)
+@RegisterPForWithArgs("Relu", nn_ops.relu)
+@RegisterPForWithArgs("Sigmoid", math_ops.sigmoid)
+@RegisterPForWithArgs("Square", math_ops.square)
+@RegisterPForWithArgs("Sub", math_ops.subtract)
+@RegisterPForWithArgs("Tanh", math_ops.tanh)
+def _convert_cwise(pfor_input, op_type, op_func):
+ del op_type
+ pfor_input.expanddim_inputs_for_broadcast()
+ return wrap(op_func(*[x.t for x in pfor_input.inputs]), True)
+
+
+@RegisterPFor("Shape")
+def _convert_shape(pfor_input):
+ out_type = pfor_input.get_attr("out_type")
+ return wrap(
+ array_ops.shape(pfor_input.stacked_input(0), out_type=out_type)[1:],
+ False)
+
+
+@RegisterPFor("ShapeN")
+def _convert_shape_n(pfor_input):
+ out_type = pfor_input.get_attr("out_type")
+ shapes = [
+ array_ops.shape(x, out_type=out_type)[1:]
+ if stacked else array_ops.shape(x) for x, stacked, _ in pfor_input.inputs
+ ]
+ return [wrap(x, False) for x in shapes]
+
+
+@RegisterPFor("Size")
+def _convert_size(pfor_input):
+ out_type = pfor_input.get_attr("out_type")
+ n = math_ops.cast(pfor_input.pfor.loop_len_vector[0], out_type)
+ return wrap(
+ array_ops.size(pfor_input.stacked_input(0), out_type=out_type) // n,
+ False)
+
+
+@RegisterPFor("Rank")
+def _convert_rank(pfor_input):
+ return wrap(array_ops.rank(pfor_input.stacked_input(0)) - 1, False)
+
+
+@RegisterPFor("AddN")
+def _convert_addn(pfor_input):
+ # AddN does not support broadcasting.
+ pfor_input.stack_inputs()
+ return wrap(math_ops.add_n([x.t for x in pfor_input.inputs]), True)
+
+
+@RegisterPFor("BiasAddGrad")
+def _convert_biasaddgrad(pfor_input):
+ grad = pfor_input.stacked_input(0)
+ fmt = pfor_input.get_attr("data_format")
+ if fmt == b"NCHW":
+ output = math_ops.reduce_sum(grad, axis=[1, 3, 4], keepdims=False)
+ else:
+ grad_shape = array_ops.shape(grad)
+ last_dim_shape = grad_shape[-1]
+ first_dim_shape = grad_shape[0]
+ output = array_ops.reshape(grad, [first_dim_shape, -1, last_dim_shape])
+ output = math_ops.reduce_sum(output, axis=[1], keepdims=False)
+ return wrap(output, True)
+
+
+# Some required ops are not exposed under the tf namespace, so we rely on
+# _create_op to create them.
+@RegisterPForWithArgs("ReluGrad")
+@RegisterPForWithArgs("TanhGrad")
+@RegisterPForWithArgs("SigmoidGrad")
+def _convert_grads(pfor_input, op_type, *args, **kw_args):
+ del args
+ del kw_args
+ # TODO(agarwal): Looks like these ops don't support broadcasting. Hence we
+ # have to use tiling here.
+ pfor_input.stack_inputs()
+ outputs = _create_op(
+ op_type, [x.t for x in pfor_input.inputs],
+ [x.dtype for x in pfor_input.outputs],
+ attrs=pfor_input.op.node_def.attr).outputs
+ return [wrap(x, True) for x in outputs]
+
+
+@RegisterPFor("Select")
+def _convert_select(pfor_input):
+ pfor_input.stack_inputs()
+ cond = pfor_input.stacked_input(0)
+ t = pfor_input.stacked_input(1)
+ e = pfor_input.stacked_input(2)
+ cond_rank = array_ops.rank(cond)
+ cond, t, e = control_flow_ops.cond(
+ cond_rank > 1, lambda: _inputs_with_flattening(pfor_input, [0, 1, 2]),
+ lambda: [cond, t, e])
+ outputs = _create_op(
+ pfor_input.op_type, [cond, t, e], [x.dtype for x in pfor_input.outputs],
+ attrs=pfor_input.op.node_def.attr).outputs
+ n = pfor_input.pfor.loop_len_vector
+ out = control_flow_ops.cond(cond_rank > 1,
+ lambda: _unflatten_first_dim(outputs[0], n),
+ lambda: outputs[0])
+ return [wrap(out, True) for _ in outputs]
+
+
+# random_ops
+
+
+@RegisterPForWithArgs("RandomUniform")
+@RegisterPForWithArgs("RandomUniformInt")
+@RegisterPForWithArgs("RandomStandardNormal")
+@RegisterPForWithArgs("TruncatedNormal")
+@RegisterPForWithArgs("RandomGamma")
+@RegisterPForWithArgs("RandomPoissonV2")
+def _convert_random(pfor_input, op_type, *args, **kw_args):
+ del args
+ del kw_args
+ inputs = [pfor_input.unstacked_input(i) for i in range(pfor_input.num_inputs)]
+ # inputs[0] is "shape"
+ inputs[0] = array_ops.concat(
+ [pfor_input.pfor.loop_len_vector, inputs[0]], axis=0)
+ logging.warning(
+ "Note that %s inside pfor op may not give same output as "
+ "inside a sequential loop.", op_type)
+ outputs = _create_op(
+ op_type,
+ inputs, [x.dtype for x in pfor_input.outputs],
+ attrs=pfor_input.op.node_def.attr).outputs
+ return [wrap(x, True) for x in outputs]
+
+
+# logging_ops
+
+
+@RegisterPFor("Assert")
+def _convert_assert(pfor_input):
+ cond, cond_stacked, _ = pfor_input.input(0)
+ if cond_stacked:
+ cond = math_ops.reduce_all(cond)
+
+ data_list = [x.t for x in pfor_input.inputs][1:]
+ return _create_op("Assert", [cond] + data_list, [],
+ attrs=pfor_input.op.node_def.attr)
+
+
+@RegisterPFor("Print")
+def _convert_print(pfor_input):
+ # Note that we don't stack all the inputs. Hence unstacked values are printed
+ # once here vs multiple times in a while_loop.
+ pfor_input.stack_inputs([0])
+ outputs = _create_op(
+ "Print", [x.t for x in pfor_input.inputs],
+ [x.dtype for x in pfor_input.outputs],
+ attrs=pfor_input.op.node_def.attr).outputs
+ return [wrap(x, True) for x in outputs]
+
+
+# data_flow_ops
+
+# TensorArray conversion is tricky since we don't support arrays of
+# TensorArrays. For converting them, we consider two distinct cases:
+#
+# 1. The array is constructed outside the pfor call, and read/written inside the
+# loop.
+# This is an easier case since we don't need to make an array of TensorArrays.
+# A correctness requirement is that these parallel iterations shouldn't attempt
+# to write to the same location. Hence at conversion time we disallow indices to
+# be loop-invariant as that would guarantee a collision. Even if the indices are
+# not loop-invariant, they could still conflict, and that would trigger
+# runtime errors.
+#
+# 2. The array is constructed and used entirely inside each pfor iteration.
+# For simplicity, here we require that the indices used for write/scatter are
+# "unstacked". Otherwise it becomes hard to merge the TensorArrays created in
+# different pfor iterations. We consider two sub-cases:
+#
+# 2a Elements written to the array are "stacked"
+# To simulate multiple TensorArrays, we may increase the dimension of each
+# element of the array, i.e. the i_th row of the j_th entry of the converted
+# TensorArray corresponds to the j_th entry of the TensorArray in the i_th
+# pfor iteration.
+#
+# 2b Elements written to the array are "unstacked"
+# In this case we don't increase the dimensions to avoid redundant tiling. Each
+# iteration tries to write the same value, so we convert that to a single
+# write.
+#
+# Here are some tricks used to implement the above:
+# - TensorArrayV3 constructor encodes the element shape as an attr. Instead of
+# trying to trace whether future writes are stacked or unstacked in order to set
+# this attr, we set it to an unknown shape.
+# - We use the "flow" output of the different ops to track whether the array
+# elements are stacked or unstacked. If a stacked write/scatter is done, we make
+# the flow stacked as well.
+# - We use some heuristic traversal of the graph to track whether the
+# TensorArray handle was created inside or outside the pfor loop.
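A NumPy picture of case 2a (purely illustrative): a single array with an extra per-element dimension simulates loop_len separate TensorArrays:

```python
import numpy as np

loop_len, num_entries, elem_shape = 3, 2, (4,)
# ta[j][i] plays the role of entry j of the TensorArray in pfor iteration i;
# a "stacked" write to entry j fills all of ta[j] in one shot.
ta = np.zeros((num_entries, loop_len) + elem_shape)
ta[1] = np.stack([np.full(elem_shape, float(i)) for i in range(loop_len)])
assert ta[1, 2, 0] == 2.0  # iteration 2's view of entry 1
```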
+
+
+@RegisterPFor("TensorArrayV3")
+def _convert_tensor_array_v3(pfor_input):
+ size = pfor_input.unstacked_input(0)
+ dtype = pfor_input.get_attr("dtype")
+ dynamic_size = pfor_input.get_attr("dynamic_size")
+ clear_after_read = pfor_input.get_attr("clear_after_read")
+ identical_element_shapes = pfor_input.get_attr("identical_element_shapes")
+ tensor_array_name = pfor_input.get_attr("tensor_array_name")
+ handle, flow = data_flow_ops.tensor_array_v3(
+ size,
+ dtype=dtype,
+ # We don't set the element shape since we don't yet know whether the
+ # writes will be stacked or not.
+ element_shape=None,
+ dynamic_size=dynamic_size,
+ clear_after_read=clear_after_read,
+ identical_element_shapes=identical_element_shapes,
+ tensor_array_name=tensor_array_name)
+ # Note we keep flow unstacked for now since we don't know if writes will be
+ # stacked or not.
+ return wrap(handle, False), wrap(flow, False)
+
+
+@RegisterPFor("TensorArraySizeV3")
+def _convert_tensor_array_size_v3(pfor_input):
+ handle = pfor_input.unstacked_input(0)
+ flow, flow_stacked, _ = pfor_input.input(1)
+ if flow_stacked:
+ flow = _unstack_flow(flow)
+ size = data_flow_ops.tensor_array_size_v3(handle, flow)
+ return wrap(size, False)
+
+
+def _handle_inside_pfor(pfor_input, handle):
+ """Returns True if handle was created inside the pfor loop."""
+ # We use some heuristic to find the original TensorArray creation op.
+ # The logic should handle the common cases (except cond-based subgraphs).
+ # In theory the user could perform different operations on the handle (like
+ # Reshape, stack multiple handles, etc) which could break this logic.
+ # TODO(agarwal): handle Switch/Merge.
+ while handle.op.type in ("Enter", "Identity"):
+ handle = handle.op.inputs[0]
+ if handle.op.type not in [
+ "TensorArrayV3", "TensorArrayGradV3", "TensorArrayGradWithShape"]:
+ raise ValueError("Unable to find source for handle %s" % handle)
+ else:
+ return pfor_input.pfor.op_is_inside_loop(handle.op)
+
+
+def _unstack_flow(value):
+ # TODO(agarwal): consider looking if this is a Tile op then get its input.
+ # This may avoid running the Tile operations.
+ return array_ops.gather(value, 0)
+
+
+@RegisterPFor("TensorArrayReadV3")
+def _convert_tensor_array_read_v3(pfor_input):
+ handle = pfor_input.unstacked_input(0)
+ index, index_stacked, _ = pfor_input.input(1)
+ dtype = pfor_input.get_attr("dtype")
+ flow, flow_stacked, _ = pfor_input.input(2)
+ if flow_stacked:
+ flow = _unstack_flow(flow)
+
+ is_inside_pfor = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
+ if is_inside_pfor:
+ # Note that if we are inside a control flow construct inside the pfor, and
+ # only some of the iterations are doing the read (i.e.
+ # `all_indices_partitioned` is True), then the read operation should only
+ # return values for the currently active pfor iterations (`all_indices`
+ # below). Hence, whenever the returned value is stacked (i.e. `flow` is
+ # stacked), we may need to do an extra gather after reading the values. Also
+ # note that if `is_inside` is false, then values in the tensor array are
+ # unstacked. So the check is only needed in this branch.
+ all_indices = pfor_input.pfor.all_indices
+ all_indices_partitioned = pfor_input.pfor.all_indices_partitioned
+ # Note: flow_stacked indicates if values in the TensorArray are stacked or
+ # not.
+ if index_stacked:
+ if flow_stacked:
+ raise ValueError(
+ "It looks like TensorArrayReadV3 was called on a TensorArray whose"
+ " values are not loop-invariant, and the read indices were also"
+ " not loop invariant. This is currently unsupported.")
+ value = data_flow_ops.tensor_array_gather_v3(
+ handle, index, flow, dtype=dtype)
+ return wrap(value, True)
+ value = data_flow_ops.tensor_array_read_v3(
+ handle, index, flow, dtype=dtype)
+ if flow_stacked and all_indices_partitioned:
+ value = array_ops.gather(value, all_indices)
+ return wrap(value, flow_stacked)
+ # Values in the TensorArray should be unstacked (since different iterations
+ # couldn't write to the same location). So whether output is stacked or not
+ # depends on index_stacked.
+ if index_stacked:
+ value = data_flow_ops.tensor_array_gather_v3(
+ handle, index, flow, dtype=dtype)
+ else:
+ value = data_flow_ops.tensor_array_read_v3(
+ handle, index, flow, dtype=dtype)
+ return wrap(value, index_stacked)
+
+
+@RegisterPFor("TensorArrayWriteV3")
+def _convert_tensor_array_write_v3(pfor_input):
+ handle = pfor_input.unstacked_input(0)
+ index, index_stacked, _ = pfor_input.input(1)
+ value, value_stacked, _ = pfor_input.input(2)
+ flow, flow_stacked, _ = pfor_input.input(3)
+ if value_stacked and pfor_input.pfor.all_indices_partitioned:
+ # It looks like we are inside control flow in a pfor where not all iterations
+ # are currently active. We don't allow that, since it could lead to different
+ # indices having different shapes, which would be hard to merge later.
+ raise ValueError("Writing non-loop-invariant values to a TensorArray from "
+ "inside a while_loop/cond is not supported.")
+ if flow_stacked:
+ flow = _unstack_flow(flow)
+ is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
+ if is_inside:
+ if index_stacked:
+ raise ValueError("Need indices for %s to be loop invariant" % handle)
+ if not flow_stacked and not value_stacked:
+ flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
+ return wrap(flow_out, False)
+ else:
+ if not value_stacked:
+ value = _stack(value, pfor_input.pfor.loop_len_vector).t
+ # TODO(agarwal): Note that if flow is unstacked and value is stacked, then
+ # this may or may not be a safe situation. flow is unstacked both for a
+ # freshly created TensorArray and after unstacked values are
+ # written to it. If it is the latter, then we cannot write a stacked value
+ # now since that may cause runtime errors due to different shapes in the
+ # array. At the moment we are not able to handle this gracefully and
+ # distinguish between the two cases. That would require some heuristic
+ # traversal of the graph to figure out whether all the writes are
+ # unstacked or not.
+ flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
+ return _stack(flow_out, pfor_input.pfor.loop_len_vector)
+ else:
+ if not index_stacked:
+ raise ValueError("Need indices for %s to be not loop invariant" % handle)
+ # Note that even when index_stacked is true, actual values in index may
+ # still not be unique. However, that will cause a runtime error when
+ # executing the scatter operation below.
+ if not value_stacked:
+ value = _stack(value, pfor_input.pfor.loop_len_vector).t
+ flow_out = data_flow_ops.tensor_array_scatter_v3(handle, index, value, flow)
+ return _stack(flow_out, pfor_input.pfor.loop_len_vector)
+
+
+def _transpose_first_two_dims(value):
+ # TODO(agarwal): optimize if one of the dims == 1.
+ value_shape = array_ops.shape(value)
+ v0 = value_shape[0]
+ v1 = value_shape[1]
+ value = array_ops.reshape(value, [v0, v1, -1])
+ value = array_ops.transpose(value, [1, 0, 2])
+ new_shape = array_ops.concat([[v1, v0], value_shape[2:]], axis=0)
+ return array_ops.reshape(value, new_shape)
+
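A quick NumPy check of the helper above: reshaping to rank 3, swapping the first two axes, and restoring the tail shape is exactly a transpose of dims 0 and 1:

```python
import numpy as np

v = np.random.rand(2, 3, 4, 5)
swapped = v.reshape(2, 3, -1).transpose(1, 0, 2).reshape(3, 2, 4, 5)
assert np.allclose(swapped, v.transpose(1, 0, 2, 3))
```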
+
+@RegisterPFor("TensorArrayGatherV3")
+def _convert_tensor_array_gather_v3(pfor_input):
+ handle = pfor_input.unstacked_input(0)
+ indices, indices_stacked, _ = pfor_input.input(1)
+ indices = array_ops.reshape(indices, [-1])
+ flow, flow_stacked, _ = pfor_input.input(2)
+ if flow_stacked:
+ flow = _unstack_flow(flow)
+ dtype = pfor_input.get_attr("dtype")
+ # TODO(agarwal): support element_shape attr?
+
+ n = pfor_input.pfor.loop_len_vector
+ value = data_flow_ops.tensor_array_gather_v3(
+ handle, indices, flow, dtype=dtype)
+ is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
+ if is_inside:
+ # flow_stacked indicates if values in the TensorArray are stacked or not.
+ if indices_stacked:
+ if flow_stacked:
+ raise ValueError(
+ "It looks like TensorArrayGatherV3 was called on a TensorArray "
+ "whose values are not loop-invariant, and the indices were also "
+ "not loop invariant. This is currently unsupported.")
+ else:
+ value = _unflatten_first_dim(value, n)
+ return wrap(value, True)
+ else:
+ if flow_stacked:
+ # Since elements in this array are stacked and `value` was produced by
+ # gather, its first two dims are "gathered elements" and "stack
+ # dimension". Our semantics require these two to be flipped.
+ value = _transpose_first_two_dims(value)
+ return wrap(value, flow_stacked)
+ else:
+ # Values in the TensorArray should be unstacked (since different iterations
+ # couldn't write to the same location). So whether output is stacked or not
+ # depends on indices_stacked.
+ if indices_stacked:
+ value = _unflatten_first_dim(value, n)
+ return wrap(value, indices_stacked)
+
+
+@RegisterPFor("TensorArrayScatterV3")
+def _convert_tensor_array_scatter_v3(pfor_input):
+ handle = pfor_input.unstacked_input(0)
+ indices, indices_stacked, _ = pfor_input.input(1)
+ indices = array_ops.reshape(indices, [-1])
+ value, value_stacked, _ = pfor_input.input(2)
+ flow, flow_stacked, _ = pfor_input.input(3)
+
+ if flow_stacked:
+ flow = _unstack_flow(flow)
+
+ is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
+ if is_inside:
+ if indices_stacked:
+ raise ValueError("Need indices for %s to be loop invariant" % handle)
+ # Note that flow_stacked indicates if existing values in the array are
+ # stacked or not.
+ if not flow_stacked and not value_stacked:
+ flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
+ flow)
+ return wrap(flow_out, False)
+ if not value_stacked:
+ # TODO(agarwal): tile in the second dimension directly instead of
+ # transposing below.
+ value = _stack(value, pfor_input.pfor.loop_len_vector).t
+
+ value = _transpose_first_two_dims(value)
+ # TODO(agarwal): Note that if a previous write was unstacked, flow will be
+ # unstacked, and a stacked value may be written here which may cause
+ # runtime error due to different elements having different shape. We do
+ # not try to prevent that.
+ flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
+ flow)
+ return _stack(flow_out, pfor_input.pfor.loop_len_vector)
+ if not indices_stacked:
+ raise ValueError("Need indices for %s to be not loop invariant" % handle)
+ if not value_stacked:
+ value = _stack(value, pfor_input.pfor.loop_len_vector).t
+ value = _flatten_first_two_dims(value)
+ flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
+ flow)
+ return _stack(flow_out, pfor_input.pfor.loop_len_vector)
+
+
+@RegisterPFor("TensorArrayGradV3")
+def _convert_tensor_array_grad_v3(pfor_input):
+ handle = pfor_input.unstacked_input(0)
+ flow, flow_stacked, _ = pfor_input.input(1)
+ if flow_stacked:
+ flow = _unstack_flow(flow)
+ source = pfor_input.get_attr("source")
+ # TODO(agarwal): For now, we assume that gradients are stacked if the
+ # TensorArrayGradV3 call is being done inside the pfor. Getting that wrong
+ # will give a runtime error due to an incorrect shape being written to the
+ # accumulator. It is difficult to know in advance if gradients written will be
+ # stacked or not. Note that flow being stacked is not indicative of the
+ # gradient being stacked or not. Revisit this later.
+ shape_to_prepend = pfor_input.pfor.loop_len_vector
+ grad_handle, flow_out = data_flow_ops.tensor_array_grad_with_shape(
+ handle=handle,
+ flow_in=flow,
+ shape_to_prepend=shape_to_prepend,
+ source=source)
+ flow_out = _stack(flow_out, pfor_input.pfor.loop_len_vector).t
+ return [wrap(grad_handle, False), wrap(flow_out, True)]
+
+
+# StackV2 conversion is tricky since we don't have arrays of StackV2. So similar
+# to TensorArrays, we convert them by changing the dimension of the elements
+# inside the stack.
+#
+# We consider two cases:
+#
+# 1. StackV2 is constructed and used entirely inside the pfor loop.
+# We keep a single Stack and perform the push/pop operations of all the
+# iterations in lock-step. We also assume that all the iterations perform these
+# operations. In case of dynamic control flow, if only some of the iterations
+# try to perform a push/pop, then the conversion may not work correctly and may
+# cause undefined behavior.
+# TODO(agarwal): test StackV2 with dynamic control flow.
+#
+# 2. StackV2 is constructed outside the pfor loop.
+# Performing stack push/pop in a parallel fashion is ill-defined. However,
+# given that reading stacks created externally is a common operation when
+# computing jacobians, we provide the following special semantics:
+# - disallow push operations to the stack
+# - pop operations are performed in lock step by all iterations, similar to the
+# case when the stack is created inside. A single value is popped during the
+# lock-step operation and broadcast to all the iterations. Values in the stack
+# are assumed to be loop-invariant.
+#
+# Some other implementation details:
+# We use some ugly logic to determine whether the values in a Stack are
+# loop-invariant or not. When converting push/pop operations, we keep track of
+# whether the last conversion used a stacked value or not (see _stack_cache
+# below). As a result, if an unstacked value is written first, subsequent
+# stacked writes are disallowed even when they could have been allowed in
+# theory.
+
+# Map from cache key based on StackV2 handle to a bool indicating whether values
+# are stacked or not.
+# TODO(agarwal): move _stack_cache inside pfor?
+_stack_cache = {}
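A toy sketch of the caching rule described above (hypothetical helper names; the real logic lives in the push/pop converters below):

```python
_toy_cache = {}

def record_push(key, elem_stacked):
    # The first converted push fixes whether the stack holds stacked values.
    stacked = _toy_cache.setdefault(key, elem_stacked)
    if not stacked and elem_stacked:
        # Once determined loop-invariant, a stack cannot become stacked again.
        raise ValueError("stack was already determined to be loop invariant")
    # When stacked but elem_stacked is False, the converter tiles the element.
    return stacked
```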
+
+
+def _stack_cache_key(pfor_input):
+ """Create cache key corresponding to a stack handle."""
+ op_type = pfor_input.op_type
+ assert op_type in ["StackPushV2", "StackPopV2"], op_type
+ orig_handle = pfor_input.op.inputs[0]
+ while orig_handle.op.type in ["Identity", "Enter"]:
+ orig_handle = orig_handle.op.inputs[0]
+ assert orig_handle.op.type == "StackV2", orig_handle.op
+ return ops.get_default_graph(), pfor_input.pfor, orig_handle
+
+
+def _stack_handle_inside_pfor(handle, pfor_input):
+ while handle.op.type in ["Identity", "Enter"]:
+ handle = handle.op.inputs[0]
+ assert handle.op.type == "StackV2", (
+ "Unable to find StackV2 op. Got %s" % handle.op)
+ return pfor_input.pfor.op_is_inside_loop(handle.op)
+
+
+@RegisterPFor("StackPushV2")
+def _convert_stack_push_v2(pfor_input):
+ handle = pfor_input.unstacked_input(0)
+ elem, elem_stacked, _ = pfor_input.input(1)
+ swap_memory = pfor_input.get_attr("swap_memory")
+
+ if not _stack_handle_inside_pfor(pfor_input.op.inputs[0], pfor_input):
+ raise ValueError("StackPushV2 not allowed on stacks created outside pfor")
+ stack_cache_key = _stack_cache_key(pfor_input)
+ stacked = _stack_cache.get(stack_cache_key, None)
+ if stacked is None:
+ stacked = elem_stacked
+ _stack_cache[stack_cache_key] = stacked
+ else:
+ # If we previously made it unstacked then we can't revert to being stacked.
+ if not stacked and elem_stacked:
+ raise ValueError(
+ "It looks like the stack was previously determined to be loop"
+ " invariant, but we are now trying to push a loop dependent value"
+ " to it. This is currently unsupported.")
+ if stacked and not elem_stacked:
+ elem = _stack(elem, pfor_input.pfor.loop_len_vector).t
+ out = data_flow_ops.stack_push_v2(handle, elem, swap_memory=swap_memory)
+ return wrap(out, stacked)
+
+
+# Note that inputs to this convertor will be unstacked. However, it should
+# still get called since it is a stateful op.
+@RegisterPFor("StackPopV2")
+def _convert_stack_pop_v2(pfor_input):
+ handle = pfor_input.unstacked_input(0)
+ stack_cache_key = _stack_cache_key(pfor_input)
+ stacked = _stack_cache.get(stack_cache_key, None)
+ # If a StackPushV2 has not been converted yet, we default to unstacked since
+ # the push could be outside of pfor, or the convertor may not be called if the
+ # inputs are unconverted.
+ if stacked is None:
+ stacked = False
+ _stack_cache[stack_cache_key] = False
+ elem_type = pfor_input.get_attr("elem_type")
+ out = data_flow_ops.stack_pop_v2(handle, elem_type)
+ return wrap(out, stacked)
+
+
+# parsing_ops
+
+
+@RegisterPFor("DecodeCSV")
+def _convert_decode_csv(pfor_input):
+ lines = pfor_input.stacked_input(0)
+ record_defaults = [
+ pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
+ ]
+ field_delim = pfor_input.get_attr("field_delim")
+ use_quote_delim = pfor_input.get_attr("use_quote_delim")
+ select_cols = pfor_input.get_attr("select_cols")
+ if not select_cols:
+ select_cols = None
+ return [
+ wrap(t, True) for t in parsing_ops.decode_csv(
+ lines,
+ record_defaults,
+ field_delim=field_delim,
+ use_quote_delim=use_quote_delim,
+ select_cols=select_cols)
+ ]
+
+
+@RegisterPFor("ParseSingleExample")
+def _convert_parse_single_example(pfor_input):
+ serialized = pfor_input.stacked_input(0)
+ dense_defaults = [
+ pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
+ ]
+ sparse_keys = pfor_input.get_attr("sparse_keys")
+ dense_keys = pfor_input.get_attr("dense_keys")
+ sparse_types = pfor_input.get_attr("sparse_types")
+ dense_shapes = pfor_input.get_attr("dense_shapes")
+ output = gen_parsing_ops.parse_example(
+ serialized=serialized,
+ names=[],
+ dense_defaults=dense_defaults,
+ sparse_keys=sparse_keys,
+ dense_keys=dense_keys,
+ sparse_types=sparse_types,
+ dense_shapes=dense_shapes)
+ return [wrap(t, True, True) for t in nest.flatten(output)]
diff --git a/tensorflow/python/ops/resource_variable_ops.py b/tensorflow/python/ops/resource_variable_ops.py
index 2033674a92..1f56ad25bf 100644
--- a/tensorflow/python/ops/resource_variable_ops.py
+++ b/tensorflow/python/ops/resource_variable_ops.py
@@ -181,7 +181,8 @@ def shape_safe_assign_variable_handle(handle, shape, value, name=None):
name=name)
-class ResourceVariable(variables.Variable):
+# TODO(apassos) make this be variables.Variable
+class ResourceVariable(variables.RefVariable):
"""Variable based on resource handles.
See the @{$variables$Variables How To} for a high level overview.
@@ -867,6 +868,19 @@ class ResourceVariable(variables.Variable):
__array_priority__ = 100
+ def is_initialized(self, name=None):
+ """Checks whether a resource variable has been initialized.
+
+ Outputs a boolean scalar indicating whether the variable has been initialized.
+
+ Args:
+ name: A name for the operation (optional).
+
+ Returns:
+ A `Tensor` of type `bool`.
+ """
+ return gen_resource_variable_ops.var_is_initialized_op(self.handle, name)
+
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
"""Subtracts a value from this variable.
@@ -999,32 +1013,28 @@ class ResourceVariable(variables.Variable):
def __imul__(self, unused_other):
raise RuntimeError("Variable *= value not supported. Use "
- "variable.assign_mul(value) to modify the variable "
- "value and variable = variable * value to get a new "
- "Tensor object.")
+ "`var.assign(var * value)` to modify the variable or "
+ "`var = var * value` to get a new Tensor object.")
def __idiv__(self, unused_other):
raise RuntimeError("Variable /= value not supported. Use "
- "variable.assign_div(value) to modify the variable "
- "value and variable = variable / value to get a new "
- "Tensor object.")
+ "`var.assign(var / value)` to modify the variable or "
+ "`var = var / value` to get a new Tensor object.")
def __itruediv__(self, unused_other):
raise RuntimeError("Variable /= value not supported. Use "
- "variable.assign_div(value) to modify the variable "
- "value and variable = variable / value to get a new "
- "Tensor object.")
+ "`var.assign(var / value)` to modify the variable or "
+ "`var = var / value` to get a new Tensor object.")
def __irealdiv__(self, unused_other):
raise RuntimeError("Variable /= value not supported. Use "
- "variable.assign_div(value) to modify the variable "
- "value and variable = variable / value to get a new "
- "Tensor object.")
+ "`var.assign(var / value)` to modify the variable or "
+ "`var = var / value` to get a new Tensor object.")
def __ipow__(self, unused_other):
raise RuntimeError("Variable **= value not supported. Use "
- "value and variable = variable ** value to get a new "
- "Tensor object.")
+ "`var.assign(var ** value)` to modify the variable or "
+ "`var = var ** value` to get a new Tensor object.")
pywrap_tensorflow.TFE_Py_RegisterResourceVariableType(ResourceVariable)
@@ -1095,6 +1105,113 @@ class _UnreadVariable(ResourceVariable):
ops.register_tensor_conversion_function(_UnreadVariable, _dense_var_to_tensor)
ops.register_dense_tensor_like_type(_UnreadVariable)
+
+class _MixedPrecisionVariable(ResourceVariable):
+ """Represents a variable that can return in desired dtype when read.
+
+ In mixed precision training, it is usually desirable to use different dtypes
+ for variables and computation. This class will be used to wrap created
+ ResourceVariable when mixed precision training is enabled. It allows layers to
+ perform computation in a different dtype than their variable dtypes, in order
+ to achieve higher performance without causing quality loss.
+ """
+
+ def __init__(self, var, read_dtype):
+ """Creates a MixedPrecisionVariable.
+
+ Args:
+ var: A ResourceVariable instance.
+ read_dtype: A tf.DType, the dtype to return when the variable is read.
+ Casting is performed if read_dtype differs from var.dtype.
+ Returns:
+ A MixedPrecisionVariable instance.
+ Raises:
+ ValueError: if var is not a ResourceVariable instance, or read_dtype is
+ not a tf.DType instance.
+ """
+ # pylint: disable=super-init-not-called
+ # We do not call super init on purpose.
+ if not isinstance(var, ResourceVariable):
+ raise ValueError("InvalidArgument: var must be a ResourceVariable type.")
+ if not isinstance(read_dtype, dtypes.DType):
+ raise ValueError("InvalidArgument: read_dtype must be a tf.DType type.")
+
+ self._var = var
+ self._trainable = var.trainable
+ self._save_slice_info = None
+ self._graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
+ self._in_graph_mode = var._in_graph_mode # pylint: disable=protected-access
+ self._handle = var.handle
+ self._shape = var.shape
+ self._initial_value = None
+ if isinstance(self.handle, ops.EagerTensor):
+ self._handle_name = ""
+ else:
+ self._handle_name = self.handle.name
+ self._unique_id = var._unique_id # pylint: disable=protected-access
+ self._dtype = var.dtype
+ self._constraint = None
+ self._cached_value = None
+ self._is_initialized_op = var._is_initialized_op # pylint: disable=protected-access
+ self._initializer_op = var._initializer_op # pylint: disable=protected-access
+ # This needs to be set before read_value() is called.
+ self._read_dtype = read_dtype
+ if context.executing_eagerly():
+ self._graph_element = None
+ else:
+ self._graph_element = self.read_value()
+ self._handle_deleter = (
+ var._handle_deleter if not self._in_graph_mode # pylint: disable=protected-access
+ else None)
+ # pylint: enable=super-init-not-called
+
+ @property
+ def name(self):
+ return self._var.name
+
+ def value(self):
+ return self._read_variable_op()
+
+ def read_value(self):
+ return self._read_variable_op()
+
+ def _read_variable_op(self):
+ with ops.colocate_with(self._handle):
+ res = gen_resource_variable_ops.read_variable_op(self._handle,
+ self._dtype)
+ if self._read_dtype != self._dtype:
+ return math_ops.cast(res, self._read_dtype)
+ else:
+ return res
+
+ def set_shape(self, shape):
+ self._shape = shape
+ self._cached_shape_as_list = None
+
+ @property
+ def op(self):
+ """The op for this variable."""
+ return self._var.op
+
+ @property
+ def read_dtype(self):
+ """The dtype of the returned tensor when reading the var."""
+ return self._read_dtype
+
+ def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
+ del name
+ dtype = dtype or self.read_dtype
+ if dtype != self.read_dtype or as_ref:
+ return NotImplemented
+ else:
+ res = self.value()
+ return res
+
+ def _should_act_as_resource_variable(self):
+ """To pass resource_variable_ops.is_resource_variable check."""
+ pass
+
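A hypothetical usage sketch of the wrapper above (the class is module-private, so this only illustrates the intended read path; the float16 choice and variable name are assumptions, not part of the change):

```python
# Assumes the surrounding module's imports (dtypes, ResourceVariable).
v = ResourceVariable(1.0, dtype=dtypes.float32, name="w")
mp = _MixedPrecisionVariable(v, read_dtype=dtypes.float16)
y = mp.read_value()  # a read_variable_op followed by a cast to float16
assert mp.read_dtype == dtypes.float16 and mp.dtype == dtypes.float32
```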
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
diff --git a/tensorflow/python/ops/rnn.py b/tensorflow/python/ops/rnn.py
index 215140e987..deba133fb9 100644
--- a/tensorflow/python/ops/rnn.py
+++ b/tensorflow/python/ops/rnn.py
@@ -26,6 +26,7 @@ from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
@@ -131,6 +132,18 @@ def _maybe_tensor_shape_from_tensor(shape):
return shape
+def _should_cache():
+ """Returns True if a default caching device should be set, otherwise False."""
+ if context.executing_eagerly():
+ return False
+ # Don't set a caching device when running in a loop, since it is possible that
+ # train steps could be wrapped in a tf.while_loop. In that scenario caching
+ # prevents forward computations in loop iterations from re-reading the
+ # updated weights.
+ ctxt = ops.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access
+ return control_flow_util.GetContainingWhileContext(ctxt) is None
+
+
# pylint: disable=unused-argument
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
@@ -558,7 +571,7 @@ def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
- if not context.executing_eagerly():
+ if _should_cache():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
@@ -1015,7 +1028,7 @@ def raw_rnn(cell, loop_fn,
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
- if not context.executing_eagerly():
+ if _should_cache():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
@@ -1228,7 +1241,7 @@ def static_rnn(cell,
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
- if not context.executing_eagerly():
+ if _should_cache():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
diff --git a/tensorflow/python/ops/rnn_cell_impl.py b/tensorflow/python/ops/rnn_cell_impl.py
index 82a044a0d4..70805fd572 100644
--- a/tensorflow/python/ops/rnn_cell_impl.py
+++ b/tensorflow/python/ops/rnn_cell_impl.py
@@ -47,7 +47,6 @@ from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.checkpointable import base as checkpointable
-from tensorflow.python.training.checkpointable import tracking as checkpointable_tracking
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@@ -55,16 +54,6 @@ from tensorflow.python.util.tf_export import tf_export
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
-
-# TODO(jblespiau): Remove this function when we are sure there are no longer
-# any usage (even if protected, it is being used). Prefer assert_like_rnncell.
-def _like_rnncell(cell):
- """Checks that a given object is an RNNCell by using duck typing."""
- conditions = [hasattr(cell, "output_size"), hasattr(cell, "state_size"),
- hasattr(cell, "zero_state"), callable(cell)]
- return all(conditions)
-
-
# This can be used with self.assertRaisesRegexp for assert_like_rnncell.
ASSERT_LIKE_RNNCELL_ERROR_REGEXP = "is not an RNNCell"
@@ -1330,48 +1319,3 @@ class MultiRNNCell(RNNCell):
array_ops.concat(new_states, 1))
return cur_inp, new_states
-
-
-class _SlimRNNCell(RNNCell, checkpointable_tracking.NotCheckpointable):
- """A simple wrapper for slim.rnn_cells."""
-
- def __init__(self, cell_fn):
- """Create a SlimRNNCell from a cell_fn.
-
- Args:
- cell_fn: a function which takes (inputs, state, scope) and produces the
- outputs and the new_state. Additionally when called with inputs=None and
- state=None it should return (initial_outputs, initial_state).
-
- Raises:
- TypeError: if cell_fn is not callable
- ValueError: if cell_fn cannot produce a valid initial state.
- """
- if not callable(cell_fn):
- raise TypeError("cell_fn %s needs to be callable", cell_fn)
- self._cell_fn = cell_fn
- self._cell_name = cell_fn.func.__name__
- init_output, init_state = self._cell_fn(None, None)
- output_shape = init_output.get_shape()
- state_shape = init_state.get_shape()
- self._output_size = output_shape.with_rank(2)[1].value
- self._state_size = state_shape.with_rank(2)[1].value
- if self._output_size is None:
- raise ValueError("Initial output created by %s has invalid shape %s" %
- (self._cell_name, output_shape))
- if self._state_size is None:
- raise ValueError("Initial state created by %s has invalid shape %s" %
- (self._cell_name, state_shape))
-
- @property
- def state_size(self):
- return self._state_size
-
- @property
- def output_size(self):
- return self._output_size
-
- def __call__(self, inputs, state, scope=None):
- scope = scope or self._cell_name
- output, state = self._cell_fn(inputs, state, scope=scope)
- return output, state
diff --git a/tensorflow/python/ops/script_ops.py b/tensorflow/python/ops/script_ops.py
index 1e3f662ff3..af103d3cc7 100644
--- a/tensorflow/python/ops/script_ops.py
+++ b/tensorflow/python/ops/script_ops.py
@@ -130,7 +130,7 @@ class FuncRegistry(object):
def __init__(self):
self._lock = threading.Lock()
self._unique_id = 0 # GUARDED_BY(self._lock)
- # Only store weakrefs to the funtions. The strong reference is stored in
+ # Only store weakrefs to the functions. The strong reference is stored in
# the graph.
self._funcs = weakref.WeakValueDictionary()
diff --git a/tensorflow/python/ops/special_math_ops.py b/tensorflow/python/ops/special_math_ops.py
index 1508873b75..9a10abfcf7 100644
--- a/tensorflow/python/ops/special_math_ops.py
+++ b/tensorflow/python/ops/special_math_ops.py
@@ -34,7 +34,7 @@ from tensorflow.python.util.tf_export import tf_export
# TODO(b/27419586) Change docstring for required dtype of x once int allowed
@tf_export('lbeta')
-def lbeta(x, name='lbeta'):
+def lbeta(x, name=None):
r"""Computes \\(ln(|Beta(x)|)\\), reducing along the last dimension.
Given one-dimensional `z = [z_0,...,z_{K-1}]`, we define
@@ -64,7 +64,7 @@ def lbeta(x, name='lbeta'):
# This is consistent with a convention that the sum over the empty set 0, and
# the product is 1.
# This is standard. See https://en.wikipedia.org/wiki/Empty_set.
- with ops.name_scope(name, values=[x]):
+ with ops.name_scope(name, 'lbeta', [x]):
x = ops.convert_to_tensor(x, name='x')
# Note reduce_sum([]) = 0.
@@ -83,7 +83,7 @@ def lbeta(x, name='lbeta'):
@tf_export('math.bessel_i0')
-def bessel_i0(x, name='bessel_i0'):
+def bessel_i0(x, name=None):
"""Computes the Bessel i0 function of `x` element-wise.
Modified Bessel function of order 0.
@@ -102,12 +102,12 @@ def bessel_i0(x, name='bessel_i0'):
Equivalent to scipy.special.i0
@end_compatibility
"""
- with ops.name_scope(name, [x]):
+ with ops.name_scope(name, 'bessel_i0', [x]):
return math_ops.exp(math_ops.abs(x)) * math_ops.bessel_i0e(x)
@tf_export('math.bessel_i1')
-def bessel_i1(x, name='bessel_i1'):
+def bessel_i1(x, name=None):
"""Computes the Bessel i1 function of `x` element-wise.
Modified Bessel function of order 1.
@@ -126,7 +126,7 @@ def bessel_i1(x, name='bessel_i1'):
Equivalent to scipy.special.i1
@end_compatibility
"""
- with ops.name_scope(name, [x]):
+ with ops.name_scope(name, 'bessel_i1', [x]):
return math_ops.exp(math_ops.abs(x)) * math_ops.bessel_i1e(x)
@@ -201,8 +201,8 @@ def einsum(equation, *inputs, **kwargs):
indices in its subscript, or
- the input shapes are inconsistent along a particular axis.
"""
- equation = equation.replace(" ", "")
-
+ equation = equation.replace(' ', '')
+
name = kwargs.pop('name', None)
if kwargs:
raise TypeError('invalid keyword arguments for this function: ' + ', '.join(
diff --git a/tensorflow/python/ops/special_math_ops_test.py b/tensorflow/python/ops/special_math_ops_test.py
index b7e164f149..9bc4098d5b 100644
--- a/tensorflow/python/ops/special_math_ops_test.py
+++ b/tensorflow/python/ops/special_math_ops_test.py
@@ -25,24 +25,25 @@ from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
+from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
-
class LBetaTest(test.TestCase):
+ @test_util.run_in_graph_and_eager_modes
def test_one_dimensional_arg(self):
# Should evaluate to 1 and 1/2.
x_one = [1, 1.]
x_one_half = [2, 1.]
with self.test_session(use_gpu=True):
- self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_one)).eval())
- self.assertAllClose(0.5,
- math_ops.exp(
- special_math_ops.lbeta(x_one_half)).eval())
+ self.assertAllClose(
+ 1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one))))
+ self.assertAllClose(
+ 0.5, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half))))
self.assertEqual([], special_math_ops.lbeta(x_one).get_shape())
def test_one_dimensional_arg_dynamic(self):
@@ -53,7 +54,8 @@ class LBetaTest(test.TestCase):
ph = array_ops.placeholder(dtypes.float32)
beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
self.assertAllClose(1, beta_ph.eval(feed_dict={ph: x_one}))
- self.assertAllClose(0.5, beta_ph.eval(feed_dict={ph: x_one_half}))
+ self.assertAllClose(0.5,
+ beta_ph.eval(feed_dict={ph: x_one_half}))
def test_four_dimensional_arg_with_partial_shape_dynamic(self):
x_ = np.ones((3, 2, 3, 4))
@@ -66,15 +68,17 @@ class LBetaTest(test.TestCase):
with self.test_session(use_gpu=True):
x_ph = array_ops.placeholder(dtypes.float32, [3, 2, 3, None])
beta_ph = math_ops.exp(special_math_ops.lbeta(x_ph))
- self.assertAllClose(expected_beta_x, beta_ph.eval(feed_dict={x_ph: x_}))
+ self.assertAllClose(expected_beta_x,
+ beta_ph.eval(feed_dict={x_ph: x_}))
+ @test_util.run_in_graph_and_eager_modes
def test_two_dimensional_arg(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.test_session(use_gpu=True):
- self.assertAllClose([0.5, 0.5],
- math_ops.exp(
- special_math_ops.lbeta(x_one_half)).eval())
+ self.assertAllClose(
+ [0.5, 0.5],
+ self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half))))
self.assertEqual((2,), special_math_ops.lbeta(x_one_half).get_shape())
def test_two_dimensional_arg_dynamic(self):
@@ -83,50 +87,59 @@ class LBetaTest(test.TestCase):
with self.test_session(use_gpu=True):
ph = array_ops.placeholder(dtypes.float32)
beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
- self.assertAllClose([0.5, 0.5], beta_ph.eval(feed_dict={ph: x_one_half}))
+ self.assertAllClose([0.5, 0.5],
+ beta_ph.eval(feed_dict={ph: x_one_half}))
+ @test_util.run_in_graph_and_eager_modes
def test_two_dimensional_proper_shape(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.test_session(use_gpu=True):
- self.assertAllClose([0.5, 0.5],
- math_ops.exp(
- special_math_ops.lbeta(x_one_half)).eval())
+ self.assertAllClose(
+ [0.5, 0.5],
+ self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half))))
self.assertEqual(
(2,),
- array_ops.shape(special_math_ops.lbeta(x_one_half)).eval())
+ self.evaluate(array_ops.shape(special_math_ops.lbeta(x_one_half))))
self.assertEqual(
tensor_shape.TensorShape([2]),
special_math_ops.lbeta(x_one_half).get_shape())
+ @test_util.run_in_graph_and_eager_modes
def test_complicated_shape(self):
with self.test_session(use_gpu=True):
x = ops.convert_to_tensor(np.random.rand(3, 2, 2))
- self.assertAllEqual((3, 2),
- array_ops.shape(special_math_ops.lbeta(x)).eval())
+ self.assertAllEqual(
+ (3, 2), self.evaluate(array_ops.shape(special_math_ops.lbeta(x))))
self.assertEqual(
tensor_shape.TensorShape([3, 2]),
special_math_ops.lbeta(x).get_shape())
+ @test_util.run_in_graph_and_eager_modes
def test_length_1_last_dimension_results_in_one(self):
# If there is only one coefficient, the formula still works, and we get one
# as the answer, always.
x_a = [5.5]
x_b = [0.1]
with self.test_session(use_gpu=True):
- self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_a)).eval())
- self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_b)).eval())
+ self.assertAllClose(
+ 1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_a))))
+ self.assertAllClose(
+ 1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_b))))
self.assertEqual((), special_math_ops.lbeta(x_a).get_shape())
+ @test_util.run_in_graph_and_eager_modes
def test_empty_rank1_returns_negative_infinity(self):
with self.test_session(use_gpu=True):
x = constant_op.constant([], shape=[0])
lbeta_x = special_math_ops.lbeta(x)
expected_result = constant_op.constant(-np.inf, shape=())
- self.assertAllEqual(expected_result.eval(), lbeta_x.eval())
+ self.assertAllEqual(self.evaluate(expected_result),
+ self.evaluate(lbeta_x))
self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
+ @test_util.run_in_graph_and_eager_modes
def test_empty_rank2_with_zero_last_dim_returns_negative_infinity(self):
with self.test_session(use_gpu=True):
event_size = 0
@@ -135,9 +148,11 @@ class LBetaTest(test.TestCase):
lbeta_x = special_math_ops.lbeta(x)
expected_result = constant_op.constant(-np.inf, shape=[batch_size])
- self.assertAllEqual(expected_result.eval(), lbeta_x.eval())
+ self.assertAllEqual(self.evaluate(expected_result),
+ self.evaluate(lbeta_x))
self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
+ @test_util.run_in_graph_and_eager_modes
def test_empty_rank2_with_zero_batch_dim_returns_empty(self):
with self.test_session(use_gpu=True):
batch_size = 0
@@ -147,12 +162,14 @@ class LBetaTest(test.TestCase):
expected_result = constant_op.constant([], shape=[batch_size])
- self.assertAllEqual(expected_result.eval(), lbeta_x.eval())
+ self.assertAllEqual(self.evaluate(expected_result),
+ self.evaluate(lbeta_x))
self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
class BesselTest(test.TestCase):
+ @test_util.run_in_graph_and_eager_modes
def test_bessel_i0(self):
x_single = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
x_double = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
@@ -165,6 +182,7 @@ class BesselTest(test.TestCase):
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
+ @test_util.run_in_graph_and_eager_modes
def test_bessel_i1(self):
x_single = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
x_double = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
@@ -316,7 +334,7 @@ class EinsumTest(test.TestCase):
output_tensor = special_math_ops.einsum(axes, *input_tensors)
with self.test_session(use_gpu=True):
- output_value = output_tensor.eval()
+ output_value = self.evaluate(output_tensor)
correct_value = np.einsum(axes, *input_vals)
diff --git a/tensorflow/python/ops/spectral_ops.py b/tensorflow/python/ops/spectral_ops.py
index 28054f50ef..293aace728 100644
--- a/tensorflow/python/ops/spectral_ops.py
+++ b/tensorflow/python/ops/spectral_ops.py
@@ -167,8 +167,8 @@ def _validate_dct_arguments(dct_type, n, axis, norm):
raise NotImplementedError("The DCT length argument is not implemented.")
if axis != -1:
raise NotImplementedError("axis must be -1. Got: %s" % axis)
- if dct_type != 2:
- raise ValueError("Only the Type II DCT is supported.")
+ if dct_type not in (2, 3):
+ raise ValueError("Only Types II and III (I)DCT are supported.")
if norm not in (None, "ortho"):
raise ValueError(
"Unknown normalization. Expected None or 'ortho', got: %s" % norm)
@@ -179,18 +179,20 @@ def _validate_dct_arguments(dct_type, n, axis, norm):
def dct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disable=redefined-builtin
"""Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`.
- Currently only Type II is supported. Implemented using a length `2N` padded
- @{tf.spectral.rfft}, as described here: https://dsp.stackexchange.com/a/10606
+ Currently only Types II and III are supported. Type II is implemented using a
+ length `2N` padded @{tf.spectral.rfft}, as described here:
+ https://dsp.stackexchange.com/a/10606. Type III is a fairly straightforward
+ inverse of Type II (i.e. using a length `2N` padded @{tf.spectral.irfft}).
@compatibility(scipy)
- Equivalent to scipy.fftpack.dct for the Type-II DCT.
+ Equivalent to scipy.fftpack.dct for Type-II and Type-III DCT.
https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
@end_compatibility
Args:
input: A `[..., samples]` `float32` `Tensor` containing the signals to
take the DCT of.
- type: The DCT type to perform. Must be 2.
+ type: The DCT type to perform. Must be 2 or 3.
n: For future expansion. The length of the transform. Must be `None`.
axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
norm: The normalization to apply. `None` for no normalization or `'ortho'`
@@ -201,8 +203,8 @@ def dct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disabl
A `[..., samples]` `float32` `Tensor` containing the DCT of `input`.
Raises:
- ValueError: If `type` is not `2`, `n` is not `None, `axis` is not `-1`, or
- `norm` is not `None` or `'ortho'`.
+ ValueError: If `type` is not `2` or `3`, `n` is not `None`, `axis` is not
+ `-1`, or `norm` is not `None` or `'ortho'`.
[dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform
"""
@@ -214,22 +216,91 @@ def dct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disabl
axis_dim = input.shape[-1].value or _array_ops.shape(input)[-1]
axis_dim_float = _math_ops.to_float(axis_dim)
- scale = 2.0 * _math_ops.exp(_math_ops.complex(
- 0.0, -_math.pi * _math_ops.range(axis_dim_float) /
- (2.0 * axis_dim_float)))
-
- # TODO(rjryan): Benchmark performance and memory usage of the various
- # approaches to computing a DCT via the RFFT.
- dct2 = _math_ops.real(
- rfft(input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale)
-
- if norm == "ortho":
- n1 = 0.5 * _math_ops.rsqrt(axis_dim_float)
- n2 = n1 * _math_ops.sqrt(2.0)
- # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
- weights = _array_ops.pad(
- _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
- constant_values=n2)
- dct2 *= weights
-
- return dct2
+ if type == 2:
+ scale = 2.0 * _math_ops.exp(
+ _math_ops.complex(
+ 0.0, -_math_ops.range(axis_dim_float) * _math.pi * 0.5 /
+ axis_dim_float))
+
+ # TODO(rjryan): Benchmark performance and memory usage of the various
+ # approaches to computing a DCT via the RFFT.
+ dct2 = _math_ops.real(
+ rfft(input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale)
+
+ if norm == "ortho":
+ n1 = 0.5 * _math_ops.rsqrt(axis_dim_float)
+ n2 = n1 * _math_ops.sqrt(2.0)
+ # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
+ weights = _array_ops.pad(
+ _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
+ constant_values=n2)
+ dct2 *= weights
+
+ return dct2
+
+ elif type == 3:
+ if norm == "ortho":
+ n1 = _math_ops.sqrt(axis_dim_float)
+ n2 = n1 * _math_ops.sqrt(0.5)
+ # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
+ weights = _array_ops.pad(
+ _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
+ constant_values=n2)
+ input *= weights
+ else:
+ input *= axis_dim_float
+ scale = 2.0 * _math_ops.exp(
+ _math_ops.complex(
+ 0.0,
+ _math_ops.range(axis_dim_float) * _math.pi * 0.5 /
+ axis_dim_float))
+ dct3 = _math_ops.real(
+ irfft(
+ scale * _math_ops.complex(input, 0.0),
+ fft_length=[2 * axis_dim]))[..., :axis_dim]
+
+ return dct3
+
+
+# TODO(rjryan): Implement `type`, `n` and `axis` parameters.
+@tf_export("spectral.idct")
+def idct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disable=redefined-builtin
+ """Computes the 1D [Inverse Discrete Cosine Transform (DCT)][idct] of `input`.
+
+ Currently only Types II and III are supported. Type III is the inverse of
+ Type II, and vice versa.
+
+ Note that you must re-normalize by 1/(2n) to obtain an inverse if `norm` is
+ not `'ortho'`. That is:
+ `signal == idct(dct(signal)) * 0.5 / signal.shape[-1]`.
+ When `norm='ortho'`, we have:
+ `signal == idct(dct(signal, norm='ortho'), norm='ortho')`.
+
+ @compatibility(scipy)
+ Equivalent to scipy.fftpack.idct for Type-II and Type-III DCT.
+ https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.idct.html
+ @end_compatibility
+
+ Args:
+ input: A `[..., samples]` `float32` `Tensor` containing the signals to take
+ the DCT of.
+ type: The IDCT type to perform. Must be 2 or 3.
+ n: For future expansion. The length of the transform. Must be `None`.
+ axis: For future expansion. The axis to compute the IDCT along. Must be `-1`.
+ norm: The normalization to apply. `None` for no normalization or `'ortho'`
+ for orthonormal normalization.
+ name: An optional name for the operation.
+
+ Returns:
+ A `[..., samples]` `float32` `Tensor` containing the IDCT of `input`.
+
+ Raises:
+ ValueError: If `type` is not `2` or `3`, `n` is not `None`, `axis` is not
+ `-1`, or `norm` is not `None` or `'ortho'`.
+
+ [idct]:
+ https://en.wikipedia.org/wiki/Discrete_cosine_transform#Inverse_transforms
+ """
+ _validate_dct_arguments(type, n, axis, norm)
+ inverse_type = {2: 3, 3: 2}[type]
+ return dct(input, type=inverse_type, n=n, axis=axis, norm=norm, name=name)
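To make the inverse relationship above concrete, here is a minimal round-trip check. This is a sketch that assumes the `tf.spectral.idct` API introduced in this diff and a TF 1.x graph session; tolerances are loose because the transforms run in `float32`.

```python
import numpy as np
import tensorflow as tf

signal = np.random.rand(4, 128).astype(np.float32)
n = signal.shape[-1]

graph_dct = tf.spectral.dct(signal)                       # Type-II DCT
back = tf.spectral.idct(graph_dct) * 0.5 / n              # rescale by 1/(2n)
back_ortho = tf.spectral.idct(
    tf.spectral.dct(signal, norm='ortho'), norm='ortho')  # exact inverse

with tf.Session() as sess:
    a, b = sess.run([back, back_ortho])

np.testing.assert_allclose(a, signal, rtol=1e-4, atol=1e-4)
np.testing.assert_allclose(b, signal, rtol=1e-4, atol=1e-4)
```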
diff --git a/tensorflow/python/ops/state_ops.py b/tensorflow/python/ops/state_ops.py
index 8cb6a0537e..2c93cf72c7 100644
--- a/tensorflow/python/ops/state_ops.py
+++ b/tensorflow/python/ops/state_ops.py
@@ -19,7 +19,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_resource_variable_ops
@@ -124,9 +123,7 @@ def is_variable_initialized(ref, name=None):
if ref.dtype._is_ref_dtype:
return gen_state_ops.is_variable_initialized(ref=ref, name=name)
# Handle resource variables.
- if context.executing_eagerly() or ref.op.type == "VarHandleOp":
- return gen_resource_variable_ops.var_is_initialized_op(ref.handle,
- name=name)
+ return ref.is_initialized(name=name)
@tf_export("assign_sub")
diff --git a/tensorflow/python/ops/summary_ops_v2.py b/tensorflow/python/ops/summary_ops_v2.py
index b80f84eb7c..00150fe688 100644
--- a/tensorflow/python/ops/summary_ops_v2.py
+++ b/tensorflow/python/ops/summary_ops_v2.py
@@ -306,10 +306,11 @@ def create_db_writer(db_uri,
def _make_summary_writer(name, factory, **kwargs):
resource = gen_summary_ops.summary_writer(shared_name=name)
init_op_fn = lambda: factory(resource, **kwargs)
- # TODO(apassos): Consider doing this instead.
- # if not context.executing_eagerly():
- # ops.get_default_session().run(init_op)
- ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, init_op_fn())
+ init_op = init_op_fn()
+ if not context.executing_eagerly():
+ # TODO(apassos): Consider doing this instead.
+ # ops.get_default_session().run(init_op)
+ ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, init_op)
return SummaryWriter(resource, init_op_fn)
@@ -380,7 +381,8 @@ def summary_writer_function(name, tensor, function, family=None):
with ops.device("cpu:0"):
op = smart_cond.smart_cond(
should_record_summaries(), record, _nothing, name="")
- ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) # pylint: disable=protected-access
+ if not context.executing_eagerly():
+ ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) # pylint: disable=protected-access
return op
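Both `summary_ops_v2` hunks follow the same pattern: the op is still created in either mode, but it is only registered in a graph collection when a graph actually exists, since collections have no meaning under eager execution. A minimal sketch of that guard, using illustrative names (`register_init_op` and `"my_init_ops"` are not TF APIs):

```python
import tensorflow as tf
from tensorflow.python.eager import context

def register_init_op(init_op_fn, collection="my_init_ops"):
    init_op = init_op_fn()  # construct (or, in eager mode, run) the op
    if not context.executing_eagerly():
        # Graph collections only exist on a tf.Graph; skip them in eager mode.
        tf.add_to_collection(collection, init_op)
    return init_op

register_init_op(tf.no_op)
print(tf.get_collection("my_init_ops"))  # one NoOp in graph mode, [] in eager
```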
diff --git a/tensorflow/python/ops/variable_scope.py b/tensorflow/python/ops/variable_scope.py
index 47414c28af..77f67c18ee 100644
--- a/tensorflow/python/ops/variable_scope.py
+++ b/tensorflow/python/ops/variable_scope.py
@@ -1,4 +1,4 @@
- # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -44,9 +44,11 @@ from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
-__all__ = ["AUTO_REUSE", "VariableScope", "get_variable_scope",
- "get_variable", "get_local_variable", "variable_scope",
- "variable_op_scope", "no_regularizer"]
+__all__ = [
+ "AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
+ "get_local_variable", "variable_scope", "variable_op_scope",
+ "no_regularizer", "VariableSynchronization", "VariableAggregation"
+]
class _PartitionInfo(object):
@@ -188,6 +190,38 @@ class _ReuseMode(enum.Enum):
# REUSE_FALSE = 2
# REUSE_TRUE = 3
+
+@tf_export("VariableSynchronization")
+class VariableSynchronization(enum.Enum):
+ """Indicates when a distributed variable will be synced."""
+
+ # Indicates that the synchronization will be determined by the current
+ # `DistributionStrategy` (e.g. with `MirroredStrategy` this would be
+ # `ON_WRITE`).
+ AUTO = 0
+
+ # Indicates that there will only be one copy of the variable, so there is no
+ # need to sync.
+ NONE = 1
+
+ # Indicates that the variable will be aggregated across devices
+ # every time it is updated.
+ ON_WRITE = 2
+
+ # Indicates that the variable will be aggregated across devices
+ # when it is read (e.g. when checkpointing or when evaluating an op that uses
+ # the variable).
+ ON_READ = 3
+
+
+@tf_export("VariableAggregation")
+class VariableAggregation(enum.Enum):
+ """Indicates how a distributed variable will be aggregated."""
+ NONE = 0
+ SUM = 1
+ MEAN = 2
+
+
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export("AUTO_REUSE").export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
@@ -214,11 +248,23 @@ class _VariableStore(object):
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self._store_eager_variables = False
- def get_variable(self, name, shape=None, dtype=dtypes.float32,
- initializer=None, regularizer=None, reuse=None,
- trainable=True, collections=None, caching_device=None,
- partitioner=None, validate_shape=True, use_resource=None,
- custom_getter=None, constraint=None):
+ def get_variable(self,
+ name,
+ shape=None,
+ dtype=dtypes.float32,
+ initializer=None,
+ regularizer=None,
+ reuse=None,
+ trainable=None,
+ collections=None,
+ caching_device=None,
+ partitioner=None,
+ validate_shape=True,
+ use_resource=None,
+ custom_getter=None,
+ constraint=None,
+ synchronization=VariableSynchronization.AUTO,
+ aggregation=VariableAggregation.NONE):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
@@ -254,6 +300,8 @@ class _VariableStore(object):
forced to be False.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
+ `trainable` defaults to `True` unless `synchronization` is
+ set to `ON_READ`.
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
@@ -291,6 +339,15 @@ class _VariableStore(object):
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
+ synchronization: Indicates when a distributed variable will be
+ synchronized. Accepted values are constants defined in the class
+ @{tf.VariableSynchronization}. By default the synchronization is set to
+ `AUTO` and the current `DistributionStrategy` chooses
+ when to synchronize. If `synchronization` is set to `ON_READ`,
+ `trainable` must not be set to `True`.
+ aggregation: Indicates how a distributed variable will be aggregated.
+ Accepted values are constants defined in the class
+ @{tf.VariableAggregation}.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
@@ -343,11 +400,22 @@ class _VariableStore(object):
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
- def _true_getter(name, shape=None, dtype=dtypes.float32, # pylint: disable=missing-docstring
- initializer=None, regularizer=None, reuse=None,
- trainable=True, collections=None, caching_device=None,
- partitioner=None, validate_shape=True, use_resource=None,
- constraint=None):
+ def _true_getter( # pylint: disable=missing-docstring
+ name,
+ shape=None,
+ dtype=dtypes.float32,
+ initializer=None,
+ regularizer=None,
+ reuse=None,
+ trainable=None,
+ collections=None,
+ caching_device=None,
+ partitioner=None,
+ validate_shape=True,
+ use_resource=None,
+ constraint=None,
+ synchronization=VariableSynchronization.AUTO,
+ aggregation=VariableAggregation.NONE):
is_scalar = (shape is not None
and isinstance(shape, collections_lib.Sequence)
and not shape)
@@ -397,11 +465,24 @@ class _VariableStore(object):
"name was already created with partitioning?" % name)
return self._get_single_variable(
- name=name, shape=shape, dtype=dtype,
- initializer=initializer, regularizer=regularizer, reuse=reuse,
- trainable=trainable, collections=collections,
- caching_device=caching_device, validate_shape=validate_shape,
- use_resource=use_resource, constraint=constraint)
+ name=name,
+ shape=shape,
+ dtype=dtype,
+ initializer=initializer,
+ regularizer=regularizer,
+ reuse=reuse,
+ trainable=trainable,
+ collections=collections,
+ caching_device=caching_device,
+ validate_shape=validate_shape,
+ use_resource=use_resource,
+ constraint=constraint,
+ synchronization=synchronization,
+ aggregation=aggregation)
+
+ # Set trainable value based on synchronization value.
+ trainable = _get_trainable_value(
+ synchronization=synchronization, trainable=trainable)
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
@@ -420,6 +501,8 @@ class _VariableStore(object):
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
+ "synchronization": synchronization,
+ "aggregation": aggregation,
}
# `fn_args` can handle functions, `functools.partial`, `lambda`.
if "constraint" in function_utils.fn_args(custom_getter):
@@ -427,18 +510,36 @@ class _VariableStore(object):
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
- name, shape=shape, dtype=dtype,
- initializer=initializer, regularizer=regularizer,
- reuse=reuse, trainable=trainable, collections=collections,
- caching_device=caching_device, partitioner=partitioner,
- validate_shape=validate_shape, use_resource=use_resource,
- constraint=constraint)
-
- def _get_partitioned_variable(
- self, name, partitioner, shape=None, dtype=dtypes.float32,
- initializer=None, regularizer=None, reuse=None,
- trainable=True, collections=None, caching_device=None,
- validate_shape=True, use_resource=None, constraint=None):
+ name,
+ shape=shape,
+ dtype=dtype,
+ initializer=initializer,
+ regularizer=regularizer,
+ reuse=reuse,
+ trainable=trainable,
+ collections=collections,
+ caching_device=caching_device,
+ partitioner=partitioner,
+ validate_shape=validate_shape,
+ use_resource=use_resource,
+ constraint=constraint,
+ synchronization=synchronization,
+ aggregation=aggregation)
+
+ def _get_partitioned_variable(self,
+ name,
+ partitioner,
+ shape=None,
+ dtype=dtypes.float32,
+ initializer=None,
+ regularizer=None,
+ reuse=None,
+ trainable=None,
+ collections=None,
+ caching_device=None,
+ validate_shape=True,
+ use_resource=None,
+ constraint=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
@@ -688,12 +789,14 @@ class _VariableStore(object):
regularizer=None,
partition_info=None,
reuse=None,
- trainable=True,
+ trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
- constraint=None):
+ constraint=None,
+ synchronization=VariableSynchronization.AUTO,
+ aggregation=VariableAggregation.NONE):
"""Get or create a single Variable (e.g. a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
@@ -713,6 +816,8 @@ class _VariableStore(object):
validate_shape: see get_variable.
use_resource: see get_variable.
constraint: see get_variable.
+ synchronization: see get_variable.
+ aggregation: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
@@ -793,7 +898,9 @@ class _VariableStore(object):
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
- use_resource=use_resource)
+ use_resource=use_resource,
+ synchronization=synchronization,
+ aggregation=aggregation)
if context.executing_eagerly() and self._store_eager_variables:
if collections:
ops.add_to_collections(collections, v)
@@ -1045,14 +1152,16 @@ class VariableScope(object):
initializer=None,
regularizer=None,
reuse=None,
- trainable=True,
+ trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
- constraint=None):
+ constraint=None,
+ synchronization=VariableSynchronization.AUTO,
+ aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
@@ -1090,12 +1199,22 @@ class VariableScope(object):
if dtype is None:
dtype = self._dtype
return var_store.get_variable(
- full_name, shape=shape, dtype=dtype, initializer=initializer,
- regularizer=regularizer, reuse=reuse, trainable=trainable,
- collections=collections, caching_device=caching_device,
- partitioner=partitioner, validate_shape=validate_shape,
- use_resource=use_resource, custom_getter=custom_getter,
- constraint=constraint)
+ full_name,
+ shape=shape,
+ dtype=dtype,
+ initializer=initializer,
+ regularizer=regularizer,
+ reuse=reuse,
+ trainable=trainable,
+ collections=collections,
+ caching_device=caching_device,
+ partitioner=partitioner,
+ validate_shape=validate_shape,
+ use_resource=use_resource,
+ custom_getter=custom_getter,
+ constraint=constraint,
+ synchronization=synchronization,
+ aggregation=aggregation)
def _get_partitioned_variable(self,
var_store,
@@ -1104,7 +1223,7 @@ class VariableScope(object):
dtype=None,
initializer=None,
regularizer=None,
- trainable=True,
+ trainable=None,
collections=None,
caching_device=None,
partitioner=None,
@@ -1319,21 +1438,35 @@ def get_variable(name,
dtype=None,
initializer=None,
regularizer=None,
- trainable=True,
+ trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
- constraint=None):
+ constraint=None,
+ synchronization=VariableSynchronization.AUTO,
+ aggregation=VariableAggregation.NONE):
return get_variable_scope().get_variable(
- _get_default_variable_store(), name, shape=shape, dtype=dtype,
- initializer=initializer, regularizer=regularizer, trainable=trainable,
- collections=collections, caching_device=caching_device,
- partitioner=partitioner, validate_shape=validate_shape,
- use_resource=use_resource, custom_getter=custom_getter,
- constraint=constraint)
+ _get_default_variable_store(),
+ name,
+ shape=shape,
+ dtype=dtype,
+ initializer=initializer,
+ regularizer=regularizer,
+ trainable=trainable,
+ collections=collections,
+ caching_device=caching_device,
+ partitioner=partitioner,
+ validate_shape=validate_shape,
+ use_resource=use_resource,
+ custom_getter=custom_getter,
+ constraint=constraint,
+ synchronization=synchronization,
+ aggregation=aggregation)
+
+
get_variable_or_local_docstring = (
"""%s
@@ -1430,29 +1563,44 @@ get_variable.__doc__ = get_variable_or_local_docstring % (
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export("get_local_variable")
-def get_local_variable(name,
- shape=None,
- dtype=None,
- initializer=None,
- regularizer=None,
- trainable=False, # pylint: disable=unused-argument
- collections=None,
- caching_device=None,
- partitioner=None,
- validate_shape=True,
- use_resource=None,
- custom_getter=None,
- constraint=None):
+def get_local_variable( # pylint: disable=missing-docstring
+ name,
+ shape=None,
+ dtype=None,
+ initializer=None,
+ regularizer=None,
+ trainable=False, # pylint: disable=unused-argument
+ collections=None,
+ caching_device=None,
+ partitioner=None,
+ validate_shape=True,
+ use_resource=None,
+ synchronization=VariableSynchronization.AUTO,
+ aggregation=VariableAggregation.NONE,
+ custom_getter=None,
+ constraint=None):
if collections:
collections += [ops.GraphKeys.LOCAL_VARIABLES]
else:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(
- name, shape=shape, dtype=dtype, initializer=initializer,
- regularizer=regularizer, trainable=False, collections=collections,
- caching_device=caching_device, partitioner=partitioner,
- validate_shape=validate_shape, use_resource=use_resource,
- custom_getter=custom_getter, constraint=constraint)
+ name,
+ shape=shape,
+ dtype=dtype,
+ initializer=initializer,
+ regularizer=regularizer,
+ trainable=False,
+ collections=collections,
+ caching_device=caching_device,
+ partitioner=partitioner,
+ validate_shape=validate_shape,
+ use_resource=use_resource,
+ synchronization=synchronization,
+ aggregation=aggregation,
+ custom_getter=custom_getter,
+ constraint=constraint)
+
+
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
@@ -2202,11 +2350,28 @@ def _compute_slice_dim_and_shape(full_shape, slicing):
return slice_dim, slice_shape
+def _get_trainable_value(synchronization, trainable):
+ """Computes the trainable value based on the given arguments."""
+ if synchronization == VariableSynchronization.ON_READ:
+ if trainable:
+ raise ValueError(
+ "Synchronization value can be set to "
+ "VariableSynchronization.ON_READ only for non-trainable variables. "
+ "You have specified trainable=True and "
+ "synchronization=VariableSynchronization.ON_READ.")
+ else:
+ # Set trainable to be false when variable is to be synced on read.
+ trainable = False
+ elif trainable is None:
+ trainable = True
+ return trainable
+
+
def default_variable_creator(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
- trainable = kwargs.get("trainable", True)
+ trainable = kwargs.get("trainable", None)
collections = kwargs.get("collections", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
@@ -2214,6 +2379,12 @@ def default_variable_creator(next_creator=None, **kwargs):
dtype = kwargs.get("dtype", None)
constraint = kwargs.get("constraint", None)
use_resource = kwargs.get("use_resource", None)
+
+ # Set trainable value based on synchronization value.
+ synchronization = kwargs.get("synchronization", VariableSynchronization.AUTO)
+ trainable = _get_trainable_value(
+ synchronization=synchronization, trainable=trainable)
+
if use_resource is None:
use_resource = get_variable_scope().use_resource
if use_resource or (use_resource is None and context.executing_eagerly()):
@@ -2241,25 +2412,35 @@ def _make_getter(captured_getter, captured_previous):
def variable(initial_value=None,
- trainable=True,
+ trainable=None,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
dtype=None,
constraint=None,
- use_resource=None):
+ use_resource=None,
+ synchronization=VariableSynchronization.AUTO,
+ aggregation=VariableAggregation.NONE):
previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
for getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access
previous_getter = _make_getter(getter, previous_getter)
- return previous_getter(initial_value=initial_value,
- trainable=trainable,
- collections=collections,
- validate_shape=validate_shape,
- caching_device=caching_device,
- name=name, dtype=dtype,
- constraint=constraint,
- use_resource=use_resource)
+
+ # Map an explicit `aggregation=None` to the enum value VariableAggregation.NONE.
+ if aggregation is None:
+ aggregation = VariableAggregation.NONE
+ return previous_getter(
+ initial_value=initial_value,
+ trainable=trainable,
+ collections=collections,
+ validate_shape=validate_shape,
+ caching_device=caching_device,
+ name=name,
+ dtype=dtype,
+ constraint=constraint,
+ use_resource=use_resource,
+ synchronization=synchronization,
+ aggregation=aggregation)
@tf_contextlib.contextmanager
@@ -2293,6 +2474,8 @@ def variable_creator_scope(variable_creator):
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
+ `trainable` defaults to `True` unless `synchronization` is
+ set to `ON_READ`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
@@ -2311,6 +2494,15 @@ def variable_creator_scope(variable_creator):
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
use_resource: if True, a ResourceVariable is always created.
+ synchronization: Indicates when a distributed variable will be
+ synchronized. Accepted values are constants defined in the class
+ @{tf.VariableSynchronization}. By default the synchronization is set to
+ `AUTO` and the current `DistributionStrategy` chooses
+ when to synchronize. If `synchronization` is set to `ON_READ`,
+ `trainable` must not be set to `True`.
+ aggregation: Indicates how a distributed variable will be aggregated.
+ Accepted values are constants defined in the class
+ @{tf.VariableAggregation}.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
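Putting the `variable_scope` changes together, here is a minimal graph-mode sketch of the new arguments and of the `trainable` defaulting enforced by `_get_trainable_value`; it assumes the `tf.VariableSynchronization`/`tf.VariableAggregation` exports added above:

```python
import tensorflow as tf

# ON_READ forces trainable to default to False (and forbids trainable=True).
v = tf.get_variable(
    "replica_sum", shape=[], initializer=tf.zeros_initializer(),
    synchronization=tf.VariableSynchronization.ON_READ,
    aggregation=tf.VariableAggregation.SUM)
assert v not in tf.trainable_variables()

# Any other synchronization keeps the old default of trainable=True.
w = tf.get_variable("weights", shape=[3],
                    initializer=tf.zeros_initializer())
assert w in tf.trainable_variables()

try:
    tf.get_variable("bad", shape=[], trainable=True,
                    synchronization=tf.VariableSynchronization.ON_READ)
except ValueError as e:
    print(e)  # ON_READ is only valid for non-trainable variables
```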
diff --git a/tensorflow/python/ops/variables.py b/tensorflow/python/ops/variables.py
index 4be9f5eb68..87e0de197c 100644
--- a/tensorflow/python/ops/variables.py
+++ b/tensorflow/python/ops/variables.py
@@ -17,6 +17,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import six
+
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python.eager import context
@@ -36,8 +38,32 @@ from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
+def _default_variable_creator(_, *args, **kwds):
+ return RefVariable(*args, **kwds)
+
+
+def _make_getter(captured_getter, captured_previous):
+ """To avoid capturing loop variables."""
+ def getter(*args, **kwargs):
+ return captured_getter(captured_previous, *args, **kwargs)
+ return getter
+
+
+class VariableMetaclass(type):
+ """Metaclass to allow construction of tf.Variable to be overridden."""
+
+ def __call__(cls, *args, **kwargs):
+ if cls is Variable:
+ previous_getter = lambda *a, **k: _default_variable_creator(None, *a, **k)
+ # TODO(apassos) use a stack of getters here
+ return previous_getter(*args, **kwargs)
+ else:
+ return super(VariableMetaclass, cls).__call__(*args, **kwargs)
+
+
@tf_export("Variable")
-class Variable(checkpointable.CheckpointableBase):
+class Variable(six.with_metaclass(VariableMetaclass,
+ checkpointable.CheckpointableBase)):
"""See the @{$variables$Variables How To} for a high level overview.
A variable maintains state in the graph across calls to `run()`. You add a
@@ -234,6 +260,554 @@ class Variable(checkpointable.CheckpointableBase):
for details on how variables work in eager execution.
@end_compatibility
"""
+ raise NotImplementedError
+
+ def __repr__(self):
+ raise NotImplementedError
+
+ def value(self):
+ """Returns the last snapshot of this variable.
+
+ You usually do not need to call this method as all ops that need the value
+ of the variable call it automatically through a `convert_to_tensor()` call.
+
+ Returns a `Tensor` which holds the value of the variable. You can not
+ assign a new value to this tensor as it is not a reference to the variable.
+
+ To avoid copies, if the consumer of the returned value is on the same device
+ as the variable, this actually returns the live value of the variable, not
+ a copy. Updates to the variable are seen by the consumer. If the consumer
+ is on a different device it will get a copy of the variable.
+
+ Returns:
+ A `Tensor` containing the value of the variable.
+ """
+ raise NotImplementedError
+
+ def read_value(self):
+ """Returns the value of this variable, read in the current context.
+
+ Can be different from value() if it's on another device, with control
+ dependencies, etc.
+
+ Returns:
+ A `Tensor` containing the value of the variable.
+ """
+ raise NotImplementedError
+
+ def set_shape(self, shape):
+ """Overrides the shape for this variable.
+
+ Args:
+ shape: the `TensorShape` representing the overridden shape.
+ """
+ raise NotImplementedError
+
+ @property
+ def trainable(self):
+ raise NotImplementedError
+
+ def eval(self, session=None):
+ """In a session, computes and returns the value of this variable.
+
+ This is not a graph construction method, it does not add ops to the graph.
+
+ This convenience method requires a session where the graph
+ containing this variable has been launched. If no session is
+ passed, the default session is used. See @{tf.Session} for more
+ information on launching a graph and on sessions.
+
+ ```python
+ v = tf.Variable([1, 2])
+ init = tf.global_variables_initializer()
+
+ with tf.Session() as sess:
+ sess.run(init)
+ # Usage passing the session explicitly.
+ print(v.eval(sess))
+ # Usage with the default session. The 'with' block
+ # above makes 'sess' the default session.
+ print(v.eval())
+ ```
+
+ Args:
+ session: The session to use to evaluate this variable. If
+ none, the default session is used.
+
+ Returns:
+ A numpy `ndarray` with a copy of the value of this variable.
+ """
+ raise NotImplementedError
+
+ def initialized_value(self):
+ """Returns the value of the initialized variable.
+
+ You should use this instead of the variable itself to initialize another
+ variable with a value that depends on the value of this variable.
+
+ ```python
+ # Initialize 'v' with a random tensor.
+ v = tf.Variable(tf.truncated_normal([10, 40]))
+ # Use `initialized_value` to guarantee that `v` has been
+ # initialized before its value is used to initialize `w`.
+ # The random values are picked only once.
+ w = tf.Variable(v.initialized_value() * 2.0)
+ ```
+
+ Returns:
+ A `Tensor` holding the value of this variable after its initializer
+ has run.
+ """
+ raise NotImplementedError
+
+ @property
+ def initial_value(self):
+ """Returns the Tensor used as the initial value for the variable.
+
+ Note that this is different from `initialized_value()` which runs
+ the op that initializes the variable before returning its value.
+ This method returns the tensor that is used by the op that initializes
+ the variable.
+
+ Returns:
+ A `Tensor`.
+ """
+ raise NotImplementedError
+
+ @property
+ def constraint(self):
+ """Returns the constraint function associated with this variable.
+
+ Returns:
+ The constraint function that was passed to the variable constructor.
+ Can be `None` if no constraint was passed.
+ """
+ raise NotImplementedError
+
+ def assign(self, value, use_locking=False):
+ """Assigns a new value to the variable.
+
+ This is essentially a shortcut for `assign(self, value)`.
+
+ Args:
+ value: A `Tensor`. The new value for this variable.
+ use_locking: If `True`, use locking during the assignment.
+
+ Returns:
+ A `Tensor` that will hold the new value of this variable after
+ the assignment has completed.
+ """
+ raise NotImplementedError
+
+ def assign_add(self, delta, use_locking=False):
+ """Adds a value to this variable.
+
+ This is essentially a shortcut for `assign_add(self, delta)`.
+
+ Args:
+ delta: A `Tensor`. The value to add to this variable.
+ use_locking: If `True`, use locking during the operation.
+
+ Returns:
+ A `Tensor` that will hold the new value of this variable after
+ the addition has completed.
+ """
+ raise NotImplementedError
+
+ def assign_sub(self, delta, use_locking=False):
+ """Subtracts a value from this variable.
+
+ This is essentially a shortcut for `assign_sub(self, delta)`.
+
+ Args:
+ delta: A `Tensor`. The value to subtract from this variable.
+ use_locking: If `True`, use locking during the operation.
+
+ Returns:
+ A `Tensor` that will hold the new value of this variable after
+ the subtraction has completed.
+ """
+ raise NotImplementedError
+
+ def scatter_sub(self, sparse_delta, use_locking=False):
+ """Subtracts `IndexedSlices` from this variable.
+
+ This is essentially a shortcut for `scatter_sub(self, sparse_delta.indices,
+ sparse_delta.values)`.
+
+ Args:
+ sparse_delta: `IndexedSlices` to be subtracted from this variable.
+ use_locking: If `True`, use locking during the operation.
+
+ Returns:
+ A `Tensor` that will hold the new value of this variable after
+ the scattered subtraction has completed.
+
+ Raises:
+ ValueError: if `sparse_delta` is not an `IndexedSlices`.
+ """
+ raise NotImplementedError
+
+ def count_up_to(self, limit):
+ """Increments this variable until it reaches `limit`.
+
+ When that Op is run it tries to increment the variable by `1`. If
+ incrementing the variable would bring it above `limit` then the Op raises
+ the exception `OutOfRangeError`.
+
+ If no error is raised, the Op outputs the value of the variable before
+ the increment.
+
+ This is essentially a shortcut for `count_up_to(self, limit)`.
+
+ Args:
+ limit: value at which incrementing the variable raises an error.
+
+ Returns:
+ A `Tensor` that will hold the variable value before the increment. If no
+ other Op modifies this variable, the values produced will all be
+ distinct.
+ """
+ raise NotImplementedError
+
+ def load(self, value, session=None):
+ """Load new value into this variable.
+
+ Writes new value to variable's memory. Doesn't add ops to the graph.
+
+ This convenience method requires a session where the graph
+ containing this variable has been launched. If no session is
+ passed, the default session is used. See @{tf.Session} for more
+ information on launching a graph and on sessions.
+
+ ```python
+ v = tf.Variable([1, 2])
+ init = tf.global_variables_initializer()
+
+ with tf.Session() as sess:
+ sess.run(init)
+ # Usage passing the session explicitly.
+ v.load([2, 3], sess)
+ print(v.eval(sess)) # prints [2 3]
+ # Usage with the default session. The 'with' block
+ # above makes 'sess' the default session.
+ v.load([3, 4], sess)
+ print(v.eval()) # prints [3 4]
+ ```
+
+ Args:
+ value: New variable value.
+ session: The session to use to evaluate this variable. If
+ none, the default session is used.
+
+ Raises:
+ ValueError: If no session is passed and there is no default session.
+ """
+ raise NotImplementedError
+
+ # Conversion to tensor.
+ @staticmethod
+ def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name
+ """Utility function for converting a Variable to a Tensor."""
+ _ = name
+ if dtype and not dtype.is_compatible_with(v.dtype):
+ raise ValueError(
+ "Incompatible type conversion requested to type '%s' for variable "
+ "of type '%s'" % (dtype.name, v.dtype.name))
+ if as_ref:
+ return v._ref() # pylint: disable=protected-access
+ else:
+ return v.value()
+
+ @staticmethod
+ def _OverloadAllOperators(): # pylint: disable=invalid-name
+ """Register overloads for all operators."""
+ for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
+ Variable._OverloadOperator(operator)
+ # For slicing, bind getitem differently than a tensor (use SliceHelperVar
+ # instead)
+ # pylint: disable=protected-access
+ setattr(Variable, "__getitem__", array_ops._SliceHelperVar)
+
+ @staticmethod
+ def _OverloadOperator(operator): # pylint: disable=invalid-name
+ """Defer an operator overload to `ops.Tensor`.
+
+ We pull the operator out of ops.Tensor dynamically to avoid ordering issues.
+
+ Args:
+ operator: string. The operator name.
+ """
+
+ def _run_op(a, *args):
+ # pylint: disable=protected-access
+ return getattr(ops.Tensor, operator)(a._AsTensor(), *args)
+ # Propagate __doc__ to wrapper
+ try:
+ _run_op.__doc__ = getattr(ops.Tensor, operator).__doc__
+ except AttributeError:
+ pass
+
+ setattr(Variable, operator, _run_op)
+
+ # NOTE(mrry): This enables the Variable's overloaded "right" binary
+ # operators to run when the left operand is an ndarray, because it
+ # accords the Variable class higher priority than an ndarray, or a
+ # numpy matrix.
+ # TODO(mrry): Convert this to using numpy's __numpy_ufunc__
+ # mechanism, which allows more control over how Variables interact
+ # with ndarrays.
+ __array_priority__ = 100
+
+ @property
+ def name(self):
+ """The name of this variable."""
+ raise NotImplementedError
+
+ @property
+ def initializer(self):
+ """The initializer operation for this variable."""
+ raise NotImplementedError
+
+ @property
+ def device(self):
+ """The device of this variable."""
+ raise NotImplementedError
+
+ @property
+ def dtype(self):
+ """The `DType` of this variable."""
+ raise NotImplementedError
+
+ @property
+ def op(self):
+ """The `Operation` of this variable."""
+ raise NotImplementedError
+
+ @property
+ def graph(self):
+ """The `Graph` of this variable."""
+ raise NotImplementedError
+
+ @property
+ def shape(self):
+ """The `TensorShape` of this variable.
+
+ Returns:
+ A `TensorShape`.
+ """
+ raise NotImplementedError
+
+ def get_shape(self):
+ """Alias of Variable.shape."""
+ raise NotImplementedError
+
+ def to_proto(self, export_scope=None):
+ """Converts a `Variable` to a `VariableDef` protocol buffer.
+
+ Args:
+ export_scope: Optional `string`. Name scope to remove.
+
+ Returns:
+ A `VariableDef` protocol buffer, or `None` if the `Variable` is not
+ in the specified name scope.
+ """
+ raise NotImplementedError
+
+ @staticmethod
+ def from_proto(variable_def, import_scope=None):
+ """Returns a `Variable` object created from `variable_def`."""
+ return Variable(variable_def=variable_def,
+ import_scope=import_scope)
+
+ class SaveSliceInfo(object):
+ """Information on how to save this Variable as a slice.
+
+ Provides internal support for saving variables as slices of a larger
+ variable. This API is not public and is subject to change.
+
+ Available properties:
+
+ * full_name
+ * full_shape
+ * var_offset
+ * var_shape
+ """
+
+ def __init__(self,
+ full_name=None,
+ full_shape=None,
+ var_offset=None,
+ var_shape=None,
+ save_slice_info_def=None,
+ import_scope=None):
+ """Create a `SaveSliceInfo`.
+
+ Args:
+ full_name: Name of the full variable of which this `Variable` is a
+ slice.
+ full_shape: Shape of the full variable, as a list of int.
+ var_offset: Offset of this `Variable` into the full variable, as a
+ list of int.
+ var_shape: Shape of this `Variable`, as a list of int.
+ save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,
+ recreates the SaveSliceInfo object from its contents.
+ `save_slice_info_def` and other arguments are mutually
+ exclusive.
+ import_scope: Optional `string`. Name scope to add. Only used
+ when initializing from protocol buffer.
+ """
+ if save_slice_info_def:
+ assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
+ self.full_name = ops.prepend_name_scope(
+ save_slice_info_def.full_name, import_scope=import_scope)
+ self.full_shape = [i for i in save_slice_info_def.full_shape]
+ self.var_offset = [i for i in save_slice_info_def.var_offset]
+ self.var_shape = [i for i in save_slice_info_def.var_shape]
+ else:
+ self.full_name = full_name
+ self.full_shape = full_shape
+ self.var_offset = var_offset
+ self.var_shape = var_shape
+
+ @property
+ def spec(self):
+ """Computes the spec string used for saving."""
+ full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
+ sl_spec = ":".join([
+ "%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)
+ ])
+ return full_shape_str + sl_spec
+
+ def to_proto(self, export_scope=None):
+ """Returns a SaveSliceInfoDef() proto.
+
+ Args:
+ export_scope: Optional `string`. Name scope to remove.
+
+ Returns:
+ A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not
+ in the specified name scope.
+ """
+ if (export_scope is None or
+ self.full_name.startswith(export_scope)):
+ save_slice_info_def = variable_pb2.SaveSliceInfoDef()
+ save_slice_info_def.full_name = ops.strip_name_scope(
+ self.full_name, export_scope)
+ for i in self.full_shape:
+ save_slice_info_def.full_shape.append(i)
+ for i in self.var_offset:
+ save_slice_info_def.var_offset.append(i)
+ for i in self.var_shape:
+ save_slice_info_def.var_shape.append(i)
+ return save_slice_info_def
+ else:
+ return None
+
+ def __iadd__(self, other):
+ raise NotImplementedError
+
+ def __isub__(self, other):
+ raise NotImplementedError
+
+ def __imul__(self, other):
+ raise NotImplementedError
+
+ def __idiv__(self, other):
+ raise NotImplementedError
+
+ def __itruediv__(self, other):
+ raise NotImplementedError
+
+ def __irealdiv__(self, other):
+ raise NotImplementedError
+
+ def __ipow__(self, other):
+ raise NotImplementedError
+
+
+# TODO(apassos): do not repeat all comments here
+class RefVariable(Variable):
+ """Ref-based implementation of variables."""
+
+ def __init__(self,
+ initial_value=None,
+ trainable=True,
+ collections=None,
+ validate_shape=True,
+ caching_device=None,
+ name=None,
+ variable_def=None,
+ dtype=None,
+ expected_shape=None,
+ import_scope=None,
+ constraint=None):
+ """Creates a new variable with value `initial_value`.
+
+ The new variable is added to the graph collections listed in `collections`,
+ which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
+
+ If `trainable` is `True` the variable is also added to the graph collection
+ `GraphKeys.TRAINABLE_VARIABLES`.
+
+ This constructor creates both a `variable` Op and an `assign` Op to set the
+ variable to its initial value.
+
+ Args:
+ initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
+ which is the initial value for the Variable. The initial value must have
+ a shape specified unless `validate_shape` is set to False. Can also be a
+ callable with no argument that returns the initial value when called. In
+ that case, `dtype` must be specified. (Note that initializer functions
+ from init_ops.py must first be bound to a shape before being used here.)
+ trainable: If `True`, the default, also adds the variable to the graph
+ collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
+ the default list of variables to use by the `Optimizer` classes.
+ collections: List of graph collections keys. The new variable is added to
+ these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
+ validate_shape: If `False`, allows the variable to be initialized with a
+ value of unknown shape. If `True`, the default, the shape of
+ `initial_value` must be known.
+ caching_device: Optional device string describing where the Variable
+ should be cached for reading. Defaults to the Variable's device.
+ If not `None`, caches on another device. Typical use is to cache
+ on the device where the Ops using the Variable reside, to deduplicate
+ copying through `Switch` and other conditional statements.
+ name: Optional name for the variable. Defaults to `'Variable'` and gets
+ uniquified automatically.
+ variable_def: `VariableDef` protocol buffer. If not `None`, recreates
+ the Variable object with its contents, referencing the variable's nodes
+ in the graph, which must already exist. The graph is not changed.
+ `variable_def` and the other arguments are mutually exclusive.
+ dtype: If set, initial_value will be converted to the given type.
+ If `None`, either the datatype will be kept (if `initial_value` is
+ a Tensor), or `convert_to_tensor` will decide.
+ expected_shape: A TensorShape. If set, initial_value is expected
+ to have this shape.
+ import_scope: Optional `string`. Name scope to add to the
+ `Variable.` Only used when initializing from protocol buffer.
+ constraint: An optional projection function to be applied to the variable
+ after being updated by an `Optimizer` (e.g. used to implement norm
+ constraints or value constraints for layer weights). The function must
+ take as input the unprojected Tensor representing the value of the
+ variable and return the Tensor for the projected value
+ (which must have the same shape). Constraints are not safe to
+ use when doing asynchronous distributed training.
+
+ Raises:
+ ValueError: If both `variable_def` and initial_value are specified.
+ ValueError: If the initial value is not specified, or does not have a
+ shape and `validate_shape` is `True`.
+ RuntimeError: If eager execution is enabled.
+
+ @compatibility(eager)
+ `tf.Variable` is not compatible with eager execution. Use
+ `tfe.Variable` instead which is compatible with both eager execution
+ and graph construction. See [the TensorFlow Eager Execution
+ guide](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/g3doc/guide.md#variables-and-optimizers)
+ for details on how variables work in eager execution.
+ @end_compatibility
+ """
if context.executing_eagerly():
raise RuntimeError(
"tf.Variable not supported when eager execution is enabled. "
@@ -1068,12 +1642,6 @@ class Variable(checkpointable.CheckpointableBase):
else:
return None
- @staticmethod
- def from_proto(variable_def, import_scope=None):
- """Returns a `Variable` object created from `variable_def`."""
- return Variable(variable_def=variable_def,
- import_scope=import_scope)
-
def __iadd__(self, other):
logging.log_first_n(
logging.WARN,
@@ -1093,126 +1661,43 @@ class Variable(checkpointable.CheckpointableBase):
def __imul__(self, other):
logging.log_first_n(
logging.WARN,
- "Variable *= will be deprecated. Use variable.assign_mul"
- " if you want assignment to the variable value or 'x = x * y'"
+ "Variable *= will be deprecated. Use `var.assign(var * other)`"
+ " if you want assignment to the variable value or `x = x * y`"
" if you want a new python Tensor object.", 1)
return self * other
def __idiv__(self, other):
logging.log_first_n(
logging.WARN,
- "Variable /= will be deprecated. Use variable.assign_div"
- " if you want assignment to the variable value or 'x = x / y'"
+ "Variable /= will be deprecated. Use `var.assign(var / other)`"
+ " if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __itruediv__(self, other):
logging.log_first_n(
logging.WARN,
- "Variable /= will be deprecated. Use variable.assign_div"
- " if you want assignment to the variable value or 'x = x / y'"
+ "Variable /= will be deprecated. Use `var.assign(var / other)`"
+ " if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __irealdiv__(self, other):
logging.log_first_n(
logging.WARN,
- "Variable /= will be deprecated. Use variable.assign_div"
- " if you want assignment to the variable value or 'x = x / y'"
+ "Variable /= will be deprecated. Use `var.assign(var / other)`"
+ " if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __ipow__(self, other):
logging.log_first_n(
logging.WARN,
- "Variable **= will be deprecated. Use 'x = x ** y'"
+ "Variable **= will be deprecated. Use `var.assign(var ** other)`"
+ " if you want assignment to the variable value or `x = x ** y`"
" if you want a new python Tensor object.", 1)
return self ** other
- class SaveSliceInfo(object):
- """Information on how to save this Variable as a slice.
-
- Provides internal support for saving variables as slices of a larger
- variable. This API is not public and is subject to change.
-
- Available properties:
-
- * full_name
- * full_shape
- * var_offset
- * var_shape
- """
-
- def __init__(self,
- full_name=None,
- full_shape=None,
- var_offset=None,
- var_shape=None,
- save_slice_info_def=None,
- import_scope=None):
- """Create a `SaveSliceInfo`.
-
- Args:
- full_name: Name of the full variable of which this `Variable` is a
- slice.
- full_shape: Shape of the full variable, as a list of int.
- var_offset: Offset of this `Variable` into the full variable, as a
- list of int.
- var_shape: Shape of this `Variable`, as a list of int.
- save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,
- recreates the SaveSliceInfo object its contents.
- `save_slice_info_def` and other arguments are mutually
- exclusive.
- import_scope: Optional `string`. Name scope to add. Only used
- when initializing from protocol buffer.
- """
- if save_slice_info_def:
- assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
- self.full_name = ops.prepend_name_scope(
- save_slice_info_def.full_name, import_scope=import_scope)
- self.full_shape = [i for i in save_slice_info_def.full_shape]
- self.var_offset = [i for i in save_slice_info_def.var_offset]
- self.var_shape = [i for i in save_slice_info_def.var_shape]
- else:
- self.full_name = full_name
- self.full_shape = full_shape
- self.var_offset = var_offset
- self.var_shape = var_shape
-
- @property
- def spec(self):
- """Computes the spec string used for saving."""
- full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
- sl_spec = ":".join([
- "%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)
- ])
- return full_shape_str + sl_spec
-
- def to_proto(self, export_scope=None):
- """Returns a SaveSliceInfoDef() proto.
-
- Args:
- export_scope: Optional `string`. Name scope to remove.
-
- Returns:
- A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not
- in the specified name scope.
- """
- if (export_scope is None or
- self.full_name.startswith(export_scope)):
- save_slice_info_def = variable_pb2.SaveSliceInfoDef()
- save_slice_info_def.full_name = ops.strip_name_scope(
- self.full_name, export_scope)
- for i in self.full_shape:
- save_slice_info_def.full_shape.append(i)
- for i in self.var_offset:
- save_slice_info_def.var_offset.append(i)
- for i in self.var_shape:
- save_slice_info_def.var_shape.append(i)
- return save_slice_info_def
- else:
- return None
-
def _set_save_slice_info(self, save_slice_info):
"""Sets the slice info for this `Variable`.
@@ -1403,6 +1888,10 @@ class PartitionedVariable(object):
def dtype(self):
return self._dtype
+ @property
+ def shape(self):
+ return self.get_shape()
+
def get_shape(self):
return self._shape
@@ -1722,6 +2211,8 @@ def report_uninitialized_variables(var_list=None,
var_list.append(op.outputs[0])
with ops.name_scope(name):
# Run all operations on CPU
+ if var_list:
+ init_vars = [state_ops.is_variable_initialized(v) for v in var_list]
with ops.device("/cpu:0"):
if not var_list:
# Return an empty tensor so we only need to check for returned tensor
@@ -1729,9 +2220,7 @@ def report_uninitialized_variables(var_list=None,
return array_ops.constant([], dtype=dtypes.string)
else:
# Get a 1-D boolean tensor listing whether each variable is initialized.
- variables_mask = math_ops.logical_not(
- array_ops.stack(
- [state_ops.is_variable_initialized(v) for v in var_list]))
+ variables_mask = math_ops.logical_not(array_ops.stack(init_vars))
# Get a 1-D string tensor containing all the variable names.
variable_names_tensor = array_ops.constant(
[s.op.name for s in var_list])
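The most load-bearing piece of the `variables.py` diff is `VariableMetaclass`: constructing the (now abstract) `Variable` class is rerouted to a creator function that returns a concrete `RefVariable`, while subclasses still construct normally. A self-contained sketch of that dispatch pattern, with illustrative names (`_Meta`, `Base`, `_RefLike` are not TF's; only `six` is assumed, as in the module itself):

```python
import six

class _Meta(type):
    def __call__(cls, *args, **kwargs):
        if cls is Base:
            # Constructing the abstract class defers to a creator function.
            return _default_creator(None, *args, **kwargs)
        return super(_Meta, cls).__call__(*args, **kwargs)

def _default_creator(next_creator, *args, **kwargs):
    del next_creator  # TF threads a stack of creators through here
    return _RefLike(*args, **kwargs)

class Base(six.with_metaclass(_Meta, object)):
    pass

class _RefLike(Base):
    def __init__(self, value):
        self.value = value

v = Base(3)
assert isinstance(v, _RefLike) and v.value == 3
```

This keeps `tf.Variable(...)` call sites unchanged while letting the creator decide which concrete class to instantiate.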
diff --git a/tensorflow/python/platform/benchmark.py b/tensorflow/python/platform/benchmark.py
index eba2baaf6f..fa17b17d10 100644
--- a/tensorflow/python/platform/benchmark.py
+++ b/tensorflow/python/platform/benchmark.py
@@ -66,11 +66,11 @@ def _global_report_benchmark(
if not isinstance(extras, dict):
raise TypeError("extras must be a dict")
- logging.info("Benchmark [%s] iters: %d, wall_time: %g, cpu_time: %g,"
- "throughput: %g %s", name, iters if iters is not None else -1,
- wall_time if wall_time is not None else -1, cpu_time if
- cpu_time is not None else -1, throughput if
- throughput is not None else -1, str(extras) if extras else "")
+ logging.info("Benchmark [%s] iters: %d, wall_time: %g, cpu_time: %g,"
+ "throughput: %g %s", name, iters if iters is not None else -1,
+ wall_time if wall_time is not None else -1, cpu_time if
+ cpu_time is not None else -1, throughput if
+ throughput is not None else -1, str(extras) if extras else "")
entries = test_log_pb2.BenchmarkEntries()
entry = entries.entry.add()
diff --git a/tensorflow/python/platform/self_check.py b/tensorflow/python/platform/self_check.py
index 966a094e55..844ae99918 100644
--- a/tensorflow/python/platform/self_check.py
+++ b/tensorflow/python/platform/self_check.py
@@ -78,7 +78,7 @@ def preload_check():
"Could not find %r. TensorFlow requires that this DLL be "
"installed in a directory that is named in your %%PATH%% "
"environment variable. Download and install CUDA %s from "
- "this URL: https://developer.nvidia.com/cuda-toolkit"
+ "this URL: https://developer.nvidia.com/cuda-90-download-archive"
% (build_info.cudart_dll_name, build_info.cuda_version_number))
if hasattr(build_info, "cudnn_dll_name") and hasattr(
diff --git a/tensorflow/python/profiler/model_analyzer_test.py b/tensorflow/python/profiler/model_analyzer_test.py
index f9891f3b1e..c0e16ca536 100644
--- a/tensorflow/python/profiler/model_analyzer_test.py
+++ b/tensorflow/python/profiler/model_analyzer_test.py
@@ -106,7 +106,7 @@ class PrintModelAnalysisTest(test.TestCase):
# Make sure time is profiled.
gap = 1 if test.is_gpu_available() else 2
for i in range(3, 6, gap):
- mat = re.search('(.*)[um]s/(.*)[um]s', metrics[i])
+ mat = re.search('(.*)(?:us|ms|sec)/(.*)(?:us|ms|sec)', metrics[i])
self.assertGreater(float(mat.group(1)), 0.0)
self.assertGreater(float(mat.group(2)), 0.0)
# Make sure device is profiled.
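The widened regex matters because profiler timings can be reported in seconds, which the old `[um]s` character class could not match. A quick, self-contained check:

```python
import re

new = r'(.*)(?:us|ms|sec)/(.*)(?:us|ms|sec)'
old = r'(.*)[um]s/(.*)[um]s'

for metric in ['12us/34us', '1.2ms/0.8ms', '2.5sec/1.1sec']:
    m = re.search(new, metric)
    assert float(m.group(1)) > 0.0 and float(m.group(2)) > 0.0

assert re.search(old, '2.5sec/1.1sec') is None  # old pattern missed seconds
```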
diff --git a/tensorflow/tools/api/generator/BUILD b/tensorflow/python/tools/api/generator/BUILD
index 8c760e6f52..223d1281ba 100644
--- a/tensorflow/tools/api/generator/BUILD
+++ b/tensorflow/python/tools/api/generator/BUILD
@@ -3,8 +3,9 @@
licenses(["notice"]) # Apache 2.0
-load("//tensorflow/tools/api/generator:api_gen.bzl", "ESTIMATOR_API_INIT_FILES")
-load("//tensorflow/tools/api/generator:api_gen.bzl", "TENSORFLOW_API_INIT_FILES")
+load("//tensorflow:tensorflow.bzl", "py_test")
+load("//tensorflow/python/tools/api/generator:api_gen.bzl", "ESTIMATOR_API_INIT_FILES")
+load("//tensorflow/python/tools/api/generator:api_gen.bzl", "TENSORFLOW_API_INIT_FILES")
exports_files(
[
@@ -13,6 +14,18 @@ exports_files(
],
)
+py_binary(
+ name = "create_python_api",
+ srcs = ["//tensorflow/python/tools/api/generator:create_python_api.py"],
+ main = "//tensorflow/python/tools/api/generator:create_python_api.py",
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//tensorflow/python:no_contrib",
+ "//tensorflow/python/tools/api/generator:doc_srcs",
+ ],
+)
+
py_library(
name = "doc_srcs",
srcs = ["doc_srcs.py"],
diff --git a/tensorflow/tools/api/generator/api_gen.bzl b/tensorflow/python/tools/api/generator/api_gen.bzl
index d746b5d3e4..00e1c4e199 100644
--- a/tensorflow/tools/api/generator/api_gen.bzl
+++ b/tensorflow/python/tools/api/generator/api_gen.bzl
@@ -102,36 +102,41 @@ ESTIMATOR_API_INIT_FILES = [
# END GENERATED ESTIMATOR FILES
]
-# Creates a genrule that generates a directory structure with __init__.py
-# files that import all exported modules (i.e. modules with tf_export
-# decorators).
-#
-# Args:
-# name: name of genrule to create.
-# output_files: List of __init__.py files that should be generated.
-# This list should include file name for every module exported using
-# tf_export. For e.g. if an op is decorated with
-# @tf_export('module1.module2', 'module3'). Then, output_files should
-# include module1/module2/__init__.py and module3/__init__.py.
-# root_init_template: Python init file that should be used as template for
-# root __init__.py file. "# API IMPORTS PLACEHOLDER" comment inside this
-# template will be replaced with root imports collected by this genrule.
-# srcs: genrule sources. If passing root_init_template, the template file
-# must be included in sources.
-# api_name: Name of the project that you want to generate API files for
-# (e.g. "tensorflow" or "estimator").
-# package: Python package containing the @tf_export decorators you want to
-# process
-# package_dep: Python library target containing your package.
-
def gen_api_init_files(
name,
output_files = TENSORFLOW_API_INIT_FILES,
root_init_template = None,
srcs = [],
api_name = "tensorflow",
+ api_version = 2,
package = "tensorflow.python",
- package_dep = "//tensorflow/python:no_contrib"):
+ package_dep = "//tensorflow/python:no_contrib",
+ output_package = "tensorflow"):
+ """Creates API directory structure and __init__.py files.
+
+ Creates a genrule that generates a directory structure with __init__.py
+ files that import all exported modules (i.e. modules with tf_export
+ decorators).
+
+ Args:
+ name: name of genrule to create.
+ output_files: List of __init__.py files that should be generated.
+ This list should include file name for every module exported using
+ tf_export. For e.g. if an op is decorated with
+ @tf_export('module1.module2', 'module3'). Then, output_files should
+ include module1/module2/__init__.py and module3/__init__.py.
+ root_init_template: Python init file that should be used as template for
+ root __init__.py file. "# API IMPORTS PLACEHOLDER" comment inside this
+ template will be replaced with root imports collected by this genrule.
+ srcs: genrule sources. If passing root_init_template, the template file
+ must be included in sources.
+ api_name: Name of the project that you want to generate API files for
+ (e.g. "tensorflow" or "estimator").
+ api_version: TensorFlow API version to generate. Must be either 1 or 2.
+ package: Python package containing the @tf_export decorators you want to
+ process.
+ package_dep: Python library target containing your package.
+ """
root_init_template_flag = ""
if root_init_template:
root_init_template_flag = "--root_init_template=$(location " + root_init_template + ")"
@@ -139,13 +144,14 @@ def gen_api_init_files(
api_gen_binary_target = "create_" + package + "_api"
native.py_binary(
name = "create_" + package + "_api",
- srcs = ["//tensorflow/tools/api/generator:create_python_api.py"],
- main = "//tensorflow/tools/api/generator:create_python_api.py",
+ srcs = ["//tensorflow/python/tools/api/generator:create_python_api.py"],
+ main = "//tensorflow/python/tools/api/generator:create_python_api.py",
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
deps = [
package_dep,
- "//tensorflow/tools/api/generator:doc_srcs",
+ "//tensorflow/python:util",
+ "//tensorflow/python/tools/api/generator:doc_srcs",
],
)
@@ -154,7 +160,9 @@ def gen_api_init_files(
outs = output_files,
cmd = (
"$(location :" + api_gen_binary_target + ") " +
- root_init_template_flag + " --apidir=$(@D) --apiname=" + api_name + " --package=" + package + " $(OUTS)"),
+ root_init_template_flag + " --apidir=$(@D) --apiname=" +
+ api_name + " --apiversion=" + str(api_version) + " --package=" + package +
+ " --output_package=" + output_package + " $(OUTS)"),
srcs = srcs,
tools = [":" + api_gen_binary_target ],
visibility = ["//tensorflow:__pkg__"],
diff --git a/tensorflow/tools/api/generator/create_python_api.py b/tensorflow/python/tools/api/generator/create_python_api.py
index 671b7e387e..863c922216 100644
--- a/tensorflow/tools/api/generator/create_python_api.py
+++ b/tensorflow/python/tools/api/generator/create_python_api.py
@@ -24,11 +24,12 @@ import importlib
import os
import sys
+from tensorflow.python.tools.api.generator import doc_srcs
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
-from tensorflow.tools.api.generator import doc_srcs
API_ATTRS = tf_export.API_ATTRS
+API_ATTRS_V1 = tf_export.API_ATTRS_V1
_DEFAULT_PACKAGE = 'tensorflow.python'
_GENFILES_DIR_SUFFIX = 'genfiles/'
@@ -38,14 +39,14 @@ _SYMBOLS_TO_SKIP_EXPLICITLY = {
'tensorflow.python.platform.flags.FLAGS'
}
_GENERATED_FILE_HEADER = """# This file is MACHINE GENERATED! Do not edit.
-# Generated by: tensorflow/tools/api/generator/create_python_api.py script.
+# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
\"\"\"%s
\"\"\"
from __future__ import print_function
"""
-_GENERATED_FILE_FOOTER = "\n\ndel print_function\n"
+_GENERATED_FILE_FOOTER = '\n\ndel print_function\n'
class SymbolExposedTwiceError(Exception):
@@ -159,13 +160,16 @@ __all__.remove('print_function')
return module_text_map
-def get_api_init_text(package, api_name):
+def get_api_init_text(package, output_package, api_name, api_version):
"""Get a map from destination module to __init__.py code for that module.
Args:
package: Base python package containing python with target tf_export
decorators.
+ output_package: Base output python package where generated API will
+ be added.
api_name: API you want to generate (e.g. `tensorflow` or `estimator`).
+ api_version: API version you want to generate (`v1` or `v2`).
Returns:
A dictionary where
@@ -173,6 +177,12 @@ def get_api_init_text(package, api_name):
value: (string) text that should be in __init__.py files for
corresponding modules.
"""
+ if api_version == 1:
+ names_attr = API_ATTRS_V1[api_name].names
+ constants_attr = API_ATTRS_V1[api_name].constants
+ else:
+ names_attr = API_ATTRS[api_name].names
+ constants_attr = API_ATTRS[api_name].constants
module_code_builder = _ModuleInitCodeBuilder()
# Traverse over everything imported above. Specifically,
@@ -180,7 +190,7 @@ def get_api_init_text(package, api_name):
for module in list(sys.modules.values()):
# Only look at tensorflow modules.
if (not module or not hasattr(module, '__name__') or
- package not in module.__name__):
+ module.__name__ is None or package not in module.__name__):
continue
# Do not generate __init__.py files for contrib modules for now.
if '.contrib.' in module.__name__ or module.__name__.endswith('.contrib'):
@@ -193,7 +203,7 @@ def get_api_init_text(package, api_name):
attr = getattr(module, module_contents_name)
# If attr is _tf_api_constants attribute, then add the constants.
- if module_contents_name == API_ATTRS[api_name].constants:
+ if module_contents_name == constants_attr:
for exports, value in attr:
for export in exports:
names = export.split('.')
@@ -205,9 +215,8 @@ def get_api_init_text(package, api_name):
_, attr = tf_decorator.unwrap(attr)
# If attr is a symbol with _tf_api_names attribute, then
# add import for it.
- if (hasattr(attr, '__dict__') and
- API_ATTRS[api_name].names in attr.__dict__):
- for export in getattr(attr, API_ATTRS[api_name].names): # pylint: disable=protected-access
+ if (hasattr(attr, '__dict__') and names_attr in attr.__dict__):
+ for export in getattr(attr, names_attr): # pylint: disable=protected-access
names = export.split('.')
dest_module = '.'.join(names[:-1])
module_code_builder.add_import(
@@ -218,7 +227,6 @@ def get_api_init_text(package, api_name):
# For e.g. if we import 'foo.bar.Value'. Then, we also
# import 'bar' in 'foo'.
imported_modules = set(module_code_builder.module_imports.keys())
- import_from = '.'
for module in imported_modules:
if not module:
continue
@@ -229,6 +237,9 @@ def get_api_init_text(package, api_name):
if submodule_index > 0:
parent_module += ('.' + module_split[submodule_index-1] if parent_module
else module_split[submodule_index-1])
+ import_from = output_package
+ if submodule_index > 0:
+ import_from += '.' + '.'.join(module_split[:submodule_index])
module_code_builder.add_import(
-1, parent_module, import_from,
module_split[submodule_index], module_split[submodule_index])
@@ -294,7 +305,8 @@ def get_module_docstring(module_name, package, api_name):
def create_api_files(
- output_files, package, root_init_template, output_dir, api_name):
+ output_files, package, root_init_template, output_dir, output_package,
+ api_name, api_version):
"""Creates __init__.py files for the Python API.
Args:
@@ -306,7 +318,9 @@ def create_api_files(
"#API IMPORTS PLACEHOLDER" comment in the template file will be replaced
with imports.
output_dir: output API root directory.
+ output_package: Base output package where the generated API will be added.
api_name: API you want to generate (e.g. `tensorflow` or `estimator`).
+ api_version: API version to generate (`v1` or `v2`).
Raises:
ValueError: if an output file is not under api/ directory,
@@ -323,7 +337,8 @@ def create_api_files(
os.makedirs(os.path.dirname(file_path))
open(file_path, 'a').close()
- module_text_map = get_api_init_text(package, api_name)
+ module_text_map = get_api_init_text(
+ package, output_package, api_name, api_version)
# Add imports to output files.
missing_output_files = []
@@ -381,6 +396,13 @@ def main():
'--apiname', required=True, type=str,
choices=API_ATTRS.keys(),
help='The API you want to generate.')
+ parser.add_argument(
+ '--apiversion', default=2, type=int,
+ choices=[1, 2],
+ help='The API version you want to generate.')
+ parser.add_argument(
+ '--output_package', default='tensorflow', type=str,
+ help='Root output package.')
args = parser.parse_args()
@@ -395,7 +417,8 @@ def main():
# Populate `sys.modules` with modules containing tf_export().
importlib.import_module(args.package)
create_api_files(outputs, args.package, args.root_init_template,
- args.apidir, args.apiname)
+ args.apidir, args.output_package, args.apiname,
+ args.apiversion)
if __name__ == '__main__':
diff --git a/tensorflow/tools/api/generator/create_python_api_test.py b/tensorflow/python/tools/api/generator/create_python_api_test.py
index 651ec9d040..a565a49d96 100644
--- a/tensorflow/tools/api/generator/create_python_api_test.py
+++ b/tensorflow/python/tools/api/generator/create_python_api_test.py
@@ -22,8 +22,8 @@ import imp
import sys
from tensorflow.python.platform import test
+from tensorflow.python.tools.api.generator import create_python_api
from tensorflow.python.util.tf_export import tf_export
-from tensorflow.tools.api.generator import create_python_api
@tf_export('test_op', 'test_op1')
@@ -58,7 +58,8 @@ class CreatePythonApiTest(test.TestCase):
def testFunctionImportIsAdded(self):
imports = create_python_api.get_api_init_text(
package=create_python_api._DEFAULT_PACKAGE,
- api_name='tensorflow')
+ output_package='tensorflow',
+ api_name='tensorflow', api_version=1)
expected_import = (
'from tensorflow.python.test_module '
'import test_op as test_op1')
@@ -75,7 +76,8 @@ class CreatePythonApiTest(test.TestCase):
def testClassImportIsAdded(self):
imports = create_python_api.get_api_init_text(
package=create_python_api._DEFAULT_PACKAGE,
- api_name='tensorflow')
+ output_package='tensorflow',
+ api_name='tensorflow', api_version=2)
expected_import = ('from tensorflow.python.test_module '
'import TestClass')
self.assertTrue(
@@ -85,7 +87,8 @@ class CreatePythonApiTest(test.TestCase):
def testConstantIsAdded(self):
imports = create_python_api.get_api_init_text(
package=create_python_api._DEFAULT_PACKAGE,
- api_name='tensorflow')
+ output_package='tensorflow',
+ api_name='tensorflow', api_version=1)
expected = ('from tensorflow.python.test_module '
'import _TEST_CONSTANT')
self.assertTrue(expected in str(imports),
diff --git a/tensorflow/tools/api/generator/doc_srcs.py b/tensorflow/python/tools/api/generator/doc_srcs.py
index ad1988494d..ad1988494d 100644
--- a/tensorflow/tools/api/generator/doc_srcs.py
+++ b/tensorflow/python/tools/api/generator/doc_srcs.py
diff --git a/tensorflow/tools/api/generator/doc_srcs_test.py b/tensorflow/python/tools/api/generator/doc_srcs_test.py
index 7b8f27c1b1..481d9874a4 100644
--- a/tensorflow/tools/api/generator/doc_srcs_test.py
+++ b/tensorflow/python/tools/api/generator/doc_srcs_test.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
-"""Tests for tensorflow.tools.api.generator.doc_srcs."""
+"""Tests for tensorflow.python.tools.api.generator.doc_srcs."""
from __future__ import absolute_import
from __future__ import division
@@ -23,7 +23,7 @@ import importlib
import sys
from tensorflow.python.platform import test
-from tensorflow.tools.api.generator import doc_srcs
+from tensorflow.python.tools.api.generator import doc_srcs
FLAGS = None
@@ -39,27 +39,27 @@ class DocSrcsTest(test.TestCase):
file_path += '/'
file_path += '__init__.py'
- if file_path not in FLAGS.outputs:
- self.assertFalse('%s is not a valid API module' % module_name)
+ self.assertIn(
+ file_path, FLAGS.outputs,
+ msg='%s is not a valid API module' % module_name)
def testHaveDocstringOrDocstringModule(self):
for module_name, docsrc in doc_srcs.get_doc_sources(FLAGS.api_name).items():
- if docsrc.docstring and docsrc.docstring_module_name:
- self.assertFalse(
- '%s contains DocSource has both a docstring and a '
- 'docstring_module_name. '
- 'Only one of "docstring" or "docstring_module_name" should be set.'
- % (module_name))
+ self.assertFalse(
+ docsrc.docstring and docsrc.docstring_module_name,
+ msg=('%s contains a DocSource with both a docstring and a '
+ 'docstring_module_name. Only one of "docstring" or '
+ '"docstring_module_name" should be set.') % (module_name))
def testDocstringModulesAreValidModules(self):
for _, docsrc in doc_srcs.get_doc_sources(FLAGS.api_name).items():
if docsrc.docstring_module_name:
doc_module_name = '.'.join([
FLAGS.package, docsrc.docstring_module_name])
- if doc_module_name not in sys.modules:
- self.assertFalse(
- 'docsources_module %s is not a valid module under %s.' %
- (docsrc.docstring_module_name, FLAGS.package))
+ self.assertIn(
+ doc_module_name, sys.modules,
+ msg=('docsources_module %s is not a valid module under %s.' %
+ (docsrc.docstring_module_name, FLAGS.package)))
if __name__ == '__main__':
diff --git a/tensorflow/python/training/checkpointable/BUILD b/tensorflow/python/training/checkpointable/BUILD
index 9232b6089a..35007653a0 100644
--- a/tensorflow/python/training/checkpointable/BUILD
+++ b/tensorflow/python/training/checkpointable/BUILD
@@ -47,6 +47,7 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":base",
+ ":data_structures",
],
)
@@ -62,11 +63,18 @@ py_test(
)
py_library(
+ name = "layer_utils",
+ srcs = ["layer_utils.py"],
+ srcs_version = "PY2AND3",
+)
+
+py_library(
name = "data_structures",
srcs = ["data_structures.py"],
srcs_version = "PY2AND3",
deps = [
":base",
+ ":layer_utils",
],
)
diff --git a/tensorflow/python/training/checkpointable/base.py b/tensorflow/python/training/checkpointable/base.py
index 99c8098eca..ee35b01328 100644
--- a/tensorflow/python/training/checkpointable/base.py
+++ b/tensorflow/python/training/checkpointable/base.py
@@ -33,6 +33,7 @@ from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saveable_object
from tensorflow.python.util import nest
from tensorflow.python.util import serialization
+from tensorflow.python.util import tf_decorator
# Key where the object graph proto is saved in a TensorBundle
@@ -340,6 +341,34 @@ _SlotVariableRestoration = collections.namedtuple(
])
+def no_automatic_dependency_tracking(method):
+ """Disables automatic dependency tracking on attribute assignment.
+
+ Use this to decorate any method of a Checkpointable object. Attribute assignment in
+ that method will not add dependencies (also respected in Model). Harmless if
+ used in a class which does not do automatic dependency tracking (which means
+ it's safe to use in base classes which may have subclasses which also inherit
+ from Checkpointable).
+
+ Args:
+ method: The method to decorate.
+ Returns:
+ A decorated method which sets and un-sets automatic dependency tracking for
+ the object the method is called on (not thread safe).
+ """
+
+ def _method_wrapper(self, *args, **kwargs):
+ previous_value = getattr(self, "_setattr_tracking", True)
+ self._setattr_tracking = False # pylint: disable=protected-access
+ try:
+ method(self, *args, **kwargs)
+ finally:
+ self._setattr_tracking = previous_value # pylint: disable=protected-access
+
+ return tf_decorator.make_decorator(
+ target=method, decorator_func=_method_wrapper)
+
+
class CheckpointableBase(object):
"""Base class for `Checkpointable` objects without automatic dependencies.
@@ -349,6 +378,11 @@ class CheckpointableBase(object):
checks.
"""
+ # CheckpointableBase does not do automatic dependency tracking, but uses the
+ # no_automatic_dependency_tracking decorator so it can avoid adding
+ # dependencies if a subclass is Checkpointable / inherits from Model (both of
+ # which have __setattr__ overrides).
+ @no_automatic_dependency_tracking
def _maybe_initialize_checkpointable(self):
"""Initialize dependency management.
@@ -386,6 +420,10 @@ class CheckpointableBase(object):
# building.
self._name_based_restores = set()
+ def _no_dependency(self, value):
+ """If automatic dependency tracking is enabled, ignores `value`."""
+ return value
+
def _name_based_attribute_restore(self, checkpoint):
"""Restore the object's attributes from a name-based checkpoint."""
self._name_based_restores.add(checkpoint)
@@ -463,7 +501,7 @@ class CheckpointableBase(object):
ValueError: If the variable name is not unique.
"""
self._maybe_initialize_checkpointable()
- if not overwrite and self._lookup_dependency(name) is not None:
+ if not overwrite and self._lookup_dependency(name) is not None:
raise ValueError(
("A variable named '%s' already exists in this Checkpointable, but "
"Checkpointable._add_variable called to create another with "
@@ -593,9 +631,9 @@ class CheckpointableBase(object):
self._unconditional_checkpoint_dependencies[index] = new_reference
elif current_object is None:
self._unconditional_checkpoint_dependencies.append(new_reference)
- self._unconditional_dependency_names[name] = checkpointable
self._handle_deferred_dependencies(
name=name, checkpointable=checkpointable)
+ self._unconditional_dependency_names[name] = checkpointable
return checkpointable
def _handle_deferred_dependencies(self, name, checkpointable):
@@ -733,28 +771,3 @@ class CheckpointableBase(object):
return {OBJECT_CONFIG_JSON_KEY: functools.partial(
PythonStringStateSaveable,
state_callback=_state_callback)}
-
-
-class NoDependency(object):
- """Allows attribute assignment to `Checkpointable` objects with no dependency.
-
- Example usage:
- ```python
- obj = Checkpointable()
- obj.has_dependency = tf.Variable(0., name="dep")
- obj.no_dependency = NoDependency(tf.Variable(1., name="nodep"))
- assert obj.no_dependency.name == "nodep:0"
- ```
-
- `obj` in this example has a dependency on the variable "dep", and both
- attributes contain un-wrapped `Variable` objects.
-
- `NoDependency` also works with `tf.keras.Model`, but only for checkpoint
- dependencies: wrapping a `Layer` in `NoDependency` will assign the (unwrapped)
- `Layer` to the attribute without a checkpoint dependency, but the `Model` will
- still track the `Layer` (so it will appear in `Model.layers`, and its
- variables will appear in `Model.variables`).
- """
-
- def __init__(self, value):
- self.value = value
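A usage sketch of the new decorator (it mirrors the `NoDependencyModel` test added to tracking_test.py later in this patch; the class name here is made up):

```python
from tensorflow.python.training.checkpointable import base
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util

class NoDepInit(tracking.Checkpointable):

  @base.no_automatic_dependency_tracking
  def __init__(self):
    # Assignments made while the decorator is active are not tracked.
    self.scratch = tracking.Checkpointable()

obj = NoDepInit()
assert util.list_objects(obj) == [obj]  # no dependency on `scratch`
```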
diff --git a/tensorflow/python/training/checkpointable/data_structures.py b/tensorflow/python/training/checkpointable/data_structures.py
index 680cf3441f..019d43f09c 100644
--- a/tensorflow/python/training/checkpointable/data_structures.py
+++ b/tensorflow/python/training/checkpointable/data_structures.py
@@ -21,50 +21,127 @@ import collections
import six
-from tensorflow.python.keras.engine import base_layer
-from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.ops import variables
-from tensorflow.python.training.checkpointable import base as checkpointable_lib
-
-
-# TODO(allenl): We could track regular Python data structures which get assigned
-# to Checkpointable objects. Making this work with restore-on-create would be
-# tricky; we'd need to re-create nested structures with our own wrapped objects
-# on assignment to an attribute, and track the user's original structure to make
-# sure they don't modify it except through the wrappers (since we could save the
-# user's updated structure, but would have no way to support restore-on-create
-# for those modifications).
-# TODO(allenl): A dictionary data structure would be good too.
-class CheckpointableDataStructure(checkpointable_lib.CheckpointableBase):
+from tensorflow.python.training.checkpointable import base
+from tensorflow.python.training.checkpointable import layer_utils
+
+
+class NoDependency(object):
+ """Allows attribute assignment to `Checkpointable` objects with no dependency.
+
+ Example usage:
+ ```python
+ obj = Checkpointable()
+ obj.has_dependency = tf.Variable(0., name="dep")
+ obj.no_dependency = NoDependency(tf.Variable(1., name="nodep"))
+ assert obj.no_dependency.name == "nodep:0"
+ ```
+
+ `obj` in this example has a dependency on the variable "dep", and both
+ attributes contain un-wrapped `Variable` objects.
+
+ `NoDependency` also works with `tf.keras.Model`, but only for checkpoint
+ dependencies: wrapping a `Layer` in `NoDependency` will assign the (unwrapped)
+ `Layer` to the attribute without a checkpoint dependency, but the `Model` will
+ still track the `Layer` (so it will appear in `Model.layers`, and its
+ variables will appear in `Model.variables`).
+ """
+
+ def __init__(self, value):
+ self.value = value
+
+
+def _wrap_or_unwrap(value):
+ """Wraps basic data structures, unwraps NoDependency objects."""
+ if isinstance(value, NoDependency):
+ return value.value
+ if isinstance(value, base.CheckpointableBase):
+ return value # Skip conversion for already checkpointable objects.
+ elif isinstance(value, list):
+ return _ListWrapper(value)
+ else:
+ return value
+ # TODO(allenl): Handle other common data structures. Tuples will require
+ # special casing (tuple subclasses are not weak referenceable, so replacement
+ # with a wrapper that subclasses tuple on attribute assignment works poorly,
+ # and replacement with a wrapper that isn't a tuple is also problematic),
+ # probably a tree traversal where the leaves are non-tuples(/namedtuples) to
+ # come up with names. Dictionaries should look like lists.
+
+
+def sticky_attribute_assignment(checkpointable, name, value):
+ """Adds dependencies, generally called from __setattr__.
+
+ This behavior is shared between Checkpointable and Model.
+
+ Respects NoDependency indicators, but otherwise makes checkpointable objects
+ out of common data structures and tracks objects by their attribute names.
+
+ Args:
+ checkpointable: The object to add dependencies to (generally the one having
+ an attribute assigned).
+ name: The attribute name being assigned.
+ value: The value being assigned. Not necessarily a checkpointable object.
+
+ Returns:
+ The value which should be stored in the attribute (unwrapped from a
+ NoDependency object if necessary).
+ """
+ if isinstance(value, NoDependency):
+ add_dependency = False
+ else:
+ add_dependency = True
+ value = _wrap_or_unwrap(value)
+ if not add_dependency:
+ return value
+ if isinstance(value, base.CheckpointableBase):
+ checkpointable._track_checkpointable( # pylint: disable=protected-access
+ value, name=name,
+ # Allow the user to switch the Checkpointable which is tracked by this
+ # name, since assigning a new variable to an attribute has
+ # historically been fine (e.g. Adam did this).
+ overwrite=True)
+ return value
+
+
+class CheckpointableDataStructure(base.CheckpointableBase):
"""Base class for data structures which contain checkpointable objects."""
def __init__(self):
+ # An append-only ordered set
self._layers = []
+
self.trainable = True
self._extra_variables = []
def _track_value(self, value, name):
"""Add a dependency on `value`."""
- if isinstance(value, checkpointable_lib.CheckpointableBase):
- self._track_checkpointable(value, name=name)
- if isinstance(value, variables.Variable):
- self._extra_variables.append(value)
- else:
+ value = sticky_attribute_assignment(
+ checkpointable=self, value=value, name=name)
+ if isinstance(value, variables.Variable):
+ self._extra_variables.append(value)
+ if not isinstance(value, base.CheckpointableBase):
raise ValueError(
("Only checkpointable objects (such as Layers or Optimizers) may be "
"stored in a List object. Got %s, which does not inherit from "
"CheckpointableBase.") % (value,))
- if isinstance(value, (base_layer.Layer, CheckpointableDataStructure)):
- if value not in self._layers:
+ if (isinstance(value, CheckpointableDataStructure)
+ or layer_utils.is_layer(value)):
+ # Check for object-identity rather than with __eq__ to avoid
+ # de-duplicating empty container types. Automatically generated list
+ # wrappers keep things like "[] == []" true, which means "[] in [[]]" is
+ # also true. This becomes not true once one of the lists is mutated.
+ if not any((layer is value for layer in self._layers)):
self._layers.append(value)
if hasattr(value, "_use_resource_variables"):
# In subclassed models, legacy layers (tf.layers) must always use
# resource variables.
value._use_resource_variables = True # pylint: disable=protected-access
+ return value
@property
def layers(self):
- return self._layers
+ return layer_utils.filter_empty_layer_containers(self._layers)
@property
def trainable_weights(self):
@@ -164,24 +241,28 @@ class List(CheckpointableDataStructure, collections.Sequence):
def __init__(self, *args, **kwargs):
"""Construct a new sequence. Arguments are passed to `list()`."""
super(List, self).__init__()
- self._storage = list(*args, **kwargs)
+ self._storage = self._make_storage(*args, **kwargs)
for index, element in enumerate(self._storage):
- self._track_value(element, name=self._name_element(index))
+ self._storage[index] = self._track_value(
+ element, name=self._name_element(index))
+
+ def _make_storage(self, *args, **kwargs):
+ """Determines the backing storage (overridden in subclasses)."""
+ return list(*args, **kwargs)
def _name_element(self, index):
return "%d" % (index,)
def append(self, value):
"""Add a new checkpointable value."""
- self._track_value(value, self._name_element(len(self._storage)))
+ value = self._track_value(value, self._name_element(len(self._storage)))
self._storage.append(value)
def extend(self, values):
"""Add a sequence of checkpointable values."""
- for index_offset, value in enumerate(values):
- self._track_value(
- value, name=self._name_element(len(self._storage) + index_offset))
- self._storage.extend(values)
+ for value in values:
+ self._storage.append(self._track_value(
+ value, name=self._name_element(len(self._storage))))
def __iadd__(self, values):
self.extend(values)
@@ -189,9 +270,12 @@ class List(CheckpointableDataStructure, collections.Sequence):
def __add__(self, other):
if isinstance(other, List):
- return List(self._storage + other._storage) # pylint: disable=protected-access
+ return self.__class__(self._storage + other._storage) # pylint: disable=protected-access
else:
- return List(self._storage + other)
+ return self.__class__(self._storage + other)
+
+ def __radd__(self, other):
+ return self + other
def __getitem__(self, key):
return self._storage[key]
@@ -203,6 +287,144 @@ class List(CheckpointableDataStructure, collections.Sequence):
return "List(%s)" % (repr(self._storage),)
+class _ListWrapper(List, collections.MutableSequence,
+ # Shadowed, but there for isinstance checks.
+ list):
+ """Wraps the built-in `list` to support restore-on-create for variables.
+
+ Unlike `List`, this sequence type is mutable in the same ways built-in lists
+ are. Instead of throwing an error immediately like `List`, it records
+ problematic mutations (e.g. assigning a new element to a position already
+ occupied, meaning both elements get the same names at different times) and
+ refuses to save.
+
+ On assignment to an attribute of a Model or Checkpointable object, Python
+ lists are replaced with _ListWrapper. Wrapping a list in a
+ `tf.contrib.checkpoint.NoDependency` object prevents this.
+ """
+
+ def __init__(self, wrapped_list):
+ """Construct a new list wrapper.
+
+ Args:
+ wrapped_list: The initial value of the data structure. A shallow copy may
+ be maintained for error checking. `wrapped_list` itself should not be
+ modified directly after constructing the `_ListWrapper`, and if changes
+ are detected the `_ListWrapper` will throw an exception on save.
+ """
+ # Monotonic flags which indicate this object would not be restored properly,
+ # and therefore should throw an error on save to avoid giving the impression
+ # that restoring it will work.
+ self._non_append_mutation = False
+ self._external_modification = False
+ super(_ListWrapper, self).__init__(wrapped_list)
+ self._last_wrapped_list_snapshot = list(self._storage)
+
+ def _make_storage(self, wrapped_list):
+ """Use the user's original list for storage."""
+ return wrapped_list
+
+ def _check_external_modification(self):
+ """Checks for any changes to the wrapped list not through the wrapper."""
+ if self._external_modification or self._non_append_mutation:
+ return
+ if self._storage != self._last_wrapped_list_snapshot:
+ self._external_modification = True
+ self._last_wrapped_list_snapshot = None
+
+ def _update_snapshot(self):
+ """Acknowledges tracked changes to the wrapped list."""
+ if self._external_modification or self._non_append_mutation:
+ return
+ self._last_wrapped_list_snapshot = list(self._storage)
+
+ @property
+ def _checkpoint_dependencies(self):
+ self._check_external_modification()
+ if self._non_append_mutation:
+ raise ValueError(
+ ("Unable to save the object %s (a list wrapper constructed to track "
+ "checkpointable TensorFlow objects). A list element was replaced "
+ "(__setitem__), deleted, or inserted. In order to support "
+ "restoration on object creation, tracking is exclusively for "
+ "append-only data structures.\n\nIf you don't need this list "
+ "checkpointed, wrap it in a tf.contrib.checkpoint.NoDependency "
+ "object; it will be automatically un-wrapped and subsequently "
+ "ignored." % (self,)))
+ if self._external_modification:
+ raise ValueError(
+ ("Unable to save the object %s (a list wrapper constructed to track "
+ "checkpointable TensorFlow objects). The wrapped list was modified "
+ "outside the wrapper (its final value was %s, its value when a "
+ "checkpoint dependency was added was %s), which breaks restoration "
+ "on object creation.\n\nIf you don't need this list checkpointed, "
+ "wrap it in a tf.contrib.checkpoint.NoDependency object; it will be "
+ "automatically un-wrapped and subsequently ignored." % (
+ self, self._storage, self._last_wrapped_list_snapshot)))
+ return super(_ListWrapper, self)._checkpoint_dependencies
+
+ def __delitem__(self, key):
+ self._non_append_mutation = True
+ del self._storage[key]
+
+ def __setitem__(self, key, value):
+ self._non_append_mutation = True
+ self._storage[key] = value
+
+ def append(self, value):
+ """Add a new checkpointable value."""
+ self._check_external_modification()
+ super(_ListWrapper, self).append(value)
+ self._update_snapshot()
+
+ def extend(self, values):
+ """Add a sequence of checkpointable values."""
+ self._check_external_modification()
+ super(_ListWrapper, self).extend(values)
+ self._update_snapshot()
+
+ def __eq__(self, other):
+ return self._storage == getattr(other, "_storage", other)
+
+ def __ne__(self, other):
+ return self._storage != getattr(other, "_storage", other)
+
+ def __lt__(self, other):
+ return self._storage < getattr(other, "_storage", other)
+
+ def __le__(self, other):
+ return self._storage <= getattr(other, "_storage", other)
+
+ def __gt__(self, other):
+ return self._storage > getattr(other, "_storage", other)
+
+ def __ge__(self, other):
+ return self._storage >= getattr(other, "_storage", other)
+
+ def __hash__(self):
+ # List wrappers need to compare like regular lists, and so like regular
+ # lists they don't belong in hash tables.
+ raise TypeError("unhashable type: 'ListWrapper'")
+
+ def insert(self, index, obj):
+ self._non_append_mutation = True
+ self._storage.insert(index, obj)
+
+ def _track_value(self, value, name):
+ """Allows storage of non-checkpointable objects."""
+ try:
+ value = super(_ListWrapper, self)._track_value(value=value, name=name)
+ except ValueError:
+ # Even if this value isn't checkpointable, we need to make sure
+ # NoDependency objects get unwrapped.
+ value = sticky_attribute_assignment(
+ checkpointable=self, value=value, name=name)
+ return value
+
+ def __repr__(self):
+ return "ListWrapper(%s)" % (repr(self._storage),)
+
+
class Mapping(CheckpointableDataStructure, collections.Mapping):
"""An append-only checkpointable mapping data structure with string keys.
@@ -217,8 +439,10 @@ class Mapping(CheckpointableDataStructure, collections.Mapping):
"""Construct a new sequence. Arguments are passed to `dict()`."""
super(Mapping, self).__init__()
self._storage = dict(*args, **kwargs)
- for key, value in self._storage.items():
- self._track_value(value, name=self._name_element(key))
+ self._storage.update(
+ {key: self._track_value(
+ value, name=self._name_element(key))
+ for key, value in self._storage.items()})
def _name_element(self, key):
if not isinstance(key, six.string_types):
@@ -228,13 +452,14 @@ class Mapping(CheckpointableDataStructure, collections.Mapping):
return str(key)
def __setitem__(self, key, value):
+ name = self._name_element(key)
+ value = self._track_value(value, name=name)
current_value = self._storage.setdefault(key, value)
if current_value is not value:
raise ValueError(
("Mappings are an append-only data structure. Tried to overwrite the "
"key '%s' with value %s, but it already contains %s")
% (key, value, current_value))
- self._track_value(value, name=self._name_element(key))
def update(self, *args, **kwargs):
for key, value in dict(*args, **kwargs).items():
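Taken together, `_wrap_or_unwrap` and `sticky_attribute_assignment` give plain lists checkpointing support on attribute assignment unless the user opts out. A minimal sketch of the resulting behavior, assuming this patch is applied (it mirrors the tests that follow):

```python
from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.training.checkpointable import tracking

root = tracking.Checkpointable()
root.layers = []                       # silently replaced by a _ListWrapper
root.layers.append(tracking.Checkpointable())  # appends stay saveable
assert isinstance(root.layers, list)   # the wrapper still passes list checks

root.scratch = data_structures.NoDependency([])  # opt out of wrapping
assert type(root.scratch) is list      # stored as an ordinary, untracked list
```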
diff --git a/tensorflow/python/training/checkpointable/data_structures_test.py b/tensorflow/python/training/checkpointable/data_structures_test.py
index ce5852dd6e..ec8c9da809 100644
--- a/tensorflow/python/training/checkpointable/data_structures_test.py
+++ b/tensorflow/python/training/checkpointable/data_structures_test.py
@@ -31,6 +31,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training.checkpointable import data_structures
+from tensorflow.python.training.checkpointable import tracking
class HasList(training.Model):
@@ -113,6 +114,19 @@ class ListTests(test.TestCase):
model(model_input)
self.assertEqual(2, len(model.losses))
+ def testModelContainersCompareEqual(self):
+ class HasEqualContainers(training.Model):
+
+ def __init__(self):
+ super(HasEqualContainers, self).__init__()
+ self.l1 = []
+ self.l2 = []
+
+ model = HasEqualContainers()
+ model.l1.append(HasEqualContainers())
+ model.l2.append(HasEqualContainers())
+ self.assertEqual([model.l1, model.l2], model.layers)
+
def testNotCheckpointable(self):
class NotCheckpointable(object):
pass
@@ -158,11 +172,62 @@ class ListTests(test.TestCase):
self.assertEqual([v], l.trainable_weights)
self.assertEqual([v2], l.non_trainable_weights)
+ def testListWrapperBasic(self):
+ # _ListWrapper, unlike List, compares like the built-in list type (since it
+ # is used to automatically replace lists).
+ a = tracking.Checkpointable()
+ b = tracking.Checkpointable()
+ self.assertEqual([a, a],
+ [a, a])
+ self.assertEqual(data_structures._ListWrapper([a, a]),
+ data_structures._ListWrapper([a, a]))
+ self.assertEqual([a, a],
+ data_structures._ListWrapper([a, a]))
+ self.assertEqual(data_structures._ListWrapper([a, a]),
+ [a, a])
+ self.assertNotEqual([a, a],
+ [b, a])
+ self.assertNotEqual(data_structures._ListWrapper([a, a]),
+ data_structures._ListWrapper([b, a]))
+ self.assertNotEqual([a, a],
+ data_structures._ListWrapper([b, a]))
+ self.assertLess([a], [a, b])
+ self.assertLess(data_structures._ListWrapper([a]),
+ data_structures._ListWrapper([a, b]))
+ self.assertLessEqual([a], [a, b])
+ self.assertLessEqual(data_structures._ListWrapper([a]),
+ data_structures._ListWrapper([a, b]))
+ self.assertGreater([a, b], [a])
+ self.assertGreater(data_structures._ListWrapper([a, b]),
+ data_structures._ListWrapper([a]))
+ self.assertGreaterEqual([a, b], [a])
+ self.assertGreaterEqual(data_structures._ListWrapper([a, b]),
+ data_structures._ListWrapper([a]))
+ self.assertEqual([a], data_structures._ListWrapper([a]))
+ self.assertEqual([a], list(data_structures.List([a])))
+ self.assertEqual([a, a], data_structures._ListWrapper([a]) + [a])
+ self.assertEqual([a, a], [a] + data_structures._ListWrapper([a]))
+ self.assertIsInstance(data_structures._ListWrapper([a]), list)
+
+ def testWrapperChangesList(self):
+ l = []
+ l_wrapper = data_structures._ListWrapper(l)
+ l_wrapper.append(1)
+ self.assertEqual([1], l)
+
+ def testListChangesWrapper(self):
+ l = []
+ l_wrapper = data_structures._ListWrapper(l)
+ l.append(1)
+ self.assertEqual([1], l_wrapper)
+
def testHashing(self):
has_sequences = set([data_structures.List(),
data_structures.List()])
self.assertEqual(2, len(has_sequences))
self.assertNotIn(data_structures.List(), has_sequences)
+ with self.assertRaises(TypeError):
+ has_sequences.add(data_structures._ListWrapper([]))
class HasMapping(training.Model):
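The hashing asymmetry exercised in `testHashing` above is intentional: `List` keeps default identity hashing, while `_ListWrapper` must behave like a built-in list and therefore rejects hashing. A quick sketch:

```python
from tensorflow.python.training.checkpointable import data_structures

container = set([data_structures.List()])  # List hashes by object identity
try:
  container.add(data_structures._ListWrapper([]))
except TypeError as err:
  print(err)  # unhashable type: 'ListWrapper'
```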
diff --git a/tensorflow/python/training/checkpointable/layer_utils.py b/tensorflow/python/training/checkpointable/layer_utils.py
new file mode 100644
index 0000000000..978fcb2252
--- /dev/null
+++ b/tensorflow/python/training/checkpointable/layer_utils.py
@@ -0,0 +1,93 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Utilities related to layer/model functionality."""
+
+# TODO(b/110718070): Move these functions back to tensorflow/python/keras/utils
+# once __init__ files no longer require all of tf.keras to be imported together.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+def is_layer(obj):
+ """Implicit check for Layer-like objects."""
+ # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).
+ return (hasattr(obj, "call")
+ and hasattr(obj, "build")
+ and hasattr(obj, "variables"))
+
+
+def filter_empty_layer_containers(layer_list):
+ """Filter out empty Layer-like containers."""
+ return [layer for layer in layer_list
+ # Filter out only empty Checkpointable data structures. Empty Networks
+ # will still show up in Model.layers.
+ if is_layer(layer) or getattr(layer, "layers", True)]
+
+
+def gather_trainable_weights(trainable, sub_layers, extra_variables):
+ """Lists the trainable weights for an object with sub-layers.
+
+ Args:
+ trainable: Whether the object collecting the variables is trainable.
+ sub_layers: A flat list of Layer objects owned by this object, to collect
+ variables from.
+ extra_variables: Any extra variables to include. Their `.trainable` property
+ is used to categorize them.
+
+ Returns:
+ A list of collected trainable weights/variables.
+ """
+ if not trainable:
+ return []
+ weights = []
+ for layer in sub_layers:
+ weights += layer.trainable_weights
+ trainable_extra_variables = [
+ v for v in extra_variables if v.trainable]
+ return weights + trainable_extra_variables
+
+
+def gather_non_trainable_weights(trainable, sub_layers, extra_variables):
+ """Lists the non-trainable weights for an object with sub-layers.
+
+ Args:
+ trainable: Whether the object collecting the variables is trainable.
+ sub_layers: A flat list of Layer objects owned by this object, to collect
+ variables from.
+ extra_variables: Any extra variables to include. Their `.trainable` property
+ is used to categorize them.
+
+ Returns:
+ A list of collected non-trainable weights/variables.
+ """
+ trainable_extra_variables = []
+ non_trainable_extra_variables = []
+ for v in extra_variables:
+ if v.trainable:
+ trainable_extra_variables.append(v)
+ else:
+ non_trainable_extra_variables.append(v)
+ weights = []
+ for layer in sub_layers:
+ weights += layer.non_trainable_weights
+ if not trainable:
+ trainable_weights = []
+ for layer in sub_layers:
+ trainable_weights += layer.trainable_weights
+ return (trainable_weights + trainable_extra_variables
+ + weights + non_trainable_extra_variables)
+ return weights + non_trainable_extra_variables
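A hedged sketch of how a container class might wire these helpers up; the class below is hypothetical, chosen only to match the helper signatures (`CheckpointableDataStructure` in data_structures.py carries the same `_layers` and `_extra_variables` fields):

```python
from tensorflow.python.training.checkpointable import layer_utils

class WeightContainer(object):
  """Hypothetical container showing the gather_* helpers in use."""

  def __init__(self, sub_layers, extra_variables):
    self.trainable = True
    self._layers = sub_layers
    self._extra_variables = extra_variables

  @property
  def trainable_weights(self):
    return layer_utils.gather_trainable_weights(
        trainable=self.trainable,
        sub_layers=self._layers,
        extra_variables=self._extra_variables)

  @property
  def non_trainable_weights(self):
    return layer_utils.gather_non_trainable_weights(
        trainable=self.trainable,
        sub_layers=self._layers,
        extra_variables=self._extra_variables)
```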
diff --git a/tensorflow/python/training/checkpointable/tracking.py b/tensorflow/python/training/checkpointable/tracking.py
index 00e14ac982..bd0bed9d46 100644
--- a/tensorflow/python/training/checkpointable/tracking.py
+++ b/tensorflow/python/training/checkpointable/tracking.py
@@ -18,31 +18,7 @@ from __future__ import division
from __future__ import print_function
from tensorflow.python.training.checkpointable import base
-
-
-class NoDependency(object):
- """Allows attribute assignment to `Checkpointable` objects with no dependency.
-
- Example usage:
- ```python
- obj = Checkpointable()
- obj.has_dependency = tf.Variable(0., name="dep")
- obj.no_dependency = NoDependency(tf.Variable(1., name="nodep"))
- assert obj.no_dependency.name == "nodep:0"
- ```
-
- `obj` in this example has a dependency on the variable "dep", and both
- attributes contain un-wrapped `Variable` objects.
-
- `NoDependency` also works with `tf.keras.Model`, but only for checkpoint
- dependencies: wrapping a `Layer` in `NoDependency` will assign the (unwrapped)
- `Layer` to the attribute without a checkpoint dependency, but the `Model` will
- still track the `Layer` (so it will appear in `Model.layers`, and its
- variables will appear in `Model.variables`).
- """
-
- def __init__(self, value):
- self.value = value
+from tensorflow.python.training.checkpointable import data_structures
class NotCheckpointable(object):
@@ -86,18 +62,11 @@ class Checkpointable(base.CheckpointableBase):
def __setattr__(self, name, value):
"""Support self.foo = checkpointable syntax."""
- # Perform the attribute assignment, and potentially call other __setattr__
- # overrides such as that for tf.keras.Model.
- no_dependency = isinstance(value, NoDependency)
- if no_dependency:
- value = value.value
+ if getattr(self, "_setattr_tracking", True):
+ value = data_structures.sticky_attribute_assignment(
+ checkpointable=self, value=value, name=name)
super(Checkpointable, self).__setattr__(name, value)
- if not no_dependency and isinstance(value, base.CheckpointableBase):
- self._track_checkpointable(
- value, name=name,
- # Allow the user to switch the Checkpointable which is tracked by this
- # name, since assigning a new variable to an attribute has
- # historically been fine (e.g. Adam did this).
- # TODO(allenl): Should this be a warning once Checkpointable save/load
- # is usable?
- overwrite=True)
+
+ def _no_dependency(self, value):
+ """Override to allow CheckpointableBase to disable dependency tracking."""
+ return data_structures.NoDependency(value)
diff --git a/tensorflow/python/training/checkpointable/tracking_test.py b/tensorflow/python/training/checkpointable/tracking_test.py
index baf6f57efb..96da0d6e47 100644
--- a/tensorflow/python/training/checkpointable/tracking_test.py
+++ b/tensorflow/python/training/checkpointable/tracking_test.py
@@ -16,8 +16,19 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import os
+
+import numpy
+
+from tensorflow.python.framework import test_util
+from tensorflow.python.keras.engine import training
+from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
+from tensorflow.python.training.checkpointable import base
+from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.training.checkpointable import tracking
+from tensorflow.python.training.checkpointable import util
+from tensorflow.python.util import nest
class InterfaceTests(test.TestCase):
@@ -27,23 +38,134 @@ class InterfaceTests(test.TestCase):
root.leaf = tracking.Checkpointable()
root.leaf = root.leaf
duplicate_name_dep = tracking.Checkpointable()
- with self.assertRaises(ValueError):
+ with self.assertRaisesRegexp(ValueError, "already declared"):
root._track_checkpointable(duplicate_name_dep, name="leaf")
# No error; we're overriding __setattr__, so we can't really stop people
# from doing this while maintaining backward compatibility.
root.leaf = duplicate_name_dep
root._track_checkpointable(duplicate_name_dep, name="leaf", overwrite=True)
+ self.assertIs(duplicate_name_dep, root._lookup_dependency("leaf"))
+ (_, dep_object), = root._checkpoint_dependencies
+ self.assertIs(duplicate_name_dep, dep_object)
def testNoDependency(self):
root = tracking.Checkpointable()
hasdep = tracking.Checkpointable()
root.hasdep = hasdep
nodep = tracking.Checkpointable()
- root.nodep = tracking.NoDependency(nodep)
+ root.nodep = data_structures.NoDependency(nodep)
self.assertEqual(1, len(root._checkpoint_dependencies))
self.assertIs(root._checkpoint_dependencies[0].ref, root.hasdep)
self.assertIs(root.hasdep, hasdep)
self.assertIs(root.nodep, nodep)
+ class NoDependencyModel(training.Model):
+
+ @base.no_automatic_dependency_tracking
+ def __init__(self):
+ super(NoDependencyModel, self).__init__()
+ self.a = []
+ self.b = tracking.Checkpointable()
+
+ nodeps = NoDependencyModel()
+ self.assertEqual([nodeps], util.list_objects(nodeps))
+
+ def testListBasic(self):
+ a = tracking.Checkpointable()
+ b = tracking.Checkpointable()
+ a.l = [b]
+ c = tracking.Checkpointable()
+ a.l.append(c)
+ a_deps = util.list_objects(a)
+ self.assertIn(b, a_deps)
+ self.assertIn(c, a_deps)
+ direct_a_dep, = a._checkpoint_dependencies
+ self.assertEqual("l", direct_a_dep.name)
+ self.assertIn(b, direct_a_dep.ref)
+ self.assertIn(c, direct_a_dep.ref)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testMutationDirtiesList(self):
+ a = tracking.Checkpointable()
+ b = tracking.Checkpointable()
+ a.l = [b]
+ c = tracking.Checkpointable()
+ a.l.insert(0, c)
+ checkpoint = util.Checkpoint(a=a)
+ with self.assertRaisesRegexp(ValueError, "A list element was replaced"):
+ checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
+
+ @test_util.run_in_graph_and_eager_modes
+ def testOutOfBandEditDirtiesList(self):
+ a = tracking.Checkpointable()
+ b = tracking.Checkpointable()
+ held_reference = [b]
+ a.l = held_reference
+ c = tracking.Checkpointable()
+ held_reference.append(c)
+ checkpoint = util.Checkpoint(a=a)
+ with self.assertRaisesRegexp(ValueError, "The wrapped list was modified"):
+ checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
+
+ @test_util.run_in_graph_and_eager_modes
+ def testNestedLists(self):
+ a = tracking.Checkpointable()
+ a.l = []
+ b = tracking.Checkpointable()
+ a.l.append([b])
+ c = tracking.Checkpointable()
+ a.l[0].append(c)
+ a_deps = util.list_objects(a)
+ self.assertIn(b, a_deps)
+ self.assertIn(c, a_deps)
+ a.l[0].append(1)
+ d = tracking.Checkpointable()
+ a.l[0].append(d)
+ a_deps = util.list_objects(a)
+ self.assertIn(d, a_deps)
+ self.assertIn(b, a_deps)
+ self.assertIn(c, a_deps)
+ self.assertNotIn(1, a_deps)
+ e = tracking.Checkpointable()
+ f = tracking.Checkpointable()
+ a.l1 = [[], [e]]
+ a.l1[0].append(f)
+ a_deps = util.list_objects(a)
+ self.assertIn(e, a_deps)
+ self.assertIn(f, a_deps)
+ checkpoint = util.Checkpoint(a=a)
+ checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
+ a.l[0].append(data_structures.NoDependency([]))
+ a.l[0][-1].append(5)
+ checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
+ # Dirtying the inner list means the root object is unsaveable.
+ a.l[0][1] = 2
+ with self.assertRaisesRegexp(ValueError, "A list element was replaced"):
+ checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
+
+ @test_util.run_in_graph_and_eager_modes
+ def testNoDepList(self):
+ a = training.Model()
+ a.l1 = data_structures.NoDependency([])
+ a.l1.insert(1, 0)
+ self.assertTrue(isinstance(a.l1, list))
+ checkpoint = util.Checkpoint(a=a)
+ checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
+ a.l2 = []
+ a.l2.insert(1, 0)
+ with self.assertRaisesRegexp(ValueError, "A list element was replaced"):
+ checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
+
+ @test_util.run_in_graph_and_eager_modes
+ def testAssertions(self):
+ a = tracking.Checkpointable()
+ a.l = [numpy.zeros([2, 2])]
+ self.assertAllEqual([numpy.zeros([2, 2])], a.l)
+ self.assertAllClose([numpy.zeros([2, 2])], a.l)
+ nest.map_structure(self.assertAllClose, a.l, [numpy.zeros([2, 2])])
+ a.tensors = [array_ops.ones([2, 2]), array_ops.zeros([3, 3])]
+ self.assertAllClose([numpy.ones([2, 2]), numpy.zeros([3, 3])],
+ self.evaluate(a.tensors))
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/python/training/checkpointable/util.py b/tensorflow/python/training/checkpointable/util.py
index e0f61137b1..6ae5765b13 100644
--- a/tensorflow/python/training/checkpointable/util.py
+++ b/tensorflow/python/training/checkpointable/util.py
@@ -40,6 +40,7 @@ from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import saveable_object as saveable_object_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.checkpointable import base
+from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_contextlib
@@ -93,7 +94,7 @@ class _CheckpointRestoreCoordinator(object):
# use them (for example because of inconsistent references when
# loading). Used to make status assertions fail when loading checkpoints
# that don't quite match.
- self.all_python_objects = weakref.WeakSet()
+ self.all_python_objects = _ObjectIdentityWeakSet()
self.save_path = save_path
self.dtype_map = dtype_map
# When graph building, contains a list of ops to run to restore objects from
@@ -272,11 +273,129 @@ def object_metadata(save_path):
return object_graph_proto
+class _ObjectIdentityWrapper(object):
+ """Wraps an object, mapping __eq__ on wrapper to "is" on wrapped.
+
+ Since __eq__ is based on object identity, it's safe to also define __hash__
+ based on object ids. This lets us add unhashable types like checkpointable
+ _ListWrapper objects to object-identity collections.
+ """
+
+ def __init__(self, wrapped):
+ self._wrapped = wrapped
+
+ @property
+ def unwrapped(self):
+ return self._wrapped
+
+ def __eq__(self, other):
+ if isinstance(other, _ObjectIdentityWrapper):
+ return self._wrapped is other._wrapped # pylint: disable=protected-access
+ return self._wrapped is other
+
+ def __hash__(self):
+ # Wrapper id() is also fine for weakrefs. In fact, we rely on
+ # id(weakref.ref(a)) == id(weakref.ref(a)) and weakref.ref(a) is
+ # weakref.ref(a) in _WeakObjectIdentityWrapper.
+ return id(self._wrapped)
+
+
+class _WeakObjectIdentityWrapper(_ObjectIdentityWrapper):
+
+ def __init__(self, wrapped):
+ super(_WeakObjectIdentityWrapper, self).__init__(weakref.ref(wrapped))
+
+ @property
+ def unwrapped(self):
+ return self._wrapped()
+
+
+class _ObjectIdentityDictionary(collections.MutableMapping):
+ """A mutable mapping data structure which compares using "is".
+
+ This is necessary because we have checkpointable objects (_ListWrapper) which
+ have behavior identical to built-in Python lists (including being unhashable
+ and comparing based on the equality of their contents by default).
+ """
+
+ def __init__(self):
+ self._storage = {}
+
+ def _wrap_key(self, key):
+ return _ObjectIdentityWrapper(key)
+
+ def __getitem__(self, key):
+ return self._storage[self._wrap_key(key)]
+
+ def __setitem__(self, key, value):
+ self._storage[self._wrap_key(key)] = value
+
+ def __delitem__(self, key):
+ del self._storage[self._wrap_key(key)]
+
+ def __len__(self):
+ return len(self._storage)
+
+ def __iter__(self):
+ for key in self._storage:
+ yield key.unwrapped
+
+
+class _ObjectIdentityWeakKeyDictionary(_ObjectIdentityDictionary):
+ """Like weakref.WeakKeyDictionary, but compares objects with "is"."""
+
+ def _wrap_key(self, key):
+ return _WeakObjectIdentityWrapper(key)
+
+ def __len__(self):
+ # Iterate, discarding old weak refs
+ return len(list(self))
+
+ def __iter__(self):
+ keys = self._storage.keys()
+ for key in keys:
+ unwrapped = key.unwrapped
+ if unwrapped is None:
+ del self[key]
+ else:
+ yield unwrapped
+
+
+class _ObjectIdentityWeakSet(collections.MutableSet):
+ """Like weakref.WeakSet, but compares objects with "is"."""
+
+ def __init__(self):
+ self._storage = set()
+
+ def __contains__(self, key):
+ return _WeakObjectIdentityWrapper(key) in self._storage
+
+ def discard(self, key):
+ self._storage.discard(_WeakObjectIdentityWrapper(key))
+
+ def add(self, key):
+ self._storage.add(_WeakObjectIdentityWrapper(key))
+
+ def __len__(self):
+ # Iterate, discarding old weak refs
+ return len(list(self))
+
+ def __iter__(self):
+ keys = list(self._storage)
+ for key in keys:
+ unwrapped = key.unwrapped
+ if unwrapped is None:
+ self.discard(key)
+ else:
+ yield unwrapped
+
+
def _breadth_first_checkpointable_traversal(root_checkpointable):
"""Find shortest paths to all variables owned by dependencies of root."""
bfs_sorted = []
to_visit = collections.deque([root_checkpointable])
- path_to_root = {root_checkpointable: ()}
+ path_to_root = _ObjectIdentityDictionary()
+ path_to_root[root_checkpointable] = ()
while to_visit:
current_checkpointable = to_visit.popleft()
if isinstance(current_checkpointable, tracking.NotCheckpointable):
@@ -337,7 +456,7 @@ def _slot_variable_naming_for_optimizer(optimizer_path):
def _serialize_slot_variables(checkpointable_objects, node_ids, object_names):
"""Gather and name slot variables."""
non_slot_objects = list(checkpointable_objects)
- slot_variables = {}
+ slot_variables = _ObjectIdentityDictionary()
for checkpointable in non_slot_objects:
if isinstance(checkpointable, optimizer_lib.Optimizer):
naming_scheme = _slot_variable_naming_for_optimizer(
@@ -500,11 +619,12 @@ def _serialize_object_graph(root_checkpointable, saveables_cache):
"""
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
- object_names = {
- obj: _object_prefix_from_path(path)
- for obj, path in path_to_root.items()}
- node_ids = {node: node_id for node_id, node
- in enumerate(checkpointable_objects)}
+ object_names = _ObjectIdentityDictionary()
+ for obj, path in path_to_root.items():
+ object_names[obj] = _object_prefix_from_path(path)
+ node_ids = _ObjectIdentityDictionary()
+ for node_id, node in enumerate(checkpointable_objects):
+ node_ids[node] = node_id
slot_variables = _serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
@@ -535,11 +655,12 @@ def list_objects(root_checkpointable):
# to run.
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
- object_names = {
- obj: _object_prefix_from_path(path)
- for obj, path in path_to_root.items()}
- node_ids = {node: node_id for node_id, node
- in enumerate(checkpointable_objects)}
+ object_names = _ObjectIdentityDictionary()
+ for obj, path in path_to_root.items():
+ object_names[obj] = _object_prefix_from_path(path)
+ node_ids = _ObjectIdentityDictionary()
+ for node_id, node in enumerate(checkpointable_objects):
+ node_ids[node] = node_id
_serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
@@ -988,7 +1109,7 @@ class CheckpointableSaver(object):
else:
# Maps Checkpointable objects -> attribute names -> SaveableObjects, to
# avoid re-creating SaveableObjects when graph building.
- self._saveable_object_cache = weakref.WeakKeyDictionary()
+ self._saveable_object_cache = _ObjectIdentityWeakKeyDictionary()
@property
def _root_checkpointable(self):
@@ -1310,7 +1431,7 @@ class Checkpoint(tracking.Checkpointable):
with ops.device("/cpu:0"):
# add_variable creates a dependency named "save_counter"; NoDependency
# prevents creating a second dependency named "_save_counter".
- self._save_counter = tracking.NoDependency(
+ self._save_counter = data_structures.NoDependency(
add_variable(self, name="save_counter", initializer=0,
dtype=dtypes.int64))
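The identity-based containers matter because `_ListWrapper` objects compare by content and are unhashable, so a plain dict or `weakref.WeakKeyDictionary` would either reject them or conflate distinct empty wrappers. A short sketch of the behavior the new classes provide, assuming this patch is applied:

```python
from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.training.checkpointable import util

a = data_structures._ListWrapper([])
b = data_structures._ListWrapper([])
assert a == b and a is not b          # equal by content, distinct objects

d = util._ObjectIdentityDictionary()
d[a] = 'first'
d[b] = 'second'                       # kept separate: keys compare with "is"
assert d[a] == 'first' and d[b] == 'second' and len(d) == 2
```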
diff --git a/tensorflow/python/training/checkpointable/util_test.py b/tensorflow/python/training/checkpointable/util_test.py
index 896ea47b97..3c1a4a6f83 100644
--- a/tensorflow/python/training/checkpointable/util_test.py
+++ b/tensorflow/python/training/checkpointable/util_test.py
@@ -102,7 +102,7 @@ class InterfaceTests(test.TestCase):
name="duplicate", initial_value=1.)
duplicate = checkpointable_utils.add_variable(
obj, name="duplicate", shape=[])
- with self.assertRaisesRegexp(ValueError, "'duplicate' already exists"):
+ with self.assertRaisesRegexp(ValueError, "'duplicate'.*already declared"):
checkpointable_utils.add_variable(obj, name="duplicate", shape=[])
self.evaluate(checkpointable_utils.gather_initializers(obj))
diff --git a/tensorflow/python/training/distribute.py b/tensorflow/python/training/distribute.py
index 6a326b65bb..c719045c7f 100644
--- a/tensorflow/python/training/distribute.py
+++ b/tensorflow/python/training/distribute.py
@@ -221,11 +221,11 @@ def has_distribution_strategy():
def get_loss_reduction():
- """Reduce `method_string` corresponding to the last loss reduction."""
+ """Reduce `aggregation` corresponding to the last loss reduction."""
loss_reduction = ops.get_default_graph()._last_loss_reduction # pylint: disable=protected-access
if loss_reduction == losses_impl.Reduction.SUM:
- return "sum"
- return "mean"
+ return variable_scope.VariableAggregation.SUM
+ return variable_scope.VariableAggregation.MEAN
# ------------------------------------------------------------------------------
@@ -539,8 +539,8 @@ class DistributionStrategy(object):
1. Wrap your input dataset in `d.distribute_dataset()` and create an iterator.
2. Define each tower `d.call_for_each_tower()` up to the point of
getting a list of gradient, variable pairs.
- 3. Call `d.reduce("sum", t, v)` or `d.batch_reduce()` to sum the
- gradients (with locality T) into values with locality V(`v`).
+ 3. Call `d.reduce(VariableAggregation.SUM, t, v)` or `d.batch_reduce()` to sum
+ the gradients (with locality T) into values with locality V(`v`).
4. Call `d.update(v)` for each variable to update its value.
Steps 3 and 4 are done automatically by class `Optimizer` if you call
@@ -614,43 +614,6 @@ class DistributionStrategy(object):
# Note: should support "colocate_with" argument.
raise NotImplementedError("must be implemented in descendants")
- def tower_local_var_scope(self, reduce_method):
- """Inside this scope, new variables will not be mirrored.
-
- There will still be one component variable per tower, but there is
- no requirement that they stay in sync. Instead, when saving them
- or calling `read_var()`, we use the value that results when
- calling `reduce()` on all the towers' variables.
-
- Note: tower-local implies not trainable. Instead, it is expected
- that each tower will directly update (using `assign_add()` or
- whatever) its local variable instance but only the aggregated
- value (accessible using `read_var()`) will be exported from the
- model. When it is acceptable to only aggregate on export, we
- greatly reduce communication overhead by using tower-local
- variables.
-
- Note: All component variables will be initialized to the same
- value, using the initialization expression from the first tower.
- The values will match even if the initialization expression uses
- random numbers.
-
- Args:
- reduce_method: String used as a `method_string` to `reduce()`
- to get the value to save when checkpointing.
-
- Returns:
- A context manager.
- """
- def create_tower_local_variable(next_creator, *args, **kwargs):
- _require_distribution_strategy_scope(self)
- kwargs["use_resource"] = True
- kwargs["tower_local_reduce_method"] = reduce_method
- return next_creator(*args, **kwargs)
-
- _require_distribution_strategy_scope(self)
- return variable_scope.variable_creator_scope(create_tower_local_variable)
-
def read_var(self, v):
"""Reads the value of a variable.
@@ -816,12 +779,12 @@ class DistributionStrategy(object):
def _call_for_each_tower(self, fn, *args, **kwargs):
raise NotImplementedError("must be implemented in descendants")
- def reduce(self, method_string, value, destinations=None):
+ def reduce(self, aggregation, value, destinations=None):
"""Combine (via e.g. sum or mean) values across towers.
Args:
- method_string: A string indicating how to combine values, either
- "sum" or "mean".
+ aggregation: Indicates how a variable will be aggregated. Accepted values
+ are @{tf.VariableAggregation.SUM}, @{tf.VariableAggregation.MEAN}.
value: A per-device value with one value per tower.
destinations: An optional mirrored variable, a device string,
list of device strings. The return value will be copied to all
@@ -836,18 +799,21 @@ class DistributionStrategy(object):
# TODO(josh11b): Return an unwrapped value if colocate_with is a
# single device.
_require_cross_tower_context(self)
- assert method_string in ("sum", "mean")
- return self._reduce(method_string, value, destinations)
+ assert aggregation in [
+ variable_scope.VariableAggregation.SUM,
+ variable_scope.VariableAggregation.MEAN
+ ]
+ return self._reduce(aggregation, value, destinations)
- def _reduce(self, method_string, value, destinations):
+ def _reduce(self, aggregation, value, destinations):
raise NotImplementedError("must be implemented in descendants")
- def batch_reduce(self, method_string, value_destination_pairs):
+ def batch_reduce(self, aggregation, value_destination_pairs):
"""Combine multiple `reduce` calls into one for faster execution.
Args:
- method_string: A string indicating how to combine values, either
- "sum" or "mean".
+ aggregation: Indicates how the values will be combined. Accepted values
+ are @{tf.VariableAggregation.SUM}, @{tf.VariableAggregation.MEAN}.
value_destination_pairs: A sequence of (value, destinations)
pairs. See `reduce()` for a description.
@@ -856,12 +822,17 @@ class DistributionStrategy(object):
"""
# TODO(josh11b): More docstring
_require_cross_tower_context(self)
- assert method_string in ("sum", "mean")
- return self._batch_reduce(method_string, value_destination_pairs)
-
- def _batch_reduce(self, method_string, value_destination_pairs):
- return [self.reduce(method_string, t, destinations=v)
- for t, v in value_destination_pairs]
+ assert aggregation in [
+ variable_scope.VariableAggregation.SUM,
+ variable_scope.VariableAggregation.MEAN
+ ]
+ return self._batch_reduce(aggregation, value_destination_pairs)
+
+ def _batch_reduce(self, aggregation, value_destination_pairs):
+ return [
+ self.reduce(aggregation, t, destinations=v)
+ for t, v in value_destination_pairs
+ ]
def update(self, var, fn, *args, **kwargs):
"""Run `fn` to update `var` using inputs mirrored to the same devices.
@@ -1090,10 +1061,6 @@ class TowerContext(object):
finally:
_pop_per_thread_mode()
- def tower_local_var_scope(self, reduce_method):
- """Alias for distribution_strategy.tower_local_var_scope()."""
- return self._distribution_strategy.tower_local_var_scope(reduce_method)
-
@property
def is_single_tower(self):
"""Returns whether there is a single tower or multiple."""
@@ -1140,22 +1107,11 @@ class _DefaultDistributionStrategy(DistributionStrategy):
def creator(next_creator, *args, **kwargs):
_require_distribution_strategy_scope(self)
- kwargs.pop("tower_local_reduce_method", None)
return next_creator(*args, **kwargs)
return _CurrentDistributionContext(
self, variable_scope.variable_creator_scope(creator))
- def tower_local_var_scope(self, reduce_method):
- """Does not set to resource variables."""
- def create_tower_local_variable(next_creator, *args, **kwargs):
- _require_distribution_strategy_scope(self)
- kwargs["trainable"] = False
- return next_creator(*args, **kwargs)
-
- _require_distribution_strategy_scope(self)
- return variable_scope.variable_creator_scope(create_tower_local_variable)
-
def colocate_vars_with(self, colocate_with_variable):
"""Does not require `self.scope`."""
_require_distribution_strategy_scope(self)
@@ -1176,9 +1132,9 @@ class _DefaultDistributionStrategy(DistributionStrategy):
with TowerContext(self, tower_id=0):
return fn(*args, **kwargs)
- def _reduce(self, method_string, value, destinations):
+ def _reduce(self, aggregation, value, destinations):
# TODO(josh11b): Use destinations?
- del method_string, destinations
+ del aggregation, destinations
return value
def _update(self, var, fn, *args, **kwargs):
diff --git a/tensorflow/python/training/distribute_test.py b/tensorflow/python/training/distribute_test.py
index 0a4f19c31f..694145ede7 100644
--- a/tensorflow/python/training/distribute_test.py
+++ b/tensorflow/python/training/distribute_test.py
@@ -29,6 +29,14 @@ class _TestTowerContext(distribute.TowerContext):
return kwargs["test_arg"]
+def _get_test_variable(name, synchronization, aggregation):
+ return {
+ "name": name,
+ "synchronization": synchronization,
+ "aggregation": aggregation
+ }
+
+
class _TestStrategy(distribute.DistributionStrategy):
def _call_for_each_tower(self, fn, *args, **kwargs):
@@ -36,7 +44,8 @@ class _TestStrategy(distribute.DistributionStrategy):
return fn(*args, **kwargs)
def _create_variable(self, next_creator, *args, **kwargs):
- return kwargs["name"]
+ return _get_test_variable(kwargs["name"], kwargs["synchronization"],
+ kwargs["aggregation"])
def _assert_in_default_state(t):
@@ -61,7 +70,11 @@ class TestStrategyTest(test.TestCase):
self.assertTrue(distribute.has_distribution_strategy())
self.assertIs(dist, distribute.get_distribution_strategy())
self.assertEqual("foo", tower_context.merge_call(None, test_arg="foo"))
- self.assertEqual("bar", variable_scope.variable(1.0, name="bar"))
+ expected_value = _get_test_variable(
+ "bar", variable_scope.VariableSynchronization.AUTO,
+ variable_scope.VariableAggregation.NONE)
+ self.assertDictEqual(expected_value,
+ variable_scope.variable(1.0, name="bar"))
with self.assertRaises(RuntimeError):
dist.call_for_each_tower(run_fn)
@@ -77,7 +90,27 @@ class TestStrategyTest(test.TestCase):
self.assertIs(dist, distribute.get_cross_tower_context())
self.assertTrue(distribute.has_distribution_strategy())
self.assertIs(dist, distribute.get_distribution_strategy())
- self.assertEqual("baz", variable_scope.variable(1.0, name="baz"))
+ expected_value = _get_test_variable(
+ "baz", variable_scope.VariableSynchronization.AUTO,
+ variable_scope.VariableAggregation.NONE)
+ self.assertDictEqual(expected_value,
+ variable_scope.variable(1.0, name="baz"))
+ _assert_in_default_state(self)
+
+ def testSettingSynchronizationAndAggregation(self):
+ _assert_in_default_state(self)
+ dist = _TestStrategy()
+ with dist.scope():
+ expected_value = _get_test_variable(
+ "baz", variable_scope.VariableSynchronization.ON_WRITE,
+ variable_scope.VariableAggregation.MEAN)
+ self.assertDictEqual(
+ expected_value,
+ variable_scope.variable(
+ 1.0,
+ name="baz",
+ synchronization=variable_scope.VariableSynchronization.ON_WRITE,
+ aggregation=variable_scope.VariableAggregation.MEAN))
_assert_in_default_state(self)
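Since `tower_local_var_scope()` is removed above, a tower-local-style variable can presumably be requested directly through the new constructor arguments. A hedged sketch, with `ON_READ` as the assumed replacement semantics:

    # One component variable per tower, not trainable, combined with SUM
    # when read or saved (assumption: ON_READ replaces tower-local scopes).
    counter = variable_scope.variable(
        0.0,
        name="counter",
        trainable=False,
        synchronization=variable_scope.VariableSynchronization.ON_READ,
        aggregation=variable_scope.VariableAggregation.SUM)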
diff --git a/tensorflow/python/training/optimizer.py b/tensorflow/python/training/optimizer.py
index fe9ffde11c..f75db08059 100644
--- a/tensorflow/python/training/optimizer.py
+++ b/tensorflow/python/training/optimizer.py
@@ -77,9 +77,10 @@ def _deduplicate_indexed_slices(values, indices):
def _var_key(var):
- if context.executing_eagerly():
- return var._unique_id # pylint: disable=protected-access
- return (var.op.graph, var.op.name)
+ # TODO(ashankar): Consolidate handling for eager and graph
+ if hasattr(var, "op"):
+ return (var.op.graph, var.op.name)
+ return var._unique_id # pylint: disable=protected-access
class _OptimizableVariable(object):
@@ -461,7 +462,8 @@ class Optimizer(
# Have to be careful to call distribute_lib.get_loss_reduction()
# *after* loss() is evaluated, so we know what loss reduction it uses.
# TODO(josh11b): Test that we handle weight decay in a reasonable way.
- if distribute_lib.get_loss_reduction() == "mean":
+ if (distribute_lib.get_loss_reduction() ==
+ variable_scope.VariableAggregation.MEAN):
num_towers = distribute_lib.get_distribution_strategy().num_towers
if num_towers > 1:
loss_value *= (1. / num_towers)
@@ -478,7 +480,8 @@ class Optimizer(
"be a function when eager execution is enabled.")
# Scale loss if using a "mean" loss reduction and multiple towers.
- if distribute_lib.get_loss_reduction() == "mean":
+ if (distribute_lib.get_loss_reduction() ==
+ variable_scope.VariableAggregation.MEAN):
num_towers = distribute_lib.get_distribution_strategy().num_towers
if num_towers > 1:
loss *= (1. / num_towers)
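The 1/num_towers scaling makes the later SUM reduction of per-tower gradients equal the gradient of the mean loss. A small, self-contained check with hypothetical numbers:

    # With 4 towers and MEAN loss reduction, each tower's loss is scaled by
    # 1/4, so summing the scaled losses reproduces the mean of the raw losses.
    num_towers = 4
    per_tower_losses = [1.0, 2.0, 3.0, 4.0]  # hypothetical values
    scaled = [loss * (1. / num_towers) for loss in per_tower_losses]
    assert abs(sum(scaled) - sum(per_tower_losses) / num_towers) < 1e-9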
@@ -649,7 +652,8 @@ class Optimizer(
towers. If `global_step` was not None, that operation also
increments `global_step`.
"""
- reduced_grads = distribution.batch_reduce("sum", grads_and_vars)
+ reduced_grads = distribution.batch_reduce(
+ variable_scope.VariableAggregation.SUM, grads_and_vars)
var_list = [v for _, v in grads_and_vars]
grads_and_vars = zip(reduced_grads, var_list)
# Note that this is called in a cross-tower context.
diff --git a/tensorflow/python/training/quantize_training.i b/tensorflow/python/training/quantize_training.i
index fb5e47efa0..54d6789616 100644
--- a/tensorflow/python/training/quantize_training.i
+++ b/tensorflow/python/training/quantize_training.i
@@ -73,6 +73,8 @@ def do_quantize_training_on_graphdef(input_graph, num_bits):
do_quantize_training_on_graphdef._tf_api_names = [
'train.do_quantize_training_on_graphdef']
+do_quantize_training_on_graphdef._tf_api_names_v1 = [
+ 'train.do_quantize_training_on_graphdef']
%}
%unignoreall
diff --git a/tensorflow/python/training/saver.py b/tensorflow/python/training/saver.py
index 53ed89e4ab..1ee975fbe4 100644
--- a/tensorflow/python/training/saver.py
+++ b/tensorflow/python/training/saver.py
@@ -22,7 +22,6 @@ from __future__ import print_function
import collections
import os.path
import re
-import sys
import time
import uuid
@@ -1043,8 +1042,8 @@ def get_checkpoint_state(checkpoint_dir, latest_filename=None):
ckpt = CheckpointState()
text_format.Merge(file_content, ckpt)
if not ckpt.model_checkpoint_path:
- raise ValueError("Invalid checkpoint state loaded from %s",
- checkpoint_dir)
+ raise ValueError("Invalid checkpoint state loaded from "
+ + checkpoint_dir)
# For relative model_checkpoint_path and all_model_checkpoint_paths,
# prepend checkpoint_dir.
if not os.path.isabs(ckpt.model_checkpoint_path):
@@ -1706,12 +1705,17 @@ class Saver(object):
save_path: Path where parameters were previously saved.
Raises:
- ValueError: If save_path is None.
+ ValueError: If save_path is None or not a valid checkpoint.
"""
if self._is_empty:
return
if save_path is None:
raise ValueError("Can't load save_path when it is None.")
+
+ if not checkpoint_exists(compat.as_text(save_path)):
+ raise ValueError("The passed save_path is not a valid checkpoint: "
+ + compat.as_text(save_path))
+
logging.info("Restoring parameters from %s", compat.as_text(save_path))
try:
if context.executing_eagerly():
@@ -1719,23 +1723,24 @@ class Saver(object):
else:
sess.run(self.saver_def.restore_op_name,
{self.saver_def.filename_tensor_name: save_path})
- except errors.NotFoundError:
- exception_type, exception_value, exception_traceback = sys.exc_info()
- # The checkpoint would not be loaded successfully as is. Try to parse it
- # as an object-based checkpoint.
- should_reraise = False
+ except errors.NotFoundError as err:
+ # There are three common conditions that might cause this error:
+ # 0. The file is missing. We ignore this case here, as it is checked above.
+ # 1. This is an object-based checkpoint trying name-based loading.
+ # 2. The graph has been altered and a variable or other name is missing.
+
+ # 1. The checkpoint would not be loaded successfully as is. Try to parse
+ # it as an object-based checkpoint.
try:
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
object_graph_string = reader.get_tensor(
checkpointable.OBJECT_GRAPH_PROTO_KEY)
except errors.NotFoundError:
- # This is not an object-based checkpoint, or the checkpoint doesn't
- # exist. Re-raise the original exception, but do it outside the except
- # block so the object graph lookup isn't included in the stack trace.
- should_reraise = True
- if should_reraise:
- six.reraise(exception_type, exception_value, exception_traceback)
- del exception_traceback # avoid reference cycles
+ # 2. This is not an object-based checkpoint, which likely means there
+ # is a graph mismatch. Re-raise the original error with
+ # a helpful message (b/110263146)
+ raise _wrap_restore_error_with_msg(
+ err, "a Variable name or other graph key that is missing")
# This is an object-based checkpoint. We'll print a warning and then do
# the restore.
@@ -1747,6 +1752,11 @@ class Saver(object):
self._restore_from_object_based_checkpoint(
sess=sess, save_path=save_path,
object_graph_string=object_graph_string)
+ except errors.InvalidArgumentError as err:
+ # There is a mismatch between the graph and the checkpoint being loaded.
+ # We add a more reasonable error message here to help users (b/110263146)
+ raise _wrap_restore_error_with_msg(
+ err, "a mismatch between the current graph and the graph")
def _restore_from_object_based_checkpoint(self, sess, save_path,
object_graph_string):
@@ -2139,6 +2149,14 @@ def _meta_graph_filename(checkpoint_filename, meta_graph_suffix="meta"):
return meta_graph_filename
+def _wrap_restore_error_with_msg(err, extra_verbiage):
+ err_msg = ("Restoring from checkpoint failed. This is most likely "
+ "due to {} from the checkpoint. Please ensure that you "
+ "have not altered the graph expected based on the checkpoint. "
+ "Original error:\n\n{}").format(extra_verbiage, err.message)
+ return err.__class__(err.node_def, err.op, err_msg)
+
+
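For illustration, the wrapper preserves the error class, node_def and op while rewriting the message; `err` here is a hypothetical `NotFoundError` caught during restore:

    # The resulting message reads roughly:
    #   Restoring from checkpoint failed. This is most likely due to a
    #   Variable name or other graph key that is missing from the checkpoint.
    #   Please ensure that you have not altered the graph expected based on
    #   the checkpoint. Original error:
    #   <original NotFoundError message>
    wrapped = _wrap_restore_error_with_msg(
        err, "a Variable name or other graph key that is missing")
    raise wrapped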
ops.register_proto_function(
ops.GraphKeys.SAVERS,
proto_type=saver_pb2.SaverDef,
diff --git a/tensorflow/python/training/saver_test.py b/tensorflow/python/training/saver_test.py
index f235300eb5..ae9c244aaf 100644
--- a/tensorflow/python/training/saver_test.py
+++ b/tensorflow/python/training/saver_test.py
@@ -24,10 +24,8 @@ import math
import os
import random
import shutil
-import sys
import tempfile
import time
-import traceback
import numpy as np
import six
@@ -369,8 +367,8 @@ class SaverTest(test.TestCase):
for ver in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
with self.test_session() as sess:
save = saver_module.Saver({"v0": v0}, write_version=ver)
- with self.assertRaisesRegexp(errors.NotFoundError,
- "Failed to find any matching files for"):
+ with self.assertRaisesRegexp(
+ ValueError, "The passed save_path is not a valid checkpoint:"):
save.restore(sess, "invalid path")
def testInt64(self):
@@ -3139,27 +3137,33 @@ class CheckpointableCompatibilityTests(test.TestCase):
errors.NotFoundError, "Key b not found in checkpoint"):
b_saver.restore(sess=sess, save_path=save_path)
- def testCheckpointNotFoundErrorRaised(self):
- # Restore does some tricky exception handling to figure out if it should
- # load an object-based checkpoint. Tests that the exception handling isn't
- # too broad.
- a = resource_variable_ops.ResourceVariable(1., name="a")
- saver = saver_module.Saver([a])
- with self.test_session() as sess:
- with self.assertRaisesRegexp(
- errors.NotFoundError,
- "Failed to find any matching files for path_which_does_not_exist"):
- saver.restore(sess=sess, save_path="path_which_does_not_exist")
- try:
- saver.restore(sess=sess, save_path="path_which_does_not_exist")
- except errors.NotFoundError:
- # Make sure we don't have a confusing "During handling of the above
- # exception" block in Python 3.
- # pylint: disable=no-value-for-parameter
- exception_string = "\n".join(
- traceback.format_exception(*sys.exc_info()))
- # pylint: enable=no-value-for-parameter
- self.assertNotIn("NewCheckpointReader", exception_string)
+ with self.assertRaises(errors.NotFoundError) as cs:
+ b_saver.restore(sess=sess, save_path=save_path)
+
+ # Make sure we don't have a confusing "During handling of the above
+ # exception" block in Python 3.
+ self.assertNotIn("NewCheckpointReader", cs.exception.message)
+
+ def testGraphChangedForRestoreErrorRaised(self):
+ checkpoint_directory = self.get_temp_dir()
+ checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
+
+ with ops_lib.Graph().as_default() as g:
+ a = variables.Variable(1., name="a")
+ a_saver = saver_module.Saver([a])
+
+ with self.test_session(graph=g) as sess:
+ sess.run(a.initializer)
+ save_path = a_saver.save(sess=sess, save_path=checkpoint_prefix)
+
+ with ops_lib.Graph().as_default() as g:
+ a = variables.Variable([1.], name="a")
+ a_saver = saver_module.Saver([a])
+ with self.test_session(graph=g) as sess:
+ with self.assertRaisesRegexp(
+ errors.InvalidArgumentError,
+ "a mismatch between the current graph and the graph"):
+ a_saver.restore(sess=sess, save_path=save_path)
def testLoadFromObjectBasedGraph(self):
checkpoint_directory = self.get_temp_dir()
diff --git a/tensorflow/python/training/server_lib.py b/tensorflow/python/training/server_lib.py
index 2f421d1cc0..58cf5277fe 100644
--- a/tensorflow/python/training/server_lib.py
+++ b/tensorflow/python/training/server_lib.py
@@ -42,8 +42,8 @@ def _make_server_def(server_or_cluster_def, job_name, task_index, protocol,
Defaults to the value in `server_or_cluster_def`, if specified. Otherwise
defaults to 0 if the server's job has only one task.
protocol: (Optional.) Specifies the protocol to be used by the server.
- Acceptable values include `"grpc"`. Defaults to the value in
- `server_or_cluster_def`, if specified. Otherwise defaults to `"grpc"`.
+ Acceptable values include `"grpc"` and `"grpc+verbs"`. Defaults to the value
+ in `server_or_cluster_def`, if specified. Otherwise defaults to `"grpc"`.
config: (Optional.) A `tf.ConfigProto` that specifies default configuration
options for all sessions that run on this server.
@@ -129,8 +129,9 @@ class Server(object):
job. Defaults to the value in `server_or_cluster_def`, if specified.
Otherwise defaults to 0 if the server's job has only one task.
protocol: (Optional.) Specifies the protocol to be used by the server.
- Acceptable values include `"grpc"`. Defaults to the value in
- `server_or_cluster_def`, if specified. Otherwise defaults to `"grpc"`.
+ Acceptable values include `"grpc"` and `"grpc+verbs"`. Defaults to the
+ value in `server_or_cluster_def`, if specified. Otherwise defaults to
+ `"grpc"`.
config: (Optional.) A `tf.ConfigProto` that specifies default
configuration options for all sessions that run on this server.
start: (Optional.) Boolean, indicating whether to start the server
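A hedged sketch of selecting the verbs protocol; this assumes a TensorFlow build compiled with verbs support:

    import tensorflow as tf

    cluster = tf.train.ClusterSpec({"worker": ["localhost:2222"]})
    server = tf.train.Server(cluster, job_name="worker", task_index=0,
                             protocol="grpc+verbs")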
diff --git a/tensorflow/python/util/deprecation.py b/tensorflow/python/util/deprecation.py
index 376be39978..9e2202eaf8 100644
--- a/tensorflow/python/util/deprecation.py
+++ b/tensorflow/python/util/deprecation.py
@@ -37,6 +37,11 @@ _PRINT_DEPRECATION_WARNINGS = True
_PRINTED_WARNING = {}
+class DeprecatedNamesAlreadySet(Exception):
+ """Raised when setting deprecated names multiple times for the same symbol."""
+ pass
+
+
def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
"""Adds a deprecation notice to a docstring for deprecated functions."""
main_text = ['THIS FUNCTION IS DEPRECATED. It will be removed %s.' %
@@ -87,6 +92,27 @@ def _call_location(outer=False):
return '%s:%d' % (entry[1], entry[2])
+def _wrap_decorator(wrapped_function):
+ """Indicate that one function wraps another.
+
+ This decorator wraps a function using `tf_decorator.make_decorator`
+ so that doc generation scripts can pick up original function
+ signature.
+ It would be better to use the @functools.wraps decorator, but it would
+ not update the function signature to match the wrapped function in Python 2.
+
+ Args:
+ wrapped_function: The function that the decorated function wraps.
+
+ Returns:
+ A function that accepts a wrapper function as an argument and returns
+ a `TFDecorator` instance.
+ """
+ def wrapper(wrapper_func):
+ return tf_decorator.make_decorator(wrapped_function, wrapper_func)
+ return wrapper
+
+
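A sketch of the decorator in use; `target` and `shim` are hypothetical names:

    def target(a, b=1):
      return a + b

    @_wrap_decorator(target)
    def shim(*args, **kwargs):
      return target(*args, **kwargs)

    # tf_inspect.getfullargspec(shim) now matches that of target, so doc
    # generation sees the original signature.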
def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True):
"""Deprecate a symbol in favor of a new name with identical semantics.
@@ -144,7 +170,7 @@ def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True):
if tf_inspect.isclass(func_or_class):
# Make a new class with __init__ wrapped in a warning.
- class NewClass(func_or_class): # pylint: disable=missing-docstring
+ class _NewClass(func_or_class): # pylint: disable=missing-docstring
__doc__ = decorator_utils.add_notice_to_docstring(
func_or_class.__doc__, 'Please use %s instead.' % name,
'DEPRECATED CLASS',
@@ -153,27 +179,28 @@ def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True):
__name__ = func_or_class.__name__
__module__ = _call_location(outer=True)
+ @_wrap_decorator(func_or_class.__init__)
def __init__(self, *args, **kwargs):
- if hasattr(NewClass.__init__, '__func__'):
+ if hasattr(_NewClass.__init__, '__func__'):
# Python 2
- NewClass.__init__.__func__.__doc__ = func_or_class.__init__.__doc__
+ _NewClass.__init__.__func__.__doc__ = func_or_class.__init__.__doc__
else:
# Python 3
- NewClass.__init__.__doc__ = func_or_class.__init__.__doc__
+ _NewClass.__init__.__doc__ = func_or_class.__init__.__doc__
if _PRINT_DEPRECATION_WARNINGS:
# We're making the alias as we speak. The original may have other
# aliases, so we cannot use it to check for whether it's already been
# warned about.
- if NewClass.__init__ not in _PRINTED_WARNING:
+ if _NewClass.__init__ not in _PRINTED_WARNING:
if warn_once:
- _PRINTED_WARNING[NewClass.__init__] = True
+ _PRINTED_WARNING[_NewClass.__init__] = True
logging.warning(
'From %s: The name %s is deprecated. Please use %s instead.\n',
_call_location(), deprecated_name, name)
- super(NewClass, self).__init__(*args, **kwargs)
+ super(_NewClass, self).__init__(*args, **kwargs)
- return NewClass
+ return _NewClass
else:
decorator_utils.validate_callable(func_or_class, 'deprecated')
@@ -197,6 +224,35 @@ def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True):
func_or_class.__doc__, None, 'Please use %s instead.' % name))
+def deprecated_endpoints(*args):
+ """Decorator for marking endpoints deprecated.
+
+ This decorator does not print deprecation messages.
+ TODO(annarev): eventually start printing deprecation warnings when the
+ @deprecated_endpoints decorator is applied.
+
+ Args:
+ *args: Deprecated endpoint names.
+
+ Returns:
+ A function that takes a symbol as an argument and adds
+ _tf_deprecated_api_names to that symbol.
+ _tf_deprecated_api_names is set to a list of deprecated
+ endpoint names for the symbol.
+ """
+ def deprecated_wrapper(func):
+ # pylint: disable=protected-access
+ if '_tf_deprecated_api_names' in func.__dict__:
+ raise DeprecatedNamesAlreadySet(
+ 'Cannot set deprecated names for %s to %s. '
+ 'Deprecated names are already set to %s.' % (
+ func.__name__, str(args), str(func._tf_deprecated_api_names)))
+ func._tf_deprecated_api_names = args
+ # pylint: enable=protected-access
+ return func
+ return deprecated_wrapper
+
+
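A short usage sketch (the tests further below exercise the same behavior); the endpoint name is hypothetical:

    @deprecated_endpoints("train.foo_v1")
    def foo():
      pass

    assert foo._tf_deprecated_api_names == ("train.foo_v1",)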
def deprecated(date, instructions, warn_once=True):
"""Decorator for marking functions or methods deprecated.
diff --git a/tensorflow/python/util/deprecation_test.py b/tensorflow/python/util/deprecation_test.py
index bdd0bc48d2..90c73a0a58 100644
--- a/tensorflow/python/util/deprecation_test.py
+++ b/tensorflow/python/util/deprecation_test.py
@@ -22,6 +22,7 @@ from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
+from tensorflow.python.util import tf_inspect
class DeprecatedAliasTest(test.TestCase):
@@ -73,6 +74,11 @@ class DeprecatedAliasTest(test.TestCase):
self.assertEqual(["test", "deprecated", "deprecated again"],
MyClass.init_args)
+ # Check __init__ signature matches for doc generation.
+ self.assertEqual(
+ tf_inspect.getfullargspec(MyClass.__init__),
+ tf_inspect.getfullargspec(deprecated_cls.__init__))
+
class DeprecationTest(test.TestCase):
@@ -929,5 +935,27 @@ class DeprecationArgumentsTest(test.TestCase):
self.assertEqual(new_docs, new_docs_ref)
+class DeprecatedEndpointsTest(test.TestCase):
+
+ def testSingleDeprecatedEndpoint(self):
+ @deprecation.deprecated_endpoints("foo1")
+ def foo():
+ pass
+ self.assertEqual(("foo1",), foo._tf_deprecated_api_names)
+
+ def testMultipleDeprecatedEndpoint(self):
+ @deprecation.deprecated_endpoints("foo1", "foo2")
+ def foo():
+ pass
+ self.assertEqual(("foo1", "foo2"), foo._tf_deprecated_api_names)
+
+ def testCannotSetDeprecatedEndpointsTwice(self):
+ with self.assertRaises(deprecation.DeprecatedNamesAlreadySet):
+ @deprecation.deprecated_endpoints("foo1")
+ @deprecation.deprecated_endpoints("foo2")
+ def foo(): # pylint: disable=unused-variable
+ pass
+
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/python/util/lock_util_test.py b/tensorflow/python/util/lock_util_test.py
index 2ac640ff99..cda8f95225 100644
--- a/tensorflow/python/util/lock_util_test.py
+++ b/tensorflow/python/util/lock_util_test.py
@@ -19,7 +19,6 @@ from __future__ import division
from __future__ import print_function
import random
-import threading
import time
from absl.testing import parameterized
@@ -48,7 +47,7 @@ class GroupLockTest(test.TestCase, parameterized.TestCase):
finished.add(thread_id)
threads = [
- threading.Thread(target=thread_fn, args=(i,))
+ self.checkedThread(target=thread_fn, args=(i,))
for i in range(num_threads)
]
diff --git a/tensorflow/python/util/nest.py b/tensorflow/python/util/nest.py
index 1104768ae8..d63f59a8c8 100644
--- a/tensorflow/python/util/nest.py
+++ b/tensorflow/python/util/nest.py
@@ -167,11 +167,14 @@ def assert_same_structure(nest1, nest2, check_types=True):
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
- check_types: if `True` (default) types of sequences are checked as
- well, including the keys of dictionaries. If set to `False`, for example
- a list and a tuple of objects will look the same if they have the same
+ check_types: if `True` (default) types of sequences are checked as well,
+ including the keys of dictionaries. If set to `False`, for example a
+ list and a tuple of objects will look the same if they have the same
size. Note that namedtuples with identical name and fields are always
- considered to have the same shallow structure.
+ considered to have the same shallow structure. Two types will also be
+ considered the same if they are both list subtypes (which allows "list"
+ and "_ListWrapper" from checkpointable dependency tracking to compare
+ equal).
Raises:
ValueError: If the two structures do not have the same number of elements or
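A minimal sketch of the relaxed type check, with a user-defined list subclass standing in for checkpointable's `_ListWrapper`:

    from tensorflow.python.util import nest

    class _MyListWrapper(list):  # stand-in for _ListWrapper
      pass

    # With check_types=True this used to raise a TypeError; list subtypes
    # now count as the same sequence type.
    nest.assert_same_structure([1, 2], _MyListWrapper([3, 4]),
                               check_types=True)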
diff --git a/tensorflow/python/util/py_checkpoint_reader.i b/tensorflow/python/util/py_checkpoint_reader.i
index 8004898cbc..1c73f7f06f 100644
--- a/tensorflow/python/util/py_checkpoint_reader.i
+++ b/tensorflow/python/util/py_checkpoint_reader.i
@@ -166,6 +166,7 @@ def NewCheckpointReader(filepattern):
return CheckpointReader(compat.as_bytes(filepattern), status)
NewCheckpointReader._tf_api_names = ['train.NewCheckpointReader']
+NewCheckpointReader._tf_api_names_v1 = ['train.NewCheckpointReader']
%}
%include "tensorflow/c/checkpoint_reader.h"
diff --git a/tensorflow/python/util/stat_summarizer.i b/tensorflow/python/util/stat_summarizer.i
index 73fa85494b..a5a7984d91 100644
--- a/tensorflow/python/util/stat_summarizer.i
+++ b/tensorflow/python/util/stat_summarizer.i
@@ -27,8 +27,8 @@ limitations under the License.
%ignoreall
-%unignore _NewStatSummarizer;
-%unignore _DeleteStatSummarizer;
+%unignore NewStatSummarizer;
+%unignore DeleteStatSummarizer;
%unignore tensorflow;
%unignore tensorflow::StatSummarizer;
%unignore tensorflow::StatSummarizer::StatSummarizer;
@@ -43,20 +43,20 @@ limitations under the License.
// TODO(ashankar): Remove the unused argument from the API.
%{
-tensorflow::StatSummarizer* _NewStatSummarizer(
+tensorflow::StatSummarizer* NewStatSummarizer(
const string& unused) {
return new tensorflow::StatSummarizer(tensorflow::StatSummarizerOptions());
}
%}
%{
-void _DeleteStatSummarizer(tensorflow::StatSummarizer* ss) {
+void DeleteStatSummarizer(tensorflow::StatSummarizer* ss) {
delete ss;
}
%}
-tensorflow::StatSummarizer* _NewStatSummarizer(const string& unused);
-void _DeleteStatSummarizer(tensorflow::StatSummarizer* ss);
+tensorflow::StatSummarizer* NewStatSummarizer(const string& unused);
+void DeleteStatSummarizer(tensorflow::StatSummarizer* ss);
%extend tensorflow::StatSummarizer {
void ProcessStepStatsStr(const string& step_stats_str) {
@@ -76,16 +76,3 @@ void _DeleteStatSummarizer(tensorflow::StatSummarizer* ss);
%include "tensorflow/core/util/stat_summarizer_options.h"
%include "tensorflow/core/util/stat_summarizer.h"
%unignoreall
-
-%insert("python") %{
-
-# Wrapping NewStatSummarizer and DeletStatSummarizer because
-# SWIG-generated functions are built-in functions and do not support
-# setting _tf_api_names attribute.
-
-def NewStatSummarizer(unused):
- return _NewStatSummarizer(unused)
-
-def DeleteStatSummarizer(stat_summarizer):
- _DeleteStatSummarizer(stat_summarizer)
-%}
diff --git a/tensorflow/python/util/tf_export.py b/tensorflow/python/util/tf_export.py
index e154ffb68a..274f32c21f 100644
--- a/tensorflow/python/util/tf_export.py
+++ b/tensorflow/python/util/tf_export.py
@@ -63,12 +63,63 @@ API_ATTRS = {
'_estimator_api_constants')
}
+API_ATTRS_V1 = {
+ TENSORFLOW_API_NAME: _Attributes(
+ '_tf_api_names_v1',
+ '_tf_api_constants_v1'),
+ ESTIMATOR_API_NAME: _Attributes(
+ '_estimator_api_names_v1',
+ '_estimator_api_constants_v1')
+}
+
class SymbolAlreadyExposedError(Exception):
"""Raised when adding API names to symbol that already has API names."""
pass
+def get_canonical_name_for_symbol(symbol, api_name=TENSORFLOW_API_NAME):
+ """Get canonical name for the API symbol.
+
+ Canonical name is the first non-deprecated endpoint name.
+
+ Args:
+ symbol: API function or class.
+ api_name: API name (tensorflow or estimator).
+
+ Returns:
+ Canonical name for the API symbol (e.g. initializers.zeros) if
+ canonical name could be determined. Otherwise, returns None.
+ """
+ if not hasattr(symbol, '__dict__'):
+ return None
+ api_names_attr = API_ATTRS[api_name].names
+ _, undecorated_symbol = tf_decorator.unwrap(symbol)
+ if api_names_attr not in undecorated_symbol.__dict__:
+ return None
+ api_names = getattr(undecorated_symbol, api_names_attr)
+ # TODO(annarev): maybe add a separate deprecated attribute
+ # for estimator names.
+ deprecated_api_names = undecorated_symbol.__dict__.get(
+ '_tf_deprecated_api_names', [])
+ return get_canonical_name(api_names, deprecated_api_names)
+
+
+def get_canonical_name(api_names, deprecated_api_names):
+ """Get first non-deprecated endpoint name.
+
+ Args:
+ api_names: API names iterable.
+ deprecated_api_names: Deprecated API names iterable.
+ Returns:
+ Canonical name if there is at least one non-deprecated endpoint.
+ Otherwise returns None.
+ """
+ return next(
+ (name for name in api_names if name not in deprecated_api_names),
+ None)
+
+
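For example, with hypothetical endpoint names:

    # The first endpoint not in the deprecated list wins.
    assert get_canonical_name(["math.add", "add"], ["math.add"]) == "add"
    assert get_canonical_name(["math.add"], ["math.add"]) is None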
class api_export(object): # pylint: disable=invalid-name
"""Provides ways to export symbols to the TensorFlow API."""
@@ -78,13 +129,16 @@ class api_export(object): # pylint: disable=invalid-name
Args:
*args: API names in dot delimited format.
**kwargs: Optional keyed arguments.
- overrides: List of symbols that this is overriding
+ v1: Names for the TensorFlow V1 API. If not set, we will use V2 API
+ names both for TensorFlow V1 and V2 APIs.
+ overrides: List of symbols that this is overriding
(those overridden API exports will be removed). Note: passing overrides
has no effect on exporting a constant.
- api_name: Name of the API you want to generate (e.g. `tensorflow` or
+ api_name: Name of the API you want to generate (e.g. `tensorflow` or
`estimator`). Default is `tensorflow`.
"""
self._names = args
+ self._names_v1 = kwargs.get('v1', args)
self._api_name = kwargs.get('api_name', TENSORFLOW_API_NAME)
self._overrides = kwargs.get('overrides', [])
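A sketch of the new `v1` keyword; the endpoint names are hypothetical:

    # V2 exposes only the namespaced name; V1 keeps an extra top-level alias.
    @tf_export("math.approx_max", v1=["math.approx_max", "approx_max"])
    def approx_max(x):
      return x

    # approx_max._tf_api_names    -> ('math.approx_max',)
    # approx_max._tf_api_names_v1 -> ['math.approx_max', 'approx_max']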
@@ -102,24 +156,27 @@ class api_export(object): # pylint: disable=invalid-name
and kwarg `allow_multiple_exports` not set.
"""
api_names_attr = API_ATTRS[self._api_name].names
-
+ api_names_attr_v1 = API_ATTRS_V1[self._api_name].names
# Undecorate overridden names
for f in self._overrides:
_, undecorated_f = tf_decorator.unwrap(f)
delattr(undecorated_f, api_names_attr)
+ delattr(undecorated_f, api_names_attr_v1)
_, undecorated_func = tf_decorator.unwrap(func)
+ self.set_attr(undecorated_func, api_names_attr, self._names)
+ self.set_attr(undecorated_func, api_names_attr_v1, self._names_v1)
+ return func
+ def set_attr(self, func, api_names_attr, names):
# Check for an existing api. We check if attribute name is in
# __dict__ instead of using hasattr to verify that subclasses have
# their own _tf_api_names as opposed to just inheriting it.
- if api_names_attr in undecorated_func.__dict__:
+ if api_names_attr in func.__dict__:
raise SymbolAlreadyExposedError(
'Symbol %s is already exposed as %s.' %
- (undecorated_func.__name__, getattr(
- undecorated_func, api_names_attr))) # pylint: disable=protected-access
- setattr(undecorated_func, api_names_attr, self._names)
- return func
+ (func.__name__, getattr(func, api_names_attr))) # pylint: disable=protected-access
+ setattr(func, api_names_attr, names)
def export_constant(self, module_name, name):
"""Store export information for constants/string literals.
@@ -140,12 +197,20 @@ class api_export(object): # pylint: disable=invalid-name
name: (string) Current constant name.
"""
module = sys.modules[module_name]
- if not hasattr(module, API_ATTRS[self._api_name].constants):
- setattr(module, API_ATTRS[self._api_name].constants, [])
+ api_constants_attr = API_ATTRS[self._api_name].constants
+ api_constants_attr_v1 = API_ATTRS_V1[self._api_name].constants
+
+ if not hasattr(module, api_constants_attr):
+ setattr(module, api_constants_attr, [])
# pylint: disable=protected-access
- getattr(module, API_ATTRS[self._api_name].constants).append(
+ getattr(module, api_constants_attr).append(
(self._names, name))
+ if not hasattr(module, api_constants_attr_v1):
+ setattr(module, api_constants_attr_v1, [])
+ getattr(module, api_constants_attr_v1).append(
+ (self._names_v1, name))
+
tf_export = functools.partial(api_export, api_name=TENSORFLOW_API_NAME)
estimator_export = functools.partial(tf_export, api_name=ESTIMATOR_API_NAME)
diff --git a/tensorflow/python/util/tf_export_test.py b/tensorflow/python/util/tf_export_test.py
index b9e26ecb33..4ae1dc55e0 100644
--- a/tensorflow/python/util/tf_export_test.py
+++ b/tensorflow/python/util/tf_export_test.py
@@ -60,6 +60,8 @@ class ValidateExportTest(test.TestCase):
for symbol in [_test_function, _test_function, TestClassA, TestClassB]:
if hasattr(symbol, '_tf_api_names'):
del symbol._tf_api_names
+ if hasattr(symbol, '_tf_api_names_v1'):
+ del symbol._tf_api_names_v1
def _CreateMockModule(self, name):
mock_module = self.MockModule(name)
diff --git a/tensorflow/python/util/tf_inspect.py b/tensorflow/python/util/tf_inspect.py
index fbd6561767..ec20998bdd 100644
--- a/tensorflow/python/util/tf_inspect.py
+++ b/tensorflow/python/util/tf_inspect.py
@@ -300,6 +300,16 @@ def getsource(object): # pylint: disable=redefined-builtin
return _inspect.getsource(tf_decorator.unwrap(object)[1])
+def getsourcefile(object): # pylint: disable=redefined-builtin
+ """TFDecorator-aware replacement for inspect.getsourcefile."""
+ return _inspect.getsourcefile(tf_decorator.unwrap(object)[1])
+
+
+def getsourcelines(object): # pylint: disable=redefined-builtin
+ """TFDecorator-aware replacement for inspect.getsourcelines."""
+ return _inspect.getsourcelines(tf_decorator.unwrap(object)[1])
+
+
def isbuiltin(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isbuiltin."""
return _inspect.isbuiltin(tf_decorator.unwrap(object)[1])
diff --git a/tensorflow/python/util/tf_inspect_test.py b/tensorflow/python/util/tf_inspect_test.py
index beaf350de1..2f6021c7d8 100644
--- a/tensorflow/python/util/tf_inspect_test.py
+++ b/tensorflow/python/util/tf_inspect_test.py
@@ -326,6 +326,18 @@ def test_decorated_function_with_defaults(a, b=2, c='Hello'):
self.assertEqual(
expected, tf_inspect.getsource(test_decorated_function_with_defaults))
+ def testGetSourceFile(self):
+ self.assertEqual(
+ __file__,
+ tf_inspect.getsourcefile(test_decorated_function_with_defaults))
+
+ def testGetSourceLines(self):
+ expected = inspect.getsourcelines(
+ test_decorated_function_with_defaults.decorated_target)
+ self.assertEqual(
+ expected,
+ tf_inspect.getsourcelines(test_decorated_function_with_defaults))
+
def testIsBuiltin(self):
self.assertEqual(
tf_inspect.isbuiltin(TestDecoratedClass),
diff --git a/tensorflow/python/util/tf_stack.py b/tensorflow/python/util/tf_stack.py
new file mode 100644
index 0000000000..fe4f4a63eb
--- /dev/null
+++ b/tensorflow/python/util/tf_stack.py
@@ -0,0 +1,103 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Functions used to extract and analyze stacks. Faster than Python libs."""
+# pylint: disable=g-bad-name
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import linecache
+import sys
+
+# Names for indices into TF traceback tuples.
+TB_FILENAME = 0
+TB_LINENO = 1
+TB_FUNCNAME = 2
+TB_CODEDICT = 3 # Dictionary of Python interpreter state.
+
+
+def extract_stack(extract_frame_info_fn=None):
+ """A lightweight, extensible re-implementation of traceback.extract_stack.
+
+ NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
+ each stack frame using linecache, which results in an abundance of stat()
+ calls. This implementation does not retrieve the code, and any consumer
+ should apply _convert_stack to the result to obtain a traceback that can
+ be formatted etc. using traceback methods.
+
+ Args:
+ extract_frame_info_fn: Optional callable fn(stack_frame) applied to each
+ stack frame. This callable's return value is stored as the sixth (last)
+ element of the returned tuples. If not provided, the returned tuples
+ will have None as their sixth value.
+
+ Returns:
+ A list of 6-tuples
+ (filename, lineno, name, frame_globals, func_start_lineno, custom_info)
+ corresponding to the call stack of the current thread. The returned tuples
+ have the innermost stack frame at the end, unlike the Python inspect
+ module's stack() function.
+ """
+ default_fn = lambda f: None
+ extract_frame_info_fn = extract_frame_info_fn or default_fn
+ try:
+ raise ZeroDivisionError
+ except ZeroDivisionError:
+ f = sys.exc_info()[2].tb_frame.f_back
+ ret = []
+ while f is not None:
+ lineno = f.f_lineno
+ co = f.f_code
+ filename = co.co_filename
+ name = co.co_name
+ frame_globals = f.f_globals
+ func_start_lineno = co.co_firstlineno
+ frame_info = extract_frame_info_fn(f)
+ ret.append((filename, lineno, name, frame_globals, func_start_lineno,
+ frame_info))
+ f = f.f_back
+ ret.reverse()
+ return ret
+
+
+def convert_stack(stack, include_func_start_lineno=False):
+ """Converts a stack extracted using extract_stack() to a traceback stack.
+
+ Args:
+ stack: A list of n 6-tuples as produced by extract_stack(),
+ (filename, lineno, name, frame_globals, func_start_lineno, custom_info).
+ include_func_start_lineno: True if function start line number should be
+ included as the 5th entry in return tuples.
+
+ Returns:
+ A list of n 4-tuples or 5-tuples
+ (filename, lineno, name, code, [optional: func_start_lineno]), where the
+ code tuple element is calculated from the corresponding elements of the
+ input tuple.
+ """
+ ret = []
+ for (filename, lineno, name, frame_globals, func_start_lineno,
+ unused_frame_info) in stack:
+ linecache.checkcache(filename)
+ line = linecache.getline(filename, lineno, frame_globals)
+ if line:
+ line = line.strip()
+ else:
+ line = None
+ if include_func_start_lineno:
+ ret.append((filename, lineno, name, line, func_start_lineno))
+ else:
+ ret.append((filename, lineno, name, line))
+ return ret
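A usage sketch pairing the two helpers with the standard traceback module:

    import traceback

    from tensorflow.python.util import tf_stack

    stack = tf_stack.extract_stack()        # 6-tuples, innermost frame last
    frames = tf_stack.convert_stack(stack)  # (filename, lineno, name, line)
    print("".join(traceback.format_list(frames)))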
diff --git a/tensorflow/python/util/util.cc b/tensorflow/python/util/util.cc
index c79d8a8445..366f8a0deb 100644
--- a/tensorflow/python/util/util.cc
+++ b/tensorflow/python/util/util.cc
@@ -394,7 +394,11 @@ bool AssertSameStructureHelper(PyObject* o1, PyObject* o2, bool check_types,
type2->tp_name);
return true;
}
- } else if (type1 != type2) {
+ } else if (type1 != type2
+ /* If both sequences are list types, don't complain. This allows
+ one to be a list subclass (e.g. _ListWrapper used for automatic
+ dependency tracking.) */
+ && !(PyList_Check(o1) && PyList_Check(o2))) {
*is_type_error = true;
*error_msg = tensorflow::strings::StrCat(
"The two namedtuples don't have the same sequence type. "
diff --git a/tensorflow/security/advisory/tfsa-2018-001.md b/tensorflow/security/advisory/tfsa-2018-001.md
index bb97543a21..1966789c84 100644
--- a/tensorflow/security/advisory/tfsa-2018-001.md
+++ b/tensorflow/security/advisory/tfsa-2018-001.md
@@ -22,7 +22,7 @@ TensorFlow 1.3.0, 1.3.1, 1.4.0, 1.4.1, 1.5.0, 1.5.1, 1.6.0
### Mitigation
We have patched the vulnerability in GitHub commit
-[49f73c55](https://github.com/tensorflow/tensorflow/commit/49f73c55d56edffebde4bca4a407ad69c1cae4333c55).
+[49f73c55](https://github.com/tensorflow/tensorflow/commit/49f73c55d56edffebde4bca4a407ad69c1cae433).
If users are running TensorFlow in production or on untrusted data, they are
encouraged to apply this patch.
diff --git a/tensorflow/security/index.md b/tensorflow/security/index.md
index ea39e17ab2..0f176151c2 100644
--- a/tensorflow/security/index.md
+++ b/tensorflow/security/index.md
@@ -4,7 +4,7 @@ We regularly publish security advisories about using TensorFlow.
*Note*: In conjunction with these security advisories, we strongly encourage
TensorFlow users to read and understand TensorFlow's security model as outlined
-in (https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md)[SECURITY.md].
+in [SECURITY.md](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md).
| Advisory Number | Type | Versions affected | Reported by | Additional Information |
|-----------------|--------------------|:-----------------:|-----------------------|-----------------------------|
diff --git a/tensorflow/stream_executor/BUILD b/tensorflow/stream_executor/BUILD
index c68cda0100..e742f8e8d5 100644
--- a/tensorflow/stream_executor/BUILD
+++ b/tensorflow/stream_executor/BUILD
@@ -2,6 +2,7 @@ licenses(["restricted"])
load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda_is_configured")
load("//tensorflow/core:platform/default/build_config_root.bzl", "if_static")
+load("//tensorflow:tensorflow.bzl", "cc_header_only_library")
STREAM_EXECUTOR_HEADERS = glob([
"*.h",
@@ -33,7 +34,6 @@ cc_library(
}),
visibility = ["//visibility:public"],
deps = [
- "//tensorflow/compiler/xla:statusor",
"//tensorflow/core:lib",
"//tensorflow/core:ptr_util",
"@local_config_cuda//cuda:cuda_headers",
@@ -48,11 +48,18 @@ cc_library(
deps = [
"//tensorflow/core:lib",
"//tensorflow/core:ptr_util",
- "//tensorflow/compiler/xla:statusor",
"@local_config_cuda//cuda:cuda_headers",
] + if_static([":stream_executor_impl"]),
)
+cc_header_only_library(
+ name = "stream_executor_headers_lib",
+ visibility = ["//visibility:public"],
+ deps = [
+ ":stream_executor",
+ ],
+)
+
cc_library(
name = "cuda_platform",
srcs = if_cuda_is_configured(
diff --git a/tensorflow/stream_executor/cuda/cuda_dnn.cc b/tensorflow/stream_executor/cuda/cuda_dnn.cc
index d4f2fd2625..9e24a4538c 100644
--- a/tensorflow/stream_executor/cuda/cuda_dnn.cc
+++ b/tensorflow/stream_executor/cuda/cuda_dnn.cc
@@ -3074,6 +3074,22 @@ port::Status CudnnSupport::DoConvolveBackwardDataImpl(
}
}
+ // Cudnn 7.1.4 has a bug if the workspace of the following convolution is not
+ // zero-initialized.
+ // TODO(timshen): Add an nvbugs/ link.
+ if (CUDNN_VERSION >= 7000 &&
+ algorithm_config.algorithm().algo_id() ==
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 &&
+ cudnn_type == CUDNN_DATA_HALF &&
+ algorithm_config.algorithm().tensor_ops_enabled() &&
+ input_descriptor.layout() == dnn::DataLayout::kBatchYXDepth &&
+ filter_descriptor.layout() == dnn::FilterLayout::kOutputInputYX &&
+ output_descriptor.layout() == dnn::DataLayout::kBatchDepthYX &&
+ (convolution_descriptor.vertical_filter_stride() > 1 ||
+ convolution_descriptor.horizontal_filter_stride() > 1)) {
+ stream->ThenMemZero(&scratch, scratch.size());
+ }
+
RETURN_IF_CUDNN_ERROR(
cudnnConvolutionBackwardData(cudnn.handle(),
/*alpha=*/alpha,
@@ -3587,7 +3603,7 @@ bool CudnnSupport::DoPoolForward(
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<double>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<double>* output_data) {
+ DeviceMemory<double>* output_data, ScratchAllocator* workspace_allocator) {
// Alpha is the scaling factor for input.
double alpha = 1.0;
// Beta is the scaling factor for output.
@@ -3612,7 +3628,7 @@ bool CudnnSupport::DoPoolForward(
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<float>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<float>* output_data) {
+ DeviceMemory<float>* output_data, ScratchAllocator* workspace_allocator) {
// Alpha is the scaling factor for input.
float alpha = 1.0;
// Beta is the scaling factor for output.
@@ -3637,7 +3653,8 @@ bool CudnnSupport::DoPoolForward(
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<Eigen::half>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<Eigen::half>* output_data) {
+ DeviceMemory<Eigen::half>* output_data,
+ ScratchAllocator* workspace_allocator) {
// Alpha is the scaling factor for input.
float alpha = 1.0;
// Beta is the scaling factor for output.
@@ -3663,7 +3680,8 @@ bool CudnnSupport::DoPoolBackward(
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<double>& output_data,
const DeviceMemory<double>& input_diff_data,
- DeviceMemory<double>* output_diff_data) {
+ DeviceMemory<double>* output_diff_data,
+ ScratchAllocator* workspace_allocator) {
// Alpha is the scaling factor for input.
double alpha = 1.0;
// Beta is the scaling factor for output.
@@ -3692,7 +3710,8 @@ bool CudnnSupport::DoPoolBackward(
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<float>& output_data,
const DeviceMemory<float>& input_diff_data,
- DeviceMemory<float>* output_diff_data) {
+ DeviceMemory<float>* output_diff_data,
+ ScratchAllocator* workspace_allocator) {
// Alpha is the scaling factor for input.
float alpha = 1.0;
// Beta is the scaling factor for output.
@@ -3721,7 +3740,8 @@ bool CudnnSupport::DoPoolBackward(
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<Eigen::half>& output_data,
const DeviceMemory<Eigen::half>& input_diff_data,
- DeviceMemory<Eigen::half>* output_diff_data) {
+ DeviceMemory<Eigen::half>* output_diff_data,
+ ScratchAllocator* workspace_allocator) {
// Alpha is the scaling factor for input.
float alpha = 1.0;
// Beta is the scaling factor for output.
@@ -3790,7 +3810,8 @@ bool CudnnSupport::DoNormalizeBackwardWithDimensions(
const dnn::BatchDescriptor& dimensions, const DeviceMemory<float>& raw_data,
const DeviceMemory<float>& normalized_data,
const DeviceMemory<float>& normalized_variable_gradient,
- DeviceMemory<float>* raw_variable_gradient) {
+ DeviceMemory<float>* raw_variable_gradient,
+ ScratchAllocator* workspace_allocator) {
// Check for unsupported modes.
if (normalize_descriptor.wrap_around()) {
LOG(ERROR) << "CUDA LRN does not support cudnn-around mode";
diff --git a/tensorflow/stream_executor/cuda/cuda_dnn.h b/tensorflow/stream_executor/cuda/cuda_dnn.h
index c924d41cb5..9d88f971bb 100644
--- a/tensorflow/stream_executor/cuda/cuda_dnn.h
+++ b/tensorflow/stream_executor/cuda/cuda_dnn.h
@@ -515,21 +515,24 @@ class CudnnSupport : public dnn::DnnSupport {
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<double>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<double>* output_data) override;
+ DeviceMemory<double>* output_data,
+ ScratchAllocator* workspace_allocator) override;
bool DoPoolForward(Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<float>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<float>* output_data) override;
+ DeviceMemory<float>* output_data,
+ ScratchAllocator* workspace_allocator) override;
bool DoPoolForward(Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<Eigen::half>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<Eigen::half>* output_data) override;
+ DeviceMemory<Eigen::half>* output_data,
+ ScratchAllocator* workspace_allocator) override;
bool DoPoolBackward(Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
@@ -538,7 +541,8 @@ class CudnnSupport : public dnn::DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<double>& output_data,
const DeviceMemory<double>& input_diff_data,
- DeviceMemory<double>* output_diff_data) override;
+ DeviceMemory<double>* output_diff_data,
+ ScratchAllocator* workspace_allocator) override;
bool DoPoolBackward(Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
@@ -547,7 +551,8 @@ class CudnnSupport : public dnn::DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<float>& output_data,
const DeviceMemory<float>& input_diff_data,
- DeviceMemory<float>* output_diff_data) override;
+ DeviceMemory<float>* output_diff_data,
+ ScratchAllocator* workspace_allocator) override;
bool DoPoolBackward(Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
@@ -556,7 +561,8 @@ class CudnnSupport : public dnn::DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<Eigen::half>& output_data,
const DeviceMemory<Eigen::half>& input_diff_data,
- DeviceMemory<Eigen::half>* output_diff_data) override;
+ DeviceMemory<Eigen::half>* output_diff_data,
+ ScratchAllocator* workspace_allocator) override;
bool DoNormalize(Stream* stream,
const dnn::NormalizeDescriptor& normalize_descriptor,
@@ -575,7 +581,8 @@ class CudnnSupport : public dnn::DnnSupport {
const DeviceMemory<float>& raw_data,
const DeviceMemory<float>& normalized_data,
const DeviceMemory<float>& normalized_variable_gradient,
- DeviceMemory<float>* raw_variable_gradient) override;
+ DeviceMemory<float>* raw_variable_gradient,
+ ScratchAllocator* workspace_allocator) override;
bool DoDepthConcatenate(
Stream* stream, port::ArraySlice<dnn::BatchDescriptor> input_dimensions,
diff --git a/tensorflow/stream_executor/dnn.h b/tensorflow/stream_executor/dnn.h
index 9eca5abe1a..a7449c2df4 100644
--- a/tensorflow/stream_executor/dnn.h
+++ b/tensorflow/stream_executor/dnn.h
@@ -1552,14 +1552,16 @@ class DnnSupport {
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<float>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<float>* output_data) = 0;
+ DeviceMemory<float>* output_data,
+ ScratchAllocator* workspace_allocator) = 0;
virtual bool DoPoolForward(Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<double>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<double>* output_data) {
+ DeviceMemory<double>* output_data,
+ ScratchAllocator* workspace_allocator) {
LOG(FATAL) << "DoPoolForward not implemented for double.";
return false;
}
@@ -1569,7 +1571,8 @@ class DnnSupport {
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<Eigen::half>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<Eigen::half>* output_data) {
+ DeviceMemory<Eigen::half>* output_data,
+ ScratchAllocator* workspace_allocator) {
LOG(FATAL) << "DoPoolForward not implemented for float16.";
return false;
}
@@ -1582,7 +1585,8 @@ class DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<double>& output_data,
const DeviceMemory<double>& input_diff_data,
- DeviceMemory<double>* output_diff_data) {
+ DeviceMemory<double>* output_diff_data,
+ ScratchAllocator* workspace_allocator) {
LOG(FATAL) << "DoPoolBackward not implemented.";
return false;
}
@@ -1594,7 +1598,8 @@ class DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<float>& output_data,
const DeviceMemory<float>& input_diff_data,
- DeviceMemory<float>* output_diff_data) {
+ DeviceMemory<float>* output_diff_data,
+ ScratchAllocator* workspace_allocator) {
LOG(FATAL) << "DoPoolBackward not implemented.";
return false;
}
@@ -1606,7 +1611,8 @@ class DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<Eigen::half>& output_data,
const DeviceMemory<Eigen::half>& input_diff_data,
- DeviceMemory<Eigen::half>* output_diff_data) {
+ DeviceMemory<Eigen::half>* output_diff_data,
+ ScratchAllocator* workspace_allocator) {
LOG(FATAL) << "DoPoolBackward not implemented.";
return false;
}
@@ -1653,7 +1659,8 @@ class DnnSupport {
const DeviceMemory<float>& raw_data,
const DeviceMemory<float>& normalized_data,
const DeviceMemory<float>& normalized_variable_gradient,
- DeviceMemory<float>* raw_variable_gradient) {
+ DeviceMemory<float>* raw_variable_gradient,
+ ScratchAllocator* workspace_allocator) {
return false;
}
diff --git a/tensorflow/stream_executor/event.cc b/tensorflow/stream_executor/event.cc
index 50a6edd80b..52efe771bc 100644
--- a/tensorflow/stream_executor/event.cc
+++ b/tensorflow/stream_executor/event.cc
@@ -15,9 +15,9 @@ limitations under the License.
#include "tensorflow/stream_executor/event.h"
+#include "tensorflow/stream_executor/stream.h"
#include "tensorflow/stream_executor/stream_executor_internal.h"
#include "tensorflow/stream_executor/stream_executor_pimpl.h"
-#include "tensorflow/stream_executor/stream.h"
namespace stream_executor {
@@ -27,9 +27,12 @@ Event::Event(StreamExecutor* stream_exec)
stream_exec_->implementation()->CreateEventImplementation()) {}
Event::~Event() {
- auto status = stream_exec_->DeallocateEvent(this);
- if (!status.ok()) {
- LOG(ERROR) << status.error_message();
+ // Deal with a null implementation_, as this event may have been moved from.
+ if (stream_exec_ && implementation_) {
+ auto status = stream_exec_->DeallocateEvent(this);
+ if (!status.ok()) {
+ LOG(ERROR) << status.error_message();
+ }
}
}
diff --git a/tensorflow/stream_executor/event.h b/tensorflow/stream_executor/event.h
index 1f37262c78..9cc87a7c12 100644
--- a/tensorflow/stream_executor/event.h
+++ b/tensorflow/stream_executor/event.h
@@ -61,6 +61,9 @@ class Event {
// Returns a pointer to the underlying platform-specific implementation.
internal::EventInterface* implementation() { return implementation_.get(); }
+ Event(Event&&) = default;
+ Event& operator=(Event&&) = default;
+
private:
friend class Stream;
diff --git a/tensorflow/stream_executor/host/host_gpu_executor.cc b/tensorflow/stream_executor/host/host_gpu_executor.cc
index 2c4819651a..3cd97b3cf1 100644
--- a/tensorflow/stream_executor/host/host_gpu_executor.cc
+++ b/tensorflow/stream_executor/host/host_gpu_executor.cc
@@ -26,8 +26,6 @@ limitations under the License.
#include "tensorflow/stream_executor/lib/statusor.h"
#include "tensorflow/stream_executor/plugin_registry.h"
-bool FLAGS_stream_executor_cpu_real_clock_rate = false;
-
namespace stream_executor {
namespace host {
@@ -190,11 +188,8 @@ DeviceDescription *HostExecutor::PopulateDeviceDescription() const {
// doesn't result in thrashing or other badness? 4GiB chosen arbitrarily.
builder.set_device_memory_size(static_cast<uint64>(4) * 1024 * 1024 * 1024);
- float cycle_counter_frequency = 1e9;
- if (FLAGS_stream_executor_cpu_real_clock_rate) {
- cycle_counter_frequency = static_cast<float>(
- tensorflow::profile_utils::CpuUtils::GetCycleCounterFrequency());
- }
+ float cycle_counter_frequency = static_cast<float>(
+ tensorflow::profile_utils::CpuUtils::GetCycleCounterFrequency());
builder.set_clock_rate_ghz(cycle_counter_frequency / 1e9);
auto built = builder.Build();
diff --git a/tensorflow/compiler/xla/statusor.cc b/tensorflow/stream_executor/lib/statusor.cc
index 72ab67ff81..e0e851f96e 100644
--- a/tensorflow/compiler/xla/statusor.cc
+++ b/tensorflow/stream_executor/lib/statusor.cc
@@ -13,12 +13,13 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include "tensorflow/compiler/xla/statusor.h"
+#include "tensorflow/stream_executor/lib/statusor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
-namespace xla {
+namespace stream_executor {
+namespace port {
namespace internal_statusor {
void Helper::HandleInvalidStatusCtorArg(Status* status) {
@@ -35,4 +36,5 @@ void Helper::Crash(const Status& status) {
}
} // namespace internal_statusor
-} // namespace xla
+} // namespace port
+} // namespace stream_executor
diff --git a/tensorflow/stream_executor/lib/statusor.h b/tensorflow/stream_executor/lib/statusor.h
index dab5909674..3c716acb46 100644
--- a/tensorflow/stream_executor/lib/statusor.h
+++ b/tensorflow/stream_executor/lib/statusor.h
@@ -1,4 +1,4 @@
-/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,19 +13,297 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-// IWYU pragma: private, include "third_party/tensorflow/stream_executor/stream_executor.h"
-
+// StatusOr<T> is the union of a Status object and a T object. StatusOr models
+// the concept of an object that is either a value, or an error Status
+// explaining why such a value is not present. To this end, StatusOr<T> does not
+// allow its Status value to be Status::OK.
+//
+// The primary use-case for StatusOr<T> is as the return value of a
+// function which may fail.
+//
+// Example client usage for a StatusOr<T>, where T is not a pointer:
+//
+// StatusOr<float> result = DoBigCalculationThatCouldFail();
+// if (result.ok()) {
+// float answer = result.ValueOrDie();
+// printf("Big calculation yielded: %f", answer);
+// } else {
+// LOG(ERROR) << result.status();
+// }
+//
+// Example client usage for a StatusOr<T*>:
+//
+// StatusOr<Foo*> result = FooFactory::MakeNewFoo(arg);
+// if (result.ok()) {
+// std::unique_ptr<Foo> foo(result.ValueOrDie());
+// foo->DoSomethingCool();
+// } else {
+// LOG(ERROR) << result.status();
+// }
+//
+// Example client usage for a StatusOr<std::unique_ptr<T>>:
+//
+// StatusOr<std::unique_ptr<Foo>> result = FooFactory::MakeNewFoo(arg);
+// if (result.ok()) {
+// std::unique_ptr<Foo> foo = std::move(result.ValueOrDie());
+// foo->DoSomethingCool();
+// } else {
+// LOG(ERROR) << result.status();
+// }
+//
+// Example factory implementation returning StatusOr<T*>:
+//
+// StatusOr<Foo*> FooFactory::MakeNewFoo(int arg) {
+// if (arg <= 0) {
+// return tensorflow::errors::InvalidArgument("Arg must be positive");
+// } else {
+// return new Foo(arg);
+// }
+// }
+//
+// Note that the assignment operators require that destroying the currently
+// stored value cannot invalidate the argument; in other words, the argument
+// cannot be an alias for the current value, or anything owned by the current
+// value.
#ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_H_
#define TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_H_
-#include "tensorflow/compiler/xla/statusor.h"
+#include "tensorflow/core/platform/macros.h"
+#include "tensorflow/stream_executor/lib/status.h"
+#include "tensorflow/stream_executor/lib/statusor_internals.h"
namespace stream_executor {
namespace port {
-// Use XLA's StatusOr so we don't duplicate code.
+#if defined(__clang__)
+// Only clang supports warn_unused_result as a type annotation.
+template <typename T>
+class TF_MUST_USE_RESULT StatusOr;
+#endif
+
+template <typename T>
+class StatusOr : private internal_statusor::StatusOrData<T>,
+ private internal_statusor::TraitsBase<
+ std::is_copy_constructible<T>::value,
+ std::is_move_constructible<T>::value> {
+ template <typename U>
+ friend class StatusOr;
+
+ typedef internal_statusor::StatusOrData<T> Base;
+
+ public:
+ typedef T element_type;
+
+ // Constructs a new StatusOr with Status::UNKNOWN status. This is marked
+ // 'explicit' to try to catch cases like 'return {};', where people think
+ // StatusOr<std::vector<int>> will be initialized with an empty vector,
+ // instead of a Status::UNKNOWN status.
+ explicit StatusOr();
+
+ // StatusOr<T> will be copy constructible/assignable if T is copy
+ // constructible.
+ StatusOr(const StatusOr&) = default;
+ StatusOr& operator=(const StatusOr&) = default;
+
+ // StatusOr<T> will be move constructible/assignable if T is move
+ // constructible.
+ StatusOr(StatusOr&&) = default;
+ StatusOr& operator=(StatusOr&&) = default;
+
+ // Conversion copy/move constructor, T must be convertible from U.
+ template <typename U, typename std::enable_if<
+ std::is_convertible<U, T>::value>::type* = nullptr>
+ StatusOr(const StatusOr<U>& other);
+ template <typename U, typename std::enable_if<
+ std::is_convertible<U, T>::value>::type* = nullptr>
+ StatusOr(StatusOr<U>&& other);
+
+ // Conversion copy/move assignment operator, T must be convertible from U.
+ template <typename U, typename std::enable_if<
+ std::is_convertible<U, T>::value>::type* = nullptr>
+ StatusOr& operator=(const StatusOr<U>& other);
+ template <typename U, typename std::enable_if<
+ std::is_convertible<U, T>::value>::type* = nullptr>
+ StatusOr& operator=(StatusOr<U>&& other);
+
+ // Constructs a new StatusOr with the given value. After calling this
+ // constructor, calls to ValueOrDie() will succeed, and calls to status() will
+ // return OK.
+ //
+ // NOTE: Not explicit - we want to use StatusOr<T> as a return type
+ // so it is convenient and sensible to be able to do 'return T()'
+ // when the return type is StatusOr<T>.
+ //
+ // REQUIRES: T is copy constructible.
+ StatusOr(const T& value);
+
+ // Constructs a new StatusOr with the given non-ok status. After calling
+ // this constructor, calls to ValueOrDie() will CHECK-fail.
+ //
+ // NOTE: Not explicit - we want to use StatusOr<T> as a return
+ // value, so it is convenient and sensible to be able to do 'return
+ // Status()' when the return type is StatusOr<T>.
+ //
+ // REQUIRES: !status.ok(). This requirement is DCHECKed.
+ // In optimized builds, passing Status::OK() here will have the effect
+ // of passing tensorflow::error::INTERNAL as a fallback.
+ StatusOr(const Status& status);
+ StatusOr& operator=(const Status& status);
+
+ // TODO(b/62186997): Add operator=(T) overloads.
+
+ // Similar to the `const T&` overload.
+ //
+ // REQUIRES: T is move constructible.
+ StatusOr(T&& value);
+
+ // RValue versions of the operations declared above.
+ StatusOr(Status&& status);
+ StatusOr& operator=(Status&& status);
+
+ // Returns this->status().ok()
+ bool ok() const { return this->status_.ok(); }
+
+ // Returns a reference to our status. If this contains a T, then
+ // returns Status::OK().
+ const Status& status() const &;
+ Status status() &&;
+
+ // Returns a reference to our current value, or CHECK-fails if !this->ok().
+ //
+ // Note: for value types that are cheap to copy, prefer simple code:
+ //
+ // T value = statusor.ValueOrDie();
+ //
+ // Otherwise, if the value type is expensive to copy, but can be left
+ // in the StatusOr, simply assign to a reference:
+ //
+ // T& value = statusor.ValueOrDie(); // or `const T&`
+ //
+ // Otherwise, if the value type supports an efficient move, it can be
+ // used as follows:
+ //
+ // T value = std::move(statusor).ValueOrDie();
+ //
+ // The std::move on statusor instead of on the whole expression enables
+ // warnings about possible uses of the statusor object after the move.
+ // C++ style guide waiver for ref-qualified overloads granted in cl/143176389
+ // See go/ref-qualifiers for more details on such overloads.
+ const T& ValueOrDie() const &;
+ T& ValueOrDie() &;
+ const T&& ValueOrDie() const &&;
+ T&& ValueOrDie() &&;
+
+ T ConsumeValueOrDie() { return std::move(ValueOrDie()); }
+
+ // Ignores any errors. This method does nothing except potentially suppress
+ // complaints from any tools that are checking that errors are not dropped on
+ // the floor.
+ void IgnoreError() const;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation details for StatusOr<T>
+
+template <typename T>
+StatusOr<T>::StatusOr() : Base(Status(tensorflow::error::UNKNOWN, "")) {}
+
+template <typename T>
+StatusOr<T>::StatusOr(const T& value) : Base(value) {}
+
+template <typename T>
+StatusOr<T>::StatusOr(const Status& status) : Base(status) {}
+
+template <typename T>
+StatusOr<T>& StatusOr<T>::operator=(const Status& status) {
+ this->Assign(status);
+ return *this;
+}
+
+template <typename T>
+StatusOr<T>::StatusOr(T&& value) : Base(std::move(value)) {}
+
+template <typename T>
+StatusOr<T>::StatusOr(Status&& status) : Base(std::move(status)) {}
+
+template <typename T>
+StatusOr<T>& StatusOr<T>::operator=(Status&& status) {
+ this->Assign(std::move(status));
+ return *this;
+}
+
+template <typename T>
+template <typename U,
+ typename std::enable_if<std::is_convertible<U, T>::value>::type*>
+inline StatusOr<T>::StatusOr(const StatusOr<U>& other)
+ : Base(static_cast<const typename StatusOr<U>::Base&>(other)) {}
+
+template <typename T>
+template <typename U,
+ typename std::enable_if<std::is_convertible<U, T>::value>::type*>
+inline StatusOr<T>& StatusOr<T>::operator=(const StatusOr<U>& other) {
+ if (other.ok())
+ this->Assign(other.ValueOrDie());
+ else
+ this->Assign(other.status());
+ return *this;
+}
+
+template <typename T>
+template <typename U,
+ typename std::enable_if<std::is_convertible<U, T>::value>::type*>
+inline StatusOr<T>::StatusOr(StatusOr<U>&& other)
+ : Base(static_cast<typename StatusOr<U>::Base&&>(other)) {}
+
+template <typename T>
+template <typename U,
+ typename std::enable_if<std::is_convertible<U, T>::value>::type*>
+inline StatusOr<T>& StatusOr<T>::operator=(StatusOr<U>&& other) {
+ if (other.ok()) {
+ this->Assign(std::move(other).ValueOrDie());
+ } else {
+ this->Assign(std::move(other).status());
+ }
+ return *this;
+}
+
+template <typename T>
+const Status& StatusOr<T>::status() const & {
+ return this->status_;
+}
+template <typename T>
+Status StatusOr<T>::status() && {
+ return ok() ? Status::OK() : std::move(this->status_);
+}
+
+template <typename T>
+const T& StatusOr<T>::ValueOrDie() const & {
+ this->EnsureOk();
+ return this->data_;
+}
+
+template <typename T>
+T& StatusOr<T>::ValueOrDie() & {
+ this->EnsureOk();
+ return this->data_;
+}
+
+template <typename T>
+const T&& StatusOr<T>::ValueOrDie() const && {
+ this->EnsureOk();
+ return std::move(this->data_);
+}
+
+template <typename T>
+T&& StatusOr<T>::ValueOrDie() && {
+ this->EnsureOk();
+ return std::move(this->data_);
+}
+
template <typename T>
-using StatusOr = ::xla::StatusOr<T>;
+void StatusOr<T>::IgnoreError() const {
+ // no-op
+}
} // namespace port
} // namespace stream_executor
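
The relocated header carries XLA's full StatusOr implementation, including the ref-qualified status()/ValueOrDie() overloads documented above. A hedged usage sketch under the new namespace; Widget and MakeWidget are hypothetical stand-ins, the tensorflow::Status (code, message) constructor is assumed, and only the StatusOr surface mirrors the header:

    #include <memory>
    #include <utility>

    #include "tensorflow/stream_executor/lib/statusor.h"

    namespace se = stream_executor;

    struct Widget { int id; };  // hypothetical payload type

    // Hypothetical factory returning the relocated StatusOr type.
    se::port::StatusOr<std::unique_ptr<Widget>> MakeWidget(bool succeed) {
      if (!succeed) {
        return se::port::Status(tensorflow::error::INVALID_ARGUMENT, "boom");
      }
      return std::unique_ptr<Widget>(new Widget{7});
    }

    void Demo() {
      auto result = MakeWidget(true);
      if (result.ok()) {
        // std::move on the StatusOr selects the rvalue ValueOrDie()
        // overload, so the unique_ptr is moved out rather than copied.
        std::unique_ptr<Widget> w = std::move(result).ValueOrDie();
        (void)w;
      }
    }
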
diff --git a/tensorflow/compiler/xla/statusor_internals.h b/tensorflow/stream_executor/lib/statusor_internals.h
index 14636bd144..09f88f5825 100644
--- a/tensorflow/compiler/xla/statusor_internals.h
+++ b/tensorflow/stream_executor/lib/statusor_internals.h
@@ -13,13 +13,15 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_COMPILER_XLA_STATUSOR_INTERNALS_H_
-#define TENSORFLOW_COMPILER_XLA_STATUSOR_INTERNALS_H_
+#ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_INTERNALS_H_
+#define TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_INTERNALS_H_
+
-#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/core/platform/macros.h"
+#include "tensorflow/stream_executor/lib/status.h"
-namespace xla {
+namespace stream_executor {
+namespace port {
namespace internal_statusor {
class Helper {
@@ -240,6 +242,7 @@ struct TraitsBase<false, false> {
};
} // namespace internal_statusor
-} // namespace xla
+} // namespace port
+} // namespace stream_executor
-#endif // TENSORFLOW_COMPILER_XLA_STATUSOR_INTERNALS_H_
+#endif // TENSORFLOW_STREAM_EXECUTOR_LIB_STATUSOR_INTERNALS_H_
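
The TraitsBase<std::is_copy_constructible<T>::value, std::is_move_constructible<T>::value> base that statusor.h inherits from (declared in this header) is the standard trick for making a wrapper's copyability and movability mirror T's: deleting special members in a base deletes them in the derived class. A self-contained sketch of the technique, not the statusor_internals code itself:

    #include <memory>
    #include <string>
    #include <type_traits>
    #include <utility>

    // Primary template: both copy and move allowed.
    template <bool CanCopy, bool CanMove>
    struct TraitsBase {};

    // Movable but not copyable.
    template <>
    struct TraitsBase<false, true> {
      TraitsBase() = default;
      TraitsBase(const TraitsBase&) = delete;
      TraitsBase(TraitsBase&&) = default;
      TraitsBase& operator=(const TraitsBase&) = delete;
      TraitsBase& operator=(TraitsBase&&) = default;
    };

    // Neither copyable nor movable.
    template <>
    struct TraitsBase<false, false> {
      TraitsBase() = default;
      TraitsBase(const TraitsBase&) = delete;
      TraitsBase(TraitsBase&&) = delete;
      TraitsBase& operator=(const TraitsBase&) = delete;
      TraitsBase& operator=(TraitsBase&&) = delete;
    };

    // Box<T> is copyable/movable exactly when T is.
    template <typename T>
    class Box : private TraitsBase<std::is_copy_constructible<T>::value,
                                   std::is_move_constructible<T>::value> {
     public:
      explicit Box(T v) : value_(std::move(v)) {}

     private:
      T value_;
    };

    static_assert(std::is_copy_constructible<Box<std::string>>::value, "");
    static_assert(!std::is_copy_constructible<Box<std::unique_ptr<int>>>::value, "");
    static_assert(std::is_move_constructible<Box<std::unique_ptr<int>>>::value, "");
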
diff --git a/tensorflow/compiler/xla/statusor_test.cc b/tensorflow/stream_executor/lib/statusor_test.cc
index 377a618ffb..56584e1892 100644
--- a/tensorflow/compiler/xla/statusor_test.cc
+++ b/tensorflow/stream_executor/lib/statusor_test.cc
@@ -15,18 +15,18 @@ limitations under the License.
// Unit tests for StatusOr
-#include "tensorflow/compiler/xla/statusor.h"
+#include "tensorflow/stream_executor/lib/statusor.h"
#include <memory>
#include <type_traits>
-#include "tensorflow/compiler/xla/test.h"
-#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test_benchmark.h"
-namespace xla {
+namespace stream_executor {
+namespace port {
namespace {
class Base1 {
@@ -672,4 +672,5 @@ void BM_StatusOrFactoryFailLongMsg(int iters) {
BENCHMARK(BM_StatusOrFactoryFailLongMsg);
} // namespace
-} // namespace xla
+} // namespace port
+} // namespace stream_executor
diff --git a/tensorflow/stream_executor/stream.cc b/tensorflow/stream_executor/stream.cc
index 0cd0790a72..ca1b8e28e6 100644
--- a/tensorflow/stream_executor/stream.cc
+++ b/tensorflow/stream_executor/stream.cc
@@ -1377,15 +1377,16 @@ Stream &Stream::ThenPoolForward(
const dnn::BatchDescriptor &input_dimensions,
const DeviceMemory<double> &input_data,
const dnn::BatchDescriptor &output_dimensions,
- DeviceMemory<double> *output_data) {
+ DeviceMemory<double> *output_data, ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
- PARAM(input_data), PARAM(output_dimensions), PARAM(output_data));
+ PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
+ PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoPoolForward(this, pooling_dimensions, input_dimensions,
- input_data, output_dimensions,
- output_data));
+ input_data, output_dimensions, output_data,
+ workspace_allocator));
} else {
SetError();
LOG(WARNING)
@@ -1401,15 +1402,16 @@ Stream &Stream::ThenPoolForward(
const dnn::BatchDescriptor &input_dimensions,
const DeviceMemory<float> &input_data,
const dnn::BatchDescriptor &output_dimensions,
- DeviceMemory<float> *output_data) {
+ DeviceMemory<float> *output_data, ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
- PARAM(input_data), PARAM(output_dimensions), PARAM(output_data));
+ PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
+ PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoPoolForward(this, pooling_dimensions, input_dimensions,
- input_data, output_dimensions,
- output_data));
+ input_data, output_dimensions, output_data,
+ workspace_allocator));
} else {
SetErrorAndLogNoDnnSupport();
}
@@ -1422,15 +1424,17 @@ Stream &Stream::ThenPoolForward(
const dnn::BatchDescriptor &input_dimensions,
const DeviceMemory<Eigen::half> &input_data,
const dnn::BatchDescriptor &output_dimensions,
- DeviceMemory<Eigen::half> *output_data) {
+ DeviceMemory<Eigen::half> *output_data,
+ ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
- PARAM(input_data), PARAM(output_dimensions), PARAM(output_data));
+ PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
+ PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoPoolForward(this, pooling_dimensions, input_dimensions,
- input_data, output_dimensions,
- output_data));
+ input_data, output_dimensions, output_data,
+ workspace_allocator));
} else {
SetErrorAndLogNoDnnSupport();
}
@@ -1445,16 +1449,19 @@ Stream &Stream::ThenPoolBackward(
const dnn::BatchDescriptor &output_dimensions,
const DeviceMemory<double> &output_data,
const DeviceMemory<double> &input_diff_data,
- DeviceMemory<double> *output_diff_data) {
+ DeviceMemory<double> *output_diff_data,
+ ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
- PARAM(input_diff_data), PARAM(output_diff_data));
+ PARAM(input_diff_data), PARAM(output_diff_data),
+ PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoPoolBackward(this, pooling_dimensions, input_dimensions,
input_data, output_dimensions, output_data,
- input_diff_data, output_diff_data));
+ input_diff_data, output_diff_data,
+ workspace_allocator));
} else {
SetError();
LOG(WARNING)
@@ -1472,16 +1479,19 @@ Stream &Stream::ThenPoolBackward(
const dnn::BatchDescriptor &output_dimensions,
const DeviceMemory<float> &output_data,
const DeviceMemory<float> &input_diff_data,
- DeviceMemory<float> *output_diff_data) {
+ DeviceMemory<float> *output_diff_data,
+ ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
- PARAM(input_diff_data), PARAM(output_diff_data));
+ PARAM(input_diff_data), PARAM(output_diff_data),
+ PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoPoolBackward(this, pooling_dimensions, input_dimensions,
input_data, output_dimensions, output_data,
- input_diff_data, output_diff_data));
+ input_diff_data, output_diff_data,
+ workspace_allocator));
} else {
SetErrorAndLogNoDnnSupport();
}
@@ -1496,16 +1506,19 @@ Stream &Stream::ThenPoolBackward(
const dnn::BatchDescriptor &output_dimensions,
const DeviceMemory<Eigen::half> &output_data,
const DeviceMemory<Eigen::half> &input_diff_data,
- DeviceMemory<Eigen::half> *output_diff_data) {
+ DeviceMemory<Eigen::half> *output_diff_data,
+ ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
- PARAM(input_diff_data), PARAM(output_diff_data));
+ PARAM(input_diff_data), PARAM(output_diff_data),
+ PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoPoolBackward(this, pooling_dimensions, input_dimensions,
input_data, output_dimensions, output_data,
- input_diff_data, output_diff_data));
+ input_diff_data, output_diff_data,
+ workspace_allocator));
} else {
SetErrorAndLogNoDnnSupport();
}
@@ -1552,16 +1565,18 @@ Stream &Stream::ThenNormalizeBackwardWithDimensions(
const dnn::BatchDescriptor &dimensions, const DeviceMemory<float> &raw_data,
const DeviceMemory<float> &normalized_data,
const DeviceMemory<float> &normalized_variable_gradient,
- DeviceMemory<float> *raw_variable_gradient) {
+ DeviceMemory<float> *raw_variable_gradient,
+ ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(normalize_descriptor), PARAM(dimensions), PARAM(raw_data),
PARAM(normalized_data), PARAM(normalized_variable_gradient),
- PARAM(raw_variable_gradient));
+ PARAM(raw_variable_gradient), PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoNormalizeBackwardWithDimensions(
this, normalize_descriptor, dimensions, raw_data, normalized_data,
- normalized_variable_gradient, raw_variable_gradient));
+ normalized_variable_gradient, raw_variable_gradient,
+ workspace_allocator));
} else {
SetErrorAndLogNoDnnSupport();
}
@@ -5228,24 +5243,11 @@ port::Status Stream::BlockHostUntilDone() {
return status;
}
- port::Status first_error;
- {
- // Wait until all active sub-streams have done their tasks.
- mutex_lock lock(mu_);
- for (auto &stream : sub_streams_) {
- if (!stream.second) {
- first_error.Update(stream.first->BlockHostUntilDone());
- // Set this sub-stream as available.
- stream.second = true;
- }
- }
- }
-
temporary_memory_manager_.DeallocateFinalizedTemporaries();
- first_error.Update(parent_->BlockHostUntilDone(this));
- CheckError(first_error.ok());
- return first_error;
+ port::Status error = parent_->BlockHostUntilDone(this);
+ CheckError(error.ok());
+ return error;
}
} // namespace stream_executor
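
The matching stream.h hunk below defaults workspace_allocator to nullptr, so existing fluent call sites keep compiling unchanged. A hedged sketch of such a call site, combining the updated ThenPoolForward with the simplified BlockHostUntilDone; the stream, descriptors, and device buffers are assumed to be set up elsewhere:

    #include "tensorflow/stream_executor/stream.h"

    namespace se = stream_executor;

    // Only shows the call shape; `stream` must be a valid, initialized Stream.
    se::port::Status RunPooling(se::Stream* stream,
                                const se::dnn::PoolingDescriptor& pool,
                                const se::dnn::BatchDescriptor& in_dims,
                                const se::DeviceMemory<float>& in,
                                const se::dnn::BatchDescriptor& out_dims,
                                se::DeviceMemory<float>* out) {
      // workspace_allocator is omitted, so the new defaulted nullptr is
      // passed through and the DNN backend allocates no extra scratch space.
      stream->ThenPoolForward(pool, in_dims, in, out_dims, out);
      // The simplified BlockHostUntilDone() now waits only on this stream;
      // sub-streams are no longer drained here.
      return stream->BlockHostUntilDone();
    }
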
diff --git a/tensorflow/stream_executor/stream.h b/tensorflow/stream_executor/stream.h
index a32f4105ad..63d64947c8 100644
--- a/tensorflow/stream_executor/stream.h
+++ b/tensorflow/stream_executor/stream.h
@@ -25,6 +25,7 @@ limitations under the License.
#include <functional>
#include <memory>
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/stream_executor/blas.h"
#include "tensorflow/stream_executor/device_memory.h"
#include "tensorflow/stream_executor/dnn.h"
@@ -628,19 +629,22 @@ class Stream {
const dnn::BatchDescriptor &input_dimensions,
const DeviceMemory<double> &input_data,
const dnn::BatchDescriptor &output_dimensions,
- DeviceMemory<double> *output_data);
+ DeviceMemory<double> *output_data,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenPoolForward(const dnn::PoolingDescriptor &pooling_dimensions,
const dnn::BatchDescriptor &input_dimensions,
const DeviceMemory<float> &input_data,
const dnn::BatchDescriptor &output_dimensions,
- DeviceMemory<float> *output_data);
+ DeviceMemory<float> *output_data,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenPoolForward(const dnn::PoolingDescriptor &pooling_dimensions,
const dnn::BatchDescriptor &input_dimensions,
const DeviceMemory<Eigen::half> &input_data,
const dnn::BatchDescriptor &output_dimensions,
- DeviceMemory<Eigen::half> *output_data);
+ DeviceMemory<Eigen::half> *output_data,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenPoolBackward(const dnn::PoolingDescriptor &pooling_dimensions,
const dnn::BatchDescriptor &input_dimensions,
@@ -648,7 +652,8 @@ class Stream {
const dnn::BatchDescriptor &output_dimensions,
const DeviceMemory<double> &output_data,
const DeviceMemory<double> &input_diff_data,
- DeviceMemory<double> *output_diff_data);
+ DeviceMemory<double> *output_diff_data,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenPoolBackward(const dnn::PoolingDescriptor &pooling_dimensions,
const dnn::BatchDescriptor &input_dimensions,
@@ -656,7 +661,8 @@ class Stream {
const dnn::BatchDescriptor &output_dimensions,
const DeviceMemory<float> &output_data,
const DeviceMemory<float> &input_diff_data,
- DeviceMemory<float> *output_diff_data);
+ DeviceMemory<float> *output_diff_data,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenPoolBackward(const dnn::PoolingDescriptor &pooling_dimensions,
const dnn::BatchDescriptor &input_dimensions,
@@ -664,7 +670,8 @@ class Stream {
const dnn::BatchDescriptor &output_dimensions,
const DeviceMemory<Eigen::half> &output_data,
const DeviceMemory<Eigen::half> &input_diff_data,
- DeviceMemory<Eigen::half> *output_diff_data);
+ DeviceMemory<Eigen::half> *output_diff_data,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenNormalize(const dnn::NormalizeDescriptor &normalize_descriptor,
const DeviceMemory<float> &input_data,
@@ -683,7 +690,8 @@ class Stream {
const DeviceMemory<float> &raw_data,
const DeviceMemory<float> &normalized_data,
const DeviceMemory<float> &normalized_variable_gradient,
- DeviceMemory<float> *raw_variable_gradient);
+ DeviceMemory<float> *raw_variable_gradient,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenActivate(dnn::ActivationMode activation_mode,
const dnn::BatchDescriptor &dimensions,
@@ -1349,33 +1357,39 @@ class Stream {
DeviceMemory<std::complex<double>> *x, int incx);
// See BlasSupport::DoBlasGemm.
- Stream &ThenBlasGemm(blas::Transpose transa, blas::Transpose transb, uint64 m,
- uint64 n, uint64 k, float alpha,
- const DeviceMemory<Eigen::half> &a, int lda,
- const DeviceMemory<Eigen::half> &b, int ldb, float beta,
- DeviceMemory<Eigen::half> *c, int ldc);
- Stream &ThenBlasGemm(blas::Transpose transa, blas::Transpose transb, uint64 m,
- uint64 n, uint64 k, float alpha,
- const DeviceMemory<float> &a, int lda,
- const DeviceMemory<float> &b, int ldb, float beta,
- DeviceMemory<float> *c, int ldc);
- Stream &ThenBlasGemm(blas::Transpose transa, blas::Transpose transb, uint64 m,
- uint64 n, uint64 k, double alpha,
- const DeviceMemory<double> &a, int lda,
- const DeviceMemory<double> &b, int ldb, double beta,
- DeviceMemory<double> *c, int ldc);
- Stream &ThenBlasGemm(blas::Transpose transa, blas::Transpose transb, uint64 m,
- uint64 n, uint64 k, std::complex<float> alpha,
- const DeviceMemory<std::complex<float>> &a, int lda,
- const DeviceMemory<std::complex<float>> &b, int ldb,
- std::complex<float> beta,
- DeviceMemory<std::complex<float>> *c, int ldc);
- Stream &ThenBlasGemm(blas::Transpose transa, blas::Transpose transb, uint64 m,
- uint64 n, uint64 k, std::complex<double> alpha,
- const DeviceMemory<std::complex<double>> &a, int lda,
- const DeviceMemory<std::complex<double>> &b, int ldb,
- std::complex<double> beta,
- DeviceMemory<std::complex<double>> *c, int ldc);
+ TF_EXPORT Stream &ThenBlasGemm(blas::Transpose transa, blas::Transpose transb,
+ uint64 m, uint64 n, uint64 k, float alpha,
+ const DeviceMemory<Eigen::half> &a, int lda,
+ const DeviceMemory<Eigen::half> &b, int ldb,
+ float beta, DeviceMemory<Eigen::half> *c,
+ int ldc);
+ TF_EXPORT Stream &ThenBlasGemm(blas::Transpose transa, blas::Transpose transb,
+ uint64 m, uint64 n, uint64 k, float alpha,
+ const DeviceMemory<float> &a, int lda,
+ const DeviceMemory<float> &b, int ldb,
+ float beta, DeviceMemory<float> *c, int ldc);
+ TF_EXPORT Stream &ThenBlasGemm(blas::Transpose transa, blas::Transpose transb,
+ uint64 m, uint64 n, uint64 k, double alpha,
+ const DeviceMemory<double> &a, int lda,
+ const DeviceMemory<double> &b, int ldb,
+ double beta, DeviceMemory<double> *c, int ldc);
+ TF_EXPORT Stream &ThenBlasGemm(blas::Transpose transa, blas::Transpose transb,
+ uint64 m, uint64 n, uint64 k,
+ std::complex<float> alpha,
+ const DeviceMemory<std::complex<float>> &a,
+ int lda,
+ const DeviceMemory<std::complex<float>> &b,
+ int ldb, std::complex<float> beta,
+ DeviceMemory<std::complex<float>> *c, int ldc);
+ TF_EXPORT Stream &ThenBlasGemm(blas::Transpose transa, blas::Transpose transb,
+ uint64 m, uint64 n, uint64 k,
+ std::complex<double> alpha,
+ const DeviceMemory<std::complex<double>> &a,
+ int lda,
+ const DeviceMemory<std::complex<double>> &b,
+ int ldb, std::complex<double> beta,
+ DeviceMemory<std::complex<double>> *c,
+ int ldc);
Stream &ThenBlasGemmWithProfiling(blas::Transpose transa,
blas::Transpose transb, uint64 m, uint64 n,
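
TF_EXPORT in the hunk above comes from the newly included tensorflow/core/platform/macros.h and marks the ThenBlasGemm overloads for export from the TensorFlow DLL on Windows. A rough paraphrase of what such a macro expands to (an assumption about the macro body, not a verbatim copy of macros.h):

    // Approximate expansion of TF_EXPORT; treat as an assumption.
    #if defined(_WIN32)
    #define TF_EXPORT __declspec(dllexport)                    // Windows DLL
    #else
    #define TF_EXPORT __attribute__((visibility("default")))   // ELF/Mach-O
    #endif
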
diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl
index 6bb393a3f4..955b53f691 100644
--- a/tensorflow/tensorflow.bzl
+++ b/tensorflow/tensorflow.bzl
@@ -24,7 +24,10 @@ load(
"if_mkl",
"if_mkl_lnx_x64"
)
-
+load(
+ "//third_party/mkl_dnn:build_defs.bzl",
+ "if_mkl_open_source_only",
+)
def register_extension_info(**kwargs):
pass
@@ -148,6 +151,12 @@ def if_windows(a):
"//conditions:default": [],
})
+def if_not_windows_cuda(a):
+ return select({
+ clean_dep("//tensorflow:with_cuda_support_windows_override"): [],
+ "//conditions:default": a,
+ })
+
def if_linux_x86_64(a):
return select({
clean_dep("//tensorflow:linux_x86_64"): a,
@@ -174,9 +183,13 @@ def get_win_copts(is_external=False):
"/DEIGEN_AVOID_STL_ARRAY",
"/Iexternal/gemmlowp",
"/wd4018", # -Wno-sign-compare
- "/U_HAS_EXCEPTIONS",
- "/D_HAS_EXCEPTIONS=1",
- "/EHsc", # -fno-exceptions
+ # Bazel's CROSSTOOL currently passes /EHsc to enable exceptions by
+ # default. We can't pass /EHs-c- to disable exceptions; otherwise
+ # we get a waterfall of flag-conflict warnings. Wait for Bazel to
+ # fix this.
+ # "/D_HAS_EXCEPTIONS=0",
+ # "/EHs-c-",
+ "/wd4577",
"/DNOGDI",
]
if is_external:
@@ -208,6 +221,7 @@ def tf_copts(android_optimization_level_override="-O2", is_external=False):
+ if_cuda(["-DGOOGLE_CUDA=1"])
+ if_tensorrt(["-DGOOGLE_TENSORRT=1"])
+ if_mkl(["-DINTEL_MKL=1", "-DEIGEN_USE_VML"])
+ + if_mkl_open_source_only(["-DDO_NOT_USE_ML"])
+ if_mkl_lnx_x64(["-fopenmp"])
+ if_android_arm(["-mfpu=neon"])
+ if_linux_x86_64(["-msse3"])
@@ -819,6 +833,9 @@ def tf_cc_test_mkl(srcs,
tags=[],
size="medium",
args=None):
+ # -fno-exceptions in nocopts breaks compilation if header modules are enabled.
+ disable_header_modules = ["-use_header_modules"]
+
for src in srcs:
native.cc_test(
name=src_to_test_name(src),
@@ -844,6 +861,7 @@ def tf_cc_test_mkl(srcs,
tags=tags,
size=size,
args=args,
+ features=disable_header_modules,
nocopts="-fno-exceptions")
@@ -978,16 +996,17 @@ register_extension_info(
label_regex_for_dep = "{extension_name}",
)
-def tf_kernel_library(name,
- prefix=None,
- srcs=None,
- gpu_srcs=None,
- hdrs=None,
- deps=None,
- alwayslink=1,
- copts=None,
- is_external=False,
- **kwargs):
+def tf_kernel_library(
+ name,
+ prefix = None,
+ srcs = None,
+ gpu_srcs = None,
+ hdrs = None,
+ deps = None,
+ alwayslink = 1,
+ copts = None,
+ is_external = False,
+ **kwargs):
"""A rule to build a TensorFlow OpKernel.
May either specify srcs/hdrs or prefix. Similar to tf_cuda_library,
@@ -1017,6 +1036,7 @@ def tf_kernel_library(name,
deps = []
if not copts:
copts = []
+ textual_hdrs = []
copts = copts + tf_copts(is_external=is_external)
if prefix:
if native.glob([prefix + "*.cu.cc"], exclude=["*test*"]):
@@ -1027,8 +1047,13 @@ def tf_kernel_library(name,
srcs = srcs + native.glob(
[prefix + "*.cc"], exclude=[prefix + "*test*", prefix + "*.cu.cc"])
hdrs = hdrs + native.glob(
- [prefix + "*.h"], exclude=[prefix + "*test*", prefix + "*.cu.h"])
-
+ [prefix + "*.h"],
+ exclude = [prefix + "*test*", prefix + "*.cu.h", prefix + "*impl.h"],
+ )
+ textual_hdrs = native.glob(
+ [prefix + "*impl.h"],
+ exclude = [prefix + "*test*", prefix + "*.cu.h"],
+ )
cuda_deps = [clean_dep("//tensorflow/core:gpu_lib")]
if gpu_srcs:
for gpu_src in gpu_srcs:
@@ -1042,6 +1067,7 @@ def tf_kernel_library(name,
name=name,
srcs=srcs,
hdrs=hdrs,
+ textual_hdrs = textual_hdrs,
copts=copts,
cuda_deps=cuda_deps,
linkstatic=1, # Needed since alwayslink is broken in bazel b/27630669
@@ -1075,6 +1101,9 @@ def tf_mkl_kernel_library(name,
hdrs = hdrs + native.glob(
[prefix + "*.h"])
+ # -fno-exceptions in nocopts breaks compilation if header modules are enabled.
+ disable_header_modules = ["-use_header_modules"]
+
native.cc_library(
name=name,
srcs=if_mkl(srcs),
@@ -1082,7 +1111,8 @@ def tf_mkl_kernel_library(name,
deps=deps,
alwayslink=alwayslink,
copts=copts,
- nocopts=nocopts
+ nocopts=nocopts,
+ features = disable_header_modules
)
register_extension_info(
diff --git a/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt b/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt
index 9e09a8d48e..ef9fe096a1 100644
--- a/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt
@@ -8,5 +8,11 @@ tf_proto {
label: LABEL_OPTIONAL
type: TYPE_STRING
}
+ field {
+ name: "client_handles_error_formatting"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
}
}
diff --git a/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt b/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt
index 4af4ed70ef..eeef15515d 100644
--- a/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt
@@ -131,6 +131,12 @@ tf_proto {
label: LABEL_OPTIONAL
type: TYPE_STRING
}
+ field {
+ name: "client_handles_error_formatting"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
}
}
}
diff --git a/tensorflow/tools/api/golden/tensorflow.-g-p-u-options.pbtxt b/tensorflow/tools/api/golden/tensorflow.-g-p-u-options.pbtxt
index f819b174c0..353e63127d 100644
--- a/tensorflow/tools/api/golden/tensorflow.-g-p-u-options.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.-g-p-u-options.pbtxt
@@ -72,6 +72,12 @@ tf_proto {
label: LABEL_OPTIONAL
type: TYPE_BOOL
}
+ field {
+ name: "num_dev_to_dev_copy_streams"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
nested_type {
name: "VirtualDevices"
field {
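
The golden-file hunks above reflect two new proto fields: ConfigProto.Experimental.client_handles_error_formatting (field 2) and GPUOptions.Experimental.num_dev_to_dev_copy_streams (field 3). A hedged sketch of setting them through the generated C++ API, assuming the usual config.pb.h accessors:

    #include "tensorflow/core/protobuf/config.pb.h"

    void ConfigureSessionOptions() {
      tensorflow::ConfigProto config;
      // ConfigProto.Experimental, field 2 (per the pbtxt above).
      config.mutable_experimental()->set_client_handles_error_formatting(true);
      // GPUOptions.Experimental, field 3: number of device-to-device
      // copy streams.
      config.mutable_gpu_options()
          ->mutable_experimental()
          ->set_num_dev_to_dev_copy_streams(2);
    }
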
diff --git a/tensorflow/tools/api/golden/tensorflow.-variable-aggregation.pbtxt b/tensorflow/tools/api/golden/tensorflow.-variable-aggregation.pbtxt
new file mode 100644
index 0000000000..36b534af36
--- /dev/null
+++ b/tensorflow/tools/api/golden/tensorflow.-variable-aggregation.pbtxt
@@ -0,0 +1,16 @@
+path: "tensorflow.VariableAggregation"
+tf_class {
+ is_instance: "<enum \'VariableAggregation\'>"
+ member {
+ name: "MEAN"
+ mtype: "<enum \'VariableAggregation\'>"
+ }
+ member {
+ name: "NONE"
+ mtype: "<enum \'VariableAggregation\'>"
+ }
+ member {
+ name: "SUM"
+ mtype: "<enum \'VariableAggregation\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.-variable-scope.pbtxt b/tensorflow/tools/api/golden/tensorflow.-variable-scope.pbtxt
index 8e539069da..c13eb7b8bb 100644
--- a/tensorflow/tools/api/golden/tensorflow.-variable-scope.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.-variable-scope.pbtxt
@@ -56,7 +56,7 @@ tf_class {
}
member_method {
name: "get_variable"
- argspec: "args=[\'self\', \'var_store\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'reuse\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'var_store\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'reuse\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "global_variables"
diff --git a/tensorflow/tools/api/golden/tensorflow.-variable-synchronization.pbtxt b/tensorflow/tools/api/golden/tensorflow.-variable-synchronization.pbtxt
new file mode 100644
index 0000000000..7589bb2888
--- /dev/null
+++ b/tensorflow/tools/api/golden/tensorflow.-variable-synchronization.pbtxt
@@ -0,0 +1,20 @@
+path: "tensorflow.VariableSynchronization"
+tf_class {
+ is_instance: "<enum \'VariableSynchronization\'>"
+ member {
+ name: "AUTO"
+ mtype: "<enum \'VariableSynchronization\'>"
+ }
+ member {
+ name: "NONE"
+ mtype: "<enum \'VariableSynchronization\'>"
+ }
+ member {
+ name: "ON_READ"
+ mtype: "<enum \'VariableSynchronization\'>"
+ }
+ member {
+ name: "ON_WRITE"
+ mtype: "<enum \'VariableSynchronization\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-classifier.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-classifier.pbtxt
index 099838fa65..9dbb5d16a4 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-classifier.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-classifier.pbtxt
@@ -21,7 +21,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'feature_columns\', \'n_batches_per_layer\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'n_trees\', \'max_depth\', \'learning_rate\', \'l1_regularization\', \'l2_regularization\', \'tree_complexity\', \'min_node_weight\', \'config\'], varargs=None, keywords=None, defaults=[\'None\', \'<object object instance>\', \'None\', \'None\', \'100\', \'6\', \'0.1\', \'0.0\', \'0.0\', \'0.0\', \'0.0\', \'None\'], "
+ argspec: "args=[\'self\', \'feature_columns\', \'n_batches_per_layer\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'n_trees\', \'max_depth\', \'learning_rate\', \'l1_regularization\', \'l2_regularization\', \'tree_complexity\', \'min_node_weight\', \'config\', \'center_bias\'], varargs=None, keywords=None, defaults=[\'None\', \'<object object instance>\', \'None\', \'None\', \'100\', \'6\', \'0.1\', \'0.0\', \'0.0\', \'0.0\', \'0.0\', \'None\', \'False\'], "
}
member_method {
name: "eval_dir"
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-regressor.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-regressor.pbtxt
index 87bd19a23a..34a30c2874 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-regressor.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-regressor.pbtxt
@@ -21,7 +21,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'feature_columns\', \'n_batches_per_layer\', \'model_dir\', \'label_dimension\', \'weight_column\', \'n_trees\', \'max_depth\', \'learning_rate\', \'l1_regularization\', \'l2_regularization\', \'tree_complexity\', \'min_node_weight\', \'config\'], varargs=None, keywords=None, defaults=[\'None\', \'<object object instance>\', \'None\', \'100\', \'6\', \'0.1\', \'0.0\', \'0.0\', \'0.0\', \'0.0\', \'None\'], "
+ argspec: "args=[\'self\', \'feature_columns\', \'n_batches_per_layer\', \'model_dir\', \'label_dimension\', \'weight_column\', \'n_trees\', \'max_depth\', \'learning_rate\', \'l1_regularization\', \'l2_regularization\', \'tree_complexity\', \'min_node_weight\', \'config\', \'center_bias\'], varargs=None, keywords=None, defaults=[\'None\', \'<object object instance>\', \'None\', \'100\', \'6\', \'0.1\', \'0.0\', \'0.0\', \'0.0\', \'0.0\', \'None\', \'False\'], "
}
member_method {
name: "eval_dir"
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-classifier.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-classifier.pbtxt
index 111914f643..0c6b7e4a82 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-classifier.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-classifier.pbtxt
@@ -21,7 +21,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'hidden_units\', \'feature_columns\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'optimizer\', \'activation_fn\', \'dropout\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'2\', \'None\', \'None\', \'Adagrad\', \'<function relu instance>\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\'], "
+ argspec: "args=[\'self\', \'hidden_units\', \'feature_columns\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'optimizer\', \'activation_fn\', \'dropout\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\'], varargs=None, keywords=None, defaults=[\'None\', \'2\', \'None\', \'None\', \'Adagrad\', \'<function relu instance>\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\', \'False\'], "
}
member_method {
name: "eval_dir"
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt
index 67e4ee02d0..9c1c072124 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt
@@ -21,7 +21,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'model_dir\', \'linear_feature_columns\', \'linear_optimizer\', \'dnn_feature_columns\', \'dnn_optimizer\', \'dnn_hidden_units\', \'dnn_activation_fn\', \'dnn_dropout\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'Ftrl\', \'None\', \'Adagrad\', \'None\', \'<function relu instance>\', \'None\', \'2\', \'None\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\'], "
+ argspec: "args=[\'self\', \'model_dir\', \'linear_feature_columns\', \'linear_optimizer\', \'dnn_feature_columns\', \'dnn_optimizer\', \'dnn_hidden_units\', \'dnn_activation_fn\', \'dnn_dropout\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\', \'linear_sparse_combiner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'Ftrl\', \'None\', \'Adagrad\', \'None\', \'<function relu instance>\', \'None\', \'2\', \'None\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\', \'False\', \'sum\'], "
}
member_method {
name: "eval_dir"
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt
index e1289b975e..7391d4b07a 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt
@@ -21,7 +21,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'model_dir\', \'linear_feature_columns\', \'linear_optimizer\', \'dnn_feature_columns\', \'dnn_optimizer\', \'dnn_hidden_units\', \'dnn_activation_fn\', \'dnn_dropout\', \'label_dimension\', \'weight_column\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'Ftrl\', \'None\', \'Adagrad\', \'None\', \'<function relu instance>\', \'None\', \'1\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\'], "
+ argspec: "args=[\'self\', \'model_dir\', \'linear_feature_columns\', \'linear_optimizer\', \'dnn_feature_columns\', \'dnn_optimizer\', \'dnn_hidden_units\', \'dnn_activation_fn\', \'dnn_dropout\', \'label_dimension\', \'weight_column\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\', \'linear_sparse_combiner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'Ftrl\', \'None\', \'Adagrad\', \'None\', \'<function relu instance>\', \'None\', \'1\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\', \'False\', \'sum\'], "
}
member_method {
name: "eval_dir"
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-regressor.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-regressor.pbtxt
index d030b2f51f..f50e375f7c 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-regressor.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-regressor.pbtxt
@@ -21,7 +21,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'hidden_units\', \'feature_columns\', \'model_dir\', \'label_dimension\', \'weight_column\', \'optimizer\', \'activation_fn\', \'dropout\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'Adagrad\', \'<function relu instance>\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\'], "
+ argspec: "args=[\'self\', \'hidden_units\', \'feature_columns\', \'model_dir\', \'label_dimension\', \'weight_column\', \'optimizer\', \'activation_fn\', \'dropout\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'Adagrad\', \'<function relu instance>\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\', \'False\'], "
}
member_method {
name: "eval_dir"
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-linear-classifier.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-linear-classifier.pbtxt
index cb578759ee..154f171e89 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-linear-classifier.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-linear-classifier.pbtxt
@@ -21,7 +21,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'feature_columns\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'optimizer\', \'config\', \'partitioner\', \'warm_start_from\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'2\', \'None\', \'None\', \'Ftrl\', \'None\', \'None\', \'None\', \'weighted_sum\'], "
+ argspec: "args=[\'self\', \'feature_columns\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'optimizer\', \'config\', \'partitioner\', \'warm_start_from\', \'loss_reduction\', \'sparse_combiner\'], varargs=None, keywords=None, defaults=[\'None\', \'2\', \'None\', \'None\', \'Ftrl\', \'None\', \'None\', \'None\', \'weighted_sum\', \'sum\'], "
}
member_method {
name: "eval_dir"
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-linear-regressor.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-linear-regressor.pbtxt
index fcd01bb663..4d46d1e6b6 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-linear-regressor.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-linear-regressor.pbtxt
@@ -21,7 +21,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'feature_columns\', \'model_dir\', \'label_dimension\', \'weight_column\', \'optimizer\', \'config\', \'partitioner\', \'warm_start_from\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'Ftrl\', \'None\', \'None\', \'None\', \'weighted_sum\'], "
+ argspec: "args=[\'self\', \'feature_columns\', \'model_dir\', \'label_dimension\', \'weight_column\', \'optimizer\', \'config\', \'partitioner\', \'warm_start_from\', \'loss_reduction\', \'sparse_combiner\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'Ftrl\', \'None\', \'None\', \'None\', \'weighted_sum\', \'sum\'], "
}
member_method {
name: "eval_dir"
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt
index c8da55d802..5aa4b3d4fb 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt
@@ -51,6 +51,10 @@ tf_class {
mtype: "<type \'property\'>"
}
member {
+ name: "protocol"
+ mtype: "<type \'property\'>"
+ }
+ member {
name: "save_checkpoints_secs"
mtype: "<type \'property\'>"
}
@@ -88,7 +92,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'train_distribute\', \'device_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'train_distribute\', \'device_fn\', \'protocol\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "replace"
diff --git a/tensorflow/tools/api/golden/tensorflow.image.pbtxt b/tensorflow/tools/api/golden/tensorflow.image.pbtxt
index e89b4dbffd..6ec3aba775 100644
--- a/tensorflow/tools/api/golden/tensorflow.image.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.image.pbtxt
@@ -121,6 +121,10 @@ tf_module {
argspec: "args=[\'boxes\', \'scores\', \'max_output_size\', \'iou_threshold\', \'score_threshold\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'-inf\', \'None\'], "
}
member_method {
+ name: "non_max_suppression_overlaps"
+ argspec: "args=[\'overlaps\', \'scores\', \'max_output_size\', \'overlap_threshold\', \'score_threshold\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'-inf\', \'None\'], "
+ }
+ member_method {
name: "pad_to_bounding_box"
argspec: "args=[\'image\', \'offset_height\', \'offset_width\', \'target_height\', \'target_width\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.initializers.variance_scaling.pbtxt b/tensorflow/tools/api/golden/tensorflow.initializers.variance_scaling.pbtxt
index a6b6e5eceb..86340913e2 100644
--- a/tensorflow/tools/api/golden/tensorflow.initializers.variance_scaling.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.initializers.variance_scaling.pbtxt
@@ -5,7 +5,7 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'normal\', \'None\', \"<dtype: \'float32\'>\"], "
+ argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'truncated_normal\', \'None\', \"<dtype: \'float32\'>\"], "
}
member_method {
name: "from_config"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt
index 11cdd6f0b5..40e82b18b6 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt
@@ -119,7 +119,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt
index 4afad3e4df..8295905975 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt
@@ -124,7 +124,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-early-stopping.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-early-stopping.pbtxt
index 7b0ad85eaa..f71292856c 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-early-stopping.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-early-stopping.pbtxt
@@ -5,7 +5,7 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'monitor\', \'min_delta\', \'patience\', \'verbose\', \'mode\'], varargs=None, keywords=None, defaults=[\'val_loss\', \'0\', \'0\', \'0\', \'auto\'], "
+ argspec: "args=[\'self\', \'monitor\', \'min_delta\', \'patience\', \'verbose\', \'mode\', \'baseline\'], varargs=None, keywords=None, defaults=[\'val_loss\', \'0\', \'0\', \'0\', \'auto\', \'None\'], "
}
member_method {
name: "on_batch_begin"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt
index 2f52464315..e58ba18c1c 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt
@@ -5,7 +5,7 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'log_dir\', \'histogram_freq\', \'batch_size\', \'write_graph\', \'write_grads\', \'write_images\'], varargs=None, keywords=None, defaults=[\'./logs\', \'0\', \'32\', \'True\', \'False\', \'False\'], "
+ argspec: "args=[\'self\', \'log_dir\', \'histogram_freq\', \'batch_size\', \'write_graph\', \'write_grads\', \'write_images\', \'embeddings_freq\', \'embeddings_layer_names\', \'embeddings_metadata\', \'embeddings_data\'], varargs=None, keywords=None, defaults=[\'./logs\', \'0\', \'32\', \'True\', \'False\', \'False\', \'0\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "on_batch_begin"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-variance-scaling.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.initializers.-variance-scaling.pbtxt
index 32a6f6ee88..03f4064b9e 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-variance-scaling.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.initializers.-variance-scaling.pbtxt
@@ -5,7 +5,7 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'normal\', \'None\', \"<dtype: \'float32\'>\"], "
+ argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'truncated_normal\', \'None\', \"<dtype: \'float32\'>\"], "
}
member_method {
name: "from_config"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.initializers.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.initializers.pbtxt
index 14a667870d..8645e54302 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.initializers.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.initializers.pbtxt
@@ -90,11 +90,11 @@ tf_module {
}
member_method {
name: "glorot_normal"
- argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
}
member_method {
name: "glorot_uniform"
- argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
}
member_method {
name: "he_normal"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-activation.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-activation.pbtxt
index 2bf973debb..86e328888e 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-activation.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-activation.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-activity-regularization.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-activity-regularization.pbtxt
index 03f20e72c2..b0ed545781 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-activity-regularization.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-activity-regularization.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-add.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-add.pbtxt
index 4b46b8d15a..42f98ed03d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-add.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-add.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-alpha-dropout.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-alpha-dropout.pbtxt
index d8a1c76fd0..000898a4be 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-alpha-dropout.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-alpha-dropout.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling1-d.pbtxt
index 622926bc4b..380b49f99c 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling1-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling2-d.pbtxt
index 82100d8e09..82db5e6137 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling2-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling3-d.pbtxt
index 408061077c..b6ff688ec3 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling3-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-average.pbtxt
index a3c8031104..b41290f8b0 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-average.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool1-d.pbtxt
index e2dfaca29f..88a033e61f 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool1-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool2-d.pbtxt
index 4f068d2066..c1b9b96044 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool2-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool3-d.pbtxt
index b8c261a743..f59f7727a3 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool3-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-batch-normalization.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-batch-normalization.pbtxt
index 4ccd6cace6..7d3744ed92 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-batch-normalization.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-batch-normalization.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-bidirectional.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-bidirectional.pbtxt
index 2790e5fd85..3fd4ccdab2 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-bidirectional.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-bidirectional.pbtxt
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-concatenate.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-concatenate.pbtxt
index b1326bd0e6..ba21b50be4 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-concatenate.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-concatenate.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt
index e3ac3dbf28..46f9fa2bbb 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt
@@ -188,7 +188,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv1-d.pbtxt
index 1117a695a3..c3ad326589 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv1-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d-transpose.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d-transpose.pbtxt
index b9de142142..fd9eb43066 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d-transpose.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d-transpose.pbtxt
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d.pbtxt
index deb535e06e..40d61688f2 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d-transpose.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d-transpose.pbtxt
index 9a9a223fba..b8c227d725 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d-transpose.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d-transpose.pbtxt
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d.pbtxt
index 1c59b0bdf6..095d35e574 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution1-d.pbtxt
index 30cf5489f4..8f99961198 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution1-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt
index 0ec69508d5..96d522a016 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d.pbtxt
index 4cd8928403..de2824dab4 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt
index 4b4912496d..1d563241d8 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d.pbtxt
index d0ad9cf567..c87e52c537 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping1-d.pbtxt
index 98cff95a7f..dccf5523e3 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping1-d.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping2-d.pbtxt
index 2357498b46..7ac4116d92 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping2-d.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping3-d.pbtxt
index 3324cbff30..024f72705d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping3-d.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt
index 6c81823654..4e0233331b 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt
@@ -108,7 +108,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt
index 487e04fd07..32d46ce8f3 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt
@@ -108,7 +108,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-dense.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-dense.pbtxt
index 137e7cced4..858486c725 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-dense.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-dense.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt
index 7161665d25..f65d750926 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-dot.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-dot.pbtxt
index 24affa2481..2e71ef503d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-dot.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-dot.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-dropout.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-dropout.pbtxt
index 7ba19a4269..42533bcd21 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-dropout.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-dropout.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-e-l-u.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-e-l-u.pbtxt
index 503aa9162c..b5df169417 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-e-l-u.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-e-l-u.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-embedding.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-embedding.pbtxt
index 1737e590a2..0ea17919a9 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-embedding.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-embedding.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-flatten.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-flatten.pbtxt
index 021d024dc2..a33248bc00 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-flatten.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-flatten.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u-cell.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u-cell.pbtxt
index 65387008bf..4ba21a25cd 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u-cell.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u-cell.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u.pbtxt
index 4f791acf05..a7a570418e 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u.pbtxt
@@ -171,7 +171,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-dropout.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-dropout.pbtxt
index abc30e54e0..763bc23113 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-dropout.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-dropout.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-noise.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-noise.pbtxt
index 20791bb448..3c50a3d7f2 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-noise.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-noise.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt
index 449a91d873..ac78bdafad 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt
index bb361e1297..275282d9d2 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt
index e564bf3216..0e31e6058b 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt
index 4cb9cc3ec8..aacd0b1791 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt
index 5ed52b88ae..c236548663 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt
index f4559d29d7..6b9c0290aa 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool1-d.pbtxt
index 64e2d061e2..0d7b2211e6 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool1-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool2-d.pbtxt
index 3372ad6453..d080ad6aed 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool2-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool3-d.pbtxt
index 08a6860bcd..fcb0a109da 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool3-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt
index 22c9eab64f..1d0e22abd0 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt
index 74c405ba9b..653c9f547b 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt
index 39f6f98193..cdbaf82cf6 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-input-layer.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-input-layer.pbtxt
index 7b25e80b6b..230c5e9034 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-input-layer.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-input-layer.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt
index 3619b8bfc4..511456e740 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m.pbtxt
index 8ef3d71dd8..4a3492ebd6 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m.pbtxt
@@ -171,7 +171,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-lambda.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-lambda.pbtxt
index ecbaa9ce2c..5d05cf689f 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-lambda.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-lambda.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-layer.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-layer.pbtxt
index 9b90db1e5e..7efa29be77 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-layer.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-layer.pbtxt
@@ -97,7 +97,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-leaky-re-l-u.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-leaky-re-l-u.pbtxt
index 3c60eaab7f..0ca8e0b52c 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-leaky-re-l-u.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-leaky-re-l-u.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected1-d.pbtxt
index 3dac1ff342..f754fa1da8 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected1-d.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected2-d.pbtxt
index 7f1b5db4d3..c9516b8f07 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected2-d.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-masking.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-masking.pbtxt
index b3e31000f3..850ecff974 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-masking.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-masking.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool1-d.pbtxt
index bbd9d1b0dc..7c69e31f9a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool1-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool2-d.pbtxt
index fe72beea80..fba42642d7 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool2-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool3-d.pbtxt
index e9bf57b2b0..9c277411ea 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool3-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling1-d.pbtxt
index 0eecc58a2b..7c2f6ccc8a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling1-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling2-d.pbtxt
index 96785a7d85..802178dba6 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling2-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling3-d.pbtxt
index 42c46cccb3..e870dfe9ad 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling3-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-maximum.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-maximum.pbtxt
index ac816f68d4..c1337ce0cb 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-maximum.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-maximum.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-minimum.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-minimum.pbtxt
index 56e32e9d36..ed27a62765 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-minimum.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-minimum.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-multiply.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-multiply.pbtxt
index 9ae99563e9..b9f05cb3e5 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-multiply.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-multiply.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-p-re-l-u.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-p-re-l-u.pbtxt
index 815f3bc2d1..336d9f76fb 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-p-re-l-u.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-p-re-l-u.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-permute.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-permute.pbtxt
index e704992b4a..46282217e0 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-permute.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-permute.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-r-n-n.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-r-n-n.pbtxt
index b3a58fa11e..42cd7e87ee 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-r-n-n.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-r-n-n.pbtxt
@@ -102,7 +102,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-re-l-u.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-re-l-u.pbtxt
index f3a96ab895..c00fa79adf 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-re-l-u.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-re-l-u.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-repeat-vector.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-repeat-vector.pbtxt
index 78f464583b..9f094a877a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-repeat-vector.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-repeat-vector.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-reshape.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-reshape.pbtxt
index 222344fd04..2f519a2438 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-reshape.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-reshape.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv1-d.pbtxt
index 55fddf576c..6b93116ba0 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv1-d.pbtxt
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv2-d.pbtxt
index 96314ce498..fd17115e27 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv2-d.pbtxt
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution1-d.pbtxt
index 88bdf99566..4b37a94478 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution1-d.pbtxt
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution2-d.pbtxt
index 6eeea7a8d1..5bdadca74a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution2-d.pbtxt
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt
index 3050d46249..9dfda96fc8 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n.pbtxt
index dda4c9358b..7b7684ccd2 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n.pbtxt
@@ -159,7 +159,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-softmax.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-softmax.pbtxt
index cc6275158b..3b15407fca 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-softmax.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-softmax.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt
index 5eb7e75047..6d04415267 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt
index 500cb8c14e..04950654d5 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt
index 1113a7634f..c424e6dcc8 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt
index c4b9f93561..1160d2840f 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt
@@ -102,7 +102,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-subtract.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-subtract.pbtxt
index 35ad87ad5d..740a03367b 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-subtract.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-subtract.pbtxt
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt
index 282c98d79a..a08c583adb 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-time-distributed.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-time-distributed.pbtxt
index acab93706b..c1294fed0f 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-time-distributed.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-time-distributed.pbtxt
@@ -103,7 +103,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling1-d.pbtxt
index a5ec228a07..dc401d3ed0 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling1-d.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling2-d.pbtxt
index d8d8e0bfe9..4b5165ae97 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling2-d.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling3-d.pbtxt
index 97d6dc06fb..789af15fea 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling3-d.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-wrapper.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-wrapper.pbtxt
index ea9bb41b99..0536a7cee7 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-wrapper.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-wrapper.pbtxt
@@ -102,7 +102,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding1-d.pbtxt
index e6d1d2e089..8915353ec3 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding1-d.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding2-d.pbtxt
index f62017305f..6efb5ef15a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding2-d.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding3-d.pbtxt
index 07a1fde5bd..4c33c5d0bf 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding3-d.pbtxt
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt
index 62aa929d32..85f7c2bfed 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt
@@ -119,7 +119,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt
index 93ecbbce9b..5211657414 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt
@@ -124,7 +124,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling1-d.pbtxt
index 11067058d5..c82e67526b 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling1-d.pbtxt
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling2-d.pbtxt
index 3259e706d7..1d031cb5f8 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling2-d.pbtxt
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling3-d.pbtxt
index e561f2f415..a8dda6655d 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling3-d.pbtxt
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-batch-normalization.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-batch-normalization.pbtxt
index 3124a35c78..97f65ed894 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-batch-normalization.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-batch-normalization.pbtxt
@@ -108,7 +108,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-conv1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-conv1-d.pbtxt
index b5ec61255a..ccd9578f0d 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-conv1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-conv1-d.pbtxt
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-conv2-d-transpose.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-conv2-d-transpose.pbtxt
index b2c89ae66f..9cbb58d721 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-conv2-d-transpose.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-conv2-d-transpose.pbtxt
@@ -110,7 +110,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-conv2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-conv2-d.pbtxt
index 9e4f4969dc..c75ea3911e 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-conv2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-conv2-d.pbtxt
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-conv3-d-transpose.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-conv3-d-transpose.pbtxt
index 9850e6d765..5dc834e514 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-conv3-d-transpose.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-conv3-d-transpose.pbtxt
@@ -110,7 +110,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-conv3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-conv3-d.pbtxt
index be113826cc..96ab209874 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-conv3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-conv3-d.pbtxt
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-dense.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-dense.pbtxt
index 0d951bf633..7e9656b352 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-dense.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-dense.pbtxt
@@ -108,7 +108,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-dropout.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-dropout.pbtxt
index f1beeed9ef..e9a2269a6e 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-dropout.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-dropout.pbtxt
@@ -108,7 +108,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-flatten.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-flatten.pbtxt
index b75a012811..7d2eaaab2a 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-flatten.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-flatten.pbtxt
@@ -108,7 +108,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-layer.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-layer.pbtxt
index 80e0fb228b..8bc3eb26e9 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-layer.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-layer.pbtxt
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling1-d.pbtxt
index 50ff484d73..6a0dcce56a 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling1-d.pbtxt
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling2-d.pbtxt
index cea809744c..b6c84edf2a 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling2-d.pbtxt
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling3-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling3-d.pbtxt
index ab9e89554c..062a02fa59 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling3-d.pbtxt
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-separable-conv1-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-separable-conv1-d.pbtxt
index 4362568445..eaad0fb23e 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-separable-conv1-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-separable-conv1-d.pbtxt
@@ -110,7 +110,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-separable-conv2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.layers.-separable-conv2-d.pbtxt
index 3cad824cd3..ece28a8ce9 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-separable-conv2-d.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.layers.-separable-conv2-d.pbtxt
@@ -110,7 +110,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.math.pbtxt b/tensorflow/tools/api/golden/tensorflow.math.pbtxt
index 25573cb494..a308c76ebc 100644
--- a/tensorflow/tools/api/golden/tensorflow.math.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.math.pbtxt
@@ -34,7 +34,7 @@ tf_module {
}
member_method {
name: "bessel_i0"
- argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'bessel_i0\'], "
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "bessel_i0e"
@@ -42,7 +42,7 @@ tf_module {
}
member_method {
name: "bessel_i1"
- argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'bessel_i1\'], "
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "bessel_i1e"
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.pbtxt b/tensorflow/tools/api/golden/tensorflow.nn.pbtxt
index 455590d866..d9e5b0d0fc 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.nn.pbtxt
@@ -261,6 +261,10 @@ tf_module {
argspec: "args=[\'x\', \'weights\', \'biases\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "safe_embedding_lookup_sparse"
+ argspec: "args=[\'embedding_weights\', \'sparse_ids\', \'sparse_weights\', \'combiner\', \'default_id\', \'name\', \'partition_strategy\', \'max_norm\'], varargs=None, keywords=None, defaults=[\'None\', \'mean\', \'None\', \'None\', \'div\', \'None\'], "
+ }
+ member_method {
name: "sampled_softmax_loss"
argspec: "args=[\'weights\', \'biases\', \'labels\', \'inputs\', \'num_sampled\', \'num_classes\', \'num_true\', \'sampled_values\', \'remove_accidental_hits\', \'partition_strategy\', \'name\', \'seed\'], varargs=None, keywords=None, defaults=[\'1\', \'None\', \'True\', \'mod\', \'sampled_softmax_loss\', \'None\'], "
}
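A hedged usage sketch of the newly exported `tf.nn.safe_embedding_lookup_sparse` described by the argspec above (defaults: combiner='mean', partition_strategy='div'); it looks up embeddings for sparse ids while handling empty rows, returning zero vectors for them unless a `default_id` is supplied. Shapes and names are illustrative:

    import tensorflow as tf

    embedding_weights = [tf.get_variable('emb', shape=[100, 8])]  # one shard
    sparse_ids = tf.SparseTensor(
        indices=[[0, 0], [2, 1]],
        values=tf.constant([3, 7], dtype=tf.int64),  # ids must be int64
        dense_shape=[4, 2])
    # Rows 1 and 3 contain no ids; with default_id=None they come back as
    # all-zero embedding vectors.
    embedded = tf.nn.safe_embedding_lookup_sparse(
        embedding_weights, sparse_ids, combiner='mean')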
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt
index a8d9e120cb..c74773000a 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt
index c039890e1f..d251f54806 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt
index 62c393de34..8a63b49180 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt
@@ -116,7 +116,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt
index f121ba7939..db1aae2757 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt
@@ -120,7 +120,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt
index 4583dc32b2..d76eab7eb8 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt
index 5016b6ac30..944db6ac93 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt
index 59623fc983..72b40cc9f7 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt
@@ -116,7 +116,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt
index e2ab5aaee9..a5c2b4aefd 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt
@@ -115,7 +115,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt
index bd2a6d61f8..61d5f04b22 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt
@@ -116,7 +116,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.pbtxt b/tensorflow/tools/api/golden/tensorflow.pbtxt
index 20d61aae9d..5eb42b4db3 100644
--- a/tensorflow/tools/api/golden/tensorflow.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.pbtxt
@@ -258,13 +258,21 @@ tf_module {
}
member {
name: "Variable"
- mtype: "<type \'type\'>"
+ mtype: "<class \'tensorflow.python.ops.variables.VariableMetaclass\'>"
+ }
+ member {
+ name: "VariableAggregation"
+ mtype: "<class \'enum.EnumMeta\'>"
}
member {
name: "VariableScope"
mtype: "<type \'type\'>"
}
member {
+ name: "VariableSynchronization"
+ mtype: "<class \'enum.EnumMeta\'>"
+ }
+ member {
name: "WholeFileReader"
mtype: "<type \'type\'>"
}
@@ -1150,7 +1158,7 @@ tf_module {
}
member_method {
name: "get_local_variable"
- argspec: "args=[\'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'False\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'synchronization\', \'aggregation\', \'custom_getter\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'False\', \'None\', \'None\', \'None\', \'True\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\', \'None\'], "
}
member_method {
name: "get_seed"
@@ -1166,7 +1174,7 @@ tf_module {
}
member_method {
name: "get_variable"
- argspec: "args=[\'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "get_variable_scope"
@@ -1310,7 +1318,7 @@ tf_module {
}
member_method {
name: "lbeta"
- argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'lbeta\'], "
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "less"
@@ -1553,10 +1561,6 @@ tf_module {
argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
- name: "print"
- argspec: "args=[\'input_\', \'data\', \'message\', \'first_n\', \'summarize\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
- }
- member_method {
name: "py_func"
argspec: "args=[\'func\', \'inp\', \'Tout\', \'stateful\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
}
@@ -2190,7 +2194,7 @@ tf_module {
}
member_method {
name: "while_loop"
- argspec: "args=[\'cond\', \'body\', \'loop_vars\', \'shape_invariants\', \'parallel_iterations\', \'back_prop\', \'swap_memory\', \'name\', \'maximum_iterations\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'True\', \'False\', \'None\', \'None\'], "
+ argspec: "args=[\'cond\', \'body\', \'loop_vars\', \'shape_invariants\', \'parallel_iterations\', \'back_prop\', \'swap_memory\', \'name\', \'maximum_iterations\', \'return_same_structure\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'True\', \'False\', \'None\', \'None\', \'False\'], "
}
member_method {
name: "write_file"
diff --git a/tensorflow/tools/api/golden/tensorflow.spectral.pbtxt b/tensorflow/tools/api/golden/tensorflow.spectral.pbtxt
index 4f306540cc..6a421ef12d 100644
--- a/tensorflow/tools/api/golden/tensorflow.spectral.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.spectral.pbtxt
@@ -17,6 +17,10 @@ tf_module {
argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "idct"
+ argspec: "args=[\'input\', \'type\', \'n\', \'axis\', \'norm\', \'name\'], varargs=None, keywords=None, defaults=[\'2\', \'None\', \'-1\', \'None\', \'None\'], "
+ }
+ member_method {
name: "ifft"
argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
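A sketch of the newly exported `tf.spectral.idct` above; mathematically the inverse of a type-II DCT is a type-III DCT (up to scaling), and with norm='ortho' the pair below round-trips up to numerical error (input shape illustrative):

    import tensorflow as tf

    signal = tf.random_normal([4, 16])
    coeffs = tf.spectral.dct(signal, type=2, norm='ortho')
    recovered = tf.spectral.idct(coeffs, type=2, norm='ortho')  # ~= signal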
diff --git a/tensorflow/tools/api/golden/tensorflow.variance_scaling_initializer.pbtxt b/tensorflow/tools/api/golden/tensorflow.variance_scaling_initializer.pbtxt
index a58398d645..09d7bc03b4 100644
--- a/tensorflow/tools/api/golden/tensorflow.variance_scaling_initializer.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.variance_scaling_initializer.pbtxt
@@ -5,7 +5,7 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'normal\', \'None\', \"<dtype: \'float32\'>\"], "
+ argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'truncated_normal\', \'None\', \"<dtype: \'float32\'>\"], "
}
member_method {
name: "from_config"
diff --git a/tensorflow/tools/api/lib/python_object_to_proto_visitor.py b/tensorflow/tools/api/lib/python_object_to_proto_visitor.py
index 1cf330e702..3a48cf683c 100644
--- a/tensorflow/tools/api/lib/python_object_to_proto_visitor.py
+++ b/tensorflow/tools/api/lib/python_object_to_proto_visitor.py
@@ -88,6 +88,9 @@ def _SanitizedMRO(obj):
"""
return_list = []
for cls in tf_inspect.getmro(obj):
+ if cls.__name__ == '_NewClass':
+ # Ignore the class created by the @deprecated_alias decorator.
+ continue
str_repr = str(cls)
return_list.append(str_repr)
if 'tensorflow' not in str_repr:
diff --git a/tensorflow/tools/api/tests/api_compatibility_test.py b/tensorflow/tools/api/tests/api_compatibility_test.py
index 90375a794f..d1b34fb242 100644
--- a/tensorflow/tools/api/tests/api_compatibility_test.py
+++ b/tensorflow/tools/api/tests/api_compatibility_test.py
@@ -34,6 +34,13 @@ import sys
import unittest
import tensorflow as tf
+# pylint: disable=g-import-not-at-top
+try:
+ from tensorflow.compat import v1 as tf_v1
+ # We import compat.v1 as tf_v1 instead.
+ del tf.compat.v1
+except ImportError:
+ tf_v1 = None
from google.protobuf import message
from google.protobuf import text_format
@@ -46,6 +53,7 @@ from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
+# pylint: enable=g-import-not-at-top
# FLAGS defined at the bottom:
@@ -215,25 +223,19 @@ class ApiCompatibilityTest(test.TestCase):
visitor.do_not_descend_map['tf'].append('contrib')
traverse.traverse(tf, visitor)
- @unittest.skipUnless(
- sys.version_info.major == 2,
- 'API compabitility test goldens are generated using python2.')
- def testAPIBackwardsCompatibility(self):
- # Extract all API stuff.
+ def checkBackwardsCompatibility(self, root, golden_file_pattern):
+ # Extract all API stuff.
visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
public_api_visitor = public_api.PublicAPIVisitor(visitor)
public_api_visitor.do_not_descend_map['tf'].append('contrib')
public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
- traverse.traverse(tf, public_api_visitor)
+ traverse.traverse(root, public_api_visitor)
proto_dict = visitor.GetProtos()
# Read all golden files.
- expression = os.path.join(
- resource_loader.get_root_dir_with_all_resources(),
- _KeyToFilePath('*'))
- golden_file_list = file_io.get_matching_files(expression)
+ golden_file_list = file_io.get_matching_files(golden_file_pattern)
def _ReadFileToProto(filename):
"""Read a filename, create a protobuf from its contents."""
@@ -254,6 +256,26 @@ class ApiCompatibilityTest(test.TestCase):
verbose=FLAGS.verbose_diffs,
update_goldens=FLAGS.update_goldens)
+ @unittest.skipUnless(
+ sys.version_info.major == 2,
+ 'API compatibility test goldens are generated using python2.')
+ def testAPIBackwardsCompatibility(self):
+ golden_file_pattern = os.path.join(
+ resource_loader.get_root_dir_with_all_resources(),
+ _KeyToFilePath('*'))
+ self.checkBackwardsCompatibility(tf, golden_file_pattern)
+
+ @unittest.skipUnless(
+ sys.version_info.major == 2,
+ 'API compatibility test goldens are generated using python2.')
+ def testAPIBackwardsCompatibilityV1(self):
+ if not tf_v1:
+ return
+ golden_file_pattern = os.path.join(
+ resource_loader.get_root_dir_with_all_resources(),
+ _KeyToFilePath('*'))
+ self.checkBackwardsCompatibility(tf_v1, golden_file_pattern)
+
if __name__ == '__main__':
parser = argparse.ArgumentParser()
diff --git a/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le b/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le
index e879c34bbd..ada2c63880 100644
--- a/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le
+++ b/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le
@@ -7,7 +7,7 @@ COPY install/*.sh /install/
RUN /install/install_bootstrap_deb_packages.sh
RUN add-apt-repository -y ppa:openjdk-r/ppa
RUN /install/install_deb_packages.sh
-RUN apt-get update && apt-get install -y libopenblas-dev
+RUN /install/install_openblas_ppc64le.sh
RUN /install/install_hdf5_ppc64le.sh
RUN /install/install_pip_packages.sh
RUN /install/install_bazel_from_source.sh
diff --git a/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le b/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le
index 8967138747..a404f129ab 100644
--- a/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le
+++ b/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le
@@ -13,7 +13,7 @@ ARG DEBIAN_FRONTEND=noninteractive
RUN /install/install_bootstrap_deb_packages.sh
RUN add-apt-repository -y ppa:openjdk-r/ppa
RUN /install/install_deb_packages.sh
-RUN apt-get update && apt-get install -y libopenblas-dev
+RUN /install/install_openblas_ppc64le.sh
RUN /install/install_hdf5_ppc64le.sh
RUN /install/install_pip_packages.sh
RUN /install/install_bazel_from_source.sh
diff --git a/tensorflow/tools/ci_build/Dockerfile.rbe.cpu b/tensorflow/tools/ci_build/Dockerfile.rbe.cpu
index 3bc52b9ed6..7e5860aeec 100644
--- a/tensorflow/tools/ci_build/Dockerfile.rbe.cpu
+++ b/tensorflow/tools/ci_build/Dockerfile.rbe.cpu
@@ -1,4 +1,4 @@
-FROM launcher.gcr.io/google/rbe-debian8:r327695
+FROM launcher.gcr.io/google/rbe-ubuntu16-04:r327695
LABEL maintainer="Yu Yi <yiyu@google.com>"
# Copy install scripts
@@ -9,6 +9,6 @@ ENV CC /usr/local/bin/clang
ENV CXX /usr/local/bin/clang++
ENV AR /usr/bin/ar
-# Run pip install script for RBE Debian8 container.
+# Run pip install script for RBE Ubuntu 16-04 container.
RUN /install/install_pip_packages_remote.sh
RUN /install/install_pip_packages.sh
diff --git a/tensorflow/tools/ci_build/ci_parameterized_build.sh b/tensorflow/tools/ci_build/ci_parameterized_build.sh
index 300ba8ea0b..08e2c3edd2 100755
--- a/tensorflow/tools/ci_build/ci_parameterized_build.sh
+++ b/tensorflow/tools/ci_build/ci_parameterized_build.sh
@@ -59,6 +59,9 @@
# TF_BUILD_BAZEL_CLEAN:
# Will perform "bazel clean", if and only if this variable
# is set to any non-empty and non-0 value
+# TF_BAZEL_BUILD_ONLY:
+# If it is set to any non-empty value that is not "0", Bazel
+# will only build the specified targets
# TF_GPU_COUNT:
# Run this many parallel tests for serial builds.
# For now, only can be edited for PIP builds.
@@ -128,7 +131,7 @@ BAZEL_CMD="bazel test"
BAZEL_BUILD_ONLY_CMD="bazel build"
BAZEL_CLEAN_CMD="bazel clean"
-DEFAULT_BAZEL_CONFIGS="--config=gcp --config=hdfs"
+DEFAULT_BAZEL_CONFIGS=""
PIP_CMD="${CI_BUILD_DIR}/builds/pip.sh"
PIP_TEST_TUTORIALS_FLAG="--test_tutorials"
@@ -410,6 +413,11 @@ fi
# this flag, and it only affects a few tests.
EXTRA_ARGS="${EXTRA_ARGS} --distinct_host_configuration=false"
+if [[ ! -z "${TF_BAZEL_BUILD_ONLY}" ]] &&
+ [[ "${TF_BAZEL_BUILD_ONLY}" != "0" ]];then
+ BAZEL_CMD=${BAZEL_BUILD_ONLY_CMD}
+fi
+
# Process PIP install-test option
if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]] ||
[[ ${TF_BUILD_IS_PIP} == "both" ]]; then
diff --git a/tensorflow/tools/ci_build/ci_sanity.sh b/tensorflow/tools/ci_build/ci_sanity.sh
index 05676f9551..db37edf809 100755
--- a/tensorflow/tools/ci_build/ci_sanity.sh
+++ b/tensorflow/tools/ci_build/ci_sanity.sh
@@ -349,12 +349,12 @@ do_external_licenses_check(){
# Blacklist
echo ${MISSING_LICENSES_FILE}
- grep -e "@bazel_tools//third_party/" -e "@com_google_absl//absl" -e "@org_tensorflow//" -v ${MISSING_LICENSES_FILE} > temp.txt
+ grep -e "@bazel_tools//third_party/" -e "@com_google_absl//absl" -e "@org_tensorflow//" -e "@com_github_googlecloudplatform_google_cloud_cpp//google" -v ${MISSING_LICENSES_FILE} > temp.txt
mv temp.txt ${MISSING_LICENSES_FILE}
# Whitelist
echo ${EXTRA_LICENSE_FILE}
- grep -e "@bazel_tools//src" -e "@bazel_tools//tools/" -e "@com_google_absl//" -e "//external" -e "@local" -v ${EXTRA_LICENSES_FILE} > temp.txt
+ grep -e "@bazel_tools//src" -e "@bazel_tools//tools/" -e "@com_google_absl//" -e "//external" -e "@local" -e "@com_github_googlecloudplatform_google_cloud_cpp//" -v ${EXTRA_LICENSES_FILE} > temp.txt
mv temp.txt ${EXTRA_LICENSES_FILE}
@@ -543,7 +543,7 @@ SANITY_STEPS=("do_pylint PYTHON2" "do_pylint PYTHON3" "do_check_futures_test" "d
SANITY_STEPS_DESC=("Python 2 pylint" "Python 3 pylint" "Check that python files have certain __future__ imports" "buildifier check" "bazel nobuild" "pip: license check for external dependencies" "C library: license check for external dependencies" "Java Native Library: license check for external dependencies" "Pip Smoke Test: Checking py_test dependencies exist in pip package" "Check load py_test: Check that BUILD files with py_test target properly load py_test" "Code Link Check: Check there are no broken links" "Test entries in /tensorflow/contrib/cmake/python_{modules|protos|protos_cc}.txt for validity and consistency" "Check file names for cases")
INCREMENTAL_FLAG=""
-DEFAULT_BAZEL_CONFIGS="--config=hdfs --config=gcp"
+DEFAULT_BAZEL_CONFIGS=""
# Parse command-line arguments
BAZEL_FLAGS=${DEFAULT_BAZEL_CONFIGS}
diff --git a/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh b/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh
index d0816c92b7..75da9bb835 100755
--- a/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh
+++ b/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh
@@ -35,6 +35,30 @@ elif [[ ${BASH_VER_MAJOR} -eq 4 ]] && [[ ${BASH_VER_MINOR} -lt 2 ]]; then
exit 1
fi
+function is_absolute {
+ [[ "$1" = /* ]] || [[ "$1" =~ ^[a-zA-Z]:[/\\].* ]]
+}
+
+RUNFILES_MANIFEST_FILE="${TEST_SRCDIR}/MANIFEST"
+function rlocation() {
+ if is_absolute "$1" ; then
+ # If the file path is already fully specified, simply return it.
+ echo "$1"
+ elif [[ -e "$TEST_SRCDIR/$1" ]]; then
+ # If the file exists in the $TEST_SRCDIR then just use it.
+ echo "$TEST_SRCDIR/$1"
+ elif [[ -e "$RUNFILES_MANIFEST_FILE" ]]; then
+ # If a runfiles manifest file exists then use it.
+ echo "$(grep "^$1 " "$RUNFILES_MANIFEST_FILE" | sed 's/[^ ]* //')"
+ fi
+}
+
+TEST_BINARY="$(rlocation $TEST_WORKSPACE/${1#./})"
+shift
+
+# Make sure /var/lock exists; this may not be true under MSYS
+mkdir -p /var/lock
+
TF_GPU_COUNT=${TF_GPU_COUNT:-8}
for i in `seq 0 $((TF_GPU_COUNT-1))`; do
@@ -45,8 +69,8 @@ for i in `seq 0 $((TF_GPU_COUNT-1))`; do
# This export only works within the brackets, so it is isolated to one
# single command.
export CUDA_VISIBLE_DEVICES=$i
- echo "Running test $* on GPU $CUDA_VISIBLE_DEVICES"
- $@
+ echo "Running test $TEST_BINARY $* on GPU $CUDA_VISIBLE_DEVICES"
+ "$TEST_BINARY" $@
)
return_code=$?
flock -u "$lock_fd"
diff --git a/tensorflow/tools/ci_build/install/install_bazel.sh b/tensorflow/tools/ci_build/install/install_bazel.sh
index 3e27a94cf2..adbff8f6ef 100755
--- a/tensorflow/tools/ci_build/install/install_bazel.sh
+++ b/tensorflow/tools/ci_build/install/install_bazel.sh
@@ -15,7 +15,7 @@
# ==============================================================================
# Select bazel version.
-BAZEL_VERSION="0.11.0"
+BAZEL_VERSION="0.14.1"
set +e
local_bazel_ver=$(bazel version 2>&1 | grep -i label | awk '{print $3}')
diff --git a/tensorflow/tools/ci_build/install/install_bazel_from_source.sh b/tensorflow/tools/ci_build/install/install_bazel_from_source.sh
index ddad00c5f0..9d24b3e421 100755
--- a/tensorflow/tools/ci_build/install/install_bazel_from_source.sh
+++ b/tensorflow/tools/ci_build/install/install_bazel_from_source.sh
@@ -18,7 +18,7 @@
# It will compile bazel from source and install it in /usr/local/bin
# Select bazel version.
-BAZEL_VERSION="0.11.0"
+BAZEL_VERSION="0.14.1"
set +e
local_bazel_ver=$(bazel version 2>&1 | grep -i label | awk '{print $3}')
diff --git a/tensorflow/tools/ci_build/install/install_openblas_ppc64le.sh b/tensorflow/tools/ci_build/install/install_openblas_ppc64le.sh
new file mode 100755
index 0000000000..107cc61ff5
--- /dev/null
+++ b/tensorflow/tools/ci_build/install/install_openblas_ppc64le.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+OPENBLAS_SRC_PATH=/tmp/openblas_src/
+POWER="POWER8"
+USE_OPENMP="USE_OPENMP=1"
+OPENBLAS_INSTALL_PATH="/usr"
+apt-get update
+apt-get install -y gfortran gfortran-5
+rm -rf ${OPENBLAS_SRC_PATH}
+git clone -b release-0.3.0 https://github.com/xianyi/OpenBLAS ${OPENBLAS_SRC_PATH}
+cd ${OPENBLAS_SRC_PATH}
+# Pick up fix for OpenBLAS issue 1571
+git cherry-pick -X theirs 961d25e9c7e4a1758adb1dbeaa15187de69dd052
+make TARGET=${POWER} ${USE_OPENMP} FC=gfortran
+make PREFIX=${OPENBLAS_INSTALL_PATH} install
diff --git a/tensorflow/tools/ci_build/linux/mkl/build-dev-container.sh b/tensorflow/tools/ci_build/linux/mkl/build-dev-container.sh
index bd16d580f5..ad22ebe4eb 100755
--- a/tensorflow/tools/ci_build/linux/mkl/build-dev-container.sh
+++ b/tensorflow/tools/ci_build/linux/mkl/build-dev-container.sh
@@ -25,9 +25,14 @@ function upsearch () {
# Set up WORKSPACE.
WORKSPACE="${WORKSPACE:-$(upsearch WORKSPACE)}"
-TF_DOCKER_BUILD_DEVEL_BRANCH="master"
-TF_DOCKER_BUILD_IMAGE_NAME="intel-mkl/tensorflow"
-TF_DOCKER_BUILD_VERSION="nightly"
+
+TF_DOCKER_BUILD_DEVEL_BRANCH=${TF_DOCKER_BUILD_DEVEL_BRANCH:-master}
+TF_DOCKER_BUILD_IMAGE_NAME=${TF_DOCKER_BUILD_IMAGE_NAME:-intel-mkl/tensorflow}
+TF_DOCKER_BUILD_VERSION=${TF_DOCKER_BUILD_VERSION:-nightly}
+
+echo "TF_DOCKER_BUILD_DEVEL_BRANCH=${TF_DOCKER_BUILD_DEVEL_BRANCH}"
+echo "TF_DOCKER_BUILD_IMAGE_NAME=${TF_DOCKER_BUILD_IMAGE_NAME}"
+echo "TF_DOCKER_BUILD_VERSION=${TF_DOCKER_BUILD_VERSION}"
# build the python 2 container and whl
TF_DOCKER_BUILD_TYPE="MKL" \
diff --git a/tensorflow/tools/ci_build/pi/build_raspberry_pi.sh b/tensorflow/tools/ci_build/pi/build_raspberry_pi.sh
index b8bce57c87..3d27e84b81 100755
--- a/tensorflow/tools/ci_build/pi/build_raspberry_pi.sh
+++ b/tensorflow/tools/ci_build/pi/build_raspberry_pi.sh
@@ -65,6 +65,10 @@ OPENBLAS_SRC_PATH=/tmp/openblas_src/
sudo rm -rf ${OPENBLAS_SRC_PATH}
git clone https://github.com/xianyi/OpenBLAS ${OPENBLAS_SRC_PATH}
cd ${OPENBLAS_SRC_PATH}
+# The commit after this introduced Fortran compile issues. In theory they should
+# be solvable using NOFORTRAN=1 on the make command, but my initial tries didn't
+# work, so pinning to the last known good version.
+git checkout 5a6a2bed9aff0ba8a18651d5514d029c8cae336a
# If this path is changed, you'll also need to update
# cxx_builtin_include_directory in third_party/toolchains/cpus/arm/CROSSTOOL.tpl
OPENBLAS_INSTALL_PATH=/tmp/openblas_install/
diff --git a/tensorflow/tools/ci_build/update_version.py b/tensorflow/tools/ci_build/update_version.py
index 642dde36a7..30c318a58f 100755
--- a/tensorflow/tools/ci_build/update_version.py
+++ b/tensorflow/tools/ci_build/update_version.py
@@ -248,16 +248,6 @@ def update_md_files(old_version, new_version):
replace_string_in_line(r"<version>%s<\/version>" % old_version,
"<version>%s</version>" % new_version, filepath)
- # Update any links to colab notebooks.
- def colab_url(version):
- version_string = "%s.%s.%s" % (version.major, version.minor, version.patch)
- prefix = "https://colab.research.google.com/github/tensorflow/models/blob/r"
- return prefix + version_string + "/"
-
- replace_string_in_line(
- colab_url(old_version), colab_url(new_version),
- "%s/docs_src/get_started/eager.md" % TF_SRC_DIR)
-
def major_minor_change(old_version, new_version):
"""Check if a major or minor change occurred."""
diff --git a/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh b/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
index a3e07737a4..c03cbd9c66 100644
--- a/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
+++ b/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
@@ -23,17 +23,20 @@ function run_configure_for_gpu_build {
# Enable CUDA support
export TF_NEED_CUDA=1
- # TODO(pcloudy): Remove this after TensorFlow uses its own CRSOOTOOL
- # for GPU build on Windows
- export USE_MSVC_WRAPPER=1
-
yes "" | ./configure
}
-function set_gcs_remote_cache_options {
- echo "build --experimental_remote_spawn_cache" >> "${TMP_BAZELRC}"
+function set_remote_cache_options {
+ echo "build --remote_instance_name=projects/tensorflow-testing-cpu" >> "${TMP_BAZELRC}"
echo "build --experimental_remote_platform_override='properties:{name:\"build\" value:\"windows-x64\"}'" >> "${TMP_BAZELRC}"
- echo "build --remote_http_cache=https://storage.googleapis.com/$GCS_BUCKET_NAME" >> "${TMP_BAZELRC}"
+ echo "build --remote_cache=remotebuildexecution.googleapis.com" >> "${TMP_BAZELRC}"
+ echo "build --tls_enabled=true" >> "${TMP_BAZELRC}"
+ echo "build --remote_timeout=3600" >> "${TMP_BAZELRC}"
+ echo "build --auth_enabled=true" >> "${TMP_BAZELRC}"
+ echo "build --spawn_strategy=remote" >> "${TMP_BAZELRC}"
+ echo "build --strategy=Javac=remote" >> "${TMP_BAZELRC}"
+ echo "build --strategy=Closure=remote" >> "${TMP_BAZELRC}"
+ echo "build --genrule_strategy=remote" >> "${TMP_BAZELRC}"
echo "build --google_credentials=$GOOGLE_CLOUD_CREDENTIAL" >> "${TMP_BAZELRC}"
}
diff --git a/tensorflow/tools/ci_build/windows/bazel/common_env.sh b/tensorflow/tools/ci_build/windows/bazel/common_env.sh
index eefa8ee2d5..3af132217e 100644
--- a/tensorflow/tools/ci_build/windows/bazel/common_env.sh
+++ b/tensorflow/tools/ci_build/windows/bazel/common_env.sh
@@ -49,3 +49,15 @@ export PATH="/c/Program Files/Git/cmd:$PATH"
# Make sure we have pip in PATH
export PATH="/c/${PYTHON_BASE_PATH}/Scripts:$PATH"
+
+# Setting default values to CUDA related environment variables
+export TF_CUDA_VERSION=${TF_CUDA_VERSION:-9.0}
+export TF_CUDNN_VERSION=${TF_CUDNN_VERSION:-7.0}
+export TF_CUDA_COMPUTE_CAPABILITIES=${TF_CUDA_COMPUTE_CAPABILITIES:-3.7}
+export CUDA_TOOLKIT_PATH=${CUDA_TOOLKIT_PATH:-"C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}"}
+export CUDNN_INSTALL_PATH=${CUDNN_INSTALL_PATH:-"C:/tools/cuda"}
+
+# Add Cuda and Cudnn dll directories into PATH
+export PATH="$(cygpath -u "${CUDA_TOOLKIT_PATH}")/bin:$PATH"
+export PATH="$(cygpath -u "${CUDA_TOOLKIT_PATH}")/extras/CUPTI/libx64:$PATH"
+export PATH="$(cygpath -u "${CUDNN_INSTALL_PATH}")/bin:$PATH"
diff --git a/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh b/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
index 5c305f7512..ed73401467 100644
--- a/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
+++ b/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
@@ -59,8 +59,8 @@ release_build=0
for ARG in "$@"; do
if [[ "$ARG" == --skip_test ]]; then
skip_test=1
- elif [[ "$ARG" == --enable_gcs_remote_cache ]]; then
- set_gcs_remote_cache_options
+ elif [[ "$ARG" == --enable_remote_cache ]]; then
+ set_remote_cache_options
elif [[ "$ARG" == --release_build ]]; then
release_build=1
fi
diff --git a/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh b/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
index 922bb67bbf..36b2142d95 100644
--- a/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
+++ b/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
@@ -42,9 +42,58 @@ source "tensorflow/tools/ci_build/windows/bazel/common_env.sh" \
source "tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh" \
|| { echo "Failed to source bazel_test_lib.sh" >&2; exit 1; }
+# Recreate an empty bazelrc file under source root
+export TMP_BAZELRC=.tmp.bazelrc
+rm -f "${TMP_BAZELRC}"
+touch "${TMP_BAZELRC}"
+
+function cleanup {
+ # Remove all options in .tmp.bazelrc
+ echo "" > "${TMP_BAZELRC}"
+}
+trap cleanup EXIT
+
+skip_test=0
+release_build=0
+
+for ARG in "$@"; do
+ if [[ "$ARG" == --skip_test ]]; then
+ skip_test=1
+ elif [[ "$ARG" == --enable_remote_cache ]]; then
+ set_remote_cache_options
+ elif [[ "$ARG" == --release_build ]]; then
+ release_build=1
+ fi
+done
+
+if [[ "$release_build" != 1 ]]; then
+ # --define=override_eigen_strong_inline=true speeds up compilation of conv_grad_ops_3d.cc and conv_ops_3d.cc
+ # by 20 minutes. See https://github.com/tensorflow/tensorflow/issues/10521
+ # Because this hurts the runtime performance of TF, we don't enable it in release builds.
+ echo "build --define=override_eigen_strong_inline=true" >> "${TMP_BAZELRC}"
+fi
+
+# The host and target platforms are the same in the Windows build, so we don't
+# have to distinguish them. This helps avoid building the same targets twice.
+echo "build --distinct_host_configuration=false" >> "${TMP_BAZELRC}"
+
+# Enable short object file paths to avoid the long path issue on Windows.
+echo "startup --output_user_root=${TMPDIR}" >> "${TMP_BAZELRC}"
+
+# Disable nvcc warnings to reduce log file size.
+echo "build --copt=-nvcc_options=disable-warnings" >> "${TMP_BAZELRC}"
+
+if ! grep -q "import %workspace%/${TMP_BAZELRC}" .bazelrc; then
+ echo "import %workspace%/${TMP_BAZELRC}" >> .bazelrc
+fi
+
run_configure_for_gpu_build
-bazel build -c opt tensorflow/tools/pip_package:build_pip_package || exit $?
+bazel build --announce_rc --config=opt tensorflow/tools/pip_package:build_pip_package || exit $?
+
+if [[ "$skip_test" == 1 ]]; then
+ exit 0
+fi
# Create a python test directory to avoid package name conflict
PY_TEST_DIR="py_test_dir"
@@ -56,11 +105,18 @@ create_python_test_dir "${PY_TEST_DIR}"
PIP_NAME=$(ls ${PY_TEST_DIR}/tensorflow-*.whl)
reinstall_tensorflow_pip ${PIP_NAME}
+TF_GPU_COUNT=${TF_GPU_COUNT:-8}
+
# Define no_tensorflow_py_deps=true so that every py_test has no deps anymore,
# which results in testing against the system-installed TensorFlow.
# GPU tests are very flaky when running concurrently, so run one test per GPU
# via parallel_gpu_execute, with local_test_jobs set to the GPU count.
-bazel test -c opt -k --test_output=errors \
+bazel test --announce_rc --config=opt -k --test_output=errors \
+ --test_env=TF_GPU_COUNT \
+ --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \
--define=no_tensorflow_py_deps=true --test_lang_filters=py \
- --test_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,no_oss \
- --build_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,no_oss \
- --local_test_jobs=1 --build_tests_only //${PY_TEST_DIR}/tensorflow/python/...
+ --test_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,-no_oss \
+ --build_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,-no_oss --build_tests_only \
+ --local_test_jobs=$TF_GPU_COUNT --test_timeout="300,450,1200,3600" \
+ --flaky_test_attempts=3 \
+ //${PY_TEST_DIR}/tensorflow/python/... \
+ //${PY_TEST_DIR}/tensorflow/contrib/...
diff --git a/tensorflow/tools/compatibility/ast_edits.py b/tensorflow/tools/compatibility/ast_edits.py
new file mode 100644
index 0000000000..23cc4a21a9
--- /dev/null
+++ b/tensorflow/tools/compatibility/ast_edits.py
@@ -0,0 +1,502 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Upgrader for Python scripts according to an API change specification."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import ast
+import collections
+import os
+import re
+import shutil
+import sys
+import tempfile
+import traceback
+
+
+class APIChangeSpec(object):
+ """This class defines the transformations that need to happen.
+
+ This class must provide the following fields:
+
+ * `function_keyword_renames`: maps function names to a map of old -> new
+ argument names
+ * `function_renames`: maps function names to new function names
+ * `change_to_function`: a set of function names that have changed (for
+ notifications)
+ * `function_reorders`: maps functions whose argument order has changed to the
+ list of arguments in the new order
+ * `function_handle`: maps function names to custom handlers for the function
+
+ For an example, see `TFAPIChangeSpec`.
+ """
+
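+# A minimal sketch of a concrete spec (hypothetical names, for illustration
+# only; the real spec used by the TF upgrade tool is `TFAPIChangeSpec`):
+#
+#   class FakeAPIChangeSpec(APIChangeSpec):
+#     function_keyword_renames = {"tf.foo": {"old_arg": "new_arg"}}
+#     function_renames = {"tf.foo": "tf.bar"}
+#     change_to_function = set()
+#     function_reorders = {"tf.baz": ["x", "y"]}
+#     function_handle = {}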
+
+class _FileEditTuple(
+ collections.namedtuple("_FileEditTuple",
+ ["comment", "line", "start", "old", "new"])):
+ """Each edit that is recorded by a _FileEditRecorder.
+
+ Fields:
+ comment: A description of the edit and why it was made.
+ line: The line number in the file where the edit occurs (1-indexed).
+ start: The column offset on that line where the edit begins (0-indexed).
+ old: text string to remove (this must match what was in file).
+ new: text string to add in place of `old`.
+ """
+
+ __slots__ = ()
+
+
+class _FileEditRecorder(object):
+ """Record changes that need to be done to the file."""
+
+ def __init__(self, filename):
+ self._filename = filename
+
+ # Map line number (1-indexed) to the list of edits recorded on that line.
+ self._line_to_edit = collections.defaultdict(list)
+ self._errors = []
+
+ def process(self, text):
+ """Process a list of strings, each corresponding to the recorded changes.
+
+ Args:
+ text: A list of lines of text (assumed to contain newlines)
+ Returns:
+ A tuple of the modified text, a report describing each change, and a
+ list of error strings.
+ Raises:
+ ValueError: if substitution source location does not have expected text.
+ """
+
+ change_report = ""
+
+ # Iterate over each line that has recorded edits
+ for line, edits in self._line_to_edit.items():
+ offset = 0
+ # Sort by column so that edits are processed left to right, which makes
+ # the offset adjustments cumulative for edits that change the string
+ # length.
+ edits.sort(key=lambda x: x.start)
+
+ # Extract each line to a list of characters, because mutable lists
+ # are editable, unlike immutable strings.
+ char_array = list(text[line - 1])
+
+ # Record a description of the change
+ change_report += "%r Line %d\n" % (self._filename, line)
+ change_report += "-" * 80 + "\n\n"
+ for e in edits:
+ change_report += "%s\n" % e.comment
+ change_report += "\n Old: %s" % (text[line - 1])
+
+ # Build marker buffers for underlining where on the line each edit applies
+ change_list = [" "] * len(text[line - 1])
+ change_list_new = [" "] * len(text[line - 1])
+
+ # Iterate for each edit
+ for e in edits:
+ # Create effective start, end by accounting for change in length due
+ # to previous edits
+ start_eff = e.start + offset
+ end_eff = start_eff + len(e.old)
+
+ # Make sure the edit is changing what it should be changing
+ old_actual = "".join(char_array[start_eff:end_eff])
+ if old_actual != e.old:
+ raise ValueError("Expected text %r but got %r" %
+ (e.old, old_actual))
+ # Make the edit
+ char_array[start_eff:end_eff] = list(e.new)
+
+ # Create the underline highlighting of the before and after
+ change_list[e.start:e.start + len(e.old)] = "~" * len(e.old)
+ change_list_new[start_eff:end_eff] = "~" * len(e.new)
+
+ # Keep track of how to generate effective ranges
+ offset += len(e.new) - len(e.old)
+
+ # Finish the report comment
+ change_report += " %s\n" % "".join(change_list)
+ text[line - 1] = "".join(char_array)
+ change_report += " New: %s" % (text[line - 1])
+ change_report += " %s\n\n" % "".join(change_list_new)
+ return "".join(text), change_report, self._errors
+
+ def add(self, comment, line, start, old, new, error=None):
+ """Add a new change that is needed.
+
+ Args:
+ comment: A description of what was changed
+ line: Line number (1 indexed)
+ start: Column offset (0 indexed)
+ old: old text
+ new: new text
+ error: this "edit" is something that cannot be fixed automatically
+ Returns:
+ None
+ """
+
+ self._line_to_edit[line].append(
+ _FileEditTuple(comment, line, start, old, new))
+ if error:
+ self._errors.append("%s:%d: %s" % (self._filename, line, error))
+
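+# Usage sketch (hypothetical values): record one edit at line 1, column 0,
+# replacing "tf.foo" with "tf.bar", then apply it to the file text:
+#
+#   recorder = _FileEditRecorder("example.py")
+#   recorder.add("Renamed function", 1, 0, "tf.foo", "tf.bar")
+#   new_text, report, errors = recorder.process(["tf.foo(1)\n"])
+#   # new_text == "tf.bar(1)\n"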
+
+class _ASTCallVisitor(ast.NodeVisitor):
+ """AST Visitor that processes function calls.
+
+ Updates function calls from old API version to new API version using a given
+ change spec.
+ """
+
+ def __init__(self, filename, lines, api_change_spec):
+ self._filename = filename
+ self._file_edit = _FileEditRecorder(filename)
+ self._lines = lines
+ self._api_change_spec = api_change_spec
+
+ def process(self, lines):
+ return self._file_edit.process(lines)
+
+ def generic_visit(self, node):
+ ast.NodeVisitor.generic_visit(self, node)
+
+ def _rename_functions(self, node, full_name):
+ function_renames = self._api_change_spec.function_renames
+ try:
+ new_name = function_renames[full_name]
+ self._file_edit.add("Renamed function %r to %r" % (full_name, new_name),
+ node.lineno, node.col_offset, full_name, new_name)
+ except KeyError:
+ pass
+
+ def _get_attribute_full_path(self, node):
+ """Traverse an attribute to generate a full name e.g. tf.foo.bar.
+
+ Args:
+ node: A Node of type Attribute.
+
+ Returns:
+ a '.'-delimited full-name, or None if the tree was not in a simple form,
+ i.e. `(foo() + b).bar` returns None, while `a.b.c` returns "a.b.c".
+ """
+ curr = node
+ items = []
+ while not isinstance(curr, ast.Name):
+ if not isinstance(curr, ast.Attribute):
+ return None
+ items.append(curr.attr)
+ curr = curr.value
+ items.append(curr.id)
+ return ".".join(reversed(items))
+
+ def _find_true_position(self, node):
+ """Return correct line number and column offset for a given node.
+
+ This is necessary mainly because ast.ListComp reports the location of the
+ first token after the opening '[' of the list comprehension.
+
+ Args:
+ node: Node for which we wish to know the lineno and col_offset.
+
+ Returns:
+ A (line, col_offset) pair, or (None, None) if the true position could
+ not be determined.
+ """
+ find_open = re.compile(r"^\s*(\[).*$")
+ find_string_chars = re.compile(r"['\"]")
+
+ if isinstance(node, ast.ListComp):
+ # Strangely, ast.ListComp returns the col_offset of the first token
+ # after the '[' token which appears to be a bug. Workaround by
+ # explicitly finding the real start of the list comprehension.
+ line = node.lineno
+ col = node.col_offset
+ # loop over lines
+ while True:
+ # Reverse the text preceding `col` so we can regex-search backward.
+ text = self._lines[line - 1]
+ reversed_preceding_text = text[:col][::-1]
+ # Check whether a '[' can be found with only whitespace between it
+ # and `col`.
+ m = find_open.match(reversed_preceding_text)
+ if m:
+ new_col_offset = col - m.start(1) - 1
+ return line, new_col_offset
+ else:
+ if (reversed_preceding_text == "" or
+ reversed_preceding_text.isspace()):
+ line = line - 1
+ prev_line = self._lines[line - 1]
+ # TODO(aselle):
+ # this is poor comment detection, but it is good enough for
+ # cases where the comment does not contain string literal starting/
+ # ending characters. If ast gave us start and end locations of the
+ # ast nodes rather than just start, we could use string literal
+ # node ranges to filter out spurious #'s that appear in string
+ # literals.
+ comment_start = prev_line.find("#")
+ if comment_start == -1:
+ col = len(prev_line) - 1
+ elif find_string_chars.search(prev_line[comment_start:]) is None:
+ col = comment_start
+ else:
+ return None, None
+ else:
+ return None, None
+ # Most other nodes return proper locations (`with` notably does not),
+ # but a `with` statement cannot appear as an argument anyway.
+ return node.lineno, node.col_offset
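+
+ # For example, on the line "x = [i for i in y]", ast reports the ListComp
+ # at the column of the first "i", while this helper returns the column of
+ # the opening "[".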
+
+ def visit_Call(self, node): # pylint: disable=invalid-name
+ """Handle visiting a call node in the AST.
+
+ Args:
+ node: Current Node
+ """
+
+ # Find a simple attribute name path e.g. "tf.foo.bar"
+ full_name = self._get_attribute_full_path(node.func)
+
+ # Make sure the func is marked as being part of a call
+ node.func.is_function_for_call = True
+
+ if full_name:
+ # Call special handlers
+ function_handles = self._api_change_spec.function_handle
+ if full_name in function_handles:
+ function_handles[full_name](self._file_edit, node)
+
+ # Examine each non-keyword argument and turn it into a keyword argument
+ # if reordering is required.
+ function_reorders = self._api_change_spec.function_reorders
+ function_keyword_renames = (
+ self._api_change_spec.function_keyword_renames)
+
+ if full_name in function_reorders:
+ reordered = function_reorders[full_name]
+ for idx, arg in enumerate(node.args):
+ lineno, col_offset = self._find_true_position(arg)
+ if lineno is None or col_offset is None:
+ self._file_edit.add(
+ "Failed to add keyword %r to reordered function %r" %
+ (reordered[idx], full_name),
+ arg.lineno,
+ arg.col_offset,
+ "",
+ "",
+ error="A necessary keyword argument failed to be inserted.")
+ else:
+ keyword_arg = reordered[idx]
+ if (full_name in function_keyword_renames and
+ keyword_arg in function_keyword_renames[full_name]):
+ keyword_arg = function_keyword_renames[full_name][keyword_arg]
+ self._file_edit.add("Added keyword %r to reordered function %r" %
+ (reordered[idx], full_name), lineno, col_offset,
+ "", keyword_arg + "=")
+
+ # Examine each keyword argument and convert it to the final renamed form
+ renamed_keywords = function_keyword_renames.get(full_name, {})
+ for keyword in node.keywords:
+ argkey = keyword.arg
+ argval = keyword.value
+
+ if argkey in renamed_keywords:
+ argval_lineno, argval_col_offset = self._find_true_position(argval)
+ if argval_lineno is not None and argval_col_offset is not None:
+ # TODO(aselle): We should scan backward to find the start of the
+ # keyword key. Unfortunately ast does not give you the location of
+ # keyword keys, so we are forced to infer it from the keyword arg
+ # value.
+ key_start = argval_col_offset - len(argkey) - 1
+ key_end = key_start + len(argkey) + 1
+ if (self._lines[argval_lineno - 1][key_start:key_end] == argkey +
+ "="):
+ self._file_edit.add("Renamed keyword argument from %r to %r" %
+ (argkey,
+ renamed_keywords[argkey]), argval_lineno,
+ argval_col_offset - len(argkey) - 1,
+ argkey + "=", renamed_keywords[argkey] + "=")
+ continue
+ self._file_edit.add(
+ "Failed to rename keyword argument from %r to %r" %
+ (argkey, renamed_keywords[argkey]),
+ argval.lineno,
+ argval.col_offset - len(argkey) - 1,
+ "",
+ "",
+ error="Failed to find keyword lexographically. Fix manually.")
+
+ ast.NodeVisitor.generic_visit(self, node)
+
+ def visit_Attribute(self, node): # pylint: disable=invalid-name
+ """Handle bare Attributes i.e. [tf.foo, tf.bar].
+
+ Args:
+ node: Node that is of type ast.Attribute
+ """
+ full_name = self._get_attribute_full_path(node)
+ if full_name:
+ self._rename_functions(node, full_name)
+ if full_name in self._api_change_spec.change_to_function:
+ if not hasattr(node, "is_function_for_call"):
+ new_text = full_name + "()"
+ self._file_edit.add("Changed %r to %r" % (full_name, new_text),
+ node.lineno, node.col_offset, full_name, new_text)
+
+ ast.NodeVisitor.generic_visit(self, node)
+
+
+class ASTCodeUpgrader(object):
+ """Handles upgrading a set of Python files using a given API change spec."""
+
+ def __init__(self, api_change_spec):
+ if not isinstance(api_change_spec, APIChangeSpec):
+ raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
+ type(api_change_spec))
+ self._api_change_spec = api_change_spec
+
+ def process_file(self, in_filename, out_filename):
+ """Process the given python file for incompatible changes.
+
+ Args:
+ in_filename: filename to parse
+ out_filename: output file to write to
+ Returns:
+ A tuple of (number of files processed, log of actions, list of errors)
+ """
+
+ # Write to a temporary file, just in case we are doing an in-place modify.
+ with open(in_filename, "r") as in_file, \
+ tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
+ ret = self.process_opened_file(in_filename, in_file, out_filename,
+ temp_file)
+
+ shutil.move(temp_file.name, out_filename)
+ return ret
+
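+ # Sketch: upgrading a single file with a hypothetical spec (see
+ # APIChangeSpec above); in_filename may equal out_filename for an
+ # in-place upgrade:
+ #
+ #   upgrader = ASTCodeUpgrader(FakeAPIChangeSpec())
+ #   count, report, errors = upgrader.process_file("model.py", "model.py")
+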
+ # Broad exceptions are required here because ast throws whatever it wants.
+ # pylint: disable=broad-except
+ def process_opened_file(self, in_filename, in_file, out_filename, out_file):
+ """Process the given python file for incompatible changes.
+
+ This function is split out to facilitate StringIO testing from
+ tf_upgrade_test.py.
+
+ Args:
+ in_filename: filename to parse
+ in_file: opened file (or StringIO)
+ out_filename: output file to write to
+ out_file: opened file (or StringIO)
+ Returns:
+ A tuple of (number of files processed, log of actions, list of errors)
+ """
+ process_errors = []
+ text = "-" * 80 + "\n"
+ text += "Processing file %r\n outputting to %r\n" % (in_filename,
+ out_filename)
+ text += "-" * 80 + "\n\n"
+
+ parsed_ast = None
+ lines = in_file.readlines()
+ try:
+ parsed_ast = ast.parse("".join(lines))
+ except Exception:
+ text += "Failed to parse %r\n\n" % in_filename
+ text += traceback.format_exc()
+ if parsed_ast:
+ visitor = _ASTCallVisitor(in_filename, lines, self._api_change_spec)
+ visitor.visit(parsed_ast)
+ out_text, new_text, process_errors = visitor.process(lines)
+ text += new_text
+ if out_file:
+ out_file.write(out_text)
+ text += "\n"
+ return 1, text, process_errors
+
+ # pylint: enable=broad-except
+
+ def process_tree(self, root_directory, output_root_directory,
+ copy_other_files):
+ """Processes upgrades on an entire tree of python files in place.
+
+ Note that only Python files are processed. If you have custom code in
+ other languages, you will need to manually upgrade those.
+
+ Args:
+ root_directory: Directory to walk and process.
+ output_root_directory: Directory to use as base.
+ copy_other_files: Copy files that are not touched by this converter.
+
+ Returns:
+ A tuple of the number of files processed, the report string for all files, and errors
+ """
+
+ # make sure output directory doesn't exist
+ if output_root_directory and os.path.exists(output_root_directory):
+ print("Output directory %r must not already exist." %
+ (output_root_directory))
+ sys.exit(1)
+
+ # make sure output directory does not overlap with root_directory
+ norm_root = os.path.split(os.path.normpath(root_directory))
+ norm_output = os.path.split(os.path.normpath(output_root_directory))
+ if norm_root == norm_output:
+ print("Output directory %r same as input directory %r" %
+ (root_directory, output_root_directory))
+ sys.exit(1)
+
+ # Collect the list of files to process (we do this to correctly handle the
+ # case where the user puts the output directory inside a subdirectory of
+ # the input dir)
+ files_to_process = []
+ files_to_copy = []
+ for dir_name, _, file_list in os.walk(root_directory):
+ py_files = [f for f in file_list if f.endswith(".py")]
+ copy_files = [f for f in file_list if not f.endswith(".py")]
+ for filename in py_files:
+ fullpath = os.path.join(dir_name, filename)
+ fullpath_output = os.path.join(output_root_directory,
+ os.path.relpath(fullpath,
+ root_directory))
+ files_to_process.append((fullpath, fullpath_output))
+ if copy_other_files:
+ for filename in copy_files:
+ fullpath = os.path.join(dir_name, filename)
+ fullpath_output = os.path.join(output_root_directory,
+ os.path.relpath(
+ fullpath, root_directory))
+ files_to_copy.append((fullpath, fullpath_output))
+
+ file_count = 0
+ tree_errors = []
+ report = ""
+ report += ("=" * 80) + "\n"
+ report += "Input tree: %r\n" % root_directory
+ report += ("=" * 80) + "\n"
+
+ for input_path, output_path in files_to_process:
+ output_directory = os.path.dirname(output_path)
+ if not os.path.isdir(output_directory):
+ os.makedirs(output_directory)
+ file_count += 1
+ _, l_report, l_errors = self.process_file(input_path, output_path)
+ tree_errors += l_errors
+ report += l_report
+ for input_path, output_path in files_to_copy:
+ output_directory = os.path.dirname(output_path)
+ if not os.path.isdir(output_directory):
+ os.makedirs(output_directory)
+ shutil.copy(input_path, output_path)
+ return file_count, report, tree_errors
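+
+# End-to-end sketch over a tree of files (hypothetical paths and spec):
+#
+#   upgrader = ASTCodeUpgrader(FakeAPIChangeSpec())
+#   count, report, errors = upgrader.process_tree("src/", "out/", True)
+#   print(report)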
diff --git a/tensorflow/tools/docker/Dockerfile.devel b/tensorflow/tools/docker/Dockerfile.devel
index 57a491255e..fd94d64268 100644
--- a/tensorflow/tools/docker/Dockerfile.devel
+++ b/tensorflow/tools/docker/Dockerfile.devel
@@ -63,7 +63,7 @@ RUN echo "startup --batch" >>/etc/bazel.bazelrc
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
>>/etc/bazel.bazelrc
# Install the most recent bazel release.
-ENV BAZEL_VERSION 0.11.0
+ENV BAZEL_VERSION 0.14.1
WORKDIR /
RUN mkdir /bazel && \
cd /bazel && \
diff --git a/tensorflow/tools/docker/Dockerfile.devel-cpu-mkl b/tensorflow/tools/docker/Dockerfile.devel-cpu-mkl
new file mode 100644
index 0000000000..6796ad70e5
--- /dev/null
+++ b/tensorflow/tools/docker/Dockerfile.devel-cpu-mkl
@@ -0,0 +1,83 @@
+FROM tensorflow/tensorflow:latest-devel
+
+LABEL maintainer="Clayne Robison <clayne.b.robison@intel.com>"
+
+# These arguments are parameterized. Use --build-arg to override.
+ARG TF_BRANCH=r1.9
+ARG WHL_DIR=/whl
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ golang \
+ vim \
+ emacs \
+ && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN pip --no-cache-dir install --upgrade \
+ pip setuptools
+
+RUN pip --no-cache-dir install wheel
+
+# Download and build TensorFlow.
+WORKDIR /
+RUN rm -rf tensorflow && \
+ git clone https://github.com/tensorflow/tensorflow.git && \
+ cd tensorflow && \
+ git checkout ${TF_BRANCH}
+WORKDIR /tensorflow
+
+# Configure the build for CPU with MKL by accepting default build options and
+# setting library locations
+ENV CI_BUILD_PYTHON=python \
+ LD_LIBRARY_PATH=${LD_LIBRARY_PATH} \
+ PYTHON_BIN_PATH=/usr/bin/python \
+ PYTHON_LIB_PATH=/usr/local/lib/python2.7/dist-packages \
+ CC_OPT_FLAGS='-march=native' \
+ TF_NEED_JEMALLOC=0 \
+ TF_NEED_GCP=1 \
+ TF_NEED_CUDA=0 \
+ TF_NEED_HDFS=0 \
+ TF_NEED_S3=1 \
+ TF_NEED_OPENCL=0 \
+ TF_NEED_GDR=0 \
+ TF_ENABLE_XLA=0 \
+ TF_NEED_VERBS=0 \
+ TF_NEED_MPI=0
+RUN ./configure
+
+# Build and Install TensorFlow.
+# The 'mkl' option builds with Intel(R) Math Kernel Library (MKL), which detects
+# the platform it is currently running on and takes appropriately optimized
+# paths. The -march=native option is for code that is not in MKL, and assumes
+# this container will be run on the same architecture on which it is built.
+RUN LD_LIBRARY_PATH=${LD_LIBRARY_PATH} \
+ bazel build --config=mkl \
+ --config="opt" \
+ --copt="-march=broadwell" \
+ --copt="-O3" \
+ //tensorflow/tools/pip_package:build_pip_package && \
+ mkdir ${WHL_DIR} && \
+ bazel-bin/tensorflow/tools/pip_package/build_pip_package ${WHL_DIR}
+
+# Clean up the Bazel cache when done, but leave the whl.
+# This upgrades the default TensorFlow installation to the Intel MKL build.
+RUN pip --no-cache-dir install --upgrade ${WHL_DIR}/tensorflow-*.whl && \
+ rm -rf /root/.cache
+
+WORKDIR /root
+
+# Add a welcome message with instructions
+
+RUN echo '[ ! -z "$TERM" -a -r /etc/motd ] && cat /etc/issue && cat /etc/motd' \
+ >> /etc/bash.bashrc \
+ ; echo "\
+||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||\n\
+| \n\
+| Docker container running Ubuntu \n\
+| with TensorFlow ${TF_BRANCH} optimized for CPU \n\
+| with Intel(R) MKL \n\
+| \n\
+||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||\n\
+\n "\
+ > /etc/motd
diff --git a/tensorflow/tools/docker/Dockerfile.devel-gpu b/tensorflow/tools/docker/Dockerfile.devel-gpu
index 204b5b4dba..44120bf274 100644
--- a/tensorflow/tools/docker/Dockerfile.devel-gpu
+++ b/tensorflow/tools/docker/Dockerfile.devel-gpu
@@ -15,6 +15,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
git \
libcudnn7=7.1.4.18-1+cuda9.0 \
libcudnn7-dev=7.1.4.18-1+cuda9.0 \
+ libnccl2=2.2.13-1+cuda9.0 \
+ libnccl-dev=2.2.13-1+cuda9.0 \
libcurl3-dev \
libfreetype6-dev \
libhdf5-serial-dev \
@@ -33,6 +35,15 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
find /usr/local/cuda-9.0/lib64/ -type f -name 'lib*_static.a' -not -name 'libcudart_static.a' -delete && \
rm /usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a
+# Link NCCL library and header where the build script expects them.
+RUN mkdir /usr/local/cuda-9.0/lib && \
+ ln -s /usr/lib/x86_64-linux-gnu/libnccl.so.2 /usr/local/cuda/lib/libnccl.so.2 && \
+ ln -s /usr/include/nccl.h /usr/local/cuda/include/nccl.h
+
+# TODO(tobyboyd): Remove after license is excluded from BUILD file.
+RUN gunzip /usr/share/doc/libnccl2/NCCL-SLA.txt.gz && \
+ cp /usr/share/doc/libnccl2/NCCL-SLA.txt /usr/local/cuda/
+
RUN curl -fSsL -O https://bootstrap.pypa.io/get-pip.py && \
python get-pip.py && \
rm get-pip.py
@@ -72,7 +83,7 @@ RUN echo "startup --batch" >>/etc/bazel.bazelrc
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
>>/etc/bazel.bazelrc
# Install the most recent bazel release.
-ENV BAZEL_VERSION 0.11.0
+ENV BAZEL_VERSION 0.14.1
WORKDIR /
RUN mkdir /bazel && \
cd /bazel && \
@@ -91,10 +102,13 @@ RUN git clone --branch=r1.9 --depth=1 https://github.com/tensorflow/tensorflow.g
ENV CI_BUILD_PYTHON python
ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH
ENV TF_NEED_CUDA 1
-ENV TF_CUDA_COMPUTE_CAPABILITIES=3.0,3.5,5.2,6.0,6.1
+ENV TF_CUDA_COMPUTE_CAPABILITIES=3.5,5.2,6.0,6.1,7.0
ENV TF_CUDA_VERSION=9.0
ENV TF_CUDNN_VERSION=7
+# NCCL 2.x
+ENV TF_NCCL_VERSION=2
+
RUN ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1 && \
LD_LIBRARY_PATH=/usr/local/cuda/lib64/stubs:${LD_LIBRARY_PATH} \
tensorflow/tools/ci_build/builds/configured GPU \
diff --git a/tensorflow/tools/docker/Dockerfile.devel-gpu-cuda9-cudnn7 b/tensorflow/tools/docker/Dockerfile.devel-gpu-cuda9-cudnn7
new file mode 100644
index 0000000000..3bedc8cf34
--- /dev/null
+++ b/tensorflow/tools/docker/Dockerfile.devel-gpu-cuda9-cudnn7
@@ -0,0 +1,115 @@
+FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04
+
+LABEL maintainer="Gunhan Gulsoy <gunan@google.com>"
+
+# It is possible to override these for releases.
+ARG TF_BRANCH=master
+ARG BAZEL_VERSION=0.5.4
+ARG TF_AVAILABLE_CPUS=32
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ curl \
+ git \
+ golang \
+ libcurl3-dev \
+ libfreetype6-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ pkg-config \
+ python-dev \
+ python-pip \
+ rsync \
+ software-properties-common \
+ unzip \
+ zip \
+ zlib1g-dev \
+ openjdk-8-jdk \
+ openjdk-8-jre-headless \
+ wget \
+ && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN pip --no-cache-dir install --upgrade \
+ pip setuptools
+
+RUN pip --no-cache-dir install \
+ ipykernel \
+ jupyter \
+ matplotlib \
+ numpy \
+ scipy \
+ sklearn \
+ pandas \
+ wheel \
+ && \
+ python -m ipykernel.kernelspec
+
+# Set up our notebook config.
+COPY jupyter_notebook_config.py /root/.jupyter/
+
+# Jupyter has issues with being run directly:
+# https://github.com/ipython/ipython/issues/7062
+# We just add a little wrapper script.
+COPY run_jupyter.sh /
+
+# Set up Bazel.
+
+# Running bazel inside a `docker build` command causes trouble, cf:
+# https://github.com/bazelbuild/bazel/issues/134
+# The easiest solution is to set up a bazelrc file forcing --batch.
+RUN echo "startup --batch" >>/etc/bazel.bazelrc
+# Similarly, we need to work around sandboxing issues:
+# https://github.com/bazelbuild/bazel/issues/418
+RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
+ >>/etc/bazel.bazelrc
+WORKDIR /
+RUN mkdir /bazel && \
+ cd /bazel && \
+ wget --quiet https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
+ wget --quiet https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE && \
+ chmod +x bazel-*.sh && \
+ ./bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
+ rm -f /bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh
+
+# Download and build TensorFlow.
+WORKDIR /
+RUN git clone https://github.com/tensorflow/tensorflow.git && \
+ cd tensorflow && \
+ git checkout ${TF_BRANCH}
+WORKDIR /tensorflow
+
+# Configure the build for our CUDA configuration.
+ENV CI_BUILD_PYTHON=python \
+ LD_LIBRARY_PATH=/usr/local/cuda/extras/CUPTI/lib64:${LD_LIBRARY_PATH} \
+ CUDNN_INSTALL_PATH=/usr/lib/x86_64-linux-gnu \
+ PYTHON_BIN_PATH=/usr/bin/python \
+ PYTHON_LIB_PATH=/usr/local/lib/python2.7/dist-packages \
+ TF_NEED_CUDA=1 \
+ TF_CUDA_VERSION=9.0 \
+ TF_CUDA_COMPUTE_CAPABILITIES=3.0,3.5,5.2,6.0,6.1,7.0 \
+ TF_CUDNN_VERSION=7
+RUN ./configure
+
+# Build and Install TensorFlow.
+RUN ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1 && \
+ LD_LIBRARY_PATH=/usr/local/cuda/lib64/stubs:${LD_LIBRARY_PATH} \
+ bazel build -c opt \
+ --config=cuda \
+ --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" \
+ --jobs=${TF_AVAILABLE_CPUS} \
+ tensorflow/tools/pip_package:build_pip_package && \
+ mkdir /pip_pkg && \
+ bazel-bin/tensorflow/tools/pip_package/build_pip_package /pip_pkg && \
+ pip --no-cache-dir install --upgrade /pip_pkg/tensorflow-*.whl && \
+ rm -rf /pip_pkg && \
+ rm -rf /root/.cache
+# Clean up pip wheel and Bazel cache when done.
+
+WORKDIR /root
+
+# TensorBoard
+EXPOSE 6006
+# IPython
+EXPOSE 8888
diff --git a/tensorflow/tools/docker/Dockerfile.devel-mkl b/tensorflow/tools/docker/Dockerfile.devel-mkl
index aa6d027662..c85641b383 100755
--- a/tensorflow/tools/docker/Dockerfile.devel-mkl
+++ b/tensorflow/tools/docker/Dockerfile.devel-mkl
@@ -73,7 +73,7 @@ RUN echo "startup --batch" >>/etc/bazel.bazelrc
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
>>/etc/bazel.bazelrc
# Install the most recent bazel release.
-ENV BAZEL_VERSION 0.11.0
+ENV BAZEL_VERSION 0.14.1
WORKDIR /
RUN mkdir /bazel && \
cd /bazel && \
@@ -86,7 +86,18 @@ RUN mkdir /bazel && \
# Download and build TensorFlow.
WORKDIR /tensorflow
-RUN git clone --branch=${TF_BUILD_VERSION} --depth=1 https://github.com/tensorflow/tensorflow.git .
+
+# Download and build TensorFlow.
+# Enable checking out both tags and branches
+RUN export TAG_PREFIX="v" && \
+ echo ${TF_BUILD_VERSION} | grep -q ^${TAG_PREFIX}; \
+ if [ $? -eq 0 ]; then \
+ git clone --depth=1 https://github.com/tensorflow/tensorflow.git . && \
+ git fetch --tags && \
+ git checkout ${TF_BUILD_VERSION}; \
+ else \
+ git clone --depth=1 --branch=${TF_BUILD_VERSION} https://github.com/tensorflow/tensorflow.git . ; \
+ fi
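+# For example, TF_BUILD_VERSION=v1.9.0 matches the TAG_PREFIX and checks out
+# the release tag, while TF_BUILD_VERSION=r1.9 clones that branch directly.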
RUN yes "" | ${PYTHON} configure.py
@@ -103,7 +114,7 @@ COPY .bazelrc /root/.bazelrc
RUN tensorflow/tools/ci_build/builds/configured CPU \
bazel --bazelrc=/root/.bazelrc build -c opt \
- tensorflow/tools/pip_package:build_pip_package && \
+ tensorflow/tools/pip_package:build_pip_package && \
bazel-bin/tensorflow/tools/pip_package/build_pip_package "${WHL_DIR}" && \
${PIP} --no-cache-dir install --upgrade "${WHL_DIR}"/tensorflow-*.whl && \
rm -rf /root/.cache
diff --git a/tensorflow/tools/docker/Dockerfile.gpu b/tensorflow/tools/docker/Dockerfile.gpu
index 9197651ff4..28d4371da3 100644
--- a/tensorflow/tools/docker/Dockerfile.gpu
+++ b/tensorflow/tools/docker/Dockerfile.gpu
@@ -13,6 +13,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
cuda-cusparse-9-0 \
curl \
libcudnn7=7.1.4.18-1+cuda9.0 \
+ libnccl2=2.2.13-1+cuda9.0 \
libfreetype6-dev \
libhdf5-serial-dev \
libpng12-dev \
diff --git a/tensorflow/tools/docs/BUILD b/tensorflow/tools/docs/BUILD
index eea712c279..2403e2d966 100644
--- a/tensorflow/tools/docs/BUILD
+++ b/tensorflow/tools/docs/BUILD
@@ -39,6 +39,7 @@ py_library(
visibility = ["//visibility:public"],
deps = [
"//tensorflow/python:platform",
+ "//tensorflow/python:util",
"@astor_archive//:astor",
],
)
@@ -95,6 +96,7 @@ py_binary(
deps = [
":generate_lib",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:util",
"//tensorflow/python/debug:debug_py",
],
)
diff --git a/tensorflow/tools/docs/doc_generator_visitor.py b/tensorflow/tools/docs/doc_generator_visitor.py
index 259a4694fd..c090dbd8da 100644
--- a/tensorflow/tools/docs/doc_generator_visitor.py
+++ b/tensorflow/tools/docs/doc_generator_visitor.py
@@ -20,6 +20,7 @@ from __future__ import print_function
import six
+from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
@@ -201,7 +202,6 @@ class DocGeneratorVisitor(object):
raw_duplicates[master_name] = [master_name, full_name]
else:
reverse_index[object_id] = full_name
-
# Decide on master names, rewire duplicates and make a duplicate_of map
# mapping all non-master duplicates to the master name. The master symbol
# does not have an entry in this map.
@@ -211,10 +211,15 @@ class DocGeneratorVisitor(object):
duplicates = {}
for names in raw_duplicates.values():
names = sorted(names)
-
- # Choose the lexicographically first name with the minimum number of
- # submodules. This will prefer highest level namespace for any symbol.
- master_name = min(names, key=lambda name: name.count('.'))
+ master_name = (
+ tf_export.get_canonical_name_for_symbol(self._index[names[0]])
+ if names else None)
+ if master_name:
+ master_name = 'tf.%s' % master_name
+ else:
+ # Choose the lexicographically first name with the minimum number of
+ # submodules. This will prefer highest level namespace for any symbol.
+ master_name = min(names, key=lambda name: name.count('.'))
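+ # e.g. for duplicates ['tf.Foo', 'tf.sub.Foo'] (hypothetical names), this
+ # fallback picks 'tf.Foo', the name with the fewest submodules.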
duplicates[master_name] = names
for name in names:
diff --git a/tensorflow/tools/docs/generate_lib.py b/tensorflow/tools/docs/generate_lib.py
index 67c413cccb..e7634cd5dc 100644
--- a/tensorflow/tools/docs/generate_lib.py
+++ b/tensorflow/tools/docs/generate_lib.py
@@ -388,16 +388,40 @@ def _build_guide_index(guide_src_dir):
class _UpdateTags(py_guide_parser.PyGuideParser):
- """Rewrites a Python guide so that each section has an explicit tag."""
+ """Rewrites a Python guide so that each section has an explicit id tag.
+
+ "section" here refers to blocks delimited by second level headings.
+ """
def process_section(self, line_number, section_title, tag):
self.replace_line(line_number, '<h2 id="%s">%s</h2>' % (tag, section_title))
+def update_id_tags_inplace(src_dir):
+ """Set explicit ids on all second-level headings to ensure back-links work.
+
+ Args:
+ src_dir: The directory of md-files to convert (inplace).
+ """
+ tag_updater = _UpdateTags()
+
+ for dirpath, _, filenames in os.walk(src_dir):
+ for base_name in filenames:
+ if not base_name.endswith('.md'):
+ continue
+ # `dirpath` already includes `src_dir`, so don't join it again.
+ full_path = os.path.join(dirpath, base_name)
+
+ # Tag updater loads the file, makes the replacements, and returns the
+ # modified file contents
+ content = tag_updater.process(full_path)
+ with open(full_path, 'w') as f:
+ f.write(content)
+
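+# For example, a guide heading "## Reading data" is rewritten in place to
+# '<h2 id="Reading_data">Reading data</h2>' (concrete cases are covered in
+# generate_lib_test.py below).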
+
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
-def _other_docs(src_dir, output_dir, reference_resolver, file_pattern='*.md'):
+def replace_refs(src_dir, output_dir, reference_resolver, file_pattern='*.md'):
"""Fix @{} references in all files under `src_dir` matching `file_pattern`.
A matching directory structure, with the modified files is
@@ -418,7 +442,6 @@ def _other_docs(src_dir, output_dir, reference_resolver, file_pattern='*.md'):
using fnmatch. Non-matching files are copied unchanged.
"""
# Iterate through all the source files and process them.
- tag_updater = _UpdateTags()
for dirpath, _, filenames in os.walk(src_dir):
# How to get from `dirpath` to api_docs/python/
relative_path_to_root = os.path.relpath(
@@ -435,24 +458,25 @@ def _other_docs(src_dir, output_dir, reference_resolver, file_pattern='*.md'):
continue
full_in_path = os.path.join(dirpath, base_name)
+ # Set the `current_doc_full_name` so bad files can be reported on errors.
reference_resolver.current_doc_full_name = full_in_path
suffix = os.path.relpath(path=full_in_path, start=src_dir)
full_out_path = os.path.join(output_dir, suffix)
+ # Copy files that do not match the file_pattern, unmodified.
if not fnmatch.fnmatch(base_name, file_pattern):
shutil.copyfile(full_in_path, full_out_path)
continue
- if dirpath.endswith('/api_guides/python'):
- content = tag_updater.process(full_in_path)
- else:
- with open(full_in_path, 'rb') as f:
- content = f.read().decode('utf-8')
+
+ with open(full_in_path, 'rb') as f:
+ content = f.read().decode('utf-8')
content = reference_resolver.replace_references(content,
relative_path_to_root)
with open(full_out_path, 'wb') as f:
f.write(content.encode('utf-8'))
+
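+# For example, an "@{tf.TestModule.TestClass}" reference in a source .md file
+# becomes a relative <a href="..."> link in the copied output, while files not
+# matching `file_pattern` are copied through unchanged (see
+# generate_lib_test.py below for concrete expectations).
+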
class DocGenerator(object):
"""Main entry point for generating docs."""
@@ -538,15 +562,43 @@ class DocGenerator(object):
self._do_not_descend_map)
def build(self, flags):
- """Actually build the docs."""
+ """Build all the docs.
+
+ This produces two outputs:
+
+ python api docs:
+
+ * generated from modules set with `set_py_modules`.
+ * written to '{FLAGS.output_dir}/api_docs/python/'
+
+ non-api docs:
+
+ * Everything in '{FLAGS.src_dir}' is copied to '{FLAGS.output_dir}'.
+ * '@{}' references in '.md' files are replaced with links.
+ * '.md' files under 'api_guides/python' have explicit ids set for their
+ second level headings.
+
+ Args:
+ flags:
+ * src_dir: Where to fetch the non-api-docs.
+ * base_dir: Base of the docs directory (Used to build correct
+ relative links).
+ * output_dir: Where to write the resulting docs.
+
+ Returns:
+ The number of errors encountered while processing.
+ """
+ # Extract the python api from the _py_modules
doc_index = build_doc_index(flags.src_dir)
visitor = self.run_extraction()
reference_resolver = self.make_reference_resolver(visitor, doc_index)
+ # Build the guide_index for the api_docs back links.
root_title = getattr(flags, 'root_title', 'TensorFlow')
guide_index = _build_guide_index(
os.path.join(flags.src_dir, 'api_guides/python'))
+ # Write the api docs.
parser_config = self.make_parser_config(visitor, reference_resolver,
guide_index, flags.base_dir)
output_dir = os.path.join(flags.output_dir, 'api_docs/python')
@@ -557,8 +609,16 @@ class DocGenerator(object):
yaml_toc=self.yaml_toc,
root_title=root_title,
search_hints=getattr(flags, 'search_hints', True))
- _other_docs(flags.src_dir, flags.output_dir, reference_resolver)
+ # Replace all the @{} references in files under `FLAGS.src_dir`
+ replace_refs(flags.src_dir, flags.output_dir, reference_resolver, '*.md')
+ # Fix the tags in the guide dir.
+ guide_dir = os.path.join(flags.output_dir, 'api_guides/python')
+ if os.path.exists(guide_dir):
+ update_id_tags_inplace(guide_dir)
+
+ # Report all errors found by the reference resolver, and return the error
+ # code.
parser_config.reference_resolver.log_errors()
return parser_config.reference_resolver.num_errors()
diff --git a/tensorflow/tools/docs/generate_lib_test.py b/tensorflow/tools/docs/generate_lib_test.py
index ea6d28a02b..7a6f9fd9f7 100644
--- a/tensorflow/tools/docs/generate_lib_test.py
+++ b/tensorflow/tools/docs/generate_lib_test.py
@@ -51,7 +51,9 @@ class DummyVisitor(object):
class GenerateTest(googletest.TestCase):
- def test_write(self):
+ def get_test_objects(self):
+ # These are all mutable objects, so rebuild them for each test.
+ # Don't cache the objects.
module = sys.modules[__name__]
index = {
@@ -98,6 +100,11 @@ class GenerateTest(googletest.TestCase):
guide_index={},
base_dir=base_dir)
+ return reference_resolver, parser_config
+
+ def test_write(self):
+ _, parser_config = self.get_test_objects()
+
output_dir = googletest.GetTempDir()
generate_lib.write_docs(output_dir, parser_config, yaml_toc=True)
@@ -127,6 +134,107 @@ class GenerateTest(googletest.TestCase):
os.path.exists(
os.path.join(output_dir, 'tf/TestModule/test_function.md')))
+ def test_update_id_tags_inplace(self):
+ test_dir = googletest.GetTempDir()
+ test_sub_dir = os.path.join(test_dir, 'a/b')
+ os.makedirs(test_sub_dir)
+
+ test_path1 = os.path.join(test_dir, 'file1.md')
+ test_path2 = os.path.join(test_sub_dir, 'file2.md')
+ test_path3 = os.path.join(test_sub_dir, 'file3.notmd')
+
+ with open(test_path1, 'w') as f:
+ f.write('## abc&123')
+
+ with open(test_path2, 'w') as f:
+ f.write('# A Level 1 Heading\n')
+ f.write('## A Level 2 Heading')
+
+ with open(test_path3, 'w') as f:
+ f.write("## don\'t change this")
+
+ generate_lib.update_id_tags_inplace(test_dir)
+
+ with open(test_path1) as f:
+ content = f.read()
+
+ self.assertEqual(content, '<h2 id="abc_123">abc&123</h2>')
+
+ with open(test_path2) as f:
+ content = f.read()
+
+ self.assertEqual(
+ content, '# A Level 1 Heading\n'
+ '<h2 id="A_Level_2_Heading">A Level 2 Heading</h2>')
+
+ with open(test_path3) as f:
+ content = f.read()
+
+ self.assertEqual(content, "## don\'t change this")
+
+ def test_replace_refs(self):
+ test_dir = googletest.GetTempDir()
+ test_in_dir = os.path.join(test_dir, 'in')
+ test_in_dir_a = os.path.join(test_dir, 'in/a')
+ test_in_dir_b = os.path.join(test_dir, 'in/b')
+ os.makedirs(test_in_dir)
+ os.makedirs(test_in_dir_a)
+ os.makedirs(test_in_dir_b)
+
+ test_out_dir = os.path.join(test_dir, 'out')
+ os.makedirs(test_out_dir)
+
+ test_path1 = os.path.join(test_in_dir_a, 'file1.md')
+ test_path2 = os.path.join(test_in_dir_b, 'file2.md')
+ test_path3 = os.path.join(test_in_dir_b, 'file3.notmd')
+ test_path4 = os.path.join(test_in_dir_b, 'OWNERS')
+
+ with open(test_path1, 'w') as f:
+ f.write('Use `tf.test_function` to test things.')
+
+ with open(test_path2, 'w') as f:
+ f.write('Use @{tf.TestModule.TestClass.ChildClass} to test things.\n'
+ "`tf.whatever` doesn't exist")
+
+ with open(test_path3, 'w') as f:
+ file3_content = (
+ 'Not a .md file. Should be copied unchanged:'
+ '@{tf.TestModule.TestClass.ChildClass}, `tf.test_function`')
+ f.write(file3_content)
+
+ with open(test_path4, 'w') as f:
+ f.write('')
+
+ reference_resolver, _ = self.get_test_objects()
+ generate_lib.replace_refs(test_in_dir, test_out_dir, reference_resolver,
+ '*.md')
+
+ with open(os.path.join(test_out_dir, 'a/file1.md')) as f:
+ content = f.read()
+ self.assertEqual(
+ content,
+ 'Use <a href="../api_docs/python/tf/TestModule/test_function.md">'
+ '<code>tf.test_function</code></a> to test things.')
+
+ with open(os.path.join(test_out_dir, 'b/file2.md')) as f:
+ content = f.read()
+ self.assertEqual(
+ content,
+ 'Use '
+ '<a href="../api_docs/python/tf/TestModule/TestClass/ChildClass.md">'
+ '<code>tf.TestModule.TestClass.ChildClass</code></a> '
+ 'to test things.\n'
+ '`tf.whatever` doesn\'t exist')
+
+ with open(os.path.join(test_out_dir, 'b/file3.notmd')) as f:
+ content = f.read()
+ self.assertEqual(content, file3_content)
+
+ with self.assertRaises(IOError):
+ # This should fail. The OWNERS file should not be copied
+ with open(os.path.join(test_out_dir, 'b/OWNERS')) as f:
+ content = f.read()
+
if __name__ == '__main__':
googletest.main()
diff --git a/tensorflow/tools/lib_package/BUILD b/tensorflow/tools/lib_package/BUILD
index 05c23cd3ee..44d8a37a8f 100644
--- a/tensorflow/tools/lib_package/BUILD
+++ b/tensorflow/tools/lib_package/BUILD
@@ -115,6 +115,7 @@ genrule(
"//third_party/fft2d:LICENSE",
"@aws//:LICENSE",
"@boringssl//:LICENSE",
+ "@com_github_googlecloudplatform_google_cloud_cpp//:LICENSE",
"@com_googlesource_code_re2//:LICENSE",
"@cub_archive//:LICENSE.TXT",
"@curl//:COPYING",
@@ -142,6 +143,7 @@ genrule(
"@zlib_archive//:zlib.h",
] + if_mkl([
"//third_party/mkl:LICENSE",
+ "//third_party/mkl_dnn:LICENSE",
]),
outs = ["include/tensorflow/c/LICENSE"],
cmd = "$(location :concat_licenses.sh) $(SRCS) >$@",
@@ -156,6 +158,7 @@ genrule(
"//third_party/fft2d:LICENSE",
"@aws//:LICENSE",
"@boringssl//:LICENSE",
+ "@com_github_googlecloudplatform_google_cloud_cpp//:LICENSE",
"@com_googlesource_code_re2//:LICENSE",
"@cub_archive//:LICENSE.TXT",
"@curl//:COPYING",
@@ -180,6 +183,7 @@ genrule(
"@zlib_archive//:zlib.h",
] + if_mkl([
"//third_party/mkl:LICENSE",
+ "//third_party/mkl_dnn:LICENSE",
]),
outs = ["include/tensorflow/jni/LICENSE"],
cmd = "$(location :concat_licenses.sh) $(SRCS) >$@",
diff --git a/tensorflow/tools/pip_package/BUILD b/tensorflow/tools/pip_package/BUILD
index a0caf42331..e661fb1adc 100644
--- a/tensorflow/tools/pip_package/BUILD
+++ b/tensorflow/tools/pip_package/BUILD
@@ -11,7 +11,7 @@ load(
)
load("//third_party/mkl:build_defs.bzl", "if_mkl")
load("//tensorflow:tensorflow.bzl", "if_cuda")
-load("@local_config_tensorrt//:build_defs.bzl", "if_tensorrt")
+load("@local_config_syslibs//:build_defs.bzl", "if_not_system_lib")
load("//tensorflow/core:platform/default/build_config_root.bzl", "tf_additional_license_deps")
# This returns a list of headers of all public header libraries (e.g.,
@@ -104,6 +104,7 @@ COMMON_PIP_DEPS = [
"//tensorflow/python/kernel_tests/testdata:self_adjoint_eig_op_test_files",
"//tensorflow/python/saved_model:saved_model",
"//tensorflow/python/tools:tools_pip",
+ "//tensorflow/python/tools/api/generator:create_python_api",
"//tensorflow/python:test_ops",
"//tensorflow/tools/dist_test/server:grpc_tensorflow_server",
]
@@ -130,6 +131,8 @@ filegroup(
"@astor_archive//:LICENSE",
"@aws//:LICENSE",
"@boringssl//:LICENSE",
+ "@com_github_googleapis_googleapis//:LICENSE",
+ "@com_github_googlecloudplatform_google_cloud_cpp//:LICENSE",
"@com_google_absl//:LICENSE",
"@com_googlesource_code_re2//:LICENSE",
"@cub_archive//:LICENSE.TXT",
@@ -142,7 +145,6 @@ filegroup(
"@gast_archive//:PKG-INFO",
"@gemmlowp//:LICENSE",
"@gif_archive//:COPYING",
- "@grpc//:LICENSE",
"@highwayhash//:LICENSE",
"@jemalloc//:COPYING",
"@jpeg//:LICENSE.md",
@@ -151,8 +153,6 @@ filegroup(
"@lmdb//:LICENSE",
"@local_config_nccl//:LICENSE",
"@local_config_sycl//sycl:LICENSE.text",
- "@grpc//third_party/nanopb:LICENSE.txt",
- "@grpc//third_party/address_sorting:LICENSE",
"@nasm//:LICENSE",
"@nsync//:LICENSE",
"@pcre//:LICENCE",
@@ -166,7 +166,15 @@ filegroup(
"@org_python_pypi_backports_weakref//:LICENSE",
] + if_mkl([
"//third_party/mkl:LICENSE",
- ]) + tf_additional_license_deps(),
+ "//third_party/mkl_dnn:LICENSE",
+ ]) + if_not_system_lib(
+ "grpc",
+ [
+ "@grpc//:LICENSE",
+ "@grpc//third_party/nanopb:LICENSE.txt",
+ "@grpc//third_party/address_sorting:LICENSE",
+ ],
+ ) + tf_additional_license_deps(),
)
sh_binary(
@@ -181,9 +189,7 @@ sh_binary(
"//tensorflow/contrib/lite/python:tflite_convert",
"//tensorflow/contrib/lite/toco/python:toco_from_protos",
],
- }) + if_mkl(["//third_party/mkl:intel_binary_blob"]) + if_tensorrt([
- "//tensorflow/contrib/tensorrt:init_py",
- ]),
+ }) + if_mkl(["//third_party/mkl:intel_binary_blob"]),
)
# A genrule for generating a marker file for the pip package on Windows
diff --git a/tensorflow/tools/pip_package/build_pip_package.sh b/tensorflow/tools/pip_package/build_pip_package.sh
index 9e41514cfa..4101b34a11 100755
--- a/tensorflow/tools/pip_package/build_pip_package.sh
+++ b/tensorflow/tools/pip_package/build_pip_package.sh
@@ -27,7 +27,7 @@ function cp_external() {
pushd .
cd "$src_dir"
- for f in `find . ! -type d ! -name '*.py' ! -name '*local_config_cuda*' ! -name '*local_config_tensorrt*' ! -name '*org_tensorflow*'`; do
+ for f in `find . ! -type d ! -name '*.py' ! -path '*local_config_cuda*' ! -path '*local_config_tensorrt*' ! -path '*local_config_syslibs*' ! -path '*org_tensorflow*'`; do
mkdir -p "${dest_dir}/$(dirname ${f})"
cp "${f}" "${dest_dir}/$(dirname ${f})/"
done
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index 55cd4f37c6..c630ca04b8 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -53,7 +53,7 @@ REQUIRED_PACKAGES = [
'gast >= 0.2.0',
'numpy >= 1.13.3',
'six >= 1.10.0',
- 'protobuf >= 3.4.0',
+ 'protobuf >= 3.6.0',
'setuptools <= 39.1.0',
'tensorboard >= 1.8.0, < 1.9.0',
'termcolor >= 1.1.0',
@@ -170,8 +170,9 @@ class InstallHeaders(Command):
# symlink within the directory hierarchy.
# NOTE(keveman): Figure out how to customize bdist_wheel package so
# we can do the symlink.
- if 'external/eigen_archive/' in install_dir:
- extra_dir = install_dir.replace('external/eigen_archive', '')
+ if 'tensorflow/include/external/eigen_archive/' in install_dir:
+ extra_dir = install_dir.replace(
+ 'tensorflow/include/external/eigen_archive', '')
if not os.path.exists(extra_dir):
self.mkpath(extra_dir)
self.copy_file(header, extra_dir)
@@ -204,13 +205,12 @@ def find_files(pattern, root):
yield os.path.join(dirpath, filename)
-matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x]
-
so_lib_paths = [
i for i in os.listdir('.')
if os.path.isdir(i) and fnmatch.fnmatch(i, '_solib_*')
]
+matches = []
for path in so_lib_paths:
matches.extend(
['../' + x for x in find_files('*', path) if '.py' not in x]
@@ -225,7 +225,7 @@ headers = (list(find_files('*.h', 'tensorflow/core')) +
list(find_files('*.h', 'tensorflow/stream_executor')) +
list(find_files('*.h', 'google/protobuf_archive/src')) +
list(find_files('*', 'third_party/eigen3')) +
- list(find_files('*', 'external/eigen_archive')))
+ list(find_files('*', 'tensorflow/include/external/eigen_archive')))
setup(
name=project_name,
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index b963bdab30..378de4261c 100644
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -8,6 +8,7 @@ load("//third_party/git:git_configure.bzl", "git_configure")
load("//third_party/py:python_configure.bzl", "python_configure")
load("//third_party/sycl:sycl_configure.bzl", "sycl_configure")
+load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure")
load("//third_party/toolchains/clang6:repo.bzl", "clang6_configure")
load("//third_party/toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure")
load("//third_party:repo.bzl", "tf_http_archive")
@@ -35,6 +36,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
nccl_configure(name="local_config_nccl")
git_configure(name="local_config_git")
sycl_configure(name="local_config_sycl")
+ syslibs_configure(name="local_config_syslibs")
python_configure(name="local_config_python")
# For windows bazel build
@@ -161,6 +163,28 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
],
sha256 = "2f945446b71336e7f5a2bcace1abcf0b23fbba368266c6a1be33de3de3b3c912",
strip_prefix = "re2-2018-04-01",
+ system_build_file = clean_dep("//third_party/systemlibs:re2.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "com_github_googlecloudplatform_google_cloud_cpp",
+ urls = [
+ "https://mirror.bazel.build/github.com/GoogleCloudPlatform/google-cloud-cpp/archive/f875700a023bdd706333cde45aee8758b272c357.tar.gz",
+ "https://github.com/GoogleCloudPlatform/google-cloud-cpp/archive/f875700a023bdd706333cde45aee8758b272c357.tar.gz",
+ ],
+ sha256 = "a34f3c50b237686dc870b13baaa6a5836ce3473f2f2a02717299f0ff318372db",
+ strip_prefix = "google-cloud-cpp-f875700a023bdd706333cde45aee8758b272c357",
+ )
+
+ tf_http_archive(
+ name = "com_github_googleapis_googleapis",
+ urls = [
+ "https://mirror.bazel.build/github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
+ "https://github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
+ ],
+ sha256 = "824870d87a176f26bcef663e92051f532fac756d1a06b404055dc078425f4378",
+ strip_prefix="googleapis-f81082ea1e2f85c43649bee26e0d9871d4b41cdb",
+ build_file = clean_dep("//third_party:googleapis.BUILD"),
)
tf_http_archive(
@@ -198,13 +222,14 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
tf_http_archive(
name = "nasm",
urls = [
- "https://mirror.bazel.build/www.nasm.us/pub/nasm/releasebuilds/2.12.02/nasm-2.12.02.tar.bz2",
- "http://pkgs.fedoraproject.org/repo/pkgs/nasm/nasm-2.12.02.tar.bz2/d15843c3fb7db39af80571ee27ec6fad/nasm-2.12.02.tar.bz2",
- "http://www.nasm.us/pub/nasm/releasebuilds/2.12.02/nasm-2.12.02.tar.bz2",
+ "https://mirror.bazel.build/www.nasm.us/pub/nasm/releasebuilds/2.13.03/nasm-2.13.03.tar.bz2",
+ "http://pkgs.fedoraproject.org/repo/pkgs/nasm/nasm-2.13.03.tar.bz2/sha512/d7a6b4cee8dfd603d8d4c976e5287b5cc542fa0b466ff989b743276a6e28114e64289bf02a7819eca63142a5278aa6eed57773007e5f589e15768e6456a8919d/nasm-2.13.03.tar.bz2",
+ "http://www.nasm.us/pub/nasm/releasebuilds/2.13.03/nasm-2.13.03.tar.bz2",
],
- sha256 = "00b0891c678c065446ca59bcee64719d0096d54d6886e6e472aeee2e170ae324",
- strip_prefix = "nasm-2.12.02",
+ sha256 = "63ec86477ad3f0f6292325fd89e1d93aea2e2fd490070863f17d48f7cd387011",
+ strip_prefix = "nasm-2.13.03",
build_file = clean_dep("//third_party:nasm.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:nasm.BUILD"),
)
tf_http_archive(
@@ -216,6 +241,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "1a17020f859cb12711175a67eab5c71fc1904e04b587046218e36106e07eabde",
strip_prefix = "libjpeg-turbo-1.5.3",
build_file = clean_dep("//third_party/jpeg:jpeg.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:jpeg.BUILD"),
)
tf_http_archive(
@@ -228,17 +254,19 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
strip_prefix = "libpng-1.6.34",
build_file = clean_dep("//third_party:png.BUILD"),
patch_file = clean_dep("//third_party:png_fix_rpi.patch"),
+ system_build_file = clean_dep("//third_party/systemlibs:png.BUILD"),
)
tf_http_archive(
name = "org_sqlite",
urls = [
- "https://mirror.bazel.build/www.sqlite.org/2018/sqlite-amalgamation-3230100.zip",
- "https://www.sqlite.org/2018/sqlite-amalgamation-3230100.zip",
+ "https://mirror.bazel.build/www.sqlite.org/2018/sqlite-amalgamation-3240000.zip",
+ "https://www.sqlite.org/2018/sqlite-amalgamation-3240000.zip",
],
- sha256 = "4239a1f69e5721d07d9a374eb84d594225229e54be4ee628da2995f4315d8dfc",
- strip_prefix = "sqlite-amalgamation-3230100",
+ sha256 = "ad68c1216c3a474cf360c7581a4001e952515b3649342100f2d7ca7c8e313da6",
+ strip_prefix = "sqlite-amalgamation-3240000",
build_file = clean_dep("//third_party:sqlite.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"),
)
tf_http_archive(
@@ -250,6 +278,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "34a7377ba834397db019e8eb122e551a49c98f49df75ec3fcc92b9a794a4f6d1",
strip_prefix = "giflib-5.1.4",
build_file = clean_dep("//third_party:gif.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:gif.BUILD"),
)
tf_http_archive(
@@ -261,6 +290,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a",
strip_prefix = "six-1.10.0",
build_file = clean_dep("//third_party:six.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:six.BUILD"),
)
tf_http_archive(
@@ -272,6 +302,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "ff6d2e2962d834acb125cc4dcc80c54a8c17c253f4cc9d9c43b5102a560bb75d",
strip_prefix = "astor-0.6.2",
build_file = clean_dep("//third_party:astor.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:astor.BUILD"),
)
tf_http_archive(
@@ -294,6 +325,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
strip_prefix = "termcolor-1.1.0",
build_file = clean_dep("//third_party:termcolor.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:termcolor.BUILD"),
)
tf_http_archive(
@@ -364,11 +396,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
tf_http_archive(
name = "nsync",
urls = [
- "https://mirror.bazel.build/github.com/google/nsync/archive/5e8b19a81e5729922629dd505daa651f6ffdf107.tar.gz",
- "https://github.com/google/nsync/archive/5e8b19a81e5729922629dd505daa651f6ffdf107.tar.gz",
+ "https://mirror.bazel.build/github.com/google/nsync/archive/1.20.0.tar.gz",
+ "https://github.com/google/nsync/archive/1.20.0.tar.gz",
],
- sha256 = "2723e6db509779fcf05bd01556e51f2e5179197e2c864cd8010f6b7100a5b1e1",
- strip_prefix = "nsync-5e8b19a81e5729922629dd505daa651f6ffdf107",
+ sha256 = "0c1b03962b2f8450f21e74a5a46116bf2d6009a807c57eb4207e974a8c4bb7dd",
+ strip_prefix = "nsync-1.20.0",
)
tf_http_archive(
@@ -384,11 +416,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
tf_http_archive(
name = "com_github_gflags_gflags",
urls = [
- "https://mirror.bazel.build/github.com/gflags/gflags/archive/f8a0efe03aa69b3336d8e228b37d4ccb17324b88.tar.gz",
- "https://github.com/gflags/gflags/archive/f8a0efe03aa69b3336d8e228b37d4ccb17324b88.tar.gz",
+ "https://mirror.bazel.build/github.com/gflags/gflags/archive/v2.2.1.tar.gz",
+ "https://github.com/gflags/gflags/archive/v2.2.1.tar.gz",
],
- sha256 = "4d222fab8f1ede4709cdff417d15a1336f862d7334a81abf76d09c15ecf9acd1",
- strip_prefix = "gflags-f8a0efe03aa69b3336d8e228b37d4ccb17324b88",
+ sha256 = "ae27cdbcd6a2f935baa78e4f21f675649271634c092b1be01469440495609d0e",
+ strip_prefix = "gflags-2.2.1",
)
tf_http_archive(
@@ -400,6 +432,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
],
strip_prefix = "pcre-8.42",
build_file = clean_dep("//third_party:pcre.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"),
)
tf_http_archive(
@@ -412,6 +445,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
],
strip_prefix = "swig-3.0.8",
build_file = clean_dep("//third_party:swig.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:swig.BUILD"),
)
tf_http_archive(
@@ -423,19 +457,20 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
],
strip_prefix = "curl-7.60.0",
build_file = clean_dep("//third_party:curl.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"),
)
tf_http_archive(
name = "grpc",
urls = [
- "https://mirror.bazel.build/github.com/grpc/grpc/archive/d184fa229d75d336aedea0041bd59cb93e7e267f.tar.gz",
- "https://github.com/grpc/grpc/archive/d184fa229d75d336aedea0041bd59cb93e7e267f.tar.gz",
+ "https://mirror.bazel.build/github.com/grpc/grpc/archive/v1.13.0.tar.gz",
+ "https://github.com/grpc/grpc/archive/v1.13.0.tar.gz",
],
- sha256 = "895b31310e718a61f7335759a778c068a6edde1c089883598a0830cbb7075673",
- strip_prefix = "grpc-d184fa229d75d336aedea0041bd59cb93e7e267f",
+ sha256 = "50db9cf2221354485eb7c3bd55a4c27190caef7048a2a1a15fbe60a498f98b44",
+ strip_prefix = "grpc-1.13.0",
+ system_build_file = clean_dep("//third_party/systemlibs:grpc.BUILD"),
)
-
tf_http_archive(
name = "linenoise",
sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
@@ -452,11 +487,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
tf_http_archive(
name = "llvm",
urls = [
- "https://mirror.bazel.build/github.com/llvm-mirror/llvm/archive/7f7cea53068238fca7b7e4299793a0c77bea7219.tar.gz",
- "https://github.com/llvm-mirror/llvm/archive/7f7cea53068238fca7b7e4299793a0c77bea7219.tar.gz",
+ "https://mirror.bazel.build/github.com/llvm-mirror/llvm/archive/bd8c8d759852871609ba2e4e79868420f751949d.tar.gz",
+ "https://github.com/llvm-mirror/llvm/archive/bd8c8d759852871609ba2e4e79868420f751949d.tar.gz",
],
- sha256 = "b645507080e07c845607f212d45e4ee79253c3c9b762531f51fbaeceb6b47391",
- strip_prefix = "llvm-7f7cea53068238fca7b7e4299793a0c77bea7219",
+ sha256 = "0c63e8583b213543309e8577ffe87a0cf34cc22269630d2c5c2f0a2345fda4a8",
+ strip_prefix = "llvm-bd8c8d759852871609ba2e4e79868420f751949d",
build_file = clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"),
)
@@ -469,6 +504,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
build_file = clean_dep("//third_party:lmdb.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:lmdb.BUILD"),
)
tf_http_archive(
@@ -480,6 +516,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6",
strip_prefix = "jsoncpp-1.8.4",
build_file = clean_dep("//third_party:jsoncpp.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"),
)
tf_http_archive(
@@ -501,6 +538,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
build_file = clean_dep("//third_party:zlib.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:zlib.BUILD"),
)
tf_http_archive(
@@ -522,6 +560,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4",
strip_prefix = "snappy-1.1.7",
build_file = clean_dep("//third_party:snappy.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:snappy.BUILD"),
)
tf_http_archive(
@@ -538,11 +577,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
tf_http_archive(
name = "kafka",
urls = [
- "https://mirror.bazel.build/github.com/edenhill/librdkafka/archive/v0.11.1.tar.gz",
- "https://github.com/edenhill/librdkafka/archive/v0.11.1.tar.gz",
+ "https://mirror.bazel.build/github.com/edenhill/librdkafka/archive/v0.11.4.tar.gz",
+ "https://github.com/edenhill/librdkafka/archive/v0.11.4.tar.gz",
],
- sha256 = "dd035d57c8f19b0b612dd6eefe6e5eebad76f506e302cccb7c2066f25a83585e",
- strip_prefix = "librdkafka-0.11.1",
+ sha256 = "9d8f1eb7b0e29e9ab1168347c939cb7ae5dff00a39cef99e7ef033fd8f92737c",
+ strip_prefix = "librdkafka-0.11.4",
build_file = clean_dep("//third_party:kafka/BUILD"),
patch_file = clean_dep("//third_party/kafka:config.patch"),
)
@@ -592,6 +631,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "3c8f25c02e806c3ce0ab5fb7da1817f89fc9732709024e2a81b6b82f7cc792a8",
strip_prefix = "jemalloc-4.4.0",
build_file = clean_dep("//third_party:jemalloc.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:jemalloc.BUILD"),
)
java_import_external(
@@ -662,24 +702,25 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
tf_http_archive(
name = "cython",
- sha256 = "6dcd30b5ceb887b2b965ee7ceb82ea3acb5f0642fe2206c7636b45acea4798e5",
+ sha256 = "bccc9aa050ea02595b2440188813b936eaf345e85fb9692790cecfe095cf91aa",
urls = [
- "https://mirror.bazel.build/github.com/cython/cython/archive/3732784c45cfb040a5b0936951d196f83a12ea17.tar.gz",
- "https://github.com/cython/cython/archive/3732784c45cfb040a5b0936951d196f83a12ea17.tar.gz",
+ "https://mirror.bazel.build/github.com/cython/cython/archive/0.28.4.tar.gz",
+ "https://github.com/cython/cython/archive/0.28.4.tar.gz",
],
- strip_prefix = "cython-3732784c45cfb040a5b0936951d196f83a12ea17",
+ strip_prefix = "cython-0.28.4",
build_file = clean_dep("//third_party:cython.BUILD"),
delete = ["BUILD.bazel"],
+ system_build_file = clean_dep("//third_party/systemlibs:cython.BUILD"),
)
tf_http_archive(
name = "bazel_toolchains",
urls = [
- "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/44200e0c026d86c53470d107b3697a3e46469c43.tar.gz",
- "https://github.com/bazelbuild/bazel-toolchains/archive/44200e0c026d86c53470d107b3697a3e46469c43.tar.gz",
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/37acf1841ab1475c98a152cb9e446460c8ae29e1.tar.gz",
+ "https://github.com/bazelbuild/bazel-toolchains/archive/37acf1841ab1475c98a152cb9e446460c8ae29e1.tar.gz",
],
- strip_prefix = "bazel-toolchains-44200e0c026d86c53470d107b3697a3e46469c43",
- sha256 = "699b55a6916c687f4b7dc092dbbf5f64672cde0dc965f79717735ec4e5416556",
+ strip_prefix = "bazel-toolchains-37acf1841ab1475c98a152cb9e446460c8ae29e1",
+ sha256 = "3b604699685c5c65dd3f6f17425570a4b2f00ddba2f750db15acc72e55bb098b",
)
tf_http_archive(
@@ -702,6 +743,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
"https://github.com/google/flatbuffers/archive/v1.9.0.tar.gz",
],
build_file = clean_dep("//third_party/flatbuffers:flatbuffers.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:flatbuffers.BUILD"),
)
native.new_http_archive(
@@ -733,6 +775,14 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
],
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
)
+ tf_http_archive(
+ name = "tflite_mobilenet_ssd_quant",
+ sha256 = "a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc",
+ urls = ["https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
+ "https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
+ ],
+ build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
+ )
tf_http_archive(
name = "tflite_conv_actions_frozen",
diff --git a/third_party/aws.BUILD b/third_party/aws.BUILD
index 2dc921933c..5426f79e46 100644
--- a/third_party/aws.BUILD
+++ b/third_party/aws.BUILD
@@ -46,6 +46,8 @@ cc_library(
"aws-cpp-sdk-core/source/utils/xml/**/*.cpp",
"aws-cpp-sdk-core/source/utils/crypto/*.cpp",
"aws-cpp-sdk-core/source/utils/crypto/factory/**/*.cpp",
+ "aws-cpp-sdk-kinesis/include/**/*.h",
+ "aws-cpp-sdk-kinesis/source/**/*.cpp",
"aws-cpp-sdk-s3/include/**/*.h",
"aws-cpp-sdk-s3/source/**/*.cpp",
]),
@@ -72,6 +74,7 @@ cc_library(
}),
includes = [
"aws-cpp-sdk-core/include/",
+ "aws-cpp-sdk-kinesis/include/",
"aws-cpp-sdk-s3/include/",
],
deps = [
diff --git a/third_party/clang_toolchain/download_clang.bzl b/third_party/clang_toolchain/download_clang.bzl
index b61e901037..ab57b9dfa0 100644
--- a/third_party/clang_toolchain/download_clang.bzl
+++ b/third_party/clang_toolchain/download_clang.bzl
@@ -35,18 +35,18 @@ def download_clang(repo_ctx, out_folder):
  # Latest CLANG_REVISION and CLANG_SUB_REVISION of the Chromium release
# can be found in https://chromium.googlesource.com/chromium/src/tools/clang/+/master/scripts/update.py
- CLANG_REVISION = '334100'
+ CLANG_REVISION = '336424'
CLANG_SUB_REVISION = 1
package_version = '%s-%s' % (CLANG_REVISION, CLANG_SUB_REVISION)
checksums = {
'Linux_x64':
- '3c57420b591601cd14b5babd74b58fcaefa877112938d70cca6f0a1b0b293ab4',
+ '2ea97e047470da648f5d078af008bce6891287592382cee3d53a1187d996da94',
'Mac':
- '97d313996fb97a6138635f963d7ef4efa9f028a8168bb7917cc428b9eab05ebb',
+ 'c6e28909cce63ee35e0d51284d9f0f6e8838f7fb8b7a0dc9536c2ea900552df0',
'Win':
- '52c1d6d20a0733276597f4ced59d18b545769dbf8beb8c6bdc26a7a862da7fc9',
+ '1299fda7c4378bfb81337f7e5f351c8a1f953f51e0744e2170454b8d722f3db7',
}
platform_folder = _get_platform_folder(repo_ctx.os.name)
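Each platform key above is paired with the sha256 that the repository rule verifies after downloading the package. Roughly, the check amounts to the following (standalone illustration, not code from download_clang.bzl):

import hashlib

def verify_package(path, platform, checksums):
    # Hash the downloaded archive and compare against the pinned digest.
    with open(path, 'rb') as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    if digest != checksums[platform]:
        raise RuntimeError('checksum mismatch for %s: got %s' % (platform, digest))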
diff --git a/third_party/codegen.BUILD b/third_party/codegen.BUILD
new file mode 100644
index 0000000000..df436c8163
--- /dev/null
+++ b/third_party/codegen.BUILD
@@ -0,0 +1,16 @@
+# -*- mode: python; -*-
+#
+# Description:
+#   Extension to ast that allows ast -> python code generation.
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"]) # New BSD
+
+exports_files(["LICENSE"])
+
+py_library(
+ name = "com_github_andreif_codegen",
+ srcs = glob(["codegen.py"]),
+ srcs_version = "PY2AND3",
+)
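For context, the codegen module wrapped by this target exposes a single entry point for turning an AST back into source text; typical use looks like this (illustrative, assuming the py_library is on the Python path):

import ast
import codegen  # provided by the py_library above

tree = ast.parse('x = 1 + 2')
print(codegen.to_source(tree))  # prints: x = 1 + 2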
diff --git a/third_party/eigen.BUILD b/third_party/eigen.BUILD
index e54c1a4501..759f8a9be9 100644
--- a/third_party/eigen.BUILD
+++ b/third_party/eigen.BUILD
@@ -69,3 +69,9 @@ cc_library(
includes = ["."],
visibility = ["//visibility:public"],
)
+
+filegroup(
+ name = "eigen_header_files",
+ srcs = EIGEN_MPL2_HEADER_FILES,
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/eigen3/BUILD b/third_party/eigen3/BUILD
index f661093bc9..203991b50f 100644
--- a/third_party/eigen3/BUILD
+++ b/third_party/eigen3/BUILD
@@ -17,21 +17,23 @@ load("//tensorflow:tensorflow.bzl", "if_mkl")
# INTEL_MKL end
load("//tensorflow:tensorflow.bzl", "if_mkl")
+EIGEN3_THIRD_PARTY_HEADERS = [
+ "Eigen/Core",
+ "Eigen/LU",
+ "Eigen/Cholesky",
+ "Eigen/Eigenvalues",
+ "Eigen/QR",
+ "Eigen/SVD",
+ "unsupported/Eigen/MatrixFunctions",
+ "unsupported/Eigen/SpecialFunctions",
+ "unsupported/Eigen/CXX11/ThreadPool",
+ "unsupported/Eigen/CXX11/Tensor",
+ "unsupported/Eigen/CXX11/FixedPoint",
+] + glob(["unsupported/Eigen/CXX11/src/FixedPoint/*.h"])
+
cc_library(
name = "eigen3",
- hdrs = glob(["unsupported/Eigen/CXX11/src/FixedPoint/*.h"]) + [
- "Eigen/Core",
- "Eigen/LU",
- "Eigen/Cholesky",
- "Eigen/Eigenvalues",
- "Eigen/QR",
- "Eigen/SVD",
- "unsupported/Eigen/MatrixFunctions",
- "unsupported/Eigen/SpecialFunctions",
- "unsupported/Eigen/CXX11/ThreadPool",
- "unsupported/Eigen/CXX11/Tensor",
- "unsupported/Eigen/CXX11/FixedPoint",
- ],
+ hdrs = EIGEN3_THIRD_PARTY_HEADERS,
includes = if_mkl(["./mkl_include"]),
visibility = ["//visibility:public"],
deps = [
@@ -48,3 +50,35 @@ filegroup(
),
visibility = ["//tensorflow:__subpackages__"],
)
+
+filegroup(
+ name = "eigen_third_party_header_files",
+ srcs = EIGEN3_THIRD_PARTY_HEADERS,
+ visibility = ["//visibility:public"],
+)
+
+genrule(
+ name = "install_eigen_headers",
+ srcs = [
+ "@eigen_archive//:eigen_header_files",
+ ":eigen_third_party_header_files",
+ ],
+ outs = ["include"],
+ cmd = """
+ mkdir $@
+ for f in $(locations @eigen_archive//:eigen_header_files) ; do
+ d="$${f%/*}"
+ d="$${d#*external/eigen_archive/}"
+
+ mkdir -p "$@/$${d}"
+ cp "$${f}" "$@/$${d}/"
+ done
+
+ for f in $(locations :eigen_third_party_header_files) ; do
+ d="$${f%/*}"
+
+ mkdir -p "$@/$${d}"
+ cp "$${f}" "$@/$${d}/"
+ done
+ """,
+)
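The genrule's shell fragment rebuilds each header's destination directory with two parameter expansions. The same logic in Python, purely for clarity:

def dest_dir(f):
    d = f.rsplit('/', 1)[0]                # d="${f%/*}": drop the filename
    marker = 'external/eigen_archive/'
    if marker in d:                        # d="${d#*external/eigen_archive/}"
        d = d.split(marker, 1)[1]          # drop everything through the repo prefix
    return d

# e.g. 'external/eigen_archive/Eigen/src/Core/Map.h' -> 'Eigen/src/Core'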
diff --git a/third_party/googleapis.BUILD b/third_party/googleapis.BUILD
new file mode 100644
index 0000000000..95e999af18
--- /dev/null
+++ b/third_party/googleapis.BUILD
@@ -0,0 +1,45 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+package(default_visibility = ["//visibility:public"])
+licenses(["notice"]) # Apache 2.0
+exports_files(["LICENSE"])
+
+load("@protobuf_archive//:protobuf.bzl", "cc_proto_library")
+
+cc_proto_library(
+ name = "bigtable_protos",
+ srcs = [
+ "google/bigtable/admin/v2/bigtable_instance_admin.proto",
+ "google/bigtable/admin/v2/bigtable_table_admin.proto",
+ "google/bigtable/admin/v2/common.proto",
+ "google/bigtable/admin/v2/instance.proto",
+ "google/bigtable/admin/v2/table.proto",
+ "google/bigtable/v2/bigtable.proto",
+ "google/bigtable/v2/data.proto",
+ "google/iam/v1/iam_policy.proto",
+ "google/iam/v1/policy.proto",
+ "google/longrunning/operations.proto",
+ "google/rpc/status.proto",
+ "google/rpc/error_details.proto",
+ "google/api/annotations.proto",
+ "google/api/auth.proto",
+ "google/api/http.proto",
+ ],
+ include = ".",
+ protoc = "@protobuf_archive//:protoc",
+ default_runtime = "@protobuf_archive//:protobuf",
+ deps = ["@protobuf_archive//:cc_wkt_protos"],
+ use_grpc_plugin = True,
+)
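A dependent target links the generated client stubs like any cc_library; a sketch, assuming the archive is bound under the workspace name `com_github_googleapis_googleapis` (the label below is illustrative):

cc_library(
    name = "bigtable_client",
    srcs = ["bigtable_client.cc"],
    deps = ["@com_github_googleapis_googleapis//:bigtable_protos"],
)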
diff --git a/third_party/gpus/crosstool/BUILD.tpl b/third_party/gpus/crosstool/BUILD.tpl
index 98cb326572..f638756d23 100644
--- a/third_party/gpus/crosstool/BUILD.tpl
+++ b/third_party/gpus/crosstool/BUILD.tpl
@@ -7,6 +7,7 @@ cc_toolchain_suite(
toolchains = {
"local|compiler": ":cc-compiler-local",
"darwin|compiler": ":cc-compiler-darwin",
+ "x64_windows|msvc-cl": ":cc-compiler-windows",
},
)
@@ -42,6 +43,20 @@ cc_toolchain(
supports_param_files = 0,
)
+cc_toolchain(
+ name = "cc-compiler-windows",
+ all_files = "%{win_linker_files}",
+ compiler_files = ":empty",
+ cpu = "x64_windows",
+ dwp_files = ":empty",
+ dynamic_runtime_libs = [":empty"],
+ linker_files = "%{win_linker_files}",
+ objcopy_files = ":empty",
+ static_runtime_libs = [":empty"],
+ strip_files = ":empty",
+ supports_param_files = 1,
+)
+
filegroup(
name = "empty",
srcs = [],
@@ -51,3 +66,8 @@ filegroup(
name = "crosstool_wrapper_driver_is_not_gcc",
srcs = ["clang/bin/crosstool_wrapper_driver_is_not_gcc"],
)
+
+filegroup(
+ name = "windows_msvc_wrapper_files",
+ srcs = glob(["windows/msvc_*"]),
+)
diff --git a/third_party/gpus/crosstool/CROSSTOOL.tpl b/third_party/gpus/crosstool/CROSSTOOL.tpl
index 1424ff6511..3972c96a2f 100644
--- a/third_party/gpus/crosstool/CROSSTOOL.tpl
+++ b/third_party/gpus/crosstool/CROSSTOOL.tpl
@@ -22,6 +22,10 @@ default_toolchain {
cpu: "ppc"
toolchain_identifier: "local_linux"
}
+default_toolchain {
+ cpu: "x64_windows"
+ toolchain_identifier: "local_windows"
+}
toolchain {
abi_version: "local"
@@ -537,3 +541,868 @@ toolchain {
%{host_compiler_includes}
}
+
+toolchain {
+ toolchain_identifier: "local_windows"
+ host_system_name: "local"
+ target_system_name: "local"
+
+ abi_version: "local"
+ abi_libc_version: "local"
+ target_cpu: "x64_windows"
+ compiler: "msvc-cl"
+ target_libc: "msvcrt"
+
+%{cxx_builtin_include_directory}
+
+ tool_path {
+ name: "ar"
+ path: "%{msvc_lib_path}"
+ }
+ tool_path {
+ name: "ml"
+ path: "%{msvc_ml_path}"
+ }
+ tool_path {
+ name: "cpp"
+ path: "%{msvc_cl_path}"
+ }
+ tool_path {
+ name: "gcc"
+ path: "%{msvc_cl_path}"
+ }
+ tool_path {
+ name: "gcov"
+ path: "wrapper/bin/msvc_nop.bat"
+ }
+ tool_path {
+ name: "ld"
+ path: "%{msvc_link_path}"
+ }
+ tool_path {
+ name: "nm"
+ path: "wrapper/bin/msvc_nop.bat"
+ }
+ tool_path {
+ name: "objcopy"
+ path: "wrapper/bin/msvc_nop.bat"
+ }
+ tool_path {
+ name: "objdump"
+ path: "wrapper/bin/msvc_nop.bat"
+ }
+ tool_path {
+ name: "strip"
+ path: "wrapper/bin/msvc_nop.bat"
+ }
+ supports_interface_shared_objects: true
+
+  # TODO(pcloudy): Review the flags below; they should be defined by cl.exe
+ compiler_flag: "/DCOMPILER_MSVC"
+
+ # Don't define min/max macros in windows.h.
+ compiler_flag: "/DNOMINMAX"
+
+ # Platform defines.
+ compiler_flag: "/D_WIN32_WINNT=0x0600"
+ # Turn off warning messages.
+ compiler_flag: "/D_CRT_SECURE_NO_DEPRECATE"
+ compiler_flag: "/D_CRT_SECURE_NO_WARNINGS"
+ compiler_flag: "/D_SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS"
+
+ # Useful options to have on for compilation.
+ # Increase the capacity of object files to 2^32 sections.
+ compiler_flag: "/bigobj"
+  # Allocate 500MB for precompiled headers.
+ compiler_flag: "/Zm500"
+ # Use unsigned char by default.
+ compiler_flag: "/J"
+ # Use function level linking.
+ compiler_flag: "/Gy"
+ # Use string pooling.
+ compiler_flag: "/GF"
+ # Catch C++ exceptions only and tell the compiler to assume that functions declared
+ # as extern "C" never throw a C++ exception.
+ compiler_flag: "/EHsc"
+
+ # Globally disabled warnings.
+  # Don't warn about elements of array being default initialized.
+ compiler_flag: "/wd4351"
+ # Don't warn about no matching delete found.
+ compiler_flag: "/wd4291"
+ # Don't warn about diamond inheritance patterns.
+ compiler_flag: "/wd4250"
+ # Don't warn about insecure functions (e.g. non _s functions).
+ compiler_flag: "/wd4996"
+
+ linker_flag: "/MACHINE:X64"
+
+ feature {
+ name: "no_legacy_features"
+ }
+
+ # Suppress startup banner.
+ feature {
+ name: "nologo"
+ flag_set {
+ action: "c-compile"
+ action: "c++-compile"
+ action: "c++-module-compile"
+ action: "c++-module-codegen"
+ action: "c++-header-parsing"
+ action: "assemble"
+ action: "preprocess-assemble"
+ action: "c++-link-executable"
+ action: "c++-link-dynamic-library"
+ action: "c++-link-nodeps-dynamic-library"
+ action: "c++-link-static-library"
+ flag_group {
+ flag: "/nologo"
+ }
+ }
+ }
+
+ feature {
+ name: 'has_configured_linker_path'
+ }
+
+  # This feature indicates strip is not supported; building a stripped binary will just produce a copy of the original binary
+ feature {
+ name: 'no_stripping'
+ }
+
+ # This feature indicates this is a toolchain targeting Windows.
+ feature {
+ name: 'targets_windows'
+ implies: 'copy_dynamic_libraries_to_binary'
+ enabled: true
+ }
+
+ feature {
+ name: 'copy_dynamic_libraries_to_binary'
+ }
+
+ action_config {
+ config_name: 'assemble'
+ action_name: 'assemble'
+ tool {
+ tool_path: '%{msvc_ml_path}'
+ }
+ implies: 'compiler_input_flags'
+ implies: 'compiler_output_flags'
+ implies: 'nologo'
+ implies: 'msvc_env'
+ implies: 'sysroot'
+ }
+
+ action_config {
+ config_name: 'preprocess-assemble'
+ action_name: 'preprocess-assemble'
+ tool {
+ tool_path: '%{msvc_ml_path}'
+ }
+ implies: 'compiler_input_flags'
+ implies: 'compiler_output_flags'
+ implies: 'nologo'
+ implies: 'msvc_env'
+ implies: 'sysroot'
+ }
+
+ action_config {
+ config_name: 'c-compile'
+ action_name: 'c-compile'
+ tool {
+ tool_path: '%{msvc_cl_path}'
+ }
+ implies: 'compiler_input_flags'
+ implies: 'compiler_output_flags'
+ implies: 'legacy_compile_flags'
+ implies: 'nologo'
+ implies: 'msvc_env'
+ implies: 'parse_showincludes'
+ implies: 'user_compile_flags'
+ implies: 'sysroot'
+ implies: 'unfiltered_compile_flags'
+ }
+
+ action_config {
+ config_name: 'c++-compile'
+ action_name: 'c++-compile'
+ tool {
+ tool_path: '%{msvc_cl_path}'
+ }
+ implies: 'compiler_input_flags'
+ implies: 'compiler_output_flags'
+ implies: 'legacy_compile_flags'
+ implies: 'nologo'
+ implies: 'msvc_env'
+ implies: 'parse_showincludes'
+ implies: 'user_compile_flags'
+ implies: 'sysroot'
+ implies: 'unfiltered_compile_flags'
+ }
+
+ action_config {
+ config_name: 'c++-link-executable'
+ action_name: 'c++-link-executable'
+ tool {
+ tool_path: '%{msvc_link_path}'
+ }
+ implies: 'nologo'
+ implies: 'linkstamps'
+ implies: 'output_execpath_flags'
+ implies: 'input_param_flags'
+ implies: 'user_link_flags'
+ implies: 'legacy_link_flags'
+ implies: 'linker_subsystem_flag'
+ implies: 'linker_param_file'
+ implies: 'msvc_env'
+ implies: 'no_stripping'
+ }
+
+ action_config {
+ config_name: 'c++-link-dynamic-library'
+ action_name: 'c++-link-dynamic-library'
+ tool {
+ tool_path: '%{msvc_link_path}'
+ }
+ implies: 'nologo'
+ implies: 'shared_flag'
+ implies: 'linkstamps'
+ implies: 'output_execpath_flags'
+ implies: 'input_param_flags'
+ implies: 'user_link_flags'
+ implies: 'legacy_link_flags'
+ implies: 'linker_subsystem_flag'
+ implies: 'linker_param_file'
+ implies: 'msvc_env'
+ implies: 'no_stripping'
+ implies: 'has_configured_linker_path'
+ implies: 'def_file'
+ }
+
+ action_config {
+ config_name: 'c++-link-nodeps-dynamic-library'
+ action_name: 'c++-link-nodeps-dynamic-library'
+ tool {
+ tool_path: '%{msvc_link_path}'
+ }
+ implies: 'nologo'
+ implies: 'shared_flag'
+ implies: 'linkstamps'
+ implies: 'output_execpath_flags'
+ implies: 'input_param_flags'
+ implies: 'user_link_flags'
+ implies: 'legacy_link_flags'
+ implies: 'linker_subsystem_flag'
+ implies: 'linker_param_file'
+ implies: 'msvc_env'
+ implies: 'no_stripping'
+ implies: 'has_configured_linker_path'
+ implies: 'def_file'
+ }
+
+ action_config {
+ config_name: 'c++-link-static-library'
+ action_name: 'c++-link-static-library'
+ tool {
+ tool_path: '%{msvc_lib_path}'
+ }
+ implies: 'nologo'
+ implies: 'archiver_flags'
+ implies: 'input_param_flags'
+ implies: 'linker_param_file'
+ implies: 'msvc_env'
+ }
+
+ # TODO(b/65151735): Remove legacy_compile_flags feature when legacy fields are
+ # not used in this crosstool
+ feature {
+ name: 'legacy_compile_flags'
+ flag_set {
+ expand_if_all_available: 'legacy_compile_flags'
+ action: 'preprocess-assemble'
+ action: 'c-compile'
+ action: 'c++-compile'
+ action: 'c++-header-parsing'
+ action: 'c++-module-compile'
+ action: 'c++-module-codegen'
+ flag_group {
+ iterate_over: 'legacy_compile_flags'
+ flag: '%{legacy_compile_flags}'
+ }
+ }
+ }
+
+ feature {
+ name: "msvc_env"
+ env_set {
+ action: "c-compile"
+ action: "c++-compile"
+ action: "c++-module-compile"
+ action: "c++-module-codegen"
+ action: "c++-header-parsing"
+ action: "assemble"
+ action: "preprocess-assemble"
+ action: "c++-link-executable"
+ action: "c++-link-dynamic-library"
+ action: "c++-link-nodeps-dynamic-library"
+ action: "c++-link-static-library"
+ env_entry {
+ key: "PATH"
+ value: "%{msvc_env_path}"
+ }
+ env_entry {
+ key: "INCLUDE"
+ value: "%{msvc_env_include}"
+ }
+ env_entry {
+ key: "LIB"
+ value: "%{msvc_env_lib}"
+ }
+ env_entry {
+ key: "TMP"
+ value: "%{msvc_env_tmp}"
+ }
+ env_entry {
+ key: "TEMP"
+ value: "%{msvc_env_tmp}"
+ }
+ }
+ }
+
+ feature {
+ name: 'include_paths'
+ flag_set {
+ action: "assemble"
+ action: 'preprocess-assemble'
+ action: 'c-compile'
+ action: 'c++-compile'
+ action: 'c++-header-parsing'
+ action: 'c++-module-compile'
+ flag_group {
+ iterate_over: 'quote_include_paths'
+ flag: '/I%{quote_include_paths}'
+ }
+ flag_group {
+ iterate_over: 'include_paths'
+ flag: '/I%{include_paths}'
+ }
+ flag_group {
+ iterate_over: 'system_include_paths'
+ flag: '/I%{system_include_paths}'
+ }
+ }
+ }
+
+ feature {
+ name: "preprocessor_defines"
+ flag_set {
+ action: "assemble"
+ action: "preprocess-assemble"
+ action: "c-compile"
+ action: "c++-compile"
+ action: "c++-header-parsing"
+ action: "c++-module-compile"
+ flag_group {
+ flag: "/D%{preprocessor_defines}"
+ iterate_over: "preprocessor_defines"
+ }
+ }
+ }
+
+ # Tell Bazel to parse the output of /showIncludes
+ feature {
+ name: 'parse_showincludes'
+ flag_set {
+ action: 'preprocess-assemble'
+ action: 'c-compile'
+ action: 'c++-compile'
+ action: 'c++-module-compile'
+ action: 'c++-header-parsing'
+ flag_group {
+ flag: "/showIncludes"
+ }
+ }
+ }
+
+
+ feature {
+ name: 'generate_pdb_file'
+ requires: {
+ feature: 'dbg'
+ }
+ requires: {
+ feature: 'fastbuild'
+ }
+ }
+
+ feature {
+ name: 'shared_flag'
+ flag_set {
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ flag: '/DLL'
+ }
+ }
+ }
+
+ feature {
+ name: 'linkstamps'
+ flag_set {
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ expand_if_all_available: 'linkstamp_paths'
+ flag_group {
+ iterate_over: 'linkstamp_paths'
+ flag: '%{linkstamp_paths}'
+ }
+ }
+ }
+
+ feature {
+ name: 'output_execpath_flags'
+ flag_set {
+ expand_if_all_available: 'output_execpath'
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ flag: '/OUT:%{output_execpath}'
+ }
+ }
+ }
+
+ feature {
+ name: 'archiver_flags'
+ flag_set {
+ expand_if_all_available: 'output_execpath'
+ action: 'c++-link-static-library'
+ flag_group {
+ flag: '/OUT:%{output_execpath}'
+ }
+ }
+ }
+
+ feature {
+ name: 'input_param_flags'
+ flag_set {
+ expand_if_all_available: 'interface_library_output_path'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ flag: "/IMPLIB:%{interface_library_output_path}"
+ }
+ }
+ flag_set {
+ expand_if_all_available: 'libopts'
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ iterate_over: 'libopts'
+ flag: '%{libopts}'
+ }
+ }
+ flag_set {
+ expand_if_all_available: 'libraries_to_link'
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ action: 'c++-link-static-library'
+ flag_group {
+ iterate_over: 'libraries_to_link'
+ flag_group {
+ expand_if_equal: {
+ variable: 'libraries_to_link.type'
+ value: 'object_file_group'
+ }
+ iterate_over: 'libraries_to_link.object_files'
+ flag_group {
+ flag: '%{libraries_to_link.object_files}'
+ }
+ }
+ flag_group {
+ expand_if_equal: {
+ variable: 'libraries_to_link.type'
+ value: 'object_file'
+ }
+ flag_group {
+ flag: '%{libraries_to_link.name}'
+ }
+ }
+ flag_group {
+ expand_if_equal: {
+ variable: 'libraries_to_link.type'
+ value: 'interface_library'
+ }
+ flag_group {
+ flag: '%{libraries_to_link.name}'
+ }
+ }
+ flag_group {
+ expand_if_equal: {
+ variable: 'libraries_to_link.type'
+ value: 'static_library'
+ }
+ flag_group {
+ expand_if_false: 'libraries_to_link.is_whole_archive'
+ flag: '%{libraries_to_link.name}'
+ }
+ flag_group {
+ expand_if_true: 'libraries_to_link.is_whole_archive'
+ flag: '/WHOLEARCHIVE:%{libraries_to_link.name}'
+ }
+ }
+ }
+ }
+ }
+
+ # Since this feature is declared earlier in the CROSSTOOL than
+ # "user_link_flags", this feature will be applied prior to it anwyhere they
+ # are both implied. And since "user_link_flags" contains the linkopts from
+ # the build rule, this allows the user to override the /SUBSYSTEM in the BUILD
+ # file.
+ feature {
+ name: 'linker_subsystem_flag'
+ flag_set {
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ flag: '/SUBSYSTEM:CONSOLE'
+ }
+ }
+ }
+
+ # The "user_link_flags" contains user-defined linkopts (from build rules)
+ # so it should be defined after features that declare user-overridable flags.
+ # For example the "linker_subsystem_flag" defines a default "/SUBSYSTEM" flag
+  # but we want to let the user override it; therefore "linker_subsystem_flag" is
+ # defined earlier in the CROSSTOOL file than "user_link_flags".
+ feature {
+ name: 'user_link_flags'
+ flag_set {
+ expand_if_all_available: 'user_link_flags'
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ iterate_over: 'user_link_flags'
+ flag: '%{user_link_flags}'
+ }
+ }
+ }
+ feature {
+ name: 'legacy_link_flags'
+ flag_set {
+ expand_if_all_available: 'legacy_link_flags'
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ iterate_over: 'legacy_link_flags'
+ flag: '%{legacy_link_flags}'
+ }
+ }
+ }
+
+ feature {
+ name: 'linker_param_file'
+ flag_set {
+ expand_if_all_available: 'linker_param_file'
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ action: 'c++-link-static-library'
+ flag_group {
+ flag: '@%{linker_param_file}'
+ }
+ }
+ }
+
+ feature {
+ name: 'static_link_msvcrt'
+ }
+
+ feature {
+ name: 'static_link_msvcrt_no_debug'
+ flag_set {
+ action: 'c-compile'
+ action: 'c++-compile'
+ flag_group {
+ flag: "/MT"
+ }
+ }
+ flag_set {
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ flag: "/DEFAULTLIB:libcmt.lib"
+ }
+ }
+ requires: { feature: 'fastbuild'}
+ requires: { feature: 'opt'}
+ }
+
+ feature {
+ name: 'dynamic_link_msvcrt_no_debug'
+ flag_set {
+ action: 'c-compile'
+ action: 'c++-compile'
+ flag_group {
+ flag: "/MD"
+ }
+ }
+ flag_set {
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ flag: "/DEFAULTLIB:msvcrt.lib"
+ }
+ }
+ requires: { feature: 'fastbuild'}
+ requires: { feature: 'opt'}
+ }
+
+ feature {
+ name: 'static_link_msvcrt_debug'
+ flag_set {
+ action: 'c-compile'
+ action: 'c++-compile'
+ flag_group {
+ flag: "/MTd"
+ }
+ }
+ flag_set {
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ flag: "/DEFAULTLIB:libcmtd.lib"
+ }
+ }
+ requires: { feature: 'dbg'}
+ }
+
+ feature {
+ name: 'dynamic_link_msvcrt_debug'
+ flag_set {
+ action: 'c-compile'
+ action: 'c++-compile'
+ flag_group {
+ flag: "/MDd"
+ }
+ }
+ flag_set {
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ flag: "/DEFAULTLIB:msvcrtd.lib"
+ }
+ }
+ requires: { feature: 'dbg'}
+ }
+
+ feature {
+ name: 'dbg'
+ flag_set {
+ action: 'c-compile'
+ action: 'c++-compile'
+ flag_group {
+ flag: "/Od"
+ flag: "/Z7"
+ flag: "/DDEBUG"
+ }
+ }
+ flag_set {
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ flag: "/DEBUG:FULL"
+ flag: "/INCREMENTAL:NO"
+ }
+ }
+ implies: 'generate_pdb_file'
+ }
+
+ feature {
+ name: 'fastbuild'
+ flag_set {
+ action: 'c-compile'
+ action: 'c++-compile'
+ flag_group {
+ flag: "/Od"
+ flag: "/Z7"
+ flag: "/DDEBUG"
+ }
+ }
+ flag_set {
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ flag: "/DEBUG:FASTLINK"
+ flag: "/INCREMENTAL:NO"
+ }
+ }
+ implies: 'generate_pdb_file'
+ }
+
+ feature {
+ name: 'opt'
+ flag_set {
+ action: 'c-compile'
+ action: 'c++-compile'
+ flag_group {
+ flag: "/O2"
+ flag: "/DNDEBUG"
+ }
+ }
+ }
+
+ feature {
+ name: 'user_compile_flags'
+ flag_set {
+ expand_if_all_available: 'user_compile_flags'
+ action: 'preprocess-assemble'
+ action: 'c-compile'
+ action: 'c++-compile'
+ action: 'c++-header-parsing'
+ action: 'c++-module-compile'
+ action: 'c++-module-codegen'
+ flag_group {
+ iterate_over: 'user_compile_flags'
+ flag: '%{user_compile_flags}'
+ }
+ }
+ }
+
+ feature {
+ name: 'sysroot'
+ flag_set {
+ expand_if_all_available: 'sysroot'
+ action: 'assemble'
+ action: 'preprocess-assemble'
+ action: 'c-compile'
+ action: 'c++-compile'
+ action: 'c++-header-parsing'
+ action: 'c++-module-compile'
+ action: 'c++-module-codegen'
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ iterate_over: 'sysroot'
+ flag: '--sysroot=%{sysroot}'
+ }
+ }
+ }
+
+ feature {
+ name: 'unfiltered_compile_flags'
+ flag_set {
+ expand_if_all_available: 'unfiltered_compile_flags'
+ action: 'preprocess-assemble'
+ action: 'c-compile'
+ action: 'c++-compile'
+ action: 'c++-header-parsing'
+ action: 'c++-module-compile'
+ action: 'c++-module-codegen'
+ flag_group {
+ iterate_over: 'unfiltered_compile_flags'
+ flag: '%{unfiltered_compile_flags}'
+ }
+ }
+ }
+
+ feature {
+ name: 'compiler_output_flags'
+ flag_set {
+ action: 'assemble'
+ flag_group {
+ expand_if_all_available: 'output_file'
+ expand_if_none_available: 'output_assembly_file'
+ expand_if_none_available: 'output_preprocess_file'
+ flag: '/Fo%{output_file}'
+ flag: '/Zi'
+ }
+ }
+ flag_set {
+ action: 'preprocess-assemble'
+ action: 'c-compile'
+ action: 'c++-compile'
+ action: 'c++-header-parsing'
+ action: 'c++-module-compile'
+ action: 'c++-module-codegen'
+ flag_group {
+ expand_if_all_available: 'output_file'
+ expand_if_none_available: 'output_assembly_file'
+ expand_if_none_available: 'output_preprocess_file'
+ flag: '/Fo%{output_file}'
+ }
+ flag_group {
+ expand_if_all_available: 'output_file'
+ expand_if_all_available: 'output_assembly_file'
+ flag: '/Fa%{output_file}'
+ }
+ flag_group {
+ expand_if_all_available: 'output_file'
+ expand_if_all_available: 'output_preprocess_file'
+ flag: '/P'
+ flag: '/Fi%{output_file}'
+ }
+ }
+ }
+
+ feature {
+ name: 'compiler_input_flags'
+ flag_set {
+ action: 'assemble'
+ action: 'preprocess-assemble'
+ action: 'c-compile'
+ action: 'c++-compile'
+ action: 'c++-header-parsing'
+ action: 'c++-module-compile'
+ action: 'c++-module-codegen'
+ flag_group {
+ expand_if_all_available: 'source_file'
+ flag: '/c'
+ flag: '%{source_file}'
+ }
+ }
+ }
+
+ feature {
+    name: 'def_file'
+ flag_set {
+ expand_if_all_available: 'def_file_path'
+ action: 'c++-link-executable'
+ action: 'c++-link-dynamic-library'
+ action: "c++-link-nodeps-dynamic-library"
+ flag_group {
+ flag: "/DEF:%{def_file_path}"
+      # We can specify a different DLL name in the DEF file; /ignore:4070 suppresses
+      # the warning about the DLL name not matching the default one.
+ # See https://msdn.microsoft.com/en-us/library/sfkk2fz7.aspx
+ flag: "/ignore:4070"
+ }
+ }
+ }
+
+ feature {
+ name: 'windows_export_all_symbols'
+ }
+
+ feature {
+ name: 'no_windows_export_all_symbols'
+ }
+
+ linking_mode_flags { mode: DYNAMIC }
+}
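Conceptually, each flag_group with iterate_over expands its template once per value of the named build variable; a toy model of that expansion (Bazel's real engine additionally handles the expand_if_* guards and nested groups):

def expand_flag_group(template, var_name, variables):
    # Emit one flag per value of the iterated build variable.
    return [template.replace('%{' + var_name + '}', value)
            for value in variables.get(var_name, [])]

# The include_paths feature above, fed two include directories:
print(expand_flag_group('/I%{include_paths}', 'include_paths',
                        {'include_paths': ['a/inc', 'b/inc']}))
# -> ['/Ia/inc', '/Ib/inc']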
diff --git a/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc.tpl b/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc.tpl
index 2558f46fd5..f4f4d0ee96 100755
--- a/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc.tpl
+++ b/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc.tpl
@@ -175,6 +175,11 @@ def InvokeNvcc(argv, log=False):
# any other reliable way to just get the list of source files to be compiled.
src_files = GetOptionValue(argv, 'c')
+ # Pass -w through from host to nvcc, but don't do anything fancier with
+ # warnings-related flags, since they're not necessarily the same across
+ # compilers.
+ warning_options = ' -w' if '-w' in argv else ''
+
if len(src_files) == 0:
return 1
if len(out_file) != 1:
@@ -205,6 +210,7 @@ def InvokeNvcc(argv, log=False):
nvccopts += defines
nvccopts += std_options
nvccopts += m_options
+ nvccopts += warning_options
if depfiles:
# Generate the dependency file
diff --git a/third_party/gpus/crosstool/windows/msvc_wrapper_for_nvcc.bat.tpl b/third_party/gpus/crosstool/windows/msvc_wrapper_for_nvcc.bat.tpl
new file mode 100644
index 0000000000..8f8fb3e423
--- /dev/null
+++ b/third_party/gpus/crosstool/windows/msvc_wrapper_for_nvcc.bat.tpl
@@ -0,0 +1,20 @@
+:: Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+:: =============================================================================
+
+:: Invoke msvc_wrapper_for_nvcc.py, which is located in the same directory.
+@echo OFF
+set arg0=%~0
+for %%F in ("%arg0%") do set DRIVER_BIN=%%~dpF
+"%{python_binary}" -B "%DRIVER_BIN%\msvc_wrapper_for_nvcc.py" %*
diff --git a/third_party/gpus/crosstool/windows/msvc_wrapper_for_nvcc.py.tpl b/third_party/gpus/crosstool/windows/msvc_wrapper_for_nvcc.py.tpl
new file mode 100644
index 0000000000..1a09756813
--- /dev/null
+++ b/third_party/gpus/crosstool/windows/msvc_wrapper_for_nvcc.py.tpl
@@ -0,0 +1,192 @@
+#!/usr/bin/env python
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Crosstool wrapper for compiling CUDA programs with nvcc on Windows.
+
+DESCRIPTION:
+  This script is the Windows version of //third_party/gpus/crosstool/crosstool_wrapper_driver_is_not_gcc
+"""
+
+from __future__ import print_function
+
+from argparse import ArgumentParser
+import os
+import subprocess
+import re
+import sys
+import pipes
+
+# Template values set by cuda_autoconf.
+CPU_COMPILER = ('%{cpu_compiler}')
+GCC_HOST_COMPILER_PATH = ('%{gcc_host_compiler_path}')
+
+NVCC_PATH = '%{nvcc_path}'
+NVCC_VERSION = '%{cuda_version}'
+NVCC_TEMP_DIR = "%{nvcc_tmp_dir}"
+supported_cuda_compute_capabilities = [ %{cuda_compute_capabilities} ]
+
+def Log(s):
+ print('gpus/crosstool: {0}'.format(s))
+
+
+def GetOptionValue(argv, option):
+ """Extract the list of values for option from options.
+
+ Args:
+    argv: A list of strings, possibly the argv passed to main().
+    option: The option whose value to extract, without the leading '/'.
+
+ Returns:
+ 1. A list of values, either directly following the option,
+    (e.g., /opt val1 val2) or values collected from multiple occurrences of
+    the option (e.g., /opt val1 /opt val2).
+ 2. The leftover options.
+ """
+
+ parser = ArgumentParser(prefix_chars='/')
+ parser.add_argument('/' + option, nargs='*', action='append')
+ args, leftover = parser.parse_known_args(argv)
+ if args and vars(args)[option]:
+ return (sum(vars(args)[option], []), leftover)
+ return ([], leftover)
+
+def _update_options(nvcc_options):
+ if NVCC_VERSION in ("7.0",):
+ return nvcc_options
+
+ update_options = { "relaxed-constexpr" : "expt-relaxed-constexpr" }
+ return [ update_options[opt] if opt in update_options else opt
+ for opt in nvcc_options ]
+
+def GetNvccOptions(argv):
+ """Collect the -nvcc_options values from argv.
+
+ Args:
+ argv: A list of strings, possibly the argv passed to main().
+
+ Returns:
+ 1. The string that can be passed directly to nvcc.
+ 2. The leftover options.
+ """
+
+ parser = ArgumentParser()
+ parser.add_argument('-nvcc_options', nargs='*', action='append')
+
+ args, leftover = parser.parse_known_args(argv)
+
+ if args.nvcc_options:
+ options = _update_options(sum(args.nvcc_options, []))
+ return (['--' + a for a in options], leftover)
+ return ([], leftover)
+
+
+def InvokeNvcc(argv, log=False):
+ """Call nvcc with arguments assembled from argv.
+
+ Args:
+ argv: A list of strings, possibly the argv passed to main().
+ log: True if logging is requested.
+
+ Returns:
+    The return code of the nvcc subprocess invocation.
+ """
+
+ src_files = [f for f in argv if
+ re.search('\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
+ if len(src_files) == 0:
+    raise RuntimeError('No source files found for cuda compilation.')
+
+ out_file = [ f for f in argv if f.startswith('/Fo') ]
+ if len(out_file) != 1:
+    raise RuntimeError('Please specify exactly one output file for cuda compilation.')
+ out = ['-o', out_file[0][len('/Fo'):]]
+
+ nvcc_compiler_options, argv = GetNvccOptions(argv)
+
+ opt_option, argv = GetOptionValue(argv, 'O')
+ opt = ['-g', '-G']
+ if (len(opt_option) > 0 and opt_option[0] != 'd'):
+ opt = ['-O2']
+
+ include_options, argv = GetOptionValue(argv, 'I')
+ includes = ["-I " + include for include in include_options]
+
+ defines, argv = GetOptionValue(argv, 'D')
+ defines = ['-D' + define for define in defines]
+
+ undefines, argv = GetOptionValue(argv, 'U')
+ undefines = ['-U' + define for define in undefines]
+
+  # The rest of the unrecognized options should be passed to the host compiler
+ host_compiler_options = [option for option in argv if option not in (src_files + out_file)]
+
+ m_options = ["-m64"]
+
+ nvccopts = ['-D_FORCE_INLINES']
+ for capability in supported_cuda_compute_capabilities:
+ capability = capability.replace('.', '')
+ nvccopts += [r'-gencode=arch=compute_%s,"code=sm_%s,compute_%s"' % (
+ capability, capability, capability)]
+ nvccopts += nvcc_compiler_options
+ nvccopts += undefines
+ nvccopts += defines
+ nvccopts += m_options
+ nvccopts += ['--compiler-options="' + " ".join(host_compiler_options) + '"']
+ nvccopts += ['-x', 'cu'] + opt + includes + out + ['-c'] + src_files
+  # If we don't specify --keep-dir, nvcc will generate intermediate files under TEMP.
+  # Put them under NVCC_TEMP_DIR instead, so Bazel can ignore files under NVCC_TEMP_DIR during its dependency check.
+  # http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-guiding-compiler-driver
+  # Different actions share NVCC_TEMP_DIR, so we cannot remove it if the directory already exists.
+ if os.path.isfile(NVCC_TEMP_DIR):
+ os.remove(NVCC_TEMP_DIR)
+ if not os.path.exists(NVCC_TEMP_DIR):
+ os.makedirs(NVCC_TEMP_DIR)
+ nvccopts += ['--keep', '--keep-dir', NVCC_TEMP_DIR]
+ cmd = [NVCC_PATH] + nvccopts
+ if log:
+ Log(cmd)
+ proc = subprocess.Popen(cmd,
+ stdout=sys.stdout,
+ stderr=sys.stderr,
+ env=os.environ.copy(),
+ shell=True)
+ proc.wait()
+ return proc.returncode
+
+def main():
+ parser = ArgumentParser()
+ parser.add_argument('-x', nargs=1)
+ parser.add_argument('--cuda_log', action='store_true')
+ args, leftover = parser.parse_known_args(sys.argv[1:])
+
+ if args.x and args.x[0] == 'cuda':
+ if args.cuda_log: Log('-x cuda')
+ leftover = [pipes.quote(s) for s in leftover]
+ if args.cuda_log: Log('using nvcc')
+ return InvokeNvcc(leftover, log=args.cuda_log)
+
+ # Strip our flags before passing through to the CPU compiler for files which
+ # are not -x cuda. We can't just pass 'leftover' because it also strips -x.
+ # We not only want to pass -x to the CPU compiler, but also keep it in its
+ # relative location in the argv list (the compiler is actually sensitive to
+ # this).
+ cpu_compiler_flags = [flag for flag in sys.argv[1:]
+ if not flag.startswith(('--cuda_log'))
+ and not flag.startswith(('-nvcc_options'))]
+
+ return subprocess.call([CPU_COMPILER] + cpu_compiler_flags)
+
+if __name__ == '__main__':
+ sys.exit(main())
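To see how the wrapper splits an MSVC-style command line, consider GetOptionValue above on a made-up argv (the file names are illustrative only):

argv = ['/DWIN32', '/Ithird_party', '/Foobj/conv.obj', 'conv.cu.cc']
defines, rest = GetOptionValue(argv, 'D')    # (['WIN32'], leftover args)
includes, rest = GetOptionValue(rest, 'I')   # (['third_party'], leftover args)
# InvokeNvcc separately turns '/Foobj/conv.obj' into ['-o', 'obj/conv.obj'].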
diff --git a/third_party/gpus/cuda/BUILD.windows.tpl b/third_party/gpus/cuda/BUILD.windows.tpl
new file mode 100644
index 0000000000..ff6b3cc351
--- /dev/null
+++ b/third_party/gpus/cuda/BUILD.windows.tpl
@@ -0,0 +1,163 @@
+licenses(["restricted"]) # MPL2, portions GPL v3, LGPL v3, BSD-like
+
+package(default_visibility = ["//visibility:public"])
+
+config_setting(
+ name = "using_nvcc",
+ values = {
+ "define": "using_cuda_nvcc=true",
+ },
+)
+
+config_setting(
+ name = "using_clang",
+ values = {
+ "define": "using_cuda_clang=true",
+ },
+)
+
+# Equivalent to using_clang && -c opt.
+config_setting(
+ name = "using_clang_opt",
+ values = {
+ "define": "using_cuda_clang=true",
+ "compilation_mode": "opt",
+ },
+)
+
+config_setting(
+ name = "darwin",
+ values = {"cpu": "darwin"},
+ visibility = ["//visibility:public"],
+)
+
+config_setting(
+ name = "freebsd",
+ values = {"cpu": "freebsd"},
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "cuda_headers",
+ hdrs = [
+ "cuda/cuda_config.h",
+ %{cuda_headers}
+ ],
+ includes = [
+ ".",
+ "cuda/include",
+ "cuda/include/crt",
+ ],
+ visibility = ["//visibility:public"],
+)
+
+cc_import(
+ name = "cudart_static",
+    # /WHOLEARCHIVE:cudart_static.lib will cause an
+    # "Internal error during CImplib::EmitThunk" error.
+    # Treat this library as an interface library to avoid it being whole-archived
+    # when linking a DLL that depends on this.
+ # TODO(pcloudy): Remove this rule after b/111278841 is resolved.
+ interface_library = "cuda/lib/%{cudart_static_lib}",
+ system_provided = 1,
+ visibility = ["//visibility:public"],
+)
+
+cc_import(
+ name = "cuda_driver",
+ interface_library = "cuda/lib/%{cuda_driver_lib}",
+ system_provided = 1,
+ visibility = ["//visibility:public"],
+)
+
+cc_import(
+ name = "cudart",
+ interface_library = "cuda/lib/%{cudart_lib}",
+ system_provided = 1,
+ visibility = ["//visibility:public"],
+)
+
+cc_import(
+ name = "cublas",
+ interface_library = "cuda/lib/%{cublas_lib}",
+ system_provided = 1,
+ visibility = ["//visibility:public"],
+)
+
+cc_import(
+ name = "cusolver",
+ interface_library = "cuda/lib/%{cusolver_lib}",
+ system_provided = 1,
+ visibility = ["//visibility:public"],
+)
+
+cc_import(
+ name = "cudnn",
+ interface_library = "cuda/lib/%{cudnn_lib}",
+ system_provided = 1,
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "cudnn_header",
+ includes = [
+ ".",
+ "cuda/include",
+ ],
+ visibility = ["//visibility:public"],
+)
+
+cc_import(
+ name = "cufft",
+ interface_library = "cuda/lib/%{cufft_lib}",
+ system_provided = 1,
+ visibility = ["//visibility:public"],
+)
+
+cc_import(
+ name = "curand",
+ interface_library = "cuda/lib/%{curand_lib}",
+ system_provided = 1,
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "cuda",
+ visibility = ["//visibility:public"],
+ deps = [
+ ":cublas",
+ ":cuda_headers",
+ ":cudart",
+ ":cudnn",
+ ":cufft",
+ ":curand",
+ ],
+)
+
+cc_library(
+ name = "cupti_headers",
+ hdrs = [
+ "cuda/cuda_config.h",
+ ":cuda-extras",
+ ],
+ includes = [
+ ".",
+ "cuda/extras/CUPTI/include/",
+ ],
+ visibility = ["//visibility:public"],
+)
+
+cc_import(
+ name = "cupti_dsos",
+ interface_library = "cuda/lib/%{cupti_lib}",
+ system_provided = 1,
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "libdevice_root",
+ data = [":cuda-nvvm"],
+ visibility = ["//visibility:public"],
+)
+
+%{cuda_include_genrules}
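Each cc_import above pairs an interface .lib with system_provided = 1, meaning targets link against the import library and expect the DLL at runtime. A dependent target references it as usual; the sketch assumes the conventional @local_config_cuda repository name:

cc_binary(
    name = "saxpy",
    srcs = ["saxpy.cc"],
    deps = ["@local_config_cuda//cuda:cudart"],
)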
diff --git a/third_party/gpus/cuda_configure.bzl b/third_party/gpus/cuda_configure.bzl
index c90c66912d..e848fa175c 100644
--- a/third_party/gpus/cuda_configure.bzl
+++ b/third_party/gpus/cuda_configure.bzl
@@ -20,6 +20,7 @@
`/usr/local/cuda`.
* `TF_CUDA_COMPUTE_CAPABILITIES`: The CUDA compute capabilities. Default is
`3.5,5.2`.
+  * `PYTHON_BIN_PATH`: The python binary path.
"""
_GCC_HOST_COMPILER_PATH = "GCC_HOST_COMPILER_PATH"
@@ -31,6 +32,7 @@ _CUDNN_INSTALL_PATH = "CUDNN_INSTALL_PATH"
_TF_CUDA_COMPUTE_CAPABILITIES = "TF_CUDA_COMPUTE_CAPABILITIES"
_TF_CUDA_CONFIG_REPO = "TF_CUDA_CONFIG_REPO"
_TF_DOWNLOAD_CLANG = "TF_DOWNLOAD_CLANG"
+_PYTHON_BIN_PATH = "PYTHON_BIN_PATH"
_DEFAULT_CUDA_VERSION = ""
_DEFAULT_CUDNN_VERSION = ""
@@ -44,12 +46,12 @@ _DEFAULT_CUDA_COMPUTE_CAPABILITIES = ["3.5", "5.2"]
# will be used. For example, when looking for the cudart libraries, the first
# attempt will be lib64/cudart inside the CUDA toolkit.
CUDA_LIB_PATHS = [
- "lib64/",
- "lib64/stubs/",
- "lib/x86_64-linux-gnu/",
- "lib/x64/",
- "lib/",
- "",
+ "lib64/",
+ "lib64/stubs/",
+ "lib/x86_64-linux-gnu/",
+ "lib/x64/",
+ "lib/",
+ "",
]
# Lookup paths for cupti.h, relative to the CUDA toolkit directory.
@@ -57,8 +59,8 @@ CUDA_LIB_PATHS = [
# On most systems, the cupti library is not installed in the same directory as
# the other CUDA libraries but rather in a special extras/CUPTI directory.
CUPTI_HEADER_PATHS = [
- "extras/CUPTI/include/",
- "include/cuda/CUPTI/",
+ "extras/CUPTI/include/",
+ "include/cuda/CUPTI/",
]
# Lookup paths for the cupti library, relative to the
@@ -66,25 +68,25 @@ CUPTI_HEADER_PATHS = [
# On most systems, the cupti library is not installed in the same directory as
# the other CUDA libraries but rather in a special extras/CUPTI directory.
CUPTI_LIB_PATHS = [
- "extras/CUPTI/lib64/",
- "lib/x86_64-linux-gnu",
- "lib64/",
- "extras/CUPTI/libx64/",
- "extras/CUPTI/lib/",
- "lib/",
+ "extras/CUPTI/lib64/",
+ "lib/x86_64-linux-gnu",
+ "lib64/",
+ "extras/CUPTI/libx64/",
+ "extras/CUPTI/lib/",
+ "lib/",
]
# Lookup paths for CUDA headers (cuda.h) relative to the CUDA toolkit directory.
CUDA_INCLUDE_PATHS = [
- "include/",
- "include/cuda/"
+ "include/",
+ "include/cuda/",
]
# Lookup paths for cudnn.h relative to the CUDNN install directory.
CUDNN_INCLUDE_PATHS = [
- "",
- "include/",
- "include/cuda/",
+ "",
+ "include/",
+ "include/cuda/",
]
# Lookup paths for NVVM libdevice relative to the CUDA directory toolkit.
@@ -92,686 +94,841 @@ CUDNN_INCLUDE_PATHS = [
# libdevice implements mathematical functions for GPU kernels, and is provided
# in NVVM bitcode (a subset of LLVM bitcode).
NVVM_LIBDEVICE_PATHS = [
- "nvvm/libdevice/",
- "share/cuda/",
+ "nvvm/libdevice/",
+ "share/cuda/",
+]
+
+# Files used to detect the NVVM libdevice path.
+NVVM_LIBDEVICE_FILES = [
+ # CUDA 9.0 has a single file.
+ "libdevice.10.bc",
+
+ # CUDA 8.0 has separate files for compute versions 2.0, 3.0, 3.5 and 5.0.
+ # Probing for one of them is sufficient.
+ "libdevice.compute_20.10.bc",
]
load("//third_party/clang_toolchain:download_clang.bzl", "download_clang")
+load(
+ "@bazel_tools//tools/cpp:lib_cc_configure.bzl",
+ "escape_string",
+ "get_env_var",
+)
+load(
+ "@bazel_tools//tools/cpp:windows_cc_configure.bzl",
+ "find_msvc_tool",
+ "find_vc_path",
+ "setup_vc_env_vars",
+)
+
+def _get_python_bin(repository_ctx):
+ """Gets the python bin path."""
+ python_bin = repository_ctx.os.environ.get(_PYTHON_BIN_PATH)
+ if python_bin != None:
+ return python_bin
+ python_bin_name = "python.exe" if _is_windows(repository_ctx) else "python"
+ python_bin_path = repository_ctx.which(python_bin_name)
+ if python_bin_path != None:
+ return str(python_bin_path)
+ auto_configure_fail("Cannot find python in PATH, please make sure " +
+ "python is installed and add its directory in PATH, or --define " +
+ "%s='/something/else'.\nPATH=%s" % (
+ _PYTHON_BIN_PATH,
+ repository_ctx.os.environ.get("PATH", ""),
+ ))
+
+def _get_nvcc_tmp_dir_for_windows(repository_ctx):
+ """Return the tmp directory for nvcc to generate intermediate source files."""
+ escaped_tmp_dir = escape_string(
+ get_env_var(repository_ctx, "TMP", "C:\\Windows\\Temp").replace("\\", "\\\\"),
+ )
+ return escaped_tmp_dir + "\\\\nvcc_inter_files_tmp_dir"
+
+def _get_msvc_compiler(repository_ctx):
+ vc_path = find_vc_path(repository_ctx)
+ return find_msvc_tool(repository_ctx, vc_path, "cl.exe").replace("\\", "/")
+
+def _get_win_cuda_defines(repository_ctx):
+ """Return CROSSTOOL defines for Windows"""
+
+    # If we are not on Windows, return empty values for Windows-specific fields.
+ # This ensures the CROSSTOOL file parser is happy.
+ if not _is_windows(repository_ctx):
+ return {
+ "%{msvc_env_tmp}": "",
+ "%{msvc_env_path}": "",
+ "%{msvc_env_include}": "",
+ "%{msvc_env_lib}": "",
+ "%{msvc_cl_path}": "",
+ "%{msvc_ml_path}": "",
+ "%{msvc_link_path}": "",
+ "%{msvc_lib_path}": "",
+ "%{cxx_builtin_include_directory}": "",
+ }
+
+ vc_path = find_vc_path(repository_ctx)
+ if not vc_path:
+ auto_configure_fail("Visual C++ build tools not found on your machine." +
+ "Please check your installation following https://docs.bazel.build/versions/master/windows.html#using")
+ return {}
+
+ env = setup_vc_env_vars(repository_ctx, vc_path)
+ escaped_paths = escape_string(env["PATH"])
+ escaped_include_paths = escape_string(env["INCLUDE"])
+ escaped_lib_paths = escape_string(env["LIB"])
+ escaped_tmp_dir = escape_string(
+ get_env_var(repository_ctx, "TMP", "C:\\Windows\\Temp").replace("\\", "\\\\"),
+ )
+
+ msvc_cl_path = "windows/msvc_wrapper_for_nvcc.bat"
+ msvc_ml_path = find_msvc_tool(repository_ctx, vc_path, "ml64.exe").replace("\\", "/")
+ msvc_link_path = find_msvc_tool(repository_ctx, vc_path, "link.exe").replace("\\", "/")
+ msvc_lib_path = find_msvc_tool(repository_ctx, vc_path, "lib.exe").replace("\\", "/")
+
+ # nvcc will generate some temporary source files under %{nvcc_tmp_dir}
+    # The generated files are guaranteed to have unique names, so they can share the same tmp directory
+ escaped_cxx_include_directories = ["cxx_builtin_include_directory: \"%s\"" % _get_nvcc_tmp_dir_for_windows(repository_ctx)]
+ for path in escaped_include_paths.split(";"):
+ if path:
+ escaped_cxx_include_directories.append("cxx_builtin_include_directory: \"%s\"" % path)
+
+ return {
+ "%{msvc_env_tmp}": escaped_tmp_dir,
+ "%{msvc_env_path}": escaped_paths,
+ "%{msvc_env_include}": escaped_include_paths,
+ "%{msvc_env_lib}": escaped_lib_paths,
+ "%{msvc_cl_path}": msvc_cl_path,
+ "%{msvc_ml_path}": msvc_ml_path,
+ "%{msvc_link_path}": msvc_link_path,
+ "%{msvc_lib_path}": msvc_lib_path,
+ "%{cxx_builtin_include_directory}": "\n".join(escaped_cxx_include_directories),
+ }
# TODO(dzc): Once these functions have been factored out of Bazel's
# cc_configure.bzl, load them from @bazel_tools instead.
# BEGIN cc_configure common functions.
def find_cc(repository_ctx):
- """Find the C++ compiler."""
- # On Windows, we use Bazel's MSVC CROSSTOOL for GPU build
- # Return a dummy value for GCC detection here to avoid error
- if _is_windows(repository_ctx):
- return "/use/--config=win-cuda --cpu=x64_windows_msvc/instead"
-
- if _use_cuda_clang(repository_ctx):
- target_cc_name = "clang"
- cc_path_envvar = _CLANG_CUDA_COMPILER_PATH
- if _flag_enabled(repository_ctx, _TF_DOWNLOAD_CLANG):
- return "extra_tools/bin/clang"
- else:
- target_cc_name = "gcc"
- cc_path_envvar = _GCC_HOST_COMPILER_PATH
- cc_name = target_cc_name
-
- if cc_path_envvar in repository_ctx.os.environ:
- cc_name_from_env = repository_ctx.os.environ[cc_path_envvar].strip()
- if cc_name_from_env:
- cc_name = cc_name_from_env
- if cc_name.startswith("/"):
- # Absolute path, maybe we should make this supported by our which function.
- return cc_name
- cc = repository_ctx.which(cc_name)
- if cc == None:
- fail(("Cannot find {}, either correct your path or set the {}" +
- " environment variable").format(target_cc_name, cc_path_envvar))
- return cc
-
+ """Find the C++ compiler."""
+ if _is_windows(repository_ctx):
+ return _get_msvc_compiler(repository_ctx)
+
+ if _use_cuda_clang(repository_ctx):
+ target_cc_name = "clang"
+ cc_path_envvar = _CLANG_CUDA_COMPILER_PATH
+ if _flag_enabled(repository_ctx, _TF_DOWNLOAD_CLANG):
+ return "extra_tools/bin/clang"
+ else:
+ target_cc_name = "gcc"
+ cc_path_envvar = _GCC_HOST_COMPILER_PATH
+ cc_name = target_cc_name
+
+ if cc_path_envvar in repository_ctx.os.environ:
+ cc_name_from_env = repository_ctx.os.environ[cc_path_envvar].strip()
+ if cc_name_from_env:
+ cc_name = cc_name_from_env
+ if cc_name.startswith("/"):
+ # Absolute path, maybe we should make this supported by our which function.
+ return cc_name
+ cc = repository_ctx.which(cc_name)
+ if cc == None:
+ fail(("Cannot find {}, either correct your path or set the {}" +
+ " environment variable").format(target_cc_name, cc_path_envvar))
+ return cc
_INC_DIR_MARKER_BEGIN = "#include <...>"
-
# OSX adds " (framework directory)" at the end of the line; strip it.
_OSX_FRAMEWORK_SUFFIX = " (framework directory)"
-_OSX_FRAMEWORK_SUFFIX_LEN = len(_OSX_FRAMEWORK_SUFFIX)
-def _cxx_inc_convert(path):
- """Convert path returned by cc -E xc++ in a complete path."""
- path = path.strip()
- if path.endswith(_OSX_FRAMEWORK_SUFFIX):
- path = path[:-_OSX_FRAMEWORK_SUFFIX_LEN].strip()
- return path
+_OSX_FRAMEWORK_SUFFIX_LEN = len(_OSX_FRAMEWORK_SUFFIX)
+def _cxx_inc_convert(path):
+ """Convert path returned by cc -E xc++ in a complete path."""
+ path = path.strip()
+ if path.endswith(_OSX_FRAMEWORK_SUFFIX):
+ path = path[:-_OSX_FRAMEWORK_SUFFIX_LEN].strip()
+ return path
def _normalize_include_path(repository_ctx, path):
- """Normalizes include paths before writing them to the crosstool.
+ """Normalizes include paths before writing them to the crosstool.
- If path points inside the 'crosstool' folder of the repository, a relative
- path is returned.
- If path points outside the 'crosstool' folder, an absolute path is returned.
- """
- path = str(repository_ctx.path(path))
- crosstool_folder = str(repository_ctx.path(".").get_child('crosstool'))
-
- if path.startswith(crosstool_folder):
- # We drop the path to "$REPO/crosstool" and a trailing path separator.
- return path[len(crosstool_folder)+1:]
- return path
+ If path points inside the 'crosstool' folder of the repository, a relative
+ path is returned.
+ If path points outside the 'crosstool' folder, an absolute path is returned.
+ """
+ path = str(repository_ctx.path(path))
+ crosstool_folder = str(repository_ctx.path(".").get_child("crosstool"))
+ if path.startswith(crosstool_folder):
+ # We drop the path to "$REPO/crosstool" and a trailing path separator.
+ return path[len(crosstool_folder) + 1:]
+ return path
def _get_cxx_inc_directories_impl(repository_ctx, cc, lang_is_cpp):
- """Compute the list of default C or C++ include directories."""
- if lang_is_cpp:
- lang = "c++"
- else:
- lang = "c"
- result = repository_ctx.execute([cc, "-E", "-x" + lang, "-", "-v"])
- index1 = result.stderr.find(_INC_DIR_MARKER_BEGIN)
- if index1 == -1:
- return []
- index1 = result.stderr.find("\n", index1)
- if index1 == -1:
- return []
- index2 = result.stderr.rfind("\n ")
- if index2 == -1 or index2 < index1:
- return []
- index2 = result.stderr.find("\n", index2 + 1)
- if index2 == -1:
- inc_dirs = result.stderr[index1 + 1:]
- else:
- inc_dirs = result.stderr[index1 + 1:index2].strip()
-
- return [
- _normalize_include_path(repository_ctx, _cxx_inc_convert(p))
- for p in inc_dirs.split("\n")
- ]
+ """Compute the list of default C or C++ include directories."""
+ if lang_is_cpp:
+ lang = "c++"
+ else:
+ lang = "c"
+ result = repository_ctx.execute([cc, "-E", "-x" + lang, "-", "-v"])
+ index1 = result.stderr.find(_INC_DIR_MARKER_BEGIN)
+ if index1 == -1:
+ return []
+ index1 = result.stderr.find("\n", index1)
+ if index1 == -1:
+ return []
+ index2 = result.stderr.rfind("\n ")
+ if index2 == -1 or index2 < index1:
+ return []
+ index2 = result.stderr.find("\n", index2 + 1)
+ if index2 == -1:
+ inc_dirs = result.stderr[index1 + 1:]
+ else:
+ inc_dirs = result.stderr[index1 + 1:index2].strip()
+ return [
+ _normalize_include_path(repository_ctx, _cxx_inc_convert(p))
+ for p in inc_dirs.split("\n")
+ ]
def get_cxx_inc_directories(repository_ctx, cc):
- """Compute the list of default C and C++ include directories."""
- # For some reason `clang -xc` sometimes returns include paths that are
- # different from the ones from `clang -xc++`. (Symlink and a dir)
- # So we run the compiler with both `-xc` and `-xc++` and merge resulting lists
- includes_cpp = _get_cxx_inc_directories_impl(repository_ctx, cc, True)
- includes_c = _get_cxx_inc_directories_impl(repository_ctx, cc, False)
+ """Compute the list of default C and C++ include directories."""
- includes_cpp_set = depset(includes_cpp)
- return includes_cpp + [inc for inc in includes_c
- if inc not in includes_cpp_set]
+    # For some reason `clang -xc` sometimes returns include paths that differ
+    # from the ones returned by `clang -xc++` (e.g. a symlink vs. a directory),
+    # so we run the compiler with both `-xc` and `-xc++` and merge the resulting lists.
+ includes_cpp = _get_cxx_inc_directories_impl(repository_ctx, cc, True)
+ includes_c = _get_cxx_inc_directories_impl(repository_ctx, cc, False)
+ includes_cpp_set = depset(includes_cpp)
+ return includes_cpp + [
+ inc
+ for inc in includes_c
+ if inc not in includes_cpp_set
+ ]
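+# Illustrative example: if `-xc++` yields ["/usr/include/c++/7", "/usr/include"]
+# and `-xc` yields ["/usr/include", "/usr/lib/clang/include"], the merged result
+# is ["/usr/include/c++/7", "/usr/include", "/usr/lib/clang/include"].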
def auto_configure_fail(msg):
- """Output failure message when cuda configuration fails."""
- red = "\033[0;31m"
- no_color = "\033[0m"
- fail("\n%sCuda Configuration Error:%s %s\n" % (red, no_color, msg))
-# END cc_configure common functions (see TODO above).
+ """Output failure message when cuda configuration fails."""
+ red = "\033[0;31m"
+ no_color = "\033[0m"
+ fail("\n%sCuda Configuration Error:%s %s\n" % (red, no_color, msg))
+# END cc_configure common functions (see TODO above).
def _host_compiler_includes(repository_ctx, cc):
- """Generates the cxx_builtin_include_directory entries for gcc inc dirs.
-
- Args:
- repository_ctx: The repository context.
- cc: The path to the gcc host compiler.
-
- Returns:
- A string containing the cxx_builtin_include_directory for each of the gcc
- host compiler include directories, which can be added to the CROSSTOOL
- file.
- """
- inc_dirs = get_cxx_inc_directories(repository_ctx, cc)
- inc_entries = []
- for inc_dir in inc_dirs:
- inc_entries.append(" cxx_builtin_include_directory: \"%s\"" % inc_dir)
- return "\n".join(inc_entries)
+ """Generates the cxx_builtin_include_directory entries for gcc inc dirs.
+
+ Args:
+ repository_ctx: The repository context.
+ cc: The path to the gcc host compiler.
+
+ Returns:
+ A string containing the cxx_builtin_include_directory for each of the gcc
+ host compiler include directories, which can be added to the CROSSTOOL
+ file.
+ """
+ inc_dirs = get_cxx_inc_directories(repository_ctx, cc)
+ inc_entries = []
+ for inc_dir in inc_dirs:
+ inc_entries.append(" cxx_builtin_include_directory: \"%s\"" % inc_dir)
+ return "\n".join(inc_entries)
def _cuda_include_path(repository_ctx, cuda_config):
- """Generates the cxx_builtin_include_directory entries for cuda inc dirs.
-
- Args:
- repository_ctx: The repository context.
- cc: The path to the gcc host compiler.
-
- Returns:
- A string containing the cxx_builtin_include_directory for each of the gcc
- host compiler include directories, which can be added to the CROSSTOOL
- file.
- """
- nvcc_path = repository_ctx.path("%s/bin/nvcc%s" %
- (cuda_config.cuda_toolkit_path,
- ".exe" if cuda_config.cpu_value == "Windows" else ""))
- result = repository_ctx.execute([nvcc_path, '-v',
- '/dev/null', '-o', '/dev/null'])
- target_dir = ""
- for one_line in result.stderr.splitlines():
- if one_line.startswith('#$ _TARGET_DIR_='):
- target_dir = (cuda_config.cuda_toolkit_path + '/' +
- one_line.replace('#$ _TARGET_DIR_=', '') + "/include")
- inc_entries = []
- if target_dir != "":
- inc_entries.append(" cxx_builtin_include_directory: \"%s\"" % target_dir)
- default_include = cuda_config.cuda_toolkit_path + '/include'
- inc_entries.append(" cxx_builtin_include_directory: \"%s\"" %
- default_include)
- return "\n".join(inc_entries)
+ """Generates the cxx_builtin_include_directory entries for cuda inc dirs.
+ Args:
+ repository_ctx: The repository context.
+ cc: The path to the gcc host compiler.
-def _enable_cuda(repository_ctx):
- if "TF_NEED_CUDA" in repository_ctx.os.environ:
- enable_cuda = repository_ctx.os.environ["TF_NEED_CUDA"].strip()
- return enable_cuda == "1"
- return False
+
+    Returns:
+      A string containing the cxx_builtin_include_directory for each of the
+      CUDA toolkit include directories, which can be added to the CROSSTOOL
+      file.
+ """
+ nvcc_path = repository_ctx.path("%s/bin/nvcc%s" %
+ (
+ cuda_config.cuda_toolkit_path,
+ ".exe" if cuda_config.cpu_value == "Windows" else "",
+ ))
+ result = repository_ctx.execute([
+ nvcc_path,
+ "-v",
+ "/dev/null",
+ "-o",
+ "/dev/null",
+ ])
+ target_dir = ""
+ for one_line in result.stderr.splitlines():
+ if one_line.startswith("#$ _TARGET_DIR_="):
+ target_dir = (cuda_config.cuda_toolkit_path + "/" +
+ one_line.replace("#$ _TARGET_DIR_=", "") + "/include")
+ inc_entries = []
+ if target_dir != "":
+ inc_entries.append(" cxx_builtin_include_directory: \"%s\"" % target_dir)
+ default_include = cuda_config.cuda_toolkit_path + "/include"
+ inc_entries.append(" cxx_builtin_include_directory: \"%s\"" %
+ default_include)
+ return "\n".join(inc_entries)
+def _enable_cuda(repository_ctx):
+ if "TF_NEED_CUDA" in repository_ctx.os.environ:
+ enable_cuda = repository_ctx.os.environ["TF_NEED_CUDA"].strip()
+ return enable_cuda == "1"
+ return False
def _cuda_toolkit_path(repository_ctx):
- """Finds the cuda toolkit directory.
-
- Args:
- repository_ctx: The repository context.
+ """Finds the cuda toolkit directory.
- Returns:
- A speculative real path of the cuda toolkit install directory.
- """
- cuda_toolkit_path = _DEFAULT_CUDA_TOOLKIT_PATH
- if _CUDA_TOOLKIT_PATH in repository_ctx.os.environ:
- cuda_toolkit_path = repository_ctx.os.environ[_CUDA_TOOLKIT_PATH].strip()
- if not repository_ctx.path(cuda_toolkit_path).exists:
- auto_configure_fail("Cannot find cuda toolkit path.")
- return str(repository_ctx.path(cuda_toolkit_path).realpath)
+
+    Args:
+      repository_ctx: The repository context.
+
+    Returns:
+      A speculative real path of the cuda toolkit install directory.
+    """
+ cuda_toolkit_path = _DEFAULT_CUDA_TOOLKIT_PATH
+ if _CUDA_TOOLKIT_PATH in repository_ctx.os.environ:
+ cuda_toolkit_path = repository_ctx.os.environ[_CUDA_TOOLKIT_PATH].strip()
+ if not repository_ctx.path(cuda_toolkit_path).exists:
+ auto_configure_fail("Cannot find cuda toolkit path.")
+ return str(repository_ctx.path(cuda_toolkit_path).realpath)
def _cudnn_install_basedir(repository_ctx):
- """Finds the cudnn install directory."""
- cudnn_install_path = _DEFAULT_CUDNN_INSTALL_PATH
- if _CUDNN_INSTALL_PATH in repository_ctx.os.environ:
- cudnn_install_path = repository_ctx.os.environ[_CUDNN_INSTALL_PATH].strip()
- if not repository_ctx.path(cudnn_install_path).exists:
- auto_configure_fail("Cannot find cudnn install path.")
- return cudnn_install_path
-
+ """Finds the cudnn install directory."""
+ cudnn_install_path = _DEFAULT_CUDNN_INSTALL_PATH
+ if _CUDNN_INSTALL_PATH in repository_ctx.os.environ:
+ cudnn_install_path = repository_ctx.os.environ[_CUDNN_INSTALL_PATH].strip()
+ if not repository_ctx.path(cudnn_install_path).exists:
+ auto_configure_fail("Cannot find cudnn install path.")
+ return cudnn_install_path
def matches_version(environ_version, detected_version):
- """Checks whether the user-specified version matches the detected version.
-
- This function performs a weak matching so that if the user specifies only the
- major or major and minor versions, the versions are still considered matching
- if the version parts match. To illustrate:
-
- environ_version detected_version result
- -----------------------------------------
- 5.1.3 5.1.3 True
- 5.1 5.1.3 True
- 5 5.1 True
- 5.1.3 5.1 False
- 5.2.3 5.1.3 False
-
- Args:
- environ_version: The version specified by the user via environment
- variables.
- detected_version: The version autodetected from the CUDA installation on
- the system.
-
- Returns: True if user-specified version matches detected version and False
- otherwise.
- """
- environ_version_parts = environ_version.split(".")
- detected_version_parts = detected_version.split(".")
- if len(detected_version_parts) < len(environ_version_parts):
- return False
- for i, part in enumerate(detected_version_parts):
- if i >= len(environ_version_parts):
- break
- if part != environ_version_parts[i]:
- return False
- return True
-
+ """Checks whether the user-specified version matches the detected version.
+
+ This function performs a weak matching so that if the user specifies only the
+ major or major and minor versions, the versions are still considered matching
+ if the version parts match. To illustrate:
+
+ environ_version detected_version result
+ -----------------------------------------
+ 5.1.3 5.1.3 True
+ 5.1 5.1.3 True
+ 5 5.1 True
+ 5.1.3 5.1 False
+ 5.2.3 5.1.3 False
+
+ Args:
+ environ_version: The version specified by the user via environment
+ variables.
+ detected_version: The version autodetected from the CUDA installation on
+ the system.
+
+    Returns:
+      True if the user-specified version matches the detected version, and
+      False otherwise.
+ """
+ environ_version_parts = environ_version.split(".")
+ detected_version_parts = detected_version.split(".")
+ if len(detected_version_parts) < len(environ_version_parts):
+ return False
+ for i, part in enumerate(detected_version_parts):
+ if i >= len(environ_version_parts):
+ break
+ if part != environ_version_parts[i]:
+ return False
+ return True
_NVCC_VERSION_PREFIX = "Cuda compilation tools, release "
-
def _cuda_version(repository_ctx, cuda_toolkit_path, cpu_value):
- """Detects the version of CUDA installed on the system.
-
- Args:
- repository_ctx: The repository context.
- cuda_toolkit_path: The CUDA install directory.
-
- Returns:
- String containing the version of CUDA.
- """
- # Run nvcc --version and find the line containing the CUDA version.
- nvcc_path = repository_ctx.path("%s/bin/nvcc%s" %
- (cuda_toolkit_path,
- ".exe" if cpu_value == "Windows" else ""))
- if not nvcc_path.exists:
- auto_configure_fail("Cannot find nvcc at %s" % str(nvcc_path))
- result = repository_ctx.execute([str(nvcc_path), '--version'])
- if result.stderr:
- auto_configure_fail("Error running nvcc --version: %s" % result.stderr)
- lines = result.stdout.splitlines()
- version_line = lines[len(lines) - 1]
- if version_line.find(_NVCC_VERSION_PREFIX) == -1:
- auto_configure_fail(
- "Could not parse CUDA version from nvcc --version. Got: %s" %
- result.stdout)
-
- # Parse the CUDA version from the line containing the CUDA version.
- prefix_removed = version_line.replace(_NVCC_VERSION_PREFIX, '')
- parts = prefix_removed.split(",")
- if len(parts) != 2 or len(parts[0]) < 2:
- auto_configure_fail(
- "Could not parse CUDA version from nvcc --version. Got: %s" %
- result.stdout)
- full_version = parts[1].strip()
- if full_version.startswith('V'):
- full_version = full_version[1:]
-
- # Check whether TF_CUDA_VERSION was set by the user and fail if it does not
- # match the detected version.
- environ_version = ""
- if _TF_CUDA_VERSION in repository_ctx.os.environ:
- environ_version = repository_ctx.os.environ[_TF_CUDA_VERSION].strip()
- if environ_version and not matches_version(environ_version, full_version):
- auto_configure_fail(
- ("CUDA version detected from nvcc (%s) does not match " +
- "TF_CUDA_VERSION (%s)") % (full_version, environ_version))
-
- # We only use the version consisting of the major and minor version numbers.
- version_parts = full_version.split('.')
- if len(version_parts) < 2:
- auto_configure_fail("CUDA version detected from nvcc (%s) is incomplete.")
- if cpu_value == "Windows":
- version = "64_%s%s" % (version_parts[0], version_parts[1])
- else:
- version = "%s.%s" % (version_parts[0], version_parts[1])
- return version
+ """Detects the version of CUDA installed on the system.
+
+ Args:
+ repository_ctx: The repository context.
+ cuda_toolkit_path: The CUDA install directory.
+
+ Returns:
+ String containing the version of CUDA.
+ """
+
+ # Run nvcc --version and find the line containing the CUDA version.
+ nvcc_path = repository_ctx.path("%s/bin/nvcc%s" %
+ (
+ cuda_toolkit_path,
+ ".exe" if cpu_value == "Windows" else "",
+ ))
+ if not nvcc_path.exists:
+ auto_configure_fail("Cannot find nvcc at %s" % str(nvcc_path))
+ result = repository_ctx.execute([str(nvcc_path), "--version"])
+ if result.stderr:
+ auto_configure_fail("Error running nvcc --version: %s" % result.stderr)
+ lines = result.stdout.splitlines()
+ version_line = lines[len(lines) - 1]
+ if version_line.find(_NVCC_VERSION_PREFIX) == -1:
+ auto_configure_fail(
+ "Could not parse CUDA version from nvcc --version. Got: %s" %
+ result.stdout,
+ )
+ # Parse the CUDA version from the line containing the CUDA version.
+ prefix_removed = version_line.replace(_NVCC_VERSION_PREFIX, "")
+ parts = prefix_removed.split(",")
+ if len(parts) != 2 or len(parts[0]) < 2:
+ auto_configure_fail(
+ "Could not parse CUDA version from nvcc --version. Got: %s" %
+ result.stdout,
+ )
+ full_version = parts[1].strip()
+ if full_version.startswith("V"):
+ full_version = full_version[1:]
+
+ # Check whether TF_CUDA_VERSION was set by the user and fail if it does not
+ # match the detected version.
+ environ_version = ""
+ if _TF_CUDA_VERSION in repository_ctx.os.environ:
+ environ_version = repository_ctx.os.environ[_TF_CUDA_VERSION].strip()
+ if environ_version and not matches_version(environ_version, full_version):
+ auto_configure_fail(
+ ("CUDA version detected from nvcc (%s) does not match " +
+ "TF_CUDA_VERSION (%s)") % (full_version, environ_version),
+ )
+
+ # We only use the version consisting of the major and minor version numbers.
+ version_parts = full_version.split(".")
+ if len(version_parts) < 2:
+ auto_configure_fail("CUDA version detected from nvcc (%s) is incomplete.")
+ if cpu_value == "Windows":
+ version = "64_%s%s" % (version_parts[0], version_parts[1])
+ else:
+ version = "%s.%s" % (version_parts[0], version_parts[1])
+ return version
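+# Illustrative example: an nvcc version line such as
+# "Cuda compilation tools, release 9.0, V9.0.176" is parsed to the full
+# version "9.0.176" and reduced to "9.0" (or "64_90" on Windows).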
_DEFINE_CUDNN_MAJOR = "#define CUDNN_MAJOR"
_DEFINE_CUDNN_MINOR = "#define CUDNN_MINOR"
_DEFINE_CUDNN_PATCHLEVEL = "#define CUDNN_PATCHLEVEL"
-
def find_cuda_define(repository_ctx, header_dir, header_file, define):
- """Returns the value of a #define in a header file.
-
- Greps through a header file and returns the value of the specified #define.
- If the #define is not found, then raise an error.
-
- Args:
- repository_ctx: The repository context.
- header_dir: The directory containing the header file.
- header_file: The header file name.
- define: The #define to search for.
-
- Returns:
- The value of the #define found in the header.
- """
- # Confirm location of the header and grep for the line defining the macro.
- h_path = repository_ctx.path("%s/%s" % (header_dir, header_file))
- if not h_path.exists:
- auto_configure_fail("Cannot find %s at %s" % (header_file, str(h_path)))
- result = repository_ctx.execute(
- # Grep one more lines as some #defines are splitted into two lines.
- ["grep", "--color=never", "-A1", "-E", define, str(h_path)])
- if result.stderr:
- auto_configure_fail("Error reading %s: %s" % (str(h_path), result.stderr))
-
- # Parse the version from the line defining the macro.
- if result.stdout.find(define) == -1:
- auto_configure_fail("Cannot find line containing '%s' in %s" %
- (define, h_path))
- # Split results to lines
- lines = result.stdout.split('\n')
- num_lines = len(lines)
- for l in range(num_lines):
- line = lines[l]
- if define in line: # Find the line with define
- version = line
- if l != num_lines-1 and line[-1] == '\\': # Add next line, if multiline
- version = version[:-1] + lines[l+1]
- break
- # Remove any comments
- version = version.split("//")[0]
- # Remove define name
- version = version.replace(define, "").strip()
- # Remove the code after the version number.
- version_end = version.find(" ")
- if version_end != -1:
- if version_end == 0:
- auto_configure_fail(
- "Cannot extract the version from line containing '%s' in %s" %
- (define, str(h_path)))
- version = version[:version_end].strip()
- return version
+ """Returns the value of a #define in a header file.
+
+ Greps through a header file and returns the value of the specified #define.
+ If the #define is not found, then raise an error.
+ Args:
+ repository_ctx: The repository context.
+ header_dir: The directory containing the header file.
+ header_file: The header file name.
+ define: The #define to search for.
+
+ Returns:
+ The value of the #define found in the header.
+ """
+
+ # Confirm location of the header and grep for the line defining the macro.
+ h_path = repository_ctx.path("%s/%s" % (header_dir, header_file))
+ if not h_path.exists:
+ auto_configure_fail("Cannot find %s at %s" % (header_file, str(h_path)))
+ result = repository_ctx.execute(
+        # Grep one extra line, as some #defines are split across two lines.
+ ["grep", "--color=never", "-A1", "-E", define, str(h_path)],
+ )
+ if result.stderr:
+ auto_configure_fail("Error reading %s: %s" % (str(h_path), result.stderr))
+
+ # Parse the version from the line defining the macro.
+ if result.stdout.find(define) == -1:
+ auto_configure_fail("Cannot find line containing '%s' in %s" %
+ (define, h_path))
+
+ # Split results to lines
+ lines = result.stdout.split("\n")
+ num_lines = len(lines)
+ for l in range(num_lines):
+ line = lines[l]
+ if define in line: # Find the line with define
+ version = line
+ if l != num_lines - 1 and line[-1] == "\\": # Add next line, if multiline
+ version = version[:-1] + lines[l + 1]
+ break
+
+ # Remove any comments
+ version = version.split("//")[0]
+
+ # Remove define name
+ version = version.replace(define, "").strip()
+
+ # Remove the code after the version number.
+ version_end = version.find(" ")
+ if version_end != -1:
+ if version_end == 0:
+ auto_configure_fail(
+ "Cannot extract the version from line containing '%s' in %s" %
+ (define, str(h_path)),
+ )
+ version = version[:version_end].strip()
+ return version
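+# Illustrative example: for a header line "#define CUDNN_MAJOR 7" this returns
+# "7"; a definition whose line ends in "\" is first joined with the next line
+# before the value is extracted.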
def _cudnn_version(repository_ctx, cudnn_install_basedir, cpu_value):
- """Detects the version of cuDNN installed on the system.
-
- Args:
- repository_ctx: The repository context.
- cpu_value: The name of the host operating system.
- cudnn_install_basedir: The cuDNN install directory.
-
- Returns:
- A string containing the version of cuDNN.
- """
- cudnn_header_dir = _find_cudnn_header_dir(repository_ctx,
- cudnn_install_basedir)
- major_version = find_cuda_define(
- repository_ctx, cudnn_header_dir, "cudnn.h", _DEFINE_CUDNN_MAJOR)
- minor_version = find_cuda_define(
- repository_ctx, cudnn_header_dir, "cudnn.h", _DEFINE_CUDNN_MINOR)
- patch_version = find_cuda_define(
- repository_ctx, cudnn_header_dir, "cudnn.h", _DEFINE_CUDNN_PATCHLEVEL)
- full_version = "%s.%s.%s" % (major_version, minor_version, patch_version)
-
- # Check whether TF_CUDNN_VERSION was set by the user and fail if it does not
- # match the detected version.
- environ_version = ""
- if _TF_CUDNN_VERSION in repository_ctx.os.environ:
- environ_version = repository_ctx.os.environ[_TF_CUDNN_VERSION].strip()
- if environ_version and not matches_version(environ_version, full_version):
- cudnn_h_path = repository_ctx.path("%s/include/cudnn.h" %
- cudnn_install_basedir)
- auto_configure_fail(
- ("cuDNN version detected from %s (%s) does not match " +
- "TF_CUDNN_VERSION (%s)") %
- (str(cudnn_h_path), full_version, environ_version))
-
- # We only use the major version since we use the libcudnn libraries that are
- # only versioned with the major version (e.g. libcudnn.so.5).
- version = major_version
- if cpu_value == "Windows":
- version = "64_" + version
- return version
+ """Detects the version of cuDNN installed on the system.
+
+    Args:
+ repository_ctx: The repository context.
+ cpu_value: The name of the host operating system.
+ cudnn_install_basedir: The cuDNN install directory.
-def _compute_capabilities(repository_ctx):
- """Returns a list of strings representing cuda compute capabilities."""
- if _TF_CUDA_COMPUTE_CAPABILITIES not in repository_ctx.os.environ:
- return _DEFAULT_CUDA_COMPUTE_CAPABILITIES
- capabilities_str = repository_ctx.os.environ[_TF_CUDA_COMPUTE_CAPABILITIES]
- capabilities = capabilities_str.split(",")
- for capability in capabilities:
- # Workaround for Skylark's lack of support for regex. This check should
- # be equivalent to checking:
- # if re.match("[0-9]+.[0-9]+", capability) == None:
- parts = capability.split(".")
- if len(parts) != 2 or not parts[0].isdigit() or not parts[1].isdigit():
- auto_configure_fail("Invalid compute capability: %s" % capability)
- return capabilities
+
+    Returns:
+ A string containing the version of cuDNN.
+ """
+ cudnn_header_dir = _find_cudnn_header_dir(
+ repository_ctx,
+ cudnn_install_basedir,
+ )
+ major_version = find_cuda_define(
+ repository_ctx,
+ cudnn_header_dir,
+ "cudnn.h",
+ _DEFINE_CUDNN_MAJOR,
+ )
+ minor_version = find_cuda_define(
+ repository_ctx,
+ cudnn_header_dir,
+ "cudnn.h",
+ _DEFINE_CUDNN_MINOR,
+ )
+ patch_version = find_cuda_define(
+ repository_ctx,
+ cudnn_header_dir,
+ "cudnn.h",
+ _DEFINE_CUDNN_PATCHLEVEL,
+ )
+ full_version = "%s.%s.%s" % (major_version, minor_version, patch_version)
+
+ # Check whether TF_CUDNN_VERSION was set by the user and fail if it does not
+ # match the detected version.
+ environ_version = ""
+ if _TF_CUDNN_VERSION in repository_ctx.os.environ:
+ environ_version = repository_ctx.os.environ[_TF_CUDNN_VERSION].strip()
+ if environ_version and not matches_version(environ_version, full_version):
+ cudnn_h_path = repository_ctx.path("%s/include/cudnn.h" %
+ cudnn_install_basedir)
+ auto_configure_fail(
+ ("cuDNN version detected from %s (%s) does not match " +
+ "TF_CUDNN_VERSION (%s)") %
+ (str(cudnn_h_path), full_version, environ_version),
+ )
+ # We only use the major version since we use the libcudnn libraries that are
+ # only versioned with the major version (e.g. libcudnn.so.5).
+ version = major_version
+ if cpu_value == "Windows":
+ version = "64_" + version
+ return version
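+# Illustrative example: a cudnn.h defining CUDNN_MAJOR 7, CUDNN_MINOR 0 and
+# CUDNN_PATCHLEVEL 5 is detected as "7.0.5", and the returned value is the
+# major version "7" ("64_7" on Windows).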
-def get_cpu_value(repository_ctx):
- """Returns the name of the host operating system.
+def _compute_capabilities(repository_ctx):
+ """Returns a list of strings representing cuda compute capabilities."""
+ if _TF_CUDA_COMPUTE_CAPABILITIES not in repository_ctx.os.environ:
+ return _DEFAULT_CUDA_COMPUTE_CAPABILITIES
+ capabilities_str = repository_ctx.os.environ[_TF_CUDA_COMPUTE_CAPABILITIES]
+ capabilities = capabilities_str.split(",")
+ for capability in capabilities:
+ # Workaround for Skylark's lack of support for regex. This check should
+ # be equivalent to checking:
+ # if re.match("[0-9]+.[0-9]+", capability) == None:
+ parts = capability.split(".")
+ if len(parts) != 2 or not parts[0].isdigit() or not parts[1].isdigit():
+ auto_configure_fail("Invalid compute capability: %s" % capability)
+ return capabilities
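+# Illustrative example, with a hypothetical setting: the environment variable
+# named by _TF_CUDA_COMPUTE_CAPABILITIES set to "3.5,5.2" yields
+# ["3.5", "5.2"], while a malformed entry such as "sm_35" fails configuration.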
- Args:
- repository_ctx: The repository context.
+def get_cpu_value(repository_ctx):
+ """Returns the name of the host operating system.
- Returns:
- A string containing the name of the host operating system.
- """
- os_name = repository_ctx.os.name.lower()
- if os_name.startswith("mac os"):
- return "Darwin"
- if os_name.find("windows") != -1:
- return "Windows"
- result = repository_ctx.execute(["uname", "-s"])
- return result.stdout.strip()
+
+    Args:
+      repository_ctx: The repository context.
+
+    Returns:
+ A string containing the name of the host operating system.
+ """
+ os_name = repository_ctx.os.name.lower()
+ if os_name.startswith("mac os"):
+ return "Darwin"
+ if os_name.find("windows") != -1:
+ return "Windows"
+ result = repository_ctx.execute(["uname", "-s"])
+ return result.stdout.strip()
def _is_windows(repository_ctx):
- """Returns true if the host operating system is windows."""
- return get_cpu_value(repository_ctx) == "Windows"
-
-def _lib_name(lib, cpu_value, version="", static=False):
- """Constructs the platform-specific name of a library.
-
- Args:
- lib: The name of the library, such as "cudart"
- cpu_value: The name of the host operating system.
- version: The version of the library.
- static: True the library is static or False if it is a shared object.
-
- Returns:
- The platform-specific name of the library.
- """
- if cpu_value in ("Linux", "FreeBSD"):
- if static:
- return "lib%s.a" % lib
- else:
- if version:
- version = ".%s" % version
- return "lib%s.so%s" % (lib, version)
- elif cpu_value == "Windows":
- return "%s.lib" % lib
- elif cpu_value == "Darwin":
- if static:
- return "lib%s.a" % lib
- else:
- if version:
- version = ".%s" % version
- return "lib%s%s.dylib" % (lib, version)
- else:
- auto_configure_fail("Invalid cpu_value: %s" % cpu_value)
-
-
-def _find_cuda_lib(lib, repository_ctx, cpu_value, basedir, version="",
- static=False):
- """Finds the given CUDA or cuDNN library on the system.
-
- Args:
- lib: The name of the library, such as "cudart"
- repository_ctx: The repository context.
- cpu_value: The name of the host operating system.
- basedir: The install directory of CUDA or cuDNN.
- version: The version of the library.
- static: True if static library, False if shared object.
-
- Returns:
- Returns a struct with the following fields:
- file_name: The basename of the library found on the system.
- path: The full path to the library.
- """
- file_name = _lib_name(lib, cpu_value, version, static)
- for relative_path in CUDA_LIB_PATHS:
- path = repository_ctx.path("%s/%s%s" % (basedir, relative_path, file_name))
- if path.exists:
- return struct(file_name=file_name, path=str(path.realpath))
- auto_configure_fail("Cannot find cuda library %s" % file_name)
+ """Returns true if the host operating system is windows."""
+ return get_cpu_value(repository_ctx) == "Windows"
+def _lib_name(lib, cpu_value, version = "", static = False):
+ """Constructs the platform-specific name of a library.
-def _find_cupti_header_dir(repository_ctx, cuda_config):
- """Returns the path to the directory containing cupti.h
+ Args:
+ lib: The name of the library, such as "cudart"
+ cpu_value: The name of the host operating system.
+ version: The version of the library.
+      static: True if the library is static, or False if it is a shared object.
+
+ Returns:
+ The platform-specific name of the library.
+ """
+ if cpu_value in ("Linux", "FreeBSD"):
+ if static:
+ return "lib%s.a" % lib
+ else:
+ if version:
+ version = ".%s" % version
+ return "lib%s.so%s" % (lib, version)
+ elif cpu_value == "Windows":
+ return "%s.lib" % lib
+ elif cpu_value == "Darwin":
+ if static:
+ return "lib%s.a" % lib
+ elif version:
+ version = ".%s" % version
+ return "lib%s%s.dylib" % (lib, version)
+ else:
+ auto_configure_fail("Invalid cpu_value: %s" % cpu_value)
+
+def _find_cuda_lib(
+ lib,
+ repository_ctx,
+ cpu_value,
+ basedir,
+ version = "",
+ static = False):
+ """Finds the given CUDA or cuDNN library on the system.
+
+ Args:
+ lib: The name of the library, such as "cudart"
+ repository_ctx: The repository context.
+ cpu_value: The name of the host operating system.
+ basedir: The install directory of CUDA or cuDNN.
+ version: The version of the library.
+ static: True if static library, False if shared object.
+
+ Returns:
+ Returns a struct with the following fields:
+ file_name: The basename of the library found on the system.
+ path: The full path to the library.
+ """
+ file_name = _lib_name(lib, cpu_value, version, static)
+ for relative_path in CUDA_LIB_PATHS:
+ path = repository_ctx.path("%s/%s%s" % (basedir, relative_path, file_name))
+ if path.exists:
+ return struct(file_name = file_name, path = str(path.realpath))
+ auto_configure_fail("Cannot find cuda library %s" % file_name)
- On most systems, the cupti library is not installed in the same directory as
- the other CUDA libraries but rather in a special extras/CUPTI directory.
+def _find_cupti_header_dir(repository_ctx, cuda_config):
+ """Returns the path to the directory containing cupti.h
- Args:
- repository_ctx: The repository context.
- cuda_config: The CUDA config as returned by _get_cuda_config
+ On most systems, the cupti library is not installed in the same directory as
+ the other CUDA libraries but rather in a special extras/CUPTI directory.
- Returns:
- The path of the directory containing the cupti header.
- """
- cuda_toolkit_path = cuda_config.cuda_toolkit_path
- for relative_path in CUPTI_HEADER_PATHS:
- if repository_ctx.path("%s/%scupti.h" % (cuda_toolkit_path, relative_path)).exists:
- return ("%s/%s" % (cuda_toolkit_path, relative_path))[:-1]
- auto_configure_fail("Cannot find cupti.h under %s" % ", ".join([cuda_toolkit_path + "/" + s for s in CUPTI_HEADER_PATHS]))
+
+    Args:
+      repository_ctx: The repository context.
+      cuda_config: The CUDA config as returned by _get_cuda_config.
+
+    Returns:
+      The path of the directory containing the cupti header.
+    """
+ cuda_toolkit_path = cuda_config.cuda_toolkit_path
+ for relative_path in CUPTI_HEADER_PATHS:
+ if repository_ctx.path("%s/%scupti.h" % (cuda_toolkit_path, relative_path)).exists:
+ return ("%s/%s" % (cuda_toolkit_path, relative_path))[:-1]
+ auto_configure_fail("Cannot find cupti.h under %s" % ", ".join([cuda_toolkit_path + "/" + s for s in CUPTI_HEADER_PATHS]))
def _find_cupti_lib(repository_ctx, cuda_config):
- """Finds the cupti library on the system.
-
- On most systems, the cupti library is not installed in the same directory as
- the other CUDA libraries but rather in a special extras/CUPTI directory.
-
- Args:
- repository_ctx: The repository context.
- cuda_config: The cuda configuration as returned by _get_cuda_config.
-
- Returns:
- Returns a struct with the following fields:
- file_name: The basename of the library found on the system.
- path: The full path to the library.
- """
- file_name = _lib_name("cupti", cuda_config.cpu_value,
- cuda_config.cuda_version)
- cuda_toolkit_path = cuda_config.cuda_toolkit_path
- for relative_path in CUPTI_LIB_PATHS:
- path = repository_ctx.path(
- "%s/%s%s" % (cuda_toolkit_path, relative_path, file_name))
- if path.exists:
- return struct(file_name=file_name, path=str(path.realpath))
-
- auto_configure_fail("Cannot find cupti library %s" % file_name)
+ """Finds the cupti library on the system.
+
+ On most systems, the cupti library is not installed in the same directory as
+ the other CUDA libraries but rather in a special extras/CUPTI directory.
+
+ Args:
+ repository_ctx: The repository context.
+ cuda_config: The cuda configuration as returned by _get_cuda_config.
+
+ Returns:
+ Returns a struct with the following fields:
+ file_name: The basename of the library found on the system.
+ path: The full path to the library.
+ """
+ file_name = _lib_name(
+ "cupti",
+ cuda_config.cpu_value,
+ cuda_config.cuda_version,
+ )
+ cuda_toolkit_path = cuda_config.cuda_toolkit_path
+ for relative_path in CUPTI_LIB_PATHS:
+ path = repository_ctx.path(
+ "%s/%s%s" % (cuda_toolkit_path, relative_path, file_name),
+ )
+ if path.exists:
+ return struct(file_name = file_name, path = str(path.realpath))
+
+ auto_configure_fail("Cannot find cupti library %s" % file_name)
def _find_libs(repository_ctx, cuda_config):
- """Returns the CUDA and cuDNN libraries on the system.
-
- Args:
- repository_ctx: The repository context.
- cuda_config: The CUDA config as returned by _get_cuda_config
-
- Returns:
- Map of library names to structs of filename and path.
- """
- cpu_value = cuda_config.cpu_value
- return {
- "cuda": _find_cuda_lib("cuda", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path),
- "cudart": _find_cuda_lib(
- "cudart", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
- cuda_config.cuda_version),
- "cudart_static": _find_cuda_lib(
- "cudart_static", repository_ctx, cpu_value,
- cuda_config.cuda_toolkit_path, cuda_config.cuda_version, static=True),
- "cublas": _find_cuda_lib(
- "cublas", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
- cuda_config.cuda_version),
- "cusolver": _find_cuda_lib(
- "cusolver", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
- cuda_config.cuda_version),
- "curand": _find_cuda_lib(
- "curand", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
- cuda_config.cuda_version),
- "cufft": _find_cuda_lib(
- "cufft", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
- cuda_config.cuda_version),
- "cudnn": _find_cuda_lib(
- "cudnn", repository_ctx, cpu_value, cuda_config.cudnn_install_basedir,
- cuda_config.cudnn_version),
- "cupti": _find_cupti_lib(repository_ctx, cuda_config)
- }
+ """Returns the CUDA and cuDNN libraries on the system.
+
+    Args:
+      repository_ctx: The repository context.
+      cuda_config: The CUDA config as returned by _get_cuda_config.
-def _find_cuda_include_path(repository_ctx, cuda_config):
- """Returns the path to the directory containing cuda.h
+
+    Returns:
+ Map of library names to structs of filename and path.
+ """
+ cpu_value = cuda_config.cpu_value
+ return {
+ "cuda": _find_cuda_lib("cuda", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path),
+ "cudart": _find_cuda_lib(
+ "cudart",
+ repository_ctx,
+ cpu_value,
+ cuda_config.cuda_toolkit_path,
+ cuda_config.cuda_version,
+ ),
+ "cudart_static": _find_cuda_lib(
+ "cudart_static",
+ repository_ctx,
+ cpu_value,
+ cuda_config.cuda_toolkit_path,
+ cuda_config.cuda_version,
+ static = True,
+ ),
+ "cublas": _find_cuda_lib(
+ "cublas",
+ repository_ctx,
+ cpu_value,
+ cuda_config.cuda_toolkit_path,
+ cuda_config.cuda_version,
+ ),
+ "cusolver": _find_cuda_lib(
+ "cusolver",
+ repository_ctx,
+ cpu_value,
+ cuda_config.cuda_toolkit_path,
+ cuda_config.cuda_version,
+ ),
+ "curand": _find_cuda_lib(
+ "curand",
+ repository_ctx,
+ cpu_value,
+ cuda_config.cuda_toolkit_path,
+ cuda_config.cuda_version,
+ ),
+ "cufft": _find_cuda_lib(
+ "cufft",
+ repository_ctx,
+ cpu_value,
+ cuda_config.cuda_toolkit_path,
+ cuda_config.cuda_version,
+ ),
+ "cudnn": _find_cuda_lib(
+ "cudnn",
+ repository_ctx,
+ cpu_value,
+ cuda_config.cudnn_install_basedir,
+ cuda_config.cudnn_version,
+ ),
+ "cupti": _find_cupti_lib(repository_ctx, cuda_config),
+ }
- Args:
- repository_ctx: The repository context.
- cuda_config: The CUDA config as returned by _get_cuda_config
+def _find_cuda_include_path(repository_ctx, cuda_config):
+ """Returns the path to the directory containing cuda.h
- Returns:
- The path of the directory containing the CUDA headers.
- """
- cuda_toolkit_path = cuda_config.cuda_toolkit_path
- for relative_path in CUDA_INCLUDE_PATHS:
- if repository_ctx.path("%s/%scuda.h" % (cuda_toolkit_path, relative_path)).exists:
- return ("%s/%s" % (cuda_toolkit_path, relative_path))[:-1]
- auto_configure_fail("Cannot find cuda.h under %s" % cuda_toolkit_path)
+
+    Args:
+      repository_ctx: The repository context.
+      cuda_config: The CUDA config as returned by _get_cuda_config.
+
+    Returns:
+      The path of the directory containing the CUDA headers.
+    """
+ cuda_toolkit_path = cuda_config.cuda_toolkit_path
+ for relative_path in CUDA_INCLUDE_PATHS:
+ if repository_ctx.path("%s/%scuda.h" % (cuda_toolkit_path, relative_path)).exists:
+ return ("%s/%s" % (cuda_toolkit_path, relative_path))[:-1]
+ auto_configure_fail("Cannot find cuda.h under %s" % cuda_toolkit_path)
def _find_cudnn_header_dir(repository_ctx, cudnn_install_basedir):
- """Returns the path to the directory containing cudnn.h
-
- Args:
- repository_ctx: The repository context.
- cudnn_install_basedir: The cudnn install directory as returned by
- _cudnn_install_basedir.
+ """Returns the path to the directory containing cudnn.h
- Returns:
- The path of the directory containing the cudnn header.
- """
- for relative_path in CUDA_INCLUDE_PATHS:
- if repository_ctx.path("%s/%scudnn.h" % (cudnn_install_basedir, relative_path)).exists:
- return ("%s/%s" % (cudnn_install_basedir, relative_path))[:-1]
- if repository_ctx.path("/usr/include/cudnn.h").exists:
- return "/usr/include"
- auto_configure_fail("Cannot find cudnn.h under %s" % cudnn_install_basedir)
+
+    Args:
+      repository_ctx: The repository context.
+      cudnn_install_basedir: The cudnn install directory as returned by
+        _cudnn_install_basedir.
+
+    Returns:
+      The path of the directory containing the cudnn header.
+    """
+ for relative_path in CUDA_INCLUDE_PATHS:
+ if repository_ctx.path("%s/%scudnn.h" % (cudnn_install_basedir, relative_path)).exists:
+ return ("%s/%s" % (cudnn_install_basedir, relative_path))[:-1]
+ if repository_ctx.path("/usr/include/cudnn.h").exists:
+ return "/usr/include"
+ auto_configure_fail("Cannot find cudnn.h under %s" % cudnn_install_basedir)
def _find_nvvm_libdevice_dir(repository_ctx, cuda_config):
- """Returns the path to the directory containing libdevice in bitcode format.
+ """Returns the path to the directory containing libdevice in bitcode format.
- Args:
- repository_ctx: The repository context.
- cuda_config: The CUDA config as returned by _get_cuda_config
-
- Returns:
- The path of the directory containing the CUDA headers.
- """
- cuda_toolkit_path = cuda_config.cuda_toolkit_path
- for relative_path in NVVM_LIBDEVICE_PATHS:
- if repository_ctx.path("%s/%slibdevice.10.bc" % (cuda_toolkit_path, relative_path)).exists:
- return ("%s/%s" % (cuda_toolkit_path, relative_path))[:-1]
- auto_configure_fail("Cannot find libdevice.10.bc under %s" % cuda_toolkit_path)
+
+    Args:
+      repository_ctx: The repository context.
+      cuda_config: The CUDA config as returned by _get_cuda_config.
+
+    Returns:
+      The path of the directory containing the libdevice files.
+    """
+ cuda_toolkit_path = cuda_config.cuda_toolkit_path
+ for libdevice_file in NVVM_LIBDEVICE_FILES:
+ for relative_path in NVVM_LIBDEVICE_PATHS:
+ if repository_ctx.path("%s/%s%s" % (cuda_toolkit_path, relative_path, libdevice_file)).exists:
+ return ("%s/%s" % (cuda_toolkit_path, relative_path))[:-1]
+ auto_configure_fail("Cannot find libdevice*.bc files under %s" % cuda_toolkit_path)
def _cudart_static_linkopt(cpu_value):
- """Returns additional platform-specific linkopts for cudart."""
- return "" if cpu_value == "Darwin" else "\"-lrt\","
+ """Returns additional platform-specific linkopts for cudart."""
+ return "" if cpu_value == "Darwin" else "\"-lrt\","
def _get_cuda_config(repository_ctx):
- """Detects and returns information about the CUDA installation on the system.
-
- Args:
- repository_ctx: The repository context.
-
- Returns:
- A struct containing the following fields:
- cuda_toolkit_path: The CUDA toolkit installation directory.
- cudnn_install_basedir: The cuDNN installation directory.
- cuda_version: The version of CUDA on the system.
- cudnn_version: The version of cuDNN on the system.
- compute_capabilities: A list of the system's CUDA compute capabilities.
- cpu_value: The name of the host operating system.
- """
- cpu_value = get_cpu_value(repository_ctx)
- cuda_toolkit_path = _cuda_toolkit_path(repository_ctx)
- cuda_version = _cuda_version(repository_ctx, cuda_toolkit_path, cpu_value)
- cudnn_install_basedir = _cudnn_install_basedir(repository_ctx)
- cudnn_version = _cudnn_version(repository_ctx, cudnn_install_basedir, cpu_value)
- return struct(
- cuda_toolkit_path = cuda_toolkit_path,
- cudnn_install_basedir = cudnn_install_basedir,
- cuda_version = cuda_version,
- cudnn_version = cudnn_version,
- compute_capabilities = _compute_capabilities(repository_ctx),
- cpu_value = cpu_value)
-
-
-def _tpl(repository_ctx, tpl, substitutions={}, out=None):
- if not out:
- out = tpl.replace(":", "/")
- repository_ctx.template(
- out,
- Label("//third_party/gpus/%s.tpl" % tpl),
- substitutions)
-
+ """Detects and returns information about the CUDA installation on the system.
+
+ Args:
+ repository_ctx: The repository context.
+
+ Returns:
+ A struct containing the following fields:
+ cuda_toolkit_path: The CUDA toolkit installation directory.
+ cudnn_install_basedir: The cuDNN installation directory.
+ cuda_version: The version of CUDA on the system.
+ cudnn_version: The version of cuDNN on the system.
+ compute_capabilities: A list of the system's CUDA compute capabilities.
+ cpu_value: The name of the host operating system.
+ """
+ cpu_value = get_cpu_value(repository_ctx)
+ cuda_toolkit_path = _cuda_toolkit_path(repository_ctx)
+ cuda_version = _cuda_version(repository_ctx, cuda_toolkit_path, cpu_value)
+ cudnn_install_basedir = _cudnn_install_basedir(repository_ctx)
+ cudnn_version = _cudnn_version(repository_ctx, cudnn_install_basedir, cpu_value)
+ return struct(
+ cuda_toolkit_path = cuda_toolkit_path,
+ cudnn_install_basedir = cudnn_install_basedir,
+ cuda_version = cuda_version,
+ cudnn_version = cudnn_version,
+ compute_capabilities = _compute_capabilities(repository_ctx),
+ cpu_value = cpu_value,
+ )
+
+def _tpl(repository_ctx, tpl, substitutions = {}, out = None):
+ if not out:
+ out = tpl.replace(":", "/")
+ repository_ctx.template(
+ out,
+ Label("//third_party/gpus/%s.tpl" % tpl),
+ substitutions,
+ )
def _file(repository_ctx, label):
- repository_ctx.template(
- label.replace(":", "/"),
- Label("//third_party/gpus/%s.tpl" % label),
- {})
-
+ repository_ctx.template(
+ label.replace(":", "/"),
+ Label("//third_party/gpus/%s.tpl" % label),
+ {},
+ )
_DUMMY_CROSSTOOL_BZL_FILE = """
def error_gpu_disabled():
@@ -792,379 +949,498 @@ def error_gpu_disabled():
)
"""
-
_DUMMY_CROSSTOOL_BUILD_FILE = """
load("//crosstool:error_gpu_disabled.bzl", "error_gpu_disabled")
error_gpu_disabled()
"""
-
def _create_dummy_repository(repository_ctx):
- cpu_value = get_cpu_value(repository_ctx)
-
- # Set up BUILD file for cuda/.
- _tpl(repository_ctx, "cuda:build_defs.bzl",
- {
- "%{cuda_is_configured}": "False",
- "%{cuda_extra_copts}": "[]",
- })
- _tpl(repository_ctx, "cuda:BUILD",
- {
- "%{cuda_driver_lib}": _lib_name("cuda", cpu_value),
- "%{cudart_static_lib}": _lib_name("cudart_static", cpu_value,
- static=True),
- "%{cudart_static_linkopt}": _cudart_static_linkopt(cpu_value),
- "%{cudart_lib}": _lib_name("cudart", cpu_value),
- "%{cublas_lib}": _lib_name("cublas", cpu_value),
- "%{cusolver_lib}": _lib_name("cusolver", cpu_value),
- "%{cudnn_lib}": _lib_name("cudnn", cpu_value),
- "%{cufft_lib}": _lib_name("cufft", cpu_value),
- "%{curand_lib}": _lib_name("curand", cpu_value),
- "%{cupti_lib}": _lib_name("cupti", cpu_value),
- "%{cuda_include_genrules}": '',
- "%{cuda_headers}": '',
- })
-
- # Create dummy files for the CUDA toolkit since they are still required by
- # tensorflow/core/platform/default/build_config:cuda.
- repository_ctx.file("cuda/cuda/include/cuda.h", "")
- repository_ctx.file("cuda/cuda/include/cublas.h", "")
- repository_ctx.file("cuda/cuda/include/cudnn.h", "")
- repository_ctx.file("cuda/cuda/extras/CUPTI/include/cupti.h", "")
- repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cuda", cpu_value))
- repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cudart", cpu_value))
- repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cudart_static", cpu_value))
- repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cublas", cpu_value))
- repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cusolver", cpu_value))
- repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cudnn", cpu_value))
- repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("curand", cpu_value))
- repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cufft", cpu_value))
- repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cupti", cpu_value))
-
- # Set up cuda_config.h, which is used by
- # tensorflow/stream_executor/dso_loader.cc.
- _tpl(repository_ctx, "cuda:cuda_config.h",
- {
- "%{cuda_version}": _DEFAULT_CUDA_VERSION,
- "%{cudnn_version}": _DEFAULT_CUDNN_VERSION,
- "%{cuda_compute_capabilities}": ",".join([
- "CudaVersion(\"%s\")" % c
- for c in _DEFAULT_CUDA_COMPUTE_CAPABILITIES]),
- "%{cuda_toolkit_path}": _DEFAULT_CUDA_TOOLKIT_PATH,
- }, "cuda/cuda/cuda_config.h")
-
- # If cuda_configure is not configured to build with GPU support, and the user
- # attempts to build with --config=cuda, add a dummy build rule to intercept
- # this and fail with an actionable error message.
- repository_ctx.file("crosstool/error_gpu_disabled.bzl",
- _DUMMY_CROSSTOOL_BZL_FILE)
- repository_ctx.file("crosstool/BUILD", _DUMMY_CROSSTOOL_BUILD_FILE)
-
-
-def _execute(repository_ctx, cmdline, error_msg=None, error_details=None,
- empty_stdout_fine=False):
- """Executes an arbitrary shell command.
-
- Args:
- repository_ctx: the repository_ctx object
- cmdline: list of strings, the command to execute
- error_msg: string, a summary of the error if the command fails
- error_details: string, details about the error or steps to fix it
- empty_stdout_fine: bool, if True, an empty stdout result is fine, otherwise
- it's an error
- Return:
- the result of repository_ctx.execute(cmdline)
- """
- result = repository_ctx.execute(cmdline)
- if result.stderr or not (empty_stdout_fine or result.stdout):
- auto_configure_fail(
- "\n".join([
- error_msg.strip() if error_msg else "Repository command failed",
- result.stderr.strip(),
- error_details if error_details else ""]))
- return result
-
+ cpu_value = get_cpu_value(repository_ctx)
+
+ # Set up BUILD file for cuda/.
+ _tpl(
+ repository_ctx,
+ "cuda:build_defs.bzl",
+ {
+ "%{cuda_is_configured}": "False",
+ "%{cuda_extra_copts}": "[]",
+ },
+ )
+ _tpl(
+ repository_ctx,
+ "cuda:BUILD",
+ {
+ "%{cuda_driver_lib}": _lib_name("cuda", cpu_value),
+ "%{cudart_static_lib}": _lib_name(
+ "cudart_static",
+ cpu_value,
+ static = True,
+ ),
+ "%{cudart_static_linkopt}": _cudart_static_linkopt(cpu_value),
+ "%{cudart_lib}": _lib_name("cudart", cpu_value),
+ "%{cublas_lib}": _lib_name("cublas", cpu_value),
+ "%{cusolver_lib}": _lib_name("cusolver", cpu_value),
+ "%{cudnn_lib}": _lib_name("cudnn", cpu_value),
+ "%{cufft_lib}": _lib_name("cufft", cpu_value),
+ "%{curand_lib}": _lib_name("curand", cpu_value),
+ "%{cupti_lib}": _lib_name("cupti", cpu_value),
+ "%{cuda_include_genrules}": "",
+ "%{cuda_headers}": "",
+ },
+ )
+
+ # Create dummy files for the CUDA toolkit since they are still required by
+ # tensorflow/core/platform/default/build_config:cuda.
+ repository_ctx.file("cuda/cuda/include/cuda.h", "")
+ repository_ctx.file("cuda/cuda/include/cublas.h", "")
+ repository_ctx.file("cuda/cuda/include/cudnn.h", "")
+ repository_ctx.file("cuda/cuda/extras/CUPTI/include/cupti.h", "")
+ repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cuda", cpu_value))
+ repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cudart", cpu_value))
+ repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cudart_static", cpu_value))
+ repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cublas", cpu_value))
+ repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cusolver", cpu_value))
+ repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cudnn", cpu_value))
+ repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("curand", cpu_value))
+ repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cufft", cpu_value))
+ repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cupti", cpu_value))
+
+ # Set up cuda_config.h, which is used by
+ # tensorflow/stream_executor/dso_loader.cc.
+ _tpl(
+ repository_ctx,
+ "cuda:cuda_config.h",
+ {
+ "%{cuda_version}": _DEFAULT_CUDA_VERSION,
+ "%{cudnn_version}": _DEFAULT_CUDNN_VERSION,
+ "%{cuda_compute_capabilities}": ",".join([
+ "CudaVersion(\"%s\")" % c
+ for c in _DEFAULT_CUDA_COMPUTE_CAPABILITIES
+ ]),
+ "%{cuda_toolkit_path}": _DEFAULT_CUDA_TOOLKIT_PATH,
+ },
+ "cuda/cuda/cuda_config.h",
+ )
+
+ # If cuda_configure is not configured to build with GPU support, and the user
+ # attempts to build with --config=cuda, add a dummy build rule to intercept
+ # this and fail with an actionable error message.
+ repository_ctx.file(
+ "crosstool/error_gpu_disabled.bzl",
+ _DUMMY_CROSSTOOL_BZL_FILE,
+ )
+ repository_ctx.file("crosstool/BUILD", _DUMMY_CROSSTOOL_BUILD_FILE)
+
+def _execute(
+ repository_ctx,
+ cmdline,
+ error_msg = None,
+ error_details = None,
+ empty_stdout_fine = False):
+ """Executes an arbitrary shell command.
+
+ Args:
+ repository_ctx: the repository_ctx object
+ cmdline: list of strings, the command to execute
+ error_msg: string, a summary of the error if the command fails
+ error_details: string, details about the error or steps to fix it
+ empty_stdout_fine: bool, if True, an empty stdout result is fine, otherwise
+ it's an error
+
+    Returns:
+      The result of repository_ctx.execute(cmdline).
+ """
+ result = repository_ctx.execute(cmdline)
+ if result.stderr or not (empty_stdout_fine or result.stdout):
+ auto_configure_fail(
+ "\n".join([
+ error_msg.strip() if error_msg else "Repository command failed",
+ result.stderr.strip(),
+ error_details if error_details else "",
+ ]),
+ )
+ return result
def _norm_path(path):
- """Returns a path with '/' and remove the trailing slash."""
- path = path.replace("\\", "/")
- if path[-1] == "/":
- path = path[:-1]
- return path
-
-
-def symlink_genrule_for_dir(repository_ctx, src_dir, dest_dir, genrule_name,
- src_files = [], dest_files = []):
- """Returns a genrule to symlink(or copy if on Windows) a set of files.
-
- If src_dir is passed, files will be read from the given directory; otherwise
- we assume files are in src_files and dest_files
- """
- if src_dir != None:
- src_dir = _norm_path(src_dir)
- dest_dir = _norm_path(dest_dir)
- files = '\n'.join(sorted(_read_dir(repository_ctx, src_dir).splitlines()))
- # Create a list with the src_dir stripped to use for outputs.
- dest_files = files.replace(src_dir, '').splitlines()
- src_files = files.splitlines()
- command = []
- if not _is_windows(repository_ctx):
- # We clear folders that might have been generated previously to avoid
- # undesired inclusions
- command.append('if [ -d "$(@D)/extras" ]; then rm $(@D)/extras -drf; fi')
- command.append('if [ -d "$(@D)/include" ]; then rm $(@D)/include -drf; fi')
- command.append('if [ -d "$(@D)/lib" ]; then rm $(@D)/lib -drf; fi')
- command.append('if [ -d "$(@D)/nvvm" ]; then rm $(@D)/nvvm -drf; fi')
- outs = []
- for i in range(len(dest_files)):
- if dest_files[i] != "":
- # If we have only one file to link we do not want to use the dest_dir, as
- # $(@D) will include the full path to the file.
- dest = '$(@D)/' + dest_dir + dest_files[i] if len(dest_files) != 1 else '$(@D)/' + dest_files[i]
- # On Windows, symlink is not supported, so we just copy all the files.
- cmd = 'cp -f' if _is_windows(repository_ctx) else 'ln -s'
- command.append(cmd + ' "%s" "%s"' % (src_files[i] , dest))
- outs.append(' "' + dest_dir + dest_files[i] + '",')
- genrule = _genrule(src_dir, genrule_name, " && ".join(command),
- "\n".join(outs))
- return genrule
-
+ """Returns a path with '/' and remove the trailing slash."""
+ path = path.replace("\\", "/")
+ if path[-1] == "/":
+ path = path[:-1]
+ return path
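+# Illustrative example: a Windows-style path "C:\cuda\lib\" is normalized to
+# "C:/cuda/lib".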
+
+def symlink_genrule_for_dir(
+ repository_ctx,
+ src_dir,
+ dest_dir,
+ genrule_name,
+ src_files = [],
+ dest_files = []):
+ """Returns a genrule to symlink(or copy if on Windows) a set of files.
+
+ If src_dir is passed, files will be read from the given directory; otherwise
+ we assume files are in src_files and dest_files
+ """
+ if src_dir != None:
+ src_dir = _norm_path(src_dir)
+ dest_dir = _norm_path(dest_dir)
+ files = "\n".join(sorted(_read_dir(repository_ctx, src_dir).splitlines()))
+
+ # Create a list with the src_dir stripped to use for outputs.
+ dest_files = files.replace(src_dir, "").splitlines()
+ src_files = files.splitlines()
+ command = []
+ if not _is_windows(repository_ctx):
+ # We clear folders that might have been generated previously to avoid
+ # undesired inclusions
+ command.append('if [ -d "$(@D)/extras" ]; then rm $(@D)/extras -drf; fi')
+ command.append('if [ -d "$(@D)/include" ]; then rm $(@D)/include -drf; fi')
+ command.append('if [ -d "$(@D)/lib" ]; then rm $(@D)/lib -drf; fi')
+ command.append('if [ -d "$(@D)/nvvm" ]; then rm $(@D)/nvvm -drf; fi')
+ outs = []
+ for i in range(len(dest_files)):
+ if dest_files[i] != "":
+ # If we have only one file to link we do not want to use the dest_dir, as
+ # $(@D) will include the full path to the file.
+ dest = "$(@D)/" + dest_dir + dest_files[i] if len(dest_files) != 1 else "$(@D)/" + dest_files[i]
+
+ # On Windows, symlink is not supported, so we just copy all the files.
+ cmd = "cp -f" if _is_windows(repository_ctx) else "ln -s"
+ command.append(cmd + ' "%s" "%s"' % (src_files[i], dest))
+ outs.append(' "' + dest_dir + dest_files[i] + '",')
+ genrule = _genrule(
+ src_dir,
+ genrule_name,
+ " && ".join(command),
+ "\n".join(outs),
+ )
+ return genrule
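+# Illustrative call, with hypothetical paths: symlink_genrule_for_dir(
+# repository_ctx, "/usr/local/cuda/include", "cuda/include", "cuda-include")
+# emits one "cuda-include" genrule whose cmd symlinks every file found under
+# the source directory into $(@D)/cuda/include/.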
def _genrule(src_dir, genrule_name, command, outs):
- """Returns a string with a genrule.
-
- Genrule executes the given command and produces the given outputs.
- """
- return (
- 'genrule(\n' +
- ' name = "' +
- genrule_name + '",\n' +
- ' outs = [\n' +
- outs +
- '\n ],\n' +
- ' cmd = """\n' +
- command +
- '\n """,\n' +
- ')\n'
- )
+ """Returns a string with a genrule.
+ Genrule executes the given command and produces the given outputs.
+ """
+ return (
+ "genrule(\n" +
+ ' name = "' +
+ genrule_name + '",\n' +
+ " outs = [\n" +
+ outs +
+ "\n ],\n" +
+ ' cmd = """\n' +
+ command +
+ '\n """,\n' +
+ ")\n"
+ )
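+# Illustrative output, with hypothetical arguments: _genrule(None, "cuda-lib",
+# 'ln -s "/usr/local/cuda/lib64/libcudart.so" "$(@D)/cuda/lib/libcudart.so"',
+# ' "cuda/lib/libcudart.so",') returns a genrule() stanza with that name,
+# outs list and cmd, ready to be spliced into a BUILD file.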
def _read_dir(repository_ctx, src_dir):
- """Returns a string with all files in a directory.
-
- Finds all files inside a directory, traversing subfolders and following
- symlinks. The returned string contains the full path of all files
- separated by line breaks.
- """
- if _is_windows(repository_ctx):
- src_dir = src_dir.replace("/", "\\")
- find_result = _execute(
- repository_ctx, ["cmd.exe", "/c", "dir", src_dir, "/b", "/s", "/a-d"],
- empty_stdout_fine=True)
- # src_files will be used in genrule.outs where the paths must
- # use forward slashes.
- result = find_result.stdout.replace("\\", "/")
- else:
- find_result = _execute(
- repository_ctx, ["find", src_dir, "-follow", "-type", "f"],
- empty_stdout_fine=True)
- result = find_result.stdout
- return result
+ """Returns a string with all files in a directory.
+
+ Finds all files inside a directory, traversing subfolders and following
+ symlinks. The returned string contains the full path of all files
+ separated by line breaks.
+ """
+ if _is_windows(repository_ctx):
+ src_dir = src_dir.replace("/", "\\")
+ find_result = _execute(
+ repository_ctx,
+ ["cmd.exe", "/c", "dir", src_dir, "/b", "/s", "/a-d"],
+ empty_stdout_fine = True,
+ )
+
+ # src_files will be used in genrule.outs where the paths must
+ # use forward slashes.
+ result = find_result.stdout.replace("\\", "/")
+ else:
+ find_result = _execute(
+ repository_ctx,
+ ["find", src_dir, "-follow", "-type", "f"],
+ empty_stdout_fine = True,
+ )
+ result = find_result.stdout
+ return result
def _flag_enabled(repository_ctx, flag_name):
- if flag_name in repository_ctx.os.environ:
- value = repository_ctx.os.environ[flag_name].strip()
- return value == "1"
- return False
+ if flag_name in repository_ctx.os.environ:
+ value = repository_ctx.os.environ[flag_name].strip()
+ return value == "1"
+ return False
def _use_cuda_clang(repository_ctx):
- return _flag_enabled(repository_ctx, "TF_CUDA_CLANG")
+ return _flag_enabled(repository_ctx, "TF_CUDA_CLANG")
def _compute_cuda_extra_copts(repository_ctx, compute_capabilities):
- if _use_cuda_clang(repository_ctx):
- capability_flags = ["--cuda-gpu-arch=sm_" +
- cap.replace(".", "") for cap in compute_capabilities]
- else:
- # Capabilities are handled in the "crosstool_wrapper_driver_is_not_gcc" for nvcc
- capability_flags = []
- return str(capability_flags)
+ if _use_cuda_clang(repository_ctx):
+ capability_flags = ["--cuda-gpu-arch=sm_" +
+ cap.replace(".", "") for cap in compute_capabilities]
+ else:
+ # Capabilities are handled in the "crosstool_wrapper_driver_is_not_gcc" for nvcc
+ capability_flags = []
+ return str(capability_flags)
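+# Illustrative example: when TF_CUDA_CLANG is enabled, capabilities
+# ["3.5", "5.2"] produce the string
+# '["--cuda-gpu-arch=sm_35", "--cuda-gpu-arch=sm_52"]'; for nvcc the result is
+# the empty list string "[]".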
def _create_local_cuda_repository(repository_ctx):
- """Creates the repository containing files set up to build with CUDA."""
- cuda_config = _get_cuda_config(repository_ctx)
-
- cuda_include_path = _find_cuda_include_path(repository_ctx, cuda_config)
- cudnn_header_dir = _find_cudnn_header_dir(repository_ctx,
- cuda_config.cudnn_install_basedir)
- cupti_header_dir = _find_cupti_header_dir(repository_ctx, cuda_config)
- nvvm_libdevice_dir = _find_nvvm_libdevice_dir(repository_ctx, cuda_config)
-
- # Set up symbolic links for the cuda toolkit by creating genrules to do
- # symlinking. We create one genrule for each directory we want to track under
- # cuda_toolkit_path
- cuda_toolkit_path = cuda_config.cuda_toolkit_path
- genrules = [symlink_genrule_for_dir(repository_ctx,
- cuda_include_path, "cuda/include", "cuda-include")]
- genrules.append(symlink_genrule_for_dir(repository_ctx,
- nvvm_libdevice_dir, "cuda/nvvm/libdevice", "cuda-nvvm"))
- genrules.append(symlink_genrule_for_dir(repository_ctx,
- cupti_header_dir, "cuda/extras/CUPTI/include", "cuda-extras"))
-
- cuda_libs = _find_libs(repository_ctx, cuda_config)
- cuda_lib_src = []
- cuda_lib_dest = []
- for lib in cuda_libs.values():
- cuda_lib_src.append(lib.path)
- cuda_lib_dest.append("cuda/lib/" + lib.file_name)
- genrules.append(symlink_genrule_for_dir(repository_ctx, None, "", "cuda-lib",
- cuda_lib_src, cuda_lib_dest))
-
- # Set up the symbolic links for cudnn if cndnn was not installed to
- # CUDA_TOOLKIT_PATH.
- included_files = _read_dir(repository_ctx, cuda_include_path).replace(
- cuda_include_path, '').splitlines()
- if '/cudnn.h' not in included_files:
- genrules.append(symlink_genrule_for_dir(repository_ctx, None,
- "cuda/include/", "cudnn-include", [cudnn_header_dir + "/cudnn.h"],
- ["cudnn.h"]))
- else:
- genrules.append(
- 'filegroup(\n' +
+ """Creates the repository containing files set up to build with CUDA."""
+ cuda_config = _get_cuda_config(repository_ctx)
+
+ cuda_include_path = _find_cuda_include_path(repository_ctx, cuda_config)
+ cudnn_header_dir = _find_cudnn_header_dir(
+ repository_ctx,
+ cuda_config.cudnn_install_basedir,
+ )
+ cupti_header_dir = _find_cupti_header_dir(repository_ctx, cuda_config)
+ nvvm_libdevice_dir = _find_nvvm_libdevice_dir(repository_ctx, cuda_config)
+
+ # Set up symbolic links for the CUDA toolkit by creating genrules to do the
+ # symlinking. We create one genrule for each directory we want to track under
+ # cuda_toolkit_path.
+ cuda_toolkit_path = cuda_config.cuda_toolkit_path
+ genrules = [symlink_genrule_for_dir(
+ repository_ctx,
+ cuda_include_path,
+ "cuda/include",
+ "cuda-include",
+ )]
+ genrules.append(symlink_genrule_for_dir(
+ repository_ctx,
+ nvvm_libdevice_dir,
+ "cuda/nvvm/libdevice",
+ "cuda-nvvm",
+ ))
+ genrules.append(symlink_genrule_for_dir(
+ repository_ctx,
+ cupti_header_dir,
+ "cuda/extras/CUPTI/include",
+ "cuda-extras",
+ ))
+
+ cuda_libs = _find_libs(repository_ctx, cuda_config)
+ cuda_lib_src = []
+ cuda_lib_dest = []
+ for lib in cuda_libs.values():
+ cuda_lib_src.append(lib.path)
+ cuda_lib_dest.append("cuda/lib/" + lib.file_name)
+ genrules.append(symlink_genrule_for_dir(
+ repository_ctx,
+ None,
+ "",
+ "cuda-lib",
+ cuda_lib_src,
+ cuda_lib_dest,
+ ))
+
+ # Set up the symbolic links for cudnn if cudnn was not installed to
+ # CUDA_TOOLKIT_PATH.
+ included_files = _read_dir(repository_ctx, cuda_include_path).replace(
+ cuda_include_path,
+ "",
+ ).splitlines()
+ if "/cudnn.h" not in included_files:
+ genrules.append(symlink_genrule_for_dir(
+ repository_ctx,
+ None,
+ "cuda/include/",
+ "cudnn-include",
+ [cudnn_header_dir + "/cudnn.h"],
+ ["cudnn.h"],
+ ))
+ else:
+ genrules.append(
+ "filegroup(\n" +
' name = "cudnn-include",\n' +
- ' srcs = [],\n' +
- ')\n'
+ " srcs = [],\n" +
+ ")\n",
)
- # Set up BUILD file for cuda/
- _tpl(repository_ctx, "cuda:build_defs.bzl",
- {
- "%{cuda_is_configured}": "True",
- "%{cuda_extra_copts}": _compute_cuda_extra_copts(
- repository_ctx, cuda_config.compute_capabilities),
- })
- _tpl(repository_ctx, "cuda:BUILD",
- {
- "%{cuda_driver_lib}": cuda_libs["cuda"].file_name,
- "%{cudart_static_lib}": cuda_libs["cudart_static"].file_name,
- "%{cudart_static_linkopt}": _cudart_static_linkopt(
- cuda_config.cpu_value),
- "%{cudart_lib}": cuda_libs["cudart"].file_name,
- "%{cublas_lib}": cuda_libs["cublas"].file_name,
- "%{cusolver_lib}": cuda_libs["cusolver"].file_name,
- "%{cudnn_lib}": cuda_libs["cudnn"].file_name,
- "%{cufft_lib}": cuda_libs["cufft"].file_name,
- "%{curand_lib}": cuda_libs["curand"].file_name,
- "%{cupti_lib}": cuda_libs["cupti"].file_name,
- "%{cuda_include_genrules}": "\n".join(genrules),
- "%{cuda_headers}": ('":cuda-include",\n' +
- ' ":cudnn-include",')
- })
-
- is_cuda_clang = _use_cuda_clang(repository_ctx)
-
- should_download_clang = is_cuda_clang and _flag_enabled(
- repository_ctx, _TF_DOWNLOAD_CLANG)
- if should_download_clang:
- download_clang(repository_ctx, "crosstool/extra_tools")
-
- # Set up crosstool/
- cc = find_cc(repository_ctx)
- cc_fullpath = cc if not should_download_clang else "crosstool/" + cc
-
- host_compiler_includes = _host_compiler_includes(repository_ctx, cc_fullpath)
- cuda_defines = {}
- if is_cuda_clang:
- cuda_defines["%{host_compiler_path}"] = str(cc)
- cuda_defines["%{host_compiler_warnings}"] = """
+ # Set up BUILD file for cuda/
+ _tpl(
+ repository_ctx,
+ "cuda:build_defs.bzl",
+ {
+ "%{cuda_is_configured}": "True",
+ "%{cuda_extra_copts}": _compute_cuda_extra_copts(
+ repository_ctx,
+ cuda_config.compute_capabilities,
+ ),
+ },
+ )
+ _tpl(
+ repository_ctx,
+ "cuda:BUILD.windows" if _is_windows(repository_ctx) else "cuda:BUILD",
+ {
+ "%{cuda_driver_lib}": cuda_libs["cuda"].file_name,
+ "%{cudart_static_lib}": cuda_libs["cudart_static"].file_name,
+ "%{cudart_static_linkopt}": _cudart_static_linkopt(
+ cuda_config.cpu_value,
+ ),
+ "%{cudart_lib}": cuda_libs["cudart"].file_name,
+ "%{cublas_lib}": cuda_libs["cublas"].file_name,
+ "%{cusolver_lib}": cuda_libs["cusolver"].file_name,
+ "%{cudnn_lib}": cuda_libs["cudnn"].file_name,
+ "%{cufft_lib}": cuda_libs["cufft"].file_name,
+ "%{curand_lib}": cuda_libs["curand"].file_name,
+ "%{cupti_lib}": cuda_libs["cupti"].file_name,
+ "%{cuda_include_genrules}": "\n".join(genrules),
+ "%{cuda_headers}": ('":cuda-include",\n' +
+ ' ":cudnn-include",'),
+ },
+ "cuda/BUILD",
+ )
+
+ is_cuda_clang = _use_cuda_clang(repository_ctx)
+
+ should_download_clang = is_cuda_clang and _flag_enabled(
+ repository_ctx,
+ _TF_DOWNLOAD_CLANG,
+ )
+ if should_download_clang:
+ download_clang(repository_ctx, "crosstool/extra_tools")
+
+ # Set up crosstool/
+ cc = find_cc(repository_ctx)
+ cc_fullpath = cc if not should_download_clang else "crosstool/" + cc
+
+ host_compiler_includes = _host_compiler_includes(repository_ctx, cc_fullpath)
+ cuda_defines = {}
+ if is_cuda_clang:
+ cuda_defines["%{host_compiler_path}"] = str(cc)
+ cuda_defines["%{host_compiler_warnings}"] = """
# Some parts of the codebase set -Werror and hit this warning, so
# switch it off for now.
flag: "-Wno-invalid-partial-specialization"
"""
- cuda_defines["%{host_compiler_includes}"] = host_compiler_includes
- _tpl(repository_ctx, "crosstool:BUILD", {"%{linker_files}": ":empty"})
- repository_ctx.file("crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc", "")
- else:
- cuda_defines["%{host_compiler_path}"] = "clang/bin/crosstool_wrapper_driver_is_not_gcc"
- cuda_defines["%{host_compiler_warnings}"] = ""
- # TODO(klimek): We currently need to inject "/" as builtin directory path
- # to disable bazel's dependency checks.
- # The problem is that:
- # - the python rules symlink the python headers into the bazel root
- # - the rules use 'includes' in the BUILD file to redirect includes of the
- # python headers through those paths
- # - bazel currently uses -isystem for include paths specified via 'includes'
- # - gcc follows symlinks when resolving files via -isystem paths, and puts
- # the resolved paths into the .d file, which makes the dependency check
- # fail for bazel
- # There are multiple possible ways to solve this:
- # 1. make bazel not use -isystem for paths specified via 'includes'
- # 2. cp the headers instead of symlinking them
- #
- # Once this is fixed, the right builtin directory path is:
- # (host_compiler_includes +
- # "\n cxx_builtin_include_directory: \"%s\"" % cuda_include_path)
- # The cuda directory needs to be passed, as there is currently no rule
- # providing the cuda headers in the same way the python headers are
- # provided.
- cuda_defines["%{host_compiler_includes}"] = "\n cxx_builtin_include_directory: \"/\""
- nvcc_path = str(repository_ctx.path("%s/bin/nvcc%s" %
- (cuda_config.cuda_toolkit_path,
- ".exe" if cuda_config.cpu_value == "Windows" else "")))
- _tpl(repository_ctx, "crosstool:BUILD",
- {"%{linker_files}": ":crosstool_wrapper_driver_is_not_gcc"})
- _tpl(repository_ctx,
- "crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc",
- {
- "%{cpu_compiler}": str(cc),
- "%{cuda_version}": cuda_config.cuda_version,
- "%{nvcc_path}": nvcc_path,
- "%{gcc_host_compiler_path}": str(cc),
- "%{cuda_compute_capabilities}": ", ".join(
- ["\"%s\"" % c for c in cuda_config.compute_capabilities]),
- })
- _tpl(repository_ctx, "crosstool:CROSSTOOL", cuda_defines, out="crosstool/CROSSTOOL")
-
- # Set up cuda_config.h, which is used by
- # tensorflow/stream_executor/dso_loader.cc.
- _tpl(repository_ctx, "cuda:cuda_config.h",
- {
- "%{cuda_version}": cuda_config.cuda_version,
- "%{cudnn_version}": cuda_config.cudnn_version,
- "%{cuda_compute_capabilities}": ",".join(
- ["CudaVersion(\"%s\")" % c
- for c in cuda_config.compute_capabilities]),
- "%{cuda_toolkit_path}": cuda_config.cuda_toolkit_path,
- }, "cuda/cuda/cuda_config.h")
+ cuda_defines["%{host_compiler_includes}"] = host_compiler_includes
+ _tpl(
+ repository_ctx,
+ "crosstool:BUILD",
+ {
+ "%{linker_files}": ":empty",
+ "%{win_linker_files}": ":empty",
+ },
+ )
+ repository_ctx.file("crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc", "")
+ repository_ctx.file("crosstool/windows/msvc_wrapper_for_nvcc.py", "")
+ repository_ctx.file("crosstool/windows/msvc_wrapper_for_nvcc.bat", "")
+ else:
+ cuda_defines["%{host_compiler_path}"] = "clang/bin/crosstool_wrapper_driver_is_not_gcc"
+ cuda_defines["%{host_compiler_warnings}"] = ""
+
+ # TODO(klimek): We currently need to inject "/" as builtin directory path
+ # to disable bazel's dependency checks.
+ # The problem is that:
+ # - the python rules symlink the python headers into the bazel root
+ # - the rules use 'includes' in the BUILD file to redirect includes of the
+ # python headers through those paths
+ # - bazel currently uses -isystem for include paths specified via 'includes'
+ # - gcc follows symlinks when resolving files via -isystem paths, and puts
+ # the resolved paths into the .d file, which makes the dependency check
+ # fail for bazel
+ # There are multiple possible ways to solve this:
+ # 1. make bazel not use -isystem for paths specified via 'includes'
+ # 2. cp the headers instead of symlinking them
+ #
+ # Once this is fixed, the right builtin directory path is:
+ # (host_compiler_includes +
+ # "\n cxx_builtin_include_directory: \"%s\"" % cuda_include_path)
+ # The cuda directory needs to be passed, as there is currently no rule
+ # providing the cuda headers in the same way the python headers are
+ # provided.
+ cuda_defines["%{host_compiler_includes}"] = "\n cxx_builtin_include_directory: \"/\""
+ nvcc_path = str(repository_ctx.path("%s/bin/nvcc%s" % (
+ cuda_config.cuda_toolkit_path,
+ ".exe" if _is_windows(repository_ctx) else "",
+ )))
+ _tpl(
+ repository_ctx,
+ "crosstool:BUILD",
+ {
+ "%{linker_files}": ":crosstool_wrapper_driver_is_not_gcc",
+ "%{win_linker_files}": ":windows_msvc_wrapper_files",
+ },
+ )
+ wrapper_defines = {
+ "%{cpu_compiler}": str(cc),
+ "%{cuda_version}": cuda_config.cuda_version,
+ "%{nvcc_path}": nvcc_path,
+ "%{gcc_host_compiler_path}": str(cc),
+ "%{cuda_compute_capabilities}": ", ".join(
+ ["\"%s\"" % c for c in cuda_config.compute_capabilities],
+ ),
+ "%{nvcc_tmp_dir}": _get_nvcc_tmp_dir_for_windows(repository_ctx),
+ }
+ _tpl(
+ repository_ctx,
+ "crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc",
+ wrapper_defines,
+ )
+ _tpl(
+ repository_ctx,
+ "crosstool:windows/msvc_wrapper_for_nvcc.py",
+ wrapper_defines,
+ )
+ _tpl(
+ repository_ctx,
+ "crosstool:windows/msvc_wrapper_for_nvcc.bat",
+ {
+ "%{python_binary}": _get_python_bin(repository_ctx),
+ },
+ )
+
+ _tpl(
+ repository_ctx,
+ "crosstool:CROSSTOOL",
+ cuda_defines + _get_win_cuda_defines(repository_ctx),
+ out = "crosstool/CROSSTOOL",
+ )
+
+ # Set up cuda_config.h, which is used by
+ # tensorflow/stream_executor/dso_loader.cc.
+ _tpl(
+ repository_ctx,
+ "cuda:cuda_config.h",
+ {
+ "%{cuda_version}": cuda_config.cuda_version,
+ "%{cudnn_version}": cuda_config.cudnn_version,
+ "%{cuda_compute_capabilities}": ",".join(
+ [
+ "CudaVersion(\"%s\")" % c
+ for c in cuda_config.compute_capabilities
+ ],
+ ),
+ "%{cuda_toolkit_path}": cuda_config.cuda_toolkit_path,
+ },
+ "cuda/cuda/cuda_config.h",
+ )
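
The `_tpl` helper used throughout `_create_local_cuda_repository` is defined earlier in this file (not shown in this hunk). The call sites above imply that it expands `%{...}` placeholders in a checked-in template and writes the result into the external repository, honoring an explicit output path when one is passed (e.g. "cuda/cuda/cuda_config.h"). A hedged sketch, assuming the templates live under //third_party/gpus and ":" maps to a directory separator:

    def _tpl(repository_ctx, tpl, substitutions = {}, out = None):
        # Sketch only: derive the output path from the template name
        # unless the caller supplies one, then expand the placeholders.
        if not out:
            out = tpl.replace(":", "/")
        repository_ctx.template(
            out,
            Label("//third_party/gpus/%s.tpl" % tpl),
            substitutions,
        )
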
def _create_remote_cuda_repository(repository_ctx, remote_config_repo):
- """Creates pointers to a remotely configured repo set up to build with CUDA."""
- _tpl(repository_ctx, "cuda:build_defs.bzl",
- {
- "%{cuda_is_configured}": "True",
- "%{cuda_extra_copts}": _compute_cuda_extra_copts(
- repository_ctx, _compute_capabilities(repository_ctx)),
-
- })
- _tpl(repository_ctx, "cuda:remote.BUILD",
- {
- "%{remote_cuda_repo}": remote_config_repo,
- }, "cuda/BUILD")
- _tpl(repository_ctx, "crosstool:remote.BUILD", {
- "%{remote_cuda_repo}": remote_config_repo,
- }, "crosstool/BUILD")
+ """Creates pointers to a remotely configured repo set up to build with CUDA."""
+ _tpl(
+ repository_ctx,
+ "cuda:build_defs.bzl",
+ {
+ "%{cuda_is_configured}": "True",
+ "%{cuda_extra_copts}": _compute_cuda_extra_copts(
+ repository_ctx,
+ _compute_capabilities(repository_ctx),
+ ),
+ },
+ )
+ _tpl(
+ repository_ctx,
+ "cuda:remote.BUILD",
+ {
+ "%{remote_cuda_repo}": remote_config_repo,
+ },
+ "cuda/BUILD",
+ )
+ _tpl(
+ repository_ctx,
+ "crosstool:remote.BUILD",
+ {
+ "%{remote_cuda_repo}": remote_config_repo,
+ },
+ "crosstool/BUILD",
+ )
def _cuda_autoconf_impl(repository_ctx):
- """Implementation of the cuda_autoconf repository rule."""
- if not _enable_cuda(repository_ctx):
- _create_dummy_repository(repository_ctx)
- else:
- if _TF_CUDA_CONFIG_REPO in repository_ctx.os.environ:
- _create_remote_cuda_repository(repository_ctx,
- repository_ctx.os.environ[_TF_CUDA_CONFIG_REPO])
+ """Implementation of the cuda_autoconf repository rule."""
+ if not _enable_cuda(repository_ctx):
+ _create_dummy_repository(repository_ctx)
+ elif _TF_CUDA_CONFIG_REPO in repository_ctx.os.environ:
+ _create_remote_cuda_repository(
+ repository_ctx,
+ repository_ctx.os.environ[_TF_CUDA_CONFIG_REPO],
+ )
else:
- _create_local_cuda_repository(repository_ctx)
-
+ _create_local_cuda_repository(repository_ctx)
cuda_configure = repository_rule(
implementation = _cuda_autoconf_impl,
@@ -1181,6 +1457,7 @@ cuda_configure = repository_rule(
_TF_CUDA_COMPUTE_CAPABILITIES,
_TF_CUDA_CONFIG_REPO,
"NVVMIR_LIBRARY_DIR",
+ _PYTHON_BIN_PATH,
],
)
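
For context, the rule is typically instantiated from the workspace; an illustrative call (the real call site lives elsewhere in the tree, and the load path assumes the conventional third_party/gpus location):

    load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")

    cuda_configure(name = "local_config_cuda")

Listing _PYTHON_BIN_PATH in `environ` means a change to that variable re-triggers the rule, which matters now that the new msvc_wrapper_for_nvcc.bat template is stamped with %{python_binary}.
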
diff --git a/third_party/kafka/BUILD b/third_party/kafka/BUILD
index a839ca717e..75792b0d87 100644
--- a/third_party/kafka/BUILD
+++ b/third_party/kafka/BUILD
@@ -60,6 +60,8 @@ cc_library(
"src/rdkafka_event.h",
"src/rdkafka_feature.c",
"src/rdkafka_feature.h",
+ "src/rdkafka_header.c",
+ "src/rdkafka_header.h",
"src/rdkafka_int.h",
"src/rdkafka_interceptor.c",
"src/rdkafka_interceptor.h",
@@ -93,7 +95,6 @@ cc_library(
"src/rdkafka_sasl_int.h",
"src/rdkafka_sasl_plain.c",
"src/rdkafka_subscription.c",
- "src/rdkafka_subscription.h",
"src/rdkafka_timer.c",
"src/rdkafka_timer.h",
"src/rdkafka_topic.c",
@@ -105,6 +106,8 @@ cc_library(
"src/rdlist.h",
"src/rdlog.c",
"src/rdlog.h",
+ "src/rdmurmur2.c",
+ "src/rdmurmur2.h",
"src/rdports.c",
"src/rdports.h",
"src/rdposix.h",
diff --git a/third_party/llvm/llvm.autogenerated.BUILD b/third_party/llvm/llvm.autogenerated.BUILD
index 4f645fa260..c3b9ec4c25 100644
--- a/third_party/llvm/llvm.autogenerated.BUILD
+++ b/third_party/llvm/llvm.autogenerated.BUILD
@@ -11,7 +11,11 @@ load(
"cmake_var_string",
"expand_cmake_vars",
"gentbl",
- "llvm_target_cmake_vars",
+ "llvm_all_cmake_vars",
+ "llvm_copts",
+ "llvm_defines",
+ "llvm_linkopts",
+ "llvm_support_platform_specific_srcs_glob",
)
load(
"@org_tensorflow//third_party:common.bzl",
@@ -24,9 +28,7 @@ llvm_host_triple = "x86_64-unknown-linux_gnu"
llvm_targets = [
"AArch64",
- # Uncomment to enable the AMDGPU backend.
- # TODO(phawkins): use a configure-time test.
- # "AMDGPU",
+ "AMDGPU",
"ARM",
"NVPTX",
"PowerPC",
@@ -39,147 +41,25 @@ llvm_target_asm_printers = llvm_targets
llvm_target_disassemblers = llvm_targets
-# TODO(phawkins): the set of CMake variables was hardcoded for expediency.
-# However, we should really detect many of these via configure-time tests.
-
-# The set of CMake variables common to all targets.
-cmake_vars = {
- # Headers
- "HAVE_DIRENT_H": 1,
- "HAVE_DLFCN_H": 1,
- "HAVE_ERRNO_H": 1,
- "HAVE_EXECINFO_H": 1,
- "HAVE_FCNTL_H": 1,
- "HAVE_INTTYPES_H": 1,
- "HAVE_PTHREAD_H": 1,
- "HAVE_SIGNAL_H": 1,
- "HAVE_STDINT_H": 1,
- "HAVE_SYS_IOCTL_H": 1,
- "HAVE_SYS_MMAN_H": 1,
- "HAVE_SYS_PARAM_H": 1,
- "HAVE_SYS_RESOURCE_H": 1,
- "HAVE_SYS_STAT_H": 1,
- "HAVE_SYS_TIME_H": 1,
- "HAVE_SYS_TYPES_H": 1,
- "HAVE_TERMIOS_H": 1,
- "HAVE_UNISTD_H": 1,
- "HAVE_ZLIB_H": 1,
-
- # Features
- "HAVE_BACKTRACE": 1,
- "BACKTRACE_HEADER": "execinfo.h",
- "HAVE_DLOPEN": 1,
- "HAVE_FUTIMES": 1,
- "HAVE_GETCWD": 1,
- "HAVE_GETPAGESIZE": 1,
- "HAVE_GETRLIMIT": 1,
- "HAVE_GETRUSAGE": 1,
- "HAVE_GETTIMEOFDAY": 1,
- "HAVE_INT64_T": 1,
- "HAVE_ISATTY": 1,
- "HAVE_LIBEDIT": 1,
- "HAVE_LIBPTHREAD": 1,
- "HAVE_LIBZ": 1,
- "HAVE_MKDTEMP": 1,
- "HAVE_MKSTEMP": 1,
- "HAVE_MKTEMP": 1,
- "HAVE_PREAD": 1,
- "HAVE_PTHREAD_GETSPECIFIC": 1,
- "HAVE_PTHREAD_MUTEX_LOCK": 1,
- "HAVE_PTHREAD_RWLOCK_INIT": 1,
- "HAVE_REALPATH": 1,
- "HAVE_SBRK": 1,
- "HAVE_SETENV": 1,
- "HAVE_SETRLIMIT": 1,
- "HAVE_SIGALTSTACK": 1,
- "HAVE_STRERROR": 1,
- "HAVE_STRERROR_R": 1,
- "HAVE_STRTOLL": 1,
- "HAVE_SYSCONF": 1,
- "HAVE_UINT64_T": 1,
- "HAVE__UNWIND_BACKTRACE": 1,
-
- # LLVM features
- "ENABLE_BACKTRACES": 1,
- "LLVM_BINDIR": "/dev/null",
- "LLVM_DISABLE_ABI_BREAKING_CHECKS_ENFORCING": 0,
- "LLVM_ENABLE_ABI_BREAKING_CHECKS": 0,
- "LLVM_ENABLE_THREADS": 1,
- "LLVM_ENABLE_ZLIB": 1,
- "LLVM_HAS_ATOMICS": 1,
- "LLVM_INCLUDEDIR": "/dev/null",
- "LLVM_INFODIR": "/dev/null",
- "LLVM_MANDIR": "/dev/null",
- "LLVM_NATIVE_TARGET": 1,
- "LLVM_NATIVE_TARGETINFO": 1,
- "LLVM_NATIVE_TARGETMC": 1,
- "LLVM_NATIVE_ASMPRINTER": 1,
- "LLVM_NATIVE_ASMPARSER": 1,
- "LLVM_NATIVE_DISASSEMBLER": 1,
- "LLVM_ON_UNIX": 1,
- "LLVM_PREFIX": "/dev/null",
- "LLVM_VERSION_MAJOR": 0,
- "LLVM_VERSION_MINOR": 0,
- "LLVM_VERSION_PATCH": 0,
- "LTDL_SHLIB_EXT": ".so",
- "PACKAGE_NAME": "llvm",
- "PACKAGE_STRING": "llvm tensorflow-trunk",
- "PACKAGE_VERSION": "tensorflow-trunk",
- "RETSIGTYPE": "void",
-}
-
-# CMake variables specific to the Linux platform
-linux_cmake_vars = {
- "HAVE_MALLOC_H": 1,
- "HAVE_LINK_H": 1,
- "HAVE_MALLINFO": 1,
- "HAVE_FUTIMENS": 1,
-}
-
-# CMake variables specific to the Darwin (Mac OS X) platform.
-darwin_cmake_vars = {
- "HAVE_MALLOC_MALLOC_H": 1,
-}
-
-# Select a set of CMake variables based on the platform.
-# TODO(phawkins): use a better method to select the right host triple, rather
-# than hardcoding x86_64.
-all_cmake_vars = select({
- "@org_tensorflow//tensorflow:darwin": cmake_var_string(
- cmake_vars + llvm_target_cmake_vars("X86", "x86_64-apple-darwin") +
- darwin_cmake_vars,
- ),
- "@org_tensorflow//tensorflow:linux_ppc64le": cmake_var_string(
- cmake_vars +
- llvm_target_cmake_vars("PowerPC", "powerpc64le-unknown-linux_gnu") +
- linux_cmake_vars,
- ),
- "//conditions:default": cmake_var_string(
- cmake_vars +
- llvm_target_cmake_vars("X86", "x86_64-unknown-linux_gnu") +
- linux_cmake_vars,
- ),
-})
-
# Performs CMake variable substitutions on configuration header files.
expand_cmake_vars(
name = "config_gen",
src = "include/llvm/Config/config.h.cmake",
- cmake_vars = all_cmake_vars,
+ cmake_vars = llvm_all_cmake_vars,
dst = "include/llvm/Config/config.h",
)
expand_cmake_vars(
name = "llvm_config_gen",
src = "include/llvm/Config/llvm-config.h.cmake",
- cmake_vars = all_cmake_vars,
+ cmake_vars = llvm_all_cmake_vars,
dst = "include/llvm/Config/llvm-config.h",
)
expand_cmake_vars(
name = "abi_breaking_gen",
src = "include/llvm/Config/abi-breaking.h.cmake",
- cmake_vars = all_cmake_vars,
+ cmake_vars = llvm_all_cmake_vars,
dst = "include/llvm/Config/abi-breaking.h",
)
@@ -240,14 +120,7 @@ cc_library(
"include/llvm/Config/config.h",
"include/llvm/Config/llvm-config.h",
],
- defines = [
- "LLVM_ENABLE_STATS",
- "__STDC_LIMIT_MACROS",
- "__STDC_CONSTANT_MACROS",
- "__STDC_FORMAT_MACROS",
- "_DEBUG",
- "LLVM_BUILD_GLOBAL_ISEL",
- ],
+ defines = llvm_defines,
includes = ["include"],
)
@@ -263,17 +136,6 @@ genrule(
# Rules that apply the LLVM tblgen tool.
gentbl(
- name = "intrinsics_gen",
- tbl_outs = [("-gen-intrinsic", "include/llvm/IR/Intrinsics.inc")],
- tblgen = ":llvm-tblgen",
- td_file = "include/llvm/IR/Intrinsics.td",
- td_srcs = glob([
- "include/llvm/CodeGen/*.td",
- "include/llvm/IR/Intrinsics*.td",
- ]),
-)
-
-gentbl(
name = "attributes_gen",
tbl_outs = [("-gen-attrs", "include/llvm/IR/Attributes.inc")],
tblgen = ":llvm-tblgen",
@@ -306,6 +168,28 @@ gentbl(
]) + ["include/llvm/TableGen/SearchableTable.td"],
)
+gentbl(
+ name = "intrinsic_enums_gen",
+ tbl_outs = [("-gen-intrinsic-enums", "include/llvm/IR/IntrinsicEnums.inc")],
+ tblgen = ":llvm-tblgen",
+ td_file = "include/llvm/IR/Intrinsics.td",
+ td_srcs = glob([
+ "include/llvm/CodeGen/*.td",
+ "include/llvm/IR/Intrinsics*.td",
+ ]),
+)
+
+gentbl(
+ name = "intrinsics_impl_gen",
+ tbl_outs = [("-gen-intrinsic-impl", "include/llvm/IR/IntrinsicImpl.inc")],
+ tblgen = ":llvm-tblgen",
+ td_file = "include/llvm/IR/Intrinsics.td",
+ td_srcs = glob([
+ "include/llvm/CodeGen/*.td",
+ "include/llvm/IR/Intrinsics*.td",
+ ]),
+)
+
# Binary targets used by Tensorflow.
cc_binary(
name = "llvm-tblgen",
@@ -313,11 +197,8 @@ cc_binary(
"utils/TableGen/*.cpp",
"utils/TableGen/*.h",
]),
- linkopts = [
- "-lm",
- "-ldl",
- "-lpthread",
- ],
+ copts = llvm_copts,
+ linkopts = llvm_linkopts,
stamp = 0,
deps = [
":config",
@@ -333,11 +214,8 @@ cc_binary(
"utils/FileCheck/*.cpp",
"utils/FileCheck/*.h",
]),
- linkopts = [
- "-ldl",
- "-lm",
- "-lpthread",
- ],
+ copts = llvm_copts,
+ linkopts = llvm_linkopts,
stamp = 0,
deps = [":support"],
)
@@ -376,13 +254,31 @@ llvm_target_list = [
("-gen-dag-isel", "lib/Target/AMDGPU/AMDGPUGenDAGISel.inc"),
("-gen-callingconv", "lib/Target/AMDGPU/AMDGPUGenCallingConv.inc"),
("-gen-subtarget", "lib/Target/AMDGPU/AMDGPUGenSubtargetInfo.inc"),
- ("-gen-tgt-intrinsic", "lib/Target/AMDGPU/AMDGPUGenIntrinsics.inc"),
+ ("-gen-tgt-intrinsic-impl", "lib/Target/AMDGPU/AMDGPUGenIntrinsicImpl.inc"),
+ ("-gen-tgt-intrinsic-enums", "lib/Target/AMDGPU/AMDGPUGenIntrinsicEnums.inc"),
("-gen-emitter", "lib/Target/AMDGPU/AMDGPUGenMCCodeEmitter.inc"),
("-gen-dfa-packetizer", "lib/Target/AMDGPU/AMDGPUGenDFAPacketizer.inc"),
("-gen-asm-writer", "lib/Target/AMDGPU/AMDGPUGenAsmWriter.inc"),
("-gen-asm-matcher", "lib/Target/AMDGPU/AMDGPUGenAsmMatcher.inc"),
("-gen-disassembler", "lib/Target/AMDGPU/AMDGPUGenDisassemblerTables.inc"),
("-gen-pseudo-lowering", "lib/Target/AMDGPU/AMDGPUGenMCPseudoLowering.inc"),
+ ("-gen-searchable-tables", "lib/Target/AMDGPU/AMDGPUGenSearchableTables.inc"),
+ ("-gen-global-isel", "lib/Target/AMDGPU/AMDGPUGenGlobalISel.inc"),
+ ],
+ },
+ {
+ "name": "AMDGPU",
+ "lower_name": "amdgpu_r600",
+ "short_name": "R600",
+ "tbl_outs": [
+ ("-gen-asm-writer", "lib/Target/AMDGPU/R600GenAsmWriter.inc"),
+ ("-gen-callingconv", "lib/Target/AMDGPU/R600GenCallingConv.inc"),
+ ("-gen-dag-isel", "lib/Target/AMDGPU/R600GenDAGISel.inc"),
+ ("-gen-dfa-packetizer", "lib/Target/AMDGPU/R600GenDFAPacketizer.inc"),
+ ("-gen-instr-info", "lib/Target/AMDGPU/R600GenInstrInfo.inc"),
+ ("-gen-emitter", "lib/Target/AMDGPU/R600GenMCCodeEmitter.inc"),
+ ("-gen-register-info", "lib/Target/AMDGPU/R600GenRegisterInfo.inc"),
+ ("-gen-subtarget", "lib/Target/AMDGPU/R600GenSubtargetInfo.inc"),
],
},
{
@@ -508,7 +404,7 @@ cc_library(
"include/llvm/Target/AArch64/AsmParser/*.inc",
"lib/Target/AArch64/AsmParser/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AArch64"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AArch64"],
deps = [
":aarch64_desc",
":aarch64_info",
@@ -533,7 +429,7 @@ cc_library(
"include/llvm/Target/AArch64/InstPrinter/*.inc",
"lib/Target/AArch64/InstPrinter/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AArch64"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AArch64"],
deps = [
":aarch64_target_gen",
":aarch64_utils",
@@ -556,7 +452,7 @@ cc_library(
"include/llvm/Target/AArch64/*.inc",
"lib/Target/AArch64/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AArch64"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AArch64"],
deps = [
":aarch64_asm_printer",
":aarch64_desc",
@@ -589,14 +485,15 @@ cc_library(
"include/llvm/Target/AArch64/MCTargetDesc/*.inc",
"lib/Target/AArch64/MCTargetDesc/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AArch64"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AArch64"],
deps = [
":aarch64_asm_printer",
":aarch64_info",
":aarch64_target_gen",
":attributes_gen",
":config",
- ":intrinsics_gen",
+ ":intrinsic_enums_gen",
+ ":intrinsics_impl_gen",
":mc",
":support",
],
@@ -615,7 +512,7 @@ cc_library(
"include/llvm/Target/AArch64/Disassembler/*.inc",
"lib/Target/AArch64/Disassembler/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AArch64"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AArch64"],
deps = [
":aarch64_desc",
":aarch64_info",
@@ -643,7 +540,7 @@ cc_library(
"lib/Target/AArch64/AArch64*.h",
"lib/Target/AArch64/TargetInfo/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AArch64"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AArch64"],
deps = [
":code_gen",
":config",
@@ -666,7 +563,7 @@ cc_library(
"include/llvm/Target/AArch64/Utils/*.inc",
"lib/Target/AArch64/Utils/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AArch64"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AArch64"],
deps = [
":aarch64_target_gen",
":config",
@@ -688,6 +585,7 @@ cc_library(
"include/llvm/Transforms/AggressiveInstCombine/*.def",
"include/llvm/Transforms/AggressiveInstCombine/*.inc",
]),
+ copts = llvm_copts,
deps = [
":analysis",
":config",
@@ -712,6 +610,7 @@ cc_library(
"include/llvm/Analysis/*.def",
"include/llvm/Analysis/*.inc",
]),
+ copts = llvm_copts,
deps = [
":binary_format",
":config",
@@ -735,7 +634,7 @@ cc_library(
"include/llvm/Target/AMDGPU/MCTargetDesc/*.inc",
"lib/Target/AMDGPU/MCTargetDesc/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AMDGPU"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AMDGPU"],
deps = [
":amdgpu_asm_printer",
":amdgpu_info",
@@ -760,7 +659,7 @@ cc_library(
"include/llvm/Target/AMDGPU/Disassembler/*.inc",
"lib/Target/AMDGPU/Disassembler/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AMDGPU"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AMDGPU"],
deps = [
":amdgpu_desc",
":amdgpu_info",
@@ -785,8 +684,9 @@ cc_library(
"include/llvm/Target/AMDGPU/TargetInfo/*.inc",
"lib/Target/AMDGPU/TargetInfo/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AMDGPU"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AMDGPU"],
deps = [
+ ":amdgpu_r600_target_gen",
":amdgpu_target_gen",
":config",
":core",
@@ -807,8 +707,9 @@ cc_library(
"include/llvm/Target/AMDGPU/Utils/*.inc",
"lib/Target/AMDGPU/Utils/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AMDGPU"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AMDGPU"],
deps = [
+ ":amdgpu_r600_target_gen",
":amdgpu_target_gen",
":config",
":core",
@@ -830,7 +731,7 @@ cc_library(
"include/llvm/Target/AMDGPU/AsmParser/*.inc",
"lib/Target/AMDGPU/AsmParser/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AMDGPU"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AMDGPU"],
deps = [
":amdgpu_desc",
":amdgpu_info",
@@ -855,7 +756,7 @@ cc_library(
"include/llvm/Target/AMDGPU/InstPrinter/*.inc",
"lib/Target/AMDGPU/InstPrinter/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AMDGPU"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AMDGPU"],
deps = [
":amdgpu_utils",
":config",
@@ -877,7 +778,7 @@ cc_library(
"include/llvm/Target/AMDGPU/*.inc",
"lib/Target/AMDGPU/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/AMDGPU"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AMDGPU"],
deps = [
":amdgpu_asm_printer",
":amdgpu_desc",
@@ -913,7 +814,7 @@ cc_library(
"include/llvm/Target/ARM/AsmParser/*.inc",
"lib/Target/ARM/AsmParser/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/ARM"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/ARM"],
deps = [
":arm_desc",
":arm_info",
@@ -939,7 +840,7 @@ cc_library(
"lib/Target/ARM/*.h",
"lib/Target/ARM/InstPrinter/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/ARM"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/ARM"],
deps = [
":arm_info",
":arm_target_gen",
@@ -963,7 +864,7 @@ cc_library(
"include/llvm/Target/ARM/*.inc",
"lib/Target/ARM/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/ARM"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/ARM"],
deps = [
":analysis",
":arm_asm_printer",
@@ -980,6 +881,7 @@ cc_library(
":selection_dag",
":support",
":target",
+ ":transform_utils",
],
)
@@ -998,14 +900,15 @@ cc_library(
"include/llvm/Target/ARM/MCTargetDesc/*.inc",
"lib/Target/ARM/MCTargetDesc/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/ARM"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/ARM"],
deps = [
":arm_asm_printer",
":arm_info",
":arm_target_gen",
":attributes_gen",
":config",
- ":intrinsics_gen",
+ ":intrinsic_enums_gen",
+ ":intrinsics_impl_gen",
":mc",
":mc_disassembler",
":support",
@@ -1025,7 +928,7 @@ cc_library(
"include/llvm/Target/ARM/Disassembler/*.inc",
"lib/Target/ARM/Disassembler/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/ARM"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/ARM"],
deps = [
":arm_desc",
":arm_info",
@@ -1050,7 +953,7 @@ cc_library(
"include/llvm/Target/ARM/TargetInfo/*.inc",
"lib/Target/ARM/TargetInfo/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/ARM"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/ARM"],
deps = [
":arm_target_gen",
":config",
@@ -1073,7 +976,7 @@ cc_library(
"include/llvm/Target/ARM/Utils/*.inc",
"lib/Target/ARM/Utils/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/ARM"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/ARM"],
deps = [
":arm_target_gen",
":config",
@@ -1095,6 +998,7 @@ cc_library(
"include/llvm/AsmParser/*.def",
"include/llvm/AsmParser/*.inc",
]),
+ copts = llvm_copts,
deps = [
":binary_format",
":config",
@@ -1117,6 +1021,7 @@ cc_library(
"include/llvm/CodeGen/AsmPrinter/*.inc",
"lib/CodeGen/AsmPrinter/*.def",
]),
+ copts = llvm_copts,
deps = [
":analysis",
":binary_format",
@@ -1147,6 +1052,7 @@ cc_library(
"include/llvm/BinaryFormat/ELFRelocs/*.def",
"include/llvm/BinaryFormat/WasmRelocs/*.def",
]),
+ copts = llvm_copts,
deps = [
":config",
":support",
@@ -1167,6 +1073,7 @@ cc_library(
"include/llvm/Bitcode/Reader/*.inc",
"include/llvm/Bitcode/BitstreamReader.h",
]),
+ copts = llvm_copts,
deps = [
":config",
":core",
@@ -1190,6 +1097,7 @@ cc_library(
"include/llvm/Bitcode/BitcodeWriterPass.h",
"include/llvm/Bitcode/BitstreamWriter.h",
]),
+ copts = llvm_copts,
deps = [
":analysis",
":config",
@@ -1214,6 +1122,7 @@ cc_library(
"include/llvm/CodeGen/*.inc",
"include/llvm/CodeGen/**/*.h",
]),
+ copts = llvm_copts,
deps = [
":analysis",
":bit_reader",
@@ -1251,12 +1160,14 @@ cc_library(
"include/llvm/*.h",
"include/llvm/Analysis/*.def",
]),
+ copts = llvm_copts,
deps = [
":attributes_compat_gen",
":attributes_gen",
":binary_format",
":config",
- ":intrinsics_gen",
+ ":intrinsic_enums_gen",
+ ":intrinsics_impl_gen",
":support",
],
)
@@ -1274,6 +1185,7 @@ cc_library(
"include/llvm/DebugInfo/CodeView/*.def",
"include/llvm/DebugInfo/CodeView/*.inc",
]),
+ copts = llvm_copts,
deps = [
":binary_format",
":config",
@@ -1295,6 +1207,7 @@ cc_library(
"include/llvm/DebugInfo/MSF/*.def",
"include/llvm/DebugInfo/MSF/*.inc",
]),
+ copts = llvm_copts,
deps = [
":config",
":support",
@@ -1314,6 +1227,7 @@ cc_library(
"include/llvm/Demangle/*.def",
"include/llvm/Demangle/*.inc",
]),
+ copts = llvm_copts,
deps = [":config"],
)
@@ -1330,6 +1244,7 @@ cc_library(
"include/llvm/ExecutionEngine/*.def",
"include/llvm/ExecutionEngine/*.inc",
]),
+ copts = llvm_copts,
deps = [
":config",
":core",
@@ -1354,6 +1269,7 @@ cc_library(
"include/llvm/CodeGen/GlobalISel/*.def",
"include/llvm/CodeGen/GlobalISel/*.inc",
]),
+ copts = llvm_copts,
deps = [
":analysis",
":code_gen",
@@ -1383,6 +1299,7 @@ cc_library(
"include/llvm/Transforms/InstrProfiling.h",
"include/llvm/Transforms/PGOInstrumentation.h",
]),
+ copts = llvm_copts,
deps = [
":analysis",
":config",
@@ -1407,6 +1324,7 @@ cc_library(
"include/llvm/Transforms/InstCombine/*.def",
"include/llvm/Transforms/InstCombine/*.inc",
]),
+ copts = llvm_copts,
deps = [
":analysis",
":config",
@@ -1433,6 +1351,7 @@ cc_library(
"include/llvm/Transforms/IPO/*.def",
"include/llvm/Transforms/IPO/*.inc",
]),
+ copts = llvm_copts,
deps = [
":aggressive_inst_combine",
":analysis",
@@ -1466,6 +1385,7 @@ cc_library(
"include/llvm/IRReader/*.def",
"include/llvm/IRReader/*.inc",
]),
+ copts = llvm_copts,
deps = [
":asm_parser",
":bit_reader",
@@ -1488,6 +1408,7 @@ cc_library(
"include/llvm/Linker/*.def",
"include/llvm/Linker/*.inc",
]),
+ copts = llvm_copts,
deps = [
":config",
":core",
@@ -1509,6 +1430,7 @@ cc_library(
"include/llvm/MC/*.def",
"include/llvm/MC/*.inc",
]),
+ copts = llvm_copts,
deps = [
":binary_format",
":config",
@@ -1530,6 +1452,7 @@ cc_library(
"include/llvm/MC/MCDisassembler/*.def",
"include/llvm/MC/MCDisassembler/*.inc",
]),
+ copts = llvm_copts,
deps = [
":config",
":mc",
@@ -1550,6 +1473,7 @@ cc_library(
"include/llvm/MC/MCParser/*.def",
"include/llvm/MC/MCParser/*.inc",
]),
+ copts = llvm_copts,
deps = [
":config",
":mc",
@@ -1570,7 +1494,7 @@ cc_library(
"include/llvm/Target/NVPTX/InstPrinter/*.inc",
"lib/Target/NVPTX/InstPrinter/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/NVPTX"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/NVPTX"],
deps = [
"nvptx_target_gen",
":attributes_gen",
@@ -1594,7 +1518,7 @@ cc_library(
"include/llvm/Target/NVPTX/*.inc",
"lib/Target/NVPTX/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/NVPTX"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/NVPTX"],
deps = [
":analysis",
":asm_printer",
@@ -1628,7 +1552,7 @@ cc_library(
"include/llvm/Target/NVPTX/MCTargetDesc/*.inc",
"lib/Target/NVPTX/MCTargetDesc/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/NVPTX"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/NVPTX"],
deps = [
"nvptx_target_gen",
":config",
@@ -1654,7 +1578,7 @@ cc_library(
"lib/Target/NVPTX/NVPTX.h",
"lib/Target/NVPTX/TargetInfo/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/NVPTX"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/NVPTX"],
deps = [
"nvptx_target_gen",
":attributes_gen",
@@ -1678,6 +1602,7 @@ cc_library(
"include/llvm/Object/*.def",
"include/llvm/Object/*.inc",
]),
+ copts = llvm_copts,
deps = [
":binary_format",
":bit_reader",
@@ -1703,6 +1628,7 @@ cc_library(
"include/llvm/Transforms/ObjCARC/*.def",
"include/llvm/Transforms/ObjCARC/*.inc",
]),
+ copts = llvm_copts,
deps = [
":analysis",
":config",
@@ -1725,13 +1651,16 @@ cc_library(
"include/llvm/ExecutionEngine/Orc/*.def",
"include/llvm/ExecutionEngine/Orc/*.inc",
]),
+ copts = llvm_copts,
deps = [
":config",
":core",
":execution_engine",
+ ":mc",
":object",
":runtime_dyld",
":support",
+ ":target",
":transform_utils",
],
)
@@ -1749,7 +1678,7 @@ cc_library(
"include/llvm/Target/PowerPC/AsmParser/*.inc",
"lib/Target/PowerPC/AsmParser/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/PowerPC"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/PowerPC"],
deps = [
":config",
":mc",
@@ -1773,11 +1702,12 @@ cc_library(
"include/llvm/Target/PowerPC/InstPrinter/*.inc",
"lib/Target/PowerPC/InstPrinter/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/PowerPC"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/PowerPC"],
deps = [
":attributes_gen",
":config",
- ":intrinsics_gen",
+ ":intrinsic_enums_gen",
+ ":intrinsics_impl_gen",
":mc",
":powerpc_info",
":powerpc_target_gen",
@@ -1798,7 +1728,7 @@ cc_library(
"include/llvm/Target/PowerPC/*.inc",
"lib/Target/PowerPC/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/PowerPC"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/PowerPC"],
deps = [
":analysis",
":asm_printer",
@@ -1830,11 +1760,12 @@ cc_library(
"include/llvm/Target/PowerPC/MCTargetDesc/*.inc",
"lib/Target/PowerPC/MCTargetDesc/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/PowerPC"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/PowerPC"],
deps = [
":attributes_gen",
":config",
- ":intrinsics_gen",
+ ":intrinsic_enums_gen",
+ ":intrinsics_impl_gen",
":mc",
":powerpc_asm_printer",
":powerpc_info",
@@ -1856,7 +1787,7 @@ cc_library(
"include/llvm/Target/PowerPC/Disassembler/*.inc",
"lib/Target/PowerPC/Disassembler/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/PowerPC"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/PowerPC"],
deps = [
":config",
":mc_disassembler",
@@ -1880,12 +1811,11 @@ cc_library(
"lib/Target/PowerPC/PPC*.h",
"lib/Target/PowerPC/TargetInfo/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/PowerPC"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/PowerPC"],
deps = [
":attributes_gen",
":config",
":core",
- ":intrinsics_gen",
":powerpc_target_gen",
":support",
":target",
@@ -1905,6 +1835,7 @@ cc_library(
"include/llvm/ProfileData/*.def",
"include/llvm/ProfileData/*.inc",
]),
+ copts = llvm_copts,
deps = [
":config",
":core",
@@ -1933,6 +1864,7 @@ cc_library(
"include/llvm/ExecutionEngine/RTDyldMemoryManager.h",
"include/llvm/ExecutionEngine/RuntimeDyld*.h",
]),
+ copts = llvm_copts,
deps = [
":config",
":mc",
@@ -1960,6 +1892,7 @@ cc_library(
"include/llvm/Transforms/IPO.h",
"include/llvm/Transforms/IPO/SCCP.h",
]),
+ copts = llvm_copts,
deps = [
":aggressive_inst_combine",
":analysis",
@@ -1985,6 +1918,7 @@ cc_library(
"include/llvm/CodeGen/SelectionDAG/*.def",
"include/llvm/CodeGen/SelectionDAG/*.inc",
]),
+ copts = llvm_copts,
deps = [
":analysis",
":code_gen",
@@ -2003,14 +1937,12 @@ cc_library(
"lib/Support/*.c",
"lib/Support/*.cpp",
"lib/Support/*.inc",
- "lib/Support/Unix/*.inc",
- "lib/Support/Unix/*.h",
"include/llvm-c/*.h",
"include/llvm/CodeGen/MachineValueType.h",
"include/llvm/BinaryFormat/COFF.h",
"include/llvm/BinaryFormat/MachO.h",
"lib/Support/*.h",
- ]),
+ ] + llvm_support_platform_specific_srcs_glob),
hdrs = glob([
"include/llvm/Support/*.h",
"include/llvm/Support/*.def",
@@ -2022,6 +1954,7 @@ cc_library(
"include/llvm/BinaryFormat/MachO.def",
"include/llvm/Support/VCSRevision.h",
],
+ copts = llvm_copts,
deps = [
":config",
":demangle",
@@ -2044,6 +1977,7 @@ cc_library(
"include/llvm/TableGen/*.inc",
"include/llvm/Target/*.def",
]),
+ copts = llvm_copts,
deps = [
":config",
":mc",
@@ -2069,6 +2003,7 @@ cc_library(
"include/llvm/CodeGen/*.def",
"include/llvm/CodeGen/*.inc",
]),
+ copts = llvm_copts,
deps = [
":analysis",
":config",
@@ -2093,6 +2028,7 @@ cc_library(
"include/llvm/Transforms/Utils/*.def",
"include/llvm/Transforms/Utils/*.inc",
]),
+ copts = llvm_copts,
deps = [
":analysis",
":config",
@@ -2116,6 +2052,7 @@ cc_library(
"include/llvm/Transforms/Vectorize/*.inc",
"include/llvm/Transforms/Vectorize.h",
]),
+ copts = llvm_copts,
deps = [
":analysis",
":config",
@@ -2139,7 +2076,7 @@ cc_library(
"include/llvm/Target/X86/AsmParser/*.inc",
"lib/Target/X86/AsmParser/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/X86"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/X86"],
deps = [
":config",
":mc",
@@ -2164,7 +2101,7 @@ cc_library(
"include/llvm/Target/X86/InstPrinter/*.inc",
"lib/Target/X86/InstPrinter/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/X86"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/X86"],
deps = [
":config",
":mc",
@@ -2188,7 +2125,7 @@ cc_library(
"include/llvm/Target/X86/*.inc",
"lib/Target/X86/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/X86"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/X86"],
deps = [
":analysis",
":asm_printer",
@@ -2221,7 +2158,7 @@ cc_library(
"include/llvm/Target/X86/MCTargetDesc/*.inc",
"lib/Target/X86/MCTargetDesc/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/X86"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/X86"],
deps = [
":config",
":mc",
@@ -2246,7 +2183,7 @@ cc_library(
"include/llvm/Target/X86/Disassembler/*.inc",
"lib/Target/X86/Disassembler/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/X86"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/X86"],
deps = [
":config",
":mc_disassembler",
@@ -2269,7 +2206,7 @@ cc_library(
"include/llvm/Target/X86/TargetInfo/*.inc",
"lib/Target/X86/TargetInfo/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/X86"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/X86"],
deps = [
":config",
":mc",
@@ -2291,7 +2228,7 @@ cc_library(
"include/llvm/Target/X86/Utils/*.inc",
"lib/Target/X86/Utils/*.h",
]),
- copts = ["-Iexternal/llvm/lib/Target/X86"],
+ copts = llvm_copts + ["-Iexternal/llvm/lib/Target/X86"],
deps = [
":code_gen",
":config",
diff --git a/third_party/llvm/llvm.bzl b/third_party/llvm/llvm.bzl
index 0efcf319bd..dfdacafceb 100644
--- a/third_party/llvm/llvm.bzl
+++ b/third_party/llvm/llvm.bzl
@@ -105,3 +105,143 @@ def expand_cmake_vars(name, src, dst, cmake_vars):
"< $< > $@")
)
+# TODO(phawkins): the set of CMake variables was hardcoded for expediency.
+# However, we should really detect many of these via configure-time tests.
+
+# The set of CMake variables common to all targets.
+cmake_vars = {
+ # Headers
+ "HAVE_DIRENT_H": 1,
+ "HAVE_DLFCN_H": 1,
+ "HAVE_ERRNO_H": 1,
+ "HAVE_EXECINFO_H": 1,
+ "HAVE_FCNTL_H": 1,
+ "HAVE_INTTYPES_H": 1,
+ "HAVE_PTHREAD_H": 1,
+ "HAVE_SIGNAL_H": 1,
+ "HAVE_STDINT_H": 1,
+ "HAVE_SYS_IOCTL_H": 1,
+ "HAVE_SYS_MMAN_H": 1,
+ "HAVE_SYS_PARAM_H": 1,
+ "HAVE_SYS_RESOURCE_H": 1,
+ "HAVE_SYS_STAT_H": 1,
+ "HAVE_SYS_TIME_H": 1,
+ "HAVE_SYS_TYPES_H": 1,
+ "HAVE_TERMIOS_H": 1,
+ "HAVE_UNISTD_H": 1,
+ "HAVE_ZLIB_H": 1,
+
+ # Features
+ "HAVE_BACKTRACE": 1,
+ "BACKTRACE_HEADER": "execinfo.h",
+ "HAVE_DLOPEN": 1,
+ "HAVE_FUTIMES": 1,
+ "HAVE_GETCWD": 1,
+ "HAVE_GETPAGESIZE": 1,
+ "HAVE_GETRLIMIT": 1,
+ "HAVE_GETRUSAGE": 1,
+ "HAVE_GETTIMEOFDAY": 1,
+ "HAVE_INT64_T": 1,
+ "HAVE_ISATTY": 1,
+ "HAVE_LIBEDIT": 1,
+ "HAVE_LIBPTHREAD": 1,
+ "HAVE_LIBZ": 1,
+ "HAVE_MKDTEMP": 1,
+ "HAVE_MKSTEMP": 1,
+ "HAVE_MKTEMP": 1,
+ "HAVE_PREAD": 1,
+ "HAVE_PTHREAD_GETSPECIFIC": 1,
+ "HAVE_PTHREAD_MUTEX_LOCK": 1,
+ "HAVE_PTHREAD_RWLOCK_INIT": 1,
+ "HAVE_REALPATH": 1,
+ "HAVE_SBRK": 1,
+ "HAVE_SETENV": 1,
+ "HAVE_SETRLIMIT": 1,
+ "HAVE_SIGALTSTACK": 1,
+ "HAVE_STRERROR": 1,
+ "HAVE_STRERROR_R": 1,
+ "HAVE_STRTOLL": 1,
+ "HAVE_SYSCONF": 1,
+ "HAVE_UINT64_T": 1,
+ "HAVE__UNWIND_BACKTRACE": 1,
+
+ # LLVM features
+ "ENABLE_BACKTRACES": 1,
+ "LLVM_BINDIR": "/dev/null",
+ "LLVM_DISABLE_ABI_BREAKING_CHECKS_ENFORCING": 0,
+ "LLVM_ENABLE_ABI_BREAKING_CHECKS": 0,
+ "LLVM_ENABLE_THREADS": 1,
+ "LLVM_ENABLE_ZLIB": 1,
+ "LLVM_HAS_ATOMICS": 1,
+ "LLVM_INCLUDEDIR": "/dev/null",
+ "LLVM_INFODIR": "/dev/null",
+ "LLVM_MANDIR": "/dev/null",
+ "LLVM_NATIVE_TARGET": 1,
+ "LLVM_NATIVE_TARGETINFO": 1,
+ "LLVM_NATIVE_TARGETMC": 1,
+ "LLVM_NATIVE_ASMPRINTER": 1,
+ "LLVM_NATIVE_ASMPARSER": 1,
+ "LLVM_NATIVE_DISASSEMBLER": 1,
+ "LLVM_ON_UNIX": 1,
+ "LLVM_PREFIX": "/dev/null",
+ "LLVM_VERSION_MAJOR": 0,
+ "LLVM_VERSION_MINOR": 0,
+ "LLVM_VERSION_PATCH": 0,
+ "LTDL_SHLIB_EXT": ".so",
+ "PACKAGE_NAME": "llvm",
+ "PACKAGE_STRING": "llvm tensorflow-trunk",
+ "PACKAGE_VERSION": "tensorflow-trunk",
+ "RETSIGTYPE": "void",
+}
+
+# CMake variables specific to the Linux platform
+linux_cmake_vars = {
+ "HAVE_MALLOC_H": 1,
+ "HAVE_LINK_H": 1,
+ "HAVE_MALLINFO": 1,
+ "HAVE_FUTIMENS": 1,
+}
+
+# CMake variables specific to the Darwin (Mac OS X) platform.
+darwin_cmake_vars = {
+ "HAVE_MALLOC_MALLOC_H": 1,
+}
+
+# Select a set of CMake variables based on the platform.
+# TODO(phawkins): use a better method to select the right host triple, rather
+# than hardcoding x86_64.
+llvm_all_cmake_vars = select({
+ "@org_tensorflow//tensorflow:darwin": cmake_var_string(
+ cmake_vars + llvm_target_cmake_vars("X86", "x86_64-apple-darwin") +
+ darwin_cmake_vars,
+ ),
+ "@org_tensorflow//tensorflow:linux_ppc64le": cmake_var_string(
+ cmake_vars +
+ llvm_target_cmake_vars("PowerPC", "powerpc64le-unknown-linux_gnu") +
+ linux_cmake_vars,
+ ),
+ "//conditions:default": cmake_var_string(
+ cmake_vars +
+ llvm_target_cmake_vars("X86", "x86_64-unknown-linux_gnu") +
+ linux_cmake_vars,
+ ),
+})
+
+llvm_linkopts = ["-ldl", "-lm", "-lpthread"]
+
+llvm_defines = [
+ "LLVM_ENABLE_STATS",
+ "__STDC_LIMIT_MACROS",
+ "__STDC_CONSTANT_MACROS",
+ "__STDC_FORMAT_MACROS",
+ "_DEBUG",
+ "LLVM_BUILD_GLOBAL_ISEL",
+]
+
+llvm_copts = []
+
+# Platform-specific sources for libSupport.
+
+llvm_support_platform_specific_srcs_glob = [
+ "lib/Support/Unix/*.inc",
+ "lib/Support/Unix/*.h",
+]
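
The helpers these tables feed are declared earlier in llvm.bzl and are not part of this hunk; the bodies below are assumptions sketched from the call sites (a per-target dict merged into cmake_vars, then flattened into the "KEY=VALUE ..." string that expand_cmake_vars consumes):

    llvm_host_triple = "x86_64-unknown-linux_gnu"  # assumption, mirrors the BUILD file

    def llvm_target_cmake_vars(native_arch, target_triple):
        # Sketch: per-target variables merged into cmake_vars above.
        return {
            "LLVM_NATIVE_ARCH": native_arch,
            "LLVM_DEFAULT_TARGET_TRIPLE": target_triple,
            "LLVM_HOST_TRIPLE": llvm_host_triple,
        }

    def cmake_var_string(cmake_vars):
        # Sketch: flatten {"KEY": value, ...} into "KEY=VALUE KEY=VALUE ...".
        return " ".join(["%s=%s" % (k, str(v)) for k, v in cmake_vars.items()])
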
diff --git a/third_party/mkl/LICENSE b/third_party/mkl/LICENSE
new file mode 100644
index 0000000000..9c8f3ea087
--- /dev/null
+++ b/third_party/mkl/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/third_party/mkl_dnn/BUILD b/third_party/mkl_dnn/BUILD
index 5b01f6e3e4..d075809ee9 100644
--- a/third_party/mkl_dnn/BUILD
+++ b/third_party/mkl_dnn/BUILD
@@ -1 +1,11 @@
licenses(["notice"])
+
+exports_files(["LICENSE"])
+
+config_setting(
+ name = "using_mkl_dnn_only",
+ values = {
+ "define": "using_mkl_dnn_only=true",
+ },
+ visibility = ["//visibility:public"],
+)
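
The new config_setting matches builds invoked with the corresponding --define, so dependent targets can branch on it via select(); the labels and target names below are illustrative:

    # bazel build --define=using_mkl_dnn_only=true //...
    deps = select({
        "//third_party/mkl_dnn:using_mkl_dnn_only": [":mkl_dnn_only_deps"],
        "//conditions:default": [":default_deps"],
    })
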
diff --git a/third_party/mkl_dnn/LICENSE b/third_party/mkl_dnn/LICENSE
new file mode 100644
index 0000000000..8dada3edaf
--- /dev/null
+++ b/third_party/mkl_dnn/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/mkl_dnn/build_defs.bzl b/third_party/mkl_dnn/build_defs.bzl
new file mode 100644
index 0000000000..7ce2a7d9b0
--- /dev/null
+++ b/third_party/mkl_dnn/build_defs.bzl
@@ -0,0 +1,13 @@
+def if_mkl_open_source_only(if_true, if_false = []):
+ """Shorthand for select()'ing on whether we're building with
+ MKL-DNN open source lib only, without depending on MKL binary form.
+
+ Returns a select statement which evaluates to if_true if we're building
+ with MKL-DNN open source lib only. Otherwise,
+ the select statement evaluates to if_false.
+
+ """
+ return select({
+ str(Label("//third_party/mkl_dnn:using_mkl_dnn_only")): if_true,
+ "//conditions:default": if_false,
+ })
diff --git a/third_party/mkl_dnn/mkldnn.BUILD b/third_party/mkl_dnn/mkldnn.BUILD
index 68f24aabae..57d2e1292b 100644
--- a/third_party/mkl_dnn/mkldnn.BUILD
+++ b/third_party/mkl_dnn/mkldnn.BUILD
@@ -1,5 +1,10 @@
exports_files(["LICENSE"])
+load(
+ "@org_tensorflow//third_party/mkl_dnn:build_defs.bzl",
+ "if_mkl_open_source_only",
+)
+
config_setting(
name = "clang_linux_x86_64",
values = {
@@ -15,7 +20,14 @@ cc_library(
"src/cpu/*.cpp",
]),
hdrs = glob(["include/*"]),
- copts = ["-fexceptions"] + select({
+ copts = [
+ "-fexceptions",
+ "-DUSE_MKL",
+ "-DUSE_CBLAS",
+ ] + if_mkl_open_source_only([
+ "-UUSE_MKL",
+ "-UUSE_CBLAS",
+ ]) + select({
"@org_tensorflow//tensorflow:linux_x86_64": [
"-fopenmp", # only works with gcc
],
@@ -33,4 +45,19 @@ cc_library(
],
nocopts = "-fno-exceptions",
visibility = ["//visibility:public"],
+ deps = select({
+ "@org_tensorflow//tensorflow:linux_x86_64": [
+ "@mkl_linux//:mkl_headers",
+ "@mkl_linux//:mkl_libs_linux",
+ ],
+ "@org_tensorflow//tensorflow:darwin": [
+ "@mkl_darwin//:mkl_headers",
+ "@mkl_darwin//:mkl_libs_darwin",
+ ],
+ "@org_tensorflow//tensorflow:windows": [
+ "@mkl_windows//:mkl_headers",
+ "@mkl_windows//:mkl_libs_windows",
+ ],
+ "//conditions:default": [],
+ }),
)
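
Note the copts pattern in the hunk above: `-DUSE_MKL` and `-DUSE_CBLAS` are always emitted, and `if_mkl_open_source_only` appends `-UUSE_MKL` and `-UUSE_CBLAS` after them. Since later flags win on the compiler command line, the macros end up undefined exactly when the open-source-only config is active. Roughly, ignoring the platform select, the resolved copts are:

```python
# Default build (MKL binary present):
["-fexceptions", "-DUSE_MKL", "-DUSE_CBLAS"]

# With --define=using_mkl_dnn_only=true (the trailing -U flags win):
["-fexceptions", "-DUSE_MKL", "-DUSE_CBLAS", "-UUSE_MKL", "-UUSE_CBLAS"]
```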
diff --git a/third_party/nanopb.BUILD b/third_party/nanopb.BUILD
new file mode 100644
index 0000000000..d21866911b
--- /dev/null
+++ b/third_party/nanopb.BUILD
@@ -0,0 +1,23 @@
+# Description:
+# Nanopb, a tiny ANSI C protobuf implementation for use on embedded devices.
+
+licenses(["notice"]) # zlib license
+
+exports_files(["LICENSE.txt"])
+
+cc_library(
+ name = "nanopb",
+ srcs = [
+ "pb_common.c",
+ "pb_decode.c",
+ "pb_encode.c",
+ ],
+ hdrs = [
+ "pb.h",
+ "pb_common.h",
+ "pb_decode.h",
+ "pb_encode.h",
+ ],
+ includes = ["."],
+ visibility = ["//visibility:public"],
+)
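
Assuming the workspace registers this build file under the repository name `nanopb` (the name is an assumption about the accompanying workspace setup, which is not shown in this hunk), a consumer depends on it like any other cc_library:

```python
cc_library(
    name = "proto_glue",  # hypothetical consumer
    srcs = ["proto_glue.c"],
    deps = ["@nanopb//:nanopb"],
)
```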
diff --git a/third_party/nasm.BUILD b/third_party/nasm.BUILD
index 341d58068b..89330eac54 100644
--- a/third_party/nasm.BUILD
+++ b/third_party/nasm.BUILD
@@ -8,45 +8,93 @@ exports_files(["LICENSE"])
cc_binary(
name = "nasm",
srcs = [
- "assemble.c",
- "assemble.h",
- "compiler.h",
- "crc64.c",
- "directiv.c",
- "directiv.h",
- "disp8.c",
- "disp8.h",
- "eval.c",
- "eval.h",
- "exprlib.c",
- "float.c",
- "float.h",
- "hashtbl.c",
- "hashtbl.h",
- "iflag.c",
- "iflag.h",
- "iflaggen.h",
- "ilog2.c",
- "insns.h",
- "insnsa.c",
- "insnsb.c",
- "insnsi.h",
- "labels.c",
- "labels.h",
- "lib/strlcpy.c",
- "listing.c",
- "listing.h",
- "macros.c",
- "md5.h",
- "md5c.c",
- "nasm.c",
- "nasm.h",
- "nasmlib.c",
- "nasmlib.h",
- "opflags.h",
+ "asm/assemble.c",
+ "asm/assemble.h",
+ "asm/directbl.c",
+ "asm/directiv.c",
+ "asm/directiv.h",
+ "asm/error.c",
+ "asm/eval.c",
+ "asm/eval.h",
+ "asm/exprdump.c",
+ "asm/exprlib.c",
+ "asm/float.c",
+ "asm/float.h",
+ "asm/labels.c",
+ "asm/listing.c",
+ "asm/listing.h",
+ "asm/nasm.c",
+ "asm/parser.c",
+ "asm/parser.h",
+ "asm/pptok.c",
+ "asm/pptok.h",
+ "asm/pragma.c",
+ "asm/preproc.c",
+ "asm/preproc.h",
+ "asm/preproc-nop.c",
+ "asm/quote.c",
+ "asm/quote.h",
+ "asm/rdstrnum.c",
+ "asm/segalloc.c",
+ "asm/stdscan.c",
+ "asm/stdscan.h",
+ "asm/strfunc.c",
+ "asm/tokens.h",
+ "asm/tokhash.c",
+ "common/common.c",
+ "config/unknown.h",
+ "disasm/disasm.c",
+ "disasm/disasm.h",
+ "disasm/sync.c",
+ "disasm/sync.h",
+ "include/compiler.h",
+ "include/disp8.h",
+ "include/error.h",
+ "include/hashtbl.h",
+ "include/iflag.h",
+ "include/insns.h",
+ "include/labels.h",
+ "include/md5.h",
+ "include/nasm.h",
+ "include/nasmint.h",
+ "include/nasmlib.h",
+ "include/opflags.h",
+ "include/perfhash.h",
+ "include/raa.h",
+ "include/rbtree.h",
+ "include/rdoff.h",
+ "include/saa.h",
+ "include/strlist.h",
+ "include/tables.h",
+ "include/ver.h",
+ "macros/macros.c",
+ "nasmlib/badenum.c",
+ "nasmlib/bsi.c",
+ "nasmlib/crc64.c",
+ "nasmlib/file.c",
+ "nasmlib/file.h",
+ "nasmlib/filename.c",
+ "nasmlib/hashtbl.c",
+ "nasmlib/ilog2.c",
+ "nasmlib/malloc.c",
+ "nasmlib/md5c.c",
+ "nasmlib/mmap.c",
+ "nasmlib/path.c",
+ "nasmlib/perfhash.c",
+ "nasmlib/raa.c",
+ "nasmlib/rbtree.c",
+ "nasmlib/readnum.c",
+ "nasmlib/realpath.c",
+ "nasmlib/saa.c",
+ "nasmlib/srcfile.c",
+ "nasmlib/string.c",
+ "nasmlib/strlist.c",
+ "nasmlib/ver.c",
+ "nasmlib/zerobuf.c",
"output/codeview.c",
"output/dwarf.h",
"output/elf.h",
+ "output/legacy.c",
"output/nulldbg.c",
"output/nullout.c",
"output/outaout.c",
@@ -56,9 +104,6 @@ cc_binary(
"output/outdbg.c",
"output/outelf.c",
"output/outelf.h",
- "output/outelf32.c",
- "output/outelf64.c",
- "output/outelfx32.c",
"output/outform.c",
"output/outform.h",
"output/outieee.c",
@@ -69,35 +114,31 @@ cc_binary(
"output/outrdf2.c",
"output/pecoff.h",
"output/stabs.h",
- "parser.c",
- "parser.h",
- "pptok.c",
- "pptok.h",
- "preproc.c",
- "preproc.h",
- "preproc-nop.c",
- "quote.c",
- "quote.h",
- "raa.c",
- "raa.h",
- "rbtree.c",
- "rbtree.h",
- "rdoff/rdoff.h",
- "realpath.c",
- "regflags.c",
- "regs.h",
- "regvals.c",
- "saa.c",
- "saa.h",
- "srcfile.c",
- "stdscan.c",
- "stdscan.h",
- "strfunc.c",
- "tables.h",
- "tokens.h",
- "tokhash.c",
- "ver.c",
+ "stdlib/snprintf.c",
+ "stdlib/strlcpy.c",
+ "stdlib/strnlen.c",
+ "stdlib/vsnprintf.c",
"version.h",
+ "x86/disp8.c",
+ "x86/iflag.c",
+ "x86/iflaggen.h",
+ "x86/insnsa.c",
+ "x86/insnsb.c",
+ "x86/insnsd.c",
+ "x86/insnsi.h",
+ "x86/insnsn.c",
+ "x86/regdis.c",
+ "x86/regdis.h",
+ "x86/regflags.c",
+ "x86/regs.c",
+ "x86/regs.h",
+ "x86/regvals.c",
+ ],
+ includes = [
+ "asm",
+ "include",
+ "output",
+ "x86",
],
copts = select({
":windows": [],
@@ -110,7 +151,10 @@ cc_binary(
defines = select({
":windows": [],
":windows_msvc": [],
- "//conditions:default": ["HAVE_SNPRINTF"],
+ "//conditions:default": [
+ "HAVE_SNPRINTF",
+ "HAVE_SYS_TYPES_H",
+ ],
}),
visibility = ["@jpeg//:__pkg__"],
)
diff --git a/third_party/nccl/nccl_configure.bzl b/third_party/nccl/nccl_configure.bzl
index 9dfcb18369..5d1ebf0686 100644
--- a/third_party/nccl/nccl_configure.bzl
+++ b/third_party/nccl/nccl_configure.bzl
@@ -47,10 +47,10 @@ alias(
)
"""
+# A local build links NCCL dynamically, so the license should not be included.
_NCCL_LOCAL_BUILD_TEMPLATE = """
filegroup(
name = "LICENSE",
- data = ["nccl/NCCL-SLA.txt"],
visibility = ["//visibility:public"],
)
diff --git a/third_party/repo.bzl b/third_party/repo.bzl
index cb67d3e961..5cb42691c5 100644
--- a/third_party/repo.bzl
+++ b/third_party/repo.bzl
@@ -16,7 +16,6 @@
_SINGLE_URL_WHITELIST = depset([
"arm_compiler",
- "ortools_archive",
])
def _is_windows(ctx):
@@ -36,6 +35,15 @@ def _get_env_var(ctx, name):
else:
return None
+# Checks if we should use the system lib instead of the bundled one
+def _use_system_lib(ctx, name):
+ syslibenv = _get_env_var(ctx, "TF_SYSTEM_LIBS")
+ if syslibenv:
+ for n in syslibenv.strip().split(","):
+ if n.strip() == name:
+ return True
+ return False
+
# Executes specified command with arguments and calls 'fail' if it exited with
# non-zero code
def _execute_and_check_ret_code(repo_ctx, cmd_and_args):
@@ -76,17 +84,28 @@ def _tf_http_archive(ctx):
"Even if you don't have permission to mirror the file, please " +
"put the correctly formatted mirror URL there anyway, because " +
"someone will come along shortly thereafter and mirror the file.")
- ctx.download_and_extract(
- ctx.attr.urls,
- "",
- ctx.attr.sha256,
- ctx.attr.type,
- ctx.attr.strip_prefix)
- if ctx.attr.delete:
- _apply_delete(ctx, ctx.attr.delete)
- if ctx.attr.patch_file != None:
- _apply_patch(ctx, ctx.attr.patch_file)
- if ctx.attr.build_file != None:
+
+ use_syslib = _use_system_lib(ctx, ctx.attr.name)
+ if not use_syslib:
+ ctx.download_and_extract(
+ ctx.attr.urls,
+ "",
+ ctx.attr.sha256,
+ ctx.attr.type,
+ ctx.attr.strip_prefix)
+ if ctx.attr.delete:
+ _apply_delete(ctx, ctx.attr.delete)
+ if ctx.attr.patch_file != None:
+ _apply_patch(ctx, ctx.attr.patch_file)
+
+ if use_syslib and ctx.attr.system_build_file != None:
+ # Use BUILD.bazel to avoid conflict with third party projects with
+ # BUILD or build (directory) underneath.
+ ctx.template("BUILD.bazel", ctx.attr.system_build_file, {
+ "%prefix%": ".." if _repos_are_siblings() else "external",
+ }, False)
+
+ elif ctx.attr.build_file != None:
# Use BUILD.bazel to avoid conflict with third party projects with
# BUILD or build (directory) underneath.
ctx.template("BUILD.bazel", ctx.attr.build_file, {
@@ -103,7 +122,11 @@ tf_http_archive = repository_rule(
"delete": attr.string_list(),
"patch_file": attr.label(),
"build_file": attr.label(),
- })
+ "system_build_file": attr.label(),
+ },
+ environ=[
+ "TF_SYSTEM_LIBS",
+ ])
"""Downloads and creates Bazel repos for dependencies.
This is a swappable replacement for both http_archive() and
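
With the new `system_build_file` attribute, a single workspace entry can carry both the download recipe and a system-library overlay: when the repository's name is listed in `TF_SYSTEM_LIBS`, the fetch is skipped and the system BUILD file is instantiated as BUILD.bazel instead. A sketch of such a call, with placeholder URL, checksum, and version rather than values from this diff:

```python
tf_http_archive(
    name = "zlib_archive",
    urls = ["https://example.com/zlib-1.2.11.tar.gz"],  # placeholder
    sha256 = "0000000000000000000000000000000000000000000000000000000000000000",  # placeholder
    strip_prefix = "zlib-1.2.11",
    build_file = "//third_party:zlib.BUILD",
    system_build_file = "//third_party/systemlibs:zlib.BUILD",
)
```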
diff --git a/third_party/systemlibs/BUILD b/third_party/systemlibs/BUILD
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/systemlibs/BUILD
diff --git a/third_party/systemlibs/BUILD.tpl b/third_party/systemlibs/BUILD.tpl
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/systemlibs/BUILD.tpl
diff --git a/third_party/systemlibs/astor.BUILD b/third_party/systemlibs/astor.BUILD
new file mode 100644
index 0000000000..497ec4bcea
--- /dev/null
+++ b/third_party/systemlibs/astor.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # New BSD
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+py_library(
+ name = "astor",
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/build_defs.bzl.tpl b/third_party/systemlibs/build_defs.bzl.tpl
new file mode 100644
index 0000000000..3faa46c581
--- /dev/null
+++ b/third_party/systemlibs/build_defs.bzl.tpl
@@ -0,0 +1,32 @@
+# -*- Python -*-
+"""Skylark macros for system libraries.
+"""
+
+SYSTEM_LIBS_ENABLED = %{syslibs_enabled}
+
+SYSTEM_LIBS_LIST = [
+%{syslibs_list}
+]
+
+
+def if_any_system_libs(a, b=[]):
+ """Conditional which evaluates to 'a' if any system libraries are configured."""
+ if SYSTEM_LIBS_ENABLED:
+ return a
+ else:
+ return b
+
+
+def if_system_lib(lib, a, b=[]):
+ """Conditional which evaluates to 'a' if we're using the system version of lib"""
+
+ if SYSTEM_LIBS_ENABLED and lib in SYSTEM_LIBS_LIST:
+ return a
+ else:
+ return b
+
+
+def if_not_system_lib(lib, a, b=[]):
+ """Conditional which evaluates to 'a' if we're using the system version of lib"""
+
+ return if_system_lib(lib, b, a)
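
Once `syslibs_configure` (added below) has instantiated this template as `@local_config_syslibs//:build_defs.bzl`, BUILD files can branch per library. A hedged sketch with a hypothetical target and define:

```python
load("@local_config_syslibs//:build_defs.bzl", "if_system_lib")

cc_library(
    name = "json_io",  # hypothetical target
    copts = if_system_lib(
        "jsoncpp_git",
        ["-DTF_SYSTEM_JSONCPP"],  # hypothetical define, set only for the system lib
    ),
    deps = ["@jsoncpp_git//:jsoncpp"],
)
```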
diff --git a/third_party/systemlibs/curl.BUILD b/third_party/systemlibs/curl.BUILD
new file mode 100644
index 0000000000..c5f125caa9
--- /dev/null
+++ b/third_party/systemlibs/curl.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # MIT/X derivative license
+
+filegroup(
+ name = "COPYING",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "curl",
+ linkopts = ["-lcurl"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/cython.BUILD b/third_party/systemlibs/cython.BUILD
new file mode 100644
index 0000000000..1d52587676
--- /dev/null
+++ b/third_party/systemlibs/cython.BUILD
@@ -0,0 +1,13 @@
+licenses(["notice"]) # Apache-2.0
+
+genrule(
+ name = "lncython",
+ outs = ["cython"],
+ cmd = "ln -s $$(which cython) $@",
+)
+
+sh_binary(
+ name = "cython_binary",
+ srcs = ["cython"],
+ visibility = ["//visibility:public"],
+)
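
The `ln -s $$(which cython) $@` idiom here recurs in the flatbuffers, grpc, and swig overlays below: `$$` escapes the dollar sign from Bazel's Make-variable expansion, so the shell actually runs `ln -s $(which cython) <output>`, turning whatever binary is first on the host PATH into a declared Bazel output that sh_binary can wrap.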
diff --git a/third_party/systemlibs/flatbuffers.BUILD b/third_party/systemlibs/flatbuffers.BUILD
new file mode 100644
index 0000000000..14fceada82
--- /dev/null
+++ b/third_party/systemlibs/flatbuffers.BUILD
@@ -0,0 +1,38 @@
+licenses(["notice"]) # Apache 2.0
+
+filegroup(
+ name = "LICENSE.txt",
+ visibility = ["//visibility:public"],
+)
+
+# Public flatc library to compile flatbuffer files at runtime.
+cc_library(
+ name = "flatbuffers",
+ linkopts = ["-lflatbuffers"],
+ visibility = ["//visibility:public"],
+)
+
+# Public flatc compiler library.
+cc_library(
+ name = "flatc_library",
+ linkopts = ["-lflatbuffers"],
+ visibility = ["//visibility:public"],
+)
+
+genrule(
+ name = "lnflatc",
+ outs = ["flatc.bin"],
+ cmd = "ln -s $$(which flatc) $@",
+)
+
+# Public flatc compiler.
+sh_binary(
+ name = "flatc",
+ srcs = ["flatc.bin"],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "runtime_cc",
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/gif.BUILD b/third_party/systemlibs/gif.BUILD
new file mode 100644
index 0000000000..5eb2c918ba
--- /dev/null
+++ b/third_party/systemlibs/gif.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # MIT
+
+filegroup(
+ name = "COPYING",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "gif",
+ linkopts = ["-lgif"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/grpc.BUILD b/third_party/systemlibs/grpc.BUILD
new file mode 100644
index 0000000000..fd90eb0dd3
--- /dev/null
+++ b/third_party/systemlibs/grpc.BUILD
@@ -0,0 +1,54 @@
+licenses(["notice"]) # Apache v2
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "grpc",
+ linkopts = ["-lgrpc"],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "grpc++",
+ linkopts = ["-lgrpc++"],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "grpc_unsecure",
+ linkopts = ["-lgrpc_unsecure"],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "grpc++_unsecure",
+ linkopts = ["-lgrpc++_unsecure"],
+ visibility = ["//visibility:public"],
+)
+
+genrule(
+ name = "ln_grpc_cpp_plugin",
+ outs = ["grpc_cpp_plugin.bin"],
+ cmd = "ln -s $$(which grpc_cpp_plugin) $@",
+)
+
+sh_binary(
+ name = "grpc_cpp_plugin",
+ srcs = ["grpc_cpp_plugin.bin"],
+ visibility = ["//visibility:public"],
+)
+
+genrule(
+ name = "ln_grpc_python_plugin",
+ outs = ["grpc_python_plugin.bin"],
+ cmd = "ln -s $$(which grpc_python_plugin) $@",
+)
+
+sh_binary(
+ name = "grpc_python_plugin",
+ srcs = ["grpc_python_plugin.bin"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/jemalloc.BUILD b/third_party/systemlibs/jemalloc.BUILD
new file mode 100644
index 0000000000..6a48d582ba
--- /dev/null
+++ b/third_party/systemlibs/jemalloc.BUILD
@@ -0,0 +1,30 @@
+licenses(["notice"]) # BSD
+
+filegroup(
+ name = "COPYING",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "jemalloc_headers",
+ defines = [
+ "jemalloc_posix_memalign=posix_memalign",
+ "jemalloc_malloc=malloc",
+ "jemalloc_realloc=realloc",
+ "jemalloc_free=free",
+ ],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "jemalloc_impl",
+ linkopts = ["-ljemalloc"],
+ defines = [
+ "jemalloc_posix_memalign=posix_memalign",
+ "jemalloc_malloc=malloc",
+ "jemalloc_realloc=realloc",
+ "jemalloc_free=free",
+ ],
+ visibility = ["//visibility:public"],
+ deps = [":jemalloc_headers"],
+)
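
The `jemalloc_posix_memalign=posix_memalign` style defines above remap the `jemalloc_*` names that the bundled jemalloc build exposes onto the plain libc symbols, so code written against the bundled naming compiles unchanged, while `-ljemalloc` on the impl target supplies the allocator at link time.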
diff --git a/third_party/systemlibs/jpeg.BUILD b/third_party/systemlibs/jpeg.BUILD
new file mode 100644
index 0000000000..f4f52da9bd
--- /dev/null
+++ b/third_party/systemlibs/jpeg.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # custom notice-style license, see LICENSE.md
+
+filegroup(
+ name = "LICENSE.md",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "jpeg",
+ linkopts = ["-ljpeg"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/jsoncpp.BUILD b/third_party/systemlibs/jsoncpp.BUILD
new file mode 100644
index 0000000000..cf91917cfb
--- /dev/null
+++ b/third_party/systemlibs/jsoncpp.BUILD
@@ -0,0 +1,37 @@
+licenses(["unencumbered"]) # Public Domain or MIT
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+HEADERS = [
+ "include/json/autolink.h",
+ "include/json/config.h",
+ "include/json/features.h",
+ "include/json/forwards.h",
+ "include/json/json.h",
+ "include/json/reader.h",
+ "include/json/value.h",
+ "include/json/version.h",
+ "include/json/writer.h",
+]
+
+genrule(
+ name = "link_headers",
+ outs = HEADERS,
+ cmd = """
+ for i in $(OUTS); do
+ i=$${i##*/}
+ ln -vsf /usr/include/jsoncpp/json/$$i $(@D)/include/json/$$i
+ done
+ """,
+)
+
+cc_library(
+ name = "jsoncpp",
+ hdrs = HEADERS,
+ includes = ["."],
+ linkopts = ["-ljsoncpp"],
+ visibility = ["//visibility:public"],
+)
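
Unlike the simpler overlays, jsoncpp also has to satisfy `#include "json/..."` paths, so `link_headers` symlinks the distribution's headers from /usr/include/jsoncpp into the repository's own include/ tree (with the same `$$` escaping as the symlink genrules above), and the cc_library then exports them as hdrs while `-ljsoncpp` provides the implementation.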
diff --git a/third_party/systemlibs/lmdb.BUILD b/third_party/systemlibs/lmdb.BUILD
new file mode 100644
index 0000000000..6177b095ec
--- /dev/null
+++ b/third_party/systemlibs/lmdb.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # OpenLDAP Public License
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "lmdb",
+ linkopts = ["-llmdb"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/nasm.BUILD b/third_party/systemlibs/nasm.BUILD
new file mode 100644
index 0000000000..10ef8d8832
--- /dev/null
+++ b/third_party/systemlibs/nasm.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # BSD 2-clause
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+sh_binary(
+ name = "nasm",
+ srcs = ["nasm"],
+ visibility = ["@jpeg//:__pkg__"],
+)
diff --git a/third_party/systemlibs/pcre.BUILD b/third_party/systemlibs/pcre.BUILD
new file mode 100644
index 0000000000..df74238847
--- /dev/null
+++ b/third_party/systemlibs/pcre.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # BSD
+
+filegroup(
+ name = "LICENCE",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "pcre",
+ linkopts = ["-lpcre"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/png.BUILD b/third_party/systemlibs/png.BUILD
new file mode 100644
index 0000000000..fc6b6f2d8b
--- /dev/null
+++ b/third_party/systemlibs/png.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # BSD/MIT-like license
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "png",
+ linkopts = ["-lpng"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/re2.BUILD b/third_party/systemlibs/re2.BUILD
new file mode 100644
index 0000000000..c18e252dbc
--- /dev/null
+++ b/third_party/systemlibs/re2.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # BSD/MIT-like license
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "re2",
+ linkopts = ["-lre2"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/six.BUILD b/third_party/systemlibs/six.BUILD
new file mode 100644
index 0000000000..ff9b1a540b
--- /dev/null
+++ b/third_party/systemlibs/six.BUILD
@@ -0,0 +1,11 @@
+licenses(["notice"]) # MIT
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+py_library(
+ name = "six",
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/snappy.BUILD b/third_party/systemlibs/snappy.BUILD
new file mode 100644
index 0000000000..fd2db9e2df
--- /dev/null
+++ b/third_party/systemlibs/snappy.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # BSD 3-Clause
+
+filegroup(
+ name = "COPYING",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "snappy",
+ linkopts = ["-lsnappy"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/sqlite.BUILD b/third_party/systemlibs/sqlite.BUILD
new file mode 100644
index 0000000000..20ee1ebbef
--- /dev/null
+++ b/third_party/systemlibs/sqlite.BUILD
@@ -0,0 +1,15 @@
+licenses(["unencumbered"]) # Public Domain
+
+# Production build of SQLite library that's baked into TensorFlow.
+cc_library(
+ name = "org_sqlite",
+ linkopts = ["-lsqlite3"],
+ visibility = ["//visibility:public"],
+)
+
+# This is a Copybara sync helper for Google.
+py_library(
+ name = "python",
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/swig.BUILD b/third_party/systemlibs/swig.BUILD
new file mode 100644
index 0000000000..4c9b74dadb
--- /dev/null
+++ b/third_party/systemlibs/swig.BUILD
@@ -0,0 +1,23 @@
+licenses(["restricted"]) # GPLv3
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+filegroup(
+ name = "templates",
+ visibility = ["//visibility:public"],
+)
+
+genrule(
+ name = "lnswiglink",
+ outs = ["swiglink"],
+ cmd = "ln -s $$(which swig) $@",
+)
+
+sh_binary(
+ name = "swig",
+ srcs = ["swiglink"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/syslibs_configure.bzl b/third_party/systemlibs/syslibs_configure.bzl
new file mode 100644
index 0000000000..07a44c317e
--- /dev/null
+++ b/third_party/systemlibs/syslibs_configure.bzl
@@ -0,0 +1,160 @@
+# -*- Python -*-
+"""Repository rule for system library autoconfiguration.
+
+`syslibs_configure` depends on the following environment variables:
+
+ * `TF_SYSTEM_LIBS`: list of third party dependencies that should use
+ the system version instead
+"""
+
+_TF_SYSTEM_LIBS="TF_SYSTEM_LIBS"
+
+VALID_LIBS=[
+ "astor_archive",
+ "com_googlesource_code_re2",
+ "curl",
+ "cython",
+ "flatbuffers",
+ "gif_archive",
+ "grpc",
+ "jemalloc",
+ "jpeg",
+ "jsoncpp_git",
+ "lmdb",
+ "nasm",
+ "org_sqlite",
+ "pcre",
+ "png_archive",
+ "six_archive",
+ "snappy",
+ "swig",
+ "termcolor_archive",
+ "zlib_archive",
+]
+
+
+def auto_configure_fail(msg):
+ """Output failure message when syslibs configuration fails."""
+ red = "\033[0;31m"
+ no_color = "\033[0m"
+ fail("\n%sSystem Library Configuration Error:%s %s\n" % (red, no_color, msg))
+
+
+def _is_windows(repository_ctx):
+ """Returns true if the host operating system is windows."""
+ os_name = repository_ctx.os.name.lower()
+ if os_name.find("windows") != -1:
+ return True
+ return False
+
+
+def _enable_syslibs(repository_ctx):
+ s = repository_ctx.os.environ.get(_TF_SYSTEM_LIBS, '').strip()
+ if not _is_windows(repository_ctx) and s != None and s != '':
+ return True
+ return False
+
+
+def _get_system_lib_list(repository_ctx):
+ """Gets the list of deps that should use the system lib.
+
+ Args:
+ repository_ctx: The repository context.
+
+  Returns:
+    A list of library names.
+ """
+ if _TF_SYSTEM_LIBS not in repository_ctx.os.environ:
+ return []
+
+ libenv = repository_ctx.os.environ[_TF_SYSTEM_LIBS].strip()
+ libs = []
+
+ for lib in list(libenv.split(',')):
+ lib = lib.strip()
+ if lib == "":
+ continue
+ if lib not in VALID_LIBS:
+ auto_configure_fail("Invalid system lib set: %s" % lib)
+ return []
+ libs.append(lib)
+
+ return libs
+
+
+def _format_system_lib_list(repository_ctx):
+ """Formats the list of deps that should use the system lib.
+
+ Args:
+ repository_ctx: The repository context.
+
+  Returns:
+    The library names formatted as a string, one quoted name per line.
+ """
+ libs = _get_system_lib_list(repository_ctx)
+ ret = ''
+ for lib in libs:
+ ret += "'%s',\n" % lib
+
+ return ret
+
+
+def _tpl(repository_ctx, tpl, substitutions={}, out=None):
+ if not out:
+ out = tpl.replace(":", "")
+ repository_ctx.template(
+ out,
+ Label("//third_party/systemlibs%s.tpl" % tpl),
+ substitutions,
+ False)
+
+
+def _create_dummy_repository(repository_ctx):
+ """Creates the dummy repository to build with all bundled libraries."""
+
+ _tpl(repository_ctx, ":BUILD")
+ _tpl(repository_ctx, ":build_defs.bzl",
+ {
+ "%{syslibs_enabled}": 'False',
+ "%{syslibs_list}": '',
+ })
+
+
+def _create_local_repository(repository_ctx):
+ """Creates the repository to build with system libraries."""
+
+ _tpl(repository_ctx, ":BUILD")
+ _tpl(repository_ctx, ":build_defs.bzl",
+ {
+ "%{syslibs_enabled}": 'True',
+ "%{syslibs_list}": _format_system_lib_list(repository_ctx),
+ })
+
+
+def _syslibs_autoconf_impl(repository_ctx):
+ """Implementation of the syslibs_configure repository rule."""
+ if not _enable_syslibs(repository_ctx):
+ _create_dummy_repository(repository_ctx)
+ else:
+ _create_local_repository(repository_ctx)
+
+
+syslibs_configure = repository_rule(
+ implementation = _syslibs_autoconf_impl,
+ environ = [
+ _TF_SYSTEM_LIBS,
+ ],
+)
+
+"""Configures the build to link to system libraries
+instead of using bundled versions.
+
+Add the following to your WORKSPACE FILE:
+
+```python
+syslibs_configure(name = "local_config_syslibs")
+```
+
+Args:
+ name: A unique name for this workspace rule.
+"""
diff --git a/third_party/systemlibs/termcolor.BUILD b/third_party/systemlibs/termcolor.BUILD
new file mode 100644
index 0000000000..915eb621d5
--- /dev/null
+++ b/third_party/systemlibs/termcolor.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # MIT
+
+filegroup(
+ name = "COPYING.txt",
+ visibility = ["//visibility:public"],
+)
+
+py_library(
+ name = "termcolor",
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/zlib.BUILD b/third_party/systemlibs/zlib.BUILD
new file mode 100644
index 0000000000..69462ae6cb
--- /dev/null
+++ b/third_party/systemlibs/zlib.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # BSD/MIT-like license (for zlib)
+
+filegroup(
+ name = "zlib.h",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "zlib",
+ linkopts = ["-lz"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/toolchains/BUILD b/third_party/toolchains/BUILD
new file mode 100644
index 0000000000..fc3183a754
--- /dev/null
+++ b/third_party/toolchains/BUILD
@@ -0,0 +1,22 @@
+licenses(["restricted"])
+
+package(default_visibility = ["//visibility:public"])
+
+# Platform for use with remote execution with
+# custom container based off RBE Ubuntu16_04
+# http://gcr.io/cloud-marketplace/google/rbe-ubuntu16-04
+# Built with //tensorflow/tools/ci_build/Dockerfile.rbe.cpu
+platform(
+ name = "rbe_ubuntu16_04-tf",
+ constraint_values = [
+ "@bazel_tools//platforms:x86_64",
+ "@bazel_tools//platforms:linux",
+ "@bazel_tools//tools/cpp:clang",
+ "@bazel_toolchains//constraints:xenial",
+ ],
+ remote_execution_properties = """
+ properties: {
+ name: "container-image"
+ value:"docker://gcr.io/asci-toolchain/nosla-ubuntu16_04-tf@sha256:800a7b68cabef15419695c188ed33ed70adf678c2371b97b236f3ae26c38274d"
+ }""",
+)
diff --git a/third_party/toolchains/clang6/CROSSTOOL.tpl b/third_party/toolchains/clang6/CROSSTOOL.tpl
index 6b7e5a8808..ffba9850bb 100644
--- a/third_party/toolchains/clang6/CROSSTOOL.tpl
+++ b/third_party/toolchains/clang6/CROSSTOOL.tpl
@@ -76,9 +76,6 @@ toolchain {
# This adds a little bit more durability to our Clang build.
#
- # At the moment, this only only be needed for:
- # - add_boringssl_s390x.patch: --Wa,--noexecstack
- #
# Folks who do maintenance work on TF Bazel Clang should consider
# commenting out these lines, while doing that work, to gain a better
# understanding of what the intersection of support looks like between GCC
diff --git a/tools/bazel.rc b/tools/bazel.rc
index 1c1e6afb65..913c4bc333 100644
--- a/tools/bazel.rc
+++ b/tools/bazel.rc
@@ -27,6 +27,10 @@ build --define framework_shared_object=true
build:mkl --define=using_mkl=true
build:mkl -c opt
+# This config option is used to enable MKL-DNN open source library only,
+# without depending on MKL binary version.
+build:mkl_open_source_only --define=using_mkl_dnn_only=true
+
build:download_clang --crosstool_top=@local_config_download_clang//:toolchain
build:download_clang --define=using_clang=true
@@ -36,8 +40,6 @@ build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true
build:cuda_clang --crosstool_top=@local_config_cuda//crosstool:toolchain
build:cuda_clang --define=using_cuda=true --define=using_cuda_clang=true --define=using_clang=true
-build:win-cuda --define=using_cuda=true --define=using_cuda_nvcc=true
-
build:mkl --define=using_mkl=true
build:sycl --crosstool_top=@local_config_sycl//crosstool:toolchain
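
With the bazel.rc entry above in place, an MKL-DNN-only build is a matter of passing the config, for example `bazel build --config=mkl_open_source_only //tensorflow/tools/pip_package:build_pip_package` (the target is illustrative); that sets `--define=using_mkl_dnn_only=true`, which in turn flips the `using_mkl_dnn_only` config_setting and the `if_mkl_open_source_only` branches introduced earlier in this diff.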